1 /* Copyright (C) 2017 The Android Open Source Project 2 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 3 * 4 * This file implements interfaces from the file jvmti.h. This implementation 5 * is licensed under the same terms as the file jvmti.h. The 6 * copyright and license information for the file jvmti.h follows. 7 * 8 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. 9 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 10 * 11 * This code is free software; you can redistribute it and/or modify it 12 * under the terms of the GNU General Public License version 2 only, as 13 * published by the Free Software Foundation. Oracle designates this 14 * particular file as subject to the "Classpath" exception as provided 15 * by Oracle in the LICENSE file that accompanied this code. 16 * 17 * This code is distributed in the hope that it will be useful, but WITHOUT 18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 20 * version 2 for more details (a copy is included in the LICENSE file that 21 * accompanied this code). 22 * 23 * You should have received a copy of the GNU General Public License version 24 * 2 along with this work; if not, write to the Free Software Foundation, 25 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 26 * 27 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 28 * or visit www.oracle.com if you need additional information or have any 29 * questions. 
 */

#include <functional>
#include <iosfwd>
#include <mutex>

#include "deopt_manager.h"

#include "art_jvmti.h"
#include "art_method-inl.h"
#include "base/mutex-inl.h"
#include "base/pointer_size.h"
#include "dex/dex_file_annotations.h"
#include "dex/modifiers.h"
#include "events-inl.h"
#include "gc/collector_type.h"
#include "gc/heap.h"
#include "gc/scoped_gc_critical_section.h"
#include "instrumentation.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "jni/jni_internal.h"
#include "mirror/class-inl.h"
#include "mirror/object_array-inl.h"
#include "nativehelper/scoped_local_ref.h"
#include "oat/oat_file_manager.h"
#include "read_barrier_config.h"
#include "runtime_callbacks.h"
#include "scoped_thread_state_change-inl.h"
#include "scoped_thread_state_change.h"
#include "thread-current-inl.h"
#include "thread_list.h"
#include "ti_phase.h"

namespace openjdkjvmti {

// Instrumentation key this DeoptManager uses when enabling entry/exit hooks and disabling
// deoptimization on the runtime's Instrumentation (see AddDeoptimizationRequester / Shutdown).
static constexpr const char* kInstrumentationKey = "JVMTI_DeoptRequester";

// We could make this much more selective in the future so we only return true when we
// actually care about the method at this time (ie active frames had locals changed). For now we
// just assume that if anything has changed any frame's locals we care about all methods. This only
// impacts whether we are able to OSR or not so maybe not really important to maintain frame
// specific information.
HaveLocalsChanged()73 bool JvmtiMethodInspectionCallback::HaveLocalsChanged() { 74 return manager_->HaveLocalsChanged(); 75 } 76 DeoptManager()77 DeoptManager::DeoptManager() 78 : deoptimization_status_lock_("JVMTI_DeoptimizationStatusLock", 79 static_cast<art::LockLevel>( 80 art::LockLevel::kClassLinkerClassesLock + 1)), 81 deoptimization_condition_("JVMTI_DeoptimizationCondition", deoptimization_status_lock_), 82 performing_deoptimization_(false), 83 global_deopt_count_(0), 84 deopter_count_(0), 85 breakpoint_status_lock_("JVMTI_BreakpointStatusLock", 86 static_cast<art::LockLevel>(art::LockLevel::kAbortLock + 1)), 87 inspection_callback_(this), 88 set_local_variable_called_(false) { } 89 Setup()90 void DeoptManager::Setup() { 91 art::ScopedThreadStateChange stsc(art::Thread::Current(), 92 art::ThreadState::kWaitingForDebuggerToAttach); 93 art::ScopedSuspendAll ssa("Add method Inspection Callback"); 94 art::RuntimeCallbacks* callbacks = art::Runtime::Current()->GetRuntimeCallbacks(); 95 callbacks->AddMethodInspectionCallback(&inspection_callback_); 96 } 97 DumpDeoptInfo(art::Thread * self,std::ostream & stream)98 void DeoptManager::DumpDeoptInfo(art::Thread* self, std::ostream& stream) { 99 art::ScopedObjectAccess soa(self); 100 art::MutexLock mutll(self, *art::Locks::thread_list_lock_); 101 art::MutexLock mudsl(self, deoptimization_status_lock_); 102 art::MutexLock mubsl(self, breakpoint_status_lock_); 103 stream << "Deoptimizer count: " << deopter_count_ << "\n"; 104 stream << "Global deopt count: " << global_deopt_count_ << "\n"; 105 stream << "Can perform OSR: " << !set_local_variable_called_.load() << "\n"; 106 for (const auto& [bp, loc] : this->breakpoint_status_) { 107 stream << "Breakpoint: " << bp->PrettyMethod() << " @ 0x" << std::hex << loc << "\n"; 108 } 109 struct DumpThreadDeoptCount : public art::Closure { 110 public: 111 DumpThreadDeoptCount(std::ostream& stream, std::mutex& mu) 112 : cnt_(0), stream_(stream), mu_(mu) {} 113 void Run(art::Thread* 
self) override { 114 { 115 std::lock_guard<std::mutex> lg(mu_); 116 std::string name; 117 self->GetThreadName(name); 118 stream_ << "Thread " << name << " (id: " << std::dec << self->GetThreadId() 119 << ") force interpreter count " << self->ForceInterpreterCount() << "\n"; 120 } 121 // Increment this after unlocking the mutex so we won't race its destructor. 122 cnt_++; 123 } 124 125 void WaitForCount(size_t threads) { 126 while (cnt_.load() != threads) { 127 sched_yield(); 128 } 129 } 130 131 private: 132 std::atomic<size_t> cnt_; 133 std::ostream& stream_; 134 std::mutex& mu_; 135 }; 136 137 std::mutex mu; 138 DumpThreadDeoptCount dtdc(stream, mu); 139 auto func = [](art::Thread* thread, void* ctx) { 140 reinterpret_cast<DumpThreadDeoptCount*>(ctx)->Run(thread); 141 }; 142 art::Runtime::Current()->GetThreadList()->ForEach(func, &dtdc); 143 } 144 FinishSetup()145 void DeoptManager::FinishSetup() { 146 art::Thread* self = art::Thread::Current(); 147 art::Runtime* runtime = art::Runtime::Current(); 148 if (runtime->IsJavaDebuggable()) { 149 return; 150 } 151 152 // See if we can enable all JVMTI functions. 153 if (PhaseUtil::GetPhaseUnchecked() == JVMTI_PHASE_ONLOAD) { 154 // We are still early enough to change the compiler options and get full JVMTI support. 155 LOG(INFO) << "Openjdkjvmti plugin loaded on a non-debuggable runtime. Changing runtime to " 156 << "debuggable state. Please pass '--debuggable' to dex2oat and " 157 << "'-Xcompiler-option --debuggable' to dalvikvm in the future."; 158 DCHECK(runtime->GetJit() == nullptr) << "Jit should not be running yet!"; 159 art::ScopedSuspendAll ssa(__FUNCTION__); 160 // TODO check if we need to hold deoptimization_status_lock_ here. 
161 art::MutexLock mu(self, deoptimization_status_lock_); 162 runtime->AddCompilerOption("--debuggable"); 163 runtime->SetRuntimeDebugState(art::Runtime::RuntimeDebugState::kJavaDebuggableAtInit); 164 runtime->DeoptimizeBootImage(); 165 return; 166 } 167 168 // Runtime has already started in non-debuggable mode. Only kArtTiVersion agents can be 169 // retrieved and they will all be best-effort. 170 LOG(WARNING) << "Openjdkjvmti plugin was loaded on a non-debuggable Runtime. Plugin was " 171 << "loaded too late to change runtime state to support all capabilities. Only " 172 << "kArtTiVersion (0x" << std::hex << kArtTiVersion << ") environments are " 173 << "available. Some functionality might not work properly."; 174 175 // Transition the runtime to debuggable: 176 // 1. Wait for any background verification tasks to finish. We don't support 177 // background verification after moving to debuggable state. 178 runtime->GetOatFileManager().WaitForBackgroundVerificationTasksToFinish(); 179 180 // Do the transition in ScopedJITSuspend, so we don't start any JIT compilations 181 // before the transition to debuggable is finished. 182 art::jit::ScopedJitSuspend suspend_jit; 183 art::ScopedSuspendAll ssa(__FUNCTION__); 184 185 // 2. Discard any JITed code that was generated before, since they would be 186 // compiled without debug support. 187 art::jit::Jit* jit = runtime->GetJit(); 188 if (jit != nullptr) { 189 jit->GetCodeCache()->InvalidateAllCompiledCode(); 190 jit->GetCodeCache()->TransitionToDebuggable(); 191 jit->GetJitCompiler()->SetDebuggableCompilerOption(true); 192 } 193 194 // 3. Change the state to JavaDebuggable, so that debug features can be 195 // enabled from now on. 196 runtime->SetRuntimeDebugState(art::Runtime::RuntimeDebugState::kJavaDebuggable); 197 198 // 4. Update all entrypoints to avoid using any AOT code. 
199 runtime->GetInstrumentation()->UpdateEntrypointsForDebuggable(); 200 } 201 MethodHasBreakpoints(art::ArtMethod * method)202 bool DeoptManager::MethodHasBreakpoints(art::ArtMethod* method) { 203 art::MutexLock lk(art::Thread::Current(), breakpoint_status_lock_); 204 return MethodHasBreakpointsLocked(method); 205 } 206 MethodHasBreakpointsLocked(art::ArtMethod * method)207 bool DeoptManager::MethodHasBreakpointsLocked(art::ArtMethod* method) { 208 auto elem = breakpoint_status_.find(method); 209 return elem != breakpoint_status_.end() && elem->second != 0; 210 } 211 RemoveDeoptimizeAllMethods()212 void DeoptManager::RemoveDeoptimizeAllMethods() { 213 art::Thread* self = art::Thread::Current(); 214 art::ScopedThreadSuspension sts(self, art::ThreadState::kSuspended); 215 deoptimization_status_lock_.ExclusiveLock(self); 216 RemoveDeoptimizeAllMethodsLocked(self); 217 } 218 AddDeoptimizeAllMethods()219 void DeoptManager::AddDeoptimizeAllMethods() { 220 art::Thread* self = art::Thread::Current(); 221 art::ScopedThreadSuspension sts(self, art::ThreadState::kSuspended); 222 deoptimization_status_lock_.ExclusiveLock(self); 223 AddDeoptimizeAllMethodsLocked(self); 224 } 225 AddMethodBreakpoint(art::ArtMethod * method)226 void DeoptManager::AddMethodBreakpoint(art::ArtMethod* method) { 227 DCHECK(method->IsInvokable()); 228 DCHECK(!method->IsProxyMethod()) << method->PrettyMethod(); 229 DCHECK(!method->IsNative()) << method->PrettyMethod(); 230 231 art::Thread* self = art::Thread::Current(); 232 method = method->GetCanonicalMethod(); 233 bool is_default = method->IsDefault(); 234 235 art::ScopedThreadSuspension sts(self, art::ThreadState::kSuspended); 236 deoptimization_status_lock_.ExclusiveLock(self); 237 { 238 breakpoint_status_lock_.ExclusiveLock(self); 239 240 DCHECK_GT(deopter_count_, 0u) << "unexpected deotpimization request"; 241 242 if (MethodHasBreakpointsLocked(method)) { 243 // Don't need to do anything extra. 
244 breakpoint_status_[method]++; 245 // Another thread might be deoptimizing the very method we just added new breakpoints for. 246 // Wait for any deopts to finish before moving on. 247 breakpoint_status_lock_.ExclusiveUnlock(self); 248 WaitForDeoptimizationToFinish(self); 249 return; 250 } 251 breakpoint_status_[method] = 1; 252 breakpoint_status_lock_.ExclusiveUnlock(self); 253 } 254 auto instrumentation = art::Runtime::Current()->GetInstrumentation(); 255 if (instrumentation->IsForcedInterpretOnly()) { 256 // We are already interpreting everything so no need to do anything. 257 deoptimization_status_lock_.ExclusiveUnlock(self); 258 return; 259 } else if (is_default) { 260 AddDeoptimizeAllMethodsLocked(self); 261 } else { 262 PerformLimitedDeoptimization(self, method); 263 } 264 } 265 RemoveMethodBreakpoint(art::ArtMethod * method)266 void DeoptManager::RemoveMethodBreakpoint(art::ArtMethod* method) { 267 DCHECK(method->IsInvokable()) << method->PrettyMethod(); 268 DCHECK(!method->IsProxyMethod()) << method->PrettyMethod(); 269 DCHECK(!method->IsNative()) << method->PrettyMethod(); 270 271 art::Thread* self = art::Thread::Current(); 272 method = method->GetCanonicalMethod(); 273 bool is_default = method->IsDefault(); 274 275 art::ScopedThreadSuspension sts(self, art::ThreadState::kSuspended); 276 // Ideally we should do a ScopedSuspendAll right here to get the full mutator_lock_ that we might 277 // need but since that is very heavy we will instead just use a condition variable to make sure we 278 // don't race with ourselves. 
279 deoptimization_status_lock_.ExclusiveLock(self); 280 bool is_last_breakpoint; 281 { 282 art::MutexLock mu(self, breakpoint_status_lock_); 283 284 DCHECK_GT(deopter_count_, 0u) << "unexpected deotpimization request"; 285 DCHECK(MethodHasBreakpointsLocked(method)) << "Breakpoint on a method was removed without " 286 << "breakpoints present!"; 287 breakpoint_status_[method] -= 1; 288 is_last_breakpoint = (breakpoint_status_[method] == 0); 289 } 290 auto instrumentation = art::Runtime::Current()->GetInstrumentation(); 291 if (UNLIKELY(instrumentation->IsForcedInterpretOnly())) { 292 // We don't need to do anything since we are interpreting everything anyway. 293 deoptimization_status_lock_.ExclusiveUnlock(self); 294 return; 295 } else if (is_last_breakpoint) { 296 if (UNLIKELY(is_default)) { 297 RemoveDeoptimizeAllMethodsLocked(self); 298 } else { 299 PerformLimitedUndeoptimization(self, method); 300 } 301 } else { 302 // Another thread might be deoptimizing the very methods we just removed breakpoints from. Wait 303 // for any deopts to finish before moving on. 304 WaitForDeoptimizationToFinish(self); 305 } 306 } 307 WaitForDeoptimizationToFinishLocked(art::Thread * self)308 void DeoptManager::WaitForDeoptimizationToFinishLocked(art::Thread* self) { 309 while (performing_deoptimization_) { 310 deoptimization_condition_.Wait(self); 311 } 312 } 313 WaitForDeoptimizationToFinish(art::Thread * self)314 void DeoptManager::WaitForDeoptimizationToFinish(art::Thread* self) { 315 WaitForDeoptimizationToFinishLocked(self); 316 deoptimization_status_lock_.ExclusiveUnlock(self); 317 } 318 319 // Users should make sure that only gc-critical-section safe code is used while a 320 // ScopedDeoptimizationContext exists. 
321 class ScopedDeoptimizationContext : public art::ValueObject { 322 public: ScopedDeoptimizationContext(art::Thread * self,DeoptManager * deopt)323 ScopedDeoptimizationContext(art::Thread* self, DeoptManager* deopt) 324 RELEASE(deopt->deoptimization_status_lock_) 325 ACQUIRE(art::Locks::mutator_lock_) 326 ACQUIRE(art::Roles::uninterruptible_) 327 : self_(self), 328 deopt_(deopt), 329 critical_section_(self_, "JVMTI Deoptimizing methods"), 330 uninterruptible_cause_(nullptr) { 331 deopt_->WaitForDeoptimizationToFinishLocked(self_); 332 DCHECK(!deopt->performing_deoptimization_) 333 << "Already performing deoptimization on another thread!"; 334 // Use performing_deoptimization_ to keep track of the lock. 335 deopt_->performing_deoptimization_ = true; 336 deopt_->deoptimization_status_lock_.Unlock(self_); 337 uninterruptible_cause_ = critical_section_.Enter(art::gc::kGcCauseInstrumentation, 338 art::gc::kCollectorTypeCriticalSection); 339 art::Runtime::Current()->GetThreadList()->SuspendAll("JMVTI Deoptimizing methods", 340 /*long_suspend=*/ false); 341 } 342 343 ~ScopedDeoptimizationContext() RELEASE(art::Locks::mutator_lock_)344 RELEASE(art::Locks::mutator_lock_) 345 RELEASE(art::Roles::uninterruptible_) { 346 // Can be suspended again. 347 critical_section_.Exit(uninterruptible_cause_); 348 // Release the mutator lock. 349 art::Runtime::Current()->GetThreadList()->ResumeAll(); 350 // Let other threads know it's fine to proceed. 
351 art::MutexLock lk(self_, deopt_->deoptimization_status_lock_); 352 deopt_->performing_deoptimization_ = false; 353 deopt_->deoptimization_condition_.Broadcast(self_); 354 } 355 356 private: 357 art::Thread* self_; 358 DeoptManager* deopt_; 359 art::gc::GCCriticalSection critical_section_; 360 const char* uninterruptible_cause_; 361 }; 362 AddDeoptimizeAllMethodsLocked(art::Thread * self)363 void DeoptManager::AddDeoptimizeAllMethodsLocked(art::Thread* self) { 364 global_deopt_count_++; 365 if (global_deopt_count_ == 1) { 366 PerformGlobalDeoptimization(self); 367 } else { 368 WaitForDeoptimizationToFinish(self); 369 } 370 } 371 Shutdown()372 void DeoptManager::Shutdown() { 373 art::Thread* self = art::Thread::Current(); 374 art::Runtime* runtime = art::Runtime::Current(); 375 376 // Do the transition in ScopedJITSuspend, so we don't start any JIT compilations 377 // before the transition to debuggable is finished. 378 art::jit::ScopedJitSuspend suspend_jit; 379 380 art::ScopedThreadStateChange sts(self, art::ThreadState::kSuspended); 381 deoptimization_status_lock_.ExclusiveLock(self); 382 ScopedDeoptimizationContext sdc(self, this); 383 384 art::RuntimeCallbacks* callbacks = runtime->GetRuntimeCallbacks(); 385 callbacks->RemoveMethodInspectionCallback(&inspection_callback_); 386 387 if (runtime->IsShuttingDown(self)) { 388 return; 389 } 390 391 // If we attach a debugger to a non-debuggable runtime, we switch the runtime to debuggable to 392 // provide a consistent (though still best effort) support. Since we are detaching the debugger 393 // now, switch it back to non-debuggable if there are no other debugger / profiling tools are 394 // active. 
395 runtime->GetInstrumentation()->DisableDeoptimization(kInstrumentationKey, 396 /*try_switch_to_non_debuggable=*/true); 397 runtime->GetInstrumentation()->DisableDeoptimization(kDeoptManagerInstrumentationKey, 398 /*try_switch_to_non_debuggable=*/true); 399 } 400 RemoveDeoptimizeAllMethodsLocked(art::Thread * self)401 void DeoptManager::RemoveDeoptimizeAllMethodsLocked(art::Thread* self) { 402 DCHECK_GT(global_deopt_count_, 0u) << "Request to remove non-existent global deoptimization!"; 403 global_deopt_count_--; 404 if (global_deopt_count_ == 0) { 405 PerformGlobalUndeoptimization(self); 406 } else { 407 WaitForDeoptimizationToFinish(self); 408 } 409 } 410 PerformLimitedDeoptimization(art::Thread * self,art::ArtMethod * method)411 void DeoptManager::PerformLimitedDeoptimization(art::Thread* self, art::ArtMethod* method) { 412 ScopedDeoptimizationContext sdc(self, this); 413 art::Runtime::Current()->GetInstrumentation()->Deoptimize(method); 414 } 415 PerformLimitedUndeoptimization(art::Thread * self,art::ArtMethod * method)416 void DeoptManager::PerformLimitedUndeoptimization(art::Thread* self, art::ArtMethod* method) { 417 ScopedDeoptimizationContext sdc(self, this); 418 art::Runtime::Current()->GetInstrumentation()->Undeoptimize(method); 419 } 420 PerformGlobalDeoptimization(art::Thread * self)421 void DeoptManager::PerformGlobalDeoptimization(art::Thread* self) { 422 ScopedDeoptimizationContext sdc(self, this); 423 art::Runtime::Current()->GetInstrumentation()->DeoptimizeEverything( 424 kDeoptManagerInstrumentationKey); 425 } 426 PerformGlobalUndeoptimization(art::Thread * self)427 void DeoptManager::PerformGlobalUndeoptimization(art::Thread* self) { 428 ScopedDeoptimizationContext sdc(self, this); 429 art::Runtime::Current()->GetInstrumentation()->UndeoptimizeEverything( 430 kDeoptManagerInstrumentationKey); 431 } 432 AddDeoptimizeThreadMethods(art::ScopedObjectAccessUnchecked & soa,jthread jtarget)433 jvmtiError 
DeoptManager::AddDeoptimizeThreadMethods(art::ScopedObjectAccessUnchecked& soa, jthread jtarget) { 434 art::Locks::thread_list_lock_->ExclusiveLock(soa.Self()); 435 art::Thread* target = nullptr; 436 jvmtiError err = OK; 437 if (!ThreadUtil::GetNativeThread(jtarget, soa, &target, &err)) { 438 art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self()); 439 return err; 440 } 441 // We don't need additional locking here because we hold the Thread_list_lock_. 442 if (target->IncrementForceInterpreterCount() == 1) { 443 struct DeoptClosure : public art::Closure { 444 public: 445 explicit DeoptClosure(DeoptManager* manager) : manager_(manager) {} 446 void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) { 447 manager_->DeoptimizeThread(self); 448 } 449 450 private: 451 DeoptManager* manager_; 452 }; 453 DeoptClosure c(this); 454 target->RequestSynchronousCheckpoint(&c); 455 } else { 456 art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self()); 457 } 458 return OK; 459 } 460 RemoveDeoptimizeThreadMethods(art::ScopedObjectAccessUnchecked & soa,jthread jtarget)461 jvmtiError DeoptManager::RemoveDeoptimizeThreadMethods(art::ScopedObjectAccessUnchecked& soa, jthread jtarget) { 462 art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_); 463 art::Thread* target = nullptr; 464 jvmtiError err = OK; 465 if (!ThreadUtil::GetNativeThread(jtarget, soa, &target, &err)) { 466 return err; 467 } 468 // We don't need additional locking here because we hold the Thread_list_lock_. 
469 DCHECK_GT(target->ForceInterpreterCount(), 0u); 470 target->DecrementForceInterpreterCount(); 471 return OK; 472 } 473 474 RemoveDeoptimizationRequester()475 void DeoptManager::RemoveDeoptimizationRequester() { 476 art::Thread* self = art::Thread::Current(); 477 art::ScopedThreadStateChange sts(self, art::ThreadState::kSuspended); 478 deoptimization_status_lock_.ExclusiveLock(self); 479 DCHECK_GT(deopter_count_, 0u) << "Removing deoptimization requester without any being present"; 480 deopter_count_--; 481 if (deopter_count_ == 0) { 482 ScopedDeoptimizationContext sdc(self, this); 483 art::Runtime::Current()->GetInstrumentation()->DisableDeoptimization( 484 kInstrumentationKey, /*try_switch_to_non_debuggable=*/false); 485 return; 486 } else { 487 deoptimization_status_lock_.ExclusiveUnlock(self); 488 } 489 } 490 AddDeoptimizationRequester()491 void DeoptManager::AddDeoptimizationRequester() { 492 art::Thread* self = art::Thread::Current(); 493 art::ScopedThreadStateChange stsc(self, art::ThreadState::kSuspended); 494 deoptimization_status_lock_.ExclusiveLock(self); 495 deopter_count_++; 496 if (deopter_count_ == 1) { 497 // When we add a deoptimization requester, we should enable entry / exit hooks. We only call 498 // this in debuggable runtimes and hence it won't be necessary to update entrypoints but we 499 // still need to inform instrumentation that we need to actually run entry / exit hooks. Though 500 // entrypoints are capable of running entry / exit hooks they won't run them unless enabled. 501 ScopedDeoptimizationContext sdc(self, this); 502 art::Runtime::Current()->GetInstrumentation()->EnableEntryExitHooks(kInstrumentationKey); 503 return; 504 } 505 deoptimization_status_lock_.ExclusiveUnlock(self); 506 } 507 DeoptimizeThread(art::Thread * target)508 void DeoptManager::DeoptimizeThread(art::Thread* target) { 509 // We might or might not be running on the target thread (self) so get Thread::Current 510 // directly. 
511 art::ScopedThreadSuspension sts(art::Thread::Current(), art::ThreadState::kSuspended); 512 art::gc::ScopedGCCriticalSection sgccs(art::Thread::Current(), 513 art::gc::GcCause::kGcCauseDebugger, 514 art::gc::CollectorType::kCollectorTypeDebugger); 515 art::ScopedSuspendAll ssa("Instrument thread stack"); 516 // Prepare the stack so methods can be deoptimized as and when required. 517 // This by itself doesn't cause any methods to deoptimize but enables 518 // deoptimization on demand. 519 art::Runtime::Current()->GetInstrumentation()->InstrumentThreadStack(target, 520 /* force_deopt= */ false); 521 } 522 523 extern DeoptManager* gDeoptManager; Get()524 DeoptManager* DeoptManager::Get() { 525 return gDeoptManager; 526 } 527 528 } // namespace openjdkjvmti 529