// Copyright (c) 2015, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/thread.h"

#include "vm/cpu.h"
#include "vm/dart_api_state.h"
#include "vm/growable_array.h"
#include "vm/heap/safepoint.h"
#include "vm/isolate.h"
#include "vm/json_stream.h"
#include "vm/lockers.h"
#include "vm/log.h"
#include "vm/message_handler.h"
#include "vm/native_entry.h"
#include "vm/object.h"
#include "vm/object_store.h"
#include "vm/os_thread.h"
#include "vm/profiler.h"
#include "vm/runtime_entry.h"
#include "vm/service.h"
#include "vm/stub_code.h"
#include "vm/symbols.h"
#include "vm/thread_interrupter.h"
#include "vm/thread_registry.h"
#include "vm/timeline.h"
#include "vm/zone.h"

namespace dart {

#if !defined(PRODUCT)
DECLARE_FLAG(bool, trace_service);
DECLARE_FLAG(bool, trace_service_verbose);
#endif  // !defined(PRODUCT)

Thread::~Thread() {
  // We should cleanly exit any isolate before destruction.
  ASSERT(isolate_ == nullptr);
  ASSERT(store_buffer_block_ == nullptr);
  ASSERT(marking_stack_block_ == nullptr);
  // There should be no top api scopes at this point.
  ASSERT(api_top_scope() == nullptr);
  // Delete the reusable api scope if there is one.
  if (api_reusable_scope_ != nullptr) {
    delete api_reusable_scope_;
    api_reusable_scope_ = nullptr;
  }

  DO_IF_TSAN(delete tsan_utils_);
}

#if defined(DEBUG)
#define REUSABLE_HANDLE_SCOPE_INIT(object) \
  reusable_##object##_handle_scope_active_(false),
#else
#define REUSABLE_HANDLE_SCOPE_INIT(object)
#endif  // defined(DEBUG)

#define REUSABLE_HANDLE_INITIALIZERS(object) object##_handle_(nullptr),

Thread::Thread(bool is_vm_isolate)
    : ThreadState(false),
      write_barrier_mask_(UntaggedObject::kGenerationalBarrierMask),
      active_exception_(Object::null()),
      active_stacktrace_(Object::null()),
      global_object_pool_(ObjectPool::null()),
      resume_pc_(0),
      execution_state_(kThreadInNative),
      safepoint_state_(0),
      api_top_scope_(nullptr),
      double_truncate_round_supported_(
          TargetCPUFeatures::double_truncate_round_supported() ? 1 : 0),
      tsan_utils_(DO_IF_TSAN(new TsanUtils()) DO_IF_NOT_TSAN(nullptr)),
      task_kind_(kUnknownTask),
      dart_stream_(nullptr),
      service_extension_stream_(nullptr),
      thread_lock_(),
      api_reusable_scope_(nullptr),
      no_callback_scope_depth_(0),
#if defined(DEBUG)
      no_safepoint_scope_depth_(0),
#endif
      reusable_handles_(),
      stack_overflow_count_(0),
      hierarchy_info_(nullptr),
      type_usage_info_(nullptr),
      sticky_error_(Error::null()),
      REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_INITIALIZERS)
      REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_SCOPE_INIT)
#if defined(USING_SAFE_STACK)
      saved_safestack_limit_(0),
#endif
#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
      next_(nullptr),
      heap_sampler_(this) {
#else
      next_(nullptr) {
#endif

#if defined(SUPPORT_TIMELINE)
  dart_stream_ = Timeline::GetDartStream();
  ASSERT(dart_stream_ != nullptr);
#endif
#ifndef PRODUCT
  service_extension_stream_ = &Service::extension_stream;
  ASSERT(service_extension_stream_ != nullptr);
#endif
#define DEFAULT_INIT(type_name, member_name, init_expr, default_init_value) \
  member_name = default_init_value;
  CACHED_CONSTANTS_LIST(DEFAULT_INIT)
#undef DEFAULT_INIT

  for (intptr_t i = 0; i < kNumberOfDartAvailableCpuRegs; ++i) {
    write_barrier_wrappers_entry_points_[i] = 0;
  }

#define DEFAULT_INIT(name) name##_entry_point_ = 0;
  RUNTIME_ENTRY_LIST(DEFAULT_INIT)
#undef DEFAULT_INIT

#define DEFAULT_INIT(returntype, name, ...) name##_entry_point_ = 0;
  LEAF_RUNTIME_ENTRY_LIST(DEFAULT_INIT)
#undef DEFAULT_INIT

  // We cannot initialize the VM constants here for the vm isolate thread
  // due to bootstrapping issues.
  if (!is_vm_isolate) {
    InitVMConstants();
  }

#if defined(DART_HOST_OS_FUCHSIA)
  next_task_id_ = trace_generate_nonce();
#else
  next_task_id_ = Random::GlobalNextUInt64();
#endif

  memset(&unboxed_runtime_arg_, 0, sizeof(simd128_value_t));
}

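// Aligned bit-pattern constants used by generated code to implement
// floating-point negate/abs/etc. as integer XOR/AND operations; ALIGN16
// keeps them valid sources for aligned 128-bit SIMD loads.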
static const double double_nan_constant = NAN;

static const struct ALIGN16 {
  uint64_t a;
  uint64_t b;
} double_negate_constant = {0x8000000000000000ULL, 0x8000000000000000ULL};

static const struct ALIGN16 {
  uint64_t a;
  uint64_t b;
} double_abs_constant = {0x7FFFFFFFFFFFFFFFULL, 0x7FFFFFFFFFFFFFFFULL};

static const struct ALIGN16 {
  uint32_t a;
  uint32_t b;
  uint32_t c;
  uint32_t d;
} float_not_constant = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};

static const struct ALIGN16 {
  uint32_t a;
  uint32_t b;
  uint32_t c;
  uint32_t d;
} float_negate_constant = {0x80000000, 0x80000000, 0x80000000, 0x80000000};

static const struct ALIGN16 {
  uint32_t a;
  uint32_t b;
  uint32_t c;
  uint32_t d;
} float_absolute_constant = {0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF};

static const struct ALIGN16 {
  uint32_t a;
  uint32_t b;
  uint32_t c;
  uint32_t d;
} float_zerow_constant = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000};

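// Fills in the cached VM objects, constants and runtime entry points, and
// allocates the thread-specific reusable handles. Deferred until after VM
// bootstrapping for the vm-isolate thread (see the constructor above).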
void Thread::InitVMConstants() {
#if defined(DART_COMPRESSED_POINTERS)
  heap_base_ = Object::null()->heap_base();
#endif

#define ASSERT_VM_HEAP(type_name, member_name, init_expr, default_init_value) \
  ASSERT((init_expr)->IsOldObject());
  CACHED_VM_OBJECTS_LIST(ASSERT_VM_HEAP)
#undef ASSERT_VM_HEAP

#define INIT_VALUE(type_name, member_name, init_expr, default_init_value) \
  ASSERT(member_name == default_init_value); \
  member_name = (init_expr);
  CACHED_CONSTANTS_LIST(INIT_VALUE)
#undef INIT_VALUE

  for (intptr_t i = 0; i < kNumberOfDartAvailableCpuRegs; ++i) {
    write_barrier_wrappers_entry_points_[i] =
        StubCode::WriteBarrierWrappers().EntryPoint() +
        i * kStoreBufferWrapperSize;
  }

#define INIT_VALUE(name) \
  ASSERT(name##_entry_point_ == 0); \
  name##_entry_point_ = k##name##RuntimeEntry.GetEntryPoint();
  RUNTIME_ENTRY_LIST(INIT_VALUE)
#undef INIT_VALUE

#define INIT_VALUE(returntype, name, ...) \
  ASSERT(name##_entry_point_ == 0); \
  name##_entry_point_ = k##name##RuntimeEntry.GetEntryPoint();
  LEAF_RUNTIME_ENTRY_LIST(INIT_VALUE)
#undef INIT_VALUE

  // Set up the thread-specific reusable handles.
#define REUSABLE_HANDLE_ALLOCATION(object) \
  this->object##_handle_ = this->AllocateReusableHandle<object>();
  REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_ALLOCATION)
#undef REUSABLE_HANDLE_ALLOCATION
}

void Thread::set_active_exception(const Object& value) {
  active_exception_ = value.ptr();
}

void Thread::set_active_stacktrace(const Object& value) {
  active_stacktrace_ = value.ptr();
}

ErrorPtr Thread::sticky_error() const {
  return sticky_error_;
}

void Thread::set_sticky_error(const Error& value) {
  ASSERT(!value.IsNull());
  sticky_error_ = value.ptr();
}

void Thread::ClearStickyError() {
  sticky_error_ = Error::null();
}

ErrorPtr Thread::StealStickyError() {
  NoSafepointScope no_safepoint;
  ErrorPtr return_value = sticky_error_;
  sticky_error_ = Error::null();
  return return_value;
}

const char* Thread::TaskKindToCString(TaskKind kind) {
  switch (kind) {
    case kUnknownTask:
      return "kUnknownTask";
    case kMutatorTask:
      return "kMutatorTask";
    case kCompilerTask:
      return "kCompilerTask";
    case kSweeperTask:
      return "kSweeperTask";
    case kMarkerTask:
      return "kMarkerTask";
    default:
      UNREACHABLE();
      return "";
  }
}

void Thread::AssertNonMutatorInvariants() {
  ASSERT(BypassSafepoints());
  ASSERT(store_buffer_block_ == nullptr);
  ASSERT(marking_stack_block_ == nullptr);
  ASSERT(deferred_marking_stack_block_ == nullptr);
  AssertNonDartMutatorInvariants();
}

void Thread::AssertNonDartMutatorInvariants() {
  ASSERT(!IsDartMutatorThread());
  ASSERT(isolate() == nullptr);
  ASSERT(isolate_group() != nullptr);
  ASSERT(task_kind_ != kMutatorTask);
  DEBUG_ASSERT(!IsAnyReusableHandleScopeActive());
}

void Thread::AssertEmptyStackInvariants() {
  ASSERT(zone() == nullptr);
  ASSERT(top_handle_scope() == nullptr);
  ASSERT(long_jump_base() == nullptr);
  ASSERT(top_resource() == nullptr);
  ASSERT(top_exit_frame_info_ == 0);
  ASSERT(api_top_scope_ == nullptr);
  ASSERT(!pending_deopts_.HasPendingDeopts());
  ASSERT(compiler_state_ == nullptr);
  ASSERT(hierarchy_info_ == nullptr);
  ASSERT(type_usage_info_ == nullptr);
  ASSERT(no_active_isolate_scope_ == nullptr);
  ASSERT(compiler_timings_ == nullptr);
  ASSERT(!exit_through_ffi_);
  ASSERT(runtime_call_deopt_ability_ == RuntimeCallDeoptAbility::kCanLazyDeopt);
  ASSERT(no_callback_scope_depth_ == 0);
  ASSERT(force_growth_scope_depth_ == 0);
  ASSERT(no_reload_scope_depth_ == 0);
  ASSERT(stopped_mutators_scope_depth_ == 0);
  ASSERT(stack_overflow_flags_ == 0);
  DEBUG_ASSERT(!inside_compiler_);
  DEBUG_ASSERT(no_safepoint_scope_depth_ == 0);

  // Avoid running these asserts for the vm-isolate.
  if (active_stacktrace_.untag() != 0) {
    ASSERT(sticky_error() == Error::null());
    ASSERT(active_exception_ == Object::null());
    ASSERT(active_stacktrace_ == Object::null());
  }
}

void Thread::AssertEmptyThreadInvariants() {
  AssertEmptyStackInvariants();

  ASSERT(top_ == 0);
  ASSERT(end_ == 0);
  ASSERT(true_end_ == 0);
  ASSERT(isolate_ == nullptr);
  ASSERT(isolate_group_ == nullptr);
  ASSERT(os_thread() == nullptr);
  ASSERT(vm_tag_ == VMTag::kInvalidTagId);
  ASSERT(task_kind_ == kUnknownTask);
  ASSERT(execution_state_ == Thread::kThreadInNative);
  ASSERT(scheduled_dart_mutator_isolate_ == nullptr);

  ASSERT(write_barrier_mask_ == UntaggedObject::kGenerationalBarrierMask);
  ASSERT(store_buffer_block_ == nullptr);
  ASSERT(marking_stack_block_ == nullptr);
  ASSERT(deferred_marking_stack_block_ == nullptr);
  ASSERT(!is_unwind_in_progress_);

  ASSERT(saved_stack_limit_ == OSThread::kInvalidStackLimit);
  ASSERT(stack_limit_.load() == 0);
  ASSERT(safepoint_state_ == 0);

  // Avoid running these asserts for the vm-isolate.
  if (active_stacktrace_.untag() != 0) {
    ASSERT(field_table_values_ == nullptr);
    ASSERT(global_object_pool_ == Object::null());
#define CHECK_REUSABLE_HANDLE(object) ASSERT(object##_handle_->IsNull());
    REUSABLE_HANDLE_LIST(CHECK_REUSABLE_HANDLE)
#undef CHECK_REUSABLE_HANDLE
  }
}

bool Thread::HasActiveState() {
  // Do we have active dart frames?
  if (top_exit_frame_info() != 0) {
    return true;
  }
  // Do we have active embedder scopes?
  if (api_top_scope() != nullptr) {
    return true;
  }
  // Do we have an active vm zone?
  if (zone() != nullptr) {
    return true;
  }
  AssertEmptyStackInvariants();
  return false;
}

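// Makes the calling OS thread the mutator of [isolate]: either resumes the
// isolate's suspended mutator Thread (exiting its safepoint) or acquires and
// initializes a fresh Thread structure for it. Typical pairing (sketch):
//   Thread::EnterIsolate(isolate);
//   ... run Dart code on the current thread ...
//   Thread::ExitIsolate(/*isolate_shutdown=*/false);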
void Thread::EnterIsolate(Isolate* isolate) {
  const bool is_resumable = isolate->mutator_thread() != nullptr;

  // To let the VM's thread pool (if we run on it) know that this thread is
  // occupying a mutator again (which decreases its max size).
  const bool is_nested_reenter =
      (is_resumable && isolate->mutator_thread()->top_exit_frame_info() != 0);

  auto group = isolate->group();
  if (!(is_nested_reenter && isolate->mutator_thread()->OwnsSafepoint())) {
    group->IncreaseMutatorCount(isolate, is_nested_reenter);
  }

  // Two threads cannot enter the same isolate at the same time.
  ASSERT(isolate->scheduled_mutator_thread_ == nullptr);

  // We lazily create a [Thread] structure for the mutator thread, but we'll
  // reuse it until the death of the isolate.
  Thread* thread = nullptr;
  if (is_resumable) {
    thread = isolate->mutator_thread();
    ASSERT(thread->scheduled_dart_mutator_isolate_ == isolate);
    ASSERT(thread->isolate() == isolate);
    ASSERT(thread->isolate_group() == isolate->group());
    {
      // Descheduled isolates are reloadable (if nothing else prevents it).
      RawReloadParticipationScope enable_reload(thread);
      thread->ExitSafepoint();
    }
  } else {
    thread = AddActiveThread(group, isolate, /*is_dart_mutator=*/true,
                             /*bypass_safepoint=*/false);
    thread->SetupState(kMutatorTask);
    thread->SetupMutatorState(kMutatorTask);
    thread->SetupDartMutatorState(isolate);
  }

  isolate->scheduled_mutator_thread_ = thread;
  ResumeDartMutatorThreadInternal(thread);
}

static bool ShouldSuspend(bool isolate_shutdown, Thread* thread) {
  // Must destroy thread.
  if (isolate_shutdown) return false;

  // Must retain thread.
  if (thread->HasActiveState() || thread->OwnsSafepoint()) return true;

  // Could do either. When there are few isolates, suspend to avoid the work
  // of entering and leaving. When there are many isolates, destroy the thread
  // to avoid the root set growing too big.
  const intptr_t kMaxSuspendedThreads = 20;
  auto group = thread->isolate_group();
  return group->thread_registry()->active_isolates_count() <
         kMaxSuspendedThreads;
}

void Thread::ExitIsolate(bool isolate_shutdown) {
  Thread* thread = Thread::Current();
  ASSERT(thread != nullptr);
  ASSERT(thread->IsDartMutatorThread());
  ASSERT(thread->isolate() != nullptr);
  ASSERT(thread->isolate_group() != nullptr);
  ASSERT(thread->isolate()->mutator_thread_ == thread);
  ASSERT(thread->isolate()->scheduled_mutator_thread_ == thread);
  DEBUG_ASSERT(!thread->IsAnyReusableHandleScopeActive());

  auto isolate = thread->isolate();
  auto group = thread->isolate_group();

  thread->set_vm_tag(isolate->is_runnable() ? VMTag::kIdleTagId
                                            : VMTag::kLoadWaitTagId);
  if (thread->sticky_error() != Error::null()) {
    ASSERT(isolate->sticky_error_ == Error::null());
    isolate->sticky_error_ = thread->StealStickyError();
  }

  isolate->scheduled_mutator_thread_ = nullptr;

  // Right now we keep the [Thread] object across the isolate's lifetime. This
  // makes entering/exiting quite fast as it mainly boils down to safepoint
  // transitions. Though any operation that walks over all active threads will
  // see this thread as well (e.g. safepoint operations).
  const bool is_nested_exit = thread->top_exit_frame_info() != 0;
  if (ShouldSuspend(isolate_shutdown, thread)) {
    const auto tag =
        isolate->is_runnable() ? VMTag::kIdleTagId : VMTag::kLoadWaitTagId;
    SuspendDartMutatorThreadInternal(thread, tag);
    {
      // Descheduled isolates are reloadable (if nothing else prevents it).
      RawReloadParticipationScope enable_reload(thread);
      thread->EnterSafepoint();
    }
    thread->set_execution_state(Thread::kThreadInNative);
  } else {
    thread->ResetDartMutatorState(isolate);
    thread->ResetMutatorState();
    thread->ResetState();
    SuspendDartMutatorThreadInternal(thread, VMTag::kInvalidTagId);
    FreeActiveThread(thread, /*bypass_safepoint=*/false);
  }

  // To let the VM's thread pool (if we run on it) know that this thread is no
  // longer occupying a mutator (which increases its max size).
  ASSERT(!(isolate_shutdown && is_nested_exit));
  if (!(is_nested_exit && thread->OwnsSafepoint())) {
    group->DecreaseMutatorCount(isolate, is_nested_exit);
  }
}

bool Thread::EnterIsolateGroupAsHelper(IsolateGroup* isolate_group,
                                       TaskKind kind,
                                       bool bypass_safepoint) {
  Thread* thread = AddActiveThread(isolate_group, nullptr,
                                   /*is_dart_mutator=*/false, bypass_safepoint);
  if (thread != nullptr) {
    thread->SetupState(kind);
    // Even if [bypass_safepoint] is true, a thread may need mutator state
    // (e.g. parallel scavenger threads write to the [Thread]'s store buffer).
    thread->SetupMutatorState(kind);
    ResumeThreadInternal(thread);

    thread->AssertNonDartMutatorInvariants();
    return true;
  }
  return false;
}

void Thread::ExitIsolateGroupAsHelper(bool bypass_safepoint) {
  Thread* thread = Thread::Current();
  thread->AssertNonDartMutatorInvariants();

  // Even if [bypass_safepoint] is true, a thread may need mutator state
  // (e.g. parallel scavenger threads write to the [Thread]'s store buffer).
  thread->ResetMutatorState();
  thread->ResetState();
  SuspendThreadInternal(thread, VMTag::kInvalidTagId);
  FreeActiveThread(thread, bypass_safepoint);
}

bool Thread::EnterIsolateGroupAsNonMutator(IsolateGroup* isolate_group,
                                           TaskKind kind) {
  Thread* thread =
      AddActiveThread(isolate_group, nullptr,
                      /*is_dart_mutator=*/false, /*bypass_safepoint=*/true);
  if (thread != nullptr) {
    thread->SetupState(kind);
    ResumeThreadInternal(thread);

    thread->AssertNonMutatorInvariants();
    return true;
  }
  return false;
}

void Thread::ExitIsolateGroupAsNonMutator() {
  Thread* thread = Thread::Current();
  ASSERT(thread != nullptr);
  thread->AssertNonMutatorInvariants();

  thread->ResetState();
  SuspendThreadInternal(thread, VMTag::kInvalidTagId);
  FreeActiveThread(thread, /*bypass_safepoint=*/true);
}

void Thread::ResumeDartMutatorThreadInternal(Thread* thread) {
  ResumeThreadInternal(thread);
  if (Dart::vm_isolate() != nullptr &&
      thread->isolate() != Dart::vm_isolate()) {
#if defined(USING_SIMULATOR)
    thread->SetStackLimit(Simulator::Current()->overflow_stack_limit());
#else
    thread->SetStackLimit(OSThread::Current()->overflow_stack_limit());
#endif
  }
}

void Thread::SuspendDartMutatorThreadInternal(Thread* thread,
                                              VMTag::VMTagId tag) {
  thread->ClearStackLimit();
  SuspendThreadInternal(thread, tag);
}

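// Binds [thread] to the current OS thread, enables profiler interrupts and
// transitions it from the "in native" to the "in VM" execution state.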
void Thread::ResumeThreadInternal(Thread* thread) {
  ASSERT(!thread->IsAtSafepoint());
  ASSERT(thread->isolate_group() != nullptr);
  ASSERT(thread->execution_state() == Thread::kThreadInNative);
  ASSERT(thread->vm_tag() == VMTag::kInvalidTagId ||
         thread->vm_tag() == VMTag::kIdleTagId ||
         thread->vm_tag() == VMTag::kLoadWaitTagId);

  thread->set_vm_tag(VMTag::kVMTagId);
  thread->set_execution_state(Thread::kThreadInVM);

  OSThread* os_thread = OSThread::Current();
  thread->set_os_thread(os_thread);
  os_thread->set_thread(thread);
  Thread::SetCurrent(thread);
  os_thread->EnableThreadInterrupts();

#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
  thread->heap_sampler().Initialize();
#endif
}

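// The counterpart of ResumeThreadInternal: abandons the thread's TLAB,
// disables profiler interrupts and unbinds [thread] from the current OS
// thread, leaving it tagged with [tag].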
void Thread::SuspendThreadInternal(Thread* thread, VMTag::VMTagId tag) {
  thread->heap()->new_space()->AbandonRemainingTLAB(thread);

#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
  thread->heap_sampler().Cleanup();
#endif

  OSThread* os_thread = thread->os_thread();
  ASSERT(os_thread != nullptr);
  os_thread->DisableThreadInterrupts();
  os_thread->set_thread(nullptr);
  OSThread::SetCurrent(os_thread);
  thread->set_os_thread(nullptr);

  thread->set_vm_tag(tag);
}

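// Acquires a free [Thread] structure from the group's thread registry and
// associates it with [isolate] (may be nullptr) and [group]. Unless
// [bypass_safepoint] is set, waits for any in-progress safepoint operation
// to finish before joining the set of active threads.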
Thread* Thread::AddActiveThread(IsolateGroup* group,
                                Isolate* isolate,
                                bool is_dart_mutator,
                                bool bypass_safepoint) {
  // NOTE: We cannot just use `Dart::vm_isolate() == this` here, since during
  // VM startup it might not have been set at this point.
  const bool is_vm_isolate =
      Dart::vm_isolate() == nullptr || Dart::vm_isolate() == isolate;

  auto thread_registry = group->thread_registry();
  auto safepoint_handler = group->safepoint_handler();
  MonitorLocker ml(thread_registry->threads_lock());

  if (!bypass_safepoint) {
    while (safepoint_handler->AnySafepointInProgressLocked()) {
      ml.Wait();
    }
  }

  Thread* thread = thread_registry->GetFreeThreadLocked(is_vm_isolate);
  thread->AssertEmptyThreadInvariants();

  thread->isolate_ = isolate;  // May be nullptr.
  thread->isolate_group_ = group;
  thread->scheduled_dart_mutator_isolate_ = isolate;

  // We start out not being at a safepoint (any in-progress safepoint
  // operation has been waited for above, unless we bypass safepoints).
  thread->set_safepoint_state(
      Thread::SetBypassSafepoints(bypass_safepoint, 0));
  thread->runtime_call_deopt_ability_ = RuntimeCallDeoptAbility::kCanLazyDeopt;
  ASSERT(!thread->IsAtSafepoint());

  ASSERT(thread->saved_stack_limit_ == OSThread::kInvalidStackLimit);
  return thread;
}

void Thread::FreeActiveThread(Thread* thread, bool bypass_safepoint) {
  ASSERT(!thread->HasActiveState());
  ASSERT(!thread->IsAtSafepoint());

  if (!bypass_safepoint) {
    // GC helper threads don't have any handle state to clear, and the GC
    // might be currently visiting thread state. If this is not a GC helper,
    // the GC can't be visiting thread state because it's waiting for this
    // thread to check in.
    thread->ClearReusableHandles();
  }

  auto group = thread->isolate_group_;
  auto thread_registry = group->thread_registry();

  MonitorLocker ml(thread_registry->threads_lock());

  if (!bypass_safepoint) {
    // There may be a pending safepoint operation on another thread that is
    // waiting for us to check in.
    //
    // Though notice we're holding the thread registry's threads_lock, which
    // means if this other thread runs code as part of a safepoint operation
    // it will still wait for us to finish here before it tries to iterate
    // the active mutators (e.g. when GC starts/stops incremental marking).
    //
    // The thread is empty and the corresponding isolate (if any) is therefore
    // at an event-loop boundary (or shutting down). We participate in reload
    // in those scenarios.
    //
    // (It may be that an active [RELOAD_OPERATION_SCOPE] sent an OOB message
    // to this isolate but it didn't handle the OOB due to shutting down, so
    // we'll still have to notify the reloading thread that it's ok to
    // continue.)
    RawReloadParticipationScope enable_reload(thread);
    thread->EnterSafepoint();
  }

  thread->isolate_ = nullptr;
  thread->isolate_group_ = nullptr;
  thread->scheduled_dart_mutator_isolate_ = nullptr;
  thread->set_execution_state(Thread::kThreadInNative);
  thread->stack_limit_.store(0);
  thread->safepoint_state_ = 0;

  thread->AssertEmptyThreadInvariants();
  thread_registry->ReturnThreadLocked(thread);
}

void Thread::ReleaseStoreBuffer() {
  ASSERT(IsAtSafepoint() || OwnsSafepoint());
  if (store_buffer_block_ == nullptr || store_buffer_block_->IsEmpty()) {
    return;  // Nothing to release.
  }
  // Prevent scheduling another GC by ignoring the threshold.
  StoreBufferRelease(StoreBuffer::kIgnoreThreshold);
  // Make sure to get an *empty* block; the isolate needs all entries
  // at GC time.
  // TODO(koda): Replace with an epilogue (PrepareAfterGC) that acquires.
  store_buffer_block_ = isolate_group()->store_buffer()->PopEmptyBlock();
}

void Thread::SetStackLimit(uword limit) {
  // The thread setting the stack limit is not necessarily the thread which
  // the stack limit is being set on.
  MonitorLocker ml(&thread_lock_);
  if (!HasScheduledInterrupts()) {
    // No interrupt pending, so set stack_limit_ too.
    stack_limit_.store(limit);
  }
  saved_stack_limit_ = limit;
}

void Thread::ClearStackLimit() {
  SetStackLimit(OSThread::kInvalidStackLimit);
}

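// Pending interrupts are encoded in the stack limit: the limit is replaced
// by kInterruptStackLimit with the requested bits OR'd into the low
// kInterruptsMask bits, which makes the next stack-overflow check in Dart
// code fail and enter the runtime. This predicate recognizes such a limit.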
static bool IsInterruptLimit(uword limit) {
  return (limit & ~Thread::kInterruptsMask) ==
         (kInterruptStackLimit & ~Thread::kInterruptsMask);
}

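// Sets the given interrupt bits, e.g. ScheduleInterrupts(Thread::kVMInterrupt).
// May be called from a thread other than the one being interrupted, hence the
// CAS loop on stack_limit_.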
void Thread::ScheduleInterrupts(uword interrupt_bits) {
  ASSERT((interrupt_bits & ~kInterruptsMask) == 0);  // Must fit in mask.

  uword old_limit = stack_limit_.load();
  uword new_limit;
  do {
    if (IsInterruptLimit(old_limit)) {
      new_limit = old_limit | interrupt_bits;
    } else {
      new_limit = (kInterruptStackLimit & ~kInterruptsMask) | interrupt_bits;
    }
  } while (!stack_limit_.compare_exchange_weak(old_limit, new_limit));
}

uword Thread::GetAndClearInterrupts() {
  uword interrupt_bits = 0;
  uword old_limit = stack_limit_.load();
  uword new_limit = saved_stack_limit_;
  do {
    if (IsInterruptLimit(old_limit)) {
      interrupt_bits = interrupt_bits | (old_limit & kInterruptsMask);
    } else {
      return interrupt_bits;
    }
  } while (!stack_limit_.compare_exchange_weak(old_limit, new_limit));

  return interrupt_bits;
}

ErrorPtr Thread::HandleInterrupts() {
  uword interrupt_bits = GetAndClearInterrupts();
  if ((interrupt_bits & kVMInterrupt) != 0) {
    CheckForSafepoint();
    if (isolate_group()->store_buffer()->Overflowed()) {
      // Evacuate: If the popular store buffer targets are copied instead of
      // promoted, the store buffer won't shrink and a second scavenge will
      // occur that does promote them.
      heap()->CollectGarbage(this, GCType::kEvacuate, GCReason::kStoreBuffer);
    }

#if !defined(PRODUCT)
    if (isolate()->TakeHasCompletedBlocks()) {
      Profiler::ProcessCompletedBlocks(isolate());
    }
#endif  // !defined(PRODUCT)

#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
    HeapProfileSampler& sampler = heap_sampler();
    if (sampler.ShouldSetThreadSamplingInterval()) {
      sampler.SetThreadSamplingInterval();
    }
    if (sampler.ShouldUpdateThreadEnable()) {
      sampler.UpdateThreadEnable();
    }
#endif  // !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
  }
  if ((interrupt_bits & kMessageInterrupt) != 0) {
    MessageHandler::MessageStatus status =
        isolate()->message_handler()->HandleOOBMessages();
    if (status != MessageHandler::kOK) {
      // A non-kOK result from HandleOOBMessages signals that the isolate
      // should be terminating.
      if (FLAG_trace_isolates) {
        OS::PrintErr(
            "[!] Terminating isolate due to OOB message:\n"
            "\tisolate: %s\n",
            isolate()->name());
      }
      return StealStickyError();
    }
  }
  return Error::null();
}

uword Thread::GetAndClearStackOverflowFlags() {
  uword stack_overflow_flags = stack_overflow_flags_;
  stack_overflow_flags_ = 0;
  return stack_overflow_flags;
}

void Thread::StoreBufferBlockProcess(StoreBuffer::ThresholdPolicy policy) {
  StoreBufferRelease(policy);
  StoreBufferAcquire();
}

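// Records [obj] in the thread-local store buffer block; once the block is
// full, trades it for a fresh one, letting the store buffer check its
// overflow threshold (kCheckThreshold).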
void Thread::StoreBufferAddObject(ObjectPtr obj) {
  ASSERT(this == Thread::Current());
  store_buffer_block_->Push(obj);
  if (store_buffer_block_->IsFull()) {
    StoreBufferBlockProcess(StoreBuffer::kCheckThreshold);
  }
}

void Thread::StoreBufferAddObjectGC(ObjectPtr obj) {
  store_buffer_block_->Push(obj);
  if (store_buffer_block_->IsFull()) {
    StoreBufferBlockProcess(StoreBuffer::kIgnoreThreshold);
  }
}

void Thread::StoreBufferRelease(StoreBuffer::ThresholdPolicy policy) {
  StoreBufferBlock* block = store_buffer_block_;
  store_buffer_block_ = nullptr;
  isolate_group()->store_buffer()->PushBlock(block, policy);
}

void Thread::StoreBufferAcquire() {
  store_buffer_block_ = isolate_group()->store_buffer()->PopNonFullBlock();
}

void Thread::MarkingStackBlockProcess() {
  MarkingStackRelease();
  MarkingStackAcquire();
}

void Thread::DeferredMarkingStackBlockProcess() {
  DeferredMarkingStackRelease();
  DeferredMarkingStackAcquire();
}

void Thread::MarkingStackAddObject(ObjectPtr obj) {
  marking_stack_block_->Push(obj);
  if (marking_stack_block_->IsFull()) {
    MarkingStackBlockProcess();
  }
}

void Thread::DeferredMarkingStackAddObject(ObjectPtr obj) {
  deferred_marking_stack_block_->Push(obj);
  if (deferred_marking_stack_block_->IsFull()) {
    DeferredMarkingStackBlockProcess();
  }
}

void Thread::MarkingStackRelease() {
  MarkingStackBlock* block = marking_stack_block_;
  marking_stack_block_ = nullptr;
  write_barrier_mask_ = UntaggedObject::kGenerationalBarrierMask;
  isolate_group()->marking_stack()->PushBlock(block);
}

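// Acquiring a marking stack block also turns on the incremental marking
// write barrier for this thread by widening write_barrier_mask_; releasing
// it (above) narrows the mask back to the generational barrier only.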
void Thread::MarkingStackAcquire() {
  marking_stack_block_ = isolate_group()->marking_stack()->PopEmptyBlock();
  write_barrier_mask_ = UntaggedObject::kGenerationalBarrierMask |
                        UntaggedObject::kIncrementalBarrierMask;
}

void Thread::DeferredMarkingStackRelease() {
  MarkingStackBlock* block = deferred_marking_stack_block_;
  deferred_marking_stack_block_ = nullptr;
  isolate_group()->deferred_marking_stack()->PushBlock(block);
}

void Thread::DeferredMarkingStackAcquire() {
  deferred_marking_stack_block_ =
      isolate_group()->deferred_marking_stack()->PopEmptyBlock();
}

Heap* Thread::heap() const {
  return isolate_group_->heap();
}

bool Thread::IsExecutingDartCode() const {
  return (top_exit_frame_info() == 0) && VMTag::IsDartTag(vm_tag());
}

bool Thread::HasExitedDartCode() const {
  return (top_exit_frame_info() != 0) && !VMTag::IsDartTag(vm_tag());
}

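// Allocates a handle from the thread's reusable-handle area and initializes
// it to null; used only to set up the REUSABLE_HANDLE_LIST members.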
template <class C>
C* Thread::AllocateReusableHandle() {
  C* handle = reinterpret_cast<C*>(reusable_handles_.AllocateScopedHandle());
  C::initializeHandle(handle, C::null());
  return handle;
}

void Thread::ClearReusableHandles() {
#define CLEAR_REUSABLE_HANDLE(object) *object##_handle_ = object::null();
  REUSABLE_HANDLE_LIST(CLEAR_REUSABLE_HANDLE)
#undef CLEAR_REUSABLE_HANDLE
}

void Thread::VisitObjectPointers(ObjectPointerVisitor* visitor,
                                 ValidationPolicy validation_policy) {
  ASSERT(visitor != nullptr);

  if (zone() != nullptr) {
    zone()->VisitObjectPointers(visitor);
  }

  // Visit objects in the thread-specific handles area.
  reusable_handles_.VisitObjectPointers(visitor);

  visitor->VisitPointer(reinterpret_cast<ObjectPtr*>(&global_object_pool_));
  visitor->VisitPointer(reinterpret_cast<ObjectPtr*>(&active_exception_));
  visitor->VisitPointer(reinterpret_cast<ObjectPtr*>(&active_stacktrace_));
  visitor->VisitPointer(reinterpret_cast<ObjectPtr*>(&sticky_error_));

  // Visit the api local scopes as they hold all the api local handles.
  ApiLocalScope* scope = api_top_scope_;
  while (scope != nullptr) {
    scope->local_handles()->VisitObjectPointers(visitor);
    scope = scope->previous();
  }

  // Only the mutator thread can run Dart code.
  if (IsDartMutatorThread()) {
    // The MarkTask, which calls this method, can run on a different thread.
    // We therefore assume the mutator is at a safepoint and we can iterate
    // its stack.
    // TODO(vm-team): It would be beneficial to be able to ask the mutator
    // thread whether it is in fact blocked at the moment (at a "safepoint")
    // so we can safely iterate its stack.
    //
    // Unfortunately we cannot use `this->IsAtSafepoint()` here because that
    // will return `false` even though the mutator thread is waiting for mark
    // tasks (which iterate its stack) to finish.
    const StackFrameIterator::CrossThreadPolicy cross_thread_policy =
        StackFrameIterator::kAllowCrossThreadIteration;

    // Iterate over all the stack frames and visit objects on the stack.
    StackFrameIterator frames_iterator(top_exit_frame_info(),
                                       validation_policy, this,
                                       cross_thread_policy);
    StackFrame* frame = frames_iterator.NextFrame();
    visitor->set_gc_root_type("frame");
    while (frame != nullptr) {
      frame->VisitObjectPointers(visitor);
      frame = frames_iterator.NextFrame();
    }
    visitor->clear_gc_root_type();
  } else {
    // We are not on the mutator thread.
    RELEASE_ASSERT(top_exit_frame_info() == 0);
  }
}

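// Walks the stack slots of Dart frames to re-establish the write-barrier
// invariant after a scavenge or when concurrent marking starts; see the
// comment on Thread::RestoreWriteBarrierInvariant below for why this is
// needed.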
class RestoreWriteBarrierInvariantVisitor : public ObjectPointerVisitor {
 public:
  RestoreWriteBarrierInvariantVisitor(IsolateGroup* group,
                                      Thread* thread,
                                      Thread::RestoreWriteBarrierInvariantOp op)
      : ObjectPointerVisitor(group),
        thread_(thread),
        current_(Thread::Current()),
        op_(op) {}

  void VisitPointers(ObjectPtr* first, ObjectPtr* last) override {
    for (; first != last + 1; first++) {
      ObjectPtr obj = *first;
      // Stores into new-space objects don't need a write barrier.
      if (obj->IsImmediateOrNewObject()) continue;

      // To avoid adding too much work into the remembered set, skip large
      // arrays. Write barrier elimination will not remove the barrier
      // if we can trigger GC between array allocation and store.
      if (obj->GetClassId() == kArrayCid) {
        const auto length = Smi::Value(Array::RawCast(obj)->untag()->length());
        if (length > Array::kMaxLengthForWriteBarrierElimination) {
          continue;
        }
      }

      // Dart code won't store into VM-internal objects except Contexts and
      // UnhandledExceptions. This assumption is checked by an assertion in
      // WriteBarrierElimination::UpdateVectorForBlock.
      if (!obj->IsDartInstance() && !obj->IsContext() &&
          !obj->IsUnhandledException())
        continue;

      // Dart code won't store into canonical instances.
      if (obj->untag()->IsCanonical()) continue;

      // Objects in the VM isolate heap are immutable and won't be
      // stored into. Check this condition last because there's no bit
      // in the header for it.
      if (obj->untag()->InVMIsolateHeap()) continue;

      switch (op_) {
        case Thread::RestoreWriteBarrierInvariantOp::kAddToRememberedSet:
          obj->untag()->EnsureInRememberedSet(current_);
          if (current_->is_marking()) {
            current_->DeferredMarkingStackAddObject(obj);
          }
          break;
        case Thread::RestoreWriteBarrierInvariantOp::kAddToDeferredMarkingStack:
          // Re-scan obj when finalizing marking.
          current_->DeferredMarkingStackAddObject(obj);
          break;
      }
    }
  }

#if defined(DART_COMPRESSED_POINTERS)
  void VisitCompressedPointers(uword heap_base,
                               CompressedObjectPtr* first,
                               CompressedObjectPtr* last) override {
    UNREACHABLE();  // Stack slots are not compressed.
  }
#endif

 private:
  Thread* const thread_;
  Thread* const current_;
  Thread::RestoreWriteBarrierInvariantOp op_;
};

// Write barrier elimination assumes that all live temporaries will be
// in the remembered set after a scavenge triggered by a non-Dart-call
// instruction (see Instruction::CanCallDart()), and additionally they will be
// in the deferred marking stack if concurrent marking started. Specifically,
// this includes any instruction which will always create an exit frame
// below the current frame before any other Dart frames.
//
// Therefore, to support this assumption, we scan the stack after a scavenge
// or when concurrent marking begins and add all live temporaries in
// Dart frames preceding an exit frame to the store buffer or deferred
// marking stack.
void Thread::RestoreWriteBarrierInvariant(RestoreWriteBarrierInvariantOp op) {
  ASSERT(IsAtSafepoint() || OwnsGCSafepoint());
  ASSERT(IsDartMutatorThread());
  if (!FLAG_eliminate_write_barriers) return;

  const StackFrameIterator::CrossThreadPolicy cross_thread_policy =
      StackFrameIterator::kAllowCrossThreadIteration;
  StackFrameIterator frames_iterator(top_exit_frame_info(),
                                     ValidationPolicy::kDontValidateFrames,
                                     this, cross_thread_policy);
  RestoreWriteBarrierInvariantVisitor visitor(isolate_group(), this, op);
  ObjectStore* object_store = isolate_group()->object_store();
  bool scan_next_dart_frame = false;
  for (StackFrame* frame = frames_iterator.NextFrame(); frame != nullptr;
       frame = frames_iterator.NextFrame()) {
    if (frame->IsExitFrame()) {
      scan_next_dart_frame = true;
    } else if (frame->IsEntryFrame()) {
      /* Continue searching. */
    } else if (frame->IsStubFrame()) {
      const uword pc = frame->pc();
      if (Code::ContainsInstructionAt(
              object_store->init_late_static_field_stub(), pc) ||
          Code::ContainsInstructionAt(
              object_store->init_late_final_static_field_stub(), pc) ||
          Code::ContainsInstructionAt(
              object_store->init_late_instance_field_stub(), pc) ||
          Code::ContainsInstructionAt(
              object_store->init_late_final_instance_field_stub(), pc)) {
        scan_next_dart_frame = true;
      }
    } else {
      ASSERT(frame->IsDartFrame(/*validate=*/false));
      if (scan_next_dart_frame) {
        frame->VisitObjectPointers(&visitor);
      }
      scan_next_dart_frame = false;
    }
  }
}

void Thread::DeferredMarkLiveTemporaries() {
  RestoreWriteBarrierInvariant(
      RestoreWriteBarrierInvariantOp::kAddToDeferredMarkingStack);
}

void Thread::RememberLiveTemporaries() {
  RestoreWriteBarrierInvariant(
      RestoreWriteBarrierInvariantOp::kAddToRememberedSet);
}

bool Thread::CanLoadFromThread(const Object& object) {
  // In order to allow us to use assembler helper routines with non-[Code]
  // objects *before* stubs are initialized, we only loop over the stubs if
  // the [object] is in fact a [Code] object.
  if (object.IsCode()) {
#define CHECK_OBJECT(type_name, member_name, expr, default_init_value) \
  if (object.ptr() == expr) { \
    return true; \
  }
    CACHED_VM_STUBS_LIST(CHECK_OBJECT)
#undef CHECK_OBJECT
  }

  // For non-[Code] objects we check whether the object equals any of the
  // cached non-stub entries.
#define CHECK_OBJECT(type_name, member_name, expr, default_init_value) \
  if (object.ptr() == expr) { \
    return true; \
  }
  CACHED_NON_VM_STUB_LIST(CHECK_OBJECT)
#undef CHECK_OBJECT
  return false;
}

intptr_t Thread::OffsetFromThread(const Object& object) {
  // In order to allow us to use assembler helper routines with non-[Code]
  // objects *before* stubs are initialized, we only loop over the stubs if
  // the [object] is in fact a [Code] object.
  if (object.IsCode()) {
#define COMPUTE_OFFSET(type_name, member_name, expr, default_init_value) \
  ASSERT((expr)->untag()->InVMIsolateHeap()); \
  if (object.ptr() == expr) { \
    return Thread::member_name##offset(); \
  }
    CACHED_VM_STUBS_LIST(COMPUTE_OFFSET)
#undef COMPUTE_OFFSET
  }

  // For non-[Code] objects we check whether the object equals any of the
  // cached non-stub entries.
#define COMPUTE_OFFSET(type_name, member_name, expr, default_init_value) \
  if (object.ptr() == expr) { \
    return Thread::member_name##offset(); \
  }
  CACHED_NON_VM_STUB_LIST(COMPUTE_OFFSET)
#undef COMPUTE_OFFSET

  UNREACHABLE();
  return -1;
}

bool Thread::ObjectAtOffset(intptr_t offset, Object* object) {
  if (Isolate::Current() == Dart::vm_isolate()) {
    // --disassemble-stubs runs before all the references through
    // thread have targets.
    return false;
  }

#define COMPUTE_OFFSET(type_name, member_name, expr, default_init_value) \
  if (Thread::member_name##offset() == offset) { \
    *object = expr; \
    return true; \
  }
  CACHED_VM_OBJECTS_LIST(COMPUTE_OFFSET)
#undef COMPUTE_OFFSET
  return false;
}

intptr_t Thread::OffsetFromThread(const RuntimeEntry* runtime_entry) {
#define COMPUTE_OFFSET(name) \
  if (runtime_entry == &k##name##RuntimeEntry) { \
    return Thread::name##_entry_point_offset(); \
  }
  RUNTIME_ENTRY_LIST(COMPUTE_OFFSET)
#undef COMPUTE_OFFSET

#define COMPUTE_OFFSET(returntype, name, ...) \
  if (runtime_entry == &k##name##RuntimeEntry) { \
    return Thread::name##_entry_point_offset(); \
  }
  LEAF_RUNTIME_ENTRY_LIST(COMPUTE_OFFSET)
#undef COMPUTE_OFFSET

  UNREACHABLE();
  return -1;
}

#if defined(DEBUG)
bool Thread::TopErrorHandlerIsSetJump() const {
  if (long_jump_base() == nullptr) return false;
  if (top_exit_frame_info_ == 0) return true;
#if defined(USING_SIMULATOR) || defined(USING_SAFE_STACK)
  // False positives: simulator stack and native stack are unordered.
  return true;
#else
  return reinterpret_cast<uword>(long_jump_base()) < top_exit_frame_info_;
#endif
}

bool Thread::TopErrorHandlerIsExitFrame() const {
  if (top_exit_frame_info_ == 0) return false;
  if (long_jump_base() == nullptr) return true;
#if defined(USING_SIMULATOR) || defined(USING_SAFE_STACK)
  // False positives: simulator stack and native stack are unordered.
  return true;
#else
  return top_exit_frame_info_ < reinterpret_cast<uword>(long_jump_base());
#endif
}
#endif  // defined(DEBUG)

bool Thread::IsValidHandle(Dart_Handle object) const {
  return IsValidLocalHandle(object) || IsValidZoneHandle(object) ||
         IsValidScopedHandle(object);
}

bool Thread::IsValidLocalHandle(Dart_Handle object) const {
  ApiLocalScope* scope = api_top_scope_;
  while (scope != nullptr) {
    if (scope->local_handles()->IsValidHandle(object)) {
      return true;
    }
    scope = scope->previous();
  }
  return false;
}

intptr_t Thread::CountLocalHandles() const {
  intptr_t total = 0;
  ApiLocalScope* scope = api_top_scope_;
  while (scope != nullptr) {
    total += scope->local_handles()->CountHandles();
    scope = scope->previous();
  }
  return total;
}

int Thread::ZoneSizeInBytes() const {
  int total = 0;
  ApiLocalScope* scope = api_top_scope_;
  while (scope != nullptr) {
    total += scope->zone()->SizeInBytes();
    scope = scope->previous();
  }
  return total;
}

void Thread::EnterApiScope() {
  ASSERT(MayAllocateHandles());
  ApiLocalScope* new_scope = api_reusable_scope();
  if (new_scope == nullptr) {
    new_scope = new ApiLocalScope(api_top_scope(), top_exit_frame_info());
    ASSERT(new_scope != nullptr);
  } else {
    new_scope->Reinit(this, api_top_scope(), top_exit_frame_info());
    set_api_reusable_scope(nullptr);
  }
  set_api_top_scope(new_scope);  // New scope is now the top scope.
}

void Thread::ExitApiScope() {
  ASSERT(MayAllocateHandles());
  ApiLocalScope* scope = api_top_scope();
  ApiLocalScope* reusable_scope = api_reusable_scope();
  set_api_top_scope(scope->previous());  // Reset top scope to previous.
  if (reusable_scope == nullptr) {
    scope->Reset(this);  // Reset the old scope which we just exited.
    set_api_reusable_scope(scope);
  } else {
    ASSERT(reusable_scope != scope);
    delete scope;
  }
}

void Thread::UnwindScopes(uword stack_marker) {
  // Unwind all scopes using the same stack_marker, i.e. all scopes allocated
  // under the same top_exit_frame_info.
  ApiLocalScope* scope = api_top_scope_;
  while (scope != nullptr && scope->stack_marker() != 0 &&
         scope->stack_marker() == stack_marker) {
    api_top_scope_ = scope->previous();
    delete scope;
    scope = api_top_scope_;
  }
}

void Thread::EnterSafepointUsingLock() {
  isolate_group()->safepoint_handler()->EnterSafepointUsingLock(this);
}

void Thread::ExitSafepointUsingLock() {
  isolate_group()->safepoint_handler()->ExitSafepointUsingLock(this);
}

void Thread::BlockForSafepoint() {
  isolate_group()->safepoint_handler()->BlockForSafepoint(this);
}

bool Thread::OwnsGCSafepoint() const {
  return isolate_group()->safepoint_handler()->InnermostSafepointOperation(
             this) <= SafepointLevel::kGCAndDeopt;
}

bool Thread::OwnsDeoptSafepoint() const {
  return isolate_group()->safepoint_handler()->InnermostSafepointOperation(
             this) == SafepointLevel::kGCAndDeopt;
}

bool Thread::OwnsReloadSafepoint() const {
  return isolate_group()->safepoint_handler()->InnermostSafepointOperation(
             this) <= SafepointLevel::kGCAndDeoptAndReload;
}

bool Thread::OwnsSafepoint() const {
  return isolate_group()->safepoint_handler()->InnermostSafepointOperation(
             this) != SafepointLevel::kNoSafepoint;
}

bool Thread::CanAcquireSafepointLocks() const {
  // A thread may acquire locks and then enter a safepoint operation (e.g.
  // holding the program lock, then allocating objects, which triggers GC).
  //
  // So if this code is called inside a safepoint operation, we generally have
  // to assume other threads may hold locks and are blocked on the safepoint,
  // meaning we cannot hold a safepoint and acquire locks (deadlock!).
  //
  // Though if we own a reload safepoint operation it means all other mutators
  // are blocked in very specific places, where we know no locks are held. As
  // such we allow the current thread to acquire locks.
  //
  // Example: We own a reload safepoint operation and load kernel, which
  // allocates symbols, where the symbol implementation acquires the symbol
  // lock (we know other mutators at a reload safepoint do not hold the symbol
  // lock).
  return isolate_group()->safepoint_handler()->InnermostSafepointOperation(
             this) >= SafepointLevel::kGCAndDeoptAndReload;
}

void Thread::SetupState(TaskKind kind) {
  task_kind_ = kind;
}

void Thread::ResetState() {
  task_kind_ = kUnknownTask;
  vm_tag_ = VMTag::kInvalidTagId;
}

void Thread::SetupMutatorState(TaskKind kind) {
  ASSERT(store_buffer_block_ == nullptr);

  if (isolate_group()->marking_stack() != nullptr) {
    // Concurrent mark in progress. Enable the barrier for this thread.
    MarkingStackAcquire();
    DeferredMarkingStackAcquire();
  }

  // TODO(koda): Use StoreBufferAcquire once we properly flush
  // before Scavenge.
  if (kind == kMutatorTask) {
    StoreBufferAcquire();
  } else {
    store_buffer_block_ = isolate_group()->store_buffer()->PopEmptyBlock();
  }
}

void Thread::ResetMutatorState() {
  ASSERT(execution_state() == Thread::kThreadInVM);
  ASSERT(store_buffer_block_ != nullptr);

  if (is_marking()) {
    MarkingStackRelease();
    DeferredMarkingStackRelease();
  }
  StoreBufferRelease();
}

void Thread::SetupDartMutatorState(Isolate* isolate) {
  field_table_values_ = isolate->field_table_->table();
  isolate->mutator_thread_ = this;

  SetupDartMutatorStateDependingOnSnapshot(isolate->group());
}

void Thread::SetupDartMutatorStateDependingOnSnapshot(IsolateGroup* group) {
  // The snapshot may or may not have been read at this point (on isolate
  // group creation, the first isolate is entered for the first time before
  // the snapshot is read).
  //
  // So we call this code explicitly after snapshot reading time and whenever
  // we enter an isolate with a new thread object.
#if defined(DART_PRECOMPILED_RUNTIME)
  auto object_store = group->object_store();
  if (object_store != nullptr) {
    global_object_pool_ = object_store->global_object_pool();

    auto dispatch_table = group->dispatch_table();
    if (dispatch_table != nullptr) {
      dispatch_table_array_ = dispatch_table->ArrayOrigin();
    }
#define INIT_ENTRY_POINT(name) \
  if (object_store->name() != Object::null()) { \
    name##_entry_point_ = Function::EntryPointOf(object_store->name()); \
  }
    CACHED_FUNCTION_ENTRY_POINTS_LIST(INIT_ENTRY_POINT)
#undef INIT_ENTRY_POINT
  }
#endif  // defined(DART_PRECOMPILED_RUNTIME)
}

void Thread::ResetDartMutatorState(Isolate* isolate) {
  ASSERT(execution_state() == Thread::kThreadInVM);

  isolate->mutator_thread_ = nullptr;
  is_unwind_in_progress_ = false;

  field_table_values_ = nullptr;
  ONLY_IN_PRECOMPILED(global_object_pool_ = ObjectPool::null());
  ONLY_IN_PRECOMPILED(dispatch_table_array_ = nullptr);
}

DisableThreadInterruptsScope::DisableThreadInterruptsScope(Thread* thread)
    : StackResource(thread) {
  if (thread != nullptr) {
    OSThread* os_thread = thread->os_thread();
    ASSERT(os_thread != nullptr);
    os_thread->DisableThreadInterrupts();
  }
}

DisableThreadInterruptsScope::~DisableThreadInterruptsScope() {
  if (thread() != nullptr) {
    OSThread* os_thread = thread()->os_thread();
    ASSERT(os_thread != nullptr);
    os_thread->EnableThreadInterrupts();
  }
}

NoReloadScope::NoReloadScope(Thread* thread) : ThreadStackResource(thread) {
#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
  thread->no_reload_scope_depth_++;
  ASSERT(thread->no_reload_scope_depth_ >= 0);
#endif  // !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
}

NoReloadScope::~NoReloadScope() {
#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
  thread()->no_reload_scope_depth_ -= 1;
  ASSERT(thread()->no_reload_scope_depth_ >= 0);
  auto isolate = thread()->isolate();
  const intptr_t state = thread()->safepoint_state();

  if (thread()->no_reload_scope_depth_ == 0) {
    // If we were asked to go to a reload safepoint & block for a reload
    // safepoint operation on another thread - *while* being inside
    // [NoReloadScope] - we may have handled & ignored the OOB message telling
    // us to reload.
    //
    // Since we're now exiting the [NoReloadScope], we'll send another OOB
    // reload request message to ourselves, which will be handled in a
    // well-defined place where we can perform the reload.
    if (isolate != nullptr &&
        Thread::IsSafepointLevelRequested(
            state, SafepointLevel::kGCAndDeoptAndReload)) {
      isolate->SendInternalLibMessage(Isolate::kCheckForReload,
                                      /*ignored=*/-1);
    }
  }
#endif  // !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
}

}  // namespace dart
