// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/profiler.h"

#include "platform/address_sanitizer.h"
#include "platform/atomic.h"
#include "platform/memory_sanitizer.h"
#include "platform/utils.h"
#include "vm/allocation.h"
#include "vm/code_patcher.h"
#if !defined(DART_PRECOMPILED_RUNTIME)
#include "vm/compiler/compiler_state.h"
#endif
#include "vm/debugger.h"
#include "vm/instructions.h"
#include "vm/isolate.h"
#include "vm/json_stream.h"
#include "vm/lockers.h"
#include "vm/message_handler.h"
#include "vm/native_symbol.h"
#include "vm/object.h"
#include "vm/os.h"
#include "vm/profiler_service.h"
#include "vm/reusable_handles.h"
#include "vm/signal_handler.h"
#include "vm/simulator.h"
#include "vm/stack_frame.h"
#include "vm/timeline.h"
#include "vm/version.h"

namespace dart {

static constexpr intptr_t kMaxSamplesPerTick = 4;

DEFINE_FLAG(bool, trace_profiled_isolates, false, "Trace profiled isolates.");

DEFINE_FLAG(int,
            profile_period,
            1000,
            "Time between profiler samples in microseconds. Minimum 50.");
DEFINE_FLAG(int,
            max_profile_depth,
            Sample::kPCArraySizeInWords * kMaxSamplesPerTick,
            "Maximum number of stack frames walked. Minimum 1. Maximum 255.");
#if defined(USING_SIMULATOR)
DEFINE_FLAG(bool, profile_vm, true, "Always collect native stack traces.");
#else
DEFINE_FLAG(bool, profile_vm, false, "Always collect native stack traces.");
#endif
DEFINE_FLAG(bool,
            profile_vm_allocation,
            false,
            "Collect native stack traces when tracing Dart allocations.");

DEFINE_FLAG(
    int,
    sample_buffer_duration,
    0,
    "Defines the size of the profiler sample buffer to contain at least "
    "N seconds of samples at a given sample rate. If not provided, the "
    "default is ~4 seconds. Large values will greatly increase memory "
    "consumption.");

// Include native stack dumping helpers into AOT compiler even in PRODUCT
// mode. This allows reporting more informative errors when gen_snapshot
// crashes.
#if !defined(PRODUCT) || defined(DART_PRECOMPILER)
ProfilerCounters Profiler::counters_ = {};

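// Prints one native stack frame to stderr, resolving the pc to a symbol name
// or, failing that, to the containing shared object.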
static void DumpStackFrame(intptr_t frame_index, uword pc, uword fp) {
  uword start = 0;
  if (auto const name = NativeSymbolResolver::LookupSymbolName(pc, &start)) {
    uword offset = pc - start;
    OS::PrintErr(" pc 0x%" Pp " fp 0x%" Pp " %s+0x%" Px "\n", pc, fp, name,
                 offset);
    NativeSymbolResolver::FreeSymbolName(name);
    return;
  }

  char* dso_name;
  uword dso_base;
  if (NativeSymbolResolver::LookupSharedObject(pc, &dso_base, &dso_name)) {
    uword dso_offset = pc - dso_base;
    OS::PrintErr(" pc 0x%" Pp " fp 0x%" Pp " %s+0x%" Px "\n", pc, fp, dso_name,
                 dso_offset);
    NativeSymbolResolver::FreeSymbolName(dso_name);
    return;
  }

  OS::PrintErr(" pc 0x%" Pp " fp 0x%" Pp " Unknown symbol\n", pc, fp);
}

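// Common base for the stack walkers below: appends walked pc values either to
// a (possibly chained) Sample or, when no sample is provided, to stderr.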
class ProfilerStackWalker : public ValueObject {
 public:
  ProfilerStackWalker(Dart_Port port_id,
                      Sample* head_sample,
                      SampleBuffer* sample_buffer,
                      intptr_t skip_count = 0)
      : port_id_(port_id),
        sample_(head_sample),
        sample_buffer_(sample_buffer),
        skip_count_(skip_count),
        frames_skipped_(0),
        frame_index_(0),
        total_frames_(0) {
    if (sample_ == nullptr) {
      ASSERT(sample_buffer_ == nullptr);
    } else {
      ASSERT(sample_buffer_ != nullptr);
      ASSERT(sample_->head_sample());
    }
  }

  bool Append(uword pc, uword fp) {
    if (frames_skipped_ < skip_count_) {
      frames_skipped_++;
      return true;
    }

    if (sample_ == nullptr) {
      DumpStackFrame(frame_index_, pc, fp);
      frame_index_++;
      total_frames_++;
      return true;
    }
    if (total_frames_ >= FLAG_max_profile_depth) {
      sample_->set_truncated_trace(true);
      return false;
    }
    ASSERT(sample_ != nullptr);
    if (frame_index_ == Sample::kPCArraySizeInWords) {
      Sample* new_sample = sample_buffer_->ReserveSampleAndLink(sample_);
      if (new_sample == nullptr) {
        // Could not reserve a new sample; mark this one as truncated.
        sample_->set_truncated_trace(true);
        return false;
      }
      frame_index_ = 0;
      sample_ = new_sample;
    }
    ASSERT(frame_index_ < Sample::kPCArraySizeInWords);
    sample_->SetAt(frame_index_, pc);
    frame_index_++;
    total_frames_++;
    return true;
  }

 protected:
  Dart_Port port_id_;
  Sample* sample_;
  SampleBuffer* sample_buffer_;
  intptr_t skip_count_;
  intptr_t frames_skipped_;
  intptr_t frame_index_;
  intptr_t total_frames_;
};

// The layout of C stack frames.
#if defined(HOST_ARCH_IA32) || defined(HOST_ARCH_X64) || \
    defined(HOST_ARCH_ARM) || defined(HOST_ARCH_ARM64)
// +-------------+
// | saved IP/LR |
// +-------------+
// | saved FP    | <- FP
// +-------------+
static constexpr intptr_t kHostSavedCallerPcSlotFromFp = 1;
static constexpr intptr_t kHostSavedCallerFpSlotFromFp = 0;
#elif defined(HOST_ARCH_RISCV32) || defined(HOST_ARCH_RISCV64)
// +-------------+
// |             | <- FP
// +-------------+
// | saved RA    |
// +-------------+
// | saved FP    |
// +-------------+
static constexpr intptr_t kHostSavedCallerPcSlotFromFp = -1;
static constexpr intptr_t kHostSavedCallerFpSlotFromFp = -2;
#else
#error What architecture?
#endif

// If the VM is compiled without frame pointers (which is the default on
// recent GCC versions with optimization enabled) the stack walking code may
// fail.
//
class ProfilerNativeStackWalker : public ProfilerStackWalker {
 public:
  ProfilerNativeStackWalker(ProfilerCounters* counters,
                            Dart_Port port_id,
                            Sample* sample,
                            SampleBuffer* sample_buffer,
                            uword stack_lower,
                            uword stack_upper,
                            uword pc,
                            uword fp,
                            uword sp,
                            intptr_t skip_count = 0)
      : ProfilerStackWalker(port_id, sample, sample_buffer, skip_count),
        counters_(counters),
        stack_upper_(stack_upper),
        original_pc_(pc),
        original_fp_(fp),
        original_sp_(sp),
        lower_bound_(stack_lower) {}

  void walk() {
    const uword kMaxStep = VirtualMemory::PageSize();

    Append(original_pc_, original_fp_);

    uword* pc = reinterpret_cast<uword*>(original_pc_);
    uword* fp = reinterpret_cast<uword*>(original_fp_);
    uword* previous_fp = fp;

    uword gap = original_fp_ - original_sp_;
    if (gap >= kMaxStep) {
      // Gap between frame pointer and stack pointer is
      // too large.
      counters_->incomplete_sample_fp_step.fetch_add(1);
      return;
    }

    if (!ValidFramePointer(fp)) {
      counters_->incomplete_sample_fp_bounds.fetch_add(1);
      return;
    }

    while (true) {
      pc = CallerPC(fp);
      previous_fp = fp;
      fp = CallerFP(fp);

      if (fp == nullptr) {
        return;
      }

      if (fp <= previous_fp) {
        // Frame pointer did not move to a higher address.
        counters_->incomplete_sample_fp_step.fetch_add(1);
        return;
      }

      gap = fp - previous_fp;
      if (gap >= kMaxStep) {
        // Frame pointer step is too large.
        counters_->incomplete_sample_fp_step.fetch_add(1);
        return;
      }

      if (!ValidFramePointer(fp)) {
        // Frame pointer is outside of isolate stack boundary.
        counters_->incomplete_sample_fp_bounds.fetch_add(1);
        return;
      }

      const uword pc_value = reinterpret_cast<uword>(pc);
      if ((pc_value + 1) < pc_value) {
        // It is not uncommon to encounter an invalid pc as we
        // traverse a stack frame. Most of these we can tolerate. If
        // the pc is so large that adding one to it will cause an
        // overflow it is invalid and it will cause headaches later
        // while we are building the profile. Discard it.
        counters_->incomplete_sample_bad_pc.fetch_add(1);
        return;
      }

      // Move the lower bound up.
      lower_bound_ = reinterpret_cast<uword>(fp);

      if (!Append(pc_value, reinterpret_cast<uword>(fp))) {
        return;
      }
    }
  }

 private:
  uword* CallerPC(uword* fp) const {
    ASSERT(fp != nullptr);
    uword* caller_pc_ptr = fp + kHostSavedCallerPcSlotFromFp;
    // This may actually be uninitialized, by design (see class comment above).
    MSAN_UNPOISON(caller_pc_ptr, kWordSize);
    ASAN_UNPOISON(caller_pc_ptr, kWordSize);
    return reinterpret_cast<uword*>(*caller_pc_ptr);
  }

  uword* CallerFP(uword* fp) const {
    ASSERT(fp != nullptr);
    uword* caller_fp_ptr = fp + kHostSavedCallerFpSlotFromFp;
    // This may actually be uninitialized, by design (see class comment above).
    MSAN_UNPOISON(caller_fp_ptr, kWordSize);
    ASAN_UNPOISON(caller_fp_ptr, kWordSize);
    return reinterpret_cast<uword*>(*caller_fp_ptr);
  }

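  // Returns true if |fp| (plus one word) still lies between the walker's
  // moving lower bound and the stack's upper bound.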
  bool ValidFramePointer(uword* fp) const {
    if (fp == nullptr) {
      return false;
    }
    uword cursor = reinterpret_cast<uword>(fp);
    cursor += sizeof(fp);
    bool r = (cursor >= lower_bound_) && (cursor < stack_upper_);
    return r;
  }

  ProfilerCounters* const counters_;
  const uword stack_upper_;
  const uword original_pc_;
  const uword original_fp_;
  const uword original_sp_;
  uword lower_bound_;
};

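// Returns true if both |sp| and |fp| lie within [stack_lower, stack_upper).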
static bool ValidateThreadStackBounds(uintptr_t fp,
                                      uintptr_t sp,
                                      uword stack_lower,
                                      uword stack_upper) {
  if (stack_lower >= stack_upper) {
    // Stack boundary is invalid.
    return false;
  }

  if ((sp < stack_lower) || (sp >= stack_upper)) {
    // Stack pointer is outside thread's stack boundary.
    return false;
  }

  if ((fp < stack_lower) || (fp >= stack_upper)) {
    // Frame pointer is outside the thread's stack boundary.
    return false;
  }

  return true;
}

// Get |thread|'s stack boundary and verify that |sp| and |fp| are within
// it. Return |false| if anything looks suspicious.
static bool GetAndValidateThreadStackBounds(OSThread* os_thread,
                                            Thread* thread,
                                            uintptr_t fp,
                                            uintptr_t sp,
                                            uword* stack_lower,
                                            uword* stack_upper) {
  ASSERT(os_thread != nullptr);
  ASSERT(stack_lower != nullptr);
  ASSERT(stack_upper != nullptr);

#if defined(USING_SIMULATOR)
  const bool use_simulator_stack_bounds =
      thread != nullptr && thread->IsExecutingDartCode();
  if (use_simulator_stack_bounds) {
    Isolate* isolate = thread->isolate();
    ASSERT(isolate != nullptr);
    Simulator* simulator = isolate->simulator();
    *stack_lower = simulator->stack_limit();
    *stack_upper = simulator->stack_base();
  }
#else
  const bool use_simulator_stack_bounds = false;
#endif  // defined(USING_SIMULATOR)

  if (!use_simulator_stack_bounds) {
    *stack_lower = os_thread->stack_limit();
    *stack_upper = os_thread->stack_base();
  }

  if ((*stack_lower == 0) || (*stack_upper == 0)) {
    return false;
  }

  if (!use_simulator_stack_bounds && (sp > *stack_lower)) {
    // The stack pointer gives us a tighter lower bound.
    *stack_lower = sp;
  }

  return ValidateThreadStackBounds(fp, sp, *stack_lower, *stack_upper);
}

void Profiler::DumpStackTrace(void* context) {
  if (context == nullptr) {
    DumpStackTrace(/*for_crash=*/true);
    return;
  }
#if defined(DART_HOST_OS_LINUX) || defined(DART_HOST_OS_MACOS) || \
    defined(DART_HOST_OS_ANDROID)
  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
  mcontext_t mcontext = ucontext->uc_mcontext;
  uword pc = SignalHandler::GetProgramCounter(mcontext);
  uword fp = SignalHandler::GetFramePointer(mcontext);
  uword sp = SignalHandler::GetCStackPointer(mcontext);
  DumpStackTrace(sp, fp, pc, /*for_crash=*/true);
#elif defined(DART_HOST_OS_WINDOWS)
  CONTEXT* ctx = reinterpret_cast<CONTEXT*>(context);
#if defined(HOST_ARCH_IA32)
  uword pc = static_cast<uword>(ctx->Eip);
  uword fp = static_cast<uword>(ctx->Ebp);
  uword sp = static_cast<uword>(ctx->Esp);
#elif defined(HOST_ARCH_X64)
  uword pc = static_cast<uword>(ctx->Rip);
  uword fp = static_cast<uword>(ctx->Rbp);
  uword sp = static_cast<uword>(ctx->Rsp);
#elif defined(HOST_ARCH_ARM)
  uword pc = static_cast<uword>(ctx->Pc);
  uword fp = static_cast<uword>(ctx->R11);
  uword sp = static_cast<uword>(ctx->Sp);
#elif defined(HOST_ARCH_ARM64)
  uword pc = static_cast<uword>(ctx->Pc);
  uword fp = static_cast<uword>(ctx->Fp);
  uword sp = static_cast<uword>(ctx->Sp);
#else
#error Unsupported architecture.
#endif
  DumpStackTrace(sp, fp, pc, /*for_crash=*/true);
#else
  // TODO(fschneider): Add support for more platforms.
  // Do nothing on unsupported platforms.
#endif
}

void Profiler::DumpStackTrace(bool for_crash) {
  uintptr_t sp = OSThread::GetCurrentStackPointer();
  uintptr_t fp = 0;
  uintptr_t pc = OS::GetProgramCounter();

  COPY_FP_REGISTER(fp);

  DumpStackTrace(sp, fp, pc, for_crash);
}

static void DumpCompilerState(Thread* thread) {
#if !defined(DART_PRECOMPILED_RUNTIME)
  if (thread != nullptr && thread->execution_state() == Thread::kThreadInVM &&
      thread->HasCompilerState()) {
    thread->compiler_state().ReportCrash();
  }
#endif
}

void Profiler::DumpStackTrace(uword sp, uword fp, uword pc, bool for_crash) {
  if (for_crash) {
    // Allow only one stack trace to prevent recursively printing stack traces
    // if we hit an assert while printing the stack.
    static RelaxedAtomic<uintptr_t> started_dump = 0;
    if (started_dump.fetch_add(1u) != 0) {
      OS::PrintErr("Aborting reentrant request for stack trace.\n");
      return;
    }
  }

  auto os_thread = OSThread::Current();
  ASSERT(os_thread != nullptr);
  auto thread = Thread::Current();  // nullptr if no current isolate.
  auto isolate = thread == nullptr ? nullptr : thread->isolate();
  auto isolate_group = thread == nullptr ? nullptr : thread->isolate_group();
  auto source = isolate_group == nullptr ? nullptr : isolate_group->source();
  auto vm_source =
      Dart::vm_isolate() == nullptr ? nullptr : Dart::vm_isolate()->source();
  const char* isolate_group_name =
      isolate_group == nullptr ? "(nil)" : isolate_group->source()->name;
  const char* isolate_name = isolate == nullptr ? "(nil)" : isolate->name();
#if defined(PRODUCT)
  const intptr_t thread_id = -1;
#else
  const intptr_t thread_id = OSThread::ThreadIdToIntPtr(os_thread->trace_id());
#endif

  OS::PrintErr("version=%s\n", Version::String());
  OS::PrintErr("pid=%" Pd ", thread=%" Pd
               ", isolate_group=%s(%p), isolate=%s(%p)\n",
               static_cast<intptr_t>(OS::ProcessId()), thread_id,
               isolate_group_name, isolate_group, isolate_name, isolate);
#if defined(DART_COMPRESSED_POINTERS)
  const char kCompressedPointers[] = "yes";
#else
  const char kCompressedPointers[] = "no";
#endif
#if defined(USING_SIMULATOR)
  const char kUsingSimulator[] = "yes";
#else
  const char kUsingSimulator[] = "no";
#endif
  OS::PrintErr("os=%s, arch=%s, comp=%s, sim=%s\n", kHostOperatingSystemName,
               kTargetArchitectureName, kCompressedPointers, kUsingSimulator);
  OS::PrintErr("isolate_instructions=%" Px ", vm_instructions=%" Px "\n",
               source == nullptr
                   ? 0
                   : reinterpret_cast<uword>(source->snapshot_instructions),
               vm_source == nullptr
                   ? 0
                   : reinterpret_cast<uword>(vm_source->snapshot_instructions));
  OS::PrintErr("fp=%" Px ", sp=%" Px ", pc=%" Px "\n", fp, sp, pc);

  uword stack_lower = 0;
  uword stack_upper = 0;
  if (!GetAndValidateThreadStackBounds(os_thread, thread, fp, sp, &stack_lower,
                                       &stack_upper)) {
    OS::PrintErr(
        "Stack dump aborted because GetAndValidateThreadStackBounds failed.\n");
    if (pc != 0) {  // At the very least dump the top frame.
      DumpStackFrame(0, pc, fp);
    }
    DumpCompilerState(thread);
    return;
  }

  ProfilerNativeStackWalker native_stack_walker(&counters_, ILLEGAL_PORT,
                                                nullptr, nullptr, stack_lower,
                                                stack_upper, pc, fp, sp,
                                                /*skip_count=*/0);
  native_stack_walker.walk();
  OS::PrintErr("-- End of DumpStackTrace\n");

  if (thread != nullptr) {
    if (thread->execution_state() == Thread::kThreadInNative) {
      TransitionNativeToVM transition(thread);
      StackFrame::DumpCurrentTrace();
    } else if (thread->execution_state() == Thread::kThreadInVM) {
      StackFrame::DumpCurrentTrace();
    }
  }

  DumpCompilerState(thread);
}
#endif  // !defined(PRODUCT) || defined(DART_PRECOMPILER)

#ifndef PRODUCT

RelaxedAtomic<bool> Profiler::initialized_ = false;
SampleBlockBuffer* Profiler::sample_block_buffer_ = nullptr;

bool SampleBlockProcessor::initialized_ = false;
bool SampleBlockProcessor::shutdown_ = false;
bool SampleBlockProcessor::thread_running_ = false;
ThreadJoinId SampleBlockProcessor::processor_thread_id_ =
    OSThread::kInvalidThreadJoinId;
Monitor* SampleBlockProcessor::monitor_ = nullptr;

void Profiler::Init() {
  // Place some sane restrictions on user controlled flags.
  SetSampleDepth(FLAG_max_profile_depth);
  if (!FLAG_profiler) {
    return;
  }
  ASSERT(!initialized_);
  SetSamplePeriod(FLAG_profile_period);
  // The profiler may have been shutdown previously, in which case the sample
  // buffer will have already been initialized.
  if (sample_block_buffer_ == nullptr) {
    intptr_t num_blocks = CalculateSampleBufferCapacity();
    sample_block_buffer_ = new SampleBlockBuffer(num_blocks);
  }
  ThreadInterrupter::Init();
  ThreadInterrupter::Startup();
  SampleBlockProcessor::Init();
  SampleBlockProcessor::Startup();
  initialized_ = true;
}

class SampleBlockCleanupVisitor : public IsolateVisitor {
 public:
  SampleBlockCleanupVisitor() = default;
  virtual ~SampleBlockCleanupVisitor() = default;

  void VisitIsolate(Isolate* isolate) {
    isolate->set_current_allocation_sample_block(nullptr);
    isolate->set_current_sample_block(nullptr);
  }
};

void Profiler::Cleanup() {
  if (!FLAG_profiler) {
    return;
  }
  ASSERT(initialized_);
  ThreadInterrupter::Cleanup();
  SampleBlockProcessor::Cleanup();
  SampleBlockCleanupVisitor visitor;
  Isolate::VisitIsolates(&visitor);
  initialized_ = false;
}

void Profiler::UpdateRunningState() {
  if (!FLAG_profiler && initialized_) {
    Cleanup();
  } else if (FLAG_profiler && !initialized_) {
    Init();
  }
}

void Profiler::SetSampleDepth(intptr_t depth) {
  const int kMinimumDepth = 2;
  const int kMaximumDepth = 255;
  if (depth < kMinimumDepth) {
    FLAG_max_profile_depth = kMinimumDepth;
  } else if (depth > kMaximumDepth) {
    FLAG_max_profile_depth = kMaximumDepth;
  } else {
    FLAG_max_profile_depth = depth;
  }
}

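// Number of profiler ticks per second implied by the current sample period.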
static intptr_t SamplesPerSecond() {
  const intptr_t kMicrosPerSec = 1000000;
  return kMicrosPerSec / FLAG_profile_period;
}

intptr_t Profiler::CalculateSampleBufferCapacity() {
  if (FLAG_sample_buffer_duration <= 0) {
    return SampleBlockBuffer::kDefaultBlockCount;
  }
  // Deeper stacks require more than a single Sample object to be represented
  // correctly. These samples are chained, so we need to determine the worst
  // case sample chain length for a single stack.
  const intptr_t max_sample_chain_length =
      FLAG_max_profile_depth / kMaxSamplesPerTick;
  const intptr_t sample_count = FLAG_sample_buffer_duration *
                                SamplesPerSecond() * max_sample_chain_length;
  return (sample_count / SampleBlock::kSamplesPerBlock) + 1;
}

void Profiler::SetSamplePeriod(intptr_t period) {
  const int kMinimumProfilePeriod = 50;
  if (period < kMinimumProfilePeriod) {
    FLAG_profile_period = kMinimumProfilePeriod;
  } else {
    FLAG_profile_period = period;
  }
  ThreadInterrupter::SetInterruptPeriod(FLAG_profile_period);
}

void Profiler::UpdateSamplePeriod() {
  SetSamplePeriod(FLAG_profile_period);
}

SampleBlockBuffer::SampleBlockBuffer(intptr_t blocks,
                                     intptr_t samples_per_block) {
  const intptr_t size = Utils::RoundUp(
      blocks * samples_per_block * sizeof(Sample), VirtualMemory::PageSize());
  const bool executable = false;
  const bool compressed = false;
  memory_ =
      VirtualMemory::Allocate(size, executable, compressed, "dart-profiler");
  if (memory_ == nullptr) {
    OUT_OF_MEMORY();
  }
  sample_buffer_ = reinterpret_cast<Sample*>(memory_->address());
  blocks_ = new SampleBlock[blocks];
  for (intptr_t i = 0; i < blocks; ++i) {
    blocks_[i].Init(&sample_buffer_[i * samples_per_block], samples_per_block);
  }
  capacity_ = blocks;
  cursor_ = 0;
}

SampleBlockBuffer::~SampleBlockBuffer() {
  delete[] blocks_;
  blocks_ = nullptr;
  delete memory_;
  memory_ = nullptr;
  capacity_ = 0;
  cursor_ = 0;
}

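// Scans the block list round-robin starting at the cursor: prefers a free
// block and otherwise recycles a completed one; returns nullptr when every
// block is still in use.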
SampleBlock* SampleBlockBuffer::ReserveSampleBlock() {
  intptr_t capacity = capacity_;
  intptr_t start = cursor_.fetch_add(1) % capacity;
  intptr_t i = start;
  do {
    SampleBlock* block = &blocks_[i];
    if (block->TryAllocateFree()) {
      return block;
    }
    i = (i + 1) % capacity;
  } while (i != start);

  // No free blocks: try for completed block instead.
  i = start;
  do {
    SampleBlock* block = &blocks_[i];
    if (block->TryAllocateCompleted()) {
      return block;
    }
    i = (i + 1) % capacity;
  } while (i != start);

  return nullptr;
}

void SampleBlockBuffer::FreeCompletedBlocks() {
  for (intptr_t i = 0; i < capacity_; i++) {
    blocks_[i].FreeCompleted();
  }
}

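// Returns true if any sample in this block carries a user tag that the tag
// table marks as streamable; |*tag| is used as a scratch handle for the
// lookup.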
bool SampleBlock::HasStreamableSamples(const GrowableObjectArray& tag_table,
                                       UserTag* tag) {
  for (intptr_t i = 0; i < capacity_; ++i) {
    Sample* sample = At(i);
    uword sample_tag = sample->user_tag();
    for (intptr_t j = 0; j < tag_table.Length(); ++j) {
      *tag ^= tag_table.At(j);
      if (tag->tag() == sample_tag && tag->streamable()) {
        return true;
      }
    }
  }
  return false;
}

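// Detaches the isolate's current CPU and allocation sample blocks and marks
// them as completed so they can be streamed or recycled.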
static void FlushSampleBlocks(Isolate* isolate) {
  SampleBlock* block = isolate->current_sample_block();
  if (block != nullptr) {
    isolate->set_current_sample_block(nullptr);
    block->MarkCompleted();
  }

  block = isolate->current_allocation_sample_block();
  if (block != nullptr) {
    isolate->set_current_allocation_sample_block(nullptr);
    block->MarkCompleted();
  }
}

ProcessedSampleBuffer* SampleBlockBuffer::BuildProcessedSampleBuffer(
    SampleFilter* filter,
    ProcessedSampleBuffer* buffer) {
  Thread* thread = Thread::Current();
  Zone* zone = thread->zone();

  if (buffer == nullptr) {
    buffer = new (zone) ProcessedSampleBuffer();
  }

  FlushSampleBlocks(thread->isolate());

  for (intptr_t i = 0; i < capacity_; ++i) {
    SampleBlock* block = &blocks_[i];
    if (block->TryAcquireStreaming(thread->isolate())) {
      block->BuildProcessedSampleBuffer(filter, buffer);
      if (filter->take_samples()) {
        block->StreamingToFree();
      } else {
        block->StreamingToCompleted();
      }
    }
  }

  return buffer;
}

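// Atomically claims the next free slot in this block; returns nullptr once
// the block is full.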
Sample* SampleBlock::ReserveSample() {
  intptr_t slot = cursor_.fetch_add(1u);
  if (slot < capacity_) {
    return At(slot);
  }
  return nullptr;
}

Sample* SampleBlock::ReserveSampleAndLink(Sample* previous) {
  ASSERT(previous != nullptr);
  SampleBlockBuffer* buffer = Profiler::sample_block_buffer();
  Isolate* isolate = owner_;
  ASSERT(isolate != nullptr);
  Sample* next = previous->is_allocation_sample()
                     ? buffer->ReserveAllocationSample(isolate)
                     : buffer->ReserveCPUSample(isolate);
  if (next == nullptr) {
    return nullptr;  // No blocks left, so drop sample.
  }
  next->Init(previous->port(), previous->timestamp(), previous->tid());
  next->set_head_sample(false);
  // Mark that previous continues at next.
  previous->SetContinuation(next);
  return next;
}

Sample* SampleBlockBuffer::ReserveCPUSample(Isolate* isolate) {
  return ReserveSampleImpl(isolate, false);
}

Sample* SampleBlockBuffer::ReserveAllocationSample(Isolate* isolate) {
  return ReserveSampleImpl(isolate, true);
}

Sample* SampleBlockBuffer::ReserveSampleImpl(Isolate* isolate,
                                             bool allocation_sample) {
  SampleBlock* block = allocation_sample
                           ? isolate->current_allocation_sample_block()
                           : isolate->current_sample_block();
  Sample* sample = nullptr;
  if (block != nullptr) {
    sample = block->ReserveSample();
  }
  if (sample != nullptr) {
    return sample;
  }

  SampleBlock* next = ReserveSampleBlock();
  if (next == nullptr) {
    // We're out of blocks to reserve. Drop the sample.
    return nullptr;
  }

  next->set_owner(isolate);
  if (allocation_sample) {
    isolate->set_current_allocation_sample_block(next);
  } else {
    isolate->set_current_sample_block(next);
  }
  if (block != nullptr) {
    block->MarkCompleted();
    if (!Isolate::IsSystemIsolate(isolate) &&
        isolate->TrySetHasCompletedBlocks()) {
      isolate->mutator_thread()->ScheduleInterrupts(Thread::kVMInterrupt);
    }
  }
  return next->ReserveSample();
}

// Attempts to find the true return address when a Dart frame is being setup
// or torn down.
// NOTE: Architecture specific implementations below.
class ReturnAddressLocator : public ValueObject {
 public:
  ReturnAddressLocator(Sample* sample, const Code& code)
      : stack_buffer_(sample->GetStackBuffer()),
        pc_(sample->pc()),
        code_(Code::ZoneHandle(code.ptr())) {
    ASSERT(!code_.IsNull());
    ASSERT(code_.ContainsInstructionAt(pc()));
  }

  ReturnAddressLocator(uword pc, uword* stack_buffer, const Code& code)
      : stack_buffer_(stack_buffer),
        pc_(pc),
        code_(Code::ZoneHandle(code.ptr())) {
    ASSERT(!code_.IsNull());
    ASSERT(code_.ContainsInstructionAt(pc_));
  }

  uword pc() { return pc_; }

  // Returns false on failure.
  bool LocateReturnAddress(uword* return_address);

  // Returns offset into code object.
  intptr_t RelativePC() {
    ASSERT(pc() >= code_.PayloadStart());
    return static_cast<intptr_t>(pc() - code_.PayloadStart());
  }

  uint8_t* CodePointer(intptr_t offset) {
    const intptr_t size = code_.Size();
    ASSERT(offset < size);
    uint8_t* code_pointer = reinterpret_cast<uint8_t*>(code_.PayloadStart());
    code_pointer += offset;
    return code_pointer;
  }

  uword StackAt(intptr_t i) {
    ASSERT(i >= 0);
    ASSERT(i < Sample::kStackBufferSizeInWords);
    return stack_buffer_[i];
  }

 private:
  uword* stack_buffer_;
  uword pc_;
  const Code& code_;
};

#if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_X64)
bool ReturnAddressLocator::LocateReturnAddress(uword* return_address) {
  ASSERT(return_address != nullptr);
  const intptr_t offset = RelativePC();
  ASSERT(offset >= 0);
  const intptr_t size = code_.Size();
  ASSERT(offset < size);
  const intptr_t prologue_offset = code_.GetPrologueOffset();
  if (offset < prologue_offset) {
    // Before the prologue, return address is at the top of the stack.
    // TODO(johnmccutchan): Some intrinsics and stubs do not conform to the
    // expected stack layout. Use a more robust solution for those code objects.
    *return_address = StackAt(0);
    return true;
  }
  // Detect if we are:
  // push ebp      <--- here
  // mov ebp, esp
  // on X64 the register names are different but the sequence is the same.
  ProloguePattern pp(pc());
  if (pp.IsValid()) {
    // Stack layout:
    // 0 RETURN ADDRESS.
    *return_address = StackAt(0);
    return true;
  }
  // Detect if we are:
  // push ebp
  // mov ebp, esp  <--- here
  // on X64 the register names are different but the sequence is the same.
  SetFramePointerPattern sfpp(pc());
  if (sfpp.IsValid()) {
    // Stack layout:
    // 0 CALLER FRAME POINTER
    // 1 RETURN ADDRESS
    *return_address = StackAt(1);
    return true;
  }
  // Detect if we are:
  // ret           <--- here
  ReturnPattern rp(pc());
  if (rp.IsValid()) {
    // Stack layout:
    // 0 RETURN ADDRESS.
    *return_address = StackAt(0);
    return true;
  }
  return false;
}
#elif defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64) || \
    defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
bool ReturnAddressLocator::LocateReturnAddress(uword* return_address) {
  ASSERT(return_address != nullptr);
  return false;
}
#else
#error ReturnAddressLocator implementation missing for this architecture.
#endif

bool SampleFilter::TimeFilterSample(Sample* sample) {
  if ((time_origin_micros_ == -1) || (time_extent_micros_ == -1)) {
    // No time filter passed in, always pass.
    return true;
  }
  const int64_t timestamp = sample->timestamp();
  int64_t delta = timestamp - time_origin_micros_;
  return (delta >= 0) && (delta <= time_extent_micros_);
}

bool SampleFilter::TaskFilterSample(Sample* sample) {
  const intptr_t task = static_cast<intptr_t>(sample->thread_task());
  if (thread_task_mask_ == kNoTaskFilter) {
    return true;
  }
  return (task & thread_task_mask_) != 0;
}

ClearProfileVisitor::ClearProfileVisitor(Isolate* isolate)
    : SampleVisitor(isolate->main_port()) {}

void ClearProfileVisitor::VisitSample(Sample* sample) {
  sample->Clear();
}

// Executing Dart code, walk the stack.
class ProfilerDartStackWalker : public ProfilerStackWalker {
 public:
  ProfilerDartStackWalker(Thread* thread,
                          Sample* sample,
                          SampleBuffer* sample_buffer,
                          uword pc,
                          uword fp,
                          uword sp,
                          uword lr,
                          bool allocation_sample,
                          intptr_t skip_count = 0)
      : ProfilerStackWalker((thread->isolate() != nullptr)
                                ? thread->isolate()->main_port()
                                : ILLEGAL_PORT,
                            sample,
                            sample_buffer,
                            skip_count),
        thread_(thread),
        pc_(reinterpret_cast<uword*>(pc)),
        fp_(reinterpret_cast<uword*>(fp)),
        sp_(reinterpret_cast<uword*>(sp)),
        lr_(reinterpret_cast<uword*>(lr)) {}

  void walk() {
    RELEASE_ASSERT(StubCode::HasBeenInitialized());
    if (thread_->isolate()->IsDeoptimizing()) {
      sample_->set_ignore_sample(true);
      return;
    }

    uword* exit_fp = reinterpret_cast<uword*>(thread_->top_exit_frame_info());
    bool has_exit_frame = exit_fp != nullptr;
    if (has_exit_frame) {
      // Exited from compiled code.
      pc_ = nullptr;
      fp_ = exit_fp;

      // Skip exit frame.
      pc_ = CallerPC();
      fp_ = CallerFP();
    } else {
      if (thread_->vm_tag() == VMTag::kDartTagId) {
        // Running compiled code.
        // Use the FP and PC from the thread interrupt or simulator; already set
        // in the constructor.
      } else {
        // No Dart on the stack; caller shouldn't use this walker.
        UNREACHABLE();
      }

      const bool is_entry_frame =
#if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_X64)
          StubCode::InInvocationStub(Stack(0)) ||
          StubCode::InInvocationStub(Stack(1));
#else
          StubCode::InInvocationStub(reinterpret_cast<uword>(lr_));
#endif
      if (is_entry_frame) {
        // During the prologue of a function, CallerPC will return the caller's
        // caller. For most frames, the missing PC will be added during profile
        // processing. However, during this stack walk, it can cause us to fail
        // to identify the entry frame and lead the stack walk into the weeds.
        // Do not continue the stack walk since this might be a false positive
        // from a Smi or unboxed value.
        sample_->set_ignore_sample(true);
        return;
      }
    }

    sample_->set_exit_frame_sample(has_exit_frame);

    for (;;) {
      // Skip entry frame.
      if (StubCode::InInvocationStub(reinterpret_cast<uword>(pc_))) {
        pc_ = nullptr;
        fp_ = ExitLink();
        if (fp_ == nullptr) {
          break;  // End of Dart stack.
        }

        // Skip exit frame.
        pc_ = CallerPC();
        fp_ = CallerFP();

        // At least one frame between exit and next entry frame.
        RELEASE_ASSERT(
            !StubCode::InInvocationStub(reinterpret_cast<uword>(pc_)));
      }

      if (!Append(reinterpret_cast<uword>(pc_), reinterpret_cast<uword>(fp_))) {
        break;  // Sample is full.
      }

      pc_ = CallerPC();
      fp_ = CallerFP();
    }
  }

 private:
  uword* CallerPC() const {
    ASSERT(fp_ != nullptr);
    uword* caller_pc_ptr = fp_ + kSavedCallerPcSlotFromFp;
    // MSan/ASan are unaware of frames initialized by generated code.
    MSAN_UNPOISON(caller_pc_ptr, kWordSize);
    ASAN_UNPOISON(caller_pc_ptr, kWordSize);
    return reinterpret_cast<uword*>(*caller_pc_ptr);
  }

  uword* CallerFP() const {
    ASSERT(fp_ != nullptr);
    uword* caller_fp_ptr = fp_ + kSavedCallerFpSlotFromFp;
    // MSan/ASan are unaware of frames initialized by generated code.
    MSAN_UNPOISON(caller_fp_ptr, kWordSize);
    ASAN_UNPOISON(caller_fp_ptr, kWordSize);
    return reinterpret_cast<uword*>(*caller_fp_ptr);
  }

  uword* ExitLink() const {
    ASSERT(fp_ != nullptr);
    uword* exit_link_ptr = fp_ + kExitLinkSlotFromEntryFp;
    // MSan/ASan are unaware of frames initialized by generated code.
    MSAN_UNPOISON(exit_link_ptr, kWordSize);
    ASAN_UNPOISON(exit_link_ptr, kWordSize);
    return reinterpret_cast<uword*>(*exit_link_ptr);
  }

  uword Stack(intptr_t index) const {
    ASSERT(sp_ != nullptr);
    uword* stack_ptr = sp_ + index;
    // MSan/ASan are unaware of frames initialized by generated code.
    MSAN_UNPOISON(stack_ptr, kWordSize);
    ASAN_UNPOISON(stack_ptr, kWordSize);
    return *stack_ptr;
  }

  Thread* const thread_;
  uword* pc_;
  uword* fp_;
  uword* sp_;
  uword* lr_;
};

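// Copies the first Sample::kStackBufferSizeInWords words at |sp| into the
// sample's stack buffer so a return address can be recovered later (see
// ReturnAddressLocator).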
static void CopyStackBuffer(Sample* sample, uword sp_addr) {
  ASSERT(sample != nullptr);
  uword* sp = reinterpret_cast<uword*>(sp_addr);
  uword* buffer = sample->GetStackBuffer();
  if (sp != nullptr) {
    for (intptr_t i = 0; i < Sample::kStackBufferSizeInWords; i++) {
      MSAN_UNPOISON(sp, kWordSize);
      ASAN_UNPOISON(sp, kWordSize);
      buffer[i] = *sp;
      sp++;
    }
  }
}

#if defined(DART_HOST_OS_WINDOWS)
// On Windows this code is synchronously executed from the thread interrupter
// thread. This means we can safely have a static fault_address.
static uword fault_address = 0;
static LONG GuardPageExceptionFilter(EXCEPTION_POINTERS* ep) {
  fault_address = 0;
  if (ep->ExceptionRecord->ExceptionCode != STATUS_GUARD_PAGE_VIOLATION) {
    return EXCEPTION_CONTINUE_SEARCH;
  }
  // https://goo.gl/p5Fe10
  fault_address = ep->ExceptionRecord->ExceptionInformation[1];
  // Read access.
  ASSERT(ep->ExceptionRecord->ExceptionInformation[0] == 0);
  return EXCEPTION_EXECUTE_HANDLER;
}
#endif

// All memory access done to collect the sample is performed in CollectSample.
static void CollectSample(Isolate* isolate,
                          bool exited_dart_code,
                          bool in_dart_code,
                          Sample* sample,
                          ProfilerNativeStackWalker* native_stack_walker,
                          ProfilerDartStackWalker* dart_stack_walker,
                          uword pc,
                          uword fp,
                          uword sp,
                          ProfilerCounters* counters) {
  ASSERT(counters != nullptr);
#if defined(DART_HOST_OS_WINDOWS)
  // Use structured exception handling to trap guard page access on Windows.
  __try {
#endif

    if (in_dart_code) {
      // We can only trust the stack pointer if we are executing Dart code.
      // See http://dartbug.com/20421 for details.
      CopyStackBuffer(sample, sp);
    }

    if (FLAG_profile_vm) {
      // Always walk the native stack collecting both native and Dart frames.
      counters->stack_walker_native.fetch_add(1);
      native_stack_walker->walk();
    } else if (StubCode::HasBeenInitialized() && exited_dart_code) {
      counters->stack_walker_dart_exit.fetch_add(1);
      // We have a valid exit frame info, use the Dart stack walker.
      dart_stack_walker->walk();
    } else if (StubCode::HasBeenInitialized() && in_dart_code) {
      counters->stack_walker_dart.fetch_add(1);
      // We are executing Dart code. We have frame pointers.
      dart_stack_walker->walk();
    } else {
      counters->stack_walker_none.fetch_add(1);
      sample->SetAt(0, pc);
    }

#if defined(DART_HOST_OS_WINDOWS)
    // Use structured exception handling to trap guard page access.
  } __except (GuardPageExceptionFilter(GetExceptionInformation())) {  // NOLINT
    // Sample collection triggered a guard page fault:
    // 1) discard entire sample.
    sample->set_ignore_sample(true);

    // 2) Reenable guard bit on page that triggered the fault.
    // https://goo.gl/5mCsXW
    DWORD new_protect = PAGE_READWRITE | PAGE_GUARD;
    DWORD old_protect = 0;
    BOOL success =
        VirtualProtect(reinterpret_cast<void*>(fault_address),
                       sizeof(fault_address), new_protect, &old_protect);
    USE(success);
    ASSERT(success);
    ASSERT(old_protect == PAGE_READWRITE);
  }
#endif
}

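// Reserves a CPU or allocation sample for |thread| and fills in the common
// metadata: port, timestamp, thread id, VM tag, user tag, and task kind.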
static Sample* SetupSample(Thread* thread,
                           bool allocation_sample,
                           ThreadId tid) {
  ASSERT(thread != nullptr);
  Isolate* isolate = thread->isolate();
  SampleBlockBuffer* buffer = Profiler::sample_block_buffer();
  Sample* sample = allocation_sample ? buffer->ReserveAllocationSample(isolate)
                                     : buffer->ReserveCPUSample(isolate);
  if (sample == nullptr) {
    return nullptr;
  }
  sample->Init(isolate->main_port(), OS::GetCurrentMonotonicMicros(), tid);
  uword vm_tag = thread->vm_tag();
#if defined(USING_SIMULATOR)
  // When running in the simulator, the runtime entry function address
  // (stored as the vm tag) is the address of a redirect function.
  // Attempt to find the real runtime entry function address and use that.
  uword redirect_vm_tag = Simulator::FunctionForRedirect(vm_tag);
  if (redirect_vm_tag != 0) {
    vm_tag = redirect_vm_tag;
  }
#endif
  sample->set_vm_tag(vm_tag);
  sample->set_user_tag(isolate->user_tag());
  sample->set_thread_task(thread->task_kind());
  return sample;
}

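// Only regular isolates are profiled; the VM isolate is skipped.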
static bool CheckIsolate(Isolate* isolate) {
  if ((isolate == nullptr) || (Dart::vm_isolate() == nullptr)) {
    // No isolate.
    return false;
  }
  return isolate != Dart::vm_isolate();
}

void Profiler::SampleAllocation(Thread* thread,
                                intptr_t cid,
                                uint32_t identity_hash) {
  ASSERT(thread != nullptr);
  OSThread* os_thread = thread->os_thread();
  ASSERT(os_thread != nullptr);
  Isolate* isolate = thread->isolate();
  if (!CheckIsolate(isolate)) {
    return;
  }
  const bool exited_dart_code = thread->HasExitedDartCode();

  SampleBlockBuffer* buffer = Profiler::sample_block_buffer();
  if (buffer == nullptr) {
    // Profiler not initialized.
    return;
  }

  uintptr_t sp = OSThread::GetCurrentStackPointer();
  uintptr_t fp = 0;
  uintptr_t pc = OS::GetProgramCounter();
  uintptr_t lr = 0;

  COPY_FP_REGISTER(fp);

  uword stack_lower = 0;
  uword stack_upper = 0;

  if (!GetAndValidateThreadStackBounds(os_thread, thread, fp, sp, &stack_lower,
                                       &stack_upper)) {
    // Could not get stack boundary.
    return;
  }

  Sample* sample =
      SetupSample(thread, /*allocation_sample=*/true, os_thread->trace_id());
  if (sample == nullptr) {
    // We were unable to assign a sample for this allocation.
    counters_.sample_allocation_failure++;
    return;
  }
  sample->SetAllocationCid(cid);
  sample->set_allocation_identity_hash(identity_hash);

  if (FLAG_profile_vm_allocation) {
    ProfilerNativeStackWalker native_stack_walker(
        &counters_, (isolate != nullptr) ? isolate->main_port() : ILLEGAL_PORT,
        sample, isolate->current_allocation_sample_block(), stack_lower,
        stack_upper, pc, fp, sp);
    native_stack_walker.walk();
  } else if (exited_dart_code) {
    ProfilerDartStackWalker dart_exit_stack_walker(
        thread, sample, isolate->current_allocation_sample_block(), pc, fp, sp,
        lr, /*allocation_sample=*/true);
    dart_exit_stack_walker.walk();
  } else {
    // Fall back.
    uintptr_t pc = OS::GetProgramCounter();
    sample->SetAt(0, pc);
  }
}

void Profiler::SampleThreadSingleFrame(Thread* thread,
                                       Sample* sample,
                                       uintptr_t pc) {
  ASSERT(thread != nullptr);
  OSThread* os_thread = thread->os_thread();
  ASSERT(os_thread != nullptr);
  Isolate* isolate = thread->isolate();

  ASSERT(Profiler::sample_block_buffer() != nullptr);

  // Increment counter for vm tag.
  VMTagCounters* counters = isolate->vm_tag_counters();
  ASSERT(counters != nullptr);
  if (thread->IsDartMutatorThread()) {
    counters->Increment(sample->vm_tag());
  }

  // Write the single pc value.
  sample->SetAt(0, pc);
}

void Profiler::SampleThread(Thread* thread,
                            const InterruptedThreadState& state) {
  ASSERT(thread != nullptr);
  OSThread* os_thread = thread->os_thread();
  ASSERT(os_thread != nullptr);
  Isolate* isolate = thread->isolate();

  // Thread is not doing VM work.
  if (thread->task_kind() == Thread::kUnknownTask) {
    counters_.bail_out_unknown_task.fetch_add(1);
    return;
  }

  if (StubCode::HasBeenInitialized() && StubCode::InJumpToFrameStub(state.pc)) {
    // The JumpToFrame stub manually adjusts the stack pointer, frame
    // pointer, and some isolate state. It is not safe to walk the
    // stack when executing this stub.
    counters_.bail_out_jump_to_exception_handler.fetch_add(1);
    return;
  }

  const bool in_dart_code = thread->IsExecutingDartCode();

  uintptr_t sp = 0;
  uintptr_t fp = state.fp;
  uintptr_t pc = state.pc;
  uintptr_t lr = state.lr;
#if defined(USING_SIMULATOR)
  Simulator* simulator = nullptr;
#endif

  if (in_dart_code) {
    // If we're in Dart code, use the Dart stack pointer.
#if defined(USING_SIMULATOR)
    simulator = isolate->simulator();
    sp = simulator->get_register(SPREG);
    fp = simulator->get_register(FPREG);
    pc = simulator->get_pc();
    lr = simulator->get_lr();
#else
    sp = state.dsp;
#endif
  } else {
    // If we're in runtime code, use the C stack pointer.
    sp = state.csp;
  }

  if (!CheckIsolate(isolate)) {
    counters_.bail_out_check_isolate.fetch_add(1);
    return;
  }

  SampleBlockBuffer* sample_block_buffer = Profiler::sample_block_buffer();
  if (sample_block_buffer == nullptr) {
    // Profiler not initialized.
    return;
  }

  // Setup sample.
  Sample* sample =
      SetupSample(thread, /*allocation_sample=*/false, os_thread->trace_id());
| 1372 | if (sample == nullptr) { |
| 1373 | // We were unable to assign a sample for this profiler tick. |
| 1374 | counters_.sample_allocation_failure++; |
| 1375 | return; |
| 1376 | } |
| 1377 | |
| 1378 | if (thread->IsDartMutatorThread()) { |
| 1379 | if (isolate->IsDeoptimizing()) { |
| 1380 | counters_.single_frame_sample_deoptimizing.fetch_add(arg: 1); |
| 1381 | SampleThreadSingleFrame(thread, sample, pc); |
| 1382 | return; |
| 1383 | } |
| 1384 | } |
| 1385 | |
| 1386 | uword stack_lower = 0; |
| 1387 | uword stack_upper = 0; |
| 1388 | if (!GetAndValidateThreadStackBounds(os_thread, thread, fp, sp, stack_lower: &stack_lower, |
| 1389 | stack_upper: &stack_upper)) { |
| 1390 | counters_.single_frame_sample_get_and_validate_stack_bounds.fetch_add(arg: 1); |
| 1391 | // Could not get stack boundary. |
| 1392 | SampleThreadSingleFrame(thread, sample, pc); |
| 1393 | return; |
| 1394 | } |
| 1395 | |
| 1396 | // At this point we have a valid stack boundary for this isolate and |
| 1397 | // know that our initial stack and frame pointers are within the boundary. |
| 1398 | |
| 1399 | // Increment counter for vm tag. |
| 1400 | VMTagCounters* counters = isolate->vm_tag_counters(); |
| 1401 | ASSERT(counters != nullptr); |
| 1402 | if (thread->IsDartMutatorThread()) { |
| 1403 | counters->Increment(tag: sample->vm_tag()); |
| 1404 | } |
| 1405 | |
| 1406 | ProfilerNativeStackWalker native_stack_walker( |
| 1407 | &counters_, (isolate != nullptr) ? isolate->main_port() : ILLEGAL_PORT, |
| 1408 | sample, isolate->current_sample_block(), stack_lower, stack_upper, pc, fp, |
| 1409 | sp); |
| 1410 | const bool exited_dart_code = thread->HasExitedDartCode(); |
| 1411 | ProfilerDartStackWalker dart_stack_walker( |
| 1412 | thread, sample, isolate->current_sample_block(), pc, fp, sp, lr, |
| 1413 | /* allocation_sample*/ false); |
| 1414 | |
| 1415 | // All memory access is done inside CollectSample. |
| 1416 | CollectSample(isolate, exited_dart_code, in_dart_code, sample, |
| 1417 | native_stack_walker: &native_stack_walker, dart_stack_walker: &dart_stack_walker, pc, fp, sp, |
| 1418 | counters: &counters_); |
| 1419 | } |
| 1420 | |
| 1421 | CodeDescriptor::CodeDescriptor(const AbstractCode code) : code_(code) {} |
| 1422 | |
| 1423 | uword CodeDescriptor::Start() const { |
| 1424 | return code_.PayloadStart(); |
| 1425 | } |
| 1426 | |
| 1427 | uword CodeDescriptor::Size() const { |
| 1428 | return code_.Size(); |
| 1429 | } |
| 1430 | |
| 1431 | int64_t CodeDescriptor::CompileTimestamp() const { |
| 1432 | return code_.compile_timestamp(); |
| 1433 | } |
| 1434 | |
| 1435 | CodeLookupTable::CodeLookupTable(Thread* thread) { |
| 1436 | Build(thread); |
| 1437 | } |
| 1438 | |
| 1439 | class CodeLookupTableBuilder : public ObjectVisitor { |
| 1440 | public: |
| 1441 | explicit CodeLookupTableBuilder(CodeLookupTable* table) : table_(table) { |
| 1442 | ASSERT(table_ != nullptr); |
| 1443 | } |
| 1444 | |
| 1445 | ~CodeLookupTableBuilder() {} |
| 1446 | |
| 1447 | void VisitObject(ObjectPtr raw_obj) override { |
| 1448 | if (raw_obj->IsCode() && !Code::IsUnknownDartCode(code: Code::RawCast(raw: raw_obj))) { |
| 1449 | table_->Add(code: Code::Handle(ptr: Code::RawCast(raw: raw_obj))); |
| 1450 | } |
| 1451 | } |
| 1452 | |
| 1453 | private: |
| 1454 | CodeLookupTable* table_; |
| 1455 | }; |
| 1456 | |
| 1457 | void CodeLookupTable::Build(Thread* thread) { |
| 1458 | ASSERT(thread != nullptr); |
| 1459 | Isolate* isolate = thread->isolate(); |
| 1460 | ASSERT(isolate != nullptr); |
| 1461 | Isolate* vm_isolate = Dart::vm_isolate(); |
| 1462 | ASSERT(vm_isolate != nullptr); |
| 1463 | |
| 1464 | // Clear. |
| 1465 | code_objects_.Clear(); |
| 1466 | |
| 1467 | thread->CheckForSafepoint(); |
| 1468 | // Add all found Code objects. |
| 1469 | { |
| 1470 | TimelineBeginEndScope tl(Timeline::GetIsolateStream(), |
| 1471 | "CodeLookupTable::Build HeapIterationScope" ); |
| 1472 | HeapIterationScope iteration(thread); |
| 1473 | CodeLookupTableBuilder cltb(this); |
| 1474 | iteration.IterateVMIsolateObjects(visitor: &cltb); |
| 1475 | iteration.IterateOldObjects(visitor: &cltb); |
| 1476 | } |
| 1477 | thread->CheckForSafepoint(); |
| 1478 | |
| 1479 | // Sort by entry. |
| 1480 | code_objects_.Sort(compare: CodeDescriptor::Compare); |
| 1481 | |
| 1482 | #if defined(DEBUG) |
| 1483 | if (length() <= 1) { |
| 1484 | return; |
| 1485 | } |
| 1486 | ASSERT(FindCode(0) == nullptr); |
| 1487 | ASSERT(FindCode(~0) == nullptr); |
| 1488 | // Sanity check that we don't have duplicate entries and that the entries |
| 1489 | // are sorted. |
| 1490 | for (intptr_t i = 0; i < length() - 1; i++) { |
| 1491 | const CodeDescriptor* a = At(i); |
| 1492 | const CodeDescriptor* b = At(i + 1); |
| 1493 | ASSERT(a->Start() < b->Start()); |
| 1494 | ASSERT(FindCode(a->Start()) == a); |
| 1495 | ASSERT(FindCode(b->Start()) == b); |
| 1496 | ASSERT(FindCode(a->Start() + a->Size() - 1) == a); |
| 1497 | ASSERT(FindCode(b->Start() + b->Size() - 1) == b); |
| 1498 | } |
| 1499 | #endif |
| 1500 | } |
| 1501 | |
| 1502 | void CodeLookupTable::Add(const Object& code) { |
| 1503 | ASSERT(!code.IsNull()); |
| 1504 | ASSERT(code.IsCode()); |
| 1505 | CodeDescriptor* cd = new CodeDescriptor(AbstractCode(code.ptr())); |
code_objects_.Add(cd);
| 1507 | } |
| 1508 | |
| 1509 | const CodeDescriptor* CodeLookupTable::FindCode(uword pc) const { |
| 1510 | intptr_t first = 0; |
| 1511 | intptr_t count = length(); |
| 1512 | while (count > 0) { |
| 1513 | intptr_t current = first; |
| 1514 | intptr_t step = count / 2; |
| 1515 | current += step; |
const CodeDescriptor* cd = At(current);
| 1517 | if (pc >= cd->Start()) { |
| 1518 | first = ++current; |
| 1519 | count -= step + 1; |
| 1520 | } else { |
| 1521 | count = step; |
| 1522 | } |
| 1523 | } |
// `first` now indexes the first code object whose start address is greater
// than pc, so the candidate to check is the one at first - 1.
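// For example (hypothetical addresses): with descriptors starting at
// 0x1000 (size 0x200) and 0x2000 (size 0x100), a pc of 0x11f0 leaves
// first == 1; stepping back to index 0 succeeds because 0x11f0 lies in
// [0x1000, 0x1200). A pc of 0x1800 also leaves first == 1, but the
// Contains() check below fails and nullptr is returned.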
| 1526 | if (first == 0) { |
| 1527 | return nullptr; |
| 1528 | } |
| 1529 | first--; |
| 1530 | ASSERT(first >= 0); |
| 1531 | ASSERT(first < length()); |
const CodeDescriptor* cd = At(first);
| 1533 | if (cd->Contains(pc)) { |
| 1534 | return cd; |
| 1535 | } |
| 1536 | return nullptr; |
| 1537 | } |
| 1538 | |
| 1539 | ProcessedSampleBuffer* SampleBuffer::BuildProcessedSampleBuffer( |
| 1540 | SampleFilter* filter, |
| 1541 | ProcessedSampleBuffer* buffer) { |
| 1542 | Thread* thread = Thread::Current(); |
| 1543 | Zone* zone = thread->zone(); |
| 1544 | |
| 1545 | if (buffer == nullptr) { |
| 1546 | buffer = new (zone) ProcessedSampleBuffer(); |
| 1547 | } |
| 1548 | |
| 1549 | const intptr_t length = capacity(); |
| 1550 | for (intptr_t i = 0; i < length; i++) { |
| 1551 | thread->CheckForSafepoint(); |
Sample* sample = At(i);
| 1553 | if (sample->ignore_sample()) { |
| 1554 | // Bad sample. |
| 1555 | continue; |
| 1556 | } |
| 1557 | if (!sample->head_sample()) { |
| 1558 | // An inner sample in a chain of samples. |
| 1559 | continue; |
| 1560 | } |
| 1561 | if (sample->timestamp() == 0) { |
| 1562 | // Empty. |
| 1563 | continue; |
| 1564 | } |
if (sample->At(0) == 0) {
| 1566 | // No frames. |
| 1567 | continue; |
| 1568 | } |
| 1569 | if (filter != nullptr) { |
// Skip samples recorded for a different port (isolate) than the one the
// filter targets.
| 1572 | if (sample->port() != filter->port()) { |
| 1573 | // Another isolate. |
| 1574 | continue; |
| 1575 | } |
| 1576 | if (!filter->TimeFilterSample(sample)) { |
| 1577 | // Did not pass time filter. |
| 1578 | continue; |
| 1579 | } |
| 1580 | if (!filter->TaskFilterSample(sample)) { |
| 1581 | // Did not pass task filter. |
| 1582 | continue; |
| 1583 | } |
| 1584 | if (!filter->FilterSample(sample)) { |
| 1585 | // Did not pass filter. |
| 1586 | continue; |
| 1587 | } |
| 1588 | } |
buffer->Add(BuildProcessedSample(sample, buffer->code_lookup_table()));
| 1590 | } |
| 1591 | return buffer; |
| 1592 | } |
| 1593 | |
| 1594 | ProcessedSample* SampleBuffer::BuildProcessedSample( |
| 1595 | Sample* sample, |
| 1596 | const CodeLookupTable& clt) { |
| 1597 | Thread* thread = Thread::Current(); |
| 1598 | Zone* zone = thread->zone(); |
| 1599 | |
| 1600 | ProcessedSample* processed_sample = new (zone) ProcessedSample(); |
| 1601 | |
| 1602 | // Copy state bits from sample. |
| 1603 | processed_sample->set_timestamp(sample->timestamp()); |
| 1604 | processed_sample->set_tid(sample->tid()); |
| 1605 | processed_sample->set_vm_tag(sample->vm_tag()); |
| 1606 | processed_sample->set_user_tag(sample->user_tag()); |
| 1607 | if (sample->is_allocation_sample()) { |
| 1608 | processed_sample->set_allocation_cid(sample->allocation_cid()); |
| 1609 | processed_sample->set_allocation_identity_hash( |
| 1610 | sample->allocation_identity_hash()); |
| 1611 | } |
| 1612 | processed_sample->set_first_frame_executing(!sample->exit_frame_sample()); |
| 1613 | |
// Copy the stack trace from the head sample and any chained continuation samples.
| 1615 | bool truncated = false; |
| 1616 | Sample* current = sample; |
| 1617 | while (current != nullptr) { |
| 1618 | for (intptr_t i = 0; i < Sample::kPCArraySizeInWords; i++) { |
| 1619 | if (current->At(i) == 0) { |
| 1620 | break; |
| 1621 | } |
processed_sample->Add(current->At(i));
| 1623 | } |
| 1624 | |
| 1625 | truncated = truncated || current->truncated_trace(); |
current = Next(current);
| 1627 | } |
| 1628 | |
| 1629 | if (!sample->exit_frame_sample()) { |
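// pc_marker is passed as 0 here; CheckForMissingDartFrame falls back to
// the captured stack buffer (via ReturnAddressLocator) to recover a
// plausible return address when one is available.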
| 1630 | processed_sample->FixupCaller(clt, /* pc_marker */ 0, |
sample->GetStackBuffer());
| 1632 | } |
| 1633 | |
| 1634 | processed_sample->set_truncated(truncated); |
| 1635 | return processed_sample; |
| 1636 | } |
| 1637 | |
| 1638 | Sample* SampleBuffer::Next(Sample* sample) { |
| 1639 | if (!sample->is_continuation_sample()) return nullptr; |
| 1640 | Sample* next_sample = sample->continuation_sample(); |
| 1641 | // Sanity check. |
| 1642 | ASSERT(sample != next_sample); |
| 1643 | // Detect invalid chaining. |
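// A mismatch in port, timestamp, or tid means the continuation slot no
// longer belongs to this sample (e.g. it has since been reused), so the
// chain is treated as ended.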
| 1644 | if (sample->port() != next_sample->port()) { |
| 1645 | return nullptr; |
| 1646 | } |
| 1647 | if (sample->timestamp() != next_sample->timestamp()) { |
| 1648 | return nullptr; |
| 1649 | } |
| 1650 | if (sample->tid() != next_sample->tid()) { |
| 1651 | return nullptr; |
| 1652 | } |
| 1653 | return next_sample; |
| 1654 | } |
| 1655 | |
| 1656 | ProcessedSample::ProcessedSample() |
| 1657 | : pcs_(Sample::kPCArraySizeInWords), |
| 1658 | timestamp_(0), |
| 1659 | vm_tag_(0), |
| 1660 | user_tag_(0), |
| 1661 | allocation_cid_(-1), |
| 1662 | allocation_identity_hash_(0), |
| 1663 | truncated_(false) {} |
| 1664 | |
| 1665 | void ProcessedSample::FixupCaller(const CodeLookupTable& clt, |
| 1666 | uword pc_marker, |
| 1667 | uword* stack_buffer) { |
const CodeDescriptor* cd = clt.FindCode(At(0));
| 1669 | if (cd == nullptr) { |
| 1670 | // No Dart code. |
| 1671 | return; |
| 1672 | } |
| 1673 | if (cd->CompileTimestamp() > timestamp()) { |
| 1674 | // Code compiled after sample. Ignore. |
| 1675 | return; |
| 1676 | } |
CheckForMissingDartFrame(clt, cd, pc_marker, stack_buffer);
| 1678 | } |
| 1679 | |
| 1680 | void ProcessedSample::CheckForMissingDartFrame(const CodeLookupTable& clt, |
| 1681 | const CodeDescriptor* cd, |
| 1682 | uword pc_marker, |
| 1683 | uword* stack_buffer) { |
| 1684 | ASSERT(cd != nullptr); |
const Code& code = Code::Handle(Code::RawCast(cd->code().ptr()));
| 1686 | ASSERT(!code.IsNull()); |
// Some stubs (and intrinsics) do not push a frame onto the stack, leaving
// the frame pointer pointing at the caller's frame.
| 1689 | // |
| 1690 | // PC -> STUB |
| 1691 | // FP -> DART3 <-+ |
| 1692 | // DART2 <-| <- TOP FRAME RETURN ADDRESS. |
| 1693 | // DART1 <-| |
| 1694 | // ..... |
| 1695 | // |
| 1696 | // In this case, traversing the linked stack frames will not collect a PC |
| 1697 | // inside DART3. The stack will incorrectly be: STUB, DART2, DART1. |
| 1698 | // In Dart code, after pushing the FP onto the stack, an IP in the current |
| 1699 | // function is pushed onto the stack as well. This stack slot is called |
| 1700 | // the PC marker. We can use the PC marker to insert DART3 into the stack |
| 1701 | // so that it will correctly be: STUB, DART3, DART2, DART1. Note the |
| 1702 | // inserted PC may not accurately reflect the true return address into DART3. |
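// The insertion happens at the end of this function: when a plausible
// return address is recovered (from the PC marker or the captured stack
// buffer) and it maps to a Dart code object, it is inserted at index 1,
// directly after the sampled stub PC.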
| 1703 | |
| 1704 | // The pc marker is our current best guess of a return address. |
| 1705 | uword return_address = pc_marker; |
| 1706 | |
| 1707 | // Attempt to find a better return address. |
ReturnAddressLocator ral(At(0), stack_buffer, code);
| 1709 | |
if (!ral.LocateReturnAddress(&return_address)) {
| 1711 | ASSERT(return_address == pc_marker); |
| 1712 | if (code.GetPrologueOffset() == 0) { |
| 1713 | // Code has the prologue at offset 0. The frame is already setup and |
| 1714 | // can be trusted. |
| 1715 | return; |
| 1716 | } |
| 1717 | // Could not find a better return address than the pc_marker. |
if (code.ContainsInstructionAt(return_address)) {
| 1719 | // PC marker is in the same code as pc, no missing frame. |
| 1720 | return; |
| 1721 | } |
| 1722 | } |
| 1723 | |
if (clt.FindCode(return_address) == nullptr) {
| 1725 | // Return address is not from a Dart code object. Do not insert. |
| 1726 | return; |
| 1727 | } |
| 1728 | |
| 1729 | if (return_address != 0) { |
InsertAt(1, return_address);
| 1731 | } |
| 1732 | } |
| 1733 | |
| 1734 | ProcessedSampleBuffer::ProcessedSampleBuffer() |
| 1735 | : code_lookup_table_(new CodeLookupTable(Thread::Current())) { |
| 1736 | ASSERT(code_lookup_table_ != nullptr); |
| 1737 | } |
| 1738 | |
| 1739 | void SampleBlockProcessor::Init() { |
| 1740 | ASSERT(!initialized_); |
| 1741 | if (monitor_ == nullptr) { |
| 1742 | monitor_ = new Monitor(); |
| 1743 | } |
| 1744 | ASSERT(monitor_ != nullptr); |
| 1745 | initialized_ = true; |
| 1746 | shutdown_ = false; |
| 1747 | } |
| 1748 | |
| 1749 | void SampleBlockProcessor::Startup() { |
| 1750 | ASSERT(initialized_); |
| 1751 | ASSERT(processor_thread_id_ == OSThread::kInvalidThreadJoinId); |
| 1752 | MonitorLocker startup_ml(monitor_); |
OSThread::Start("Dart Profiler SampleBlockProcessor", ThreadMain, 0);
| 1754 | while (!thread_running_) { |
| 1755 | startup_ml.Wait(); |
| 1756 | } |
| 1757 | ASSERT(processor_thread_id_ != OSThread::kInvalidThreadJoinId); |
| 1758 | } |
| 1759 | |
| 1760 | void SampleBlockProcessor::Cleanup() { |
| 1761 | { |
| 1762 | MonitorLocker shutdown_ml(monitor_); |
| 1763 | if (shutdown_) { |
| 1764 | // Already shutdown. |
| 1765 | return; |
| 1766 | } |
| 1767 | shutdown_ = true; |
| 1768 | // Notify. |
| 1769 | shutdown_ml.Notify(); |
| 1770 | ASSERT(initialized_); |
| 1771 | } |
| 1772 | |
| 1773 | // Join the thread. |
| 1774 | ASSERT(processor_thread_id_ != OSThread::kInvalidThreadJoinId); |
OSThread::Join(processor_thread_id_);
| 1776 | processor_thread_id_ = OSThread::kInvalidThreadJoinId; |
| 1777 | initialized_ = false; |
| 1778 | ASSERT(!thread_running_); |
| 1779 | } |
| 1780 | |
| 1781 | class StreamableSampleFilter : public SampleFilter { |
| 1782 | public: |
| 1783 | explicit StreamableSampleFilter(Dart_Port port) |
| 1784 | : SampleFilter(port, kNoTaskFilter, -1, -1, true) {} |
| 1785 | |
| 1786 | bool FilterSample(Sample* sample) override { |
| 1787 | const UserTag& tag = |
UserTag::Handle(UserTag::FindTagById(sample->user_tag()));
| 1789 | return tag.streamable(); |
| 1790 | } |
| 1791 | }; |
| 1792 | |
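// Builds a CPU-samples profile from this isolate's completed sample blocks
// and posts it to the service protocol's Profiler stream as a kCpuSamples
// event. System isolates are skipped, nothing is sent unless the stream has
// a listener, and only samples whose UserTag is marked streamable pass the
// StreamableSampleFilter.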
| 1793 | void Profiler::ProcessCompletedBlocks(Isolate* isolate) { |
| 1794 | if (!Service::profiler_stream.enabled()) return; |
| 1795 | auto thread = Thread::Current(); |
| 1796 | if (Isolate::IsSystemIsolate(isolate)) return; |
| 1797 | |
TIMELINE_DURATION(thread, Isolate, "Profiler::ProcessCompletedBlocks")
| 1799 | DisableThreadInterruptsScope dtis(thread); |
| 1800 | StackZone zone(thread); |
| 1801 | HandleScope handle_scope(thread); |
| 1802 | StreamableSampleFilter filter(isolate->main_port()); |
| 1803 | Profile profile; |
profile.Build(thread, &filter, Profiler::sample_block_buffer());
| 1805 | ServiceEvent event(isolate, ServiceEvent::kCpuSamples); |
| 1806 | event.set_cpu_profile(&profile); |
Service::HandleEvent(&event);
| 1808 | } |
| 1809 | |
| 1810 | void Profiler::IsolateShutdown(Thread* thread) { |
FlushSampleBlocks(thread->isolate());
ProcessCompletedBlocks(thread->isolate());
| 1813 | } |
| 1814 | |
| 1815 | class SampleBlockProcessorVisitor : public IsolateVisitor { |
| 1816 | public: |
| 1817 | SampleBlockProcessorVisitor() = default; |
| 1818 | virtual ~SampleBlockProcessorVisitor() = default; |
| 1819 | |
| 1820 | void VisitIsolate(Isolate* isolate) { |
| 1821 | if (isolate->TakeHasCompletedBlocks()) { |
| 1822 | const bool kBypassSafepoint = false; |
| 1823 | Thread::EnterIsolateGroupAsHelper( |
isolate->group(), Thread::kSampleBlockTask, kBypassSafepoint);
| 1825 | Profiler::ProcessCompletedBlocks(isolate); |
Thread::ExitIsolateGroupAsHelper(kBypassSafepoint);
| 1827 | } |
| 1828 | } |
| 1829 | }; |
| 1830 | |
| 1831 | void SampleBlockProcessor::ThreadMain(uword parameters) { |
| 1832 | ASSERT(initialized_); |
| 1833 | { |
| 1834 | // Signal to main thread we are ready. |
| 1835 | MonitorLocker startup_ml(monitor_); |
| 1836 | OSThread* os_thread = OSThread::Current(); |
| 1837 | ASSERT(os_thread != nullptr); |
processor_thread_id_ = OSThread::GetCurrentThreadJoinId(os_thread);
| 1839 | thread_running_ = true; |
| 1840 | startup_ml.Notify(); |
| 1841 | } |
| 1842 | |
| 1843 | SampleBlockProcessorVisitor visitor; |
| 1844 | MonitorLocker wait_ml(monitor_); |
// Wake up every 100 ms (wakeup_interval is expressed in microseconds).
| 1846 | const int64_t wakeup_interval = 1000 * 100; |
| 1847 | while (true) { |
wait_ml.WaitMicros(wakeup_interval);
| 1849 | if (shutdown_) { |
| 1850 | break; |
| 1851 | } |
Isolate::VisitIsolates(&visitor);
| 1853 | } |
| 1854 | // Signal to main thread we are exiting. |
| 1855 | thread_running_ = false; |
| 1856 | } |
| 1857 | |
| 1858 | #endif // !PRODUCT |
| 1859 | |
| 1860 | } // namespace dart |
| 1861 | |