// SPDX-License-Identifier: GPL-2.0
/*
 * trace_hwlat.c - A simple Hardware Latency detector.
 *
 * Use this tracer to detect large system latencies induced by the behavior of
 * certain underlying system hardware or firmware, independent of Linux itself.
 * The code was developed originally to detect the presence of SMIs on Intel
 * and AMD systems, although there is no dependency upon x86 herein.
 *
 * The classical example usage of this tracer is in detecting the presence of
 * SMIs or System Management Interrupts on Intel and AMD systems. An SMI is a
 * somewhat special form of hardware interrupt spawned from earlier CPU debug
 * modes in which the (BIOS/EFI/etc.) firmware arranges for the South Bridge
 * LPC (or other device) to generate a special interrupt under certain
 * circumstances, for example, upon expiration of a special SMI timer device,
 * due to certain external thermal readings, on certain I/O address accesses,
 * and other situations. An SMI hits a special CPU pin, triggers a special
 * SMI mode (complete with special memory map), and the OS is unaware.
 *
 * Although certain hardware-induced latencies are necessary (for example,
 * a modern system often requires an SMI handler for correct thermal control
 * and remote management) they can wreak havoc upon any OS-level performance
 * guarantees toward low-latency, especially when the OS is not even made
 * aware of the presence of these interrupts. For this reason, we need a
 * somewhat brute force mechanism to detect these interrupts. In this case,
 * we do it by hogging all of the CPU(s) for configurable timer intervals,
 * sampling the built-in CPU timer, looking for discontiguous readings.
 *
 * WARNING: This implementation necessarily introduces latencies. Therefore,
 *          you should NEVER use this tracer while running in a production
 *          environment requiring any kind of low-latency performance
 *          guarantee(s).
 *
 * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. <jcm@redhat.com>
 * Copyright (C) 2013-2016 Steven Rostedt, Red Hat, Inc. <srostedt@redhat.com>
 *
 * Includes useful feedback from Clark Williams <williams@redhat.com>
 *
 */
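
/*
 * Typical usage, assuming the standard tracefs mount point at
 * /sys/kernel/tracing:
 *
 *	cd /sys/kernel/tracing
 *	echo hwlat > current_tracer
 *	echo 1 > tracing_on
 *	sleep 60
 *	cat trace
 *
 * The window, width and mode knobs defined below live under the
 * hwlat_detector/ directory created in the same tracing directory.
 */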
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/sched/clock.h>
#include "trace.h"

static struct trace_array	*hwlat_trace;

#define U64STR_SIZE		22			/* 20 digits max */

#define BANNER			"hwlat_detector: "
#define DEFAULT_SAMPLE_WINDOW	1000000			/* 1s */
#define DEFAULT_SAMPLE_WIDTH	500000			/* 0.5s */
#define DEFAULT_LAT_THRESHOLD	10			/* 10us */

static struct dentry *hwlat_sample_width;	/* sample width us */
static struct dentry *hwlat_sample_window;	/* sample window us */
static struct dentry *hwlat_thread_mode;	/* hwlat thread mode */

enum {
	MODE_NONE = 0,
	MODE_ROUND_ROBIN,
	MODE_PER_CPU,
	MODE_MAX
};
static char *thread_mode_str[] = { "none", "round-robin", "per-cpu" };

/* Save the previous tracing_thresh value */
static unsigned long save_tracing_thresh;

/* runtime kthread data */
struct hwlat_kthread_data {
	struct task_struct	*kthread;
	/* NMI timestamp counters */
	u64			nmi_ts_start;
	u64			nmi_total_ts;
	int			nmi_count;
	int			nmi_cpu;
};

static struct hwlat_kthread_data hwlat_single_cpu_data;
static DEFINE_PER_CPU(struct hwlat_kthread_data, hwlat_per_cpu_data);

/* Tells NMIs to call back to the hwlat tracer to record timestamps */
bool trace_hwlat_callback_enabled;

/* If the user changed threshold, remember it */
static u64 last_tracing_thresh = DEFAULT_LAT_THRESHOLD * NSEC_PER_USEC;

/* Individual latency samples are stored here when detected. */
struct hwlat_sample {
	u64			seqnum;		/* unique sequence */
	u64			duration;	/* delta */
	u64			outer_duration;	/* delta (outer loop) */
	u64			nmi_total_ts;	/* Total time spent in NMIs */
	struct timespec64	timestamp;	/* wall time */
	int			nmi_count;	/* # NMIs during this sample */
	int			count;		/* # of iterations over thresh */
};

/* keep the global state somewhere. */
static struct hwlat_data {

	struct mutex lock;		/* protect changes */

	u64	count;			/* total since reset */

	u64	sample_window;		/* total sampling window (on+off) */
	u64	sample_width;		/* active sampling portion of window */

	int	thread_mode;		/* thread mode */

} hwlat_data = {
	.sample_window		= DEFAULT_SAMPLE_WINDOW,
	.sample_width		= DEFAULT_SAMPLE_WIDTH,
	.thread_mode		= MODE_ROUND_ROBIN
};
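
/*
 * With the defaults above, each 1s sample window consists of 0.5s of busy
 * sampling followed by roughly 0.5s of sleep: the sampling kthread sleeps
 * for sample_window - sample_width between samples.
 */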

static struct hwlat_kthread_data *get_cpu_data(void)
{
	if (hwlat_data.thread_mode == MODE_PER_CPU)
		return this_cpu_ptr(&hwlat_per_cpu_data);
	else
		return &hwlat_single_cpu_data;
}

static bool hwlat_busy;

static void trace_hwlat_sample(struct hwlat_sample *sample)
{
	struct trace_array *tr = hwlat_trace;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ring_buffer_event *event;
	struct hwlat_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_HWLAT, sizeof(*entry),
					  tracing_gen_ctx());
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->seqnum			= sample->seqnum;
	entry->duration			= sample->duration;
	entry->outer_duration		= sample->outer_duration;
	entry->timestamp		= sample->timestamp;
	entry->nmi_total_ts		= sample->nmi_total_ts;
	entry->nmi_count		= sample->nmi_count;
	entry->count			= sample->count;

	trace_buffer_unlock_commit_nostack(buffer, event);
}

/* Macros to encapsulate the time capturing infrastructure */
#define time_type	u64
#define time_get()	trace_clock_local()
#define time_to_us(x)	div_u64(x, 1000)
#define time_sub(a, b)	((a) - (b))
#define init_time(a, b)	(a = b)
#define time_u64(a)	a
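
/*
 * trace_clock_local() returns nanoseconds, so time_to_us() divides by
 * 1000: e.g. a raw delta of 12000ns reads as 12us, which is the unit
 * compared against the (usec) threshold in get_sample() below.
 */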

void trace_hwlat_callback(bool enter)
{
	struct hwlat_kthread_data *kdata = get_cpu_data();

	if (!kdata->kthread)
		return;

	/*
	 * Currently trace_clock_local() calls sched_clock() and the
	 * generic version is not NMI safe.
	 */
	if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK)) {
		if (enter)
			kdata->nmi_ts_start = time_get();
		else
			kdata->nmi_total_ts += time_get() - kdata->nmi_ts_start;
	}

	if (enter)
		kdata->nmi_count++;
}
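
/*
 * The NMI time accumulated above is reported alongside each detected
 * sample (see get_sample()), so a latency caused by an NMI can be
 * identified from the nmi_count/nmi_total_ts fields of the trace entry
 * instead of being blamed on hardware or firmware.
 */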

/*
 * hwlat_err - report a hwlat error.
 */
#define hwlat_err(msg) ({						\
	struct trace_array *tr = hwlat_trace;				\
									\
	trace_array_printk_buf(tr->array_buffer.buffer, _THIS_IP_, msg); \
})

/**
 * get_sample - sample the CPU TSC and look for likely hardware latencies
 *
 * Used to repeatedly capture the CPU TSC (or similar), looking for potential
 * hardware-induced latency. Called with interrupts disabled and with
 * hwlat_data.lock held.
 */
static int get_sample(void)
{
	struct hwlat_kthread_data *kdata = get_cpu_data();
	struct trace_array *tr = hwlat_trace;
	struct hwlat_sample s;
	time_type start, t1, t2, last_t2;
	s64 diff, outer_diff, total, last_total = 0;
	u64 sample = 0;
	u64 thresh = tracing_thresh;
	u64 outer_sample = 0;
	int ret = -1;
	unsigned int count = 0;

	do_div(thresh, NSEC_PER_USEC);	/* modifies thresh in place: ns -> us */

	kdata->nmi_total_ts = 0;
	kdata->nmi_count = 0;
	/* Make sure NMIs see this first */
	barrier();

	trace_hwlat_callback_enabled = true;

	init_time(last_t2, 0);
	start = time_get(); /* start timestamp */
	outer_diff = 0;

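	/*
	 * Each pass below takes two back-to-back timestamps.  The inner
	 * delta (t1 -> t2) catches a stall between the two reads; the
	 * outer delta (previous t2 -> this t1) catches a stall in the
	 * rest of the loop body:
	 *
	 *	t1   t2          t1   t2          t1   t2
	 *	|<in>|<- outer ->|<in>|<- outer ->|<in>|
	 *
	 * Any delta above the threshold is a suspected hardware or
	 * firmware induced latency.
	 */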
	do {

		t1 = time_get();	/* we'll look for a discontinuity */
		t2 = time_get();

		if (time_u64(last_t2)) {
			/* Check the delta from outer loop (t2 to next t1) */
			outer_diff = time_to_us(time_sub(t1, last_t2));
			/* This shouldn't happen */
			if (outer_diff < 0) {
				hwlat_err(BANNER "time running backwards\n");
				goto out;
			}
			if (outer_diff > outer_sample)
				outer_sample = outer_diff;
		}
		last_t2 = t2;

		total = time_to_us(time_sub(t2, start)); /* sample width */

		/* Check for possible overflows */
		if (total < last_total) {
			hwlat_err("Time total overflowed\n");
			break;
		}
		last_total = total;

		/* This checks the inner loop (t1 to t2) */
		diff = time_to_us(time_sub(t2, t1)); /* current diff */

		if (diff > thresh || outer_diff > thresh) {
			if (!count)
				ktime_get_real_ts64(&s.timestamp);
			count++;
		}

		/* This shouldn't happen */
		if (diff < 0) {
			hwlat_err(BANNER "time running backwards\n");
			goto out;
		}

		if (diff > sample)
			sample = diff; /* only want highest value */

	} while (total <= hwlat_data.sample_width);

	barrier(); /* finish the above in the view for NMIs */
	trace_hwlat_callback_enabled = false;
	barrier(); /* Make sure nmi_total_ts is no longer updated */

	ret = 0;

	/* If we exceed the threshold value, we have found a hardware latency */
	if (sample > thresh || outer_sample > thresh) {
		u64 latency;

		ret = 1;

		/* We read in microseconds */
		if (kdata->nmi_total_ts)
			do_div(kdata->nmi_total_ts, NSEC_PER_USEC);

		hwlat_data.count++;
		s.seqnum = hwlat_data.count;
		s.duration = sample;
		s.outer_duration = outer_sample;
		s.nmi_total_ts = kdata->nmi_total_ts;
		s.nmi_count = kdata->nmi_count;
		s.count = count;
		trace_hwlat_sample(&s);

		latency = max(sample, outer_sample);

		/* Keep a running maximum ever recorded hardware latency */
		if (latency > tr->max_latency) {
			tr->max_latency = latency;
			latency_fsnotify(tr);
		}
	}

out:
	return ret;
}

static struct cpumask save_cpumask;

static void move_to_next_cpu(void)
{
	struct cpumask *current_mask = &save_cpumask;
	struct trace_array *tr = hwlat_trace;
	int next_cpu;

	/*
	 * If for some reason the user modifies the CPU affinity
	 * of this thread, then stop migrating for the duration
	 * of the current test.
	 */
	if (!cpumask_equal(current_mask, current->cpus_ptr))
		goto change_mode;

	cpus_read_lock();
	cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
	next_cpu = cpumask_next_wrap(raw_smp_processor_id(), current_mask);
	cpus_read_unlock();

	if (next_cpu >= nr_cpu_ids) /* Shouldn't happen! */
		goto change_mode;

	cpumask_clear(current_mask);
	cpumask_set_cpu(next_cpu, current_mask);

	set_cpus_allowed_ptr(current, current_mask);
	return;

change_mode:
	hwlat_data.thread_mode = MODE_NONE;
	pr_info(BANNER "cpumask changed while in round-robin mode, switching to mode none\n");
}

/*
 * kthread_fn - The CPU time sampling/hardware latency detection kernel thread
 *
 * Used to periodically sample the CPU TSC via a call to get_sample. We
 * disable interrupts, which does (intentionally) introduce latency since we
 * need to ensure nothing else might be running (and thus preempting).
 * Obviously this should never be used in production environments.
 *
 * Executes one loop iteration on each CPU in the tracing_cpumask sysfs file.
 */
static int kthread_fn(void *data)
{
	u64 interval;

	while (!kthread_should_stop()) {

		if (hwlat_data.thread_mode == MODE_ROUND_ROBIN)
			move_to_next_cpu();

		local_irq_disable();
		get_sample();
		local_irq_enable();

		mutex_lock(&hwlat_data.lock);
		interval = hwlat_data.sample_window - hwlat_data.sample_width;
		mutex_unlock(&hwlat_data.lock);

		do_div(interval, USEC_PER_MSEC); /* modifies interval value */

		/* Always sleep for at least 1ms */
		if (interval < 1)
			interval = 1;

		if (msleep_interruptible(interval))
			break;
	}

	return 0;
}
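
/*
 * With the default window/width (1,000,000us / 500,000us), the sleep
 * interval above works out to (1000000 - 500000) / 1000 = 500ms between
 * samples, i.e. a 50% duty cycle of busy-waiting.
 */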

/*
 * stop_single_kthread - Inform the hardware latency sampling/detector kthread to stop
 *
 * This kicks the running hardware latency sampling/detector kernel thread and
 * tells it to stop sampling now. Use this on unload and at system shutdown.
 */
static void stop_single_kthread(void)
{
	struct hwlat_kthread_data *kdata = get_cpu_data();
	struct task_struct *kthread;

	cpus_read_lock();
	kthread = kdata->kthread;

	if (!kthread)
		goto out_put_cpus;

	kthread_stop(kthread);
	kdata->kthread = NULL;

out_put_cpus:
	cpus_read_unlock();
}

/*
 * start_single_kthread - Kick off the hardware latency sampling/detector kthread
 *
 * This starts the kernel thread that will sit and sample the CPU timestamp
 * counter (TSC or similar) and look for potential hardware latencies.
 */
static int start_single_kthread(struct trace_array *tr)
{
	struct hwlat_kthread_data *kdata = get_cpu_data();
	struct cpumask *current_mask = &save_cpumask;
	struct task_struct *kthread;
	int next_cpu;

	cpus_read_lock();
	if (kdata->kthread)
		goto out_put_cpus;

	kthread = kthread_create(kthread_fn, NULL, "hwlatd");
	if (IS_ERR(kthread)) {
		pr_err(BANNER "could not start sampling thread\n");
		cpus_read_unlock();
		return -ENOMEM;
	}

	/* Just pick the first CPU on first iteration */
	cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);

	if (hwlat_data.thread_mode == MODE_ROUND_ROBIN) {
		next_cpu = cpumask_first(current_mask);
		cpumask_clear(current_mask);
		cpumask_set_cpu(next_cpu, current_mask);
	}

	set_cpus_allowed_ptr(kthread, current_mask);

	kdata->kthread = kthread;
	wake_up_process(kthread);

out_put_cpus:
	cpus_read_unlock();
	return 0;
}

/*
 * stop_cpu_kthread - Stop a hwlat cpu kthread
 */
static void stop_cpu_kthread(unsigned int cpu)
{
	struct task_struct *kthread;

	kthread = per_cpu(hwlat_per_cpu_data, cpu).kthread;
	if (kthread)
		kthread_stop(kthread);
	per_cpu(hwlat_per_cpu_data, cpu).kthread = NULL;
}

/*
 * stop_per_cpu_kthreads - Inform the hardware latency sampling/detector kthreads to stop
 *
 * This kicks the running hardware latency sampling/detector kernel threads and
 * tells them to stop sampling now. Use this on unload and at system shutdown.
 */
static void stop_per_cpu_kthreads(void)
{
	unsigned int cpu;

	cpus_read_lock();
	for_each_online_cpu(cpu)
		stop_cpu_kthread(cpu);
	cpus_read_unlock();
}

/*
 * start_cpu_kthread - Start a hwlat cpu kthread
 */
static int start_cpu_kthread(unsigned int cpu)
{
	struct task_struct *kthread;

	/* Do not start a new hwlatd thread if it is already running */
	if (per_cpu(hwlat_per_cpu_data, cpu).kthread)
		return 0;

	kthread = kthread_run_on_cpu(kthread_fn, NULL, cpu, "hwlatd/%u");
	if (IS_ERR(kthread)) {
		pr_err(BANNER "could not start sampling thread\n");
		return -ENOMEM;
	}

	per_cpu(hwlat_per_cpu_data, cpu).kthread = kthread;

	return 0;
}
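
/*
 * kthread_run_on_cpu() both binds the new thread to @cpu and fills the
 * "%u" in the name format with the CPU number, so in per-cpu mode the
 * threads show up as hwlatd/0, hwlatd/1, ... in the task list.
 */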

#ifdef CONFIG_HOTPLUG_CPU
static void hwlat_hotplug_workfn(struct work_struct *dummy)
{
	struct trace_array *tr = hwlat_trace;
	unsigned int cpu = smp_processor_id();

	mutex_lock(&trace_types_lock);
	mutex_lock(&hwlat_data.lock);
	cpus_read_lock();

	if (!hwlat_busy || hwlat_data.thread_mode != MODE_PER_CPU)
		goto out_unlock;

	if (!cpu_online(cpu))
		goto out_unlock;
	if (!cpumask_test_cpu(cpu, tr->tracing_cpumask))
		goto out_unlock;

	start_cpu_kthread(cpu);

out_unlock:
	cpus_read_unlock();
	mutex_unlock(&hwlat_data.lock);
	mutex_unlock(&trace_types_lock);
}

static DECLARE_WORK(hwlat_hotplug_work, hwlat_hotplug_workfn);

/*
 * hwlat_cpu_init - CPU hotplug online callback function
 */
static int hwlat_cpu_init(unsigned int cpu)
{
	schedule_work_on(cpu, &hwlat_hotplug_work);
	return 0;
}

/*
 * hwlat_cpu_die - CPU hotplug offline callback function
 */
static int hwlat_cpu_die(unsigned int cpu)
{
	stop_cpu_kthread(cpu);
	return 0;
}

static void hwlat_init_hotplug_support(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "trace/hwlat:online",
				hwlat_cpu_init, hwlat_cpu_die);
	if (ret < 0)
		pr_warn(BANNER "Failed to initialize CPU hotplug support\n");
}
#else /* CONFIG_HOTPLUG_CPU */
static void hwlat_init_hotplug_support(void)
{
}
#endif /* CONFIG_HOTPLUG_CPU */
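
/*
 * Hotplug flow: when a CPU comes online, hwlat_cpu_init() queues
 * hwlat_hotplug_work on that CPU; the work function then starts a hwlatd
 * thread there, but only if the tracer is busy, running in per-cpu mode,
 * and the CPU is in tracing_cpumask. On offline, hwlat_cpu_die() stops
 * that CPU's thread directly.
 */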

/*
 * start_per_cpu_kthreads - Kick off the hardware latency sampling/detector kthreads
 *
 * This starts the kernel threads that will sit on potentially all cpus and
 * sample the CPU timestamp counter (TSC or similar) and look for potential
 * hardware latencies.
 */
static int start_per_cpu_kthreads(struct trace_array *tr)
{
	struct cpumask *current_mask = &save_cpumask;
	unsigned int cpu;
	int retval;

	cpus_read_lock();
	/*
	 * Run only on CPUs in which hwlat is allowed to run.
	 */
	cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);

	for_each_cpu(cpu, current_mask) {
		retval = start_cpu_kthread(cpu);
		if (retval)
			goto out_error;
	}
	cpus_read_unlock();

	return 0;

out_error:
	cpus_read_unlock();
	stop_per_cpu_kthreads();
	return retval;
}

static void *s_mode_start(struct seq_file *s, loff_t *pos)
{
	int mode = *pos;

	mutex_lock(&hwlat_data.lock);

	if (mode >= MODE_MAX)
		return NULL;

	return pos;
}

static void *s_mode_next(struct seq_file *s, void *v, loff_t *pos)
{
	int mode = ++(*pos);

	if (mode >= MODE_MAX)
		return NULL;

	return pos;
}

static int s_mode_show(struct seq_file *s, void *v)
{
	loff_t *pos = v;
	int mode = *pos;

	if (mode == hwlat_data.thread_mode)
		seq_printf(s, "[%s]", thread_mode_str[mode]);
	else
		seq_printf(s, "%s", thread_mode_str[mode]);

	if (mode < MODE_MAX - 1) /* if mode is any but last */
		seq_puts(s, " ");

	return 0;
}

static void s_mode_stop(struct seq_file *s, void *v)
{
	seq_puts(s, "\n");
	mutex_unlock(&hwlat_data.lock);
}
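
/*
 * The iterator above renders the "mode" file as one line with the current
 * mode bracketed, e.g. with the default round-robin mode selected:
 *
 *	none [round-robin] per-cpu
 */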

static const struct seq_operations thread_mode_seq_ops = {
	.start		= s_mode_start,
	.next		= s_mode_next,
	.show		= s_mode_show,
	.stop		= s_mode_stop
};

static int hwlat_mode_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &thread_mode_seq_ops);
}

static void hwlat_tracer_start(struct trace_array *tr);
static void hwlat_tracer_stop(struct trace_array *tr);

/**
 * hwlat_mode_write - Write function for "mode" entry
 * @filp: The active open file structure
 * @ubuf: The user buffer that contains the value to write
 * @cnt: The maximum number of bytes to write to "file"
 * @ppos: The current position in @file
 *
 * This function provides a write implementation for the "mode" interface
 * to the hardware latency detector. hwlatd has different operation modes.
 * The "none" mode sets the allowed cpumask for a single hwlatd thread at
 * startup and lets the scheduler handle the migration. The default mode is
 * the "round-robin" one, in which a single hwlatd thread runs, migrating
 * among the allowed CPUs in a round-robin fashion. The "per-cpu" mode
 * creates one hwlatd thread per allowed CPU.
 */
static ssize_t hwlat_mode_write(struct file *filp, const char __user *ubuf,
				size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = hwlat_trace;
	const char *mode;
	char buf[64];
	int ret, i;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	mode = strstrip(buf);

	ret = -EINVAL;

	/*
	 * trace_types_lock is taken to avoid concurrency on start/stop
	 * and hwlat_busy.
	 */
	mutex_lock(&trace_types_lock);
	if (hwlat_busy)
		hwlat_tracer_stop(tr);

	mutex_lock(&hwlat_data.lock);

	for (i = 0; i < MODE_MAX; i++) {
		if (strcmp(mode, thread_mode_str[i]) == 0) {
			hwlat_data.thread_mode = i;
			ret = cnt;
		}
	}

	mutex_unlock(&hwlat_data.lock);

	if (hwlat_busy)
		hwlat_tracer_start(tr);
	mutex_unlock(&trace_types_lock);

	*ppos += cnt;

	return ret;
}
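
/*
 * For example, switching a running tracer to one thread per CPU (again
 * assuming the standard tracefs mount point):
 *
 *	echo per-cpu > /sys/kernel/tracing/hwlat_detector/mode
 *
 * The write stops the current thread(s), updates the mode, and restarts
 * the tracer under the new mode.
 */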

/*
 * The width parameter is read/write using the generic trace_min_max_param
 * method. The *val is protected by the hwlat_data lock and is upper
 * bounded by the window parameter.
 */
static struct trace_min_max_param hwlat_width = {
	.lock		= &hwlat_data.lock,
	.val		= &hwlat_data.sample_width,
	.max		= &hwlat_data.sample_window,
	.min		= NULL,
};

/*
 * The window parameter is read/write using the generic trace_min_max_param
 * method. The *val is protected by the hwlat_data lock and is lower
 * bounded by the width parameter.
 */
static struct trace_min_max_param hwlat_window = {
	.lock		= &hwlat_data.lock,
	.val		= &hwlat_data.sample_window,
	.max		= NULL,
	.min		= &hwlat_data.sample_width,
};
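
/*
 * Together these two definitions enforce width <= window: a write to
 * "width" above the current window, or to "window" below the current
 * width, is rejected by the generic min/max handler.
 */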

static const struct file_operations thread_mode_fops = {
	.open		= hwlat_mode_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= hwlat_mode_write
};

/**
 * init_tracefs - A function to initialize the tracefs interface files
 *
 * This function creates entries in tracefs for "hwlat_detector".
 * It creates the hwlat_detector directory in the tracing directory,
 * and within that directory are the window, width and mode files to
 * change and view those values.
 */
static int init_tracefs(void)
{
	int ret;
	struct dentry *top_dir;

	ret = tracing_init_dentry();
	if (ret)
		return -ENOMEM;

	top_dir = tracefs_create_dir("hwlat_detector", NULL);
	if (!top_dir)
		return -ENOMEM;

	hwlat_sample_window = tracefs_create_file("window", TRACE_MODE_WRITE,
						  top_dir,
						  &hwlat_window,
						  &trace_min_max_fops);
	if (!hwlat_sample_window)
		goto err;

	hwlat_sample_width = tracefs_create_file("width", TRACE_MODE_WRITE,
						 top_dir,
						 &hwlat_width,
						 &trace_min_max_fops);
	if (!hwlat_sample_width)
		goto err;

	hwlat_thread_mode = trace_create_file("mode", TRACE_MODE_WRITE,
					      top_dir,
					      NULL,
					      &thread_mode_fops);
	if (!hwlat_thread_mode)
		goto err;

	return 0;

err:
	tracefs_remove(top_dir);
	return -ENOMEM;
}

static void hwlat_tracer_start(struct trace_array *tr)
{
	int err;

	if (hwlat_data.thread_mode == MODE_PER_CPU)
		err = start_per_cpu_kthreads(tr);
	else
		err = start_single_kthread(tr);
	if (err)
		pr_err(BANNER "Cannot start hwlat kthread\n");
}

static void hwlat_tracer_stop(struct trace_array *tr)
{
	if (hwlat_data.thread_mode == MODE_PER_CPU)
		stop_per_cpu_kthreads();
	else
		stop_single_kthread();
}

static int hwlat_tracer_init(struct trace_array *tr)
{
	/* Only allow one instance to enable this */
	if (hwlat_busy)
		return -EBUSY;

	hwlat_trace = tr;

	hwlat_data.count = 0;
	tr->max_latency = 0;
	save_tracing_thresh = tracing_thresh;

	/* tracing_thresh is in nsecs, we speak in usecs */
	if (!tracing_thresh)
		tracing_thresh = last_tracing_thresh;

	if (tracer_tracing_is_on(tr))
		hwlat_tracer_start(tr);

	hwlat_busy = true;

	return 0;
}

static void hwlat_tracer_reset(struct trace_array *tr)
{
	hwlat_tracer_stop(tr);

	/* the tracing threshold is static between runs */
	last_tracing_thresh = tracing_thresh;

	tracing_thresh = save_tracing_thresh;
	hwlat_busy = false;
}
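
/*
 * Threshold handling across runs: if the global tracing_thresh is 0 when
 * the tracer starts, the last value used by hwlat (initially 10us) is
 * reinstated; on reset, whatever threshold was in effect is remembered
 * for the next run and the user's original tracing_thresh is restored.
 */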

static struct tracer hwlat_tracer __read_mostly =
{
	.name		= "hwlat",
	.init		= hwlat_tracer_init,
	.reset		= hwlat_tracer_reset,
	.start		= hwlat_tracer_start,
	.stop		= hwlat_tracer_stop,
	.allow_instances = true,
};

__init static int init_hwlat_tracer(void)
{
	int ret;

	mutex_init(&hwlat_data.lock);

	ret = register_tracer(&hwlat_tracer);
	if (ret)
		return ret;

	hwlat_init_hotplug_support();

	init_tracefs();

	return 0;
}
late_initcall(init_hwlat_tracer);