// SPDX-License-Identifier: GPL-2.0
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#define pr_fmt(fmt) fmt

#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <trace/events/sched.h>
#include <trace/syscall.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_generic_fields);
static LIST_HEAD(ftrace_common_fields);
static bool eventdir_initialized;

static LIST_HEAD(module_strings);

struct module_string {
	struct list_head	next;
	struct module		*module;
	char			*str;
};

#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;

static inline int system_refcount(struct event_subsystem *system)
{
	return system->ref_count;
}

static int system_refcount_inc(struct event_subsystem *system)
{
	return system->ref_count++;
}

static int system_refcount_dec(struct event_subsystem *system)
{
	return --system->ref_count;
}

/* Double loops, do not use break, only goto's work */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct trace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()		\
	}
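
/*
 * Usage sketch (as in trace_event_enable_cmd_record() below):
 *
 *	do_for_each_event_file(tr, file) {
 *		if (!(file->flags & EVENT_FILE_FL_ENABLED))
 *			continue;
 *		...
 *	} while_for_each_event_file();
 */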

static struct ftrace_event_field *
__find_event_field(struct list_head *head, const char *name)
{
	struct ftrace_event_field *field;

	list_for_each_entry(field, head, link) {
		if (!strcmp(field->name, name))
			return field;
	}

	return NULL;
}

struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name)
{
	struct ftrace_event_field *field;
	struct list_head *head;

	head = trace_get_fields(call);
	field = __find_event_field(head, name);
	if (field)
		return field;

	field = __find_event_field(&ftrace_generic_fields, name);
	if (field)
		return field;

	return __find_event_field(&ftrace_common_fields, name);
}

static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type, int len,
				int need_test)
{
	struct ftrace_event_field *field;

	field = kmem_cache_alloc(field_cachep, GFP_TRACE);
	if (!field)
		return -ENOMEM;

	field->name = name;
	field->type = type;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;
	field->needs_test = need_test;
	field->len = len;

	list_add(&field->link, head);

	return 0;
}

int trace_define_field(struct trace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type, 0, 0);
}
EXPORT_SYMBOL_GPL(trace_define_field);

static int trace_define_field_ext(struct trace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type, int len, int need_test)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type, len, need_test);
}

#define __generic_field(type, item, filter_type)			\
	ret = __trace_define_field(&ftrace_generic_fields, #type,	\
				   #item, 0, 0, is_signed_type(type),	\
				   filter_type, 0, 0);			\
	if (ret)							\
		return ret;

#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER,	\
				   0, 0);				\
	if (ret)							\
		return ret;
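
/*
 * As a sketch, __common_field(int, pid) in trace_define_common_fields()
 * below expands to roughly:
 *
 *	ret = __trace_define_field(&ftrace_common_fields, "int",
 *				   "common_pid",
 *				   offsetof(struct trace_entry, pid),
 *				   sizeof(ent.pid), is_signed_type(int),
 *				   FILTER_OTHER, 0, 0);
 *
 * so every common field is registered under a "common_" prefixed name
 * with its offset and size taken from struct trace_entry.
 */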

static int trace_define_generic_fields(void)
{
	int ret;

	__generic_field(int, CPU, FILTER_CPU);
	__generic_field(int, cpu, FILTER_CPU);
	__generic_field(int, common_cpu, FILTER_CPU);
	__generic_field(char *, COMM, FILTER_COMM);
	__generic_field(char *, comm, FILTER_COMM);
	__generic_field(char *, stacktrace, FILTER_STACKTRACE);
	__generic_field(char *, STACKTRACE, FILTER_STACKTRACE);

	return ret;
}

static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	/* Holds both preempt_count and migrate_disable */
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

	return ret;
}

static void trace_destroy_fields(struct trace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kmem_cache_free(field_cachep, field);
	}
}

/*
 * run-time version of trace_event_get_offsets_<call>() that returns the last
 * accessible offset of trace fields excluding __dynamic_array bytes
 */
int trace_event_get_offsets(struct trace_event_call *call)
{
	struct ftrace_event_field *tail;
	struct list_head *head;

	head = trace_get_fields(call);
	/*
	 * head->next points to the last field with the largest offset,
	 * since it was added last by trace_define_field()
	 */
	tail = list_first_entry(head, struct ftrace_event_field, link);
	return tail->offset + tail->size;
}
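
/*
 * For example, if the last field defined was a 4-byte "int" at offset 12,
 * this returns 16: the first offset past the fixed-size field data.
 */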

static struct trace_event_fields *find_event_field(const char *fmt,
						   struct trace_event_call *call)
{
	struct trace_event_fields *field = call->class->fields_array;
	const char *p = fmt;
	int len;

	if (!(len = str_has_prefix(fmt, "REC->")))
		return NULL;
	fmt += len;
	for (p = fmt; *p; p++) {
		if (!isalnum(*p) && *p != '_')
			break;
	}
	len = p - fmt;

	for (; field->type; field++) {
		if (strncmp(field->name, fmt, len) || field->name[len])
			continue;

		return field;
	}
	return NULL;
}

/*
 * Check if the referenced field is an array and return true,
 * as arrays are OK to dereference.
 */
static bool test_field(const char *fmt, struct trace_event_call *call)
{
	struct trace_event_fields *field;

	field = find_event_field(fmt, call);
	if (!field)
		return false;

	/* This is an array and is OK to dereference. */
	return strchr(field->type, '[') != NULL;
}

/* Look for a string within an argument */
static bool find_print_string(const char *arg, const char *str, const char *end)
{
	const char *r;

	r = strstr(arg, str);
	return r && r < end;
}

/* Return true if the argument pointer is safe */
static bool process_pointer(const char *fmt, int len, struct trace_event_call *call)
{
	const char *r, *e, *a;

	e = fmt + len;

	/* Find the REC-> in the argument */
	r = strstr(fmt, "REC->");
	if (r && r < e) {
		/*
		 * Addresses of events on the buffer, or an array on the buffer is
		 * OK to dereference. There are ways to fool this, but
		 * this is to catch common mistakes, not malicious code.
		 */
		a = strchr(fmt, '&');
		if ((a && (a < r)) || test_field(r, call))
			return true;
	} else if (find_print_string(fmt, "__get_dynamic_array(", e)) {
		return true;
	} else if (find_print_string(fmt, "__get_rel_dynamic_array(", e)) {
		return true;
	} else if (find_print_string(fmt, "__get_dynamic_array_len(", e)) {
		return true;
	} else if (find_print_string(fmt, "__get_rel_dynamic_array_len(", e)) {
		return true;
	} else if (find_print_string(fmt, "__get_sockaddr(", e)) {
		return true;
	} else if (find_print_string(fmt, "__get_rel_sockaddr(", e)) {
		return true;
	}
	return false;
}
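
/*
 * Sketch of what the above accepts: "&REC->node" (an address within the
 * event), "REC->buf" where the field type contains '[' (an array in the
 * event), or one of the __get_*() helpers that resolve to data copied
 * into the event. A bare "REC->ptr" pointing outside the event fails.
 */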

/* Return true if the string is safe */
static bool process_string(const char *fmt, int len, struct trace_event_call *call)
{
	struct trace_event_fields *field;
	const char *r, *e, *s;

	e = fmt + len;

	/*
	 * There are several helper functions that return strings.
	 * If the argument contains a function, then assume its field is valid.
	 * The argument is considered to contain a function if an alphanumeric
	 * character or '_' precedes a parenthesis.
	 */
	s = fmt;
	do {
		r = strstr(s, "(");
		if (!r || r >= e)
			break;
		for (int i = 1; r - i >= s; i++) {
			char ch = *(r - i);
			if (isspace(ch))
				continue;
			if (isalnum(ch) || ch == '_')
				return true;
			/* Anything else, this isn't a function */
			break;
		}
		/* A function could be wrapped in parenthesis, try the next one */
		s = r + 1;
	} while (s < e);

	/*
	 * Check for arrays. If the argument has: foo[REC->val]
	 * then it is very likely that foo is an array of strings
	 * that are safe to use.
	 */
	r = strstr(s, "[");
	if (r && r < e) {
		r = strstr(r, "REC->");
		if (r && r < e)
			return true;
	}

	/*
	 * If there are any strings in the argument consider this arg OK as it
	 * could be: REC->field ? "foo" : "bar" and we don't want to get into
	 * verifying that logic here.
	 */
	if (find_print_string(fmt, "\"", e))
		return true;

	/* Dereferenced strings are also valid like any other pointer */
	if (process_pointer(fmt, len, call))
		return true;

	/* Make sure the field is found */
	field = find_event_field(fmt, call);
	if (!field)
		return false;

	/* Test this field's string before printing the event */
	call->flags |= TRACE_EVENT_FL_TEST_STR;
	field->needs_test = 1;

	return true;
}
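
/*
 * Rough examples of arguments that pass: "__get_str(msg)" (a helper
 * function), "names[REC->idx]" (a string array indexed by an event
 * field), and "REC->flag ? "yes" : "no"" (contains literal strings).
 * A plain "REC->str" that matches an event field is not rejected but
 * flagged via needs_test for a run-time check instead. (The names here
 * are only illustrative.)
 */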

static void handle_dereference_arg(const char *arg_str, u64 string_flags, int len,
				   u64 *dereference_flags, int arg,
				   struct trace_event_call *call)
{
	if (string_flags & (1ULL << arg)) {
		if (process_string(arg_str, len, call))
			*dereference_flags &= ~(1ULL << arg);
	} else if (process_pointer(arg_str, len, call))
		*dereference_flags &= ~(1ULL << arg);
	else
		pr_warn("TRACE EVENT ERROR: Bad dereference argument: '%.*s'\n",
			len, arg_str);
}

/*
 * Examine the print fmt of the event looking for unsafe dereference
 * pointers using %p* that could be recorded in the trace event and
 * much later referenced after the pointer was freed. Dereferencing
 * a pointer is OK if it is dereferenced into the event itself.
 */
static void test_event_printk(struct trace_event_call *call)
{
	u64 dereference_flags = 0;
	u64 string_flags = 0;
	bool first = true;
	const char *fmt;
	int parens = 0;
	char in_quote = 0;
	int start_arg = 0;
	int arg = 0;
	int i, e;

	fmt = call->print_fmt;

	if (!fmt)
		return;

	for (i = 0; fmt[i]; i++) {
		switch (fmt[i]) {
		case '\\':
			i++;
			if (!fmt[i])
				return;
			continue;
		case '"':
		case '\'':
			/*
			 * The print fmt starts with a string that
			 * is processed first to find %p* usage,
			 * then after the first string, the print fmt
			 * contains arguments that are used to check
			 * if the dereferenced %p* usage is safe.
			 */
			if (first) {
				if (fmt[i] == '\'')
					continue;
				if (in_quote) {
					arg = 0;
					first = false;
					/*
					 * If there were no %p* uses,
					 * the fmt is OK.
					 */
					if (!dereference_flags)
						return;
				}
			}
			if (in_quote) {
				if (in_quote == fmt[i])
					in_quote = 0;
			} else {
				in_quote = fmt[i];
			}
			continue;
		case '%':
			if (!first || !in_quote)
				continue;
			i++;
			if (!fmt[i])
				return;
			switch (fmt[i]) {
			case '%':
				continue;
			case 'p':
 do_pointer:
				/* Find dereferencing fields */
				switch (fmt[i + 1]) {
				case 'B': case 'R': case 'r':
				case 'b': case 'M': case 'm':
				case 'I': case 'i': case 'E':
				case 'U': case 'V': case 'N':
				case 'a': case 'd': case 'D':
				case 'g': case 't': case 'C':
				case 'O': case 'f':
					if (WARN_ONCE(arg == 63,
						      "Too many args for event: %s",
						      trace_event_name(call)))
						return;
					dereference_flags |= 1ULL << arg;
				}
				break;
			default:
			{
				bool star = false;
				int j;

				/* Increment arg if %*s exists. */
				for (j = 0; fmt[i + j]; j++) {
					if (isdigit(fmt[i + j]) ||
					    fmt[i + j] == '.')
						continue;
					if (fmt[i + j] == '*') {
						star = true;
						/* Handle %*pbl case */
						if (!j && fmt[i + 1] == 'p') {
							arg++;
							i++;
							goto do_pointer;
						}
						continue;
					}
					if (fmt[i + j] == 's') {
						if (star)
							arg++;
						if (WARN_ONCE(arg == 63,
							      "Too many args for event: %s",
							      trace_event_name(call)))
							return;
						dereference_flags |= 1ULL << arg;
						string_flags |= 1ULL << arg;
					}
					break;
				}
				break;
			} /* default */

			} /* switch */
			arg++;
			continue;
		case '(':
			if (in_quote)
				continue;
			parens++;
			continue;
		case ')':
			if (in_quote)
				continue;
			parens--;
			if (WARN_ONCE(parens < 0,
				      "Paren mismatch for event: %s\narg='%s'\n%*s",
				      trace_event_name(call),
				      fmt + start_arg,
				      (i - start_arg) + 5, "^"))
				return;
			continue;
		case ',':
			if (in_quote || parens)
				continue;
			e = i;
			i++;
			while (isspace(fmt[i]))
				i++;

			/*
			 * If start_arg is zero, then this is the start of the
			 * first argument. The processing of the argument happens
			 * when the end of the argument is found, as it needs to
			 * handle parenthesis and such.
			 */
			if (!start_arg) {
				start_arg = i;
				/* Balance out the i++ in the for loop */
				i--;
				continue;
			}

			if (dereference_flags & (1ULL << arg)) {
				handle_dereference_arg(fmt + start_arg, string_flags,
						       e - start_arg,
						       &dereference_flags, arg, call);
			}

			start_arg = i;
			arg++;
			/* Balance out the i++ in the for loop */
			i--;
		}
	}

	if (dereference_flags & (1ULL << arg)) {
		handle_dereference_arg(fmt + start_arg, string_flags,
				       i - start_arg,
				       &dereference_flags, arg, call);
	}

	/*
	 * If you triggered the below warning, the trace event reported
	 * uses an unsafe dereference pointer %p*. As the data stored
	 * at the trace event time may no longer exist when the trace
	 * event is printed, dereferencing to the original source is
	 * unsafe. The source of the dereference must be copied into the
	 * event itself, and the dereference must access the copy instead.
	 */
	if (WARN_ON_ONCE(dereference_flags)) {
		arg = 1;
		while (!(dereference_flags & 1)) {
			dereference_flags >>= 1;
			arg++;
		}
		pr_warn("event %s has unsafe dereference of argument %d\n",
			trace_event_name(call), arg);
		pr_warn("print_fmt: %s\n", fmt);
	}
}
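
/*
 * Sketch of what triggers the warning above: a print_fmt such as
 *
 *	"%pI4", REC->addr_ptr
 *
 * where addr_ptr is (hypothetically) a plain pointer field, since the
 * data it points to may be freed before the event is read. Pointers
 * resolving into the event itself, e.g. an array field or
 * __get_dynamic_array(), pass the check.
 */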

int trace_event_raw_init(struct trace_event_call *call)
{
	int id;

	id = register_trace_event(&call->event);
	if (!id)
		return -ENODEV;

	test_event_printk(call);

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
{
	struct trace_array *tr = trace_file->tr;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_raw(tr->filtered_pids);
	no_pid_list = rcu_dereference_raw(tr->filtered_no_pids);

	if (!pid_list && !no_pid_list)
		return false;

	/*
	 * This is recorded at every sched_switch for this task.
	 * Thus, even if the task migrates the ignore value will be the same.
	 */
	return this_cpu_read(tr->array_buffer.data->ignore_pid) != 0;
}
EXPORT_SYMBOL_GPL(trace_event_ignore_this_pid);

void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
				 struct trace_event_file *trace_file,
				 unsigned long len)
{
	struct trace_event_call *event_call = trace_file->event_call;

	if ((trace_file->flags & EVENT_FILE_FL_PID_FILTER) &&
	    trace_event_ignore_this_pid(trace_file))
		return NULL;

	/*
	 * If CONFIG_PREEMPTION is enabled, then the tracepoint itself disables
	 * preemption (adding one to the preempt_count). Since we are
	 * interested in the preempt_count at the time the tracepoint was
	 * hit, we need to subtract one to offset the increment.
	 */
	fbuffer->trace_ctx = tracing_gen_ctx_dec();
	fbuffer->trace_file = trace_file;

	fbuffer->event =
		trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
						event_call->event.type, len,
						fbuffer->trace_ctx);
	if (!fbuffer->event)
		return NULL;

	fbuffer->regs = NULL;
	fbuffer->entry = ring_buffer_event_data(fbuffer->event);
	return fbuffer->entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_reserve);

int trace_event_reg(struct trace_event_call *call,
		    enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->probe,
						 file);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->probe,
					    file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		if (!call->class->perf_probe)
			return -ENODEV;
		return tracepoint_probe_register(call->tp,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->perf_probe,
					    call);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	lockdep_assert_held(&event_mutex);

	do_for_each_event_file(tr, file) {

		if (!(file->flags & EVENT_FILE_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		} else {
			tracing_stop_cmdline_record();
			clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		}
	} while_for_each_event_file();
}

void trace_event_enable_tgid_record(bool enable)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	lockdep_assert_held(&event_mutex);

	do_for_each_event_file(tr, file) {
		if (!(file->flags & EVENT_FILE_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_tgid_record();
			set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
		} else {
			tracing_stop_tgid_record();
			clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT,
				  &file->flags);
		}
	} while_for_each_event_file();
}

static int __ftrace_event_enable_disable(struct trace_event_file *file,
					 int enable, int soft_disable)
{
	struct trace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	bool soft_mode = atomic_read(&file->sm_ref) != 0;
	int ret = 0;
	int disable;

	switch (enable) {
	case 0:
		/*
		 * When soft_disable is set and enable is cleared, the sm_ref
		 * reference counter is decremented. If it reaches 0, we want
		 * to clear the SOFT_DISABLED flag but leave the event in the
		 * state that it was. That is, if the event was enabled and
		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
		 * is set we do not want the event to be enabled before we
		 * clear the bit.
		 *
		 * When soft_disable is not set but soft_mode is,
		 * we do nothing. Do not disable the tracepoint, otherwise
		 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
		 */
		if (soft_disable) {
			if (atomic_dec_return(&file->sm_ref) > 0)
				break;
			disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
			soft_mode = false;
			/* Disable use of trace_buffered_event */
			trace_buffered_event_disable();
		} else
			disable = !soft_mode;

		if (disable && (file->flags & EVENT_FILE_FL_ENABLED)) {
			clear_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
			if (file->flags & EVENT_FILE_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}

			if (file->flags & EVENT_FILE_FL_RECORDED_TGID) {
				tracing_stop_tgid_record();
				clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
			}

			ret = call->class->reg(call, TRACE_REG_UNREGISTER, file);

			WARN_ON_ONCE(ret);
		}
		/* If in soft mode, just set the SOFT_DISABLE_BIT, else clear it */
		if (soft_mode)
			set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		break;
	case 1:
		/*
		 * When soft_disable is set and enable is set, we want to
		 * register the tracepoint for the event, but leave the event
		 * as is. That means, if the event was already enabled, we do
		 * nothing. If the event is disabled, we set SOFT_DISABLED
		 * before enabling the event tracepoint, so it still seems
		 * to be disabled.
		 */
		if (!soft_disable)
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else {
			if (atomic_inc_return(&file->sm_ref) > 1)
				break;
			/* Enable use of trace_buffered_event */
			trace_buffered_event_enable();
		}

		if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
			bool cmd = false, tgid = false;

			/* Keep the event disabled, when going to soft mode. */
			if (soft_disable)
				set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);

			if (tr->trace_flags & TRACE_ITER(RECORD_CMD)) {
				cmd = true;
				tracing_start_cmdline_record();
				set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}

			if (tr->trace_flags & TRACE_ITER(RECORD_TGID)) {
				tgid = true;
				tracing_start_tgid_record();
				set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
			}

			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
			if (ret) {
				if (cmd)
					tracing_stop_cmdline_record();
				if (tgid)
					tracing_stop_tgid_record();
				pr_info("event trace: Could not enable event %s\n",
					trace_event_name(call));
				break;
			}
			set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);

			/* WAS_ENABLED gets set but never cleared. */
			set_bit(EVENT_FILE_FL_WAS_ENABLED_BIT, &file->flags);
		}
		break;
	}

	return ret;
}
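
/*
 * In short, for the (enable, soft_disable) pairs handled above:
 *
 *	(1, 0)	enable the event outright
 *	(0, 0)	disable it, unless it is in soft mode
 *	(1, 1)	take an sm_ref; on the first one, register the tracepoint
 *		with output gated by SOFT_DISABLED
 *	(0, 1)	drop an sm_ref; on the last one, leave the event in the
 *		state it was in before soft mode
 */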

int trace_event_enable_disable(struct trace_event_file *file,
			       int enable, int soft_disable)
{
	return __ftrace_event_enable_disable(file, enable, soft_disable);
}

static int ftrace_event_enable_disable(struct trace_event_file *file,
				       int enable)
{
	return __ftrace_event_enable_disable(file, enable, 0);
}

#ifdef CONFIG_MODULES
struct event_mod_load {
	struct list_head	list;
	char			*module;
	char			*match;
	char			*system;
	char			*event;
};

static void free_event_mod(struct event_mod_load *event_mod)
{
	list_del(&event_mod->list);
	kfree(event_mod->module);
	kfree(event_mod->match);
	kfree(event_mod->system);
	kfree(event_mod->event);
	kfree(event_mod);
}

static void clear_mod_events(struct trace_array *tr)
{
	struct event_mod_load *event_mod, *n;

	list_for_each_entry_safe(event_mod, n, &tr->mod_events, list) {
		free_event_mod(event_mod);
	}
}

static int remove_cache_mod(struct trace_array *tr, const char *mod,
			    const char *match, const char *system, const char *event)
{
	struct event_mod_load *event_mod, *n;
	int ret = -EINVAL;

	list_for_each_entry_safe(event_mod, n, &tr->mod_events, list) {
		if (strcmp(event_mod->module, mod) != 0)
			continue;

		if (match && strcmp(event_mod->match, match) != 0)
			continue;

		if (system &&
		    (!event_mod->system || strcmp(event_mod->system, system) != 0))
			continue;

		if (event &&
		    (!event_mod->event || strcmp(event_mod->event, event) != 0))
			continue;

		free_event_mod(event_mod);
		ret = 0;
	}

	return ret;
}

static int cache_mod(struct trace_array *tr, const char *mod, int set,
		     const char *match, const char *system, const char *event)
{
	struct event_mod_load *event_mod;

	/* If the module exists, then this just failed to find an event */
	if (module_exists(mod))
		return -EINVAL;

	/* See if this is to remove a cached filter */
	if (!set)
		return remove_cache_mod(tr, mod, match, system, event);

	event_mod = kzalloc(sizeof(*event_mod), GFP_KERNEL);
	if (!event_mod)
		return -ENOMEM;

	INIT_LIST_HEAD(&event_mod->list);
	event_mod->module = kstrdup(mod, GFP_KERNEL);
	if (!event_mod->module)
		goto out_free;

	if (match) {
		event_mod->match = kstrdup(match, GFP_KERNEL);
		if (!event_mod->match)
			goto out_free;
	}

	if (system) {
		event_mod->system = kstrdup(system, GFP_KERNEL);
		if (!event_mod->system)
			goto out_free;
	}

	if (event) {
		event_mod->event = kstrdup(event, GFP_KERNEL);
		if (!event_mod->event)
			goto out_free;
	}

	list_add(&event_mod->list, &tr->mod_events);

	return 0;

 out_free:
	free_event_mod(event_mod);

	return -ENOMEM;
}
#else /* CONFIG_MODULES */
static inline void clear_mod_events(struct trace_array *tr) { }
static int cache_mod(struct trace_array *tr, const char *mod, int set,
		     const char *match, const char *system, const char *event)
{
	return -EINVAL;
}
#endif
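
/*
 * For example, writing "mysys:myevent:mod:mymod" to set_event while
 * "mymod" is not loaded ends up cached via cache_mod() above, so the
 * setting can be applied once the module is loaded; the "!" form
 * removes the cached entry again through remove_cache_mod().
 * (The names here are only illustrative.)
 */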

static void ftrace_clear_events(struct trace_array *tr)
{
	struct trace_event_file *file;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		ftrace_event_enable_disable(file, 0);
	}
	clear_mod_events(tr);
	mutex_unlock(&event_mutex);
}

static void
event_filter_pid_sched_process_exit(void *data, struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	pid_list = rcu_dereference_raw(tr->filtered_pids);
	trace_filter_add_remove_task(pid_list, NULL, task);

	pid_list = rcu_dereference_raw(tr->filtered_no_pids);
	trace_filter_add_remove_task(pid_list, NULL, task);
}

static void
event_filter_pid_sched_process_fork(void *data,
				    struct task_struct *self,
				    struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	trace_filter_add_remove_task(pid_list, self, task);

	pid_list = rcu_dereference_sched(tr->filtered_no_pids);
	trace_filter_add_remove_task(pid_list, self, task);
}

void trace_event_follow_fork(struct trace_array *tr, bool enable)
{
	if (enable) {
		register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork,
						       tr, INT_MIN);
		register_trace_prio_sched_process_free(event_filter_pid_sched_process_exit,
						       tr, INT_MAX);
	} else {
		unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork,
						    tr);
		unregister_trace_sched_process_free(event_filter_pid_sched_process_exit,
						    tr);
	}
}

static void
event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
					struct task_struct *prev,
					struct task_struct *next,
					unsigned int prev_state)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;
	bool ret;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	/*
	 * Sched switch is funny, as we only want to ignore it
	 * in the notrace case if both prev and next should be ignored.
	 */
	ret = trace_ignore_this_task(NULL, no_pid_list, prev) &&
	      trace_ignore_this_task(NULL, no_pid_list, next);

	this_cpu_write(tr->array_buffer.data->ignore_pid, ret ||
		       (trace_ignore_this_task(pid_list, NULL, prev) &&
			trace_ignore_this_task(pid_list, NULL, next)));
}

static void
event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
					 struct task_struct *prev,
					 struct task_struct *next,
					 unsigned int prev_state)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, next));
}

static void
event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	/* Nothing to do if we are already tracing */
	if (!this_cpu_read(tr->array_buffer.data->ignore_pid))
		return;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, task));
}

static void
event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	/* Nothing to do if we are not tracing */
	if (this_cpu_read(tr->array_buffer.data->ignore_pid))
		return;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	/* Set tracing if current is enabled */
	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, current));
}

static void unregister_pid_events(struct trace_array *tr)
{
	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);

	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr);

	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr);

	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr);
}

static void __ftrace_clear_event_pids(struct trace_array *tr, int type)
{
	struct trace_pid_list *pid_list;
	struct trace_pid_list *no_pid_list;
	struct trace_event_file *file;
	int cpu;

	pid_list = rcu_dereference_protected(tr->filtered_pids,
					     lockdep_is_held(&event_mutex));
	no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
						lockdep_is_held(&event_mutex));

	/* Make sure there's something to do */
	if (!pid_type_enabled(type, pid_list, no_pid_list))
		return;

	if (!still_need_pid_events(type, pid_list, no_pid_list)) {
		unregister_pid_events(tr);

		list_for_each_entry(file, &tr->events, list) {
			clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
		}

		for_each_possible_cpu(cpu)
			per_cpu_ptr(tr->array_buffer.data, cpu)->ignore_pid = false;
	}

	if (type & TRACE_PIDS)
		rcu_assign_pointer(tr->filtered_pids, NULL);

	if (type & TRACE_NO_PIDS)
		rcu_assign_pointer(tr->filtered_no_pids, NULL);

	/* Wait till all users are no longer using pid filtering */
	tracepoint_synchronize_unregister();

	if ((type & TRACE_PIDS) && pid_list)
		trace_pid_list_free(pid_list);

	if ((type & TRACE_NO_PIDS) && no_pid_list)
		trace_pid_list_free(no_pid_list);
}

static void ftrace_clear_event_pids(struct trace_array *tr, int type)
{
	mutex_lock(&event_mutex);
	__ftrace_clear_event_pids(tr, type);
	mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system_refcount(system) == 0);
	if (system_refcount_dec(system))
		return;

	list_del(&system->list);

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	kfree_const(system->name);
	kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system_refcount(system) == 0);
	system_refcount_inc(system);
}

static void __get_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	dir->ref_count++;
	__get_system(dir->subsystem);
}

static void __put_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	/* If the subsystem is about to be freed, the dir must be too */
	WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);

	__put_system(dir->subsystem);
	if (!--dir->ref_count)
		kfree(dir);
}

static void put_system(struct trace_subsystem_dir *dir)
{
	mutex_lock(&event_mutex);
	__put_system_dir(dir);
	mutex_unlock(&event_mutex);
}

static void remove_subsystem(struct trace_subsystem_dir *dir)
{
	if (!dir)
		return;

	if (!--dir->nr_events) {
		eventfs_remove_dir(dir->ei);
		list_del(&dir->list);
		__put_system_dir(dir);
	}
}

void event_file_get(struct trace_event_file *file)
{
	refcount_inc(&file->ref);
}

void event_file_put(struct trace_event_file *file)
{
	if (WARN_ON_ONCE(!refcount_read(&file->ref))) {
		if (file->flags & EVENT_FILE_FL_FREED)
			kmem_cache_free(file_cachep, file);
		return;
	}

	if (refcount_dec_and_test(&file->ref)) {
		/* Count should only go to zero when it is freed */
		if (WARN_ON_ONCE(!(file->flags & EVENT_FILE_FL_FREED)))
			return;
		kmem_cache_free(file_cachep, file);
	}
}

static void remove_event_file_dir(struct trace_event_file *file)
{
	eventfs_remove_dir(file->ei);
	list_del(&file->list);
	remove_subsystem(file->system);
	free_event_filter(file->filter);
	file->flags |= EVENT_FILE_FL_FREED;
	event_file_put(file);
}

/*
 * __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, set, NULL) will
 * set/unset all events.
 */
static int
__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
			      const char *sub, const char *event, int set,
			      const char *mod)
{
	struct trace_event_file *file;
	struct trace_event_call *call;
	char *module __free(kfree) = NULL;
	const char *name;
	int ret = -EINVAL;
	int eret = 0;

	if (mod) {
		char *p;

		module = kstrdup(mod, GFP_KERNEL);
		if (!module)
			return -ENOMEM;

		/* Replace all '-' with '_' as that's what modules do */
		for (p = strchr(module, '-'); p; p = strchr(p + 1, '-'))
			*p = '_';
	}

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;

		/* If a module is specified, skip events that are not that module */
		if (module && (!call->module || strcmp(module_name(call->module), module)))
			continue;

		name = trace_event_name(call);

		if (!name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (match &&
		    strcmp(match, name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, name) != 0)
			continue;

		ret = ftrace_event_enable_disable(file, set);

		/*
		 * Save the first error and return that. Some events
		 * may still have been enabled, but let the user
		 * know that something went wrong.
		 */
		if (ret && !eret)
			eret = ret;

		ret = eret;
	}

	/*
	 * If this is a module setting and nothing was found,
	 * check if the module was loaded. If it wasn't, cache it.
	 */
	if (module && ret == -EINVAL && !eret)
		ret = cache_mod(tr, module, set, match, sub, event);

	return ret;
}

static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
				  const char *sub, const char *event, int set,
				  const char *mod)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set, mod);
	mutex_unlock(&event_mutex);

	return ret;
}

int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match, *mod;
	int ret;

	if (!tr)
		return -ENOENT;

	/* Module events can be appended with :mod:<module> */
	mod = strstr(buf, ":mod:");
	if (mod) {
		*mod = '\0';
		/* move to the module name */
		mod += 5;
	}

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	} else if (mod) {
		/* Allow wildcard for no length or star */
		if (!strlen(match) || strcmp(match, "*") == 0)
			match = NULL;
	}

	ret = __ftrace_set_clr_event(tr, match, sub, event, set, mod);

	/* Put back the colon to allow this to be called again */
	if (buf)
		*(buf - 1) = ':';

	return ret;
}
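
/*
 * For example, through the tracefs set_event file (see
 * ftrace_event_write() below, where a leading '!' clears instead):
 *
 *	echo 'sched:sched_switch' > set_event	  one event
 *	echo 'sched:' > set_event		  a whole subsystem
 *	echo '!sched:sched_switch' > set_event	  disable it again
 *	echo '*:*:mod:mymod' > set_event	  all events of module "mymod"
 *						  (cached if not yet loaded;
 *						   "mymod" is illustrative)
 */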

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	struct trace_array *tr = top_trace_array();

	if (!tr)
		return -ENODEV;

	return __ftrace_set_clr_event(tr, NULL, system, event, set, NULL);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);

/**
 * trace_array_set_clr_event - enable or disable an event for a trace array.
 * @tr: concerned trace array.
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @enable: true to enable, false to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_array_set_clr_event(struct trace_array *tr, const char *system,
			      const char *event, bool enable)
{
	int set;

	if (!tr)
		return -ENOENT;

	set = (enable == true) ? 1 : 0;
	return __ftrace_set_clr_event(tr, NULL, system, event, set, NULL);
}
EXPORT_SYMBOL_GPL(trace_array_set_clr_event);

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	struct seq_file *m = file->private_data;
	struct trace_array *tr = m->private;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers(tr);
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded(&parser)) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		call = file->event_call;
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They cannot be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg &&
		    !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
			return file;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct trace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = t_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

enum set_event_iter_type {
	SET_EVENT_FILE,
	SET_EVENT_MOD,
};

struct set_event_iter {
	enum set_event_iter_type	type;
	union {
		struct trace_event_file	*file;
		struct event_mod_load	*event_mod;
	};
};
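
/*
 * A set_event_iter walks the enabled event files first and then, under
 * CONFIG_MODULES, the cached :mod: entries on tr->mod_events; see
 * s_next() and s_show() below.
 */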
| 1587 | |
| 1588 | static void * |
| 1589 | s_next(struct seq_file *m, void *v, loff_t *pos) |
| 1590 | { |
| 1591 | struct set_event_iter *iter = v; |
| 1592 | struct trace_event_file *file; |
| 1593 | struct trace_array *tr = m->private; |
| 1594 | |
| 1595 | (*pos)++; |
| 1596 | |
| 1597 | if (iter->type == SET_EVENT_FILE) { |
| 1598 | file = iter->file; |
| 1599 | list_for_each_entry_continue(file, &tr->events, list) { |
| 1600 | if (file->flags & EVENT_FILE_FL_ENABLED) { |
| 1601 | iter->file = file; |
| 1602 | return iter; |
| 1603 | } |
| 1604 | } |
| 1605 | #ifdef CONFIG_MODULES |
| 1606 | iter->type = SET_EVENT_MOD; |
| 1607 | iter->event_mod = list_entry(&tr->mod_events, struct event_mod_load, list); |
| 1608 | #endif |
| 1609 | } |
| 1610 | |
| 1611 | #ifdef CONFIG_MODULES |
| 1612 | list_for_each_entry_continue(iter->event_mod, &tr->mod_events, list) |
| 1613 | return iter; |
| 1614 | #endif |
| 1615 | |
| 1616 | /* |
| 1617 | * The iter is allocated in s_start() and passed via the 'v' |
| 1618 | * parameter. To stop the iterator, NULL must be returned. But |
| 1619 | * the return value is what the 'v' parameter in s_stop() receives |
| 1620 | * and frees. Free iter here as it will no longer be used. |
| 1621 | */ |
| 1622 | kfree(objp: iter); |
| 1623 | return NULL; |
| 1624 | } |
| 1625 | |
| 1626 | static void *s_start(struct seq_file *m, loff_t *pos) |
| 1627 | { |
| 1628 | struct trace_array *tr = m->private; |
| 1629 | struct set_event_iter *iter; |
| 1630 | loff_t l; |
| 1631 | |
| 1632 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); |
| 1633 | mutex_lock(&event_mutex); |
| 1634 | if (!iter) |
| 1635 | return NULL; |
| 1636 | |
| 1637 | iter->type = SET_EVENT_FILE; |
| 1638 | iter->file = list_entry(&tr->events, struct trace_event_file, list); |
| 1639 | |
| 1640 | for (l = 0; l <= *pos; ) { |
| 1641 | iter = s_next(m, v: iter, pos: &l); |
| 1642 | if (!iter) |
| 1643 | break; |
| 1644 | } |
| 1645 | return iter; |
| 1646 | } |
| 1647 | |
| 1648 | static int t_show(struct seq_file *m, void *v) |
| 1649 | { |
| 1650 | struct trace_event_file *file = v; |
| 1651 | struct trace_event_call *call = file->event_call; |
| 1652 | |
| 1653 | if (strcmp(call->class->system, TRACE_SYSTEM) != 0) |
| 1654 | seq_printf(m, fmt: "%s:" , call->class->system); |
| 1655 | seq_printf(m, fmt: "%s\n" , trace_event_name(call)); |
| 1656 | |
| 1657 | return 0; |
| 1658 | } |
| 1659 | |
| 1660 | static void t_stop(struct seq_file *m, void *p) |
| 1661 | { |
| 1662 | mutex_unlock(lock: &event_mutex); |
| 1663 | } |
| 1664 | |
| 1665 | #ifdef CONFIG_MODULES |
| 1666 | static int s_show(struct seq_file *m, void *v) |
| 1667 | { |
| 1668 | struct set_event_iter *iter = v; |
| 1669 | const char *system; |
| 1670 | const char *event; |
| 1671 | |
| 1672 | if (iter->type == SET_EVENT_FILE) |
| 1673 | return t_show(m, iter->file);
| 1674 | |
| 1675 | /* When match is set, system and event are not */ |
| 1676 | if (iter->event_mod->match) { |
| 1677 | seq_printf(m, "%s:mod:%s\n", iter->event_mod->match,
| 1678 | iter->event_mod->module); |
| 1679 | return 0; |
| 1680 | } |
| 1681 | |
| 1682 | system = iter->event_mod->system ?: "*";
| 1683 | event = iter->event_mod->event ?: "*";
| 1684 |
| 1685 | seq_printf(m, "%s:%s:mod:%s\n", system, event, iter->event_mod->module);
| 1686 | |
| 1687 | return 0; |
| 1688 | } |
| 1689 | #else /* CONFIG_MODULES */ |
| 1690 | static int s_show(struct seq_file *m, void *v) |
| 1691 | { |
| 1692 | struct set_event_iter *iter = v; |
| 1693 | |
| 1694 | return t_show(m, iter->file); |
| 1695 | } |
| 1696 | #endif |
| 1697 | |
| 1698 | static void s_stop(struct seq_file *m, void *v) |
| 1699 | { |
| 1700 | kfree(v);
| 1701 | t_stop(m, NULL); |
| 1702 | } |
| 1703 | |
| 1704 | static void * |
| 1705 | __next(struct seq_file *m, void *v, loff_t *pos, int type) |
| 1706 | { |
| 1707 | struct trace_array *tr = m->private; |
| 1708 | struct trace_pid_list *pid_list; |
| 1709 | |
| 1710 | if (type == TRACE_PIDS) |
| 1711 | pid_list = rcu_dereference_sched(tr->filtered_pids); |
| 1712 | else |
| 1713 | pid_list = rcu_dereference_sched(tr->filtered_no_pids); |
| 1714 | |
| 1715 | return trace_pid_next(pid_list, v, pos); |
| 1716 | } |
| 1717 | |
| 1718 | static void * |
| 1719 | p_next(struct seq_file *m, void *v, loff_t *pos) |
| 1720 | { |
| 1721 | return __next(m, v, pos, TRACE_PIDS);
| 1722 | } |
| 1723 | |
| 1724 | static void * |
| 1725 | np_next(struct seq_file *m, void *v, loff_t *pos) |
| 1726 | { |
| 1727 | return __next(m, v, pos, TRACE_NO_PIDS);
| 1728 | } |
| 1729 | |
| 1730 | static void *__start(struct seq_file *m, loff_t *pos, int type) |
| 1731 | __acquires(RCU) |
| 1732 | { |
| 1733 | struct trace_pid_list *pid_list; |
| 1734 | struct trace_array *tr = m->private; |
| 1735 | |
| 1736 | /* |
| 1737 | * Grab the mutex so that calls to p_next() see the same
| 1738 | * tr->filtered_pids that p_start() saw.
| 1739 | * If we just passed the tr->filtered_pids around, then RCU would |
| 1740 | * have been enough, but doing that makes things more complex. |
| 1741 | */ |
| 1742 | mutex_lock(&event_mutex); |
| 1743 | rcu_read_lock_sched(); |
| 1744 | |
| 1745 | if (type == TRACE_PIDS) |
| 1746 | pid_list = rcu_dereference_sched(tr->filtered_pids); |
| 1747 | else |
| 1748 | pid_list = rcu_dereference_sched(tr->filtered_no_pids); |
| 1749 | |
| 1750 | if (!pid_list) |
| 1751 | return NULL; |
| 1752 | |
| 1753 | return trace_pid_start(pid_list, pos); |
| 1754 | } |
| 1755 | |
| 1756 | static void *p_start(struct seq_file *m, loff_t *pos) |
| 1757 | __acquires(RCU) |
| 1758 | { |
| 1759 | return __start(m, pos, TRACE_PIDS);
| 1760 | } |
| 1761 | |
| 1762 | static void *np_start(struct seq_file *m, loff_t *pos) |
| 1763 | __acquires(RCU) |
| 1764 | { |
| 1765 | return __start(m, pos, TRACE_NO_PIDS);
| 1766 | } |
| 1767 | |
| 1768 | static void p_stop(struct seq_file *m, void *p) |
| 1769 | __releases(RCU) |
| 1770 | { |
| 1771 | rcu_read_unlock_sched(); |
| 1772 | mutex_unlock(&event_mutex);
| 1773 | } |
| 1774 | |
| 1775 | static ssize_t |
| 1776 | event_enable_read(struct file *filp, char __user *ubuf, size_t cnt, |
| 1777 | loff_t *ppos) |
| 1778 | { |
| 1779 | struct trace_event_file *file; |
| 1780 | unsigned long flags; |
| 1781 | char buf[4] = "0";
| 1782 | |
| 1783 | mutex_lock(&event_mutex); |
| 1784 | file = event_file_file(filp); |
| 1785 | if (likely(file)) |
| 1786 | flags = file->flags; |
| 1787 | mutex_unlock(lock: &event_mutex); |
| 1788 | |
| 1789 | if (!file) |
| 1790 | return -ENODEV; |
| 1791 | |
| 1792 | if (flags & EVENT_FILE_FL_ENABLED && |
| 1793 | !(flags & EVENT_FILE_FL_SOFT_DISABLED)) |
| 1794 | strcpy(buf, "1");
| 1795 |
| 1796 | if (atomic_read(&file->sm_ref) != 0)
| 1797 | strcat(buf, "*");
| 1798 |
| 1799 | strcat(buf, "\n");
| 1800 |
| 1801 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
| 1802 | } |
| 1803 | |
| 1804 | static ssize_t |
| 1805 | event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, |
| 1806 | loff_t *ppos) |
| 1807 | { |
| 1808 | struct trace_event_file *file; |
| 1809 | unsigned long val; |
| 1810 | int ret; |
| 1811 | |
| 1812 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
| 1813 | if (ret) |
| 1814 | return ret; |
| 1815 | |
| 1816 | guard(mutex)(&event_mutex);
| 1817 | |
| 1818 | switch (val) { |
| 1819 | case 0: |
| 1820 | case 1: |
| 1821 | file = event_file_file(filp); |
| 1822 | if (!file) |
| 1823 | return -ENODEV; |
| 1824 | ret = tracing_update_buffers(file->tr);
| 1825 | if (ret < 0) |
| 1826 | return ret; |
| 1827 | ret = ftrace_event_enable_disable(file, val);
| 1828 | if (ret < 0) |
| 1829 | return ret; |
| 1830 | break; |
| 1831 | |
| 1832 | default: |
| 1833 | return -EINVAL; |
| 1834 | } |
| 1835 | |
| 1836 | *ppos += cnt; |
| 1837 | |
| 1838 | return cnt; |
| 1839 | } |
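|      | /*
|      |  * Typical usage from user space, assuming tracefs is mounted at the
|      |  * usual location:
|      |  *
|      |  *   echo 1 > /sys/kernel/tracing/events/sched/sched_switch/enable
|      |  *   echo 0 > /sys/kernel/tracing/events/sched/sched_switch/enable
|      |  *
|      |  * Only the values 0 and 1 are accepted; anything else is -EINVAL.
|      |  */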
| 1840 | |
| 1841 | /*
| 1842 | * Returns:
| 1843 | *   0 : no events exist
| 1844 | *   1 : all events are disabled
| 1845 | *   2 : all events are enabled
| 1846 | *   3 : some events are enabled and some are disabled
| 1847 | */
| 1848 | int trace_events_enabled(struct trace_array *tr, const char *system) |
| 1849 | { |
| 1850 | struct trace_event_call *call; |
| 1851 | struct trace_event_file *file; |
| 1852 | int set = 0; |
| 1853 | |
| 1854 | guard(mutex)(&event_mutex);
| 1855 | |
| 1856 | list_for_each_entry(file, &tr->events, list) { |
| 1857 | call = file->event_call; |
| 1858 | if ((call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) || |
| 1859 | !trace_event_name(call) || !call->class || !call->class->reg) |
| 1860 | continue; |
| 1861 | |
| 1862 | if (system && strcmp(call->class->system, system) != 0) |
| 1863 | continue; |
| 1864 | |
| 1865 | /*
| 1866 | * We need to find out if all the events are set,
| 1867 | * if all events are cleared, or if we have
| 1868 | * a mixture.
| 1869 | */
| 1870 | set |= (1 << !!(file->flags & EVENT_FILE_FL_ENABLED)); |
| 1871 | |
| 1872 | /* |
| 1873 | * If we have a mixture, no need to look further. |
| 1874 | */ |
| 1875 | if (set == 3) |
| 1876 | break; |
| 1877 | } |
| 1878 | |
| 1879 | return set; |
| 1880 | } |
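|      | /*
|      |  * The return value is a two-bit mask: bit 0 is set when at least one
|      |  * event is disabled, bit 1 when at least one is enabled. The
|      |  * set_to_char[] table in system_enable_read() maps this to '?', '0',
|      |  * '1' or 'X', so a mixture reads back as "X".
|      |  */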
| 1881 | |
| 1882 | static ssize_t |
| 1883 | system_enable_read(struct file *filp, char __user *ubuf, size_t cnt, |
| 1884 | loff_t *ppos) |
| 1885 | { |
| 1886 | const char set_to_char[4] = { '?', '0', '1', 'X' }; |
| 1887 | struct trace_subsystem_dir *dir = filp->private_data; |
| 1888 | struct event_subsystem *system = dir->subsystem; |
| 1889 | struct trace_array *tr = dir->tr; |
| 1890 | char buf[2]; |
| 1891 | int set; |
| 1892 | int ret; |
| 1893 | |
| 1894 | set = trace_events_enabled(tr, system ? system->name : NULL);
| 1895 | |
| 1896 | buf[0] = set_to_char[set]; |
| 1897 | buf[1] = '\n'; |
| 1898 | |
| 1899 | ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
| 1900 | |
| 1901 | return ret; |
| 1902 | } |
| 1903 | |
| 1904 | static ssize_t |
| 1905 | system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, |
| 1906 | loff_t *ppos) |
| 1907 | { |
| 1908 | struct trace_subsystem_dir *dir = filp->private_data; |
| 1909 | struct event_subsystem *system = dir->subsystem; |
| 1910 | const char *name = NULL; |
| 1911 | unsigned long val; |
| 1912 | ssize_t ret; |
| 1913 | |
| 1914 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
| 1915 | if (ret) |
| 1916 | return ret; |
| 1917 | |
| 1918 | ret = tracing_update_buffers(dir->tr);
| 1919 | if (ret < 0) |
| 1920 | return ret; |
| 1921 | |
| 1922 | if (val != 0 && val != 1) |
| 1923 | return -EINVAL; |
| 1924 | |
| 1925 | /* |
| 1926 | * Opening of "enable" adds a ref count to system, |
| 1927 | * so the name is safe to use. |
| 1928 | */ |
| 1929 | if (system) |
| 1930 | name = system->name; |
| 1931 | |
| 1932 | ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val, NULL);
| 1933 | if (ret) |
| 1934 | goto out; |
| 1935 | |
| 1936 | ret = cnt; |
| 1937 | |
| 1938 | out: |
| 1939 | *ppos += cnt; |
| 1940 | |
| 1941 | return ret; |
| 1942 | } |
| 1943 | |
| 1944 | enum { |
| 1945 | FORMAT_HEADER = 1,
| 1946 | FORMAT_FIELD_SEPERATOR = 2, |
| 1947 | FORMAT_PRINTFMT = 3, |
| 1948 | }; |
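|      | /*
|      |  * f_next() walks the "format" file in a fixed order: FORMAT_HEADER,
|      |  * the common fields, FORMAT_FIELD_SEPERATOR, the event's own fields,
|      |  * and finally FORMAT_PRINTFMT. The field lists are walked via
|      |  * node->prev because fields are prepended as they are defined, so the
|      |  * reverse walk restores definition order.
|      |  */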
| 1949 | |
| 1950 | static void *f_next(struct seq_file *m, void *v, loff_t *pos) |
| 1951 | { |
| 1952 | struct trace_event_file *file = event_file_data(m->private);
| 1953 | struct trace_event_call *call = file->event_call; |
| 1954 | struct list_head *common_head = &ftrace_common_fields; |
| 1955 | struct list_head *head = trace_get_fields(call);
| 1956 | struct list_head *node = v; |
| 1957 | |
| 1958 | (*pos)++; |
| 1959 | |
| 1960 | switch ((unsigned long)v) { |
| 1961 | case FORMAT_HEADER: |
| 1962 | node = common_head; |
| 1963 | break; |
| 1964 | |
| 1965 | case FORMAT_FIELD_SEPERATOR: |
| 1966 | node = head; |
| 1967 | break; |
| 1968 | |
| 1969 | case FORMAT_PRINTFMT: |
| 1970 | /* all done */ |
| 1971 | return NULL; |
| 1972 | } |
| 1973 | |
| 1974 | node = node->prev; |
| 1975 | if (node == common_head) |
| 1976 | return (void *)FORMAT_FIELD_SEPERATOR; |
| 1977 | else if (node == head) |
| 1978 | return (void *)FORMAT_PRINTFMT; |
| 1979 | else |
| 1980 | return node; |
| 1981 | } |
| 1982 | |
| 1983 | static int f_show(struct seq_file *m, void *v) |
| 1984 | { |
| 1985 | struct trace_event_file *file = event_file_data(m->private);
| 1986 | struct trace_event_call *call = file->event_call; |
| 1987 | struct ftrace_event_field *field; |
| 1988 | const char *array_descriptor; |
| 1989 | |
| 1990 | switch ((unsigned long)v) { |
| 1991 | case FORMAT_HEADER: |
| 1992 | seq_printf(m, "name: %s\n", trace_event_name(call));
| 1993 | seq_printf(m, "ID: %d\n", call->event.type);
| 1994 | seq_puts(m, "format:\n");
| 1995 | return 0; |
| 1996 | |
| 1997 | case FORMAT_FIELD_SEPERATOR: |
| 1998 | seq_putc(m, '\n');
| 1999 | return 0; |
| 2000 | |
| 2001 | case FORMAT_PRINTFMT: |
| 2002 | seq_printf(m, "\nprint fmt: %s\n",
| 2003 | call->print_fmt); |
| 2004 | return 0; |
| 2005 | } |
| 2006 | |
| 2007 | field = list_entry(v, struct ftrace_event_field, link); |
| 2008 | /* |
| 2009 | * Smartly shows the array type (except for dynamic arrays).
| 2010 | * Normal: |
| 2011 | * field:TYPE VAR |
| 2012 | * If TYPE := TYPE[LEN], it is shown: |
| 2013 | * field:TYPE VAR[LEN] |
| 2014 | */ |
| 2015 | array_descriptor = strchr(field->type, '['); |
| 2016 | |
| 2017 | if (str_has_prefix(field->type, "__data_loc"))
| 2018 | array_descriptor = NULL; |
| 2019 | |
| 2020 | if (!array_descriptor) |
| 2021 | seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
| 2022 | field->type, field->name, field->offset, |
| 2023 | field->size, !!field->is_signed); |
| 2024 | else if (field->len) |
| 2025 | seq_printf(m, "\tfield:%.*s %s[%d];\toffset:%u;\tsize:%u;\tsigned:%d;\n",
| 2026 | (int)(array_descriptor - field->type), |
| 2027 | field->type, field->name, |
| 2028 | field->len, field->offset, |
| 2029 | field->size, !!field->is_signed); |
| 2030 | else |
| 2031 | seq_printf(m, "\tfield:%.*s %s[];\toffset:%u;\tsize:%u;\tsigned:%d;\n",
| 2032 | (int)(array_descriptor - field->type), |
| 2033 | field->type, field->name, |
| 2034 | field->offset, field->size, !!field->is_signed); |
| 2035 | |
| 2036 | return 0; |
| 2037 | } |
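|      | /*
|      |  * Example "format" file output as rendered by f_show() (the ID and
|      |  * field layout are illustrative):
|      |  *
|      |  *   name: sched_switch
|      |  *   ID: 316
|      |  *   format:
|      |  *       field:unsigned short common_type; offset:0; size:2; signed:0;
|      |  *
|      |  *       field:char prev_comm[16]; offset:8; size:16; signed:0;
|      |  *
|      |  *   print fmt: "prev_comm=%s ...", REC->prev_comm
|      |  */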
| 2038 | |
| 2039 | static void *f_start(struct seq_file *m, loff_t *pos) |
| 2040 | { |
| 2041 | struct trace_event_file *file; |
| 2042 | void *p = (void *)FORMAT_HEADER; |
| 2043 | loff_t l = 0; |
| 2044 | |
| 2045 | /* ->stop() is called even if ->start() fails */ |
| 2046 | mutex_lock(&event_mutex); |
| 2047 | file = event_file_file(m->private);
| 2048 | if (!file) |
| 2049 | return ERR_PTR(-ENODEV);
| 2050 | |
| 2051 | while (l < *pos && p) |
| 2052 | p = f_next(m, p, &l);
| 2053 | |
| 2054 | return p; |
| 2055 | } |
| 2056 | |
| 2057 | static void f_stop(struct seq_file *m, void *p) |
| 2058 | { |
| 2059 | mutex_unlock(&event_mutex);
| 2060 | } |
| 2061 | |
| 2062 | static const struct seq_operations trace_format_seq_ops = { |
| 2063 | .start = f_start, |
| 2064 | .next = f_next, |
| 2065 | .stop = f_stop, |
| 2066 | .show = f_show, |
| 2067 | }; |
| 2068 | |
| 2069 | static int trace_format_open(struct inode *inode, struct file *file) |
| 2070 | { |
| 2071 | struct seq_file *m; |
| 2072 | int ret; |
| 2073 | |
| 2074 | /* Do we want to hide event format files on tracefs lockdown? */ |
| 2075 | |
| 2076 | ret = seq_open(file, &trace_format_seq_ops); |
| 2077 | if (ret < 0) |
| 2078 | return ret; |
| 2079 | |
| 2080 | m = file->private_data; |
| 2081 | m->private = file; |
| 2082 | |
| 2083 | return 0; |
| 2084 | } |
| 2085 | |
| 2086 | #ifdef CONFIG_PERF_EVENTS |
| 2087 | static ssize_t |
| 2088 | event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) |
| 2089 | { |
| 2090 | int id = (long)event_file_data(filp); |
| 2091 | char buf[32]; |
| 2092 | int len; |
| 2093 | |
| 2094 | if (unlikely(!id)) |
| 2095 | return -ENODEV; |
| 2096 | |
| 2097 | len = sprintf(buf, "%d\n", id);
| 2098 |
| 2099 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
| 2100 | } |
| 2101 | #endif |
| 2102 | |
| 2103 | static ssize_t |
| 2104 | event_filter_read(struct file *filp, char __user *ubuf, size_t cnt, |
| 2105 | loff_t *ppos) |
| 2106 | { |
| 2107 | struct trace_event_file *file; |
| 2108 | struct trace_seq *s; |
| 2109 | int r = -ENODEV; |
| 2110 | |
| 2111 | if (*ppos) |
| 2112 | return 0; |
| 2113 | |
| 2114 | s = kmalloc(sizeof(*s), GFP_KERNEL); |
| 2115 | |
| 2116 | if (!s) |
| 2117 | return -ENOMEM; |
| 2118 | |
| 2119 | trace_seq_init(s); |
| 2120 | |
| 2121 | mutex_lock(&event_mutex); |
| 2122 | file = event_file_file(filp); |
| 2123 | if (file) |
| 2124 | print_event_filter(file, s); |
| 2125 | mutex_unlock(&event_mutex);
| 2126 | |
| 2127 | if (file) |
| 2128 | r = simple_read_from_buffer(ubuf, cnt, ppos,
| 2129 | s->buffer, trace_seq_used(s));
| 2130 | |
| 2131 | kfree(s);
| 2132 | |
| 2133 | return r; |
| 2134 | } |
| 2135 | |
| 2136 | static ssize_t |
| 2137 | event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, |
| 2138 | loff_t *ppos) |
| 2139 | { |
| 2140 | struct trace_event_file *file; |
| 2141 | char *buf; |
| 2142 | int err = -ENODEV; |
| 2143 | |
| 2144 | if (cnt >= PAGE_SIZE) |
| 2145 | return -EINVAL; |
| 2146 | |
| 2147 | buf = memdup_user_nul(ubuf, cnt); |
| 2148 | if (IS_ERR(buf))
| 2149 | return PTR_ERR(buf);
| 2150 | |
| 2151 | mutex_lock(&event_mutex); |
| 2152 | file = event_file_file(filp); |
| 2153 | if (file) { |
| 2154 | if (file->flags & EVENT_FILE_FL_FREED) |
| 2155 | err = -ENODEV; |
| 2156 | else |
| 2157 | err = apply_event_filter(file, buf);
| 2158 | } |
| 2159 | mutex_unlock(&event_mutex);
| 2160 |
| 2161 | kfree(buf);
| 2162 | if (err < 0) |
| 2163 | return err; |
| 2164 | |
| 2165 | *ppos += cnt; |
| 2166 | |
| 2167 | return cnt; |
| 2168 | } |
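|      | /*
|      |  * Example filter usage; the predicate string is parsed by
|      |  * apply_event_filter():
|      |  *
|      |  *   echo 'prev_pid == 0 && next_comm ~ "ksoftirqd*"' > \
|      |  *       /sys/kernel/tracing/events/sched/sched_switch/filter
|      |  *
|      |  * Writing "0" clears the filter again.
|      |  */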
| 2169 | |
| 2170 | static LIST_HEAD(event_subsystems); |
| 2171 | |
| 2172 | static int subsystem_open(struct inode *inode, struct file *filp) |
| 2173 | { |
| 2174 | struct trace_subsystem_dir *dir = NULL, *iter_dir; |
| 2175 | struct trace_array *tr = NULL, *iter_tr; |
| 2176 | struct event_subsystem *system = NULL; |
| 2177 | int ret; |
| 2178 | |
| 2179 | if (tracing_is_disabled()) |
| 2180 | return -ENODEV; |
| 2181 | |
| 2182 | /* Make sure the system still exists */ |
| 2183 | mutex_lock(&event_mutex); |
| 2184 | mutex_lock(&trace_types_lock); |
| 2185 | list_for_each_entry(iter_tr, &ftrace_trace_arrays, list) { |
| 2186 | list_for_each_entry(iter_dir, &iter_tr->systems, list) { |
| 2187 | if (iter_dir == inode->i_private) { |
| 2188 | /* Don't open systems with no events */ |
| 2189 | tr = iter_tr; |
| 2190 | dir = iter_dir; |
| 2191 | if (dir->nr_events) { |
| 2192 | __get_system_dir(dir); |
| 2193 | system = dir->subsystem; |
| 2194 | } |
| 2195 | goto exit_loop; |
| 2196 | } |
| 2197 | } |
| 2198 | } |
| 2199 | exit_loop: |
| 2200 | mutex_unlock(&trace_types_lock);
| 2201 | mutex_unlock(&event_mutex);
| 2202 | |
| 2203 | if (!system) |
| 2204 | return -ENODEV; |
| 2205 | |
| 2206 | /* Still need to increment the ref count of the system */ |
| 2207 | if (trace_array_get(tr) < 0) { |
| 2208 | put_system(dir); |
| 2209 | return -ENODEV; |
| 2210 | } |
| 2211 | |
| 2212 | ret = tracing_open_generic(inode, filp); |
| 2213 | if (ret < 0) { |
| 2214 | trace_array_put(tr); |
| 2215 | put_system(dir); |
| 2216 | } |
| 2217 | |
| 2218 | return ret; |
| 2219 | } |
| 2220 | |
| 2221 | static int system_tr_open(struct inode *inode, struct file *filp) |
| 2222 | { |
| 2223 | struct trace_subsystem_dir *dir; |
| 2224 | struct trace_array *tr = inode->i_private; |
| 2225 | int ret; |
| 2226 | |
| 2227 | /* Make a temporary dir that has no system but points to tr */ |
| 2228 | dir = kzalloc(sizeof(*dir), GFP_KERNEL); |
| 2229 | if (!dir) |
| 2230 | return -ENOMEM; |
| 2231 | |
| 2232 | ret = tracing_open_generic_tr(inode, filp); |
| 2233 | if (ret < 0) { |
| 2234 | kfree(dir);
| 2235 | return ret; |
| 2236 | } |
| 2237 | dir->tr = tr; |
| 2238 | filp->private_data = dir; |
| 2239 | |
| 2240 | return 0; |
| 2241 | } |
| 2242 | |
| 2243 | static int subsystem_release(struct inode *inode, struct file *file) |
| 2244 | { |
| 2245 | struct trace_subsystem_dir *dir = file->private_data; |
| 2246 | |
| 2247 | trace_array_put(dir->tr);
| 2248 | |
| 2249 | /* |
| 2250 | * If dir->subsystem is NULL, then this is a temporary |
| 2251 | * descriptor that was made for a trace_array to enable |
| 2252 | * all subsystems. |
| 2253 | */ |
| 2254 | if (dir->subsystem) |
| 2255 | put_system(dir); |
| 2256 | else |
| 2257 | kfree(dir);
| 2258 | |
| 2259 | return 0; |
| 2260 | } |
| 2261 | |
| 2262 | static ssize_t |
| 2263 | subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt, |
| 2264 | loff_t *ppos) |
| 2265 | { |
| 2266 | struct trace_subsystem_dir *dir = filp->private_data; |
| 2267 | struct event_subsystem *system = dir->subsystem; |
| 2268 | struct trace_seq *s; |
| 2269 | int r; |
| 2270 | |
| 2271 | if (*ppos) |
| 2272 | return 0; |
| 2273 | |
| 2274 | s = kmalloc(sizeof(*s), GFP_KERNEL); |
| 2275 | if (!s) |
| 2276 | return -ENOMEM; |
| 2277 | |
| 2278 | trace_seq_init(s); |
| 2279 | |
| 2280 | print_subsystem_event_filter(system, s); |
| 2281 | r = simple_read_from_buffer(ubuf, cnt, ppos,
| 2282 | s->buffer, trace_seq_used(s));
| 2283 |
| 2284 | kfree(s);
| 2285 | |
| 2286 | return r; |
| 2287 | } |
| 2288 | |
| 2289 | static ssize_t |
| 2290 | subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, |
| 2291 | loff_t *ppos) |
| 2292 | { |
| 2293 | struct trace_subsystem_dir *dir = filp->private_data; |
| 2294 | char *buf; |
| 2295 | int err; |
| 2296 | |
| 2297 | if (cnt >= PAGE_SIZE) |
| 2298 | return -EINVAL; |
| 2299 | |
| 2300 | buf = memdup_user_nul(ubuf, cnt); |
| 2301 | if (IS_ERR(buf))
| 2302 | return PTR_ERR(buf);
| 2303 |
| 2304 | err = apply_subsystem_event_filter(dir, buf);
| 2305 | kfree(buf);
| 2306 | if (err < 0) |
| 2307 | return err; |
| 2308 | |
| 2309 | *ppos += cnt; |
| 2310 | |
| 2311 | return cnt; |
| 2312 | } |
| 2313 | |
| 2314 | static ssize_t |
| 2315 | show_header_page_file(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
| 2316 | { |
| 2317 | struct trace_array *tr = filp->private_data; |
| 2318 | struct trace_seq *s; |
| 2319 | int r; |
| 2320 | |
| 2321 | if (*ppos) |
| 2322 | return 0; |
| 2323 | |
| 2324 | s = kmalloc(sizeof(*s), GFP_KERNEL); |
| 2325 | if (!s) |
| 2326 | return -ENOMEM; |
| 2327 | |
| 2328 | trace_seq_init(s); |
| 2329 | |
| 2330 | ring_buffer_print_page_header(tr->array_buffer.buffer, s);
| 2331 | r = simple_read_from_buffer(ubuf, cnt, ppos,
| 2332 | s->buffer, trace_seq_used(s));
| 2333 |
| 2334 | kfree(s);
| 2335 | |
| 2336 | return r; |
| 2337 | } |
| 2338 | |
| 2339 | static ssize_t |
| 2340 | show_header_event_file(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
| 2341 | { |
| 2342 | struct trace_seq *s; |
| 2343 | int r; |
| 2344 | |
| 2345 | if (*ppos) |
| 2346 | return 0; |
| 2347 | |
| 2348 | s = kmalloc(sizeof(*s), GFP_KERNEL); |
| 2349 | if (!s) |
| 2350 | return -ENOMEM; |
| 2351 | |
| 2352 | trace_seq_init(s); |
| 2353 | |
| 2354 | ring_buffer_print_entry_header(s); |
| 2355 | r = simple_read_from_buffer(ubuf, cnt, ppos,
| 2356 | s->buffer, trace_seq_used(s));
| 2357 |
| 2358 | kfree(s);
| 2359 | |
| 2360 | return r; |
| 2361 | } |
| 2362 | |
| 2363 | static void ignore_task_cpu(void *data) |
| 2364 | { |
| 2365 | struct trace_array *tr = data; |
| 2366 | struct trace_pid_list *pid_list; |
| 2367 | struct trace_pid_list *no_pid_list; |
| 2368 | |
| 2369 | /* |
| 2370 | * This function is called by on_each_cpu() while the |
| 2371 | * event_mutex is held. |
| 2372 | */ |
| 2373 | pid_list = rcu_dereference_protected(tr->filtered_pids, |
| 2374 | mutex_is_locked(&event_mutex)); |
| 2375 | no_pid_list = rcu_dereference_protected(tr->filtered_no_pids, |
| 2376 | mutex_is_locked(&event_mutex)); |
| 2377 | |
| 2378 | this_cpu_write(tr->array_buffer.data->ignore_pid, |
| 2379 | trace_ignore_this_task(pid_list, no_pid_list, current)); |
| 2380 | } |
| 2381 | |
| 2382 | static void register_pid_events(struct trace_array *tr) |
| 2383 | { |
| 2384 | /* |
| 2385 | * Register a probe that is called before all other probes |
| 2386 | * to set ignore_pid if next or prev do not match. |
| 2387 | * Register a probe that is called after all other probes
| 2388 | * to only keep ignore_pid set if the next pid matches.
| 2389 | */ |
| 2390 | register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
| 2391 | tr, INT_MAX);
| 2392 | register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
| 2393 | tr, 0);
| 2394 |
| 2395 | register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
| 2396 | tr, INT_MAX);
| 2397 | register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
| 2398 | tr, 0);
| 2399 |
| 2400 | register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
| 2401 | tr, INT_MAX);
| 2402 | register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
| 2403 | tr, 0);
| 2404 |
| 2405 | register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
| 2406 | tr, INT_MAX);
| 2407 | register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
| 2408 | tr, 0);
| 2409 | } |
| 2410 | |
| 2411 | static ssize_t |
| 2412 | event_pid_write(struct file *filp, const char __user *ubuf, |
| 2413 | size_t cnt, loff_t *ppos, int type) |
| 2414 | { |
| 2415 | struct seq_file *m = filp->private_data; |
| 2416 | struct trace_array *tr = m->private; |
| 2417 | struct trace_pid_list *filtered_pids = NULL; |
| 2418 | struct trace_pid_list *other_pids = NULL; |
| 2419 | struct trace_pid_list *pid_list; |
| 2420 | struct trace_event_file *file; |
| 2421 | ssize_t ret; |
| 2422 | |
| 2423 | if (!cnt) |
| 2424 | return 0; |
| 2425 | |
| 2426 | ret = tracing_update_buffers(tr); |
| 2427 | if (ret < 0) |
| 2428 | return ret; |
| 2429 | |
| 2430 | guard(mutex)(&event_mutex);
| 2431 | |
| 2432 | if (type == TRACE_PIDS) { |
| 2433 | filtered_pids = rcu_dereference_protected(tr->filtered_pids, |
| 2434 | lockdep_is_held(&event_mutex)); |
| 2435 | other_pids = rcu_dereference_protected(tr->filtered_no_pids, |
| 2436 | lockdep_is_held(&event_mutex)); |
| 2437 | } else { |
| 2438 | filtered_pids = rcu_dereference_protected(tr->filtered_no_pids, |
| 2439 | lockdep_is_held(&event_mutex)); |
| 2440 | other_pids = rcu_dereference_protected(tr->filtered_pids, |
| 2441 | lockdep_is_held(&event_mutex)); |
| 2442 | } |
| 2443 | |
| 2444 | ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
| 2445 | if (ret < 0) |
| 2446 | return ret; |
| 2447 | |
| 2448 | if (type == TRACE_PIDS) |
| 2449 | rcu_assign_pointer(tr->filtered_pids, pid_list); |
| 2450 | else |
| 2451 | rcu_assign_pointer(tr->filtered_no_pids, pid_list); |
| 2452 | |
| 2453 | list_for_each_entry(file, &tr->events, list) { |
| 2454 | set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
| 2455 | } |
| 2456 | |
| 2457 | if (filtered_pids) { |
| 2458 | tracepoint_synchronize_unregister(); |
| 2459 | trace_pid_list_free(filtered_pids);
| 2460 | } else if (pid_list && !other_pids) { |
| 2461 | register_pid_events(tr); |
| 2462 | } |
| 2463 | |
| 2464 | /* |
| 2465 | * Ignoring of pids is done at task switch. But we have to |
| 2466 | * check for those tasks that are currently running. |
| 2467 | * Always do this in case a pid was appended or removed. |
| 2468 | */ |
| 2469 | on_each_cpu(ignore_task_cpu, tr, 1);
| 2470 | |
| 2471 | *ppos += ret; |
| 2472 | |
| 2473 | return ret; |
| 2474 | } |
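|      | /*
|      |  * Example PID filtering usage:
|      |  *
|      |  *   echo 123 > /sys/kernel/tracing/set_event_pid    # trace only pid 123
|      |  *   echo 456 >> /sys/kernel/tracing/set_event_pid   # append pid 456
|      |  *   echo > /sys/kernel/tracing/set_event_pid        # clear the list
|      |  *
|      |  * set_event_notrace_pid works the same way but excludes the listed
|      |  * pids instead.
|      |  */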
| 2475 | |
| 2476 | static ssize_t |
| 2477 | ftrace_event_pid_write(struct file *filp, const char __user *ubuf, |
| 2478 | size_t cnt, loff_t *ppos) |
| 2479 | { |
| 2480 | return event_pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
| 2481 | } |
| 2482 | |
| 2483 | static ssize_t |
| 2484 | ftrace_event_npid_write(struct file *filp, const char __user *ubuf, |
| 2485 | size_t cnt, loff_t *ppos) |
| 2486 | { |
| 2487 | return event_pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
| 2488 | } |
| 2489 | |
| 2490 | static int ftrace_event_avail_open(struct inode *inode, struct file *file); |
| 2491 | static int ftrace_event_set_open(struct inode *inode, struct file *file); |
| 2492 | static int ftrace_event_set_pid_open(struct inode *inode, struct file *file); |
| 2493 | static int ftrace_event_set_npid_open(struct inode *inode, struct file *file); |
| 2494 | static int ftrace_event_release(struct inode *inode, struct file *file); |
| 2495 | |
| 2496 | static const struct seq_operations show_event_seq_ops = { |
| 2497 | .start = t_start, |
| 2498 | .next = t_next, |
| 2499 | .show = t_show, |
| 2500 | .stop = t_stop, |
| 2501 | }; |
| 2502 | |
| 2503 | static const struct seq_operations show_set_event_seq_ops = { |
| 2504 | .start = s_start, |
| 2505 | .next = s_next, |
| 2506 | .show = s_show, |
| 2507 | .stop = s_stop, |
| 2508 | }; |
| 2509 | |
| 2510 | static const struct seq_operations show_set_pid_seq_ops = { |
| 2511 | .start = p_start, |
| 2512 | .next = p_next, |
| 2513 | .show = trace_pid_show, |
| 2514 | .stop = p_stop, |
| 2515 | }; |
| 2516 | |
| 2517 | static const struct seq_operations show_set_no_pid_seq_ops = { |
| 2518 | .start = np_start, |
| 2519 | .next = np_next, |
| 2520 | .show = trace_pid_show, |
| 2521 | .stop = p_stop, |
| 2522 | }; |
| 2523 | |
| 2524 | static const struct file_operations ftrace_avail_fops = { |
| 2525 | .open = ftrace_event_avail_open, |
| 2526 | .read = seq_read, |
| 2527 | .llseek = seq_lseek, |
| 2528 | .release = seq_release, |
| 2529 | }; |
| 2530 | |
| 2531 | static const struct file_operations ftrace_set_event_fops = { |
| 2532 | .open = ftrace_event_set_open, |
| 2533 | .read = seq_read, |
| 2534 | .write = ftrace_event_write, |
| 2535 | .llseek = seq_lseek, |
| 2536 | .release = ftrace_event_release, |
| 2537 | }; |
| 2538 | |
| 2539 | static const struct file_operations ftrace_set_event_pid_fops = { |
| 2540 | .open = ftrace_event_set_pid_open, |
| 2541 | .read = seq_read, |
| 2542 | .write = ftrace_event_pid_write, |
| 2543 | .llseek = seq_lseek, |
| 2544 | .release = ftrace_event_release, |
| 2545 | }; |
| 2546 | |
| 2547 | static const struct file_operations ftrace_set_event_notrace_pid_fops = { |
| 2548 | .open = ftrace_event_set_npid_open, |
| 2549 | .read = seq_read, |
| 2550 | .write = ftrace_event_npid_write, |
| 2551 | .llseek = seq_lseek, |
| 2552 | .release = ftrace_event_release, |
| 2553 | }; |
| 2554 | |
| 2555 | static const struct file_operations ftrace_enable_fops = { |
| 2556 | .open = tracing_open_file_tr, |
| 2557 | .read = event_enable_read, |
| 2558 | .write = event_enable_write, |
| 2559 | .release = tracing_release_file_tr, |
| 2560 | .llseek = default_llseek, |
| 2561 | }; |
| 2562 | |
| 2563 | static const struct file_operations ftrace_event_format_fops = { |
| 2564 | .open = trace_format_open, |
| 2565 | .read = seq_read, |
| 2566 | .llseek = seq_lseek, |
| 2567 | .release = seq_release, |
| 2568 | }; |
| 2569 | |
| 2570 | #ifdef CONFIG_PERF_EVENTS |
| 2571 | static const struct file_operations ftrace_event_id_fops = { |
| 2572 | .read = event_id_read, |
| 2573 | .llseek = default_llseek, |
| 2574 | }; |
| 2575 | #endif |
| 2576 | |
| 2577 | static const struct file_operations ftrace_event_filter_fops = { |
| 2578 | .open = tracing_open_file_tr, |
| 2579 | .read = event_filter_read, |
| 2580 | .write = event_filter_write, |
| 2581 | .release = tracing_release_file_tr, |
| 2582 | .llseek = default_llseek, |
| 2583 | }; |
| 2584 | |
| 2585 | static const struct file_operations ftrace_subsystem_filter_fops = { |
| 2586 | .open = subsystem_open, |
| 2587 | .read = subsystem_filter_read, |
| 2588 | .write = subsystem_filter_write, |
| 2589 | .llseek = default_llseek, |
| 2590 | .release = subsystem_release, |
| 2591 | }; |
| 2592 | |
| 2593 | static const struct file_operations ftrace_system_enable_fops = { |
| 2594 | .open = subsystem_open, |
| 2595 | .read = system_enable_read, |
| 2596 | .write = system_enable_write, |
| 2597 | .llseek = default_llseek, |
| 2598 | .release = subsystem_release, |
| 2599 | }; |
| 2600 | |
| 2601 | static const struct file_operations ftrace_tr_enable_fops = { |
| 2602 | .open = system_tr_open, |
| 2603 | .read = system_enable_read, |
| 2604 | .write = system_enable_write, |
| 2605 | .llseek = default_llseek, |
| 2606 | .release = subsystem_release, |
| 2607 | }; |
| 2608 | |
| 2609 | static const struct file_operations ftrace_show_header_page_fops = {
| 2610 | .open = tracing_open_generic_tr, |
| 2611 | .read = show_header_page_file, |
| 2612 | .llseek = default_llseek, |
| 2613 | .release = tracing_release_generic_tr, |
| 2614 | }; |
| 2615 | |
| 2616 | static const struct file_operations ftrace_show_header_event_fops = {
| 2617 | .open = tracing_open_generic_tr, |
| 2618 | .read = show_header_event_file, |
| 2619 | .llseek = default_llseek, |
| 2620 | .release = tracing_release_generic_tr, |
| 2621 | }; |
| 2622 | |
| 2623 | static int |
| 2624 | ftrace_event_open(struct inode *inode, struct file *file, |
| 2625 | const struct seq_operations *seq_ops) |
| 2626 | { |
| 2627 | struct seq_file *m; |
| 2628 | int ret; |
| 2629 | |
| 2630 | ret = security_locked_down(LOCKDOWN_TRACEFS);
| 2631 | if (ret) |
| 2632 | return ret; |
| 2633 | |
| 2634 | ret = seq_open(file, seq_ops); |
| 2635 | if (ret < 0) |
| 2636 | return ret; |
| 2637 | m = file->private_data; |
| 2638 | /* copy tr over to seq ops */ |
| 2639 | m->private = inode->i_private; |
| 2640 | |
| 2641 | return ret; |
| 2642 | } |
| 2643 | |
| 2644 | static int ftrace_event_release(struct inode *inode, struct file *file) |
| 2645 | { |
| 2646 | struct trace_array *tr = inode->i_private; |
| 2647 | |
| 2648 | trace_array_put(tr); |
| 2649 | |
| 2650 | return seq_release(inode, file); |
| 2651 | } |
| 2652 | |
| 2653 | static int |
| 2654 | ftrace_event_avail_open(struct inode *inode, struct file *file) |
| 2655 | { |
| 2656 | const struct seq_operations *seq_ops = &show_event_seq_ops; |
| 2657 | |
| 2658 | /* Checks for tracefs lockdown */ |
| 2659 | return ftrace_event_open(inode, file, seq_ops); |
| 2660 | } |
| 2661 | |
| 2662 | static int |
| 2663 | ftrace_event_set_open(struct inode *inode, struct file *file) |
| 2664 | { |
| 2665 | const struct seq_operations *seq_ops = &show_set_event_seq_ops; |
| 2666 | struct trace_array *tr = inode->i_private; |
| 2667 | int ret; |
| 2668 | |
| 2669 | ret = tracing_check_open_get_tr(tr); |
| 2670 | if (ret) |
| 2671 | return ret; |
| 2672 | |
| 2673 | if ((file->f_mode & FMODE_WRITE) && |
| 2674 | (file->f_flags & O_TRUNC)) |
| 2675 | ftrace_clear_events(tr); |
| 2676 | |
| 2677 | ret = ftrace_event_open(inode, file, seq_ops); |
| 2678 | if (ret < 0) |
| 2679 | trace_array_put(tr); |
| 2680 | return ret; |
| 2681 | } |
| 2682 | |
| 2683 | static int |
| 2684 | ftrace_event_set_pid_open(struct inode *inode, struct file *file) |
| 2685 | { |
| 2686 | const struct seq_operations *seq_ops = &show_set_pid_seq_ops; |
| 2687 | struct trace_array *tr = inode->i_private; |
| 2688 | int ret; |
| 2689 | |
| 2690 | ret = tracing_check_open_get_tr(tr); |
| 2691 | if (ret) |
| 2692 | return ret; |
| 2693 | |
| 2694 | if ((file->f_mode & FMODE_WRITE) && |
| 2695 | (file->f_flags & O_TRUNC)) |
| 2696 | ftrace_clear_event_pids(tr, TRACE_PIDS);
| 2697 | |
| 2698 | ret = ftrace_event_open(inode, file, seq_ops); |
| 2699 | if (ret < 0) |
| 2700 | trace_array_put(tr); |
| 2701 | return ret; |
| 2702 | } |
| 2703 | |
| 2704 | static int |
| 2705 | ftrace_event_set_npid_open(struct inode *inode, struct file *file) |
| 2706 | { |
| 2707 | const struct seq_operations *seq_ops = &show_set_no_pid_seq_ops; |
| 2708 | struct trace_array *tr = inode->i_private; |
| 2709 | int ret; |
| 2710 | |
| 2711 | ret = tracing_check_open_get_tr(tr); |
| 2712 | if (ret) |
| 2713 | return ret; |
| 2714 | |
| 2715 | if ((file->f_mode & FMODE_WRITE) && |
| 2716 | (file->f_flags & O_TRUNC)) |
| 2717 | ftrace_clear_event_pids(tr, TRACE_NO_PIDS);
| 2718 | |
| 2719 | ret = ftrace_event_open(inode, file, seq_ops); |
| 2720 | if (ret < 0) |
| 2721 | trace_array_put(tr); |
| 2722 | return ret; |
| 2723 | } |
| 2724 | |
| 2725 | static struct event_subsystem * |
| 2726 | create_new_subsystem(const char *name) |
| 2727 | { |
| 2728 | struct event_subsystem *system; |
| 2729 | |
| 2730 | /* need to create new entry */ |
| 2731 | system = kmalloc(sizeof(*system), GFP_KERNEL); |
| 2732 | if (!system) |
| 2733 | return NULL; |
| 2734 | |
| 2735 | system->ref_count = 1; |
| 2736 | |
| 2737 | /* Only allocate if dynamic (kprobes and modules) */ |
| 2738 | system->name = kstrdup_const(name, GFP_KERNEL);
| 2739 | if (!system->name) |
| 2740 | goto out_free; |
| 2741 | |
| 2742 | system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL); |
| 2743 | if (!system->filter) |
| 2744 | goto out_free; |
| 2745 | |
| 2746 | list_add(&system->list, &event_subsystems);
| 2747 | |
| 2748 | return system; |
| 2749 | |
| 2750 | out_free: |
| 2751 | kfree_const(system->name);
| 2752 | kfree(system);
| 2753 | return NULL; |
| 2754 | } |
| 2755 | |
| 2756 | static int system_callback(const char *name, umode_t *mode, void **data, |
| 2757 | const struct file_operations **fops) |
| 2758 | { |
| 2759 | if (strcmp(name, "filter") == 0)
| 2760 | *fops = &ftrace_subsystem_filter_fops;
| 2761 |
| 2762 | else if (strcmp(name, "enable") == 0)
| 2763 | *fops = &ftrace_system_enable_fops; |
| 2764 | |
| 2765 | else |
| 2766 | return 0; |
| 2767 | |
| 2768 | *mode = TRACE_MODE_WRITE; |
| 2769 | return 1; |
| 2770 | } |
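|      | /*
|      |  * eventfs entry callbacks such as the one above are invoked lazily
|      |  * when the directory is populated: returning 0 means "no such file
|      |  * here", returning 1 means *mode and *fops (and optionally *data)
|      |  * were filled in for the named entry.
|      |  */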
| 2771 | |
| 2772 | static struct eventfs_inode * |
| 2773 | event_subsystem_dir(struct trace_array *tr, const char *name, |
| 2774 | struct trace_event_file *file, struct eventfs_inode *parent) |
| 2775 | { |
| 2776 | struct event_subsystem *system, *iter; |
| 2777 | struct trace_subsystem_dir *dir; |
| 2778 | struct eventfs_inode *ei; |
| 2779 | int nr_entries; |
| 2780 | static struct eventfs_entry system_entries[] = { |
| 2781 | { |
| 2782 | .name = "filter",
| 2783 | .callback = system_callback, |
| 2784 | }, |
| 2785 | { |
| 2786 | .name = "enable",
| 2787 | .callback = system_callback, |
| 2788 | } |
| 2789 | }; |
| 2790 | |
| 2791 | /* First see if we did not already create this dir */ |
| 2792 | list_for_each_entry(dir, &tr->systems, list) { |
| 2793 | system = dir->subsystem; |
| 2794 | if (strcmp(system->name, name) == 0) { |
| 2795 | dir->nr_events++; |
| 2796 | file->system = dir; |
| 2797 | return dir->ei; |
| 2798 | } |
| 2799 | } |
| 2800 | |
| 2801 | /* Now see if the system itself exists. */ |
| 2802 | system = NULL; |
| 2803 | list_for_each_entry(iter, &event_subsystems, list) { |
| 2804 | if (strcmp(iter->name, name) == 0) { |
| 2805 | system = iter; |
| 2806 | break; |
| 2807 | } |
| 2808 | } |
| 2809 | |
| 2810 | dir = kmalloc(sizeof(*dir), GFP_KERNEL); |
| 2811 | if (!dir) |
| 2812 | goto out_fail; |
| 2813 | |
| 2814 | if (!system) { |
| 2815 | system = create_new_subsystem(name); |
| 2816 | if (!system) |
| 2817 | goto out_free; |
| 2818 | } else |
| 2819 | __get_system(system); |
| 2820 | |
| 2821 | /* ftrace only has directories no files */ |
| 2822 | if (strcmp(name, "ftrace") == 0)
| 2823 | nr_entries = 0; |
| 2824 | else |
| 2825 | nr_entries = ARRAY_SIZE(system_entries); |
| 2826 | |
| 2827 | ei = eventfs_create_dir(name, parent, system_entries, nr_entries, dir);
| 2828 | if (IS_ERR(ei)) {
| 2829 | pr_warn("Failed to create system directory %s\n", name);
| 2830 | __put_system(system); |
| 2831 | goto out_free; |
| 2832 | } |
| 2833 | |
| 2834 | dir->ei = ei; |
| 2835 | dir->tr = tr; |
| 2836 | dir->ref_count = 1; |
| 2837 | dir->nr_events = 1; |
| 2838 | dir->subsystem = system; |
| 2839 | file->system = dir; |
| 2840 | |
| 2841 | list_add(new: &dir->list, head: &tr->systems); |
| 2842 | |
| 2843 | return dir->ei; |
| 2844 | |
| 2845 | out_free: |
| 2846 | kfree(dir);
| 2847 | out_fail: |
| 2848 | /* Only print this message if failed on memory allocation */ |
| 2849 | if (!dir || !system) |
| 2850 | pr_warn("No memory to create event subsystem %s\n", name);
| 2851 | return NULL; |
| 2852 | } |
| 2853 | |
| 2854 | static int |
| 2855 | event_define_fields(struct trace_event_call *call) |
| 2856 | { |
| 2857 | struct list_head *head; |
| 2858 | int ret = 0; |
| 2859 | |
| 2860 | /* |
| 2861 | * Other events may have the same class. Only update |
| 2862 | * the fields if they are not already defined. |
| 2863 | */ |
| 2864 | head = trace_get_fields(call);
| 2865 | if (list_empty(head)) { |
| 2866 | struct trace_event_fields *field = call->class->fields_array; |
| 2867 | unsigned int offset = sizeof(struct trace_entry); |
| 2868 | |
| 2869 | for (; field->type; field++) { |
| 2870 | if (field->type == TRACE_FUNCTION_TYPE) { |
| 2871 | field->define_fields(call); |
| 2872 | break; |
| 2873 | } |
| 2874 | |
| 2875 | offset = ALIGN(offset, field->align); |
| 2876 | ret = trace_define_field_ext(call, field->type, field->name,
| 2877 | offset, field->size,
| 2878 | field->is_signed, field->filter_type,
| 2879 | field->len, field->needs_test);
| 2880 | if (WARN_ON_ONCE(ret)) {
| 2881 | pr_err("error code is %d\n", ret);
| 2882 | break; |
| 2883 | } |
| 2884 | |
| 2885 | offset += field->size; |
| 2886 | } |
| 2887 | } |
| 2888 | |
| 2889 | return ret; |
| 2890 | } |
| 2891 | |
| 2892 | static int event_callback(const char *name, umode_t *mode, void **data, |
| 2893 | const struct file_operations **fops) |
| 2894 | { |
| 2895 | struct trace_event_file *file = *data; |
| 2896 | struct trace_event_call *call = file->event_call; |
| 2897 | |
| 2898 | if (strcmp(name, "format") == 0) {
| 2899 | *mode = TRACE_MODE_READ; |
| 2900 | *fops = &ftrace_event_format_fops; |
| 2901 | return 1; |
| 2902 | } |
| 2903 | |
| 2904 | /* |
| 2905 | * Only event directories that can be enabled should have |
| 2906 | * triggers or filters, with the exception of the "print" |
| 2907 | * event that can have a "trigger" file. |
| 2908 | */ |
| 2909 | if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) { |
| 2910 | if (call->class->reg && strcmp(name, "enable") == 0) {
| 2911 | *mode = TRACE_MODE_WRITE; |
| 2912 | *fops = &ftrace_enable_fops; |
| 2913 | return 1; |
| 2914 | } |
| 2915 | |
| 2916 | if (strcmp(name, "filter") == 0) {
| 2917 | *mode = TRACE_MODE_WRITE; |
| 2918 | *fops = &ftrace_event_filter_fops; |
| 2919 | return 1; |
| 2920 | } |
| 2921 | } |
| 2922 | |
| 2923 | if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) || |
| 2924 | strcmp(trace_event_name(call), "print") == 0) {
| 2925 | if (strcmp(name, "trigger") == 0) {
| 2926 | *mode = TRACE_MODE_WRITE; |
| 2927 | *fops = &event_trigger_fops; |
| 2928 | return 1; |
| 2929 | } |
| 2930 | } |
| 2931 | |
| 2932 | #ifdef CONFIG_PERF_EVENTS |
| 2933 | if (call->event.type && call->class->reg && |
| 2934 | strcmp(name, "id") == 0) {
| 2935 | *mode = TRACE_MODE_READ; |
| 2936 | *data = (void *)(long)call->event.type; |
| 2937 | *fops = &ftrace_event_id_fops; |
| 2938 | return 1; |
| 2939 | } |
| 2940 | #endif |
| 2941 | |
| 2942 | #ifdef CONFIG_HIST_TRIGGERS |
| 2943 | if (strcmp(name, "hist") == 0) {
| 2944 | *mode = TRACE_MODE_READ; |
| 2945 | *fops = &event_hist_fops; |
| 2946 | return 1; |
| 2947 | } |
| 2948 | #endif |
| 2949 | #ifdef CONFIG_HIST_TRIGGERS_DEBUG |
| 2950 | if (strcmp(name, "hist_debug") == 0) {
| 2951 | *mode = TRACE_MODE_READ; |
| 2952 | *fops = &event_hist_debug_fops; |
| 2953 | return 1; |
| 2954 | } |
| 2955 | #endif |
| 2956 | #ifdef CONFIG_TRACE_EVENT_INJECT |
| 2957 | if (call->event.type && call->class->reg && |
| 2958 | strcmp(name, "inject") == 0) {
| 2959 | *mode = 0200; |
| 2960 | *fops = &event_inject_fops; |
| 2961 | return 1; |
| 2962 | } |
| 2963 | #endif |
| 2964 | return 0; |
| 2965 | } |
| 2966 | |
| 2967 | /* The file's ref count is taken on creation; freeing the "enable" file drops it */
| 2968 | static void event_release(const char *name, void *data) |
| 2969 | { |
| 2970 | struct trace_event_file *file = data; |
| 2971 | |
| 2972 | event_file_put(file); |
| 2973 | } |
| 2974 | |
| 2975 | static int |
| 2976 | event_create_dir(struct eventfs_inode *parent, struct trace_event_file *file) |
| 2977 | { |
| 2978 | struct trace_event_call *call = file->event_call; |
| 2979 | struct trace_array *tr = file->tr; |
| 2980 | struct eventfs_inode *e_events; |
| 2981 | struct eventfs_inode *ei; |
| 2982 | const char *name; |
| 2983 | int nr_entries; |
| 2984 | int ret; |
| 2985 | static struct eventfs_entry event_entries[] = { |
| 2986 | { |
| 2987 | .name = "enable",
| 2988 | .callback = event_callback, |
| 2989 | .release = event_release, |
| 2990 | }, |
| 2991 | { |
| 2992 | .name = "filter",
| 2993 | .callback = event_callback, |
| 2994 | }, |
| 2995 | { |
| 2996 | .name = "trigger",
| 2997 | .callback = event_callback, |
| 2998 | }, |
| 2999 | { |
| 3000 | .name = "format",
| 3001 | .callback = event_callback, |
| 3002 | }, |
| 3003 | #ifdef CONFIG_PERF_EVENTS |
| 3004 | { |
| 3005 | .name = "id",
| 3006 | .callback = event_callback, |
| 3007 | }, |
| 3008 | #endif |
| 3009 | #ifdef CONFIG_HIST_TRIGGERS |
| 3010 | { |
| 3011 | .name = "hist",
| 3012 | .callback = event_callback, |
| 3013 | }, |
| 3014 | #endif |
| 3015 | #ifdef CONFIG_HIST_TRIGGERS_DEBUG |
| 3016 | { |
| 3017 | .name = "hist_debug",
| 3018 | .callback = event_callback, |
| 3019 | }, |
| 3020 | #endif |
| 3021 | #ifdef CONFIG_TRACE_EVENT_INJECT |
| 3022 | { |
| 3023 | .name = "inject",
| 3024 | .callback = event_callback, |
| 3025 | }, |
| 3026 | #endif |
| 3027 | }; |
| 3028 | |
| 3029 | /* |
| 3030 | * If the trace point header did not define TRACE_SYSTEM |
| 3031 | * then the system would be called "TRACE_SYSTEM". This should |
| 3032 | * never happen. |
| 3033 | */ |
| 3034 | if (WARN_ON_ONCE(strcmp(call->class->system, TRACE_SYSTEM) == 0)) |
| 3035 | return -ENODEV; |
| 3036 | |
| 3037 | e_events = event_subsystem_dir(tr, call->class->system, file, parent);
| 3038 | if (!e_events) |
| 3039 | return -ENOMEM; |
| 3040 | |
| 3041 | nr_entries = ARRAY_SIZE(event_entries); |
| 3042 | |
| 3043 | name = trace_event_name(call); |
| 3044 | ei = eventfs_create_dir(name, e_events, event_entries, nr_entries, file);
| 3045 | if (IS_ERR(ei)) {
| 3046 | pr_warn("Could not create tracefs '%s' directory\n", name);
| 3047 | return -1; |
| 3048 | } |
| 3049 | |
| 3050 | file->ei = ei; |
| 3051 | |
| 3052 | ret = event_define_fields(call); |
| 3053 | if (ret < 0) { |
| 3054 | pr_warn("Could not initialize trace point events/%s\n", name);
| 3055 | return ret; |
| 3056 | } |
| 3057 | |
| 3058 | /* Gets decremented on freeing of the "enable" file */ |
| 3059 | event_file_get(file); |
| 3060 | |
| 3061 | return 0; |
| 3062 | } |
| 3063 | |
| 3064 | static void remove_event_from_tracers(struct trace_event_call *call) |
| 3065 | { |
| 3066 | struct trace_event_file *file; |
| 3067 | struct trace_array *tr; |
| 3068 | |
| 3069 | do_for_each_event_file_safe(tr, file) { |
| 3070 | if (file->event_call != call) |
| 3071 | continue; |
| 3072 | |
| 3073 | remove_event_file_dir(file); |
| 3074 | /* |
| 3075 | * The do_for_each_event_file_safe() is |
| 3076 | * a double loop. After finding the call for this |
| 3077 | * trace_array, we use break to jump to the next |
| 3078 | * trace_array. |
| 3079 | */ |
| 3080 | break; |
| 3081 | } while_for_each_event_file(); |
| 3082 | } |
| 3083 | |
| 3084 | static void event_remove(struct trace_event_call *call) |
| 3085 | { |
| 3086 | struct trace_array *tr; |
| 3087 | struct trace_event_file *file; |
| 3088 | |
| 3089 | do_for_each_event_file(tr, file) { |
| 3090 | if (file->event_call != call) |
| 3091 | continue; |
| 3092 | |
| 3093 | if (file->flags & EVENT_FILE_FL_WAS_ENABLED) |
| 3094 | tr->clear_trace = true; |
| 3095 | |
| 3096 | ftrace_event_enable_disable(file, 0);
| 3097 | /* |
| 3098 | * The do_for_each_event_file() is |
| 3099 | * a double loop. After finding the call for this |
| 3100 | * trace_array, we use break to jump to the next |
| 3101 | * trace_array. |
| 3102 | */ |
| 3103 | break; |
| 3104 | } while_for_each_event_file(); |
| 3105 | |
| 3106 | if (call->event.funcs) |
| 3107 | __unregister_trace_event(&call->event);
| 3108 | remove_event_from_tracers(call);
| 3109 | list_del(&call->list);
| 3110 | } |
| 3111 | |
| 3112 | static int event_init(struct trace_event_call *call) |
| 3113 | { |
| 3114 | int ret = 0; |
| 3115 | const char *name; |
| 3116 | |
| 3117 | name = trace_event_name(call); |
| 3118 | if (WARN_ON(!name)) |
| 3119 | return -EINVAL; |
| 3120 | |
| 3121 | if (call->class->raw_init) { |
| 3122 | ret = call->class->raw_init(call); |
| 3123 | if (ret < 0 && ret != -ENOSYS) |
| 3124 | pr_warn("Could not initialize trace events/%s\n", name);
| 3125 | } |
| 3126 | |
| 3127 | return ret; |
| 3128 | } |
| 3129 | |
| 3130 | static int |
| 3131 | __register_event(struct trace_event_call *call, struct module *mod) |
| 3132 | { |
| 3133 | int ret; |
| 3134 | |
| 3135 | ret = event_init(call); |
| 3136 | if (ret < 0) |
| 3137 | return ret; |
| 3138 | |
| 3139 | down_write(&trace_event_sem);
| 3140 | list_add(&call->list, &ftrace_events);
| 3141 | up_write(&trace_event_sem);
| 3142 | |
| 3143 | if (call->flags & TRACE_EVENT_FL_DYNAMIC) |
| 3144 | atomic_set(&call->refcnt, 0);
| 3145 | else |
| 3146 | call->module = mod; |
| 3147 | |
| 3148 | return 0; |
| 3149 | } |
| 3150 | |
| 3151 | static char *eval_replace(char *ptr, struct trace_eval_map *map, int len) |
| 3152 | { |
| 3153 | int rlen; |
| 3154 | int elen; |
| 3155 | |
| 3156 | /* Find the length of the eval value as a string */ |
| 3157 | elen = snprintf(ptr, 0, "%ld", map->eval_value);
| 3158 | /* Make sure there's enough room to replace the string with the value */ |
| 3159 | if (len < elen) |
| 3160 | return NULL; |
| 3161 | |
| 3162 | snprintf(ptr, elen + 1, "%ld", map->eval_value);
| 3163 | |
| 3164 | /* Get the rest of the string of ptr */ |
| 3165 | rlen = strlen(ptr + len); |
| 3166 | memmove(ptr + elen, ptr + len, rlen); |
| 3167 | /* Make sure we end the new string */ |
| 3168 | ptr[elen + rlen] = 0; |
| 3169 | |
| 3170 | return ptr + elen; |
| 3171 | } |
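|      | /*
|      |  * Worked example: if map->eval_string is "MY_ENUM_VAL" (hypothetical)
|      |  * and map->eval_value is 2, a format string containing "MY_ENUM_VAL"
|      |  * is rewritten in place. snprintf() writes "2" over the start of the
|      |  * name, then memmove() shifts the tail of the string left over the
|      |  * name's remaining characters.
|      |  */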
| 3172 | |
| 3173 | static void update_event_printk(struct trace_event_call *call, |
| 3174 | struct trace_eval_map *map) |
| 3175 | { |
| 3176 | char *ptr; |
| 3177 | int quote = 0; |
| 3178 | int len = strlen(map->eval_string); |
| 3179 | |
| 3180 | for (ptr = call->print_fmt; *ptr; ptr++) { |
| 3181 | if (*ptr == '\\') { |
| 3182 | ptr++; |
| 3183 | /* paranoid */ |
| 3184 | if (!*ptr) |
| 3185 | break; |
| 3186 | continue; |
| 3187 | } |
| 3188 | if (*ptr == '"') { |
| 3189 | quote ^= 1; |
| 3190 | continue; |
| 3191 | } |
| 3192 | if (quote) |
| 3193 | continue; |
| 3194 | if (isdigit(*ptr)) {
| 3195 | /* skip numbers */ |
| 3196 | do { |
| 3197 | ptr++; |
| 3198 | /* Check for alpha chars like ULL */ |
| 3199 | } while (isalnum(*ptr)); |
| 3200 | if (!*ptr) |
| 3201 | break; |
| 3202 | /* |
| 3203 | * A number must have some kind of delimiter after |
| 3204 | * it, and we can ignore that too. |
| 3205 | */ |
| 3206 | continue; |
| 3207 | } |
| 3208 | if (isalpha(*ptr) || *ptr == '_') { |
| 3209 | if (strncmp(map->eval_string, ptr, len) == 0 && |
| 3210 | !isalnum(ptr[len]) && ptr[len] != '_') { |
| 3211 | ptr = eval_replace(ptr, map, len); |
| 3212 | /* enum/sizeof string smaller than value */ |
| 3213 | if (WARN_ON_ONCE(!ptr)) |
| 3214 | return; |
| 3215 | /* |
| 3216 | * No need to decrement here, as eval_replace() |
| 3217 | * returns the pointer to the character just past
| 3218 | * the eval, and two evals cannot be placed
| 3219 | * back to back without something in between. |
| 3220 | * We can skip that something in between. |
| 3221 | */ |
| 3222 | continue; |
| 3223 | } |
| 3224 | skip_more: |
| 3225 | do { |
| 3226 | ptr++; |
| 3227 | } while (isalnum(*ptr) || *ptr == '_'); |
| 3228 | if (!*ptr) |
| 3229 | break; |
| 3230 | /* |
| 3231 | * If what comes after this variable is a '.' or |
| 3232 | * '->' then we can continue to ignore that string. |
| 3233 | */ |
| 3234 | if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) { |
| 3235 | ptr += *ptr == '.' ? 1 : 2; |
| 3236 | if (!*ptr) |
| 3237 | break; |
| 3238 | goto skip_more; |
| 3239 | } |
| 3240 | /* |
| 3241 | * Once again, we can skip the delimiter that came |
| 3242 | * after the string. |
| 3243 | */ |
| 3244 | continue; |
| 3245 | } |
| 3246 | } |
| 3247 | } |
| 3248 | |
| 3249 | static void add_str_to_module(struct module *module, char *str) |
| 3250 | { |
| 3251 | struct module_string *modstr; |
| 3252 | |
| 3253 | modstr = kmalloc(sizeof(*modstr), GFP_KERNEL); |
| 3254 | |
| 3255 | /* |
| 3256 | * If we failed to allocate memory here, then we'll just |
| 3257 | * let the str memory leak when the module is removed. |
| 3258 | * If this fails to allocate, there are worse problems than
| 3259 | * a leaked string on module removal. |
| 3260 | */ |
| 3261 | if (WARN_ON_ONCE(!modstr)) |
| 3262 | return; |
| 3263 | |
| 3264 | modstr->module = module; |
| 3265 | modstr->str = str; |
| 3266 | |
| 3267 | list_add(&modstr->next, &module_strings);
| 3268 | } |
| 3269 | |
| 3270 | #define ATTRIBUTE_STR "__attribute__(" |
| 3271 | #define ATTRIBUTE_STR_LEN (sizeof(ATTRIBUTE_STR) - 1) |
| 3272 | |
| 3273 | /* Remove all __attribute__() from @type. Return allocated string or @type. */ |
| 3274 | static char *sanitize_field_type(const char *type) |
| 3275 | { |
| 3276 | char *attr, *tmp, *next, *ret = (char *)type; |
| 3277 | int depth; |
| 3278 | |
| 3279 | next = (char *)type; |
| 3280 | while ((attr = strstr(next, ATTRIBUTE_STR))) { |
| 3281 | /* Retry if "__attribute__(" is a part of another word. */ |
| 3282 | if (attr != next && !isspace(attr[-1])) { |
| 3283 | next = attr + ATTRIBUTE_STR_LEN; |
| 3284 | continue; |
| 3285 | } |
| 3286 | |
| 3287 | if (ret == type) { |
| 3288 | ret = kstrdup(type, GFP_KERNEL);
| 3289 | if (WARN_ON_ONCE(!ret)) |
| 3290 | return NULL; |
| 3291 | attr = ret + (attr - type); |
| 3292 | } |
| 3293 | |
| 3294 | /* the ATTRIBUTE_STR already has the first '(' */ |
| 3295 | depth = 1; |
| 3296 | next = attr + ATTRIBUTE_STR_LEN; |
| 3297 | do { |
| 3298 | tmp = strpbrk(next, "()");
| 3299 | /* There are unbalanced parentheses */
| 3300 | if (WARN_ON_ONCE(!tmp)) {
| 3301 | kfree(ret);
| 3302 | return (char *)type; |
| 3303 | } |
| 3304 | |
| 3305 | if (*tmp == '(') |
| 3306 | depth++; |
| 3307 | else |
| 3308 | depth--; |
| 3309 | next = tmp + 1; |
| 3310 | } while (depth > 0); |
| 3311 | next = skip_spaces(next); |
| 3312 | strcpy(attr, next);
| 3313 | next = attr; |
| 3314 | } |
| 3315 | return ret; |
| 3316 | } |
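|      | /*
|      |  * For example, sanitize_field_type() turns
|      |  *   "__u8 __attribute__((user)) *uptr"
|      |  * into
|      |  *   "__u8 *uptr"
|      |  * and returns a newly allocated string; when no "__attribute__(" is
|      |  * present, the original pointer is returned untouched.
|      |  */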
| 3317 | |
| 3318 | static char *find_replacable_eval(const char *type, const char *eval_string, |
| 3319 | int len) |
| 3320 | { |
| 3321 | char *ptr; |
| 3322 | |
| 3323 | if (!eval_string) |
| 3324 | return NULL; |
| 3325 | |
| 3326 | ptr = strchr(type, '['); |
| 3327 | if (!ptr) |
| 3328 | return NULL; |
| 3329 | ptr++; |
| 3330 | |
| 3331 | if (!isalpha(*ptr) && *ptr != '_') |
| 3332 | return NULL; |
| 3333 | |
| 3334 | if (strncmp(eval_string, ptr, len) != 0) |
| 3335 | return NULL; |
| 3336 | |
| 3337 | return ptr; |
| 3338 | } |
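|      | /*
|      |  * This matches array sizes spelled with an eval. For a field type of
|      |  * "u64 counts[MAX_COUNTERS]" (name illustrative), it returns a
|      |  * pointer to "MAX_COUNTERS" inside the type string so eval_replace()
|      |  * can substitute the numeric value.
|      |  */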
| 3339 | |
| 3340 | static void update_event_fields(struct trace_event_call *call, |
| 3341 | struct trace_eval_map *map) |
| 3342 | { |
| 3343 | struct ftrace_event_field *field; |
| 3344 | const char *eval_string = NULL; |
| 3345 | struct list_head *head; |
| 3346 | int len = 0; |
| 3347 | char *ptr; |
| 3348 | char *str; |
| 3349 | |
| 3350 | /* Dynamic events should never have field maps */ |
| 3351 | if (call->flags & TRACE_EVENT_FL_DYNAMIC) |
| 3352 | return; |
| 3353 | |
| 3354 | if (map) { |
| 3355 | eval_string = map->eval_string; |
| 3356 | len = strlen(map->eval_string); |
| 3357 | } |
| 3358 | |
| 3359 | head = trace_get_fields(call);
| 3360 | list_for_each_entry(field, head, link) {
| 3361 | str = sanitize_field_type(field->type);
| 3362 | if (!str) |
| 3363 | return; |
| 3364 | |
| 3365 | ptr = find_replacable_eval(str, eval_string, len);
| 3366 | if (ptr) { |
| 3367 | if (str == field->type) { |
| 3368 | str = kstrdup(field->type, GFP_KERNEL);
| 3369 | if (WARN_ON_ONCE(!str)) |
| 3370 | return; |
| 3371 | ptr = str + (ptr - field->type); |
| 3372 | } |
| 3373 | |
| 3374 | ptr = eval_replace(ptr, map, len); |
| 3375 | /* enum/sizeof string smaller than value */ |
| 3376 | if (WARN_ON_ONCE(!ptr)) { |
| 3377 | kfree(str);
| 3378 | continue; |
| 3379 | } |
| 3380 | } |
| 3381 | |
| 3382 | if (str == field->type) |
| 3383 | continue; |
| 3384 | /* |
| 3385 | * If the event is part of a module, then we need to free the string |
| 3386 | * when the module is removed. Otherwise, it will stay allocated |
| 3387 | * until a reboot. |
| 3388 | */ |
| 3389 | if (call->module) |
| 3390 | add_str_to_module(call->module, str);
| 3391 | |
| 3392 | field->type = str; |
| 3393 | if (field->filter_type == FILTER_OTHER) |
| 3394 | field->filter_type = filter_assign_type(field->type);
| 3395 | } |
| 3396 | } |
| 3397 | |
| 3398 | /* Update all events for replacing eval and sanitizing */ |
| 3399 | void trace_event_update_all(struct trace_eval_map **map, int len) |
| 3400 | { |
| 3401 | struct trace_event_call *call, *p; |
| 3402 | const char *last_system = NULL; |
| 3403 | bool first = false; |
| 3404 | bool updated; |
| 3405 | int last_i; |
| 3406 | int i; |
| 3407 | |
| 3408 | down_write(&trace_event_sem);
| 3409 | list_for_each_entry_safe(call, p, &ftrace_events, list) { |
| 3410 | /* events are usually grouped together with systems */ |
| 3411 | if (!last_system || call->class->system != last_system) { |
| 3412 | first = true; |
| 3413 | last_i = 0; |
| 3414 | last_system = call->class->system; |
| 3415 | } |
| 3416 | |
| 3417 | updated = false; |
| 3418 | /* |
| 3419 | * Since calls are grouped by systems, the likelihood that the |
| 3420 | * next call in the iteration belongs to the same system as the |
| 3421 | * previous call is high. As an optimization, we skip searching |
| 3422 | * for a map[] that matches the call's system if the last call |
| 3423 | * was from the same system. That's what last_i is for. If the |
| 3424 | * call has the same system as the previous call, then last_i |
| 3425 | * will be the index of the first map[] that has a matching |
| 3426 | * system. |
| 3427 | */ |
| 3428 | for (i = last_i; i < len; i++) { |
| 3429 | if (call->class->system == map[i]->system) { |
| 3430 | /* Save the first system if need be */ |
| 3431 | if (first) { |
| 3432 | last_i = i; |
| 3433 | first = false; |
| 3434 | } |
| 3435 | update_event_printk(call, map[i]); |
| 3436 | update_event_fields(call, map[i]); |
| 3437 | updated = true; |
| 3438 | } |
| 3439 | } |
| 3440 | /* If not updated yet, update the fields for sanitizing. */ |
| 3441 | if (!updated) |
| 3442 | update_event_fields(call, NULL); |
| 3443 | cond_resched(); |
| 3444 | } |
| 3445 | up_write(&trace_event_sem); |
| 3446 | } |
| 3447 | |
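| | /* |
| |  * Return true if @systems is NULL or if the event's system appears in |
| |  * @systems, a list of system names separated by spaces or commas. |
| |  */ |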
| 3448 | static bool event_in_systems(struct trace_event_call *call, |
| 3449 | const char *systems) |
| 3450 | { |
| 3451 | const char *system; |
| 3452 | const char *p; |
| 3453 | |
| 3454 | if (!systems) |
| 3455 | return true; |
| 3456 | |
| 3457 | system = call->class->system; |
| 3458 | p = strstr(systems, system); |
| 3459 | if (!p) |
| 3460 | return false; |
| 3461 | |
| 3462 | if (p != systems && !isspace(*(p - 1)) && *(p - 1) != ',') |
| 3463 | return false; |
| 3464 | |
| 3465 | p += strlen(system); |
| 3466 | return !*p || isspace(*p) || *p == ','; |
| 3467 | } |
| 3468 | |
| 3469 | #ifdef CONFIG_HIST_TRIGGERS |
| 3470 | /* |
| 3471 | * Wake up waiter on the hist_poll_wq from irq_work because the hist trigger |
| 3472 | * may happen in any context. |
| 3473 | */ |
| 3474 | static void hist_poll_event_irq_work(struct irq_work *work) |
| 3475 | { |
| 3476 | wake_up_all(&hist_poll_wq); |
| 3477 | } |
| 3478 | |
| 3479 | DEFINE_IRQ_WORK(hist_poll_work, hist_poll_event_irq_work); |
| 3480 | DECLARE_WAIT_QUEUE_HEAD(hist_poll_wq); |
| 3481 | #endif |
| 3482 | |
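| | /* |
| |  * Allocate a trace_event_file for @call in @tr and link it to the |
| |  * tr->events list with a ref count of one. Returns NULL if the event |
| |  * is not part of tr->system_names, or ERR_PTR(-ENOMEM) if the |
| |  * allocation fails. |
| |  */ |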
| 3483 | static struct trace_event_file * |
| 3484 | trace_create_new_event(struct trace_event_call *call, |
| 3485 | struct trace_array *tr) |
| 3486 | { |
| 3487 | struct trace_pid_list *no_pid_list; |
| 3488 | struct trace_pid_list *pid_list; |
| 3489 | struct trace_event_file *file; |
| 3490 | unsigned int first; |
| 3491 | |
| 3492 | if (!event_in_systems(call, tr->system_names)) |
| 3493 | return NULL; |
| 3494 | |
| 3495 | file = kmem_cache_alloc(file_cachep, GFP_TRACE); |
| 3496 | if (!file) |
| 3497 | return ERR_PTR(-ENOMEM); |
| 3498 | |
| 3499 | pid_list = rcu_dereference_protected(tr->filtered_pids, |
| 3500 | lockdep_is_held(&event_mutex)); |
| 3501 | no_pid_list = rcu_dereference_protected(tr->filtered_no_pids, |
| 3502 | lockdep_is_held(&event_mutex)); |
| 3503 | |
| 3504 | if (!trace_pid_list_first(pid_list, &first) || |
| 3505 | !trace_pid_list_first(no_pid_list, &first)) |
| 3506 | file->flags |= EVENT_FILE_FL_PID_FILTER; |
| 3507 | |
| 3508 | file->event_call = call; |
| 3509 | file->tr = tr; |
| 3510 | atomic_set(&file->sm_ref, 0); |
| 3511 | atomic_set(&file->tm_ref, 0); |
| 3512 | INIT_LIST_HEAD(&file->triggers); |
| 3513 | list_add(&file->list, &tr->events); |
| 3514 | refcount_set(&file->ref, 1); |
| 3515 | |
| 3516 | return file; |
| 3517 | } |
| 3518 | |
| 3519 | #define MAX_BOOT_TRIGGERS 32 |
| 3520 | |
| 3521 | static struct boot_triggers { |
| 3522 | const char *event; |
| 3523 | char *trigger; |
| 3524 | } bootup_triggers[MAX_BOOT_TRIGGERS]; |
| 3525 | |
| 3526 | static char bootup_trigger_buf[COMMAND_LINE_SIZE]; |
| 3527 | static int nr_boot_triggers; |
| 3528 | |
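| | /* |
| |  * Parse the trace_trigger= boot option: a comma separated list of |
| |  * "<event>.<trigger>" entries, each split on the first '.' into the |
| |  * event name and the trigger to attach to it, for example: |
| |  * |
| |  *   trace_trigger="sched_switch.stacktrace:5" |
| |  */ |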
| 3529 | static __init int setup_trace_triggers(char *str) |
| 3530 | { |
| 3531 | char *trigger; |
| 3532 | char *buf; |
| 3533 | int i; |
| 3534 | |
| 3535 | strscpy(bootup_trigger_buf, str, COMMAND_LINE_SIZE); |
| 3536 | trace_set_ring_buffer_expanded(NULL); |
| 3537 | disable_tracing_selftest("running event triggers"); |
| 3538 | |
| 3539 | buf = bootup_trigger_buf; |
| 3540 | for (i = 0; i < MAX_BOOT_TRIGGERS; i++) { |
| 3541 | trigger = strsep(&buf, ","); |
| 3542 | if (!trigger) |
| 3543 | break; |
| 3544 | bootup_triggers[i].event = strsep(&trigger, "."); |
| 3545 | bootup_triggers[i].trigger = trigger; |
| 3546 | if (!bootup_triggers[i].trigger) |
| 3547 | break; |
| 3548 | } |
| 3549 | |
| 3550 | nr_boot_triggers = i; |
| 3551 | return 1; |
| 3552 | } |
| 3553 | __setup("trace_trigger=" , setup_trace_triggers); |
| 3554 | |
| 3555 | /* Add an event to a trace directory */ |
| 3556 | static int |
| 3557 | __trace_add_new_event(struct trace_event_call *call, struct trace_array *tr) |
| 3558 | { |
| 3559 | struct trace_event_file *file; |
| 3560 | |
| 3561 | file = trace_create_new_event(call, tr); |
| 3562 | /* |
| 3563 | * trace_create_new_event() returns ERR_PTR(-ENOMEM) on allocation |
| 3564 | * failure, or NULL if the event is not part of tr->system_names. |
| 3565 | * When the event is not part of tr->system_names, return zero, |
| 3566 | * not an error. |
| 3567 | */ |
| 3568 | if (!file) |
| 3569 | return 0; |
| 3570 | |
| 3571 | if (IS_ERR(file)) |
| 3572 | return PTR_ERR(file); |
| 3573 | |
| 3574 | if (eventdir_initialized) |
| 3575 | return event_create_dir(tr->event_dir, file); |
| 3576 | else |
| 3577 | return event_define_fields(call); |
| 3578 | } |
| 3579 | |
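| | /* |
| |  * Attach any boot-time triggers from the trace_trigger= option whose |
| |  * event name matches @name to @file. |
| |  */ |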
| 3580 | static void trace_early_triggers(struct trace_event_file *file, const char *name) |
| 3581 | { |
| 3582 | int ret; |
| 3583 | int i; |
| 3584 | |
| 3585 | for (i = 0; i < nr_boot_triggers; i++) { |
| 3586 | if (strcmp(name, bootup_triggers[i].event)) |
| 3587 | continue; |
| 3588 | mutex_lock(&event_mutex); |
| 3589 | ret = trigger_process_regex(file, bootup_triggers[i].trigger); |
| 3590 | mutex_unlock(&event_mutex); |
| 3591 | if (ret) |
| 3592 | pr_err("Failed to register trigger '%s' on event %s\n", |
| 3593 | bootup_triggers[i].trigger, |
| 3594 | bootup_triggers[i].event); |
| 3595 | } |
| 3596 | } |
| 3597 | |
| 3598 | /* |
| 3599 | * Just create a descriptor for early init. A descriptor is required |
| 3600 | * for enabling events at boot. We want to enable events before |
| 3601 | * the filesystem is initialized. |
| 3602 | */ |
| 3603 | static int |
| 3604 | __trace_early_add_new_event(struct trace_event_call *call, |
| 3605 | struct trace_array *tr) |
| 3606 | { |
| 3607 | struct trace_event_file *file; |
| 3608 | int ret; |
| 3609 | |
| 3610 | file = trace_create_new_event(call, tr); |
| 3611 | /* |
| 3612 | * trace_create_new_event() returns ERR_PTR(-ENOMEM) on allocation |
| 3613 | * failure, or NULL if the event is not part of tr->system_names. |
| 3614 | * When the event is not part of tr->system_names, return zero, |
| 3615 | * not an error. |
| 3616 | */ |
| 3617 | if (!file) |
| 3618 | return 0; |
| 3619 | |
| 3620 | if (IS_ERR(file)) |
| 3621 | return PTR_ERR(file); |
| 3622 | |
| 3623 | ret = event_define_fields(call); |
| 3624 | if (ret) |
| 3625 | return ret; |
| 3626 | |
| 3627 | trace_early_triggers(file, trace_event_name(call)); |
| 3628 | |
| 3629 | return 0; |
| 3630 | } |
| 3631 | |
| 3632 | struct ftrace_module_file_ops; |
| 3633 | static void __add_event_to_tracers(struct trace_event_call *call); |
| 3634 | |
| 3635 | /* Add an additional event_call dynamically */ |
| 3636 | int trace_add_event_call(struct trace_event_call *call) |
| 3637 | { |
| 3638 | int ret; |
| 3639 | lockdep_assert_held(&event_mutex); |
| 3640 | |
| 3641 | guard(mutex)(&trace_types_lock); |
| 3642 | |
| 3643 | ret = __register_event(call, NULL); |
| 3644 | if (ret < 0) |
| 3645 | return ret; |
| 3646 | |
| 3647 | __add_event_to_tracers(call); |
| 3648 | return ret; |
| 3649 | } |
| 3650 | EXPORT_SYMBOL_GPL(trace_add_event_call); |
| 3651 | |
| 3652 | /* |
| 3653 | * Must be called under locking of trace_types_lock, event_mutex and |
| 3654 | * trace_event_sem. |
| 3655 | */ |
| 3656 | static void __trace_remove_event_call(struct trace_event_call *call) |
| 3657 | { |
| 3658 | event_remove(call); |
| 3659 | trace_destroy_fields(call); |
| 3660 | } |
| 3661 | |
| 3662 | static int probe_remove_event_call(struct trace_event_call *call) |
| 3663 | { |
| 3664 | struct trace_array *tr; |
| 3665 | struct trace_event_file *file; |
| 3666 | |
| 3667 | #ifdef CONFIG_PERF_EVENTS |
| 3668 | if (call->perf_refcount) |
| 3669 | return -EBUSY; |
| 3670 | #endif |
| 3671 | do_for_each_event_file(tr, file) { |
| 3672 | if (file->event_call != call) |
| 3673 | continue; |
| 3674 | /* |
| 3675 | * We can't rely on the ftrace_event_enable_disable(enable => 0) |
| 3676 | * that we are going to do; soft mode can suppress |
| 3677 | * TRACE_REG_UNREGISTER. |
| 3678 | */ |
| 3679 | if (file->flags & EVENT_FILE_FL_ENABLED) |
| 3680 | goto busy; |
| 3681 | |
| 3682 | if (file->flags & EVENT_FILE_FL_WAS_ENABLED) |
| 3683 | tr->clear_trace = true; |
| 3684 | /* |
| 3685 | * The do_for_each_event_file() is |
| 3686 | * a double loop. After finding the call for this |
| 3687 | * trace_array, we use break to jump to the next |
| 3688 | * trace_array. |
| 3689 | */ |
| 3690 | break; |
| 3691 | } while_for_each_event_file(); |
| 3692 | |
| 3693 | __trace_remove_event_call(call); |
| 3694 | |
| 3695 | return 0; |
| 3696 | busy: |
| 3697 | /* No need to clear the trace now */ |
| 3698 | list_for_each_entry(tr, &ftrace_trace_arrays, list) { |
| 3699 | tr->clear_trace = false; |
| 3700 | } |
| 3701 | return -EBUSY; |
| 3702 | } |
| 3703 | |
| 3704 | /* Remove an event_call */ |
| 3705 | int trace_remove_event_call(struct trace_event_call *call) |
| 3706 | { |
| 3707 | int ret; |
| 3708 | |
| 3709 | lockdep_assert_held(&event_mutex); |
| 3710 | |
| 3711 | mutex_lock(&trace_types_lock); |
| 3712 | down_write(&trace_event_sem); |
| 3713 | ret = probe_remove_event_call(call); |
| 3714 | up_write(&trace_event_sem); |
| 3715 | mutex_unlock(&trace_types_lock); |
| 3716 | |
| 3717 | return ret; |
| 3718 | } |
| 3719 | EXPORT_SYMBOL_GPL(trace_remove_event_call); |
| 3720 | |
| 3721 | #define for_each_event(event, start, end) \ |
| 3722 | for (event = start; \ |
| 3723 | (unsigned long)event < (unsigned long)end; \ |
| 3724 | event++) |
| 3725 | |
| 3726 | #ifdef CONFIG_MODULES |
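| | /* |
| |  * Enable any events of @tr that were cached to be set when module |
| |  * @mod gets loaded, then free the cached entries. |
| |  */ |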
| 3727 | static void update_mod_cache(struct trace_array *tr, struct module *mod) |
| 3728 | { |
| 3729 | struct event_mod_load *event_mod, *n; |
| 3730 | |
| 3731 | list_for_each_entry_safe(event_mod, n, &tr->mod_events, list) { |
| 3732 | if (strcmp(event_mod->module, mod->name) != 0) |
| 3733 | continue; |
| 3734 | |
| 3735 | __ftrace_set_clr_event_nolock(tr, event_mod->match, |
| 3736 | event_mod->system, |
| 3737 | event_mod->event, 1, mod->name); |
| 3738 | free_event_mod(event_mod); |
| 3739 | } |
| 3740 | } |
| 3741 | |
| 3742 | static void update_cache_events(struct module *mod) |
| 3743 | { |
| 3744 | struct trace_array *tr; |
| 3745 | |
| 3746 | list_for_each_entry(tr, &ftrace_trace_arrays, list) |
| 3747 | update_mod_cache(tr, mod); |
| 3748 | } |
| 3749 | |
| 3750 | static void trace_module_add_events(struct module *mod) |
| 3751 | { |
| 3752 | struct trace_event_call **call, **start, **end; |
| 3753 | |
| 3754 | if (!mod->num_trace_events) |
| 3755 | return; |
| 3756 | |
| 3757 | /* Don't add infrastructure for mods without tracepoints */ |
| 3758 | if (trace_module_has_bad_taint(mod)) { |
| 3759 | pr_err("%s: module has bad taint, not creating trace events\n" , |
| 3760 | mod->name); |
| 3761 | return; |
| 3762 | } |
| 3763 | |
| 3764 | start = mod->trace_events; |
| 3765 | end = mod->trace_events + mod->num_trace_events; |
| 3766 | |
| 3767 | for_each_event(call, start, end) { |
| 3768 | __register_event(*call, mod); |
| 3769 | __add_event_to_tracers(*call); |
| 3770 | } |
| 3771 | |
| 3772 | update_cache_events(mod); |
| 3773 | } |
| 3774 | |
| 3775 | static void trace_module_remove_events(struct module *mod) |
| 3776 | { |
| 3777 | struct trace_event_call *call, *p; |
| 3778 | struct module_string *modstr, *m; |
| 3779 | |
| 3780 | down_write(&trace_event_sem); |
| 3781 | list_for_each_entry_safe(call, p, &ftrace_events, list) { |
| 3782 | if ((call->flags & TRACE_EVENT_FL_DYNAMIC) || !call->module) |
| 3783 | continue; |
| 3784 | if (call->module == mod) |
| 3785 | __trace_remove_event_call(call); |
| 3786 | } |
| 3787 | /* Check for any strings allocated for this module */ |
| 3788 | list_for_each_entry_safe(modstr, m, &module_strings, next) { |
| 3789 | if (modstr->module != mod) |
| 3790 | continue; |
| 3791 | list_del(&modstr->next); |
| 3792 | kfree(modstr->str); |
| 3793 | kfree(modstr); |
| 3794 | } |
| 3795 | up_write(&trace_event_sem); |
| 3796 | |
| 3797 | /* |
| 3798 | * It is safest to reset the ring buffer if the module being unloaded |
| 3799 | * registered any events that were used. The only worry is if |
| 3800 | * a new module gets loaded, and takes on the same id as the events |
| 3801 | * of this module. When printing out the buffer, traced events left |
| 3802 | * over from this module may be passed to the new module events and |
| 3803 | * unexpected results may occur. |
| 3804 | */ |
| 3805 | tracing_reset_all_online_cpus_unlocked(); |
| 3806 | } |
| 3807 | |
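| | /* |
| |  * Module notifier: add the module's events when it is coming, and |
| |  * remove them (and reset the ring buffer) when it is going. |
| |  */ |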
| 3808 | static int trace_module_notify(struct notifier_block *self, |
| 3809 | unsigned long val, void *data) |
| 3810 | { |
| 3811 | struct module *mod = data; |
| 3812 | |
| 3813 | mutex_lock(&event_mutex); |
| 3814 | mutex_lock(&trace_types_lock); |
| 3815 | switch (val) { |
| 3816 | case MODULE_STATE_COMING: |
| 3817 | trace_module_add_events(mod); |
| 3818 | break; |
| 3819 | case MODULE_STATE_GOING: |
| 3820 | trace_module_remove_events(mod); |
| 3821 | break; |
| 3822 | } |
| 3823 | mutex_unlock(&trace_types_lock); |
| 3824 | mutex_unlock(&event_mutex); |
| 3825 | |
| 3826 | return NOTIFY_OK; |
| 3827 | } |
| 3828 | |
| 3829 | static struct notifier_block trace_module_nb = { |
| 3830 | .notifier_call = trace_module_notify, |
| 3831 | .priority = 1, /* higher than trace.c module notify */ |
| 3832 | }; |
| 3833 | #endif /* CONFIG_MODULES */ |
| 3834 | |
| 3835 | /* Create a new event directory structure for a trace directory. */ |
| 3836 | static void |
| 3837 | __trace_add_event_dirs(struct trace_array *tr) |
| 3838 | { |
| 3839 | struct trace_event_call *call; |
| 3840 | int ret; |
| 3841 | |
| 3842 | lockdep_assert_held(&trace_event_sem); |
| 3843 | |
| 3844 | list_for_each_entry(call, &ftrace_events, list) { |
| 3845 | ret = __trace_add_new_event(call, tr); |
| 3846 | if (ret < 0) |
| 3847 | pr_warn("Could not create directory for event %s\n" , |
| 3848 | trace_event_name(call)); |
| 3849 | } |
| 3850 | } |
| 3851 | |
| 3852 | /* Returns any file that matches the system and event */ |
| 3853 | struct trace_event_file * |
| 3854 | __find_event_file(struct trace_array *tr, const char *system, const char *event) |
| 3855 | { |
| 3856 | struct trace_event_file *file; |
| 3857 | struct trace_event_call *call; |
| 3858 | const char *name; |
| 3859 | |
| 3860 | list_for_each_entry(file, &tr->events, list) { |
| 3861 | |
| 3862 | call = file->event_call; |
| 3863 | name = trace_event_name(call); |
| 3864 | |
| 3865 | if (!name || !call->class) |
| 3866 | continue; |
| 3867 | |
| 3868 | if (strcmp(event, name) == 0 && |
| 3869 | strcmp(system, call->class->system) == 0) |
| 3870 | return file; |
| 3871 | } |
| 3872 | return NULL; |
| 3873 | } |
| 3874 | |
| 3875 | /* Returns a valid trace event file that matches the system and event */ |
| 3876 | struct trace_event_file * |
| 3877 | find_event_file(struct trace_array *tr, const char *system, const char *event) |
| 3878 | { |
| 3879 | struct trace_event_file *file; |
| 3880 | |
| 3881 | file = __find_event_file(tr, system, event); |
| 3882 | if (!file || !file->event_call->class->reg || |
| 3883 | file->event_call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) |
| 3884 | return NULL; |
| 3885 | |
| 3886 | return file; |
| 3887 | } |
| 3888 | |
| 3889 | /** |
| 3890 | * trace_get_event_file - Find and return a trace event file |
| 3891 | * @instance: The name of the trace instance containing the event |
| 3892 | * @system: The name of the system containing the event |
| 3893 | * @event: The name of the event |
| 3894 | * |
| 3895 | * Return a trace event file given the trace instance name, trace |
| 3896 | * system, and trace event name. If the instance name is NULL, it |
| 3897 | * refers to the top-level trace array. |
| 3898 | * |
| 3899 | * This function will look it up and return it if found, after calling |
| 3900 | * trace_array_get() to prevent the instance from going away, and |
| 3901 | * increment the event's module refcount to prevent it from being |
| 3902 | * removed. |
| 3903 | * |
| 3904 | * To release the file, call trace_put_event_file(), which will call |
| 3905 | * trace_array_put() and decrement the event's module refcount. |
| 3906 | * |
| 3907 | * Return: The trace event on success, ERR_PTR otherwise. |
| 3908 | */ |
| 3909 | struct trace_event_file *trace_get_event_file(const char *instance, |
| 3910 | const char *system, |
| 3911 | const char *event) |
| 3912 | { |
| 3913 | struct trace_array *tr = top_trace_array(); |
| 3914 | struct trace_event_file *file = NULL; |
| 3915 | int ret = -EINVAL; |
| 3916 | |
| 3917 | if (instance) { |
| 3918 | tr = trace_array_find_get(instance); |
| 3919 | if (!tr) |
| 3920 | return ERR_PTR(-ENOENT); |
| 3921 | } else { |
| 3922 | ret = trace_array_get(tr); |
| 3923 | if (ret) |
| 3924 | return ERR_PTR(ret); |
| 3925 | } |
| 3926 | |
| 3927 | guard(mutex)(&event_mutex); |
| 3928 | |
| 3929 | file = find_event_file(tr, system, event); |
| 3930 | if (!file) { |
| 3931 | trace_array_put(tr); |
| 3932 | return ERR_PTR(-EINVAL); |
| 3933 | } |
| 3934 | |
| 3935 | /* Don't let event modules unload while in use */ |
| 3936 | ret = trace_event_try_get_ref(file->event_call); |
| 3937 | if (!ret) { |
| 3938 | trace_array_put(tr); |
| 3939 | return ERR_PTR(-EBUSY); |
| 3940 | } |
| 3941 | |
| 3942 | return file; |
| 3943 | } |
| 3944 | EXPORT_SYMBOL_GPL(trace_get_event_file); |
| 3945 | |
| 3946 | /** |
| 3947 | * trace_put_event_file - Release a file from trace_get_event_file() |
| 3948 | * @file: The trace event file |
| 3949 | * |
| 3950 | * If a file was retrieved using trace_get_event_file(), this should |
| 3951 | * be called when it's no longer needed. It will cancel the previous |
| 3952 | * trace_array_get() called by that function, and decrement the |
| 3953 | * event's module refcount. |
| 3954 | */ |
| 3955 | void trace_put_event_file(struct trace_event_file *file) |
| 3956 | { |
| 3957 | mutex_lock(&event_mutex); |
| 3958 | trace_event_put_ref(file->event_call); |
| 3959 | mutex_unlock(&event_mutex); |
| 3960 | |
| 3961 | trace_array_put(file->tr); |
| 3962 | } |
| 3963 | EXPORT_SYMBOL_GPL(trace_put_event_file); |
| 3964 | |
| 3965 | #ifdef CONFIG_DYNAMIC_FTRACE |
| 3966 | |
| 3967 | /* Avoid typos */ |
| 3968 | #define ENABLE_EVENT_STR "enable_event" |
| 3969 | #define DISABLE_EVENT_STR "disable_event" |
| 3970 | |
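| | /* |
| |  * Data attached to each function probe set up by the enable_event |
| |  * and disable_event commands: @file is the event to toggle, @count |
| |  * the number of remaining toggles (-1 for unlimited), @ref the |
| |  * number of probe sites sharing this data, and @enable the direction |
| |  * of the toggle. |
| |  */ |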
| 3971 | struct event_probe_data { |
| 3972 | struct trace_event_file *file; |
| 3973 | unsigned long count; |
| 3974 | int ref; |
| 3975 | bool enable; |
| 3976 | }; |
| 3977 | |
| 3978 | static void update_event_probe(struct event_probe_data *data) |
| 3979 | { |
| 3980 | if (data->enable) |
| 3981 | clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags); |
| 3982 | else |
| 3983 | set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags); |
| 3984 | } |
| 3985 | |
| 3986 | static void |
| 3987 | event_enable_probe(unsigned long ip, unsigned long parent_ip, |
| 3988 | struct trace_array *tr, struct ftrace_probe_ops *ops, |
| 3989 | void *data) |
| 3990 | { |
| 3991 | struct ftrace_func_mapper *mapper = data; |
| 3992 | struct event_probe_data *edata; |
| 3993 | void **pdata; |
| 3994 | |
| 3995 | pdata = ftrace_func_mapper_find_ip(mapper, ip); |
| 3996 | if (!pdata || !*pdata) |
| 3997 | return; |
| 3998 | |
| 3999 | edata = *pdata; |
| 4000 | update_event_probe(edata); |
| 4001 | } |
| 4002 | |
| 4003 | static void |
| 4004 | event_enable_count_probe(unsigned long ip, unsigned long parent_ip, |
| 4005 | struct trace_array *tr, struct ftrace_probe_ops *ops, |
| 4006 | void *data) |
| 4007 | { |
| 4008 | struct ftrace_func_mapper *mapper = data; |
| 4009 | struct event_probe_data *edata; |
| 4010 | void **pdata; |
| 4011 | |
| 4012 | pdata = ftrace_func_mapper_find_ip(mapper, ip); |
| 4013 | if (!pdata || !*pdata) |
| 4014 | return; |
| 4015 | |
| 4016 | edata = *pdata; |
| 4017 | |
| 4018 | if (!edata->count) |
| 4019 | return; |
| 4020 | |
| 4021 | /* Skip if the event is in a state we want to switch to */ |
| 4022 | if (edata->enable == !(edata->file->flags & EVENT_FILE_FL_SOFT_DISABLED)) |
| 4023 | return; |
| 4024 | |
| 4025 | if (edata->count != -1) |
| 4026 | (edata->count)--; |
| 4027 | |
| 4028 | update_event_probe(edata); |
| 4029 | } |
| 4030 | |
| 4031 | static int |
| 4032 | event_enable_print(struct seq_file *m, unsigned long ip, |
| 4033 | struct ftrace_probe_ops *ops, void *data) |
| 4034 | { |
| 4035 | struct ftrace_func_mapper *mapper = data; |
| 4036 | struct event_probe_data *edata; |
| 4037 | void **pdata; |
| 4038 | |
| 4039 | pdata = ftrace_func_mapper_find_ip(mapper, ip); |
| 4040 | |
| 4041 | if (WARN_ON_ONCE(!pdata || !*pdata)) |
| 4042 | return 0; |
| 4043 | |
| 4044 | edata = *pdata; |
| 4045 | |
| 4046 | seq_printf(m, "%ps:", (void *)ip); |
| 4047 | |
| 4048 | seq_printf(m, "%s:%s:%s", |
| 4049 | edata->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR, |
| 4050 | edata->file->event_call->class->system, |
| 4051 | trace_event_name(edata->file->event_call)); |
| 4052 | |
| 4053 | if (edata->count == -1) |
| 4054 | seq_puts(m, ":unlimited\n"); |
| 4055 | else |
| 4056 | seq_printf(m, ":count=%ld\n", edata->count); |
| 4057 | |
| 4058 | return 0; |
| 4059 | } |
| 4060 | |
| 4061 | static int |
| 4062 | event_enable_init(struct ftrace_probe_ops *ops, struct trace_array *tr, |
| 4063 | unsigned long ip, void *init_data, void **data) |
| 4064 | { |
| 4065 | struct ftrace_func_mapper *mapper = *data; |
| 4066 | struct event_probe_data *edata = init_data; |
| 4067 | int ret; |
| 4068 | |
| 4069 | if (!mapper) { |
| 4070 | mapper = allocate_ftrace_func_mapper(); |
| 4071 | if (!mapper) |
| 4072 | return -ENODEV; |
| 4073 | *data = mapper; |
| 4074 | } |
| 4075 | |
| 4076 | ret = ftrace_func_mapper_add_ip(mapper, ip, edata); |
| 4077 | if (ret < 0) |
| 4078 | return ret; |
| 4079 | |
| 4080 | edata->ref++; |
| 4081 | |
| 4082 | return 0; |
| 4083 | } |
| 4084 | |
| 4085 | static int free_probe_data(void *data) |
| 4086 | { |
| 4087 | struct event_probe_data *edata = data; |
| 4088 | |
| 4089 | edata->ref--; |
| 4090 | if (!edata->ref) { |
| 4091 | /* Remove soft mode */ |
| 4092 | __ftrace_event_enable_disable(edata->file, 0, 1); |
| 4093 | trace_event_put_ref(edata->file->event_call); |
| 4094 | kfree(edata); |
| 4095 | } |
| 4096 | return 0; |
| 4097 | } |
| 4098 | |
| 4099 | static void |
| 4100 | event_enable_free(struct ftrace_probe_ops *ops, struct trace_array *tr, |
| 4101 | unsigned long ip, void *data) |
| 4102 | { |
| 4103 | struct ftrace_func_mapper *mapper = data; |
| 4104 | struct event_probe_data *edata; |
| 4105 | |
| 4106 | if (!ip) { |
| 4107 | if (!mapper) |
| 4108 | return; |
| 4109 | free_ftrace_func_mapper(mapper, free_probe_data); |
| 4110 | return; |
| 4111 | } |
| 4112 | |
| 4113 | edata = ftrace_func_mapper_remove_ip(mapper, ip); |
| 4114 | |
| 4115 | if (WARN_ON_ONCE(!edata)) |
| 4116 | return; |
| 4117 | |
| 4118 | if (WARN_ON_ONCE(edata->ref <= 0)) |
| 4119 | return; |
| 4120 | |
| 4121 | free_probe_data(edata); |
| 4122 | } |
| 4123 | |
| 4124 | static struct ftrace_probe_ops event_enable_probe_ops = { |
| 4125 | .func = event_enable_probe, |
| 4126 | .print = event_enable_print, |
| 4127 | .init = event_enable_init, |
| 4128 | .free = event_enable_free, |
| 4129 | }; |
| 4130 | |
| 4131 | static struct ftrace_probe_ops event_enable_count_probe_ops = { |
| 4132 | .func = event_enable_count_probe, |
| 4133 | .print = event_enable_print, |
| 4134 | .init = event_enable_init, |
| 4135 | .free = event_enable_free, |
| 4136 | }; |
| 4137 | |
| 4138 | static struct ftrace_probe_ops event_disable_probe_ops = { |
| 4139 | .func = event_enable_probe, |
| 4140 | .print = event_enable_print, |
| 4141 | .init = event_enable_init, |
| 4142 | .free = event_enable_free, |
| 4143 | }; |
| 4144 | |
| 4145 | static struct ftrace_probe_ops event_disable_count_probe_ops = { |
| 4146 | .func = event_enable_count_probe, |
| 4147 | .print = event_enable_print, |
| 4148 | .init = event_enable_init, |
| 4149 | .free = event_enable_free, |
| 4150 | }; |
| 4151 | |
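| | /* |
| |  * Handle the enable_event and disable_event commands written to |
| |  * set_ftrace_filter. The format is |
| |  * |
| |  *   <function>:enable_event:<system>:<event>[:count] |
| |  * |
| |  * for example: |
| |  * |
| |  *   echo 'schedule:enable_event:sched:sched_switch:2' > set_ftrace_filter |
| |  */ |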
| 4152 | static int |
| 4153 | event_enable_func(struct trace_array *tr, struct ftrace_hash *hash, |
| 4154 | char *glob, char *cmd, char *param, int enabled) |
| 4155 | { |
| 4156 | struct trace_event_file *file; |
| 4157 | struct ftrace_probe_ops *ops; |
| 4158 | struct event_probe_data *data; |
| 4159 | unsigned long count = -1; |
| 4160 | const char *system; |
| 4161 | const char *event; |
| 4162 | char *number; |
| 4163 | bool enable; |
| 4164 | int ret; |
| 4165 | |
| 4166 | if (!tr) |
| 4167 | return -ENODEV; |
| 4168 | |
| 4169 | /* hash funcs only work with set_ftrace_filter */ |
| 4170 | if (!enabled || !param) |
| 4171 | return -EINVAL; |
| 4172 | |
| 4173 | system = strsep(¶m, ":" ); |
| 4174 | if (!param) |
| 4175 | return -EINVAL; |
| 4176 | |
| 4177 | event = strsep(¶m, ":" ); |
| 4178 | |
| 4179 | guard(mutex)(T: &event_mutex); |
| 4180 | |
| 4181 | file = find_event_file(tr, system, event); |
| 4182 | if (!file) |
| 4183 | return -EINVAL; |
| 4184 | |
| 4185 | enable = strcmp(cmd, ENABLE_EVENT_STR) == 0; |
| 4186 | |
| 4187 | if (enable) |
| 4188 | ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops; |
| 4189 | else |
| 4190 | ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops; |
| 4191 | |
| 4192 | if (glob[0] == '!') |
| 4193 | return unregister_ftrace_function_probe_func(glob+1, tr, ops); |
| 4194 | |
| 4195 | if (param) { |
| 4196 | number = strsep(&param, ":"); |
| 4197 | |
| 4198 | if (!strlen(number)) |
| 4199 | return -EINVAL; |
| 4200 | |
| 4201 | /* |
| 4202 | * We use the callback data field (which is a pointer) |
| 4203 | * as our counter. |
| 4204 | */ |
| 4205 | ret = kstrtoul(number, 0, &count); |
| 4206 | if (ret) |
| 4207 | return ret; |
| 4208 | } |
| 4209 | |
| 4210 | /* Don't let event modules unload while probe registered */ |
| 4211 | ret = trace_event_try_get_ref(file->event_call); |
| 4212 | if (!ret) |
| 4213 | return -EBUSY; |
| 4214 | |
| 4215 | ret = __ftrace_event_enable_disable(file, 1, 1); |
| 4216 | if (ret < 0) |
| 4217 | goto out_put; |
| 4218 | |
| 4219 | ret = -ENOMEM; |
| 4220 | data = kzalloc(sizeof(*data), GFP_KERNEL); |
| 4221 | if (!data) |
| 4222 | goto out_put; |
| 4223 | |
| 4224 | data->enable = enable; |
| 4225 | data->count = count; |
| 4226 | data->file = file; |
| 4227 | |
| 4228 | ret = register_ftrace_function_probe(glob, tr, ops, data); |
| 4229 | /* |
| 4230 | * The above returns on success the # of functions enabled, |
| 4231 | * but if it didn't find any functions it returns zero. |
| 4232 | * Consider no functions a failure too. |
| 4233 | */ |
| 4234 | |
| 4235 | /* Just return zero, not the number of enabled functions */ |
| 4236 | if (ret > 0) |
| 4237 | return 0; |
| 4238 | |
| 4239 | kfree(data); |
| 4240 | |
| 4241 | if (!ret) |
| 4242 | ret = -ENOENT; |
| 4243 | |
| 4244 | __ftrace_event_enable_disable(file, 0, 1); |
| 4245 | out_put: |
| 4246 | trace_event_put_ref(file->event_call); |
| 4247 | return ret; |
| 4248 | } |
| 4249 | |
| 4250 | static struct ftrace_func_command event_enable_cmd = { |
| 4251 | .name = ENABLE_EVENT_STR, |
| 4252 | .func = event_enable_func, |
| 4253 | }; |
| 4254 | |
| 4255 | static struct ftrace_func_command event_disable_cmd = { |
| 4256 | .name = DISABLE_EVENT_STR, |
| 4257 | .func = event_enable_func, |
| 4258 | }; |
| 4259 | |
| 4260 | static __init int register_event_cmds(void) |
| 4261 | { |
| 4262 | int ret; |
| 4263 | |
| 4264 | ret = register_ftrace_command(&event_enable_cmd); |
| 4265 | if (WARN_ON(ret < 0)) |
| 4266 | return ret; |
| 4267 | ret = register_ftrace_command(&event_disable_cmd); |
| 4268 | if (WARN_ON(ret < 0)) |
| 4269 | unregister_ftrace_command(&event_enable_cmd); |
| 4270 | return ret; |
| 4271 | } |
| 4272 | #else |
| 4273 | static inline int register_event_cmds(void) { return 0; } |
| 4274 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
| 4275 | |
| 4276 | /* |
| 4277 | * The top level array and trace arrays created by boot-time tracing |
| 4278 | * have already had their trace_event_file descriptors created in order |
| 4279 | * to allow for early events to be recorded. |
| 4280 | * This function is called after the tracefs has been initialized, |
| 4281 | * and we now have to create the files associated with the events. |
| 4282 | */ |
| 4283 | static void __trace_early_add_event_dirs(struct trace_array *tr) |
| 4284 | { |
| 4285 | struct trace_event_file *file; |
| 4286 | int ret; |
| 4287 | |
| 4289 | list_for_each_entry(file, &tr->events, list) { |
| 4290 | ret = event_create_dir(tr->event_dir, file); |
| 4291 | if (ret < 0) |
| 4292 | pr_warn("Could not create directory for event %s\n", |
| 4293 | trace_event_name(file->event_call)); |
| 4294 | } |
| 4295 | } |
| 4296 | |
| 4297 | /* |
| 4298 | * For early boot up, the top trace array and the trace arrays created |
| 4299 | * by boot-time tracing need to have a list of events that can be |
| 4300 | * enabled. This must be done before the filesystem is set up in order |
| 4301 | * to allow events to be traced early. |
| 4302 | */ |
| 4303 | void __trace_early_add_events(struct trace_array *tr) |
| 4304 | { |
| 4305 | struct trace_event_call *call; |
| 4306 | int ret; |
| 4307 | |
| 4308 | list_for_each_entry(call, &ftrace_events, list) { |
| 4309 | /* Early boot up should not have any modules loaded */ |
| 4310 | if (!(call->flags & TRACE_EVENT_FL_DYNAMIC) && |
| 4311 | WARN_ON_ONCE(call->module)) |
| 4312 | continue; |
| 4313 | |
| 4314 | ret = __trace_early_add_new_event(call, tr); |
| 4315 | if (ret < 0) |
| 4316 | pr_warn("Could not create early event %s\n" , |
| 4317 | trace_event_name(call)); |
| 4318 | } |
| 4319 | } |
| 4320 | |
| 4321 | /* Remove the event directory structure for a trace directory. */ |
| 4322 | static void |
| 4323 | __trace_remove_event_dirs(struct trace_array *tr) |
| 4324 | { |
| 4325 | struct trace_event_file *file, *next; |
| 4326 | |
| 4327 | list_for_each_entry_safe(file, next, &tr->events, list) |
| 4328 | remove_event_file_dir(file); |
| 4329 | } |
| 4330 | |
| 4331 | static void __add_event_to_tracers(struct trace_event_call *call) |
| 4332 | { |
| 4333 | struct trace_array *tr; |
| 4334 | |
| 4335 | list_for_each_entry(tr, &ftrace_trace_arrays, list) |
| 4336 | __trace_add_new_event(call, tr); |
| 4337 | } |
| 4338 | |
| 4339 | extern struct trace_event_call *__start_ftrace_events[]; |
| 4340 | extern struct trace_event_call *__stop_ftrace_events[]; |
| 4341 | |
| 4342 | static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata; |
| 4343 | |
| 4344 | static __init int setup_trace_event(char *str) |
| 4345 | { |
| 4346 | strscpy(bootup_event_buf, str, COMMAND_LINE_SIZE); |
| 4347 | trace_set_ring_buffer_expanded(NULL); |
| 4348 | disable_tracing_selftest("running event tracing"); |
| 4349 | |
| 4350 | return 1; |
| 4351 | } |
| 4352 | __setup("trace_event=" , setup_trace_event); |
| 4353 | |
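| | /* |
| |  * eventfs callback for the top level "enable", "header_page" and |
| |  * "header_event" entries: set the mode and fops for @name and return |
| |  * 1, or return 0 if the name is not handled. |
| |  */ |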
| 4354 | static int events_callback(const char *name, umode_t *mode, void **data, |
| 4355 | const struct file_operations **fops) |
| 4356 | { |
| 4357 | if (strcmp(name, "enable" ) == 0) { |
| 4358 | *mode = TRACE_MODE_WRITE; |
| 4359 | *fops = &ftrace_tr_enable_fops; |
| 4360 | return 1; |
| 4361 | } |
| 4362 | |
| 4363 | if (strcmp(name, "header_page" ) == 0) { |
| 4364 | *mode = TRACE_MODE_READ; |
| 4365 | *fops = &ftrace_show_header_page_fops; |
| 4366 | |
| 4367 | } else if (strcmp(name, "header_event" ) == 0) { |
| 4368 | *mode = TRACE_MODE_READ; |
| 4369 | *fops = &ftrace_show_header_event_fops; |
| 4370 | } else |
| 4371 | return 0; |
| 4372 | |
| 4373 | return 1; |
| 4374 | } |
| 4375 | |
| 4376 | /* Expects to have event_mutex held when called */ |
| 4377 | static int |
| 4378 | create_event_toplevel_files(struct dentry *parent, struct trace_array *tr) |
| 4379 | { |
| 4380 | struct eventfs_inode *e_events; |
| 4381 | struct dentry *entry; |
| 4382 | int nr_entries; |
| 4383 | static struct eventfs_entry events_entries[] = { |
| 4384 | { |
| 4385 | .name = "enable", |
| 4386 | .callback = events_callback, |
| 4387 | }, |
| 4388 | { |
| 4389 | .name = "header_page", |
| 4390 | .callback = events_callback, |
| 4391 | }, |
| 4392 | { |
| 4393 | .name = "header_event", |
| 4394 | .callback = events_callback, |
| 4395 | }, |
| 4396 | }; |
| 4397 | |
| 4398 | entry = trace_create_file("set_event", TRACE_MODE_WRITE, parent, |
| 4399 | tr, &ftrace_set_event_fops); |
| 4400 | if (!entry) |
| 4401 | return -ENOMEM; |
| 4402 | |
| 4403 | nr_entries = ARRAY_SIZE(events_entries); |
| 4404 | |
| 4405 | e_events = eventfs_create_events_dir("events", parent, events_entries, |
| 4406 | nr_entries, tr); |
| 4407 | if (IS_ERR(e_events)) { |
| 4408 | pr_warn("Could not create tracefs 'events' directory\n"); |
| 4409 | return -ENOMEM; |
| 4410 | } |
| 4411 | |
| 4412 | /* These are not as crucial; just warn if they are not created */ |
| 4413 | |
| 4414 | trace_create_file("set_event_pid", TRACE_MODE_WRITE, parent, |
| 4415 | tr, &ftrace_set_event_pid_fops); |
| 4416 | |
| 4417 | trace_create_file("set_event_notrace_pid", |
| 4418 | TRACE_MODE_WRITE, parent, tr, |
| 4419 | &ftrace_set_event_notrace_pid_fops); |
| 4420 | |
| 4421 | tr->event_dir = e_events; |
| 4422 | |
| 4423 | return 0; |
| 4424 | } |
| 4425 | |
| 4426 | /** |
| 4427 | * event_trace_add_tracer - add an instance of a trace_array to events |
| 4428 | * @parent: The parent dentry to place the files/directories for events in |
| 4429 | * @tr: The trace array associated with these events |
| 4430 | * |
| 4431 | * When a new instance is created, it needs to set up its events |
| 4432 | * directory, as well as other files associated with events. It also |
| 4433 | * creates the event hierarchy in the @parent/events directory. |
| 4434 | * |
| 4435 | * Returns 0 on success. |
| 4436 | * |
| 4437 | * Must be called with event_mutex held. |
| 4438 | */ |
| 4439 | int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr) |
| 4440 | { |
| 4441 | int ret; |
| 4442 | |
| 4443 | lockdep_assert_held(&event_mutex); |
| 4444 | |
| 4445 | ret = create_event_toplevel_files(parent, tr); |
| 4446 | if (ret) |
| 4447 | goto out; |
| 4448 | |
| 4449 | down_write(&trace_event_sem); |
| 4450 | /* If tr already has the event list, it is initialized in early boot. */ |
| 4451 | if (unlikely(!list_empty(&tr->events))) |
| 4452 | __trace_early_add_event_dirs(tr); |
| 4453 | else |
| 4454 | __trace_add_event_dirs(tr); |
| 4455 | up_write(&trace_event_sem); |
| 4456 | |
| 4457 | out: |
| 4458 | return ret; |
| 4459 | } |
| 4460 | |
| 4461 | /* |
| 4462 | * The top trace array already had its file descriptors created. |
| 4463 | * Now the files themselves need to be created. |
| 4464 | */ |
| 4465 | static __init int |
| 4466 | early_event_add_tracer(struct dentry *parent, struct trace_array *tr) |
| 4467 | { |
| 4468 | int ret; |
| 4469 | |
| 4470 | guard(mutex)(&event_mutex); |
| 4471 | |
| 4472 | ret = create_event_toplevel_files(parent, tr); |
| 4473 | if (ret) |
| 4474 | return ret; |
| 4475 | |
| 4476 | down_write(&trace_event_sem); |
| 4477 | __trace_early_add_event_dirs(tr); |
| 4478 | up_write(&trace_event_sem); |
| 4479 | |
| 4480 | return 0; |
| 4481 | } |
| 4482 | |
| 4483 | /* Must be called with event_mutex held */ |
| 4484 | int event_trace_del_tracer(struct trace_array *tr) |
| 4485 | { |
| 4486 | lockdep_assert_held(&event_mutex); |
| 4487 | |
| 4488 | /* Disable any event triggers and associated soft-disabled events */ |
| 4489 | clear_event_triggers(tr); |
| 4490 | |
| 4491 | /* Clear the pid list */ |
| 4492 | __ftrace_clear_event_pids(tr, TRACE_PIDS | TRACE_NO_PIDS); |
| 4493 | |
| 4494 | /* Disable any running events */ |
| 4495 | __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0, NULL); |
| 4496 | |
| 4497 | /* Make sure no more events are being executed */ |
| 4498 | tracepoint_synchronize_unregister(); |
| 4499 | |
| 4500 | down_write(&trace_event_sem); |
| 4501 | __trace_remove_event_dirs(tr); |
| 4502 | eventfs_remove_events_dir(tr->event_dir); |
| 4503 | up_write(&trace_event_sem); |
| 4504 | |
| 4505 | tr->event_dir = NULL; |
| 4506 | |
| 4507 | return 0; |
| 4508 | } |
| 4509 | |
| 4510 | static __init int event_trace_memsetup(void) |
| 4511 | { |
| 4512 | field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC); |
| 4513 | file_cachep = KMEM_CACHE(trace_event_file, SLAB_PANIC); |
| 4514 | return 0; |
| 4515 | } |
| 4516 | |
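| | /* |
| |  * Enable the comma separated list of events in @buf (the format used |
| |  * by the trace_event= boot option, e.g. trace_event=sched:sched_switch), |
| |  * optionally disabling each event first, as restarting syscall events |
| |  * requires. The commas are restored so the buffer can be parsed again. |
| |  */ |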
| 4517 | __init void |
| 4518 | early_enable_events(struct trace_array *tr, char *buf, bool disable_first) |
| 4519 | { |
| 4520 | char *token; |
| 4521 | int ret; |
| 4522 | |
| 4523 | while (true) { |
| 4524 | token = strsep(&buf, ","); |
| 4525 | |
| 4526 | if (!token) |
| 4527 | break; |
| 4528 | |
| 4529 | if (*token) { |
| 4530 | /* Restarting syscalls requires that we stop them first */ |
| 4531 | if (disable_first) |
| 4532 | ftrace_set_clr_event(tr, token, 0); |
| 4533 | |
| 4534 | ret = ftrace_set_clr_event(tr, token, 1); |
| 4535 | if (ret) |
| 4536 | pr_warn("Failed to enable trace event: %s\n", token); |
| 4537 | } |
| 4538 | |
| 4539 | /* Put back the comma to allow this to be called again */ |
| 4540 | if (buf) |
| 4541 | *(buf - 1) = ','; |
| 4542 | } |
| 4543 | } |
| 4544 | |
| 4545 | static __init int event_trace_enable(void) |
| 4546 | { |
| 4547 | struct trace_array *tr = top_trace_array(); |
| 4548 | struct trace_event_call **iter, *call; |
| 4549 | int ret; |
| 4550 | |
| 4551 | if (!tr) |
| 4552 | return -ENODEV; |
| 4553 | |
| 4554 | for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) { |
| 4555 | |
| 4556 | call = *iter; |
| 4557 | ret = event_init(call); |
| 4558 | if (!ret) |
| 4559 | list_add(&call->list, &ftrace_events); |
| 4560 | } |
| 4561 | |
| 4562 | register_trigger_cmds(); |
| 4563 | |
| 4564 | /* |
| 4565 | * We need the top trace array to have a working set of trace |
| 4566 | * points at early init, before the debug files and directories |
| 4567 | * are created. Create the file entries now, and attach them |
| 4568 | * to the actual file dentries later. |
| 4569 | */ |
| 4570 | __trace_early_add_events(tr); |
| 4571 | |
| 4572 | early_enable_events(tr, bootup_event_buf, false); |
| 4573 | |
| 4574 | trace_printk_start_comm(); |
| 4575 | |
| 4576 | register_event_cmds(); |
| 4577 | |
| 4579 | return 0; |
| 4580 | } |
| 4581 | |
| 4582 | /* |
| 4583 | * event_trace_enable() is called from trace_event_init() first to |
| 4584 | * initialize events and perhaps start any events that are on the |
| 4585 | * command line. Unfortunately, there are some events that will not |
| 4586 | * start this early, like the system call tracepoints that need |
| 4587 | * to set the %SYSCALL_WORK_SYSCALL_TRACEPOINT flag of pid 1. But |
| 4588 | * event_trace_enable() is called before pid 1 starts, and this flag |
| 4589 | * is never set, so the syscall tracepoint is never reached, yet |
| 4590 | * the event is enabled regardless (and does nothing). |
| 4591 | */ |
| 4592 | static __init int event_trace_enable_again(void) |
| 4593 | { |
| 4594 | struct trace_array *tr; |
| 4595 | |
| 4596 | tr = top_trace_array(); |
| 4597 | if (!tr) |
| 4598 | return -ENODEV; |
| 4599 | |
| 4600 | early_enable_events(tr, bootup_event_buf, true); |
| 4601 | |
| 4602 | return 0; |
| 4603 | } |
| 4604 | |
| 4605 | early_initcall(event_trace_enable_again); |
| 4606 | |
| 4607 | /* Initialize fields that are not related to tracefs */ |
| 4608 | static __init int event_trace_init_fields(void) |
| 4609 | { |
| 4610 | if (trace_define_generic_fields()) |
| 4611 | pr_warn("tracing: Failed to allocate generic fields"); |
| 4612 | |
| 4613 | if (trace_define_common_fields()) |
| 4614 | pr_warn("tracing: Failed to allocate common fields"); |
| 4615 | |
| 4616 | return 0; |
| 4617 | } |
| 4618 | |
| 4619 | __init int event_trace_init(void) |
| 4620 | { |
| 4621 | struct trace_array *tr; |
| 4622 | int ret; |
| 4623 | |
| 4624 | tr = top_trace_array(); |
| 4625 | if (!tr) |
| 4626 | return -ENODEV; |
| 4627 | |
| 4628 | trace_create_file("available_events", TRACE_MODE_READ, |
| 4629 | NULL, tr, &ftrace_avail_fops); |
| 4630 | |
| 4631 | ret = early_event_add_tracer(NULL, tr); |
| 4632 | if (ret) |
| 4633 | return ret; |
| 4634 | |
| 4635 | #ifdef CONFIG_MODULES |
| 4636 | ret = register_module_notifier(&trace_module_nb); |
| 4637 | if (ret) |
| 4638 | pr_warn("Failed to register trace events module notifier\n"); |
| 4639 | #endif |
| 4640 | |
| 4641 | eventdir_initialized = true; |
| 4642 | |
| 4643 | return 0; |
| 4644 | } |
| 4645 | |
| 4646 | void __init trace_event_init(void) |
| 4647 | { |
| 4648 | event_trace_memsetup(); |
| 4649 | init_ftrace_syscalls(); |
| 4650 | event_trace_enable(); |
| 4651 | event_trace_init_fields(); |
| 4652 | } |
| 4653 | |
| 4654 | #ifdef CONFIG_EVENT_TRACE_STARTUP_TEST |
| 4655 | |
| 4656 | static DEFINE_SPINLOCK(test_spinlock); |
| 4657 | static DEFINE_SPINLOCK(test_spinlock_irq); |
| 4658 | static DEFINE_MUTEX(test_mutex); |
| 4659 | |
| 4660 | static __init void test_work(struct work_struct *dummy) |
| 4661 | { |
| 4662 | spin_lock(&test_spinlock); |
| 4663 | spin_lock_irq(&test_spinlock_irq); |
| 4664 | udelay(1); |
| 4665 | spin_unlock_irq(&test_spinlock_irq); |
| 4666 | spin_unlock(&test_spinlock); |
| 4667 | |
| 4668 | mutex_lock(&test_mutex); |
| 4669 | msleep(1); |
| 4670 | mutex_unlock(&test_mutex); |
| 4671 | } |
| 4672 | |
| 4673 | static __init int event_test_thread(void *unused) |
| 4674 | { |
| 4675 | void *test_malloc; |
| 4676 | |
| 4677 | test_malloc = kmalloc(1234, GFP_KERNEL); |
| 4678 | if (!test_malloc) |
| 4679 | pr_info("failed to kmalloc\n" ); |
| 4680 | |
| 4681 | schedule_on_each_cpu(func: test_work); |
| 4682 | |
| 4683 | kfree(objp: test_malloc); |
| 4684 | |
| 4685 | set_current_state(TASK_INTERRUPTIBLE); |
| 4686 | while (!kthread_should_stop()) { |
| 4687 | schedule(); |
| 4688 | set_current_state(TASK_INTERRUPTIBLE); |
| 4689 | } |
| 4690 | __set_current_state(TASK_RUNNING); |
| 4691 | |
| 4692 | return 0; |
| 4693 | } |
| 4694 | |
| 4695 | /* |
| 4696 | * Do various things that may trigger events. |
| 4697 | */ |
| 4698 | static __init void event_test_stuff(void) |
| 4699 | { |
| 4700 | struct task_struct *test_thread; |
| 4701 | |
| 4702 | test_thread = kthread_run(event_test_thread, NULL, "test-events"); |
| 4703 | msleep(1); |
| 4704 | kthread_stop(test_thread); |
| 4705 | } |
| 4706 | |
| 4707 | /* |
| 4708 | * For every trace event defined, we will test each trace point separately, |
| 4709 | * and then by groups, and finally all trace points. |
| 4710 | */ |
| 4711 | static __init void event_trace_self_tests(void) |
| 4712 | { |
| 4713 | struct trace_subsystem_dir *dir; |
| 4714 | struct trace_event_file *file; |
| 4715 | struct trace_event_call *call; |
| 4716 | struct event_subsystem *system; |
| 4717 | struct trace_array *tr; |
| 4718 | int ret; |
| 4719 | |
| 4720 | tr = top_trace_array(); |
| 4721 | if (!tr) |
| 4722 | return; |
| 4723 | |
| 4724 | pr_info("Running tests on trace events:\n" ); |
| 4725 | |
| 4726 | list_for_each_entry(file, &tr->events, list) { |
| 4727 | |
| 4728 | call = file->event_call; |
| 4729 | |
| 4730 | /* Only test those that have a probe */ |
| 4731 | if (!call->class || !call->class->probe) |
| 4732 | continue; |
| 4733 | |
| 4734 | /* |
| 4735 | * Testing syscall events here is pretty useless, but |
| 4736 | * we still do it if configured. But this is time consuming. |
| 4737 | * What we really need is a user thread to perform the |
| 4738 | * syscalls as we test. |
| 4739 | */ |
| 4740 | #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS |
| 4741 | if (call->class->system && |
| 4742 | strcmp(call->class->system, "syscalls" ) == 0) |
| 4743 | continue; |
| 4744 | #endif |
| 4745 | |
| 4746 | pr_info("Testing event %s: " , trace_event_name(call)); |
| 4747 | |
| 4748 | /* |
| 4749 | * If an event is already enabled, someone is using |
| 4750 | * it and the self test should not be on. |
| 4751 | */ |
| 4752 | if (file->flags & EVENT_FILE_FL_ENABLED) { |
| 4753 | pr_warn("Enabled event during self test!\n" ); |
| 4754 | WARN_ON_ONCE(1); |
| 4755 | continue; |
| 4756 | } |
| 4757 | |
| 4758 | ftrace_event_enable_disable(file, 1); |
| 4759 | event_test_stuff(); |
| 4760 | ftrace_event_enable_disable(file, 0); |
| 4761 | |
| 4762 | pr_cont("OK\n"); |
| 4763 | } |
| 4764 | |
| 4765 | /* Now test at the sub system level */ |
| 4766 | |
| 4767 | pr_info("Running tests on trace event systems:\n" ); |
| 4768 | |
| 4769 | list_for_each_entry(dir, &tr->systems, list) { |
| 4770 | |
| 4771 | system = dir->subsystem; |
| 4772 | |
| 4773 | /* the ftrace system is special, skip it */ |
| 4774 | if (strcmp(system->name, "ftrace" ) == 0) |
| 4775 | continue; |
| 4776 | |
| 4777 | pr_info("Testing event system %s: " , system->name); |
| 4778 | |
| 4779 | ret = __ftrace_set_clr_event(tr, NULL, sub: system->name, NULL, set: 1, NULL); |
| 4780 | if (WARN_ON_ONCE(ret)) { |
| 4781 | pr_warn("error enabling system %s\n" , |
| 4782 | system->name); |
| 4783 | continue; |
| 4784 | } |
| 4785 | |
| 4786 | event_test_stuff(); |
| 4787 | |
| 4788 | ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0, NULL); |
| 4789 | if (WARN_ON_ONCE(ret)) { |
| 4790 | pr_warn("error disabling system %s\n", |
| 4791 | system->name); |
| 4792 | continue; |
| 4793 | } |
| 4794 | |
| 4795 | pr_cont("OK\n"); |
| 4796 | } |
| 4797 | |
| 4798 | /* Test with all events enabled */ |
| 4799 | |
| 4800 | pr_info("Running tests on all trace events:\n" ); |
| 4801 | pr_info("Testing all events: " ); |
| 4802 | |
| 4803 | ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, set: 1, NULL); |
| 4804 | if (WARN_ON_ONCE(ret)) { |
| 4805 | pr_warn("error enabling all events\n" ); |
| 4806 | return; |
| 4807 | } |
| 4808 | |
| 4809 | event_test_stuff(); |
| 4810 | |
| 4811 | /* reset sysname */ |
| 4812 | ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0, NULL); |
| 4813 | if (WARN_ON_ONCE(ret)) { |
| 4814 | pr_warn("error disabling all events\n"); |
| 4815 | return; |
| 4816 | } |
| 4817 | |
| 4818 | pr_cont("OK\n"); |
| 4819 | } |
| 4820 | |
| 4821 | #ifdef CONFIG_FUNCTION_TRACER |
| 4822 | |
| 4823 | static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable); |
| 4824 | |
| 4825 | static struct trace_event_file event_trace_file __initdata; |
| 4826 | |
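| | /* |
| |  * Function tracer callback used by the self tests: record a TRACE_FN |
| |  * entry for every traced function, with a per-CPU counter to prevent |
| |  * recursion. |
| |  */ |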
| 4827 | static void __init |
| 4828 | function_test_events_call(unsigned long ip, unsigned long parent_ip, |
| 4829 | struct ftrace_ops *op, struct ftrace_regs *regs) |
| 4830 | { |
| 4831 | struct trace_buffer *buffer; |
| 4832 | struct ring_buffer_event *event; |
| 4833 | struct ftrace_entry *entry; |
| 4834 | unsigned int trace_ctx; |
| 4835 | long disabled; |
| 4836 | int cpu; |
| 4837 | |
| 4838 | trace_ctx = tracing_gen_ctx(); |
| 4839 | preempt_disable_notrace(); |
| 4840 | cpu = raw_smp_processor_id(); |
| 4841 | disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu)); |
| 4842 | |
| 4843 | if (disabled != 1) |
| 4844 | goto out; |
| 4845 | |
| 4846 | event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file, |
| 4847 | TRACE_FN, sizeof(*entry), |
| 4848 | trace_ctx); |
| 4849 | if (!event) |
| 4850 | goto out; |
| 4851 | entry = ring_buffer_event_data(event); |
| 4852 | entry->ip = ip; |
| 4853 | entry->parent_ip = parent_ip; |
| 4854 | |
| 4855 | event_trigger_unlock_commit(&event_trace_file, buffer, event, |
| 4856 | entry, trace_ctx); |
| 4857 | out: |
| 4858 | atomic_dec(&per_cpu(ftrace_test_event_disable, cpu)); |
| 4859 | preempt_enable_notrace(); |
| 4860 | } |
| 4861 | |
| 4862 | static struct ftrace_ops trace_ops __initdata = |
| 4863 | { |
| 4864 | .func = function_test_events_call, |
| 4865 | }; |
| 4866 | |
| 4867 | static __init void event_trace_self_test_with_function(void) |
| 4868 | { |
| 4869 | int ret; |
| 4870 | |
| 4871 | event_trace_file.tr = top_trace_array(); |
| 4872 | if (WARN_ON(!event_trace_file.tr)) |
| 4873 | return; |
| 4874 | |
| 4875 | ret = register_ftrace_function(&trace_ops); |
| 4876 | if (WARN_ON(ret < 0)) { |
| 4877 | pr_info("Failed to enable function tracer for event tests\n"); |
| 4878 | return; |
| 4879 | } |
| 4880 | pr_info("Running tests again, along with the function tracer\n"); |
| 4881 | event_trace_self_tests(); |
| 4882 | unregister_ftrace_function(&trace_ops); |
| 4883 | } |
| 4884 | #else |
| 4885 | static __init void event_trace_self_test_with_function(void) |
| 4886 | { |
| 4887 | } |
| 4888 | #endif |
| 4889 | |
| 4890 | static __init int event_trace_self_tests_init(void) |
| 4891 | { |
| 4892 | if (!tracing_selftest_disabled) { |
| 4893 | event_trace_self_tests(); |
| 4894 | event_trace_self_test_with_function(); |
| 4895 | } |
| 4896 | |
| 4897 | return 0; |
| 4898 | } |
| 4899 | |
| 4900 | late_initcall(event_trace_self_tests_init); |
| 4901 | |
| 4902 | #endif |
| 4903 | |