Branch data Line data Source code
1 : : // SPDX-License-Identifier: GPL-2.0
2 : :
3 : : #ifndef _LINUX_KERNEL_TRACE_H
4 : : #define _LINUX_KERNEL_TRACE_H
5 : :
6 : : #include <linux/fs.h>
7 : : #include <linux/atomic.h>
8 : : #include <linux/sched.h>
9 : : #include <linux/clocksource.h>
10 : : #include <linux/ring_buffer.h>
11 : : #include <linux/mmiotrace.h>
12 : : #include <linux/tracepoint.h>
13 : : #include <linux/ftrace.h>
14 : : #include <linux/trace.h>
15 : : #include <linux/hw_breakpoint.h>
16 : : #include <linux/trace_seq.h>
17 : : #include <linux/trace_events.h>
18 : : #include <linux/compiler.h>
19 : : #include <linux/glob.h>
20 : : #include <linux/irq_work.h>
21 : : #include <linux/workqueue.h>
22 : :
23 : : #ifdef CONFIG_FTRACE_SYSCALLS
24 : : #include <asm/unistd.h> /* For NR_SYSCALLS */
25 : : #include <asm/syscall.h> /* some archs define it here */
26 : : #endif
27 : :
28 : : enum trace_type {
29 : : __TRACE_FIRST_TYPE = 0,
30 : :
31 : : TRACE_FN,
32 : : TRACE_CTX,
33 : : TRACE_WAKE,
34 : : TRACE_STACK,
35 : : TRACE_PRINT,
36 : : TRACE_BPRINT,
37 : : TRACE_MMIO_RW,
38 : : TRACE_MMIO_MAP,
39 : : TRACE_BRANCH,
40 : : TRACE_GRAPH_RET,
41 : : TRACE_GRAPH_ENT,
42 : : TRACE_USER_STACK,
43 : : TRACE_BLK,
44 : : TRACE_BPUTS,
45 : : TRACE_HWLAT,
46 : : TRACE_RAW_DATA,
47 : :
48 : : __TRACE_LAST_TYPE,
49 : : };
50 : :
51 : :
52 : : #undef __field
53 : : #define __field(type, item) type item;
54 : :
55 : : #undef __field_fn
56 : : #define __field_fn(type, item) type item;
57 : :
58 : : #undef __field_struct
59 : : #define __field_struct(type, item) __field(type, item)
60 : :
61 : : #undef __field_desc
62 : : #define __field_desc(type, container, item)
63 : :
64 : : #undef __array
65 : : #define __array(type, item, size) type item[size];
66 : :
67 : : #undef __array_desc
68 : : #define __array_desc(type, container, item, size)
69 : :
70 : : #undef __dynamic_array
71 : : #define __dynamic_array(type, item) type item[];
72 : :
73 : : #undef F_STRUCT
74 : : #define F_STRUCT(args...) args
75 : :
76 : : #undef FTRACE_ENTRY
77 : : #define FTRACE_ENTRY(name, struct_name, id, tstruct, print) \
78 : : struct struct_name { \
79 : : struct trace_entry ent; \
80 : : tstruct \
81 : : }
82 : :
83 : : #undef FTRACE_ENTRY_DUP
84 : : #define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk)
85 : :
86 : : #undef FTRACE_ENTRY_REG
87 : : #define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, regfn) \
88 : : FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))
89 : :
90 : : #undef FTRACE_ENTRY_PACKED
91 : : #define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print) \
92 : : FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) __packed
93 : :
94 : : #include "trace_entries.h"
95 : :
96 : : /* Use this for memory failure errors */
97 : : #define MEM_FAIL(condition, fmt, ...) ({ \
98 : : static bool __section(.data.once) __warned; \
99 : : int __ret_warn_once = !!(condition); \
100 : : \
101 : : if (unlikely(__ret_warn_once && !__warned)) { \
102 : : __warned = true; \
103 : : pr_err("ERROR: " fmt, ##__VA_ARGS__); \
104 : : } \
105 : : unlikely(__ret_warn_once); \
106 : : })
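
/*
 * A minimal usage sketch for MEM_FAIL (the caller and buffer name are
 * hypothetical): the error is printed once per call site, and the
 * macro returns the condition so the caller can bail out.
 *
 *	buf = kzalloc(size, GFP_KERNEL);
 *	if (MEM_FAIL(!buf, "failed to allocate trace buffer\n"))
 *		return -ENOMEM;
 */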
107 : :
108 : : /*
 109 : : * Syscalls are special and need special handling; this is why
 110 : : * they are not included in trace_entries.h.
111 : : */
112 : : struct syscall_trace_enter {
113 : : struct trace_entry ent;
114 : : int nr;
115 : : unsigned long args[];
116 : : };
117 : :
118 : : struct syscall_trace_exit {
119 : : struct trace_entry ent;
120 : : int nr;
121 : : long ret;
122 : : };
123 : :
124 : : struct kprobe_trace_entry_head {
125 : : struct trace_entry ent;
126 : : unsigned long ip;
127 : : };
128 : :
129 : : struct kretprobe_trace_entry_head {
130 : : struct trace_entry ent;
131 : : unsigned long func;
132 : : unsigned long ret_ip;
133 : : };
134 : :
135 : : /*
136 : : * trace_flag_type is an enumeration that holds different
137 : : * states when a trace occurs. These are:
138 : : * IRQS_OFF - interrupts were disabled
139 : : * IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
140 : : * NEED_RESCHED - reschedule is requested
141 : : * HARDIRQ - inside an interrupt handler
142 : : * SOFTIRQ - inside a softirq handler
143 : : */
144 : : enum trace_flag_type {
145 : : TRACE_FLAG_IRQS_OFF = 0x01,
146 : : TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
147 : : TRACE_FLAG_NEED_RESCHED = 0x04,
148 : : TRACE_FLAG_HARDIRQ = 0x08,
149 : : TRACE_FLAG_SOFTIRQ = 0x10,
150 : : TRACE_FLAG_PREEMPT_RESCHED = 0x20,
151 : : TRACE_FLAG_NMI = 0x40,
152 : : };
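
/*
 * Illustrative sketch (assuming "flags" holds a recorded trace_entry
 * flags value built from the bits above): test whether the event was
 * logged from a hard interrupt handler with interrupts disabled.
 *
 *	bool irq_event = (flags & TRACE_FLAG_HARDIRQ) &&
 *			 (flags & TRACE_FLAG_IRQS_OFF);
 */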
153 : :
154 : : #define TRACE_BUF_SIZE 1024
155 : :
156 : : struct trace_array;
157 : :
158 : : /*
159 : : * The CPU trace array - it consists of thousands of trace entries
 160 : : * plus some other descriptor data (for example, which task started
 161 : : * the trace, etc.).
162 : : */
163 : : struct trace_array_cpu {
164 : : atomic_t disabled;
165 : : void *buffer_page; /* ring buffer spare */
166 : :
167 : : unsigned long entries;
168 : : unsigned long saved_latency;
169 : : unsigned long critical_start;
170 : : unsigned long critical_end;
171 : : unsigned long critical_sequence;
172 : : unsigned long nice;
173 : : unsigned long policy;
174 : : unsigned long rt_priority;
175 : : unsigned long skipped_entries;
176 : : u64 preempt_timestamp;
177 : : pid_t pid;
178 : : kuid_t uid;
179 : : char comm[TASK_COMM_LEN];
180 : :
181 : : bool ignore_pid;
182 : : #ifdef CONFIG_FUNCTION_TRACER
183 : : bool ftrace_ignore_pid;
184 : : #endif
185 : : };
186 : :
187 : : struct tracer;
188 : : struct trace_option_dentry;
189 : :
190 : : struct array_buffer {
191 : : struct trace_array *tr;
192 : : struct trace_buffer *buffer;
193 : : struct trace_array_cpu __percpu *data;
194 : : u64 time_start;
195 : : int cpu;
196 : : };
197 : :
198 : : #define TRACE_FLAGS_MAX_SIZE 32
199 : :
200 : : struct trace_options {
201 : : struct tracer *tracer;
202 : : struct trace_option_dentry *topts;
203 : : };
204 : :
205 : : struct trace_pid_list {
206 : : int pid_max;
207 : : unsigned long *pids;
208 : : };
209 : :
210 : : typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data);
211 : :
212 : : /**
213 : : * struct cond_snapshot - conditional snapshot data and callback
214 : : *
215 : : * The cond_snapshot structure encapsulates a callback function and
216 : : * data associated with the snapshot for a given tracing instance.
217 : : *
218 : : * When a snapshot is taken conditionally, by invoking
219 : : * tracing_snapshot_cond(tr, cond_data), the cond_data passed in is
220 : : * passed in turn to the cond_snapshot.update() function. That data
221 : : * can be compared by the update() implementation with the cond_data
 222 : : * contained within the struct cond_snapshot instance associated with
223 : : * the trace_array. Because the tr->max_lock is held throughout the
224 : : * update() call, the update() function can directly retrieve the
225 : : * cond_snapshot and cond_data associated with the per-instance
226 : : * snapshot associated with the trace_array.
227 : : *
228 : : * The cond_snapshot.update() implementation can save data to be
229 : : * associated with the snapshot if it decides to, and returns 'true'
230 : : * in that case, or it returns 'false' if the conditional snapshot
231 : : * shouldn't be taken.
232 : : *
233 : : * The cond_snapshot instance is created and associated with the
234 : : * user-defined cond_data by tracing_cond_snapshot_enable().
235 : : * Likewise, the cond_snapshot instance is destroyed and is no longer
236 : : * associated with the trace instance by
237 : : * tracing_cond_snapshot_disable().
238 : : *
239 : : * The method below is required.
240 : : *
241 : : * @update: When a conditional snapshot is invoked, the update()
242 : : * callback function is invoked with the tr->max_lock held. The
243 : : * update() implementation signals whether or not to actually
244 : : * take the snapshot, by returning 'true' if so, 'false' if no
245 : : * snapshot should be taken. Because the max_lock is held for
 246 : : * the duration of update(), the implementation can safely
 247 : : * retrieve and save any implementation data it needs
248 : : * to in association with the snapshot.
249 : : */
250 : : struct cond_snapshot {
251 : : void *cond_data;
252 : : cond_update_fn_t update;
253 : : };
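
/*
 * A minimal update() sketch (all names hypothetical): take the
 * snapshot only when the value handed to tracing_snapshot_cond()
 * exceeds a threshold saved as cond_data. tr->max_lock is held
 * around this call, so the cond_snapshot fields are stable.
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		u64 *threshold = tr->cond_snapshot->cond_data;
 *
 *		return *(u64 *)cond_data > *threshold;
 *	}
 */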
254 : :
255 : : /*
256 : : * The trace array - an array of per-CPU trace arrays. This is the
257 : : * highest level data structure that individual tracers deal with.
258 : : * They have on/off state as well:
259 : : */
260 : : struct trace_array {
261 : : struct list_head list;
262 : : char *name;
263 : : struct array_buffer array_buffer;
264 : : #ifdef CONFIG_TRACER_MAX_TRACE
265 : : /*
266 : : * The max_buffer is used to snapshot the trace when a maximum
267 : : * latency is reached, or when the user initiates a snapshot.
268 : : * Some tracers will use this to store a maximum trace while
269 : : * it continues examining live traces.
270 : : *
 271 : : * The buffers for the max_buffer are set up the same as the array_buffer.
272 : : * When a snapshot is taken, the buffer of the max_buffer is swapped
273 : : * with the buffer of the array_buffer and the buffers are reset for
274 : : * the array_buffer so the tracing can continue.
275 : : */
276 : : struct array_buffer max_buffer;
277 : : bool allocated_snapshot;
278 : : #endif
279 : : #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
280 : : unsigned long max_latency;
281 : : #ifdef CONFIG_FSNOTIFY
282 : : struct dentry *d_max_latency;
283 : : struct work_struct fsnotify_work;
284 : : struct irq_work fsnotify_irqwork;
285 : : #endif
286 : : #endif
287 : : struct trace_pid_list __rcu *filtered_pids;
288 : : /*
289 : : * max_lock is used to protect the swapping of buffers
290 : : * when taking a max snapshot. The buffers themselves are
291 : : * protected by per_cpu spinlocks. But the action of the swap
292 : : * needs its own lock.
293 : : *
 294 : : * This is defined as an arch_spinlock_t in order to help
295 : : * with performance when lockdep debugging is enabled.
296 : : *
 297 : : * It is also used in other places outside of update_max_tr,
 298 : : * so it needs to be defined outside of the
 299 : : * CONFIG_TRACER_MAX_TRACE #ifdef.
300 : : */
301 : : arch_spinlock_t max_lock;
302 : : int buffer_disabled;
303 : : #ifdef CONFIG_FTRACE_SYSCALLS
304 : : int sys_refcount_enter;
305 : : int sys_refcount_exit;
306 : : struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
307 : : struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
308 : : #endif
309 : : int stop_count;
310 : : int clock_id;
311 : : int nr_topts;
312 : : bool clear_trace;
313 : : int buffer_percent;
314 : : unsigned int n_err_log_entries;
315 : : struct tracer *current_trace;
316 : : unsigned int trace_flags;
317 : : unsigned char trace_flags_index[TRACE_FLAGS_MAX_SIZE];
318 : : unsigned int flags;
319 : : raw_spinlock_t start_lock;
320 : : struct list_head err_log;
321 : : struct dentry *dir;
322 : : struct dentry *options;
323 : : struct dentry *percpu_dir;
324 : : struct dentry *event_dir;
325 : : struct trace_options *topts;
326 : : struct list_head systems;
327 : : struct list_head events;
328 : : struct trace_event_file *trace_marker_file;
329 : : cpumask_var_t tracing_cpumask; /* only trace on set CPUs */
330 : : int ref;
331 : : #ifdef CONFIG_FUNCTION_TRACER
332 : : struct ftrace_ops *ops;
333 : : struct trace_pid_list __rcu *function_pids;
334 : : #ifdef CONFIG_DYNAMIC_FTRACE
335 : : /* All of these are protected by the ftrace_lock */
336 : : struct list_head func_probes;
337 : : struct list_head mod_trace;
338 : : struct list_head mod_notrace;
339 : : #endif
340 : : /* function tracing enabled */
341 : : int function_enabled;
342 : : #endif
343 : : int time_stamp_abs_ref;
344 : : struct list_head hist_vars;
345 : : #ifdef CONFIG_TRACER_SNAPSHOT
346 : : struct cond_snapshot *cond_snapshot;
347 : : #endif
348 : : };
349 : :
350 : : enum {
351 : : TRACE_ARRAY_FL_GLOBAL = (1 << 0)
352 : : };
353 : :
354 : : extern struct list_head ftrace_trace_arrays;
355 : :
356 : : extern struct mutex trace_types_lock;
357 : :
358 : : extern int trace_array_get(struct trace_array *tr);
359 : : extern int tracing_check_open_get_tr(struct trace_array *tr);
360 : : extern struct trace_array *trace_array_find(const char *instance);
361 : : extern struct trace_array *trace_array_find_get(const char *instance);
362 : :
363 : : extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs);
364 : : extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);
365 : :
366 : : extern bool trace_clock_in_ns(struct trace_array *tr);
367 : :
368 : : /*
369 : : * The global tracer (top) should be the first trace array added,
370 : : * but we check the flag anyway.
371 : : */
372 : 52 : static inline struct trace_array *top_trace_array(void)
373 : : {
374 : 52 : struct trace_array *tr;
375 : :
 376 [ + - + - + - - - - - ] : 52 : if (list_empty(&ftrace_trace_arrays))
377 : : return NULL;
378 : :
379 : 52 : tr = list_entry(ftrace_trace_arrays.prev,
380 : : typeof(*tr), list);
 381 [ - + - + - + - - - - ] : 52 : WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
382 : : return tr;
383 : : }
384 : :
385 : : #define FTRACE_CMP_TYPE(var, type) \
386 : : __builtin_types_compatible_p(typeof(var), type *)
387 : :
388 : : #undef IF_ASSIGN
389 : : #define IF_ASSIGN(var, entry, etype, id) \
390 : : if (FTRACE_CMP_TYPE(var, etype)) { \
391 : : var = (typeof(var))(entry); \
392 : : WARN_ON(id != 0 && (entry)->type != id); \
393 : : break; \
394 : : }
395 : :
396 : : /* Will cause compile errors if type is not found. */
397 : : extern void __ftrace_bad_type(void);
398 : :
399 : : /*
 400 : : * trace_assign_type() is a verifier that the entry type is
 401 : : * the same as the type being assigned. To add new types, simply
 402 : : * add a line with the following format:
 403 : : *
 404 : : * IF_ASSIGN(var, ent, type, id);
 405 : : *
 406 : : * where "type" is the trace type that includes the trace_entry
 407 : : * as the "ent" item, and "id" is the trace identifier that is
 408 : : * used in the trace_type enum.
409 : : *
410 : : * If the type can have more than one id, then use zero.
411 : : */
412 : : #define trace_assign_type(var, ent) \
413 : : do { \
414 : : IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN); \
415 : : IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \
416 : : IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \
417 : : IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
418 : : IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \
419 : : IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \
420 : : IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS); \
421 : : IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT); \
422 : : IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
423 : : IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \
424 : : TRACE_MMIO_RW); \
425 : : IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \
426 : : TRACE_MMIO_MAP); \
427 : : IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
428 : : IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \
429 : : TRACE_GRAPH_ENT); \
430 : : IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \
431 : : TRACE_GRAPH_RET); \
432 : : __ftrace_bad_type(); \
433 : : } while (0)
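
/*
 * Usage sketch: view a generic trace_entry as its concrete type
 * ("field" and "iter" are locals of a hypothetical output routine).
 * Branches that do not match the type of "field" are discarded at
 * compile time, and an unknown type fails to link through
 * __ftrace_bad_type().
 *
 *	struct ftrace_entry *field;
 *
 *	trace_assign_type(field, iter->ent);
 *	trace_seq_printf(&iter->seq, "ip=%lx\n", field->ip);
 */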
434 : :
435 : : /*
436 : : * An option specific to a tracer. This is a boolean value.
 437 : : * The bit is the bit mask that sets its value in the
 438 : : * flags value of struct tracer_flags.
439 : : */
440 : : struct tracer_opt {
441 : : const char *name; /* Will appear on the trace_options file */
442 : : u32 bit; /* Mask assigned in val field in tracer_flags */
443 : : };
444 : :
445 : : /*
446 : : * The set of specific options for a tracer. Your tracer
 447 : : * has to set the initial value of the flags val.
448 : : */
449 : : struct tracer_flags {
450 : : u32 val;
451 : : struct tracer_opt *opts;
452 : : struct tracer *trace;
453 : : };
454 : :
 455 : : /* Makes it easier to define a tracer opt */
456 : : #define TRACER_OPT(s, b) .name = #s, .bit = b
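
/*
 * Example sketch (names hypothetical): one boolean option exposed
 * through the trace_options file, wired into a tracer_flags set.
 *
 *	#define MY_OPT_VERBOSE	0x1
 *
 *	static struct tracer_opt my_opts[] = {
 *		{ TRACER_OPT(verbose, MY_OPT_VERBOSE) },
 *		{ }	<- NULL-name terminator
 *	};
 *
 *	static struct tracer_flags my_flags = {
 *		.val  = 0,
 *		.opts = my_opts,
 *	};
 */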
457 : :
458 : :
459 : : struct trace_option_dentry {
460 : : struct tracer_opt *opt;
461 : : struct tracer_flags *flags;
462 : : struct trace_array *tr;
463 : : struct dentry *entry;
464 : : };
465 : :
466 : : /**
467 : : * struct tracer - a specific tracer and its callbacks to interact with tracefs
468 : : * @name: the name chosen to select it on the available_tracers file
469 : : * @init: called when one switches to this tracer (echo name > current_tracer)
470 : : * @reset: called when one switches to another tracer
471 : : * @start: called when tracing is unpaused (echo 1 > tracing_on)
472 : : * @stop: called when tracing is paused (echo 0 > tracing_on)
473 : : * @update_thresh: called when tracing_thresh is updated
474 : : * @open: called when the trace file is opened
475 : : * @pipe_open: called when the trace_pipe file is opened
476 : : * @close: called when the trace file is released
477 : : * @pipe_close: called when the trace_pipe file is released
478 : : * @read: override the default read callback on trace_pipe
479 : : * @splice_read: override the default splice_read callback on trace_pipe
480 : : * @selftest: selftest to run on boot (see trace_selftest.c)
 481 : : * @print_header: override the first lines that describe your columns
482 : : * @print_line: callback that prints a trace
483 : : * @set_flag: signals one of your private flags changed (trace_options file)
484 : : * @flags: your private flags
485 : : */
486 : : struct tracer {
487 : : const char *name;
488 : : int (*init)(struct trace_array *tr);
489 : : void (*reset)(struct trace_array *tr);
490 : : void (*start)(struct trace_array *tr);
491 : : void (*stop)(struct trace_array *tr);
492 : : int (*update_thresh)(struct trace_array *tr);
493 : : void (*open)(struct trace_iterator *iter);
494 : : void (*pipe_open)(struct trace_iterator *iter);
495 : : void (*close)(struct trace_iterator *iter);
496 : : void (*pipe_close)(struct trace_iterator *iter);
497 : : ssize_t (*read)(struct trace_iterator *iter,
498 : : struct file *filp, char __user *ubuf,
499 : : size_t cnt, loff_t *ppos);
500 : : ssize_t (*splice_read)(struct trace_iterator *iter,
501 : : struct file *filp,
502 : : loff_t *ppos,
503 : : struct pipe_inode_info *pipe,
504 : : size_t len,
505 : : unsigned int flags);
506 : : #ifdef CONFIG_FTRACE_STARTUP_TEST
507 : : int (*selftest)(struct tracer *trace,
508 : : struct trace_array *tr);
509 : : #endif
510 : : void (*print_header)(struct seq_file *m);
511 : : enum print_line_t (*print_line)(struct trace_iterator *iter);
512 : : /* If you handled the flag setting, return 0 */
513 : : int (*set_flag)(struct trace_array *tr,
514 : : u32 old_flags, u32 bit, int set);
515 : : /* Return 0 if OK with change, else return non-zero */
516 : : int (*flag_changed)(struct trace_array *tr,
517 : : u32 mask, int set);
518 : : struct tracer *next;
519 : : struct tracer_flags *flags;
520 : : int enabled;
521 : : int ref;
522 : : bool print_max;
523 : : bool allow_instances;
524 : : #ifdef CONFIG_TRACER_MAX_TRACE
525 : : bool use_max_tr;
526 : : #endif
527 : : /* True if tracer cannot be enabled in kernel param */
528 : : bool noboot;
529 : : };
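
/*
 * A minimal tracer sketch (hypothetical): a @name plus an @init that
 * returns zero is enough to show up in available_tracers once
 * registered from an __init function with register_tracer().
 *
 *	static int my_tracer_init(struct trace_array *tr)
 *	{
 *		return 0;
 *	}
 *
 *	static struct tracer my_tracer __tracer_data = {
 *		.name	= "mytracer",
 *		.init	= my_tracer_init,
 *	};
 */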
530 : :
531 : :
532 : : /* Only current can touch trace_recursion */
533 : :
534 : : /*
535 : : * For function tracing recursion:
 536 : : * The order of these bits is important.
537 : : *
538 : : * When function tracing occurs, the following steps are made:
539 : : * If arch does not support a ftrace feature:
540 : : * call internal function (uses INTERNAL bits) which calls...
541 : : * If callback is registered to the "global" list, the list
542 : : * function is called and recursion checks the GLOBAL bits.
543 : : * then this function calls...
544 : : * The function callback, which can use the FTRACE bits to
545 : : * check for recursion.
546 : : *
 547 : : * Now if the arch does not support a feature, and it calls
 548 : : * the global list function which calls the ftrace callback,
 549 : : * all three of these steps will each do recursion protection.
550 : : * There's no reason to do one if the previous caller already
551 : : * did. The recursion that we are protecting against will
552 : : * go through the same steps again.
553 : : *
554 : : * To prevent the multiple recursion checks, if a recursion
555 : : * bit is set that is higher than the MAX bit of the current
556 : : * check, then we know that the check was made by the previous
557 : : * caller, and we can skip the current check.
558 : : */
559 : : enum {
560 : : TRACE_BUFFER_BIT,
561 : : TRACE_BUFFER_NMI_BIT,
562 : : TRACE_BUFFER_IRQ_BIT,
563 : : TRACE_BUFFER_SIRQ_BIT,
564 : :
565 : : /* Start of function recursion bits */
566 : : TRACE_FTRACE_BIT,
567 : : TRACE_FTRACE_NMI_BIT,
568 : : TRACE_FTRACE_IRQ_BIT,
569 : : TRACE_FTRACE_SIRQ_BIT,
570 : :
571 : : /* INTERNAL_BITs must be greater than FTRACE_BITs */
572 : : TRACE_INTERNAL_BIT,
573 : : TRACE_INTERNAL_NMI_BIT,
574 : : TRACE_INTERNAL_IRQ_BIT,
575 : : TRACE_INTERNAL_SIRQ_BIT,
576 : :
577 : : TRACE_BRANCH_BIT,
578 : : /*
 579 : : * Abuse of the trace_recursion:
 580 : : * we need a way to maintain state if we are tracing the function
 581 : : * graph in irq context, because we want to trace a particular function
 582 : : * that was called in irq context even though irq tracing is off. Since this
583 : : * can only be modified by current, we can reuse trace_recursion.
584 : : */
585 : : TRACE_IRQ_BIT,
586 : :
587 : : /* Set if the function is in the set_graph_function file */
588 : : TRACE_GRAPH_BIT,
589 : :
590 : : /*
591 : : * In the very unlikely case that an interrupt came in
592 : : * at a start of graph tracing, and we want to trace
593 : : * the function in that interrupt, the depth can be greater
594 : : * than zero, because of the preempted start of a previous
595 : : * trace. In an even more unlikely case, depth could be 2
596 : : * if a softirq interrupted the start of graph tracing,
597 : : * followed by an interrupt preempting a start of graph
598 : : * tracing in the softirq, and depth can even be 3
599 : : * if an NMI came in at the start of an interrupt function
600 : : * that preempted a softirq start of a function that
601 : : * preempted normal context!!!! Luckily, it can't be
602 : : * greater than 3, so the next two bits are a mask
 603 : : * of what the depth is when we set TRACE_GRAPH_BIT.
604 : : */
605 : :
606 : : TRACE_GRAPH_DEPTH_START_BIT,
607 : : TRACE_GRAPH_DEPTH_END_BIT,
608 : :
609 : : /*
610 : : * To implement set_graph_notrace, if this bit is set, we ignore
611 : : * function graph tracing of called functions, until the return
612 : : * function is called to clear it.
613 : : */
614 : : TRACE_GRAPH_NOTRACE_BIT,
615 : : };
616 : :
617 : : #define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0)
618 : : #define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
619 : : #define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit)))
620 : :
621 : : #define trace_recursion_depth() \
622 : : (((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3)
623 : : #define trace_recursion_set_depth(depth) \
624 : : do { \
625 : : current->trace_recursion &= \
626 : : ~(3 << TRACE_GRAPH_DEPTH_START_BIT); \
627 : : current->trace_recursion |= \
628 : : ((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT; \
629 : : } while (0)
630 : :
631 : : #define TRACE_CONTEXT_BITS 4
632 : :
633 : : #define TRACE_FTRACE_START TRACE_FTRACE_BIT
634 : : #define TRACE_FTRACE_MAX ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
635 : :
636 : : #define TRACE_LIST_START TRACE_INTERNAL_BIT
637 : : #define TRACE_LIST_MAX ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
638 : :
639 : : #define TRACE_CONTEXT_MASK TRACE_LIST_MAX
640 : :
641 : : static __always_inline int trace_get_context_bit(void)
642 : : {
643 : : int bit;
644 : :
645 : : if (in_interrupt()) {
646 : : if (in_nmi())
647 : : bit = 0;
648 : :
649 : : else if (in_irq())
650 : : bit = 1;
651 : : else
652 : : bit = 2;
653 : : } else
654 : : bit = 3;
655 : :
656 : : return bit;
657 : : }
658 : :
659 : : static __always_inline int trace_test_and_set_recursion(int start, int max)
660 : : {
661 : : unsigned int val = current->trace_recursion;
662 : : int bit;
663 : :
664 : : /* A previous recursion check was made */
665 : : if ((val & TRACE_CONTEXT_MASK) > max)
666 : : return 0;
667 : :
668 : : bit = trace_get_context_bit() + start;
669 : : if (unlikely(val & (1 << bit)))
670 : : return -1;
671 : :
672 : : val |= 1 << bit;
673 : : current->trace_recursion = val;
674 : : barrier();
675 : :
676 : : return bit;
677 : : }
678 : :
679 : : static __always_inline void trace_clear_recursion(int bit)
680 : : {
681 : : unsigned int val = current->trace_recursion;
682 : :
683 : : if (!bit)
684 : : return;
685 : :
686 : : bit = 1 << bit;
687 : : val &= ~bit;
688 : :
689 : : barrier();
690 : : current->trace_recursion = val;
691 : : }
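
/*
 * Usage sketch for the recursion guards above, following the pattern
 * used by function trace callbacks (tracing work elided):
 *
 *	int bit;
 *
 *	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
 *	if (bit < 0)
 *		return;		<- this context is already inside the callback
 *
 *	... do the actual tracing work ...
 *
 *	trace_clear_recursion(bit);
 */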
692 : :
693 : : static inline struct ring_buffer_iter *
694 : 0 : trace_buffer_iter(struct trace_iterator *iter, int cpu)
695 : : {
 696 [ # # # # # # # # # # # # # # # # # # # # # # # # # # ] : 0 : return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL;
697 : : }
698 : :
699 : : int tracer_init(struct tracer *t, struct trace_array *tr);
700 : : int tracing_is_enabled(void);
701 : : void tracing_reset_online_cpus(struct array_buffer *buf);
702 : : void tracing_reset_current(int cpu);
703 : : void tracing_reset_all_online_cpus(void);
704 : : int tracing_open_generic(struct inode *inode, struct file *filp);
705 : : int tracing_open_generic_tr(struct inode *inode, struct file *filp);
706 : : bool tracing_is_disabled(void);
707 : : bool tracer_tracing_is_on(struct trace_array *tr);
708 : : void tracer_tracing_on(struct trace_array *tr);
709 : : void tracer_tracing_off(struct trace_array *tr);
710 : : struct dentry *trace_create_file(const char *name,
711 : : umode_t mode,
712 : : struct dentry *parent,
713 : : void *data,
714 : : const struct file_operations *fops);
715 : :
716 : : struct dentry *tracing_init_dentry(void);
717 : :
718 : : struct ring_buffer_event;
719 : :
720 : : struct ring_buffer_event *
721 : : trace_buffer_lock_reserve(struct trace_buffer *buffer,
722 : : int type,
723 : : unsigned long len,
724 : : unsigned long flags,
725 : : int pc);
726 : :
727 : : struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
728 : : struct trace_array_cpu *data);
729 : :
730 : : struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
731 : : int *ent_cpu, u64 *ent_ts);
732 : :
733 : : void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
734 : : struct ring_buffer_event *event);
735 : :
736 : : int trace_empty(struct trace_iterator *iter);
737 : :
738 : : void *trace_find_next_entry_inc(struct trace_iterator *iter);
739 : :
740 : : void trace_init_global_iter(struct trace_iterator *iter);
741 : :
742 : : void tracing_iter_reset(struct trace_iterator *iter, int cpu);
743 : :
744 : : unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu);
745 : : unsigned long trace_total_entries(struct trace_array *tr);
746 : :
747 : : void trace_function(struct trace_array *tr,
748 : : unsigned long ip,
749 : : unsigned long parent_ip,
750 : : unsigned long flags, int pc);
751 : : void trace_graph_function(struct trace_array *tr,
752 : : unsigned long ip,
753 : : unsigned long parent_ip,
754 : : unsigned long flags, int pc);
755 : : void trace_latency_header(struct seq_file *m);
756 : : void trace_default_header(struct seq_file *m);
757 : : void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
758 : : int trace_empty(struct trace_iterator *iter);
759 : :
760 : : void trace_graph_return(struct ftrace_graph_ret *trace);
761 : : int trace_graph_entry(struct ftrace_graph_ent *trace);
762 : : void set_graph_array(struct trace_array *tr);
763 : :
764 : : void tracing_start_cmdline_record(void);
765 : : void tracing_stop_cmdline_record(void);
766 : : void tracing_start_tgid_record(void);
767 : : void tracing_stop_tgid_record(void);
768 : :
769 : : int register_tracer(struct tracer *type);
770 : : int is_tracing_stopped(void);
771 : :
772 : : loff_t tracing_lseek(struct file *file, loff_t offset, int whence);
773 : :
774 : : extern cpumask_var_t __read_mostly tracing_buffer_mask;
775 : :
776 : : #define for_each_tracing_cpu(cpu) \
777 : : for_each_cpu(cpu, tracing_buffer_mask)
778 : :
779 : : extern unsigned long nsecs_to_usecs(unsigned long nsecs);
780 : :
781 : : extern unsigned long tracing_thresh;
782 : :
783 : : /* PID filtering */
784 : :
785 : : extern int pid_max;
786 : :
787 : : bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
788 : : pid_t search_pid);
789 : : bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
790 : : struct task_struct *task);
791 : : void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
792 : : struct task_struct *self,
793 : : struct task_struct *task);
794 : : void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
795 : : void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
796 : : int trace_pid_show(struct seq_file *m, void *v);
797 : : void trace_free_pid_list(struct trace_pid_list *pid_list);
798 : : int trace_pid_write(struct trace_pid_list *filtered_pids,
799 : : struct trace_pid_list **new_pid_list,
800 : : const char __user *ubuf, size_t cnt);
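
/*
 * Sketch of the typical trace_pid_write() update sequence (locking
 * and error handling elided; this mirrors the pid-filter write
 * paths, which also wait for an RCU grace period before freeing):
 *
 *	struct trace_pid_list *filtered, *new = NULL;
 *	int ret;
 *
 *	filtered = rcu_dereference_protected(tr->filtered_pids, ...);
 *	ret = trace_pid_write(filtered, &new, ubuf, cnt);
 *	if (ret >= 0) {
 *		rcu_assign_pointer(tr->filtered_pids, new);
 *		synchronize_rcu();
 *		trace_free_pid_list(filtered);
 *	}
 */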
801 : :
802 : : #ifdef CONFIG_TRACER_MAX_TRACE
803 : : void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
804 : : void *cond_data);
805 : : void update_max_tr_single(struct trace_array *tr,
806 : : struct task_struct *tsk, int cpu);
807 : : #endif /* CONFIG_TRACER_MAX_TRACE */
808 : :
809 : : #if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
810 : : defined(CONFIG_FSNOTIFY)
811 : :
812 : : void latency_fsnotify(struct trace_array *tr);
813 : :
814 : : #else
815 : :
816 : : static inline void latency_fsnotify(struct trace_array *tr) { }
817 : :
818 : : #endif
819 : :
820 : : #ifdef CONFIG_STACKTRACE
821 : : void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
822 : : int pc);
823 : : #else
824 : : static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
825 : : int skip, int pc)
826 : : {
827 : : }
828 : : #endif /* CONFIG_STACKTRACE */
829 : :
830 : : extern u64 ftrace_now(int cpu);
831 : :
832 : : extern void trace_find_cmdline(int pid, char comm[]);
833 : : extern int trace_find_tgid(int pid);
834 : : extern void trace_event_follow_fork(struct trace_array *tr, bool enable);
835 : :
836 : : #ifdef CONFIG_DYNAMIC_FTRACE
837 : : extern unsigned long ftrace_update_tot_cnt;
838 : : extern unsigned long ftrace_number_of_pages;
839 : : extern unsigned long ftrace_number_of_groups;
840 : : void ftrace_init_trace_array(struct trace_array *tr);
841 : : #else
842 : 0 : static inline void ftrace_init_trace_array(struct trace_array *tr) { }
843 : : #endif
844 : : #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
845 : : extern int DYN_FTRACE_TEST_NAME(void);
846 : : #define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
847 : : extern int DYN_FTRACE_TEST_NAME2(void);
848 : :
849 : : extern bool ring_buffer_expanded;
850 : : extern bool tracing_selftest_disabled;
851 : :
852 : : #ifdef CONFIG_FTRACE_STARTUP_TEST
853 : : extern int trace_selftest_startup_function(struct tracer *trace,
854 : : struct trace_array *tr);
855 : : extern int trace_selftest_startup_function_graph(struct tracer *trace,
856 : : struct trace_array *tr);
857 : : extern int trace_selftest_startup_irqsoff(struct tracer *trace,
858 : : struct trace_array *tr);
859 : : extern int trace_selftest_startup_preemptoff(struct tracer *trace,
860 : : struct trace_array *tr);
861 : : extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
862 : : struct trace_array *tr);
863 : : extern int trace_selftest_startup_wakeup(struct tracer *trace,
864 : : struct trace_array *tr);
865 : : extern int trace_selftest_startup_nop(struct tracer *trace,
866 : : struct trace_array *tr);
867 : : extern int trace_selftest_startup_branch(struct tracer *trace,
868 : : struct trace_array *tr);
869 : : /*
870 : : * Tracer data references selftest functions that only occur
871 : : * on boot up. These can be __init functions. Thus, when selftests
 872 : : * are enabled, the tracers need to reference __init functions.
873 : : */
874 : : #define __tracer_data __refdata
875 : : #else
876 : : /* Tracers are seldom changed. Optimize when selftests are disabled. */
877 : : #define __tracer_data __read_mostly
878 : : #endif /* CONFIG_FTRACE_STARTUP_TEST */
879 : :
880 : : extern void *head_page(struct trace_array_cpu *data);
881 : : extern unsigned long long ns2usecs(u64 nsec);
882 : : extern int
883 : : trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
884 : : extern int
885 : : trace_vprintk(unsigned long ip, const char *fmt, va_list args);
886 : : extern int
887 : : trace_array_vprintk(struct trace_array *tr,
888 : : unsigned long ip, const char *fmt, va_list args);
889 : : int trace_array_printk_buf(struct trace_buffer *buffer,
890 : : unsigned long ip, const char *fmt, ...);
891 : : void trace_printk_seq(struct trace_seq *s);
892 : : enum print_line_t print_trace_line(struct trace_iterator *iter);
893 : :
894 : : extern char trace_find_mark(unsigned long long duration);
895 : :
896 : : struct ftrace_hash;
897 : :
898 : : struct ftrace_mod_load {
899 : : struct list_head list;
900 : : char *func;
901 : : char *module;
902 : : int enable;
903 : : };
904 : :
905 : : enum {
906 : : FTRACE_HASH_FL_MOD = (1 << 0),
907 : : };
908 : :
909 : : struct ftrace_hash {
910 : : unsigned long size_bits;
911 : : struct hlist_head *buckets;
912 : : unsigned long count;
913 : : unsigned long flags;
914 : : struct rcu_head rcu;
915 : : };
916 : :
917 : : struct ftrace_func_entry *
918 : : ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);
919 : :
920 : : static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
921 : : {
922 : : return !hash || !(hash->count || (hash->flags & FTRACE_HASH_FL_MOD));
923 : : }
924 : :
925 : : /* Standard output formatting function used for function return traces */
926 : : #ifdef CONFIG_FUNCTION_GRAPH_TRACER
927 : :
928 : : /* Flag options */
929 : : #define TRACE_GRAPH_PRINT_OVERRUN 0x1
930 : : #define TRACE_GRAPH_PRINT_CPU 0x2
931 : : #define TRACE_GRAPH_PRINT_OVERHEAD 0x4
932 : : #define TRACE_GRAPH_PRINT_PROC 0x8
933 : : #define TRACE_GRAPH_PRINT_DURATION 0x10
934 : : #define TRACE_GRAPH_PRINT_ABS_TIME 0x20
935 : : #define TRACE_GRAPH_PRINT_REL_TIME 0x40
936 : : #define TRACE_GRAPH_PRINT_IRQS 0x80
937 : : #define TRACE_GRAPH_PRINT_TAIL 0x100
938 : : #define TRACE_GRAPH_SLEEP_TIME 0x200
939 : : #define TRACE_GRAPH_GRAPH_TIME 0x400
940 : : #define TRACE_GRAPH_PRINT_FILL_SHIFT 28
941 : : #define TRACE_GRAPH_PRINT_FILL_MASK (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
942 : :
943 : : extern void ftrace_graph_sleep_time_control(bool enable);
944 : :
945 : : #ifdef CONFIG_FUNCTION_PROFILER
946 : : extern void ftrace_graph_graph_time_control(bool enable);
947 : : #else
948 : : static inline void ftrace_graph_graph_time_control(bool enable) { }
949 : : #endif
950 : :
951 : : extern enum print_line_t
952 : : print_graph_function_flags(struct trace_iterator *iter, u32 flags);
953 : : extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
954 : : extern void
955 : : trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
956 : : extern void graph_trace_open(struct trace_iterator *iter);
957 : : extern void graph_trace_close(struct trace_iterator *iter);
958 : : extern int __trace_graph_entry(struct trace_array *tr,
959 : : struct ftrace_graph_ent *trace,
960 : : unsigned long flags, int pc);
961 : : extern void __trace_graph_return(struct trace_array *tr,
962 : : struct ftrace_graph_ret *trace,
963 : : unsigned long flags, int pc);
964 : :
965 : : #ifdef CONFIG_DYNAMIC_FTRACE
966 : : extern struct ftrace_hash __rcu *ftrace_graph_hash;
967 : : extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash;
968 : :
969 : : static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
970 : : {
971 : : unsigned long addr = trace->func;
972 : : int ret = 0;
973 : : struct ftrace_hash *hash;
974 : :
975 : : preempt_disable_notrace();
976 : :
977 : : /*
978 : : * Have to open code "rcu_dereference_sched()" because the
979 : : * function graph tracer can be called when RCU is not
980 : : * "watching".
981 : : * Protected with schedule_on_each_cpu(ftrace_sync)
982 : : */
983 : : hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());
984 : :
985 : : if (ftrace_hash_empty(hash)) {
986 : : ret = 1;
987 : : goto out;
988 : : }
989 : :
990 : : if (ftrace_lookup_ip(hash, addr)) {
991 : :
992 : : /*
993 : : * This needs to be cleared on the return functions
994 : : * when the depth is zero.
995 : : */
996 : : trace_recursion_set(TRACE_GRAPH_BIT);
997 : : trace_recursion_set_depth(trace->depth);
998 : :
999 : : /*
1000 : : * If no irqs are to be traced, but a set_graph_function
1001 : : * is set, and called by an interrupt handler, we still
1002 : : * want to trace it.
1003 : : */
1004 : : if (in_irq())
1005 : : trace_recursion_set(TRACE_IRQ_BIT);
1006 : : else
1007 : : trace_recursion_clear(TRACE_IRQ_BIT);
1008 : : ret = 1;
1009 : : }
1010 : :
1011 : : out:
1012 : : preempt_enable_notrace();
1013 : : return ret;
1014 : : }
1015 : :
1016 : : static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
1017 : : {
1018 : : if (trace_recursion_test(TRACE_GRAPH_BIT) &&
1019 : : trace->depth == trace_recursion_depth())
1020 : : trace_recursion_clear(TRACE_GRAPH_BIT);
1021 : : }
1022 : :
1023 : : static inline int ftrace_graph_notrace_addr(unsigned long addr)
1024 : : {
1025 : : int ret = 0;
1026 : : struct ftrace_hash *notrace_hash;
1027 : :
1028 : : preempt_disable_notrace();
1029 : :
1030 : : /*
1031 : : * Have to open code "rcu_dereference_sched()" because the
1032 : : * function graph tracer can be called when RCU is not
1033 : : * "watching".
1034 : : * Protected with schedule_on_each_cpu(ftrace_sync)
1035 : : */
1036 : : notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
1037 : : !preemptible());
1038 : :
1039 : : if (ftrace_lookup_ip(notrace_hash, addr))
1040 : : ret = 1;
1041 : :
1042 : : preempt_enable_notrace();
1043 : : return ret;
1044 : : }
1045 : : #else
1046 : : static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
1047 : : {
1048 : : return 1;
1049 : : }
1050 : :
1051 : : static inline int ftrace_graph_notrace_addr(unsigned long addr)
1052 : : {
1053 : : return 0;
1054 : : }
1055 : : static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
1056 : : { }
1057 : : #endif /* CONFIG_DYNAMIC_FTRACE */
1058 : :
1059 : : extern unsigned int fgraph_max_depth;
1060 : :
1061 : : static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
1062 : : {
 1063 : : /* Trace it when it is nested in, or is itself, an enabled function. */
1064 : : return !(trace_recursion_test(TRACE_GRAPH_BIT) ||
1065 : : ftrace_graph_addr(trace)) ||
1066 : : (trace->depth < 0) ||
1067 : : (fgraph_max_depth && trace->depth >= fgraph_max_depth);
1068 : : }
1069 : :
1070 : : #else /* CONFIG_FUNCTION_GRAPH_TRACER */
1071 : : static inline enum print_line_t
1072 : : print_graph_function_flags(struct trace_iterator *iter, u32 flags)
1073 : : {
1074 : : return TRACE_TYPE_UNHANDLED;
1075 : : }
1076 : : #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1077 : :
1078 : : extern struct list_head ftrace_pids;
1079 : :
1080 : : #ifdef CONFIG_FUNCTION_TRACER
1081 : : struct ftrace_func_command {
1082 : : struct list_head list;
1083 : : char *name;
1084 : : int (*func)(struct trace_array *tr,
1085 : : struct ftrace_hash *hash,
1086 : : char *func, char *cmd,
1087 : : char *params, int enable);
1088 : : };
1089 : : extern bool ftrace_filter_param __initdata;
1090 : : static inline int ftrace_trace_task(struct trace_array *tr)
1091 : : {
1092 : : return !this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
1093 : : }
1094 : : extern int ftrace_is_dead(void);
1095 : : int ftrace_create_function_files(struct trace_array *tr,
1096 : : struct dentry *parent);
1097 : : void ftrace_destroy_function_files(struct trace_array *tr);
1098 : : void ftrace_init_global_array_ops(struct trace_array *tr);
1099 : : void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
1100 : : void ftrace_reset_array_ops(struct trace_array *tr);
1101 : : void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
1102 : : void ftrace_init_tracefs_toplevel(struct trace_array *tr,
1103 : : struct dentry *d_tracer);
1104 : : void ftrace_clear_pids(struct trace_array *tr);
1105 : : int init_function_trace(void);
1106 : : void ftrace_pid_follow_fork(struct trace_array *tr, bool enable);
1107 : : #else
1108 : : static inline int ftrace_trace_task(struct trace_array *tr)
1109 : : {
1110 : : return 1;
1111 : : }
1112 : 0 : static inline int ftrace_is_dead(void) { return 0; }
1113 : : static inline int
1114 : 13 : ftrace_create_function_files(struct trace_array *tr,
1115 : : struct dentry *parent)
1116 : : {
1117 : 13 : return 0;
1118 : : }
1119 : 0 : static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
1120 : : static inline __init void
1121 : 13 : ftrace_init_global_array_ops(struct trace_array *tr) { }
1122 : : static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
1123 : 13 : static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
1124 : 13 : static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
1125 : 0 : static inline void ftrace_clear_pids(struct trace_array *tr) { }
1126 : 13 : static inline int init_function_trace(void) { return 0; }
1127 : : static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { }
 1128 : : /* ftrace_func_t type is not defined, use macro instead of static inline */
1129 : : #define ftrace_init_array_ops(tr, func) do { } while (0)
1130 : : #endif /* CONFIG_FUNCTION_TRACER */
1131 : :
1132 : : #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
1133 : :
1134 : : struct ftrace_probe_ops {
1135 : : void (*func)(unsigned long ip,
1136 : : unsigned long parent_ip,
1137 : : struct trace_array *tr,
1138 : : struct ftrace_probe_ops *ops,
1139 : : void *data);
1140 : : int (*init)(struct ftrace_probe_ops *ops,
1141 : : struct trace_array *tr,
1142 : : unsigned long ip, void *init_data,
1143 : : void **data);
1144 : : void (*free)(struct ftrace_probe_ops *ops,
1145 : : struct trace_array *tr,
1146 : : unsigned long ip, void *data);
1147 : : int (*print)(struct seq_file *m,
1148 : : unsigned long ip,
1149 : : struct ftrace_probe_ops *ops,
1150 : : void *data);
1151 : : };
1152 : :
1153 : : struct ftrace_func_mapper;
1154 : : typedef int (*ftrace_mapper_func)(void *data);
1155 : :
1156 : : struct ftrace_func_mapper *allocate_ftrace_func_mapper(void);
1157 : : void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
1158 : : unsigned long ip);
1159 : : int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
1160 : : unsigned long ip, void *data);
1161 : : void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
1162 : : unsigned long ip);
1163 : : void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
1164 : : ftrace_mapper_func free_func);
1165 : :
1166 : : extern int
1167 : : register_ftrace_function_probe(char *glob, struct trace_array *tr,
1168 : : struct ftrace_probe_ops *ops, void *data);
1169 : : extern int
1170 : : unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
1171 : : struct ftrace_probe_ops *ops);
1172 : : extern void clear_ftrace_function_probes(struct trace_array *tr);
1173 : :
1174 : : int register_ftrace_command(struct ftrace_func_command *cmd);
1175 : : int unregister_ftrace_command(struct ftrace_func_command *cmd);
1176 : :
1177 : : void ftrace_create_filter_files(struct ftrace_ops *ops,
1178 : : struct dentry *parent);
1179 : : void ftrace_destroy_filter_files(struct ftrace_ops *ops);
1180 : :
1181 : : extern int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
1182 : : int len, int reset);
1183 : : extern int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
1184 : : int len, int reset);
1185 : : #else
1186 : : struct ftrace_func_command;
1187 : :
1188 : : static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
1189 : : {
1190 : : return -EINVAL;
1191 : : }
1192 : : static inline __init int unregister_ftrace_command(char *cmd_name)
1193 : : {
1194 : : return -EINVAL;
1195 : : }
1196 : 0 : static inline void clear_ftrace_function_probes(struct trace_array *tr)
1197 : : {
1198 : 0 : }
1199 : :
1200 : : /*
1201 : : * The ops parameter passed in is usually undefined.
1202 : : * This must be a macro.
1203 : : */
1204 : : #define ftrace_create_filter_files(ops, parent) do { } while (0)
1205 : : #define ftrace_destroy_filter_files(ops) do { } while (0)
1206 : : #endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */
1207 : :
1208 : : bool ftrace_event_is_function(struct trace_event_call *call);
1209 : :
1210 : : /*
 1211 : : * struct trace_parser - serves for reading the user input separated by spaces
1212 : : * @cont: set if the input is not complete - no final space char was found
1213 : : * @buffer: holds the parsed user input
1214 : : * @idx: user input length
1215 : : * @size: buffer size
1216 : : */
1217 : : struct trace_parser {
1218 : : bool cont;
1219 : : char *buffer;
1220 : : unsigned idx;
1221 : : unsigned size;
1222 : : };
1223 : :
1224 : 0 : static inline bool trace_parser_loaded(struct trace_parser *parser)
1225 : : {
1226 [ # # ]: 0 : return (parser->idx != 0);
1227 : : }
1228 : :
1229 : : static inline bool trace_parser_cont(struct trace_parser *parser)
1230 : : {
1231 : : return parser->cont;
1232 : : }
1233 : :
1234 : 0 : static inline void trace_parser_clear(struct trace_parser *parser)
1235 : : {
1236 : 0 : parser->cont = false;
1237 : 0 : parser->idx = 0;
1238 : 0 : }
1239 : :
1240 : : extern int trace_parser_get_init(struct trace_parser *parser, int size);
1241 : : extern void trace_parser_put(struct trace_parser *parser);
1242 : : extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1243 : : size_t cnt, loff_t *ppos);
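
/*
 * Usage sketch (error handling elided): pull one space-separated
 * token out of a user buffer with the parser helpers above.
 *
 *	struct trace_parser parser;
 *	int read;
 *
 *	trace_parser_get_init(&parser, PAGE_SIZE);
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		... parser.buffer now holds a NUL-terminated token ...
 *	trace_parser_put(&parser);
 */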
1244 : :
1245 : : /*
1246 : : * Only create function graph options if function graph is configured.
1247 : : */
1248 : : #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1249 : : # define FGRAPH_FLAGS \
1250 : : C(DISPLAY_GRAPH, "display-graph"),
1251 : : #else
1252 : : # define FGRAPH_FLAGS
1253 : : #endif
1254 : :
1255 : : #ifdef CONFIG_BRANCH_TRACER
1256 : : # define BRANCH_FLAGS \
1257 : : C(BRANCH, "branch"),
1258 : : #else
1259 : : # define BRANCH_FLAGS
1260 : : #endif
1261 : :
1262 : : #ifdef CONFIG_FUNCTION_TRACER
1263 : : # define FUNCTION_FLAGS \
1264 : : C(FUNCTION, "function-trace"), \
1265 : : C(FUNC_FORK, "function-fork"),
1266 : : # define FUNCTION_DEFAULT_FLAGS TRACE_ITER_FUNCTION
1267 : : #else
1268 : : # define FUNCTION_FLAGS
1269 : : # define FUNCTION_DEFAULT_FLAGS 0UL
1270 : : # define TRACE_ITER_FUNC_FORK 0UL
1271 : : #endif
1272 : :
1273 : : #ifdef CONFIG_STACKTRACE
1274 : : # define STACK_FLAGS \
1275 : : C(STACKTRACE, "stacktrace"),
1276 : : #else
1277 : : # define STACK_FLAGS
1278 : : #endif
1279 : :
1280 : : /*
1281 : : * trace_iterator_flags is an enumeration that defines bit
 1282 : : * positions into trace_flags that control the output.
1283 : : *
1284 : : * NOTE: These bits must match the trace_options array in
1285 : : * trace.c (this macro guarantees it).
1286 : : */
1287 : : #define TRACE_FLAGS \
1288 : : C(PRINT_PARENT, "print-parent"), \
1289 : : C(SYM_OFFSET, "sym-offset"), \
1290 : : C(SYM_ADDR, "sym-addr"), \
1291 : : C(VERBOSE, "verbose"), \
1292 : : C(RAW, "raw"), \
1293 : : C(HEX, "hex"), \
1294 : : C(BIN, "bin"), \
1295 : : C(BLOCK, "block"), \
1296 : : C(PRINTK, "trace_printk"), \
1297 : : C(ANNOTATE, "annotate"), \
1298 : : C(USERSTACKTRACE, "userstacktrace"), \
1299 : : C(SYM_USEROBJ, "sym-userobj"), \
1300 : : C(PRINTK_MSGONLY, "printk-msg-only"), \
1301 : : C(CONTEXT_INFO, "context-info"), /* Print pid/cpu/time */ \
1302 : : C(LATENCY_FMT, "latency-format"), \
1303 : : C(RECORD_CMD, "record-cmd"), \
1304 : : C(RECORD_TGID, "record-tgid"), \
1305 : : C(OVERWRITE, "overwrite"), \
1306 : : C(STOP_ON_FREE, "disable_on_free"), \
1307 : : C(IRQ_INFO, "irq-info"), \
1308 : : C(MARKERS, "markers"), \
1309 : : C(EVENT_FORK, "event-fork"), \
1310 : : FUNCTION_FLAGS \
1311 : : FGRAPH_FLAGS \
1312 : : STACK_FLAGS \
1313 : : BRANCH_FLAGS
1314 : :
1315 : : /*
1316 : : * By defining C, we can make TRACE_FLAGS a list of bit names
1317 : : * that will define the bits for the flag masks.
1318 : : */
1319 : : #undef C
1320 : : #define C(a, b) TRACE_ITER_##a##_BIT
1321 : :
1322 : : enum trace_iterator_bits {
1323 : : TRACE_FLAGS
1324 : : /* Make sure we don't go more than we have bits for */
1325 : : TRACE_ITER_LAST_BIT
1326 : : };
1327 : :
1328 : : /*
1329 : : * By redefining C, we can make TRACE_FLAGS a list of masks that
1330 : : * use the bits as defined above.
1331 : : */
1332 : : #undef C
1333 : : #define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)
1334 : :
1335 : : enum trace_iterator_flags { TRACE_FLAGS };
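
/*
 * Expansion sketch: for C(PRINT_PARENT, "print-parent"), the two
 * passes over TRACE_FLAGS above generate
 *
 *	TRACE_ITER_PRINT_PARENT_BIT,				<- bit position
 *	TRACE_ITER_PRINT_PARENT = (1 << TRACE_ITER_PRINT_PARENT_BIT)
 *
 * so option tests read as: tr->trace_flags & TRACE_ITER_PRINT_PARENT.
 */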
1336 : :
1337 : : /*
1338 : : * TRACE_ITER_SYM_MASK masks the options in trace_flags that
1339 : : * control the output of kernel symbols.
1340 : : */
1341 : : #define TRACE_ITER_SYM_MASK \
1342 : : (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
1343 : :
1344 : : extern struct tracer nop_trace;
1345 : :
1346 : : #ifdef CONFIG_BRANCH_TRACER
1347 : : extern int enable_branch_tracing(struct trace_array *tr);
1348 : : extern void disable_branch_tracing(void);
1349 : : static inline int trace_branch_enable(struct trace_array *tr)
1350 : : {
1351 : : if (tr->trace_flags & TRACE_ITER_BRANCH)
1352 : : return enable_branch_tracing(tr);
1353 : : return 0;
1354 : : }
1355 : : static inline void trace_branch_disable(void)
1356 : : {
1357 : : /* due to races, always disable */
1358 : : disable_branch_tracing();
1359 : : }
1360 : : #else
1361 : 0 : static inline int trace_branch_enable(struct trace_array *tr)
1362 : : {
1363 : 0 : return 0;
1364 : : }
1365 : 0 : static inline void trace_branch_disable(void)
1366 : : {
1367 [ # # ]: 0 : }
1368 : : #endif /* CONFIG_BRANCH_TRACER */
1369 : :
 1370 : : /* set ring buffers to default size if not already done */
1371 : : int tracing_update_buffers(void);
1372 : :
1373 : : struct ftrace_event_field {
1374 : : struct list_head link;
1375 : : const char *name;
1376 : : const char *type;
1377 : : int filter_type;
1378 : : int offset;
1379 : : int size;
1380 : : int is_signed;
1381 : : };
1382 : :
1383 : : struct prog_entry;
1384 : :
1385 : : struct event_filter {
1386 : : struct prog_entry __rcu *prog;
1387 : : char *filter_string;
1388 : : };
1389 : :
1390 : : struct event_subsystem {
1391 : : struct list_head list;
1392 : : const char *name;
1393 : : struct event_filter *filter;
1394 : : int ref_count;
1395 : : };
1396 : :
1397 : : struct trace_subsystem_dir {
1398 : : struct list_head list;
1399 : : struct event_subsystem *subsystem;
1400 : : struct trace_array *tr;
1401 : : struct dentry *entry;
1402 : : int ref_count;
1403 : : int nr_events;
1404 : : };
1405 : :
1406 : : extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
1407 : : struct trace_buffer *buffer,
1408 : : struct ring_buffer_event *event);
1409 : :
1410 : : void trace_buffer_unlock_commit_regs(struct trace_array *tr,
1411 : : struct trace_buffer *buffer,
1412 : : struct ring_buffer_event *event,
1413 : : unsigned long flags, int pc,
1414 : : struct pt_regs *regs);
1415 : :
1416 : 0 : static inline void trace_buffer_unlock_commit(struct trace_array *tr,
1417 : : struct trace_buffer *buffer,
1418 : : struct ring_buffer_event *event,
1419 : : unsigned long flags, int pc)
1420 : : {
1421 : 0 : trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);
1422 : 0 : }
1423 : :
1424 : : DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
1425 : : DECLARE_PER_CPU(int, trace_buffered_event_cnt);
1426 : : void trace_buffered_event_disable(void);
1427 : : void trace_buffered_event_enable(void);
1428 : :
1429 : : static inline void
1430 : 0 : __trace_event_discard_commit(struct trace_buffer *buffer,
1431 : : struct ring_buffer_event *event)
1432 : : {
1433 [ # # # # ]: 0 : if (this_cpu_read(trace_buffered_event) == event) {
1434 : : /* Simply release the temp buffer */
1435 : 0 : this_cpu_dec(trace_buffered_event_cnt);
1436 : 0 : return;
1437 : : }
1438 : 0 : ring_buffer_discard_commit(buffer, event);
1439 : : }
1440 : :
1441 : : /*
1442 : : * Helper function for event_trigger_unlock_commit{_regs}().
 1443 : : * If there are event triggers attached to this event that require
 1444 : : * filtering against its fields, then they will be called, as the
1445 : : * entry already holds the field information of the current event.
1446 : : *
1447 : : * It also checks if the event should be discarded or not.
1448 : : * It is to be discarded if the event is soft disabled and the
1449 : : * event was only recorded to process triggers, or if the event
1450 : : * filter is active and this event did not match the filters.
1451 : : *
1452 : : * Returns true if the event is discarded, false otherwise.
1453 : : */
1454 : : static inline bool
1455 : 0 : __event_trigger_test_discard(struct trace_event_file *file,
1456 : : struct trace_buffer *buffer,
1457 : : struct ring_buffer_event *event,
1458 : : void *entry,
1459 : : enum event_trigger_type *tt)
1460 : : {
1461 : 0 : unsigned long eflags = file->flags;
1462 : :
1463 [ # # ]: 0 : if (eflags & EVENT_FILE_FL_TRIGGER_COND)
1464 : 0 : *tt = event_triggers_call(file, entry, event);
1465 : :
1466 [ # # ]: 0 : if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
1467 [ # # # # ]: 0 : (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
1468 : 0 : !filter_match_preds(file->filter, entry))) {
1469 : 0 : __trace_event_discard_commit(buffer, event);
1470 : 0 : return true;
1471 : : }
1472 : :
1473 : : return false;
1474 : : }
1475 : :
1476 : : /**
1477 : : * event_trigger_unlock_commit - handle triggers and finish event commit
 1478 : : * @file: The file pointer associated with the event
1479 : : * @buffer: The ring buffer that the event is being written to
1480 : : * @event: The event meta data in the ring buffer
1481 : : * @entry: The event itself
1482 : : * @irq_flags: The state of the interrupts at the start of the event
1483 : : * @pc: The state of the preempt count at the start of the event.
1484 : : *
1485 : : * This is a helper function to handle triggers that require data
 1486 : : * from the event itself. It also tests the event against filters, and
 1487 : : * checks whether the event is soft disabled and should be discarded.
1488 : : */
1489 : : static inline void
1490 : 0 : event_trigger_unlock_commit(struct trace_event_file *file,
1491 : : struct trace_buffer *buffer,
1492 : : struct ring_buffer_event *event,
1493 : : void *entry, unsigned long irq_flags, int pc)
1494 : : {
1495 : 0 : enum event_trigger_type tt = ETT_NONE;
1496 : :
1497 [ # # ]: 0 : if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
1498 : 0 : trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
1499 : :
1500 [ # # ]: 0 : if (tt)
1501 : 0 : event_triggers_post_call(file, tt);
1502 : 0 : }
1503 : :
1504 : : /**
1505 : : * event_trigger_unlock_commit_regs - handle triggers and finish event commit
 1506 : : * @file: The file pointer associated with the event
1507 : : * @buffer: The ring buffer that the event is being written to
1508 : : * @event: The event meta data in the ring buffer
1509 : : * @entry: The event itself
1510 : : * @irq_flags: The state of the interrupts at the start of the event
1511 : : * @pc: The state of the preempt count at the start of the event.
1512 : : *
1513 : : * This is a helper function to handle triggers that require data
 1514 : : * from the event itself. It also tests the event against filters, and
 1515 : : * checks whether the event is soft disabled and should be discarded.
1516 : : *
1517 : : * Same as event_trigger_unlock_commit() but calls
1518 : : * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
1519 : : */
1520 : : static inline void
1521 : 0 : event_trigger_unlock_commit_regs(struct trace_event_file *file,
1522 : : struct trace_buffer *buffer,
1523 : : struct ring_buffer_event *event,
1524 : : void *entry, unsigned long irq_flags, int pc,
1525 : : struct pt_regs *regs)
1526 : : {
1527 : 0 : enum event_trigger_type tt = ETT_NONE;
1528 : :
1529 [ # # ]: 0 : if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
1530 : 0 : trace_buffer_unlock_commit_regs(file->tr, buffer, event,
1531 : : irq_flags, pc, regs);
1532 : :
1533 [ # # ]: 0 : if (tt)
1534 : 0 : event_triggers_post_call(file, tt);
1535 : 0 : }
1536 : :
1537 : : #define FILTER_PRED_INVALID ((unsigned short)-1)
1538 : : #define FILTER_PRED_IS_RIGHT (1 << 15)
1539 : : #define FILTER_PRED_FOLD (1 << 15)
1540 : :
1541 : : /*
1542 : : * The max number of preds is limited by the size of an unsigned short
1543 : : * with two flag bits at the MSBs. One bit is used for both the IS_RIGHT
1544 : : * and FOLD flags. The other is reserved.
1545 : : *
1546 : : * 2^14 preds is way more than enough.
1547 : : */
1548 : : #define MAX_FILTER_PRED 16384
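: :
: : /*
: :  * Sketch (hypothetical helper, not in-tree): with the two flag bits
: :  * occupying the MSBs of an unsigned short, a predicate index is
: :  * recovered by masking down to the low 14 bits, so it is always
: :  * smaller than MAX_FILTER_PRED.
: :  */
: : static inline unsigned short my_pred_index(unsigned short op)
: : {
: : 	return op & (MAX_FILTER_PRED - 1);
: : }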
1549 : :
1550 : : struct filter_pred;
1551 : : struct regex;
1552 : :
1553 : : typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);
1554 : :
1555 : : typedef int (*regex_match_func)(char *str, struct regex *r, int len);
1556 : :
1557 : : enum regex_type {
1558 : : MATCH_FULL = 0,
1559 : : MATCH_FRONT_ONLY,
1560 : : MATCH_MIDDLE_ONLY,
1561 : : MATCH_END_ONLY,
1562 : : MATCH_GLOB,
1563 : : MATCH_INDEX,
1564 : : };
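: :
: : /*
: :  * Illustrative sketch, assuming the conventional ftrace glob rules
: :  * implemented by filter_parse_regex() (declared below): the position
: :  * of '*' in the pattern selects the regex_type, e.g.
: :  *
: :  *   "sched"   -> MATCH_FULL
: :  *   "sched*"  -> MATCH_FRONT_ONLY
: :  *   "*sched*" -> MATCH_MIDDLE_ONLY
: :  *   "*sched"  -> MATCH_END_ONLY
: :  *   "s*d"     -> MATCH_GLOB
: :  */
: : static inline enum regex_type my_parse_example(char **search, int *not)
: : {
: : 	static char pat[] = "*sched*";
: :
: : 	/* expected: MATCH_MIDDLE_ONLY, *search -> "sched", *not == 0 */
: : 	return filter_parse_regex(pat, sizeof(pat) - 1, search, not);
: : }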
1565 : :
1566 : : struct regex {
1567 : : char pattern[MAX_FILTER_STR_VAL];
1568 : : int len;
1569 : : int field_len;
1570 : : regex_match_func match;
1571 : : };
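: :
: : /*
: :  * Sketch of a regex_match_func with MATCH_FRONT_ONLY semantics,
: :  * mirroring what such a matcher is expected to do. The real matchers
: :  * live in trace_events_filter.c; this one is illustrative only.
: :  */
: : static inline int my_regex_match_front(char *str, struct regex *r, int len)
: : {
: : 	if (len < r->len)
: : 		return 0;
: :
: : 	return strncmp(str, r->pattern, r->len) == 0;
: : }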
1572 : :
1573 : : struct filter_pred {
1574 : : filter_pred_fn_t fn;
1575 : : u64 val;
1576 : : struct regex regex;
1577 : : unsigned short *ops;
1578 : : struct ftrace_event_field *field;
1579 : : int offset;
1580 : : int not;
1581 : : int op;
1582 : : };
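: :
: : /*
: :  * Sketch of a filter_pred_fn_t (hypothetical): compare a u64 field at
: :  * pred->offset within the record against pred->val, honoring the
: :  * pred->not inversion flag.
: :  */
: : static inline int my_pred_u64_eq(struct filter_pred *pred, void *event)
: : {
: : 	u64 *addr = (u64 *)((char *)event + pred->offset);
: : 	int match = (*addr == pred->val);
: :
: : 	return match == !pred->not;
: : }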
1583 : :
1584 : 0 : static inline bool is_string_field(struct ftrace_event_field *field)
1585 : : {
1586 : 0 : return field->filter_type == FILTER_DYN_STRING ||
1587 : : field->filter_type == FILTER_STATIC_STRING ||
1588 [ # # # # ]: 0 : field->filter_type == FILTER_PTR_STRING ||
1589 : : field->filter_type == FILTER_COMM;
1590 : : }
1591 : :
1592 : : static inline bool is_function_field(struct ftrace_event_field *field)
1593 : : {
1594 : : return field->filter_type == FILTER_TRACE_FN;
1595 : : }
1596 : :
1597 : : extern enum regex_type
1598 : : filter_parse_regex(char *buff, int len, char **search, int *not);
1599 : : extern void print_event_filter(struct trace_event_file *file,
1600 : : struct trace_seq *s);
1601 : : extern int apply_event_filter(struct trace_event_file *file,
1602 : : char *filter_string);
1603 : : extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
1604 : : char *filter_string);
1605 : : extern void print_subsystem_event_filter(struct event_subsystem *system,
1606 : : struct trace_seq *s);
1607 : : extern int filter_assign_type(const char *type);
1608 : : extern int create_event_filter(struct trace_array *tr,
1609 : : struct trace_event_call *call,
1610 : : char *filter_str, bool set_str,
1611 : : struct event_filter **filterp);
1612 : : extern void free_event_filter(struct event_filter *filter);
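: :
: : /*
: :  * Usage sketch (hypothetical caller): create_event_filter() builds a
: :  * standalone filter that is later released with free_event_filter().
: :  * The helper name and filter expression are examples only.
: :  */
: : static inline int my_build_filter(struct trace_array *tr,
: : 				  struct trace_event_call *call,
: : 				  struct event_filter **filterp)
: : {
: : 	char str[] = "common_pid != 0";
: :
: : 	return create_event_filter(tr, call, str, false, filterp);
: : }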
1613 : :
1614 : : struct ftrace_event_field *
1615 : : trace_find_event_field(struct trace_event_call *call, char *name);
1616 : :
1617 : : extern void trace_event_enable_cmd_record(bool enable);
1618 : : extern void trace_event_enable_tgid_record(bool enable);
1619 : :
1620 : : extern int event_trace_init(void);
1621 : : extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
1622 : : extern int event_trace_del_tracer(struct trace_array *tr);
1623 : :
1624 : : extern struct trace_event_file *__find_event_file(struct trace_array *tr,
1625 : : const char *system,
1626 : : const char *event);
1627 : : extern struct trace_event_file *find_event_file(struct trace_array *tr,
1628 : : const char *system,
1629 : : const char *event);
1630 : :
1631 : 0 : static inline void *event_file_data(struct file *filp)
1632 : : {
1633 [ # # # # # # # # # # # # # # # # # # ]: 0 : return READ_ONCE(file_inode(filp)->i_private);
1634 : : }
1635 : :
1636 : : extern struct mutex event_mutex;
1637 : : extern struct list_head ftrace_events;
1638 : :
1639 : : extern const struct file_operations event_trigger_fops;
1640 : : extern const struct file_operations event_hist_fops;
1641 : : extern const struct file_operations event_inject_fops;
1642 : :
1643 : : #ifdef CONFIG_HIST_TRIGGERS
1644 : : extern int register_trigger_hist_cmd(void);
1645 : : extern int register_trigger_hist_enable_disable_cmds(void);
1646 : : #else
1647 : 13 : static inline int register_trigger_hist_cmd(void) { return 0; }
1648 : 13 : static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
1649 : : #endif
1650 : :
1651 : : extern int register_trigger_cmds(void);
1652 : : extern void clear_event_triggers(struct trace_array *tr);
1653 : :
1654 : : struct event_trigger_data {
1655 : : unsigned long count;
1656 : : int ref;
1657 : : struct event_trigger_ops *ops;
1658 : : struct event_command *cmd_ops;
1659 : : struct event_filter __rcu *filter;
1660 : : char *filter_str;
1661 : : void *private_data;
1662 : : bool paused;
1663 : : bool paused_tmp;
1664 : : struct list_head list;
1665 : : char *name;
1666 : : struct list_head named_list;
1667 : : struct event_trigger_data *named_data;
1668 : : };
1669 : :
1670 : : /* Avoid typos */
1671 : : #define ENABLE_EVENT_STR "enable_event"
1672 : : #define DISABLE_EVENT_STR "disable_event"
1673 : : #define ENABLE_HIST_STR "enable_hist"
1674 : : #define DISABLE_HIST_STR "disable_hist"
1675 : :
1676 : : struct enable_trigger_data {
1677 : : struct trace_event_file *file;
1678 : : bool enable;
1679 : : bool hist;
1680 : : };
1681 : :
1682 : : extern int event_enable_trigger_print(struct seq_file *m,
1683 : : struct event_trigger_ops *ops,
1684 : : struct event_trigger_data *data);
1685 : : extern void event_enable_trigger_free(struct event_trigger_ops *ops,
1686 : : struct event_trigger_data *data);
1687 : : extern int event_enable_trigger_func(struct event_command *cmd_ops,
1688 : : struct trace_event_file *file,
1689 : : char *glob, char *cmd, char *param);
1690 : : extern int event_enable_register_trigger(char *glob,
1691 : : struct event_trigger_ops *ops,
1692 : : struct event_trigger_data *data,
1693 : : struct trace_event_file *file);
1694 : : extern void event_enable_unregister_trigger(char *glob,
1695 : : struct event_trigger_ops *ops,
1696 : : struct event_trigger_data *test,
1697 : : struct trace_event_file *file);
1698 : : extern void trigger_data_free(struct event_trigger_data *data);
1699 : : extern int event_trigger_init(struct event_trigger_ops *ops,
1700 : : struct event_trigger_data *data);
1701 : : extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
1702 : : int trigger_enable);
1703 : : extern void update_cond_flag(struct trace_event_file *file);
1704 : : extern int set_trigger_filter(char *filter_str,
1705 : : struct event_trigger_data *trigger_data,
1706 : : struct trace_event_file *file);
1707 : : extern struct event_trigger_data *find_named_trigger(const char *name);
1708 : : extern bool is_named_trigger(struct event_trigger_data *test);
1709 : : extern int save_named_trigger(const char *name,
1710 : : struct event_trigger_data *data);
1711 : : extern void del_named_trigger(struct event_trigger_data *data);
1712 : : extern void pause_named_trigger(struct event_trigger_data *data);
1713 : : extern void unpause_named_trigger(struct event_trigger_data *data);
1714 : : extern void set_named_trigger_data(struct event_trigger_data *data,
1715 : : struct event_trigger_data *named_data);
1716 : : extern struct event_trigger_data *
1717 : : get_named_trigger_data(struct event_trigger_data *data);
1718 : : extern int register_event_command(struct event_command *cmd);
1719 : : extern int unregister_event_command(struct event_command *cmd);
1720 : : extern int register_trigger_hist_enable_disable_cmds(void);
1721 : :
1722 : : /**
1723 : : * struct event_trigger_ops - callbacks for trace event triggers
1724 : : *
1725 : : * The methods in this structure provide per-event trigger hooks for
1726 : : * various trigger operations.
1727 : : *
1728 : : * All the methods below, except for @init() and @free(), must be
1729 : : * implemented.
1730 : : *
1731 : : * @func: The trigger 'probe' function called when the triggering
1732 : : * event occurs. The data passed into this callback is the data
1733 : : * that was supplied to the event_command @reg() function that
1734 : : * registered the trigger (see struct event_command) along with
1735 : : * the trace record, rec.
1736 : : *
1737 : : * @init: An optional initialization function called for the trigger
1738 : : * when the trigger is registered (via the event_command reg()
1739 : : * function). This can be used to perform per-trigger
1740 : : * initialization such as incrementing a per-trigger reference
1741 : : * count, for instance. This is usually implemented by the
1742 : : * generic utility function @event_trigger_init() (see
1743 : : * trace_events_trigger.c).
1744 : : *
1745 : : * @free: An optional de-initialization function called for the
1746 : : * trigger when the trigger is unregistered (via the
1747 : : * event_command @reg() function). This can be used to perform
1748 : : * per-trigger de-initialization such as decrementing a
1749 : : * per-trigger reference count and freeing corresponding trigger
1750 : : * data, for instance. This is usually implemented by the
1751 : : * generic utility function @event_trigger_free() (see
1752 : : * trace_events_trigger.c).
1753 : : *
1754 : : * @print: The callback function invoked to have the trigger print
1755 : : * itself. This is usually implemented by a wrapper function
1756 : : * that calls the generic utility function @event_trigger_print()
1757 : : * (see trace_events_trigger.c).
1758 : : */
1759 : : struct event_trigger_ops {
1760 : : void (*func)(struct event_trigger_data *data,
1761 : : void *rec,
1762 : : struct ring_buffer_event *rbe);
1763 : : int (*init)(struct event_trigger_ops *ops,
1764 : : struct event_trigger_data *data);
1765 : : void (*free)(struct event_trigger_ops *ops,
1766 : : struct event_trigger_data *data);
1767 : : int (*print)(struct seq_file *m,
1768 : : struct event_trigger_ops *ops,
1769 : : struct event_trigger_data *data);
1770 : : };
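: :
: : /*
: :  * Sketch: a minimal trigger wired into the ops above. The names and
: :  * the counting/print behavior are hypothetical, modeled on the stock
: :  * triggers in trace_events_trigger.c; @init reuses the generic
: :  * event_trigger_init() declared earlier, and the optional @free is
: :  * omitted.
: :  */
: : static void my_count_trigger(struct event_trigger_data *data,
: : 			     void *rec, struct ring_buffer_event *rbe)
: : {
: : 	if (!data->count)
: : 		return;
: : 	if (data->count != -1)
: : 		(data->count)--;
: : 	/* the trigger's actual action would go here */
: : }
: :
: : static int my_count_trigger_print(struct seq_file *m,
: : 				  struct event_trigger_ops *ops,
: : 				  struct event_trigger_data *data)
: : {
: : 	seq_puts(m, "my_count");
: : 	if (data->count != -1)
: : 		seq_printf(m, ":count=%lu", data->count);
: : 	seq_putc(m, '\n');
: : 	return 0;
: : }
: :
: : static struct event_trigger_ops my_count_trigger_ops = {
: : 	.func	= my_count_trigger,
: : 	.init	= event_trigger_init,
: : 	.print	= my_count_trigger_print,
: : };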
1771 : :
1772 : : /**
1773 : : * struct event_command - callbacks and data members for event commands
1774 : : *
1775 : : * Event commands are invoked by users by writing the command name
1776 : : * into the 'trigger' file associated with a trace event. The
1777 : : * parameters associated with a specific invocation of an event
1778 : : * command are used to create an event trigger instance, which is
1779 : : * added to the list of trigger instances associated with that trace
1780 : : * event. When the event is hit, the set of triggers associated with
1781 : : * that event is invoked.
1782 : : *
1783 : : * The data members in this structure provide per-event command data
1784 : : * for various event commands.
1785 : : *
1786 : : * All the data members below, except for @flags, must be set
1787 : : * for each event command.
1788 : : *
1789 : : * @name: The unique name that identifies the event command. This is
1790 : : * the name used when setting triggers via trigger files.
1791 : : *
1792 : : * @trigger_type: A unique id that identifies the event command
1793 : : * 'type'. This value has two purposes, the first to ensure that
1794 : : * only one trigger of the same type can be set at a given time
1795 : : * for a particular event, e.g. it doesn't make sense to have both
1796 : : * a traceon and traceoff trigger attached to a single event at
1797 : : * the same time, so traceon and traceoff have the same type
1798 : : * though they have different names. The @trigger_type value is
1799 : : * also used as a bit value for deferring the actual trigger
1800 : : * action until after the current event is finished. Some
1801 : : * commands need to do this if they themselves log to the trace
1802 : : * buffer (see the @POST_TRIGGER flag below). @trigger_type
1803 : : * values are defined by adding new values to the trigger_type
1804 : : * enum in include/linux/trace_events.h.
1805 : : *
1806 : : * @flags: See the enum event_command_flags below.
1807 : : *
1808 : : * All the methods below, except for @set_filter() and @unreg_all(),
1809 : : * must be implemented.
1810 : : *
1811 : : * @func: The callback function responsible for parsing and
1812 : : * registering the trigger written to the 'trigger' file by the
1813 : : * user. It allocates the trigger instance and registers it with
1814 : : * the appropriate trace event. It makes use of the other
1815 : : * event_command callback functions to orchestrate this, and is
1816 : : * usually implemented by the generic utility function
1817 : : * @event_trigger_callback() (see trace_events_trigger.c).
1818 : : *
1819 : : * @reg: Adds the trigger to the list of triggers associated with the
1820 : : * event, and enables the event trigger itself, after
1821 : : * initializing it (via the event_trigger_ops @init() function).
1822 : : * This is also where commands can use the @trigger_type value to
1823 : : * make the decision as to whether or not multiple instances of
1824 : : * the trigger should be allowed. This is usually implemented by
1825 : : * the generic utility function @register_trigger() (see
1826 : : * trace_events_trigger.c).
1827 : : *
1828 : : * @unreg: Removes the trigger from the list of triggers associated
1829 : : * with the event, and disables the event trigger itself, after
1830 : : * de-initializing it (via the event_trigger_ops @free() function).
1831 : : * This is usually implemented by the generic utility function
1832 : : * @unregister_trigger() (see trace_events_trigger.c).
1833 : : *
1834 : : * @unreg_all: An optional function called to remove all the triggers
1835 : : * from the list of triggers associated with the event. Called
1836 : : * when a trigger file is opened in truncate mode.
1837 : : *
1838 : : * @set_filter: An optional function called to parse and set a filter
1839 : : * for the trigger. If no @set_filter() method is set for the
1840 : : * event command, filters set by the user for the command will be
1841 : : * ignored. This is usually implemented by the generic utility
1842 : : * function @set_trigger_filter() (see trace_events_trigger.c).
1843 : : *
1844 : : * @get_trigger_ops: The callback function invoked to retrieve the
1845 : : * event_trigger_ops implementation associated with the command.
1846 : : */
1847 : : struct event_command {
1848 : : struct list_head list;
1849 : : char *name;
1850 : : enum event_trigger_type trigger_type;
1851 : : int flags;
1852 : : int (*func)(struct event_command *cmd_ops,
1853 : : struct trace_event_file *file,
1854 : : char *glob, char *cmd, char *params);
1855 : : int (*reg)(char *glob,
1856 : : struct event_trigger_ops *ops,
1857 : : struct event_trigger_data *data,
1858 : : struct trace_event_file *file);
1859 : : void (*unreg)(char *glob,
1860 : : struct event_trigger_ops *ops,
1861 : : struct event_trigger_data *data,
1862 : : struct trace_event_file *file);
1863 : : void (*unreg_all)(struct trace_event_file *file);
1864 : : int (*set_filter)(char *filter_str,
1865 : : struct event_trigger_data *data,
1866 : : struct trace_event_file *file);
1867 : : struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
1868 : : };
1869 : :
1870 : : /**
1871 : : * enum event_command_flags - flags for struct event_command
1872 : : *
1873 : : * @POST_TRIGGER: A flag that says whether or not this command needs
1874 : : * to have its action delayed until after the current event has
1875 : : * been closed. Some triggers need to avoid being invoked while
1876 : : * an event is currently in the process of being logged, since
1877 : : * the trigger may itself log data into the trace buffer. Thus
1878 : : * we make sure the current event is committed before invoking
1879 : : * those triggers. To do that, the trigger invocation is split
1880 : : * in two - the first part checks the filter using the current
1881 : : * trace record; if a command has the @post_trigger flag set, it
1882 : : * sets a bit for itself in the return value, otherwise it
1883 : : * directly invokes the trigger. Once all commands have either
1884 : : * been invoked or have set their return flag, the current record is
1885 : : * either committed or discarded. At that point, if any commands
1886 : : * have deferred their triggers, those commands are finally
1887 : : * invoked following the close of the current event. In other
1888 : : * words, if the event_trigger_ops @func() probe implementation
1889 : : * itself logs to the trace buffer, this flag should be set,
1890 : : * otherwise it can be left unspecified.
1891 : : *
1892 : : * @NEEDS_REC: A flag that says whether or not this command needs
1893 : : * access to the trace record in order to perform its function,
1894 : : * regardless of whether or not it has a filter associated with
1895 : : * it (filters make a trigger require access to the trace record
1896 : : * but are not always present).
1897 : : */
1898 : : enum event_command_flags {
1899 : : EVENT_CMD_FL_POST_TRIGGER = 1,
1900 : : EVENT_CMD_FL_NEEDS_REC = 2,
1901 : : };
1902 : :
1903 : 0 : static inline bool event_command_post_trigger(struct event_command *cmd_ops)
1904 : : {
1905 [ # # # # ]: 0 : return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
1906 : : }
1907 : :
1908 : 0 : static inline bool event_command_needs_rec(struct event_command *cmd_ops)
1909 : : {
1910 [ # # ]: 0 : return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
1911 : : }
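: :
: : /*
: :  * Sketch: wiring a hypothetical "my_cmd" event command out of the
: :  * generic helpers declared earlier in this header, paired with the
: :  * my_count_trigger_ops sketch above. Reusing ETT_EVENT_ENABLE is an
: :  * assumption for illustration only; a real command defines its own
: :  * trigger_type bit in include/linux/trace_events.h.
: :  */
: : static struct event_trigger_ops *
: : my_cmd_get_trigger_ops(char *cmd, char *param)
: : {
: : 	return &my_count_trigger_ops;
: : }
: :
: : static struct event_command my_cmd = {
: : 	.name			= "my_cmd",
: : 	.trigger_type		= ETT_EVENT_ENABLE,
: : 	.flags			= EVENT_CMD_FL_NEEDS_REC,
: : 	.func			= event_enable_trigger_func,
: : 	.reg			= event_enable_register_trigger,
: : 	.unreg			= event_enable_unregister_trigger,
: : 	.set_filter		= set_trigger_filter,
: : 	.get_trigger_ops	= my_cmd_get_trigger_ops,
: : };
: :
: : /* Registration would then be: register_event_command(&my_cmd); */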
1912 : :
1913 : : extern int trace_event_enable_disable(struct trace_event_file *file,
1914 : : int enable, int soft_disable);
1915 : : extern int tracing_alloc_snapshot(void);
1916 : : extern void tracing_snapshot_cond(struct trace_array *tr, void *cond_data);
1917 : : extern int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update);
1918 : :
1919 : : extern int tracing_snapshot_cond_disable(struct trace_array *tr);
1920 : : extern void *tracing_cond_snapshot_data(struct trace_array *tr);
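: :
: : /*
: :  * Sketch of a cond_update_fn_t as taken by
: :  * tracing_snapshot_cond_enable(): return true when the snapshot
: :  * should be taken. The "armed" cond_data protocol is a hypothetical
: :  * example.
: :  */
: : static bool my_cond_update(struct trace_array *tr, void *cond_data)
: : {
: : 	bool *armed = cond_data;
: :
: : 	return READ_ONCE(*armed);
: : }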
1921 : :
1922 : : extern const char *__start___trace_bprintk_fmt[];
1923 : : extern const char *__stop___trace_bprintk_fmt[];
1924 : :
1925 : : extern const char *__start___tracepoint_str[];
1926 : : extern const char *__stop___tracepoint_str[];
1927 : :
1928 : : void trace_printk_control(bool enabled);
1929 : : void trace_printk_start_comm(void);
1930 : : int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
1931 : : int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
1932 : :
1933 : : /* Used from boot time tracer */
1934 : : extern int trace_set_options(struct trace_array *tr, char *option);
1935 : : extern int tracing_set_tracer(struct trace_array *tr, const char *buf);
1936 : : extern ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
1937 : : unsigned long size, int cpu_id);
1938 : : extern int tracing_set_cpumask(struct trace_array *tr,
1939 : : cpumask_var_t tracing_cpumask_new);
1940 : :
1941 : :
1942 : : #define MAX_EVENT_NAME_LEN 64
1943 : :
1944 : : extern int trace_run_command(const char *buf, int (*createfn)(int, char**));
1945 : : extern ssize_t trace_parse_run_command(struct file *file,
1946 : : const char __user *buffer, size_t count, loff_t *ppos,
1947 : : int (*createfn)(int, char**));
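: :
: : /*
: :  * Sketch of a createfn as consumed by trace_run_command() and
: :  * trace_parse_run_command(): each command line arrives already split
: :  * into argc/argv words. my_create_cmd() is a hypothetical example.
: :  */
: : static int my_create_cmd(int argc, char **argv)
: : {
: : 	if (argc < 1)
: : 		return -EINVAL;
: :
: : 	pr_debug("my_cmd: %s with %d argument(s)\n", argv[0], argc - 1);
: : 	return 0;
: : }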
1948 : :
1949 : : extern unsigned int err_pos(char *cmd, const char *str);
1950 : : extern void tracing_log_err(struct trace_array *tr,
1951 : : const char *loc, const char *cmd,
1952 : : const char **errs, u8 type, u8 pos);
1953 : :
1954 : : /*
1955 : : * Normal trace_printk() and friends allocate special buffers
1956 : : * to do the manipulation, as well as save the print formats
1957 : : * into sections to display. But the trace infrastructure wants
1958 : : * to use these without the added overhead at the price of being
1959 : : * a bit slower (used mainly for warnings, where we don't care
1960 : : * about performance). The internal_trace_puts() is for such
1961 : : * a purpose.
1962 : : */
1963 : : #define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
1964 : :
1965 : : #undef FTRACE_ENTRY
1966 : : #define FTRACE_ENTRY(call, struct_name, id, tstruct, print) \
1967 : : extern struct trace_event_call \
1968 : : __aligned(4) event_##call;
1969 : : #undef FTRACE_ENTRY_DUP
1970 : : #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print) \
1971 : : FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
1972 : : #undef FTRACE_ENTRY_PACKED
1973 : : #define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print) \
1974 : : FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
1975 : :
1976 : : #include "trace_entries.h"
1977 : :
1978 : : #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
1979 : : int perf_ftrace_event_register(struct trace_event_call *call,
1980 : : enum trace_reg type, void *data);
1981 : : #else
1982 : : #define perf_ftrace_event_register NULL
1983 : : #endif
1984 : :
1985 : : #ifdef CONFIG_FTRACE_SYSCALLS
1986 : : void init_ftrace_syscalls(void);
1987 : : const char *get_syscall_name(int syscall);
1988 : : #else
1989 : 13 : static inline void init_ftrace_syscalls(void) { }
1990 : : static inline const char *get_syscall_name(int syscall)
1991 : : {
1992 : : return NULL;
1993 : : }
1994 : : #endif
1995 : :
1996 : : #ifdef CONFIG_EVENT_TRACING
1997 : : void trace_event_init(void);
1998 : : void trace_event_eval_update(struct trace_eval_map **map, int len);
1999 : : /* Used from boot time tracer */
2000 : : extern int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
2001 : : extern int trigger_process_regex(struct trace_event_file *file, char *buff);
2002 : : #else
2003 : : static inline void __init trace_event_init(void) { }
2004 : : static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
2005 : : #endif
2006 : :
2007 : : #ifdef CONFIG_TRACER_SNAPSHOT
2008 : : void tracing_snapshot_instance(struct trace_array *tr);
2009 : : int tracing_alloc_snapshot_instance(struct trace_array *tr);
2010 : : #else
2011 : : static inline void tracing_snapshot_instance(struct trace_array *tr) { }
2012 : : static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
2013 : : {
2014 : : return 0;
2015 : : }
2016 : : #endif
2017 : :
2018 : : #ifdef CONFIG_PREEMPT_TRACER
2019 : : void tracer_preempt_on(unsigned long a0, unsigned long a1);
2020 : : void tracer_preempt_off(unsigned long a0, unsigned long a1);
2021 : : #else
2022 : : static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
2023 : : static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
2024 : : #endif
2025 : : #ifdef CONFIG_IRQSOFF_TRACER
2026 : : void tracer_hardirqs_on(unsigned long a0, unsigned long a1);
2027 : : void tracer_hardirqs_off(unsigned long a0, unsigned long a1);
2028 : : #else
2029 : : static inline void tracer_hardirqs_on(unsigned long a0, unsigned long a1) { }
2030 : : static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { }
2031 : : #endif
2032 : :
2033 : : extern struct trace_iterator *tracepoint_print_iter;
2034 : :
2035 : : /*
2036 : : * Reset the state of the trace_iterator so that it can read consumed data.
2037 : : * Normally, the trace_iterator is used for reading the data when it is not
2038 : : * consumed, and must retain state.
2039 : : */
2040 : 0 : static __always_inline void trace_iterator_reset(struct trace_iterator *iter)
2041 : : {
2042 : 0 : const size_t offset = offsetof(struct trace_iterator, seq);
2043 : :
2044 : : /*
2045 : : * Keep gcc from complaining about overwriting more than just one
2046 : : * member in the structure.
2047 : : */
2048 : 0 : memset((char *)iter + offset, 0, sizeof(struct trace_iterator) - offset);
2049 : :
2050 : 0 : iter->pos = -1;
2051 : : }
2052 : :
2053 : : #endif /* _LINUX_KERNEL_TRACE_H */