// SPDX-License-Identifier: GPL-2.0

#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/trace_events.h>
#include <linux/compiler.h>
#include <linux/glob.h>

#ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h>		/* For NR_SYSCALLS */
#include <asm/syscall.h>	/* some archs define it here */
#endif

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_BLK,
	TRACE_BPUTS,
	TRACE_HWLAT,
	TRACE_RAW_DATA,

	__TRACE_LAST_TYPE,
};


#undef __field
#define __field(type, item)		type	item;

#undef __field_struct
#define __field_struct(type, item)	__field(type, item)

#undef __field_desc
#define __field_desc(type, container, item)

#undef __array
#define __array(type, item, size)	type	item[size];

#undef __array_desc
#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type	item[];

#undef F_STRUCT
#define F_STRUCT(args...)		args

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)	\
	struct struct_name {						\
		struct trace_entry	ent;				\
		tstruct							\
	}

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)

#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,	\
			 filter, regfn)					\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)

#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print,	\
			    filter)					\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter) __packed

#include "trace_entries.h"
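
/*
 * As an illustration of the machinery above (this entry does not exist
 * in trace_entries.h; the names are hypothetical), an invocation such as:
 *
 *	FTRACE_ENTRY(example, example_entry, TRACE_EXAMPLE,
 *		F_STRUCT(
 *			__field(	unsigned long,	ip	)
 *			__field(	int,		value	)
 *		),
 *		F_printk("%lx: %d", __entry->ip, __entry->value),
 *		FILTER_OTHER
 *	);
 *
 * expands, with the definitions above, to just the record layout:
 *
 *	struct example_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;
 *		int			value;
 *	};
 *
 * The print and filter arguments are consumed elsewhere, by files that
 * redefine FTRACE_ENTRY() before including trace_entries.h again.
 */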

/*
 * syscalls are special, and need special handling, which is why
 * they are not included in trace_entries.h
 */
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;
	unsigned long		args[];
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;
	long			ret;
};

struct kprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		ip;
};

struct kretprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
	TRACE_FLAG_NMI			= 0x40,
};

#define TRACE_BUF_SIZE		1024

struct trace_array;

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example, which task started
 * the trace).
 */
struct trace_array_cpu {
	atomic_t		disabled;
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		entries;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	u64			preempt_timestamp;
	pid_t			pid;
	kuid_t			uid;
	char			comm[TASK_COMM_LEN];

	bool			ignore_pid;
#ifdef CONFIG_FUNCTION_TRACER
	bool			ftrace_ignore_pid;
#endif
};

struct tracer;
struct trace_option_dentry;

struct trace_buffer {
	struct trace_array		*tr;
	struct ring_buffer		*buffer;
	struct trace_array_cpu __percpu	*data;
	u64				time_start;
	int				cpu;
};

#define TRACE_FLAGS_MAX_SIZE		32

struct trace_options {
	struct tracer			*tracer;
	struct trace_option_dentry	*topts;
};

struct trace_pid_list {
	int				pid_max;
	unsigned long			*pids;
};

typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data);

/**
 * struct cond_snapshot - conditional snapshot data and callback
 *
 * The cond_snapshot structure encapsulates a callback function and
 * data associated with the snapshot for a given tracing instance.
 *
 * When a snapshot is taken conditionally, by invoking
 * tracing_snapshot_cond(tr, cond_data), the cond_data passed in is
 * passed in turn to the cond_snapshot.update() function.  That data
 * can be compared by the update() implementation with the cond_data
 * contained within the struct cond_snapshot instance associated with
 * the trace_array.  Because the tr->max_lock is held throughout the
 * update() call, the update() function can directly retrieve the
 * cond_snapshot and cond_data associated with the per-instance
 * snapshot associated with the trace_array.
 *
 * The cond_snapshot.update() implementation can save data to be
 * associated with the snapshot if it decides to, and returns 'true'
 * in that case, or it returns 'false' if the conditional snapshot
 * shouldn't be taken.
 *
 * The cond_snapshot instance is created and associated with the
 * user-defined cond_data by tracing_cond_snapshot_enable().
 * Likewise, the cond_snapshot instance is destroyed and is no longer
 * associated with the trace instance by
 * tracing_cond_snapshot_disable().
 *
 * The method below is required.
 *
 * @update: When a conditional snapshot is invoked, the update()
 *	callback function is invoked with the tr->max_lock held.  The
 *	update() implementation signals whether or not to actually
 *	take the snapshot, by returning 'true' if so, 'false' if no
 *	snapshot should be taken.  Because the max_lock is held for
 *	the duration of update(), the implementation is safe to
 *	directly retrieve and save any implementation data it needs
 *	to in association with the snapshot.
 */
struct cond_snapshot {
	void				*cond_data;
	cond_update_fn_t		update;
};
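
/*
 * A minimal sketch of an update() implementation (all names here are
 * hypothetical); it would be registered through the enable path
 * described above and fired by tracing_snapshot_cond(tr, cond_data):
 *
 *	struct my_cond {
 *		u64 threshold;
 *		u64 worst;
 *	};
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		struct my_cond *c = cond_data;
 *
 *		// tr->max_lock is held here, so reading and saving
 *		// data in *c is safe against concurrent snapshots.
 *		if (!c || c->worst <= c->threshold)
 *			return false;	// skip the snapshot
 *		return true;
 *	}
 */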

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct list_head	list;
	char			*name;
	struct trace_buffer	trace_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
	 * latency is reached, or when the user initiates a snapshot.
	 * Some tracers will use this to store a maximum trace while
	 * it continues examining live traces.
	 *
	 * The buffers for the max_buffer are set up the same as the
	 * trace_buffer.  When a snapshot is taken, the buffer of the
	 * max_buffer is swapped with the buffer of the trace_buffer and
	 * the buffers are reset for the trace_buffer so the tracing can
	 * continue.
	 */
	struct trace_buffer	max_buffer;
	bool			allocated_snapshot;
#endif
#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
	unsigned long		max_latency;
#endif
	struct trace_pid_list	__rcu *filtered_pids;
	/*
	 * max_lock is used to protect the swapping of buffers
	 * when taking a max snapshot. The buffers themselves are
	 * protected by per_cpu spinlocks. But the action of the swap
	 * needs its own lock.
	 *
	 * This is defined as an arch_spinlock_t in order to help
	 * with performance when lockdep debugging is enabled.
	 *
	 * It is also used in other places outside of update_max_tr
	 * so it needs to be defined outside of CONFIG_TRACER_MAX_TRACE.
	 */
	arch_spinlock_t		max_lock;
	int			buffer_disabled;
#ifdef CONFIG_FTRACE_SYSCALLS
	int			sys_refcount_enter;
	int			sys_refcount_exit;
	struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
	struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
#endif
	int			stop_count;
	int			clock_id;
	int			nr_topts;
	bool			clear_trace;
	int			buffer_percent;
	unsigned int		n_err_log_entries;
	struct tracer		*current_trace;
	unsigned int		trace_flags;
	unsigned char		trace_flags_index[TRACE_FLAGS_MAX_SIZE];
	unsigned int		flags;
	raw_spinlock_t		start_lock;
	struct list_head	err_log;
	struct dentry		*dir;
	struct dentry		*options;
	struct dentry		*percpu_dir;
	struct dentry		*event_dir;
	struct trace_options	*topts;
	struct list_head	systems;
	struct list_head	events;
	struct trace_event_file *trace_marker_file;
	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
	int			ref;
	int			trace_ref;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops	*ops;
	struct trace_pid_list	__rcu *function_pids;
#ifdef CONFIG_DYNAMIC_FTRACE
	/* All of these are protected by the ftrace_lock */
	struct list_head	func_probes;
	struct list_head	mod_trace;
	struct list_head	mod_notrace;
#endif
	/* function tracing enabled */
	int			function_enabled;
#endif
	int			time_stamp_abs_ref;
	struct list_head	hist_vars;
#ifdef CONFIG_TRACER_SNAPSHOT
	struct cond_snapshot	*cond_snapshot;
#endif
};

enum {
	TRACE_ARRAY_FL_GLOBAL	= (1 << 0)
};

extern struct list_head ftrace_trace_arrays;

extern struct mutex trace_types_lock;

extern int trace_array_get(struct trace_array *tr);
extern void trace_array_put(struct trace_array *tr);
extern int tracing_check_open_get_tr(struct trace_array *tr);

extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs);
extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);

extern bool trace_clock_in_ns(struct trace_array *tr);

/*
 * The global tracer (top) should be the first trace array added,
 * but we check the flag anyway.
 */
static inline struct trace_array *top_trace_array(void)
{
	struct trace_array *tr;

	if (list_empty(&ftrace_trace_arrays))
		return NULL;

	tr = list_entry(ftrace_trace_arrays.prev,
			typeof(*tr), list);
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
	return tr;
}

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id != 0 && (entry)->type != id);\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
		IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT);	\
		IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH);	\
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		__ftrace_bad_type();					\
	} while (0)
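
/*
 * Typical use in an output handler (sketch):
 *
 *	struct ftrace_entry *field;
 *
 *	trace_assign_type(field, iter->ent);
 *	trace_seq_printf(&iter->seq, "%lx <- %lx\n",
 *			 field->ip, field->parent_ip);
 *
 * If iter->ent->type were not TRACE_FN, the IF_ASSIGN() above would
 * WARN_ON() at run time; assigning a struct type that is not in the
 * list fails to link, against __ftrace_bad_type().
 */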

/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit position that sets its value in the
 * flags value of struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
	struct tracer		*trace;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b

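/*
 * For instance, a tracer exposing one private boolean option could
 * define (hypothetical names):
 *
 *	#define MY_OPT_VERBOSE	0x1
 *
 *	static struct tracer_opt my_opts[] = {
 *		{ TRACER_OPT(verbose, MY_OPT_VERBOSE) },
 *		{ }	// terminator
 *	};
 *
 *	static struct tracer_flags my_flags = {
 *		.val	= 0,		// initial value of the option bits
 *		.opts	= my_opts,
 *	};
 */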

struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};

/**
 * struct tracer - a specific tracer and its callbacks to interact with tracefs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_on)
 * @stop: called when tracing is paused (echo 0 > tracing_on)
 * @update_thresh: called when tracing_thresh is updated
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	int			(*update_thresh)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(struct trace_array *tr,
					    u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int			(*flag_changed)(struct trace_array *tr,
						u32 mask, int set);
	struct tracer		*next;
	struct tracer_flags	*flags;
	int			enabled;
	bool			print_max;
	bool			allow_instances;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool			use_max_tr;
#endif
	/* True if tracer cannot be enabled in kernel param */
	bool			noboot;
};
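
/*
 * A skeletal tracer and its registration, loosely following the nop
 * tracer (all names here are hypothetical):
 *
 *	static int my_tracer_init(struct trace_array *tr)
 *	{
 *		return 0;
 *	}
 *
 *	static void my_tracer_reset(struct trace_array *tr)
 *	{
 *	}
 *
 *	static struct tracer my_tracer __tracer_data = {
 *		.name		= "my_tracer",
 *		.init		= my_tracer_init,
 *		.reset		= my_tracer_reset,
 *		.allow_instances = true,
 *	};
 *
 *	static __init int my_tracer_init_call(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 */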


/* Only current can touch trace_recursion */

/*
 * For function tracing recursion:
 *  The order of these bits is important.
 *
 *  When function tracing occurs, the following steps are made:
 *   If the arch does not support an ftrace feature:
 *    call internal function (uses INTERNAL bits) which calls...
 *   If callback is registered to the "global" list, the list
 *    function is called and recursion checks the GLOBAL bits.
 *    then this function calls...
 *   The function callback, which can use the FTRACE bits to
 *    check for recursion.
 *
 * Now if the arch does not support a feature, and it calls
 * the global list function which calls the ftrace callback,
 * all three of these steps will do a recursion protection.
 * There's no reason to do one if the previous caller already
 * did. The recursion that we are protecting against will
 * go through the same steps again.
 *
 * To prevent the multiple recursion checks, if a recursion
 * bit is set that is higher than the MAX bit of the current
 * check, then we know that the check was made by the previous
 * caller, and we can skip the current check.
 */
enum {
	TRACE_BUFFER_BIT,
	TRACE_BUFFER_NMI_BIT,
	TRACE_BUFFER_IRQ_BIT,
	TRACE_BUFFER_SIRQ_BIT,

	/* Start of function recursion bits */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,

	/* INTERNAL_BITs must be greater than FTRACE_BITs */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,

	TRACE_BRANCH_BIT,
/*
 * Abuse of the trace_recursion.
 * We need a way to maintain state if we are tracing the function
 * graph in irq context, because we want to trace a particular function
 * that was called in irq context while we have irq tracing off. Since
 * this can only be modified by current, we can reuse trace_recursion.
 */
	TRACE_IRQ_BIT,

	/* Set if the function is in the set_graph_function file */
	TRACE_GRAPH_BIT,

	/*
	 * In the very unlikely case that an interrupt came in
	 * at a start of graph tracing, and we want to trace
	 * the function in that interrupt, the depth can be greater
	 * than zero, because of the preempted start of a previous
	 * trace. In an even more unlikely case, depth could be 2
	 * if a softirq interrupted the start of graph tracing,
	 * followed by an interrupt preempting a start of graph
	 * tracing in the softirq, and depth can even be 3
	 * if an NMI came in at the start of an interrupt function
	 * that preempted a softirq start of a function that
	 * preempted normal context! Luckily, it can't be greater
	 * than 3, so the next two bits are a mask of what the
	 * depth is when we set TRACE_GRAPH_BIT.
	 */

	TRACE_GRAPH_DEPTH_START_BIT,
	TRACE_GRAPH_DEPTH_END_BIT,

	/*
	 * To implement set_graph_notrace, if this bit is set, we ignore
	 * function graph tracing of called functions, until the return
	 * function is called to clear it.
	 */
	TRACE_GRAPH_NOTRACE_BIT,
};

#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))

#define trace_recursion_depth() \
	(((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3)
#define trace_recursion_set_depth(depth) \
	do {								\
		current->trace_recursion &=				\
			~(3 << TRACE_GRAPH_DEPTH_START_BIT);		\
		current->trace_recursion |=				\
			((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT;	\
	} while (0)

#define TRACE_CONTEXT_BITS	4

#define TRACE_FTRACE_START	TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_LIST_START	TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_CONTEXT_MASK	TRACE_LIST_MAX

static __always_inline int trace_get_context_bit(void)
{
	int bit;

	if (in_interrupt()) {
		if (in_nmi())
			bit = 0;

		else if (in_irq())
			bit = 1;
		else
			bit = 2;
	} else
		bit = 3;

	return bit;
}

static __always_inline int trace_test_and_set_recursion(int start, int max)
{
	unsigned int val = current->trace_recursion;
	int bit;

	/* A previous recursion check was made */
	if ((val & TRACE_CONTEXT_MASK) > max)
		return 0;

	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit)))
		return -1;

	val |= 1 << bit;
	current->trace_recursion = val;
	barrier();

	return bit;
}

static __always_inline void trace_clear_recursion(int bit)
{
	unsigned int val = current->trace_recursion;

	if (!bit)
		return;

	bit = 1 << bit;
	val &= ~bit;

	barrier();
	current->trace_recursion = val;
}
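
/*
 * Sketch of the pattern these helpers support, as used by a function
 * tracing callback:
 *
 *	int bit;
 *
 *	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
 *	if (bit < 0)
 *		return;		// recursion detected at this context level
 *
 *	// ... do the tracing work ...
 *
 *	trace_clear_recursion(bit);	// a return of 0 above makes this a nop
 */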

static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
	return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL;
}

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset_online_cpus(struct trace_buffer *buf);
void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
int tracing_open_generic_tr(struct inode *inode, struct file *filp);
bool tracing_is_disabled(void);
bool tracer_tracing_is_on(struct trace_array *tr);
void tracer_tracing_on(struct trace_array *tr);
void tracer_tracing_off(struct trace_array *tr);
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

struct dentry *tracing_init_dentry(void);

struct ring_buffer_event;

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags,
			  int pc);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
					struct ring_buffer_event *event);

int trace_empty(struct trace_iterator *iter);

void *trace_find_next_entry_inc(struct trace_iterator *iter);

void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);

unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu);
unsigned long trace_total_entries(struct trace_array *tr);

void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_graph_function(struct trace_array *tr,
			  unsigned long ip,
			  unsigned long parent_ip,
			  unsigned long flags, int pc);
void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
int trace_empty(struct trace_iterator *iter);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void set_graph_array(struct trace_array *tr);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_start_tgid_record(void);
void tracing_stop_tgid_record(void);

int register_tracer(struct tracer *type);
int is_tracing_stopped(void);

loff_t tracing_lseek(struct file *file, loff_t offset, int whence);

extern cpumask_var_t __read_mostly tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)
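
/*
 * For example, summing entries over all traced CPUs (sketch):
 *
 *	unsigned long total = 0;
 *	int cpu;
 *
 *	for_each_tracing_cpu(cpu)
 *		total += trace_total_entries_cpu(tr, cpu);
 */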

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_thresh;

/* PID filtering */

extern int pid_max;

bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
			     pid_t search_pid);
bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
			    struct task_struct *task);
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task);
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
int trace_pid_show(struct seq_file *m, void *v);
void trace_free_pid_list(struct trace_pid_list *pid_list);
int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt);

#ifdef CONFIG_TRACER_MAX_TRACE
void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
		   void *cond_data);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */

#ifdef CONFIG_STACKTRACE
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc);
#else
static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
				 int skip, int pc)
{
}
#endif /* CONFIG_STACKTRACE */

extern u64 ftrace_now(int cpu);

extern void trace_find_cmdline(int pid, char comm[]);
extern int trace_find_tgid(int pid);
extern void trace_event_follow_fork(struct trace_array *tr, bool enable);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
void ftrace_init_trace_array(struct trace_array *tr);
#else
static inline void ftrace_init_trace_array(struct trace_array *tr) { }
#endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);

extern bool ring_buffer_expanded;
extern bool tracing_selftest_disabled;

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
/*
 * Tracer data references selftest functions that only occur
 * on boot up. These can be __init functions. Thus, when selftests
 * are enabled, the tracers need to reference __init functions.
 */
#define __tracer_data __refdata
#else
/* Tracers are seldom changed. Optimize when selftests are disabled. */
#define __tracer_data __read_mostly
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(u64 nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...);
int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);

extern char trace_find_mark(unsigned long long duration);

struct ftrace_hash;

struct ftrace_mod_load {
	struct list_head	list;
	char			*func;
	char			*module;
	int			enable;
};

enum {
	FTRACE_HASH_FL_MOD	= (1 << 0),
};

struct ftrace_hash {
	unsigned long		size_bits;
	struct hlist_head	*buckets;
	unsigned long		count;
	unsigned long		flags;
	struct rcu_head		rcu;
};

struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);

static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
{
	return !hash || !(hash->count || (hash->flags & FTRACE_HASH_FL_MOD));
}

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20
#define TRACE_GRAPH_PRINT_REL_TIME	0x40
#define TRACE_GRAPH_PRINT_IRQS		0x80
#define TRACE_GRAPH_PRINT_TAIL		0x100
#define TRACE_GRAPH_SLEEP_TIME		0x200
#define TRACE_GRAPH_GRAPH_TIME		0x400
#define TRACE_GRAPH_PRINT_FILL_SHIFT	28
#define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)

extern void ftrace_graph_sleep_time_control(bool enable);

#ifdef CONFIG_FUNCTION_PROFILER
extern void ftrace_graph_graph_time_control(bool enable);
#else
static inline void ftrace_graph_graph_time_control(bool enable) { }
#endif

extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
extern void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
			       struct ftrace_graph_ent *trace,
			       unsigned long flags, int pc);
extern void __trace_graph_return(struct trace_array *tr,
				 struct ftrace_graph_ret *trace,
				 unsigned long flags, int pc);

#ifdef CONFIG_DYNAMIC_FTRACE
extern struct ftrace_hash __rcu *ftrace_graph_hash;
extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash;

static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
{
	unsigned long addr = trace->func;
	int ret = 0;
	struct ftrace_hash *hash;

	preempt_disable_notrace();

	/*
	 * Have to open code "rcu_dereference_sched()" because the
	 * function graph tracer can be called when RCU is not
	 * "watching".
	 * Protected with schedule_on_each_cpu(ftrace_sync)
	 */
	hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());

	if (ftrace_hash_empty(hash)) {
		ret = 1;
		goto out;
	}

	if (ftrace_lookup_ip(hash, addr)) {

		/*
		 * This needs to be cleared on the return functions
		 * when the depth is zero.
		 */
		trace_recursion_set(TRACE_GRAPH_BIT);
		trace_recursion_set_depth(trace->depth);

		/*
		 * If no irqs are to be traced, but a set_graph_function
		 * is set, and called by an interrupt handler, we still
		 * want to trace it.
		 */
		if (in_irq())
			trace_recursion_set(TRACE_IRQ_BIT);
		else
			trace_recursion_clear(TRACE_IRQ_BIT);
		ret = 1;
	}

out:
	preempt_enable_notrace();
	return ret;
}

static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
{
	if (trace_recursion_test(TRACE_GRAPH_BIT) &&
	    trace->depth == trace_recursion_depth())
		trace_recursion_clear(TRACE_GRAPH_BIT);
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	int ret = 0;
	struct ftrace_hash *notrace_hash;

	preempt_disable_notrace();

	/*
	 * Have to open code "rcu_dereference_sched()" because the
	 * function graph tracer can be called when RCU is not
	 * "watching".
	 * Protected with schedule_on_each_cpu(ftrace_sync)
	 */
	notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
						 !preemptible());

	if (ftrace_lookup_ip(notrace_hash, addr))
		ret = 1;

	preempt_enable_notrace();
	return ret;
}
#else
static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
{
	return 1;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	return 0;
}
static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
{ }
#endif /* CONFIG_DYNAMIC_FTRACE */

extern unsigned int fgraph_max_depth;

static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
{
	/* trace it when it is nested in a traced function, or is an enabled function */
	return !(trace_recursion_test(TRACE_GRAPH_BIT) ||
		 ftrace_graph_addr(trace)) ||
		(trace->depth < 0) ||
		(fgraph_max_depth && trace->depth >= fgraph_max_depth);
}

#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct list_head ftrace_pids;

#ifdef CONFIG_FUNCTION_TRACER
struct ftrace_func_command {
	struct list_head	list;
	char			*name;
	int			(*func)(struct trace_array *tr,
					struct ftrace_hash *hash,
					char *func, char *cmd,
					char *params, int enable);
};
extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return !this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid);
}
extern int ftrace_is_dead(void);
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent);
void ftrace_destroy_function_files(struct trace_array *tr);
void ftrace_init_global_array_ops(struct trace_array *tr);
void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
void ftrace_reset_array_ops(struct trace_array *tr);
void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
void ftrace_init_tracefs_toplevel(struct trace_array *tr,
				  struct dentry *d_tracer);
void ftrace_clear_pids(struct trace_array *tr);
int init_function_trace(void);
void ftrace_pid_follow_fork(struct trace_array *tr, bool enable);
#else
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
static inline int
ftrace_create_function_files(struct trace_array *tr,
			     struct dentry *parent)
{
	return 0;
}
static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
static inline __init void
ftrace_init_global_array_ops(struct trace_array *tr) { }
static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_clear_pids(struct trace_array *tr) { }
static inline int init_function_trace(void) { return 0; }
static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { }
/* ftrace_func_t type is not defined, use macro instead of static inline */
#define ftrace_init_array_ops(tr, func) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER */

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)

struct ftrace_probe_ops {
	void			(*func)(unsigned long ip,
					unsigned long parent_ip,
					struct trace_array *tr,
					struct ftrace_probe_ops *ops,
					void *data);
	int			(*init)(struct ftrace_probe_ops *ops,
					struct trace_array *tr,
					unsigned long ip, void *init_data,
					void **data);
	void			(*free)(struct ftrace_probe_ops *ops,
					struct trace_array *tr,
					unsigned long ip, void *data);
	int			(*print)(struct seq_file *m,
					 unsigned long ip,
					 struct ftrace_probe_ops *ops,
					 void *data);
};

struct ftrace_func_mapper;
typedef int (*ftrace_mapper_func)(void *data);

struct ftrace_func_mapper *allocate_ftrace_func_mapper(void);
void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
				  unsigned long ip);
int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
			      unsigned long ip, void *data);
void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
				   unsigned long ip);
void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
			     ftrace_mapper_func free_func);

extern int
register_ftrace_function_probe(char *glob, struct trace_array *tr,
			       struct ftrace_probe_ops *ops, void *data);
extern int
unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
				      struct ftrace_probe_ops *ops);
extern void clear_ftrace_function_probes(struct trace_array *tr);

int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);

void ftrace_create_filter_files(struct ftrace_ops *ops,
				struct dentry *parent);
void ftrace_destroy_filter_files(struct ftrace_ops *ops);
#else
struct ftrace_func_command;

static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
{
	return -EINVAL;
}
static inline __init int unregister_ftrace_command(char *cmd_name)
{
	return -EINVAL;
}
static inline void clear_ftrace_function_probes(struct trace_array *tr)
{
}

/*
 * The ops parameter passed in is usually undefined.
 * This must be a macro.
 */
#define ftrace_create_filter_files(ops, parent) do { } while (0)
#define ftrace_destroy_filter_files(ops) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */

bool ftrace_event_is_function(struct trace_event_call *call);

/*
 * struct trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool		cont;
	char		*buffer;
	unsigned	idx;
	unsigned	size;
};

static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
	parser->cont = false;
	parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
			  size_t cnt, loff_t *ppos);
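
/*
 * Typical pattern for a write() handler of a space-separated list file
 * (sketch; process_token() is hypothetical):
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser)) {
 *		// parser.buffer holds one NUL-terminated token
 *		process_token(parser.buffer);
 *	}
 *
 *	trace_parser_put(&parser);
 *	return read;
 */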

/*
 * Only create function graph options if function graph is configured.
 */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# define FGRAPH_FLAGS						\
		C(DISPLAY_GRAPH,	"display-graph"),
#else
# define FGRAPH_FLAGS
#endif

#ifdef CONFIG_BRANCH_TRACER
# define BRANCH_FLAGS					\
		C(BRANCH,		"branch"),
#else
# define BRANCH_FLAGS
#endif

#ifdef CONFIG_FUNCTION_TRACER
# define FUNCTION_FLAGS						\
		C(FUNCTION,		"function-trace"),	\
		C(FUNC_FORK,		"function-fork"),
# define FUNCTION_DEFAULT_FLAGS		TRACE_ITER_FUNCTION
#else
# define FUNCTION_FLAGS
# define FUNCTION_DEFAULT_FLAGS		0UL
# define TRACE_ITER_FUNC_FORK		0UL
#endif

#ifdef CONFIG_STACKTRACE
# define STACK_FLAGS				\
		C(STACKTRACE,		"stacktrace"),
#else
# define STACK_FLAGS
#endif

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that controls the output.
 *
 * NOTE: These bits must match the trace_options array in
 *	 trace.c (this macro guarantees it).
 */
#define TRACE_FLAGS						\
		C(PRINT_PARENT,		"print-parent"),	\
		C(SYM_OFFSET,		"sym-offset"),		\
		C(SYM_ADDR,		"sym-addr"),		\
		C(VERBOSE,		"verbose"),		\
		C(RAW,			"raw"),			\
		C(HEX,			"hex"),			\
		C(BIN,			"bin"),			\
		C(BLOCK,		"block"),		\
		C(PRINTK,		"trace_printk"),	\
		C(ANNOTATE,		"annotate"),		\
		C(USERSTACKTRACE,	"userstacktrace"),	\
		C(SYM_USEROBJ,		"sym-userobj"),		\
		C(PRINTK_MSGONLY,	"printk-msg-only"),	\
		C(CONTEXT_INFO,		"context-info"),   /* Print pid/cpu/time */ \
		C(LATENCY_FMT,		"latency-format"),	\
		C(RECORD_CMD,		"record-cmd"),		\
		C(RECORD_TGID,		"record-tgid"),		\
		C(OVERWRITE,		"overwrite"),		\
		C(STOP_ON_FREE,		"disable_on_free"),	\
		C(IRQ_INFO,		"irq-info"),		\
		C(MARKERS,		"markers"),		\
		C(EVENT_FORK,		"event-fork"),		\
		FUNCTION_FLAGS					\
		FGRAPH_FLAGS					\
		STACK_FLAGS					\
		BRANCH_FLAGS

/*
 * By defining C, we can make TRACE_FLAGS a list of bit names
 * that will define the bits for the flag masks.
 */
#undef C
#define C(a, b) TRACE_ITER_##a##_BIT

enum trace_iterator_bits {
	TRACE_FLAGS
	/* Make sure we don't go more than we have bits for */
	TRACE_ITER_LAST_BIT
};

/*
 * By redefining C, we can make TRACE_FLAGS a list of masks that
 * use the bits as defined above.
 */
#undef C
#define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)

enum trace_iterator_flags { TRACE_FLAGS };
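
/*
 * Concretely, the first TRACE_FLAGS entry expands to
 * TRACE_ITER_PRINT_PARENT_BIT (== 0) in enum trace_iterator_bits,
 * and then to
 *
 *	TRACE_ITER_PRINT_PARENT = (1 << TRACE_ITER_PRINT_PARENT_BIT)
 *
 * in enum trace_iterator_flags, so the bit indexes, the masks and the
 * option strings in trace.c can never drift apart.
 */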

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

extern struct tracer nop_trace;

#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (tr->trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

/* set ring buffers to default size if not already done */
int tracing_update_buffers(void);

struct ftrace_event_field {
	struct list_head	link;
	const char		*name;
	const char		*type;
	int			filter_type;
	int			offset;
	int			size;
	int			is_signed;
};

struct prog_entry;

struct event_filter {
	struct prog_entry	__rcu *prog;
	char			*filter_string;
};

struct event_subsystem {
	struct list_head	list;
	const char		*name;
	struct event_filter	*filter;
	int			ref_count;
};

struct trace_subsystem_dir {
	struct list_head	list;
	struct event_subsystem	*subsystem;
	struct trace_array	*tr;
	struct dentry		*entry;
	int			ref_count;
	int			nr_events;
};

extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event);

void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs);

static inline void trace_buffer_unlock_commit(struct trace_array *tr,
					      struct ring_buffer *buffer,
					      struct ring_buffer_event *event,
					      unsigned long flags, int pc)
{
	trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);
}

DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
DECLARE_PER_CPU(int, trace_buffered_event_cnt);
void trace_buffered_event_disable(void);
void trace_buffered_event_enable(void);

static inline void
__trace_event_discard_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event)
{
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Simply release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
		return;
	}
	ring_buffer_discard_commit(buffer, event);
}

/*
 * Helper function for event_trigger_unlock_commit{_regs}().
 * If there are event triggers attached to this event that require
 * filtering against its fields, then they will be called, as the
 * entry already holds the field information of the current event.
 *
 * It also checks if the event should be discarded or not.
 * It is to be discarded if the event is soft disabled and the
 * event was only recorded to process triggers, or if the event
 * filter is active and this event did not match the filters.
 *
 * Returns true if the event is discarded, false otherwise.
 */
static inline bool
__event_trigger_test_discard(struct trace_event_file *file,
			     struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     void *entry,
			     enum event_trigger_type *tt)
{
	unsigned long eflags = file->flags;

	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
		*tt = event_triggers_call(file, entry, event);

	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
	     !filter_match_preds(file->filter, entry))) {
		__trace_event_discard_commit(buffer, event);
		return true;
	}

	return false;
}

/**
 * event_trigger_unlock_commit - handle triggers and finish event commit
 * @file: The file pointer associated to the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters,
 * and checks whether the event is soft disabled and should be
 * discarded.
 */
static inline void
event_trigger_unlock_commit(struct trace_event_file *file,
			    struct ring_buffer *buffer,
			    struct ring_buffer_event *event,
			    void *entry, unsigned long irq_flags, int pc)
{
	enum event_trigger_type tt = ETT_NONE;

	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
		trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);

	if (tt)
		event_triggers_post_call(file, tt);
}
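
/*
 * Sketch of the open-coded event path this helper completes (the
 * entry layout and 'type' here are hypothetical; the reserve/data
 * calls are the usual trace event ring buffer API):
 *
 *	event = trace_event_buffer_lock_reserve(&buffer, file, type,
 *						sizeof(*entry), irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->ip = ip;
 *
 *	event_trigger_unlock_commit(file, buffer, event, entry,
 *				    irq_flags, pc);
 */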
1467 : :
1468 : : /**
1469 : : * event_trigger_unlock_commit_regs - handle triggers and finish event commit
1470 : : * @file: The file pointer assoctiated to the event
1471 : : * @buffer: The ring buffer that the event is being written to
1472 : : * @event: The event meta data in the ring buffer
1473 : : * @entry: The event itself
1474 : : * @irq_flags: The state of the interrupts at the start of the event
1475 : : * @pc: The state of the preempt count at the start of the event.
1476 : : *
1477 : : * This is a helper function to handle triggers that require data
1478 : : * from the event itself. It also tests the event against filters and
1479 : : * if the event is soft disabled and should be discarded.
1480 : : *
1481 : : * Same as event_trigger_unlock_commit() but calls
1482 : : * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
1483 : : */
1484 : : static inline void
1485 : 0 : event_trigger_unlock_commit_regs(struct trace_event_file *file,
1486 : : struct ring_buffer *buffer,
1487 : : struct ring_buffer_event *event,
1488 : : void *entry, unsigned long irq_flags, int pc,
1489 : : struct pt_regs *regs)
1490 : : {
1491 : 0 : enum event_trigger_type tt = ETT_NONE;
1492 : :
1493 [ # # ]: 0 : if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
1494 : 0 : trace_buffer_unlock_commit_regs(file->tr, buffer, event,
1495 : : irq_flags, pc, regs);
1496 : :
1497 [ # # ]: 0 : if (tt)
1498 : 0 : event_triggers_post_call(file, tt);
1499 : 0 : }
1500 : :
1501 : : #define FILTER_PRED_INVALID ((unsigned short)-1)
1502 : : #define FILTER_PRED_IS_RIGHT (1 << 15)
1503 : : #define FILTER_PRED_FOLD (1 << 15)
1504 : :
1505 : : /*
1506 : : * The max preds is the size of unsigned short with
1507 : : * two flags at the MSBs. One bit is used for both the IS_RIGHT
1508 : : * and FOLD flags. The other is reserved.
1509 : : *
1510 : : * 2^14 preds is way more than enough.
1511 : : */
1512 : : #define MAX_FILTER_PRED 16384
1513 : :
1514 : : struct filter_pred;
1515 : : struct regex;
1516 : :
1517 : : typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);
1518 : :
1519 : : typedef int (*regex_match_func)(char *str, struct regex *r, int len);
1520 : :
1521 : : enum regex_type {
1522 : : MATCH_FULL = 0,
1523 : : MATCH_FRONT_ONLY,
1524 : : MATCH_MIDDLE_ONLY,
1525 : : MATCH_END_ONLY,
1526 : : MATCH_GLOB,
1527 : : MATCH_INDEX,
1528 : : };
1529 : :
1530 : : struct regex {
1531 : : char pattern[MAX_FILTER_STR_VAL];
1532 : : int len;
1533 : : int field_len;
1534 : : regex_match_func match;
1535 : : };
1536 : :
1537 : : struct filter_pred {
1538 : : filter_pred_fn_t fn;
1539 : : u64 val;
1540 : : struct regex regex;
1541 : : unsigned short *ops;
1542 : : struct ftrace_event_field *field;
1543 : : int offset;
1544 : : int not;
1545 : : int op;
1546 : : };
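
/*
 * Example (illustrative only): the shape of a filter_pred_fn_t
 * predicate. This sketch tests a 64-bit field at pred->offset in the
 * record for equality with pred->val, honoring pred->not; the real
 * predicates live in trace_events_filter.c and are chosen per field
 * size, signedness and comparison op.
 */
static inline int example_filter_pred_64(struct filter_pred *pred, void *event)
{
	u64 *addr = (u64 *)(event + pred->offset);
	int match = (*addr == pred->val);

	/* pred->not inverts the sense of the match. */
	return match == !pred->not;
}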
1547 : :
1548 : : static inline bool is_string_field(struct ftrace_event_field *field)
1549 : : {
1550 : 0 : return field->filter_type == FILTER_DYN_STRING ||
1551 : : field->filter_type == FILTER_STATIC_STRING ||
1552 : 0 : field->filter_type == FILTER_PTR_STRING ||
1553 : : field->filter_type == FILTER_COMM;
1554 : : }
1555 : :
1556 : : static inline bool is_function_field(struct ftrace_event_field *field)
1557 : : {
1558 : : return field->filter_type == FILTER_TRACE_FN;
1559 : : }
1560 : :
1561 : : extern enum regex_type
1562 : : filter_parse_regex(char *buff, int len, char **search, int *not);
1563 : : extern void print_event_filter(struct trace_event_file *file,
1564 : : struct trace_seq *s);
1565 : : extern int apply_event_filter(struct trace_event_file *file,
1566 : : char *filter_string);
1567 : : extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
1568 : : char *filter_string);
1569 : : extern void print_subsystem_event_filter(struct event_subsystem *system,
1570 : : struct trace_seq *s);
1571 : : extern int filter_assign_type(const char *type);
1572 : : extern int create_event_filter(struct trace_array *tr,
1573 : : struct trace_event_call *call,
1574 : : char *filter_str, bool set_str,
1575 : : struct event_filter **filterp);
1576 : : extern void free_event_filter(struct event_filter *filter);
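
/*
 * Example (illustrative only): creating and freeing a standalone
 * event filter with the helpers above. The filter string and the
 * surrounding function are placeholders; see trace_events_filter.c
 * for the in-tree callers.
 */
static inline int example_filter_usage(struct trace_array *tr,
				       struct trace_event_call *call)
{
	struct event_filter *filter = NULL;
	char fstr[] = "common_pid > 100";
	int ret;

	/* set_str == false: don't keep a copy of the filter string. */
	ret = create_event_filter(tr, call, fstr, false, &filter);
	if (ret)
		return ret;

	/* ... use the filter, e.g. with filter_match_preds() ... */

	free_event_filter(filter);
	return 0;
}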
1577 : :
1578 : : struct ftrace_event_field *
1579 : : trace_find_event_field(struct trace_event_call *call, char *name);
1580 : :
1581 : : extern void trace_event_enable_cmd_record(bool enable);
1582 : : extern void trace_event_enable_tgid_record(bool enable);
1583 : :
1584 : : extern int event_trace_init(void);
1585 : : extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
1586 : : extern int event_trace_del_tracer(struct trace_array *tr);
1587 : :
1588 : : extern struct trace_event_file *__find_event_file(struct trace_array *tr,
1589 : : const char *system,
1590 : : const char *event);
1591 : : extern struct trace_event_file *find_event_file(struct trace_array *tr,
1592 : : const char *system,
1593 : : const char *event);
1594 : :
1595 : : static inline void *event_file_data(struct file *filp)
1596 : : {
1597 : : return READ_ONCE(file_inode(filp)->i_private);
1598 : : }
1599 : :
1600 : : extern struct mutex event_mutex;
1601 : : extern struct list_head ftrace_events;
1602 : :
1603 : : extern const struct file_operations event_trigger_fops;
1604 : : extern const struct file_operations event_hist_fops;
1605 : :
1606 : : #ifdef CONFIG_HIST_TRIGGERS
1607 : : extern int register_trigger_hist_cmd(void);
1608 : : extern int register_trigger_hist_enable_disable_cmds(void);
1609 : : #else
1610 : : static inline int register_trigger_hist_cmd(void) { return 0; }
1611 : : static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
1612 : : #endif
1613 : :
1614 : : extern int register_trigger_cmds(void);
1615 : : extern void clear_event_triggers(struct trace_array *tr);
1616 : :
1617 : : struct event_trigger_data {
1618 : : unsigned long count;
1619 : : int ref;
1620 : : struct event_trigger_ops *ops;
1621 : : struct event_command *cmd_ops;
1622 : : struct event_filter __rcu *filter;
1623 : : char *filter_str;
1624 : : void *private_data;
1625 : : bool paused;
1626 : : bool paused_tmp;
1627 : : struct list_head list;
1628 : : char *name;
1629 : : struct list_head named_list;
1630 : : struct event_trigger_data *named_data;
1631 : : };
1632 : :
1633 : : /* Avoid typos */
1634 : : #define ENABLE_EVENT_STR "enable_event"
1635 : : #define DISABLE_EVENT_STR "disable_event"
1636 : : #define ENABLE_HIST_STR "enable_hist"
1637 : : #define DISABLE_HIST_STR "disable_hist"
1638 : :
1639 : : struct enable_trigger_data {
1640 : : struct trace_event_file *file;
1641 : : bool enable;
1642 : : bool hist;
1643 : : };
1644 : :
1645 : : extern int event_enable_trigger_print(struct seq_file *m,
1646 : : struct event_trigger_ops *ops,
1647 : : struct event_trigger_data *data);
1648 : : extern void event_enable_trigger_free(struct event_trigger_ops *ops,
1649 : : struct event_trigger_data *data);
1650 : : extern int event_enable_trigger_func(struct event_command *cmd_ops,
1651 : : struct trace_event_file *file,
1652 : : char *glob, char *cmd, char *param);
1653 : : extern int event_enable_register_trigger(char *glob,
1654 : : struct event_trigger_ops *ops,
1655 : : struct event_trigger_data *data,
1656 : : struct trace_event_file *file);
1657 : : extern void event_enable_unregister_trigger(char *glob,
1658 : : struct event_trigger_ops *ops,
1659 : : struct event_trigger_data *test,
1660 : : struct trace_event_file *file);
1661 : : extern void trigger_data_free(struct event_trigger_data *data);
1662 : : extern int event_trigger_init(struct event_trigger_ops *ops,
1663 : : struct event_trigger_data *data);
1664 : : extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
1665 : : int trigger_enable);
1666 : : extern void update_cond_flag(struct trace_event_file *file);
1667 : : extern int set_trigger_filter(char *filter_str,
1668 : : struct event_trigger_data *trigger_data,
1669 : : struct trace_event_file *file);
1670 : : extern struct event_trigger_data *find_named_trigger(const char *name);
1671 : : extern bool is_named_trigger(struct event_trigger_data *test);
1672 : : extern int save_named_trigger(const char *name,
1673 : : struct event_trigger_data *data);
1674 : : extern void del_named_trigger(struct event_trigger_data *data);
1675 : : extern void pause_named_trigger(struct event_trigger_data *data);
1676 : : extern void unpause_named_trigger(struct event_trigger_data *data);
1677 : : extern void set_named_trigger_data(struct event_trigger_data *data,
1678 : : struct event_trigger_data *named_data);
1679 : : extern struct event_trigger_data *
1680 : : get_named_trigger_data(struct event_trigger_data *data);
1681 : : extern int register_event_command(struct event_command *cmd);
1682 : : extern int unregister_event_command(struct event_command *cmd);
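
/*
 * Example (illustrative only): how the named-trigger helpers above
 * compose. A trigger given a name is saved so that other events can
 * share its data; pausing stops it from firing without removing it.
 * The "my_named_trig" name is a placeholder, and in-tree callers do
 * all of this while holding event_mutex.
 */
static inline void example_named_trigger_usage(struct event_trigger_data *data)
{
	struct event_trigger_data *named;

	if (save_named_trigger("my_named_trig", data))
		return;		/* allocation failed */

	named = find_named_trigger("my_named_trig");
	if (named) {
		pause_named_trigger(named);	/* sets ->paused */
		/* ... */
		unpause_named_trigger(named);
	}

	del_named_trigger(data);
}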
1684 : :
1685 : : /**
1686 : : * struct event_trigger_ops - callbacks for trace event triggers
1687 : : *
1688 : : * The methods in this structure provide per-event trigger hooks for
1689 : : * various trigger operations.
1690 : : *
1691 : : * All the methods below, except for @init() and @free(), must be
1692 : : * implemented.
1693 : : *
1694 : : * @func: The trigger 'probe' function called when the triggering
1695 : : * event occurs. The data passed into this callback is the data
1696 : : * that was supplied to the event_command @reg() function that
1697 : : * registered the trigger (see struct event_command) along with
1698 : : * the trace record, rec.
1699 : : *
1700 : : * @init: An optional initialization function called for the trigger
1701 : : * when the trigger is registered (via the event_command reg()
1702 : : * function). This can be used to perform per-trigger
1703 : : * initialization such as incrementing a per-trigger reference
1704 : : * count, for instance. This is usually implemented by the
1705 : : * generic utility function @event_trigger_init() (see
1706 : : * trace_events_trigger.c).
1707 : : *
1708 : : * @free: An optional de-initialization function called for the
1709 : : * trigger when the trigger is unregistered (via the
1710 : : * event_command @unreg() function). This can be used to perform
1711 : : * per-trigger de-initialization such as decrementing a
1712 : : * per-trigger reference count and freeing corresponding trigger
1713 : : * data, for instance. This is usually implemented by the
1714 : : * generic utility function @event_trigger_free() (see
1715 : : * trace_events_trigger.c).
1716 : : *
1717 : : * @print: The callback function invoked to have the trigger print
1718 : : * itself. This is usually implemented by a wrapper function
1719 : : * that calls the generic utility function @event_trigger_print()
1720 : : * (see trace_events_trigger.c).
1721 : : */
1722 : : struct event_trigger_ops {
1723 : : void (*func)(struct event_trigger_data *data,
1724 : : void *rec,
1725 : : struct ring_buffer_event *rbe);
1726 : : int (*init)(struct event_trigger_ops *ops,
1727 : : struct event_trigger_data *data);
1728 : : void (*free)(struct event_trigger_ops *ops,
1729 : : struct event_trigger_data *data);
1730 : : int (*print)(struct seq_file *m,
1731 : : struct event_trigger_ops *ops,
1732 : : struct event_trigger_data *data);
1733 : : };
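
/*
 * Example (illustrative only): a trivial event_trigger_ops instance.
 * The probe decrements data->count (with -1 meaning "unlimited") and
 * would perform its action; @init reuses the generic helper declared
 * above, and the @free and @print bodies are simplified sketches of
 * what trace_events_trigger.c provides.
 */
static void example_trigger_func(struct event_trigger_data *data,
				 void *rec, struct ring_buffer_event *rbe)
{
	if (!data->count)
		return;
	if (data->count != -1)
		(data->count)--;
	/* ... perform the trigger's action here ... */
}

static void example_trigger_free(struct event_trigger_ops *ops,
				 struct event_trigger_data *data)
{
	if (WARN_ON_ONCE(data->ref <= 0))
		return;
	if (!--data->ref)
		trigger_data_free(data);
}

static int example_trigger_print(struct seq_file *m,
				 struct event_trigger_ops *ops,
				 struct event_trigger_data *data)
{
	seq_puts(m, "example");
	return 0;
}

static struct event_trigger_ops example_trigger_ops __maybe_unused = {
	.func	= example_trigger_func,
	.init	= event_trigger_init,
	.free	= example_trigger_free,
	.print	= example_trigger_print,
};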
1734 : :
1735 : : /**
1736 : : * struct event_command - callbacks and data members for event commands
1737 : : *
1738 : : * Event commands are invoked by users by writing the command name
1739 : : * into the 'trigger' file associated with a trace event. The
1740 : : * parameters associated with a specific invocation of an event
1741 : : * command are used to create an event trigger instance, which is
1742 : : * added to the list of trigger instances associated with that trace
1743 : : * event. When the event is hit, the set of triggers associated with
1744 : : * that event is invoked.
1745 : : *
1746 : : * The data members in this structure provide per-event command data
1747 : : * for various event commands.
1748 : : *
1749 : : * All the data members below, except for @flags, must be set
1750 : : * for each event command.
1751 : : *
1752 : : * @name: The unique name that identifies the event command. This is
1753 : : * the name used when setting triggers via trigger files.
1754 : : *
1755 : : * @trigger_type: A unique id that identifies the event command
1756 : : * 'type'. This value has two purposes, the first to ensure that
1757 : : * only one trigger of the same type can be set at a given time
1758 : : * for a particular event; e.g. it doesn't make sense to have both
1759 : : * a traceon and traceoff trigger attached to a single event at
1760 : : * the same time, so traceon and traceoff have the same type
1761 : : * though they have different names. The @trigger_type value is
1762 : : * also used as a bit value for deferring the actual trigger
1763 : : * action until after the current event is finished. Some
1764 : : * commands need to do this if they themselves log to the trace
1765 : : * buffer (see the EVENT_CMD_FL_POST_TRIGGER flag below). @trigger_type
1766 : : * values are defined by adding new values to the trigger_type
1767 : : * enum in include/linux/trace_events.h.
1768 : : *
1769 : : * @flags: See the enum event_command_flags below.
1770 : : *
1771 : : * All the methods below, except for @set_filter() and @unreg_all(),
1772 : : * must be implemented.
1773 : : *
1774 : : * @func: The callback function responsible for parsing and
1775 : : * registering the trigger written to the 'trigger' file by the
1776 : : * user. It allocates the trigger instance and registers it with
1777 : : * the appropriate trace event. It makes use of the other
1778 : : * event_command callback functions to orchestrate this, and is
1779 : : * usually implemented by the generic utility function
1780 : : * @event_trigger_callback() (see trace_events_trigger.c).
1781 : : *
1782 : : * @reg: Adds the trigger to the list of triggers associated with the
1783 : : * event, and enables the event trigger itself, after
1784 : : * initializing it (via the event_trigger_ops @init() function).
1785 : : * This is also where commands can use the @trigger_type value to
1786 : : * make the decision as to whether or not multiple instances of
1787 : : * the trigger should be allowed. This is usually implemented by
1788 : : * the generic utility function @register_trigger() (see
1789 : : * trace_events_trigger.c).
1790 : : *
1791 : : * @unreg: Removes the trigger from the list of triggers associated
1792 : : * with the event, and disables the event trigger itself, after
1793 : : * de-initializing it (via the event_trigger_ops @free() function).
1794 : : * This is usually implemented by the generic utility function
1795 : : * @unregister_trigger() (see trace_events_trigger.c).
1796 : : *
1797 : : * @unreg_all: An optional function called to remove all the triggers
1798 : : * from the list of triggers associated with the event. Called
1799 : : * when a trigger file is opened in truncate mode.
1800 : : *
1801 : : * @set_filter: An optional function called to parse and set a filter
1802 : : * for the trigger. If no @set_filter() method is set for the
1803 : : * event command, filters set by the user for the command will be
1804 : : * ignored. This is usually implemented by the generic utility
1805 : : * function @set_trigger_filter() (see trace_events_trigger.c).
1806 : : *
1807 : : * @get_trigger_ops: The callback function invoked to retrieve the
1808 : : * event_trigger_ops implementation associated with the command.
1809 : : */
1810 : : struct event_command {
1811 : : struct list_head list;
1812 : : char *name;
1813 : : enum event_trigger_type trigger_type;
1814 : : int flags;
1815 : : int (*func)(struct event_command *cmd_ops,
1816 : : struct trace_event_file *file,
1817 : : char *glob, char *cmd, char *params);
1818 : : int (*reg)(char *glob,
1819 : : struct event_trigger_ops *ops,
1820 : : struct event_trigger_data *data,
1821 : : struct trace_event_file *file);
1822 : : void (*unreg)(char *glob,
1823 : : struct event_trigger_ops *ops,
1824 : : struct event_trigger_data *data,
1825 : : struct trace_event_file *file);
1826 : : void (*unreg_all)(struct trace_event_file *file);
1827 : : int (*set_filter)(char *filter_str,
1828 : : struct event_trigger_data *data,
1829 : : struct trace_event_file *file);
1830 : : struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
1831 : : };
1832 : :
1833 : : /**
1834 : : * enum event_command_flags - flags for struct event_command
1835 : : *
1836 : : * @POST_TRIGGER: A flag that says whether or not this command needs
1837 : : * to have its action delayed until after the current event has
1838 : : * been closed. Some triggers need to avoid being invoked while
1839 : : * an event is currently in the process of being logged, since
1840 : : * the trigger may itself log data into the trace buffer. Thus
1841 : : * we make sure the current event is committed before invoking
1842 : : * those triggers. To do that, the trigger invocation is split
1843 : : * in two - the first part checks the filter using the current
1844 : : * trace record; if a command has the @post_trigger flag set, it
1845 : : * sets a bit for itself in the return value, otherwise it
1846 : : * directly invokes the trigger. Once all commands have been
1847 : : * either invoked or set their return flag, the current record is
1848 : : * either committed or discarded. At that point, if any commands
1849 : : * have deferred their triggers, those commands are finally
1850 : : * invoked following the close of the current event. In other
1851 : : * words, if the event_trigger_ops @func() probe implementation
1852 : : * itself logs to the trace buffer, this flag should be set,
1853 : : * otherwise it can be left unspecified.
1854 : : *
1855 : : * @NEEDS_REC: A flag that says whether or not this command needs
1856 : : * access to the trace record in order to perform its function,
1857 : : * regardless of whether or not it has a filter associated with
1858 : : * it (filters make a trigger require access to the trace record
1859 : : * but are not always present).
1860 : : */
1861 : : enum event_command_flags {
1862 : : EVENT_CMD_FL_POST_TRIGGER = 1,
1863 : : EVENT_CMD_FL_NEEDS_REC = 2,
1864 : : };
1865 : :
1866 : : static inline bool event_command_post_trigger(struct event_command *cmd_ops)
1867 : : {
1868 : 0 : return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
1869 : : }
1870 : :
1871 : : static inline bool event_command_needs_rec(struct event_command *cmd_ops)
1872 : : {
1873 : 0 : return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
1874 : : }
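
/*
 * Example (illustrative only): the shape of an event_command and its
 * registration. The name, flags and ETT_NONE trigger_type here are
 * placeholders; a real command defines its own trigger_type bit and
 * wires @func/@reg/@unreg/@get_trigger_ops to the generic helpers in
 * trace_events_trigger.c, usually from register_trigger_cmds().
 */
static struct event_command example_cmd __maybe_unused = {
	.name		= "example",
	.trigger_type	= ETT_NONE,		/* placeholder type bit */
	.flags		= EVENT_CMD_FL_NEEDS_REC,
	/* .func, .reg, .unreg, .get_trigger_ops set to generic helpers */
};

static inline int example_cmd_register(void)
{
	return register_event_command(&example_cmd);
}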
1875 : :
1876 : : extern int trace_event_enable_disable(struct trace_event_file *file,
1877 : : int enable, int soft_disable);
1878 : : extern int tracing_alloc_snapshot(void);
1879 : : extern void tracing_snapshot_cond(struct trace_array *tr, void *cond_data);
1880 : : extern int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update);
1881 : :
1882 : : extern int tracing_snapshot_cond_disable(struct trace_array *tr);
1883 : : extern void *tracing_cond_snapshot_data(struct trace_array *tr);
1884 : :
1885 : : extern const char *__start___trace_bprintk_fmt[];
1886 : : extern const char *__stop___trace_bprintk_fmt[];
1887 : :
1888 : : extern const char *__start___tracepoint_str[];
1889 : : extern const char *__stop___tracepoint_str[];
1890 : :
1891 : : void trace_printk_control(bool enabled);
1892 : : void trace_printk_init_buffers(void);
1893 : : void trace_printk_start_comm(void);
1894 : : int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
1895 : : int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
1896 : :
1897 : : #define MAX_EVENT_NAME_LEN 64
1898 : :
1899 : : extern int trace_run_command(const char *buf, int (*createfn)(int, char**));
1900 : : extern ssize_t trace_parse_run_command(struct file *file,
1901 : : const char __user *buffer, size_t count, loff_t *ppos,
1902 : : int (*createfn)(int, char**));
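
/*
 * Example (illustrative only): a createfn callback of the shape
 * trace_run_command() and trace_parse_run_command() expect. The
 * argc/argv pair holds the whitespace-split tokens of one command
 * line; kprobe events and synthetic events are the in-tree users.
 */
static inline int example_createfn(int argc, char **argv)
{
	if (argc < 1)
		return -EINVAL;

	/* argv[0] is typically the command or probe definition ... */
	return 0;
}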
1903 : :
1904 : : extern unsigned int err_pos(char *cmd, const char *str);
1905 : : extern void tracing_log_err(struct trace_array *tr,
1906 : : const char *loc, const char *cmd,
1907 : : const char **errs, u8 type, u8 pos);
1908 : :
1909 : : /*
1910 : : * Normal trace_printk() and friends allocate special buffers
1911 : : * to do the manipulation, and save the print formats into
1912 : : * sections for display. But the trace infrastructure wants to
1913 : : * use these without the added overhead, at the price of being
1914 : : * a bit slower (used mainly for warnings, where we don't care
1915 : : * about performance). internal_trace_puts() exists for such
1916 : : * a purpose.
1917 : : */
1918 : : #define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
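
/*
 * Example (illustrative only) of the macro above, as used for
 * low-overhead diagnostics inside the tracer itself:
 *
 *	internal_trace_puts("*** EXAMPLE DIAGNOSTIC ***\n");
 *
 * which expands to __trace_puts(_THIS_IP_, ...) with the string's
 * length computed at the call site.
 */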
1919 : :
1920 : : #undef FTRACE_ENTRY
1921 : : #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \
1922 : : extern struct trace_event_call \
1923 : : __aligned(4) event_##call;
1924 : : #undef FTRACE_ENTRY_DUP
1925 : : #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \
1926 : : FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
1927 : : filter)
1928 : : #undef FTRACE_ENTRY_PACKED
1929 : : #define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print, filter) \
1930 : : FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
1931 : : filter)
1932 : :
1933 : : #include "trace_entries.h"
1934 : :
1935 : : #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
1936 : : int perf_ftrace_event_register(struct trace_event_call *call,
1937 : : enum trace_reg type, void *data);
1938 : : #else
1939 : : #define perf_ftrace_event_register NULL
1940 : : #endif
1941 : :
1942 : : #ifdef CONFIG_FTRACE_SYSCALLS
1943 : : void init_ftrace_syscalls(void);
1944 : : const char *get_syscall_name(int syscall);
1945 : : #else
1946 : : static inline void init_ftrace_syscalls(void) { }
1947 : : static inline const char *get_syscall_name(int syscall)
1948 : : {
1949 : : return NULL;
1950 : : }
1951 : : #endif
1952 : :
1953 : : #ifdef CONFIG_EVENT_TRACING
1954 : : void trace_event_init(void);
1955 : : void trace_event_eval_update(struct trace_eval_map **map, int len);
1956 : : #else
1957 : : static inline void __init trace_event_init(void) { }
1958 : : static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
1959 : : #endif
1960 : :
1961 : : #ifdef CONFIG_TRACER_SNAPSHOT
1962 : : void tracing_snapshot_instance(struct trace_array *tr);
1963 : : int tracing_alloc_snapshot_instance(struct trace_array *tr);
1964 : : #else
1965 : : static inline void tracing_snapshot_instance(struct trace_array *tr) { }
1966 : : static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
1967 : : {
1968 : : return 0;
1969 : : }
1970 : : #endif
1971 : :
1972 : : #ifdef CONFIG_PREEMPT_TRACER
1973 : : void tracer_preempt_on(unsigned long a0, unsigned long a1);
1974 : : void tracer_preempt_off(unsigned long a0, unsigned long a1);
1975 : : #else
1976 : : static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
1977 : : static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
1978 : : #endif
1979 : : #ifdef CONFIG_IRQSOFF_TRACER
1980 : : void tracer_hardirqs_on(unsigned long a0, unsigned long a1);
1981 : : void tracer_hardirqs_off(unsigned long a0, unsigned long a1);
1982 : : #else
1983 : : static inline void tracer_hardirqs_on(unsigned long a0, unsigned long a1) { }
1984 : : static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { }
1985 : : #endif
1986 : :
1987 : : extern struct trace_iterator *tracepoint_print_iter;
1988 : :
1989 : : /*
1990 : : * Reset the state of the trace_iterator so that it can read consumed data.
1991 : : * Normally, the trace_iterator is used for reading the data when it is not
1992 : : * consumed, and must retain state.
1993 : : */
1994 : : static __always_inline void trace_iterator_reset(struct trace_iterator *iter)
1995 : : {
1996 : : const size_t offset = offsetof(struct trace_iterator, seq);
1997 : :
1998 : : /*
1999 : : * Keep gcc from complaining about overwriting more than just one
2000 : : * member in the structure.
2001 : : */
2002 : 0 : memset((char *)iter + offset, 0, sizeof(struct trace_iterator) - offset);
2003 : :
2004 : 0 : iter->pos = -1;
2005 : : }
2006 : :
2007 : : #endif /* _LINUX_KERNEL_TRACE_H */