mirror of https://github.com/torvalds/linux.git
synced 2025-12-07 11:56:58 +00:00
Merge tag 'trace-v6.19-2' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace
Pull tracing fixes from Steven Rostedt:

 - Fix accounting of stop_count in file release

   On opening the trace file, if the "pause-on-trace" option is set, it
   will increment the stop_count. On file release, it checks if
   stop_count is set, and if so it decrements it. Since this code was
   originally written, the stop_count can be incremented by other use
   cases, so just checking the stop_count is no longer enough to know
   whether it should be decremented. Add a new iterator flag called
   "PAUSE" and have it set if the open disables tracing, and only
   decrement the stop_count if that flag is set on close.

 - Remove length field in trace_seq_printf() of print_synth_event()

   When printing a synthetic event that has a static-length array
   field, the vsprintf() of the trace_seq_printf() triggered an
   "(efault)" in the output. That's because the print_fmt replaced the
   "%.*s" with "%s", causing the arguments to be off.

 - Fix a bunch of typos

* tag 'trace-v6.19-2' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
  tracing: Fix typo in trace_seq.c
  tracing: Fix typo in trace_probe.c
  tracing: Fix multiple typos in trace_osnoise.c
  tracing: Fix multiple typos in trace_events_user.c
  tracing: Fix typo in trace_events_trigger.c
  tracing: Fix typo in trace_events_hist.c
  tracing: Fix typo in trace_events_filter.c
  tracing: Fix multiple typos in trace_events.c
  tracing: Fix multiple typos in trace.c
  tracing: Fix typo in ring_buffer_benchmark.c
  tracing: Fix multiple typos in ring_buffer.c
  tracing: Fix typo in fprobe.c
  tracing: Fix typo in fpgraph.c
  tracing: Fix fixed array of synthetic event
  tracing: Fix enabling of tracing on file release
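The stop_count fix follows a common resource-pairing pattern: record at open time whether this particular opener stopped tracing, rather than inferring it from a shared counter at release time. Below is a minimal user-space C sketch of that pattern, not the kernel code itself; file_iter, FILE_PAUSE, open_trace() and release_trace() are illustrative stand-ins for trace_iterator, TRACE_FILE_PAUSE, __tracing_open() and tracing_release().

#include <stdio.h>

static int stop_count;			/* shared: other code paths may bump it too */

#define FILE_PAUSE 0x8			/* mirrors the new TRACE_FILE_PAUSE flag */

struct file_iter {
	unsigned int flags;
};

static void open_trace(struct file_iter *it, int pause_on_trace)
{
	if (pause_on_trace) {
		it->flags |= FILE_PAUSE;	/* remember that *we* stopped tracing */
		stop_count++;
	}
}

static void release_trace(struct file_iter *it)
{
	/*
	 * The old test was effectively "if (stop_count) stop_count--;",
	 * which wrongly decrements when some other user raised the counter.
	 * Testing the per-file flag pairs the decrement with our increment.
	 */
	if (it->flags & FILE_PAUSE)
		stop_count--;
}

int main(void)
{
	struct file_iter it = { 0 };

	stop_count++;		/* some other user has stopped tracing */
	open_trace(&it, 0);	/* pause-on-trace not set for this open */
	release_trace(&it);	/* leaves the other user's stop intact */
	printf("stop_count = %d\n", stop_count);	/* prints 1, not 0 */
	return 0;
}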
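The print_synth_event() fix is easiest to see as a printf-style argument mismatch: when a format string drops the "%.*s" precision but the call site keeps passing the length, every later argument is read one slot off and the string conversion consumes an integer as a pointer. A small user-space C illustration, assuming nothing about the kernel beyond vsnprintf() reporting an unreadable string pointer as "(efault)":

#include <stdio.h>

int main(void)
{
	char name[] = "field";
	int len = 5;

	/* Correct pairing: "%.*s" consumes both len and name. */
	printf("ok: %.*s\n", len, name);

	/*
	 * Buggy pairing, as in the old print_synth_event(): the format was
	 * rewritten to "%s" but len was still passed first, so "%s" would
	 * read the integer as a char pointer. In the kernel, vsnprintf()
	 * prints "(efault)" for such a bogus pointer. In user space this
	 * is undefined behavior, so it is shown as a comment, not run:
	 *
	 *	printf("bad: %s\n", len, name);
	 */
	return 0;
}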
@@ -138,6 +138,7 @@ enum trace_iter_flags {
 	TRACE_FILE_LAT_FMT = 1,
 	TRACE_FILE_ANNOTATE = 2,
 	TRACE_FILE_TIME_IN_NS = 4,
+	TRACE_FILE_PAUSE = 8,
 };
@@ -163,7 +163,7 @@ enum {
 #define RET_STACK(t, offset) ((struct ftrace_ret_stack *)(&(t)->ret_stack[offset]))
 
 /*
- * Each fgraph_ops has a reservered unsigned long at the end (top) of the
+ * Each fgraph_ops has a reserved unsigned long at the end (top) of the
  * ret_stack to store task specific state.
  */
 #define SHADOW_STACK_TASK_VARS(ret_stack) \
@@ -30,7 +30,7 @@
  * fprobe_table: hold 'fprobe_hlist::hlist' for checking the fprobe still
  *   exists. The key is the address of fprobe instance.
  * fprobe_ip_table: hold 'fprobe_hlist::array[*]' for searching the fprobe
- *   instance related to the funciton address. The key is the ftrace IP
+ *   instance related to the function address. The key is the ftrace IP
  *   address.
  *
  * When unregistering the fprobe, fprobe_hlist::fp and fprobe_hlist::array[*].fp
@@ -1770,7 +1770,7 @@ static bool rb_meta_init(struct trace_buffer *buffer, int scratch_size)
 	bmeta->total_size = total_size;
 	bmeta->buffers_offset = (void *)ptr - (void *)bmeta;
 
-	/* Zero out the scatch pad */
+	/* Zero out the scratch pad */
 	memset((void *)bmeta + sizeof(*bmeta), 0, bmeta->buffers_offset - sizeof(*bmeta));
 
 	return false;
@@ -6089,7 +6089,7 @@ static void rb_clear_buffer_page(struct buffer_page *page)
  * id field, and updated via this function.
  *
  * But for a fixed memory mapped buffer, the id is already assigned for
- * fixed memory ording in the memory layout and can not be used. Instead
+ * fixed memory ordering in the memory layout and can not be used. Instead
  * the index of where the page lies in the memory layout is used.
  *
  * For the normal pages, set the buffer page id with the passed in @id
@@ -7669,7 +7669,7 @@ static __init int test_ringbuffer(void)
 	/*
 	 * Show buffer is enabled before setting rb_test_started.
 	 * Yes there's a small race window where events could be
-	 * dropped and the thread wont catch it. But when a ring
+	 * dropped and the thread won't catch it. But when a ring
 	 * buffer gets enabled, there will always be some kind of
 	 * delay before other CPUs see it. Thus, we don't care about
 	 * those dropped events. We care about events dropped after
@@ -433,7 +433,7 @@ static int __init ring_buffer_benchmark_init(void)
 {
 	int ret;
 
-	/* make a one meg buffer in overwite mode */
+	/* make a one meg buffer in overwrite mode */
 	buffer = ring_buffer_alloc(1000000, RB_FL_OVERWRITE);
 	if (!buffer)
 		return -ENOMEM;
@@ -125,7 +125,7 @@ cpumask_var_t __read_mostly tracing_buffer_mask;
  * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
  * is set, then ftrace_dump is called. This will output the contents
  * of the ftrace buffers to the console. This is very useful for
- * capturing traces that lead to crashes and outputing it to a
+ * capturing traces that lead to crashes and outputting it to a
  * serial console.
 *
 * It is default off, but you can enable it with either specifying
@@ -134,7 +134,7 @@ cpumask_var_t __read_mostly tracing_buffer_mask;
  * Set 1 if you want to dump buffers of all CPUs
  * Set 2 if you want to dump the buffer of the CPU that triggered oops
  * Set instance name if you want to dump the specific trace instance
- * Multiple instance dump is also supported, and instances are seperated
+ * Multiple instance dump is also supported, and instances are separated
  * by commas.
  */
 /* Set to string format zero to disable by default */
@@ -4709,8 +4709,10 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
 	 * If pause-on-trace is enabled, then stop the trace while
 	 * dumping, unless this is the "snapshot" file
 	 */
-	if (!iter->snapshot && (tr->trace_flags & TRACE_ITER(PAUSE_ON_TRACE)))
+	if (!iter->snapshot && (tr->trace_flags & TRACE_ITER(PAUSE_ON_TRACE))) {
+		iter->iter_flags |= TRACE_FILE_PAUSE;
 		tracing_stop_tr(tr);
+	}
 
 	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
 		for_each_tracing_cpu(cpu) {
@@ -4842,7 +4844,7 @@ static int tracing_release(struct inode *inode, struct file *file)
 	if (iter->trace && iter->trace->close)
 		iter->trace->close(iter);
 
-	if (!iter->snapshot && tr->stop_count)
+	if (iter->iter_flags & TRACE_FILE_PAUSE)
 		/* reenable tracing if it was previously enabled */
 		tracing_start_tr(tr);
 
@@ -5276,7 +5278,7 @@ int set_tracer_flag(struct trace_array *tr, u64 mask, int enabled)
 			return -EINVAL;
 		/*
 		 * An instance must always have it set.
-		 * by default, that's the global_trace instane.
+		 * by default, that's the global_trace instance.
 		 */
 		if (printk_trace == tr)
 			update_printk_trace(&global_trace);
@@ -7554,7 +7556,7 @@ char *trace_user_fault_read(struct trace_user_buf_info *tinfo,
 	migrate_disable();
 
 	/*
-	 * Now preemption is being enabed and another task can come in
+	 * Now preemption is being enabled and another task can come in
 	 * and use the same buffer and corrupt our data.
 	 */
 	preempt_enable_notrace();
@@ -11329,7 +11331,7 @@ __init static void do_allocate_snapshot(const char *name)
 	/*
 	 * When allocate_snapshot is set, the next call to
 	 * allocate_trace_buffers() (called by trace_array_get_by_name())
-	 * will allocate the snapshot buffer. That will alse clear
+	 * will allocate the snapshot buffer. That will also clear
 	 * this flag.
 	 */
 	allocate_snapshot = true;
@@ -360,7 +360,7 @@ static bool process_string(const char *fmt, int len, struct trace_event_call *ca
 			/* Anything else, this isn't a function */
 			break;
 		}
-		/* A function could be wrapped in parethesis, try the next one */
+		/* A function could be wrapped in parenthesis, try the next one */
 		s = r + 1;
 	} while (s < e);
 
@@ -567,7 +567,7 @@ static void test_event_printk(struct trace_event_call *call)
 			 * If start_arg is zero, then this is the start of the
 			 * first argument. The processing of the argument happens
 			 * when the end of the argument is found, as it needs to
-			 * handle paranthesis and such.
+			 * handle parenthesis and such.
 			 */
 			if (!start_arg) {
 				start_arg = i;
@@ -785,7 +785,7 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file,
 		 *
 		 * When soft_disable is not set but the soft_mode is,
 		 * we do nothing. Do not disable the tracepoint, otherwise
-		 * "soft enable"s (clearing the SOFT_DISABLED bit) wont work.
+		 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
 		 */
 		if (soft_disable) {
 			if (atomic_dec_return(&file->sm_ref) > 0)
@@ -1394,7 +1394,7 @@ int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
 	if (!tr)
 		return -ENOENT;
 
-	/* Modules events can be appened with :mod:<module> */
+	/* Modules events can be appended with :mod:<module> */
 	mod = strstr(buf, ":mod:");
 	if (mod) {
 		*mod = '\0';
@@ -142,7 +142,7 @@ static bool is_not(const char *str)
 }
 
 /**
- * struct prog_entry - a singe entry in the filter program
+ * struct prog_entry - a single entry in the filter program
  * @target: Index to jump to on a branch (actually one minus the index)
  * @when_to_branch: The value of the result of the predicate to do a branch
  * @pred: The predicate to execute.
@@ -5283,7 +5283,7 @@ hist_trigger_actions(struct hist_trigger_data *hist_data,
  * on the stack, so when the histogram trigger is initialized
  * a percpu array of 4 hist_pad structures is allocated.
  * This will cover every context from normal, softirq, irq and NMI
- * in the very unlikely event that a tigger happens at each of
+ * in the very unlikely event that a trigger happens at each of
  * these contexts and interrupts a currently active trigger.
  */
 struct hist_pad {
@@ -375,7 +375,6 @@ static enum print_line_t print_synth_event(struct trace_iterator *iter,
 				n_u64++;
 			} else {
 				trace_seq_printf(s, print_fmt, se->fields[i]->name,
-						 STR_VAR_LEN_MAX,
 						 (char *)&entry->fields[n_u64].as_u64,
 						 i == se->n_fields - 1 ? "" : " ");
 				n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
@@ -732,7 +732,7 @@ static void unregister_trigger(char *glob,
  * param - text following cmd and ':' and stripped of filter
  * filter - the optional filter text following (and including) 'if'
  *
- * To illustrate the use of these componenents, here are some concrete
+ * To illustrate the use of these components, here are some concrete
  * examples. For the following triggers:
  *
  * echo 'traceon:5 if pid == 0' > trigger
@@ -1041,7 +1041,7 @@ static int user_field_array_size(const char *type)
 
 static int user_field_size(const char *type)
 {
-	/* long is not allowed from a user, since it's ambigious in size */
+	/* long is not allowed from a user, since it's ambiguous in size */
 	if (strcmp(type, "s64") == 0)
 		return sizeof(s64);
 	if (strcmp(type, "u64") == 0)
@@ -1079,7 +1079,7 @@ static int user_field_size(const char *type)
 	if (str_has_prefix(type, "__rel_loc "))
 		return sizeof(u32);
 
-	/* Uknown basic type, error */
+	/* Unknown basic type, error */
 	return -EINVAL;
 }
 
@@ -2465,7 +2465,7 @@ static long user_events_ioctl_reg(struct user_event_file_info *info,
 	/*
 	 * Prevent users from using the same address and bit multiple times
 	 * within the same mm address space. This can cause unexpected behavior
-	 * for user processes that is far easier to debug if this is explictly
+	 * for user processes that is far easier to debug if this is explicitly
	 * an error upon registering.
 	 */
 	if (current_user_event_enabler_exists((unsigned long)reg.enable_addr,
@@ -329,7 +329,7 @@ static struct osnoise_data {
 	u64 print_stack;	/* print IRQ stack if total > */
 	int timerlat_tracer;	/* timerlat tracer */
 #endif
-	bool tainted;		/* infor users and developers about a problem */
+	bool tainted;		/* info users and developers about a problem */
 } osnoise_data = {
 	.sample_period = DEFAULT_SAMPLE_PERIOD,
 	.sample_runtime = DEFAULT_SAMPLE_RUNTIME,
@@ -738,7 +738,7 @@ cond_move_thread_delta_start(struct osnoise_variables *osn_var, u64 duration)
 /*
  * get_int_safe_duration - Get the duration of a window
  *
- * The irq, softirq and thread varaibles need to have its duration without
+ * The irq, softirq and thread variables need to have its duration without
  * the interference from higher priority interrupts. Instead of keeping a
 * variable to discount the interrupt interference from these variables, the
 * starting time of these variables are pushed forward with the interrupt's
@@ -1460,7 +1460,7 @@ static int run_osnoise(void)
 	stop_in = osnoise_data.stop_tracing * NSEC_PER_USEC;
 
 	/*
-	 * Start timestemp
+	 * Start timestamp
 	 */
 	start = time_get();
 
@@ -1881,7 +1881,7 @@ static int timerlat_main(void *data)
 	tlat->kthread = current;
 	osn_var->pid = current->pid;
 	/*
-	 * Anotate the arrival time.
+	 * Annotate the arrival time.
 	 */
 	tlat->abs_period = hrtimer_cb_get_time(&tlat->timer);
 
@@ -1978,7 +1978,7 @@ static void stop_per_cpu_kthreads(void)
 }
 
 /*
- * start_kthread - Start a workload tread
+ * start_kthread - Start a workload thread
 */
 static int start_kthread(unsigned int cpu)
 {
@@ -2705,7 +2705,7 @@ static int osnoise_create_cpu_timerlat_fd(struct dentry *top_dir)
 	 * Why not using tracing instance per_cpu/ dir?
 	 *
 	 * Because osnoise/timerlat have a single workload, having
-	 * multiple files like these are wast of memory.
+	 * multiple files like these are waste of memory.
 	 */
 	per_cpu = tracefs_create_dir("per_cpu", top_dir);
 	if (!per_cpu)
@@ -517,7 +517,7 @@ static void clear_btf_context(struct traceprobe_parse_context *ctx)
 	}
 }
 
-/* Return 1 if the field separater is arrow operator ('->') */
+/* Return 1 if the field separator is arrow operator ('->') */
 static int split_next_field(char *varname, char **next_field,
 			    struct traceprobe_parse_context *ctx)
 {
@@ -15,7 +15,7 @@
  *
  * A write to the buffer will either succeed or fail. That is, unlike
  * sprintf() there will not be a partial write (well it may write into
- * the buffer but it wont update the pointers). This allows users to
+ * the buffer but it won't update the pointers). This allows users to
  * try to write something into the trace_seq buffer and if it fails
  * they can flush it and try again.
  *