Merge tag 'trace-3.13' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux...
authorLinus Torvalds <torvalds@linux-foundation.org>
Sat, 16 Nov 2013 20:23:18 +0000 (12:23 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sat, 16 Nov 2013 20:23:18 +0000 (12:23 -0800)
Pull tracing update from Steven Rostedt:
 "This batch of changes is mostly clean ups and small bug fixes.  The
  only real feature that was added this release is from Namhyung Kim,
  who introduced "set_graph_notrace" filter that lets you run the
  function graph tracer and not trace particular functions and their
  call chain.

  Tom Zanussi added some updates to the ftrace multibuffer tracing that
  made it more consistent with the top level tracing.

  One of the fixes for perf function tracing required an API change in
  RCU; the addition of "rcu_is_watching()".  As Paul McKenney is pushing
  that change in this release too, he gave me a branch that included all
  the changes to get that working, and I pulled that into my tree in
  order to complete the perf function tracing fix"

* tag 'trace-3.13' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing: Add rcu annotation for syscall trace descriptors
  tracing: Do not use signed enums with unsigned long long in fgraph output
  tracing: Remove unused function ftrace_off_permanent()
  tracing: Do not assign filp->private_data to freed memory
  tracing: Add helper function tracing_is_disabled()
  tracing: Open tracer when ftrace_dump_on_oops is used
  tracing: Add support for SOFT_DISABLE to syscall events
  tracing: Make register/unregister_ftrace_command __init
  tracing: Update event filters for multibuffer
  recordmcount.pl: Add support for __fentry__
  ftrace: Have control op function callback only trace when RCU is watching
  rcu: Do not trace rcu_is_watching() functions
  ftrace/x86: skip over the breakpoint for ftrace caller
  trace/trace_stat: use rbtree postorder iteration helper instead of opencoding
  ftrace: Add set_graph_notrace filter
  ftrace: Narrow down the protected area of graph_lock
  ftrace: Introduce struct ftrace_graph_data
  ftrace: Get rid of ftrace_graph_filter_enabled
  tracing: Fix potential out-of-bounds in trace_get_user()
  tracing: Show more exact help information about snapshot

23 files changed:
arch/x86/kernel/ftrace.c
include/linux/ftrace.h
include/linux/ftrace_event.h
include/linux/kernel.h
include/linux/syscalls.h
include/trace/ftrace.h
kernel/rcu/tiny.c
kernel/rcu/tree.c
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_branch.c
kernel/trace/trace_events.c
kernel/trace/trace_events_filter.c
kernel/trace/trace_export.c
kernel/trace/trace_functions_graph.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_mmiotrace.c
kernel/trace/trace_sched_switch.c
kernel/trace/trace_stat.c
kernel/trace/trace_syscalls.c
kernel/trace/trace_uprobe.c
scripts/recordmcount.pl

index 42a392a9fd02fd3c0e306e3a4f4da323444dc3fd..d4bdd253fea71358ca080ba567a44935a9830396 100644 (file)
@@ -248,6 +248,15 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
        return ret;
 }
 
+static int is_ftrace_caller(unsigned long ip)
+{
+       if (ip == (unsigned long)(&ftrace_call) ||
+               ip == (unsigned long)(&ftrace_regs_call))
+               return 1;
+
+       return 0;
+}
+
 /*
  * A breakpoint was added to the code address we are about to
  * modify, and this is the handle that will just skip over it.
@@ -257,10 +266,13 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
  */
 int ftrace_int3_handler(struct pt_regs *regs)
 {
+       unsigned long ip;
+
        if (WARN_ON_ONCE(!regs))
                return 0;
 
-       if (!ftrace_location(regs->ip - 1))
+       ip = regs->ip - 1;
+       if (!ftrace_location(ip) && !is_ftrace_caller(ip))
                return 0;
 
        regs->ip += MCOUNT_INSN_SIZE - 1;
index 9f15c0064c50586ed08ee740698bcc6f2450c959..31ea4b428360c7e22929325ce90a76aee00a046b 100644 (file)
@@ -533,11 +533,11 @@ static inline int ftrace_force_update(void) { return 0; }
 static inline void ftrace_disable_daemon(void) { }
 static inline void ftrace_enable_daemon(void) { }
 static inline void ftrace_release_mod(struct module *mod) {}
-static inline int register_ftrace_command(struct ftrace_func_command *cmd)
+static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
 {
        return -EINVAL;
 }
-static inline int unregister_ftrace_command(char *cmd_name)
+static inline __init int unregister_ftrace_command(char *cmd_name)
 {
        return -EINVAL;
 }
@@ -721,6 +721,7 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
 extern char __irqentry_text_start[];
 extern char __irqentry_text_end[];
 
+#define FTRACE_NOTRACE_DEPTH 65536
 #define FTRACE_RETFUNC_DEPTH 50
 #define FTRACE_RETSTACK_ALLOC_SIZE 32
 extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
index 5eaa746735ff7f4ce73f0ac5f0ed11ba78a24d50..9abbe630c456775b19cc999f36887efa94bbdd41 100644 (file)
@@ -202,6 +202,7 @@ enum {
        TRACE_EVENT_FL_NO_SET_FILTER_BIT,
        TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
        TRACE_EVENT_FL_WAS_ENABLED_BIT,
+       TRACE_EVENT_FL_USE_CALL_FILTER_BIT,
 };
 
 /*
@@ -213,6 +214,7 @@ enum {
  *  WAS_ENABLED   - Set and stays set when an event was ever enabled
  *                    (used for module unloading, if a module event is enabled,
  *                     it is best to clear the buffers that used it).
+ *  USE_CALL_FILTER - For ftrace internal events, don't use file filter
  */
 enum {
        TRACE_EVENT_FL_FILTERED         = (1 << TRACE_EVENT_FL_FILTERED_BIT),
@@ -220,6 +222,7 @@ enum {
        TRACE_EVENT_FL_NO_SET_FILTER    = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
        TRACE_EVENT_FL_IGNORE_ENABLE    = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
        TRACE_EVENT_FL_WAS_ENABLED      = (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT),
+       TRACE_EVENT_FL_USE_CALL_FILTER  = (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT),
 };
 
 struct ftrace_event_call {
@@ -238,6 +241,7 @@ struct ftrace_event_call {
         *   bit 2:             failed to apply filter
         *   bit 3:             ftrace internal event (do not enable)
         *   bit 4:             Event was enabled by module
+        *   bit 5:             use call filter rather than file filter
         */
        int                     flags; /* static flags of different events */
 
@@ -253,6 +257,8 @@ struct ftrace_subsystem_dir;
 enum {
        FTRACE_EVENT_FL_ENABLED_BIT,
        FTRACE_EVENT_FL_RECORDED_CMD_BIT,
+       FTRACE_EVENT_FL_FILTERED_BIT,
+       FTRACE_EVENT_FL_NO_SET_FILTER_BIT,
        FTRACE_EVENT_FL_SOFT_MODE_BIT,
        FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
 };
@@ -261,6 +267,8 @@ enum {
  * Ftrace event file flags:
  *  ENABLED      - The event is enabled
  *  RECORDED_CMD  - The comms should be recorded at sched_switch
+ *  FILTERED     - The event has a filter attached
+ *  NO_SET_FILTER - Set when filter has error and is to be ignored
  *  SOFT_MODE     - The event is enabled/disabled by SOFT_DISABLED
  *  SOFT_DISABLED - When set, do not trace the event (even though its
  *                   tracepoint may be enabled)
@@ -268,6 +276,8 @@ enum {
 enum {
        FTRACE_EVENT_FL_ENABLED         = (1 << FTRACE_EVENT_FL_ENABLED_BIT),
        FTRACE_EVENT_FL_RECORDED_CMD    = (1 << FTRACE_EVENT_FL_RECORDED_CMD_BIT),
+       FTRACE_EVENT_FL_FILTERED        = (1 << FTRACE_EVENT_FL_FILTERED_BIT),
+       FTRACE_EVENT_FL_NO_SET_FILTER   = (1 << FTRACE_EVENT_FL_NO_SET_FILTER_BIT),
        FTRACE_EVENT_FL_SOFT_MODE       = (1 << FTRACE_EVENT_FL_SOFT_MODE_BIT),
        FTRACE_EVENT_FL_SOFT_DISABLED   = (1 << FTRACE_EVENT_FL_SOFT_DISABLED_BIT),
 };
@@ -275,6 +285,7 @@ enum {
 struct ftrace_event_file {
        struct list_head                list;
        struct ftrace_event_call        *event_call;
+       struct event_filter             *filter;
        struct dentry                   *dir;
        struct trace_array              *tr;
        struct ftrace_subsystem_dir     *system;
@@ -310,12 +321,16 @@ struct ftrace_event_file {
 
 #define MAX_FILTER_STR_VAL     256     /* Should handle KSYM_SYMBOL_LEN */
 
-extern void destroy_preds(struct ftrace_event_call *call);
+extern void destroy_preds(struct ftrace_event_file *file);
+extern void destroy_call_preds(struct ftrace_event_call *call);
 extern int filter_match_preds(struct event_filter *filter, void *rec);
-extern int filter_current_check_discard(struct ring_buffer *buffer,
-                                       struct ftrace_event_call *call,
-                                       void *rec,
-                                       struct ring_buffer_event *event);
+
+extern int filter_check_discard(struct ftrace_event_file *file, void *rec,
+                               struct ring_buffer *buffer,
+                               struct ring_buffer_event *event);
+extern int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
+                                    struct ring_buffer *buffer,
+                                    struct ring_buffer_event *event);
 
 enum {
        FILTER_OTHER = 0,
index 672ddc4de4af0511b20a4d269bcae1c517c3d5c4..d4e98d13eff4bda01b08313986b63135cc80ee2b 100644 (file)
@@ -501,7 +501,6 @@ void tracing_snapshot_alloc(void);
 
 extern void tracing_start(void);
 extern void tracing_stop(void);
-extern void ftrace_off_permanent(void);
 
 static inline __printf(1, 2)
 void ____trace_printk_check_format(const char *fmt, ...)
@@ -639,7 +638,6 @@ extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
 #else
 static inline void tracing_start(void) { }
 static inline void tracing_stop(void) { }
-static inline void ftrace_off_permanent(void) { }
 static inline void trace_dump_stack(int skip) { }
 
 static inline void tracing_on(void) { }
index c27f846f6b71ae10cc5a98de630f55e898b2c8e5..94273bbe605007462f6e0daacf01715f2a7aca6a 100644 (file)
@@ -120,7 +120,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
                .class                  = &event_class_syscall_enter,   \
                .event.funcs            = &enter_syscall_print_funcs,   \
                .data                   = (void *)&__syscall_meta_##sname,\
-               .flags                  = TRACE_EVENT_FL_CAP_ANY,       \
+               .flags                  = TRACE_EVENT_FL_CAP_ANY,       \
        };                                                              \
        static struct ftrace_event_call __used                          \
          __attribute__((section("_ftrace_events")))                    \
@@ -134,7 +134,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
                .class                  = &event_class_syscall_exit,    \
                .event.funcs            = &exit_syscall_print_funcs,    \
                .data                   = (void *)&__syscall_meta_##sname,\
-               .flags                  = TRACE_EVENT_FL_CAP_ANY,       \
+               .flags                  = TRACE_EVENT_FL_CAP_ANY,       \
        };                                                              \
        static struct ftrace_event_call __used                          \
          __attribute__((section("_ftrace_events")))                    \
index 5c7ab17cbb0225eb3147d108a38d2564f5f23db8..52594b20179e0d8459a0810c7fbcc75fceda84ff 100644 (file)
@@ -437,9 +437,8 @@ static inline notrace int ftrace_get_offsets_##call(                        \
  *     { <assign>; }  <-- Here we assign the entries by the __field and
  *                        __array macros.
  *
- *     if (!filter_current_check_discard(buffer, event_call, entry, event))
- *             trace_nowake_buffer_unlock_commit(buffer,
- *                                                event, irq_flags, pc);
+ *     if (!filter_check_discard(ftrace_file, entry, buffer, event))
+ *             trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
  * }
  *
  * static struct trace_event ftrace_event_type_<call> = {
@@ -553,7 +552,7 @@ ftrace_raw_event_##call(void *__data, proto)                                \
                                                                        \
        { assign; }                                                     \
                                                                        \
-       if (!filter_current_check_discard(buffer, event_call, entry, event)) \
+       if (!filter_check_discard(ftrace_file, entry, buffer, event))   \
                trace_buffer_unlock_commit(buffer, event, irq_flags, pc); \
 }
 /*
index 0c9a934cfec19d0859b57c2ca9eca86830401ab2..1254f312d02483f524319ee5ab9d2e1a13f48678 100644 (file)
@@ -181,7 +181,7 @@ EXPORT_SYMBOL_GPL(rcu_irq_enter);
 /*
  * Test whether RCU thinks that the current CPU is idle.
  */
-bool __rcu_is_watching(void)
+bool notrace __rcu_is_watching(void)
 {
        return rcu_dynticks_nesting;
 }
index 4c06ddfea7cd2beffbf428a952e1efada1d1fa57..dd081987a8ec62349ad7721476cb97606077f28a 100644 (file)
@@ -664,7 +664,7 @@ void rcu_nmi_exit(void)
  * rcu_is_watching(), the caller of __rcu_is_watching() must have at
  * least disabled preemption.
  */
-bool __rcu_is_watching(void)
+bool notrace __rcu_is_watching(void)
 {
        return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
 }
@@ -675,7 +675,7 @@ bool __rcu_is_watching(void)
  * If the current CPU is in its idle loop and is neither in an interrupt
  * or NMI handler, return true.
  */
-bool rcu_is_watching(void)
+bool notrace rcu_is_watching(void)
 {
        int ret;
 
index 03cf44ac54d3666b434a26a30bc6f7a1e34cbf56..22fa556967609465a0dbd98e238ee1d14b52e328 100644 (file)
@@ -3307,7 +3307,11 @@ void unregister_ftrace_function_probe_all(char *glob)
 static LIST_HEAD(ftrace_commands);
 static DEFINE_MUTEX(ftrace_cmd_mutex);
 
-int register_ftrace_command(struct ftrace_func_command *cmd)
+/*
+ * Currently we only register ftrace commands from __init, so mark this
+ * __init too.
+ */
+__init int register_ftrace_command(struct ftrace_func_command *cmd)
 {
        struct ftrace_func_command *p;
        int ret = 0;
@@ -3326,7 +3330,11 @@ int register_ftrace_command(struct ftrace_func_command *cmd)
        return ret;
 }
 
-int unregister_ftrace_command(struct ftrace_func_command *cmd)
+/*
+ * Currently we only unregister ftrace commands from __init, so mark
+ * this __init too.
+ */
+__init int unregister_ftrace_command(struct ftrace_func_command *cmd)
 {
        struct ftrace_func_command *p, *n;
        int ret = -ENODEV;
@@ -3641,7 +3649,7 @@ __setup("ftrace_filter=", set_ftrace_filter);
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
-static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
+static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
 
 static int __init set_graph_function(char *str)
 {
@@ -3659,7 +3667,7 @@ static void __init set_ftrace_early_graph(char *buf)
                func = strsep(&buf, ",");
                /* we allow only one expression at a time */
                ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
-                                     func);
+                                     FTRACE_GRAPH_MAX_FUNCS, func);
                if (ret)
                        printk(KERN_DEBUG "ftrace: function %s not "
                                          "traceable\n", func);
@@ -3776,15 +3784,25 @@ static const struct file_operations ftrace_notrace_fops = {
 static DEFINE_MUTEX(graph_lock);
 
 int ftrace_graph_count;
-int ftrace_graph_filter_enabled;
+int ftrace_graph_notrace_count;
 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
+unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
+
+struct ftrace_graph_data {
+       unsigned long *table;
+       size_t size;
+       int *count;
+       const struct seq_operations *seq_ops;
+};
 
 static void *
 __g_next(struct seq_file *m, loff_t *pos)
 {
-       if (*pos >= ftrace_graph_count)
+       struct ftrace_graph_data *fgd = m->private;
+
+       if (*pos >= *fgd->count)
                return NULL;
-       return &ftrace_graph_funcs[*pos];
+       return &fgd->table[*pos];
 }
 
 static void *
@@ -3796,10 +3814,12 @@ g_next(struct seq_file *m, void *v, loff_t *pos)
 
 static void *g_start(struct seq_file *m, loff_t *pos)
 {
+       struct ftrace_graph_data *fgd = m->private;
+
        mutex_lock(&graph_lock);
 
        /* Nothing, tell g_show to print all functions are enabled */
-       if (!ftrace_graph_filter_enabled && !*pos)
+       if (!*fgd->count && !*pos)
                return (void *)1;
 
        return __g_next(m, pos);
@@ -3835,38 +3855,88 @@ static const struct seq_operations ftrace_graph_seq_ops = {
 };
 
 static int
-ftrace_graph_open(struct inode *inode, struct file *file)
+__ftrace_graph_open(struct inode *inode, struct file *file,
+                   struct ftrace_graph_data *fgd)
 {
        int ret = 0;
 
-       if (unlikely(ftrace_disabled))
-               return -ENODEV;
-
        mutex_lock(&graph_lock);
        if ((file->f_mode & FMODE_WRITE) &&
            (file->f_flags & O_TRUNC)) {
-               ftrace_graph_filter_enabled = 0;
-               ftrace_graph_count = 0;
-               memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
+               *fgd->count = 0;
+               memset(fgd->table, 0, fgd->size * sizeof(*fgd->table));
        }
        mutex_unlock(&graph_lock);
 
-       if (file->f_mode & FMODE_READ)
-               ret = seq_open(file, &ftrace_graph_seq_ops);
+       if (file->f_mode & FMODE_READ) {
+               ret = seq_open(file, fgd->seq_ops);
+               if (!ret) {
+                       struct seq_file *m = file->private_data;
+                       m->private = fgd;
+               }
+       } else
+               file->private_data = fgd;
 
        return ret;
 }
 
+static int
+ftrace_graph_open(struct inode *inode, struct file *file)
+{
+       struct ftrace_graph_data *fgd;
+
+       if (unlikely(ftrace_disabled))
+               return -ENODEV;
+
+       fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
+       if (fgd == NULL)
+               return -ENOMEM;
+
+       fgd->table = ftrace_graph_funcs;
+       fgd->size = FTRACE_GRAPH_MAX_FUNCS;
+       fgd->count = &ftrace_graph_count;
+       fgd->seq_ops = &ftrace_graph_seq_ops;
+
+       return __ftrace_graph_open(inode, file, fgd);
+}
+
+static int
+ftrace_graph_notrace_open(struct inode *inode, struct file *file)
+{
+       struct ftrace_graph_data *fgd;
+
+       if (unlikely(ftrace_disabled))
+               return -ENODEV;
+
+       fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
+       if (fgd == NULL)
+               return -ENOMEM;
+
+       fgd->table = ftrace_graph_notrace_funcs;
+       fgd->size = FTRACE_GRAPH_MAX_FUNCS;
+       fgd->count = &ftrace_graph_notrace_count;
+       fgd->seq_ops = &ftrace_graph_seq_ops;
+
+       return __ftrace_graph_open(inode, file, fgd);
+}
+
 static int
 ftrace_graph_release(struct inode *inode, struct file *file)
 {
-       if (file->f_mode & FMODE_READ)
+       if (file->f_mode & FMODE_READ) {
+               struct seq_file *m = file->private_data;
+
+               kfree(m->private);
                seq_release(inode, file);
+       } else {
+               kfree(file->private_data);
+       }
+
        return 0;
 }
 
 static int
-ftrace_set_func(unsigned long *array, int *idx, char *buffer)
+ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
 {
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
@@ -3879,7 +3949,7 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer)
 
        /* decode regex */
        type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
-       if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
+       if (!not && *idx >= size)
                return -EBUSY;
 
        search_len = strlen(search);
@@ -3907,7 +3977,7 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer)
                                fail = 0;
                                if (!exists) {
                                        array[(*idx)++] = rec->ip;
-                                       if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
+                                       if (*idx >= size)
                                                goto out;
                                }
                        } else {
@@ -3925,8 +3995,6 @@ out:
        if (fail)
                return -EINVAL;
 
-       ftrace_graph_filter_enabled = !!(*idx);
-
        return 0;
 }
 
@@ -3935,36 +4003,33 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos)
 {
        struct trace_parser parser;
-       ssize_t read, ret;
+       ssize_t read, ret = 0;
+       struct ftrace_graph_data *fgd = file->private_data;
 
        if (!cnt)
                return 0;
 
-       mutex_lock(&graph_lock);
-
-       if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
-               ret = -ENOMEM;
-               goto out_unlock;
-       }
+       if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX))
+               return -ENOMEM;
 
        read = trace_get_user(&parser, ubuf, cnt, ppos);
 
        if (read >= 0 && trace_parser_loaded((&parser))) {
                parser.buffer[parser.idx] = 0;
 
+               mutex_lock(&graph_lock);
+
                /* we allow only one expression at a time */
-               ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
-                                       parser.buffer);
-               if (ret)
-                       goto out_free;
+               ret = ftrace_set_func(fgd->table, fgd->count, fgd->size,
+                                     parser.buffer);
+
+               mutex_unlock(&graph_lock);
        }
 
-       ret = read;
+       if (!ret)
+               ret = read;
 
-out_free:
        trace_parser_put(&parser);
-out_unlock:
-       mutex_unlock(&graph_lock);
 
        return ret;
 }
@@ -3976,6 +4041,14 @@ static const struct file_operations ftrace_graph_fops = {
        .llseek         = ftrace_filter_lseek,
        .release        = ftrace_graph_release,
 };
+
+static const struct file_operations ftrace_graph_notrace_fops = {
+       .open           = ftrace_graph_notrace_open,
+       .read           = seq_read,
+       .write          = ftrace_graph_write,
+       .llseek         = ftrace_filter_lseek,
+       .release        = ftrace_graph_release,
+};
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
@@ -3997,6 +4070,9 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
        trace_create_file("set_graph_function", 0444, d_tracer,
                                    NULL,
                                    &ftrace_graph_fops);
+       trace_create_file("set_graph_notrace", 0444, d_tracer,
+                                   NULL,
+                                   &ftrace_graph_notrace_fops);
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
        return 0;
@@ -4320,12 +4396,21 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
         */
        preempt_disable_notrace();
        trace_recursion_set(TRACE_CONTROL_BIT);
+
+       /*
+        * Control funcs (perf) uses RCU. Only trace if
+        * RCU is currently active.
+        */
+       if (!rcu_is_watching())
+               goto out;
+
        do_for_each_ftrace_op(op, ftrace_control_list) {
                if (!(op->flags & FTRACE_OPS_FL_STUB) &&
                    !ftrace_function_local_disabled(op) &&
                    ftrace_ops_test(op, ip, regs))
                        op->func(ip, parent_ip, op, regs);
        } while_for_each_ftrace_op(op);
+ out:
        trace_recursion_clear(TRACE_CONTROL_BIT);
        preempt_enable_notrace();
 }
index d9fea7dfd5d3bbb0570ac1e11537d3c1ce05d330..9d20cd9743efc0bf02cff0cf0f5536c19d0252cc 100644 (file)
@@ -235,13 +235,33 @@ void trace_array_put(struct trace_array *this_tr)
        mutex_unlock(&trace_types_lock);
 }
 
-int filter_current_check_discard(struct ring_buffer *buffer,
-                                struct ftrace_event_call *call, void *rec,
-                                struct ring_buffer_event *event)
+int filter_check_discard(struct ftrace_event_file *file, void *rec,
+                        struct ring_buffer *buffer,
+                        struct ring_buffer_event *event)
 {
-       return filter_check_discard(call, rec, buffer, event);
+       if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
+           !filter_match_preds(file->filter, rec)) {
+               ring_buffer_discard_commit(buffer, event);
+               return 1;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(filter_check_discard);
+
+int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
+                             struct ring_buffer *buffer,
+                             struct ring_buffer_event *event)
+{
+       if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
+           !filter_match_preds(call->filter, rec)) {
+               ring_buffer_discard_commit(buffer, event);
+               return 1;
+       }
+
+       return 0;
 }
-EXPORT_SYMBOL_GPL(filter_current_check_discard);
+EXPORT_SYMBOL_GPL(call_filter_check_discard);
 
 cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
 {
@@ -843,9 +863,12 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
        if (isspace(ch)) {
                parser->buffer[parser->idx] = 0;
                parser->cont = false;
-       } else {
+       } else if (parser->idx < parser->size - 1) {
                parser->cont = true;
                parser->buffer[parser->idx++] = ch;
+       } else {
+               ret = -EINVAL;
+               goto out;
        }
 
        *ppos += read;
@@ -1260,21 +1283,6 @@ int is_tracing_stopped(void)
        return global_trace.stop_count;
 }
 
-/**
- * ftrace_off_permanent - disable all ftrace code permanently
- *
- * This should only be called when a serious anomally has
- * been detected.  This will turn off the function tracing,
- * ring buffers, and other tracing utilites. It takes no
- * locks and can be called from any context.
- */
-void ftrace_off_permanent(void)
-{
-       tracing_disabled = 1;
-       ftrace_stop();
-       tracing_off_permanent();
-}
-
 /**
  * tracing_start - quick start of the tracer
  *
@@ -1631,7 +1639,7 @@ trace_function(struct trace_array *tr,
        entry->ip                       = ip;
        entry->parent_ip                = parent_ip;
 
-       if (!filter_check_discard(call, entry, buffer, event))
+       if (!call_filter_check_discard(call, entry, buffer, event))
                __buffer_unlock_commit(buffer, event);
 }
 
@@ -1715,7 +1723,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 
        entry->size = trace.nr_entries;
 
-       if (!filter_check_discard(call, entry, buffer, event))
+       if (!call_filter_check_discard(call, entry, buffer, event))
                __buffer_unlock_commit(buffer, event);
 
  out:
@@ -1817,7 +1825,7 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
        trace.entries           = entry->caller;
 
        save_stack_trace_user(&trace);
-       if (!filter_check_discard(call, entry, buffer, event))
+       if (!call_filter_check_discard(call, entry, buffer, event))
                __buffer_unlock_commit(buffer, event);
 
  out_drop_count:
@@ -2009,7 +2017,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
        entry->fmt                      = fmt;
 
        memcpy(entry->buf, tbuffer, sizeof(u32) * len);
-       if (!filter_check_discard(call, entry, buffer, event)) {
+       if (!call_filter_check_discard(call, entry, buffer, event)) {
                __buffer_unlock_commit(buffer, event);
                ftrace_trace_stack(buffer, flags, 6, pc);
        }
@@ -2064,7 +2072,7 @@ __trace_array_vprintk(struct ring_buffer *buffer,
 
        memcpy(&entry->buf, tbuffer, len);
        entry->buf[len] = '\0';
-       if (!filter_check_discard(call, entry, buffer, event)) {
+       if (!call_filter_check_discard(call, entry, buffer, event)) {
                __buffer_unlock_commit(buffer, event);
                ftrace_trace_stack(buffer, flags, 6, pc);
        }
@@ -2761,7 +2769,7 @@ static void show_snapshot_main_help(struct seq_file *m)
        seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
        seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
        seq_printf(m, "#                      Takes a snapshot of the main buffer.\n");
-       seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate)\n");
+       seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
        seq_printf(m, "#                      (Doesn't have to be '2' works with any number that\n");
        seq_printf(m, "#                       is not a '0' or '1')\n");
 }
@@ -2965,6 +2973,11 @@ int tracing_open_generic(struct inode *inode, struct file *filp)
        return 0;
 }
 
+bool tracing_is_disabled(void)
+{
+       return (tracing_disabled) ? true: false;
+}
+
 /*
  * Open and update trace_array ref count.
  * Must have the current trace_array passed to it.
@@ -5455,12 +5468,12 @@ static struct ftrace_func_command ftrace_snapshot_cmd = {
        .func                   = ftrace_trace_snapshot_callback,
 };
 
-static int register_snapshot_cmd(void)
+static __init int register_snapshot_cmd(void)
 {
        return register_ftrace_command(&ftrace_snapshot_cmd);
 }
 #else
-static inline int register_snapshot_cmd(void) { return 0; }
+static inline __init int register_snapshot_cmd(void) { return 0; }
 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
 
 struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
@@ -6254,6 +6267,17 @@ void trace_init_global_iter(struct trace_iterator *iter)
        iter->trace = iter->tr->current_trace;
        iter->cpu_file = RING_BUFFER_ALL_CPUS;
        iter->trace_buffer = &global_trace.trace_buffer;
+
+       if (iter->trace && iter->trace->open)
+               iter->trace->open(iter);
+
+       /* Annotate start of buffers if we had overruns */
+       if (ring_buffer_overruns(iter->trace_buffer->buffer))
+               iter->iter_flags |= TRACE_FILE_ANNOTATE;
+
+       /* Output in nanoseconds only if we are using a clock in nanoseconds. */
+       if (trace_clocks[iter->tr->clock_id].in_ns)
+               iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
 }
 
 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
index 73d08aa25b557a379a63c4602818c8cd33367547..ea189e027b80c3928a1c4390091b1eaa8f3699db 100644 (file)
@@ -193,8 +193,8 @@ struct trace_array {
 #ifdef CONFIG_FTRACE_SYSCALLS
        int                     sys_refcount_enter;
        int                     sys_refcount_exit;
-       DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
-       DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
+       struct ftrace_event_file __rcu *enter_syscall_files[NR_syscalls];
+       struct ftrace_event_file __rcu *exit_syscall_files[NR_syscalls];
 #endif
        int                     stop_count;
        int                     clock_id;
@@ -515,6 +515,7 @@ void tracing_reset_online_cpus(struct trace_buffer *buf);
 void tracing_reset_current(int cpu);
 void tracing_reset_all_online_cpus(void);
 int tracing_open_generic(struct inode *inode, struct file *filp);
+bool tracing_is_disabled(void);
 struct dentry *trace_create_file(const char *name,
                                 umode_t mode,
                                 struct dentry *parent,
@@ -712,6 +713,8 @@ extern unsigned long trace_flags;
 #define TRACE_GRAPH_PRINT_PROC          0x8
 #define TRACE_GRAPH_PRINT_DURATION      0x10
 #define TRACE_GRAPH_PRINT_ABS_TIME      0x20
+#define TRACE_GRAPH_PRINT_FILL_SHIFT   28
+#define TRACE_GRAPH_PRINT_FILL_MASK    (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
 
 extern enum print_line_t
 print_graph_function_flags(struct trace_iterator *iter, u32 flags);
@@ -731,15 +734,16 @@ extern void __trace_graph_return(struct trace_array *tr,
 #ifdef CONFIG_DYNAMIC_FTRACE
 /* TODO: make this variable */
 #define FTRACE_GRAPH_MAX_FUNCS         32
-extern int ftrace_graph_filter_enabled;
 extern int ftrace_graph_count;
 extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
+extern int ftrace_graph_notrace_count;
+extern unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS];
 
 static inline int ftrace_graph_addr(unsigned long addr)
 {
        int i;
 
-       if (!ftrace_graph_filter_enabled)
+       if (!ftrace_graph_count)
                return 1;
 
        for (i = 0; i < ftrace_graph_count; i++) {
@@ -759,11 +763,31 @@ static inline int ftrace_graph_addr(unsigned long addr)
 
        return 0;
 }
+
+static inline int ftrace_graph_notrace_addr(unsigned long addr)
+{
+       int i;
+
+       if (!ftrace_graph_notrace_count)
+               return 0;
+
+       for (i = 0; i < ftrace_graph_notrace_count; i++) {
+               if (addr == ftrace_graph_notrace_funcs[i])
+                       return 1;
+       }
+
+       return 0;
+}
 #else
 static inline int ftrace_graph_addr(unsigned long addr)
 {
        return 1;
 }
+
+static inline int ftrace_graph_notrace_addr(unsigned long addr)
+{
+       return 0;
+}
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #else /* CONFIG_FUNCTION_GRAPH_TRACER */
 static inline enum print_line_t
@@ -987,9 +1011,9 @@ struct filter_pred {
 
 extern enum regex_type
 filter_parse_regex(char *buff, int len, char **search, int *not);
-extern void print_event_filter(struct ftrace_event_call *call,
+extern void print_event_filter(struct ftrace_event_file *file,
                               struct trace_seq *s);
-extern int apply_event_filter(struct ftrace_event_call *call,
+extern int apply_event_filter(struct ftrace_event_file *file,
                              char *filter_string);
 extern int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
                                        char *filter_string);
@@ -1000,20 +1024,6 @@ extern int filter_assign_type(const char *type);
 struct ftrace_event_field *
 trace_find_event_field(struct ftrace_event_call *call, char *name);
 
-static inline int
-filter_check_discard(struct ftrace_event_call *call, void *rec,
-                    struct ring_buffer *buffer,
-                    struct ring_buffer_event *event)
-{
-       if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
-           !filter_match_preds(call->filter, rec)) {
-               ring_buffer_discard_commit(buffer, event);
-               return 1;
-       }
-
-       return 0;
-}
-
 extern void trace_event_enable_cmd_record(bool enable);
 extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
 extern int event_trace_del_tracer(struct trace_array *tr);
index d594da0dc03ce53db530e238df1d410c4b89e29a..697fb9bac8f0d69d8e5a5023d5e5d10fdd3ed09c 100644 (file)
@@ -78,7 +78,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
        entry->line = f->line;
        entry->correct = val == expect;
 
-       if (!filter_check_discard(call, entry, buffer, event))
+       if (!call_filter_check_discard(call, entry, buffer, event))
                __buffer_unlock_commit(buffer, event);
 
  out:
index 368a4d50cc308cbe9564d4b01925b7937b0c4aba..f919a2e21bf30e68ad6a99ee627d63f5bb6cb44c 100644 (file)
@@ -989,7 +989,7 @@ static ssize_t
 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
 {
-       struct ftrace_event_call *call;
+       struct ftrace_event_file *file;
        struct trace_seq *s;
        int r = -ENODEV;
 
@@ -1004,12 +1004,12 @@ event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
        trace_seq_init(s);
 
        mutex_lock(&event_mutex);
-       call = event_file_data(filp);
-       if (call)
-               print_event_filter(call, s);
+       file = event_file_data(filp);
+       if (file)
+               print_event_filter(file, s);
        mutex_unlock(&event_mutex);
 
-       if (call)
+       if (file)
                r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
 
        kfree(s);
@@ -1021,7 +1021,7 @@ static ssize_t
 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
 {
-       struct ftrace_event_call *call;
+       struct ftrace_event_file *file;
        char *buf;
        int err = -ENODEV;
 
@@ -1039,9 +1039,9 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
        buf[cnt] = '\0';
 
        mutex_lock(&event_mutex);
-       call = event_file_data(filp);
-       if (call)
-               err = apply_event_filter(call, buf);
+       file = event_file_data(filp);
+       if (file)
+               err = apply_event_filter(file, buf);
        mutex_unlock(&event_mutex);
 
        free_page((unsigned long) buf);
@@ -1062,6 +1062,9 @@ static int subsystem_open(struct inode *inode, struct file *filp)
        struct trace_array *tr;
        int ret;
 
+       if (tracing_is_disabled())
+               return -ENODEV;
+
        /* Make sure the system still exists */
        mutex_lock(&trace_types_lock);
        mutex_lock(&event_mutex);
@@ -1108,6 +1111,9 @@ static int system_tr_open(struct inode *inode, struct file *filp)
        struct trace_array *tr = inode->i_private;
        int ret;
 
+       if (tracing_is_disabled())
+               return -ENODEV;
+
        if (trace_array_get(tr) < 0)
                return -ENODEV;
 
@@ -1124,11 +1130,12 @@ static int system_tr_open(struct inode *inode, struct file *filp)
        if (ret < 0) {
                trace_array_put(tr);
                kfree(dir);
+               return ret;
        }
 
        filp->private_data = dir;
 
-       return ret;
+       return 0;
 }
 
 static int subsystem_release(struct inode *inode, struct file *file)
@@ -1539,7 +1546,7 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
                        return -1;
                }
        }
-       trace_create_file("filter", 0644, file->dir, call,
+       trace_create_file("filter", 0644, file->dir, file,
                          &ftrace_event_filter_fops);
 
        trace_create_file("format", 0444, file->dir, call,
@@ -1577,6 +1584,7 @@ static void event_remove(struct ftrace_event_call *call)
                if (file->event_call != call)
                        continue;
                ftrace_event_enable_disable(file, 0);
+               destroy_preds(file);
                /*
                 * The do_for_each_event_file() is
                 * a double loop. After finding the call for this
@@ -1700,7 +1708,7 @@ static void __trace_remove_event_call(struct ftrace_event_call *call)
 {
        event_remove(call);
        trace_destroy_fields(call);
-       destroy_preds(call);
+       destroy_call_preds(call);
 }
 
 static int probe_remove_event_call(struct ftrace_event_call *call)
index 97daa8cf958df8e73c9fa8090f89cbaf4e63893e..2468f56dc5db2d1403df5692f1d9e0da912d6a1c 100644 (file)
@@ -637,10 +637,18 @@ static void append_filter_err(struct filter_parse_state *ps,
        free_page((unsigned long) buf);
 }
 
+static inline struct event_filter *event_filter(struct ftrace_event_file *file)
+{
+       if (file->event_call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+               return file->event_call->filter;
+       else
+               return file->filter;
+}
+
 /* caller must hold event_mutex */
-void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
+void print_event_filter(struct ftrace_event_file *file, struct trace_seq *s)
 {
-       struct event_filter *filter = call->filter;
+       struct event_filter *filter = event_filter(file);
 
        if (filter && filter->filter_string)
                trace_seq_printf(s, "%s\n", filter->filter_string);
@@ -766,11 +774,21 @@ static void __free_preds(struct event_filter *filter)
        filter->n_preds = 0;
 }
 
-static void filter_disable(struct ftrace_event_call *call)
+static void call_filter_disable(struct ftrace_event_call *call)
 {
        call->flags &= ~TRACE_EVENT_FL_FILTERED;
 }
 
+static void filter_disable(struct ftrace_event_file *file)
+{
+       struct ftrace_event_call *call = file->event_call;
+
+       if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+               call_filter_disable(call);
+       else
+               file->flags &= ~FTRACE_EVENT_FL_FILTERED;
+}
+
 static void __free_filter(struct event_filter *filter)
 {
        if (!filter)
@@ -781,16 +799,30 @@ static void __free_filter(struct event_filter *filter)
        kfree(filter);
 }
 
+void destroy_call_preds(struct ftrace_event_call *call)
+{
+       __free_filter(call->filter);
+       call->filter = NULL;
+}
+
+static void destroy_file_preds(struct ftrace_event_file *file)
+{
+       __free_filter(file->filter);
+       file->filter = NULL;
+}
+
 /*
- * Called when destroying the ftrace_event_call.
- * The call is being freed, so we do not need to worry about
- * the call being currently used. This is for module code removing
+ * Called when destroying the ftrace_event_file.
+ * The file is being freed, so we do not need to worry about
+ * the file being currently used. This is for module code removing
  * the tracepoints from within it.
  */
-void destroy_preds(struct ftrace_event_call *call)
+void destroy_preds(struct ftrace_event_file *file)
 {
-       __free_filter(call->filter);
-       call->filter = NULL;
+       if (file->event_call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+               destroy_call_preds(file->event_call);
+       else
+               destroy_file_preds(file);
 }
 
 static struct event_filter *__alloc_filter(void)
@@ -825,28 +857,56 @@ static int __alloc_preds(struct event_filter *filter, int n_preds)
        return 0;
 }
 
-static void filter_free_subsystem_preds(struct event_subsystem *system)
+static inline void __remove_filter(struct ftrace_event_file *file)
 {
+       struct ftrace_event_call *call = file->event_call;
+
+       filter_disable(file);
+       if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+               remove_filter_string(call->filter);
+       else
+               remove_filter_string(file->filter);
+}
+
+static void filter_free_subsystem_preds(struct event_subsystem *system,
+                                       struct trace_array *tr)
+{
+       struct ftrace_event_file *file;
        struct ftrace_event_call *call;
 
-       list_for_each_entry(call, &ftrace_events, list) {
+       list_for_each_entry(file, &tr->events, list) {
+               call = file->event_call;
                if (strcmp(call->class->system, system->name) != 0)
                        continue;
 
-               filter_disable(call);
-               remove_filter_string(call->filter);
+               __remove_filter(file);
        }
 }
 
-static void filter_free_subsystem_filters(struct event_subsystem *system)
+static inline void __free_subsystem_filter(struct ftrace_event_file *file)
 {
+       struct ftrace_event_call *call = file->event_call;
+
+       if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) {
+               __free_filter(call->filter);
+               call->filter = NULL;
+       } else {
+               __free_filter(file->filter);
+               file->filter = NULL;
+       }
+}
+
+static void filter_free_subsystem_filters(struct event_subsystem *system,
+                                         struct trace_array *tr)
+{
+       struct ftrace_event_file *file;
        struct ftrace_event_call *call;
 
-       list_for_each_entry(call, &ftrace_events, list) {
+       list_for_each_entry(file, &tr->events, list) {
+               call = file->event_call;
                if (strcmp(call->class->system, system->name) != 0)
                        continue;
-               __free_filter(call->filter);
-               call->filter = NULL;
+               __free_subsystem_filter(file);
        }
 }
 
@@ -1617,15 +1677,85 @@ fail:
        return err;
 }
 
+static inline void event_set_filtered_flag(struct ftrace_event_file *file)
+{
+       struct ftrace_event_call *call = file->event_call;
+
+       if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+               call->flags |= TRACE_EVENT_FL_FILTERED;
+       else
+               file->flags |= FTRACE_EVENT_FL_FILTERED;
+}
+
+static inline void event_set_filter(struct ftrace_event_file *file,
+                                   struct event_filter *filter)
+{
+       struct ftrace_event_call *call = file->event_call;
+
+       if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+               rcu_assign_pointer(call->filter, filter);
+       else
+               rcu_assign_pointer(file->filter, filter);
+}
+
+static inline void event_clear_filter(struct ftrace_event_file *file)
+{
+       struct ftrace_event_call *call = file->event_call;
+
+       if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+               RCU_INIT_POINTER(call->filter, NULL);
+       else
+               RCU_INIT_POINTER(file->filter, NULL);
+}
+
+static inline void
+event_set_no_set_filter_flag(struct ftrace_event_file *file)
+{
+       struct ftrace_event_call *call = file->event_call;
+
+       if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+               call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
+       else
+               file->flags |= FTRACE_EVENT_FL_NO_SET_FILTER;
+}
+
+static inline void
+event_clear_no_set_filter_flag(struct ftrace_event_file *file)
+{
+       struct ftrace_event_call *call = file->event_call;
+
+       if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+               call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
+       else
+               file->flags &= ~FTRACE_EVENT_FL_NO_SET_FILTER;
+}
+
+static inline bool
+event_no_set_filter_flag(struct ftrace_event_file *file)
+{
+       struct ftrace_event_call *call = file->event_call;
+
+       if (file->flags & FTRACE_EVENT_FL_NO_SET_FILTER)
+               return true;
+
+       if ((call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) &&
+           (call->flags & TRACE_EVENT_FL_NO_SET_FILTER))
+               return true;
+
+       return false;
+}
+
 struct filter_list {
        struct list_head        list;
        struct event_filter     *filter;
 };
 
 static int replace_system_preds(struct event_subsystem *system,
+                               struct trace_array *tr,
                                struct filter_parse_state *ps,
                                char *filter_string)
 {
+       struct ftrace_event_file *file;
        struct ftrace_event_call *call;
        struct filter_list *filter_item;
        struct filter_list *tmp;
@@ -1633,8 +1763,8 @@ static int replace_system_preds(struct event_subsystem *system,
        bool fail = true;
        int err;
 
-       list_for_each_entry(call, &ftrace_events, list) {
-
+       list_for_each_entry(file, &tr->events, list) {
+               call = file->event_call;
                if (strcmp(call->class->system, system->name) != 0)
                        continue;
 
@@ -1644,18 +1774,20 @@ static int replace_system_preds(struct event_subsystem *system,
                 */
                err = replace_preds(call, NULL, ps, filter_string, true);
                if (err)
-                       call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
+                       event_set_no_set_filter_flag(file);
                else
-                       call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
+                       event_clear_no_set_filter_flag(file);
        }
 
-       list_for_each_entry(call, &ftrace_events, list) {
+       list_for_each_entry(file, &tr->events, list) {
                struct event_filter *filter;
 
+               call = file->event_call;
+
                if (strcmp(call->class->system, system->name) != 0)
                        continue;
 
-               if (call->flags & TRACE_EVENT_FL_NO_SET_FILTER)
+               if (event_no_set_filter_flag(file))
                        continue;
 
                filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
@@ -1676,17 +1808,17 @@ static int replace_system_preds(struct event_subsystem *system,
 
                err = replace_preds(call, filter, ps, filter_string, false);
                if (err) {
-                       filter_disable(call);
+                       filter_disable(file);
                        parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
                        append_filter_err(ps, filter);
                } else
-                       call->flags |= TRACE_EVENT_FL_FILTERED;
+                       event_set_filtered_flag(file);
                /*
                 * Regardless of if this returned an error, we still
                 * replace the filter for the call.
                 */
-               filter = call->filter;
-               rcu_assign_pointer(call->filter, filter_item->filter);
+               filter = event_filter(file);
+               event_set_filter(file, filter_item->filter);
                filter_item->filter = filter;
 
                fail = false;
@@ -1816,6 +1948,7 @@ static int create_filter(struct ftrace_event_call *call,
  * and always remembers @filter_str.
  */
 static int create_system_filter(struct event_subsystem *system,
+                               struct trace_array *tr,
                                char *filter_str, struct event_filter **filterp)
 {
        struct event_filter *filter = NULL;
@@ -1824,7 +1957,7 @@ static int create_system_filter(struct event_subsystem *system,
 
        err = create_filter_start(filter_str, true, &ps, &filter);
        if (!err) {
-               err = replace_system_preds(system, ps, filter_str);
+               err = replace_system_preds(system, tr, ps, filter_str);
                if (!err) {
                        /* System filters just show a default message */
                        kfree(filter->filter_string);
@@ -1840,20 +1973,25 @@ static int create_system_filter(struct event_subsystem *system,
 }
 
 /* caller must hold event_mutex */
-int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
+int apply_event_filter(struct ftrace_event_file *file, char *filter_string)
 {
+       struct ftrace_event_call *call = file->event_call;
        struct event_filter *filter;
        int err;
 
        if (!strcmp(strstrip(filter_string), "0")) {
-               filter_disable(call);
-               filter = call->filter;
+               filter_disable(file);
+               filter = event_filter(file);
+
                if (!filter)
                        return 0;
-               RCU_INIT_POINTER(call->filter, NULL);
+
+               event_clear_filter(file);
+
                /* Make sure the filter is not being used */
                synchronize_sched();
                __free_filter(filter);
+
                return 0;
        }
 
@@ -1866,14 +2004,15 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
         * string
         */
        if (filter) {
-               struct event_filter *tmp = call->filter;
+               struct event_filter *tmp;
 
+               tmp = event_filter(file);
                if (!err)
-                       call->flags |= TRACE_EVENT_FL_FILTERED;
+                       event_set_filtered_flag(file);
                else
-                       filter_disable(call);
+                       filter_disable(file);
 
-               rcu_assign_pointer(call->filter, filter);
+               event_set_filter(file, filter);
 
                if (tmp) {
                        /* Make sure the call is done with the filter */
@@ -1889,6 +2028,7 @@ int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
                                 char *filter_string)
 {
        struct event_subsystem *system = dir->subsystem;
+       struct trace_array *tr = dir->tr;
        struct event_filter *filter;
        int err = 0;
 
@@ -1901,18 +2041,18 @@ int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
        }
 
        if (!strcmp(strstrip(filter_string), "0")) {
-               filter_free_subsystem_preds(system);
+               filter_free_subsystem_preds(system, tr);
                remove_filter_string(system->filter);
                filter = system->filter;
                system->filter = NULL;
                /* Ensure all filters are no longer used */
                synchronize_sched();
-               filter_free_subsystem_filters(system);
+               filter_free_subsystem_filters(system, tr);
                __free_filter(filter);
                goto out_unlock;
        }
 
-       err = create_system_filter(system, filter_string, &filter);
+       err = create_system_filter(system, tr, filter_string, &filter);
        if (filter) {
                /*
                 * No event actually uses the system filter
index d21a74670088345fd97a81724e894ba9a0baff79..7c3e3e72e2b6bd2f0a6bd929ff67526497281b4a 100644 (file)
@@ -180,7 +180,7 @@ struct ftrace_event_call __used event_##call = {                    \
        .event.type             = etype,                                \
        .class                  = &event_class_ftrace_##call,           \
        .print_fmt              = print,                                \
-       .flags                  = TRACE_EVENT_FL_IGNORE_ENABLE,         \
+       .flags                  = TRACE_EVENT_FL_IGNORE_ENABLE | TRACE_EVENT_FL_USE_CALL_FILTER, \
 };                                                                     \
 struct ftrace_event_call __used                                                \
 __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call;
index b5c09242683d8cd1c989d2706702d4d42e182d73..0b99120d395cc166583e400e80108b7d07540935 100644 (file)
@@ -82,9 +82,9 @@ static struct trace_array *graph_array;
  * to fill in space into DURATION column.
  */
 enum {
-       DURATION_FILL_FULL  = -1,
-       DURATION_FILL_START = -2,
-       DURATION_FILL_END   = -3,
+       FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
+       FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
+       FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
 };
 
 static enum print_line_t
@@ -114,16 +114,37 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
                return -EBUSY;
        }
 
+       /*
+        * The curr_ret_stack is an index to ftrace return stack of
+        * current task.  Its value should be in [0, FTRACE_RETFUNC_
+        * DEPTH) when the function graph tracer is used.  To support
+        * filtering out specific functions, it makes the index
+        * negative by subtracting a huge value (FTRACE_NOTRACE_DEPTH)
+        * so when it sees a negative index the ftrace will ignore
+        * the record.  And the index gets recovered when returning
+        * from the filtered function by adding the FTRACE_NOTRACE_
+        * DEPTH and then it'll continue to record functions normally.
+        *
+        * The curr_ret_stack is initialized to -1 and get increased
+        * in this function.  So it can be less than -1 only if it was
+        * filtered out via ftrace_graph_notrace_addr() which can be
+        * set from the set_graph_notrace file in debugfs by the user.
+        */
+       if (current->curr_ret_stack < -1)
+               return -EBUSY;
+
        calltime = trace_clock_local();
 
        index = ++current->curr_ret_stack;
+       if (ftrace_graph_notrace_addr(func))
+               current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
        barrier();
        current->ret_stack[index].ret = ret;
        current->ret_stack[index].func = func;
        current->ret_stack[index].calltime = calltime;
        current->ret_stack[index].subtime = 0;
        current->ret_stack[index].fp = frame_pointer;
-       *depth = index;
+       *depth = current->curr_ret_stack;
 
        return 0;
 }
@@ -137,7 +158,17 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
 
        index = current->curr_ret_stack;
 
-       if (unlikely(index < 0)) {
+       /*
+        * A negative index here means that it's just returned from a
+        * notrace'd function.  Recover index to get an original
+        * return address.  See ftrace_push_return_trace().
+        *
+        * TODO: Need to check whether the stack gets corrupted.
+        */
+       if (index < 0)
+               index += FTRACE_NOTRACE_DEPTH;
+
+       if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic, otherwise we have no where to go */
@@ -193,6 +224,15 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
        trace.rettime = trace_clock_local();
        barrier();
        current->curr_ret_stack--;
+       /*
+        * The curr_ret_stack can be less than -1 only if it was
+        * filtered out and it's about to return from the function.
+        * Recover the index and continue to trace normal functions.
+        */
+       if (current->curr_ret_stack < -1) {
+               current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
+               return ret;
+       }
 
        /*
         * The trace should run after decrementing the ret counter
@@ -230,7 +270,7 @@ int __trace_graph_entry(struct trace_array *tr,
                return 0;
        entry   = ring_buffer_event_data(event);
        entry->graph_ent                        = *trace;
-       if (!filter_current_check_discard(buffer, call, entry, event))
+       if (!call_filter_check_discard(call, entry, buffer, event))
                __buffer_unlock_commit(buffer, event);
 
        return 1;
@@ -259,10 +299,20 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 
        /* trace it when it is-nested-in or is a function enabled. */
        if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
-            ftrace_graph_ignore_irqs()) ||
+            ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
            (max_depth && trace->depth >= max_depth))
                return 0;
 
+       /*
+        * Do not trace a function if it's filtered by set_graph_notrace.
+        * Make the index of ret stack negative to indicate that it should
+        * ignore further functions.  But it needs its own ret stack entry
+        * to recover the original index in order to continue tracing after
+        * returning from the function.
+        */
+       if (ftrace_graph_notrace_addr(trace->func))
+               return 1;
+
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
@@ -335,7 +385,7 @@ void __trace_graph_return(struct trace_array *tr,
                return;
        entry   = ring_buffer_event_data(event);
        entry->ret                              = *trace;
-       if (!filter_current_check_discard(buffer, call, entry, event))
+       if (!call_filter_check_discard(call, entry, buffer, event))
                __buffer_unlock_commit(buffer, event);
 }
 
@@ -652,7 +702,7 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
        }
 
        /* No overhead */
-       ret = print_graph_duration(DURATION_FILL_START, s, flags);
+       ret = print_graph_duration(0, s, flags | FLAGS_FILL_START);
        if (ret != TRACE_TYPE_HANDLED)
                return ret;
 
@@ -664,7 +714,7 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
 
-       ret = print_graph_duration(DURATION_FILL_END, s, flags);
+       ret = print_graph_duration(0, s, flags | FLAGS_FILL_END);
        if (ret != TRACE_TYPE_HANDLED)
                return ret;
 
@@ -729,14 +779,14 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s,
                        return TRACE_TYPE_HANDLED;
 
        /* No real adata, just filling the column with spaces */
-       switch (duration) {
-       case DURATION_FILL_FULL:
+       switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
+       case FLAGS_FILL_FULL:
                ret = trace_seq_puts(s, "              |  ");
                return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
-       case DURATION_FILL_START:
+       case FLAGS_FILL_START:
                ret = trace_seq_puts(s, "  ");
                return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
-       case DURATION_FILL_END:
+       case FLAGS_FILL_END:
                ret = trace_seq_puts(s, " |");
                return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
        }
@@ -852,7 +902,7 @@ print_graph_entry_nested(struct trace_iterator *iter,
        }
 
        /* No time */
-       ret = print_graph_duration(DURATION_FILL_FULL, s, flags);
+       ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
        if (ret != TRACE_TYPE_HANDLED)
                return ret;
 
@@ -1172,7 +1222,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
                return TRACE_TYPE_PARTIAL_LINE;
 
        /* No time */
-       ret = print_graph_duration(DURATION_FILL_FULL, s, flags);
+       ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
        if (ret != TRACE_TYPE_HANDLED)
                return ret;
 
index 243f6834d02675c9c1101e1eabe18e56eb7d3244..dae9541ada9ea9f46723878979f951834e8e84f1 100644 (file)
@@ -835,7 +835,7 @@ __kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs,
        entry->ip = (unsigned long)tp->rp.kp.addr;
        store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
 
-       if (!filter_current_check_discard(buffer, call, entry, event))
+       if (!filter_check_discard(ftrace_file, entry, buffer, event))
                trace_buffer_unlock_commit_regs(buffer, event,
                                                irq_flags, pc, regs);
 }
@@ -884,7 +884,7 @@ __kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
        entry->ret_ip = (unsigned long)ri->ret_addr;
        store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
 
-       if (!filter_current_check_discard(buffer, call, entry, event))
+       if (!filter_check_discard(ftrace_file, entry, buffer, event))
                trace_buffer_unlock_commit_regs(buffer, event,
                                                irq_flags, pc, regs);
 }
index b3dcfb2f0fef4e1bc9e9815021adc3e8dfe7d32a..0abd9b8634747e3001354f96b90dad379d6d2fd6 100644 (file)
@@ -323,7 +323,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
        entry   = ring_buffer_event_data(event);
        entry->rw                       = *rw;
 
-       if (!filter_check_discard(call, entry, buffer, event))
+       if (!call_filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit(buffer, event, 0, pc);
 }
 
@@ -353,7 +353,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
        entry   = ring_buffer_event_data(event);
        entry->map                      = *map;
 
-       if (!filter_check_discard(call, entry, buffer, event))
+       if (!call_filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit(buffer, event, 0, pc);
 }
 
index 4e98e3b257a3a74d3716a0fc4ab35c2a1530fc43..3f34dc9b40f350bac7ae8869b17e4f5216b05d1a 100644 (file)
@@ -45,7 +45,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
        entry->next_state               = next->state;
        entry->next_cpu = task_cpu(next);
 
-       if (!filter_check_discard(call, entry, buffer, event))
+       if (!call_filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit(buffer, event, flags, pc);
 }
 
@@ -101,7 +101,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
        entry->next_state               = wakee->state;
        entry->next_cpu                 = task_cpu(wakee);
 
-       if (!filter_check_discard(call, entry, buffer, event))
+       if (!call_filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit(buffer, event, flags, pc);
 }
 
index 847f88a6194b4e4183e0ed08dd03828f6e5ff1bd..7af67360b3307413865e2ed0719ea63894f01016 100644 (file)
@@ -43,46 +43,15 @@ static DEFINE_MUTEX(all_stat_sessions_mutex);
 /* The root directory for all stat files */
 static struct dentry           *stat_dir;
 
-/*
- * Iterate through the rbtree using a post order traversal path
- * to release the next node.
- * It won't necessary release one at each iteration
- * but it will at least advance closer to the next one
- * to be released.
- */
-static struct rb_node *release_next(struct tracer_stat *ts,
-                                   struct rb_node *node)
+static void __reset_stat_session(struct stat_session *session)
 {
-       struct stat_node *snode;
-       struct rb_node *parent = rb_parent(node);
-
-       if (node->rb_left)
-               return node->rb_left;
-       else if (node->rb_right)
-               return node->rb_right;
-       else {
-               if (!parent)
-                       ;
-               else if (parent->rb_left == node)
-                       parent->rb_left = NULL;
-               else
-                       parent->rb_right = NULL;
+       struct stat_node *snode, *n;
 
-               snode = container_of(node, struct stat_node, node);
-               if (ts->stat_release)
-                       ts->stat_release(snode->stat);
+       rbtree_postorder_for_each_entry_safe(snode, n, &session->stat_root, node) {
+               if (session->ts->stat_release)
+                       session->ts->stat_release(snode->stat);
                kfree(snode);
-
-               return parent;
        }
-}
-
-static void __reset_stat_session(struct stat_session *session)
-{
-       struct rb_node *node = session->stat_root.rb_node;
-
-       while (node)
-               node = release_next(session->ts, node);
 
        session->stat_root = RB_ROOT;
 }
index 559329d9bd2fa4d9dd22f2ce31b4dcba6b5f1119..e4b6d11bdf78f35a2a6d8e831cd3ec21d97d9c00 100644 (file)
@@ -302,6 +302,7 @@ static int __init syscall_exit_define_fields(struct ftrace_event_call *call)
 static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 {
        struct trace_array *tr = data;
+       struct ftrace_event_file *ftrace_file;
        struct syscall_trace_enter *entry;
        struct syscall_metadata *sys_data;
        struct ring_buffer_event *event;
@@ -314,7 +315,13 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
        syscall_nr = trace_get_syscall_nr(current, regs);
        if (syscall_nr < 0)
                return;
-       if (!test_bit(syscall_nr, tr->enabled_enter_syscalls))
+
+       /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
+       ftrace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
+       if (!ftrace_file)
+               return;
+
+       if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
                return;
 
        sys_data = syscall_nr_to_meta(syscall_nr);
@@ -336,8 +343,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
        entry->nr = syscall_nr;
        syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);
 
-       if (!filter_current_check_discard(buffer, sys_data->enter_event,
-                                         entry, event))
+       if (!filter_check_discard(ftrace_file, entry, buffer, event))
                trace_current_buffer_unlock_commit(buffer, event,
                                                   irq_flags, pc);
 }
@@ -345,6 +351,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
 {
        struct trace_array *tr = data;
+       struct ftrace_event_file *ftrace_file;
        struct syscall_trace_exit *entry;
        struct syscall_metadata *sys_data;
        struct ring_buffer_event *event;
@@ -356,7 +363,13 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
        syscall_nr = trace_get_syscall_nr(current, regs);
        if (syscall_nr < 0)
                return;
-       if (!test_bit(syscall_nr, tr->enabled_exit_syscalls))
+
+       /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
+       ftrace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]);
+       if (!ftrace_file)
+               return;
+
+       if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
                return;
 
        sys_data = syscall_nr_to_meta(syscall_nr);
@@ -377,8 +390,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
        entry->nr = syscall_nr;
        entry->ret = syscall_get_return_value(current, regs);
 
-       if (!filter_current_check_discard(buffer, sys_data->exit_event,
-                                         entry, event))
+       if (!filter_check_discard(ftrace_file, entry, buffer, event))
                trace_current_buffer_unlock_commit(buffer, event,
                                                   irq_flags, pc);
 }
@@ -397,7 +409,7 @@ static int reg_event_syscall_enter(struct ftrace_event_file *file,
        if (!tr->sys_refcount_enter)
                ret = register_trace_sys_enter(ftrace_syscall_enter, tr);
        if (!ret) {
-               set_bit(num, tr->enabled_enter_syscalls);
+               rcu_assign_pointer(tr->enter_syscall_files[num], file);
                tr->sys_refcount_enter++;
        }
        mutex_unlock(&syscall_trace_lock);
@@ -415,10 +427,15 @@ static void unreg_event_syscall_enter(struct ftrace_event_file *file,
                return;
        mutex_lock(&syscall_trace_lock);
        tr->sys_refcount_enter--;
-       clear_bit(num, tr->enabled_enter_syscalls);
+       rcu_assign_pointer(tr->enter_syscall_files[num], NULL);
        if (!tr->sys_refcount_enter)
                unregister_trace_sys_enter(ftrace_syscall_enter, tr);
        mutex_unlock(&syscall_trace_lock);
+       /*
+        * Callers expect the event to be completely disabled on
+        * return, so wait for current handlers to finish.
+        */
+       synchronize_sched();
 }
 
 static int reg_event_syscall_exit(struct ftrace_event_file *file,
@@ -435,7 +452,7 @@ static int reg_event_syscall_exit(struct ftrace_event_file *file,
        if (!tr->sys_refcount_exit)
                ret = register_trace_sys_exit(ftrace_syscall_exit, tr);
        if (!ret) {
-               set_bit(num, tr->enabled_exit_syscalls);
+               rcu_assign_pointer(tr->exit_syscall_files[num], file);
                tr->sys_refcount_exit++;
        }
        mutex_unlock(&syscall_trace_lock);
@@ -453,10 +470,15 @@ static void unreg_event_syscall_exit(struct ftrace_event_file *file,
                return;
        mutex_lock(&syscall_trace_lock);
        tr->sys_refcount_exit--;
-       clear_bit(num, tr->enabled_exit_syscalls);
+       rcu_assign_pointer(tr->exit_syscall_files[num], NULL);
        if (!tr->sys_refcount_exit)
                unregister_trace_sys_exit(ftrace_syscall_exit, tr);
        mutex_unlock(&syscall_trace_lock);
+       /*
+        * Callers expect the event to be completely disabled on
+        * return, so wait for current handlers to finish.
+        */
+       synchronize_sched();
 }
 
 static int __init init_syscall_trace(struct ftrace_event_call *call)
index 272261b5f94ff84e3291be05aaa3b1a8c1ca00ca..b6dcc42ef7f5043e6115c53a5a47aa631bd04c57 100644 (file)
@@ -128,6 +128,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
        if (is_ret)
                tu->consumer.ret_handler = uretprobe_dispatcher;
        init_trace_uprobe_filter(&tu->filter);
+       tu->call.flags |= TRACE_EVENT_FL_USE_CALL_FILTER;
        return tu;
 
 error:
@@ -561,7 +562,7 @@ static void uprobe_trace_print(struct trace_uprobe *tu,
        for (i = 0; i < tu->nr_args; i++)
                call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset);
 
-       if (!filter_current_check_discard(buffer, call, entry, event))
+       if (!call_filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit(buffer, event, 0, 0);
 }
 
index a674fd5507c19db162af66787dcea6c65c19082d..d0da66396f6201b5dbce7e8b9aa2ae421aefd40a 100755 (executable)
@@ -214,13 +214,13 @@ $local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\S+)";
 $weak_regex = "^[0-9a-fA-F]+\\s+([wW])\\s+(\\S+)";
 $section_regex = "Disassembly of section\\s+(\\S+):";
 $function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:";
-$mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount\$";
+$mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s(mcount|__fentry__)\$";
 $section_type = '@progbits';
 $mcount_adjust = 0;
 $type = ".long";
 
 if ($arch eq "x86_64") {
-    $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount([+-]0x[0-9a-zA-Z]+)?\$";
+    $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s(mcount|__fentry__)([+-]0x[0-9a-zA-Z]+)?\$";
     $type = ".quad";
     $alignment = 8;
     $mcount_adjust = -1;