Merge tag 'trace-v3.20' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
author     Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 12 Feb 2015 16:37:41 +0000 (08:37 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 12 Feb 2015 16:37:41 +0000 (08:37 -0800)
Pull tracing updates from Steven Rostedt:
 "The updates included in this pull request for ftrace are:

   o Several cleanups to the code

     One such cleanup converts the ring buffer benchmark code to 64-bit
     timekeeping (a brief sketch of the conversion pattern follows
     below).

   o Addition of the __print_array() helper macro for TRACE_EVENT()
     (a usage sketch follows below)

   o Updates to samples/trace_events/ adding samples of the different
     ways to create trace events.  Lots of features have been added
     since the sample code was written, and these features are mostly
     unknown.  Developers have been hand-rolling hacks to do things
     that are already available.

   o Performance improvements.  Most notably, I found a performance bug
     where a waiter waiting for a full page from the ring buffer would
     see that a full page was not available and go to sleep.  The sched
     event generated by going to sleep would wake it up again; it would
     see that a full page was still not available and go back to sleep,
     and the resulting sched event would wake it yet again, until a full
     page finally became available.  This change has been marked for
     stable (a sketch of the wakeup pattern follows below).

  Other improvements include removing global locks from fast paths (a
  condensed sketch of that pattern is included below)"
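
For reference, the 64-bit timekeeping conversion follows the common
pattern of replacing overflow-prone 32-bit time accounting with
monotonic 64-bit nanoseconds.  A minimal sketch of that pattern, not
the benchmark code itself:

    #include <linux/ktime.h>

    /* Before: 32-bit-limited time math (e.g. struct timeval). */
    /* After: a monotonic 64-bit nanosecond delta. */
    ktime_t start = ktime_get();
    /* ... run the benchmarked work ... */
    u64 delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start));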
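
The __print_array() helper takes the array pointer, the element count,
and the element size, which must be 1, 2, 4 or 8 bytes (enforced by a
BUILD_BUG_ON in the macro, visible in the include/trace/ftrace.h hunk
below).  A usage sketch; the event and its fields are hypothetical,
only the helper's argument order comes from this series:

    /* In a trace/events/*.h header.  Hypothetical event recording
     * up to four u32 readings.
     */
    TRACE_EVENT(foo_readings,

            TP_PROTO(const u32 *vals, int count),

            TP_ARGS(vals, count),

            TP_STRUCT__entry(
                    __array(u32, vals, 4)
                    __field(int, count)
            ),

            TP_fast_assign(
                    __entry->count = min(count, 4);
                    memcpy(__entry->vals, vals,
                           __entry->count * sizeof(u32));
            ),

            TP_printk("vals=%s",
                      __print_array(__entry->vals, __entry->count,
                                    sizeof(u32)))
    );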
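
The gist of the ring buffer wakeup fix is to re-check the waiter's
condition on the producer side before issuing the wakeup.  A minimal
sketch of the pattern, assuming a hypothetical buffer type and a
hypothetical buf_has_full_page() predicate (the real change is in
kernel/trace/ring_buffer.c):

    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(my_waitq);    /* hypothetical */

    /* Producer side: called after data is committed to the buffer. */
    static void my_buffer_wakeup(struct my_buffer *buf, bool full_waiters)
    {
            /*
             * Waking unconditionally lets a full-page waiter wake up,
             * see that the page is still not full, and sleep again;
             * the sched events generated by that cycle keep re-waking
             * it.  So only wake full-page waiters once a full page
             * actually exists.
             */
            if (full_waiters && !buf_has_full_page(buf))
                    return;
            wake_up_interruptible(&my_waitq);
    }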
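
The global-lock removal can be seen in the kernel/trace/trace.c hunks
further down: instead of taking trace_types_lock around every read, the
pipe readers now pin the current tracer with a reference count taken at
open and dropped at release, and writers treat a nonzero count as busy.
Condensed from the diff below:

    /* open: pin the tracer rather than copying it */
    mutex_lock(&trace_types_lock);
    iter->trace = tr->current_trace;
    tr->current_trace->ref++;
    mutex_unlock(&trace_types_lock);

    /* tracing_set_tracer(): refuse to switch while readers exist */
    if (tr->current_trace->ref) {
            ret = -EBUSY;
            goto out;
    }

    /* release: drop the pin */
    mutex_lock(&trace_types_lock);
    tr->current_trace->ref--;
    mutex_unlock(&trace_types_lock);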

* tag 'trace-v3.20' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  ring-buffer: Do not wake up a splice waiter when page is not full
  tracing: Fix unmapping loop in tracing_mark_write
  tracing: Add samples of DECLARE_EVENT_CLASS() and DEFINE_EVENT()
  tracing: Add TRACE_EVENT_FN example
  tracing: Add TRACE_EVENT_CONDITION sample
  tracing: Update the TRACE_EVENT fields available in the sample code
  tracing: Separate out initializing top level dir from instances
  tracing: Make tracing_init_dentry_tr() static
  trace: Use 64-bit timekeeping
  tracing: Add array printing helper
  tracing: Remove newline from trace_printk warning banner
  tracing: Use IS_ERR() check for return value of tracing_init_dentry()
  tracing: Remove unneeded includes of debugfs.h and fs.h
  tracing: Remove taking of trace_types_lock in pipe files
  tracing: Add ref count to tracer for when they are being read by pipe

include/linux/ftrace_event.h
include/trace/ftrace.h
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace_events.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_uprobe.c

index d36f68b08acc1fc95c0df5867df545ac2976d0d6,5aa4a92695472079306455063f10a7cebb896424..c674ee8f7fca508c24f172a72e8c055b8fc4ec24
@@@ -44,6 -44,10 +44,10 @@@ const char *ftrace_print_bitmask_seq(st
  const char *ftrace_print_hex_seq(struct trace_seq *p,
                                 const unsigned char *buf, int len);
  
+ const char *ftrace_print_array_seq(struct trace_seq *p,
+                                  const void *buf, int buf_len,
+                                  size_t el_size);
  struct trace_iterator;
  struct trace_event;
  
@@@ -595,7 -599,7 +599,7 @@@ extern int  ftrace_profile_set_filter(s
                                     char *filter_str);
  extern void ftrace_profile_free_filter(struct perf_event *event);
  extern void *perf_trace_buf_prepare(int size, unsigned short type,
 -                                  struct pt_regs *regs, int *rctxp);
 +                                  struct pt_regs **regs, int *rctxp);
  
  static inline void
  perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
diff --combined include/trace/ftrace.h
index 27609dfcce25916120521b23215dd473fab0051a,304901fc5f34c6ef25c9176a0803ada2714e5e4f..41bf65f04dd9e501fa4e3066402232553a19fef4
  #undef __print_hex
  #define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)
  
+ #undef __print_array
+ #define __print_array(array, count, el_size)                          \
+       ({                                                              \
+               BUILD_BUG_ON(el_size != 1 && el_size != 2 &&            \
+                            el_size != 4 && el_size != 8);             \
+               ftrace_print_array_seq(p, array, count, el_size);       \
+       })
  #undef DECLARE_EVENT_CLASS
  #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)        \
  static notrace enum print_line_t                                      \
@@@ -674,6 -682,7 +682,7 @@@ static inline void ftrace_test_probe_##
  #undef __get_dynamic_array_len
  #undef __get_str
  #undef __get_bitmask
+ #undef __print_array
  
  #undef TP_printk
  #define TP_printk(fmt, args...) "\"" fmt "\", "  __stringify(args)
@@@ -763,7 -772,7 +772,7 @@@ perf_trace_##call(void *__data, proto
        struct ftrace_event_call *event_call = __data;                  \
        struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
        struct ftrace_raw_##call *entry;                                \
 -      struct pt_regs __regs;                                          \
 +      struct pt_regs *__regs;                                         \
        u64 __addr = 0, __count = 1;                                    \
        struct task_struct *__task = NULL;                              \
        struct hlist_head *head;                                        \
                             sizeof(u64));                              \
        __entry_size -= sizeof(u32);                                    \
                                                                        \
 -      perf_fetch_caller_regs(&__regs);                                \
        entry = perf_trace_buf_prepare(__entry_size,                    \
                        event_call->event.type, &__regs, &rctx);        \
        if (!entry)                                                     \
                return;                                                 \
                                                                        \
 +      perf_fetch_caller_regs(__regs);                                 \
 +                                                                      \
        tstruct                                                         \
                                                                        \
        { assign; }                                                     \
                                                                        \
        perf_trace_buf_submit(entry, __entry_size, rctx, __addr,        \
 -              __count, &__regs, head, __task);                        \
 +              __count, __regs, head, __task);                         \
  }
  
  /*
diff --combined kernel/trace/ftrace.c
index 224e768bdc738da7c47aca41fcc6d9ecd4c190b4,80c9d34540ddbc7ece4d9cc42e62edf5bd1ced7f..45e5cb143d173d979576689dbc8e7a66703eee06
@@@ -2497,14 -2497,12 +2497,14 @@@ static void ftrace_run_update_code(int 
  }
  
  static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
 -                                 struct ftrace_hash *old_hash)
 +                                 struct ftrace_ops_hash *old_hash)
  {
        ops->flags |= FTRACE_OPS_FL_MODIFYING;
 -      ops->old_hash.filter_hash = old_hash;
 +      ops->old_hash.filter_hash = old_hash->filter_hash;
 +      ops->old_hash.notrace_hash = old_hash->notrace_hash;
        ftrace_run_update_code(command);
        ops->old_hash.filter_hash = NULL;
 +      ops->old_hash.notrace_hash = NULL;
        ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
  }
  
@@@ -3581,7 -3579,7 +3581,7 @@@ static struct ftrace_ops trace_probe_op
  
  static int ftrace_probe_registered;
  
 -static void __enable_ftrace_function_probe(struct ftrace_hash *old_hash)
 +static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash)
  {
        int ret;
        int i;
@@@ -3639,7 -3637,6 +3639,7 @@@ in
  register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                              void *data)
  {
 +      struct ftrace_ops_hash old_hash_ops;
        struct ftrace_func_probe *entry;
        struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
        struct ftrace_hash *old_hash = *orig_hash;
  
        mutex_lock(&trace_probe_ops.func_hash->regex_lock);
  
 +      old_hash_ops.filter_hash = old_hash;
 +      /* Probes only have filters */
 +      old_hash_ops.notrace_hash = NULL;
 +
        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
        if (!hash) {
                count = -ENOMEM;
  
        ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
  
 -      __enable_ftrace_function_probe(old_hash);
 +      __enable_ftrace_function_probe(&old_hash_ops);
  
        if (!ret)
                free_ftrace_hash_rcu(old_hash);
@@@ -4013,34 -4006,10 +4013,34 @@@ ftrace_match_addr(struct ftrace_hash *h
  }
  
  static void ftrace_ops_update_code(struct ftrace_ops *ops,
 -                                 struct ftrace_hash *old_hash)
 +                                 struct ftrace_ops_hash *old_hash)
  {
 -      if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
 +      struct ftrace_ops *op;
 +
 +      if (!ftrace_enabled)
 +              return;
 +
 +      if (ops->flags & FTRACE_OPS_FL_ENABLED) {
                ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
 +              return;
 +      }
 +
 +      /*
 +       * If this is the shared global_ops filter, then we need to
 +       * check if there is another ops that shares it, is enabled.
 +       * If so, we still need to run the modify code.
 +       */
 +      if (ops->func_hash != &global_ops.local_hash)
 +              return;
 +
 +      do_for_each_ftrace_op(op, ftrace_ops_list) {
 +              if (op->func_hash == &global_ops.local_hash &&
 +                  op->flags & FTRACE_OPS_FL_ENABLED) {
 +                      ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
 +                      /* Only need to do this once */
 +                      return;
 +              }
 +      } while_for_each_ftrace_op(op);
  }
  
  static int
@@@ -4048,7 -4017,6 +4048,7 @@@ ftrace_set_hash(struct ftrace_ops *ops
                unsigned long ip, int remove, int reset, int enable)
  {
        struct ftrace_hash **orig_hash;
 +      struct ftrace_ops_hash old_hash_ops;
        struct ftrace_hash *old_hash;
        struct ftrace_hash *hash;
        int ret;
  
        mutex_lock(&ftrace_lock);
        old_hash = *orig_hash;
 +      old_hash_ops.filter_hash = ops->func_hash->filter_hash;
 +      old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
        ret = ftrace_hash_move(ops, enable, orig_hash, hash);
        if (!ret) {
 -              ftrace_ops_update_code(ops, old_hash);
 +              ftrace_ops_update_code(ops, &old_hash_ops);
                free_ftrace_hash_rcu(old_hash);
        }
        mutex_unlock(&ftrace_lock);
@@@ -4301,7 -4267,6 +4301,7 @@@ static void __init set_ftrace_early_fil
  int ftrace_regex_release(struct inode *inode, struct file *file)
  {
        struct seq_file *m = (struct seq_file *)file->private_data;
 +      struct ftrace_ops_hash old_hash_ops;
        struct ftrace_iterator *iter;
        struct ftrace_hash **orig_hash;
        struct ftrace_hash *old_hash;
  
                mutex_lock(&ftrace_lock);
                old_hash = *orig_hash;
 +              old_hash_ops.filter_hash = iter->ops->func_hash->filter_hash;
 +              old_hash_ops.notrace_hash = iter->ops->func_hash->notrace_hash;
                ret = ftrace_hash_move(iter->ops, filter_hash,
                                       orig_hash, iter->hash);
                if (!ret) {
 -                      ftrace_ops_update_code(iter->ops, old_hash);
 +                      ftrace_ops_update_code(iter->ops, &old_hash_ops);
                        free_ftrace_hash_rcu(old_hash);
                }
                mutex_unlock(&ftrace_lock);
@@@ -5456,7 -5419,7 +5456,7 @@@ static __init int ftrace_init_debugfs(v
        struct dentry *d_tracer;
  
        d_tracer = tracing_init_dentry();
-       if (!d_tracer)
+       if (IS_ERR(d_tracer))
                return 0;
  
        ftrace_init_dyn_debugfs(d_tracer);
diff --combined kernel/trace/trace.c
index 4a9079b9f082fd3bb14e3b46522b1540b001fea1,2078b86750e0507d27bb1655bd5ef59819cfa338..77b8dc528006cf2c937b242ded546f425a1a9caf
@@@ -2036,7 -2036,8 +2036,8 @@@ void trace_printk_init_buffers(void
  
        /* trace_printk() is for debug use only. Don't use it in production. */
  
-       pr_warning("\n**********************************************************\n");
+       pr_warning("\n");
+       pr_warning("**********************************************************\n");
        pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
        pr_warning("**                                                      **\n");
        pr_warning("** trace_printk() being used. Allocating extra memory.  **\n");
@@@ -4140,6 -4141,12 +4141,12 @@@ static int tracing_set_tracer(struct tr
                goto out;
        }
  
+       /* If trace pipe files are being read, we can't change the tracer */
+       if (tr->current_trace->ref) {
+               ret = -EBUSY;
+               goto out;
+       }
        trace_branch_disable();
  
        tr->current_trace->enabled--;
@@@ -4326,17 -4333,7 +4333,7 @@@ static int tracing_open_pipe(struct ino
        }
  
        trace_seq_init(&iter->seq);
-       /*
-        * We make a copy of the current tracer to avoid concurrent
-        * changes on it while we are reading.
-        */
-       iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
-       if (!iter->trace) {
-               ret = -ENOMEM;
-               goto fail;
-       }
-       *iter->trace = *tr->current_trace;
+       iter->trace = tr->current_trace;
  
        if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
                ret = -ENOMEM;
                iter->trace->pipe_open(iter);
  
        nonseekable_open(inode, filp);
+       tr->current_trace->ref++;
  out:
        mutex_unlock(&trace_types_lock);
        return ret;
@@@ -4382,6 -4381,8 +4381,8 @@@ static int tracing_release_pipe(struct 
  
        mutex_lock(&trace_types_lock);
  
+       tr->current_trace->ref--;
        if (iter->trace->pipe_close)
                iter->trace->pipe_close(iter);
  
  
        free_cpumask_var(iter->started);
        mutex_destroy(&iter->mutex);
-       kfree(iter->trace);
        kfree(iter);
  
        trace_array_put(tr);
@@@ -4422,7 -4422,7 +4422,7 @@@ tracing_poll_pipe(struct file *filp, po
        return trace_poll(iter, filp, poll_table);
  }
  
- /* Must be called with trace_types_lock mutex held. */
+ /* Must be called with iter->mutex held. */
  static int tracing_wait_pipe(struct file *filp)
  {
        struct trace_iterator *iter = filp->private_data;
@@@ -4467,7 -4467,6 +4467,6 @@@ tracing_read_pipe(struct file *filp, ch
                  size_t cnt, loff_t *ppos)
  {
        struct trace_iterator *iter = filp->private_data;
-       struct trace_array *tr = iter->tr;
        ssize_t sret;
  
        /* return any leftover data */
  
        trace_seq_init(&iter->seq);
  
-       /* copy the tracer to avoid using a global lock all around */
-       mutex_lock(&trace_types_lock);
-       if (unlikely(iter->trace->name != tr->current_trace->name))
-               *iter->trace = *tr->current_trace;
-       mutex_unlock(&trace_types_lock);
        /*
         * Avoid more than one consumer on a single file descriptor
         * This is just a matter of traces coherency, the ring buffer itself
@@@ -4642,7 -4635,6 +4635,6 @@@ static ssize_t tracing_splice_read_pipe
                .ops            = &tracing_pipe_buf_ops,
                .spd_release    = tracing_spd_release_pipe,
        };
-       struct trace_array *tr = iter->tr;
        ssize_t ret;
        size_t rem;
        unsigned int i;
        if (splice_grow_spd(pipe, &spd))
                return -ENOMEM;
  
-       /* copy the tracer to avoid using a global lock all around */
-       mutex_lock(&trace_types_lock);
-       if (unlikely(iter->trace->name != tr->current_trace->name))
-               *iter->trace = *tr->current_trace;
-       mutex_unlock(&trace_types_lock);
        mutex_lock(&iter->mutex);
  
        if (iter->trace->splice_read) {
@@@ -4942,7 -4928,7 +4928,7 @@@ tracing_mark_write(struct file *filp, c
        *fpos += written;
  
   out_unlock:
-       for (i = 0; i < nr_pages; i++){
+       for (i = nr_pages - 1; i >= 0; i--) {
                kunmap_atomic(map_page[i]);
                put_page(pages[i]);
        }
@@@ -5331,6 -5317,8 +5317,8 @@@ static int tracing_buffers_open(struct 
  
        filp->private_data = info;
  
+       tr->current_trace->ref++;
        mutex_unlock(&trace_types_lock);
  
        ret = nonseekable_open(inode, filp);
@@@ -5361,21 -5349,16 +5349,16 @@@ tracing_buffers_read(struct file *filp
        if (!count)
                return 0;
  
-       mutex_lock(&trace_types_lock);
  #ifdef CONFIG_TRACER_MAX_TRACE
-       if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
-               size = -EBUSY;
-               goto out_unlock;
-       }
+       if (iter->snapshot && iter->tr->current_trace->use_max_tr)
+               return -EBUSY;
  #endif
  
        if (!info->spare)
                info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
                                                          iter->cpu_file);
-       size = -ENOMEM;
        if (!info->spare)
-               goto out_unlock;
+               return -ENOMEM;
  
        /* Do we have previous read data to read? */
        if (info->read < PAGE_SIZE)
  
        if (ret < 0) {
                if (trace_empty(iter)) {
-                       if ((filp->f_flags & O_NONBLOCK)) {
-                               size = -EAGAIN;
-                               goto out_unlock;
-                       }
-                       mutex_unlock(&trace_types_lock);
+                       if ((filp->f_flags & O_NONBLOCK))
+                               return -EAGAIN;
                        ret = wait_on_pipe(iter, false);
-                       mutex_lock(&trace_types_lock);
-                       if (ret) {
-                               size = ret;
-                               goto out_unlock;
-                       }
+                       if (ret)
+                               return ret;
                        goto again;
                }
-               size = 0;
-               goto out_unlock;
+               return 0;
        }
  
        info->read = 0;
                size = count;
  
        ret = copy_to_user(ubuf, info->spare + info->read, size);
-       if (ret == size) {
-               size = -EFAULT;
-               goto out_unlock;
-       }
+       if (ret == size)
+               return -EFAULT;
        size -= ret;
  
        *ppos += size;
        info->read += size;
  
-  out_unlock:
-       mutex_unlock(&trace_types_lock);
        return size;
  }
  
@@@ -5437,6 -5411,8 +5411,8 @@@ static int tracing_buffers_release(stru
  
        mutex_lock(&trace_types_lock);
  
+       iter->tr->current_trace->ref--;
        __trace_array_put(iter->tr);
  
        if (info->spare)
@@@ -5522,30 -5498,20 +5498,20 @@@ tracing_buffers_splice_read(struct fil
        int entries, size, i;
        ssize_t ret = 0;
  
-       mutex_lock(&trace_types_lock);
  #ifdef CONFIG_TRACER_MAX_TRACE
-       if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
-               ret = -EBUSY;
-               goto out;
-       }
+       if (iter->snapshot && iter->tr->current_trace->use_max_tr)
+               return -EBUSY;
  #endif
  
-       if (splice_grow_spd(pipe, &spd)) {
-               ret = -ENOMEM;
-               goto out;
-       }
+       if (splice_grow_spd(pipe, &spd))
+               return -ENOMEM;
  
-       if (*ppos & (PAGE_SIZE - 1)) {
-               ret = -EINVAL;
-               goto out;
-       }
+       if (*ppos & (PAGE_SIZE - 1))
+               return -EINVAL;
  
        if (len & (PAGE_SIZE - 1)) {
-               if (len < PAGE_SIZE) {
-                       ret = -EINVAL;
-                       goto out;
-               }
+               if (len < PAGE_SIZE)
+                       return -EINVAL;
                len &= PAGE_MASK;
        }
  
        /* did we read anything? */
        if (!spd.nr_pages) {
                if (ret)
-                       goto out;
+                       return ret;
+               if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
+                       return -EAGAIN;
  
-               if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
-                       ret = -EAGAIN;
-                       goto out;
-               }
-               mutex_unlock(&trace_types_lock);
                ret = wait_on_pipe(iter, true);
-               mutex_lock(&trace_types_lock);
                if (ret)
-                       goto out;
+                       return ret;
  
                goto again;
        }
  
        ret = splice_to_pipe(pipe, &spd);
        splice_shrink_spd(&spd);
- out:
-       mutex_unlock(&trace_types_lock);
  
        return ret;
  }
@@@ -5854,28 -5815,11 +5815,11 @@@ static __init int register_snapshot_cmd
  static inline __init int register_snapshot_cmd(void) { return 0; }
  #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
  
- struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
+ static struct dentry *tracing_get_dentry(struct trace_array *tr)
  {
-       if (tr->dir)
-               return tr->dir;
-       if (!debugfs_initialized())
-               return NULL;
-       if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
-               tr->dir = debugfs_create_dir("tracing", NULL);
-       if (!tr->dir)
-               pr_warn_once("Could not create debugfs directory 'tracing'\n");
        return tr->dir;
  }
  
- struct dentry *tracing_init_dentry(void)
- {
-       return tracing_init_dentry_tr(&global_trace);
- }
  static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
  {
        struct dentry *d_tracer;
        if (tr->percpu_dir)
                return tr->percpu_dir;
  
-       d_tracer = tracing_init_dentry_tr(tr);
-       if (!d_tracer)
+       d_tracer = tracing_get_dentry(tr);
+       if (IS_ERR(d_tracer))
                return NULL;
  
        tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
@@@ -6086,8 -6030,8 +6030,8 @@@ static struct dentry *trace_options_ini
        if (tr->options)
                return tr->options;
  
-       d_tracer = tracing_init_dentry_tr(tr);
-       if (!d_tracer)
+       d_tracer = tracing_get_dentry(tr);
+       if (IS_ERR(d_tracer))
                return NULL;
  
        tr->options = debugfs_create_dir("options", d_tracer);
@@@ -6416,7 -6360,7 +6360,7 @@@ static int instance_delete(const char *
                goto out_unlock;
  
        ret = -EBUSY;
-       if (tr->ref)
+       if (tr->ref || (tr->current_trace && tr->current_trace->ref))
                goto out_unlock;
  
        list_del(&tr->list);
@@@ -6571,6 -6515,33 +6515,33 @@@ init_tracer_debugfs(struct trace_array 
  
  }
  
+ /**
+  * tracing_init_dentry - initialize top level trace array
+  *
+  * This is called when creating files or directories in the tracing
+  * directory. It is called via fs_initcall() by any of the boot up code
+  * and expects to return the dentry of the top level tracing directory.
+  */
+ struct dentry *tracing_init_dentry(void)
+ {
+       struct trace_array *tr = &global_trace;
+       if (tr->dir)
+               return tr->dir;
+       if (WARN_ON(!debugfs_initialized()))
+               return ERR_PTR(-ENODEV);
+       tr->dir = debugfs_create_dir("tracing", NULL);
+       if (!tr->dir) {
+               pr_warn_once("Could not create debugfs directory 'tracing'\n");
+               return ERR_PTR(-ENOMEM);
+       }
+       return tr->dir;
+ }
  static __init int tracer_init_debugfs(void)
  {
        struct dentry *d_tracer;
        trace_access_lock_init();
  
        d_tracer = tracing_init_dentry();
-       if (!d_tracer)
+       if (IS_ERR(d_tracer))
                return 0;
  
        init_tracer_debugfs(&global_trace, d_tracer);
@@@ -6811,7 -6782,6 +6782,6 @@@ __init static int tracer_alloc_buffers(
        int ring_buf_size;
        int ret = -ENOMEM;
  
        if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
                goto out;
  
@@@ -6918,6 -6888,7 +6888,6 @@@ void __init trace_init(void
                        tracepoint_printk = 0;
        }
        tracer_alloc_buffers();
 -      init_ftrace_syscalls();
        trace_event_init();     
  }
  
index b03a0ea77b993cf9f175ed7b44fc239832de7def,4ff8c1394017a2a3dd74114f921206f9281a32bd..db54dda10ccc179f733db8fd8407cc402c11e409
@@@ -2429,39 -2429,12 +2429,39 @@@ static __init int event_trace_memsetup(
        return 0;
  }
  
 +static __init void
 +early_enable_events(struct trace_array *tr, bool disable_first)
 +{
 +      char *buf = bootup_event_buf;
 +      char *token;
 +      int ret;
 +
 +      while (true) {
 +              token = strsep(&buf, ",");
 +
 +              if (!token)
 +                      break;
 +              if (!*token)
 +                      continue;
 +
 +              /* Restarting syscalls requires that we stop them first */
 +              if (disable_first)
 +                      ftrace_set_clr_event(tr, token, 0);
 +
 +              ret = ftrace_set_clr_event(tr, token, 1);
 +              if (ret)
 +                      pr_warn("Failed to enable trace event: %s\n", token);
 +
 +              /* Put back the comma to allow this to be called again */
 +              if (buf)
 +                      *(buf - 1) = ',';
 +      }
 +}
 +
  static __init int event_trace_enable(void)
  {
        struct trace_array *tr = top_trace_array();
        struct ftrace_event_call **iter, *call;
 -      char *buf = bootup_event_buf;
 -      char *token;
        int ret;
  
        if (!tr)
         */
        __trace_early_add_events(tr);
  
 -      while (true) {
 -              token = strsep(&buf, ",");
 -
 -              if (!token)
 -                      break;
 -              if (!*token)
 -                      continue;
 -
 -              ret = ftrace_set_clr_event(tr, token, 1);
 -              if (ret)
 -                      pr_warn("Failed to enable trace event: %s\n", token);
 -      }
 +      early_enable_events(tr, false);
  
        trace_printk_start_comm();
  
        return 0;
  }
  
 +/*
 + * event_trace_enable() is called from trace_event_init() first to
 + * initialize events and perhaps start any events that are on the
 + * command line. Unfortunately, there are some events that will not
 + * start this early, like the system call tracepoints that need
 + * to set the TIF_SYSCALL_TRACEPOINT flag of pid 1. But event_trace_enable()
 + * is called before pid 1 starts, and this flag is never set, making
 + * the syscall tracepoint never get reached, but the event is enabled
 + * regardless (and not doing anything).
 + */
 +static __init int event_trace_enable_again(void)
 +{
 +      struct trace_array *tr;
 +
 +      tr = top_trace_array();
 +      if (!tr)
 +              return -ENODEV;
 +
 +      early_enable_events(tr, true);
 +
 +      return 0;
 +}
 +
 +early_initcall(event_trace_enable_again);
 +
  static __init int event_trace_init(void)
  {
        struct trace_array *tr;
                return -ENODEV;
  
        d_tracer = tracing_init_dentry();
-       if (!d_tracer)
+       if (IS_ERR(d_tracer))
                return 0;
  
        entry = debugfs_create_file("available_events", 0444, d_tracer,
index 296079ae658300123e157d3265fe9e0f5a352bcf,b4a00def88f5c5784e0d0a8ffd18edbfac694f23..d73f565b4e062127789bce243dc9c2e365dbb175
@@@ -1148,7 -1148,7 +1148,7 @@@ kprobe_perf_func(struct trace_kprobe *t
        size = ALIGN(__size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);
  
 -      entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
 +      entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
        if (!entry)
                return;
  
@@@ -1179,7 -1179,7 +1179,7 @@@ kretprobe_perf_func(struct trace_kprob
        size = ALIGN(__size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);
  
 -      entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
 +      entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
        if (!entry)
                return;
  
@@@ -1320,7 -1320,7 +1320,7 @@@ static __init int init_kprobe_trace(voi
                return -EINVAL;
  
        d_tracer = tracing_init_dentry();
-       if (!d_tracer)
+       if (IS_ERR(d_tracer))
                return 0;
  
        entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
index b11441321e7a473a6e7086f28d791dcd44b8a6f3,5f0eba9e5e6bcb6566f4ccd291dc8ebe8ac1b4d8..7dc1c8abecd6c6fd831a3f952d67454261dc2ab9
@@@ -1111,7 -1111,7 +1111,7 @@@ static void __uprobe_perf_func(struct t
        if (hlist_empty(head))
                goto out;
  
 -      entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
 +      entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
        if (!entry)
                goto out;
  
@@@ -1321,7 -1321,7 +1321,7 @@@ static __init int init_uprobe_trace(voi
        struct dentry *d_tracer;
  
        d_tracer = tracing_init_dentry();
-       if (!d_tracer)
+       if (IS_ERR(d_tracer))
                return 0;
  
        trace_create_file("uprobe_events", 0644, d_tracer,