Merge tag 'powerpc-3.20-1' of git://git.kernel.org/pub/scm/linux/kernel/git/mpe/linux
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 1d3631448b910e945e9658ebfebc6fd3e998744a..2cdc9d422bed9245bef3e0e62a007efad80be6c1 100644
@@ -79,11 +79,6 @@ struct perf_branch_stack {
        struct perf_branch_entry        entries[0];
 };
 
-struct perf_regs {
-       __u64           abi;
-       struct pt_regs  *regs;
-};
-
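struct perf_regs is not deleted outright; this series presumably relocates it
to <linux/perf_regs.h> so the arch perf_regs code can use it directly. A sketch
of the relocated definition, assuming it keeps the same two fields:

    /* include/linux/perf_regs.h -- assumed destination of the move */
    struct perf_regs {
            __u64           abi;    /* PERF_SAMPLE_REGS_ABI_* value */
            struct pt_regs  *regs;  /* the sampled register set */
    };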
 struct task_struct;
 
 /*
@@ -455,11 +450,6 @@ struct perf_event {
 #endif /* CONFIG_PERF_EVENTS */
 };
 
-enum perf_event_context_type {
-       task_context,
-       cpu_context,
-};
-
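With the context-type enum (and the ctx->type field in the next hunk) gone,
callers presumably distinguish task contexts from CPU contexts via the
context's task pointer, which CPU contexts leave NULL. A minimal sketch of
that check; the helper name is hypothetical:

    /* hypothetical helper, not part of this patch */
    static inline bool perf_ctx_is_task_ctx(struct perf_event_context *ctx)
    {
            return ctx->task != NULL;       /* CPU contexts carry no task */
    }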
 /**
  * struct perf_event_context - event context structure
  *
@@ -467,7 +457,6 @@ enum perf_event_context_type {
  */
 struct perf_event_context {
        struct pmu                      *pmu;
-       enum perf_event_context_type    type;
        /*
         * Protect the states of the events in the list,
         * nr_active, and the list:
@@ -480,6 +469,7 @@ struct perf_event_context {
         */
        struct mutex                    mutex;
 
+       struct list_head                active_ctx_list;
        struct list_head                pinned_groups;
        struct list_head                flexible_groups;
        struct list_head                event_list;
@@ -530,7 +520,6 @@ struct perf_cpu_context {
        int                             exclusive;
        struct hrtimer                  hrtimer;
        ktime_t                         hrtimer_interval;
-       struct list_head                rotation_list;
        struct pmu                      *unique_pmu;
        struct perf_cgroup              *cgrp;
 };
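This hunk and the active_ctx_list addition above trade the per-CPU
rotation_list for a list head embedded in each context, so unthrottling and
rotation can walk only the contexts that actually have active events. A sketch
of how contexts might be linked and unlinked, assuming helpers along these
lines in kernel/events/core.c (names are assumptions):

    static DEFINE_PER_CPU(struct list_head, active_ctx_list);

    /* IRQs disabled: a context gains its first active event */
    static void perf_event_ctx_activate(struct perf_event_context *ctx)
    {
            WARN_ON(!list_empty(&ctx->active_ctx_list));
            list_add(&ctx->active_ctx_list, this_cpu_ptr(&active_ctx_list));
    }

    /* IRQs disabled: a context loses its last active event */
    static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
    {
            WARN_ON(list_empty(&ctx->active_ctx_list));
            list_del_init(&ctx->active_ctx_list);
    }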
@@ -610,7 +599,14 @@ struct perf_sample_data {
                u32     reserved;
        }                               cpu_entry;
        struct perf_callchain_entry     *callchain;
+
+       /*
+        * regs_user may point to task_pt_regs or to regs_user_copy, depending
+        * on arch details.
+        */
        struct perf_regs                regs_user;
+       struct pt_regs                  regs_user_copy;
+
        struct perf_regs                regs_intr;
        u64                             stack_user_size;
 } ____cacheline_aligned;
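The comment above is the point of regs_user_copy: when a sample fires while
the CPU is in the kernel, the interrupt pt_regs do not describe user state, so
the arch can reconstruct what it knows of the user registers into the
regs_user_copy scratch area and point regs_user.regs at it. A hedged sketch of
the expected dispatch in kernel/events/core.c:

    static void perf_sample_regs_user(struct perf_regs *regs_user,
                                      struct pt_regs *regs,
                                      struct pt_regs *regs_user_copy)
    {
            if (user_mode(regs)) {
                    /* interrupted user code: the trap frame is usable as-is */
                    regs_user->abi  = perf_reg_abi(current);
                    regs_user->regs = regs;
            } else if (current->mm) {
                    /* in-kernel hit: let the arch fill regs_user_copy */
                    perf_get_regs_user(regs_user, regs, regs_user_copy);
            } else {
                    /* kernel thread: no user registers to report */
                    regs_user->abi  = PERF_SAMPLE_REGS_ABI_NONE;
                    regs_user->regs = NULL;
            }
    }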
@@ -663,6 +659,7 @@ static inline int is_software_event(struct perf_event *event)
 
 extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
+extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
 extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
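The new triple-underscore entry point is presumably __perf_sw_event with the
recursion bookkeeping factored out, so a caller that can rule out recursion
(the scheduler path below) may skip it. A sketch of how the two would relate,
under that assumption:

    void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
    {
            int rctx;

            preempt_disable_notrace();
            rctx = perf_swevent_get_recursion_context();
            if (unlikely(rctx < 0))
                    goto fail;

            ___perf_sw_event(event_id, nr, regs, addr);     /* the real work */

            perf_swevent_put_recursion_context(rctx);
    fail:
            preempt_enable_notrace();
    }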
 
 #ifndef perf_arch_fetch_caller_regs
@@ -687,14 +684,25 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs)
 static __always_inline void
 perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 {
-       struct pt_regs hot_regs;
+       if (static_key_false(&perf_swevent_enabled[event_id]))
+               __perf_sw_event(event_id, nr, regs, addr);
+}
+
+DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);
 
+/*
+ * 'Special' version for the scheduler; it hard-assumes no recursion,
+ * which is guaranteed because we never actually schedule inside other
+ * swevents (those disable preemption).
+ */
+static __always_inline void
+perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
+{
        if (static_key_false(&perf_swevent_enabled[event_id])) {
-               if (!regs) {
-                       perf_fetch_caller_regs(&hot_regs);
-                       regs = &hot_regs;
-               }
-               __perf_sw_event(event_id, nr, regs, addr);
+               struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
+
+               perf_fetch_caller_regs(regs);
+               ___perf_sw_event(event_id, nr, regs, addr);
        }
 }
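The DECLARE_PER_CPU above implies a matching definition on the core side;
because this path runs with preemption disabled, slot 0 of the per-CPU array
can serve as scratch pt_regs storage, avoiding the pt_regs that the old
perf_sw_event kept on the stack. The array of four presumably mirrors perf's
four recursion contexts, though only slot 0 is used here. A sketch of the
assumed definition:

    /* kernel/events/core.c -- assumed home of the definition */
    DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]);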
 
@@ -710,7 +718,7 @@ static inline void perf_event_task_sched_in(struct task_struct *prev,
 static inline void perf_event_task_sched_out(struct task_struct *prev,
                                             struct task_struct *next)
 {
-       perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
+       perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
 
        if (static_key_false(&perf_sched_events.key))
                __perf_event_task_sched_out(prev, next);
@@ -821,6 +829,8 @@ static inline int perf_event_refresh(struct perf_event *event, int refresh)
 static inline void
 perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)    { }
 static inline void
+perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)                    { }
+static inline void
 perf_bp_event(struct perf_event *event, void *data)                    { }
 
 static inline int perf_register_guest_info_callbacks