perf/x86: Add flags to event constraints
author      Stephane Eranian <eranian@google.com>
            Thu, 24 Jan 2013 15:10:27 +0000 (16:10 +0100)
committer   Arnaldo Carvalho de Melo <acme@redhat.com>
            Mon, 1 Apr 2013 15:15:04 +0000 (12:15 -0300)
This patch adds a flags field to each event constraint. It can be
used to store event-specific features which can later be used by the
scheduling code or by low-level x86 code.

The flags are propagated into event->hw.flags during the
get_event_constraint() call. They are cleared during the
put_event_constraint() call.
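
For illustration only (none of this is part of the patch): a constraint
carrying such a flag could be declared directly with the extended
__EVENT_CONSTRAINT() macro. The PERF_X86_EVENT_EXAMPLE bit and the
0x01cd event code are made up for the sketch; INTEL_ARCH_EVENT_MASK,
HWEIGHT() and EVENT_CONSTRAINT_END are the existing helpers from
arch/x86/kernel/cpu/perf_event.h:

  /* hypothetical hw.flags bit, for illustration only */
  #define PERF_X86_EVENT_EXAMPLE	0x1

  /* event 0x01cd restricted to counters 0-3, tagged with the flag */
  static struct event_constraint example_constraints[] = {
  	__EVENT_CONSTRAINT(0x01cd, 0xf, INTEL_ARCH_EVENT_MASK,
  			   HWEIGHT(0xf), 0, PERF_X86_EVENT_EXAMPLE),
  	EVENT_CONSTRAINT_END
  };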

This mechanism is going to be used by the PEBS-LL patches. It avoids
defining yet another table to hold event-specific information.
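
Again purely as a sketch (example_handle_sample() and the
PERF_X86_EVENT_EXAMPLE bit above are hypothetical; only event->hw.flags
and the get/put constraint paths come from this patch), a consumer can
then test the propagated bit instead of consulting a separate table:

  static void example_handle_sample(struct perf_event *event)
  {
  	/*
  	 * The matching constraint's flags were copied into
  	 * event->hw.flags by the get_event_constraint() path;
  	 * intel_put_event_constraints() clears them again.
  	 */
  	if (event->hw.flags & PERF_X86_EVENT_EXAMPLE) {
  		/* event-specific handling, e.g. decode extra sample fields */
  	}
  }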

Signed-off-by: Stephane Eranian <eranian@google.com>
Cc: peterz@infradead.org
Cc: ak@linux.intel.com
Cc: jolsa@redhat.com
Cc: namhyung.kim@lge.com
Link: http://lkml.kernel.org/r/1359040242-8269-4-git-send-email-eranian@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event.h
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_ds.c
arch/x86/kernel/cpu/perf_event_intel_uncore.c
include/linux/perf_event.h

diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 6e8ab0427041dc590790c48105a210092bbe2181..8ba51518f689218e4211956ff284abd681192f3d 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1489,7 +1489,7 @@ static int __init init_hw_perf_events(void)
 
        unconstrained = (struct event_constraint)
                __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
-                                  0, x86_pmu.num_counters, 0);
+                                  0, x86_pmu.num_counters, 0, 0);
 
        x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
        x86_pmu_format_group.attrs = x86_pmu.format_attrs;
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index b1518eed5f99acadacab91e8d3f6a6458e0e7bc5..9686d38eb4587c07a6ef059224b78e177e8d3daa 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -59,6 +59,7 @@ struct event_constraint {
        u64     cmask;
        int     weight;
        int     overlap;
+       int     flags;
 };
 
 struct amd_nb {
@@ -170,16 +171,17 @@ struct cpu_hw_events {
        void                            *kfree_on_online;
 };
 
-#define __EVENT_CONSTRAINT(c, n, m, w, o) {\
+#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\
        { .idxmsk64 = (n) },            \
        .code = (c),                    \
        .cmask = (m),                   \
        .weight = (w),                  \
        .overlap = (o),                 \
+       .flags = f,                     \
 }
 
 #define EVENT_CONSTRAINT(c, n, m)      \
-       __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0)
+       __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)
 
 /*
  * The overlap flag marks event constraints with overlapping counter
@@ -203,7 +205,7 @@ struct cpu_hw_events {
  * and its counter masks must be kept at a minimum.
  */
 #define EVENT_CONSTRAINT_OVERLAP(c, n, m)      \
-       __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1)
+       __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)
 
 /*
  * Constraint on the Event code.
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index dab7580c47aee2e71e501afbfa94c17e2790777d..df3beaac3397886b0c8c275b4047742adb08c311 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1392,8 +1392,11 @@ x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
 
        if (x86_pmu.event_constraints) {
                for_each_event_constraint(c, x86_pmu.event_constraints) {
-                       if ((event->hw.config & c->cmask) == c->code)
+                       if ((event->hw.config & c->cmask) == c->code) {
+                               /* hw.flags zeroed at initialization */
+                               event->hw.flags |= c->flags;
                                return c;
+                       }
                }
        }
 
@@ -1438,6 +1441,7 @@ intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
 static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
                                        struct perf_event *event)
 {
+       event->hw.flags = 0;
        intel_put_shared_regs_event_constraints(cpuc, event);
 }
 
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 826054a4f2ee4c54ea4cfb9d29ab117db35e7fc5..f30d85bcbda96b7cdae12e55443944afc975f3d2 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -430,8 +430,10 @@ struct event_constraint *intel_pebs_constraints(struct perf_event *event)
 
        if (x86_pmu.pebs_constraints) {
                for_each_event_constraint(c, x86_pmu.pebs_constraints) {
-                       if ((event->hw.config & c->cmask) == c->code)
+                       if ((event->hw.config & c->cmask) == c->code) {
+                               event->hw.flags |= c->flags;
                                return c;
+                       }
                }
        }
 
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index b43200dbfe7e179d65386f0dfb3b4afd3f33bf36..75da9e18b1286b16538aa96bfede9fa681cfe19e 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -2438,7 +2438,7 @@ static int __init uncore_type_init(struct intel_uncore_type *type)
 
        type->unconstrainted = (struct event_constraint)
                __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
-                               0, type->num_counters, 0);
+                               0, type->num_counters, 0, 0);
 
        for (i = 0; i < type->num_boxes; i++) {
                pmus[i].func_id = -1;
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 1c592114c437d65dbd6d4add43eac1b81899a6b1..cd3bb2cd94940bf3dc1c007d61129b5ec7b86288 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -127,6 +127,7 @@ struct hw_perf_event {
                        int             event_base_rdpmc;
                        int             idx;
                        int             last_cpu;
+                       int             flags;
 
                        struct hw_perf_event_extra extra_reg;
                        struct hw_perf_event_extra branch_reg;