/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

14 /*
15  * Force it to be aligned to unsigned long to avoid misaligned accesses
16  * suprises
17  */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
        perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int      total_ref_count;

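/*
 * Decide whether the calling perf event may use this trace event: run
 * any event-specific perf_perm() hook, and restrict the function tracer
 * and raw tracepoint samples to CAP_SYS_ADMIN unless the event is
 * marked TRACE_EVENT_FL_CAP_ANY and attached to a task.
 */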
static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
                                 struct perf_event *p_event)
{
        if (tp_event->perf_perm) {
                int ret = tp_event->perf_perm(tp_event, p_event);
                if (ret)
                        return ret;
        }

        /* The ftrace function trace is allowed only for root. */
        if (ftrace_event_is_function(tp_event) &&
            perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
                return -EPERM;

        /* No tracing, just counting, so no obvious leak */
        if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
                return 0;

        /* Some events are ok to be traced by non-root users... */
        if (p_event->attach_state == PERF_ATTACH_TASK) {
                if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
                        return 0;
        }

        /*
         * ...otherwise raw tracepoint data can be a severe data leak,
         * only allow root to have these.
         */
        if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
                return -EPERM;

        return 0;
}

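/*
 * Take a perf reference on the trace event.  The first user allocates
 * the per-cpu hlist of attached perf events and, if no other trace
 * event is currently used by perf, the shared per-context output
 * buffers, then invokes the class TRACE_REG_PERF_REGISTER hook.
 * Subsequent users only bump perf_refcount.
 */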
static int perf_trace_event_reg(struct ftrace_event_call *tp_event,
                                struct perf_event *p_event)
{
        struct hlist_head __percpu *list;
        int ret = -ENOMEM;
        int cpu;

        p_event->tp_event = tp_event;
        if (tp_event->perf_refcount++ > 0)
                return 0;

        list = alloc_percpu(struct hlist_head);
        if (!list)
                goto fail;

        for_each_possible_cpu(cpu)
                INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

        tp_event->perf_events = list;

        if (!total_ref_count) {
                char __percpu *buf;
                int i;

                for (i = 0; i < PERF_NR_CONTEXTS; i++) {
                        buf = (char __percpu *)alloc_percpu(perf_trace_t);
                        if (!buf)
                                goto fail;

                        perf_trace_buf[i] = buf;
                }
        }

        ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
        if (ret)
                goto fail;

        total_ref_count++;
        return 0;

fail:
        if (!total_ref_count) {
                int i;

                for (i = 0; i < PERF_NR_CONTEXTS; i++) {
                        free_percpu(perf_trace_buf[i]);
                        perf_trace_buf[i] = NULL;
                }
        }

        if (!--tp_event->perf_refcount) {
                free_percpu(tp_event->perf_events);
                tp_event->perf_events = NULL;
        }

        return ret;
}

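/*
 * Drop one perf reference on the trace event.  The last user
 * unregisters the class hook, waits for in-flight probes to finish,
 * frees the per-cpu event lists and, once no trace event is used by
 * perf at all, the shared per-context buffers.
 */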
static void perf_trace_event_unreg(struct perf_event *p_event)
{
        struct ftrace_event_call *tp_event = p_event->tp_event;
        int i;

        if (--tp_event->perf_refcount > 0)
                goto out;

        tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

        /*
         * Ensure our callback won't be called anymore. The buffers
         * will be freed after that.
         */
        tracepoint_synchronize_unregister();

        free_percpu(tp_event->perf_events);
        tp_event->perf_events = NULL;

        if (!--total_ref_count) {
                for (i = 0; i < PERF_NR_CONTEXTS; i++) {
                        free_percpu(perf_trace_buf[i]);
                        perf_trace_buf[i] = NULL;
                }
        }
out:
        module_put(tp_event->mod);
}

static int perf_trace_event_open(struct perf_event *p_event)
{
        struct ftrace_event_call *tp_event = p_event->tp_event;
        return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
        struct ftrace_event_call *tp_event = p_event->tp_event;
        tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}

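/* Permission check, registration and open, unwinding on failure. */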
static int perf_trace_event_init(struct ftrace_event_call *tp_event,
                                 struct perf_event *p_event)
{
        int ret;

        ret = perf_trace_event_perm(tp_event, p_event);
        if (ret)
                return ret;

        ret = perf_trace_event_reg(tp_event, p_event);
        if (ret)
                return ret;

        ret = perf_trace_event_open(p_event);
        if (ret) {
                perf_trace_event_unreg(p_event);
                return ret;
        }

        return 0;
}

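/*
 * Called by perf when a tracepoint event is created: find the trace
 * event whose id matches attr.config, pin the module that provides it
 * and initialize the perf side of the event.
 */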
int perf_trace_init(struct perf_event *p_event)
{
        struct ftrace_event_call *tp_event;
        u64 event_id = p_event->attr.config;
        int ret = -EINVAL;

        mutex_lock(&event_mutex);
        list_for_each_entry(tp_event, &ftrace_events, list) {
                if (tp_event->event.type == event_id &&
                    tp_event->class && tp_event->class->reg &&
                    try_module_get(tp_event->mod)) {
                        ret = perf_trace_event_init(tp_event, p_event);
                        if (ret)
                                module_put(tp_event->mod);
                        break;
                }
        }
        mutex_unlock(&event_mutex);

        return ret;
}

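/* Tear down the perf side of a trace event under event_mutex. */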
void perf_trace_destroy(struct perf_event *p_event)
{
        mutex_lock(&event_mutex);
        perf_trace_event_close(p_event);
        perf_trace_event_unreg(p_event);
        mutex_unlock(&event_mutex);
}

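/*
 * Schedule the event in on this CPU: link it into the per-cpu hlist
 * that the probes iterate and let the event class do any per-add work.
 */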
int perf_trace_add(struct perf_event *p_event, int flags)
{
        struct ftrace_event_call *tp_event = p_event->tp_event;
        struct hlist_head __percpu *pcpu_list;
        struct hlist_head *list;

        pcpu_list = tp_event->perf_events;
        if (WARN_ON_ONCE(!pcpu_list))
                return -EINVAL;

        if (!(flags & PERF_EF_START))
                p_event->hw.state = PERF_HES_STOPPED;

        list = this_cpu_ptr(pcpu_list);
        hlist_add_head_rcu(&p_event->hlist_entry, list);

        return tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event);
}

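/* Schedule the event out: unlink it from the per-cpu hlist. */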
void perf_trace_del(struct perf_event *p_event, int flags)
{
        struct ftrace_event_call *tp_event = p_event->tp_event;
        hlist_del_rcu(&p_event->hlist_entry);
        tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
}

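/*
 * Reserve PERF_MAX_TRACE_SIZE-bounded scratch space in the per-cpu
 * buffer for the current recursion context and fill in the common
 * trace entry header.  Returns NULL if the request is too large or
 * recursion is detected; on success *rctxp holds the recursion context
 * that the caller must release when submitting the record.
 */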
__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
                                       struct pt_regs *regs, int *rctxp)
{
        struct trace_entry *entry;
        unsigned long flags;
        char *raw_data;
        int pc;

        BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

        if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
                        "perf buffer not large enough"))
                return NULL;

        pc = preempt_count();

        *rctxp = perf_swevent_get_recursion_context();
        if (*rctxp < 0)
                return NULL;

        raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);

        /* zero the dead bytes from align to not leak stack to user */
        memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

        entry = (struct trace_entry *)raw_data;
        local_save_flags(flags);
        tracing_generic_entry_update(entry, flags, pc);
        entry->type = type;

        return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);

#ifdef CONFIG_FUNCTION_TRACER
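/*
 * ftrace_ops callback used when perf traces the function tracer: build
 * a TRACE_FN entry in the perf trace buffer and submit it to the events
 * attached on this CPU.
 */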
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
        struct ftrace_entry *entry;
        struct hlist_head *head;
        struct pt_regs regs;
        int rctx;

        head = this_cpu_ptr(event_function.perf_events);
        if (hlist_empty(head))
                return;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
                    sizeof(u64)) - sizeof(u32))

        BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

        perf_fetch_caller_regs(&regs);

        entry = perf_trace_buf_prepare(ENTRY_SIZE, TRACE_FN, NULL, &rctx);
        if (!entry)
                return;

        entry->ip = ip;
        entry->parent_ip = parent_ip;
        perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0,
                              1, &regs, head, NULL);

#undef ENTRY_SIZE
}

static int perf_ftrace_function_register(struct perf_event *event)
{
        struct ftrace_ops *ops = &event->ftrace_ops;

        ops->flags |= FTRACE_OPS_FL_CONTROL;
        ops->func = perf_ftrace_function_call;
        return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
        struct ftrace_ops *ops = &event->ftrace_ops;
        int ret = unregister_ftrace_function(ops);
        ftrace_free_filter(ops);
        return ret;
}

static void perf_ftrace_function_enable(struct perf_event *event)
{
        ftrace_function_local_enable(&event->ftrace_ops);
}

static void perf_ftrace_function_disable(struct perf_event *event)
{
        ftrace_function_local_disable(&event->ftrace_ops);
}

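/*
 * reg() callback of the function trace event: map the generic perf
 * register/open/add callbacks onto ftrace_ops registration and the
 * per-cpu enable/disable of the control ops.
 */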
int perf_ftrace_event_register(struct ftrace_event_call *call,
                               enum trace_reg type, void *data)
{
        switch (type) {
        case TRACE_REG_REGISTER:
        case TRACE_REG_UNREGISTER:
                break;
        case TRACE_REG_PERF_REGISTER:
        case TRACE_REG_PERF_UNREGISTER:
                return 0;
        case TRACE_REG_PERF_OPEN:
                return perf_ftrace_function_register(data);
        case TRACE_REG_PERF_CLOSE:
                return perf_ftrace_function_unregister(data);
        case TRACE_REG_PERF_ADD:
                perf_ftrace_function_enable(data);
                return 0;
        case TRACE_REG_PERF_DEL:
                perf_ftrace_function_disable(data);
                return 0;
        }

        return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */