1 /*
2  * linux/kernel/irq/manage.c
3  *
4  * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
5  * Copyright (C) 2005-2006 Thomas Gleixner
6  *
7  * This file contains driver APIs to the irq subsystem.
8  */
9
10 #define pr_fmt(fmt) "genirq: " fmt
11
12 #include <linux/irq.h>
13 #include <linux/kthread.h>
14 #include <linux/module.h>
15 #include <linux/random.h>
16 #include <linux/interrupt.h>
17 #include <linux/slab.h>
18 #include <linux/sched.h>
19 #include <linux/sched/rt.h>
20 #include <linux/task_work.h>
21
22 #include "internals.h"
23
24 #ifdef CONFIG_IRQ_FORCED_THREADING
25 __read_mostly bool force_irqthreads;
26
27 static int __init setup_forced_irqthreads(char *arg)
28 {
29         force_irqthreads = true;
30         return 0;
31 }
32 early_param("threadirqs", setup_forced_irqthreads);
33 #endif
34
35 static void __synchronize_hardirq(struct irq_desc *desc)
36 {
37         bool inprogress;
38
39         do {
40                 unsigned long flags;
41
42                 /*
43                  * Wait until we're out of the critical section.  This might
44                  * give the wrong answer due to the lack of memory barriers.
45                  */
46                 while (irqd_irq_inprogress(&desc->irq_data))
47                         cpu_relax();
48
49                 /* Ok, that indicated we're done: double-check carefully. */
50                 raw_spin_lock_irqsave(&desc->lock, flags);
51                 inprogress = irqd_irq_inprogress(&desc->irq_data);
52                 raw_spin_unlock_irqrestore(&desc->lock, flags);
53
54                 /* Oops, that failed? */
55         } while (inprogress);
56 }
57
58 /**
59  *      synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
60  *      @irq: interrupt number to wait for
61  *
62  *      This function waits for any pending hard IRQ handlers for this
63  *      interrupt to complete before returning. If you use this
64  *      function while holding a resource the IRQ handler may need you
65  *      will deadlock. It does not take associated threaded handlers
66  *      into account.
67  *
68  *      Do not use this for shutdown scenarios where you must be sure
69  *      that all parts (hardirq and threaded handler) have completed.
70  *
71  *      Returns: false if a threaded handler is active.
72  *
73  *      This function may be called - with care - from IRQ context.
74  */
75 bool synchronize_hardirq(unsigned int irq)
76 {
77         struct irq_desc *desc = irq_to_desc(irq);
78
79         if (desc) {
80                 __synchronize_hardirq(desc);
81                 return !atomic_read(&desc->threads_active);
82         }
83
84         return true;
85 }
86 EXPORT_SYMBOL(synchronize_hardirq);
87
88 /**
89  *      synchronize_irq - wait for pending IRQ handlers (on other CPUs)
90  *      @irq: interrupt number to wait for
91  *
92  *      This function waits for any pending IRQ handlers for this interrupt
93  *      to complete before returning. If you use this function while
94  *      holding a resource the IRQ handler may need you will deadlock.
95  *
96  *      This function may be called - with care - from IRQ context.
97  */
98 void synchronize_irq(unsigned int irq)
99 {
100         struct irq_desc *desc = irq_to_desc(irq);
101
102         if (desc) {
103                 __synchronize_hardirq(desc);
104                 /*
105                  * We made sure that no hardirq handler is
106                  * running. Now verify that no threaded handlers are
107                  * active.
108                  */
109                 wait_event(desc->wait_for_threads,
110                            !atomic_read(&desc->threads_active));
111         }
112 }
113 EXPORT_SYMBOL(synchronize_irq);
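
/*
 * Illustrative sketch (not part of this file, guarded out because the
 * foo_* device helpers are hypothetical): a typical driver teardown path
 * built on synchronize_irq().  The point is the ordering - quiesce the
 * interrupt source, wait for any running handlers (hard and threaded) to
 * drain, and only then free the data those handlers were using.  Calling
 * this while holding a lock the handler also takes would deadlock, as the
 * kernel-doc above warns.
 */
#if 0
static void foo_shutdown(struct foo_dev *foo)
{
	foo_hw_disable_interrupts(foo);	/* hypothetical: quiesce the device */
	synchronize_irq(foo->irq);	/* wait for running handlers to finish */
	kfree(foo->rx_buf);		/* now safe: no handler can touch it */
}
#endif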
114
115 #ifdef CONFIG_SMP
116 cpumask_var_t irq_default_affinity;
117
118 static int __irq_can_set_affinity(struct irq_desc *desc)
119 {
120         if (!desc || !irqd_can_balance(&desc->irq_data) ||
121             !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
122                 return 0;
123         return 1;
124 }
125
126 /**
127  *      irq_can_set_affinity - Check if the affinity of a given irq can be set
128  *      @irq:           Interrupt to check
129  *
130  */
131 int irq_can_set_affinity(unsigned int irq)
132 {
133         return __irq_can_set_affinity(irq_to_desc(irq));
134 }
135
136 /**
137  *      irq_set_thread_affinity - Notify irq threads to adjust affinity
138  *      @desc:          irq descriptor which has affinity changed
139  *
140  *      We just set IRQTF_AFFINITY and delegate the affinity setting
141  *      to the interrupt thread itself. We can not call
142  *      set_cpus_allowed_ptr() here as we hold desc->lock and this
143  *      code can be called from hard interrupt context.
144  */
145 void irq_set_thread_affinity(struct irq_desc *desc)
146 {
147         struct irqaction *action = desc->action;
148
149         while (action) {
150                 if (action->thread)
151                         set_bit(IRQTF_AFFINITY, &action->thread_flags);
152                 action = action->next;
153         }
154 }
155
156 #ifdef CONFIG_GENERIC_PENDING_IRQ
157 static inline bool irq_can_move_pcntxt(struct irq_data *data)
158 {
159         return irqd_can_move_in_process_context(data);
160 }
161 static inline bool irq_move_pending(struct irq_data *data)
162 {
163         return irqd_is_setaffinity_pending(data);
164 }
165 static inline void
166 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
167 {
168         cpumask_copy(desc->pending_mask, mask);
169 }
170 static inline void
171 irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
172 {
173         cpumask_copy(mask, desc->pending_mask);
174 }
175 #else
176 static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
177 static inline bool irq_move_pending(struct irq_data *data) { return false; }
178 static inline void
179 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
180 static inline void
181 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
182 #endif
183
184 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
185                         bool force)
186 {
187         struct irq_desc *desc = irq_data_to_desc(data);
188         struct irq_chip *chip = irq_data_get_irq_chip(data);
189         int ret;
190
191         ret = chip->irq_set_affinity(data, mask, force);
192         switch (ret) {
193         case IRQ_SET_MASK_OK:
194         case IRQ_SET_MASK_OK_DONE:
195                 cpumask_copy(desc->irq_common_data.affinity, mask);
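                /* fall through: NOCOPY only skips the affinity copy above */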
196         case IRQ_SET_MASK_OK_NOCOPY:
197                 irq_set_thread_affinity(desc);
198                 ret = 0;
199         }
200
201         return ret;
202 }
203
204 int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
205                             bool force)
206 {
207         struct irq_chip *chip = irq_data_get_irq_chip(data);
208         struct irq_desc *desc = irq_data_to_desc(data);
209         int ret = 0;
210
211         if (!chip || !chip->irq_set_affinity)
212                 return -EINVAL;
213
214         if (irq_can_move_pcntxt(data)) {
215                 ret = irq_do_set_affinity(data, mask, force);
216         } else {
217                 irqd_set_move_pending(data);
218                 irq_copy_pending(desc, mask);
219         }
220
221         if (desc->affinity_notify) {
222                 kref_get(&desc->affinity_notify->kref);
223                 schedule_work(&desc->affinity_notify->work);
224         }
225         irqd_set(data, IRQD_AFFINITY_SET);
226
227         return ret;
228 }
229
230 int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
231 {
232         struct irq_desc *desc = irq_to_desc(irq);
233         unsigned long flags;
234         int ret;
235
236         if (!desc)
237                 return -EINVAL;
238
239         raw_spin_lock_irqsave(&desc->lock, flags);
240         ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
241         raw_spin_unlock_irqrestore(&desc->lock, flags);
242         return ret;
243 }
244
245 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
246 {
247         unsigned long flags;
248         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
249
250         if (!desc)
251                 return -EINVAL;
252         desc->affinity_hint = m;
253         irq_put_desc_unlock(desc, flags);
254         /* set the initial affinity to prevent every interrupt being on CPU0 */
255         if (m)
256                 __irq_set_affinity(irq, m, false);
257         return 0;
258 }
259 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
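
/*
 * Illustrative sketch (not part of this file, guarded out because the
 * foo_* structures are hypothetical): a multi-queue driver publishing an
 * affinity hint for each of its vectors.  The hint is exposed via
 * /proc/irq/<n>/affinity_hint for tools like irqbalance and, since a
 * non-NULL mask is also applied above, gives each vector a sane initial
 * placement.  The cpumask passed in must stay valid until the hint is
 * cleared again with a NULL mask before the irq is freed.
 */
#if 0
static void foo_set_affinity_hints(struct foo_dev *foo)
{
	int i;

	for (i = 0; i < foo->num_queues; i++) {
		/* hypothetical per-queue mask: spread queues over online CPUs */
		cpumask_set_cpu(i % num_online_cpus(), &foo->queue[i].mask);
		irq_set_affinity_hint(foo->queue[i].irq, &foo->queue[i].mask);
	}
}
#endif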
260
261 /**
262  *      irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
263  *      @irq: interrupt number to set affinity
264  *      @vcpu_info: vCPU specific data
265  *
266  *      This function uses the vCPU specific data to set the vCPU
267  *      affinity for an irq. The vCPU specific data is passed from
268  *      outside, such as KVM. One example code path is as below:
269  *      KVM -> IOMMU -> irq_set_vcpu_affinity().
270  */
271 int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
272 {
273         unsigned long flags;
274         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
275         struct irq_data *data;
276         struct irq_chip *chip;
277         int ret = -ENOSYS;
278
279         if (!desc)
280                 return -EINVAL;
281
282         data = irq_desc_get_irq_data(desc);
283         chip = irq_data_get_irq_chip(data);
284         if (chip && chip->irq_set_vcpu_affinity)
285                 ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
286         irq_put_desc_unlock(desc, flags);
287
288         return ret;
289 }
290 EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
291
292 static void irq_affinity_notify(struct work_struct *work)
293 {
294         struct irq_affinity_notify *notify =
295                 container_of(work, struct irq_affinity_notify, work);
296         struct irq_desc *desc = irq_to_desc(notify->irq);
297         cpumask_var_t cpumask;
298         unsigned long flags;
299
300         if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
301                 goto out;
302
303         raw_spin_lock_irqsave(&desc->lock, flags);
304         if (irq_move_pending(&desc->irq_data))
305                 irq_get_pending(cpumask, desc);
306         else
307                 cpumask_copy(cpumask, desc->irq_common_data.affinity);
308         raw_spin_unlock_irqrestore(&desc->lock, flags);
309
310         notify->notify(notify, cpumask);
311
312         free_cpumask_var(cpumask);
313 out:
314         kref_put(&notify->kref, notify->release);
315 }
316
317 /**
318  *      irq_set_affinity_notifier - control notification of IRQ affinity changes
319  *      @irq:           Interrupt for which to enable/disable notification
320  *      @notify:        Context for notification, or %NULL to disable
321  *                      notification.  Function pointers must be initialised;
322  *                      the other fields will be initialised by this function.
323  *
324  *      Must be called in process context.  Notification may only be enabled
325  *      after the IRQ is allocated and must be disabled before the IRQ is
326  *      freed using free_irq().
327  */
328 int
329 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
330 {
331         struct irq_desc *desc = irq_to_desc(irq);
332         struct irq_affinity_notify *old_notify;
333         unsigned long flags;
334
335         /* The release function is promised process context */
336         might_sleep();
337
338         if (!desc)
339                 return -EINVAL;
340
341         /* Complete initialisation of *notify */
342         if (notify) {
343                 notify->irq = irq;
344                 kref_init(&notify->kref);
345                 INIT_WORK(&notify->work, irq_affinity_notify);
346         }
347
348         raw_spin_lock_irqsave(&desc->lock, flags);
349         old_notify = desc->affinity_notify;
350         desc->affinity_notify = notify;
351         raw_spin_unlock_irqrestore(&desc->lock, flags);
352
353         if (old_notify)
354                 kref_put(&old_notify->kref, old_notify->release);
355
356         return 0;
357 }
358 EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
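
/*
 * Illustrative sketch (not part of this file, guarded out because the
 * foo_* names are hypothetical): wiring up an affinity notifier so a
 * driver can re-point per-CPU resources when the interrupt is moved.
 * Both callbacks run in process context (the work item scheduled above);
 * release() must drop whatever reference keeps the containing structure
 * alive - here the notifier is embedded in a longer-lived object, so it
 * is empty.
 */
#if 0
static void foo_irq_affinity_notify(struct irq_affinity_notify *notify,
				    const cpumask_t *mask)
{
	struct foo_queue *q = container_of(notify, struct foo_queue,
					   affinity_notify);

	foo_retarget_queue(q, mask);	/* hypothetical: follow the new mask */
}

static void foo_irq_affinity_release(struct kref *ref)
{
	/* nothing to free: notifier is embedded in struct foo_queue */
}

static int foo_register_notifier(struct foo_queue *q)
{
	q->affinity_notify.notify = foo_irq_affinity_notify;
	q->affinity_notify.release = foo_irq_affinity_release;
	return irq_set_affinity_notifier(q->irq, &q->affinity_notify);
}
#endif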
359
360 #ifndef CONFIG_AUTO_IRQ_AFFINITY
361 /*
362  * Generic version of the affinity autoselector.
363  */
364 static int setup_affinity(struct irq_desc *desc, struct cpumask *mask)
365 {
366         struct cpumask *set = irq_default_affinity;
367         int node = irq_desc_get_node(desc);
368
369         /* Excludes PER_CPU and NO_BALANCE interrupts */
370         if (!__irq_can_set_affinity(desc))
371                 return 0;
372
373         /*
374          * Preserve a userspace affinity setup, but make sure that
375          * one of the targets is online.
376          */
377         if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
378                 if (cpumask_intersects(desc->irq_common_data.affinity,
379                                        cpu_online_mask))
380                         set = desc->irq_common_data.affinity;
381                 else
382                         irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
383         }
384
385         cpumask_and(mask, cpu_online_mask, set);
386         if (node != NUMA_NO_NODE) {
387                 const struct cpumask *nodemask = cpumask_of_node(node);
388
389                 /* make sure at least one of the cpus in nodemask is online */
390                 if (cpumask_intersects(mask, nodemask))
391                         cpumask_and(mask, mask, nodemask);
392         }
393         irq_do_set_affinity(&desc->irq_data, mask, false);
394         return 0;
395 }
396 #else
397 /* Wrapper for ALPHA specific affinity selector magic */
398 static inline int setup_affinity(struct irq_desc *d, struct cpumask *mask)
399 {
400         return irq_select_affinity(irq_desc_get_irq(d));
401 }
402 #endif
403
404 /*
405  * Called when affinity is set via /proc/irq
406  */
407 int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
408 {
409         struct irq_desc *desc = irq_to_desc(irq);
410         unsigned long flags;
411         int ret;
412
413         raw_spin_lock_irqsave(&desc->lock, flags);
414         ret = setup_affinity(desc, mask);
415         raw_spin_unlock_irqrestore(&desc->lock, flags);
416         return ret;
417 }
418
419 #else
420 static inline int
421 setup_affinity(struct irq_desc *desc, struct cpumask *mask)
422 {
423         return 0;
424 }
425 #endif
426
427 void __disable_irq(struct irq_desc *desc)
428 {
429         if (!desc->depth++)
430                 irq_disable(desc);
431 }
432
433 static int __disable_irq_nosync(unsigned int irq)
434 {
435         unsigned long flags;
436         struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
437
438         if (!desc)
439                 return -EINVAL;
440         __disable_irq(desc);
441         irq_put_desc_busunlock(desc, flags);
442         return 0;
443 }
444
445 /**
446  *      disable_irq_nosync - disable an irq without waiting
447  *      @irq: Interrupt to disable
448  *
449  *      Disable the selected interrupt line.  Disables and Enables are
450  *      nested.
451  *      Unlike disable_irq(), this function does not ensure existing
452  *      instances of the IRQ handler have completed before returning.
453  *
454  *      This function may be called from IRQ context.
455  */
456 void disable_irq_nosync(unsigned int irq)
457 {
458         __disable_irq_nosync(irq);
459 }
460 EXPORT_SYMBOL(disable_irq_nosync);
461
462 /**
463  *      disable_irq - disable an irq and wait for completion
464  *      @irq: Interrupt to disable
465  *
466  *      Disable the selected interrupt line.  Enables and Disables are
467  *      nested.
468  *      This function waits for any pending IRQ handlers for this interrupt
469  *      to complete before returning. If you use this function while
470  *      holding a resource the IRQ handler may need you will deadlock.
471  *
472  *      This function may be called - with care - from IRQ context.
473  */
474 void disable_irq(unsigned int irq)
475 {
476         if (!__disable_irq_nosync(irq))
477                 synchronize_irq(irq);
478 }
479 EXPORT_SYMBOL(disable_irq);
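
/*
 * Illustrative sketch (not part of this file, guarded out because the
 * foo_* helpers are hypothetical): disable_irq()/enable_irq() calls nest,
 * so each disable must be paired with exactly one enable before the line
 * is delivered again.  disable_irq() additionally waits for a running
 * handler, which is why it must not be called while holding a lock that
 * handler takes.
 */
#if 0
static void foo_reprogram(struct foo_dev *foo)
{
	disable_irq(foo->irq);		/* depth 0 -> 1, waits for handlers */
	disable_irq(foo->irq);		/* depth 1 -> 2, nested disable */
	foo_hw_rewrite_registers(foo);	/* hypothetical: no irq can fire here */
	enable_irq(foo->irq);		/* depth 2 -> 1, line still disabled */
	enable_irq(foo->irq);		/* depth 1 -> 0, line enabled again */
}
#endif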
480
481 /**
482  *      disable_hardirq - disables an irq and waits for hardirq completion
483  *      @irq: Interrupt to disable
484  *
485  *      Disable the selected interrupt line.  Enables and Disables are
486  *      nested.
487  *      This function waits for any pending hard IRQ handlers for this
488  *      interrupt to complete before returning. If you use this function while
489  *      holding a resource the hard IRQ handler may need you will deadlock.
490  *
491  *      When used to optimistically disable an interrupt from atomic context
492  *      the return value must be checked.
493  *
494  *      Returns: false if a threaded handler is active.
495  *
496  *      This function may be called - with care - from IRQ context.
497  */
498 bool disable_hardirq(unsigned int irq)
499 {
500         if (!__disable_irq_nosync(irq))
501                 return synchronize_hardirq(irq);
502
503         return false;
504 }
505 EXPORT_SYMBOL_GPL(disable_hardirq);
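
/*
 * Illustrative sketch (not part of this file, guarded out because
 * foo_poll_device() is hypothetical): the optimistic use of
 * disable_hardirq() from atomic context.  Unlike disable_irq() it only
 * waits for the hard handler; if it returns false a threaded handler is
 * still active and the caller must not assume exclusive access.
 */
#if 0
static void foo_atomic_poll(struct foo_dev *foo)
{
	if (disable_hardirq(foo->irq))
		foo_poll_device(foo);	/* hard handler quiesced, safe to poll */
	enable_irq(foo->irq);
}
#endif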
506
507 void __enable_irq(struct irq_desc *desc)
508 {
509         switch (desc->depth) {
510         case 0:
511  err_out:
512                 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
513                      irq_desc_get_irq(desc));
514                 break;
515         case 1: {
516                 if (desc->istate & IRQS_SUSPENDED)
517                         goto err_out;
518                 /* Prevent probing on this irq: */
519                 irq_settings_set_noprobe(desc);
520                 irq_enable(desc);
521                 check_irq_resend(desc);
522                 /* fall-through */
523         }
524         default:
525                 desc->depth--;
526         }
527 }
528
529 /**
530  *      enable_irq - enable handling of an irq
531  *      @irq: Interrupt to enable
532  *
533  *      Undoes the effect of one call to disable_irq().  If this
534  *      matches the last disable, processing of interrupts on this
535  *      IRQ line is re-enabled.
536  *
537  *      This function may be called from IRQ context only when
538  *      desc->irq_data.chip->irq_bus_lock and desc->irq_data.chip->irq_bus_sync_unlock are NULL !
539  */
540 void enable_irq(unsigned int irq)
541 {
542         unsigned long flags;
543         struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
544
545         if (!desc)
546                 return;
547         if (WARN(!desc->irq_data.chip,
548                  KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
549                 goto out;
550
551         __enable_irq(desc);
552 out:
553         irq_put_desc_busunlock(desc, flags);
554 }
555 EXPORT_SYMBOL(enable_irq);
556
557 static int set_irq_wake_real(unsigned int irq, unsigned int on)
558 {
559         struct irq_desc *desc = irq_to_desc(irq);
560         int ret = -ENXIO;
561
562         if (irq_desc_get_chip(desc)->flags &  IRQCHIP_SKIP_SET_WAKE)
563                 return 0;
564
565         if (desc->irq_data.chip->irq_set_wake)
566                 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
567
568         return ret;
569 }
570
571 /**
572  *      irq_set_irq_wake - control irq power management wakeup
573  *      @irq:   interrupt to control
574  *      @on:    enable/disable power management wakeup
575  *
576  *      Enable/disable power management wakeup mode, which is
577  *      disabled by default.  Enables and disables must match,
578  *      just as they match for non-wakeup mode support.
579  *
580  *      Wakeup mode lets this IRQ wake the system from sleep
581  *      states like "suspend to RAM".
582  */
583 int irq_set_irq_wake(unsigned int irq, unsigned int on)
584 {
585         unsigned long flags;
586         struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
587         int ret = 0;
588
589         if (!desc)
590                 return -EINVAL;
591
592         /* wakeup-capable irqs can be shared between drivers that
593          * don't need to have the same sleep mode behaviors.
594          */
595         if (on) {
596                 if (desc->wake_depth++ == 0) {
597                         ret = set_irq_wake_real(irq, on);
598                         if (ret)
599                                 desc->wake_depth = 0;
600                         else
601                                 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
602                 }
603         } else {
604                 if (desc->wake_depth == 0) {
605                         WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
606                 } else if (--desc->wake_depth == 0) {
607                         ret = set_irq_wake_real(irq, on);
608                         if (ret)
609                                 desc->wake_depth = 1;
610                         else
611                                 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
612                 }
613         }
614         irq_put_desc_busunlock(desc, flags);
615         return ret;
616 }
617 EXPORT_SYMBOL(irq_set_irq_wake);
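
/*
 * Illustrative sketch (not part of this file, guarded out because the
 * foo_* device is hypothetical): balanced wakeup enable/disable from a
 * driver's suspend/resume callbacks.  As with disable/enable, the calls
 * nest, so every enable_irq_wake() needs a matching disable_irq_wake()
 * (both are thin wrappers around irq_set_irq_wake() declared in
 * <linux/interrupt.h>).
 */
#if 0
static int foo_suspend(struct device *dev)
{
	struct foo_dev *foo = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		enable_irq_wake(foo->irq);	/* irq_set_irq_wake(irq, 1) */
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct foo_dev *foo = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		disable_irq_wake(foo->irq);	/* irq_set_irq_wake(irq, 0) */
	return 0;
}
#endif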
618
619 /*
620  * Internal function that tells the architecture code whether a
621  * particular irq has been exclusively allocated or is available
622  * for driver use.
623  */
624 int can_request_irq(unsigned int irq, unsigned long irqflags)
625 {
626         unsigned long flags;
627         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
628         int canrequest = 0;
629
630         if (!desc)
631                 return 0;
632
633         if (irq_settings_can_request(desc)) {
634                 if (!desc->action ||
635                     irqflags & desc->action->flags & IRQF_SHARED)
636                         canrequest = 1;
637         }
638         irq_put_desc_unlock(desc, flags);
639         return canrequest;
640 }
641
642 int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
643 {
644         struct irq_chip *chip = desc->irq_data.chip;
645         int ret, unmask = 0;
646
647         if (!chip || !chip->irq_set_type) {
648                 /*
649                  * IRQF_TRIGGER_* but the PIC does not support multiple
650                  * flow-types?
651                  */
652                 pr_debug("No set_type function for IRQ %d (%s)\n",
653                          irq_desc_get_irq(desc),
654                          chip ? (chip->name ? : "unknown") : "unknown");
655                 return 0;
656         }
657
658         flags &= IRQ_TYPE_SENSE_MASK;
659
660         if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
661                 if (!irqd_irq_masked(&desc->irq_data))
662                         mask_irq(desc);
663                 if (!irqd_irq_disabled(&desc->irq_data))
664                         unmask = 1;
665         }
666
667         /* caller masked out all except trigger mode flags */
668         ret = chip->irq_set_type(&desc->irq_data, flags);
669
670         switch (ret) {
671         case IRQ_SET_MASK_OK:
672         case IRQ_SET_MASK_OK_DONE:
673                 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
674                 irqd_set(&desc->irq_data, flags);
675
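                /* fall through */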
676         case IRQ_SET_MASK_OK_NOCOPY:
677                 flags = irqd_get_trigger_type(&desc->irq_data);
678                 irq_settings_set_trigger_mask(desc, flags);
679                 irqd_clear(&desc->irq_data, IRQD_LEVEL);
680                 irq_settings_clr_level(desc);
681                 if (flags & IRQ_TYPE_LEVEL_MASK) {
682                         irq_settings_set_level(desc);
683                         irqd_set(&desc->irq_data, IRQD_LEVEL);
684                 }
685
686                 ret = 0;
687                 break;
688         default:
689                 pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
690                        flags, irq_desc_get_irq(desc), chip->irq_set_type);
691         }
692         if (unmask)
693                 unmask_irq(desc);
694         return ret;
695 }
696
697 #ifdef CONFIG_HARDIRQS_SW_RESEND
698 int irq_set_parent(int irq, int parent_irq)
699 {
700         unsigned long flags;
701         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
702
703         if (!desc)
704                 return -EINVAL;
705
706         desc->parent_irq = parent_irq;
707
708         irq_put_desc_unlock(desc, flags);
709         return 0;
710 }
711 #endif
712
713 /*
714  * Default primary interrupt handler for threaded interrupts. Is
715  * assigned as primary handler when request_threaded_irq is called
716  * with handler == NULL. Useful for oneshot interrupts.
717  */
718 static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
719 {
720         return IRQ_WAKE_THREAD;
721 }
722
723 /*
724  * Primary handler for nested threaded interrupts. Should never be
725  * called.
726  */
727 static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
728 {
729         WARN(1, "Primary handler called for nested irq %d\n", irq);
730         return IRQ_NONE;
731 }
732
733 static int irq_wait_for_interrupt(struct irqaction *action)
734 {
735         set_current_state(TASK_INTERRUPTIBLE);
736
737         while (!kthread_should_stop()) {
738
739                 if (test_and_clear_bit(IRQTF_RUNTHREAD,
740                                        &action->thread_flags)) {
741                         __set_current_state(TASK_RUNNING);
742                         return 0;
743                 }
744                 schedule();
745                 set_current_state(TASK_INTERRUPTIBLE);
746         }
747         __set_current_state(TASK_RUNNING);
748         return -1;
749 }
750
751 /*
752  * Oneshot interrupts keep the irq line masked until the threaded
753  * handler has finished. Unmask if the interrupt has not been disabled and
754  * is marked MASKED.
755  */
756 static void irq_finalize_oneshot(struct irq_desc *desc,
757                                  struct irqaction *action)
758 {
759         if (!(desc->istate & IRQS_ONESHOT))
760                 return;
761 again:
762         chip_bus_lock(desc);
763         raw_spin_lock_irq(&desc->lock);
764
765         /*
766          * Implausible though it may be, we need to protect ourselves
767          * against the following scenario:
768          *
769          * The thread finishes before the hard interrupt handler on the
770          * other CPU. If we unmask the irq line now, the interrupt can
771          * come in again, mask the line and bail out because
772          * IRQS_INPROGRESS is set - leaving the irq line masked forever.
773          *
774          * This also serializes the state of shared oneshot handlers
775          * versus "desc->threads_oneshot |= action->thread_mask;" in
776          * irq_wake_thread(). See the comment there which explains the
777          * serialization.
778          */
779         if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
780                 raw_spin_unlock_irq(&desc->lock);
781                 chip_bus_sync_unlock(desc);
782                 cpu_relax();
783                 goto again;
784         }
785
786         /*
787          * Now check again whether the thread should run. Otherwise
788          * we would clear the threads_oneshot bit of this thread which
789          * was just set.
790          */
791         if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
792                 goto out_unlock;
793
794         desc->threads_oneshot &= ~action->thread_mask;
795
796         if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
797             irqd_irq_masked(&desc->irq_data))
798                 unmask_threaded_irq(desc);
799
800 out_unlock:
801         raw_spin_unlock_irq(&desc->lock);
802         chip_bus_sync_unlock(desc);
803 }
804
805 #ifdef CONFIG_SMP
806 /*
807  * Check whether we need to change the affinity of the interrupt thread.
808  */
809 static void
810 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
811 {
812         cpumask_var_t mask;
813         bool valid = true;
814
815         if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
816                 return;
817
818         /*
819          * In case we are out of memory we set IRQTF_AFFINITY again and
820          * try again next time
821          */
822         if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
823                 set_bit(IRQTF_AFFINITY, &action->thread_flags);
824                 return;
825         }
826
827         raw_spin_lock_irq(&desc->lock);
828         /*
829          * This code is triggered unconditionally. Check the affinity
830          * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
831          */
832         if (desc->irq_common_data.affinity)
833                 cpumask_copy(mask, desc->irq_common_data.affinity);
834         else
835                 valid = false;
836         raw_spin_unlock_irq(&desc->lock);
837
838         if (valid)
839                 set_cpus_allowed_ptr(current, mask);
840         free_cpumask_var(mask);
841 }
842 #else
843 static inline void
844 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
845 #endif
846
847 /*
848  * Interrupts which are not explicitly requested as threaded
849  * interrupts rely on the implicit bh/preempt disable of the hard irq
850  * context. So we need to disable bh here to avoid deadlocks and other
851  * side effects.
852  */
853 static irqreturn_t
854 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
855 {
856         irqreturn_t ret;
857
858         local_bh_disable();
859         ret = action->thread_fn(action->irq, action->dev_id);
860         irq_finalize_oneshot(desc, action);
861         local_bh_enable();
862         return ret;
863 }
864
865 /*
866  * Interrupts explicitly requested as threaded interrupts want to be
867  * preemptible - many of them need to sleep and wait for slow buses to
868  * complete.
869  */
870 static irqreturn_t irq_thread_fn(struct irq_desc *desc,
871                 struct irqaction *action)
872 {
873         irqreturn_t ret;
874
875         ret = action->thread_fn(action->irq, action->dev_id);
876         irq_finalize_oneshot(desc, action);
877         return ret;
878 }
879
880 static void wake_threads_waitq(struct irq_desc *desc)
881 {
882         if (atomic_dec_and_test(&desc->threads_active))
883                 wake_up(&desc->wait_for_threads);
884 }
885
886 static void irq_thread_dtor(struct callback_head *unused)
887 {
888         struct task_struct *tsk = current;
889         struct irq_desc *desc;
890         struct irqaction *action;
891
892         if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
893                 return;
894
895         action = kthread_data(tsk);
896
897         pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
898                tsk->comm, tsk->pid, action->irq);
899
900
901         desc = irq_to_desc(action->irq);
902         /*
903          * If IRQTF_RUNTHREAD is set, we need to decrement
904          * desc->threads_active and wake possible waiters.
905          */
906         if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
907                 wake_threads_waitq(desc);
908
909         /* Prevent a stale desc->threads_oneshot */
910         irq_finalize_oneshot(desc, action);
911 }
912
913 /*
914  * Interrupt handler thread
915  */
916 static int irq_thread(void *data)
917 {
918         struct callback_head on_exit_work;
919         struct irqaction *action = data;
920         struct irq_desc *desc = irq_to_desc(action->irq);
921         irqreturn_t (*handler_fn)(struct irq_desc *desc,
922                         struct irqaction *action);
923
924         if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
925                                         &action->thread_flags))
926                 handler_fn = irq_forced_thread_fn;
927         else
928                 handler_fn = irq_thread_fn;
929
930         init_task_work(&on_exit_work, irq_thread_dtor);
931         task_work_add(current, &on_exit_work, false);
932
933         irq_thread_check_affinity(desc, action);
934
935         while (!irq_wait_for_interrupt(action)) {
936                 irqreturn_t action_ret;
937
938                 irq_thread_check_affinity(desc, action);
939
940                 action_ret = handler_fn(desc, action);
941                 if (action_ret == IRQ_HANDLED)
942                         atomic_inc(&desc->threads_handled);
943
944                 wake_threads_waitq(desc);
945         }
946
947         /*
948          * This is the regular exit path. __free_irq() is stopping the
949          * thread via kthread_stop() after calling
950          * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
951          * oneshot mask bit can be set. We cannot verify that as we
952          * cannot touch the oneshot mask at this point anymore as
953          * __setup_irq() might have given out currents thread_mask
954  *      __setup_irq() might have given out current's thread_mask
955          */
956         task_work_cancel(current, irq_thread_dtor);
957         return 0;
958 }
959
960 /**
961  *      irq_wake_thread - wake the irq thread for the action identified by dev_id
962  *      @irq:           Interrupt line
963  *      @dev_id:        Device identity for which the thread should be woken
964  *
965  */
966 void irq_wake_thread(unsigned int irq, void *dev_id)
967 {
968         struct irq_desc *desc = irq_to_desc(irq);
969         struct irqaction *action;
970         unsigned long flags;
971
972         if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
973                 return;
974
975         raw_spin_lock_irqsave(&desc->lock, flags);
976         for (action = desc->action; action; action = action->next) {
977                 if (action->dev_id == dev_id) {
978                         if (action->thread)
979                                 __irq_wake_thread(desc, action);
980                         break;
981                 }
982         }
983         raw_spin_unlock_irqrestore(&desc->lock, flags);
984 }
985 EXPORT_SYMBOL_GPL(irq_wake_thread);
986
987 static void irq_setup_forced_threading(struct irqaction *new)
988 {
989         if (!force_irqthreads)
990                 return;
991         if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
992                 return;
993
994         new->flags |= IRQF_ONESHOT;
995
996         if (!new->thread_fn) {
997                 set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
998                 new->thread_fn = new->handler;
999                 new->handler = irq_default_primary_handler;
1000         }
1001 }
1002
1003 static int irq_request_resources(struct irq_desc *desc)
1004 {
1005         struct irq_data *d = &desc->irq_data;
1006         struct irq_chip *c = d->chip;
1007
1008         return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1009 }
1010
1011 static void irq_release_resources(struct irq_desc *desc)
1012 {
1013         struct irq_data *d = &desc->irq_data;
1014         struct irq_chip *c = d->chip;
1015
1016         if (c->irq_release_resources)
1017                 c->irq_release_resources(d);
1018 }
1019
1020 /*
1021  * Internal function to register an irqaction - typically used to
1022  * allocate special interrupts that are part of the architecture.
1023  */
1024 static int
1025 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1026 {
1027         struct irqaction *old, **old_ptr;
1028         unsigned long flags, thread_mask = 0;
1029         int ret, nested, shared = 0;
1030         cpumask_var_t mask;
1031
1032         if (!desc)
1033                 return -EINVAL;
1034
1035         if (desc->irq_data.chip == &no_irq_chip)
1036                 return -ENOSYS;
1037         if (!try_module_get(desc->owner))
1038                 return -ENODEV;
1039
1040         /*
1041          * Check whether the interrupt nests into another interrupt
1042          * thread.
1043          */
1044         nested = irq_settings_is_nested_thread(desc);
1045         if (nested) {
1046                 if (!new->thread_fn) {
1047                         ret = -EINVAL;
1048                         goto out_mput;
1049                 }
1050                 /*
1051                  * Replace the primary handler which was provided from
1052                  * the driver for non nested interrupt handling by the
1053                  * dummy function which warns when called.
1054                  */
1055                 new->handler = irq_nested_primary_handler;
1056         } else {
1057                 if (irq_settings_can_thread(desc))
1058                         irq_setup_forced_threading(new);
1059         }
1060
1061         /*
1062          * Create a handler thread when a thread function is supplied
1063          * and the interrupt does not nest into another interrupt
1064          * thread.
1065          */
1066         if (new->thread_fn && !nested) {
1067                 struct task_struct *t;
1068                 static const struct sched_param param = {
1069                         .sched_priority = MAX_USER_RT_PRIO/2,
1070                 };
1071
1072                 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1073                                    new->name);
1074                 if (IS_ERR(t)) {
1075                         ret = PTR_ERR(t);
1076                         goto out_mput;
1077                 }
1078
1079                 sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
1080
1081                 /*
1082                  * We keep the reference to the task struct even if
1083                  * the thread dies to avoid that the interrupt code
1084                  * references an already freed task_struct.
1085                  */
1086                 get_task_struct(t);
1087                 new->thread = t;
1088                 /*
1089                  * Tell the thread to set its affinity. This is
1090                  * important for shared interrupt handlers as we do
1091                  * not invoke setup_affinity() for the secondary
1092                  * handlers as everything is already set up. Even for
1093                  * interrupts marked with IRQF_NO_BALANCE this is
1094                  * correct as we want the thread to move to the cpu(s)
1095                  * on which the requesting code placed the interrupt.
1096                  */
1097                 set_bit(IRQTF_AFFINITY, &new->thread_flags);
1098         }
1099
1100         if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
1101                 ret = -ENOMEM;
1102                 goto out_thread;
1103         }
1104
1105         /*
1106          * Drivers are often written to work without knowledge about the
1107          * underlying irq chip implementation, so a request for a
1108          * threaded irq without a primary hard irq context handler
1109          * requires the ONESHOT flag to be set. Some irq chips like
1110          * MSI based interrupts are per se one shot safe. Check the
1111          * chip flags, so we can avoid the unmask dance at the end of
1112          * the threaded handler for those.
1113          */
1114         if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1115                 new->flags &= ~IRQF_ONESHOT;
1116
1117         /*
1118          * The following block of code has to be executed atomically
1119          */
1120         raw_spin_lock_irqsave(&desc->lock, flags);
1121         old_ptr = &desc->action;
1122         old = *old_ptr;
1123         if (old) {
1124                 /*
1125                  * Can't share interrupts unless both agree to and are
1126                  * the same type (level, edge, polarity). So both flag
1127                  * fields must have IRQF_SHARED set and the bits which
1128                  * set the trigger type must match. Also all must
1129                  * agree on ONESHOT.
1130                  */
1131                 if (!((old->flags & new->flags) & IRQF_SHARED) ||
1132                     ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
1133                     ((old->flags ^ new->flags) & IRQF_ONESHOT))
1134                         goto mismatch;
1135
1136                 /* All handlers must agree on per-cpuness */
1137                 if ((old->flags & IRQF_PERCPU) !=
1138                     (new->flags & IRQF_PERCPU))
1139                         goto mismatch;
1140
1141                 /* add new interrupt at end of irq queue */
1142                 do {
1143                         /*
1144                          * Or all existing action->thread_mask bits,
1145                          * so we can find the next zero bit for this
1146                          * new action.
1147                          */
1148                         thread_mask |= old->thread_mask;
1149                         old_ptr = &old->next;
1150                         old = *old_ptr;
1151                 } while (old);
1152                 shared = 1;
1153         }
1154
1155         /*
1156          * Setup the thread mask for this irqaction for ONESHOT. For
1157          * !ONESHOT irqs the thread mask is 0 so we can avoid a
1158          * conditional in irq_wake_thread().
1159          */
1160         if (new->flags & IRQF_ONESHOT) {
1161                 /*
1162                  * Unlikely to have 32 (or 64, depending on BITS_PER_LONG)
1163                  * irqs sharing one line, but who knows.
1164                  */
1165                 if (thread_mask == ~0UL) {
1166                         ret = -EBUSY;
1167                         goto out_mask;
1168                 }
1169                 /*
1170                  * The thread_mask for the action is or'ed to
1171                  * desc->thread_active to indicate that the
1172                  * IRQF_ONESHOT thread handler has been woken, but not
1173                  * yet finished. The bit is cleared when a thread
1174                  * completes. When all threads of a shared interrupt
1175                  * line have completed desc->threads_active becomes
1176                  * zero and the interrupt line is unmasked. See
1177                  * handle.c:irq_wake_thread() for further information.
1178                  *
1179                  * If no thread is woken by primary (hard irq context)
1180                  * interrupt handlers, then desc->threads_active is
1181                  * also checked for zero to unmask the irq line in the
1182                  * affected hard irq flow handlers
1183                  * (handle_[fasteoi|level]_irq).
1184                  *
1185                  * The new action gets the first zero bit of
1186                  * thread_mask assigned. See the loop above which or's
1187                  * all existing action->thread_mask bits.
1188                  */
1189                 new->thread_mask = 1 << ffz(thread_mask);
1190
1191         } else if (new->handler == irq_default_primary_handler &&
1192                    !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1193                 /*
1194                  * The interrupt was requested with handler = NULL, so
1195                  * we use the default primary handler for it. But it
1196                  * does not have the oneshot flag set. In combination
1197                  * with level interrupts this is deadly, because the
1198                  * default primary handler just wakes the thread, then
1199                  * the irq line is re-enabled, but the device still
1200                  * has the level irq asserted. Rinse and repeat....
1201                  *
1202                  * While this works for edge type interrupts, we play
1203                  * it safe and reject unconditionally because we can't
1204                  * say for sure which type this interrupt really
1205                  * has. The type flags are unreliable as the
1206                  * underlying chip implementation can override them.
1207                  */
1208                 pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
1209                        irq);
1210                 ret = -EINVAL;
1211                 goto out_mask;
1212         }
1213
1214         if (!shared) {
1215                 ret = irq_request_resources(desc);
1216                 if (ret) {
1217                         pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
1218                                new->name, irq, desc->irq_data.chip->name);
1219                         goto out_mask;
1220                 }
1221
1222                 init_waitqueue_head(&desc->wait_for_threads);
1223
1224                 /* Setup the type (level, edge polarity) if configured: */
1225                 if (new->flags & IRQF_TRIGGER_MASK) {
1226                         ret = __irq_set_trigger(desc,
1227                                                 new->flags & IRQF_TRIGGER_MASK);
1228
1229                         if (ret)
1230                                 goto out_mask;
1231                 }
1232
1233                 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1234                                   IRQS_ONESHOT | IRQS_WAITING);
1235                 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1236
1237                 if (new->flags & IRQF_PERCPU) {
1238                         irqd_set(&desc->irq_data, IRQD_PER_CPU);
1239                         irq_settings_set_per_cpu(desc);
1240                 }
1241
1242                 if (new->flags & IRQF_ONESHOT)
1243                         desc->istate |= IRQS_ONESHOT;
1244
1245                 if (irq_settings_can_autoenable(desc))
1246                         irq_startup(desc, true);
1247                 else
1248                         /* Undo nested disables: */
1249                         desc->depth = 1;
1250
1251                 /* Exclude IRQ from balancing if requested */
1252                 if (new->flags & IRQF_NOBALANCING) {
1253                         irq_settings_set_no_balancing(desc);
1254                         irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1255                 }
1256
1257                 /* Set default affinity mask once everything is setup */
1258                 setup_affinity(desc, mask);
1259
1260         } else if (new->flags & IRQF_TRIGGER_MASK) {
1261                 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1262                 unsigned int omsk = irq_settings_get_trigger_mask(desc);
1263
1264                 if (nmsk != omsk)
1265                         /* hope the handler works with current trigger mode */
1266                         pr_warning("irq %d uses trigger mode %u; requested %u\n",
1267                                    irq, nmsk, omsk);
1268         }
1269
1270         new->irq = irq;
1271         *old_ptr = new;
1272
1273         irq_pm_install_action(desc, new);
1274
1275         /* Reset broken irq detection when installing new handler */
1276         desc->irq_count = 0;
1277         desc->irqs_unhandled = 0;
1278
1279         /*
1280          * Check whether we disabled the irq via the spurious handler
1281          * before. Reenable it and give it another chance.
1282          */
1283         if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1284                 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1285                 __enable_irq(desc);
1286         }
1287
1288         raw_spin_unlock_irqrestore(&desc->lock, flags);
1289
1290         /*
1291          * Strictly no need to wake it up, but hung_task complains
1292          * when no hard interrupt wakes the thread up.
1293          */
1294         if (new->thread)
1295                 wake_up_process(new->thread);
1296
1297         register_irq_proc(irq, desc);
1298         new->dir = NULL;
1299         register_handler_proc(irq, new);
1300         free_cpumask_var(mask);
1301
1302         return 0;
1303
1304 mismatch:
1305         if (!(new->flags & IRQF_PROBE_SHARED)) {
1306                 pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1307                        irq, new->flags, new->name, old->flags, old->name);
1308 #ifdef CONFIG_DEBUG_SHIRQ
1309                 dump_stack();
1310 #endif
1311         }
1312         ret = -EBUSY;
1313
1314 out_mask:
1315         raw_spin_unlock_irqrestore(&desc->lock, flags);
1316         free_cpumask_var(mask);
1317
1318 out_thread:
1319         if (new->thread) {
1320                 struct task_struct *t = new->thread;
1321
1322                 new->thread = NULL;
1323                 kthread_stop(t);
1324                 put_task_struct(t);
1325         }
1326 out_mput:
1327         module_put(desc->owner);
1328         return ret;
1329 }
1330
1331 /**
1332  *      setup_irq - setup an interrupt
1333  *      @irq: Interrupt line to setup
1334  *      @act: irqaction for the interrupt
1335  *
1336  * Used to statically setup interrupts in the early boot process.
1337  */
1338 int setup_irq(unsigned int irq, struct irqaction *act)
1339 {
1340         int retval;
1341         struct irq_desc *desc = irq_to_desc(irq);
1342
1343         if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1344                 return -EINVAL;
1345         chip_bus_lock(desc);
1346         retval = __setup_irq(irq, desc, act);
1347         chip_bus_sync_unlock(desc);
1348
1349         return retval;
1350 }
1351 EXPORT_SYMBOL_GPL(setup_irq);
1352
1353 /*
1354  * Internal function to unregister an irqaction - used to free
1355  * regular and special interrupts that are part of the architecture.
1356  */
1357 static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1358 {
1359         struct irq_desc *desc = irq_to_desc(irq);
1360         struct irqaction *action, **action_ptr;
1361         unsigned long flags;
1362
1363         WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1364
1365         if (!desc)
1366                 return NULL;
1367
1368         raw_spin_lock_irqsave(&desc->lock, flags);
1369
1370         /*
1371          * There can be multiple actions per IRQ descriptor, find the right
1372          * one based on the dev_id:
1373          */
1374         action_ptr = &desc->action;
1375         for (;;) {
1376                 action = *action_ptr;
1377
1378                 if (!action) {
1379                         WARN(1, "Trying to free already-free IRQ %d\n", irq);
1380                         raw_spin_unlock_irqrestore(&desc->lock, flags);
1381
1382                         return NULL;
1383                 }
1384
1385                 if (action->dev_id == dev_id)
1386                         break;
1387                 action_ptr = &action->next;
1388         }
1389
1390         /* Found it - now remove it from the list of entries: */
1391         *action_ptr = action->next;
1392
1393         irq_pm_remove_action(desc, action);
1394
1395         /* If this was the last handler, shut down the IRQ line: */
1396         if (!desc->action) {
1397                 irq_shutdown(desc);
1398                 irq_release_resources(desc);
1399         }
1400
1401 #ifdef CONFIG_SMP
1402         /* make sure affinity_hint is cleaned up */
1403         if (WARN_ON_ONCE(desc->affinity_hint))
1404                 desc->affinity_hint = NULL;
1405 #endif
1406
1407         raw_spin_unlock_irqrestore(&desc->lock, flags);
1408
1409         unregister_handler_proc(irq, action);
1410
1411         /* Make sure it's not being used on another CPU: */
1412         synchronize_irq(irq);
1413
1414 #ifdef CONFIG_DEBUG_SHIRQ
1415         /*
1416          * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1417          * event to happen even now that it's being freed, so let's make sure that
1418          * is so by doing an extra call to the handler ....
1419          *
1420          * ( We do this after actually deregistering it, to make sure that a
1421          *   'real' IRQ doesn't run in parallel with our fake. )
1422          */
1423         if (action->flags & IRQF_SHARED) {
1424                 local_irq_save(flags);
1425                 action->handler(irq, dev_id);
1426                 local_irq_restore(flags);
1427         }
1428 #endif
1429
1430         if (action->thread) {
1431                 kthread_stop(action->thread);
1432                 put_task_struct(action->thread);
1433         }
1434
1435         module_put(desc->owner);
1436         return action;
1437 }
1438
1439 /**
1440  *      remove_irq - free an interrupt
1441  *      @irq: Interrupt line to free
1442  *      @act: irqaction for the interrupt
1443  *
1444  * Used to remove interrupts statically setup by the early boot process.
1445  */
1446 void remove_irq(unsigned int irq, struct irqaction *act)
1447 {
1448         struct irq_desc *desc = irq_to_desc(irq);
1449
1450         if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1451             __free_irq(irq, act->dev_id);
1452 }
1453 EXPORT_SYMBOL_GPL(remove_irq);
1454
1455 /**
1456  *      free_irq - free an interrupt allocated with request_irq
1457  *      @irq: Interrupt line to free
1458  *      @dev_id: Device identity to free
1459  *
1460  *      Remove an interrupt handler. The handler is removed and if the
1461  *      interrupt line is no longer in use by any driver it is disabled.
1462  *      On a shared IRQ the caller must ensure the interrupt is disabled
1463  *      on the card it drives before calling this function. The function
1464  *      does not return until any executing interrupts for this IRQ
1465  *      have completed.
1466  *
1467  *      This function must not be called from interrupt context.
1468  */
1469 void free_irq(unsigned int irq, void *dev_id)
1470 {
1471         struct irq_desc *desc = irq_to_desc(irq);
1472
1473         if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1474                 return;
1475
1476 #ifdef CONFIG_SMP
1477         if (WARN_ON(desc->affinity_notify))
1478                 desc->affinity_notify = NULL;
1479 #endif
1480
1481         chip_bus_lock(desc);
1482         kfree(__free_irq(irq, dev_id));
1483         chip_bus_sync_unlock(desc);
1484 }
1485 EXPORT_SYMBOL(free_irq);
1486
1487 /**
1488  *      request_threaded_irq - allocate an interrupt line
1489  *      @irq: Interrupt line to allocate
1490  *      @handler: Function to be called when the IRQ occurs.
1491  *                Primary handler for threaded interrupts
1492  *                If NULL and thread_fn != NULL the default
1493  *                primary handler is installed
1494  *      @thread_fn: Function called from the irq handler thread
1495  *                  If NULL, no irq thread is created
1496  *      @irqflags: Interrupt type flags
1497  *      @devname: An ascii name for the claiming device
1498  *      @dev_id: A cookie passed back to the handler function
1499  *
1500  *      This call allocates interrupt resources and enables the
1501  *      interrupt line and IRQ handling. From the point this
1502  *      call is made your handler function may be invoked. Since
1503  *      your handler function must clear any interrupt the board
1504  *      raises, you must take care both to initialise your hardware
1505  *      and to set up the interrupt handler in the right order.
1506  *
1507  *      If you want to set up a threaded irq handler for your device
1508  *      then you need to supply @handler and @thread_fn. @handler is
1509  *      still called in hard interrupt context and has to check
1510  *      whether the interrupt originates from the device. If yes it
1511  *      needs to disable the interrupt on the device and return
1512  *      IRQ_WAKE_THREAD which will wake up the handler thread and run
1513  *      @thread_fn. This split handler design is necessary to support
1514  *      shared interrupts.
1515  *
1516  *      Dev_id must be globally unique. Normally the address of the
1517  *      device data structure is used as the cookie. Since the handler
1518  *      receives this value it makes sense to use it.
1519  *
1520  *      If your interrupt is shared you must pass a non NULL dev_id
1521  *      as this is required when freeing the interrupt.
1522  *
1523  *      Flags:
1524  *
1525  *      IRQF_SHARED             Interrupt is shared
1526  *      IRQF_TRIGGER_*          Specify active edge(s) or level
1527  *
1528  */
1529 int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1530                          irq_handler_t thread_fn, unsigned long irqflags,
1531                          const char *devname, void *dev_id)
1532 {
1533         struct irqaction *action;
1534         struct irq_desc *desc;
1535         int retval;
1536
1537         /*
1538          * Sanity-check: shared interrupts must pass in a real dev-ID,
1539          * otherwise we'll have trouble later trying to figure out
1540          * which interrupt is which (messes up the interrupt freeing
1541          * logic etc).
1542          *
1543          * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
1544          * it cannot be set along with IRQF_NO_SUSPEND.
1545          */
1546         if (((irqflags & IRQF_SHARED) && !dev_id) ||
1547             (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
1548             ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
1549                 return -EINVAL;
1550
1551         desc = irq_to_desc(irq);
1552         if (!desc)
1553                 return -EINVAL;
1554
1555         if (!irq_settings_can_request(desc) ||
1556             WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1557                 return -EINVAL;
1558
1559         if (!handler) {
1560                 if (!thread_fn)
1561                         return -EINVAL;
1562                 handler = irq_default_primary_handler;
1563         }
1564
1565         action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1566         if (!action)
1567                 return -ENOMEM;
1568
1569         action->handler = handler;
1570         action->thread_fn = thread_fn;
1571         action->flags = irqflags;
1572         action->name = devname;
1573         action->dev_id = dev_id;
1574
1575         chip_bus_lock(desc);
1576         retval = __setup_irq(irq, desc, action);
1577         chip_bus_sync_unlock(desc);
1578
1579         if (retval)
1580                 kfree(action);
1581
1582 #ifdef CONFIG_DEBUG_SHIRQ_FIXME
1583         if (!retval && (irqflags & IRQF_SHARED)) {
1584                 /*
1585                  * It's a shared IRQ -- the driver ought to be prepared for it
1586                  * to happen immediately, so let's make sure....
1587                  * We disable the irq to make sure that a 'real' IRQ doesn't
1588                  * run in parallel with our fake.
1589                  */
1590                 unsigned long flags;
1591
1592                 disable_irq(irq);
1593                 local_irq_save(flags);
1594
1595                 handler(irq, dev_id);
1596
1597                 local_irq_restore(flags);
1598                 enable_irq(irq);
1599         }
1600 #endif
1601         return retval;
1602 }
1603 EXPORT_SYMBOL(request_threaded_irq);
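
/*
 * Usage sketch (illustrative only, kept out of the build): a primary/threaded
 * handler pair for a hypothetical "foo" device, following the split described
 * above.  struct foo_dev, the FOO_* register offsets and foo_handle_fifo()
 * are assumptions made for this sketch.
 */
#if 0
static irqreturn_t foo_primary(int irq, void *dev_id)
{
        struct foo_dev *foo = dev_id;

        /* The line may be shared: check that our device really fired. */
        if (!(readl(foo->regs + FOO_STAT) & FOO_STAT_PENDING))
                return IRQ_NONE;

        /* Quiesce the device, then defer the slow work to the thread. */
        writel(0, foo->regs + FOO_IRQ_EN);
        return IRQ_WAKE_THREAD;
}

static irqreturn_t foo_thread(int irq, void *dev_id)
{
        struct foo_dev *foo = dev_id;

        foo_handle_fifo(foo);                   /* may sleep */
        writel(FOO_IRQ_EN_ALL, foo->regs + FOO_IRQ_EN);
        return IRQ_HANDLED;
}

static int foo_setup_irq(struct foo_dev *foo)
{
        /* foo doubles as the dev_id cookie, as suggested above. */
        return request_threaded_irq(foo->irq, foo_primary, foo_thread,
                                    IRQF_SHARED, "foo", foo);
}
#endif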
1604
1605 /**
1606  *      request_any_context_irq - allocate an interrupt line
1607  *      @irq: Interrupt line to allocate
1608  *      @handler: Function to be called when the IRQ occurs.
1609  *                Threaded handler for threaded interrupts.
1610  *      @flags: Interrupt type flags
1611  *      @name: An ascii name for the claiming device
1612  *      @dev_id: A cookie passed back to the handler function
1613  *
1614  *      This call allocates interrupt resources and enables the
1615  *      interrupt line and IRQ handling. It selects either a
1616  *      hardirq or threaded handling method depending on the
1617  *      context.
1618  *
1619  *      On failure, it returns a negative value. On success,
1620  *      it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
1621  */
1622 int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1623                             unsigned long flags, const char *name, void *dev_id)
1624 {
1625         struct irq_desc *desc = irq_to_desc(irq);
1626         int ret;
1627
1628         if (!desc)
1629                 return -EINVAL;
1630
1631         if (irq_settings_is_nested_thread(desc)) {
1632                 ret = request_threaded_irq(irq, NULL, handler,
1633                                            flags, name, dev_id);
1634                 return !ret ? IRQC_IS_NESTED : ret;
1635         }
1636
1637         ret = request_irq(irq, handler, flags, name, dev_id);
1638         return !ret ? IRQC_IS_HARDIRQ : ret;
1639 }
1640 EXPORT_SYMBOL_GPL(request_any_context_irq);
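
/*
 * Usage sketch (illustrative only, kept out of the build): a driver that does
 * not know whether its line is a real hardirq or a nested thread (e.g. behind
 * a slow-bus irqchip) can use request_any_context_irq() and only check for a
 * negative return value.  The foo_* names are assumptions made for this sketch.
 */
#if 0
static irqreturn_t foo_handler(int irq, void *dev_id)
{
        struct foo_dev *foo = dev_id;

        /* May run in hard interrupt context, so it must not sleep. */
        foo_ack_and_handle(foo);
        return IRQ_HANDLED;
}

static int foo_setup_irq(struct foo_dev *foo)
{
        int ret;

        ret = request_any_context_irq(foo->irq, foo_handler, 0, "foo", foo);
        if (ret < 0)
                return ret;

        /* ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED; both mean success. */
        return 0;
}
#endif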
1641
1642 void enable_percpu_irq(unsigned int irq, unsigned int type)
1643 {
1644         unsigned int cpu = smp_processor_id();
1645         unsigned long flags;
1646         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1647
1648         if (!desc)
1649                 return;
1650
1651         type &= IRQ_TYPE_SENSE_MASK;
1652         if (type != IRQ_TYPE_NONE) {
1653                 int ret;
1654
1655                 ret = __irq_set_trigger(desc, type);
1656
1657                 if (ret) {
1658                         WARN(1, "failed to set type for IRQ%d\n", irq);
1659                         goto out;
1660                 }
1661         }
1662
1663         irq_percpu_enable(desc, cpu);
1664 out:
1665         irq_put_desc_unlock(desc, flags);
1666 }
1667 EXPORT_SYMBOL_GPL(enable_percpu_irq);
1668
1669 void disable_percpu_irq(unsigned int irq)
1670 {
1671         unsigned int cpu = smp_processor_id();
1672         unsigned long flags;
1673         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1674
1675         if (!desc)
1676                 return;
1677
1678         irq_percpu_disable(desc, cpu);
1679         irq_put_desc_unlock(desc, flags);
1680 }
1681 EXPORT_SYMBOL_GPL(disable_percpu_irq);
1682
1683 /*
1684  * Internal function to unregister a percpu irqaction.
1685  */
1686 static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1687 {
1688         struct irq_desc *desc = irq_to_desc(irq);
1689         struct irqaction *action;
1690         unsigned long flags;
1691
1692         WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1693
1694         if (!desc)
1695                 return NULL;
1696
1697         raw_spin_lock_irqsave(&desc->lock, flags);
1698
1699         action = desc->action;
1700         if (!action || action->percpu_dev_id != dev_id) {
1701                 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1702                 goto bad;
1703         }
1704
1705         if (!cpumask_empty(desc->percpu_enabled)) {
1706                 WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
1707                      irq, cpumask_first(desc->percpu_enabled));
1708                 goto bad;
1709         }
1710
1711         /* Found it - now remove it from the list of entries: */
1712         desc->action = NULL;
1713
1714         raw_spin_unlock_irqrestore(&desc->lock, flags);
1715
1716         unregister_handler_proc(irq, action);
1717
1718         module_put(desc->owner);
1719         return action;
1720
1721 bad:
1722         raw_spin_unlock_irqrestore(&desc->lock, flags);
1723         return NULL;
1724 }
1725
1726 /**
1727  *      remove_percpu_irq - free a per-cpu interrupt
1728  *      @irq: Interrupt line to free
1729  *      @act: irqaction for the interrupt
1730  *
1731  * Used to remove interrupts statically set up by the early boot process.
1732  */
1733 void remove_percpu_irq(unsigned int irq, struct irqaction *act)
1734 {
1735         struct irq_desc *desc = irq_to_desc(irq);
1736
1737         if (desc && irq_settings_is_per_cpu_devid(desc))
1738                 __free_percpu_irq(irq, act->percpu_dev_id);
1739 }
1740
1741 /**
1742  *      free_percpu_irq - free an interrupt allocated with request_percpu_irq
1743  *      @irq: Interrupt line to free
1744  *      @dev_id: Device identity to free
1745  *
1746  *      Remove a percpu interrupt handler. The handler is removed, but
1747  *      the interrupt line is not disabled. This must be done on each
1748  *      CPU before calling this function. The function does not return
1749  *      until any executing interrupts for this IRQ have completed.
1750  *
1751  *      This function must not be called from interrupt context.
1752  */
1753 void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1754 {
1755         struct irq_desc *desc = irq_to_desc(irq);
1756
1757         if (!desc || !irq_settings_is_per_cpu_devid(desc))
1758                 return;
1759
1760         chip_bus_lock(desc);
1761         kfree(__free_percpu_irq(irq, dev_id));
1762         chip_bus_sync_unlock(desc);
1763 }
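
/*
 * Teardown sketch (illustrative only, kept out of the build): a per-cpu
 * interrupt is disabled on every CPU first, then the handler is released.
 * foo_irq and foo_pcpu_data are assumed to have been set up as in the
 * request_percpu_irq() sketch further down.
 */
#if 0
static void foo_disable_local(void *info)
{
        disable_percpu_irq(foo_irq);    /* acts on the calling CPU only */
}

static void foo_percpu_teardown(void)
{
        on_each_cpu(foo_disable_local, NULL, 1);
        free_percpu_irq(foo_irq, &foo_pcpu_data);
}
#endif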
1764
1765 /**
1766  *      setup_percpu_irq - setup a per-cpu interrupt
1767  *      @irq: Interrupt line to setup
1768  *      @act: irqaction for the interrupt
1769  *
1770  * Used to statically set up per-cpu interrupts in the early boot process.
1771  */
1772 int setup_percpu_irq(unsigned int irq, struct irqaction *act)
1773 {
1774         struct irq_desc *desc = irq_to_desc(irq);
1775         int retval;
1776
1777         if (!desc || !irq_settings_is_per_cpu_devid(desc))
1778                 return -EINVAL;
1779         chip_bus_lock(desc);
1780         retval = __setup_irq(irq, desc, act);
1781         chip_bus_sync_unlock(desc);
1782
1783         return retval;
1784 }
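
/*
 * Usage sketch (illustrative only, kept out of the build): early boot code can
 * install a per-cpu handler through a statically allocated irqaction instead
 * of going through request_percpu_irq().  foo_timer_interrupt, foo_timer_events
 * and FOO_TIMER_IRQ are assumptions made for this sketch.
 */
#if 0
static struct irqaction foo_timer_irqaction = {
        .handler        = foo_timer_interrupt,
        .flags          = IRQF_PERCPU | IRQF_TIMER,
        .name           = "foo_timer",
        .percpu_dev_id  = &foo_timer_events,
};

void __init foo_timer_init(void)
{
        setup_percpu_irq(FOO_TIMER_IRQ, &foo_timer_irqaction);
}
#endif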
1785
1786 /**
1787  *      request_percpu_irq - allocate a percpu interrupt line
1788  *      @irq: Interrupt line to allocate
1789  *      @handler: Function to be called when the IRQ occurs.
1790  *      @devname: An ascii name for the claiming device
1791  *      @dev_id: A percpu cookie passed back to the handler function
1792  *
1793  *      This call allocates interrupt resources, but doesn't
1794  *      automatically enable the interrupt. It has to be done on each
1795  *      CPU using enable_percpu_irq().
1796  *
1797  *      Dev_id must be globally unique. It is a per-cpu variable, and
1798  *      the handler gets called with the interrupted CPU's instance of
1799  *      that variable.
1800  */
1801 int request_percpu_irq(unsigned int irq, irq_handler_t handler,
1802                        const char *devname, void __percpu *dev_id)
1803 {
1804         struct irqaction *action;
1805         struct irq_desc *desc;
1806         int retval;
1807
1808         if (!dev_id)
1809                 return -EINVAL;
1810
1811         desc = irq_to_desc(irq);
1812         if (!desc || !irq_settings_can_request(desc) ||
1813             !irq_settings_is_per_cpu_devid(desc))
1814                 return -EINVAL;
1815
1816         action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1817         if (!action)
1818                 return -ENOMEM;
1819
1820         action->handler = handler;
1821         action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
1822         action->name = devname;
1823         action->percpu_dev_id = dev_id;
1824
1825         chip_bus_lock(desc);
1826         retval = __setup_irq(irq, desc, action);
1827         chip_bus_sync_unlock(desc);
1828
1829         if (retval)
1830                 kfree(action);
1831
1832         return retval;
1833 }
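
/*
 * Usage sketch (illustrative only, kept out of the build): allocate a per-cpu
 * interrupt once, then enable it on each CPU, typically from a CPU bring-up
 * path.  struct foo_pcpu, foo_handle_event() and the source of foo_irq are
 * assumptions made for this sketch.
 */
#if 0
static unsigned int foo_irq;            /* obtained from DT/platform code */
static DEFINE_PER_CPU(struct foo_pcpu, foo_pcpu_data);

static irqreturn_t foo_percpu_handler(int irq, void *dev_id)
{
        /* dev_id is the interrupted CPU's instance of foo_pcpu_data. */
        struct foo_pcpu *pcpu = dev_id;

        foo_handle_event(pcpu);
        return IRQ_HANDLED;
}

static int foo_percpu_init(void)
{
        int ret;

        ret = request_percpu_irq(foo_irq, foo_percpu_handler, "foo",
                                 &foo_pcpu_data);
        if (ret)
                return ret;

        /* Enables the local CPU only; repeat on every CPU as it comes up. */
        enable_percpu_irq(foo_irq, IRQ_TYPE_NONE);
        return 0;
}
#endif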
1834
1835 /**
1836  *      irq_get_irqchip_state - returns the irqchip state of an interrupt.
1837  *      @irq: Interrupt line that is forwarded to a VM
1838  *      @which: One of IRQCHIP_STATE_* the caller wants to know about
1839  *      @state: a pointer to a boolean where the state is to be stored
1840  *
1841  *      This call snapshots the internal irqchip state of an
1842  *      interrupt, returning into @state the bit corresponding to
1843  *      state @which.
1844  *
1845  *      This function should be called with preemption disabled if the
1846  *      interrupt controller has per-cpu registers.
1847  */
1848 int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
1849                           bool *state)
1850 {
1851         struct irq_desc *desc;
1852         struct irq_data *data;
1853         struct irq_chip *chip;
1854         unsigned long flags;
1855         int err = -EINVAL;
1856
1857         desc = irq_get_desc_buslock(irq, &flags, 0);
1858         if (!desc)
1859                 return err;
1860
1861         data = irq_desc_get_irq_data(desc);
1862
1863         do {
1864                 chip = irq_data_get_irq_chip(data);
1865                 if (chip->irq_get_irqchip_state)
1866                         break;
1867 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1868                 data = data->parent_data;
1869 #else
1870                 data = NULL;
1871 #endif
1872         } while (data);
1873
1874         if (data)
1875                 err = chip->irq_get_irqchip_state(data, which, state);
1876
1877         irq_put_desc_busunlock(desc, flags);
1878         return err;
1879 }
1880 EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
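
/*
 * Usage sketch (illustrative only, kept out of the build): a hypervisor-style
 * user of a forwarded interrupt snapshots whether the line is pending at the
 * irqchip level.  host_irq and the pr_debug() message are assumptions made
 * for this sketch.
 */
#if 0
static void foo_report_pending(unsigned int host_irq)
{
        bool pending = false;

        if (!irq_get_irqchip_state(host_irq, IRQCHIP_STATE_PENDING, &pending))
                pr_debug("IRQ %u pending at irqchip: %d\n", host_irq, pending);
}
#endif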
1881
1882 /**
1883  *      irq_set_irqchip_state - set the state of a forwarded interrupt.
1884  *      @irq: Interrupt line that is forwarded to a VM
1885  *      @which: State to be restored (one of IRQCHIP_STATE_*)
1886  *      @val: Value corresponding to @which
1887  *
1888  *      This call sets the internal irqchip state of an interrupt,
1889  *      depending on the value of @which.
1890  *
1891  *      This function should be called with preemption disabled if the
1892  *      interrupt controller has per-cpu registers.
1893  */
1894 int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
1895                           bool val)
1896 {
1897         struct irq_desc *desc;
1898         struct irq_data *data;
1899         struct irq_chip *chip;
1900         unsigned long flags;
1901         int err = -EINVAL;
1902
1903         desc = irq_get_desc_buslock(irq, &flags, 0);
1904         if (!desc)
1905                 return err;
1906
1907         data = irq_desc_get_irq_data(desc);
1908
1909         do {
1910                 chip = irq_data_get_irq_chip(data);
1911                 if (chip->irq_set_irqchip_state)
1912                         break;
1913 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1914                 data = data->parent_data;
1915 #else
1916                 data = NULL;
1917 #endif
1918         } while (data);
1919
1920         if (data)
1921                 err = chip->irq_set_irqchip_state(data, which, val);
1922
1923         irq_put_desc_busunlock(desc, flags);
1924         return err;
1925 }
1926 EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
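
/*
 * Usage sketch (illustrative only, kept out of the build): when restoring a
 * forwarded interrupt (e.g. after migrating a VM), previously saved pending
 * state can be written back into the irqchip.  The foo_* names are
 * assumptions made for this sketch.
 */
#if 0
static int foo_restore_forwarded_irq(unsigned int host_irq, bool was_pending)
{
        return irq_set_irqchip_state(host_irq, IRQCHIP_STATE_PENDING,
                                     was_pending);
}
#endif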