/*
 * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>

#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>

#include "irq-gic-common.h"
struct redist_region {
	void __iomem		*redist_base;
	phys_addr_t		phys_base;
};
struct gic_chip_data {
	void __iomem		*dist_base;
	struct redist_region	*redist_regions;
	struct rdists		rdists;
	struct irq_domain	*domain;
	u64			redist_stride;
	u32			nr_redist_regions;
	unsigned int		irq_nr;
};
static struct gic_chip_data gic_data __read_mostly;
static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE;
#define gic_data_rdist()		(this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base()	(gic_data_rdist_rd_base() + SZ_64K)
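/*
 * Each redistributor is made of (at least) two 64kB frames: RD_base,
 * holding the control registers (GICR_CTLR, GICR_WAKER, GICR_TYPER...),
 * immediately followed by SGI_base, holding the SGI/PPI configuration
 * registers. Hence the fixed SZ_64K offset above.
 */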
/* Our default, arbitrary priority value. Linux only uses one anyway. */
#define DEFAULT_PMR_VALUE	0xf0
static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}
static inline int gic_irq_in_rdist(struct irq_data *d)
{
	return gic_irq(d) < 32;
}
static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	if (gic_irq_in_rdist(d))	/* SGI+PPI -> SGI_base for this CPU */
		return gic_data_rdist_sgi_base();

	if (d->hwirq <= 1023)		/* SPI -> dist_base */
		return gic_data.dist_base;

	return NULL;
}
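/*
 * GICD_CTLR.RWP ("Register Write Pending") reads as 1 while the effects
 * of earlier configuration writes are still propagating through the GIC;
 * software must poll it until it clears. The poll below is bounded by a
 * ~1s timeout rather than spinning forever.
 */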
static void gic_do_wait_for_rwp(void __iomem *base)
{
	u32 count = 1000000;	/* 1s! */

	while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
		count--;
		if (!count) {
			pr_err_ratelimited("RWP timeout, gone fishing\n");
			return;
		}
		cpu_relax();
		udelay(1);
	}
}
/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data.dist_base);
}
/* Wait for completion of a redistributor change */
static void gic_redist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data_rdist_rd_base());
}
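/*
 * Cavium ThunderX erratum 23154: reads of ICC_IAR1_EL1 need a special
 * access sequence on the affected parts. The static branch below is
 * flipped at boot by gicv3_enable_quirks() when the
 * ARM64_WORKAROUND_CAVIUM_23154 capability is detected.
 */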
#ifdef CONFIG_ARM64
static DEFINE_STATIC_KEY_FALSE(is_cavium_thunderx);

static u64 __maybe_unused gic_read_iar(void)
{
	if (static_branch_unlikely(&is_cavium_thunderx))
		return gic_read_iar_cavium_thunderx();

	return gic_read_iar_common();
}
#endif
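/*
 * GICR_WAKER handshake: to wake the redistributor, clear ProcessorSleep
 * and wait for ChildrenAsleep to read as 0; to put it to sleep, set
 * ProcessorSleep and wait for ChildrenAsleep to read as 1.
 */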
static void gic_enable_redist(bool enable)
{
	void __iomem *rbase;
	u32 count = 1000000;	/* 1s! */
	u32 val;

	rbase = gic_data_rdist_rd_base();

	val = readl_relaxed(rbase + GICR_WAKER);
	if (enable)
		/* Wake up this CPU redistributor */
		val &= ~GICR_WAKER_ProcessorSleep;
	else
		val |= GICR_WAKER_ProcessorSleep;
	writel_relaxed(val, rbase + GICR_WAKER);

	if (!enable) {		/* Check that GICR_WAKER is writeable */
		val = readl_relaxed(rbase + GICR_WAKER);
		if (!(val & GICR_WAKER_ProcessorSleep))
			return;	/* No PM support in this redistributor */
	}

	while (--count) {
		val = readl_relaxed(rbase + GICR_WAKER);
		/* (bool) cast matters: ChildrenAsleep is not bit 0 */
		if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
			break;
		cpu_relax();
		udelay(1);
	}
	if (!count)
		pr_err_ratelimited("redistributor failed to %s...\n",
				   enable ? "wakeup" : "sleep");
}
/*
 * Routines to disable, enable, EOI and route interrupts
 */
static int gic_peek_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	void __iomem *base;

	if (gic_irq_in_rdist(d))
		base = gic_data_rdist_sgi_base();
	else
		base = gic_data.dist_base;

	return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
}
static void gic_poke_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	void (*rwp_wait)(void);
	void __iomem *base;

	if (gic_irq_in_rdist(d)) {
		base = gic_data_rdist_sgi_base();
		rwp_wait = gic_redist_wait_for_rwp;
	} else {
		base = gic_data.dist_base;
		rwp_wait = gic_dist_wait_for_rwp;
	}

	writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4);
	rwp_wait();
}
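/*
 * Enable/pending/active state lives in banks of 32-bit set/clear
 * register pairs, one bit per INTID: writing 1 to a GICD_IS* register
 * sets the bit, writing 1 to the matching GICD_IC* register clears it,
 * and 0 bits are ignored. Masking therefore needs no read-modify-write
 * cycle, just a single poke of the right register.
 */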
static void gic_mask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ICENABLER);
}
static void gic_eoimode1_mask_irq(struct irq_data *d)
{
	gic_mask_irq(d);

	/*
	 * When masking a forwarded interrupt, make sure it is
	 * deactivated as well.
	 *
	 * This ensures that an interrupt that is getting
	 * disabled/masked will not get "stuck", because there is
	 * no one to deactivate it (the guest is being terminated).
	 */
	if (irqd_is_forwarded_to_vcpu(d))
		gic_poke_irq(d, GICD_ICACTIVER);
}
static void gic_unmask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ISENABLER);
}
static int gic_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool val)
{
	u32 reg;

	if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */
		return -EINVAL;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		reg = val ? GICD_ISPENDR : GICD_ICPENDR;
		break;

	case IRQCHIP_STATE_ACTIVE:
		reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
		break;

	case IRQCHIP_STATE_MASKED:
		reg = val ? GICD_ICENABLER : GICD_ISENABLER;
		break;

	default:
		return -EINVAL;
	}

	gic_poke_irq(d, reg);
	return 0;
}
static int gic_irq_get_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool *val)
{
	if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */
		return -EINVAL;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		*val = gic_peek_irq(d, GICD_ISPENDR);
		break;

	case IRQCHIP_STATE_ACTIVE:
		*val = gic_peek_irq(d, GICD_ISACTIVER);
		break;

	case IRQCHIP_STATE_MASKED:
		*val = !gic_peek_irq(d, GICD_ISENABLER);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
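/*
 * Two EOI flavours: with ICC_CTLR_EL1.EOImode == 0, writing
 * ICC_EOIR1_EL1 both drops the running priority and deactivates the
 * interrupt. With EOImode == 1 (the "split" mode selected when
 * supports_deactivate is true), EOIR only drops the priority and a
 * separate write to ICC_DIR_EL1 deactivates, so a forwarded interrupt
 * can be left active for the guest to deactivate itself.
 */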
static void gic_eoi_irq(struct irq_data *d)
{
	gic_write_eoir(gic_irq(d));
}
static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
	/*
	 * No need to deactivate an LPI, or an interrupt that
	 * is getting forwarded to a vcpu.
	 */
	if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
		return;
	gic_write_dir(gic_irq(d));
}
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = gic_irq(d);
	void (*rwp_wait)(void);
	void __iomem *base;

	/* Interrupt configuration for SGIs can't be changed */
	if (irq < 16)
		return -EINVAL;

	/* SPIs have restrictions on the supported types */
	if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
			 type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	if (gic_irq_in_rdist(d)) {
		base = gic_data_rdist_sgi_base();
		rwp_wait = gic_redist_wait_for_rwp;
	} else {
		base = gic_data.dist_base;
		rwp_wait = gic_dist_wait_for_rwp;
	}

	return gic_configure_irq(irq, type, base, rwp_wait);
}
static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	if (vcpu)
		irqd_set_forwarded_to_vcpu(d);
	else
		irqd_clr_forwarded_to_vcpu(d);
	return 0;
}
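/*
 * GICD_IROUTER packs the target MPIDR affinity fields as Aff3 in bits
 * [39:32], Aff2 in [23:16], Aff1 in [15:8] and Aff0 in [7:0], hence
 * the shifts below.
 */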
static u64 gic_mpidr_to_affinity(u64 mpidr)
{
	u64 aff;

	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	return aff;
}
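/*
 * INTID space as read from ICC_IAR1_EL1: 0-15 are SGIs, 16-31 PPIs,
 * 32-1019 SPIs, 1020-1023 are special (1023 means "spurious"), and
 * 8192 and up are LPIs.
 */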
static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u64 irqnr;

	do {
		irqnr = gic_read_iar();

		if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
			int err;

			if (static_key_true(&supports_deactivate))
				gic_write_eoir(irqnr);

			err = handle_domain_irq(gic_data.domain, irqnr, regs);
			if (err) {
				WARN_ONCE(true, "Unexpected interrupt received!\n");
				if (static_key_true(&supports_deactivate)) {
					if (irqnr < 8192)
						gic_write_dir(irqnr);
				} else {
					gic_write_eoir(irqnr);
				}
			}
			continue;
		}
		if (irqnr < 16) {
			gic_write_eoir(irqnr);
			if (static_key_true(&supports_deactivate))
				gic_write_dir(irqnr);
#ifdef CONFIG_SMP
			handle_IPI(irqnr, regs);
#else
			WARN_ONCE(true, "Unexpected SGI received!\n");
#endif
			continue;
		}
	} while (irqnr != ICC_IAR1_EL1_SPURIOUS);
}
static void __init gic_dist_init(void)
{
	unsigned int i;
	u64 affinity;
	void __iomem *base = gic_data.dist_base;

	/* Disable the distributor */
	writel_relaxed(0, base + GICD_CTLR);
	gic_dist_wait_for_rwp();

	gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);

	/* Enable distributor with ARE, Group1 */
	writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
		       base + GICD_CTLR);

	/*
	 * Set all global interrupts to the boot CPU only. ARE must be
	 * enabled.
	 */
	affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
	for (i = 32; i < gic_data.irq_nr; i++)
		writeq_relaxed(affinity, base + GICD_IROUTER + i * 8);
}
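/*
 * Walk every redistributor region until we find the frame owned by
 * this CPU: GICR_TYPER[63:32] holds the owning PE's affinity, and
 * GICR_TYPER.Last marks the final redistributor of a region.
 */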
static int gic_populate_rdist(void)
{
	u64 mpidr = cpu_logical_map(smp_processor_id());
	u64 typer;
	u32 aff;
	int i;

	/*
	 * Convert affinity to a 32bit value that can be matched to
	 * GICR_TYPER bits [63:32].
	 */
	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	for (i = 0; i < gic_data.nr_redist_regions; i++) {
		void __iomem *ptr = gic_data.redist_regions[i].redist_base;
		u32 reg;

		reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
		if (reg != GIC_PIDR2_ARCH_GICv3 &&
		    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
			pr_warn("No redistributor present @%p\n", ptr);
			break;
		}

		do {
			typer = readq_relaxed(ptr + GICR_TYPER);
			if ((typer >> 32) == aff) {
				u64 offset = ptr - gic_data.redist_regions[i].redist_base;
				gic_data_rdist_rd_base() = ptr;
				gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset;
				pr_info("CPU%d: found redistributor %llx region %d:%pa\n",
					smp_processor_id(),
					(unsigned long long)mpidr,
					i, &gic_data_rdist()->phys_base);
				return 0;
			}

			if (gic_data.redist_stride) {
				ptr += gic_data.redist_stride;
			} else {
				ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
				if (typer & GICR_TYPER_VLPIS)
					ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
			}
		} while (!(typer & GICR_TYPER_LAST));
	}

	/* We couldn't even deal with ourselves... */
	WARN(true, "CPU%d: mpidr %llx has no re-distributor!\n",
	     smp_processor_id(), (unsigned long long)mpidr);
	return -ENODEV;
}
static void gic_cpu_sys_reg_init(void)
{
	/*
	 * Need to check that the SRE bit has actually been set. If
	 * not, it means that SRE is disabled at EL2. We're going to
	 * die painfully, and there is nothing we can do about it.
	 *
	 * Kindly inform the luser.
	 */
	if (!gic_enable_sre())
		pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");

	/* Set priority mask register */
	gic_write_pmr(DEFAULT_PMR_VALUE);

	if (static_key_true(&supports_deactivate)) {
		/* EOI drops priority only (mode 1) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
	} else {
		/* EOI deactivates interrupt too (mode 0) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
	}

	/* ... and let's hit the road... */
	gic_write_grpen1(1);
}
static int gic_dist_supports_lpis(void)
{
	return !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS);
}
static void gic_cpu_init(void)
{
	void __iomem *rbase;

	/* Register ourselves with the rest of the world */
	if (gic_populate_rdist())
		return;

	gic_enable_redist(true);

	rbase = gic_data_rdist_sgi_base();

	gic_cpu_config(rbase, gic_redist_wait_for_rwp);

	/* Give LPIs a spin */
	if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
		its_cpu_init();

	/* initialise system registers */
	gic_cpu_sys_reg_init();
}
#ifdef CONFIG_SMP
static int gic_secondary_init(struct notifier_block *nfb,
			      unsigned long action, void *hcpu)
{
	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
		gic_cpu_init();
	return NOTIFY_OK;
}
/*
 * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
 * priority because the GIC needs to be up before the ARM generic timers.
 */
static struct notifier_block gic_cpu_notifier = {
	.notifier_call = gic_secondary_init,
	.priority = 100,
};
static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
				   u64 cluster_id)
{
	int cpu = *base_cpu;
	u64 mpidr = cpu_logical_map(cpu);
	u16 tlist = 0;

	while (cpu < nr_cpu_ids) {
		/*
		 * If we ever get a cluster of more than 16 CPUs, just
		 * scream and skip that CPU.
		 */
		if (WARN_ON((mpidr & 0xff) >= 16))
			goto out;

		tlist |= 1 << (mpidr & 0xf);

		cpu = cpumask_next(cpu, mask);
		if (cpu >= nr_cpu_ids)
			goto out;

		mpidr = cpu_logical_map(cpu);

		if (cluster_id != (mpidr & ~0xffUL)) {
			cpu--;
			goto out;
		}
	}
out:
	*base_cpu = cpu;
	return tlist;
}
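/*
 * A single ICC_SGI1R_EL1 write addresses one cluster (Aff3.Aff2.Aff1)
 * plus a 16-bit target list of Aff0 values, so it reaches at most 16
 * CPUs. gic_compute_target_list() above slices the cpumask into
 * per-cluster chunks for gic_send_sgi() below.
 */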
#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
	(MPIDR_AFFINITY_LEVEL(cluster_id, level) \
		<< ICC_SGI1R_AFFINITY_## level ##_SHIFT)
static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
{
	u64 val;

	val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3)	|
	       MPIDR_TO_SGI_AFFINITY(cluster_id, 2)	|
	       irq << ICC_SGI1R_SGI_ID_SHIFT		|
	       MPIDR_TO_SGI_AFFINITY(cluster_id, 1)	|
	       tlist << ICC_SGI1R_TARGET_LIST_SHIFT);

	pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
	gic_write_sgi1r(val);
}
static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	int cpu;

	if (WARN_ON(irq >= 16))
		return;

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	smp_wmb();

	for_each_cpu(cpu, mask) {
		u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL;
		u16 tlist;

		tlist = gic_compute_target_list(&cpu, mask, cluster_id);
		gic_send_sgi(cluster_id, tlist, irq);
	}

	/* Force the above writes to ICC_SGI1R_EL1 to be executed */
	isb();
}
604 set_smp_cross_call(gic_raise_softirq);
605 register_cpu_notifier(&gic_cpu_notifier);
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
	void __iomem *reg;
	int enabled;
	u64 val;

	if (gic_irq_in_rdist(d))
		return -EINVAL;

	/* If interrupt was enabled, disable it first */
	enabled = gic_peek_irq(d, GICD_ISENABLER);
	if (enabled)
		gic_mask_irq(d);

	reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8);
	val = gic_mpidr_to_affinity(cpu_logical_map(cpu));

	writeq_relaxed(val, reg);

	/*
	 * If the interrupt was enabled, enable it again. Otherwise,
	 * just wait for the distributor to have digested our changes.
	 */
	if (enabled)
		gic_unmask_irq(d);
	else
		gic_dist_wait_for_rwp();

	return IRQ_SET_MASK_OK;
}
#else
#define gic_set_affinity	NULL
#define gic_smp_init()		do { } while(0)
#endif
#ifdef CONFIG_CPU_PM
static int gic_cpu_pm_notifier(struct notifier_block *self,
			       unsigned long cmd, void *v)
{
	if (cmd == CPU_PM_EXIT) {
		gic_enable_redist(true);
		gic_cpu_sys_reg_init();
	} else if (cmd == CPU_PM_ENTER) {
		gic_write_grpen1(0);
		gic_enable_redist(false);
	}
	return NOTIFY_OK;
}
static struct notifier_block gic_cpu_pm_notifier_block = {
	.notifier_call = gic_cpu_pm_notifier,
};

static void gic_cpu_pm_init(void)
{
	cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
}
#else
static inline void gic_cpu_pm_init(void) { }
#endif /* CONFIG_CPU_PM */
static struct irq_chip gic_chip = {
	.name			= "GICv3",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_set_affinity	= gic_set_affinity,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.flags			= IRQCHIP_SET_TYPE_MASKED,
};
static struct irq_chip gic_eoimode1_chip = {
	.name			= "GICv3",
	.irq_mask		= gic_eoimode1_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoimode1_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_set_affinity	= gic_set_affinity,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.irq_set_vcpu_affinity	= gic_irq_set_vcpu_affinity,
	.flags			= IRQCHIP_SET_TYPE_MASKED,
};
/* Number of distinct INTIDs supported, derived from GICD_TYPER.IDbits */
#define GIC_ID_NR		(1U << gic_data.rdists.id_bits)
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
	struct irq_chip *chip = &gic_chip;

	if (static_key_true(&supports_deactivate))
		chip = &gic_eoimode1_chip;

	/* SGIs are private to the core kernel */
	if (hw < 16)
		return -EPERM;
	/* Nothing here */
	if (hw >= gic_data.irq_nr && hw < 8192)
		return -EPERM;
	/* Off limits */
	if (hw >= GIC_ID_NR)
		return -EPERM;

	/* PPIs */
	if (hw < 32) {
		irq_set_percpu_devid(irq);
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
		irq_set_status_flags(irq, IRQ_NOAUTOEN);
	}
	/* SPIs */
	if (hw >= 32 && hw < gic_data.irq_nr) {
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		irq_set_probe(irq);
	}
	/* LPIs */
	if (hw >= 8192 && hw < GIC_ID_NR) {
		if (!gic_dist_supports_lpis())
			return -EPERM;
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
	}

	return 0;
}
static int gic_irq_domain_xlate(struct irq_domain *d,
				struct device_node *controller,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq, unsigned int *out_type)
{
	if (d->of_node != controller)
		return -EINVAL;
	if (intsize < 3)
		return -EINVAL;

	switch (intspec[0]) {
	case 0:			/* SPI */
		*out_hwirq = intspec[1] + 32;
		break;
	case 1:			/* PPI */
		*out_hwirq = intspec[1] + 16;
		break;
	case GIC_IRQ_TYPE_LPI:	/* LPI */
		*out_hwirq = intspec[1];
		break;
	default:
		return -EINVAL;
	}

	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
	return 0;
}
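/*
 * Worked example (hypothetical device, not from this file): a node with
 * "interrupts = <0 23 4>" requests SPI 23, level-high. The xlate above
 * returns hwirq 23 + 32 = 55 and IRQ_TYPE_LEVEL_HIGH.
 */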
static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	struct of_phandle_args *irq_data = arg;

	ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args,
				   irq_data->args_count, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++)
		gic_irq_domain_map(domain, virq + i, hwirq + i);

	return 0;
}
static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
	}
}
static const struct irq_domain_ops gic_irq_domain_ops = {
	.xlate = gic_irq_domain_xlate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
};
static void gicv3_enable_quirks(void)
{
#ifdef CONFIG_ARM64
	if (cpus_have_cap(ARM64_WORKAROUND_CAVIUM_23154))
		static_branch_enable(&is_cavium_thunderx);
#endif
}
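/*
 * For reference, a minimal device tree node this probe path consumes
 * (illustrative only; addresses and sizes are made up). reg index 0 is
 * the distributor, indexes 1..N the redistributor regions;
 * "#redistributor-regions" and "redistributor-stride" are optional:
 *
 *	intc: interrupt-controller@2f000000 {
 *		compatible = "arm,gic-v3";
 *		#interrupt-cells = <3>;
 *		interrupt-controller;
 *		reg = <0x0 0x2f000000 0x0 0x10000>,	// GICD
 *		      <0x0 0x2f100000 0x0 0x200000>;	// GICR
 *	};
 */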
static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
	void __iomem *dist_base;
	struct redist_region *rdist_regs;
	u64 redist_stride;
	u32 nr_redist_regions;
	u32 typer;
	u32 reg;
	u32 gic_irqs;
	int err;
	int i;
	dist_base = of_iomap(node, 0);
	if (!dist_base) {
		pr_err("%s: unable to map gic dist registers\n",
		       node->full_name);
		return -ENXIO;
	}

	reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
	if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) {
		pr_err("%s: no distributor detected, giving up\n",
		       node->full_name);
		err = -ENODEV;
		goto out_unmap_dist;
	}

	if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
		nr_redist_regions = 1;

	rdist_regs = kzalloc(sizeof(*rdist_regs) * nr_redist_regions, GFP_KERNEL);
	if (!rdist_regs) {
		err = -ENOMEM;
		goto out_unmap_dist;
	}

	for (i = 0; i < nr_redist_regions; i++) {
		struct resource res;
		int ret;

		ret = of_address_to_resource(node, 1 + i, &res);
		rdist_regs[i].redist_base = of_iomap(node, 1 + i);
		if (ret || !rdist_regs[i].redist_base) {
			pr_err("%s: couldn't map region %d\n",
			       node->full_name, i);
			err = -ENODEV;
			goto out_unmap_rdist;
		}
		rdist_regs[i].phys_base = res.start;
	}
	if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
		redist_stride = 0;

	if (!is_hyp_mode_available())
		static_key_slow_dec(&supports_deactivate);

	if (static_key_true(&supports_deactivate))
		pr_info("GIC: Using split EOI/Deactivate mode\n");

	gic_data.dist_base = dist_base;
	gic_data.redist_regions = rdist_regs;
	gic_data.nr_redist_regions = nr_redist_regions;
	gic_data.redist_stride = redist_stride;

	gicv3_enable_quirks();
	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
	 */
	typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
	gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer);
	gic_irqs = GICD_TYPER_IRQS(typer);
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic_data.irq_nr = gic_irqs;

	gic_data.domain = irq_domain_add_tree(node, &gic_irq_domain_ops,
					      &gic_data);
	gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));

	if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
		err = -ENOMEM;
		goto out_free;
	}

	set_handle_irq(gic_handle_irq);

	if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
		its_init(node, &gic_data.rdists, gic_data.domain);

	gic_smp_init();
	gic_dist_init();
	gic_cpu_init();
	gic_cpu_pm_init();

	return 0;

out_free:
	if (gic_data.domain)
		irq_domain_remove(gic_data.domain);
	free_percpu(gic_data.rdists.rdist);
out_unmap_rdist:
	for (i = 0; i < nr_redist_regions; i++)
		if (rdist_regs[i].redist_base)
			iounmap(rdist_regs[i].redist_base);
	kfree(rdist_regs);
out_unmap_dist:
	iounmap(dist_base);
	return err;
}

IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);