virt/kvm/arm/vgic.c
1 /*
2  * Copyright (C) 2012 ARM Ltd.
3  * Author: Marc Zyngier <marc.zyngier@arm.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17  */
18
19 #include <linux/cpu.h>
20 #include <linux/kvm.h>
21 #include <linux/kvm_host.h>
22 #include <linux/interrupt.h>
23 #include <linux/io.h>
24 #include <linux/of.h>
25 #include <linux/of_address.h>
26 #include <linux/of_irq.h>
27
28 #include <linux/irqchip/arm-gic.h>
29
30 #include <asm/kvm_emulate.h>
31 #include <asm/kvm_arm.h>
32 #include <asm/kvm_mmu.h>
33
34 /*
35  * How the whole thing works (courtesy of Christoffer Dall):
36  *
37  * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if
38  *   something is pending
39  * - VGIC pending interrupts are stored in the vgic.irq_state
40  *   bitmap (this bitmap is updated by both userland ioctls and guest
41  *   mmio ops, as well as by in-kernel peripherals such as the
42  *   arch timers) and indicate the 'wire' state.
43  * - Every time the bitmap changes, the irq_pending_on_cpu oracle is
44  *   recalculated
45  * - To calculate the oracle, we need info for each cpu from
46  *   compute_pending_for_cpu, which considers:
47  *   - PPI: dist->irq_state & dist->irq_enable
48  *   - SPI: dist->irq_state & dist->irq_enable & dist->irq_spi_target
49  *   - irq_spi_target is a 'formatted' version of the GICD_ITARGETSRn
50  *     registers, stored on each vcpu. We only keep one bit of
51  *     information per interrupt, making sure that only one vcpu can
52  *     accept the interrupt.
53  * - The same is true when injecting an interrupt, except that we only
54  *   consider a single interrupt at a time. The irq_spi_cpu array
55  *   contains the target CPU for each SPI.
56  *
57  * The handling of level interrupts adds some extra complexity. We
58  * need to track when the interrupt has been EOIed, so we can sample
59  * the 'line' again. This is achieved as follows:
60  *
61  * - When a level interrupt is moved onto a vcpu, the corresponding
62  *   bit in irq_active is set. As long as this bit is set, the line
63  *   will be ignored for further interrupts. The interrupt is injected
64  *   into the vcpu with the GICH_LR_EOI bit set (generate a
65  *   maintenance interrupt on EOI).
66  * - When the interrupt is EOIed, the maintenance interrupt fires,
67  *   and clears the corresponding bit in irq_active. This allows the
68  *   interrupt line to be sampled again.
69  */
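/*
 * Restated as a rough formula, the per-vcpu pending oracle computed by
 * compute_pending_for_cpu() below is:
 *
 *	pending  =  irq_state & irq_enabled                     (SGIs/PPIs)
 *	pending |= (irq_state & irq_enabled
 *		    & irq_spi_target[vcpu])                     (SPIs)
 *
 * and irq_pending_on_cpu caches, per vcpu, whether any of those bits
 * is set.
 */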
70
71 #define VGIC_ADDR_UNDEF         (-1)
72 #define IS_VGIC_ADDR_UNDEF(_x)  ((_x) == VGIC_ADDR_UNDEF)
73
74 #define PRODUCT_ID_KVM          0x4b    /* ASCII code K */
75 #define IMPLEMENTER_ARM         0x43b
76 #define GICC_ARCH_VERSION_V2    0x2
77
78 /* Physical address of vgic virtual cpu interface */
79 static phys_addr_t vgic_vcpu_base;
80
81 /* Virtual control interface base address */
82 static void __iomem *vgic_vctrl_base;
83
84 static struct device_node *vgic_node;
85
86 #define ACCESS_READ_VALUE       (1 << 0)
87 #define ACCESS_READ_RAZ         (0 << 0)
88 #define ACCESS_READ_MASK(x)     ((x) & (1 << 0))
89 #define ACCESS_WRITE_IGNORED    (0 << 1)
90 #define ACCESS_WRITE_SETBIT     (1 << 1)
91 #define ACCESS_WRITE_CLEARBIT   (2 << 1)
92 #define ACCESS_WRITE_VALUE      (3 << 1)
93 #define ACCESS_WRITE_MASK(x)    ((x) & (3 << 1))
94
95 static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
96 static void vgic_update_state(struct kvm *kvm);
97 static void vgic_kick_vcpus(struct kvm *kvm);
98 static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
99 static u32 vgic_nr_lr;
100
101 static unsigned int vgic_maint_irq;
102
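/*
 * A vgic_bitmap keeps one bit per interrupt: a banked per-cpu word for
 * the 32 private interrupts (SGIs and PPIs) and a shared array for the
 * SPIs.  Offset 0 of the corresponding distributor register therefore
 * maps to the per-cpu word of the accessing vcpu, and every following
 * word comes from the shared part.
 */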
103 static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
104                                 int cpuid, u32 offset)
105 {
106         offset >>= 2;
107         if (!offset)
108                 return x->percpu[cpuid].reg;
109         else
110                 return x->shared.reg + offset - 1;
111 }
112
113 static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
114                                    int cpuid, int irq)
115 {
116         if (irq < VGIC_NR_PRIVATE_IRQS)
117                 return test_bit(irq, x->percpu[cpuid].reg_ul);
118
119         return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared.reg_ul);
120 }
121
122 static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
123                                     int irq, int val)
124 {
125         unsigned long *reg;
126
127         if (irq < VGIC_NR_PRIVATE_IRQS) {
128                 reg = x->percpu[cpuid].reg_ul;
129         } else {
130                 reg =  x->shared.reg_ul;
131                 irq -= VGIC_NR_PRIVATE_IRQS;
132         }
133
134         if (val)
135                 set_bit(irq, reg);
136         else
137                 clear_bit(irq, reg);
138 }
139
140 static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
141 {
142         if (unlikely(cpuid >= VGIC_MAX_CPUS))
143                 return NULL;
144         return x->percpu[cpuid].reg_ul;
145 }
146
147 static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
148 {
149         return x->shared.reg_ul;
150 }
151
152 static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
153 {
154         offset >>= 2;
155         BUG_ON(offset > (VGIC_NR_IRQS / 4));
156         if (offset < 8)
157                 return x->percpu[cpuid] + offset;
158         else
159                 return x->shared + offset - 8;
160 }
161
162 #define VGIC_CFG_LEVEL  0
163 #define VGIC_CFG_EDGE   1
164
165 static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq)
166 {
167         struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
168         int irq_val;
169
170         irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq);
171         return irq_val == VGIC_CFG_EDGE;
172 }
173
174 static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
175 {
176         struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
177
178         return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
179 }
180
181 static int vgic_irq_is_active(struct kvm_vcpu *vcpu, int irq)
182 {
183         struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
184
185         return vgic_bitmap_get_irq_val(&dist->irq_active, vcpu->vcpu_id, irq);
186 }
187
188 static void vgic_irq_set_active(struct kvm_vcpu *vcpu, int irq)
189 {
190         struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
191
192         vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 1);
193 }
194
195 static void vgic_irq_clear_active(struct kvm_vcpu *vcpu, int irq)
196 {
197         struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
198
199         vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 0);
200 }
201
202 static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
203 {
204         struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
205
206         return vgic_bitmap_get_irq_val(&dist->irq_state, vcpu->vcpu_id, irq);
207 }
208
209 static void vgic_dist_irq_set(struct kvm_vcpu *vcpu, int irq)
210 {
211         struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
212
213         vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 1);
214 }
215
216 static void vgic_dist_irq_clear(struct kvm_vcpu *vcpu, int irq)
217 {
218         struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
219
220         vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 0);
221 }
222
223 static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
224 {
225         if (irq < VGIC_NR_PRIVATE_IRQS)
226                 set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
227         else
228                 set_bit(irq - VGIC_NR_PRIVATE_IRQS,
229                         vcpu->arch.vgic_cpu.pending_shared);
230 }
231
232 static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
233 {
234         if (irq < VGIC_NR_PRIVATE_IRQS)
235                 clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
236         else
237                 clear_bit(irq - VGIC_NR_PRIVATE_IRQS,
238                           vcpu->arch.vgic_cpu.pending_shared);
239 }
240
241 static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
242 {
243         return *((u32 *)mmio->data) & mask;
244 }
245
246 static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
247 {
248         *((u32 *)mmio->data) = value & mask;
249 }
250
251 /**
252  * vgic_reg_access - access vgic register
253  * @mmio:   pointer to the data describing the mmio access
254  * @reg:    pointer to the virtual backing of vgic distributor data
255  * @offset: least significant 2 bits used for word offset
256  * @mode:   ACCESS_ mode (see defines above)
257  *
258  * Helper that performs a vgic register access using one of the access
259  * modes defined above
260  * (read, raz, write-ignored, setbit, clearbit, write).
261  */
262 static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
263                             phys_addr_t offset, int mode)
264 {
265         int word_offset = (offset & 3) * 8;
266         u32 mask = (1UL << (mmio->len * 8)) - 1;
267         u32 regval;
268
269         /*
270          * Any alignment fault should have been delivered to the guest
271          * directly (ARM ARM B3.12.7 "Prioritization of aborts").
272          */
273
274         if (reg) {
275                 regval = *reg;
276         } else {
277                 BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED));
278                 regval = 0;
279         }
280
281         if (mmio->is_write) {
282                 u32 data = mmio_data_read(mmio, mask) << word_offset;
283                 switch (ACCESS_WRITE_MASK(mode)) {
284                 case ACCESS_WRITE_IGNORED:
285                         return;
286
287                 case ACCESS_WRITE_SETBIT:
288                         regval |= data;
289                         break;
290
291                 case ACCESS_WRITE_CLEARBIT:
292                         regval &= ~data;
293                         break;
294
295                 case ACCESS_WRITE_VALUE:
296                         regval = (regval & ~(mask << word_offset)) | data;
297                         break;
298                 }
299                 *reg = regval;
300         } else {
301                 switch (ACCESS_READ_MASK(mode)) {
302                 case ACCESS_READ_RAZ:
303                         regval = 0;
304                         /* fall through */
305
306                 case ACCESS_READ_VALUE:
307                         mmio_data_write(mmio, mask, regval >> word_offset);
308                 }
309         }
310 }
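/*
 * Worked example: a one-byte guest write of 0xff at byte offset 2 of a
 * register gives word_offset = 16 and mask = 0xff, so ACCESS_WRITE_SETBIT
 * sets bits 16-23 of *reg and leaves everything else untouched.
 */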
311
312 static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
313                              struct kvm_exit_mmio *mmio, phys_addr_t offset)
314 {
315         u32 reg;
316         u32 word_offset = offset & 3;
317
318         switch (offset & ~3) {
319         case 0:                 /* GICD_CTLR */
320                 reg = vcpu->kvm->arch.vgic.enabled;
321                 vgic_reg_access(mmio, &reg, word_offset,
322                                 ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
323                 if (mmio->is_write) {
324                         vcpu->kvm->arch.vgic.enabled = reg & 1;
325                         vgic_update_state(vcpu->kvm);
326                         return true;
327                 }
328                 break;
329
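        /*
         * GICD_TYPER: bits [7:5] hold the number of implemented CPU
         * interfaces minus one, bits [4:0] hold the number of
         * supported interrupts divided by 32, minus one.
         */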
330         case 4:                 /* GICD_TYPER */
331                 reg  = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
332                 reg |= (VGIC_NR_IRQS >> 5) - 1;
333                 vgic_reg_access(mmio, &reg, word_offset,
334                                 ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
335                 break;
336
337         case 8:                 /* GICD_IIDR */
338                 reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
339                 vgic_reg_access(mmio, &reg, word_offset,
340                                 ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
341                 break;
342         }
343
344         return false;
345 }
346
347 static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
348                                struct kvm_exit_mmio *mmio, phys_addr_t offset)
349 {
350         vgic_reg_access(mmio, NULL, offset,
351                         ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
352         return false;
353 }
354
355 static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
356                                        struct kvm_exit_mmio *mmio,
357                                        phys_addr_t offset)
358 {
359         u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
360                                        vcpu->vcpu_id, offset);
361         vgic_reg_access(mmio, reg, offset,
362                         ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
363         if (mmio->is_write) {
364                 vgic_update_state(vcpu->kvm);
365                 return true;
366         }
367
368         return false;
369 }
370
371 static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
372                                          struct kvm_exit_mmio *mmio,
373                                          phys_addr_t offset)
374 {
375         u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
376                                        vcpu->vcpu_id, offset);
377         vgic_reg_access(mmio, reg, offset,
378                         ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
379         if (mmio->is_write) {
380                 if (offset < 4) /* Force SGI enabled */
381                         *reg |= 0xffff;
382                 vgic_retire_disabled_irqs(vcpu);
383                 vgic_update_state(vcpu->kvm);
384                 return true;
385         }
386
387         return false;
388 }
389
390 static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
391                                         struct kvm_exit_mmio *mmio,
392                                         phys_addr_t offset)
393 {
394         u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state,
395                                        vcpu->vcpu_id, offset);
396         vgic_reg_access(mmio, reg, offset,
397                         ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
398         if (mmio->is_write) {
399                 vgic_update_state(vcpu->kvm);
400                 return true;
401         }
402
403         return false;
404 }
405
406 static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
407                                           struct kvm_exit_mmio *mmio,
408                                           phys_addr_t offset)
409 {
410         u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state,
411                                        vcpu->vcpu_id, offset);
412         vgic_reg_access(mmio, reg, offset,
413                         ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
414         if (mmio->is_write) {
415                 vgic_update_state(vcpu->kvm);
416                 return true;
417         }
418
419         return false;
420 }
421
422 static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
423                                      struct kvm_exit_mmio *mmio,
424                                      phys_addr_t offset)
425 {
426         u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
427                                         vcpu->vcpu_id, offset);
428         vgic_reg_access(mmio, reg, offset,
429                         ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
430         return false;
431 }
432
433 #define GICD_ITARGETSR_SIZE     32
434 #define GICD_CPUTARGETS_BITS    8
435 #define GICD_IRQS_PER_ITARGETSR (GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
436 static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
437 {
438         struct vgic_dist *dist = &kvm->arch.vgic;
439         int i;
440         u32 val = 0;
441
442         irq -= VGIC_NR_PRIVATE_IRQS;
443
444         for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
445                 val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);
446
447         return val;
448 }
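/*
 * Example: with SPIs 32-35 all routed to vcpu1, vgic_get_target_reg()
 * returns 0x02020202 - one target byte per interrupt, with bit n set
 * meaning "targets CPU n".
 */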
449
450 static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
451 {
452         struct vgic_dist *dist = &kvm->arch.vgic;
453         struct kvm_vcpu *vcpu;
454         int i, c;
455         unsigned long *bmap;
456         u32 target;
457
458         irq -= VGIC_NR_PRIVATE_IRQS;
459
460         /*
461          * Pick the LSB in each byte. This ensures we target exactly
462          * one vcpu per IRQ. If the byte is null, assume we target
463          * CPU0.
464          */
465         for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
466                 int shift = i * GICD_CPUTARGETS_BITS;
467                 target = ffs((val >> shift) & 0xffU);
468                 target = target ? (target - 1) : 0;
469                 dist->irq_spi_cpu[irq + i] = target;
470                 kvm_for_each_vcpu(c, vcpu, kvm) {
471                         bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
472                         if (c == target)
473                                 set_bit(irq + i, bmap);
474                         else
475                                 clear_bit(irq + i, bmap);
476                 }
477         }
478 }
479
480 static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
481                                    struct kvm_exit_mmio *mmio,
482                                    phys_addr_t offset)
483 {
484         u32 reg;
485
486         /* We treat the banked interrupt targets as read-only */
487         if (offset < 32) {
488                 u32 roreg = 1 << vcpu->vcpu_id;
489                 roreg |= roreg << 8;
490                 roreg |= roreg << 16;
491
492                 vgic_reg_access(mmio, &roreg, offset,
493                                 ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
494                 return false;
495         }
496
497         reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
498         vgic_reg_access(mmio, &reg, offset,
499                         ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
500         if (mmio->is_write) {
501                 vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
502                 vgic_update_state(vcpu->kvm);
503                 return true;
504         }
505
506         return false;
507 }
508
509 static u32 vgic_cfg_expand(u16 val)
510 {
511         u32 res = 0;
512         int i;
513
514         /*
515          * Turn a 16bit value like abcd...mnop into a 32bit word
516          * a0b0c0d0...m0n0o0p0, which is what the HW cfg register is.
517          */
518         for (i = 0; i < 16; i++)
519                 res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1);
520
521         return res;
522 }
523
524 static u16 vgic_cfg_compress(u32 val)
525 {
526         u16 res = 0;
527         int i;
528
529         /*
530          * Turn a 32bit word a0b0c0d0...m0n0o0p0 into 16bit value like
531          * abcd...mnop which is what we really care about.
532          */
533         for (i = 0; i < 16; i++)
534                 res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i;
535
536         return res;
537 }
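/*
 * Worked example: the compressed value 0x3 (two edge-triggered
 * interrupts) expands to 0xa, i.e. bits 1 and 3 of the hardware
 * GICD_ICFGRn half-word, and vgic_cfg_compress(0xa) gives 0x3 back.
 */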
538
539 /*
540  * The distributor uses 2 bits per IRQ for the CFG register, but the
541  * LSB is always 0. As such, we only keep the upper bit, and use the
542  * two above functions to compress/expand the bits
543  */
544 static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
545                                 struct kvm_exit_mmio *mmio, phys_addr_t offset)
546 {
547         u32 val;
548         u32 *reg;
549
550         offset >>= 1;
551         reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
552                                   vcpu->vcpu_id, offset);
553
554         if (offset & 2)
555                 val = *reg >> 16;
556         else
557                 val = *reg & 0xffff;
558
559         val = vgic_cfg_expand(val);
560         vgic_reg_access(mmio, &val, offset,
561                         ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
562         if (mmio->is_write) {
563                 if (offset < 4) {
564                         *reg = ~0U; /* Force PPIs/SGIs to 1 */
565                         return false;
566                 }
567
568                 val = vgic_cfg_compress(val);
569                 if (offset & 2) {
570                         *reg &= 0xffff;
571                         *reg |= val << 16;
572                 } else {
573                         *reg &= 0xffff << 16;
574                         *reg |= val;
575                 }
576         }
577
578         return false;
579 }
580
581 static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
582                                 struct kvm_exit_mmio *mmio, phys_addr_t offset)
583 {
584         u32 reg;
585         vgic_reg_access(mmio, &reg, offset,
586                         ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
587         if (mmio->is_write) {
588                 vgic_dispatch_sgi(vcpu, reg);
589                 vgic_update_state(vcpu->kvm);
590                 return true;
591         }
592
593         return false;
594 }
595
596 #define LR_CPUID(lr)    \
597         (((lr) & GICH_LR_PHYSID_CPUID) >> GICH_LR_PHYSID_CPUID_SHIFT)
598 #define LR_IRQID(lr)    \
599         ((lr) & GICH_LR_VIRTUALID)
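/*
 * In a GICv2 list register the virtual interrupt ID lives in bits [9:0]
 * and, for software-generated interrupts, the ID of the requesting CPU
 * in bits [12:10]; the two macros above extract those fields.
 */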
600
601 static void vgic_retire_lr(int lr_nr, int irq, struct vgic_cpu *vgic_cpu)
602 {
603         clear_bit(lr_nr, vgic_cpu->lr_used);
604         vgic_cpu->vgic_lr[lr_nr] &= ~GICH_LR_STATE;
605         vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
606 }
607
608 /**
609  * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor
610  * @vcpu: pointer to the vcpu whose LRs are moved back to the distributor
611  *
612  * Move any pending IRQs that have already been assigned to LRs back to the
613  * emulated distributor state so that the complete emulated state can be read
614  * from the main emulation structures without investigating the LRs.
615  *
616  * Note that IRQs in the active state in the LRs get their pending state moved
617  * to the distributor but the active state stays in the LRs, because we don't
618  * track the active state on the distributor side.
619  */
620 static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
621 {
622         struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
623         struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
624         int vcpu_id = vcpu->vcpu_id;
625         int i, irq, source_cpu;
626         u32 *lr;
627
628         for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
629                 lr = &vgic_cpu->vgic_lr[i];
630                 irq = LR_IRQID(*lr);
631                 source_cpu = LR_CPUID(*lr);
632
633                 /*
634                  * There are three options for the state bits:
635                  *
636                  * 01: pending
637                  * 10: active
638                  * 11: pending and active
639                  *
640                  * If the LR holds only an active interrupt (not pending) then
641                  * just leave it alone.
642                  */
643                 if ((*lr & GICH_LR_STATE) == GICH_LR_ACTIVE_BIT)
644                         continue;
645
646                 /*
647                  * Reestablish the pending state on the distributor and the
648                  * CPU interface.  It may have already been pending, but that
649                  * is fine; then we are only setting a few bits that were
650                  * already set.
651                  */
652                 vgic_dist_irq_set(vcpu, irq);
653                 if (irq < VGIC_NR_SGIS)
654                         dist->irq_sgi_sources[vcpu_id][irq] |= 1 << source_cpu;
655                 *lr &= ~GICH_LR_PENDING_BIT;
656
657                 /*
658                  * If there's no state left on the LR (it could still be
659                  * active), then the LR does not hold any useful info and can
660                  * be marked as free for other use.
661                  */
662                 if (!(*lr & GICH_LR_STATE))
663                         vgic_retire_lr(i, irq, vgic_cpu);
664
665                 /* Finally update the VGIC state. */
666                 vgic_update_state(vcpu->kvm);
667         }
668 }
669
670 /* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
671 static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
672                                         struct kvm_exit_mmio *mmio,
673                                         phys_addr_t offset)
674 {
675         struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
676         int sgi;
677         int min_sgi = (offset & ~0x3);
678         int max_sgi = min_sgi + 3;
679         int vcpu_id = vcpu->vcpu_id;
680         u32 reg = 0;
681
682         /* Copy source SGIs from distributor side */
683         for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
684                 int shift = 8 * (sgi - min_sgi);
685                 reg |= (u32)dist->irq_sgi_sources[vcpu_id][sgi] << shift;
686         }
687
688         mmio_data_write(mmio, ~0, reg);
689         return false;
690 }
691
692 static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
693                                          struct kvm_exit_mmio *mmio,
694                                          phys_addr_t offset, bool set)
695 {
696         struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
697         int sgi;
698         int min_sgi = (offset & ~0x3);
699         int max_sgi = min_sgi + 3;
700         int vcpu_id = vcpu->vcpu_id;
701         u32 reg;
702         bool updated = false;
703
704         reg = mmio_data_read(mmio, ~0);
705
706         /* Set or clear the pending SGI sources on the distributor */
707         for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
708                 u8 mask = reg >> (8 * (sgi - min_sgi));
709                 if (set) {
710                         if ((dist->irq_sgi_sources[vcpu_id][sgi] & mask) != mask)
711                                 updated = true;
712                         dist->irq_sgi_sources[vcpu_id][sgi] |= mask;
713                 } else {
714                         if (dist->irq_sgi_sources[vcpu_id][sgi] & mask)
715                                 updated = true;
716                         dist->irq_sgi_sources[vcpu_id][sgi] &= ~mask;
717                 }
718         }
719
720         if (updated)
721                 vgic_update_state(vcpu->kvm);
722
723         return updated;
724 }
725
726 static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
727                                 struct kvm_exit_mmio *mmio,
728                                 phys_addr_t offset)
729 {
730         if (!mmio->is_write)
731                 return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
732         else
733                 return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
734 }
735
736 static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
737                                   struct kvm_exit_mmio *mmio,
738                                   phys_addr_t offset)
739 {
740         if (!mmio->is_write)
741                 return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
742         else
743                 return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
744 }
745
746 /*
747  * I would have liked to use the kvm_bus_io_*() API instead, but it
748  * cannot cope with banked registers (only the VM pointer is passed
749  * around, and we need the vcpu). One of these days, someone please
750  * fix it!
751  */
752 struct mmio_range {
753         phys_addr_t base;
754         unsigned long len;
755         bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
756                             phys_addr_t offset);
757 };
758
759 static const struct mmio_range vgic_dist_ranges[] = {
760         {
761                 .base           = GIC_DIST_CTRL,
762                 .len            = 12,
763                 .handle_mmio    = handle_mmio_misc,
764         },
765         {
766                 .base           = GIC_DIST_IGROUP,
767                 .len            = VGIC_NR_IRQS / 8,
768                 .handle_mmio    = handle_mmio_raz_wi,
769         },
770         {
771                 .base           = GIC_DIST_ENABLE_SET,
772                 .len            = VGIC_NR_IRQS / 8,
773                 .handle_mmio    = handle_mmio_set_enable_reg,
774         },
775         {
776                 .base           = GIC_DIST_ENABLE_CLEAR,
777                 .len            = VGIC_NR_IRQS / 8,
778                 .handle_mmio    = handle_mmio_clear_enable_reg,
779         },
780         {
781                 .base           = GIC_DIST_PENDING_SET,
782                 .len            = VGIC_NR_IRQS / 8,
783                 .handle_mmio    = handle_mmio_set_pending_reg,
784         },
785         {
786                 .base           = GIC_DIST_PENDING_CLEAR,
787                 .len            = VGIC_NR_IRQS / 8,
788                 .handle_mmio    = handle_mmio_clear_pending_reg,
789         },
790         {
791                 .base           = GIC_DIST_ACTIVE_SET,
792                 .len            = VGIC_NR_IRQS / 8,
793                 .handle_mmio    = handle_mmio_raz_wi,
794         },
795         {
796                 .base           = GIC_DIST_ACTIVE_CLEAR,
797                 .len            = VGIC_NR_IRQS / 8,
798                 .handle_mmio    = handle_mmio_raz_wi,
799         },
800         {
801                 .base           = GIC_DIST_PRI,
802                 .len            = VGIC_NR_IRQS,
803                 .handle_mmio    = handle_mmio_priority_reg,
804         },
805         {
806                 .base           = GIC_DIST_TARGET,
807                 .len            = VGIC_NR_IRQS,
808                 .handle_mmio    = handle_mmio_target_reg,
809         },
810         {
811                 .base           = GIC_DIST_CONFIG,
812                 .len            = VGIC_NR_IRQS / 4,
813                 .handle_mmio    = handle_mmio_cfg_reg,
814         },
815         {
816                 .base           = GIC_DIST_SOFTINT,
817                 .len            = 4,
818                 .handle_mmio    = handle_mmio_sgi_reg,
819         },
820         {
821                 .base           = GIC_DIST_SGI_PENDING_CLEAR,
822                 .len            = VGIC_NR_SGIS,
823                 .handle_mmio    = handle_mmio_sgi_clear,
824         },
825         {
826                 .base           = GIC_DIST_SGI_PENDING_SET,
827                 .len            = VGIC_NR_SGIS,
828                 .handle_mmio    = handle_mmio_sgi_set,
829         },
830         {}
831 };
832
833 static const
834 struct mmio_range *find_matching_range(const struct mmio_range *ranges,
835                                        struct kvm_exit_mmio *mmio,
836                                        phys_addr_t offset)
837 {
838         const struct mmio_range *r = ranges;
839
840         while (r->len) {
841                 if (offset >= r->base &&
842                     (offset + mmio->len) <= (r->base + r->len))
843                         return r;
844                 r++;
845         }
846
847         return NULL;
848 }
849
850 /**
851  * vgic_handle_mmio - handle an in-kernel MMIO access
852  * @vcpu:       pointer to the vcpu performing the access
853  * @run:        pointer to the kvm_run structure
854  * @mmio:       pointer to the data describing the access
855  *
856  * returns true if the MMIO access has been performed in kernel space,
857  * and false if it needs to be emulated in user space.
858  */
859 bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
860                       struct kvm_exit_mmio *mmio)
861 {
862         const struct mmio_range *range;
863         struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
864         unsigned long base = dist->vgic_dist_base;
865         bool updated_state;
866         unsigned long offset;
867
868         if (!irqchip_in_kernel(vcpu->kvm) ||
869             mmio->phys_addr < base ||
870             (mmio->phys_addr + mmio->len) > (base + KVM_VGIC_V2_DIST_SIZE))
871                 return false;
872
873         /* We don't support ldrd / strd or ldm / stm to the emulated vgic */
874         if (mmio->len > 4) {
875                 kvm_inject_dabt(vcpu, mmio->phys_addr);
876                 return true;
877         }
878
879         offset = mmio->phys_addr - base;
880         range = find_matching_range(vgic_dist_ranges, mmio, offset);
881         if (unlikely(!range || !range->handle_mmio)) {
882                 pr_warn("Unhandled access %d %08llx %d\n",
883                         mmio->is_write, mmio->phys_addr, mmio->len);
884                 return false;
885         }
886
887         spin_lock(&vcpu->kvm->arch.vgic.lock);
888         offset = mmio->phys_addr - range->base - base;
889         updated_state = range->handle_mmio(vcpu, mmio, offset);
890         spin_unlock(&vcpu->kvm->arch.vgic.lock);
891         kvm_prepare_mmio(run, mmio);
892         kvm_handle_mmio_return(vcpu, run);
893
894         if (updated_state)
895                 vgic_kick_vcpus(vcpu->kvm);
896
897         return true;
898 }
899
900 static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
901 {
902         struct kvm *kvm = vcpu->kvm;
903         struct vgic_dist *dist = &kvm->arch.vgic;
904         int nrcpus = atomic_read(&kvm->online_vcpus);
905         u8 target_cpus;
906         int sgi, mode, c, vcpu_id;
907
908         vcpu_id = vcpu->vcpu_id;
909
910         sgi = reg & 0xf;
911         target_cpus = (reg >> 16) & 0xff;
912         mode = (reg >> 24) & 3;
913
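        /*
         * GICD_SGIR target list filter: mode 0 sends the SGI to the
         * CPUs listed in target_cpus, mode 1 to all CPUs but the
         * requesting one, and mode 2 only to the requesting CPU.
         */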
914         switch (mode) {
915         case 0:
916                 if (!target_cpus)
917                         return;
918                 break;
919         case 1:
920                 target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
921                 break;
922
923         case 2:
924                 target_cpus = 1 << vcpu_id;
925                 break;
926         }
927
928         kvm_for_each_vcpu(c, vcpu, kvm) {
929                 if (target_cpus & 1) {
930                         /* Flag the SGI as pending */
931                         vgic_dist_irq_set(vcpu, sgi);
932                         dist->irq_sgi_sources[c][sgi] |= 1 << vcpu_id;
933                         kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
934                 }
935
936                 target_cpus >>= 1;
937         }
938 }
939
940 static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
941 {
942         struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
943         unsigned long *pending, *enabled, *pend_percpu, *pend_shared;
944         unsigned long pending_private, pending_shared;
945         int vcpu_id;
946
947         vcpu_id = vcpu->vcpu_id;
948         pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
949         pend_shared = vcpu->arch.vgic_cpu.pending_shared;
950
951         pending = vgic_bitmap_get_cpu_map(&dist->irq_state, vcpu_id);
952         enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
953         bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);
954
955         pending = vgic_bitmap_get_shared_map(&dist->irq_state);
956         enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
957         bitmap_and(pend_shared, pending, enabled, VGIC_NR_SHARED_IRQS);
958         bitmap_and(pend_shared, pend_shared,
959                    vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
960                    VGIC_NR_SHARED_IRQS);
961
962         pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS);
963         pending_shared = find_first_bit(pend_shared, VGIC_NR_SHARED_IRQS);
964         return (pending_private < VGIC_NR_PRIVATE_IRQS ||
965                 pending_shared < VGIC_NR_SHARED_IRQS);
966 }
967
968 /*
969  * Update the interrupt state and determine which CPUs have pending
970  * interrupts. Must be called with distributor lock held.
971  */
972 static void vgic_update_state(struct kvm *kvm)
973 {
974         struct vgic_dist *dist = &kvm->arch.vgic;
975         struct kvm_vcpu *vcpu;
976         int c;
977
978         if (!dist->enabled) {
979                 set_bit(0, &dist->irq_pending_on_cpu);
980                 return;
981         }
982
983         kvm_for_each_vcpu(c, vcpu, kvm) {
984                 if (compute_pending_for_cpu(vcpu)) {
985                         pr_debug("CPU%d has pending interrupts\n", c);
986                         set_bit(c, &dist->irq_pending_on_cpu);
987                 }
988         }
989 }
990
991 #define MK_LR_PEND(src, irq)    \
992         (GICH_LR_PENDING_BIT | ((src) << GICH_LR_PHYSID_CPUID_SHIFT) | (irq))
993
994 /*
995  * An interrupt may have been disabled after being made pending on the
996  * CPU interface (the classic case is a timer running while we're
997  * rebooting the guest - the interrupt would kick as soon as the CPU
998  * interface gets enabled, with deadly consequences).
999  *
1000  * The solution is to examine already active LRs, and check that the
1001  * interrupt is still enabled. If not, just retire it.
1002  */
1003 static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
1004 {
1005         struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1006         int lr;
1007
1008         for_each_set_bit(lr, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
1009                 int irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;
1010
1011                 if (!vgic_irq_is_enabled(vcpu, irq)) {
1012                         vgic_retire_lr(lr, irq, vgic_cpu);
1013                         if (vgic_irq_is_active(vcpu, irq))
1014                                 vgic_irq_clear_active(vcpu, irq);
1015                 }
1016         }
1017 }
1018
1019 /*
1020  * Queue an interrupt to a CPU virtual interface. Return true on success,
1021  * or false if it wasn't possible to queue it.
1022  */
1023 static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
1024 {
1025         struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1026         int lr;
1027
1028         /* Sanitize the input... */
1029         BUG_ON(sgi_source_id & ~7);
1030         BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS);
1031         BUG_ON(irq >= VGIC_NR_IRQS);
1032
1033         kvm_debug("Queue IRQ%d\n", irq);
1034
1035         lr = vgic_cpu->vgic_irq_lr_map[irq];
1036
1037         /* Do we have an active interrupt for the same CPUID? */
1038         if (lr != LR_EMPTY &&
1039             (LR_CPUID(vgic_cpu->vgic_lr[lr]) == sgi_source_id)) {
1040                 kvm_debug("LR%d piggyback for IRQ%d %x\n",
1041                           lr, irq, vgic_cpu->vgic_lr[lr]);
1042                 BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
1043                 vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT;
1044                 return true;
1045         }
1046
1047         /* Try to use another LR for this interrupt */
1048         lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
1049                                vgic_cpu->nr_lr);
1050         if (lr >= vgic_cpu->nr_lr)
1051                 return false;
1052
1053         kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
1054         vgic_cpu->vgic_lr[lr] = MK_LR_PEND(sgi_source_id, irq);
1055         vgic_cpu->vgic_irq_lr_map[irq] = lr;
1056         set_bit(lr, vgic_cpu->lr_used);
1057
1058         if (!vgic_irq_is_edge(vcpu, irq))
1059                 vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI;
1060
1061         return true;
1062 }
1063
1064 static bool vgic_queue_sgi(struct kvm_vcpu *vcpu, int irq)
1065 {
1066         struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1067         unsigned long sources;
1068         int vcpu_id = vcpu->vcpu_id;
1069         int c;
1070
1071         sources = dist->irq_sgi_sources[vcpu_id][irq];
1072
1073         for_each_set_bit(c, &sources, VGIC_MAX_CPUS) {
1074                 if (vgic_queue_irq(vcpu, c, irq))
1075                         clear_bit(c, &sources);
1076         }
1077
1078         dist->irq_sgi_sources[vcpu_id][irq] = sources;
1079
1080         /*
1081          * If the sources bitmap has been cleared it means that we
1082          * could queue all the SGIs onto link registers (see the
1083          * clear_bit above), and therefore we are done with them in
1084          * our emulated gic and can get rid of them.
1085          */
1086         if (!sources) {
1087                 vgic_dist_irq_clear(vcpu, irq);
1088                 vgic_cpu_irq_clear(vcpu, irq);
1089                 return true;
1090         }
1091
1092         return false;
1093 }
1094
1095 static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
1096 {
1097         if (vgic_irq_is_active(vcpu, irq))
1098                 return true; /* level interrupt, already queued */
1099
1100         if (vgic_queue_irq(vcpu, 0, irq)) {
1101                 if (vgic_irq_is_edge(vcpu, irq)) {
1102                         vgic_dist_irq_clear(vcpu, irq);
1103                         vgic_cpu_irq_clear(vcpu, irq);
1104                 } else {
1105                         vgic_irq_set_active(vcpu, irq);
1106                 }
1107
1108                 return true;
1109         }
1110
1111         return false;
1112 }
1113
1114 /*
1115  * Fill the list registers with pending interrupts before running the
1116  * guest.
1117  */
1118 static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
1119 {
1120         struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1121         struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1122         int i, vcpu_id;
1123         int overflow = 0;
1124
1125         vcpu_id = vcpu->vcpu_id;
1126
1127         /*
1128          * We may not have any pending interrupt, or the interrupts
1129          * may have been serviced from another vcpu. In all cases,
1130          * move along.
1131          */
1132         if (!kvm_vgic_vcpu_pending_irq(vcpu)) {
1133                 pr_debug("CPU%d has no pending interrupt\n", vcpu_id);
1134                 goto epilog;
1135         }
1136
1137         /* SGIs */
1138         for_each_set_bit(i, vgic_cpu->pending_percpu, VGIC_NR_SGIS) {
1139                 if (!vgic_queue_sgi(vcpu, i))
1140                         overflow = 1;
1141         }
1142
1143         /* PPIs */
1144         for_each_set_bit_from(i, vgic_cpu->pending_percpu, VGIC_NR_PRIVATE_IRQS) {
1145                 if (!vgic_queue_hwirq(vcpu, i))
1146                         overflow = 1;
1147         }
1148
1149         /* SPIs */
1150         for_each_set_bit(i, vgic_cpu->pending_shared, VGIC_NR_SHARED_IRQS) {
1151                 if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
1152                         overflow = 1;
1153         }
1154
1155 epilog:
1156         if (overflow) {
1157                 vgic_cpu->vgic_hcr |= GICH_HCR_UIE;
1158         } else {
1159                 vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;
1160                 /*
1161                  * We're about to run this VCPU, and we've consumed
1162                  * everything the distributor had in store for
1163                  * us. Claim we don't have anything pending. We'll
1164                  * adjust that if needed while exiting.
1165                  */
1166                 clear_bit(vcpu_id, &dist->irq_pending_on_cpu);
1167         }
1168 }
1169
1170 static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
1171 {
1172         struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1173         bool level_pending = false;
1174
1175         kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr);
1176
1177         if (vgic_cpu->vgic_misr & GICH_MISR_EOI) {
1178                 /*
1179                  * Some level interrupts have been EOIed. Clear their
1180                  * active bit.
1181                  */
1182                 int lr, irq;
1183
1184                 for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_eisr,
1185                                  vgic_cpu->nr_lr) {
1186                         irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;
1187
1188                         vgic_irq_clear_active(vcpu, irq);
1189                         vgic_cpu->vgic_lr[lr] &= ~GICH_LR_EOI;
1190
1191                         /* Any additional pending interrupt? */
1192                         if (vgic_dist_irq_is_pending(vcpu, irq)) {
1193                                 vgic_cpu_irq_set(vcpu, irq);
1194                                 level_pending = true;
1195                         } else {
1196                                 vgic_cpu_irq_clear(vcpu, irq);
1197                         }
1198
1199                         /*
1200                          * Despite being EOIed, the LR may not have
1201                          * been marked as empty.
1202                          */
1203                         set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr);
1204                         vgic_cpu->vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT;
1205                 }
1206         }
1207
1208         if (vgic_cpu->vgic_misr & GICH_MISR_U)
1209                 vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;
1210
1211         return level_pending;
1212 }
1213
1214 /*
1215  * Sync back the VGIC state after a guest run. The distributor lock is
1216  * needed so we don't get preempted in the middle of the state processing.
1217  */
1218 static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
1219 {
1220         struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1221         struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1222         int lr, pending;
1223         bool level_pending;
1224
1225         level_pending = vgic_process_maintenance(vcpu);
1226
1227         /* Clear mappings for empty LRs */
1228         for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr,
1229                          vgic_cpu->nr_lr) {
1230                 int irq;
1231
1232                 if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
1233                         continue;
1234
1235                 irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;
1236
1237                 BUG_ON(irq >= VGIC_NR_IRQS);
1238                 vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
1239         }
1240
1241         /* Check if we still have something up our sleeve... */
1242         pending = find_first_zero_bit((unsigned long *)vgic_cpu->vgic_elrsr,
1243                                       vgic_cpu->nr_lr);
1244         if (level_pending || pending < vgic_cpu->nr_lr)
1245                 set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
1246 }
1247
1248 void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
1249 {
1250         struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1251
1252         if (!irqchip_in_kernel(vcpu->kvm))
1253                 return;
1254
1255         spin_lock(&dist->lock);
1256         __kvm_vgic_flush_hwstate(vcpu);
1257         spin_unlock(&dist->lock);
1258 }
1259
1260 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
1261 {
1262         struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1263
1264         if (!irqchip_in_kernel(vcpu->kvm))
1265                 return;
1266
1267         spin_lock(&dist->lock);
1268         __kvm_vgic_sync_hwstate(vcpu);
1269         spin_unlock(&dist->lock);
1270 }
1271
1272 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
1273 {
1274         struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1275
1276         if (!irqchip_in_kernel(vcpu->kvm))
1277                 return 0;
1278
1279         return test_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
1280 }
1281
1282 static void vgic_kick_vcpus(struct kvm *kvm)
1283 {
1284         struct kvm_vcpu *vcpu;
1285         int c;
1286
1287         /*
1288          * We've injected an interrupt, time to find out who deserves
1289          * a good kick...
1290          */
1291         kvm_for_each_vcpu(c, vcpu, kvm) {
1292                 if (kvm_vgic_vcpu_pending_irq(vcpu))
1293                         kvm_vcpu_kick(vcpu);
1294         }
1295 }
1296
1297 static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
1298 {
1299         int is_edge = vgic_irq_is_edge(vcpu, irq);
1300         int state = vgic_dist_irq_is_pending(vcpu, irq);
1301
1302         /*
1303          * Only inject an interrupt if:
1304          * - edge triggered and we have a rising edge
1305          * - level triggered and we change level
1306          */
1307         if (is_edge)
1308                 return level > state;
1309         else
1310                 return level != state;
1311 }
1312
1313 static bool vgic_update_irq_state(struct kvm *kvm, int cpuid,
1314                                   unsigned int irq_num, bool level)
1315 {
1316         struct vgic_dist *dist = &kvm->arch.vgic;
1317         struct kvm_vcpu *vcpu;
1318         int is_edge, is_level;
1319         int enabled;
1320         bool ret = true;
1321
1322         spin_lock(&dist->lock);
1323
1324         vcpu = kvm_get_vcpu(kvm, cpuid);
1325         is_edge = vgic_irq_is_edge(vcpu, irq_num);
1326         is_level = !is_edge;
1327
1328         if (!vgic_validate_injection(vcpu, irq_num, level)) {
1329                 ret = false;
1330                 goto out;
1331         }
1332
1333         if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
1334                 cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
1335                 vcpu = kvm_get_vcpu(kvm, cpuid);
1336         }
1337
1338         kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid);
1339
1340         if (level)
1341                 vgic_dist_irq_set(vcpu, irq_num);
1342         else
1343                 vgic_dist_irq_clear(vcpu, irq_num);
1344
1345         enabled = vgic_irq_is_enabled(vcpu, irq_num);
1346
1347         if (!enabled) {
1348                 ret = false;
1349                 goto out;
1350         }
1351
1352         if (is_level && vgic_irq_is_active(vcpu, irq_num)) {
1353                 /*
1354                  * Level interrupt in progress, will be picked up
1355                  * when EOId.
1356                  */
1357                 ret = false;
1358                 goto out;
1359         }
1360
1361         if (level) {
1362                 vgic_cpu_irq_set(vcpu, irq_num);
1363                 set_bit(cpuid, &dist->irq_pending_on_cpu);
1364         }
1365
1366 out:
1367         spin_unlock(&dist->lock);
1368
1369         return ret;
1370 }
1371
1372 /**
1373  * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
1374  * @kvm:     The VM structure pointer
1375  * @cpuid:   The CPU for PPIs
1376  * @irq_num: The IRQ number that is assigned to the device
1377  * @level:   Edge-triggered:  true:  to trigger the interrupt
1378  *                            false: to ignore the call
1379  *           Level-sensitive  true:  activates an interrupt
1380  *                            false: deactivates an interrupt
1381  *
1382  * The GIC is not concerned with devices being active-LOW or active-HIGH for
1383  * level-sensitive interrupts.  You can think of the level parameter as 1
1384  * being HIGH and 0 being LOW and all devices being active-HIGH.
1385  */
1386 int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
1387                         bool level)
1388 {
1389         if (vgic_update_irq_state(kvm, cpuid, irq_num, level))
1390                 vgic_kick_vcpus(kvm);
1391
1392         return 0;
1393 }
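/*
 * Typical use (illustrative only): an in-kernel device such as the arch
 * timer raises its level-triggered PPI on a vcpu with something like
 *
 *	kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, timer_irq, true);
 *
 * and passes level == false once the device deasserts the line.
 */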
1394
1395 static irqreturn_t vgic_maintenance_handler(int irq, void *data)
1396 {
1397         /*
1398          * We cannot rely on the vgic maintenance interrupt to be
1399          * delivered synchronously. This means we can only use it to
1400          * exit the VM, and we perform the handling of EOIed
1401          * interrupts on the exit path (see vgic_process_maintenance).
1402          */
1403         return IRQ_HANDLED;
1404 }
1405
1406 /**
1407  * kvm_vgic_vcpu_init - Initialize per-vcpu VGIC state
1408  * @vcpu: pointer to the vcpu struct
1409  *
1410  * Initialize the vgic_cpu struct and vgic_dist struct fields pertaining to
1411  * this vcpu and enable the VGIC for this VCPU
1412  */
1413 int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
1414 {
1415         struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1416         struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1417         int i;
1418
1419         if (vcpu->vcpu_id >= VGIC_MAX_CPUS)
1420                 return -EBUSY;
1421
1422         for (i = 0; i < VGIC_NR_IRQS; i++) {
1423                 if (i < VGIC_NR_PPIS)
1424                         vgic_bitmap_set_irq_val(&dist->irq_enabled,
1425                                                 vcpu->vcpu_id, i, 1);
1426                 if (i < VGIC_NR_PRIVATE_IRQS)
1427                         vgic_bitmap_set_irq_val(&dist->irq_cfg,
1428                                                 vcpu->vcpu_id, i, VGIC_CFG_EDGE);
1429
1430                 vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
1431         }
1432
1433         /*
1434          * By forcing VMCR to zero, the GIC will restore the binary
1435          * points to their reset values. Anything else resets to zero
1436          * anyway.
1437          */
1438         vgic_cpu->vgic_vmcr = 0;
1439
1440         vgic_cpu->nr_lr = vgic_nr_lr;
1441         vgic_cpu->vgic_hcr = GICH_HCR_EN; /* Get the show on the road... */
1442
1443         return 0;
1444 }
1445
1446 static void vgic_init_maintenance_interrupt(void *info)
1447 {
1448         enable_percpu_irq(vgic_maint_irq, 0);
1449 }
1450
1451 static int vgic_cpu_notify(struct notifier_block *self,
1452                            unsigned long action, void *cpu)
1453 {
1454         switch (action) {
1455         case CPU_STARTING:
1456         case CPU_STARTING_FROZEN:
1457                 vgic_init_maintenance_interrupt(NULL);
1458                 break;
1459         case CPU_DYING:
1460         case CPU_DYING_FROZEN:
1461                 disable_percpu_irq(vgic_maint_irq);
1462                 break;
1463         }
1464
1465         return NOTIFY_OK;
1466 }
1467
1468 static struct notifier_block vgic_cpu_nb = {
1469         .notifier_call = vgic_cpu_notify,
1470 };
1471
1472 int kvm_vgic_hyp_init(void)
1473 {
1474         int ret;
1475         struct resource vctrl_res;
1476         struct resource vcpu_res;
1477
1478         vgic_node = of_find_compatible_node(NULL, NULL, "arm,cortex-a15-gic");
1479         if (!vgic_node) {
1480                 kvm_err("error: no compatible vgic node in DT\n");
1481                 return -ENODEV;
1482         }
1483
1484         vgic_maint_irq = irq_of_parse_and_map(vgic_node, 0);
1485         if (!vgic_maint_irq) {
1486                 kvm_err("error getting vgic maintenance irq from DT\n");
1487                 ret = -ENXIO;
1488                 goto out;
1489         }
1490
1491         ret = request_percpu_irq(vgic_maint_irq, vgic_maintenance_handler,
1492                                  "vgic", kvm_get_running_vcpus());
1493         if (ret) {
1494                 kvm_err("Cannot register interrupt %d\n", vgic_maint_irq);
1495                 goto out;
1496         }
1497
1498         ret = register_cpu_notifier(&vgic_cpu_nb);
1499         if (ret) {
1500                 kvm_err("Cannot register vgic CPU notifier\n");
1501                 goto out_free_irq;
1502         }
1503
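        /*
         * In the "arm,cortex-a15-gic" binding, region 2 is the GICH
         * virtual interface control block and region 3 the GICV virtual
         * CPU interface that gets mapped into the guest.
         */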
1504         ret = of_address_to_resource(vgic_node, 2, &vctrl_res);
1505         if (ret) {
1506                 kvm_err("Cannot obtain VCTRL resource\n");
1507                 goto out_free_irq;
1508         }
1509
1510         vgic_vctrl_base = of_iomap(vgic_node, 2);
1511         if (!vgic_vctrl_base) {
1512                 kvm_err("Cannot ioremap VCTRL\n");
1513                 ret = -ENOMEM;
1514                 goto out_free_irq;
1515         }
1516
1517         vgic_nr_lr = readl_relaxed(vgic_vctrl_base + GICH_VTR);
1518         vgic_nr_lr = (vgic_nr_lr & 0x3f) + 1;
1519
1520         ret = create_hyp_io_mappings(vgic_vctrl_base,
1521                                      vgic_vctrl_base + resource_size(&vctrl_res),
1522                                      vctrl_res.start);
1523         if (ret) {
1524                 kvm_err("Cannot map VCTRL into hyp\n");
1525                 goto out_unmap;
1526         }
1527
1528         kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
1529                  vctrl_res.start, vgic_maint_irq);
1530         on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
1531
1532         if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
1533                 kvm_err("Cannot obtain VCPU resource\n");
1534                 ret = -ENXIO;
1535                 goto out_unmap;
1536         }
1537         vgic_vcpu_base = vcpu_res.start;
1538
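        /* Success: drop the DT node reference via the shared exit path. */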
1539         goto out;
1540
1541 out_unmap:
1542         iounmap(vgic_vctrl_base);
1543 out_free_irq:
1544         free_percpu_irq(vgic_maint_irq, kvm_get_running_vcpus());
1545 out:
1546         of_node_put(vgic_node);
1547         return ret;
1548 }
1549
1550 /**
1551  * kvm_vgic_init - Initialize global VGIC state before running any VCPUs
1552  * @kvm: pointer to the kvm struct
1553  *
1554  * Map the virtual CPU interface into the VM before running any VCPUs.  We
1555  * can't do this at creation time, because user space must first set the
1556  * virtual CPU interface address in the guest physical address space.  Also
1557  * initialize the ITARGETSRn regs to 0 on the emulated distributor.
1558  */
1559 int kvm_vgic_init(struct kvm *kvm)
1560 {
1561         int ret = 0, i;
1562
1563         if (!irqchip_in_kernel(kvm))
1564                 return 0;
1565
1566         mutex_lock(&kvm->lock);
1567
1568         if (vgic_initialized(kvm))
1569                 goto out;
1570
1571         if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
1572             IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) {
1573                 kvm_err("Need to set vgic cpu and dist addresses first\n");
1574                 ret = -ENXIO;
1575                 goto out;
1576         }
1577
1578         ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
1579                                     vgic_vcpu_base, KVM_VGIC_V2_CPU_SIZE);
1580         if (ret) {
1581                 kvm_err("Unable to remap VGIC CPU to VCPU\n");
1582                 goto out;
1583         }
1584
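        /* Each ITARGETSRn register covers four interrupts, hence the stride. */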
1585         for (i = VGIC_NR_PRIVATE_IRQS; i < VGIC_NR_IRQS; i += 4)
1586                 vgic_set_target_reg(kvm, 0, i);
1587
1588         kvm->arch.vgic.ready = true;
1589 out:
1590         mutex_unlock(&kvm->lock);
1591         return ret;
1592 }
1593
1594 int kvm_vgic_create(struct kvm *kvm)
1595 {
1596         int i, vcpu_lock_idx = -1, ret = 0;
1597         struct kvm_vcpu *vcpu;
1598
1599         mutex_lock(&kvm->lock);
1600
1601         if (kvm->arch.vgic.vctrl_base) {
1602                 ret = -EEXIST;
1603                 goto out;
1604         }
1605
1606         /*
1607          * Any time a vcpu is run, vcpu_load is called which tries to grab the
1608          * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
1609          * that no other VCPUs are run while we create the vgic.
1610          */
1611         kvm_for_each_vcpu(i, vcpu, kvm) {
1612                 if (!mutex_trylock(&vcpu->mutex))
1613                         goto out_unlock;
1614                 vcpu_lock_idx = i;
1615         }
1616
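        /* The vgic cannot be created once any VCPU has started running. */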
1617         kvm_for_each_vcpu(i, vcpu, kvm) {
1618                 if (vcpu->arch.has_run_once) {
1619                         ret = -EBUSY;
1620                         goto out_unlock;
1621                 }
1622         }
1623
1624         spin_lock_init(&kvm->arch.vgic.lock);
1625         kvm->arch.vgic.vctrl_base = vgic_vctrl_base;
1626         kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
1627         kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
1628
1629 out_unlock:
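        /* Release only the vcpu mutexes we actually took, in reverse order. */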
1630         for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
1631                 vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
1632                 mutex_unlock(&vcpu->mutex);
1633         }
1634
1635 out:
1636         mutex_unlock(&kvm->lock);
1637         return ret;
1638 }
1639
1640 static int vgic_ioaddr_overlap(struct kvm *kvm)
1641 {
1642         phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
1643         phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;
1644
1645         if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
1646                 return 0;
1647         if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) ||
1648             (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist))
1649                 return -EBUSY;
1650         return 0;
1651 }
1652
1653 static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
1654                               phys_addr_t addr, phys_addr_t size)
1655 {
1656         int ret;
1657
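        /*
         * The base must lie within the guest IPA space, be 4K aligned, be
         * set only once, not wrap around, and not overlap the other region.
         */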
1658         if (addr & ~KVM_PHYS_MASK)
1659                 return -E2BIG;
1660
1661         if (addr & (SZ_4K - 1))
1662                 return -EINVAL;
1663
1664         if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
1665                 return -EEXIST;
1666         if (addr + size < addr)
1667                 return -EINVAL;
1668
1669         ret = vgic_ioaddr_overlap(kvm);
1670         if (ret)
1671                 return ret;
1672         *ioaddr = addr;
1673         return ret;
1674 }
1675
1676 /**
1677  * kvm_vgic_addr - set or get vgic VM base addresses
1678  * @kvm:   pointer to the vm struct
1679  * @type:  the VGIC addr type, one of KVM_VGIC_V2_ADDR_TYPE_XXX
1680  * @addr:  pointer to address value
1681  * @write: if true set the address in the VM address space, if false read the
1682  *          address
1683  *
1684  * Set or get the vgic base addresses for the distributor and the virtual CPU
1685  * interface in the VM physical address space.  These addresses are properties
1686  * of the emulated core/SoC and therefore user space initially knows this
1687  * information.
1688  */
1689 int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
1690 {
1691         int r = 0;
1692         struct vgic_dist *vgic = &kvm->arch.vgic;
1693
1694         mutex_lock(&kvm->lock);
1695         switch (type) {
1696         case KVM_VGIC_V2_ADDR_TYPE_DIST:
1697                 if (write) {
1698                         r = vgic_ioaddr_assign(kvm, &vgic->vgic_dist_base,
1699                                                *addr, KVM_VGIC_V2_DIST_SIZE);
1700                 } else {
1701                         *addr = vgic->vgic_dist_base;
1702                 }
1703                 break;
1704         case KVM_VGIC_V2_ADDR_TYPE_CPU:
1705                 if (write) {
1706                         r = vgic_ioaddr_assign(kvm, &vgic->vgic_cpu_base,
1707                                                *addr, KVM_VGIC_V2_CPU_SIZE);
1708                 } else {
1709                         *addr = vgic->vgic_cpu_base;
1710                 }
1711                 break;
1712         default:
1713                 r = -ENODEV;
1714         }
1715
1716         mutex_unlock(&kvm->lock);
1717         return r;
1718 }
1719
1720 static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
1721                                  struct kvm_exit_mmio *mmio, phys_addr_t offset)
1722 {
1723         struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1724         u32 reg, mask = 0, shift = 0;
1725         bool updated = false;
1726
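        /*
         * The guest-visible GICC state is backed by GICH_VMCR, so each
         * CPU interface register maps onto a mask/shift within VMCR.
         */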
1727         switch (offset & ~0x3) {
1728         case GIC_CPU_CTRL:
1729                 mask = GICH_VMCR_CTRL_MASK;
1730                 shift = GICH_VMCR_CTRL_SHIFT;
1731                 break;
1732         case GIC_CPU_PRIMASK:
1733                 mask = GICH_VMCR_PRIMASK_MASK;
1734                 shift = GICH_VMCR_PRIMASK_SHIFT;
1735                 break;
1736         case GIC_CPU_BINPOINT:
1737                 mask = GICH_VMCR_BINPOINT_MASK;
1738                 shift = GICH_VMCR_BINPOINT_SHIFT;
1739                 break;
1740         case GIC_CPU_ALIAS_BINPOINT:
1741                 mask = GICH_VMCR_ALIAS_BINPOINT_MASK;
1742                 shift = GICH_VMCR_ALIAS_BINPOINT_SHIFT;
1743                 break;
1744         }
1745
1746         if (!mmio->is_write) {
1747                 reg = (vgic_cpu->vgic_vmcr & mask) >> shift;
1748                 mmio_data_write(mmio, ~0, reg);
1749         } else {
1750                 reg = mmio_data_read(mmio, ~0);
1751                 reg = (reg << shift) & mask;
1752                 if (reg != (vgic_cpu->vgic_vmcr & mask))
1753                         updated = true;
1754                 vgic_cpu->vgic_vmcr &= ~mask;
1755                 vgic_cpu->vgic_vmcr |= reg;
1756         }
1757         return updated;
1758 }
1759
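/* The aliased binary point register is backed by the same VMCR handling. */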
1760 static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
1761                              struct kvm_exit_mmio *mmio, phys_addr_t offset)
1762 {
1763         return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
1764 }
1765
1766 static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
1767                                   struct kvm_exit_mmio *mmio,
1768                                   phys_addr_t offset)
1769 {
1770         u32 reg;
1771
1772         if (mmio->is_write)
1773                 return false;
1774
1775         /* GICC_IIDR: ProductID[31:20], ArchRev[19:16], Implementer[11:0] */
1776         reg = (PRODUCT_ID_KVM << 20) |
1777               (GICC_ARCH_VERSION_V2 << 16) |
1778               (IMPLEMENTER_ARM << 0);
1779         mmio_data_write(mmio, ~0, reg);
1780         return false;
1781 }
1782
1783 /*
1784  * CPU Interface Register accesses - these are not accessed by the VM, but by
1785  * user space for saving and restoring VGIC state.
1786  */
1787 static const struct mmio_range vgic_cpu_ranges[] = {
1788         {
1789                 .base           = GIC_CPU_CTRL,
1790                 .len            = 12,
1791                 .handle_mmio    = handle_cpu_mmio_misc,
1792         },
1793         {
1794                 .base           = GIC_CPU_ALIAS_BINPOINT,
1795                 .len            = 4,
1796                 .handle_mmio    = handle_mmio_abpr,
1797         },
1798         {
1799                 .base           = GIC_CPU_ACTIVEPRIO,
1800                 .len            = 16,
1801                 .handle_mmio    = handle_mmio_raz_wi,
1802         },
1803         {
1804                 .base           = GIC_CPU_IDENT,
1805                 .len            = 4,
1806                 .handle_mmio    = handle_cpu_mmio_ident,
1807         },
1808 };
1809
1810 static int vgic_attr_regs_access(struct kvm_device *dev,
1811                                  struct kvm_device_attr *attr,
1812                                  u32 *reg, bool is_write)
1813 {
1814         const struct mmio_range *r = NULL, *ranges;
1815         phys_addr_t offset;
1816         int ret, cpuid, c;
1817         struct kvm_vcpu *vcpu, *tmp_vcpu;
1818         struct vgic_dist *vgic;
1819         struct kvm_exit_mmio mmio;
1820
1821         offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
1822         cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
1823                 KVM_DEV_ARM_VGIC_CPUID_SHIFT;
1824
1825         mutex_lock(&dev->kvm->lock);
1826
1827         if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
1828                 ret = -EINVAL;
1829                 goto out;
1830         }
1831
1832         vcpu = kvm_get_vcpu(dev->kvm, cpuid);
1833         vgic = &dev->kvm->arch.vgic;
1834
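        /*
         * Fake a 32-bit MMIO access so the existing distributor and CPU
         * interface handlers can be reused for the device attribute API.
         */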
1835         mmio.len = 4;
1836         mmio.is_write = is_write;
1837         if (is_write)
1838                 mmio_data_write(&mmio, ~0, *reg);
1839         switch (attr->group) {
1840         case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
1841                 mmio.phys_addr = vgic->vgic_dist_base + offset;
1842                 ranges = vgic_dist_ranges;
1843                 break;
1844         case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
1845                 mmio.phys_addr = vgic->vgic_cpu_base + offset;
1846                 ranges = vgic_cpu_ranges;
1847                 break;
1848         default:
1849                 BUG();
1850         }
1851         r = find_matching_range(ranges, &mmio, offset);
1852
1853         if (unlikely(!r || !r->handle_mmio)) {
1854                 ret = -ENXIO;
1855                 goto out;
1856         }
1857
1858
1859         spin_lock(&vgic->lock);
1860
1861         /*
1862          * Ensure that no other VCPU is running by checking the vcpu->cpu
1863          * field.  If no other VCPUs are running we can safely access the VGIC
1864          * state, because even if another VCPU is run after this point, that
1865          * VCPU will not touch the vgic state, because it will block on
1866          * getting the vgic->lock in kvm_vgic_sync_hwstate().
1867          */
1868         kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
1869                 if (unlikely(tmp_vcpu->cpu != -1)) {
1870                         ret = -EBUSY;
1871                         goto out_vgic_unlock;
1872                 }
1873         }
1874
1875         /*
1876          * Move all pending IRQs from the LRs on all VCPUs so the pending
1877          * state can be properly represented in the register state accessible
1878          * through this API.
1879          */
1880         kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
1881                 vgic_unqueue_irqs(tmp_vcpu);
1882
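        /* Handlers expect the offset relative to the start of their range. */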
1883         offset -= r->base;
1884         r->handle_mmio(vcpu, &mmio, offset);
1885
1886         if (!is_write)
1887                 *reg = mmio_data_read(&mmio, ~0);
1888
1889         ret = 0;
1890 out_vgic_unlock:
1891         spin_unlock(&vgic->lock);
1892 out:
1893         mutex_unlock(&dev->kvm->lock);
1894         return ret;
1895 }
1896
1897 static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1898 {
1899         int r;
1900
1901         switch (attr->group) {
1902         case KVM_DEV_ARM_VGIC_GRP_ADDR: {
1903                 u64 __user *uaddr = (u64 __user *)(long)attr->addr;
1904                 u64 addr;
1905                 unsigned long type = (unsigned long)attr->attr;
1906
1907                 if (copy_from_user(&addr, uaddr, sizeof(addr)))
1908                         return -EFAULT;
1909
1910                 r = kvm_vgic_addr(dev->kvm, type, &addr, true);
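                /* The device attribute API reports an unknown address type as -ENXIO. */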
1911                 return (r == -ENODEV) ? -ENXIO : r;
1912         }
1913
1914         case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
1915         case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
1916                 u32 __user *uaddr = (u32 __user *)(long)attr->addr;
1917                 u32 reg;
1918
1919                 if (get_user(reg, uaddr))
1920                         return -EFAULT;
1921
1922                 return vgic_attr_regs_access(dev, attr, &reg, true);
1923         }
1924
1925         }
1926
1927         return -ENXIO;
1928 }
1929
1930 static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1931 {
1932         int r = -ENXIO;
1933
1934         switch (attr->group) {
1935         case KVM_DEV_ARM_VGIC_GRP_ADDR: {
1936                 u64 __user *uaddr = (u64 __user *)(long)attr->addr;
1937                 u64 addr;
1938                 unsigned long type = (unsigned long)attr->attr;
1939
1940                 r = kvm_vgic_addr(dev->kvm, type, &addr, false);
1941                 if (r)
1942                         return (r == -ENODEV) ? -ENXIO : r;
1943
1944                 if (copy_to_user(uaddr, &addr, sizeof(addr)))
1945                         return -EFAULT;
1946                 break;
1947         }
1948
1949         case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
1950         case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
1951                 u32 __user *uaddr = (u32 __user *)(long)attr->addr;
1952                 u32 reg = 0;
1953
1954                 r = vgic_attr_regs_access(dev, attr, &reg, false);
1955                 if (r)
1956                         return r;
1957                 r = put_user(reg, uaddr);
1958                 break;
1959         }
1960
1961         }
1962
1963         return r;
1964 }
1965
1966 static int vgic_has_attr_regs(const struct mmio_range *ranges,
1967                               phys_addr_t offset)
1968 {
1969         struct kvm_exit_mmio dev_attr_mmio;
1970
1971         dev_attr_mmio.len = 4;
1972         if (find_matching_range(ranges, &dev_attr_mmio, offset))
1973                 return 0;
1974         else
1975                 return -ENXIO;
1976 }
1977
1978 static int vgic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1979 {
1980         phys_addr_t offset;
1981
1982         switch (attr->group) {
1983         case KVM_DEV_ARM_VGIC_GRP_ADDR:
1984                 switch (attr->attr) {
1985                 case KVM_VGIC_V2_ADDR_TYPE_DIST:
1986                 case KVM_VGIC_V2_ADDR_TYPE_CPU:
1987                         return 0;
1988                 }
1989                 break;
1990         case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
1991                 offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
1992                 return vgic_has_attr_regs(vgic_dist_ranges, offset);
1993         case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
1994                 offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
1995                 return vgic_has_attr_regs(vgic_cpu_ranges, offset);
1996         }
1997         return -ENXIO;
1998 }
1999
2000 static void vgic_destroy(struct kvm_device *dev)
2001 {
2002         kfree(dev);
2003 }
2004
2005 static int vgic_create(struct kvm_device *dev, u32 type)
2006 {
2007         return kvm_vgic_create(dev->kvm);
2008 }
2009
2010 struct kvm_device_ops kvm_arm_vgic_v2_ops = {
2011         .name = "kvm-arm-vgic",
2012         .create = vgic_create,
2013         .destroy = vgic_destroy,
2014         .set_attr = vgic_set_attr,
2015         .get_attr = vgic_get_attr,
2016         .has_attr = vgic_has_attr,
2017 };