/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *     Kevin Wolf <mail@kevin-wolf.de>
 *     Paul Mackerras <paulus@samba.org>
 *
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include "book3s.h"

#define CREATE_TRACE_POINTS
#include "trace_pr.h"
/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                             ulong msr);
/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#endif
static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
        svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
        svcpu_put(svcpu);
#endif
        vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
        current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
#endif
}
static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
        to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
        svcpu_put(svcpu);
#endif

        /* Make sure we save the guest FPU/Altivec/VSX state */
        kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);

        vcpu->cpu = -1;
}
/* Copy data needed by real-mode code from vcpu to shadow vcpu */
void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
                          struct kvm_vcpu *vcpu)
{
        svcpu->gpr[0] = vcpu->arch.gpr[0];
        svcpu->gpr[1] = vcpu->arch.gpr[1];
        svcpu->gpr[2] = vcpu->arch.gpr[2];
        svcpu->gpr[3] = vcpu->arch.gpr[3];
        svcpu->gpr[4] = vcpu->arch.gpr[4];
        svcpu->gpr[5] = vcpu->arch.gpr[5];
        svcpu->gpr[6] = vcpu->arch.gpr[6];
        svcpu->gpr[7] = vcpu->arch.gpr[7];
        svcpu->gpr[8] = vcpu->arch.gpr[8];
        svcpu->gpr[9] = vcpu->arch.gpr[9];
        svcpu->gpr[10] = vcpu->arch.gpr[10];
        svcpu->gpr[11] = vcpu->arch.gpr[11];
        svcpu->gpr[12] = vcpu->arch.gpr[12];
        svcpu->gpr[13] = vcpu->arch.gpr[13];
        svcpu->cr  = vcpu->arch.cr;
        svcpu->xer = vcpu->arch.xer;
        svcpu->ctr = vcpu->arch.ctr;
        svcpu->lr  = vcpu->arch.lr;
        svcpu->pc  = vcpu->arch.pc;
}
/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
                            struct kvmppc_book3s_shadow_vcpu *svcpu)
{
        vcpu->arch.gpr[0] = svcpu->gpr[0];
        vcpu->arch.gpr[1] = svcpu->gpr[1];
        vcpu->arch.gpr[2] = svcpu->gpr[2];
        vcpu->arch.gpr[3] = svcpu->gpr[3];
        vcpu->arch.gpr[4] = svcpu->gpr[4];
        vcpu->arch.gpr[5] = svcpu->gpr[5];
        vcpu->arch.gpr[6] = svcpu->gpr[6];
        vcpu->arch.gpr[7] = svcpu->gpr[7];
        vcpu->arch.gpr[8] = svcpu->gpr[8];
        vcpu->arch.gpr[9] = svcpu->gpr[9];
        vcpu->arch.gpr[10] = svcpu->gpr[10];
        vcpu->arch.gpr[11] = svcpu->gpr[11];
        vcpu->arch.gpr[12] = svcpu->gpr[12];
        vcpu->arch.gpr[13] = svcpu->gpr[13];
        vcpu->arch.cr  = svcpu->cr;
        vcpu->arch.xer = svcpu->xer;
        vcpu->arch.ctr = svcpu->ctr;
        vcpu->arch.lr  = svcpu->lr;
        vcpu->arch.pc  = svcpu->pc;
        vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
        vcpu->arch.fault_dar   = svcpu->fault_dar;
        vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
        vcpu->arch.last_inst   = svcpu->last_inst;
}
static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
{
        int r = 1; /* Indicate we want to get back into the guest */

        /* We misuse TLB_FLUSH to indicate that we want to clear
           all shadow cache entries */
        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
                kvmppc_mmu_pte_flush(vcpu, 0, 0);

        return r;
}
/************* MMU Notifiers *************/
static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
                             unsigned long end)
{
        long i;
        struct kvm_vcpu *vcpu;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        slots = kvm_memslots(kvm);
        kvm_for_each_memslot(memslot, slots) {
                unsigned long hva_start, hva_end;
                gfn_t gfn, gfn_end;

                hva_start = max(start, memslot->userspace_addr);
                hva_end = min(end, memslot->userspace_addr +
                                   (memslot->npages << PAGE_SHIFT));
                if (hva_start >= hva_end)
                        continue;
                /*
                 * {gfn(page) | page intersects with [hva_start, hva_end)} =
                 * {gfn, gfn+1, ..., gfn_end-1}.
                 */
                gfn = hva_to_gfn_memslot(hva_start, memslot);
                gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
                kvm_for_each_vcpu(i, vcpu, kvm)
                        kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
                                              gfn_end << PAGE_SHIFT);
        }
}
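/*
 * Worked example for the loop above (illustrative numbers, not taken from
 * the source): for a memslot with userspace_addr = 0x10000000,
 * base_gfn = 0x100 and npages = 0x100, unmapping the single page at
 * hva 0x10003000 clips to [0x10003000, 0x10004000), which
 * hva_to_gfn_memslot() turns into gfn = 0x103 and gfn_end = 0x104, so the
 * guest physical range [0x103000, 0x104000) is flushed on every vcpu.
 */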
static int kvm_unmap_hva_pr(struct kvm *kvm, unsigned long hva)
{
        trace_kvm_unmap_hva(hva);

        do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);

        return 0;
}

static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
                                  unsigned long end)
{
        do_kvm_unmap_hva(kvm, start, end);

        return 0;
}
static int kvm_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
        /* XXX could be more clever ;) */
        return 0;
}

static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
        /* XXX could be more clever ;) */
        return 0;
}

static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        /* The page will get remapped properly on its next fault */
        do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
}
/*****************************************/

static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
        ulong smsr = vcpu->arch.shared->msr;

        /* Guest MSR values */
        smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE;
        /* Process MSR values */
        smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
        /* External providers the guest has reserved */
        smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
        /* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
        smsr |= MSR_ISF | MSR_HV;
#endif
        vcpu->arch.shadow_msr = smsr;
}
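/*
 * Example of the mapping above (a sketch, not an exhaustive truth table):
 * a 64-bit guest kernel running with MSR = MSR_SF | MSR_ME (MSR_PR clear)
 * gets a shadow MSR of MSR_SF | MSR_ME | MSR_RI | MSR_IR | MSR_DR |
 * MSR_PR | MSR_EE (plus MSR_ISF | MSR_HV on 64-bit hosts): the guest's
 * notion of privilege is virtualized away and the real CPU always runs
 * the guest in problem state with translation enabled.
 */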
static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
{
        ulong old_msr = vcpu->arch.shared->msr;

#ifdef EXIT_DEBUG
        printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

        msr &= to_book3s(vcpu)->msr_mask;
        vcpu->arch.shared->msr = msr;
        kvmppc_recalc_shadow_msr(vcpu);

        if (msr & MSR_POW) {
                if (!vcpu->arch.pending_exceptions) {
                        kvm_vcpu_block(vcpu);
                        clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                        vcpu->stat.halt_wakeup++;

                        /* Unset POW bit after we woke up */
                        msr &= ~MSR_POW;
                        vcpu->arch.shared->msr = msr;
                }
        }

        if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
                   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
                kvmppc_mmu_flush_segments(vcpu);
                kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

                /* Preload magic page segment when in kernel mode */
                if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
                        struct kvm_vcpu_arch *a = &vcpu->arch;

                        if (msr & MSR_DR)
                                kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
                        else
                                kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
                }
        }
        /*
         * When switching from 32 to 64-bit, we may have a stale 32-bit
         * magic page around; we need to flush it. Typically the 32-bit
         * magic page will be instantiated when calling into RTAS. Note:
         * We assume that such a transition only happens while in kernel
         * mode, i.e. we never transition from user 32-bit to kernel
         * 64-bit with a 32-bit magic page around.
         */
        if (vcpu->arch.magic_page_pa &&
            !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
                /* going from RTAS to normal kernel code */
                kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
                                     ~0xFFFUL);
        }
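        /*
         * Illustrative scenario (hypothetical addresses): the guest
         * registers a magic page at 0x3000 and calls into 32-bit RTAS, so
         * shadow PTEs for the page are created under its 32-bit address.
         * On the next MSR_SF 0 -> 1 transition back into the 64-bit
         * kernel, the flush above drops those stale 32-bit mappings so the
         * page is remapped at its 64-bit effective address on the next
         * access.
         */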
        /* Preload FPU if it's enabled */
        if (vcpu->arch.shared->msr & MSR_FP)
                kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}
void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
{
        u32 host_pvr;

        vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
        vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
        if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
                kvmppc_mmu_book3s_64_init(vcpu);
                if (!to_book3s(vcpu)->hior_explicit)
                        to_book3s(vcpu)->hior = 0xfff00000;
                to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
                vcpu->arch.cpu_type = KVM_CPU_3S_64;
        } else
#endif
        {
                kvmppc_mmu_book3s_32_init(vcpu);
                if (!to_book3s(vcpu)->hior_explicit)
                        to_book3s(vcpu)->hior = 0;
                to_book3s(vcpu)->msr_mask = 0xffffffffULL;
                vcpu->arch.cpu_type = KVM_CPU_3S_32;
        }

        kvmppc_sanity_check(vcpu);
        /* If we are in hypervisor level on 970, we can tell the CPU to
         * treat DCBZ as a 32-byte store */
        vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
        if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
            !strcmp(cur_cpu_spec->platform, "ppc970"))
                vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
        /* Cell performs badly if MSR_FEx are set. So let's hope nobody
           really needs them in a VM on Cell and force-disable them. */
        if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
                to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);
        /*
         * If they're asking for POWER6 or later, set the flag
         * indicating that we can do multiple large page sizes
         * and 1TB segments.
         * Also set the flag that indicates that tlbie has the large
         * page bit in the RB operand instead of the instruction.
         */
        switch (PVR_VER(pvr)) {
        case PVR_POWER6:
        case PVR_POWER7:
        case PVR_POWER7p:
        case PVR_POWER8:
                vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
                        BOOK3S_HFLAG_NEW_TLBIE;
                break;
        }
#ifdef CONFIG_PPC_BOOK3S_32
        /* 32-bit Book3S always has a 32-byte dcbz */
        vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif
        /* On some CPUs we can execute paired single operations natively */
        asm ("mfpvr %0" : "=r"(host_pvr));
        switch (host_pvr) {
        case 0x00080200:        /* lonestar 2.0 */
        case 0x00088202:        /* lonestar 2.2 */
        case 0x70000100:        /* gekko 1.0 */
        case 0x00080100:        /* gekko 2.0 */
        case 0x00083203:        /* gekko 2.3a */
        case 0x00083213:        /* gekko 2.3b */
        case 0x00083204:        /* gekko 2.4 */
        case 0x00083214:        /* gekko 2.4e (8SE) - retail HW2 */
        case 0x00087200:        /* broadway */
                vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
                /* Enable HID2.PSE - in case we need it later */
                mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
        }
}
/* Book3s_32 CPUs always have a 32-byte cache line size, which Linux assumes.
 * To make Book3s_32 Linux work on Book3s_64, we have to make sure we trap
 * dcbz and emulate a 32-byte dcbz length.
 *
 * The Book3s_64 designers also realized this case and implemented a special
 * bit in the HID5 register, which is a hypervisor resource. Thus we can't
 * use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
        struct page *hpage;
        u64 hpage_offset;
        u32 *page;
        int i;

        hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
        if (is_error_page(hpage))
                return;

        hpage_offset = pte->raddr & ~PAGE_MASK;
        hpage_offset &= ~0xFFFULL;
        hpage_offset /= 4;

        get_page(hpage);
        page = kmap_atomic(hpage);

        /* patch dcbz into reserved instruction, so we trap */
        for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
                if ((page[i] & 0xff0007ff) == INS_DCBZ)
                        page[i] &= 0xfffffff7;

        kunmap_atomic(page);
        put_page(hpage);
}
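/*
 * Worked example of the masking above: dcbz encodes as primary opcode 31
 * with extended opcode 1014, i.e. INS_DCBZ == 0x7c0007ec, and the
 * 0xff0007ff mask compares only the opcode fields while ignoring the RA/RB
 * operands. Clearing bit 0x8 turns extended opcode 1014 into 1010, a
 * reserved encoding, so every patched dcbz now raises a program interrupt
 * that the exit handler catches and emulates as a 32-byte store.
 */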
static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        ulong mp_pa = vcpu->arch.magic_page_pa;

        if (!(vcpu->arch.shared->msr & MSR_SF))
                mp_pa = (uint32_t)mp_pa;

        if (unlikely(mp_pa) &&
            unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
                return 1;
        }

        return kvm_is_visible_gfn(vcpu->kvm, gfn);
}
int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                            ulong eaddr, int vec)
{
        bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
        bool iswrite = false;
        int r = RESUME_GUEST;
        int relocated;
        int page_found = 0;
        struct kvmppc_pte pte;
        bool is_mmio = false;
        bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
        bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
        u64 vsid;

        relocated = data ? dr : ir;
        if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
                iswrite = true;

        /* Resolve real address if translation turned on */
        if (relocated) {
                page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
        } else {
                pte.may_execute = true;
                pte.may_read = true;
                pte.may_write = true;
                pte.raddr = eaddr & KVM_PAM;
                pte.eaddr = eaddr;
                pte.vpage = eaddr >> 12;
                pte.page_size = MMU_PAGE_64K;
        }
        switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
        case 0:
                pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
                break;
        case MSR_DR:
        case MSR_IR:
                vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

                if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
                        pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
                else
                        pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
                pte.vpage |= vsid;

                if (vsid == -1)
                        page_found = -EINVAL;
                break;
        }
        if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
           (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
                /*
                 * If we do the dcbz hack, we have to NX on every execution,
                 * so we can patch the executing code. This renders our guest
                 * NX-less.
                 */
                pte.may_execute = !data;
        }
        if (page_found == -ENOENT) {
                /* Page not found in guest PTE entries */
                vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
                vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr;
                vcpu->arch.shared->msr |=
                        vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
                kvmppc_book3s_queue_irqprio(vcpu, vec);
        } else if (page_found == -EPERM) {
                /* Storage protection */
                vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
                vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
                vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
                vcpu->arch.shared->msr |=
                        vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
                kvmppc_book3s_queue_irqprio(vcpu, vec);
        } else if (page_found == -EINVAL) {
                /* Page not found in guest SLB */
                vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
                kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
        } else if (!is_mmio &&
                   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
                if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
                        /*
                         * There is already a host HPTE there, presumably
                         * a read-only one for a page the guest thinks
                         * is writable, so get rid of it first.
                         */
                        kvmppc_mmu_unmap_page(vcpu, &pte);
                }
                /* The guest's PTE is not mapped yet. Map on the host */
                kvmppc_mmu_map_page(vcpu, &pte, iswrite);
                if (data)
                        vcpu->stat.sp_storage++;
                else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
                         (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
                        kvmppc_patch_dcbz(vcpu, &pte);
        } else {
                /* MMIO */
                vcpu->stat.mmio_exits++;
                vcpu->arch.paddr_accessed = pte.raddr;
                vcpu->arch.vaddr_accessed = pte.eaddr;
                r = kvmppc_emulate_mmio(run, vcpu);
                if (r == RESUME_HOST_NV)
                        r = RESUME_HOST;
        }

        return r;
}
static inline int get_fpr_index(int i)
{
        return i * TS_FPRWIDTH;
}
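/*
 * Example (assuming TS_FPRWIDTH == 2, as on CONFIG_VSX kernels of this
 * era): FP register n occupies the first doubleword of VSX register n, so
 * thread.fp_state.fpr[] has a stride of two u64s per register and
 * get_fpr_index(5) == 10 locates FPR5. Without VSX, TS_FPRWIDTH is 1 and
 * the index is simply n.
 */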
/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
        struct thread_struct *t = &current->thread;

        /*
         * VSX instructions can access FP and vector registers, so if
         * we are giving up VSX, make sure we give up FP and VMX as well.
         */
        if (msr & MSR_VSX)
                msr |= MSR_FP | MSR_VEC;

        msr &= vcpu->arch.guest_owned_ext;
        if (!msr)
                return;

#ifdef DEBUG_EXT
        printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

        if (msr & MSR_FP) {
                /*
                 * Note that on CPUs with VSX, giveup_fpu stores
                 * both the traditional FP registers and the added VSX
                 * registers into thread.fp_state.fpr[].
                 */
                if (current->thread.regs->msr & MSR_FP)
                        giveup_fpu(current);
                vcpu->arch.fp = t->fp_state;
        }

#ifdef CONFIG_ALTIVEC
        if (msr & MSR_VEC) {
                if (current->thread.regs->msr & MSR_VEC)
                        giveup_altivec(current);
                vcpu->arch.vr = t->vr_state;
        }
#endif

        vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
        kvmppc_recalc_shadow_msr(vcpu);
}
static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
{
        ulong srr0 = kvmppc_get_pc(vcpu);
        u32 last_inst = kvmppc_get_last_inst(vcpu);
        int ret;

        ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
        if (ret == -ENOENT) {
                ulong msr = vcpu->arch.shared->msr;

                msr = kvmppc_set_field(msr, 33, 33, 1);
                msr = kvmppc_set_field(msr, 34, 36, 0);
                vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
                kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
                return EMULATE_AGAIN;
        }

        return EMULATE_DONE;
}
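/*
 * The kvmppc_set_field() calls above use IBM (most-significant-bit-first)
 * numbering on the 64-bit MSR value. As a worked example under that
 * convention, bit 33 is 1ul << (63 - 33) == 0x40000000, so the sequence
 * sets SRR1 bit 33 (translation not found), clears bits 34-36
 * (0x38000000) and clears bits 42-47 (0x003f0000), i.e. the flag bits a
 * real instruction storage interrupt would deliver.
 */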
static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
        /* Need to do paired single emulation? */
        if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
                return EMULATE_DONE;

        /* Read out the instruction */
        if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
                /* Need to emulate */
                return EMULATE_FAIL;

        return EMULATE_AGAIN;
}
/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                             ulong msr)
{
        struct thread_struct *t = &current->thread;

        /* When we have paired singles, we emulate in software */
        if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
                return RESUME_GUEST;

        if (!(vcpu->arch.shared->msr & msr)) {
                kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                return RESUME_GUEST;
        }

        if (msr == MSR_VSX) {
                /* No VSX? Give an illegal instruction interrupt */
#ifdef CONFIG_VSX
                if (!cpu_has_feature(CPU_FTR_VSX))
#endif
                {
                        kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
                        return RESUME_GUEST;
                }

                /*
                 * We have to load up all the FP and VMX registers before
                 * we can let the guest use VSX instructions.
                 */
                msr = MSR_FP | MSR_VEC | MSR_VSX;
        }

        /* See if we already own all the ext(s) needed */
        msr &= ~vcpu->arch.guest_owned_ext;
        if (!msr)
                return RESUME_GUEST;

#ifdef DEBUG_EXT
        printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

        if (msr & MSR_FP) {
                t->fp_state = vcpu->arch.fp;

                enable_kernel_fp();
                load_fp_state(&t->fp_state);
        }

        if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
                t->vr_state = vcpu->arch.vr;

                enable_kernel_altivec();
                load_vr_state(&t->vr_state);
#endif
        }

        current->thread.regs->msr |= msr;
        vcpu->arch.guest_owned_ext |= msr;
        kvmppc_recalc_shadow_msr(vcpu);

        return RESUME_GUEST;
}
/*
 * Kernel code using FP or VMX could have flushed guest state to
 * the thread_struct; if so, get it back now.
 */
static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
{
        unsigned long lost_ext;

        lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
        if (!lost_ext)
                return;

        if (lost_ext & MSR_FP) {
                enable_kernel_fp();
                load_fp_state(&current->thread.fp_state);
        }
#ifdef CONFIG_ALTIVEC
        if (lost_ext & MSR_VEC) {
                enable_kernel_altivec();
                load_vr_state(&current->thread.vr_state);
        }
#endif
        current->thread.regs->msr |= lost_ext;
}
int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
                          unsigned int exit_nr)
{
        int r = RESUME_HOST;
        int s;

        vcpu->stat.sum_exits++;

        run->exit_reason = KVM_EXIT_UNKNOWN;
        run->ready_for_interrupt_injection = 1;

        /* We get here with MSR.EE=1 */

        trace_kvm_exit(exit_nr, vcpu);
        kvm_guest_exit();

        switch (exit_nr) {
        case BOOK3S_INTERRUPT_INST_STORAGE:
        {
                ulong shadow_srr1 = vcpu->arch.shadow_srr1;
                vcpu->stat.pf_instruc++;

#ifdef CONFIG_PPC_BOOK3S_32
                /* We mark segments as unused when invalidating them, so
                 * treat the respective fault as a segment fault. */
                {
                        struct kvmppc_book3s_shadow_vcpu *svcpu;
                        u32 sr;

                        svcpu = svcpu_get(vcpu);
                        sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
                        svcpu_put(svcpu);
                        if (sr == SR_INVALID) {
                                kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
                                r = RESUME_GUEST;
                                break;
                        }
                }
#endif

                /* only care about PTEG not found errors, but leave NX alone */
                if (shadow_srr1 & 0x40000000) {
                        int idx = srcu_read_lock(&vcpu->kvm->srcu);
                        r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
                        srcu_read_unlock(&vcpu->kvm->srcu, idx);
                        vcpu->stat.sp_instruc++;
                } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
                          (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
                        /*
                         * XXX If we do the dcbz hack we use the NX bit to
                         * flush&patch the page, so we can't use the NX bit
                         * inside the guest. Let's cross our fingers that no
                         * guest that needs the dcbz hack uses NX.
                         */
                        kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
                        r = RESUME_GUEST;
                } else {
                        vcpu->arch.shared->msr |= shadow_srr1 & 0x58000000;
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
                break;
        }
        case BOOK3S_INTERRUPT_DATA_STORAGE:
        {
                ulong dar = kvmppc_get_fault_dar(vcpu);
                u32 fault_dsisr = vcpu->arch.fault_dsisr;
                vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
                /* We mark segments as unused when invalidating them, so
                 * treat the respective fault as a segment fault. */
                {
                        struct kvmppc_book3s_shadow_vcpu *svcpu;
                        u32 sr;

                        svcpu = svcpu_get(vcpu);
                        sr = svcpu->sr[dar >> SID_SHIFT];
                        svcpu_put(svcpu);
                        if (sr == SR_INVALID) {
                                kvmppc_mmu_map_segment(vcpu, dar);
                                r = RESUME_GUEST;
                                break;
                        }
                }
#endif

                /*
                 * We need to handle missing shadow PTEs, and
                 * protection faults due to us mapping a page read-only
                 * when the guest thinks it is writable.
                 */
                if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
                        int idx = srcu_read_lock(&vcpu->kvm->srcu);
                        r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
                        srcu_read_unlock(&vcpu->kvm->srcu, idx);
                } else {
                        vcpu->arch.shared->dar = dar;
                        vcpu->arch.shared->dsisr = fault_dsisr;
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
                break;
        }
        case BOOK3S_INTERRUPT_DATA_SEGMENT:
                if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
                        vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
                        kvmppc_book3s_queue_irqprio(vcpu,
                                BOOK3S_INTERRUPT_DATA_SEGMENT);
                }
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_INST_SEGMENT:
                if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
                        kvmppc_book3s_queue_irqprio(vcpu,
                                BOOK3S_INTERRUPT_INST_SEGMENT);
                }
                r = RESUME_GUEST;
                break;
        /* We're good on these - the host merely wanted to get our attention */
        case BOOK3S_INTERRUPT_DECREMENTER:
        case BOOK3S_INTERRUPT_HV_DECREMENTER:
                vcpu->stat.dec_exits++;
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_EXTERNAL:
        case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
        case BOOK3S_INTERRUPT_EXTERNAL_HV:
                vcpu->stat.ext_intr_exits++;
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_PERFMON:
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_PROGRAM:
        case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
        {
                enum emulation_result er;
                ulong flags;

program_interrupt:
                flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;

                if (vcpu->arch.shared->msr & MSR_PR) {
#ifdef EXIT_DEBUG
                        printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
#endif
                        if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
                            (INS_DCBZ & 0xfffffff7)) {
                                kvmppc_core_queue_program(vcpu, flags);
                                r = RESUME_GUEST;
                                break;
                        }
                }

                vcpu->stat.emulated_inst_exits++;
                er = kvmppc_emulate_instruction(run, vcpu);
                switch (er) {
                case EMULATE_DONE:
                        r = RESUME_GUEST_NV;
                        break;
                case EMULATE_AGAIN:
                        r = RESUME_GUEST;
                        break;
                case EMULATE_FAIL:
                        printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
                               __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
                        kvmppc_core_queue_program(vcpu, flags);
                        r = RESUME_GUEST;
                        break;
                case EMULATE_DO_MMIO:
                        run->exit_reason = KVM_EXIT_MMIO;
                        r = RESUME_HOST_NV;
                        break;
                case EMULATE_EXIT_USER:
                        r = RESUME_HOST_NV;
                        break;
                default:
                        BUG();
                }
                break;
        }
        case BOOK3S_INTERRUPT_SYSCALL:
                if (vcpu->arch.papr_enabled &&
                    (kvmppc_get_last_sc(vcpu) == 0x44000022) &&
                    !(vcpu->arch.shared->msr & MSR_PR)) {
                        /* SC 1 PAPR hypercalls */
                        ulong cmd = kvmppc_get_gpr(vcpu, 3);
                        int i;

#ifdef CONFIG_PPC_BOOK3S_64
                        if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
                                r = RESUME_GUEST;
                                break;
                        }
#endif

                        run->papr_hcall.nr = cmd;
                        for (i = 0; i < 9; ++i) {
                                ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
                                run->papr_hcall.args[i] = gpr;
                        }
                        run->exit_reason = KVM_EXIT_PAPR_HCALL;
                        vcpu->arch.hcall_needed = 1;
                        r = RESUME_HOST;
                } else if (vcpu->arch.osi_enabled &&
                    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
                    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
                        /* MOL hypercalls */
                        u64 *gprs = run->osi.gprs;
                        int i;

                        run->exit_reason = KVM_EXIT_OSI;
                        for (i = 0; i < 32; i++)
                                gprs[i] = kvmppc_get_gpr(vcpu, i);
                        vcpu->arch.osi_needed = 1;
                        r = RESUME_HOST_NV;
                } else if (!(vcpu->arch.shared->msr & MSR_PR) &&
                    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
                        /* KVM PV hypercalls */
                        kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
                        r = RESUME_GUEST;
                } else {
                        /* Guest syscalls */
                        vcpu->stat.syscall_exits++;
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
                break;
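        /*
         * Hypercalls that kvmppc_h_pr() does not handle in-kernel are
         * handed to userspace via the exit above. A minimal sketch of the
         * consumer side (hypothetical userspace code, not part of this
         * file):
         *
         *      struct kvm_run *run = ...;      // mmap'ed vcpu run area
         *      if (run->exit_reason == KVM_EXIT_PAPR_HCALL) {
         *              // Dispatch on run->papr_hcall.nr (the hcall number
         *              // from r3), using run->papr_hcall.args[0..8]
         *              // (r4-r12), and store the result in
         *              // run->papr_hcall.ret before the next KVM_RUN;
         *              // hcall_needed makes KVM copy it back into the
         *              // guest's r3.
         *      }
         */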
        case BOOK3S_INTERRUPT_FP_UNAVAIL:
        case BOOK3S_INTERRUPT_ALTIVEC:
        case BOOK3S_INTERRUPT_VSX:
        {
                int ext_msr = 0;

                switch (exit_nr) {
                case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP;  break;
                case BOOK3S_INTERRUPT_ALTIVEC:    ext_msr = MSR_VEC; break;
                case BOOK3S_INTERRUPT_VSX:        ext_msr = MSR_VSX; break;
                }

                switch (kvmppc_check_ext(vcpu, exit_nr)) {
                case EMULATE_DONE:
                        /* everything ok - let's enable the ext */
                        r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
                        break;
                case EMULATE_FAIL:
                        /* we need to emulate this instruction */
                        goto program_interrupt;
                        break;
                default:
                        /* nothing to worry about - go again */
                        r = RESUME_GUEST;
                        break;
                }
                break;
        }
        case BOOK3S_INTERRUPT_ALIGNMENT:
                if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
                        vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
                                kvmppc_get_last_inst(vcpu));
                        vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
                                kvmppc_get_last_inst(vcpu));
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                }
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_MACHINE_CHECK:
        case BOOK3S_INTERRUPT_TRACE:
                kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                r = RESUME_GUEST;
                break;
        default:
        {
                ulong shadow_srr1 = vcpu->arch.shadow_srr1;
                /* Ugh - bork here! What did we get? */
                printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
                        exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
                r = RESUME_HOST;
                break;
        }
        }
        if (!(r & RESUME_HOST)) {
                /* To avoid clobbering exit_reason, only check for signals if
                 * we aren't already exiting to userspace for some other
                 * reason.
                 */

                /*
                 * Interrupts could be timers for the guest which we have to
                 * inject again, so let's postpone them until we're in the
                 * guest and if we really did time things so badly, then we
                 * just exit again due to a host external interrupt.
                 */
                local_irq_disable();
                s = kvmppc_prepare_to_enter(vcpu);
                if (s <= 0) {
                        local_irq_enable();
                        r = s;
                } else {
                        kvmppc_fix_ee_before_entry();
                }

                kvmppc_handle_lost_ext(vcpu);
        }

        trace_kvm_book3s_reenter(r, vcpu);

        return r;
}
static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
                                            struct kvm_sregs *sregs)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int i;

        sregs->pvr = vcpu->arch.pvr;

        sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
        if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
                for (i = 0; i < 64; i++) {
                        sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
                        sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
                }
        } else {
                for (i = 0; i < 16; i++)
                        sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];

                for (i = 0; i < 8; i++) {
                        sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
                        sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
                }
        }

        return 0;
}
static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
                                            struct kvm_sregs *sregs)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int i;

        kvmppc_set_pvr_pr(vcpu, sregs->pvr);

        vcpu3s->sdr1 = sregs->u.s.sdr1;
        if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
                for (i = 0; i < 64; i++) {
                        vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
                                                    sregs->u.s.ppc64.slb[i].slbe);
                }
        } else {
                for (i = 0; i < 16; i++) {
                        vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
                }
                for (i = 0; i < 8; i++) {
                        kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
                                       (u32)sregs->u.s.ppc32.ibat[i]);
                        kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
                                       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
                        kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
                                       (u32)sregs->u.s.ppc32.dbat[i]);
                        kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
                                       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
                }
        }

        /* Flush the MMU after messing with the segments */
        kvmppc_mmu_pte_flush(vcpu, 0, 0);

        return 0;
}
static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
                                 union kvmppc_one_reg *val)
{
        int r = 0;

        switch (id) {
        case KVM_REG_PPC_HIOR:
                *val = get_reg_val(id, to_book3s(vcpu)->hior);
                break;
        default:
                r = -EINVAL;
                break;
        }

        return r;
}

static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
                                 union kvmppc_one_reg *val)
{
        int r = 0;

        switch (id) {
        case KVM_REG_PPC_HIOR:
                to_book3s(vcpu)->hior = set_reg_val(id, *val);
                to_book3s(vcpu)->hior_explicit = true;
                break;
        default:
                r = -EINVAL;
                break;
        }

        return r;
}
static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
                                                   unsigned int id)
{
        struct kvmppc_vcpu_book3s *vcpu_book3s;
        struct kvm_vcpu *vcpu;
        int err = -ENOMEM;
        unsigned long p;

        vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!vcpu)
                goto out;

        vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
        if (!vcpu_book3s)
                goto free_vcpu;
        vcpu->arch.book3s = vcpu_book3s;

#ifdef CONFIG_KVM_BOOK3S_32
        vcpu->arch.shadow_vcpu =
                kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
        if (!vcpu->arch.shadow_vcpu)
                goto free_vcpu3s;
#endif

        err = kvm_vcpu_init(vcpu, kvm, id);
        if (err)
                goto free_shadow_vcpu;

        err = -ENOMEM;
        p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
        if (!p)
                goto uninit_vcpu;
        /* the real shared page fills the last 4k of our page */
        vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);
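        /*
         * Example of the arithmetic above: with 4k host pages,
         * PAGE_SIZE - 4096 == 0 and the shared struct simply occupies the
         * whole allocation; with 64k host pages it lands at offset 0xf000,
         * so the guest can still reach it through a single 4k magic-page
         * mapping at the end of the host page.
         */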
#ifdef CONFIG_PPC_BOOK3S_64
        /*
         * Default to the same as the host if we're on sufficiently
         * recent machine that we have 1TB segments;
         * otherwise default to PPC970FX.
         */
        vcpu->arch.pvr = 0x3C0301;
        if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
                vcpu->arch.pvr = mfspr(SPRN_PVR);
#else
        /* default to book3s_32 (750) */
        vcpu->arch.pvr = 0x84202;
#endif
        kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
        vcpu->arch.slb_nr = 64;

        vcpu->arch.shadow_msr = MSR_USER64;

        err = kvmppc_mmu_init(vcpu);
        if (err < 0)
                goto uninit_vcpu;

        return vcpu;

uninit_vcpu:
        kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
#ifdef CONFIG_KVM_BOOK3S_32
        kfree(vcpu->arch.shadow_vcpu);
free_vcpu3s:
#endif
        vfree(vcpu_book3s);
free_vcpu:
        kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
        return ERR_PTR(err);
}
static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

        free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
        kvm_vcpu_uninit(vcpu);
#ifdef CONFIG_KVM_BOOK3S_32
        kfree(vcpu->arch.shadow_vcpu);
#endif
        vfree(vcpu_book3s);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
}
static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
        int ret;
        struct thread_fp_state fp;
        int fpexc_mode;
#ifdef CONFIG_ALTIVEC
        struct thread_vr_state vr;
        unsigned long uninitialized_var(vrsave);
        int used_vr;
#endif
#ifdef CONFIG_VSX
        int used_vsr;
#endif
        ulong ext_msr;

        /* Check if we can run the vcpu at all */
        if (!vcpu->arch.sane) {
                kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = -EINVAL;
                goto out;
        }

        /*
         * Interrupts could be timers for the guest which we have to inject
         * again, so let's postpone them until we're in the guest and if we
         * really did time things so badly, then we just exit again due to
         * a host external interrupt.
         */
        local_irq_disable();
        ret = kvmppc_prepare_to_enter(vcpu);
        if (ret <= 0) {
                local_irq_enable();
                goto out;
        }

        /* Save FPU state in stack */
        if (current->thread.regs->msr & MSR_FP)
                giveup_fpu(current);
        fp = current->thread.fp_state;
        fpexc_mode = current->thread.fpexc_mode;

#ifdef CONFIG_ALTIVEC
        /* Save Altivec state in stack */
        used_vr = current->thread.used_vr;
        if (used_vr) {
                if (current->thread.regs->msr & MSR_VEC)
                        giveup_altivec(current);
                vr = current->thread.vr_state;
                vrsave = current->thread.vrsave;
        }
#endif

#ifdef CONFIG_VSX
        /* Save VSX state in stack */
        used_vsr = current->thread.used_vsr;
        if (used_vsr && (current->thread.regs->msr & MSR_VSX))
                __giveup_vsx(current);
#endif

        /* Remember the MSR with disabled extensions */
        ext_msr = current->thread.regs->msr;

        /* Preload FPU if it's enabled */
        if (vcpu->arch.shared->msr & MSR_FP)
                kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

        kvmppc_fix_ee_before_entry();

        ret = __kvmppc_vcpu_run(kvm_run, vcpu);

        /* No need for kvm_guest_exit. It's done in handle_exit.
           We also get here with interrupts enabled. */

        /* Make sure we save the guest FPU/Altivec/VSX state */
        kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);

        current->thread.regs->msr = ext_msr;

        /* Restore FPU/VSX state from stack */
        current->thread.fp_state = fp;
        current->thread.fpexc_mode = fpexc_mode;

#ifdef CONFIG_ALTIVEC
        /* Restore Altivec state from stack */
        if (used_vr && current->thread.used_vr) {
                current->thread.vr_state = vr;
                current->thread.vrsave = vrsave;
        }
        current->thread.used_vr = used_vr;
#endif

#ifdef CONFIG_VSX
        current->thread.used_vsr = used_vsr;
#endif

out:
        vcpu->mode = OUTSIDE_GUEST_MODE;
        return ret;
}
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
                                         struct kvm_dirty_log *log)
{
        struct kvm_memory_slot *memslot;
        struct kvm_vcpu *vcpu;
        ulong ga, ga_end;
        int is_dirty = 0;
        int r;
        unsigned long n;

        mutex_lock(&kvm->slots_lock);

        r = kvm_get_dirty_log(kvm, log, &is_dirty);
        if (r)
                goto out;

        /* If nothing is dirty, don't bother messing with page tables. */
        if (is_dirty) {
                memslot = id_to_memslot(kvm->memslots, log->slot);

                ga = memslot->base_gfn << PAGE_SHIFT;
                ga_end = ga + (memslot->npages << PAGE_SHIFT);

                kvm_for_each_vcpu(n, vcpu, kvm)
                        kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

                n = kvm_dirty_bitmap_bytes(memslot);
                memset(memslot->dirty_bitmap, 0, n);
        }

        r = 0;
out:
        mutex_unlock(&kvm->slots_lock);
        return r;
}
static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
                                         struct kvm_memory_slot *memslot)
{
        return;
}

static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
                                        struct kvm_memory_slot *memslot,
                                        struct kvm_userspace_memory_region *mem)
{
        return 0;
}

static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                const struct kvm_memory_slot *old)
{
        return;
}

static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free,
                                        struct kvm_memory_slot *dont)
{
        return;
}

static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot,
                                         unsigned long npages)
{
        return 0;
}
#ifdef CONFIG_PPC64
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
                                         struct kvm_ppc_smmu_info *info)
{
        long int i;
        struct kvm_vcpu *vcpu;

        info->flags = 0;

        /* SLB is always 64 entries */
        info->slb_size = 64;

        /* Standard 4k base page size segment */
        info->sps[0].page_shift = 12;
        info->sps[0].slb_enc = 0;
        info->sps[0].enc[0].page_shift = 12;
        info->sps[0].enc[0].pte_enc = 0;

        /*
         * 64k large page size.
         * We only want to put this in if the CPUs we're emulating
         * support it, but unfortunately we don't easily have a vcpu
         * to hand here to test. Just pick the first vcpu, and if
         * that doesn't exist yet, report the minimum capability,
         * i.e., no 64k pages.
         * 1T segment support goes along with 64k pages.
         */
        i = 1;
        vcpu = kvm_get_vcpu(kvm, 0);
        if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
                info->flags = KVM_PPC_1T_SEGMENTS;
                info->sps[i].page_shift = 16;
                info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
                info->sps[i].enc[0].page_shift = 16;
                info->sps[i].enc[0].pte_enc = 1;
                i++;
        }

        /* Standard 16M large page size segment */
        info->sps[i].page_shift = 24;
        info->sps[i].slb_enc = SLB_VSID_L;
        info->sps[i].enc[0].page_shift = 24;
        info->sps[i].enc[0].pte_enc = 0;

        return 0;
}
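/*
 * Summary of the resulting table (as built by the code above): sps[0]
 * always advertises 4k pages; when the emulated CPU has
 * BOOK3S_HFLAG_MULTI_PGSIZE, sps[1] adds 64k pages together with the
 * KVM_PPC_1T_SEGMENTS flag; the final entry advertises 16M pages.
 * Otherwise only 4k and 16M are reported.
 */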
#else
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
                                         struct kvm_ppc_smmu_info *info)
{
        /* We should not get called */
        BUG();
}
#endif /* CONFIG_PPC64 */
static unsigned int kvm_global_user_count = 0;
static DEFINE_SPINLOCK(kvm_global_user_count_lock);

static int kvmppc_core_init_vm_pr(struct kvm *kvm)
{
        mutex_init(&kvm->arch.hpt_mutex);

        if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
                spin_lock(&kvm_global_user_count_lock);
                if (++kvm_global_user_count == 1)
                        pSeries_disable_reloc_on_exc();
                spin_unlock(&kvm_global_user_count_lock);
        }
        return 0;
}
static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
        WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif

        if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
                spin_lock(&kvm_global_user_count_lock);
                BUG_ON(kvm_global_user_count == 0);
                if (--kvm_global_user_count == 0)
                        pSeries_enable_reloc_on_exc();
                spin_unlock(&kvm_global_user_count_lock);
        }
}
static int kvmppc_core_check_processor_compat_pr(void)
{
        /* we are always compatible */
        return 0;
}

static long kvm_arch_vm_ioctl_pr(struct file *filp,
                                 unsigned int ioctl, unsigned long arg)
{
        return -ENOTTY;
}
static struct kvmppc_ops kvm_ops_pr = {
        .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
        .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
        .get_one_reg = kvmppc_get_one_reg_pr,
        .set_one_reg = kvmppc_set_one_reg_pr,
        .vcpu_load   = kvmppc_core_vcpu_load_pr,
        .vcpu_put    = kvmppc_core_vcpu_put_pr,
        .set_msr     = kvmppc_set_msr_pr,
        .vcpu_run    = kvmppc_vcpu_run_pr,
        .vcpu_create = kvmppc_core_vcpu_create_pr,
        .vcpu_free   = kvmppc_core_vcpu_free_pr,
        .check_requests = kvmppc_core_check_requests_pr,
        .get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr,
        .flush_memslot = kvmppc_core_flush_memslot_pr,
        .prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
        .commit_memory_region = kvmppc_core_commit_memory_region_pr,
        .unmap_hva = kvm_unmap_hva_pr,
        .unmap_hva_range = kvm_unmap_hva_range_pr,
        .age_hva  = kvm_age_hva_pr,
        .test_age_hva = kvm_test_age_hva_pr,
        .set_spte_hva = kvm_set_spte_hva_pr,
        .mmu_destroy  = kvmppc_mmu_destroy_pr,
        .free_memslot = kvmppc_core_free_memslot_pr,
        .create_memslot = kvmppc_core_create_memslot_pr,
        .init_vm = kvmppc_core_init_vm_pr,
        .destroy_vm = kvmppc_core_destroy_vm_pr,
        .get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
        .emulate_op = kvmppc_core_emulate_op_pr,
        .emulate_mtspr = kvmppc_core_emulate_mtspr_pr,
        .emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
        .fast_vcpu_kick = kvm_vcpu_kick,
        .arch_vm_ioctl = kvm_arch_vm_ioctl_pr,
};
int kvmppc_book3s_init_pr(void)
{
        int r;

        r = kvmppc_core_check_processor_compat_pr();
        if (r < 0)
                return r;

        kvm_ops_pr.owner = THIS_MODULE;
        kvmppc_pr_ops = &kvm_ops_pr;

        r = kvmppc_mmu_hpte_sysinit();
        return r;
}

void kvmppc_book3s_exit_pr(void)
{
        kvmppc_pr_ops = NULL;
        kvmppc_mmu_hpte_sysexit();
}
/*
 * We only support separate modules for book3s 64
 */
#ifdef CONFIG_PPC_BOOK3S_64

module_init(kvmppc_book3s_init_pr);
module_exit(kvmppc_book3s_exit_pr);

MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif