powerpc/kvm/book3s_hv: Preserve guest CFAR register value
Author: Paul Mackerras <paulus@samba.org>
Mon, 4 Feb 2013 18:10:51 +0000 (18:10 +0000)
Committer: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Fri, 15 Feb 2013 05:54:33 +0000 (16:54 +1100)
The CFAR (Come-From Address Register) is a useful debugging aid that
exists on POWER7 processors.  Currently HV KVM doesn't save or restore
the CFAR register for guest vcpus, making the CFAR of limited use in
guests.

This adds the necessary code to capture the CFAR value saved in the
early exception entry code (it has to be saved before any branch is
executed), save it in the vcpu.arch struct, and restore it on entry
to the guest.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
arch/powerpc/include/asm/exception-64s.h
arch/powerpc/include/asm/kvm_book3s_asm.h
arch/powerpc/include/asm/kvm_host.h
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S

index 4dfc51588be571487c83590eceb61643b20eee8f..05e6d2ee1db9fc348ce39319a63a3f5ceed6d838 100644 (file)
@@ -199,10 +199,14 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 
 #define __KVM_HANDLER(area, h, n)                                      \
 do_kvm_##n:                                                            \
+       BEGIN_FTR_SECTION_NESTED(947)                                   \
+       ld      r10,area+EX_CFAR(r13);                                  \
+       std     r10,HSTATE_CFAR(r13);                                   \
+       END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947);          \
        ld      r10,area+EX_R10(r13);                                   \
-       stw     r9,HSTATE_SCRATCH1(r13);                        \
+       stw     r9,HSTATE_SCRATCH1(r13);                                \
        ld      r9,area+EX_R9(r13);                                     \
-       std     r12,HSTATE_SCRATCH0(r13);                       \
+       std     r12,HSTATE_SCRATCH0(r13);                               \
        li      r12,n;                                                  \
        b       kvmppc_interrupt
 
index 88609b23b775460c96a4d9d805feaf49fecf94a3..cdc3d2717cc6e0feb9858e2cf1502958e8b3d711 100644 (file)
@@ -93,6 +93,9 @@ struct kvmppc_host_state {
        u64 host_dscr;
        u64 dec_expires;
 #endif
+#ifdef CONFIG_PPC_BOOK3S_64
+       u64 cfar;
+#endif
 };
 
 struct kvmppc_book3s_shadow_vcpu {
index ca9bf459db6a56e2bb7d1e450a7b5c4d6026a3ae..03d7beae89a0ebb7fd0e66187db5c643d51b00f3 100644 (file)
@@ -440,6 +440,7 @@ struct kvm_vcpu_arch {
        ulong uamor;
        u32 ctrl;
        ulong dabr;
+       ulong cfar;
 #endif
        u32 vrsave; /* also USPRG0 */
        u32 mmucr;
index beddba432518ec9f56dbb1877c8f2d1e8912c583..e295a09b1f061ec29d6448709d6a677578d9f351 100644 (file)
@@ -479,6 +479,7 @@ int main(void)
        DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
        DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
        DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid));
+       DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar));
        DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
        DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
        DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
@@ -558,6 +559,10 @@ int main(void)
        DEFINE(IPI_PRIORITY, IPI_PRIORITY);
 #endif /* CONFIG_KVM_BOOK3S_64_HV */
 
+#ifdef CONFIG_PPC_BOOK3S_64
+       HSTATE_FIELD(HSTATE_CFAR, cfar);
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
 #else /* CONFIG_PPC_BOOK3S */
        DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
        DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
index 10b6c358dd770cf7c3c0083aebfdecc321631985..e33d11f1b977c2ea46e3494500c17255cca9daf5 100644 (file)
@@ -539,6 +539,11 @@ fast_guest_return:
 
        /* Enter guest */
 
+BEGIN_FTR_SECTION
+       ld      r5, VCPU_CFAR(r4)
+       mtspr   SPRN_CFAR, r5
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+
        ld      r5, VCPU_LR(r4)
        lwz     r6, VCPU_CR(r4)
        mtlr    r5
@@ -604,6 +609,10 @@ kvmppc_interrupt:
        lwz     r4, HSTATE_SCRATCH1(r13)
        std     r3, VCPU_GPR(R12)(r9)
        stw     r4, VCPU_CR(r9)
+BEGIN_FTR_SECTION
+       ld      r3, HSTATE_CFAR(r13)
+       std     r3, VCPU_CFAR(r9)
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
 
        /* Restore R1/R2 so we can handle faults */
        ld      r1, HSTATE_HOST_R1(r13)