2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * KVM/MIPS: Instruction/Exception emulation
8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
12 #include <linux/errno.h>
13 #include <linux/err.h>
14 #include <linux/ktime.h>
15 #include <linux/kvm_host.h>
16 #include <linux/module.h>
17 #include <linux/vmalloc.h>
19 #include <linux/bootmem.h>
20 #include <linux/random.h>
22 #include <asm/cacheflush.h>
23 #include <asm/cpu-info.h>
24 #include <asm/mmu_context.h>
25 #include <asm/tlbflush.h>
29 #include <asm/r4kcache.h>
30 #define CONFIG_MIPS_MT
33 #include "interrupt.h"
* Compute the return address, emulating the branch if required.
* This function should only be called while a branch delay slot is active.
42 unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
45 unsigned int dspcontrol;
46 union mips_instruction insn;
47 struct kvm_vcpu_arch *arch = &vcpu->arch;
49 long nextpc = KVM_INVALID_INST;
54 /* Read the instruction */
55 insn.word = kvm_get_inst((uint32_t *) epc, vcpu);
57 if (insn.word == KVM_INVALID_INST)
58 return KVM_INVALID_INST;
60 switch (insn.i_format.opcode) {
61 /* jr and jalr are in r_format format. */
63 switch (insn.r_format.func) {
65 arch->gprs[insn.r_format.rd] = epc + 8;
68 nextpc = arch->gprs[insn.r_format.rs];
74 * This group contains:
75 * bltz_op, bgez_op, bltzl_op, bgezl_op,
76 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
79 switch (insn.i_format.rt) {
82 if ((long)arch->gprs[insn.i_format.rs] < 0)
83 epc = epc + 4 + (insn.i_format.simmediate << 2);
91 if ((long)arch->gprs[insn.i_format.rs] >= 0)
92 epc = epc + 4 + (insn.i_format.simmediate << 2);
100 arch->gprs[31] = epc + 8;
101 if ((long)arch->gprs[insn.i_format.rs] < 0)
102 epc = epc + 4 + (insn.i_format.simmediate << 2);
110 arch->gprs[31] = epc + 8;
111 if ((long)arch->gprs[insn.i_format.rs] >= 0)
112 epc = epc + 4 + (insn.i_format.simmediate << 2);
121 dspcontrol = rddsp(0x01);
123 if (dspcontrol >= 32)
124 epc = epc + 4 + (insn.i_format.simmediate << 2);
132 /* These are unconditional and in j_format. */
134 arch->gprs[31] = instpc + 8;
139 epc |= (insn.j_format.target << 2);
143 /* These are conditional and in i_format. */
146 if (arch->gprs[insn.i_format.rs] ==
147 arch->gprs[insn.i_format.rt])
148 epc = epc + 4 + (insn.i_format.simmediate << 2);
156 if (arch->gprs[insn.i_format.rs] !=
157 arch->gprs[insn.i_format.rt])
158 epc = epc + 4 + (insn.i_format.simmediate << 2);
164 case blez_op: /* not really i_format */
166 /* rt field assumed to be zero */
167 if ((long)arch->gprs[insn.i_format.rs] <= 0)
168 epc = epc + 4 + (insn.i_format.simmediate << 2);
176 /* rt field assumed to be zero */
177 if ((long)arch->gprs[insn.i_format.rs] > 0)
178 epc = epc + 4 + (insn.i_format.simmediate << 2);
184 /* And now the FPA/cp1 branch instructions. */
186 kvm_err("%s: unsupported cop1_op\n", __func__);
193 kvm_err("%s: unaligned epc\n", __func__);
197 kvm_err("%s: DSP branch but not DSP ASE\n", __func__);
201 enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
203 unsigned long branch_pc;
204 enum emulation_result er = EMULATE_DONE;
206 if (cause & CAUSEF_BD) {
207 branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
208 if (branch_pc == KVM_INVALID_INST) {
211 vcpu->arch.pc = branch_pc;
212 kvm_debug("BD update_pc(): New PC: %#lx\n",
218 kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
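/*
 * Worked example (illustrative only): if the guest takes an exception in
 * the delay slot of "beq a0, a1, target", CAUSEF_BD is set and EPC points
 * at the beq itself. update_pc() then calls kvm_compute_return_epc(),
 * which re-evaluates the branch against the current GPRs, so the resume
 * PC becomes either the branch target (EPC + 4 + (simm16 << 2)) or the
 * instruction after the delay slot (EPC + 8).
 */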
224 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
225 * @vcpu: Virtual CPU.
227 * Returns: 1 if the CP0_Count timer is disabled by either the guest
228 * CP0_Cause.DC bit or the count_ctl.DC bit.
229 * 0 otherwise (in which case CP0_Count timer is running).
231 static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
233 struct mips_coproc *cop0 = vcpu->arch.cop0;
235 return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
236 (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
240 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
242 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
244 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
246 static uint32_t kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
251 now_ns = ktime_to_ns(now);
252 delta = now_ns + vcpu->arch.count_dyn_bias;
254 if (delta >= vcpu->arch.count_period) {
255 /* If delta is out of safe range the bias needs adjusting */
256 periods = div64_s64(now_ns, vcpu->arch.count_period);
257 vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
258 /* Recalculate delta with new bias */
259 delta = now_ns + vcpu->arch.count_dyn_bias;
263 * We've ensured that:
264 * delta < count_period
266 * Therefore the intermediate delta*count_hz will never overflow since
267 * at the boundary condition:
268 * delta = count_period
269 * delta = NSEC_PER_SEC * 2^32 / count_hz
270 * delta * count_hz = NSEC_PER_SEC * 2^32
272 return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
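/*
 * Numeric sketch (illustrative only): with the default count_hz of
 * 100 MHz, count_period = NSEC_PER_SEC * 2^32 / count_hz, so delta stays
 * below ~4.3e10 ns and delta * count_hz peaks at NSEC_PER_SEC * 2^32
 * ~= 4.3e18, safely below the u64 limit of ~1.8e19.
 */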
276 * kvm_mips_count_time() - Get effective current time.
277 * @vcpu: Virtual CPU.
279 * Get effective monotonic ktime. This is usually a straightforward ktime_get(),
280 * except when the master disable bit is set in count_ctl, in which case it is
281 * count_resume, i.e. the time that the count was disabled.
283 * Returns: Effective monotonic ktime for CP0_Count.
285 static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
287 if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
288 return vcpu->arch.count_resume;
294 * kvm_mips_read_count_running() - Read the current count value as if running.
295 * @vcpu: Virtual CPU.
296 * @now: Kernel time to read CP0_Count at.
* Returns the current guest CP0_Count register at time @now, and queues any
* pending timer interrupt that hasn't yet been handled.
301 * Returns: The current value of the guest CP0_Count register.
303 static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
308 /* Is the hrtimer pending? */
309 expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
310 if (ktime_compare(now, expires) >= 0) {
312 * Cancel it while we handle it so there's no chance of
313 * interference with the timeout handler.
315 running = hrtimer_cancel(&vcpu->arch.comparecount_timer);
317 /* Nothing should be waiting on the timeout */
318 kvm_mips_callbacks->queue_timer_int(vcpu);
321 * Restart the timer if it was running based on the expiry time
322 * we read, so that we don't push it back 2 periods.
325 expires = ktime_add_ns(expires,
326 vcpu->arch.count_period);
327 hrtimer_start(&vcpu->arch.comparecount_timer, expires,
332 /* Return the biased and scaled guest CP0_Count */
333 return vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
337 * kvm_mips_read_count() - Read the current count value.
338 * @vcpu: Virtual CPU.
* Read the current guest CP0_Count value, taking into account whether the
* timer is stopped.
343 * Returns: The current guest CP0_Count value.
345 uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu)
347 struct mips_coproc *cop0 = vcpu->arch.cop0;
349 /* If count disabled just read static copy of count */
350 if (kvm_mips_count_disabled(vcpu))
351 return kvm_read_c0_guest_count(cop0);
353 return kvm_mips_read_count_running(vcpu, ktime_get());
357 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
358 * @vcpu: Virtual CPU.
359 * @count: Output pointer for CP0_Count value at point of freeze.
361 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
362 * at the point it was frozen. It is guaranteed that any pending interrupts at
363 * the point it was frozen are handled, and none after that point.
* This is useful where the time/CP0_Count is needed in the calculation of the
* new parameters.
368 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
370 * Returns: The ktime at the point of freeze.
372 static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu,
377 /* stop hrtimer before finding time */
378 hrtimer_cancel(&vcpu->arch.comparecount_timer);
381 /* find count at this point and handle pending hrtimer */
382 *count = kvm_mips_read_count_running(vcpu, now);
388 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
389 * @vcpu: Virtual CPU.
390 * @now: ktime at point of resume.
391 * @count: CP0_Count at point of resume.
393 * Resumes the timer and updates the timer expiry based on @now and @count.
* This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
* parameters need to be changed.
* It is guaranteed that a timer interrupt immediately after resume will be
* handled, but not if CP0_Compare is exactly at @count. That case is already
* handled by kvm_mips_freeze_hrtimer().
401 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
403 static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
404 ktime_t now, uint32_t count)
406 struct mips_coproc *cop0 = vcpu->arch.cop0;
411 /* Calculate timeout (wrap 0 to 2^32) */
412 compare = kvm_read_c0_guest_compare(cop0);
413 delta = (u64)(uint32_t)(compare - count - 1) + 1;
414 delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
415 expire = ktime_add_ns(now, delta);
417 /* Update hrtimer to use new timeout */
418 hrtimer_cancel(&vcpu->arch.comparecount_timer);
419 hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
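/*
 * Wrap-around sketch (illustrative only): the (compare - count - 1) + 1
 * form maps a zero delta to a full period rather than an immediate
 * expiry. E.g. compare == count gives (u64)0xffffffff + 1 = 2^32 ticks,
 * while compare == count + 1 gives exactly 1 tick.
 */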
423 * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer.
424 * @vcpu: Virtual CPU.
426 * Recalculates and updates the expiry time of the hrtimer. This can be used
427 * after timer parameters have been altered which do not depend on the time that
428 * the change occurs (in those cases kvm_mips_freeze_hrtimer() and
429 * kvm_mips_resume_hrtimer() are used directly).
431 * It is guaranteed that no timer interrupts will be lost in the process.
433 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
435 static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu)
* kvm_mips_freeze_hrtimer() takes care of timer interrupts <= count, and
* kvm_mips_resume_hrtimer() takes care of timer interrupts > count.
444 now = kvm_mips_freeze_hrtimer(vcpu, &count);
445 kvm_mips_resume_hrtimer(vcpu, now, count);
449 * kvm_mips_write_count() - Modify the count and update timer.
450 * @vcpu: Virtual CPU.
451 * @count: Guest CP0_Count value to set.
453 * Sets the CP0_Count value and updates the timer accordingly.
455 void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count)
457 struct mips_coproc *cop0 = vcpu->arch.cop0;
461 now = kvm_mips_count_time(vcpu);
462 vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
464 if (kvm_mips_count_disabled(vcpu))
465 /* The timer's disabled, adjust the static count */
466 kvm_write_c0_guest_count(cop0, count);
469 kvm_mips_resume_hrtimer(vcpu, now, count);
473 * kvm_mips_init_count() - Initialise timer.
474 * @vcpu: Virtual CPU.
476 * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and set
477 * it going if it's enabled.
479 void kvm_mips_init_count(struct kvm_vcpu *vcpu)
482 vcpu->arch.count_hz = 100*1000*1000;
483 vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
484 vcpu->arch.count_hz);
485 vcpu->arch.count_dyn_bias = 0;
488 kvm_mips_write_count(vcpu, 0);
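/*
 * Numeric sketch (illustrative only): at 100 MHz the 32-bit count wraps
 * every 2^32 / 10^8 ~= 42.9 s, so count_period comes out as
 * (10^9 << 32) / 10^8 = 42949672960 ns.
 */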
492 * kvm_mips_set_count_hz() - Update the frequency of the timer.
493 * @vcpu: Virtual CPU.
494 * @count_hz: Frequency of CP0_Count timer in Hz.
496 * Change the frequency of the CP0_Count timer. This is done atomically so that
497 * CP0_Count is continuous and no timer interrupt is lost.
499 * Returns: -EINVAL if @count_hz is out of range.
502 int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
504 struct mips_coproc *cop0 = vcpu->arch.cop0;
509 /* ensure the frequency is in a sensible range... */
510 if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
512 /* ... and has actually changed */
513 if (vcpu->arch.count_hz == count_hz)
516 /* Safely freeze timer so we can keep it continuous */
517 dc = kvm_mips_count_disabled(vcpu);
519 now = kvm_mips_count_time(vcpu);
520 count = kvm_read_c0_guest_count(cop0);
522 now = kvm_mips_freeze_hrtimer(vcpu, &count);
525 /* Update the frequency */
526 vcpu->arch.count_hz = count_hz;
527 vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
528 vcpu->arch.count_dyn_bias = 0;
530 /* Calculate adjusted bias so dynamic count is unchanged */
531 vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
533 /* Update and resume hrtimer */
535 kvm_mips_resume_hrtimer(vcpu, now, count);
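/*
 * Continuity sketch (illustrative only): if the dynamic count reads
 * 0x1000 at the moment of the frequency change, count_bias is recomputed
 * as 0x1000 - kvm_mips_ktime_to_count(vcpu, now) under the new count_hz,
 * so an immediate re-read still returns 0x1000; only the rate at which
 * the count advances changes.
 */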
540 * kvm_mips_write_compare() - Modify compare and update timer.
541 * @vcpu: Virtual CPU.
542 * @compare: New CP0_Compare value.
544 * Update CP0_Compare to a new value and update the timeout.
546 void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare)
548 struct mips_coproc *cop0 = vcpu->arch.cop0;
550 /* if unchanged, must just be an ack */
551 if (kvm_read_c0_guest_compare(cop0) == compare)
555 kvm_write_c0_guest_compare(cop0, compare);
557 /* Update timeout if count enabled */
558 if (!kvm_mips_count_disabled(vcpu))
559 kvm_mips_update_hrtimer(vcpu);
563 * kvm_mips_count_disable() - Disable count.
564 * @vcpu: Virtual CPU.
566 * Disable the CP0_Count timer. A timer interrupt on or before the final stop
567 * time will be handled but not after.
569 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
570 * count_ctl.DC has been set (count disabled).
572 * Returns: The time that the timer was stopped.
574 static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
576 struct mips_coproc *cop0 = vcpu->arch.cop0;
581 hrtimer_cancel(&vcpu->arch.comparecount_timer);
583 /* Set the static count from the dynamic count, handling pending TI */
585 count = kvm_mips_read_count_running(vcpu, now);
586 kvm_write_c0_guest_count(cop0, count);
592 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
593 * @vcpu: Virtual CPU.
595 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
596 * before the final stop time will be handled if the timer isn't disabled by
597 * count_ctl.DC, but not after.
599 * Assumes CP0_Cause.DC is clear (count enabled).
601 void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
603 struct mips_coproc *cop0 = vcpu->arch.cop0;
605 kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
606 if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
607 kvm_mips_count_disable(vcpu);
611 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
612 * @vcpu: Virtual CPU.
614 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
615 * the start time will be handled if the timer isn't disabled by count_ctl.DC,
616 * potentially before even returning, so the caller should be careful with
617 * ordering of CP0_Cause modifications so as not to lose it.
619 * Assumes CP0_Cause.DC is set (count disabled).
621 void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
623 struct mips_coproc *cop0 = vcpu->arch.cop0;
626 kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);
629 * Set the dynamic count to match the static count.
630 * This starts the hrtimer if count_ctl.DC allows it.
631 * Otherwise it conveniently updates the biases.
633 count = kvm_read_c0_guest_count(cop0);
634 kvm_mips_write_count(vcpu, count);
638 * kvm_mips_set_count_ctl() - Update the count control KVM register.
639 * @vcpu: Virtual CPU.
640 * @count_ctl: Count control register new value.
642 * Set the count control KVM register. The timer is updated accordingly.
644 * Returns: -EINVAL if reserved bits are set.
647 int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
649 struct mips_coproc *cop0 = vcpu->arch.cop0;
650 s64 changed = count_ctl ^ vcpu->arch.count_ctl;
653 uint32_t count, compare;
655 /* Only allow defined bits to be changed */
656 if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
659 /* Apply new value */
660 vcpu->arch.count_ctl = count_ctl;
662 /* Master CP0_Count disable */
663 if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
664 /* Is CP0_Cause.DC already disabling CP0_Count? */
665 if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
666 if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
667 /* Just record the current time */
668 vcpu->arch.count_resume = ktime_get();
669 } else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
670 /* disable timer and record current time */
671 vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
674 * Calculate timeout relative to static count at resume
675 * time (wrap 0 to 2^32).
677 count = kvm_read_c0_guest_count(cop0);
678 compare = kvm_read_c0_guest_compare(cop0);
679 delta = (u64)(uint32_t)(compare - count - 1) + 1;
680 delta = div_u64(delta * NSEC_PER_SEC,
681 vcpu->arch.count_hz);
682 expire = ktime_add_ns(vcpu->arch.count_resume, delta);
684 /* Handle pending interrupt */
686 if (ktime_compare(now, expire) >= 0)
687 /* Nothing should be waiting on the timeout */
688 kvm_mips_callbacks->queue_timer_int(vcpu);
690 /* Resume hrtimer without changing bias */
691 count = kvm_mips_read_count_running(vcpu, now);
692 kvm_mips_resume_hrtimer(vcpu, now, count);
700 * kvm_mips_set_count_resume() - Update the count resume KVM register.
701 * @vcpu: Virtual CPU.
702 * @count_resume: Count resume register new value.
704 * Set the count resume KVM register.
706 * Returns: -EINVAL if out of valid range (0..now).
709 int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
712 * It doesn't make sense for the resume time to be in the future, as it
713 * would be possible for the next interrupt to be more than a full
714 * period in the future.
716 if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
719 vcpu->arch.count_resume = ns_to_ktime(count_resume);
724 * kvm_mips_count_timeout() - Push timer forward on timeout.
725 * @vcpu: Virtual CPU.
* Handle an hrtimer event by pushing the hrtimer forward a period.
729 * Returns: The hrtimer_restart value to return to the hrtimer subsystem.
731 enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
733 /* Add the Count period to the current expiry time */
734 hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
735 vcpu->arch.count_period);
736 return HRTIMER_RESTART;
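/*
 * Note (illustrative only): advancing the expiry by exactly count_period
 * keeps the hrtimer phase-locked to CP0_Compare, since one period is
 * precisely the time taken for the 32-bit count to wrap back around to
 * the same value.
 */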
739 enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
741 struct mips_coproc *cop0 = vcpu->arch.cop0;
742 enum emulation_result er = EMULATE_DONE;
744 if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
745 kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
746 kvm_read_c0_guest_epc(cop0));
747 kvm_clear_c0_guest_status(cop0, ST0_EXL);
748 vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
750 } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
751 kvm_clear_c0_guest_status(cop0, ST0_ERL);
752 vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
754 kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
762 enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
764 kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
765 vcpu->arch.pending_exceptions);
767 ++vcpu->stat.wait_exits;
768 trace_kvm_exit(vcpu, WAIT_EXITS);
769 if (!vcpu->arch.pending_exceptions) {
771 kvm_vcpu_block(vcpu);
* If we are runnable, then definitely go off to user space to
775 * check if any I/O interrupts are pending.
777 if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
778 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
779 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
787 * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that
788 * we can catch this, if things ever change
790 enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
792 struct mips_coproc *cop0 = vcpu->arch.cop0;
793 uint32_t pc = vcpu->arch.pc;
795 kvm_err("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
799 /* Write Guest TLB Entry @ Index */
800 enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
802 struct mips_coproc *cop0 = vcpu->arch.cop0;
803 int index = kvm_read_c0_guest_index(cop0);
804 struct kvm_mips_tlb *tlb = NULL;
805 uint32_t pc = vcpu->arch.pc;
807 if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
808 kvm_debug("%s: illegal index: %d\n", __func__, index);
809 kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
810 pc, index, kvm_read_c0_guest_entryhi(cop0),
811 kvm_read_c0_guest_entrylo0(cop0),
812 kvm_read_c0_guest_entrylo1(cop0),
813 kvm_read_c0_guest_pagemask(cop0));
814 index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
817 tlb = &vcpu->arch.guest_tlb[index];
* Probe the shadow host TLB for the entry being overwritten; if one
* matches, invalidate it.
822 kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
824 tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
825 tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
826 tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
827 tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
829 kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
830 pc, index, kvm_read_c0_guest_entryhi(cop0),
831 kvm_read_c0_guest_entrylo0(cop0),
832 kvm_read_c0_guest_entrylo1(cop0),
833 kvm_read_c0_guest_pagemask(cop0));
838 /* Write Guest TLB Entry @ Random Index */
839 enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
841 struct mips_coproc *cop0 = vcpu->arch.cop0;
842 struct kvm_mips_tlb *tlb = NULL;
843 uint32_t pc = vcpu->arch.pc;
846 get_random_bytes(&index, sizeof(index));
847 index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
849 tlb = &vcpu->arch.guest_tlb[index];
* Probe the shadow host TLB for the entry being overwritten; if one
* matches, invalidate it.
855 kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
857 tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
858 tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
859 tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
860 tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
862 kvm_debug("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
863 pc, index, kvm_read_c0_guest_entryhi(cop0),
864 kvm_read_c0_guest_entrylo0(cop0),
865 kvm_read_c0_guest_entrylo1(cop0));
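/*
 * Note on the random index (assumes KVM_MIPS_GUEST_TLB_SIZE is a power
 * of two, as it is for the 64-entry guest TLB): masking the random bytes
 * with (size - 1) selects each entry with equal probability; a
 * non-power-of-two size would need a modulo instead.
 */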
870 enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
872 struct mips_coproc *cop0 = vcpu->arch.cop0;
873 long entryhi = kvm_read_c0_guest_entryhi(cop0);
874 uint32_t pc = vcpu->arch.pc;
877 index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
879 kvm_write_c0_guest_index(cop0, index);
881 kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
888 * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1
889 * @vcpu: Virtual CPU.
891 * Finds the mask of bits which are writable in the guest's Config1 CP0
892 * register, by userland (currently read-only to the guest).
894 unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
896 unsigned int mask = 0;
898 /* Permit FPU to be present if FPU is supported */
899 if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
900 mask |= MIPS_CONF1_FP;
906 * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3
907 * @vcpu: Virtual CPU.
909 * Finds the mask of bits which are writable in the guest's Config3 CP0
910 * register, by userland (currently read-only to the guest).
912 unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
914 /* Config4 is optional */
915 unsigned int mask = MIPS_CONF_M;
917 /* Permit MSA to be present if MSA is supported */
918 if (kvm_mips_guest_can_have_msa(&vcpu->arch))
919 mask |= MIPS_CONF3_MSA;
925 * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4
926 * @vcpu: Virtual CPU.
928 * Finds the mask of bits which are writable in the guest's Config4 CP0
929 * register, by userland (currently read-only to the guest).
931 unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
933 /* Config5 is optional */
938 * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5
939 * @vcpu: Virtual CPU.
941 * Finds the mask of bits which are writable in the guest's Config5 CP0
942 * register, by the guest itself.
944 unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
946 unsigned int mask = 0;
948 /* Permit MSAEn changes if MSA supported and enabled */
949 if (kvm_mips_guest_has_msa(&vcpu->arch))
950 mask |= MIPS_CONF5_MSAEN;
953 * Permit guest FPU mode changes if FPU is enabled and the relevant
954 * feature exists according to FIR register.
956 if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
958 mask |= MIPS_CONF5_FRE;
959 /* We don't support UFR or UFE */
965 enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
966 uint32_t cause, struct kvm_run *run,
967 struct kvm_vcpu *vcpu)
969 struct mips_coproc *cop0 = vcpu->arch.cop0;
970 enum emulation_result er = EMULATE_DONE;
971 int32_t rt, rd, copz, sel, co_bit, op;
972 uint32_t pc = vcpu->arch.pc;
973 unsigned long curr_pc;
976 * Update PC and hold onto current PC in case there is
977 * an error and we want to rollback the PC
979 curr_pc = vcpu->arch.pc;
980 er = update_pc(vcpu, cause);
981 if (er == EMULATE_FAIL)
984 copz = (inst >> 21) & 0x1f;
985 rt = (inst >> 16) & 0x1f;
986 rd = (inst >> 11) & 0x1f;
988 co_bit = (inst >> 25) & 1;
994 case tlbr_op: /* Read indexed TLB entry */
995 er = kvm_mips_emul_tlbr(vcpu);
997 case tlbwi_op: /* Write indexed */
998 er = kvm_mips_emul_tlbwi(vcpu);
1000 case tlbwr_op: /* Write random */
1001 er = kvm_mips_emul_tlbwr(vcpu);
1003 case tlbp_op: /* TLB Probe */
1004 er = kvm_mips_emul_tlbp(vcpu);
1007 kvm_err("!!!COP0_RFE!!!\n");
1010 er = kvm_mips_emul_eret(vcpu);
1011 goto dont_update_pc;
1014 er = kvm_mips_emul_wait(vcpu);
1020 #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
1021 cop0->stat[rd][sel]++;
1024 if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
1025 vcpu->arch.gprs[rt] = kvm_mips_read_count(vcpu);
1026 } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
1027 vcpu->arch.gprs[rt] = 0x0;
1028 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1029 kvm_mips_trans_mfc0(inst, opc, vcpu);
1032 vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
1034 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1035 kvm_mips_trans_mfc0(inst, opc, vcpu);
1040 ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
1041 pc, rd, sel, rt, vcpu->arch.gprs[rt]);
1046 vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
1050 #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
1051 cop0->stat[rd][sel]++;
1053 if ((rd == MIPS_CP0_TLB_INDEX)
1054 && (vcpu->arch.gprs[rt] >=
1055 KVM_MIPS_GUEST_TLB_SIZE)) {
1056 kvm_err("Invalid TLB Index: %ld",
1057 vcpu->arch.gprs[rt]);
1061 #define C0_EBASE_CORE_MASK 0xff
1062 if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
1063 /* Preserve CORE number */
1064 kvm_change_c0_guest_ebase(cop0,
1065 ~(C0_EBASE_CORE_MASK),
1066 vcpu->arch.gprs[rt]);
1067 kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n",
1068 kvm_read_c0_guest_ebase(cop0));
1069 } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
1071 vcpu->arch.gprs[rt] & ASID_MASK;
1072 if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
1073 ((kvm_read_c0_guest_entryhi(cop0) &
1074 ASID_MASK) != nasid)) {
1075 kvm_debug("MTCz, change ASID from %#lx to %#lx\n",
1076 kvm_read_c0_guest_entryhi(cop0)
1081 /* Blow away the shadow host TLBs */
1082 kvm_mips_flush_host_tlb(1);
1084 kvm_write_c0_guest_entryhi(cop0,
1085 vcpu->arch.gprs[rt]);
1087 /* Are we writing to COUNT */
1088 else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
1089 kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
1091 } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
1092 kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
1093 pc, kvm_read_c0_guest_compare(cop0),
1094 vcpu->arch.gprs[rt]);
1096 /* If we are writing to COMPARE */
1097 /* Clear pending timer interrupt, if any */
1098 kvm_mips_callbacks->dequeue_timer_int(vcpu);
1099 kvm_mips_write_compare(vcpu,
1100 vcpu->arch.gprs[rt]);
1101 } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
1102 unsigned int old_val, val, change;
1104 old_val = kvm_read_c0_guest_status(cop0);
1105 val = vcpu->arch.gprs[rt];
1106 change = val ^ old_val;
1108 /* Make sure that the NMI bit is never set */
1112 * Don't allow CU1 or FR to be set unless FPU
1113 * capability enabled and exists in guest
1116 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
1117 val &= ~(ST0_CU1 | ST0_FR);
* Also don't allow FR to be set if host doesn't support it.
1123 if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
1127 /* Handle changes in FPU mode */
1131 * FPU and Vector register state is made
1132 * UNPREDICTABLE by a change of FR, so don't
1133 * even bother saving it.
1135 if (change & ST0_FR)
1139 * If MSA state is already live, it is undefined
1140 * how it interacts with FR=0 FPU state, and we
1141 * don't want to hit reserved instruction
1142 * exceptions trying to save the MSA state later
* when CU=1 && FR=1, so play it safe and save it first.
1146 if (change & ST0_CU1 && !(val & ST0_FR) &&
1147 vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
1151 * Propagate CU1 (FPU enable) changes
1152 * immediately if the FPU context is already
1153 * loaded. When disabling we leave the context
* loaded so it can be quickly enabled again in the near future.
1157 if (change & ST0_CU1 &&
1158 vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
1159 change_c0_status(ST0_CU1, val);
1163 kvm_write_c0_guest_status(cop0, val);
1165 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1167 * If FPU present, we need CU1/FR bits to take
1168 * effect fairly soon.
1170 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
1171 kvm_mips_trans_mtc0(inst, opc, vcpu);
1173 } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
1174 unsigned int old_val, val, change, wrmask;
1176 old_val = kvm_read_c0_guest_config5(cop0);
1177 val = vcpu->arch.gprs[rt];
1179 /* Only a few bits are writable in Config5 */
1180 wrmask = kvm_mips_config5_wrmask(vcpu);
1181 change = (val ^ old_val) & wrmask;
1182 val = old_val ^ change;
1185 /* Handle changes in FPU/MSA modes */
1189 * Propagate FRE changes immediately if the FPU
1190 * context is already loaded.
1192 if (change & MIPS_CONF5_FRE &&
1193 vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
1194 change_c0_config5(MIPS_CONF5_FRE, val);
1197 * Propagate MSAEn changes immediately if the
1198 * MSA context is already loaded. When disabling
1199 * we leave the context loaded so it can be
1200 * quickly enabled again in the near future.
1202 if (change & MIPS_CONF5_MSAEN &&
1203 vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
1204 change_c0_config5(MIPS_CONF5_MSAEN,
1209 kvm_write_c0_guest_config5(cop0, val);
1210 } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
1211 uint32_t old_cause, new_cause;
1213 old_cause = kvm_read_c0_guest_cause(cop0);
1214 new_cause = vcpu->arch.gprs[rt];
1215 /* Update R/W bits */
1216 kvm_change_c0_guest_cause(cop0, 0x08800300,
1218 /* DC bit enabling/disabling timer? */
1219 if ((old_cause ^ new_cause) & CAUSEF_DC) {
1220 if (new_cause & CAUSEF_DC)
1221 kvm_mips_count_disable_cause(vcpu);
1223 kvm_mips_count_enable_cause(vcpu);
1226 cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
1227 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1228 kvm_mips_trans_mtc0(inst, opc, vcpu);
1232 kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
1233 rd, sel, cop0->reg[rd][sel]);
1237 kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
1238 vcpu->arch.pc, rt, rd, sel);
1243 #ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
1244 cop0->stat[MIPS_CP0_STATUS][0]++;
1247 vcpu->arch.gprs[rt] =
1248 kvm_read_c0_guest_status(cop0);
1251 kvm_debug("[%#lx] mfmcz_op: EI\n",
1253 kvm_set_c0_guest_status(cop0, ST0_IE);
1255 kvm_debug("[%#lx] mfmcz_op: DI\n",
1257 kvm_clear_c0_guest_status(cop0, ST0_IE);
1265 cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
1267 (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
1269 * We don't support any shadow register sets, so
1270 * SRSCtl[PSS] == SRSCtl[CSS] = 0
1276 kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
1277 vcpu->arch.gprs[rt]);
1278 vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
1282 kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
1283 vcpu->arch.pc, copz);
1290 /* Rollback PC only if emulation was unsuccessful */
1291 if (er == EMULATE_FAIL)
1292 vcpu->arch.pc = curr_pc;
* This is for special instructions whose emulation
* updates the PC, so do not overwrite the PC under any circumstances.
1304 enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
1305 struct kvm_run *run,
1306 struct kvm_vcpu *vcpu)
1308 enum emulation_result er = EMULATE_DO_MMIO;
1309 int32_t op, base, rt, offset;
1311 void *data = run->mmio.data;
1312 unsigned long curr_pc;
1315 * Update PC and hold onto current PC in case there is
1316 * an error and we want to rollback the PC
1318 curr_pc = vcpu->arch.pc;
1319 er = update_pc(vcpu, cause);
1320 if (er == EMULATE_FAIL)
1323 rt = (inst >> 16) & 0x1f;
1324 base = (inst >> 21) & 0x1f;
1325 offset = inst & 0xffff;
1326 op = (inst >> 26) & 0x3f;
1331 if (bytes > sizeof(run->mmio.data)) {
1332 kvm_err("%s: bad MMIO length: %d\n", __func__,
1335 run->mmio.phys_addr =
1336 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1338 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1342 run->mmio.len = bytes;
1343 run->mmio.is_write = 1;
1344 vcpu->mmio_needed = 1;
1345 vcpu->mmio_is_write = 1;
1346 *(u8 *) data = vcpu->arch.gprs[rt];
1347 kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1348 vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
1355 if (bytes > sizeof(run->mmio.data)) {
1356 kvm_err("%s: bad MMIO length: %d\n", __func__,
1359 run->mmio.phys_addr =
1360 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1362 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1367 run->mmio.len = bytes;
1368 run->mmio.is_write = 1;
1369 vcpu->mmio_needed = 1;
1370 vcpu->mmio_is_write = 1;
1371 *(uint32_t *) data = vcpu->arch.gprs[rt];
1373 kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1374 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1375 vcpu->arch.gprs[rt], *(uint32_t *) data);
1380 if (bytes > sizeof(run->mmio.data)) {
1381 kvm_err("%s: bad MMIO length: %d\n", __func__,
1384 run->mmio.phys_addr =
1385 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1387 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1392 run->mmio.len = bytes;
1393 run->mmio.is_write = 1;
1394 vcpu->mmio_needed = 1;
1395 vcpu->mmio_is_write = 1;
1396 *(uint16_t *) data = vcpu->arch.gprs[rt];
1398 kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1399 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1400 vcpu->arch.gprs[rt], *(uint32_t *) data);
kvm_err("Store not yet supported\n");
1409 /* Rollback PC if emulation was unsuccessful */
1410 if (er == EMULATE_FAIL)
1411 vcpu->arch.pc = curr_pc;
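/*
 * Userspace sketch (illustrative only; device_read()/device_write() are
 * hypothetical VMM helpers, not part of this file): after the
 * EMULATE_DO_MMIO result above, KVM_RUN returns to the VMM with
 * exit_reason == KVM_EXIT_MMIO, which completes the access and re-enters
 * the guest:
 *
 *	if (run->exit_reason == KVM_EXIT_MMIO) {
 *		if (run->mmio.is_write)
 *			device_write(run->mmio.phys_addr,
 *				     run->mmio.data, run->mmio.len);
 *		else
 *			device_read(run->mmio.phys_addr,
 *				    run->mmio.data, run->mmio.len);
 *		ioctl(vcpu_fd, KVM_RUN, 0);
 *	}
 *
 * For loads, kvm_mips_complete_mmio_load() below then commits
 * run->mmio.data into the destination GPR on re-entry.
 */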
1416 enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
1417 struct kvm_run *run,
1418 struct kvm_vcpu *vcpu)
1420 enum emulation_result er = EMULATE_DO_MMIO;
1421 int32_t op, base, rt, offset;
1424 rt = (inst >> 16) & 0x1f;
1425 base = (inst >> 21) & 0x1f;
1426 offset = inst & 0xffff;
1427 op = (inst >> 26) & 0x3f;
1429 vcpu->arch.pending_load_cause = cause;
1430 vcpu->arch.io_gpr = rt;
1435 if (bytes > sizeof(run->mmio.data)) {
1436 kvm_err("%s: bad MMIO length: %d\n", __func__,
1441 run->mmio.phys_addr =
1442 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1444 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1449 run->mmio.len = bytes;
1450 run->mmio.is_write = 0;
1451 vcpu->mmio_needed = 1;
1452 vcpu->mmio_is_write = 0;
1458 if (bytes > sizeof(run->mmio.data)) {
1459 kvm_err("%s: bad MMIO length: %d\n", __func__,
1464 run->mmio.phys_addr =
1465 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1467 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1472 run->mmio.len = bytes;
1473 run->mmio.is_write = 0;
1474 vcpu->mmio_needed = 1;
1475 vcpu->mmio_is_write = 0;
1478 vcpu->mmio_needed = 2;
1480 vcpu->mmio_needed = 1;
1487 if (bytes > sizeof(run->mmio.data)) {
1488 kvm_err("%s: bad MMIO length: %d\n", __func__,
1493 run->mmio.phys_addr =
1494 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1496 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1501 run->mmio.len = bytes;
1502 run->mmio.is_write = 0;
1503 vcpu->mmio_is_write = 0;
1506 vcpu->mmio_needed = 2;
1508 vcpu->mmio_needed = 1;
kvm_err("Load not yet supported\n");
1521 int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
1523 unsigned long offset = (va & ~PAGE_MASK);
1524 struct kvm *kvm = vcpu->kvm;
1529 gfn = va >> PAGE_SHIFT;
1531 if (gfn >= kvm->arch.guest_pmap_npages) {
1532 kvm_err("%s: Invalid gfn: %#llx\n", __func__, gfn);
1533 kvm_mips_dump_host_tlbs();
1534 kvm_arch_vcpu_dump_regs(vcpu);
1537 pfn = kvm->arch.guest_pmap[gfn];
1538 pa = (pfn << PAGE_SHIFT) | offset;
1540 kvm_debug("%s: va: %#lx, unmapped: %#x\n", __func__, va,
1543 local_flush_icache_range(CKSEG0ADDR(pa), 32);
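/*
 * Address sketch (illustrative only, 32-bit host assumed): with 4 KiB
 * pages, gfn = va >> 12 indexes guest_pmap[], and CKSEG0ADDR(pa) is the
 * unmapped cached alias (0x80000000 | pa) that the icache flush above
 * operates on.
 */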
1547 #define MIPS_CACHE_OP_INDEX_INV 0x0
1548 #define MIPS_CACHE_OP_INDEX_LD_TAG 0x1
1549 #define MIPS_CACHE_OP_INDEX_ST_TAG 0x2
1550 #define MIPS_CACHE_OP_IMP 0x3
1551 #define MIPS_CACHE_OP_HIT_INV 0x4
1552 #define MIPS_CACHE_OP_FILL_WB_INV 0x5
1553 #define MIPS_CACHE_OP_HIT_HB 0x6
1554 #define MIPS_CACHE_OP_FETCH_LOCK 0x7
1556 #define MIPS_CACHE_ICACHE 0x0
1557 #define MIPS_CACHE_DCACHE 0x1
1558 #define MIPS_CACHE_SEC 0x3
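/*
 * Decode sketch (illustrative only): the 5-bit field in CACHE bits 20:16
 * packs the operation in its upper three bits and the cache selector in
 * its lower two. E.g. Index_Writeback_Inv_D (0b00001) decodes as
 * op = MIPS_CACHE_OP_INDEX_INV, cache = MIPS_CACHE_DCACHE, while
 * Hit_Invalidate_I (0b10000) decodes as op = MIPS_CACHE_OP_HIT_INV,
 * cache = MIPS_CACHE_ICACHE.
 */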
1560 enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
1562 struct kvm_run *run,
1563 struct kvm_vcpu *vcpu)
1565 struct mips_coproc *cop0 = vcpu->arch.cop0;
1566 enum emulation_result er = EMULATE_DONE;
1567 int32_t offset, cache, op_inst, op, base;
1568 struct kvm_vcpu_arch *arch = &vcpu->arch;
1570 unsigned long curr_pc;
1573 * Update PC and hold onto current PC in case there is
1574 * an error and we want to rollback the PC
1576 curr_pc = vcpu->arch.pc;
1577 er = update_pc(vcpu, cause);
1578 if (er == EMULATE_FAIL)
1581 base = (inst >> 21) & 0x1f;
1582 op_inst = (inst >> 16) & 0x1f;
1583 offset = (int16_t)inst;
1584 cache = (inst >> 16) & 0x3;
1585 op = (inst >> 18) & 0x7;
1587 va = arch->gprs[base] + offset;
1589 kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1590 cache, op, base, arch->gprs[base], offset);
* Treat INDEX_INV as a nop, basically issued by Linux on startup to
* invalidate the caches entirely by stepping through all the ways/indexes.
1597 if (op == MIPS_CACHE_OP_INDEX_INV) {
1598 kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1599 vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
1600 arch->gprs[base], offset);
1602 if (cache == MIPS_CACHE_DCACHE)
1604 else if (cache == MIPS_CACHE_ICACHE)
1607 kvm_err("%s: unsupported CACHE INDEX operation\n",
1609 return EMULATE_FAIL;
1612 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1613 kvm_mips_trans_cache_index(inst, opc, vcpu);
1619 if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
1620 if (kvm_mips_host_tlb_lookup(vcpu, va) < 0)
1621 kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
1622 } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
1623 KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
1626 /* If an entry already exists then skip */
1627 if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0)
* If the address is not in the guest TLB, then give the guest a fault;
* the resulting handler will do the right thing.
1634 index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
1635 (kvm_read_c0_guest_entryhi
1636 (cop0) & ASID_MASK));
1639 vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
1640 vcpu->arch.host_cp0_badvaddr = va;
1641 er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
1644 goto dont_update_pc;
1646 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
* Check if the entry is valid; if not, then set up a TLB
* invalid exception for the guest.
1651 if (!TLB_IS_VALID(*tlb, va)) {
1652 er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
1655 goto dont_update_pc;
* We fault an entry from the guest TLB into the shadow host TLB.
1661 kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
1667 kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1668 cache, op, base, arch->gprs[base], offset);
1671 goto dont_update_pc;
1676 /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
1677 if (cache == MIPS_CACHE_DCACHE
1678 && (op == MIPS_CACHE_OP_FILL_WB_INV
1679 || op == MIPS_CACHE_OP_HIT_INV)) {
1680 flush_dcache_line(va);
1682 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
* Replace the CACHE instruction with a SYNCI; not the same,
* but it avoids a trap.
1687 kvm_mips_trans_cache_va(inst, opc, vcpu);
1689 } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
1690 flush_dcache_line(va);
1691 flush_icache_line(va);
1693 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1694 /* Replace the CACHE instruction, with a SYNCI */
1695 kvm_mips_trans_cache_va(inst, opc, vcpu);
1698 kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1699 cache, op, base, arch->gprs[base], offset);
1702 goto dont_update_pc;
1709 vcpu->arch.pc = curr_pc;
1714 enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
1715 struct kvm_run *run,
1716 struct kvm_vcpu *vcpu)
1718 enum emulation_result er = EMULATE_DONE;
1721 /* Fetch the instruction. */
1722 if (cause & CAUSEF_BD)
1725 inst = kvm_get_inst(opc, vcpu);
1727 switch (((union mips_instruction)inst).r_format.opcode) {
1729 er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
1734 er = kvm_mips_emulate_store(inst, cause, run, vcpu);
1741 er = kvm_mips_emulate_load(inst, cause, run, vcpu);
1745 ++vcpu->stat.cache_exits;
1746 trace_kvm_exit(vcpu, CACHE_EXITS);
1747 er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
1751 kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
1753 kvm_arch_vcpu_dump_regs(vcpu);
1761 enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
1763 struct kvm_run *run,
1764 struct kvm_vcpu *vcpu)
1766 struct mips_coproc *cop0 = vcpu->arch.cop0;
1767 struct kvm_vcpu_arch *arch = &vcpu->arch;
1768 enum emulation_result er = EMULATE_DONE;
1770 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1772 kvm_write_c0_guest_epc(cop0, arch->pc);
1773 kvm_set_c0_guest_status(cop0, ST0_EXL);
1775 if (cause & CAUSEF_BD)
1776 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1778 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1780 kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
1782 kvm_change_c0_guest_cause(cop0, (0xff),
1783 (T_SYSCALL << CAUSEB_EXCCODE));
1785 /* Set PC to the exception entry point */
1786 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1789 kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
1796 enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
1798 struct kvm_run *run,
1799 struct kvm_vcpu *vcpu)
1801 struct mips_coproc *cop0 = vcpu->arch.cop0;
1802 struct kvm_vcpu_arch *arch = &vcpu->arch;
1803 unsigned long entryhi = (vcpu->arch. host_cp0_badvaddr & VPN2_MASK) |
1804 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1806 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1808 kvm_write_c0_guest_epc(cop0, arch->pc);
1809 kvm_set_c0_guest_status(cop0, ST0_EXL);
1811 if (cause & CAUSEF_BD)
1812 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1814 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1816 kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
1819 /* set pc to the exception entry point */
1820 arch->pc = KVM_GUEST_KSEG0 + 0x0;
1823 kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
1826 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1829 kvm_change_c0_guest_cause(cop0, (0xff),
1830 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
1832 /* setup badvaddr, context and entryhi registers for the guest */
1833 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1834 /* XXXKYMA: is the context register used by linux??? */
1835 kvm_write_c0_guest_entryhi(cop0, entryhi);
1836 /* Blow away the shadow host TLBs */
1837 kvm_mips_flush_host_tlb(1);
1839 return EMULATE_DONE;
1842 enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
1844 struct kvm_run *run,
1845 struct kvm_vcpu *vcpu)
1847 struct mips_coproc *cop0 = vcpu->arch.cop0;
1848 struct kvm_vcpu_arch *arch = &vcpu->arch;
1849 unsigned long entryhi =
1850 (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1851 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1853 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1855 kvm_write_c0_guest_epc(cop0, arch->pc);
1856 kvm_set_c0_guest_status(cop0, ST0_EXL);
1858 if (cause & CAUSEF_BD)
1859 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1861 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1863 kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
1866 /* set pc to the exception entry point */
1867 arch->pc = KVM_GUEST_KSEG0 + 0x180;
kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n",
1872 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1875 kvm_change_c0_guest_cause(cop0, (0xff),
1876 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
1878 /* setup badvaddr, context and entryhi registers for the guest */
1879 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1880 /* XXXKYMA: is the context register used by linux??? */
1881 kvm_write_c0_guest_entryhi(cop0, entryhi);
1882 /* Blow away the shadow host TLBs */
1883 kvm_mips_flush_host_tlb(1);
1885 return EMULATE_DONE;
1888 enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
1890 struct kvm_run *run,
1891 struct kvm_vcpu *vcpu)
1893 struct mips_coproc *cop0 = vcpu->arch.cop0;
1894 struct kvm_vcpu_arch *arch = &vcpu->arch;
1895 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1896 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1898 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1900 kvm_write_c0_guest_epc(cop0, arch->pc);
1901 kvm_set_c0_guest_status(cop0, ST0_EXL);
1903 if (cause & CAUSEF_BD)
1904 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1906 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1908 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
1911 /* Set PC to the exception entry point */
1912 arch->pc = KVM_GUEST_KSEG0 + 0x0;
1914 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
1916 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1919 kvm_change_c0_guest_cause(cop0, (0xff),
1920 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
1922 /* setup badvaddr, context and entryhi registers for the guest */
1923 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1924 /* XXXKYMA: is the context register used by linux??? */
1925 kvm_write_c0_guest_entryhi(cop0, entryhi);
1926 /* Blow away the shadow host TLBs */
1927 kvm_mips_flush_host_tlb(1);
1929 return EMULATE_DONE;
1932 enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
1934 struct kvm_run *run,
1935 struct kvm_vcpu *vcpu)
1937 struct mips_coproc *cop0 = vcpu->arch.cop0;
1938 struct kvm_vcpu_arch *arch = &vcpu->arch;
1939 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1940 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1942 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1944 kvm_write_c0_guest_epc(cop0, arch->pc);
1945 kvm_set_c0_guest_status(cop0, ST0_EXL);
1947 if (cause & CAUSEF_BD)
1948 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1950 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1952 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
1955 /* Set PC to the exception entry point */
1956 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1958 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
1960 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1963 kvm_change_c0_guest_cause(cop0, (0xff),
1964 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
1966 /* setup badvaddr, context and entryhi registers for the guest */
1967 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1968 /* XXXKYMA: is the context register used by linux??? */
1969 kvm_write_c0_guest_entryhi(cop0, entryhi);
1970 /* Blow away the shadow host TLBs */
1971 kvm_mips_flush_host_tlb(1);
1973 return EMULATE_DONE;
1976 /* TLBMOD: store into address matching TLB with Dirty bit off */
1977 enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
1978 struct kvm_run *run,
1979 struct kvm_vcpu *vcpu)
1981 enum emulation_result er = EMULATE_DONE;
1983 struct mips_coproc *cop0 = vcpu->arch.cop0;
1984 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1985 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1988 /* If address not in the guest TLB, then we are in trouble */
1989 index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
1991 /* XXXKYMA Invalidate and retry */
1992 kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
1993 kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
1995 kvm_mips_dump_guest_tlbs(vcpu);
1996 kvm_mips_dump_host_tlbs();
1997 return EMULATE_FAIL;
2001 er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
2005 enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
2007 struct kvm_run *run,
2008 struct kvm_vcpu *vcpu)
2010 struct mips_coproc *cop0 = vcpu->arch.cop0;
2011 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
2012 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
2013 struct kvm_vcpu_arch *arch = &vcpu->arch;
2015 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2017 kvm_write_c0_guest_epc(cop0, arch->pc);
2018 kvm_set_c0_guest_status(cop0, ST0_EXL);
2020 if (cause & CAUSEF_BD)
2021 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2023 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2025 kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
2028 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2030 kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
2032 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2035 kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));
2037 /* setup badvaddr, context and entryhi registers for the guest */
2038 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2039 /* XXXKYMA: is the context register used by linux??? */
2040 kvm_write_c0_guest_entryhi(cop0, entryhi);
2041 /* Blow away the shadow host TLBs */
2042 kvm_mips_flush_host_tlb(1);
2044 return EMULATE_DONE;
2047 enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
2049 struct kvm_run *run,
2050 struct kvm_vcpu *vcpu)
2052 struct mips_coproc *cop0 = vcpu->arch.cop0;
2053 struct kvm_vcpu_arch *arch = &vcpu->arch;
2055 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2057 kvm_write_c0_guest_epc(cop0, arch->pc);
2058 kvm_set_c0_guest_status(cop0, ST0_EXL);
2060 if (cause & CAUSEF_BD)
2061 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2063 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2067 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2069 kvm_change_c0_guest_cause(cop0, (0xff),
2070 (T_COP_UNUSABLE << CAUSEB_EXCCODE));
2071 kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
2073 return EMULATE_DONE;
2076 enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
2078 struct kvm_run *run,
2079 struct kvm_vcpu *vcpu)
2081 struct mips_coproc *cop0 = vcpu->arch.cop0;
2082 struct kvm_vcpu_arch *arch = &vcpu->arch;
2083 enum emulation_result er = EMULATE_DONE;
2085 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2087 kvm_write_c0_guest_epc(cop0, arch->pc);
2088 kvm_set_c0_guest_status(cop0, ST0_EXL);
2090 if (cause & CAUSEF_BD)
2091 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2093 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2095 kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
2097 kvm_change_c0_guest_cause(cop0, (0xff),
2098 (T_RES_INST << CAUSEB_EXCCODE));
2100 /* Set PC to the exception entry point */
2101 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2104 kvm_err("Trying to deliver RI when EXL is already set\n");
2111 enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
2113 struct kvm_run *run,
2114 struct kvm_vcpu *vcpu)
2116 struct mips_coproc *cop0 = vcpu->arch.cop0;
2117 struct kvm_vcpu_arch *arch = &vcpu->arch;
2118 enum emulation_result er = EMULATE_DONE;
2120 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2122 kvm_write_c0_guest_epc(cop0, arch->pc);
2123 kvm_set_c0_guest_status(cop0, ST0_EXL);
2125 if (cause & CAUSEF_BD)
2126 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2128 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2130 kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
2132 kvm_change_c0_guest_cause(cop0, (0xff),
2133 (T_BREAK << CAUSEB_EXCCODE));
2135 /* Set PC to the exception entry point */
2136 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2139 kvm_err("Trying to deliver BP when EXL is already set\n");
2146 enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
2148 struct kvm_run *run,
2149 struct kvm_vcpu *vcpu)
2151 struct mips_coproc *cop0 = vcpu->arch.cop0;
2152 struct kvm_vcpu_arch *arch = &vcpu->arch;
2153 enum emulation_result er = EMULATE_DONE;
2155 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2157 kvm_write_c0_guest_epc(cop0, arch->pc);
2158 kvm_set_c0_guest_status(cop0, ST0_EXL);
2160 if (cause & CAUSEF_BD)
2161 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2163 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2165 kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);
2167 kvm_change_c0_guest_cause(cop0, (0xff),
2168 (T_TRAP << CAUSEB_EXCCODE));
2170 /* Set PC to the exception entry point */
2171 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2174 kvm_err("Trying to deliver TRAP when EXL is already set\n");
2181 enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
2183 struct kvm_run *run,
2184 struct kvm_vcpu *vcpu)
2186 struct mips_coproc *cop0 = vcpu->arch.cop0;
2187 struct kvm_vcpu_arch *arch = &vcpu->arch;
2188 enum emulation_result er = EMULATE_DONE;
2190 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2192 kvm_write_c0_guest_epc(cop0, arch->pc);
2193 kvm_set_c0_guest_status(cop0, ST0_EXL);
2195 if (cause & CAUSEF_BD)
2196 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2198 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2200 kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);
2202 kvm_change_c0_guest_cause(cop0, (0xff),
2203 (T_MSAFPE << CAUSEB_EXCCODE));
2205 /* Set PC to the exception entry point */
2206 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2209 kvm_err("Trying to deliver MSAFPE when EXL is already set\n");
2216 enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
2218 struct kvm_run *run,
2219 struct kvm_vcpu *vcpu)
2221 struct mips_coproc *cop0 = vcpu->arch.cop0;
2222 struct kvm_vcpu_arch *arch = &vcpu->arch;
2223 enum emulation_result er = EMULATE_DONE;
2225 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2227 kvm_write_c0_guest_epc(cop0, arch->pc);
2228 kvm_set_c0_guest_status(cop0, ST0_EXL);
2230 if (cause & CAUSEF_BD)
2231 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2233 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2235 kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc);
2237 kvm_change_c0_guest_cause(cop0, (0xff),
2238 (T_FPE << CAUSEB_EXCCODE));
2240 /* Set PC to the exception entry point */
2241 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2244 kvm_err("Trying to deliver FPE when EXL is already set\n");
2251 enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
2253 struct kvm_run *run,
2254 struct kvm_vcpu *vcpu)
2256 struct mips_coproc *cop0 = vcpu->arch.cop0;
2257 struct kvm_vcpu_arch *arch = &vcpu->arch;
2258 enum emulation_result er = EMULATE_DONE;
2260 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2262 kvm_write_c0_guest_epc(cop0, arch->pc);
2263 kvm_set_c0_guest_status(cop0, ST0_EXL);
2265 if (cause & CAUSEF_BD)
2266 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2268 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2270 kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);
2272 kvm_change_c0_guest_cause(cop0, (0xff),
2273 (T_MSADIS << CAUSEB_EXCCODE));
2275 /* Set PC to the exception entry point */
2276 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2279 kvm_err("Trying to deliver MSADIS when EXL is already set\n");
2286 /* ll/sc, rdhwr, sync emulation */
2288 #define OPCODE 0xfc000000
2289 #define BASE 0x03e00000
2290 #define RT 0x001f0000
2291 #define OFFSET 0x0000ffff
2292 #define LL 0xc0000000
2293 #define SC 0xe0000000
2294 #define SPEC0 0x00000000
2295 #define SPEC3 0x7c000000
2296 #define RD 0x0000f800
2297 #define FUNC 0x0000003f
2298 #define SYNC 0x0000000f
2299 #define RDHWR 0x0000003b
2301 enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
2302 struct kvm_run *run,
2303 struct kvm_vcpu *vcpu)
2305 struct mips_coproc *cop0 = vcpu->arch.cop0;
2306 struct kvm_vcpu_arch *arch = &vcpu->arch;
2307 enum emulation_result er = EMULATE_DONE;
2308 unsigned long curr_pc;
2312 * Update PC and hold onto current PC in case there is
2313 * an error and we want to rollback the PC
2315 curr_pc = vcpu->arch.pc;
2316 er = update_pc(vcpu, cause);
2317 if (er == EMULATE_FAIL)
2320 /* Fetch the instruction. */
2321 if (cause & CAUSEF_BD)
2324 inst = kvm_get_inst(opc, vcpu);
2326 if (inst == KVM_INVALID_INST) {
2327 kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
2328 return EMULATE_FAIL;
2331 if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
2332 int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
2333 int rd = (inst & RD) >> 11;
2334 int rt = (inst & RT) >> 16;
2335 /* If usermode, check RDHWR rd is allowed by guest HWREna */
2336 if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
2337 kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
2342 case 0: /* CPU number */
2345 case 1: /* SYNCI length */
2346 arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
2347 current_cpu_data.icache.linesz);
2349 case 2: /* Read count register */
2350 arch->gprs[rt] = kvm_mips_read_count(vcpu);
2352 case 3: /* Count register resolution */
2353 switch (current_cpu_data.cputype) {
2363 arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
2367 kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
2371 kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst);
2375 return EMULATE_DONE;
2379 * Rollback PC (if in branch delay slot then the PC already points to
2380 * branch target), and pass the RI exception to the guest OS.
2382 vcpu->arch.pc = curr_pc;
2383 return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
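/*
 * Encoding sketch (illustrative only): "rdhwr $3, $29" (read UserLocal
 * into v1) assembles to 0x7c03e83b: SPEC3 (0x7c000000) | rt = 3
 * (0x00030000) | rd = 29 (29 << 11 = 0xe800) | RDHWR (0x3b). The masks
 * above extract rd = 29 and rt = 3, landing on the UserLocal case.
 */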
2386 enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
2387 struct kvm_run *run)
2389 unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
2390 enum emulation_result er = EMULATE_DONE;
2392 if (run->mmio.len > sizeof(*gpr)) {
kvm_err("Bad MMIO length: %d\n", run->mmio.len);
2398 er = update_pc(vcpu, vcpu->arch.pending_load_cause);
2399 if (er == EMULATE_FAIL)
2402 switch (run->mmio.len) {
2404 *gpr = *(int32_t *) run->mmio.data;
2408 if (vcpu->mmio_needed == 2)
2409 *gpr = *(int16_t *) run->mmio.data;
2411 *gpr = *(uint16_t *)run->mmio.data;
2415 if (vcpu->mmio_needed == 2)
2416 *gpr = *(int8_t *) run->mmio.data;
2418 *gpr = *(u8 *) run->mmio.data;
2422 if (vcpu->arch.pending_load_cause & CAUSEF_BD)
2423 kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
2424 vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
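/*
 * Sign-extension sketch (illustrative only): a 2-byte load with
 * mmio_needed == 2 (lh) commits data 0x8000 through the int16_t cast
 * above, sign-extending it to 0xffff8000 in a 32-bit GPR, whereas
 * mmio_needed == 1 (lhu) uses the uint16_t cast and yields 0x00008000.
 */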
2431 static enum emulation_result kvm_mips_emulate_exc(unsigned long cause,
2433 struct kvm_run *run,
2434 struct kvm_vcpu *vcpu)
2436 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2437 struct mips_coproc *cop0 = vcpu->arch.cop0;
2438 struct kvm_vcpu_arch *arch = &vcpu->arch;
2439 enum emulation_result er = EMULATE_DONE;
2441 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2443 kvm_write_c0_guest_epc(cop0, arch->pc);
2444 kvm_set_c0_guest_status(cop0, ST0_EXL);
2446 if (cause & CAUSEF_BD)
2447 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2449 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2451 kvm_change_c0_guest_cause(cop0, (0xff),
2452 (exccode << CAUSEB_EXCCODE));
2454 /* Set PC to the exception entry point */
2455 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2456 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2458 kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
2459 exccode, kvm_read_c0_guest_epc(cop0),
2460 kvm_read_c0_guest_badvaddr(cop0));
2462 kvm_err("Trying to deliver EXC when EXL is already set\n");
2469 enum emulation_result kvm_mips_check_privilege(unsigned long cause,
2471 struct kvm_run *run,
2472 struct kvm_vcpu *vcpu)
2474 enum emulation_result er = EMULATE_DONE;
2475 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2476 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
2478 int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
2492 case T_COP_UNUSABLE:
2493 if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
2494 er = EMULATE_PRIV_FAIL;
* If we are accessing Guest kernel space, then send an
2503 * address error exception to the guest
2505 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
2506 kvm_debug("%s: LD MISS @ %#lx\n", __func__,
2509 cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
2510 er = EMULATE_PRIV_FAIL;
* If we are accessing Guest kernel space, then send an
2517 * address error exception to the guest
2519 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
2520 kvm_debug("%s: ST MISS @ %#lx\n", __func__,
2523 cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
2524 er = EMULATE_PRIV_FAIL;
2529 kvm_debug("%s: address error ST @ %#lx\n", __func__,
2531 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
2533 cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
2535 er = EMULATE_PRIV_FAIL;
2538 kvm_debug("%s: address error LD @ %#lx\n", __func__,
2540 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
2542 cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
2544 er = EMULATE_PRIV_FAIL;
2547 er = EMULATE_PRIV_FAIL;
2552 if (er == EMULATE_PRIV_FAIL)
2553 kvm_mips_emulate_exc(cause, opc, run, vcpu);
* User Address (UA) fault. This could happen if:
2560 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
2561 * case we pass on the fault to the guest kernel and let it handle it.
2562 * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
2563 * case we inject the TLB from the Guest TLB into the shadow host TLB
2565 enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
2567 struct kvm_run *run,
2568 struct kvm_vcpu *vcpu)
2570 enum emulation_result er = EMULATE_DONE;
2571 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2572 unsigned long va = vcpu->arch.host_cp0_badvaddr;
2575 kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
2576 vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
2579 * KVM would not have got the exception if this entry was valid in the
2580 * shadow host TLB. Check the Guest TLB, if the entry is not there then
2581 * send the guest an exception. The guest exc handler should then inject
2582 * an entry into the guest TLB.
2584 index = kvm_mips_guest_tlb_lookup(vcpu,
2586 (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) & ASID_MASK));
2588 if (exccode == T_TLB_LD_MISS) {
2589 er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
2590 } else if (exccode == T_TLB_ST_MISS) {
2591 er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
2593 kvm_err("%s: invalid exc code: %d\n", __func__,
2598 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
* Check if the entry is valid; if not, then set up a TLB invalid
* exception for the guest.
2604 if (!TLB_IS_VALID(*tlb, va)) {
2605 if (exccode == T_TLB_LD_MISS) {
2606 er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
2608 } else if (exccode == T_TLB_ST_MISS) {
2609 er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
2612 kvm_err("%s: invalid exc code: %d\n", __func__,
2617 kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
2618 tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
* OK, we have a Guest TLB entry, now inject it into the shadow host TLB.
2623 kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,