arch/mips/kvm/emulate.c
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * KVM/MIPS: Instruction/Exception emulation
7  *
8  * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
9  * Authors: Sanjay Lal <sanjayl@kymasys.com>
10  */
11
12 #include <linux/errno.h>
13 #include <linux/err.h>
14 #include <linux/ktime.h>
15 #include <linux/kvm_host.h>
16 #include <linux/module.h>
17 #include <linux/vmalloc.h>
18 #include <linux/fs.h>
19 #include <linux/bootmem.h>
20 #include <linux/random.h>
21 #include <asm/page.h>
22 #include <asm/cacheflush.h>
23 #include <asm/cacheops.h>
24 #include <asm/cpu-info.h>
25 #include <asm/mmu_context.h>
26 #include <asm/tlbflush.h>
27 #include <asm/inst.h>
28
29 #undef CONFIG_MIPS_MT
30 #include <asm/r4kcache.h>
31 #define CONFIG_MIPS_MT
32
33 #include "opcode.h"
34 #include "interrupt.h"
35 #include "commpage.h"
36
37 #include "trace.h"
38
39 /*
40  * Compute the return address and emulate branch simulation, if required.
41  * This should only be called when the exception occurred in a branch delay slot (CP0_Cause.BD set).
42  */
43 unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
44         unsigned long instpc)
45 {
46         unsigned int dspcontrol;
47         union mips_instruction insn;
48         struct kvm_vcpu_arch *arch = &vcpu->arch;
49         long epc = instpc;
50         long nextpc = KVM_INVALID_INST;
51
52         if (epc & 3)
53                 goto unaligned;
54
55         /* Read the instruction */
56         insn.word = kvm_get_inst((uint32_t *) epc, vcpu);
57
58         if (insn.word == KVM_INVALID_INST)
59                 return KVM_INVALID_INST;
60
61         switch (insn.i_format.opcode) {
62                 /* jr and jalr are in r_format. */
63         case spec_op:
64                 switch (insn.r_format.func) {
65                 case jalr_op:
66                         arch->gprs[insn.r_format.rd] = epc + 8;
67                         /* Fall through */
68                 case jr_op:
69                         nextpc = arch->gprs[insn.r_format.rs];
70                         break;
71                 }
72                 break;
73
74                 /*
75                  * This group contains:
76                  * bltz_op, bgez_op, bltzl_op, bgezl_op,
77                  * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
78                  */
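                /*
                 * Branch targets are relative to the delay-slot address
                 * (epc + 4); a branch that is not taken resumes at epc + 8.
                 */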
79         case bcond_op:
80                 switch (insn.i_format.rt) {
81                 case bltz_op:
82                 case bltzl_op:
83                         if ((long)arch->gprs[insn.i_format.rs] < 0)
84                                 epc = epc + 4 + (insn.i_format.simmediate << 2);
85                         else
86                                 epc += 8;
87                         nextpc = epc;
88                         break;
89
90                 case bgez_op:
91                 case bgezl_op:
92                         if ((long)arch->gprs[insn.i_format.rs] >= 0)
93                                 epc = epc + 4 + (insn.i_format.simmediate << 2);
94                         else
95                                 epc += 8;
96                         nextpc = epc;
97                         break;
98
99                 case bltzal_op:
100                 case bltzall_op:
101                         arch->gprs[31] = epc + 8;
102                         if ((long)arch->gprs[insn.i_format.rs] < 0)
103                                 epc = epc + 4 + (insn.i_format.simmediate << 2);
104                         else
105                                 epc += 8;
106                         nextpc = epc;
107                         break;
108
109                 case bgezal_op:
110                 case bgezall_op:
111                         arch->gprs[31] = epc + 8;
112                         if ((long)arch->gprs[insn.i_format.rs] >= 0)
113                                 epc = epc + 4 + (insn.i_format.simmediate << 2);
114                         else
115                                 epc += 8;
116                         nextpc = epc;
117                         break;
118                 case bposge32_op:
119                         if (!cpu_has_dsp)
120                                 goto sigill;
121
122                         dspcontrol = rddsp(0x01);
123
124                         if (dspcontrol >= 32)
125                                 epc = epc + 4 + (insn.i_format.simmediate << 2);
126                         else
127                                 epc += 8;
128                         nextpc = epc;
129                         break;
130                 }
131                 break;
132
133                 /* These are unconditional and in j_format. */
134         case jal_op:
135                 arch->gprs[31] = instpc + 8;
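                /* fall through to compute the jump target */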
136         case j_op:
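                /*
                 * The 26-bit target replaces the low 28 bits of the address of
                 * the delay slot (epc + 4); the top 4 bits are preserved.
                 */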
137                 epc += 4;
138                 epc >>= 28;
139                 epc <<= 28;
140                 epc |= (insn.j_format.target << 2);
141                 nextpc = epc;
142                 break;
143
144                 /* These are conditional and in i_format. */
145         case beq_op:
146         case beql_op:
147                 if (arch->gprs[insn.i_format.rs] ==
148                     arch->gprs[insn.i_format.rt])
149                         epc = epc + 4 + (insn.i_format.simmediate << 2);
150                 else
151                         epc += 8;
152                 nextpc = epc;
153                 break;
154
155         case bne_op:
156         case bnel_op:
157                 if (arch->gprs[insn.i_format.rs] !=
158                     arch->gprs[insn.i_format.rt])
159                         epc = epc + 4 + (insn.i_format.simmediate << 2);
160                 else
161                         epc += 8;
162                 nextpc = epc;
163                 break;
164
165         case blez_op:           /* not really i_format */
166         case blezl_op:
167                 /* rt field assumed to be zero */
168                 if ((long)arch->gprs[insn.i_format.rs] <= 0)
169                         epc = epc + 4 + (insn.i_format.simmediate << 2);
170                 else
171                         epc += 8;
172                 nextpc = epc;
173                 break;
174
175         case bgtz_op:
176         case bgtzl_op:
177                 /* rt field assumed to be zero */
178                 if ((long)arch->gprs[insn.i_format.rs] > 0)
179                         epc = epc + 4 + (insn.i_format.simmediate << 2);
180                 else
181                         epc += 8;
182                 nextpc = epc;
183                 break;
184
185                 /* And now the FPA/cp1 branch instructions. */
186         case cop1_op:
187                 kvm_err("%s: unsupported cop1_op\n", __func__);
188                 break;
189         }
190
191         return nextpc;
192
193 unaligned:
194         kvm_err("%s: unaligned epc\n", __func__);
195         return nextpc;
196
197 sigill:
198         kvm_err("%s: DSP branch but not DSP ASE\n", __func__);
199         return nextpc;
200 }
201
202 enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
203 {
204         unsigned long branch_pc;
205         enum emulation_result er = EMULATE_DONE;
206
207         if (cause & CAUSEF_BD) {
208                 branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
209                 if (branch_pc == KVM_INVALID_INST) {
210                         er = EMULATE_FAIL;
211                 } else {
212                         vcpu->arch.pc = branch_pc;
213                         kvm_debug("BD update_pc(): New PC: %#lx\n",
214                                   vcpu->arch.pc);
215                 }
216         } else
217                 vcpu->arch.pc += 4;
218
219         kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
220
221         return er;
222 }
223
224 /**
225  * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
226  * @vcpu:       Virtual CPU.
227  *
228  * Returns:     1 if the CP0_Count timer is disabled by either the guest
229  *              CP0_Cause.DC bit or the count_ctl.DC bit.
230  *              0 otherwise (in which case CP0_Count timer is running).
231  */
232 static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
233 {
234         struct mips_coproc *cop0 = vcpu->arch.cop0;
235
236         return  (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
237                 (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
238 }
239
240 /**
241  * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
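 * @vcpu:       Virtual CPU.
 * @now:        Kernel time to scale to a CP0_Count value.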
242  *
243  * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
244  *
245  * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
246  */
247 static uint32_t kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
248 {
249         s64 now_ns, periods;
250         u64 delta;
251
252         now_ns = ktime_to_ns(now);
253         delta = now_ns + vcpu->arch.count_dyn_bias;
254
255         if (delta >= vcpu->arch.count_period) {
256                 /* If delta is out of safe range the bias needs adjusting */
257                 periods = div64_s64(now_ns, vcpu->arch.count_period);
258                 vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
259                 /* Recalculate delta with new bias */
260                 delta = now_ns + vcpu->arch.count_dyn_bias;
261         }
262
263         /*
264          * We've ensured that:
265          *   delta < count_period
266          *
267          * Therefore the intermediate delta*count_hz will never overflow since
268          * at the boundary condition:
269          *   delta = count_period
270          *   delta = NSEC_PER_SEC * 2^32 / count_hz
271          *   delta * count_hz = NSEC_PER_SEC * 2^32
272          */
273         return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
274 }
275
276 /**
277  * kvm_mips_count_time() - Get effective current time.
278  * @vcpu:       Virtual CPU.
279  *
280  * Get effective monotonic ktime. This is usually a straightforward ktime_get(),
281  * except when the master disable bit is set in count_ctl, in which case it is
282  * count_resume, i.e. the time that the count was disabled.
283  *
284  * Returns:     Effective monotonic ktime for CP0_Count.
285  */
286 static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
287 {
288         if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
289                 return vcpu->arch.count_resume;
290
291         return ktime_get();
292 }
293
294 /**
295  * kvm_mips_read_count_running() - Read the current count value as if running.
296  * @vcpu:       Virtual CPU.
297  * @now:        Kernel time to read CP0_Count at.
298  *
299  * Returns the current guest CP0_Count register at time @now, and handles the
300  * case where the timer interrupt is due but hasn't been handled yet.
301  *
302  * Returns:     The current value of the guest CP0_Count register.
303  */
304 static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
305 {
306         ktime_t expires;
307         int running;
308
309         /* Is the hrtimer pending? */
310         expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
311         if (ktime_compare(now, expires) >= 0) {
312                 /*
313                  * Cancel it while we handle it so there's no chance of
314                  * interference with the timeout handler.
315                  */
316                 running = hrtimer_cancel(&vcpu->arch.comparecount_timer);
317
318                 /* Nothing should be waiting on the timeout */
319                 kvm_mips_callbacks->queue_timer_int(vcpu);
320
321                 /*
322                  * Restart the timer if it was running based on the expiry time
323                  * we read, so that we don't push it back 2 periods.
324                  */
325                 if (running) {
326                         expires = ktime_add_ns(expires,
327                                                vcpu->arch.count_period);
328                         hrtimer_start(&vcpu->arch.comparecount_timer, expires,
329                                       HRTIMER_MODE_ABS);
330                 }
331         }
332
333         /* Return the biased and scaled guest CP0_Count */
334         return vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
335 }
336
337 /**
338  * kvm_mips_read_count() - Read the current count value.
339  * @vcpu:       Virtual CPU.
340  *
341  * Read the current guest CP0_Count value, taking into account whether the timer
342  * is stopped.
343  *
344  * Returns:     The current guest CP0_Count value.
345  */
346 uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu)
347 {
348         struct mips_coproc *cop0 = vcpu->arch.cop0;
349
350         /* If count disabled just read static copy of count */
351         if (kvm_mips_count_disabled(vcpu))
352                 return kvm_read_c0_guest_count(cop0);
353
354         return kvm_mips_read_count_running(vcpu, ktime_get());
355 }
356
357 /**
358  * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
359  * @vcpu:       Virtual CPU.
360  * @count:      Output pointer for CP0_Count value at point of freeze.
361  *
362  * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
363  * at the point it was frozen. It is guaranteed that any pending interrupts at
364  * the point it was frozen are handled, and none after that point.
365  *
366  * This is useful where the time/CP0_Count is needed in the calculation of the
367  * new parameters.
368  *
369  * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
370  *
371  * Returns:     The ktime at the point of freeze.
372  */
373 static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu,
374                                        uint32_t *count)
375 {
376         ktime_t now;
377
378         /* stop hrtimer before finding time */
379         hrtimer_cancel(&vcpu->arch.comparecount_timer);
380         now = ktime_get();
381
382         /* find count at this point and handle pending hrtimer */
383         *count = kvm_mips_read_count_running(vcpu, now);
384
385         return now;
386 }
387
388 /**
389  * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
390  * @vcpu:       Virtual CPU.
391  * @now:        ktime at point of resume.
392  * @count:      CP0_Count at point of resume.
393  *
394  * Resumes the timer and updates the timer expiry based on @now and @count.
395  * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
396  * parameters need to be changed.
397  *
398  * It is guaranteed that a timer interrupt immediately after resume will be
399  * handled, but not if CP0_Compare is exactly at @count. That case is already
400  * handled by kvm_mips_freeze_hrtimer().
401  *
402  * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
403  */
404 static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
405                                     ktime_t now, uint32_t count)
406 {
407         struct mips_coproc *cop0 = vcpu->arch.cop0;
408         uint32_t compare;
409         u64 delta;
410         ktime_t expire;
411
412         /* Calculate timeout (wrap 0 to 2^32) */
413         compare = kvm_read_c0_guest_compare(cop0);
414         delta = (u64)(uint32_t)(compare - count - 1) + 1;
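        /* compare == count gives a full 2^32-cycle period rather than zero */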
415         delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
416         expire = ktime_add_ns(now, delta);
417
418         /* Update hrtimer to use new timeout */
419         hrtimer_cancel(&vcpu->arch.comparecount_timer);
420         hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
421 }
422
423 /**
424  * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer.
425  * @vcpu:       Virtual CPU.
426  *
427  * Recalculates and updates the expiry time of the hrtimer. This can be used
428  * after timer parameters have been altered in a way that does not depend on
429  * the time at which the change occurs (otherwise kvm_mips_freeze_hrtimer()
430  * and kvm_mips_resume_hrtimer() are used directly).
431  *
432  * It is guaranteed that no timer interrupts will be lost in the process.
433  *
434  * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
435  */
436 static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu)
437 {
438         ktime_t now;
439         uint32_t count;
440
441         /*
442          * freeze_hrtimer takes care of timer interrupts <= count, and
443          * resume_hrtimer takes care of timer interrupts > count.
444          */
445         now = kvm_mips_freeze_hrtimer(vcpu, &count);
446         kvm_mips_resume_hrtimer(vcpu, now, count);
447 }
448
449 /**
450  * kvm_mips_write_count() - Modify the count and update timer.
451  * @vcpu:       Virtual CPU.
452  * @count:      Guest CP0_Count value to set.
453  *
454  * Sets the CP0_Count value and updates the timer accordingly.
455  */
456 void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count)
457 {
458         struct mips_coproc *cop0 = vcpu->arch.cop0;
459         ktime_t now;
460
461         /* Calculate bias so that the count read back at @now equals @count */
462         now = kvm_mips_count_time(vcpu);
463         vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
464
465         if (kvm_mips_count_disabled(vcpu))
466                 /* The timer's disabled, adjust the static count */
467                 kvm_write_c0_guest_count(cop0, count);
468         else
469                 /* Update timeout */
470                 kvm_mips_resume_hrtimer(vcpu, now, count);
471 }
472
473 /**
474  * kvm_mips_init_count() - Initialise timer.
475  * @vcpu:       Virtual CPU.
476  *
477  * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and set
478  * it going if it's enabled.
479  */
480 void kvm_mips_init_count(struct kvm_vcpu *vcpu)
481 {
482         /* 100 MHz */
483         vcpu->arch.count_hz = 100*1000*1000;
484         vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
485                                           vcpu->arch.count_hz);
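        /* count_period = time for CP0_Count to wrap (about 42.9s at 100 MHz) */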
486         vcpu->arch.count_dyn_bias = 0;
487
488         /* Starting at 0 */
489         kvm_mips_write_count(vcpu, 0);
490 }
491
492 /**
493  * kvm_mips_set_count_hz() - Update the frequency of the timer.
494  * @vcpu:       Virtual CPU.
495  * @count_hz:   Frequency of CP0_Count timer in Hz.
496  *
497  * Change the frequency of the CP0_Count timer. This is done atomically so that
498  * CP0_Count is continuous and no timer interrupt is lost.
499  *
500  * Returns:     -EINVAL if @count_hz is out of range.
501  *              0 on success.
502  */
503 int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
504 {
505         struct mips_coproc *cop0 = vcpu->arch.cop0;
506         int dc;
507         ktime_t now;
508         u32 count;
509
510         /* ensure the frequency is in a sensible range... */
511         if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
512                 return -EINVAL;
513         /* ... and has actually changed */
514         if (vcpu->arch.count_hz == count_hz)
515                 return 0;
516
517         /* Safely freeze timer so we can keep it continuous */
518         dc = kvm_mips_count_disabled(vcpu);
519         if (dc) {
520                 now = kvm_mips_count_time(vcpu);
521                 count = kvm_read_c0_guest_count(cop0);
522         } else {
523                 now = kvm_mips_freeze_hrtimer(vcpu, &count);
524         }
525
526         /* Update the frequency */
527         vcpu->arch.count_hz = count_hz;
528         vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
529         vcpu->arch.count_dyn_bias = 0;
530
531         /* Calculate adjusted bias so dynamic count is unchanged */
532         vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
533
534         /* Update and resume hrtimer */
535         if (!dc)
536                 kvm_mips_resume_hrtimer(vcpu, now, count);
537         return 0;
538 }
539
540 /**
541  * kvm_mips_write_compare() - Modify compare and update timer.
542  * @vcpu:       Virtual CPU.
543  * @compare:    New CP0_Compare value.
544  *
545  * Update CP0_Compare to a new value and update the timeout.
546  */
547 void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare)
548 {
549         struct mips_coproc *cop0 = vcpu->arch.cop0;
550
551         /* if unchanged, must just be an ack */
552         if (kvm_read_c0_guest_compare(cop0) == compare)
553                 return;
554
555         /* Update compare */
556         kvm_write_c0_guest_compare(cop0, compare);
557
558         /* Update timeout if count enabled */
559         if (!kvm_mips_count_disabled(vcpu))
560                 kvm_mips_update_hrtimer(vcpu);
561 }
562
563 /**
564  * kvm_mips_count_disable() - Disable count.
565  * @vcpu:       Virtual CPU.
566  *
567  * Disable the CP0_Count timer. A timer interrupt on or before the final stop
568  * time will be handled but not after.
569  *
570  * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
571  * count_ctl.DC has been set (count disabled).
572  *
573  * Returns:     The time that the timer was stopped.
574  */
575 static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
576 {
577         struct mips_coproc *cop0 = vcpu->arch.cop0;
578         uint32_t count;
579         ktime_t now;
580
581         /* Stop hrtimer */
582         hrtimer_cancel(&vcpu->arch.comparecount_timer);
583
584         /* Set the static count from the dynamic count, handling pending TI */
585         now = ktime_get();
586         count = kvm_mips_read_count_running(vcpu, now);
587         kvm_write_c0_guest_count(cop0, count);
588
589         return now;
590 }
591
592 /**
593  * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
594  * @vcpu:       Virtual CPU.
595  *
596  * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
597  * before the final stop time will be handled if the timer isn't disabled by
598  * count_ctl.DC, but not after.
599  *
600  * Assumes CP0_Cause.DC is clear (count enabled).
601  */
602 void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
603 {
604         struct mips_coproc *cop0 = vcpu->arch.cop0;
605
606         kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
607         if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
608                 kvm_mips_count_disable(vcpu);
609 }
610
611 /**
612  * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
613  * @vcpu:       Virtual CPU.
614  *
615  * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
616  * the start time will be handled if the timer isn't disabled by count_ctl.DC,
617  * potentially before even returning, so the caller should be careful with
618  * ordering of CP0_Cause modifications so as not to lose it.
619  *
620  * Assumes CP0_Cause.DC is set (count disabled).
621  */
622 void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
623 {
624         struct mips_coproc *cop0 = vcpu->arch.cop0;
625         uint32_t count;
626
627         kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);
628
629         /*
630          * Set the dynamic count to match the static count.
631          * This starts the hrtimer if count_ctl.DC allows it.
632          * Otherwise it conveniently updates the biases.
633          */
634         count = kvm_read_c0_guest_count(cop0);
635         kvm_mips_write_count(vcpu, count);
636 }
637
638 /**
639  * kvm_mips_set_count_ctl() - Update the count control KVM register.
640  * @vcpu:       Virtual CPU.
641  * @count_ctl:  Count control register new value.
642  *
643  * Set the count control KVM register. The timer is updated accordingly.
644  *
645  * Returns:     -EINVAL if reserved bits are set.
646  *              0 on success.
647  */
648 int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
649 {
650         struct mips_coproc *cop0 = vcpu->arch.cop0;
651         s64 changed = count_ctl ^ vcpu->arch.count_ctl;
652         s64 delta;
653         ktime_t expire, now;
654         uint32_t count, compare;
655
656         /* Only allow defined bits to be changed */
657         if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
658                 return -EINVAL;
659
660         /* Apply new value */
661         vcpu->arch.count_ctl = count_ctl;
662
663         /* Master CP0_Count disable */
664         if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
665                 /* Is CP0_Cause.DC already disabling CP0_Count? */
666                 if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
667                         if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
668                                 /* Just record the current time */
669                                 vcpu->arch.count_resume = ktime_get();
670                 } else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
671                         /* disable timer and record current time */
672                         vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
673                 } else {
674                         /*
675                          * Calculate timeout relative to static count at resume
676                          * time (wrap 0 to 2^32).
677                          */
678                         count = kvm_read_c0_guest_count(cop0);
679                         compare = kvm_read_c0_guest_compare(cop0);
680                         delta = (u64)(uint32_t)(compare - count - 1) + 1;
681                         delta = div_u64(delta * NSEC_PER_SEC,
682                                         vcpu->arch.count_hz);
683                         expire = ktime_add_ns(vcpu->arch.count_resume, delta);
684
685                         /* Handle pending interrupt */
686                         now = ktime_get();
687                         if (ktime_compare(now, expire) >= 0)
688                                 /* Nothing should be waiting on the timeout */
689                                 kvm_mips_callbacks->queue_timer_int(vcpu);
690
691                         /* Resume hrtimer without changing bias */
692                         count = kvm_mips_read_count_running(vcpu, now);
693                         kvm_mips_resume_hrtimer(vcpu, now, count);
694                 }
695         }
696
697         return 0;
698 }
699
700 /**
701  * kvm_mips_set_count_resume() - Update the count resume KVM register.
702  * @vcpu:               Virtual CPU.
703  * @count_resume:       Count resume register new value.
704  *
705  * Set the count resume KVM register.
706  *
707  * Returns:     -EINVAL if out of valid range (0..now).
708  *              0 on success.
709  */
710 int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
711 {
712         /*
713          * It doesn't make sense for the resume time to be in the future, as it
714          * would be possible for the next interrupt to be more than a full
715          * period in the future.
716          */
717         if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
718                 return -EINVAL;
719
720         vcpu->arch.count_resume = ns_to_ktime(count_resume);
721         return 0;
722 }
723
724 /**
725  * kvm_mips_count_timeout() - Push timer forward on timeout.
726  * @vcpu:       Virtual CPU.
727  *
728  * Handle an hrtimer event by pushing the hrtimer forward a period.
729  *
730  * Returns:     The hrtimer_restart value to return to the hrtimer subsystem.
731  */
732 enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
733 {
734         /* Add the Count period to the current expiry time */
735         hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
736                                vcpu->arch.count_period);
737         return HRTIMER_RESTART;
738 }
739
740 enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
741 {
742         struct mips_coproc *cop0 = vcpu->arch.cop0;
743         enum emulation_result er = EMULATE_DONE;
744
745         if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
746                 kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
747                           kvm_read_c0_guest_epc(cop0));
748                 kvm_clear_c0_guest_status(cop0, ST0_EXL);
749                 vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
750
751         } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
752                 kvm_clear_c0_guest_status(cop0, ST0_ERL);
753                 vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
754         } else {
755                 kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
756                         vcpu->arch.pc);
757                 er = EMULATE_FAIL;
758         }
759
760         return er;
761 }
762
763 enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
764 {
765         kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
766                   vcpu->arch.pending_exceptions);
767
768         ++vcpu->stat.wait_exits;
769         trace_kvm_exit(vcpu, WAIT_EXITS);
770         if (!vcpu->arch.pending_exceptions) {
771                 vcpu->arch.wait = 1;
772                 kvm_vcpu_block(vcpu);
773
774                 /*
775                  * If we are runnable, then definitely go off to user space to
776                  * check if any I/O interrupts are pending.
777                  */
778                 if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
779                         clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
780                         vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
781                 }
782         }
783
784         return EMULATE_DONE;
785 }
786
787 /*
788  * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that
789  * we can catch this, if things ever change
790  */
791 enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
792 {
793         struct mips_coproc *cop0 = vcpu->arch.cop0;
794         uint32_t pc = vcpu->arch.pc;
795
796         kvm_err("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
797         return EMULATE_FAIL;
798 }
799
800 /* Write Guest TLB Entry @ Index */
801 enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
802 {
803         struct mips_coproc *cop0 = vcpu->arch.cop0;
804         int index = kvm_read_c0_guest_index(cop0);
805         struct kvm_mips_tlb *tlb = NULL;
806         uint32_t pc = vcpu->arch.pc;
807
808         if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
809                 kvm_debug("%s: illegal index: %d\n", __func__, index);
810                 kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
811                           pc, index, kvm_read_c0_guest_entryhi(cop0),
812                           kvm_read_c0_guest_entrylo0(cop0),
813                           kvm_read_c0_guest_entrylo1(cop0),
814                           kvm_read_c0_guest_pagemask(cop0));
815                 index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
816         }
817
818         tlb = &vcpu->arch.guest_tlb[index];
819         /*
820          * Probe the shadow host TLB for the entry being overwritten; if one
821          * matches, invalidate it.
822          */
823         kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
824
825         tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
826         tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
827         tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
828         tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
829
830         kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
831                   pc, index, kvm_read_c0_guest_entryhi(cop0),
832                   kvm_read_c0_guest_entrylo0(cop0),
833                   kvm_read_c0_guest_entrylo1(cop0),
834                   kvm_read_c0_guest_pagemask(cop0));
835
836         return EMULATE_DONE;
837 }
838
839 /* Write Guest TLB Entry @ Random Index */
840 enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
841 {
842         struct mips_coproc *cop0 = vcpu->arch.cop0;
843         struct kvm_mips_tlb *tlb = NULL;
844         uint32_t pc = vcpu->arch.pc;
845         int index;
846
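        /* Pseudo-random index; the mask relies on the guest TLB size being a power of 2 */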
847         get_random_bytes(&index, sizeof(index));
848         index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
849
850         tlb = &vcpu->arch.guest_tlb[index];
851
852         /*
853          * Probe the shadow host TLB for the entry being overwritten; if one
854          * matches, invalidate it.
855          */
856         kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
857
858         tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
859         tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
860         tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
861         tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
862
863         kvm_debug("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
864                   pc, index, kvm_read_c0_guest_entryhi(cop0),
865                   kvm_read_c0_guest_entrylo0(cop0),
866                   kvm_read_c0_guest_entrylo1(cop0));
867
868         return EMULATE_DONE;
869 }
870
871 enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
872 {
873         struct mips_coproc *cop0 = vcpu->arch.cop0;
874         long entryhi = kvm_read_c0_guest_entryhi(cop0);
875         uint32_t pc = vcpu->arch.pc;
876         int index = -1;
877
878         index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
879
880         kvm_write_c0_guest_index(cop0, index);
881
882         kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
883                   index);
884
885         return EMULATE_DONE;
886 }
887
888 /**
889  * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1
890  * @vcpu:       Virtual CPU.
891  *
892  * Finds the mask of bits which are writable in the guest's Config1 CP0
893  * register, by userland (currently read-only to the guest).
894  */
895 unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
896 {
897         unsigned int mask = 0;
898
899         /* Permit FPU to be present if FPU is supported */
900         if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
901                 mask |= MIPS_CONF1_FP;
902
903         return mask;
904 }
905
906 /**
907  * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3
908  * @vcpu:       Virtual CPU.
909  *
910  * Finds the mask of bits which are writable in the guest's Config3 CP0
911  * register, by userland (currently read-only to the guest).
912  */
913 unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
914 {
915         /* Config4 is optional */
916         unsigned int mask = MIPS_CONF_M;
917
918         /* Permit MSA to be present if MSA is supported */
919         if (kvm_mips_guest_can_have_msa(&vcpu->arch))
920                 mask |= MIPS_CONF3_MSA;
921
922         return mask;
923 }
924
925 /**
926  * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4
927  * @vcpu:       Virtual CPU.
928  *
929  * Finds the mask of bits which are writable in the guest's Config4 CP0
930  * register, by userland (currently read-only to the guest).
931  */
932 unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
933 {
934         /* Config5 is optional */
935         return MIPS_CONF_M;
936 }
937
938 /**
939  * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5
940  * @vcpu:       Virtual CPU.
941  *
942  * Finds the mask of bits which are writable in the guest's Config5 CP0
943  * register, by the guest itself.
944  */
945 unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
946 {
947         unsigned int mask = 0;
948
949         /* Permit MSAEn changes if MSA supported and enabled */
950         if (kvm_mips_guest_has_msa(&vcpu->arch))
951                 mask |= MIPS_CONF5_MSAEN;
952
953         /*
954          * Permit guest FPU mode changes if FPU is enabled and the relevant
955          * feature exists according to FIR register.
956          */
957         if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
958                 if (cpu_has_fre)
959                         mask |= MIPS_CONF5_FRE;
960                 /* We don't support UFR or UFE */
961         }
962
963         return mask;
964 }
965
966 enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
967                                            uint32_t cause, struct kvm_run *run,
968                                            struct kvm_vcpu *vcpu)
969 {
970         struct mips_coproc *cop0 = vcpu->arch.cop0;
971         enum emulation_result er = EMULATE_DONE;
972         int32_t rt, rd, copz, sel, co_bit, op;
973         uint32_t pc = vcpu->arch.pc;
974         unsigned long curr_pc;
975
976         /*
977          * Update PC and hold onto current PC in case there is
978          * an error and we want to rollback the PC
979          */
980         curr_pc = vcpu->arch.pc;
981         er = update_pc(vcpu, cause);
982         if (er == EMULATE_FAIL)
983                 return er;
984
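        /*
         * Decode the COP0 instruction fields: copz/rs [25:21], rt [20:16],
         * rd [15:11], sel [2:0], and the CO bit [25].
         */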
985         copz = (inst >> 21) & 0x1f;
986         rt = (inst >> 16) & 0x1f;
987         rd = (inst >> 11) & 0x1f;
988         sel = inst & 0x7;
989         co_bit = (inst >> 25) & 1;
990
991         if (co_bit) {
992                 op = (inst) & 0xff;
993
994                 switch (op) {
995                 case tlbr_op:   /*  Read indexed TLB entry  */
996                         er = kvm_mips_emul_tlbr(vcpu);
997                         break;
998                 case tlbwi_op:  /*  Write indexed  */
999                         er = kvm_mips_emul_tlbwi(vcpu);
1000                         break;
1001                 case tlbwr_op:  /*  Write random  */
1002                         er = kvm_mips_emul_tlbwr(vcpu);
1003                         break;
1004                 case tlbp_op:   /* TLB Probe */
1005                         er = kvm_mips_emul_tlbp(vcpu);
1006                         break;
1007                 case rfe_op:
1008                         kvm_err("!!!COP0_RFE!!!\n");
1009                         break;
1010                 case eret_op:
1011                         er = kvm_mips_emul_eret(vcpu);
1012                         goto dont_update_pc;
1013                         break;
1014                 case wait_op:
1015                         er = kvm_mips_emul_wait(vcpu);
1016                         break;
1017                 }
1018         } else {
1019                 switch (copz) {
1020                 case mfc_op:
1021 #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
1022                         cop0->stat[rd][sel]++;
1023 #endif
1024                         /* Get reg */
1025                         if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
1026                                 vcpu->arch.gprs[rt] = kvm_mips_read_count(vcpu);
1027                         } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
1028                                 vcpu->arch.gprs[rt] = 0x0;
1029 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1030                                 kvm_mips_trans_mfc0(inst, opc, vcpu);
1031 #endif
1032                         } else {
1033                                 vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
1034
1035 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1036                                 kvm_mips_trans_mfc0(inst, opc, vcpu);
1037 #endif
1038                         }
1039
1040                         kvm_debug
1041                             ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
1042                              pc, rd, sel, rt, vcpu->arch.gprs[rt]);
1043
1044                         break;
1045
1046                 case dmfc_op:
1047                         vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
1048                         break;
1049
1050                 case mtc_op:
1051 #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
1052                         cop0->stat[rd][sel]++;
1053 #endif
1054                         if ((rd == MIPS_CP0_TLB_INDEX)
1055                             && (vcpu->arch.gprs[rt] >=
1056                                 KVM_MIPS_GUEST_TLB_SIZE)) {
1057                                 kvm_err("Invalid TLB Index: %ld",
1058                                         vcpu->arch.gprs[rt]);
1059                                 er = EMULATE_FAIL;
1060                                 break;
1061                         }
1062 #define C0_EBASE_CORE_MASK 0xff
1063                         if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
1064                                 /* Preserve CORE number */
1065                                 kvm_change_c0_guest_ebase(cop0,
1066                                                           ~(C0_EBASE_CORE_MASK),
1067                                                           vcpu->arch.gprs[rt]);
1068                                 kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n",
1069                                         kvm_read_c0_guest_ebase(cop0));
1070                         } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
1071                                 uint32_t nasid =
1072                                         vcpu->arch.gprs[rt] & ASID_MASK;
1073                                 if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
1074                                     ((kvm_read_c0_guest_entryhi(cop0) &
1075                                       ASID_MASK) != nasid)) {
1076                                         kvm_debug("MTCz, change ASID from %#lx to %#lx\n",
1077                                                 kvm_read_c0_guest_entryhi(cop0)
1078                                                 & ASID_MASK,
1079                                                 vcpu->arch.gprs[rt]
1080                                                 & ASID_MASK);
1081
1082                                         /* Blow away the shadow host TLBs */
1083                                         kvm_mips_flush_host_tlb(1);
1084                                 }
1085                                 kvm_write_c0_guest_entryhi(cop0,
1086                                                            vcpu->arch.gprs[rt]);
1087                         }
1088                         /* Are we writing to COUNT */
1089                         else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
1090                                 kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
1091                                 goto done;
1092                         } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
1093                                 kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
1094                                           pc, kvm_read_c0_guest_compare(cop0),
1095                                           vcpu->arch.gprs[rt]);
1096
1097                                 /* If we are writing to COMPARE */
1098                                 /* Clear pending timer interrupt, if any */
1099                                 kvm_mips_callbacks->dequeue_timer_int(vcpu);
1100                                 kvm_mips_write_compare(vcpu,
1101                                                        vcpu->arch.gprs[rt]);
1102                         } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
1103                                 unsigned int old_val, val, change;
1104
1105                                 old_val = kvm_read_c0_guest_status(cop0);
1106                                 val = vcpu->arch.gprs[rt];
1107                                 change = val ^ old_val;
1108
1109                                 /* Make sure that the NMI bit is never set */
1110                                 val &= ~ST0_NMI;
1111
1112                                 /*
1113                                  * Don't allow CU1 or FR to be set unless FPU
1114                                  * capability enabled and exists in guest
1115                                  * configuration.
1116                                  */
1117                                 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
1118                                         val &= ~(ST0_CU1 | ST0_FR);
1119
1120                                 /*
1121                                  * Also don't allow FR to be set if host doesn't
1122                                  * support it.
1123                                  */
1124                                 if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
1125                                         val &= ~ST0_FR;
1126
1127
1128                                 /* Handle changes in FPU mode */
1129                                 preempt_disable();
1130
1131                                 /*
1132                                  * FPU and Vector register state is made
1133                                  * UNPREDICTABLE by a change of FR, so don't
1134                                  * even bother saving it.
1135                                  */
1136                                 if (change & ST0_FR)
1137                                         kvm_drop_fpu(vcpu);
1138
1139                                 /*
1140                                  * If MSA state is already live, it is undefined
1141                                  * how it interacts with FR=0 FPU state, and we
1142                                  * don't want to hit reserved instruction
1143                                  * exceptions trying to save the MSA state later
1144                                  * when CU=1 && FR=1, so play it safe and save
1145                                  * it first.
1146                                  */
1147                                 if (change & ST0_CU1 && !(val & ST0_FR) &&
1148                                     vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
1149                                         kvm_lose_fpu(vcpu);
1150
1151                                 /*
1152                                  * Propagate CU1 (FPU enable) changes
1153                                  * immediately if the FPU context is already
1154                                  * loaded. When disabling we leave the context
1155                                  * loaded so it can be quickly enabled again in
1156                                  * the near future.
1157                                  */
1158                                 if (change & ST0_CU1 &&
1159                                     vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
1160                                         change_c0_status(ST0_CU1, val);
1161
1162                                 preempt_enable();
1163
1164                                 kvm_write_c0_guest_status(cop0, val);
1165
1166 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1167                                 /*
1168                                  * If FPU present, we need CU1/FR bits to take
1169                                  * effect fairly soon.
1170                                  */
1171                                 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
1172                                         kvm_mips_trans_mtc0(inst, opc, vcpu);
1173 #endif
1174                         } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
1175                                 unsigned int old_val, val, change, wrmask;
1176
1177                                 old_val = kvm_read_c0_guest_config5(cop0);
1178                                 val = vcpu->arch.gprs[rt];
1179
1180                                 /* Only a few bits are writable in Config5 */
1181                                 wrmask = kvm_mips_config5_wrmask(vcpu);
1182                                 change = (val ^ old_val) & wrmask;
1183                                 val = old_val ^ change;
1184
1185
1186                                 /* Handle changes in FPU/MSA modes */
1187                                 preempt_disable();
1188
1189                                 /*
1190                                  * Propagate FRE changes immediately if the FPU
1191                                  * context is already loaded.
1192                                  */
1193                                 if (change & MIPS_CONF5_FRE &&
1194                                     vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
1195                                         change_c0_config5(MIPS_CONF5_FRE, val);
1196
1197                                 /*
1198                                  * Propagate MSAEn changes immediately if the
1199                                  * MSA context is already loaded. When disabling
1200                                  * we leave the context loaded so it can be
1201                                  * quickly enabled again in the near future.
1202                                  */
1203                                 if (change & MIPS_CONF5_MSAEN &&
1204                                     vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
1205                                         change_c0_config5(MIPS_CONF5_MSAEN,
1206                                                           val);
1207
1208                                 preempt_enable();
1209
1210                                 kvm_write_c0_guest_config5(cop0, val);
1211                         } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
1212                                 uint32_t old_cause, new_cause;
1213
1214                                 old_cause = kvm_read_c0_guest_cause(cop0);
1215                                 new_cause = vcpu->arch.gprs[rt];
1216                                 /* Update writable bits: DC, IV, IP1:IP0 */
1217                                 kvm_change_c0_guest_cause(cop0, 0x08800300,
1218                                                           new_cause);
1219                                 /* DC bit enabling/disabling timer? */
1220                                 if ((old_cause ^ new_cause) & CAUSEF_DC) {
1221                                         if (new_cause & CAUSEF_DC)
1222                                                 kvm_mips_count_disable_cause(vcpu);
1223                                         else
1224                                                 kvm_mips_count_enable_cause(vcpu);
1225                                 }
1226                         } else {
1227                                 cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
1228 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1229                                 kvm_mips_trans_mtc0(inst, opc, vcpu);
1230 #endif
1231                         }
1232
1233                         kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
1234                                   rd, sel, cop0->reg[rd][sel]);
1235                         break;
1236
1237                 case dmtc_op:
1238                         kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
1239                                 vcpu->arch.pc, rt, rd, sel);
1240                         er = EMULATE_FAIL;
1241                         break;
1242
1243                 case mfmcz_op:
1244 #ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
1245                         cop0->stat[MIPS_CP0_STATUS][0]++;
1246 #endif
1247                         if (rt != 0)
1248                                 vcpu->arch.gprs[rt] =
1249                                     kvm_read_c0_guest_status(cop0);
1250                         /* sc bit (bit 5) of MFMC0: set for EI, clear for DI */
1251                         if (inst & 0x20) {
1252                                 kvm_debug("[%#lx] mfmcz_op: EI\n",
1253                                           vcpu->arch.pc);
1254                                 kvm_set_c0_guest_status(cop0, ST0_IE);
1255                         } else {
1256                                 kvm_debug("[%#lx] mfmcz_op: DI\n",
1257                                           vcpu->arch.pc);
1258                                 kvm_clear_c0_guest_status(cop0, ST0_IE);
1259                         }
1260
1261                         break;
1262
1263                 case wrpgpr_op:
1264                         {
1265                                 uint32_t css =
1266                                     cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
1267                                 uint32_t pss =
1268                                     (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
1269                                 /*
1270                                  * We don't support any shadow register sets, so
1271                                  * SRSCtl[PSS] == SRSCtl[CSS] = 0
1272                                  */
1273                                 if (css || pss) {
1274                                         er = EMULATE_FAIL;
1275                                         break;
1276                                 }
1277                                 kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
1278                                           vcpu->arch.gprs[rt]);
1279                                 vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
1280                         }
1281                         break;
1282                 default:
1283                         kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
1284                                 vcpu->arch.pc, copz);
1285                         er = EMULATE_FAIL;
1286                         break;
1287                 }
1288         }
1289
1290 done:
1291         /* Rollback PC only if emulation was unsuccessful */
1292         if (er == EMULATE_FAIL)
1293                 vcpu->arch.pc = curr_pc;
1294
1295 dont_update_pc:
1296         /*
1297          * This is for special instructions whose emulation
1298          * updates the PC, so do not overwrite the PC under
1299          * any circumstances
1300          */
1301
1302         return er;
1303 }
1304
1305 enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
1306                                              struct kvm_run *run,
1307                                              struct kvm_vcpu *vcpu)
1308 {
1309         enum emulation_result er = EMULATE_DO_MMIO;
1310         int32_t op, base, rt, offset;
1311         uint32_t bytes;
1312         void *data = run->mmio.data;
1313         unsigned long curr_pc;
1314
1315         /*
1316          * Update PC and hold onto current PC in case there is
1317          * an error and we want to rollback the PC
1318          */
1319         curr_pc = vcpu->arch.pc;
1320         er = update_pc(vcpu, cause);
1321         if (er == EMULATE_FAIL)
1322                 return er;
1323
1324         rt = (inst >> 16) & 0x1f;
1325         base = (inst >> 21) & 0x1f;
1326         offset = inst & 0xffff;
1327         op = (inst >> 26) & 0x3f;
1328
1329         switch (op) {
1330         case sb_op:
1331                 bytes = 1;
1332                 if (bytes > sizeof(run->mmio.data)) {
1333                         kvm_err("%s: bad MMIO length: %d\n", __func__,
1334                                run->mmio.len);
1335                 }
1336                 run->mmio.phys_addr =
1337                     kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1338                                                    host_cp0_badvaddr);
1339                 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1340                         er = EMULATE_FAIL;
1341                         break;
1342                 }
1343                 run->mmio.len = bytes;
1344                 run->mmio.is_write = 1;
1345                 vcpu->mmio_needed = 1;
1346                 vcpu->mmio_is_write = 1;
1347                 *(u8 *) data = vcpu->arch.gprs[rt];
1348                 kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1349                           vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
1350                           *(uint8_t *) data);
1351
1352                 break;
1353
1354         case sw_op:
1355                 bytes = 4;
1356                 if (bytes > sizeof(run->mmio.data)) {
1357                         kvm_err("%s: bad MMIO length: %d\n", __func__,
1358                                run->mmio.len);
1359                 }
1360                 run->mmio.phys_addr =
1361                     kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1362                                                    host_cp0_badvaddr);
1363                 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1364                         er = EMULATE_FAIL;
1365                         break;
1366                 }
1367
1368                 run->mmio.len = bytes;
1369                 run->mmio.is_write = 1;
1370                 vcpu->mmio_needed = 1;
1371                 vcpu->mmio_is_write = 1;
1372                 *(uint32_t *) data = vcpu->arch.gprs[rt];
1373
1374                 kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1375                           vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1376                           vcpu->arch.gprs[rt], *(uint32_t *) data);
1377                 break;
1378
1379         case sh_op:
1380                 bytes = 2;
1381                 if (bytes > sizeof(run->mmio.data)) {
1382                         kvm_err("%s: bad MMIO length: %d\n", __func__,
1383                                run->mmio.len);
1384                 }
1385                 run->mmio.phys_addr =
1386                     kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1387                                                    host_cp0_badvaddr);
1388                 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1389                         er = EMULATE_FAIL;
1390                         break;
1391                 }
1392
1393                 run->mmio.len = bytes;
1394                 run->mmio.is_write = 1;
1395                 vcpu->mmio_needed = 1;
1396                 vcpu->mmio_is_write = 1;
1397                 *(uint16_t *) data = vcpu->arch.gprs[rt];
1398
1399                 kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1400                           vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1401                           vcpu->arch.gprs[rt], *(uint16_t *) data);
1402                 break;
1403
1404         default:
1405                 kvm_err("Store not yet supported\n");
1406                 er = EMULATE_FAIL;
1407                 break;
1408         }
1409
1410         /* Rollback PC if emulation was unsuccessful */
1411         if (er == EMULATE_FAIL)
1412                 vcpu->arch.pc = curr_pc;
1413
1414         return er;
1415 }
1416
1417 enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
1418                                             struct kvm_run *run,
1419                                             struct kvm_vcpu *vcpu)
1420 {
1421         enum emulation_result er = EMULATE_DO_MMIO;
1422         int32_t op, base, rt, offset;
1423         uint32_t bytes;
1424
1425         rt = (inst >> 16) & 0x1f;
1426         base = (inst >> 21) & 0x1f;
1427         offset = inst & 0xffff;
1428         op = (inst >> 26) & 0x3f;
1429
1430         vcpu->arch.pending_load_cause = cause;
1431         vcpu->arch.io_gpr = rt;
1432
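             /*
              * The load can only complete once userspace has done the MMIO read,
              * so remember which GPR to write (io_gpr) and the original Cause
              * value (pending_load_cause); kvm_mips_complete_mmio_load() uses
              * them to fill in the register and advance the PC.
              */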
1433         switch (op) {
1434         case lw_op:
1435                 bytes = 4;
1436                 if (bytes > sizeof(run->mmio.data)) {
1437                         kvm_err("%s: bad MMIO length: %d\n", __func__,
1438                                run->mmio.len);
1439                         er = EMULATE_FAIL;
1440                         break;
1441                 }
1442                 run->mmio.phys_addr =
1443                     kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1444                                                    host_cp0_badvaddr);
1445                 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1446                         er = EMULATE_FAIL;
1447                         break;
1448                 }
1449
1450                 run->mmio.len = bytes;
1451                 run->mmio.is_write = 0;
1452                 vcpu->mmio_needed = 1;
1453                 vcpu->mmio_is_write = 0;
1454                 break;
1455
1456         case lh_op:
1457         case lhu_op:
1458                 bytes = 2;
1459                 if (bytes > sizeof(run->mmio.data)) {
1460                         kvm_err("%s: bad MMIO length: %d\n", __func__,
1461                                run->mmio.len);
1462                         er = EMULATE_FAIL;
1463                         break;
1464                 }
1465                 run->mmio.phys_addr =
1466                     kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1467                                                    host_cp0_badvaddr);
1468                 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1469                         er = EMULATE_FAIL;
1470                         break;
1471                 }
1472
1473                 run->mmio.len = bytes;
1474                 run->mmio.is_write = 0;
1475                 vcpu->mmio_needed = 1;
1476                 vcpu->mmio_is_write = 0;
1477
1478                 if (op == lh_op)
1479                         vcpu->mmio_needed = 2;
1480                 else
1481                         vcpu->mmio_needed = 1;
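                     /*
                      * mmio_needed doubles as the extension type: 2 means
                      * sign-extend (lh/lb), 1 means zero-extend (lhu/lbu).
                      * kvm_mips_complete_mmio_load() checks it when writing
                      * the GPR.
                      */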
1482
1483                 break;
1484
1485         case lbu_op:
1486         case lb_op:
1487                 bytes = 1;
1488                 if (bytes > sizeof(run->mmio.data)) {
1489                         kvm_err("%s: bad MMIO length: %d\n", __func__,
1490                                run->mmio.len);
1491                         er = EMULATE_FAIL;
1492                         break;
1493                 }
1494                 run->mmio.phys_addr =
1495                     kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1496                                                    host_cp0_badvaddr);
1497                 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1498                         er = EMULATE_FAIL;
1499                         break;
1500                 }
1501
1502                 run->mmio.len = bytes;
1503                 run->mmio.is_write = 0;
1504                 vcpu->mmio_is_write = 0;
1505
1506                 if (op == lb_op)
1507                         vcpu->mmio_needed = 2;
1508                 else
1509                         vcpu->mmio_needed = 1;
1510
1511                 break;
1512
1513         default:
1514                 kvm_err("Load not yet supported\n");
1515                 er = EMULATE_FAIL;
1516                 break;
1517         }
1518
1519         return er;
1520 }
1521
1522 int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
1523 {
1524         unsigned long offset = (va & ~PAGE_MASK);
1525         struct kvm *kvm = vcpu->kvm;
1526         unsigned long pa;
1527         gfn_t gfn;
1528         pfn_t pfn;
1529
1530         gfn = va >> PAGE_SHIFT;
1531
1532         if (gfn >= kvm->arch.guest_pmap_npages) {
1533                 kvm_err("%s: Invalid gfn: %#llx\n", __func__, gfn);
1534                 kvm_mips_dump_host_tlbs();
1535                 kvm_arch_vcpu_dump_regs(vcpu);
1536                 return -1;
1537         }
1538         pfn = kvm->arch.guest_pmap[gfn];
1539         pa = (pfn << PAGE_SHIFT) | offset;
1540
1541         kvm_debug("%s: va: %#lx, unmapped: %#x\n", __func__, va,
1542                   CKSEG0ADDR(pa));
1543
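             /*
              * The flush goes through the unmapped, cached KSEG0 window, so no
              * TLB mapping has to be set up for the host physical page.
              */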
1544         local_flush_icache_range(CKSEG0ADDR(pa), CKSEG0ADDR(pa) + 32);
1545         return 0;
1546 }
1547
1548 enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
1549                                              uint32_t cause,
1550                                              struct kvm_run *run,
1551                                              struct kvm_vcpu *vcpu)
1552 {
1553         struct mips_coproc *cop0 = vcpu->arch.cop0;
1554         enum emulation_result er = EMULATE_DONE;
1555         int32_t offset, cache, op_inst, op, base;
1556         struct kvm_vcpu_arch *arch = &vcpu->arch;
1557         unsigned long va;
1558         unsigned long curr_pc;
1559
1560         /*
1561          * Update PC and hold onto current PC in case there is
1562          * an error and we want to rollback the PC
1563          */
1564         curr_pc = vcpu->arch.pc;
1565         er = update_pc(vcpu, cause);
1566         if (er == EMULATE_FAIL)
1567                 return er;
1568
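             /*
              * The 5-bit rt field of the CACHE instruction encodes both the
              * target cache (low two bits, CacheOp_Cache) and the operation
              * (upper bits, CacheOp_Op), as defined in <asm/cacheops.h>.
              */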
1569         base = (inst >> 21) & 0x1f;
1570         op_inst = (inst >> 16) & 0x1f;
1571         offset = (int16_t)inst;
1572         cache = op_inst & CacheOp_Cache;
1573         op = op_inst & CacheOp_Op;
1574
1575         va = arch->gprs[base] + offset;
1576
1577         kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1578                   cache, op, base, arch->gprs[base], offset);
1579
1580         /*
1581          * Linux issues Index_Writeback_Inv at startup to invalidate the caches
1582          * entirely by stepping through all the ways/indexes, so rather than
1583          * emulating each index operation, just blast the whole affected cache.
1584          */
1585         if (op == Index_Writeback_Inv) {
1586                 kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1587                           vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
1588                           arch->gprs[base], offset);
1589
1590                 if (cache == Cache_D)
1591                         r4k_blast_dcache();
1592                 else if (cache == Cache_I)
1593                         r4k_blast_icache();
1594                 else {
1595                         kvm_err("%s: unsupported CACHE INDEX operation\n",
1596                                 __func__);
1597                         return EMULATE_FAIL;
1598                 }
1599
1600 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1601                 kvm_mips_trans_cache_index(inst, opc, vcpu);
1602 #endif
1603                 goto done;
1604         }
1605
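             /*
              * The hit-type ops below act directly on the guest virtual address,
              * so make sure it is mapped in the host TLB first. Preemption stays
              * disabled so that mapping remains usable on this CPU until the
              * line has been flushed.
              */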
1606         preempt_disable();
1607         if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
1608                 if (kvm_mips_host_tlb_lookup(vcpu, va) < 0)
1609                         kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
1610         } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
1611                    KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
1612                 int index;
1613
1614                 /* If an entry already exists then skip */
1615                 if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0)
1616                         goto skip_fault;
1617
1618                 /*
1619                  * If the address is not in the guest TLB, give the guest a fault;
1620                  * the resulting handler will do the right thing
1621                  */
1622                 index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
1623                                                   (kvm_read_c0_guest_entryhi
1624                                                    (cop0) & ASID_MASK));
1625
1626                 if (index < 0) {
1627                         vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
1628                         vcpu->arch.host_cp0_badvaddr = va;
1629                         er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
1630                                                          vcpu);
1631                         preempt_enable();
1632                         goto dont_update_pc;
1633                 } else {
1634                         struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
1635                         /*
1636                          * Check if the entry is valid, if not then setup a TLB
1637                          * invalid exception to the guest
1638                          */
1639                         if (!TLB_IS_VALID(*tlb, va)) {
1640                                 er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
1641                                                                 run, vcpu);
1642                                 preempt_enable();
1643                                 goto dont_update_pc;
1644                         } else {
1645                                 /*
1646                                  * We fault an entry from the guest tlb to the
1647                                  * shadow host TLB
1648                                  */
1649                                 kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
1650                                                                      NULL,
1651                                                                      NULL);
1652                         }
1653                 }
1654         } else {
1655                 kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1656                         cache, op, base, arch->gprs[base], offset);
1657                 er = EMULATE_FAIL;
1658                 preempt_enable();
1659                 goto dont_update_pc;
1660
1661         }
1662
1663 skip_fault:
1664         /* XXXKYMA: Only the subset of cache ops used by Linux is supported */
1665         if (op_inst == Hit_Writeback_Inv_D || op_inst == Hit_Invalidate_D) {
1666                 flush_dcache_line(va);
1667
1668 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1669                 /*
1670                  * Replace the CACHE instruction with a SYNCI; not the same thing,
1671                  * but it avoids a trap
1672                  */
1673                 kvm_mips_trans_cache_va(inst, opc, vcpu);
1674 #endif
1675         } else if (op_inst == Hit_Invalidate_I) {
1676                 flush_dcache_line(va);
1677                 flush_icache_line(va);
1678
1679 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1680                 /* Replace the CACHE instruction, with a SYNCI */
1681                 kvm_mips_trans_cache_va(inst, opc, vcpu);
1682 #endif
1683         } else {
1684                 kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1685                         cache, op, base, arch->gprs[base], offset);
1686                 er = EMULATE_FAIL;
1687                 preempt_enable();
1688                 goto dont_update_pc;
1689         }
1690
1691         preempt_enable();
1692
1693 dont_update_pc:
1694         /* Rollback PC */
1695         vcpu->arch.pc = curr_pc;
1696 done:
1697         return er;
1698 }
1699
1700 enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
1701                                             struct kvm_run *run,
1702                                             struct kvm_vcpu *vcpu)
1703 {
1704         enum emulation_result er = EMULATE_DONE;
1705         uint32_t inst;
1706
1707         /* Fetch the instruction. */
1708         if (cause & CAUSEF_BD)
1709                 opc += 1;
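             /*
              * EPC points at the branch when the fault is in a delay slot, so
              * the instruction that actually trapped is the following word.
              */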
1710
1711         inst = kvm_get_inst(opc, vcpu);
1712
1713         switch (((union mips_instruction)inst).r_format.opcode) {
1714         case cop0_op:
1715                 er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
1716                 break;
1717         case sb_op:
1718         case sh_op:
1719         case sw_op:
1720                 er = kvm_mips_emulate_store(inst, cause, run, vcpu);
1721                 break;
1722         case lb_op:
1723         case lbu_op:
1724         case lhu_op:
1725         case lh_op:
1726         case lw_op:
1727                 er = kvm_mips_emulate_load(inst, cause, run, vcpu);
1728                 break;
1729
1730         case cache_op:
1731                 ++vcpu->stat.cache_exits;
1732                 trace_kvm_exit(vcpu, CACHE_EXITS);
1733                 er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
1734                 break;
1735
1736         default:
1737                 kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
1738                         inst);
1739                 kvm_arch_vcpu_dump_regs(vcpu);
1740                 er = EMULATE_FAIL;
1741                 break;
1742         }
1743
1744         return er;
1745 }
1746
1747 enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
1748                                                uint32_t *opc,
1749                                                struct kvm_run *run,
1750                                                struct kvm_vcpu *vcpu)
1751 {
1752         struct mips_coproc *cop0 = vcpu->arch.cop0;
1753         struct kvm_vcpu_arch *arch = &vcpu->arch;
1754         enum emulation_result er = EMULATE_DONE;
1755
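             /*
              * Standard guest exception delivery: if the guest is not already at
              * exception level, save its PC in EPC, set Status.EXL, mirror the
              * branch-delay state into Cause.BD, set the ExcCode and point it at
              * the general exception vector at guest KSEG0 + 0x180.
              */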
1756         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1757                 /* save old pc */
1758                 kvm_write_c0_guest_epc(cop0, arch->pc);
1759                 kvm_set_c0_guest_status(cop0, ST0_EXL);
1760
1761                 if (cause & CAUSEF_BD)
1762                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1763                 else
1764                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1765
1766                 kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
1767
1768                 kvm_change_c0_guest_cause(cop0, (0xff),
1769                                           (EXCCODE_SYS << CAUSEB_EXCCODE));
1770
1771                 /* Set PC to the exception entry point */
1772                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1773
1774         } else {
1775                 kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
1776                 er = EMULATE_FAIL;
1777         }
1778
1779         return er;
1780 }
1781
1782 enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
1783                                                   uint32_t *opc,
1784                                                   struct kvm_run *run,
1785                                                   struct kvm_vcpu *vcpu)
1786 {
1787         struct mips_coproc *cop0 = vcpu->arch.cop0;
1788         struct kvm_vcpu_arch *arch = &vcpu->arch;
1789         unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1790                                 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1791
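             /*
              * As on real hardware, a TLB refill taken with EXL clear uses the
              * dedicated refill vector at guest KSEG0 + 0x0; with EXL already
              * set it goes to the general vector at 0x180.
              */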
1792         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1793                 /* save old pc */
1794                 kvm_write_c0_guest_epc(cop0, arch->pc);
1795                 kvm_set_c0_guest_status(cop0, ST0_EXL);
1796
1797                 if (cause & CAUSEF_BD)
1798                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1799                 else
1800                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1801
1802                 kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
1803                           arch->pc);
1804
1805                 /* set pc to the exception entry point */
1806                 arch->pc = KVM_GUEST_KSEG0 + 0x0;
1807
1808         } else {
1809                 kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
1810                           arch->pc);
1811
1812                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1813         }
1814
1815         kvm_change_c0_guest_cause(cop0, (0xff),
1816                                   (EXCCODE_TLBL << CAUSEB_EXCCODE));
1817
1818         /* setup badvaddr, context and entryhi registers for the guest */
1819         kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1820         /* XXXKYMA: is the context register used by linux??? */
1821         kvm_write_c0_guest_entryhi(cop0, entryhi);
1822         /* Blow away the shadow host TLBs */
1823         kvm_mips_flush_host_tlb(1);
1824
1825         return EMULATE_DONE;
1826 }
1827
1828 enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
1829                                                  uint32_t *opc,
1830                                                  struct kvm_run *run,
1831                                                  struct kvm_vcpu *vcpu)
1832 {
1833         struct mips_coproc *cop0 = vcpu->arch.cop0;
1834         struct kvm_vcpu_arch *arch = &vcpu->arch;
1835         unsigned long entryhi =
1836                 (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1837                 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1838
1839         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1840                 /* save old pc */
1841                 kvm_write_c0_guest_epc(cop0, arch->pc);
1842                 kvm_set_c0_guest_status(cop0, ST0_EXL);
1843
1844                 if (cause & CAUSEF_BD)
1845                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1846                 else
1847                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1848
1849                 kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
1850                           arch->pc);
1851
1852                 /* set pc to the exception entry point */
1853                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1854
1855         } else {
1856                 kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n",
1857                           arch->pc);
1858                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1859         }
1860
1861         kvm_change_c0_guest_cause(cop0, (0xff),
1862                                   (EXCCODE_TLBL << CAUSEB_EXCCODE));
1863
1864         /* setup badvaddr, context and entryhi registers for the guest */
1865         kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1866         /* XXXKYMA: is the context register used by linux??? */
1867         kvm_write_c0_guest_entryhi(cop0, entryhi);
1868         /* Blow away the shadow host TLBs */
1869         kvm_mips_flush_host_tlb(1);
1870
1871         return EMULATE_DONE;
1872 }
1873
1874 enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
1875                                                   uint32_t *opc,
1876                                                   struct kvm_run *run,
1877                                                   struct kvm_vcpu *vcpu)
1878 {
1879         struct mips_coproc *cop0 = vcpu->arch.cop0;
1880         struct kvm_vcpu_arch *arch = &vcpu->arch;
1881         unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1882                                 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1883
1884         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1885                 /* save old pc */
1886                 kvm_write_c0_guest_epc(cop0, arch->pc);
1887                 kvm_set_c0_guest_status(cop0, ST0_EXL);
1888
1889                 if (cause & CAUSEF_BD)
1890                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1891                 else
1892                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1893
1894                 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
1895                           arch->pc);
1896
1897                 /* Set PC to the exception entry point */
1898                 arch->pc = KVM_GUEST_KSEG0 + 0x0;
1899         } else {
1900                 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
1901                           arch->pc);
1902                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1903         }
1904
1905         kvm_change_c0_guest_cause(cop0, (0xff),
1906                                   (EXCCODE_TLBS << CAUSEB_EXCCODE));
1907
1908         /* setup badvaddr, context and entryhi registers for the guest */
1909         kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1910         /* XXXKYMA: is the context register used by linux??? */
1911         kvm_write_c0_guest_entryhi(cop0, entryhi);
1912         /* Blow away the shadow host TLBs */
1913         kvm_mips_flush_host_tlb(1);
1914
1915         return EMULATE_DONE;
1916 }
1917
1918 enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
1919                                                  uint32_t *opc,
1920                                                  struct kvm_run *run,
1921                                                  struct kvm_vcpu *vcpu)
1922 {
1923         struct mips_coproc *cop0 = vcpu->arch.cop0;
1924         struct kvm_vcpu_arch *arch = &vcpu->arch;
1925         unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1926                 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1927
1928         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1929                 /* save old pc */
1930                 kvm_write_c0_guest_epc(cop0, arch->pc);
1931                 kvm_set_c0_guest_status(cop0, ST0_EXL);
1932
1933                 if (cause & CAUSEF_BD)
1934                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1935                 else
1936                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1937
1938                 kvm_debug("[EXL == 0] Delivering TLB INV @ pc %#lx\n",
1939                           arch->pc);
1940
1941                 /* Set PC to the exception entry point */
1942                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1943         } else {
1944                 kvm_debug("[EXL == 1] Delivering TLB INV @ pc %#lx\n",
1945                           arch->pc);
1946                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1947         }
1948
1949         kvm_change_c0_guest_cause(cop0, (0xff),
1950                                   (EXCCODE_TLBS << CAUSEB_EXCCODE));
1951
1952         /* setup badvaddr, context and entryhi registers for the guest */
1953         kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1954         /* XXXKYMA: is the context register used by linux??? */
1955         kvm_write_c0_guest_entryhi(cop0, entryhi);
1956         /* Blow away the shadow host TLBs */
1957         kvm_mips_flush_host_tlb(1);
1958
1959         return EMULATE_DONE;
1960 }
1961
1962 /* TLBMOD: store into address matching TLB with Dirty bit off */
1963 enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
1964                                              struct kvm_run *run,
1965                                              struct kvm_vcpu *vcpu)
1966 {
1967         enum emulation_result er = EMULATE_DONE;
1968 #ifdef DEBUG
1969         struct mips_coproc *cop0 = vcpu->arch.cop0;
1970         unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1971                                 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1972         int index;
1973
1974         /* If address not in the guest TLB, then we are in trouble */
1975         index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
1976         if (index < 0) {
1977                 /* XXXKYMA Invalidate and retry */
1978                 kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
1979                 kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
1980                      __func__, entryhi);
1981                 kvm_mips_dump_guest_tlbs(vcpu);
1982                 kvm_mips_dump_host_tlbs();
1983                 return EMULATE_FAIL;
1984         }
1985 #endif
1986
1987         er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
1988         return er;
1989 }
1990
1991 enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
1992                                               uint32_t *opc,
1993                                               struct kvm_run *run,
1994                                               struct kvm_vcpu *vcpu)
1995 {
1996         struct mips_coproc *cop0 = vcpu->arch.cop0;
1997         unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1998                                 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1999         struct kvm_vcpu_arch *arch = &vcpu->arch;
2000
2001         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2002                 /* save old pc */
2003                 kvm_write_c0_guest_epc(cop0, arch->pc);
2004                 kvm_set_c0_guest_status(cop0, ST0_EXL);
2005
2006                 if (cause & CAUSEF_BD)
2007                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2008                 else
2009                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2010
2011                 kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
2012                           arch->pc);
2013
2014                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2015         } else {
2016                 kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
2017                           arch->pc);
2018                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2019         }
2020
2021         kvm_change_c0_guest_cause(cop0, (0xff),
2022                                   (EXCCODE_MOD << CAUSEB_EXCCODE));
2023
2024         /* setup badvaddr, context and entryhi registers for the guest */
2025         kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2026         /* XXXKYMA: is the context register used by linux??? */
2027         kvm_write_c0_guest_entryhi(cop0, entryhi);
2028         /* Blow away the shadow host TLBs */
2029         kvm_mips_flush_host_tlb(1);
2030
2031         return EMULATE_DONE;
2032 }
2033
2034 enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
2035                                                uint32_t *opc,
2036                                                struct kvm_run *run,
2037                                                struct kvm_vcpu *vcpu)
2038 {
2039         struct mips_coproc *cop0 = vcpu->arch.cop0;
2040         struct kvm_vcpu_arch *arch = &vcpu->arch;
2041
2042         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2043                 /* save old pc */
2044                 kvm_write_c0_guest_epc(cop0, arch->pc);
2045                 kvm_set_c0_guest_status(cop0, ST0_EXL);
2046
2047                 if (cause & CAUSEF_BD)
2048                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2049                 else
2050                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2051
2052         }
2053
2054         arch->pc = KVM_GUEST_KSEG0 + 0x180;
2055
2056         kvm_change_c0_guest_cause(cop0, (0xff),
2057                                   (EXCCODE_CPU << CAUSEB_EXCCODE));
2058         kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
2059
2060         return EMULATE_DONE;
2061 }
2062
2063 enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
2064                                               uint32_t *opc,
2065                                               struct kvm_run *run,
2066                                               struct kvm_vcpu *vcpu)
2067 {
2068         struct mips_coproc *cop0 = vcpu->arch.cop0;
2069         struct kvm_vcpu_arch *arch = &vcpu->arch;
2070         enum emulation_result er = EMULATE_DONE;
2071
2072         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2073                 /* save old pc */
2074                 kvm_write_c0_guest_epc(cop0, arch->pc);
2075                 kvm_set_c0_guest_status(cop0, ST0_EXL);
2076
2077                 if (cause & CAUSEF_BD)
2078                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2079                 else
2080                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2081
2082                 kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
2083
2084                 kvm_change_c0_guest_cause(cop0, (0xff),
2085                                           (EXCCODE_RI << CAUSEB_EXCCODE));
2086
2087                 /* Set PC to the exception entry point */
2088                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2089
2090         } else {
2091                 kvm_err("Trying to deliver RI when EXL is already set\n");
2092                 er = EMULATE_FAIL;
2093         }
2094
2095         return er;
2096 }
2097
2098 enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
2099                                               uint32_t *opc,
2100                                               struct kvm_run *run,
2101                                               struct kvm_vcpu *vcpu)
2102 {
2103         struct mips_coproc *cop0 = vcpu->arch.cop0;
2104         struct kvm_vcpu_arch *arch = &vcpu->arch;
2105         enum emulation_result er = EMULATE_DONE;
2106
2107         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2108                 /* save old pc */
2109                 kvm_write_c0_guest_epc(cop0, arch->pc);
2110                 kvm_set_c0_guest_status(cop0, ST0_EXL);
2111
2112                 if (cause & CAUSEF_BD)
2113                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2114                 else
2115                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2116
2117                 kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
2118
2119                 kvm_change_c0_guest_cause(cop0, (0xff),
2120                                           (EXCCODE_BP << CAUSEB_EXCCODE));
2121
2122                 /* Set PC to the exception entry point */
2123                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2124
2125         } else {
2126                 kvm_err("Trying to deliver BP when EXL is already set\n");
2127                 er = EMULATE_FAIL;
2128         }
2129
2130         return er;
2131 }
2132
2133 enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
2134                                                 uint32_t *opc,
2135                                                 struct kvm_run *run,
2136                                                 struct kvm_vcpu *vcpu)
2137 {
2138         struct mips_coproc *cop0 = vcpu->arch.cop0;
2139         struct kvm_vcpu_arch *arch = &vcpu->arch;
2140         enum emulation_result er = EMULATE_DONE;
2141
2142         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2143                 /* save old pc */
2144                 kvm_write_c0_guest_epc(cop0, arch->pc);
2145                 kvm_set_c0_guest_status(cop0, ST0_EXL);
2146
2147                 if (cause & CAUSEF_BD)
2148                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2149                 else
2150                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2151
2152                 kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);
2153
2154                 kvm_change_c0_guest_cause(cop0, (0xff),
2155                                           (EXCCODE_TR << CAUSEB_EXCCODE));
2156
2157                 /* Set PC to the exception entry point */
2158                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2159
2160         } else {
2161                 kvm_err("Trying to deliver TRAP when EXL is already set\n");
2162                 er = EMULATE_FAIL;
2163         }
2164
2165         return er;
2166 }
2167
2168 enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
2169                                                   uint32_t *opc,
2170                                                   struct kvm_run *run,
2171                                                   struct kvm_vcpu *vcpu)
2172 {
2173         struct mips_coproc *cop0 = vcpu->arch.cop0;
2174         struct kvm_vcpu_arch *arch = &vcpu->arch;
2175         enum emulation_result er = EMULATE_DONE;
2176
2177         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2178                 /* save old pc */
2179                 kvm_write_c0_guest_epc(cop0, arch->pc);
2180                 kvm_set_c0_guest_status(cop0, ST0_EXL);
2181
2182                 if (cause & CAUSEF_BD)
2183                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2184                 else
2185                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2186
2187                 kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);
2188
2189                 kvm_change_c0_guest_cause(cop0, (0xff),
2190                                           (EXCCODE_MSAFPE << CAUSEB_EXCCODE));
2191
2192                 /* Set PC to the exception entry point */
2193                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2194
2195         } else {
2196                 kvm_err("Trying to deliver MSAFPE when EXL is already set\n");
2197                 er = EMULATE_FAIL;
2198         }
2199
2200         return er;
2201 }
2202
2203 enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
2204                                                uint32_t *opc,
2205                                                struct kvm_run *run,
2206                                                struct kvm_vcpu *vcpu)
2207 {
2208         struct mips_coproc *cop0 = vcpu->arch.cop0;
2209         struct kvm_vcpu_arch *arch = &vcpu->arch;
2210         enum emulation_result er = EMULATE_DONE;
2211
2212         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2213                 /* save old pc */
2214                 kvm_write_c0_guest_epc(cop0, arch->pc);
2215                 kvm_set_c0_guest_status(cop0, ST0_EXL);
2216
2217                 if (cause & CAUSEF_BD)
2218                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2219                 else
2220                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2221
2222                 kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc);
2223
2224                 kvm_change_c0_guest_cause(cop0, (0xff),
2225                                           (EXCCODE_FPE << CAUSEB_EXCCODE));
2226
2227                 /* Set PC to the exception entry point */
2228                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2229
2230         } else {
2231                 kvm_err("Trying to deliver FPE when EXL is already set\n");
2232                 er = EMULATE_FAIL;
2233         }
2234
2235         return er;
2236 }
2237
2238 enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
2239                                                   uint32_t *opc,
2240                                                   struct kvm_run *run,
2241                                                   struct kvm_vcpu *vcpu)
2242 {
2243         struct mips_coproc *cop0 = vcpu->arch.cop0;
2244         struct kvm_vcpu_arch *arch = &vcpu->arch;
2245         enum emulation_result er = EMULATE_DONE;
2246
2247         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2248                 /* save old pc */
2249                 kvm_write_c0_guest_epc(cop0, arch->pc);
2250                 kvm_set_c0_guest_status(cop0, ST0_EXL);
2251
2252                 if (cause & CAUSEF_BD)
2253                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2254                 else
2255                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2256
2257                 kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);
2258
2259                 kvm_change_c0_guest_cause(cop0, (0xff),
2260                                           (EXCCODE_MSADIS << CAUSEB_EXCCODE));
2261
2262                 /* Set PC to the exception entry point */
2263                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2264
2265         } else {
2266                 kvm_err("Trying to deliver MSADIS when EXL is already set\n");
2267                 er = EMULATE_FAIL;
2268         }
2269
2270         return er;
2271 }
2272
2273 /* ll/sc, rdhwr, sync emulation */
2274
2275 #define OPCODE 0xfc000000
2276 #define BASE   0x03e00000
2277 #define RT     0x001f0000
2278 #define OFFSET 0x0000ffff
2279 #define LL     0xc0000000
2280 #define SC     0xe0000000
2281 #define SPEC0  0x00000000
2282 #define SPEC3  0x7c000000
2283 #define RD     0x0000f800
2284 #define FUNC   0x0000003f
2285 #define SYNC   0x0000000f
2286 #define RDHWR  0x0000003b
2287
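     /*
      * These masks pick apart an RDHWR instruction: SPEC3 major opcode with
      * function RDHWR, rd selecting the hardware register and rt the
      * destination GPR. For example, "rdhwr $3, $29" (read UserLocal into $3)
      * encodes as 0x7c03e83b.
      */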
2288 enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
2289                                          struct kvm_run *run,
2290                                          struct kvm_vcpu *vcpu)
2291 {
2292         struct mips_coproc *cop0 = vcpu->arch.cop0;
2293         struct kvm_vcpu_arch *arch = &vcpu->arch;
2294         enum emulation_result er = EMULATE_DONE;
2295         unsigned long curr_pc;
2296         uint32_t inst;
2297
2298         /*
2299          * Update PC and hold onto current PC in case there is
2300          * an error and we want to rollback the PC
2301          */
2302         curr_pc = vcpu->arch.pc;
2303         er = update_pc(vcpu, cause);
2304         if (er == EMULATE_FAIL)
2305                 return er;
2306
2307         /* Fetch the instruction. */
2308         if (cause & CAUSEF_BD)
2309                 opc += 1;
2310
2311         inst = kvm_get_inst(opc, vcpu);
2312
2313         if (inst == KVM_INVALID_INST) {
2314                 kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
2315                 return EMULATE_FAIL;
2316         }
2317
2318         if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
2319                 int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
2320                 int rd = (inst & RD) >> 11;
2321                 int rt = (inst & RT) >> 16;
2322                 /* If usermode, check RDHWR rd is allowed by guest HWREna */
2323                 if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
2324                         kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
2325                                   rd, opc);
2326                         goto emulate_ri;
2327                 }
2328                 switch (rd) {
2329                 case 0: /* CPU number */
2330                         arch->gprs[rt] = 0;
2331                         break;
2332                 case 1: /* SYNCI length */
2333                         arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
2334                                              current_cpu_data.icache.linesz);
2335                         break;
2336                 case 2: /* Read count register */
2337                         arch->gprs[rt] = kvm_mips_read_count(vcpu);
2338                         break;
2339                 case 3: /* Count register resolution */
2340                         switch (current_cpu_data.cputype) {
2341                         case CPU_20KC:
2342                         case CPU_25KF:
2343                                 arch->gprs[rt] = 1;
2344                                 break;
2345                         default:
2346                                 arch->gprs[rt] = 2;
2347                         }
2348                         break;
2349                 case 29:
2350                         arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
2351                         break;
2352
2353                 default:
2354                         kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
2355                         goto emulate_ri;
2356                 }
2357         } else {
2358                 kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst);
2359                 goto emulate_ri;
2360         }
2361
2362         return EMULATE_DONE;
2363
2364 emulate_ri:
2365         /*
2366          * Rollback PC (if in branch delay slot then the PC already points to
2367          * branch target), and pass the RI exception to the guest OS.
2368          */
2369         vcpu->arch.pc = curr_pc;
2370         return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
2371 }
2372
2373 enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
2374                                                   struct kvm_run *run)
2375 {
2376         unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
2377         enum emulation_result er = EMULATE_DONE;
2378
2379         if (run->mmio.len > sizeof(*gpr)) {
2380                 kvm_err("Bad MMIO length: %d\n", run->mmio.len);
2381                 er = EMULATE_FAIL;
2382                 goto done;
2383         }
2384
2385         er = update_pc(vcpu, vcpu->arch.pending_load_cause);
2386         if (er == EMULATE_FAIL)
2387                 return er;
2388
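             /*
              * A 4-byte value goes through an int32_t cast so it is sign
              * extended into the (possibly 64-bit) GPR. For 1 and 2 byte loads,
              * mmio_needed == 2 selects sign extension (lb/lh) and 1 selects
              * zero extension (lbu/lhu).
              */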
2389         switch (run->mmio.len) {
2390         case 4:
2391                 *gpr = *(int32_t *) run->mmio.data;
2392                 break;
2393
2394         case 2:
2395                 if (vcpu->mmio_needed == 2)
2396                         *gpr = *(int16_t *) run->mmio.data;
2397                 else
2398                         *gpr = *(uint16_t *)run->mmio.data;
2399
2400                 break;
2401         case 1:
2402                 if (vcpu->mmio_needed == 2)
2403                         *gpr = *(int8_t *) run->mmio.data;
2404                 else
2405                         *gpr = *(u8 *) run->mmio.data;
2406                 break;
2407         }
2408
2409         if (vcpu->arch.pending_load_cause & CAUSEF_BD)
2410                 kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
2411                           vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
2412                           vcpu->mmio_needed);
2413
2414 done:
2415         return er;
2416 }
2417
2418 static enum emulation_result kvm_mips_emulate_exc(unsigned long cause,
2419                                                   uint32_t *opc,
2420                                                   struct kvm_run *run,
2421                                                   struct kvm_vcpu *vcpu)
2422 {
2423         uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2424         struct mips_coproc *cop0 = vcpu->arch.cop0;
2425         struct kvm_vcpu_arch *arch = &vcpu->arch;
2426         enum emulation_result er = EMULATE_DONE;
2427
2428         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2429                 /* save old pc */
2430                 kvm_write_c0_guest_epc(cop0, arch->pc);
2431                 kvm_set_c0_guest_status(cop0, ST0_EXL);
2432
2433                 if (cause & CAUSEF_BD)
2434                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2435                 else
2436                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2437
2438                 kvm_change_c0_guest_cause(cop0, (0xff),
2439                                           (exccode << CAUSEB_EXCCODE));
2440
2441                 /* Set PC to the exception entry point */
2442                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2443                 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2444
2445                 kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
2446                           exccode, kvm_read_c0_guest_epc(cop0),
2447                           kvm_read_c0_guest_badvaddr(cop0));
2448         } else {
2449                 kvm_err("Trying to deliver EXC when EXL is already set\n");
2450                 er = EMULATE_FAIL;
2451         }
2452
2453         return er;
2454 }
2455
2456 enum emulation_result kvm_mips_check_privilege(unsigned long cause,
2457                                                uint32_t *opc,
2458                                                struct kvm_run *run,
2459                                                struct kvm_vcpu *vcpu)
2460 {
2461         enum emulation_result er = EMULATE_DONE;
2462         uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2463         unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
2464
2465         int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
2466
2467         if (usermode) {
2468                 switch (exccode) {
2469                 case EXCCODE_INT:
2470                 case EXCCODE_SYS:
2471                 case EXCCODE_BP:
2472                 case EXCCODE_RI:
2473                 case EXCCODE_TR:
2474                 case EXCCODE_MSAFPE:
2475                 case EXCCODE_FPE:
2476                 case EXCCODE_MSADIS:
2477                         break;
2478
2479                 case EXCCODE_CPU:
2480                         if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
2481                                 er = EMULATE_PRIV_FAIL;
2482                         break;
2483
2484                 case EXCCODE_MOD:
2485                         break;
2486
2487                 case EXCCODE_TLBL:
2488                         /*
2489                          * If we are accessing Guest kernel space, then send an
2490                          * address error exception to the guest
2491                          */
2492                         if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
2493                                 kvm_debug("%s: LD MISS @ %#lx\n", __func__,
2494                                           badvaddr);
2495                                 cause &= ~0xff;
2496                                 cause |= (EXCCODE_ADEL << CAUSEB_EXCCODE);
2497                                 er = EMULATE_PRIV_FAIL;
2498                         }
2499                         break;
2500
2501                 case EXCCODE_TLBS:
2502                         /*
2503                          * If we are accessing Guest kernel space, then send an
2504                          * address error exception to the guest
2505                          */
2506                         if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
2507                                 kvm_debug("%s: ST MISS @ %#lx\n", __func__,
2508                                           badvaddr);
2509                                 cause &= ~0xff;
2510                                 cause |= (EXCCODE_ADES << CAUSEB_EXCCODE);
2511                                 er = EMULATE_PRIV_FAIL;
2512                         }
2513                         break;
2514
2515                 case EXCCODE_ADES:
2516                         kvm_debug("%s: address error ST @ %#lx\n", __func__,
2517                                   badvaddr);
2518                         if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
2519                                 cause &= ~0xff;
2520                                 cause |= (EXCCODE_TLBS << CAUSEB_EXCCODE);
2521                         }
2522                         er = EMULATE_PRIV_FAIL;
2523                         break;
2524                 case EXCCODE_ADEL:
2525                         kvm_debug("%s: address error LD @ %#lx\n", __func__,
2526                                   badvaddr);
2527                         if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
2528                                 cause &= ~0xff;
2529                                 cause |= (EXCCODE_TLBL << CAUSEB_EXCCODE);
2530                         }
2531                         er = EMULATE_PRIV_FAIL;
2532                         break;
2533                 default:
2534                         er = EMULATE_PRIV_FAIL;
2535                         break;
2536                 }
2537         }
2538
2539         if (er == EMULATE_PRIV_FAIL)
2540                 kvm_mips_emulate_exc(cause, opc, run, vcpu);
2541
2542         return er;
2543 }
2544
2545 /*
2546  * User Address (UA) fault. This can happen if:
2547  * (1) the TLB entry is not present/valid in both the Guest and the shadow
2548  *     host TLBs; we then pass the fault on to the guest kernel to handle, or
2549  * (2) the TLB entry is present in the Guest TLB but not in the shadow host
2550  *     TLB; we then inject the entry from the Guest TLB into the shadow host TLB.
2551  */
2552 enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
2553                                               uint32_t *opc,
2554                                               struct kvm_run *run,
2555                                               struct kvm_vcpu *vcpu)
2556 {
2557         enum emulation_result er = EMULATE_DONE;
2558         uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2559         unsigned long va = vcpu->arch.host_cp0_badvaddr;
2560         int index;
2561
2562         kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
2563                   vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
2564
2565         /*
2566          * KVM would not have got the exception if this entry was valid in the
2567          * shadow host TLB. Check the Guest TLB; if the entry is not there, then
2568          * send the guest an exception. The guest exc handler should then inject
2569          * an entry into the guest TLB.
2570          */
2571         index = kvm_mips_guest_tlb_lookup(vcpu,
2572                       (va & VPN2_MASK) |
2573                       (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) & ASID_MASK));
2574         if (index < 0) {
2575                 if (exccode == EXCCODE_TLBL) {
2576                         er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
2577                 } else if (exccode == EXCCODE_TLBS) {
2578                         er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
2579                 } else {
2580                         kvm_err("%s: invalid exc code: %d\n", __func__,
2581                                 exccode);
2582                         er = EMULATE_FAIL;
2583                 }
2584         } else {
2585                 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
2586
2587                 /*
2588                  * Check if the entry is valid, if not then setup a TLB invalid
2589                  * exception to the guest
2590                  */
2591                 if (!TLB_IS_VALID(*tlb, va)) {
2592                         if (exccode == EXCCODE_TLBL) {
2593                                 er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
2594                                                                 vcpu);
2595                         } else if (exccode == EXCCODE_TLBS) {
2596                                 er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
2597                                                                 vcpu);
2598                         } else {
2599                                 kvm_err("%s: invalid exc code: %d\n", __func__,
2600                                         exccode);
2601                                 er = EMULATE_FAIL;
2602                         }
2603                 } else {
2604                         kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
2605                                   tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
2606                         /*
2607                          * OK we have a Guest TLB entry, now inject it into the
2608                          * shadow host TLB
2609                          */
2610                         kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
2611                                                              NULL);
2612                 }
2613         }
2614
2615         return er;
2616 }