/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>

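/*
 * r4kcache.h compiles its cache ops differently when CONFIG_MIPS_MT is
 * defined; temporarily undefining it here (and restoring it below)
 * presumably selects the plain, non-MT variants of those ops for this
 * file.
 */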
#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#define KVM_GUEST_PC_TLB    0
#define KVM_GUEST_SP_TLB    1

#define PRIx64 "llx"

/* Generate a unique, non-matching EntryHi (VPN2) per TLB index to invalidate an entry */
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
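/*
 * Worked example: with 4 KiB pages (PAGE_SHIFT == 12), UNIQUE_ENTRYHI(3)
 * is CKSEG0 + (3 << 13) = CKSEG0 + 0x6000. Each index gets its own
 * even/odd page pair in unmapped KSEG0, so no two scrubbed entries can
 * ever match on the same VPN2.
 */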

atomic_t kvm_mips_instance;
EXPORT_SYMBOL(kvm_mips_instance);

/* These function pointers are initialized once the KVM module is loaded */
pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);

void (*kvm_mips_release_pfn_clean)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_release_pfn_clean);

bool (*kvm_mips_is_error_pfn)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_is_error_pfn);

uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
}

uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
}

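/*
 * Note: despite the "asid" in its name, the helper below returns the
 * TLB index reserved for the commpage (kvm->arch.commpage_tlb); callers
 * feed it straight into the CP0 Index register.
 */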
inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->arch.commpage_tlb;
}

/* Dump the contents of the host TLB, one line per entry (debug aid). */
void kvm_mips_dump_host_tlbs(void)
{
        unsigned long old_entryhi;
        unsigned long old_pagemask;
        struct kvm_mips_tlb tlb;
        unsigned long flags;
        int i;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();

        printk("HOST TLBs:\n");
        printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);

        for (i = 0; i < current_cpu_data.tlbsize; i++) {
                write_c0_index(i);
                mtc0_tlbw_hazard();

                tlb_read();
                tlbw_use_hazard();

                tlb.tlb_hi = read_c0_entryhi();
                tlb.tlb_lo0 = read_c0_entrylo0();
                tlb.tlb_lo1 = read_c0_entrylo1();
                tlb.tlb_mask = read_c0_pagemask();

                printk("TLB%c%3d Hi 0x%08lx ",
                       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
                       i, tlb.tlb_hi);
                printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
                       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
                       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
                       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
                       (tlb.tlb_lo0 >> 3) & 7);
                printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
                       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
                       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
                       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
                       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
        }
        write_c0_entryhi(old_entryhi);
        write_c0_pagemask(old_pagemask);
        mtc0_tlbw_hazard();
        local_irq_restore(flags);
}

void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_mips_tlb tlb;
        int i;

        printk("Guest TLBs:\n");
        printk("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

        for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
                tlb = vcpu->arch.guest_tlb[i];
                printk("TLB%c%3d Hi 0x%08lx ",
                       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
                       i, tlb.tlb_hi);
                printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
                       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
                       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
                       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
                       (tlb.tlb_lo0 >> 3) & 7);
                printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
                       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
                       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
                       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
                       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
        }
}

void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu)
{
        int i;
        volatile struct kvm_mips_tlb tlb;

        printk("Shadow TLBs:\n");
        for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
                tlb = vcpu->arch.shadow_tlb[smp_processor_id()][i];
                printk("TLB%c%3d Hi 0x%08lx ",
                       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
                       i, tlb.tlb_hi);
                printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
                       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
                       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
                       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
                       (tlb.tlb_lo0 >> 3) & 7);
                printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
                       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
                       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
                       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
                       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
        }
}

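/*
 * Fault in the host page backing @gfn (if not already present) and
 * record its pfn in the guest physical map. Returns 0 on success,
 * -EFAULT if the pfn could not be obtained.
 */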
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
        int srcu_idx, err = 0;
        pfn_t pfn;

        if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
                return 0;

        srcu_idx = srcu_read_lock(&kvm->srcu);
        pfn = kvm_mips_gfn_to_pfn(kvm, gfn);

        if (kvm_mips_is_error_pfn(pfn)) {
                kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
                err = -EFAULT;
                goto out;
        }

        kvm->arch.guest_pmap[gfn] = pfn;
out:
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        return err;
}

/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
        unsigned long gva)
{
        gfn_t gfn;
        uint32_t offset = gva & ~PAGE_MASK;
        struct kvm *kvm = vcpu->kvm;

        if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
                kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
                        __builtin_return_address(0), gva);
                return KVM_INVALID_PAGE;
        }

        gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

        if (gfn >= kvm->arch.guest_pmap_npages) {
                kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
                        gva);
                return KVM_INVALID_PAGE;
        }

        if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
                return KVM_INVALID_ADDR;

        return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}

/* XXXKYMA: Must be called with interrupts disabled */
/* set flush_dcache_mask == 0 if no dcache flush required */
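/*
 * Write strategy below: probe for an existing entry matching @entryhi;
 * if one is found, overwrite it in place with tlb_write_indexed(),
 * otherwise pick a victim slot via the Random register, so a stale
 * mapping is never left alongside the new one.
 */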
int
kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
        unsigned long entrylo0, unsigned long entrylo1, int flush_dcache_mask)
{
        unsigned long flags;
        unsigned long old_entryhi;
        volatile int idx;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
        write_c0_entryhi(entryhi);
        mtc0_tlbw_hazard();

        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();

        if (idx >= current_cpu_data.tlbsize) {
                kvm_err("%s: Invalid Index: %d\n", __func__, idx);
                kvm_mips_dump_host_tlbs();
                /* Restore EntryHi and re-enable interrupts before bailing out */
                write_c0_entryhi(old_entryhi);
                mtc0_tlbw_hazard();
                local_irq_restore(flags);
                return -1;
        }

        if (idx < 0) {
                idx = read_c0_random() % current_cpu_data.tlbsize;
                write_c0_index(idx);
                mtc0_tlbw_hazard();
        }
        write_c0_entrylo0(entrylo0);
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();

        tlb_write_indexed();
        tlbw_use_hazard();

#ifdef DEBUG
        kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] "
                  "entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
                  vcpu->arch.pc, idx, read_c0_entryhi(),
                  read_c0_entrylo0(), read_c0_entrylo1());
#endif

        /* Flush D-cache */
        if (flush_dcache_mask) {
                if (entrylo0 & MIPS3_PG_V) {
                        ++vcpu->stat.flush_dcache_exits;
                        flush_data_cache_page((entryhi & VPN2_MASK) &
                                              ~flush_dcache_mask);
                }
                if (entrylo1 & MIPS3_PG_V) {
                        ++vcpu->stat.flush_dcache_exits;
                        flush_data_cache_page(((entryhi & VPN2_MASK) &
                                               ~flush_dcache_mask) |
                                              (0x1 << PAGE_SHIFT));
                }
        }

        /* Restore old ASID */
        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();
        local_irq_restore(flags);
        return 0;
}

/* XXXKYMA: Must be called with interrupts disabled */
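/*
 * A single MIPS TLB entry maps an even/odd page pair, so the handler
 * below faults in both gfn and (gfn ^ 1): EntryLo0 always gets the even
 * page and EntryLo1 the odd one, regardless of which half actually
 * faulted.
 */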
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
        struct kvm_vcpu *vcpu)
{
        gfn_t gfn;
        pfn_t pfn0, pfn1;
        unsigned long vaddr = 0;
        unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
        int even;
        struct kvm *kvm = vcpu->kvm;
        const int flush_dcache_mask = 0;

        if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
                kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
                kvm_mips_dump_host_tlbs();
                return -1;
        }

        gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
        if (gfn >= kvm->arch.guest_pmap_npages) {
                kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
                        gfn, badvaddr);
                kvm_mips_dump_host_tlbs();
                return -1;
        }
        even = !(gfn & 0x1);
        vaddr = badvaddr & (PAGE_MASK << 1);

        if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
                return -1;

        if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
                return -1;

        if (even) {
                pfn0 = kvm->arch.guest_pmap[gfn];
                pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
        } else {
                pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
                pfn1 = kvm->arch.guest_pmap[gfn];
        }

        entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
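        /*
         * EntryLo layout (R4k-style MIPS): bits 5:3 are the cache
         * coherency attribute, so (0x3 << 3) selects attribute 3
         * (cacheable, noncoherent); (1 << 2) sets D (dirty/writable) and
         * (0x1 << 1) sets V (valid). The same constants recur in the
         * commpage handler below.
         */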
        entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
                        (1 << 2) | (0x1 << 1);
        entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
                        (1 << 2) | (0x1 << 1);

        return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
                                       flush_dcache_mask);
}

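/*
 * Wire the vcpu's commpage into the TLB slot reserved for it (see
 * kvm_mips_get_commpage_asid). Only EntryLo0 maps a real page; the odd
 * half of the pair is left invalid (entrylo1 == 0).
 */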
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
        struct kvm_vcpu *vcpu)
{
        pfn_t pfn0, pfn1;
        unsigned long flags, old_entryhi = 0, vaddr = 0;
        unsigned long entrylo0 = 0, entrylo1 = 0;

        pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
        pfn1 = 0;
        entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
                        (1 << 2) | (0x1 << 1);
        entrylo1 = 0;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
        vaddr = badvaddr & (PAGE_MASK << 1);
        write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
        mtc0_tlbw_hazard();
        write_c0_entrylo0(entrylo0);
        mtc0_tlbw_hazard();
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();
        write_c0_index(kvm_mips_get_commpage_asid(vcpu));
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

#ifdef DEBUG
        kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
                  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
                  read_c0_entrylo0(), read_c0_entrylo1());
#endif

        /* Restore old ASID */
        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();
        local_irq_restore(flags);

        return 0;
}

int
kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
        struct kvm_mips_tlb *tlb, unsigned long *hpa0, unsigned long *hpa1)
{
        unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
        struct kvm *kvm = vcpu->kvm;
        pfn_t pfn0, pfn1;

        if ((tlb->tlb_hi & VPN2_MASK) == 0) {
                pfn0 = 0;
                pfn1 = 0;
        } else {
                if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
                                      >> PAGE_SHIFT) < 0)
                        return -1;

                if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
                                      >> PAGE_SHIFT) < 0)
                        return -1;

                pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
                                            >> PAGE_SHIFT];
                pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
                                            >> PAGE_SHIFT];
        }

        if (hpa0)
                *hpa0 = pfn0 << PAGE_SHIFT;

        if (hpa1)
                *hpa1 = pfn1 << PAGE_SHIFT;

        /* Get attributes from the Guest TLB */
        entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
                        kvm_mips_get_kernel_asid(vcpu) :
                        kvm_mips_get_user_asid(vcpu));
        entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
                        (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
        entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
                        (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);

#ifdef DEBUG
        kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
                  tlb->tlb_lo0, tlb->tlb_lo1);
#endif

        return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
                                       tlb->tlb_mask);
}

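/*
 * Software walk of the guest TLB array: an entry matches when its VPN2
 * (masked by the entry's own PageMask) equals that of @entryhi, and it
 * is either global or carries the same ASID. Returns the matching index,
 * or -1 if the guest has no translation for the address.
 */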
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
        int i;
        int index = -1;
        struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

        for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
                if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) ==
                     ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
                    (TLB_IS_GLOBAL(tlb[i]) ||
                     (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) {
                        index = i;
                        break;
                }
        }

#ifdef DEBUG
        /* Only dereference the array on a hit; tlb[i] would be out of
         * bounds here when no entry matched. */
        if (index >= 0)
                kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
                          __func__, entryhi, index, tlb[index].tlb_lo0,
                          tlb[index].tlb_lo1);
#endif

        return index;
}

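/*
 * Probe the *host* TLB for @vaddr under the guest's current kernel or
 * user ASID, restoring EntryHi afterwards. Returns the probed index
 * (negative if there was no match).
 */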
int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
        unsigned long old_entryhi, flags;
        volatile int idx;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();

        if (KVM_GUEST_KERNEL_MODE(vcpu))
                write_c0_entryhi((vaddr & VPN2_MASK) |
                                 kvm_mips_get_kernel_asid(vcpu));
        else
                write_c0_entryhi((vaddr & VPN2_MASK) |
                                 kvm_mips_get_user_asid(vcpu));

        mtc0_tlbw_hazard();

        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();

        /* Restore old ASID */
        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        local_irq_restore(flags);

#ifdef DEBUG
        kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);
#endif

        return idx;
}

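/*
 * Invalidate any host TLB entry mapping @va under the guest user ASID
 * by probing for it and, on a hit, rewriting the slot with a unique
 * non-matching EntryHi and zeroed EntryLo pair.
 */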
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
        int idx;
        unsigned long flags, old_entryhi;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();

        write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
        mtc0_tlbw_hazard();

        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();

        if (idx >= current_cpu_data.tlbsize)
                BUG();

        /* Index 0 is a valid slot, so invalidate on any non-negative probe
         * result (the original "idx > 0" test skipped entry 0). */
        if (idx >= 0) {
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();

                write_c0_entrylo0(0);
                mtc0_tlbw_hazard();

                write_c0_entrylo1(0);
                mtc0_tlbw_hazard();

                tlb_write_indexed();
                mtc0_tlbw_hazard();
        }

        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        local_irq_restore(flags);

#ifdef DEBUG
        if (idx >= 0)
                kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
                          (va & VPN2_MASK) |
                          (vcpu->arch.asid_map[va & ASID_MASK] & ASID_MASK),
                          idx);
#endif

        return 0;
}


/* XXXKYMA: Fix: Guest USER/KERNEL no longer share the same ASID */
int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
{
        unsigned long flags, old_entryhi;

        if (index >= current_cpu_data.tlbsize)
                BUG();

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();

        write_c0_entryhi(UNIQUE_ENTRYHI(index));
        mtc0_tlbw_hazard();

        write_c0_index(index);
        mtc0_tlbw_hazard();

        write_c0_entrylo0(0);
        mtc0_tlbw_hazard();

        write_c0_entrylo1(0);
        mtc0_tlbw_hazard();

        tlb_write_indexed();
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        local_irq_restore(flags);

        return 0;
}

void kvm_mips_flush_host_tlb(int skip_kseg0)
{
        unsigned long flags;
        unsigned long old_entryhi, entryhi;
        unsigned long old_pagemask;
        int entry = 0;
        int maxentry = current_cpu_data.tlbsize;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();

        /* Blast 'em all away. */
        for (entry = 0; entry < maxentry; entry++) {
                write_c0_index(entry);
                mtc0_tlbw_hazard();

                if (skip_kseg0) {
                        tlb_read();
                        tlbw_use_hazard();

                        entryhi = read_c0_entryhi();

                        /* Don't blow away guest kernel entries */
                        if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
                                continue;
                }

                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                mtc0_tlbw_hazard();
                write_c0_entrylo0(0);
                mtc0_tlbw_hazard();
                write_c0_entrylo1(0);
                mtc0_tlbw_hazard();

                tlb_write_indexed();
                mtc0_tlbw_hazard();
        }

        tlbw_use_hazard();

        write_c0_entryhi(old_entryhi);
        write_c0_pagemask(old_pagemask);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        local_irq_restore(flags);
}

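/*
 * KVM's counterpart of the generic MIPS get_new_mmu_context(): bump the
 * cpu's ASID cache and, when the ASID field wraps, start a new version
 * cycle by flushing the TLB (and the I-cache on VTag machines).
 */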
void
kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
                        struct kvm_vcpu *vcpu)
{
        unsigned long asid = asid_cache(cpu);

        if (!((asid += ASID_INC) & ASID_MASK)) {
                if (cpu_has_vtag_icache)
                        flush_icache_all();

                kvm_local_flush_tlb_all();      /* start new asid cycle */

                if (!asid)      /* fix version if needed */
                        asid = ASID_FIRST_VERSION;
        }

        cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu)
{
        unsigned long flags;
        unsigned long old_entryhi;
        unsigned long old_pagemask;
        int entry = 0;
        int cpu = smp_processor_id();

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();

        for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
                write_c0_index(entry);
                mtc0_tlbw_hazard();
                tlb_read();
                tlbw_use_hazard();

                vcpu->arch.shadow_tlb[cpu][entry].tlb_hi = read_c0_entryhi();
                vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = read_c0_entrylo0();
                vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = read_c0_entrylo1();
                vcpu->arch.shadow_tlb[cpu][entry].tlb_mask = read_c0_pagemask();
        }

        write_c0_entryhi(old_entryhi);
        write_c0_pagemask(old_pagemask);
        mtc0_tlbw_hazard();

        local_irq_restore(flags);
}

void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu)
{
        unsigned long flags;
        unsigned long old_ctx;
        int entry;
        int cpu = smp_processor_id();

        local_irq_save(flags);

        old_ctx = read_c0_entryhi();

        for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
                write_c0_entryhi(vcpu->arch.shadow_tlb[cpu][entry].tlb_hi);
                mtc0_tlbw_hazard();
                write_c0_entrylo0(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0);
                write_c0_entrylo1(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);

                write_c0_index(entry);
                mtc0_tlbw_hazard();

                tlb_write_indexed();
                tlbw_use_hazard();
        }

        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        mtc0_tlbw_hazard();
        local_irq_restore(flags);
}

void kvm_local_flush_tlb_all(void)
{
        unsigned long flags;
        unsigned long old_ctx;
        int entry = 0;

        local_irq_save(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);

        /* Blast 'em all away. */
        while (entry < current_cpu_data.tlbsize) {
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                write_c0_index(entry);
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                entry++;
        }
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        mtc0_tlbw_hazard();

        local_irq_restore(flags);
}

void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu)
{
        int cpu, entry;

        for_each_possible_cpu(cpu) {
                for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
                        vcpu->arch.shadow_tlb[cpu][entry].tlb_hi =
                            UNIQUE_ENTRYHI(entry);
                        vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = 0x0;
                        vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = 0x0;
                        vcpu->arch.shadow_tlb[cpu][entry].tlb_mask =
                            read_c0_pagemask();
#ifdef DEBUG
                        kvm_debug("shadow_tlb[%d][%d]: tlb_hi: %#lx, lo0: %#lx, lo1: %#lx\n",
                                  cpu, entry,
                                  vcpu->arch.shadow_tlb[cpu][entry].tlb_hi,
                                  vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0,
                                  vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
#endif
                }
        }
}

/* Restore ASID once we are scheduled back after preemption */
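/*
 * The (asid ^ asid_cache) & ASID_VERSION_MASK test below detects a
 * stale ASID: if the cached version has moved on since this vcpu last
 * ran on this cpu, fresh kernel and user ASIDs must be allocated before
 * the guest can execute again.
 */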
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        unsigned long flags;
        int newasid = 0;

#ifdef DEBUG
        kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
#endif

        /* Allocate new kernel and user ASIDs if needed */

        local_irq_save(flags);

        if (((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
             ASID_VERSION_MASK)) {
                kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
                vcpu->arch.guest_kernel_asid[cpu] =
                    vcpu->arch.guest_kernel_mm.context.asid[cpu];
                kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
                vcpu->arch.guest_user_asid[cpu] =
                    vcpu->arch.guest_user_mm.context.asid[cpu];
                newasid++;

                kvm_info("[%d]: cpu_context: %#lx\n", cpu,
                         cpu_context(cpu, current->mm));
                kvm_info("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
                         cpu, vcpu->arch.guest_kernel_asid[cpu]);
                kvm_info("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
                         vcpu->arch.guest_user_asid[cpu]);
        }

        if (vcpu->arch.last_sched_cpu != cpu) {
                kvm_info("[%d->%d]KVM VCPU[%d] switch\n",
                         vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
        }

        /* Only reload shadow host TLB if new ASIDs haven't been allocated */
#if 0
        if ((atomic_read(&kvm_mips_instance) > 1) && !newasid) {
                kvm_mips_flush_host_tlb(0);
                kvm_shadow_tlb_load(vcpu);
        }
#endif

        if (!newasid) {
                /*
                 * If we preempted while the guest was executing, then reload
                 * the pre-empted ASID
                 */
                if (current->flags & PF_VCPU) {
                        write_c0_entryhi(vcpu->arch.preempt_entryhi &
                                         ASID_MASK);
                        ehb();
                }
        } else {
                /* New ASIDs were allocated for the VM */

                /*
                 * Were we in guest context? If so then the pre-empted ASID is
                 * no longer valid, we need to set it to what it should be
                 * based on the mode of the Guest (Kernel/User)
                 */
                if (current->flags & PF_VCPU) {
                        if (KVM_GUEST_KERNEL_MODE(vcpu))
                                write_c0_entryhi(vcpu->arch.
                                                 guest_kernel_asid[cpu] &
                                                 ASID_MASK);
                        else
                                write_c0_entryhi(vcpu->arch.
                                                 guest_user_asid[cpu] &
                                                 ASID_MASK);
                        ehb();
                }
        }

        local_irq_restore(flags);
}

/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        unsigned long flags;
        uint32_t cpu;

        local_irq_save(flags);

        cpu = smp_processor_id();

        vcpu->arch.preempt_entryhi = read_c0_entryhi();
        vcpu->arch.last_sched_cpu = cpu;

#if 0
        if ((atomic_read(&kvm_mips_instance) > 1)) {
                kvm_shadow_tlb_put(vcpu);
        }
#endif

        if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
             ASID_VERSION_MASK)) {
                kvm_debug("%s: Dropping MMU Context:  %#lx\n", __func__,
                          cpu_context(cpu, current->mm));
                drop_mmu_context(current->mm, cpu);
        }
        write_c0_entryhi(cpu_asid(cpu, current->mm));
        ehb();

        local_irq_restore(flags);
}

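/*
 * Fetch the faulting instruction at @opc from the guest's point of
 * view. Three cases: a mapped address (useg/kseg2-3) is resolved
 * through the host TLB, faulting the guest mapping in on a miss; guest
 * KSEG0 is translated straight to a host physical address; anything
 * else is rejected as KVM_INVALID_INST.
 */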
uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        unsigned long paddr, flags;
        uint32_t inst;
        int index;

        if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
            KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
                local_irq_save(flags);
                index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
                if (index >= 0) {
                        inst = *(opc);
                } else {
                        index = kvm_mips_guest_tlb_lookup(vcpu,
                                        ((unsigned long) opc & VPN2_MASK) |
                                        (kvm_read_c0_guest_entryhi(cop0) &
                                         ASID_MASK));
                        if (index < 0) {
                                kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
                                        __func__, opc, vcpu, read_c0_entryhi());
                                kvm_mips_dump_host_tlbs();
                                local_irq_restore(flags);
                                return KVM_INVALID_INST;
                        }
                        kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
                                        &vcpu->arch.guest_tlb[index],
                                        NULL, NULL);
                        inst = *(opc);
                }
                local_irq_restore(flags);
        } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG0) {
                paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
                                                        (unsigned long) opc);
                inst = *(uint32_t *) CKSEG0ADDR(paddr);
        } else {
                kvm_err("%s: illegal address: %p\n", __func__, opc);
                return KVM_INVALID_INST;
        }

        return inst;
}

EXPORT_SYMBOL(kvm_local_flush_tlb_all);
EXPORT_SYMBOL(kvm_shadow_tlb_put);
EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
EXPORT_SYMBOL(kvm_mips_init_shadow_tlb);
EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
EXPORT_SYMBOL(kvm_shadow_tlb_load);
EXPORT_SYMBOL(kvm_mips_dump_shadow_tlbs);
EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
EXPORT_SYMBOL(kvm_get_inst);
EXPORT_SYMBOL(kvm_arch_vcpu_load);
EXPORT_SYMBOL(kvm_arch_vcpu_put);