MIPS: KVM: Simplify default guest Config registers
[linux-drm-fsl-dcu.git] / arch / mips / kvm / trap_emul.c
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
7  *
8  * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
9  * Authors: Sanjay Lal <sanjayl@kymasys.com>
10  */
11
12 #include <linux/errno.h>
13 #include <linux/err.h>
14 #include <linux/module.h>
15 #include <linux/vmalloc.h>
16
17 #include <linux/kvm_host.h>
18
19 #include "opcode.h"
20 #include "interrupt.h"
21
22 static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
23 {
24         gpa_t gpa;
25         uint32_t kseg = KSEGX(gva);
26
27         if ((kseg == CKSEG0) || (kseg == CKSEG1))
28                 gpa = CPHYSADDR(gva);
29         else {
30                 kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
31                 kvm_mips_dump_host_tlbs();
32                 gpa = KVM_INVALID_ADDR;
33         }
34
35         kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);
36
37         return gpa;
38 }
39
40 static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
41 {
42         struct kvm_run *run = vcpu->run;
43         uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
44         unsigned long cause = vcpu->arch.host_cp0_cause;
45         enum emulation_result er = EMULATE_DONE;
46         int ret = RESUME_GUEST;
47
48         if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1)
49                 er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
50         else
51                 er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
52
53         switch (er) {
54         case EMULATE_DONE:
55                 ret = RESUME_GUEST;
56                 break;
57
58         case EMULATE_FAIL:
59                 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
60                 ret = RESUME_HOST;
61                 break;
62
63         case EMULATE_WAIT:
64                 run->exit_reason = KVM_EXIT_INTR;
65                 ret = RESUME_HOST;
66                 break;
67
68         default:
69                 BUG();
70         }
71         return ret;
72 }
73
74 static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
75 {
76         struct kvm_run *run = vcpu->run;
77         uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
78         unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
79         unsigned long cause = vcpu->arch.host_cp0_cause;
80         enum emulation_result er = EMULATE_DONE;
81         int ret = RESUME_GUEST;
82
83         if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
84             || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
85                 kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
86                           cause, opc, badvaddr);
87                 er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);
88
89                 if (er == EMULATE_DONE)
90                         ret = RESUME_GUEST;
91                 else {
92                         run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
93                         ret = RESUME_HOST;
94                 }
95         } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
96                 /*
97                  * XXXKYMA: The guest kernel does not expect to get this fault
98                  * when we are not using HIGHMEM. Need to address this in a
99                  * HIGHMEM kernel
100                  */
101                 kvm_err("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
102                         cause, opc, badvaddr);
103                 kvm_mips_dump_host_tlbs();
104                 kvm_arch_vcpu_dump_regs(vcpu);
105                 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
106                 ret = RESUME_HOST;
107         } else {
108                 kvm_err("Illegal TLB Mod fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
109                         cause, opc, badvaddr);
110                 kvm_mips_dump_host_tlbs();
111                 kvm_arch_vcpu_dump_regs(vcpu);
112                 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
113                 ret = RESUME_HOST;
114         }
115         return ret;
116 }
117
118 static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
119 {
120         struct kvm_run *run = vcpu->run;
121         uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
122         unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
123         unsigned long cause = vcpu->arch.host_cp0_cause;
124         enum emulation_result er = EMULATE_DONE;
125         int ret = RESUME_GUEST;
126
127         if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
128             && KVM_GUEST_KERNEL_MODE(vcpu)) {
129                 if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
130                         run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
131                         ret = RESUME_HOST;
132                 }
133         } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
134                    || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
135                 kvm_debug("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
136                           cause, opc, badvaddr);
137                 er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
138                 if (er == EMULATE_DONE)
139                         ret = RESUME_GUEST;
140                 else {
141                         run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
142                         ret = RESUME_HOST;
143                 }
144         } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
145                 /*
146                  * All KSEG0 faults are handled by KVM, as the guest kernel does
147                  * not expect to ever get them
148                  */
149                 if (kvm_mips_handle_kseg0_tlb_fault
150                     (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
151                         run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
152                         ret = RESUME_HOST;
153                 }
154         } else {
155                 kvm_err("Illegal TLB LD fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
156                         cause, opc, badvaddr);
157                 kvm_mips_dump_host_tlbs();
158                 kvm_arch_vcpu_dump_regs(vcpu);
159                 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
160                 ret = RESUME_HOST;
161         }
162         return ret;
163 }
164
165 static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
166 {
167         struct kvm_run *run = vcpu->run;
168         uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
169         unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
170         unsigned long cause = vcpu->arch.host_cp0_cause;
171         enum emulation_result er = EMULATE_DONE;
172         int ret = RESUME_GUEST;
173
174         if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
175             && KVM_GUEST_KERNEL_MODE(vcpu)) {
176                 if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
177                         run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
178                         ret = RESUME_HOST;
179                 }
180         } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
181                    || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
182                 kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n",
183                           vcpu->arch.pc, badvaddr);
184
185                 /*
186                  * User Address (UA) fault, this could happen if
187                  * (1) TLB entry not present/valid in both Guest and shadow host
188                  *     TLBs, in this case we pass on the fault to the guest
189                  *     kernel and let it handle it.
190                  * (2) TLB entry is present in the Guest TLB but not in the
191                  *     shadow, in this case we inject the TLB from the Guest TLB
192                  *     into the shadow host TLB
193                  */
194
195                 er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
196                 if (er == EMULATE_DONE)
197                         ret = RESUME_GUEST;
198                 else {
199                         run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
200                         ret = RESUME_HOST;
201                 }
202         } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
203                 if (kvm_mips_handle_kseg0_tlb_fault
204                     (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
205                         run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
206                         ret = RESUME_HOST;
207                 }
208         } else {
209                 kvm_err("Illegal TLB ST fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
210                         cause, opc, badvaddr);
211                 kvm_mips_dump_host_tlbs();
212                 kvm_arch_vcpu_dump_regs(vcpu);
213                 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
214                 ret = RESUME_HOST;
215         }
216         return ret;
217 }
218
219 static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
220 {
221         struct kvm_run *run = vcpu->run;
222         uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
223         unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
224         unsigned long cause = vcpu->arch.host_cp0_cause;
225         enum emulation_result er = EMULATE_DONE;
226         int ret = RESUME_GUEST;
227
228         if (KVM_GUEST_KERNEL_MODE(vcpu)
229             && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
230                 kvm_debug("Emulate Store to MMIO space\n");
231                 er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
232                 if (er == EMULATE_FAIL) {
233                         kvm_err("Emulate Store to MMIO space failed\n");
234                         run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
235                         ret = RESUME_HOST;
236                 } else {
237                         run->exit_reason = KVM_EXIT_MMIO;
238                         ret = RESUME_HOST;
239                 }
240         } else {
241                 kvm_err("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
242                         cause, opc, badvaddr);
243                 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
244                 ret = RESUME_HOST;
245         }
246         return ret;
247 }
248
249 static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
250 {
251         struct kvm_run *run = vcpu->run;
252         uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
253         unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
254         unsigned long cause = vcpu->arch.host_cp0_cause;
255         enum emulation_result er = EMULATE_DONE;
256         int ret = RESUME_GUEST;
257
258         if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
259                 kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
260                 er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
261                 if (er == EMULATE_FAIL) {
262                         kvm_err("Emulate Load from MMIO space failed\n");
263                         run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
264                         ret = RESUME_HOST;
265                 } else {
266                         run->exit_reason = KVM_EXIT_MMIO;
267                         ret = RESUME_HOST;
268                 }
269         } else {
270                 kvm_err("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
271                         cause, opc, badvaddr);
272                 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
273                 ret = RESUME_HOST;
274                 er = EMULATE_FAIL;
275         }
276         return ret;
277 }
278
279 static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
280 {
281         struct kvm_run *run = vcpu->run;
282         uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
283         unsigned long cause = vcpu->arch.host_cp0_cause;
284         enum emulation_result er = EMULATE_DONE;
285         int ret = RESUME_GUEST;
286
287         er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
288         if (er == EMULATE_DONE)
289                 ret = RESUME_GUEST;
290         else {
291                 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
292                 ret = RESUME_HOST;
293         }
294         return ret;
295 }
296
297 static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
298 {
299         struct kvm_run *run = vcpu->run;
300         uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
301         unsigned long cause = vcpu->arch.host_cp0_cause;
302         enum emulation_result er = EMULATE_DONE;
303         int ret = RESUME_GUEST;
304
305         er = kvm_mips_handle_ri(cause, opc, run, vcpu);
306         if (er == EMULATE_DONE)
307                 ret = RESUME_GUEST;
308         else {
309                 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
310                 ret = RESUME_HOST;
311         }
312         return ret;
313 }
314
315 static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
316 {
317         struct kvm_run *run = vcpu->run;
318         uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
319         unsigned long cause = vcpu->arch.host_cp0_cause;
320         enum emulation_result er = EMULATE_DONE;
321         int ret = RESUME_GUEST;
322
323         er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
324         if (er == EMULATE_DONE)
325                 ret = RESUME_GUEST;
326         else {
327                 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
328                 ret = RESUME_HOST;
329         }
330         return ret;
331 }
332
333 static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
334 {
335         struct kvm_run *run = vcpu->run;
336         uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
337         unsigned long cause = vcpu->arch.host_cp0_cause;
338         enum emulation_result er = EMULATE_DONE;
339         int ret = RESUME_GUEST;
340
341         er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
342         if (er == EMULATE_DONE) {
343                 ret = RESUME_GUEST;
344         } else {
345                 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
346                 ret = RESUME_HOST;
347         }
348         return ret;
349 }
350
351 static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
352 {
353         struct kvm_run *run = vcpu->run;
354         uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
355         unsigned long cause = vcpu->arch.host_cp0_cause;
356         enum emulation_result er = EMULATE_DONE;
357         int ret = RESUME_GUEST;
358
359         /* No MSA supported in guest, guest reserved instruction exception */
360         er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
361
362         switch (er) {
363         case EMULATE_DONE:
364                 ret = RESUME_GUEST;
365                 break;
366
367         case EMULATE_FAIL:
368                 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
369                 ret = RESUME_HOST;
370                 break;
371
372         default:
373                 BUG();
374         }
375         return ret;
376 }
377
/* Nothing VM-wide to initialise for trap & emulate; always succeeds. */
static int kvm_trap_emul_vm_init(struct kvm *kvm)
{
	return 0;
}
382
/* No per-VCPU init work needed for trap & emulate; always succeeds. */
static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}
387
/*
 * Initialise the guest's CP0 state so it comes up looking like a simple
 * MIPS 24Kc, deriving cache characteristics from the host's Config1.
 * Always returns 0.
 */
static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	uint32_t config1;
	int vcpu_id = vcpu->vcpu_id;

	/*
	 * Arch specific stuff, set up config registers properly so that the
	 * guest will come up as expected, for now we simulate a MIPS 24kc
	 */
	/* 0x00019300: presumably the 24Kc PRId value — TODO confirm fields */
	kvm_write_c0_guest_prid(cop0, 0x00019300);
	/*
	 * Config: Config1 present (M), kseg0 cacheable noncoherent
	 * write-back write-allocate (K0=3), AR=1, TLB-based MMU.
	 */
	kvm_write_c0_guest_config(cop0, MIPS_CONF_M | (0x3 << CP0C0_K0) |
				  (0x1 << CP0C0_AR) |
				  (MMU_TYPE_R4000 << CP0C0_MT));

	/* Read the cache characteristics from the host Config1 Register */
	config1 = (read_c0_config1() & ~0x7f);	/* mask off low feature bits */

	/* Set up MMU size: MMUSize field = guest TLB entries - 1 */
	config1 &= ~(0x3f << 25);
	config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);

	/*
	 * We unset some bits that we aren't emulating: coprocessor 2 (C2),
	 * MDMX (MD), performance counters (PC), watch registers (WR) and
	 * EJTAG (CA).
	 */
	config1 &=
	    ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) |
	      (1 << CP0C1_WR) | (1 << CP0C1_CA));
	kvm_write_c0_guest_config1(cop0, config1);

	/* Config2: only M set (Config3 present); no tertiary/secondary caches */
	kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
	/* MIPS_CONF_M | (read_c0_config2() & 0xfff) */

	/* Config3: no Config4; UserLocal register implemented (ULRI) */
	kvm_write_c0_guest_config3(cop0, MIPS_CONF3_ULRI);

	/* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
	kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));

	/*
	 * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
	 */
	kvm_write_c0_guest_intctl(cop0, 0xFC000000);

	/* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
	kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF));

	return 0;
}
437
438 static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
439                                      const struct kvm_one_reg *reg,
440                                      s64 *v)
441 {
442         switch (reg->id) {
443         case KVM_REG_MIPS_CP0_COUNT:
444                 *v = kvm_mips_read_count(vcpu);
445                 break;
446         case KVM_REG_MIPS_COUNT_CTL:
447                 *v = vcpu->arch.count_ctl;
448                 break;
449         case KVM_REG_MIPS_COUNT_RESUME:
450                 *v = ktime_to_ns(vcpu->arch.count_resume);
451                 break;
452         case KVM_REG_MIPS_COUNT_HZ:
453                 *v = vcpu->arch.count_hz;
454                 break;
455         default:
456                 return -EINVAL;
457         }
458         return 0;
459 }
460
/*
 * Write a register handled specially by trap & emulate (timer/count
 * related registers plus Cause, whose DC bit interacts with the timer).
 * Returns -EINVAL for ids the generic code owns, otherwise the result
 * of the underlying timer operation (0 on success).
 */
static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int ret = 0;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_COUNT:
		kvm_mips_write_count(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		kvm_mips_write_compare(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		/*
		 * If the timer is stopped or started (DC bit) it must look
		 * atomic with changes to the interrupt pending bits (TI, IRQ5).
		 * A timer interrupt should not happen in between.
		 */
		if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
			/* DC bit is changing state */
			if (v & CAUSEF_DC) {
				/* disable timer first, then update Cause */
				kvm_mips_count_disable_cause(vcpu);
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
			} else {
				/* update Cause first, enable timer last */
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
				kvm_mips_count_enable_cause(vcpu);
			}
		} else {
			/* DC unchanged: plain Cause write is safe */
			kvm_write_c0_guest_cause(cop0, v);
		}
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		ret = kvm_mips_set_count_ctl(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		ret = kvm_mips_set_count_resume(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_set_count_hz(vcpu, v);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}
509
/* Callback table wiring the trap & emulate implementation into the
 * generic KVM/MIPS core. */
static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
	/* exit handlers */
	.handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
	.handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
	.handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
	.handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
	.handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
	.handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
	.handle_syscall = kvm_trap_emul_handle_syscall,
	.handle_res_inst = kvm_trap_emul_handle_res_inst,
	.handle_break = kvm_trap_emul_handle_break,
	.handle_trap = kvm_trap_emul_handle_trap,
	.handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,

	/* setup / address translation */
	.vm_init = kvm_trap_emul_vm_init,
	.vcpu_init = kvm_trap_emul_vcpu_init,
	.vcpu_setup = kvm_trap_emul_vcpu_setup,
	.gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
	/* interrupt plumbing (shared helpers from interrupt.h) */
	.queue_timer_int = kvm_mips_queue_timer_int_cb,
	.dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
	.queue_io_int = kvm_mips_queue_io_int_cb,
	.dequeue_io_int = kvm_mips_dequeue_io_int_cb,
	.irq_deliver = kvm_mips_irq_deliver_cb,
	.irq_clear = kvm_mips_irq_clear_cb,
	/* ONE_REG accessors for timer registers */
	.get_one_reg = kvm_trap_emul_get_one_reg,
	.set_one_reg = kvm_trap_emul_set_one_reg,
};
537
/*
 * Install the trap & emulate callback table for the generic KVM/MIPS
 * core to use. Always succeeds.
 */
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
	*install_callbacks = &kvm_trap_emul_callbacks;
	return 0;
}