/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  29 dec 2001 - Fixed oopses caused by unchecked access to the vm86
 *                stack - Manfred Spraul <manfred@colorfullife.com>
 *
 *  22 mar 2002 - Manfred detected the stackfaults, but didn't handle
 *                them correctly. Now the emulation will be in a
 *                consistent state after stackfaults - Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  22 mar 2002 - Added missing clear_IF in set_vflags_* Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  ?? ??? 2002 - Fixed premature returns from handle_vm86_fault
 *                caused by Kasper Dupont's changes - Stas Sergeev
 *
 *   4 apr 2002 - Fixed CHECK_IF_IN_TRAP broken by Stas' changes.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed syntax of macros in handle_vm86_fault.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed stack access macros to jump to a label
 *                instead of returning to userspace. This simplifies
 *                do_int, and is needed by handle_vm86_fault. Kasper
 *                Dupont <kasperd@daimi.au.dk>
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/audit.h>
#include <linux/stddef.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/vm86.h>

/*
 * Known problems:
 *
 * Interrupt handling is not guaranteed:
 * - a real x86 will disable all interrupts for one instruction
 *   after a "mov ss,xx" to make stack handling atomic even without
 *   the 'lss' instruction. We can't guarantee this in v86 mode,
 *   as the next instruction might result in a page fault or similar.
 * - a real x86 will have interrupts disabled for one instruction
 *   past the 'sti' that enables them. We don't bother with all the
 *   details yet.
 *
 * Let's hope these problems do not actually matter for anything.
 */


/*
 * 8- and 16-bit register defines..
 */
#define AL(regs)        (((unsigned char *)&((regs)->pt.ax))[0])
#define AH(regs)        (((unsigned char *)&((regs)->pt.ax))[1])
#define IP(regs)        (*(unsigned short *)&((regs)->pt.ip))
#define SP(regs)        (*(unsigned short *)&((regs)->pt.sp))

/*
 * virtual flags (16 and 32-bit versions)
 */
#define VFLAGS  (*(unsigned short *)&(current->thread.vm86->veflags))
#define VEFLAGS (current->thread.vm86->veflags)

#define set_flags(X, new, mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))

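/*
 * EFLAGS masks (editorial note derived from the bit values below):
 * SAFE_MASK covers the arithmetic/status bits plus TF and DF, i.e. the
 * bits a vm86 task may change directly; RETURN_MASK selects the bits
 * reported back through get_vflags(). Neither includes IF or IOPL,
 * which are virtualized via VEFLAGS instead.
 */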
#define SAFE_MASK       (0xDD5)
#define RETURN_MASK     (0xDFF)

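/*
 * Leave vm86 mode: copy the vm86 register image back to the userspace
 * vm86_struct/vm86plus_struct, restore the saved 32-bit pt_regs, sp0
 * and gs, and place 'retval' in regs->pt.ax so that the original
 * vm86() syscall returns it.
 */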
void save_v86_state(struct kernel_vm86_regs *regs, int retval)
{
        struct tss_struct *tss;
        struct task_struct *tsk = current;
        struct vm86plus_struct __user *user;
        struct vm86 *vm86 = current->thread.vm86;
        long err = 0;

        /*
         * This gets called from entry.S with interrupts disabled, but
         * from process context. Enable interrupts here, before trying
         * to access user space.
         */
        local_irq_enable();

        if (!vm86 || !vm86->user_vm86) {
                pr_alert("no user_vm86: BAD\n");
                do_exit(SIGSEGV);
        }
        set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->veflags_mask);
        user = vm86->user_vm86;

        if (!access_ok(VERIFY_WRITE, user, vm86->vm86plus.is_vm86pus ?
                       sizeof(struct vm86plus_struct) :
                       sizeof(struct vm86_struct))) {
                pr_alert("could not access userspace vm86 info\n");
                do_exit(SIGSEGV);
        }

        put_user_try {
                put_user_ex(regs->pt.bx, &user->regs.ebx);
                put_user_ex(regs->pt.cx, &user->regs.ecx);
                put_user_ex(regs->pt.dx, &user->regs.edx);
                put_user_ex(regs->pt.si, &user->regs.esi);
                put_user_ex(regs->pt.di, &user->regs.edi);
                put_user_ex(regs->pt.bp, &user->regs.ebp);
                put_user_ex(regs->pt.ax, &user->regs.eax);
                put_user_ex(regs->pt.ip, &user->regs.eip);
                put_user_ex(regs->pt.cs, &user->regs.cs);
                put_user_ex(regs->pt.flags, &user->regs.eflags);
                put_user_ex(regs->pt.sp, &user->regs.esp);
                put_user_ex(regs->pt.ss, &user->regs.ss);
                put_user_ex(regs->es, &user->regs.es);
                put_user_ex(regs->ds, &user->regs.ds);
                put_user_ex(regs->fs, &user->regs.fs);
                put_user_ex(regs->gs, &user->regs.gs);

                put_user_ex(vm86->screen_bitmap, &user->screen_bitmap);
        } put_user_catch(err);
        if (err) {
                pr_alert("could not access userspace vm86 info\n");
                do_exit(SIGSEGV);
        }

        tss = &per_cpu(cpu_tss, get_cpu());
        tsk->thread.sp0 = vm86->saved_sp0;
        tsk->thread.sysenter_cs = __KERNEL_CS;
        load_sp0(tss, &tsk->thread);
        vm86->saved_sp0 = 0;
        put_cpu();

        memcpy(&regs->pt, &vm86->regs32, sizeof(struct pt_regs));

        lazy_load_gs(vm86->regs32.gs);

        regs->pt.ax = retval;
}

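/*
 * Write-protect the 32 PTEs covering the 128K VGA window at 0xA0000 so
 * that, when VM86_SCREEN_BITMAP is requested, stores to screen memory
 * fault; the fault path can then track the touched pages through
 * screen_bitmap.
 */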
static void mark_screen_rdonly(struct mm_struct *mm)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        spinlock_t *ptl;
        int i;

        down_write(&mm->mmap_sem);
        pgd = pgd_offset(mm, 0xA0000);
        if (pgd_none_or_clear_bad(pgd))
                goto out;
        pud = pud_offset(pgd, 0xA0000);
        if (pud_none_or_clear_bad(pud))
                goto out;
        pmd = pmd_offset(pud, 0xA0000);
        split_huge_page_pmd_mm(mm, 0xA0000, pmd);
        if (pmd_none_or_clear_bad(pmd))
                goto out;
        pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
        for (i = 0; i < 32; i++) {
                if (pte_present(*pte))
                        set_pte(pte, pte_wrprotect(*pte));
                pte++;
        }
        pte_unmap_unlock(pte, ptl);
out:
        up_write(&mm->mmap_sem);
        flush_tlb();
}



static int do_vm86_irq_handling(int subfunction, int irqnumber);
static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus);

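/*
 * Two entry points share do_sys_vm86(): the legacy vm86old() syscall
 * takes a plain vm86_struct, while vm86() takes a command word and,
 * for VM86_ENTER/VM86_ENTER_NO_BYPASS, a pointer to a vm86plus_struct.
 */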
SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, user_vm86)
{
        return do_sys_vm86((struct vm86plus_struct __user *) user_vm86, false);
}


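/*
 * Illustrative userspace usage (a sketch added for clarity, not part of
 * the original file): a DOS emulator typically probes for vm86plus
 * support and then enters vm86 mode through this syscall, roughly:
 *
 *	struct vm86plus_struct v = { 0 };	// fill in v.regs first
 *	if (syscall(__NR_vm86, VM86_PLUS_INSTALL_CHECK, 0) == 0)
 *		ret = syscall(__NR_vm86, VM86_ENTER, &v);
 *
 * The return value encodes why vm86 mode was left (VM86_SIGNAL,
 * VM86_INTx, VM86_STI, ...), as produced by save_v86_state().
 */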
SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
{
        switch (cmd) {
        case VM86_REQUEST_IRQ:
        case VM86_FREE_IRQ:
        case VM86_GET_IRQ_BITS:
        case VM86_GET_AND_RESET_IRQ:
                return do_vm86_irq_handling(cmd, (int)arg);
        case VM86_PLUS_INSTALL_CHECK:
                /*
                 * NOTE: on old vm86 stuff this will return the error
                 *  from access_ok(), because the subfunction is
                 *  interpreted as (invalid) address to vm86_struct.
                 *  So the installation check works.
                 */
                return 0;
        }

        /* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
        return do_sys_vm86((struct vm86plus_struct __user *) arg, true);
}


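/*
 * Enter vm86 mode: copy and sanitize the user-supplied register image
 * and flags, save the current 32-bit state (pt_regs, sp0, gs), bump
 * thread.sp0 to make room for the real-mode segment registers, and
 * rewrite the syscall pt_regs so that force_iret() drops back to
 * userspace in v8086 mode.
 */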
static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
{
        struct tss_struct *tss;
        struct task_struct *tsk = current;
        struct vm86 *vm86 = tsk->thread.vm86;
        struct kernel_vm86_regs vm86regs;
        struct pt_regs *regs = current_pt_regs();
        unsigned long err = 0;

        if (!vm86) {
                if (!(vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL)))
                        return -ENOMEM;
                tsk->thread.vm86 = vm86;
        }
        if (vm86->saved_sp0)
                return -EPERM;

        if (!access_ok(VERIFY_READ, user_vm86, plus ?
                       sizeof(struct vm86plus_struct) :
                       sizeof(struct vm86_struct)))
                return -EFAULT;

        memset(&vm86regs, 0, sizeof(vm86regs));
        get_user_try {
                unsigned short seg;
                get_user_ex(vm86regs.pt.bx, &user_vm86->regs.ebx);
                get_user_ex(vm86regs.pt.cx, &user_vm86->regs.ecx);
                get_user_ex(vm86regs.pt.dx, &user_vm86->regs.edx);
                get_user_ex(vm86regs.pt.si, &user_vm86->regs.esi);
                get_user_ex(vm86regs.pt.di, &user_vm86->regs.edi);
                get_user_ex(vm86regs.pt.bp, &user_vm86->regs.ebp);
                get_user_ex(vm86regs.pt.ax, &user_vm86->regs.eax);
                get_user_ex(vm86regs.pt.ip, &user_vm86->regs.eip);
                get_user_ex(seg, &user_vm86->regs.cs);
                vm86regs.pt.cs = seg;
                get_user_ex(vm86regs.pt.flags, &user_vm86->regs.eflags);
                get_user_ex(vm86regs.pt.sp, &user_vm86->regs.esp);
                get_user_ex(seg, &user_vm86->regs.ss);
                vm86regs.pt.ss = seg;
                get_user_ex(vm86regs.es, &user_vm86->regs.es);
                get_user_ex(vm86regs.ds, &user_vm86->regs.ds);
                get_user_ex(vm86regs.fs, &user_vm86->regs.fs);
                get_user_ex(vm86regs.gs, &user_vm86->regs.gs);

                get_user_ex(vm86->flags, &user_vm86->flags);
                get_user_ex(vm86->screen_bitmap, &user_vm86->screen_bitmap);
                get_user_ex(vm86->cpu_type, &user_vm86->cpu_type);
        } get_user_catch(err);
        if (err)
                return err;

        if (copy_from_user(&vm86->int_revectored,
                           &user_vm86->int_revectored,
                           sizeof(struct revectored_struct)))
                return -EFAULT;
        if (copy_from_user(&vm86->int21_revectored,
                           &user_vm86->int21_revectored,
                           sizeof(struct revectored_struct)))
                return -EFAULT;
        if (plus) {
                if (copy_from_user(&vm86->vm86plus, &user_vm86->vm86plus,
                                   sizeof(struct vm86plus_info_struct)))
                        return -EFAULT;
                vm86->vm86plus.is_vm86pus = 1;
        } else
                memset(&vm86->vm86plus, 0,
                       sizeof(struct vm86plus_info_struct));

        memcpy(&vm86->regs32, regs, sizeof(struct pt_regs));
        vm86->user_vm86 = user_vm86;

/*
 * The flags register is also special: we cannot trust that the user
 * has set it up safely, so this makes sure interrupt etc flags are
 * inherited from protected mode.
 */
        VEFLAGS = vm86regs.pt.flags;
        vm86regs.pt.flags &= SAFE_MASK;
        vm86regs.pt.flags |= regs->flags & ~SAFE_MASK;
        vm86regs.pt.flags |= X86_VM_MASK;

        vm86regs.pt.orig_ax = regs->orig_ax;

        switch (vm86->cpu_type) {
        case CPU_286:
                vm86->veflags_mask = 0;
                break;
        case CPU_386:
                vm86->veflags_mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
                break;
        case CPU_486:
                vm86->veflags_mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
                break;
        default:
                vm86->veflags_mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
                break;
        }

/*
 * Save old state
 */
        vm86->saved_sp0 = tsk->thread.sp0;
        lazy_save_gs(vm86->regs32.gs);

        tss = &per_cpu(cpu_tss, get_cpu());
        /* make room for real-mode segments */
        tsk->thread.sp0 += 16;
        if (cpu_has_sep)
                tsk->thread.sysenter_cs = 0;
        load_sp0(tss, &tsk->thread);
        put_cpu();

        if (vm86->flags & VM86_SCREEN_BITMAP)
                mark_screen_rdonly(tsk->mm);

        memcpy((struct kernel_vm86_regs *)regs, &vm86regs, sizeof(vm86regs));
        force_iret();
        return regs->ax;
}

static inline void set_IF(struct kernel_vm86_regs *regs)
{
        VEFLAGS |= X86_EFLAGS_VIF;
}

static inline void clear_IF(struct kernel_vm86_regs *regs)
{
        VEFLAGS &= ~X86_EFLAGS_VIF;
}

static inline void clear_TF(struct kernel_vm86_regs *regs)
{
        regs->pt.flags &= ~X86_EFLAGS_TF;
}

static inline void clear_AC(struct kernel_vm86_regs *regs)
{
        regs->pt.flags &= ~X86_EFLAGS_AC;
}

/*
 * It is correct to call set_IF(regs) from the set_vflags_*
 * functions. However someone forgot to call clear_IF(regs)
 * in the opposite case.
 * After the command sequence CLI PUSHF STI POPF you should
 * end up with interrupts disabled, but you ended up with
 * interrupts enabled.
 *  ( I was testing my own changes, but the only bug I
 *    could find was in a function I had not changed. )
 * [KD]
 */

static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
{
        set_flags(VEFLAGS, flags, current->thread.vm86->veflags_mask);
        set_flags(regs->pt.flags, flags, SAFE_MASK);
        if (flags & X86_EFLAGS_IF)
                set_IF(regs);
        else
                clear_IF(regs);
}

static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
{
        set_flags(VFLAGS, flags, current->thread.vm86->veflags_mask);
        set_flags(regs->pt.flags, flags, SAFE_MASK);
        if (flags & X86_EFLAGS_IF)
                set_IF(regs);
        else
                clear_IF(regs);
}

static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
{
        unsigned long flags = regs->pt.flags & RETURN_MASK;

        if (VEFLAGS & X86_EFLAGS_VIF)
                flags |= X86_EFLAGS_IF;
        flags |= X86_EFLAGS_IOPL;
        return flags | (VEFLAGS & current->thread.vm86->veflags_mask);
}

static inline int is_revectored(int nr, struct revectored_struct *bitmap)
{
        __asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
                :"=r" (nr)
                :"m" (*bitmap), "r" (nr));
        return nr;
}

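/*
 * Real-mode stack helpers: push and pop 8-, 16- and 32-bit values on
 * the vm86 stack via put_user()/get_user(). On a fault they jump to
 * err_label (see the 9 apr 2002 note in the header) instead of
 * returning to userspace with the emulation half-done.
 */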
#define val_byte(val, n) (((__u8 *)&val)[n])

#define pushb(base, ptr, val, err_label) \
        do { \
                __u8 __val = val; \
                ptr--; \
                if (put_user(__val, base + ptr) < 0) \
                        goto err_label; \
        } while (0)

#define pushw(base, ptr, val, err_label) \
        do { \
                __u16 __val = val; \
                ptr--; \
                if (put_user(val_byte(__val, 1), base + ptr) < 0) \
                        goto err_label; \
                ptr--; \
                if (put_user(val_byte(__val, 0), base + ptr) < 0) \
                        goto err_label; \
        } while (0)

#define pushl(base, ptr, val, err_label) \
        do { \
                __u32 __val = val; \
                ptr--; \
                if (put_user(val_byte(__val, 3), base + ptr) < 0) \
                        goto err_label; \
                ptr--; \
                if (put_user(val_byte(__val, 2), base + ptr) < 0) \
                        goto err_label; \
                ptr--; \
                if (put_user(val_byte(__val, 1), base + ptr) < 0) \
                        goto err_label; \
                ptr--; \
                if (put_user(val_byte(__val, 0), base + ptr) < 0) \
                        goto err_label; \
        } while (0)

#define popb(base, ptr, err_label) \
        ({ \
                __u8 __res; \
                if (get_user(__res, base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                __res; \
        })

#define popw(base, ptr, err_label) \
        ({ \
                __u16 __res; \
                if (get_user(val_byte(__res, 0), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                if (get_user(val_byte(__res, 1), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                __res; \
        })

#define popl(base, ptr, err_label) \
        ({ \
                __u32 __res; \
                if (get_user(val_byte(__res, 0), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                if (get_user(val_byte(__res, 1), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                if (get_user(val_byte(__res, 2), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                if (get_user(val_byte(__res, 3), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                __res; \
        })

/* There are so many possible reasons for this function to return
 * VM86_INTx, so adding another doesn't bother me. We can expect
 * userspace programs to be able to handle it. (Getting a problem
 * in userspace is always better than an Oops anyway.) [KD]
 */
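/*
 * Emulate a real-mode software interrupt: unless the task is currently
 * executing in BIOSSEG, the vector is revectored, or the handler itself
 * lives in BIOSSEG, push FLAGS/CS/IP on the vm86 stack, load CS:IP from
 * the interrupt vector table at linear address i*4, and clear TF, IF
 * and AC. Otherwise bail out to the monitor with VM86_INTx.
 */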
static void do_int(struct kernel_vm86_regs *regs, int i,
    unsigned char __user *ssp, unsigned short sp)
{
        unsigned long __user *intr_ptr;
        unsigned long segoffs;
        struct vm86 *vm86 = current->thread.vm86;

        if (regs->pt.cs == BIOSSEG)
                goto cannot_handle;
        if (is_revectored(i, &vm86->int_revectored))
                goto cannot_handle;
        if (i == 0x21 && is_revectored(AH(regs), &vm86->int21_revectored))
                goto cannot_handle;
        intr_ptr = (unsigned long __user *) (i << 2);
        if (get_user(segoffs, intr_ptr))
                goto cannot_handle;
        if ((segoffs >> 16) == BIOSSEG)
                goto cannot_handle;
        pushw(ssp, sp, get_vflags(regs), cannot_handle);
        pushw(ssp, sp, regs->pt.cs, cannot_handle);
        pushw(ssp, sp, IP(regs), cannot_handle);
        regs->pt.cs = segoffs >> 16;
        SP(regs) -= 6;
        IP(regs) = segoffs & 0xffff;
        clear_TF(regs);
        clear_IF(regs);
        clear_AC(regs);
        return;

cannot_handle:
        save_v86_state(regs, VM86_INTx + (i << 8));
}

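/*
 * Called from the trap handlers when the fault happened in vm86 mode.
 * For vm86plus tasks, #DB and #BP are reported back to the monitor via
 * save_v86_state(); other traps are reflected into the vm86 task with
 * do_int(). Returns nonzero if the caller should handle the trap itself.
 */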
int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
{
        struct vm86 *vm86 = current->thread.vm86;

        if (vm86->vm86plus.is_vm86pus) {
                if ((trapno == 3) || (trapno == 1)) {
                        save_v86_state(regs, VM86_TRAP + (trapno << 8));
                        return 0;
                }
                do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
                return 0;
        }
        if (trapno != 1)
                return 1; /* we let this be handled by the calling routine */
        current->thread.trap_nr = trapno;
        current->thread.error_code = error_code;
        force_sig(SIGTRAP, current);
        return 0;
}

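/*
 * Called from the #GP handler for faults raised in vm86 mode: decode
 * any instruction prefixes, then emulate the privileged instructions
 * (pushf, popf, int xx, iret, cli, sti) against the virtual interrupt
 * flag, returning to the 32-bit monitor when a pending VIP, the PIC
 * return flag or a debugger intercept requires it.
 */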
void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
{
        unsigned char opcode;
        unsigned char __user *csp;
        unsigned char __user *ssp;
        unsigned short ip, sp, orig_flags;
        int data32, pref_done;
        struct vm86plus_info_struct *vmpi = &current->thread.vm86->vm86plus;

#define CHECK_IF_IN_TRAP \
        if (vmpi->vm86dbg_active && vmpi->vm86dbg_TFpendig) \
                newflags |= X86_EFLAGS_TF

        orig_flags = *(unsigned short *)&regs->pt.flags;

        csp = (unsigned char __user *) (regs->pt.cs << 4);
        ssp = (unsigned char __user *) (regs->pt.ss << 4);
        sp = SP(regs);
        ip = IP(regs);

        data32 = 0;
        pref_done = 0;
        do {
                switch (opcode = popb(csp, ip, simulate_sigsegv)) {
                case 0x66:      /* 32-bit data */     data32 = 1; break;
                case 0x67:      /* 32-bit address */  break;
                case 0x2e:      /* CS */              break;
                case 0x3e:      /* DS */              break;
                case 0x26:      /* ES */              break;
                case 0x36:      /* SS */              break;
                case 0x65:      /* GS */              break;
                case 0x64:      /* FS */              break;
                case 0xf2:      /* repnz */           break;
                case 0xf3:      /* rep */             break;
                default: pref_done = 1;
                }
        } while (!pref_done);

        switch (opcode) {

        /* pushf */
        case 0x9c:
                if (data32) {
                        pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
                        SP(regs) -= 4;
                } else {
                        pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
                        SP(regs) -= 2;
                }
                IP(regs) = ip;
                goto vm86_fault_return;

        /* popf */
        case 0x9d:
                {
                unsigned long newflags;
                if (data32) {
                        newflags = popl(ssp, sp, simulate_sigsegv);
                        SP(regs) += 4;
                } else {
                        newflags = popw(ssp, sp, simulate_sigsegv);
                        SP(regs) += 2;
                }
                IP(regs) = ip;
                CHECK_IF_IN_TRAP;
                if (data32)
                        set_vflags_long(newflags, regs);
                else
                        set_vflags_short(newflags, regs);

                goto check_vip;
                }

        /* int xx */
        case 0xcd: {
                int intno = popb(csp, ip, simulate_sigsegv);
                IP(regs) = ip;
                if (vmpi->vm86dbg_active) {
                        if ((1 << (intno & 7)) & vmpi->vm86dbg_intxxtab[intno >> 3]) {
                                save_v86_state(regs, VM86_INTx + (intno << 8));
                                return;
                        }
                }
                do_int(regs, intno, ssp, sp);
                return;
        }

        /* iret */
        case 0xcf:
                {
                unsigned long newip;
                unsigned long newcs;
                unsigned long newflags;
                if (data32) {
                        newip = popl(ssp, sp, simulate_sigsegv);
                        newcs = popl(ssp, sp, simulate_sigsegv);
                        newflags = popl(ssp, sp, simulate_sigsegv);
                        SP(regs) += 12;
                } else {
                        newip = popw(ssp, sp, simulate_sigsegv);
                        newcs = popw(ssp, sp, simulate_sigsegv);
                        newflags = popw(ssp, sp, simulate_sigsegv);
                        SP(regs) += 6;
                }
                IP(regs) = newip;
                regs->pt.cs = newcs;
                CHECK_IF_IN_TRAP;
                if (data32) {
                        set_vflags_long(newflags, regs);
                } else {
                        set_vflags_short(newflags, regs);
                }
                goto check_vip;
                }

        /* cli */
        case 0xfa:
                IP(regs) = ip;
                clear_IF(regs);
                goto vm86_fault_return;

        /* sti */
        /*
         * Damn. This is incorrect: the 'sti' instruction should actually
         * enable interrupts after the /next/ instruction. Not good.
         *
         * Probably needs some horsing around with the TF flag. Aiee..
         */
        case 0xfb:
                IP(regs) = ip;
                set_IF(regs);
                goto check_vip;

        default:
                save_v86_state(regs, VM86_UNKNOWN);
        }

        return;

check_vip:
        if (VEFLAGS & X86_EFLAGS_VIP) {
                save_v86_state(regs, VM86_STI);
                return;
        }

vm86_fault_return:
        if (vmpi->force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) {
                save_v86_state(regs, VM86_PICRETURN);
                return;
        }
        if (orig_flags & X86_EFLAGS_TF)
                handle_vm86_trap(regs, 0, X86_TRAP_DB);
        return;

simulate_sigsegv:
        /* FIXME: After a long discussion with Stas we finally
         *        agreed, that this is wrong. Here we should
         *        really send a SIGSEGV to the user program.
         *        But how do we create the correct context? We
         *        are inside a general protection fault handler
         *        and have just returned from a page fault handler.
         *        The correct context for the signal handler
         *        should be a mixture of the two, but how do we
         *        get the information? [KD]
         */
        save_v86_state(regs, VM86_UNKNOWN);
}

/* ---------------- vm86 special IRQ passing stuff ----------------- */

#define VM86_IRQNAME            "vm86irq"

static struct vm86_irqs {
        struct task_struct *tsk;
        int sig;
} vm86_irqs[16];

static DEFINE_SPINLOCK(irqbits_lock);
static int irqbits;

#define ALLOWED_SIGS (1 /* 0 = don't send a signal */ \
        | (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO)  | (1 << SIGURG) \
        | (1 << SIGUNUSED))

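/*
 * Interrupt handler for IRQs forwarded to a vm86 task: record the IRQ
 * as pending in irqbits, optionally signal the owning task, and keep
 * the line disabled until the task collects it via
 * VM86_GET_AND_RESET_IRQ.
 */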
static irqreturn_t irq_handler(int intno, void *dev_id)
{
        int irq_bit;
        unsigned long flags;

        spin_lock_irqsave(&irqbits_lock, flags);
        irq_bit = 1 << intno;
        if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
                goto out;
        irqbits |= irq_bit;
        if (vm86_irqs[intno].sig)
                send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
        /*
         * IRQ will be re-enabled when user asks for the irq (whether
         * polling or as a result of the signal)
         */
        disable_irq_nosync(intno);
        spin_unlock_irqrestore(&irqbits_lock, flags);
        return IRQ_HANDLED;

out:
        spin_unlock_irqrestore(&irqbits_lock, flags);
        return IRQ_NONE;
}

static inline void free_vm86_irq(int irqnumber)
{
        unsigned long flags;

        free_irq(irqnumber, NULL);
        vm86_irqs[irqnumber].tsk = NULL;

        spin_lock_irqsave(&irqbits_lock, flags);
        irqbits &= ~(1 << irqnumber);
        spin_unlock_irqrestore(&irqbits_lock, flags);
}

void release_vm86_irqs(struct task_struct *task)
{
        int i;
        for (i = FIRST_VM86_IRQ ; i <= LAST_VM86_IRQ; i++)
            if (vm86_irqs[i].tsk == task)
                free_vm86_irq(i);
}

static inline int get_and_reset_irq(int irqnumber)
{
        int bit;
        unsigned long flags;
        int ret = 0;

        if (invalid_vm86_irq(irqnumber)) return 0;
        if (vm86_irqs[irqnumber].tsk != current) return 0;
        spin_lock_irqsave(&irqbits_lock, flags);
        bit = irqbits & (1 << irqnumber);
        irqbits &= ~bit;
        if (bit) {
                enable_irq(irqnumber);
                ret = 1;
        }

        spin_unlock_irqrestore(&irqbits_lock, flags);
        return ret;
}


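/*
 * Dispatch the vm86() IRQ subfunctions. For VM86_REQUEST_IRQ the
 * 'irqnumber' argument packs the desired signal in bits 8 and up and
 * the IRQ line in the low byte; the other subfunctions take a plain
 * IRQ number. An illustrative userspace call (sketch only) would be:
 *
 *	syscall(__NR_vm86, VM86_REQUEST_IRQ, (SIGUSR1 << 8) | 3);
 */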
static int do_vm86_irq_handling(int subfunction, int irqnumber)
{
        int ret;
        switch (subfunction) {
                case VM86_GET_AND_RESET_IRQ: {
                        return get_and_reset_irq(irqnumber);
                }
                case VM86_GET_IRQ_BITS: {
                        return irqbits;
                }
                case VM86_REQUEST_IRQ: {
                        int sig = irqnumber >> 8;
                        int irq = irqnumber & 255;
                        if (!capable(CAP_SYS_ADMIN)) return -EPERM;
                        if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM;
                        if (invalid_vm86_irq(irq)) return -EPERM;
                        if (vm86_irqs[irq].tsk) return -EPERM;
                        ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL);
                        if (ret) return ret;
                        vm86_irqs[irq].sig = sig;
                        vm86_irqs[irq].tsk = current;
                        return irq;
                }
                case VM86_FREE_IRQ: {
                        if (invalid_vm86_irq(irqnumber)) return -EPERM;
                        if (!vm86_irqs[irqnumber].tsk) return 0;
                        if (vm86_irqs[irqnumber].tsk != current) return -EPERM;
                        free_vm86_irq(irqnumber);
                        return 0;
                }
        }
        return -EINVAL;
}