/*-
 * Copyright (c) 1997 Jonathan Lemon
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/vm86.c,v 1.31.2.2 2001/10/05 06:18:55 peter Exp $
 * $DragonFly: src/sys/i386/i386/Attic/vm86.c,v 1.11 2004/08/07 03:42:37 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <sys/user.h>
#include <sys/thread2.h>

#include <machine/md_var.h>
#include <machine/pcb_ext.h>    /* pcb.h included via sys/user.h */
#include <machine/psl.h>
#include <machine/specialreg.h>
#include <machine/sysarch.h>
#include <machine/clock.h>

extern int i386_extend_pcb      (struct proc *);
extern int vm86pa;
extern struct pcb *vm86pcb;

extern int vm86_bioscall(struct vm86frame *);
extern void vm86_biosret(struct vm86frame *);

void vm86_prepcall(struct vm86frame);

struct system_map {
        int             type;
        vm_offset_t     start;
        vm_offset_t     end;
};

#define HLT     0xf4
#define CLI     0xfa
#define STI     0xfb
#define PUSHF   0x9c
#define POPF    0x9d
#define INTn    0xcd
#define IRET    0xcf
#define CALLm   0xff
#define OPERAND_SIZE_PREFIX     0x66
#define ADDRESS_SIZE_PREFIX     0x67
#define PUSH_MASK       ~(PSL_VM | PSL_RF | PSL_I)
#define POP_MASK        ~(PSL_VIP | PSL_VIF | PSL_VM | PSL_RF | PSL_IOPL)

static __inline caddr_t
MAKE_ADDR(u_short sel, u_short off)
{
        return ((caddr_t)((sel << 4) + off));
}

static __inline void
GET_VEC(u_int vec, u_short *sel, u_short *off)
{
        *sel = vec >> 16;
        *off = vec & 0xffff;
}

static __inline u_int
MAKE_VEC(u_short sel, u_short off)
{
        return ((sel << 16) | off);
}
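
/*
 * Real-mode address arithmetic: a 16-bit segment is shifted left four
 * bits and the offset added, so e.g. the BIOS reset vector F000:FFF0
 * maps to linear address 0xFFFF0 (0xF0000 + 0xFFF0).  MAKE_VEC and
 * GET_VEC pack and unpack the same selector:offset pair as the 32-bit
 * image of a real-mode interrupt vector.
 */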

static __inline void
PUSH(u_short x, struct vm86frame *vmf)
{
        vmf->vmf_sp -= 2;
        susword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp), x);
}

static __inline void
PUSHL(u_int x, struct vm86frame *vmf)
{
        vmf->vmf_sp -= 4;
        suword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp), x);
}

static __inline u_short
POP(struct vm86frame *vmf)
{
        u_short x = fusword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp));

        vmf->vmf_sp += 2;
        return (x);
}

static __inline u_int
POPL(struct vm86frame *vmf)
{
        u_int x = fuword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp));

        vmf->vmf_sp += 4;
        return (x);
}
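
/*
 * These helpers mirror the CPU's own 16-bit/32-bit stack operations on
 * the vm86 stack; since ss:sp names user memory, the transfers go
 * through fusword()/susword() and fuword()/suword().  The PUSHF/POPF
 * emulation below is essentially:
 *
 *      PUSH((vmf->vmf_flags & PUSH_MASK) | PSL_IOPL, vmf);
 *      temp_flags = POP(vmf) & POP_MASK;
 */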
130
131 int
132 vm86_emulate(vmf)
133         struct vm86frame *vmf;
134 {
135         struct vm86_kernel *vm86;
136         caddr_t addr;
137         u_char i_byte;
138         u_int temp_flags;
139         int inc_ip = 1;
140         int retcode = 0;
141
142         /*
143          * pcb_ext contains the address of the extension area, or zero if
144          * the extension is not present.  (This check should not be needed,
145          * as we can't enter vm86 mode until we set up an extension area)
146          */
147         if (curthread->td_pcb->pcb_ext == 0)
148                 return (SIGBUS);
149         vm86 = &curthread->td_pcb->pcb_ext->ext_vm86;
150
151         if (vmf->vmf_eflags & PSL_T)
152                 retcode = SIGTRAP;
153
154         addr = MAKE_ADDR(vmf->vmf_cs, vmf->vmf_ip);
155         i_byte = fubyte(addr);
156         if (i_byte == ADDRESS_SIZE_PREFIX) {
157                 i_byte = fubyte(++addr);
158                 inc_ip++;
159         }
160
161         if (vm86->vm86_has_vme) {
162                 switch (i_byte) {
163                 case OPERAND_SIZE_PREFIX:
164                         i_byte = fubyte(++addr);
165                         inc_ip++;
166                         switch (i_byte) {
167                         case PUSHF:
168                                 if (vmf->vmf_eflags & PSL_VIF)
169                                         PUSHL((vmf->vmf_eflags & PUSH_MASK)
170                                             | PSL_IOPL | PSL_I, vmf);
171                                 else
172                                         PUSHL((vmf->vmf_eflags & PUSH_MASK)
173                                             | PSL_IOPL, vmf);
174                                 vmf->vmf_ip += inc_ip;
175                                 return (0);
176
177                         case POPF:
178                                 temp_flags = POPL(vmf) & POP_MASK;
179                                 vmf->vmf_eflags = (vmf->vmf_eflags & ~POP_MASK)
180                                     | temp_flags | PSL_VM | PSL_I;
181                                 vmf->vmf_ip += inc_ip;
182                                 if (temp_flags & PSL_I) {
183                                         vmf->vmf_eflags |= PSL_VIF;
184                                         if (vmf->vmf_eflags & PSL_VIP)
185                                                 break;
186                                 } else {
187                                         vmf->vmf_eflags &= ~PSL_VIF;
188                                 }
189                                 return (0);
190                         }
191                         break;
192
193                 /* VME faults here if VIP is set, but does not set VIF. */
194                 case STI:
195                         vmf->vmf_eflags |= PSL_VIF;
196                         vmf->vmf_ip += inc_ip;
197                         if ((vmf->vmf_eflags & PSL_VIP) == 0) {
198                                 uprintf("fatal sti\n");
199                                 return (SIGKILL);
200                         }
201                         break;
202
203                 /* VME if no redirection support */
204                 case INTn:
205                         break;
206
207                 /* VME if trying to set PSL_TF, or PSL_I when VIP is set */
208                 case POPF:
209                         temp_flags = POP(vmf) & POP_MASK;
210                         vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
211                             | temp_flags | PSL_VM | PSL_I;
212                         vmf->vmf_ip += inc_ip;
213                         if (temp_flags & PSL_I) {
214                                 vmf->vmf_eflags |= PSL_VIF;
215                                 if (vmf->vmf_eflags & PSL_VIP)
216                                         break;
217                         } else {
218                                 vmf->vmf_eflags &= ~PSL_VIF;
219                         }
220                         return (retcode);
221
222                 /* VME if trying to set PSL_TF, or PSL_I when VIP is set */
223                 case IRET:
224                         vmf->vmf_ip = POP(vmf);
225                         vmf->vmf_cs = POP(vmf);
226                         temp_flags = POP(vmf) & POP_MASK;
227                         vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
228                             | temp_flags | PSL_VM | PSL_I;
229                         if (temp_flags & PSL_I) {
230                                 vmf->vmf_eflags |= PSL_VIF;
231                                 if (vmf->vmf_eflags & PSL_VIP)
232                                         break;
233                         } else {
234                                 vmf->vmf_eflags &= ~PSL_VIF;
235                         }
236                         return (retcode);
237
238                 }
239                 return (SIGBUS);
240         }
241
242         switch (i_byte) {
243         case OPERAND_SIZE_PREFIX:
244                 i_byte = fubyte(++addr);
245                 inc_ip++;
246                 switch (i_byte) {
247                 case PUSHF:
248                         if (vm86->vm86_eflags & PSL_VIF)
249                                 PUSHL((vmf->vmf_flags & PUSH_MASK)
250                                     | PSL_IOPL | PSL_I, vmf);
251                         else
252                                 PUSHL((vmf->vmf_flags & PUSH_MASK)
253                                     | PSL_IOPL, vmf);
254                         vmf->vmf_ip += inc_ip;
255                         return (retcode);
256
257                 case POPF:
258                         temp_flags = POPL(vmf) & POP_MASK;
259                         vmf->vmf_eflags = (vmf->vmf_eflags & ~POP_MASK)
260                             | temp_flags | PSL_VM | PSL_I;
261                         vmf->vmf_ip += inc_ip;
262                         if (temp_flags & PSL_I) {
263                                 vm86->vm86_eflags |= PSL_VIF;
264                                 if (vm86->vm86_eflags & PSL_VIP)
265                                         break;
266                         } else {
267                                 vm86->vm86_eflags &= ~PSL_VIF;
268                         }
269                         return (retcode);
270                 }
271                 return (SIGBUS);
272
273         case CLI:
274                 vm86->vm86_eflags &= ~PSL_VIF;
275                 vmf->vmf_ip += inc_ip;
276                 return (retcode);
277
278         case STI:
279                 /* if there is a pending interrupt, go to the emulator */
280                 vm86->vm86_eflags |= PSL_VIF;
281                 vmf->vmf_ip += inc_ip;
282                 if (vm86->vm86_eflags & PSL_VIP)
283                         break;
284                 return (retcode);
285
286         case PUSHF:
287                 if (vm86->vm86_eflags & PSL_VIF)
288                         PUSH((vmf->vmf_flags & PUSH_MASK)
289                             | PSL_IOPL | PSL_I, vmf);
290                 else
291                         PUSH((vmf->vmf_flags & PUSH_MASK) | PSL_IOPL, vmf);
292                 vmf->vmf_ip += inc_ip;
293                 return (retcode);
294
295         case INTn:
296                 i_byte = fubyte(addr + 1);
297                 if ((vm86->vm86_intmap[i_byte >> 3] & (1 << (i_byte & 7))) != 0)
298                         break;
299                 if (vm86->vm86_eflags & PSL_VIF)
300                         PUSH((vmf->vmf_flags & PUSH_MASK)
301                             | PSL_IOPL | PSL_I, vmf);
302                 else
303                         PUSH((vmf->vmf_flags & PUSH_MASK) | PSL_IOPL, vmf);
304                 PUSH(vmf->vmf_cs, vmf);
305                 PUSH(vmf->vmf_ip + inc_ip + 1, vmf);    /* increment IP */
306                 GET_VEC(fuword((caddr_t)(i_byte * 4)),
307                      &vmf->vmf_cs, &vmf->vmf_ip);
308                 vmf->vmf_flags &= ~PSL_T;
309                 vm86->vm86_eflags &= ~PSL_VIF;
310                 return (retcode);
311
312         case IRET:
313                 vmf->vmf_ip = POP(vmf);
314                 vmf->vmf_cs = POP(vmf);
315                 temp_flags = POP(vmf) & POP_MASK;
316                 vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
317                     | temp_flags | PSL_VM | PSL_I;
318                 if (temp_flags & PSL_I) {
319                         vm86->vm86_eflags |= PSL_VIF;
320                         if (vm86->vm86_eflags & PSL_VIP)
321                                 break;
322                 } else {
323                         vm86->vm86_eflags &= ~PSL_VIF;
324                 }
325                 return (retcode);
326
327         case POPF:
328                 temp_flags = POP(vmf) & POP_MASK;
329                 vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
330                     | temp_flags | PSL_VM | PSL_I;
331                 vmf->vmf_ip += inc_ip;
332                 if (temp_flags & PSL_I) {
333                         vm86->vm86_eflags |= PSL_VIF;
334                         if (vm86->vm86_eflags & PSL_VIP)
335                                 break;
336                 } else {
337                         vm86->vm86_eflags &= ~PSL_VIF;
338                 }
339                 return (retcode);
340         }
341         return (SIGBUS);
342 }
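
/*
 * vm86_emulate() is reached from the fault path when a privileged
 * instruction traps with PSL_VM set.  A sketch of the dispatch (the
 * real code lives in trap(); names and details here are illustrative):
 */
#if 0
        if ((frame.tf_eflags & PSL_VM) &&
            (type == T_PROTFLT || type == T_STKFLT)) {
                i = vm86_emulate((struct vm86frame *)&frame);
                if (i == 0)
                        goto out;       /* handled, resume the vm86 code */
                /* otherwise post signal i (SIGBUS, SIGTRAP, ...) */
        }
#endif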

#define PGTABLE_SIZE    ((1024 + 64) * 1024 / PAGE_SIZE)
#define INTMAP_SIZE     32
#define IOMAP_SIZE      ctob(IOPAGES)
#define TSS_SIZE \
        (sizeof(struct pcb_ext) - sizeof(struct segment_descriptor) + \
         INTMAP_SIZE + IOMAP_SIZE + 1)

struct vm86_layout {
        pt_entry_t      vml_pgtbl[PGTABLE_SIZE];
        struct  pcb vml_pcb;
        struct  pcb_ext vml_ext;
        char    vml_intmap[INTMAP_SIZE];
        char    vml_iomap[IOMAP_SIZE];
        char    vml_iomap_trailer;
};

void
vm86_initialize(void)
{
        int i;
        u_int *addr;
        struct vm86_layout *vml = (struct vm86_layout *)vm86paddr;
        struct pcb *pcb;
        struct pcb_ext *ext;
        struct soft_segment_descriptor ssd = {
                0,                      /* segment base address (overwritten) */
                0,                      /* length (overwritten) */
                SDT_SYS386TSS,          /* segment type */
                0,                      /* priority level */
                1,                      /* descriptor present */
                0, 0,
                0,                      /* default 16 size */
                0                       /* granularity */
        };

        /*
         * this should be a compile time error, but cpp doesn't grok sizeof().
         */
        if (sizeof(struct vm86_layout) > ctob(3))
                panic("struct vm86_layout exceeds space allocated in locore.s");

        /*
         * Below is the memory layout that we use for the vm86 region.
         *
         * +--------+
         * |        |
         * |        |
         * | page 0 |
         * |        | +--------+
         * |        | | stack  |
         * +--------+ +--------+ <--------- vm86paddr
         * |        | |Page Tbl| 1M + 64K = 272 entries = 1088 bytes
         * |        | +--------+
         * |        | |  PCB   | size: ~240 bytes
         * | page 1 | |PCB Ext | size: ~140 bytes (includes TSS)
         * |        | +--------+
         * |        | |int map |
         * |        | +--------+
         * +--------+ |        |
         * | page 2 | |  I/O   |
         * +--------+ | bitmap |
         * | page 3 | |        |
         * |        | +--------+
         * +--------+
         */

        /*
         * A rudimentary PCB must be installed, in order to get to the
         * PCB extension area.  We use the PCB area as a scratchpad for
         * data storage, the layout of which is shown below.
         *
         * pcb_esi      = new PTD entry 0
         * pcb_ebp      = pointer to frame on vm86 stack
         * pcb_esp      =    stack frame pointer at time of switch
         * pcb_ebx      = va of vm86 page table
         * pcb_eip      =    argument pointer to initial call
         * pcb_spare[0] =    saved TSS descriptor, word 0
         * pcb_spare[1] =    saved TSS descriptor, word 1
         */
#define new_ptd         pcb_esi
#define vm86_frame      pcb_ebp
#define pgtable_va      pcb_ebx

        pcb = &vml->vml_pcb;
        ext = &vml->vml_ext;

        bzero(pcb, sizeof(struct pcb));
        pcb->new_ptd = vm86pa | PG_V | PG_RW | PG_U;
        pcb->vm86_frame = (pt_entry_t)vm86paddr - sizeof(struct vm86frame);
        pcb->pgtable_va = (vm_offset_t)vm86paddr;
        pcb->pcb_ext = ext;

        bzero(ext, sizeof(struct pcb_ext));
        ext->ext_tss.tss_esp0 = (vm_offset_t)vm86paddr;
        ext->ext_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
        ext->ext_tss.tss_ioopt =
                ((u_int)vml->vml_iomap - (u_int)&ext->ext_tss) << 16;
        ext->ext_iomap = vml->vml_iomap;
        ext->ext_vm86.vm86_intmap = vml->vml_intmap;

        if (cpu_feature & CPUID_VME)
                ext->ext_vm86.vm86_has_vme = (rcr4() & CR4_VME ? 1 : 0);

        addr = (u_int *)ext->ext_vm86.vm86_intmap;
        for (i = 0; i < (INTMAP_SIZE + IOMAP_SIZE) / sizeof(u_int); i++)
                *addr++ = 0;
        vml->vml_iomap_trailer = 0xff;

        ssd.ssd_base = (u_int)&ext->ext_tss;
        ssd.ssd_limit = TSS_SIZE - 1;
        ssdtosd(&ssd, &ext->ext_tssd);

        vm86pcb = pcb;

#if 0
        /*
         * use whatever is leftover of the vm86 page layout as a
         * message buffer so we can capture early output.
         */
        msgbufinit((vm_offset_t)vm86paddr + sizeof(struct vm86_layout),
            ctob(3) - sizeof(struct vm86_layout));
#endif
}

vm_offset_t
vm86_getpage(struct vm86context *vmc, int pagenum)
{
        int i;

        for (i = 0; i < vmc->npages; i++)
                if (vmc->pmap[i].pte_num == pagenum)
                        return (vmc->pmap[i].kva);
        return (0);
}

vm_offset_t
vm86_addpage(struct vm86context *vmc, int pagenum, vm_offset_t kva)
{
        int i, flags = 0;

        for (i = 0; i < vmc->npages; i++)
                if (vmc->pmap[i].pte_num == pagenum)
                        goto bad;

        if (vmc->npages == VM86_PMAPSIZE)
                goto bad;                       /* XXX grow map? */

        if (kva == 0) {
                kva = (vm_offset_t)malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
                flags = VMAP_MALLOC;
        }

        i = vmc->npages++;
        vmc->pmap[i].flags = flags;
        vmc->pmap[i].kva = kva;
        vmc->pmap[i].pte_num = pagenum;
        return (kva);
bad:
        panic("vm86_addpage: not enough room, or overlap");
}
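
/*
 * Typical use, a sketch modeled on the BIOS memory probe in machdep.c:
 * map guest page 1 before a data call, letting vm86_addpage() allocate
 * the backing kernel page ("buf" here is illustrative):
 */
#if 0
        struct vm86context vmc;
        void *buf;

        vmc.npages = 0;
        buf = (void *)vm86_addpage(&vmc, 1, 0);
#endif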

static void
vm86_initflags(struct vm86frame *vmf)
{
        int eflags = vmf->vmf_eflags;
        struct vm86_kernel *vm86 = &curthread->td_pcb->pcb_ext->ext_vm86;

        if (vm86->vm86_has_vme) {
                eflags = (vmf->vmf_eflags & ~VME_USERCHANGE) |
                    (eflags & VME_USERCHANGE) | PSL_VM;
        } else {
                vm86->vm86_eflags = eflags;     /* save VIF, VIP */
                eflags = (vmf->vmf_eflags & ~VM_USERCHANGE) |
                    (eflags & VM_USERCHANGE) | PSL_VM;
        }
        vmf->vmf_eflags = eflags | PSL_VM;
}

/*
 * called from vm86_bioscall, while in vm86 address space, to finalize setup.
 */
void
vm86_prepcall(struct vm86frame vmf)
{
        uintptr_t addr[] = { 0xA00, 0x1000 };   /* code, stack */
        u_char intcall[] = {
                CLI, INTn, 0x00, STI, HLT
        };

        if ((vmf.vmf_trapno & PAGE_MASK) <= 0xff) {
                /* interrupt call requested */
                intcall[2] = (u_char)(vmf.vmf_trapno & 0xff);
                memcpy((void *)addr[0], (void *)intcall, sizeof(intcall));
                vmf.vmf_ip = addr[0];
                vmf.vmf_cs = 0;
        }
        vmf.vmf_sp = addr[1] - 2;               /* keep aligned */
        vmf.kernel_fs = vmf.kernel_es = vmf.kernel_ds = 0;
        vmf.vmf_ss = 0;
        vmf.vmf_eflags = PSL_VIF | PSL_VM | PSL_USER;
        vm86_initflags(&vmf);
}

/*
 * vm86 trap handler; determines whether routine succeeded or not.
 * Called while in vm86 space, returns to calling process.
 *
 * A MP lock ref is held on entry from trap() and must be released prior
 * to returning to the VM86 call.
 */
void
vm86_trap(struct vm86frame *vmf)
{
        caddr_t addr;

        /* "should not happen" */
        if ((vmf->vmf_eflags & PSL_VM) == 0)
                panic("vm86_trap called, but not in vm86 mode");

        addr = MAKE_ADDR(vmf->vmf_cs, vmf->vmf_ip);
        if (*(u_char *)addr == HLT)
                vmf->vmf_trapno = vmf->vmf_eflags & PSL_C;
        else
                vmf->vmf_trapno = vmf->vmf_trapno << 16;

        rel_mplock();
        vm86_biosret(vmf);
}

int
vm86_intcall(int intnum, struct vm86frame *vmf)
{
        int error;

        if (intnum < 0 || intnum > 0xff)
                return (EINVAL);

        crit_enter();
        ASSERT_MP_LOCK_HELD();

        vmf->vmf_trapno = intnum;
        error = vm86_bioscall(vmf);
#if 0
        /*
         * removed.  This causes more problems than it solves; we will
         * have to find another way to detect inappropriate 8254 writes
         * from the BIOS.
         */
        timer_restore();
#endif
        crit_exit();
        return (error);
}
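
/*
 * Example, a sketch modeled on getmemsize() in machdep.c (register
 * field names come from <machine/vm86.h>): query extended memory with
 * BIOS int 15h, AH=88h.
 */
#if 0
        struct vm86frame vmf;
        int extmem;

        bzero(&vmf, sizeof(vmf));
        vmf.vmf_ah = 0x88;
        if (vm86_intcall(0x15, &vmf) == 0)
                extmem = vmf.vmf_ax;    /* KB of memory above 1M */
#endif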

/*
 * struct vm86context contains the page table to use when making
 * vm86 calls.  If intnum is a valid interrupt number (0-255), then
 * the "interrupt trampoline" will be used, otherwise we use the
 * caller's cs:ip routine.
 */
int
vm86_datacall(int intnum, struct vm86frame *vmf, struct vm86context *vmc)
{
        pt_entry_t *pte = (pt_entry_t *)vm86paddr;
        u_int page;
        int i, entry, retval;

        crit_enter();
        ASSERT_MP_LOCK_HELD();

        for (i = 0; i < vmc->npages; i++) {
                page = vtophys(vmc->pmap[i].kva & PG_FRAME);
                entry = vmc->pmap[i].pte_num;
                vmc->pmap[i].old_pte = pte[entry];
                pte[entry] = page | PG_V | PG_RW | PG_U;
        }

        vmf->vmf_trapno = intnum;
        retval = vm86_bioscall(vmf);

        for (i = 0; i < vmc->npages; i++) {
                entry = vmc->pmap[i].pte_num;
                pte[entry] = vmc->pmap[i].old_pte;
        }
        crit_exit();
        return (retval);
}
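
/*
 * A data-call sketch modeled on the int 15h/E820 loop in machdep.c:
 * "smap" was mapped with vm86_addpage(), and vm86_getptr() converts
 * its kva into the es:di pair the BIOS expects.  SMAP_SIG and struct
 * bios_smap are assumptions borrowed from that caller.
 */
#if 0
        vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);
        vmf.vmf_ebx = 0;
        do {
                vmf.vmf_eax = 0xE820;
                vmf.vmf_edx = SMAP_SIG;         /* 'SMAP' */
                vmf.vmf_ecx = sizeof(struct bios_smap);
                if (vm86_datacall(0x15, &vmf, &vmc) != 0)
                        break;
        } while (vmf.vmf_ebx != 0);
#endif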

vm_offset_t
vm86_getaddr(struct vm86context *vmc, u_short sel, u_short off)
{
        int i, page;
        vm_offset_t addr;

        addr = (vm_offset_t)MAKE_ADDR(sel, off);
        page = addr >> PAGE_SHIFT;
        for (i = 0; i < vmc->npages; i++)
                if (page == vmc->pmap[i].pte_num)
                        return (vmc->pmap[i].kva + (addr & PAGE_MASK));
        return (0);
}

int
vm86_getptr(struct vm86context *vmc, vm_offset_t kva, u_short *sel,
    u_short *off)
{
        int i;

        for (i = 0; i < vmc->npages; i++)
                if (kva >= vmc->pmap[i].kva &&
                    kva < vmc->pmap[i].kva + PAGE_SIZE) {
                        *off = kva - vmc->pmap[i].kva;
                        *sel = vmc->pmap[i].pte_num << 8;
                        return (1);
                }
        return (0);
}
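
/*
 * The selector math above follows from the layout of the vm86 page
 * table: guest page N sits at guest-linear address N << PAGE_SHIFT, so
 * its real-mode segment is (N << PAGE_SHIFT) >> 4 == N << 8.  A kva
 * inside guest page 1 therefore yields sel 0x100, off = kva & PAGE_MASK.
 */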

int
vm86_sysarch(struct proc *p, char *args)
{
        int error = 0;
        struct i386_vm86_args ua;
        struct vm86_kernel *vm86;

        if ((error = copyin(args, &ua, sizeof(struct i386_vm86_args))) != 0)
                return (error);

        if (p->p_thread->td_pcb->pcb_ext == 0)
                if ((error = i386_extend_pcb(p)) != 0)
                        return (error);
        vm86 = &p->p_thread->td_pcb->pcb_ext->ext_vm86;

        switch (ua.sub_op) {
        case VM86_INIT: {
                struct vm86_init_args sa;

                if ((error = copyin(ua.sub_args, &sa, sizeof(sa))) != 0)
                        return (error);
                if (cpu_feature & CPUID_VME)
                        vm86->vm86_has_vme = (rcr4() & CR4_VME ? 1 : 0);
                else
                        vm86->vm86_has_vme = 0;
                vm86->vm86_inited = 1;
                vm86->vm86_debug = sa.debug;
                bcopy(&sa.int_map, vm86->vm86_intmap, 32);
                }
                break;

#if 0
        case VM86_SET_VME: {
                struct vm86_vme_args sa;

                if ((cpu_feature & CPUID_VME) == 0)
                        return (ENODEV);

                if ((error = copyin(ua.sub_args, &sa, sizeof(sa))) != 0)
                        return (error);
                if (sa.state)
                        load_cr4(rcr4() | CR4_VME);
                else
                        load_cr4(rcr4() & ~CR4_VME);
                }
                break;
#endif

        case VM86_GET_VME: {
                struct vm86_vme_args sa;

                sa.state = (rcr4() & CR4_VME ? 1 : 0);
                error = copyout(&sa, ua.sub_args, sizeof(sa));
                }
                break;

        case VM86_INTCALL: {
                struct vm86_intcall_args sa;

                if ((error = suser_cred(p->p_ucred, 0)) != 0)
                        return (error);
                if ((error = copyin(ua.sub_args, &sa, sizeof(sa))) != 0)
                        return (error);
                if ((error = vm86_intcall(sa.intnum, &sa.vmf)) != 0)
                        return (error);
                error = copyout(&sa, ua.sub_args, sizeof(sa));
                }
                break;

        default:
                error = EINVAL;
        }
        return (error);
}
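
/*
 * From userland these sub-ops arrive via sysarch(I386_VM86, ...); a
 * sketch of initializing vm86 state (the i386_vm86() libc wrapper and
 * the exact argument layout are assumptions per <machine/sysarch.h>):
 */
#if 0
        struct vm86_init_args va;

        bzero(&va, sizeof(va));         /* no debugging, empty int map */
        if (i386_vm86(VM86_INIT, &va) < 0)
                err(1, "VM86_INIT");
#endif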