More cleanups to make ports work better.
sys/i386/i386/vm86.c
/*-
 * Copyright (c) 1997 Jonathan Lemon
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/vm86.c,v 1.31.2.2 2001/10/05 06:18:55 peter Exp $
 * $DragonFly: src/sys/i386/i386/Attic/vm86.c,v 1.9 2003/11/03 22:50:11 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <sys/user.h>
#include <sys/thread2.h>

#include <machine/md_var.h>
#include <machine/pcb_ext.h>    /* pcb.h included via sys/user.h */
#include <machine/psl.h>
#include <machine/specialreg.h>
#include <machine/sysarch.h>

extern int i386_extend_pcb      (struct proc *);
extern int vm86pa;
extern struct pcb *vm86pcb;

extern int vm86_bioscall(struct vm86frame *);
extern void vm86_biosret(struct vm86frame *);

void vm86_prepcall(struct vm86frame);

struct system_map {
        int             type;
        vm_offset_t     start;
        vm_offset_t     end;
};

#define HLT     0xf4
#define CLI     0xfa
#define STI     0xfb
#define PUSHF   0x9c
#define POPF    0x9d
#define INTn    0xcd
#define IRET    0xcf
#define CALLm   0xff
#define OPERAND_SIZE_PREFIX     0x66
#define ADDRESS_SIZE_PREFIX     0x67
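
/*
 * PUSH_MASK removes the bits that must never appear in a flags image
 * pushed on the vm86 stack (PSL_VM and PSL_RF; PSL_I is re-added from
 * the virtual interrupt state).  POP_MASK removes the bits a vm86
 * guest is not allowed to change with POPF or IRET.
 */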
#define PUSH_MASK       ~(PSL_VM | PSL_RF | PSL_I)
#define POP_MASK        ~(PSL_VIP | PSL_VIF | PSL_VM | PSL_RF | PSL_IOPL)

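/*
 * Helpers for real-mode addressing: MAKE_ADDR, GET_VEC and MAKE_VEC
 * do segment:offset arithmetic, while PUSH/PUSHL and POP/POPL move
 * 16 and 32 bit values across the vm86 stack using the fuword/suword
 * family of accessors, since the stack lives in user-mapped memory.
 */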
static __inline caddr_t
MAKE_ADDR(u_short sel, u_short off)
{
        return ((caddr_t)((sel << 4) + off));
}

static __inline void
GET_VEC(u_int vec, u_short *sel, u_short *off)
{
        *sel = vec >> 16;
        *off = vec & 0xffff;
}

static __inline u_int
MAKE_VEC(u_short sel, u_short off)
{
        return ((sel << 16) | off);
}

static __inline void
PUSH(u_short x, struct vm86frame *vmf)
{
        vmf->vmf_sp -= 2;
        susword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp), x);
}

static __inline void
PUSHL(u_int x, struct vm86frame *vmf)
{
        vmf->vmf_sp -= 4;
        suword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp), x);
}

static __inline u_short
POP(struct vm86frame *vmf)
{
        u_short x = fusword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp));

        vmf->vmf_sp += 2;
        return (x);
}

static __inline u_int
POPL(struct vm86frame *vmf)
{
        u_int x = fuword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp));

        vmf->vmf_sp += 4;
        return (x);
}

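/*
 * Emulate the privileged instruction that caused a fault out of vm86
 * mode.  Returns 0 if the instruction was handled, otherwise a signal
 * number to deliver to the process (SIGTRAP when single-stepping,
 * SIGKILL for a fatal STI, SIGBUS for anything we cannot emulate).
 */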
int
vm86_emulate(struct vm86frame *vmf)
{
        struct vm86_kernel *vm86;
        caddr_t addr;
        u_char i_byte;
        u_int temp_flags;
        int inc_ip = 1;
        int retcode = 0;

        /*
         * pcb_ext contains the address of the extension area, or zero if
         * the extension is not present.  (This check should not be needed,
         * as we can't enter vm86 mode until we set up an extension area)
         */
        if (curthread->td_pcb->pcb_ext == 0)
                return (SIGBUS);
        vm86 = &curthread->td_pcb->pcb_ext->ext_vm86;

        if (vmf->vmf_eflags & PSL_T)
                retcode = SIGTRAP;

        addr = MAKE_ADDR(vmf->vmf_cs, vmf->vmf_ip);
        i_byte = fubyte(addr);
        if (i_byte == ADDRESS_SIZE_PREFIX) {
                i_byte = fubyte(++addr);
                inc_ip++;
        }

        if (vm86->vm86_has_vme) {
                switch (i_byte) {
                case OPERAND_SIZE_PREFIX:
                        i_byte = fubyte(++addr);
                        inc_ip++;
                        switch (i_byte) {
                        case PUSHF:
                                if (vmf->vmf_eflags & PSL_VIF)
                                        PUSHL((vmf->vmf_eflags & PUSH_MASK)
                                            | PSL_IOPL | PSL_I, vmf);
                                else
                                        PUSHL((vmf->vmf_eflags & PUSH_MASK)
                                            | PSL_IOPL, vmf);
                                vmf->vmf_ip += inc_ip;
                                return (retcode);

                        case POPF:
                                temp_flags = POPL(vmf) & POP_MASK;
                                vmf->vmf_eflags = (vmf->vmf_eflags & ~POP_MASK)
                                    | temp_flags | PSL_VM | PSL_I;
                                vmf->vmf_ip += inc_ip;
                                if (temp_flags & PSL_I) {
                                        vmf->vmf_eflags |= PSL_VIF;
                                        if (vmf->vmf_eflags & PSL_VIP)
                                                break;
                                } else {
                                        vmf->vmf_eflags &= ~PSL_VIF;
                                }
                                return (retcode);
                        }
                        break;

                /* VME faults here if VIP is set, but does not set VIF. */
                case STI:
                        vmf->vmf_eflags |= PSL_VIF;
                        vmf->vmf_ip += inc_ip;
                        if ((vmf->vmf_eflags & PSL_VIP) == 0) {
                                uprintf("fatal sti\n");
                                return (SIGKILL);
                        }
                        break;

                /* VME if no redirection support */
                case INTn:
                        break;

                /* VME if trying to set PSL_TF, or PSL_I when VIP is set */
                case POPF:
                        temp_flags = POP(vmf) & POP_MASK;
                        vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
                            | temp_flags | PSL_VM | PSL_I;
                        vmf->vmf_ip += inc_ip;
                        if (temp_flags & PSL_I) {
                                vmf->vmf_eflags |= PSL_VIF;
                                if (vmf->vmf_eflags & PSL_VIP)
                                        break;
                        } else {
                                vmf->vmf_eflags &= ~PSL_VIF;
                        }
                        return (retcode);

                /* VME if trying to set PSL_TF, or PSL_I when VIP is set */
                case IRET:
                        vmf->vmf_ip = POP(vmf);
                        vmf->vmf_cs = POP(vmf);
                        temp_flags = POP(vmf) & POP_MASK;
                        vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
                            | temp_flags | PSL_VM | PSL_I;
                        if (temp_flags & PSL_I) {
                                vmf->vmf_eflags |= PSL_VIF;
                                if (vmf->vmf_eflags & PSL_VIP)
                                        break;
                        } else {
                                vmf->vmf_eflags &= ~PSL_VIF;
                        }
                        return (retcode);

                }
                return (SIGBUS);
        }

        switch (i_byte) {
        case OPERAND_SIZE_PREFIX:
                i_byte = fubyte(++addr);
                inc_ip++;
                switch (i_byte) {
                case PUSHF:
                        if (vm86->vm86_eflags & PSL_VIF)
                                PUSHL((vmf->vmf_flags & PUSH_MASK)
                                    | PSL_IOPL | PSL_I, vmf);
                        else
                                PUSHL((vmf->vmf_flags & PUSH_MASK)
                                    | PSL_IOPL, vmf);
                        vmf->vmf_ip += inc_ip;
                        return (retcode);

                case POPF:
                        temp_flags = POPL(vmf) & POP_MASK;
                        vmf->vmf_eflags = (vmf->vmf_eflags & ~POP_MASK)
                            | temp_flags | PSL_VM | PSL_I;
                        vmf->vmf_ip += inc_ip;
                        if (temp_flags & PSL_I) {
                                vm86->vm86_eflags |= PSL_VIF;
                                if (vm86->vm86_eflags & PSL_VIP)
                                        break;
                        } else {
                                vm86->vm86_eflags &= ~PSL_VIF;
                        }
                        return (retcode);
                }
                return (SIGBUS);

        case CLI:
                vm86->vm86_eflags &= ~PSL_VIF;
                vmf->vmf_ip += inc_ip;
                return (retcode);

        case STI:
                /* if there is a pending interrupt, go to the emulator */
                vm86->vm86_eflags |= PSL_VIF;
                vmf->vmf_ip += inc_ip;
                if (vm86->vm86_eflags & PSL_VIP)
                        break;
                return (retcode);

        case PUSHF:
                if (vm86->vm86_eflags & PSL_VIF)
                        PUSH((vmf->vmf_flags & PUSH_MASK)
                            | PSL_IOPL | PSL_I, vmf);
                else
                        PUSH((vmf->vmf_flags & PUSH_MASK) | PSL_IOPL, vmf);
                vmf->vmf_ip += inc_ip;
                return (retcode);

        case INTn:
                i_byte = fubyte(addr + 1);
                if ((vm86->vm86_intmap[i_byte >> 3] & (1 << (i_byte & 7))) != 0)
                        break;
                if (vm86->vm86_eflags & PSL_VIF)
                        PUSH((vmf->vmf_flags & PUSH_MASK)
                            | PSL_IOPL | PSL_I, vmf);
                else
                        PUSH((vmf->vmf_flags & PUSH_MASK) | PSL_IOPL, vmf);
                PUSH(vmf->vmf_cs, vmf);
                PUSH(vmf->vmf_ip + inc_ip + 1, vmf);    /* increment IP */
                GET_VEC(fuword((caddr_t)(i_byte * 4)),
                     &vmf->vmf_cs, &vmf->vmf_ip);
                vmf->vmf_flags &= ~PSL_T;
                vm86->vm86_eflags &= ~PSL_VIF;
                return (retcode);

        case IRET:
                vmf->vmf_ip = POP(vmf);
                vmf->vmf_cs = POP(vmf);
                temp_flags = POP(vmf) & POP_MASK;
                vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
                    | temp_flags | PSL_VM | PSL_I;
                if (temp_flags & PSL_I) {
                        vm86->vm86_eflags |= PSL_VIF;
                        if (vm86->vm86_eflags & PSL_VIP)
                                break;
                } else {
                        vm86->vm86_eflags &= ~PSL_VIF;
                }
                return (retcode);

        case POPF:
                temp_flags = POP(vmf) & POP_MASK;
                vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
                    | temp_flags | PSL_VM | PSL_I;
                vmf->vmf_ip += inc_ip;
                if (temp_flags & PSL_I) {
                        vm86->vm86_eflags |= PSL_VIF;
                        if (vm86->vm86_eflags & PSL_VIP)
                                break;
                } else {
                        vm86->vm86_eflags &= ~PSL_VIF;
                }
                return (retcode);
        }
        return (SIGBUS);
}

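/*
 * Sizing of the statically allocated vm86 area: a page table covering
 * the low 1M + 64K, plus the interrupt redirection bitmap and the I/O
 * permission bitmap.  The TSS limit (TSS_SIZE - 1) spans the TSS
 * embedded in pcb_ext plus both bitmaps and the 0xff trailer byte.
 */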
#define PGTABLE_SIZE    ((1024 + 64) * 1024 / PAGE_SIZE)
#define INTMAP_SIZE     32
#define IOMAP_SIZE      ctob(IOPAGES)
#define TSS_SIZE \
        (sizeof(struct pcb_ext) - sizeof(struct segment_descriptor) + \
         INTMAP_SIZE + IOMAP_SIZE + 1)

struct vm86_layout {
        pt_entry_t      vml_pgtbl[PGTABLE_SIZE];
        struct  pcb vml_pcb;
        struct  pcb_ext vml_ext;
        char    vml_intmap[INTMAP_SIZE];
        char    vml_iomap[IOMAP_SIZE];
        char    vml_iomap_trailer;
};

void
vm86_initialize(void)
{
        int i;
        u_int *addr;
        struct vm86_layout *vml = (struct vm86_layout *)vm86paddr;
        struct pcb *pcb;
        struct pcb_ext *ext;
        struct soft_segment_descriptor ssd = {
                0,                      /* segment base address (overwritten) */
                0,                      /* length (overwritten) */
                SDT_SYS386TSS,          /* segment type */
                0,                      /* priority level */
                1,                      /* descriptor present */
                0, 0,
                0,                      /* default 16 size */
                0                       /* granularity */
        };

        /*
         * this should be a compile time error, but cpp doesn't grok sizeof().
         */
        if (sizeof(struct vm86_layout) > ctob(3))
                panic("struct vm86_layout exceeds space allocated in locore.s");

        /*
         * Below is the memory layout that we use for the vm86 region.
         *
         * +--------+
         * |        |
         * |        |
         * | page 0 |
         * |        | +--------+
         * |        | | stack  |
         * +--------+ +--------+ <--------- vm86paddr
         * |        | |Page Tbl| 1M + 64K = 272 entries = 1088 bytes
         * |        | +--------+
         * |        | |  PCB   | size: ~240 bytes
         * | page 1 | |PCB Ext | size: ~140 bytes (includes TSS)
         * |        | +--------+
         * |        | |int map |
         * |        | +--------+
         * +--------+ |        |
         * | page 2 | |  I/O   |
         * +--------+ | bitmap |
         * | page 3 | |        |
         * |        | +--------+
         * +--------+
         */

        /*
         * A rudimentary PCB must be installed, in order to get to the
         * PCB extension area.  We use the PCB area as a scratchpad for
         * data storage, the layout of which is shown below.
         *
         * pcb_esi      = new PTD entry 0
         * pcb_ebp      = pointer to frame on vm86 stack
         * pcb_esp      =    stack frame pointer at time of switch
         * pcb_ebx      = va of vm86 page table
         * pcb_eip      =    argument pointer to initial call
         * pcb_spare[0] =    saved TSS descriptor, word 0
         * pcb_spare[1] =    saved TSS descriptor, word 1
         */
#define new_ptd         pcb_esi
#define vm86_frame      pcb_ebp
#define pgtable_va      pcb_ebx

        pcb = &vml->vml_pcb;
        ext = &vml->vml_ext;

        bzero(pcb, sizeof(struct pcb));
        pcb->new_ptd = vm86pa | PG_V | PG_RW | PG_U;
        pcb->vm86_frame = (pt_entry_t)vm86paddr - sizeof(struct vm86frame);
        pcb->pgtable_va = (vm_offset_t)vm86paddr;
        pcb->pcb_ext = ext;

        bzero(ext, sizeof(struct pcb_ext));
        ext->ext_tss.tss_esp0 = (vm_offset_t)vm86paddr;
        ext->ext_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
        ext->ext_tss.tss_ioopt =
                ((u_int)vml->vml_iomap - (u_int)&ext->ext_tss) << 16;
        ext->ext_iomap = vml->vml_iomap;
        ext->ext_vm86.vm86_intmap = vml->vml_intmap;

        if (cpu_feature & CPUID_VME)
                ext->ext_vm86.vm86_has_vme = (rcr4() & CR4_VME ? 1 : 0);

        addr = (u_int *)ext->ext_vm86.vm86_intmap;
        for (i = 0; i < (INTMAP_SIZE + IOMAP_SIZE) / sizeof(u_int); i++)
                *addr++ = 0;
        vml->vml_iomap_trailer = 0xff;

        ssd.ssd_base = (u_int)&ext->ext_tss;
        ssd.ssd_limit = TSS_SIZE - 1;
        ssdtosd(&ssd, &ext->ext_tssd);

        vm86pcb = pcb;

#if 0
        /*
         * use whatever is leftover of the vm86 page layout as a
         * message buffer so we can capture early output.
         */
        msgbufinit((vm_offset_t)vm86paddr + sizeof(struct vm86_layout),
            ctob(3) - sizeof(struct vm86_layout));
#endif
}

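/*
 * Return the kernel va registered for the given vm86 page number in
 * this context, or 0 if the page has not been added.
 */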
vm_offset_t
vm86_getpage(struct vm86context *vmc, int pagenum)
{
        int i;

        for (i = 0; i < vmc->npages; i++)
                if (vmc->pmap[i].pte_num == pagenum)
                        return (vmc->pmap[i].kva);
        return (0);
}

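/*
 * Back the given vm86 page number with a kernel page.  If kva is 0 a
 * page is allocated and flagged VMAP_MALLOC so it can be released
 * later.  Panics if the page number is already mapped or the map is
 * full.
 */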
vm_offset_t
vm86_addpage(struct vm86context *vmc, int pagenum, vm_offset_t kva)
{
        int i, flags = 0;

        for (i = 0; i < vmc->npages; i++)
                if (vmc->pmap[i].pte_num == pagenum)
                        goto bad;

        if (vmc->npages == VM86_PMAPSIZE)
                goto bad;                       /* XXX grow map? */

        if (kva == 0) {
                kva = (vm_offset_t)malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
                flags = VMAP_MALLOC;
        }

        i = vmc->npages++;
        vmc->pmap[i].flags = flags;
        vmc->pmap[i].kva = kva;
        vmc->pmap[i].pte_num = pagenum;
        return (kva);
bad:
        panic("vm86_addpage: not enough room, or overlap");
}

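/*
 * Sanitize the flags that will be loaded on entry to vm86 mode: only
 * the *_USERCHANGE bits may come from the caller and PSL_VM is forced
 * on.  Without VME support, VIF and VIP are tracked in software via
 * vm86_eflags.
 */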
static void
vm86_initflags(struct vm86frame *vmf)
{
        int eflags = vmf->vmf_eflags;
        struct vm86_kernel *vm86 = &curthread->td_pcb->pcb_ext->ext_vm86;

        if (vm86->vm86_has_vme) {
                eflags = (vmf->vmf_eflags & ~VME_USERCHANGE) |
                    (eflags & VME_USERCHANGE) | PSL_VM;
        } else {
                vm86->vm86_eflags = eflags;     /* save VIF, VIP */
                eflags = (vmf->vmf_eflags & ~VM_USERCHANGE) |
                    (eflags & VM_USERCHANGE) | PSL_VM;
        }
        vmf->vmf_eflags = eflags | PSL_VM;
}

/*
 * called from vm86_bioscall, while in vm86 address space, to finalize setup.
 */
void
vm86_prepcall(struct vm86frame vmf)
{
        uintptr_t addr[] = { 0xA00, 0x1000 };   /* code, stack */
        u_char intcall[] = {
                CLI, INTn, 0x00, STI, HLT
        };

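        /*
         * The trampoline at addr[0] executes "cli; int $nn; sti; hlt".
         * The hlt traps back to vm86_trap() once the BIOS routine
         * returns, which is how completion of the call is detected.
         */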
        if ((vmf.vmf_trapno & PAGE_MASK) <= 0xff) {
                /* interrupt call requested */
                intcall[2] = (u_char)(vmf.vmf_trapno & 0xff);
                memcpy((void *)addr[0], (void *)intcall, sizeof(intcall));
                vmf.vmf_ip = addr[0];
                vmf.vmf_cs = 0;
        }
        vmf.vmf_sp = addr[1] - 2;              /* keep aligned */
        vmf.kernel_fs = vmf.kernel_es = vmf.kernel_ds = 0;
        vmf.vmf_ss = 0;
        vmf.vmf_eflags = PSL_VIF | PSL_VM | PSL_USER;
        vm86_initflags(&vmf);
}

/*
 * vm86 trap handler; determines whether routine succeeded or not.
 * Called while in vm86 space, returns to calling process.
 *
 * A MP lock ref is held on entry from trap() and must be released prior
 * to returning to the VM86 call.
 */
void
vm86_trap(struct vm86frame *vmf)
{
        caddr_t addr;

        /* "should not happen" */
        if ((vmf->vmf_eflags & PSL_VM) == 0)
                panic("vm86_trap called, but not in vm86 mode");

        addr = MAKE_ADDR(vmf->vmf_cs, vmf->vmf_ip);
        if (*(u_char *)addr == HLT)
                vmf->vmf_trapno = vmf->vmf_eflags & PSL_C;
        else
                vmf->vmf_trapno = vmf->vmf_trapno << 16;

        rel_mplock();
        vm86_biosret(vmf);
}

int
vm86_intcall(int intnum, struct vm86frame *vmf)
{
        int error;

        if (intnum < 0 || intnum > 0xff)
                return (EINVAL);

        crit_enter();
        ASSERT_MP_LOCK_HELD();

        vmf->vmf_trapno = intnum;
        error = vm86_bioscall(vmf);
        crit_exit();
        return (error);
}
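
/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * such as a console or video driver could invoke a BIOS service this
 * way, assuming the general-register fields (e.g. vmf_eax) provided by
 * <machine/vm86.h>:
 *
 *      struct vm86frame vmf;
 *      int mode;
 *
 *      bzero(&vmf, sizeof(vmf));
 *      vmf.vmf_eax = 0x0f00;           (INT 10h AH=0Fh: get video mode)
 *      if (vm86_intcall(0x10, &vmf) == 0)
 *              mode = vmf.vmf_eax & 0xff;
 */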

/*
 * struct vm86context contains the page table to use when making
 * vm86 calls.  If intnum is a valid interrupt number (0-255), then
 * the "interrupt trampoline" will be used, otherwise we use the
 * caller's cs:ip routine.
 */
int
vm86_datacall(int intnum, struct vm86frame *vmf, struct vm86context *vmc)
{
        pt_entry_t *pte = (pt_entry_t *)vm86paddr;
        u_int page;
        int i, entry, retval;

        crit_enter();
        ASSERT_MP_LOCK_HELD();

        for (i = 0; i < vmc->npages; i++) {
                page = vtophys(vmc->pmap[i].kva & PG_FRAME);
                entry = vmc->pmap[i].pte_num;
                vmc->pmap[i].old_pte = pte[entry];
                pte[entry] = page | PG_V | PG_RW | PG_U;
        }

        vmf->vmf_trapno = intnum;
        retval = vm86_bioscall(vmf);

        for (i = 0; i < vmc->npages; i++) {
                entry = vmc->pmap[i].pte_num;
                pte[entry] = vmc->pmap[i].old_pte;
        }
        crit_exit();
        return (retval);
}

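/*
 * Translate a vm86 sel:off address into the kernel va that backs it
 * in this context, or 0 if the page is not mapped.
 */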
vm_offset_t
vm86_getaddr(struct vm86context *vmc, u_short sel, u_short off)
{
        int i, page;
        vm_offset_t addr;

        addr = (vm_offset_t)MAKE_ADDR(sel, off);
        page = addr >> PAGE_SHIFT;
        for (i = 0; i < vmc->npages; i++)
                if (page == vmc->pmap[i].pte_num)
                        return (vmc->pmap[i].kva + (addr & PAGE_MASK));
        return (0);
}

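/*
 * Inverse of vm86_getaddr(): convert a kernel va into a vm86 sel:off
 * pair.  Returns 1 on success, 0 if the va is not part of the context.
 */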
int
vm86_getptr(struct vm86context *vmc, vm_offset_t kva, u_short *sel,
    u_short *off)
{
        int i;

        for (i = 0; i < vmc->npages; i++)
                if (kva >= vmc->pmap[i].kva &&
                    kva < vmc->pmap[i].kva + PAGE_SIZE) {
                        *off = kva - vmc->pmap[i].kva;
                        *sel = vmc->pmap[i].pte_num << 8;
                        return (1);
                }
        return (0);
}

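/*
 * sysarch(2) backend for the vm86 sub-operations (VM86_INIT,
 * VM86_GET_VME, VM86_INTCALL).  The PCB extension area is created
 * on first use.
 */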
int
vm86_sysarch(struct proc *p, char *args)
{
        int error = 0;
        struct i386_vm86_args ua;
        struct vm86_kernel *vm86;

        if ((error = copyin(args, &ua, sizeof(struct i386_vm86_args))) != 0)
                return (error);

        if (p->p_thread->td_pcb->pcb_ext == 0)
                if ((error = i386_extend_pcb(p)) != 0)
                        return (error);
        vm86 = &p->p_thread->td_pcb->pcb_ext->ext_vm86;

        switch (ua.sub_op) {
        case VM86_INIT: {
                struct vm86_init_args sa;

                if ((error = copyin(ua.sub_args, &sa, sizeof(sa))) != 0)
                        return (error);
                if (cpu_feature & CPUID_VME)
                        vm86->vm86_has_vme = (rcr4() & CR4_VME ? 1 : 0);
                else
                        vm86->vm86_has_vme = 0;
                vm86->vm86_inited = 1;
                vm86->vm86_debug = sa.debug;
                bcopy(&sa.int_map, vm86->vm86_intmap, 32);
                }
                break;

#if 0
        case VM86_SET_VME: {
                struct vm86_vme_args sa;

                if ((cpu_feature & CPUID_VME) == 0)
                        return (ENODEV);

                if ((error = copyin(ua.sub_args, &sa, sizeof(sa))) != 0)
                        return (error);
                if (sa.state)
                        load_cr4(rcr4() | CR4_VME);
                else
                        load_cr4(rcr4() & ~CR4_VME);
                }
                break;
#endif

        case VM86_GET_VME: {
                struct vm86_vme_args sa;

                sa.state = (rcr4() & CR4_VME ? 1 : 0);
                error = copyout(&sa, ua.sub_args, sizeof(sa));
                }
                break;

        case VM86_INTCALL: {
                struct vm86_intcall_args sa;

                if ((error = suser_cred(p->p_ucred, 0)))
                        return (error);
                if ((error = copyin(ua.sub_args, &sa, sizeof(sa))))
                        return (error);
                if ((error = vm86_intcall(sa.intnum, &sa.vmf)))
                        return (error);
                error = copyout(&sa, ua.sub_args, sizeof(sa));
                }
                break;

        default:
                error = EINVAL;
        }
        return (error);
}