/*-
 * Copyright (c) 1997 Jonathan Lemon
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/vm86.c,v 1.31.2.2 2001/10/05 06:18:55 peter Exp $
 * $DragonFly: src/sys/platform/pc32/i386/vm86.c,v 1.6 2003/07/06 21:23:48 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <sys/user.h>

#include <machine/md_var.h>
#include <machine/pcb_ext.h>	/* pcb.h included via sys/user.h */
#include <machine/psl.h>
#include <machine/specialreg.h>
#include <machine/sysarch.h>

extern int i386_extend_pcb	__P((struct proc *));
extern int vm86paddr, vm86pa;
extern struct pcb *vm86pcb;

extern int vm86_bioscall(struct vm86frame *);
extern void vm86_biosret(struct vm86frame *);

void vm86_prepcall(struct vm86frame);

struct system_map {
	int		type;
	vm_offset_t	start;
	vm_offset_t	end;
};

#define HLT	0xf4
#define CLI	0xfa
#define STI	0xfb
#define PUSHF	0x9c
#define POPF	0x9d
#define INTn	0xcd
#define IRET	0xcf
#define CALLm	0xff
#define OPERAND_SIZE_PREFIX	0x66
#define ADDRESS_SIZE_PREFIX	0x67
#define PUSH_MASK	~(PSL_VM | PSL_RF | PSL_I)
#define POP_MASK	~(PSL_VIP | PSL_VIF | PSL_VM | PSL_RF | PSL_IOPL)
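
/*
 * PUSH_MASK removes PSL_VM, PSL_RF and the real PSL_I from the flags image
 * an emulated PUSHF pushes; the emulator substitutes the virtual interrupt
 * flag (PSL_VIF) for PSL_I instead.  POP_MASK keeps an emulated POPF/IRET
 * from directly changing PSL_VIP, PSL_VIF, PSL_VM, PSL_RF or the IOPL bits;
 * those are preserved from the current flags.
 */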

static __inline caddr_t
MAKE_ADDR(u_short sel, u_short off)
{
	return ((caddr_t)((sel << 4) + off));
}

static __inline void
GET_VEC(u_int vec, u_short *sel, u_short *off)
{
	*sel = vec >> 16;
	*off = vec & 0xffff;
}

static __inline u_int
MAKE_VEC(u_short sel, u_short off)
{
	return ((sel << 16) | off);
}
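
/*
 * Worked example of the real-mode arithmetic above: segment:offset
 * C000:0010 yields linear address (0xc000 << 4) + 0x10 = 0xc0010, and
 * MAKE_VEC(0xc000, 0x0010) packs the pair into the u_int 0xc0000010,
 * the same layout an IVT entry uses (segment in the high word, offset
 * in the low word).
 */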

static __inline void
PUSH(u_short x, struct vm86frame *vmf)
{
	vmf->vmf_sp -= 2;
	susword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp), x);
}

static __inline void
PUSHL(u_int x, struct vm86frame *vmf)
{
	vmf->vmf_sp -= 4;
	suword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp), x);
}

static __inline u_short
POP(struct vm86frame *vmf)
{
	u_short x = fusword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp));

	vmf->vmf_sp += 2;
	return (x);
}

static __inline u_int
POPL(struct vm86frame *vmf)
{
	u_int x = fuword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp));

	vmf->vmf_sp += 4;
	return (x);
}
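
/*
 * The PUSH/POP helpers above go through the fault-safe user-memory
 * accessors (susword/suword/fusword/fuword) because the vm86 stack lives
 * in user-mapped memory; a bad SS:SP faults harmlessly in the accessor
 * rather than in the kernel proper.
 */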

int
vm86_emulate(vmf)
	struct vm86frame *vmf;
{
	struct vm86_kernel *vm86;
	caddr_t addr;
	u_char i_byte;
	u_int temp_flags;
	int inc_ip = 1;
	int retcode = 0;

	/*
	 * pcb_ext contains the address of the extension area, or zero if
	 * the extension is not present.  (This check should not be needed,
	 * as we can't enter vm86 mode until we set up an extension area)
	 */
	if (curthread->td_pcb->pcb_ext == 0)
		return (SIGBUS);
	vm86 = &curthread->td_pcb->pcb_ext->ext_vm86;

	if (vmf->vmf_eflags & PSL_T)
		retcode = SIGTRAP;

	addr = MAKE_ADDR(vmf->vmf_cs, vmf->vmf_ip);
	i_byte = fubyte(addr);
	if (i_byte == ADDRESS_SIZE_PREFIX) {
		i_byte = fubyte(++addr);
		inc_ip++;
	}

	if (vm86->vm86_has_vme) {
		switch (i_byte) {
		case OPERAND_SIZE_PREFIX:
			i_byte = fubyte(++addr);
			inc_ip++;
			switch (i_byte) {
			case PUSHF:
				if (vmf->vmf_eflags & PSL_VIF)
					PUSHL((vmf->vmf_eflags & PUSH_MASK)
					    | PSL_IOPL | PSL_I, vmf);
				else
					PUSHL((vmf->vmf_eflags & PUSH_MASK)
					    | PSL_IOPL, vmf);
				vmf->vmf_ip += inc_ip;
				return (0);

			case POPF:
				temp_flags = POPL(vmf) & POP_MASK;
				vmf->vmf_eflags = (vmf->vmf_eflags & ~POP_MASK)
				    | temp_flags | PSL_VM | PSL_I;
				vmf->vmf_ip += inc_ip;
				if (temp_flags & PSL_I) {
					vmf->vmf_eflags |= PSL_VIF;
					if (vmf->vmf_eflags & PSL_VIP)
						break;
				} else {
					vmf->vmf_eflags &= ~PSL_VIF;
				}
				return (0);
			}
			break;

		/* VME faults here if VIP is set, but does not set VIF. */
		case STI:
			vmf->vmf_eflags |= PSL_VIF;
			vmf->vmf_ip += inc_ip;
			if ((vmf->vmf_eflags & PSL_VIP) == 0) {
				uprintf("fatal sti\n");
				return (SIGKILL);
			}
			break;

		/* VME if no redirection support */
		case INTn:
			break;

		/* VME if trying to set PSL_TF, or PSL_I when VIP is set */
		case POPF:
			temp_flags = POP(vmf) & POP_MASK;
			vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
			    | temp_flags | PSL_VM | PSL_I;
			vmf->vmf_ip += inc_ip;
			if (temp_flags & PSL_I) {
				vmf->vmf_eflags |= PSL_VIF;
				if (vmf->vmf_eflags & PSL_VIP)
					break;
			} else {
				vmf->vmf_eflags &= ~PSL_VIF;
			}
			return (retcode);

		/* VME if trying to set PSL_TF, or PSL_I when VIP is set */
		case IRET:
			vmf->vmf_ip = POP(vmf);
			vmf->vmf_cs = POP(vmf);
			temp_flags = POP(vmf) & POP_MASK;
			vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
			    | temp_flags | PSL_VM | PSL_I;
			if (temp_flags & PSL_I) {
				vmf->vmf_eflags |= PSL_VIF;
				if (vmf->vmf_eflags & PSL_VIP)
					break;
			} else {
				vmf->vmf_eflags &= ~PSL_VIF;
			}
			return (retcode);

		}
		return (SIGBUS);
	}

	switch (i_byte) {
	case OPERAND_SIZE_PREFIX:
		i_byte = fubyte(++addr);
		inc_ip++;
		switch (i_byte) {
		case PUSHF:
			if (vm86->vm86_eflags & PSL_VIF)
				PUSHL((vmf->vmf_flags & PUSH_MASK)
				    | PSL_IOPL | PSL_I, vmf);
			else
				PUSHL((vmf->vmf_flags & PUSH_MASK)
				    | PSL_IOPL, vmf);
			vmf->vmf_ip += inc_ip;
			return (retcode);

		case POPF:
			temp_flags = POPL(vmf) & POP_MASK;
			vmf->vmf_eflags = (vmf->vmf_eflags & ~POP_MASK)
			    | temp_flags | PSL_VM | PSL_I;
			vmf->vmf_ip += inc_ip;
			if (temp_flags & PSL_I) {
				vm86->vm86_eflags |= PSL_VIF;
				if (vm86->vm86_eflags & PSL_VIP)
					break;
			} else {
				vm86->vm86_eflags &= ~PSL_VIF;
			}
			return (retcode);
		}
		return (SIGBUS);

	case CLI:
		vm86->vm86_eflags &= ~PSL_VIF;
		vmf->vmf_ip += inc_ip;
		return (retcode);

	case STI:
		/* if there is a pending interrupt, go to the emulator */
		vm86->vm86_eflags |= PSL_VIF;
		vmf->vmf_ip += inc_ip;
		if (vm86->vm86_eflags & PSL_VIP)
			break;
		return (retcode);

	case PUSHF:
		if (vm86->vm86_eflags & PSL_VIF)
			PUSH((vmf->vmf_flags & PUSH_MASK)
			    | PSL_IOPL | PSL_I, vmf);
		else
			PUSH((vmf->vmf_flags & PUSH_MASK) | PSL_IOPL, vmf);
		vmf->vmf_ip += inc_ip;
		return (retcode);

	case INTn:
		i_byte = fubyte(addr + 1);
		if ((vm86->vm86_intmap[i_byte >> 3] & (1 << (i_byte & 7))) != 0)
			break;
		if (vm86->vm86_eflags & PSL_VIF)
			PUSH((vmf->vmf_flags & PUSH_MASK)
			    | PSL_IOPL | PSL_I, vmf);
		else
			PUSH((vmf->vmf_flags & PUSH_MASK) | PSL_IOPL, vmf);
		PUSH(vmf->vmf_cs, vmf);
		PUSH(vmf->vmf_ip + inc_ip + 1, vmf);	/* increment IP */
		GET_VEC(fuword((caddr_t)(i_byte * 4)),
		     &vmf->vmf_cs, &vmf->vmf_ip);
		vmf->vmf_flags &= ~PSL_T;
		vm86->vm86_eflags &= ~PSL_VIF;
		return (retcode);

	case IRET:
		vmf->vmf_ip = POP(vmf);
		vmf->vmf_cs = POP(vmf);
		temp_flags = POP(vmf) & POP_MASK;
		vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
		    | temp_flags | PSL_VM | PSL_I;
		if (temp_flags & PSL_I) {
			vm86->vm86_eflags |= PSL_VIF;
			if (vm86->vm86_eflags & PSL_VIP)
				break;
		} else {
			vm86->vm86_eflags &= ~PSL_VIF;
		}
		return (retcode);

	case POPF:
		temp_flags = POP(vmf) & POP_MASK;
		vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
		    | temp_flags | PSL_VM | PSL_I;
		vmf->vmf_ip += inc_ip;
		if (temp_flags & PSL_I) {
			vm86->vm86_eflags |= PSL_VIF;
			if (vm86->vm86_eflags & PSL_VIP)
				break;
		} else {
			vm86->vm86_eflags &= ~PSL_VIF;
		}
		return (retcode);
	}
	return (SIGBUS);
}
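
/*
 * Illustrative sketch (not part of this file): a general protection fault
 * taken while PSL_VM is set is routed here from the trap handler, roughly:
 *
 *	if (frame.tf_eflags & PSL_VM) {
 *		i = vm86_emulate((struct vm86frame *)&frame);
 *		if (i == 0)
 *			goto out;	(instruction was emulated)
 *		... otherwise deliver signal i to the process ...
 *	}
 */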

#define PGTABLE_SIZE	((1024 + 64) * 1024 / PAGE_SIZE)
#define INTMAP_SIZE	32
#define IOMAP_SIZE	ctob(IOPAGES)
#define TSS_SIZE \
	(sizeof(struct pcb_ext) - sizeof(struct segment_descriptor) + \
	 INTMAP_SIZE + IOMAP_SIZE + 1)

struct vm86_layout {
	pt_entry_t	vml_pgtbl[PGTABLE_SIZE];
	struct	pcb vml_pcb;
	struct	pcb_ext vml_ext;
	char	vml_intmap[INTMAP_SIZE];
	char	vml_iomap[IOMAP_SIZE];
	char	vml_iomap_trailer;
};

void
vm86_initialize(void)
{
	int i;
	u_int *addr;
	struct vm86_layout *vml = (struct vm86_layout *)vm86paddr;
	struct pcb *pcb;
	struct pcb_ext *ext;
	struct soft_segment_descriptor ssd = {
		0,			/* segment base address (overwritten) */
		0,			/* length (overwritten) */
		SDT_SYS386TSS,		/* segment type */
		0,			/* priority level */
		1,			/* descriptor present */
		0, 0,
		0,			/* default 16 size */
		0			/* granularity */
	};

	/*
	 * this should be a compile time error, but cpp doesn't grok sizeof().
	 */
	if (sizeof(struct vm86_layout) > ctob(3))
		panic("struct vm86_layout exceeds space allocated in locore.s");

	/*
	 * Below is the memory layout that we use for the vm86 region.
	 *
	 * +--------+
	 * |        |
	 * |        |
	 * | page 0 |
	 * |        | +--------+
	 * |        | | stack  |
	 * +--------+ +--------+ <--------- vm86paddr
	 * |        | |Page Tbl| 1M + 64K = 272 entries = 1088 bytes
	 * |        | +--------+
	 * |        | |  PCB   | size: ~240 bytes
	 * | page 1 | |PCB Ext | size: ~140 bytes (includes TSS)
	 * |        | +--------+
	 * |        | |int map |
	 * |        | +--------+
	 * +--------+ |        |
	 * | page 2 | |  I/O   |
	 * +--------+ | bitmap |
	 * | page 3 | |        |
	 * |        | +--------+
	 * +--------+
	 */

	/*
	 * A rudimentary PCB must be installed, in order to get to the
	 * PCB extension area.  We use the PCB area as a scratchpad for
	 * data storage, the layout of which is shown below.
	 *
	 * pcb_esi	= new PTD entry 0
	 * pcb_ebp	= pointer to frame on vm86 stack
	 * pcb_esp	=    stack frame pointer at time of switch
	 * pcb_ebx	= va of vm86 page table
	 * pcb_eip	=    argument pointer to initial call
	 * pcb_spare[0]	=    saved TSS descriptor, word 0
	 * pcb_spare[1]	=    saved TSS descriptor, word 1
	 */
#define new_ptd		pcb_esi
#define vm86_frame	pcb_ebp
#define pgtable_va	pcb_ebx

	pcb = &vml->vml_pcb;
	ext = &vml->vml_ext;

	bzero(pcb, sizeof(struct pcb));
	pcb->new_ptd = vm86pa | PG_V | PG_RW | PG_U;
	pcb->vm86_frame = vm86paddr - sizeof(struct vm86frame);
	pcb->pgtable_va = vm86paddr;
	pcb->pcb_ext = ext;

	bzero(ext, sizeof(struct pcb_ext));
	ext->ext_tss.tss_esp0 = vm86paddr;
	ext->ext_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	ext->ext_tss.tss_ioopt =
		((u_int)vml->vml_iomap - (u_int)&ext->ext_tss) << 16;
	ext->ext_iomap = vml->vml_iomap;
	ext->ext_vm86.vm86_intmap = vml->vml_intmap;

	if (cpu_feature & CPUID_VME)
		ext->ext_vm86.vm86_has_vme = (rcr4() & CR4_VME ? 1 : 0);

	addr = (u_int *)ext->ext_vm86.vm86_intmap;
	for (i = 0; i < (INTMAP_SIZE + IOMAP_SIZE) / sizeof(u_int); i++)
		*addr++ = 0;
	vml->vml_iomap_trailer = 0xff;

	ssd.ssd_base = (u_int)&ext->ext_tss;
	ssd.ssd_limit = TSS_SIZE - 1;
	ssdtosd(&ssd, &ext->ext_tssd);

	vm86pcb = pcb;

#if 0
	/*
	 * use whatever is leftover of the vm86 page layout as a
	 * message buffer so we can capture early output.
	 */
	msgbufinit((vm_offset_t)vm86paddr + sizeof(struct vm86_layout),
	    ctob(3) - sizeof(struct vm86_layout));
#endif
}

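/*
 * vm86context page-mapping helpers.  Page number N in a context corresponds
 * to linear address N * PAGE_SIZE in the vm86 address space; vm86_datacall()
 * temporarily installs each listed kva page into the vm86 page table for
 * the duration of the call.
 */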
vm_offset_t
vm86_getpage(struct vm86context *vmc, int pagenum)
{
	int i;

	for (i = 0; i < vmc->npages; i++)
		if (vmc->pmap[i].pte_num == pagenum)
			return (vmc->pmap[i].kva);
	return (0);
}

vm_offset_t
vm86_addpage(struct vm86context *vmc, int pagenum, vm_offset_t kva)
{
	int i, flags = 0;

	for (i = 0; i < vmc->npages; i++)
		if (vmc->pmap[i].pte_num == pagenum)
			goto bad;

	if (vmc->npages == VM86_PMAPSIZE)
		goto bad;			/* XXX grow map? */

	if (kva == 0) {
		kva = (vm_offset_t)malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
		flags = VMAP_MALLOC;
	}

	i = vmc->npages++;
	vmc->pmap[i].flags = flags;
	vmc->pmap[i].kva = kva;
	vmc->pmap[i].pte_num = pagenum;
	return (kva);
bad:
	panic("vm86_addpage: not enough room, or overlap");
}

static void
vm86_initflags(struct vm86frame *vmf)
{
	int eflags = vmf->vmf_eflags;
	struct vm86_kernel *vm86 = &curthread->td_pcb->pcb_ext->ext_vm86;

	if (vm86->vm86_has_vme) {
		eflags = (vmf->vmf_eflags & ~VME_USERCHANGE) |
		    (eflags & VME_USERCHANGE) | PSL_VM;
	} else {
		vm86->vm86_eflags = eflags;	/* save VIF, VIP */
		eflags = (vmf->vmf_eflags & ~VM_USERCHANGE) |
		    (eflags & VM_USERCHANGE) | PSL_VM;
	}
	vmf->vmf_eflags = eflags | PSL_VM;
}

/*
 * called from vm86_bioscall, while in vm86 address space, to finalize setup.
 *
 * If vmf_trapno holds an interrupt number (0-255), a five-byte
 * CLI/INTn/STI/HLT trampoline is copied to linear address 0xA00 and
 * executed there; the trailing HLT is what vm86_trap() later catches to
 * detect completion of the call.
 */
void
vm86_prepcall(struct vm86frame vmf)
{
	uintptr_t addr[] = { 0xA00, 0x1000 };	/* code, stack */
	u_char intcall[] = {
		CLI, INTn, 0x00, STI, HLT
	};

	if ((vmf.vmf_trapno & PAGE_MASK) <= 0xff) {
		/* interrupt call requested */
		intcall[2] = (u_char)(vmf.vmf_trapno & 0xff);
		memcpy((void *)addr[0], (void *)intcall, sizeof(intcall));
		vmf.vmf_ip = addr[0];
		vmf.vmf_cs = 0;
	}
	vmf.vmf_sp = addr[1] - 2;		/* keep aligned */
	vmf.kernel_fs = vmf.kernel_es = vmf.kernel_ds = 0;
	vmf.vmf_ss = 0;
	vmf.vmf_eflags = PSL_VIF | PSL_VM | PSL_USER;
	vm86_initflags(&vmf);
}

/*
 * vm86 trap handler; determines whether routine succeeded or not.
 * Called while in vm86 space, returns to calling process.
 *
 * A MP lock ref is held on entry from trap() and must be released prior
 * to returning to the VM86 call.
 */
void
vm86_trap(struct vm86frame *vmf)
{
	caddr_t addr;

	/* "should not happen" */
	if ((vmf->vmf_eflags & PSL_VM) == 0)
		panic("vm86_trap called, but not in vm86 mode");

	addr = MAKE_ADDR(vmf->vmf_cs, vmf->vmf_ip);
	if (*(u_char *)addr == HLT)
		vmf->vmf_trapno = vmf->vmf_eflags & PSL_C;
	else
		vmf->vmf_trapno = vmf->vmf_trapno << 16;

	rel_mplock();
	vm86_biosret(vmf);
}

int
vm86_intcall(int intnum, struct vm86frame *vmf)
{
	if (intnum < 0 || intnum > 0xff)
		return (EINVAL);

	ASSERT_MP_LOCK_HELD();

	vmf->vmf_trapno = intnum;
	return (vm86_bioscall(vmf));
}
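
/*
 * Illustrative use of vm86_intcall() (a sketch, not code from this file):
 * BIOS INT 0x11 returns the equipment-list word in %ax, so a caller that
 * already holds the MP lock could do:
 *
 *	struct vm86frame vmf;
 *
 *	bzero(&vmf, sizeof(vmf));
 *	vm86_intcall(0x11, &vmf);
 *	equipment = vmf.vmf_ax;
 */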

/*
 * struct vm86context contains the page table to use when making
 * vm86 calls.  If intnum is a valid interrupt number (0-255), then
 * the "interrupt trampoline" will be used, otherwise we use the
 * caller's cs:ip routine.
 */
int
vm86_datacall(intnum, vmf, vmc)
	int intnum;
	struct vm86frame *vmf;
	struct vm86context *vmc;
{
	pt_entry_t pte = (pt_entry_t)vm86paddr;
	u_int page;
	int i, entry, retval;

	ASSERT_MP_LOCK_HELD();

	for (i = 0; i < vmc->npages; i++) {
		page = vtophys(vmc->pmap[i].kva & PG_FRAME);
		entry = vmc->pmap[i].pte_num;
		vmc->pmap[i].old_pte = pte[entry];
		pte[entry] = page | PG_V | PG_RW | PG_U;
	}

	vmf->vmf_trapno = intnum;
	retval = vm86_bioscall(vmf);

	for (i = 0; i < vmc->npages; i++) {
		entry = vmc->pmap[i].pte_num;
		pte[entry] = vmc->pmap[i].old_pte;
	}

	return (retval);
}
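
/*
 * Illustrative use of vm86_datacall() (a sketch along the lines of the
 * VESA driver, not code from this file): map a scratch page into the vm86
 * address space, point %es:%di at it, and ask INT 0x10 function 0x4f00
 * for VBE controller information:
 *
 *	struct vm86context vmc;
 *	struct vm86frame vmf;
 *	vm_offset_t buf;
 *
 *	bzero(&vmc, sizeof(vmc));
 *	bzero(&vmf, sizeof(vmf));
 *	buf = vm86_addpage(&vmc, 1, 0);		(page 1 = linear 0x1000)
 *	vm86_getptr(&vmc, buf, &vmf.vmf_es, &vmf.vmf_di);
 *	vmf.vmf_ax = 0x4f00;
 *	if (vm86_datacall(0x10, &vmf, &vmc) == 0 && vmf.vmf_ax == 0x004f)
 *		... the page at buf now holds the VBE info block ...
 */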

vm_offset_t
vm86_getaddr(vmc, sel, off)
	struct vm86context *vmc;
	u_short sel;
	u_short off;
{
	int i, page;
	vm_offset_t addr;

	addr = (vm_offset_t)MAKE_ADDR(sel, off);
	page = addr >> PAGE_SHIFT;
	for (i = 0; i < vmc->npages; i++)
		if (page == vmc->pmap[i].pte_num)
			return (vmc->pmap[i].kva + (addr & PAGE_MASK));
	return (0);
}

int
vm86_getptr(vmc, kva, sel, off)
	struct vm86context *vmc;
	vm_offset_t kva;
	u_short *sel;
	u_short *off;
{
	int i;

	for (i = 0; i < vmc->npages; i++)
		if (kva >= vmc->pmap[i].kva &&
		    kva < vmc->pmap[i].kva + PAGE_SIZE) {
			*off = kva - vmc->pmap[i].kva;
			*sel = vmc->pmap[i].pte_num << 8;
			return (1);
		}
	return (0);
}

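/*
 * Userland reaches this handler through sysarch(2); on FreeBSD-derived
 * systems the libc wrapper is i386_vm86().  An illustrative (unchecked)
 * initialization from user code:
 *
 *	struct vm86_init_args va;
 *
 *	bzero(&va, sizeof(va));
 *	i386_vm86(VM86_INIT, &va);
 */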
int
vm86_sysarch(struct proc *p, char *args)
{
	int error = 0;
	struct i386_vm86_args ua;
	struct vm86_kernel *vm86;

	if ((error = copyin(args, &ua, sizeof(struct i386_vm86_args))) != 0)
		return (error);

	if (p->p_thread->td_pcb->pcb_ext == 0)
		if ((error = i386_extend_pcb(p)) != 0)
			return (error);
	vm86 = &p->p_thread->td_pcb->pcb_ext->ext_vm86;

	switch (ua.sub_op) {
	case VM86_INIT: {
		struct vm86_init_args sa;

		if ((error = copyin(ua.sub_args, &sa, sizeof(sa))) != 0)
			return (error);
		if (cpu_feature & CPUID_VME)
			vm86->vm86_has_vme = (rcr4() & CR4_VME ? 1 : 0);
		else
			vm86->vm86_has_vme = 0;
		vm86->vm86_inited = 1;
		vm86->vm86_debug = sa.debug;
		bcopy(&sa.int_map, vm86->vm86_intmap, 32);
		}
		break;

#if 0
	case VM86_SET_VME: {
		struct vm86_vme_args sa;

		if ((cpu_feature & CPUID_VME) == 0)
			return (ENODEV);

		if ((error = copyin(ua.sub_args, &sa, sizeof(sa))) != 0)
			return (error);
		if (sa.state)
			load_cr4(rcr4() | CR4_VME);
		else
			load_cr4(rcr4() & ~CR4_VME);
		}
		break;
#endif

	case VM86_GET_VME: {
		struct vm86_vme_args sa;

		sa.state = (rcr4() & CR4_VME ? 1 : 0);
		error = copyout(&sa, ua.sub_args, sizeof(sa));
		}
		break;

	case VM86_INTCALL: {
		struct vm86_intcall_args sa;

		if ((error = suser_cred(p->p_ucred, 0)))
			return (error);
		if ((error = copyin(ua.sub_args, &sa, sizeof(sa))))
			return (error);
		if ((error = vm86_intcall(sa.intnum, &sa.vmf)))
			return (error);
		error = copyout(&sa, ua.sub_args, sizeof(sa));
		}
		break;

	default:
		error = EINVAL;
	}
	return (error);
}