/*-
 * Copyright (c) 1997 Jonathan Lemon
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/vm86.c,v 1.31.2.2 2001/10/05 06:18:55 peter Exp $
 * $DragonFly: src/sys/i386/i386/Attic/vm86.c,v 1.7 2003/07/08 06:27:26 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <sys/user.h>
#include <sys/thread2.h>

#include <machine/md_var.h>
#include <machine/pcb_ext.h>    /* pcb.h included via sys/user.h */
#include <machine/psl.h>
#include <machine/specialreg.h>
#include <machine/sysarch.h>

extern int i386_extend_pcb __P((struct proc *));
extern int vm86pa;
extern struct pcb *vm86pcb;

extern int vm86_bioscall(struct vm86frame *);
extern void vm86_biosret(struct vm86frame *);

void vm86_prepcall(struct vm86frame);

struct system_map {
        int             type;
        vm_offset_t     start;
        vm_offset_t     end;
};

#define HLT     0xf4
#define CLI     0xfa
#define STI     0xfb
#define PUSHF   0x9c
#define POPF    0x9d
#define INTn    0xcd
#define IRET    0xcf
#define CALLm   0xff
#define OPERAND_SIZE_PREFIX     0x66
#define ADDRESS_SIZE_PREFIX     0x67
#define PUSH_MASK       ~(PSL_VM | PSL_RF | PSL_I)
#define POP_MASK        ~(PSL_VIP | PSL_VIF | PSL_VM | PSL_RF | PSL_IOPL)

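/*
 * Real-mode addresses are formed as (segment << 4) + offset, yielding a
 * 20-bit linear address; e.g. 0xF000:0xFFF0 maps to linear 0xFFFF0.
 * MAKE_ADDR below performs exactly this computation so that the kernel
 * can dereference vm86 cs:ip and ss:sp pairs directly.
 */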
static __inline caddr_t
MAKE_ADDR(u_short sel, u_short off)
{
        return ((caddr_t)((sel << 4) + off));
}

static __inline void
GET_VEC(u_int vec, u_short *sel, u_short *off)
{
        *sel = vec >> 16;
        *off = vec & 0xffff;
}

static __inline u_int
MAKE_VEC(u_short sel, u_short off)
{
        return ((sel << 16) | off);
}

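/*
 * PUSH/POP and PUSHL/POPL model 16- and 32-bit real-mode stack
 * operations at ss:sp.  The vm86 stack lives in user memory, so the
 * fu*()/su*() user-space accessors are used rather than plain
 * dereferences.
 */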
static __inline void
PUSH(u_short x, struct vm86frame *vmf)
{
        vmf->vmf_sp -= 2;
        susword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp), x);
}

static __inline void
PUSHL(u_int x, struct vm86frame *vmf)
{
        vmf->vmf_sp -= 4;
        suword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp), x);
}

static __inline u_short
POP(struct vm86frame *vmf)
{
        u_short x = fusword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp));

        vmf->vmf_sp += 2;
        return (x);
}

static __inline u_int
POPL(struct vm86frame *vmf)
{
        u_int x = fuword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp));

        vmf->vmf_sp += 4;
        return (x);
}

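/*
 * Emulate the interrupt-flag-sensitive instructions (CLI, STI, PUSHF,
 * POPF, INTn, IRET) that trap out of vm86 mode.  Interrupt state is
 * virtualized in PSL_VIF/PSL_VIP rather than touching the real PSL_I.
 * A zero return resumes the vm86 process; enabling VIF while PSL_VIP
 * is set falls through to the SIGBUS return so the pending interrupt
 * can be reflected to the user-mode monitor, which is also where
 * unrecognized opcodes end up.
 */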
int
vm86_emulate(struct vm86frame *vmf)
{
        struct vm86_kernel *vm86;
        caddr_t addr;
        u_char i_byte;
        u_int temp_flags;
        int inc_ip = 1;
        int retcode = 0;

        /*
         * pcb_ext contains the address of the extension area, or zero if
         * the extension is not present.  (This check should not be needed,
         * as we can't enter vm86 mode until we set up an extension area)
         */
        if (curthread->td_pcb->pcb_ext == 0)
                return (SIGBUS);
        vm86 = &curthread->td_pcb->pcb_ext->ext_vm86;

        if (vmf->vmf_eflags & PSL_T)
                retcode = SIGTRAP;

        addr = MAKE_ADDR(vmf->vmf_cs, vmf->vmf_ip);
        i_byte = fubyte(addr);
        if (i_byte == ADDRESS_SIZE_PREFIX) {
                i_byte = fubyte(++addr);
                inc_ip++;
        }

        if (vm86->vm86_has_vme) {
                switch (i_byte) {
                case OPERAND_SIZE_PREFIX:
                        i_byte = fubyte(++addr);
                        inc_ip++;
                        switch (i_byte) {
                        case PUSHF:
                                if (vmf->vmf_eflags & PSL_VIF)
                                        PUSHL((vmf->vmf_eflags & PUSH_MASK)
                                            | PSL_IOPL | PSL_I, vmf);
                                else
                                        PUSHL((vmf->vmf_eflags & PUSH_MASK)
                                            | PSL_IOPL, vmf);
                                vmf->vmf_ip += inc_ip;
                                return (0);

                        case POPF:
                                temp_flags = POPL(vmf) & POP_MASK;
                                vmf->vmf_eflags = (vmf->vmf_eflags & ~POP_MASK)
                                    | temp_flags | PSL_VM | PSL_I;
                                vmf->vmf_ip += inc_ip;
                                if (temp_flags & PSL_I) {
                                        vmf->vmf_eflags |= PSL_VIF;
                                        if (vmf->vmf_eflags & PSL_VIP)
                                                break;
                                } else {
                                        vmf->vmf_eflags &= ~PSL_VIF;
                                }
                                return (0);
                        }
                        break;

                /* VME faults here if VIP is set, but does not set VIF. */
                case STI:
                        vmf->vmf_eflags |= PSL_VIF;
                        vmf->vmf_ip += inc_ip;
                        if ((vmf->vmf_eflags & PSL_VIP) == 0) {
                                uprintf("fatal sti\n");
                                return (SIGKILL);
                        }
                        break;

                /* VME if no redirection support */
                case INTn:
                        break;

                /* VME if trying to set PSL_TF, or PSL_I when VIP is set */
                case POPF:
                        temp_flags = POP(vmf) & POP_MASK;
                        vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
                            | temp_flags | PSL_VM | PSL_I;
                        vmf->vmf_ip += inc_ip;
                        if (temp_flags & PSL_I) {
                                vmf->vmf_eflags |= PSL_VIF;
                                if (vmf->vmf_eflags & PSL_VIP)
                                        break;
                        } else {
                                vmf->vmf_eflags &= ~PSL_VIF;
                        }
                        return (retcode);

                /* VME if trying to set PSL_TF, or PSL_I when VIP is set */
                case IRET:
                        vmf->vmf_ip = POP(vmf);
                        vmf->vmf_cs = POP(vmf);
                        temp_flags = POP(vmf) & POP_MASK;
                        vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
                            | temp_flags | PSL_VM | PSL_I;
                        if (temp_flags & PSL_I) {
                                vmf->vmf_eflags |= PSL_VIF;
                                if (vmf->vmf_eflags & PSL_VIP)
                                        break;
                        } else {
                                vmf->vmf_eflags &= ~PSL_VIF;
                        }
                        return (retcode);

                }
                return (SIGBUS);
        }

        switch (i_byte) {
        case OPERAND_SIZE_PREFIX:
                i_byte = fubyte(++addr);
                inc_ip++;
                switch (i_byte) {
                case PUSHF:
                        if (vm86->vm86_eflags & PSL_VIF)
                                PUSHL((vmf->vmf_flags & PUSH_MASK)
                                    | PSL_IOPL | PSL_I, vmf);
                        else
                                PUSHL((vmf->vmf_flags & PUSH_MASK)
                                    | PSL_IOPL, vmf);
                        vmf->vmf_ip += inc_ip;
                        return (retcode);

                case POPF:
                        temp_flags = POPL(vmf) & POP_MASK;
                        vmf->vmf_eflags = (vmf->vmf_eflags & ~POP_MASK)
                            | temp_flags | PSL_VM | PSL_I;
                        vmf->vmf_ip += inc_ip;
                        if (temp_flags & PSL_I) {
                                vm86->vm86_eflags |= PSL_VIF;
                                if (vm86->vm86_eflags & PSL_VIP)
                                        break;
                        } else {
                                vm86->vm86_eflags &= ~PSL_VIF;
                        }
                        return (retcode);
                }
                return (SIGBUS);

        case CLI:
                vm86->vm86_eflags &= ~PSL_VIF;
                vmf->vmf_ip += inc_ip;
                return (retcode);

        case STI:
                /* if there is a pending interrupt, go to the emulator */
                vm86->vm86_eflags |= PSL_VIF;
                vmf->vmf_ip += inc_ip;
                if (vm86->vm86_eflags & PSL_VIP)
                        break;
                return (retcode);

        case PUSHF:
                if (vm86->vm86_eflags & PSL_VIF)
                        PUSH((vmf->vmf_flags & PUSH_MASK)
                            | PSL_IOPL | PSL_I, vmf);
                else
                        PUSH((vmf->vmf_flags & PUSH_MASK) | PSL_IOPL, vmf);
                vmf->vmf_ip += inc_ip;
                return (retcode);

        case INTn:
                i_byte = fubyte(addr + 1);
                if ((vm86->vm86_intmap[i_byte >> 3] & (1 << (i_byte & 7))) != 0)
                        break;
                if (vm86->vm86_eflags & PSL_VIF)
                        PUSH((vmf->vmf_flags & PUSH_MASK)
                            | PSL_IOPL | PSL_I, vmf);
                else
                        PUSH((vmf->vmf_flags & PUSH_MASK) | PSL_IOPL, vmf);
                PUSH(vmf->vmf_cs, vmf);
                PUSH(vmf->vmf_ip + inc_ip + 1, vmf);    /* increment IP */
                GET_VEC(fuword((caddr_t)(i_byte * 4)),
                    &vmf->vmf_cs, &vmf->vmf_ip);
                vmf->vmf_flags &= ~PSL_T;
                vm86->vm86_eflags &= ~PSL_VIF;
                return (retcode);

        case IRET:
                vmf->vmf_ip = POP(vmf);
                vmf->vmf_cs = POP(vmf);
                temp_flags = POP(vmf) & POP_MASK;
                vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
                    | temp_flags | PSL_VM | PSL_I;
                if (temp_flags & PSL_I) {
                        vm86->vm86_eflags |= PSL_VIF;
                        if (vm86->vm86_eflags & PSL_VIP)
                                break;
                } else {
                        vm86->vm86_eflags &= ~PSL_VIF;
                }
                return (retcode);

        case POPF:
                temp_flags = POP(vmf) & POP_MASK;
                vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
                    | temp_flags | PSL_VM | PSL_I;
                vmf->vmf_ip += inc_ip;
                if (temp_flags & PSL_I) {
                        vm86->vm86_eflags |= PSL_VIF;
                        if (vm86->vm86_eflags & PSL_VIP)
                                break;
                } else {
                        vm86->vm86_eflags &= ~PSL_VIF;
                }
                return (retcode);
        }
        return (SIGBUS);
}

#define PGTABLE_SIZE    ((1024 + 64) * 1024 / PAGE_SIZE)
#define INTMAP_SIZE     32
#define IOMAP_SIZE      ctob(IOPAGES)
#define TSS_SIZE \
        (sizeof(struct pcb_ext) - sizeof(struct segment_descriptor) + \
         INTMAP_SIZE + IOMAP_SIZE + 1)

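/*
 * The hardware TSS segment must cover everything in struct pcb_ext
 * except the TSS descriptor itself, plus the interrupt redirection
 * bitmap, the I/O permission bitmap, and the single 0xff terminator
 * byte the CPU requires after the I/O bitmap; TSS_SIZE expresses
 * exactly that sum and becomes the segment limit in vm86_initialize().
 */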
struct vm86_layout {
        pt_entry_t      vml_pgtbl[PGTABLE_SIZE];
        struct pcb      vml_pcb;
        struct pcb_ext  vml_ext;
        char            vml_intmap[INTMAP_SIZE];
        char            vml_iomap[IOMAP_SIZE];
        char            vml_iomap_trailer;
};

void
vm86_initialize(void)
{
        int i;
        u_int *addr;
        struct vm86_layout *vml = (struct vm86_layout *)vm86paddr;
        struct pcb *pcb;
        struct pcb_ext *ext;
        struct soft_segment_descriptor ssd = {
                0,                      /* segment base address (overwritten) */
                0,                      /* length (overwritten) */
                SDT_SYS386TSS,          /* segment type */
                0,                      /* priority level */
                1,                      /* descriptor present */
                0, 0,
                0,                      /* default 16 size */
                0                       /* granularity */
        };

        /*
         * this should be a compile time error, but cpp doesn't grok sizeof().
         */
        if (sizeof(struct vm86_layout) > ctob(3))
                panic("struct vm86_layout exceeds space allocated in locore.s");

        /*
         * Below is the memory layout that we use for the vm86 region.
         *
         * +--------+
         * |        |
         * |        |
         * | page 0 |
         * |        |   +--------+
         * |        |   | stack  |
         * +--------+   +--------+ <--------- vm86paddr
         * |        |   |Page Tbl| 1M + 64K = 272 entries = 1088 bytes
         * |        |   +--------+
         * |        |   |  PCB   | size: ~240 bytes
         * | page 1 |   |PCB Ext | size: ~140 bytes (includes TSS)
         * |        |   +--------+
         * |        |   |int map |
         * |        |   +--------+
         * +--------+   |        |
         * | page 2 |   |  I/O   |
         * +--------+   | bitmap |
         * | page 3 |   |        |
         * |        |   +--------+
         * +--------+
         */

        /*
         * A rudimentary PCB must be installed, in order to get to the
         * PCB extension area.  We use the PCB area as a scratchpad for
         * data storage, the layout of which is shown below.
         *
         * pcb_esi      = new PTD entry 0
         * pcb_ebp      = pointer to frame on vm86 stack
         * pcb_esp      = stack frame pointer at time of switch
         * pcb_ebx      = va of vm86 page table
         * pcb_eip      = argument pointer to initial call
         * pcb_spare[0] = saved TSS descriptor, word 0
         * pcb_spare[1] = saved TSS descriptor, word 1
         */
#define new_ptd         pcb_esi
#define vm86_frame      pcb_ebp
#define pgtable_va      pcb_ebx

        pcb = &vml->vml_pcb;
        ext = &vml->vml_ext;

        bzero(pcb, sizeof(struct pcb));
        pcb->new_ptd = vm86pa | PG_V | PG_RW | PG_U;
        pcb->vm86_frame = vm86paddr - sizeof(struct vm86frame);
        pcb->pgtable_va = vm86paddr;
        pcb->pcb_ext = ext;

        bzero(ext, sizeof(struct pcb_ext));
        ext->ext_tss.tss_esp0 = vm86paddr;
        ext->ext_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
        ext->ext_tss.tss_ioopt =
                ((u_int)vml->vml_iomap - (u_int)&ext->ext_tss) << 16;
        ext->ext_iomap = vml->vml_iomap;
        ext->ext_vm86.vm86_intmap = vml->vml_intmap;

        if (cpu_feature & CPUID_VME)
                ext->ext_vm86.vm86_has_vme = (rcr4() & CR4_VME ? 1 : 0);

        addr = (u_int *)ext->ext_vm86.vm86_intmap;
        for (i = 0; i < (INTMAP_SIZE + IOMAP_SIZE) / sizeof(u_int); i++)
                *addr++ = 0;
        vml->vml_iomap_trailer = 0xff;

        ssd.ssd_base = (u_int)&ext->ext_tss;
        ssd.ssd_limit = TSS_SIZE - 1;
        ssdtosd(&ssd, &ext->ext_tssd);

        vm86pcb = pcb;

#if 0
        /*
         * use whatever is leftover of the vm86 page layout as a
         * message buffer so we can capture early output.
         */
        msgbufinit((vm_offset_t)vm86paddr + sizeof(struct vm86_layout),
            ctob(3) - sizeof(struct vm86_layout));
#endif
}

vm_offset_t
vm86_getpage(struct vm86context *vmc, int pagenum)
{
        int i;

        for (i = 0; i < vmc->npages; i++)
                if (vmc->pmap[i].pte_num == pagenum)
                        return (vmc->pmap[i].kva);
        return (0);
}

vm_offset_t
vm86_addpage(struct vm86context *vmc, int pagenum, vm_offset_t kva)
{
        int i, flags = 0;

        for (i = 0; i < vmc->npages; i++)
                if (vmc->pmap[i].pte_num == pagenum)
                        goto bad;

        if (vmc->npages == VM86_PMAPSIZE)
                goto bad;                       /* XXX grow map? */

        if (kva == 0) {
                kva = (vm_offset_t)malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
                flags = VMAP_MALLOC;
        }

        i = vmc->npages++;
        vmc->pmap[i].flags = flags;
        vmc->pmap[i].kva = kva;
        vmc->pmap[i].pte_num = pagenum;
        return (kva);
bad:
        panic("vm86_addpage: not enough room, or overlap");
}
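
/*
 * Note that vm86_addpage() only records the page in the context; the
 * page is not actually mapped until vm86_datacall() installs it in the
 * vm86 page table for the duration of a single BIOS call.
 */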

static void
vm86_initflags(struct vm86frame *vmf)
{
        int eflags = vmf->vmf_eflags;
        struct vm86_kernel *vm86 = &curthread->td_pcb->pcb_ext->ext_vm86;

        if (vm86->vm86_has_vme) {
                eflags = (vmf->vmf_eflags & ~VME_USERCHANGE) |
                    (eflags & VME_USERCHANGE) | PSL_VM;
        } else {
                vm86->vm86_eflags = eflags;     /* save VIF, VIP */
                eflags = (vmf->vmf_eflags & ~VM_USERCHANGE) |
                    (eflags & VM_USERCHANGE) | PSL_VM;
        }
        vmf->vmf_eflags = eflags | PSL_VM;
}

/*
 * called from vm86_bioscall, while in vm86 address space, to finalize setup.
 */
void
vm86_prepcall(struct vm86frame vmf)
{
        uintptr_t addr[] = { 0xA00, 0x1000 };   /* code, stack */
        u_char intcall[] = {
                CLI, INTn, 0x00, STI, HLT
        };

        if ((vmf.vmf_trapno & PAGE_MASK) <= 0xff) {
                /* interrupt call requested */
                intcall[2] = (u_char)(vmf.vmf_trapno & 0xff);
                memcpy((void *)addr[0], (void *)intcall, sizeof(intcall));
                vmf.vmf_ip = addr[0];
                vmf.vmf_cs = 0;
        }
        vmf.vmf_sp = addr[1] - 2;               /* keep aligned */
        vmf.kernel_fs = vmf.kernel_es = vmf.kernel_ds = 0;
        vmf.vmf_ss = 0;
        vmf.vmf_eflags = PSL_VIF | PSL_VM | PSL_USER;
        vm86_initflags(&vmf);
}
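
/*
 * The five-byte trampoline written at 0xA00 above runs with interrupts
 * virtually disabled, issues the requested software interrupt, and then
 * executes HLT.  HLT is privileged in vm86 mode, so it traps back into
 * the kernel, where vm86_trap() below recognizes it as normal
 * completion and reports the BIOS carry flag as the return status.
 */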

/*
 * vm86 trap handler; determines whether routine succeeded or not.
 * Called while in vm86 space, returns to calling process.
 *
 * A MP lock ref is held on entry from trap() and must be released prior
 * to returning to the VM86 call.
 */
void
vm86_trap(struct vm86frame *vmf)
{
        caddr_t addr;

        /* "should not happen" */
        if ((vmf->vmf_eflags & PSL_VM) == 0)
                panic("vm86_trap called, but not in vm86 mode");

        addr = MAKE_ADDR(vmf->vmf_cs, vmf->vmf_ip);
        if (*(u_char *)addr == HLT)
                vmf->vmf_trapno = vmf->vmf_eflags & PSL_C;
        else
                vmf->vmf_trapno = vmf->vmf_trapno << 16;

        rel_mplock();
        vm86_biosret(vmf);
}

int
vm86_intcall(int intnum, struct vm86frame *vmf)
{
        int error;

        if (intnum < 0 || intnum > 0xff)
                return (EINVAL);

        crit_enter();
        ASSERT_MP_LOCK_HELD();

        vmf->vmf_trapno = intnum;
        error = vm86_bioscall(vmf);
        crit_exit();
        return (error);
}

/*
 * struct vm86context contains the page table to use when making
 * vm86 calls.  If intnum is a valid interrupt number (0-255), then
 * the "interrupt trampoline" will be used, otherwise we use the
 * caller's cs:ip routine.
 */
int
vm86_datacall(int intnum, struct vm86frame *vmf, struct vm86context *vmc)
{
        pt_entry_t pte = (pt_entry_t)vm86paddr;
        u_int page;
        int i, entry, retval;

        crit_enter();
        ASSERT_MP_LOCK_HELD();

        for (i = 0; i < vmc->npages; i++) {
                page = vtophys(vmc->pmap[i].kva & PG_FRAME);
                entry = vmc->pmap[i].pte_num;
                vmc->pmap[i].old_pte = pte[entry];
                pte[entry] = page | PG_V | PG_RW | PG_U;
        }

        vmf->vmf_trapno = intnum;
        retval = vm86_bioscall(vmf);

        for (i = 0; i < vmc->npages; i++) {
                entry = vmc->pmap[i].pte_num;
                pte[entry] = vmc->pmap[i].old_pte;
        }
        crit_exit();
        return (retval);
}
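
/*
 * Typical caller (illustrative sketch only, modeled on how BIOS-call
 * consumers such as the VESA code use this interface; the function
 * number 0x4f00 below is just an example):
 *
 *      struct vm86context vmc;
 *      struct vm86frame vmf;
 *      vm_offset_t buf;
 *
 *      bzero(&vmc, sizeof(vmc));
 *      bzero(&vmf, sizeof(vmf));
 *      buf = vm86_addpage(&vmc, 1, 0);         (malloc'd kernel page)
 *      vm86_getptr(&vmc, buf, &vmf.vmf_es, &vmf.vmf_di);
 *      vmf.vmf_eax = 0x4f00;
 *      (void)vm86_datacall(0x10, &vmf, &vmc);
 */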

vm_offset_t
vm86_getaddr(struct vm86context *vmc, u_short sel, u_short off)
{
        int i, page;
        vm_offset_t addr;

        addr = (vm_offset_t)MAKE_ADDR(sel, off);
        page = addr >> PAGE_SHIFT;
        for (i = 0; i < vmc->npages; i++)
                if (page == vmc->pmap[i].pte_num)
                        return (vmc->pmap[i].kva + (addr & PAGE_MASK));
        return (0);
}

int
vm86_getptr(struct vm86context *vmc, vm_offset_t kva, u_short *sel,
            u_short *off)
{
        int i;

        for (i = 0; i < vmc->npages; i++)
                if (kva >= vmc->pmap[i].kva &&
                    kva < vmc->pmap[i].kva + PAGE_SIZE) {
                        *off = kva - vmc->pmap[i].kva;
                        *sel = vmc->pmap[i].pte_num << 8;
                        return (1);
                }
        return (0);
}

int
vm86_sysarch(struct proc *p, char *args)
{
        int error = 0;
        struct i386_vm86_args ua;
        struct vm86_kernel *vm86;

        if ((error = copyin(args, &ua, sizeof(struct i386_vm86_args))) != 0)
                return (error);

        if (p->p_thread->td_pcb->pcb_ext == 0)
                if ((error = i386_extend_pcb(p)) != 0)
                        return (error);
        vm86 = &p->p_thread->td_pcb->pcb_ext->ext_vm86;

        switch (ua.sub_op) {
        case VM86_INIT: {
                struct vm86_init_args sa;

                if ((error = copyin(ua.sub_args, &sa, sizeof(sa))) != 0)
                        return (error);
                if (cpu_feature & CPUID_VME)
                        vm86->vm86_has_vme = (rcr4() & CR4_VME ? 1 : 0);
                else
                        vm86->vm86_has_vme = 0;
                vm86->vm86_inited = 1;
                vm86->vm86_debug = sa.debug;
                bcopy(&sa.int_map, vm86->vm86_intmap, 32);
                }
                break;

#if 0
        case VM86_SET_VME: {
                struct vm86_vme_args sa;

                if ((cpu_feature & CPUID_VME) == 0)
                        return (ENODEV);

                if ((error = copyin(ua.sub_args, &sa, sizeof(sa))) != 0)
                        return (error);
                if (sa.state)
                        load_cr4(rcr4() | CR4_VME);
                else
                        load_cr4(rcr4() & ~CR4_VME);
                }
                break;
#endif

        case VM86_GET_VME: {
                struct vm86_vme_args sa;

                sa.state = (rcr4() & CR4_VME ? 1 : 0);
                error = copyout(&sa, ua.sub_args, sizeof(sa));
                }
                break;

        case VM86_INTCALL: {
                struct vm86_intcall_args sa;

                if ((error = suser_cred(p->p_ucred, 0)))
                        return (error);
                if ((error = copyin(ua.sub_args, &sa, sizeof(sa))))
                        return (error);
                if ((error = vm86_intcall(sa.intnum, &sa.vmf)))
                        return (error);
                error = copyout(&sa, ua.sub_args, sizeof(sa));
                }
                break;

        default:
                error = EINVAL;
        }
        return (error);
}