 * Copyright (c) 1997 Jonathan Lemon
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * $FreeBSD: src/sys/i386/i386/vm86.c,v 1.31.2.2 2001/10/05 06:18:55 peter Exp $
 * $DragonFly: src/sys/i386/i386/Attic/vm86.c,v 1.11 2004/08/07 03:42:37 dillon Exp $
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <sys/thread2.h>

#include <machine/md_var.h>
#include <machine/pcb_ext.h>	/* pcb.h included via sys/user.h */
#include <machine/psl.h>
#include <machine/specialreg.h>
#include <machine/sysarch.h>
#include <machine/clock.h>
extern int i386_extend_pcb (struct proc *);
extern struct pcb *vm86pcb;

extern int vm86_bioscall(struct vm86frame *);
extern void vm86_biosret(struct vm86frame *);

void vm86_prepcall(struct vm86frame);

#define OPERAND_SIZE_PREFIX	0x66
#define ADDRESS_SIZE_PREFIX	0x67
#define PUSH_MASK		~(PSL_VM | PSL_RF | PSL_I)
#define POP_MASK		~(PSL_VIP | PSL_VIF | PSL_VM | PSL_RF | PSL_IOPL)
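
/*
 * The inline helpers below convert between real-mode values and kernel
 * pointers: MAKE_ADDR turns a segment:offset pair into a linear address
 * (segment * 16 + offset), GET_VEC/MAKE_VEC split and build the 32-bit
 * seg:off form of an interrupt vector, and PUSH/POP/PUSHL/POPL access the
 * vm86 task's stack through the user-space fetch/store primitives
 * (fusword/susword, fuword/suword), since those addresses live in the
 * user part of the address space.
 */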
static __inline caddr_t
MAKE_ADDR(u_short sel, u_short off)
	return ((caddr_t)((sel << 4) + off));

GET_VEC(u_int vec, u_short *sel, u_short *off)

MAKE_VEC(u_short sel, u_short off)
	return ((sel << 16) | off);

PUSH(u_short x, struct vm86frame *vmf)
	susword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp), x);

PUSHL(u_int x, struct vm86frame *vmf)
	suword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp), x);

static __inline u_short
POP(struct vm86frame *vmf)
	u_short x = fusword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp));

static __inline u_int
POPL(struct vm86frame *vmf)
	u_int x = fuword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp));
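
/*
 * vm86_emulate() handles the general-protection faults raised by the
 * privileged instructions a vm86 task may execute: CLI, STI, PUSHF, POPF,
 * INTn and IRET, with optional operand/address size prefixes.  Interrupt
 * state is virtualized through PSL_VIF/PSL_VIP, kept in the hardware
 * eflags when the VME extension is available or in vm86_eflags otherwise.
 * It returns zero when the instruction was emulated here, or a signal
 * number for the caller to post to the process.
 */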
	struct vm86frame *vmf;
	struct vm86_kernel *vm86;

	 * pcb_ext contains the address of the extension area, or zero if
	 * the extension is not present.  (This check should not be needed,
	 * as we can't enter vm86 mode until we set up an extension area)
	if (curthread->td_pcb->pcb_ext == 0)
	vm86 = &curthread->td_pcb->pcb_ext->ext_vm86;
	if (vmf->vmf_eflags & PSL_T)
	addr = MAKE_ADDR(vmf->vmf_cs, vmf->vmf_ip);
	i_byte = fubyte(addr);
	if (i_byte == ADDRESS_SIZE_PREFIX) {
		i_byte = fubyte(++addr);
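
	/*
	 * When the CPU's VME extension is enabled, most of these
	 * instructions are handled in hardware through VIF/VIP and the
	 * interrupt redirection bitmap; only the cases the hardware cannot
	 * complete (noted in the case comments below) fault into this
	 * branch.
	 */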
	if (vm86->vm86_has_vme) {
		case OPERAND_SIZE_PREFIX:
			i_byte = fubyte(++addr);
				if (vmf->vmf_eflags & PSL_VIF)
					PUSHL((vmf->vmf_eflags & PUSH_MASK)
					    | PSL_IOPL | PSL_I, vmf);
					PUSHL((vmf->vmf_eflags & PUSH_MASK)
					    | PSL_IOPL, vmf);
				vmf->vmf_ip += inc_ip;
				temp_flags = POPL(vmf) & POP_MASK;
				vmf->vmf_eflags = (vmf->vmf_eflags & ~POP_MASK)
				    | temp_flags | PSL_VM | PSL_I;
				vmf->vmf_ip += inc_ip;
				if (temp_flags & PSL_I) {
					vmf->vmf_eflags |= PSL_VIF;
					if (vmf->vmf_eflags & PSL_VIP)
					vmf->vmf_eflags &= ~PSL_VIF;

		/* VME faults here if VIP is set, but does not set VIF. */
			vmf->vmf_eflags |= PSL_VIF;
			vmf->vmf_ip += inc_ip;
			if ((vmf->vmf_eflags & PSL_VIP) == 0) {
				uprintf("fatal sti\n");

		/* VME if no redirection support */

		/* VME if trying to set PSL_TF, or PSL_I when VIP is set */
			temp_flags = POP(vmf) & POP_MASK;
			vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
			    | temp_flags | PSL_VM | PSL_I;
			vmf->vmf_ip += inc_ip;
			if (temp_flags & PSL_I) {
				vmf->vmf_eflags |= PSL_VIF;
				if (vmf->vmf_eflags & PSL_VIP)
				vmf->vmf_eflags &= ~PSL_VIF;

		/* VME if trying to set PSL_TF, or PSL_I when VIP is set */
			vmf->vmf_ip = POP(vmf);
			vmf->vmf_cs = POP(vmf);
			temp_flags = POP(vmf) & POP_MASK;
			vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
			    | temp_flags | PSL_VM | PSL_I;
			if (temp_flags & PSL_I) {
				vmf->vmf_eflags |= PSL_VIF;
				if (vmf->vmf_eflags & PSL_VIP)
				vmf->vmf_eflags &= ~PSL_VIF;
	case OPERAND_SIZE_PREFIX:
		i_byte = fubyte(++addr);
			if (vm86->vm86_eflags & PSL_VIF)
				PUSHL((vmf->vmf_flags & PUSH_MASK)
				    | PSL_IOPL | PSL_I, vmf);
				PUSHL((vmf->vmf_flags & PUSH_MASK)
				    | PSL_IOPL, vmf);
			vmf->vmf_ip += inc_ip;
			temp_flags = POPL(vmf) & POP_MASK;
			vmf->vmf_eflags = (vmf->vmf_eflags & ~POP_MASK)
			    | temp_flags | PSL_VM | PSL_I;
			vmf->vmf_ip += inc_ip;
			if (temp_flags & PSL_I) {
				vm86->vm86_eflags |= PSL_VIF;
				if (vm86->vm86_eflags & PSL_VIP)
				vm86->vm86_eflags &= ~PSL_VIF;

		vm86->vm86_eflags &= ~PSL_VIF;
		vmf->vmf_ip += inc_ip;

		/* if there is a pending interrupt, go to the emulator */
		vm86->vm86_eflags |= PSL_VIF;
		vmf->vmf_ip += inc_ip;
		if (vm86->vm86_eflags & PSL_VIP)

		if (vm86->vm86_eflags & PSL_VIF)
			PUSH((vmf->vmf_flags & PUSH_MASK)
			    | PSL_IOPL | PSL_I, vmf);
			PUSH((vmf->vmf_flags & PUSH_MASK) | PSL_IOPL, vmf);
		vmf->vmf_ip += inc_ip;

		i_byte = fubyte(addr + 1);
		if ((vm86->vm86_intmap[i_byte >> 3] & (1 << (i_byte & 7))) != 0)
		if (vm86->vm86_eflags & PSL_VIF)
			PUSH((vmf->vmf_flags & PUSH_MASK)
			    | PSL_IOPL | PSL_I, vmf);
			PUSH((vmf->vmf_flags & PUSH_MASK) | PSL_IOPL, vmf);
		PUSH(vmf->vmf_cs, vmf);
		PUSH(vmf->vmf_ip + inc_ip + 1, vmf);	/* increment IP */
		GET_VEC(fuword((caddr_t)(i_byte * 4)),
		    &vmf->vmf_cs, &vmf->vmf_ip);
		vmf->vmf_flags &= ~PSL_T;
		vm86->vm86_eflags &= ~PSL_VIF;

		vmf->vmf_ip = POP(vmf);
		vmf->vmf_cs = POP(vmf);
		temp_flags = POP(vmf) & POP_MASK;
		vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
		    | temp_flags | PSL_VM | PSL_I;
		if (temp_flags & PSL_I) {
			vm86->vm86_eflags |= PSL_VIF;
			if (vm86->vm86_eflags & PSL_VIP)
			vm86->vm86_eflags &= ~PSL_VIF;

		temp_flags = POP(vmf) & POP_MASK;
		vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
		    | temp_flags | PSL_VM | PSL_I;
		vmf->vmf_ip += inc_ip;
		if (temp_flags & PSL_I) {
			vm86->vm86_eflags |= PSL_VIF;
			if (vm86->vm86_eflags & PSL_VIP)
			vm86->vm86_eflags &= ~PSL_VIF;
#define PGTABLE_SIZE	((1024 + 64) * 1024 / PAGE_SIZE)
#define INTMAP_SIZE	32
#define IOMAP_SIZE	ctob(IOPAGES)
#define TSS_SIZE \
	(sizeof(struct pcb_ext) - sizeof(struct segment_descriptor) + \
	INTMAP_SIZE + IOMAP_SIZE + 1)

	pt_entry_t	vml_pgtbl[PGTABLE_SIZE];
	struct pcb_ext	vml_ext;
	char		vml_intmap[INTMAP_SIZE];
	char		vml_iomap[IOMAP_SIZE];
	char		vml_iomap_trailer;
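
/*
 * vml_iomap_trailer exists because the CPU requires the TSS I/O
 * permission bitmap to be followed by a byte with all bits set;
 * vm86_initialize() fills it with 0xff below.
 */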
vm86_initialize(void)

	struct vm86_layout *vml = (struct vm86_layout *)vm86paddr;
	struct soft_segment_descriptor ssd = {
		0,			/* segment base address (overwritten) */
		0,			/* length (overwritten) */
		SDT_SYS386TSS,		/* segment type */
		0,			/* priority level */
		1,			/* descriptor present */
		0,			/* default 16 size */

	 * this should be a compile time error, but cpp doesn't grok sizeof().
	if (sizeof(struct vm86_layout) > ctob(3))
		panic("struct vm86_layout exceeds space allocated in locore.s");

	 * Below is the memory layout that we use for the vm86 region.
	 *
	 * +--------+ +--------+ <--------- vm86paddr
	 * |        | |Page Tbl| 1M + 64K = 272 entries = 1088 bytes
	 * |        | |  PCB   | size: ~240 bytes
	 * | page 1 | |PCB Ext | size: ~140 bytes (includes TSS)
	 * +--------+ | bitmap |
	 *
	 * A rudimentary PCB must be installed, in order to get to the
	 * PCB extension area.  We use the PCB area as a scratchpad for
	 * data storage, the layout of which is shown below.
	 *
	 * pcb_esi	= new PTD entry 0
	 * pcb_ebp	= pointer to frame on vm86 stack
	 * pcb_esp	= stack frame pointer at time of switch
	 * pcb_ebx	= va of vm86 page table
	 * pcb_eip	= argument pointer to initial call
	 * pcb_spare[0]	= saved TSS descriptor, word 0
	 * pcb_spare[1]	= saved TSS descriptor, word 1

#define new_ptd		pcb_esi
#define vm86_frame	pcb_ebp
#define pgtable_va	pcb_ebx
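
/*
 * These aliases give a handful of otherwise-unused pcb register-save
 * slots the scratchpad roles described above; the assembly side
 * (vm86_bioscall/vm86_biosret, presumably in the vm86 bios-call stub)
 * relies on the same layout.
 */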
	bzero(pcb, sizeof(struct pcb));
	pcb->new_ptd = vm86pa | PG_V | PG_RW | PG_U;
	pcb->vm86_frame = (pt_entry_t)vm86paddr - sizeof(struct vm86frame);
	pcb->pgtable_va = (vm_offset_t)vm86paddr;

	bzero(ext, sizeof(struct pcb_ext));
	ext->ext_tss.tss_esp0 = (vm_offset_t)vm86paddr;
	ext->ext_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	ext->ext_tss.tss_ioopt =
		((u_int)vml->vml_iomap - (u_int)&ext->ext_tss) << 16;
	ext->ext_iomap = vml->vml_iomap;
	ext->ext_vm86.vm86_intmap = vml->vml_intmap;

	if (cpu_feature & CPUID_VME)
		ext->ext_vm86.vm86_has_vme = (rcr4() & CR4_VME ? 1 : 0);

	addr = (u_int *)ext->ext_vm86.vm86_intmap;
	for (i = 0; i < (INTMAP_SIZE + IOMAP_SIZE) / sizeof(u_int); i++)
	vml->vml_iomap_trailer = 0xff;

	ssd.ssd_base = (u_int)&ext->ext_tss;
	ssd.ssd_limit = TSS_SIZE - 1;
	ssdtosd(&ssd, &ext->ext_tssd);

	 * use whatever is leftover of the vm86 page layout as a
	 * message buffer so we can capture early output.
	msgbufinit((vm_offset_t)vm86paddr + sizeof(struct vm86_layout),
	    ctob(3) - sizeof(struct vm86_layout));
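
/*
 * A struct vm86context carries a small table (pmap[]) associating
 * low-memory page numbers with the kernel virtual addresses that back
 * them, so buffers can be mapped into the first megabyte for the
 * duration of a BIOS call and unmapped afterwards (see vm86_datacall()).
 */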
vm86_getpage(struct vm86context *vmc, int pagenum)

	for (i = 0; i < vmc->npages; i++)
		if (vmc->pmap[i].pte_num == pagenum)
			return (vmc->pmap[i].kva);

vm86_addpage(struct vm86context *vmc, int pagenum, vm_offset_t kva)

	for (i = 0; i < vmc->npages; i++)
		if (vmc->pmap[i].pte_num == pagenum)

	if (vmc->npages == VM86_PMAPSIZE)
		goto bad;			/* XXX grow map? */

		kva = (vm_offset_t)malloc(PAGE_SIZE, M_TEMP, M_WAITOK);

	vmc->pmap[i].flags = flags;
	vmc->pmap[i].kva = kva;
	vmc->pmap[i].pte_num = pagenum;

	panic("vm86_addpage: not enough room, or overlap");
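
/*
 * vm86_initflags() builds the eflags image used to enter vm86 mode:
 * only the user-changeable bits (VM_USERCHANGE, or VME_USERCHANGE when
 * VME is enabled) are taken from the caller's frame, PSL_VM is forced
 * on, and in the non-VME case the virtual interrupt state (VIF/VIP) is
 * saved in vm86_eflags for the software emulator.
 */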
vm86_initflags(struct vm86frame *vmf)

	int eflags = vmf->vmf_eflags;
	struct vm86_kernel *vm86 = &curthread->td_pcb->pcb_ext->ext_vm86;

	if (vm86->vm86_has_vme) {
		eflags = (vmf->vmf_eflags & ~VME_USERCHANGE) |
		    (eflags & VME_USERCHANGE) | PSL_VM;
		vm86->vm86_eflags = eflags;	/* save VIF, VIP */
		eflags = (vmf->vmf_eflags & ~VM_USERCHANGE) |
		    (eflags & VM_USERCHANGE) | PSL_VM;
	vmf->vmf_eflags = eflags | PSL_VM;
 * called from vm86_bioscall, while in vm86 address space, to finalize setup.

vm86_prepcall(struct vm86frame vmf)

	uintptr_t addr[] = { 0xA00, 0x1000 };		/* code, stack */
		CLI, INTn, 0x00, STI, HLT

	if ((vmf.vmf_trapno & PAGE_MASK) <= 0xff) {
		/* interrupt call requested */
		intcall[2] = (u_char)(vmf.vmf_trapno & 0xff);
		memcpy((void *)addr[0], (void *)intcall, sizeof(intcall));
		vmf.vmf_ip = addr[0];
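
		/*
		 * The five-byte stub copied to the scratch page above is
		 * "cli; int $nn; sti; hlt".  It runs in vm86 mode with nn
		 * patched to the requested vector; the final HLT traps back
		 * into the kernel, where vm86_trap() collects the result.
		 */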
	vmf.vmf_sp = addr[1] - 2;		/* keep aligned */
	vmf.kernel_fs = vmf.kernel_es = vmf.kernel_ds = 0;
	vmf.vmf_eflags = PSL_VIF | PSL_VM | PSL_USER;
	vm86_initflags(&vmf);

 * vm86 trap handler; determines whether routine succeeded or not.
 * Called while in vm86 space, returns to calling process.
 *
 * An MP lock ref is held on entry from trap() and must be released prior
 * to returning to the VM86 call.

vm86_trap(struct vm86frame *vmf)

	/* "should not happen" */
	if ((vmf->vmf_eflags & PSL_VM) == 0)
		panic("vm86_trap called, but not in vm86 mode");

	addr = MAKE_ADDR(vmf->vmf_cs, vmf->vmf_ip);
	if (*(u_char *)addr == HLT)
		vmf->vmf_trapno = vmf->vmf_eflags & PSL_C;
		vmf->vmf_trapno = vmf->vmf_trapno << 16;
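
/*
 * vm86_intcall() executes BIOS interrupt 'intnum' through the vm86
 * trampoline; out-of-range vectors (outside 0-0xff) are rejected and
 * the caller must hold the MP lock.  Register values are passed in and
 * returned through *vmf.
 */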
vm86_intcall(int intnum, struct vm86frame *vmf)

	if (intnum < 0 || intnum > 0xff)

	ASSERT_MP_LOCK_HELD();

	vmf->vmf_trapno = intnum;
	error = vm86_bioscall(vmf);
	 * removed.  This causes more problems than it solves; we will
	 * have to find another way to detect inappropriate 8254 writes

 * struct vm86context contains the page table to use when making
 * vm86 calls.  If intnum is a valid interrupt number (0-255), then
 * the "interrupt trampoline" will be used, otherwise we use the
 * caller's cs:ip routine.
vm86_datacall(intnum, vmf, vmc)
	struct vm86frame *vmf;
	struct vm86context *vmc;

	pt_entry_t *pte = (pt_entry_t *)vm86paddr;
	int i, entry, retval;

	ASSERT_MP_LOCK_HELD();

	for (i = 0; i < vmc->npages; i++) {
		page = vtophys(vmc->pmap[i].kva & PG_FRAME);
		entry = vmc->pmap[i].pte_num;
		vmc->pmap[i].old_pte = pte[entry];
		pte[entry] = page | PG_V | PG_RW | PG_U;

	vmf->vmf_trapno = intnum;
	retval = vm86_bioscall(vmf);

	for (i = 0; i < vmc->npages; i++) {
		entry = vmc->pmap[i].pte_num;
		pte[entry] = vmc->pmap[i].old_pte;
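
/*
 * Typical use, as an illustrative sketch only (the buffer, page number
 * and register fields chosen here are examples, not fixed by this
 * interface):
 *
 *	struct vm86context vmc;
 *	struct vm86frame vmf;
 *
 *	bzero(&vmc, sizeof(vmc));
 *	bzero(&vmf, sizeof(vmf));
 *	vm86_addpage(&vmc, 1, buf_kva);
 *	vm86_getptr(&vmc, buf_kva, &vmf.vmf_es, &vmf.vmf_di);
 *	vmf.vmf_ax = 0x4f00;
 *	vm86_datacall(0x10, &vmf, &vmc);
 *
 * The pages registered in the context are mapped into low memory for
 * the duration of the BIOS call and the previous mappings are restored
 * afterwards, as done in the loops above.
 */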
vm86_getaddr(vmc, sel, off)
	struct vm86context *vmc;

	addr = (vm_offset_t)MAKE_ADDR(sel, off);
	page = addr >> PAGE_SHIFT;
	for (i = 0; i < vmc->npages; i++)
		if (page == vmc->pmap[i].pte_num)
			return (vmc->pmap[i].kva + (addr & PAGE_MASK));

vm86_getptr(vmc, kva, sel, off)
	struct vm86context *vmc;

	for (i = 0; i < vmc->npages; i++)
		if (kva >= vmc->pmap[i].kva &&
		    kva < vmc->pmap[i].kva + PAGE_SIZE) {
			*off = kva - vmc->pmap[i].kva;
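			/*
			 * A 4 KB page spans 256 real-mode paragraphs, so the
			 * page number shifted left by 8 is the segment whose
			 * base is the start of that page.
			 */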
			*sel = vmc->pmap[i].pte_num << 8;

	panic("vm86_getptr: address not found");

vm86_sysarch(struct proc *p, char *args)

	struct i386_vm86_args ua;
	struct vm86_kernel *vm86;

	if ((error = copyin(args, &ua, sizeof(struct i386_vm86_args))) != 0)

	if (p->p_thread->td_pcb->pcb_ext == 0)
		if ((error = i386_extend_pcb(p)) != 0)
	vm86 = &p->p_thread->td_pcb->pcb_ext->ext_vm86;

		struct vm86_init_args sa;

		if ((error = copyin(ua.sub_args, &sa, sizeof(sa))) != 0)
		if (cpu_feature & CPUID_VME)
			vm86->vm86_has_vme = (rcr4() & CR4_VME ? 1 : 0);
			vm86->vm86_has_vme = 0;
		vm86->vm86_inited = 1;
		vm86->vm86_debug = sa.debug;
		bcopy(&sa.int_map, vm86->vm86_intmap, 32);

		struct vm86_vme_args sa;

		if ((cpu_feature & CPUID_VME) == 0)
		if ((error = copyin(ua.sub_args, &sa, sizeof(sa))))
			load_cr4(rcr4() | CR4_VME);
			load_cr4(rcr4() & ~CR4_VME);

		struct vm86_vme_args sa;

		sa.state = (rcr4() & CR4_VME ? 1 : 0);
		error = copyout(&sa, ua.sub_args, sizeof(sa));

		struct vm86_intcall_args sa;

		if ((error = suser_cred(p->p_ucred, 0)))
		if ((error = copyin(ua.sub_args, &sa, sizeof(sa))))
		if ((error = vm86_intcall(sa.intnum, &sa.vmf)))
		error = copyout(&sa, ua.sub_args, sizeof(sa));