2 * Copyright (c) 2018-2021 Maxime Villard, m00nbsd.net
5 * This code is part of the NVMM hypervisor.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 #include <sys/types.h>
39 #include <machine/segments.h>
40 #include <machine/psl.h>
46 #include <machine/pte.h>
47 #define PAGE_SIZE 4096
51 #include <machine/pmap.h>
52 #define PTE_P X86_PG_V /* 0x001: P (Valid) */
53 #define PTE_W X86_PG_RW /* 0x002: R/W (Read/Write) */
54 #define PSL_MBO PSL_RESERVED_DEFAULT /* 0x00000002 */
55 #define SDT_SYS386BSY SDT_SYSBSY /* 11: system 64-bit TSS busy */
57 #endif /* __NetBSD__ */
/*
 * Host-side buffers shared with the guest tests:
 *  - iobuf:   backing store for the emulated I/O port (port 123),
 *             streamed through by io_callback().
 *  - instbuf: host mapping of the guest instruction page (GPA 0x2000),
 *             filled with each test's code by run_test().
 * NOTE(review): the matching declaration of 'databuf' (the guest data
 * page at GPA 0x1000, used below) is elided from this listing.
 */
61 static char iobuf[IO_SIZE];
64 static uint8_t *instbuf;
/*
 * init_seg: initialize one x64 segment-state entry to a flat segment.
 *
 * 'type' is an SDT_* descriptor type; bit 4 of the type value is the
 * descriptor "S" bit, distinguishing code/data segments (s=1) from
 * system segments such as LDT/TSS (s=0).  Limit/base describe a flat
 * 64-bit segment.
 * NOTE(review): the assignment of 'sel' to seg->selector, the
 * remaining attrib fields (dpl/p/l/g/...), and the closing brace are
 * elided from this listing — confirm against the full source.
 */
67 init_seg(struct nvmm_x64_state_seg *seg, int type, int sel)
70 seg->attrib.type = type;
71 seg->attrib.s = (type & 0b10000) != 0;
78 seg->limit = 0x0000FFFF;
79 seg->base = 0x00000000;
/*
 * reset_machine: put the vCPU into a fixed, fully-known 64-bit state.
 *
 * Zeroes the whole x64 state, then installs flat code/data segments,
 * enables paged long mode (CR0.PG|PE + CR4.PAE + EFER.LME|LMA), sets
 * the PAT MSR layout copied from x86/pmap.c, points CR3 at the L4
 * page-table page that map_pages() places at GPA 0x3000, and sets RIP
 * to GPA 0x2000 where run_test() copies the guest code.  The state is
 * pushed to the kernel with nvmm_vcpu_setstate().
 * NOTE(review): braces and some statements are elided from this
 * listing; the visible tokens are kept verbatim.
 */
83 reset_machine(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
85 struct nvmm_x64_state *state = vcpu->state;
87 memset(state, 0, sizeof(*state));
/* RFLAGS: only the architecturally must-be-one bit (bit 1). */
90 state->gprs[NVMM_X64_GPR_RFLAGS] = PSL_MBO;
/* Flat 64-bit code and data segments for CS/SS/DS/ES/FS/GS. */
91 init_seg(&state->segs[NVMM_X64_SEG_CS], SDT_MEMERA, GSEL(GCODE_SEL, SEL_KPL));
92 init_seg(&state->segs[NVMM_X64_SEG_SS], SDT_MEMRWA, GSEL(GDATA_SEL, SEL_KPL));
93 init_seg(&state->segs[NVMM_X64_SEG_DS], SDT_MEMRWA, GSEL(GDATA_SEL, SEL_KPL));
94 init_seg(&state->segs[NVMM_X64_SEG_ES], SDT_MEMRWA, GSEL(GDATA_SEL, SEL_KPL));
95 init_seg(&state->segs[NVMM_X64_SEG_FS], SDT_MEMRWA, GSEL(GDATA_SEL, SEL_KPL))<br/>;
96 init_seg(&state->segs[NVMM_X64_SEG_GS], SDT_MEMRWA, GSEL(GDATA_SEL, SEL_KPL));
/* Empty descriptor tables; busy 64-bit TSS for TR. */
99 init_seg(&state->segs[NVMM_X64_SEG_GDT], 0, 0);
100 init_seg(&state->segs[NVMM_X64_SEG_IDT], 0, 0);
101 init_seg(&state->segs[NVMM_X64_SEG_LDT], SDT_SYSLDT, 0);
102 init_seg(&state->segs[NVMM_X64_SEG_TR], SDT_SYS386BSY, 0);
104 /* Protected mode enabled. */
105 state->crs[NVMM_X64_CR_CR0] = CR0_PG|CR0_PE|CR0_NE|CR0_TS|CR0_MP|CR0_WP|CR0_AM;
107 /* 64bit mode enabled. */
108 state->crs[NVMM_X64_CR_CR4] = CR4_PAE;
109 state->msrs[NVMM_X64_MSR_EFER] = EFER_LME | EFER_SCE | EFER_LMA;
111 /* Stolen from x86/pmap.c */
112 #define PATENTRY(n, type) (type << ((n) * 8))
113 #define PAT_UC 0x0ULL
114 #define PAT_WC 0x1ULL
115 #define PAT_WT 0x4ULL
116 #define PAT_WP 0x5ULL
117 #define PAT_WB 0x6ULL
118 #define PAT_UCMINUS 0x7ULL
119 state->msrs[NVMM_X64_MSR_PAT] =
120 PATENTRY(0, PAT_WB) | PATENTRY(1, PAT_WT) |
121 PATENTRY(2, PAT_UCMINUS) | PATENTRY(3, PAT_UC) |
122 PATENTRY(4, PAT_WB) | PATENTRY(5, PAT_WT) |
123 PATENTRY(6, PAT_UCMINUS) | PATENTRY(7, PAT_UC);
/* Page-table root at GPA 0x3000 (see map_pages); entry point 0x2000. */
126 state->crs[NVMM_X64_CR_CR3] = 0x3000;
128 state->gprs[NVMM_X64_GPR_RIP] = 0x2000;
130 if (nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_ALL) == -1)
131 err(errno, "nvmm_vcpu_setstate");
/*
 * map_pages: build the guest physical address space.
 *
 * One host page each is mmap'd, registered as HVA with NVMM, and then
 * mapped into the guest:
 *   GPA 0x1000 -> databuf (R/W)  data page used by the OUT* tests
 *   GPA 0x2000 -> instbuf (R/X)  guest code page (RIP starts here)
 *   GPA 0x3000 -> L4, 0x4000 -> L3, 0x5000 -> L2, 0x6000 -> L1
 * The four page-table pages form a single L4->L3->L2->L1 chain that
 * identity-maps GPAs 0x1000 and 0x2000, matching CR3 = 0x3000 set in
 * reset_machine().
 * NOTE(review): the declaration of 'ret', the err() calls after the
 * mmap failures, and several 'if (ret == -1)' lines are elided from
 * this listing; tokens below are kept verbatim.
 */
135 map_pages(struct nvmm_machine *mach)
137 pt_entry_t *L4, *L3, *L2, *L1;
/* Allocate the guest code and data pages on the host. */
140 instbuf = mmap(NULL, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE,
142 if (instbuf == MAP_FAILED)
144 databuf = mmap(NULL, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE,
146 if (databuf == MAP_FAILED)
/* Register the HVAs, then map them at their guest addresses. */
149 if (nvmm_hva_map(mach, (uintptr_t)instbuf, PAGE_SIZE) == -1)
150 err(errno, "nvmm_hva_map");
151 if (nvmm_hva_map(mach, (uintptr_t)databuf, PAGE_SIZE) == -1)
152 err(errno, "nvmm_hva_map");
153 ret = nvmm_gpa_map(mach, (uintptr_t)instbuf, 0x2000, PAGE_SIZE,
154 PROT_READ|PROT_EXEC);
156 err(errno, "nvmm_gpa_map");
157 ret = nvmm_gpa_map(mach, (uintptr_t)databuf, 0x1000, PAGE_SIZE,
158 PROT_READ|PROT_WRITE);
160 err(errno, "nvmm_gpa_map");
/* Allocate the four page-table levels. */
162 L4 = mmap(NULL, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE,
164 if (L4 == MAP_FAILED)
166 L3 = mmap(NULL, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE,
168 if (L3 == MAP_FAILED)
170 L2 = mmap(NULL, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE,
172 if (L2 == MAP_FAILED)
174 L1 = mmap(NULL, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE,
176 if (L1 == MAP_FAILED)
179 if (nvmm_hva_map(mach, (uintptr_t)L4, PAGE_SIZE) == -1)
180 err(errno, "nvmm_hva_map");
181 if (nvmm_hva_map(mach, (uintptr_t)L3, PAGE_SIZE) == -1)
182 err(errno, "nvmm_hva_map");
183 if (nvmm_hva_map(mach, (uintptr_t)L2, PAGE_SIZE) == -1)
184 err(errno, "nvmm_hva_map");
185 if (nvmm_hva_map(mach, (uintptr_t)L1, PAGE_SIZE) == -1)
186 err(errno, "nvmm_hva_map");
/* Page-table pages live at GPAs 0x3000-0x6000. */
188 ret = nvmm_gpa_map(mach, (uintptr_t)L4, 0x3000, PAGE_SIZE,
189 PROT_READ|PROT_WRITE);
191 err(errno, "nvmm_gpa_map");
192 ret = nvmm_gpa_map(mach, (uintptr_t)L3, 0x4000, PAGE_SIZE,
193 PROT_READ|PROT_WRITE);
195 err(errno, "nvmm_gpa_map");
196 ret = nvmm_gpa_map(mach, (uintptr_t)L2, 0x5000, PAGE_SIZE,
197 PROT_READ|PROT_WRITE);
199 err(errno, "nvmm_gpa_map");
200 ret = nvmm_gpa_map(mach, (uintptr_t)L1, 0x6000, PAGE_SIZE,
201 PROT_READ|PROT_WRITE);
203 err(errno, "nvmm_gpa_map");
/* Chain the levels, then map the data and code pages in L1. */
205 memset(L4, 0, PAGE_SIZE);
206 memset(L3, 0, PAGE_SIZE);
207 memset(L2, 0, PAGE_SIZE);
208 memset(L1, 0, PAGE_SIZE);
210 L4[0] = PTE_P | PTE_W | 0x4000;
211 L3[0] = PTE_P | PTE_W | 0x5000;
212 L2[0] = PTE_P | PTE_W | 0x6000;
213 L1[0x2000 / PAGE_SIZE] = PTE_P | PTE_W | 0x2000;
214 L1[0x1000 / PAGE_SIZE] = PTE_P | PTE_W | 0x1000;
217 /* -------------------------------------------------------------------------- */
/* Running offset into iobuf; advances with each port access. */
219 static size_t iobuf_off = 0;
/*
 * io_callback: NVMM I/O-assist callback for the emulated port.
 *
 * Only port 123 is accepted.  Data is exchanged with the static iobuf
 * at iobuf_off, which advances by io->size after each access so that
 * REP INS/OUTS sequences stream through the buffer.
 * NOTE(review): the direction test (presumably on io->in) selecting
 * between the two memcpy() calls is elided from this listing — the
 * first copy would serve IN (buffer -> guest), the second OUT
 * (guest -> buffer); confirm against the full source.
 */
222 io_callback(struct nvmm_io *io)
224 if (io->port != 123) {
225 printf("Wrong port\n");
230 memcpy(io->data, iobuf + iobuf_off, io->size);
232 memcpy(iobuf + iobuf_off, io->data, io->size);
234 iobuf_off += io->size;
/*
 * handle_io: process an NVMM_VCPU_EXIT_IO exit.
 *
 * Delegates emulation of the faulting IN/OUT instruction to libnvmm;
 * nvmm_assist_io() decodes it and invokes the registered io_callback.
 * NOTE(review): the declaration of 'ret' and the failure test before
 * err() are elided from this listing.
 */
239 handle_io(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
243 ret = nvmm_assist_io(mach, vcpu);
245 err(errno, "nvmm_assist_io");
/*
 * run_machine: run the vCPU until the guest shuts down.
 *
 * Loops on nvmm_vcpu_run(), dispatching each exit reason: NONE and
 * RDMSR exits are passed over, IO exits are emulated via handle_io(),
 * SHUTDOWN ends the run, and any other reason is reported as invalid.
 * NOTE(review): the enclosing loop construct, 'break'/'return'
 * statements, and the default label are elided from this listing.
 */
252 run_machine(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
254 struct nvmm_vcpu_exit *exit = vcpu->exit;
257 if (nvmm_vcpu_run(mach, vcpu) == -1)
258 err(errno, "nvmm_vcpu_run");
260 switch (exit->reason) {
261 case NVMM_VCPU_EXIT_NONE:
264 case NVMM_VCPU_EXIT_RDMSR:
268 case NVMM_VCPU_EXIT_IO:
269 handle_io(mach, vcpu);
272 case NVMM_VCPU_EXIT_SHUTDOWN:
273 printf("Shutting down!\n");
277 printf("Invalid!\n");
283 /* -------------------------------------------------------------------------- */
/*
 * run_test: execute one I/O test case and verify its result.
 *
 * Resets the vCPU, clears the I/O and data buffers, and copies the
 * test's guest code into instbuf (GPA 0x2000).  The expected string
 * is pre-seeded into either iobuf (IN-family tests: the guest reads
 * it from the port) or databuf (OUT-family tests: the guest writes it
 * to the port).  After run_machine() returns, the resulting buffer
 * ('res') is compared against test->wanted; a mismatch aborts the
 * whole harness via errx().
 * NOTE(review): the declarations of 'size'/'res', and the if/else
 * branches selecting which buffer is seeded and later checked, are
 * elided from this listing.
 */
294 run_test(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu,
295 const struct test *test)
300 size = (size_t)test->code_end - (size_t)test->code_begin;
302 reset_machine(mach, vcpu);
305 memset(iobuf, 0, IO_SIZE);
306 memset(databuf, 0, PAGE_SIZE);
307 memcpy(instbuf, test->code_begin, size);
310 strcpy(iobuf, test->wanted);
312 strcpy(databuf, test->wanted);
315 run_machine(mach, vcpu);
323 if (!strcmp(res, test->wanted)) {
324 printf("Test '%s' passed\n", test->name);
326 printf("Test '%s' failed, wanted '%s', got '%s'\n", test->name,
328 errx(-1, "run_test failed");
332 /* -------------------------------------------------------------------------- */
/*
 * Guest code fragments, assembled separately; each testN_begin/end
 * pair brackets the raw instructions copied into the guest code page
 * (GPA 0x2000) by run_test().
 */
334 extern uint8_t test1_begin, test1_end;
335 extern uint8_t test2_begin, test2_end;
336 extern uint8_t test3_begin, test3_end;
337 extern uint8_t test4_begin, test4_end;
338 extern uint8_t test5_begin, test5_end;
339 extern uint8_t test6_begin, test6_end;
340 extern uint8_t test7_begin, test7_end;
341 extern uint8_t test8_begin, test8_end;
342 extern uint8_t test9_begin, test9_end;
343 extern uint8_t test10_begin, test10_end;
344 extern uint8_t test11_begin, test11_end;
345 extern uint8_t test12_begin, test12_end;
/*
 * Test table: name, guest code range, expected string, and direction
 * flag (true = IN-family test, guest reads the string from the port;
 * false = OUT-family test, guest writes the string to the port).
 * Terminated by a NULL-name sentinel entry.
 */
347 static const struct test tests[] = {
348 { "test1 - INB", &test1_begin, &test1_end, "12", true },
349 { "test2 - INW", &test2_begin, &test2_end, "1234", true },
350 { "test3 - INL", &test3_begin, &test3_end, "12345678", true },
351 { "test4 - INSB+REP", &test4_begin, &test4_end, "12345", true },
352 { "test5 - INSW+REP", &test5_begin, &test5_end,
353 "Comment est votre blanquette", true },
354 { "test6 - INSL+REP", &test6_begin, &test6_end,
355 "123456789abcdefghijklmnopqrs", true },
356 { "test7 - OUTB", &test7_begin, &test7_end, "12", false },
357 { "test8 - OUTW", &test8_begin, &test8_end, "1234", false },
358 { "test9 - OUTL", &test9_begin, &test9_end, "12345678", false },
359 { "test10 - OUTSB+REP", &test10_begin, &test10_end, "12345", false },
360 { "test11 - OUTSW+REP", &test11_begin, &test11_end,
361 "Ah, Herr Bramard", false },
362 { "test12 - OUTSL+REP", &test12_begin, &test12_end,
363 "123456789abcdefghijklmnopqrs", false },
364 { NULL, NULL, NULL, NULL, false }
367 static struct nvmm_assist_callbacks callbacks = {
373 * 0x1000: Data, mapped
374 * 0x2000: Instructions, mapped
380 int main(int argc, char *argv[])
382 struct nvmm_machine mach;
383 struct nvmm_vcpu vcpu;
386 if (nvmm_init() == -1)
387 err(errno, "nvmm_init");
388 if (nvmm_machine_create(&mach) == -1)
389 err(errno, "nvmm_machine_create");
390 if (nvmm_vcpu_create(&mach, 0, &vcpu) == -1)
391 err(errno, "nvmm_vcpu_create");
392 nvmm_vcpu_configure(&mach, &vcpu, NVMM_VCPU_CONF_CALLBACKS, &callbacks);
395 for (i = 0; tests[i].name != NULL; i++) {
396 run_test(&mach, &vcpu, &tests[i]);