/* $NetBSD: nvmm_x86_vmx.c,v 1.82 2021/03/26 15:59:53 reinoud Exp $ */

 * Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net

 * This code is part of the NVMM hypervisor.

 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.

 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/bitops.h>
#include <sys/cpumask.h>
#include <sys/globaldata.h>
#include <sys/kernel.h>
#include <sys/malloc.h>		/* contigmalloc, contigfree */
#include <sys/thread2.h>	/* lwkt_send_ipiq, lwkt_send_ipiq_mask */

#include <vm/pmap.h>		/* pmap_ept_transform() */
#include <vm/vm_map.h>

#include <machine/cpufunc.h>
#include <machine/md_var.h>	/* cpu_* */
#include <machine/pmap_inval.h>	/* pmap_inval_smp() */
#include <machine/segments.h>
#include <machine/smp.h>	/* smp_active_mask */
#include <machine/specialreg.h>

#include <dev/virtual/nvmm/nvmm_compat.h>
#include <dev/virtual/nvmm/nvmm.h>
#include <dev/virtual/nvmm/nvmm_internal.h>
#include <dev/virtual/nvmm/x86/nvmm_x86.h>

int _vmx_vmxon(paddr_t *pa);
int _vmx_vmxoff(void);
int vmx_vmlaunch(uint64_t *gprs);
int vmx_vmresume(uint64_t *gprs);
void vmx_resume_rip(void);
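
/*
 * Low-level transition stubs, implemented in assembly.  They return
 * nonzero when the underlying instruction fails; for VMXON/VMXOFF there
 * is no way to recover from such a failure, hence the panics below.
 */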
#define vmx_vmxon(a) \
	if (__predict_false(_vmx_vmxon(a) != 0)) { \
		panic("%s: VMXON failed", __func__); \
	}

#define vmx_vmxoff() \
	if (__predict_false(_vmx_vmxoff() != 0)) { \
		panic("%s: VMXOFF failed", __func__); \
	}
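
/*
 * Inline wrappers for the remaining VMX instructions.  Per the architected
 * status convention, a failing instruction sets ZF (VMfailValid, with an
 * error number readable through VMCS_INSTRUCTION_ERROR) or CF
 * (VMfailInvalid); the jz/jc branches below jump to shared failure labels.
 */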
static inline void
vmx_invept(uint64_t op, struct ept_desc *desc)
		"invept %[desc],%[op];"
		"jz vmx_insn_failvalid;"
		"jc vmx_insn_failinvalid;"
		: [desc] "m" (*desc), [op] "r" (op)

static inline void
vmx_invvpid(uint64_t op, struct vpid_desc *desc)
		"invvpid %[desc],%[op];"
		"jz vmx_insn_failvalid;"
		"jc vmx_insn_failinvalid;"
		: [desc] "m" (*desc), [op] "r" (op)

static inline uint64_t
vmx_vmread(uint64_t field)
		"vmread %[field],%[value];"
		"jz vmx_insn_failvalid;"
		"jc vmx_insn_failinvalid;"
		: [value] "=r" (value)
		: [field] "r" (field)

static inline void
vmx_vmwrite(uint64_t field, uint64_t value)
		"vmwrite %[value],%[field];"
		"jz vmx_insn_failvalid;"
		"jc vmx_insn_failinvalid;"
		: [field] "r" (field), [value] "r" (value)

static inline paddr_t
		: [pa] "m" (*(paddr_t *)&pa)

static inline void
vmx_vmptrld(paddr_t *pa)
		"jz vmx_insn_failvalid;"
		"jc vmx_insn_failinvalid;"

static inline void
vmx_vmclear(paddr_t *pa)
		"jz vmx_insn_failvalid;"
		"jc vmx_insn_failinvalid;"

	asm volatile ("cli" ::: "memory");

	asm volatile ("sti" ::: "memory");
#define MSR_IA32_FEATURE_CONTROL 0x003A
#define IA32_FEATURE_CONTROL_LOCK __BIT(0)
#define IA32_FEATURE_CONTROL_IN_SMX __BIT(1)
#define IA32_FEATURE_CONTROL_OUT_SMX __BIT(2)

#define MSR_IA32_VMX_BASIC 0x0480
#define IA32_VMX_BASIC_IDENT __BITS(30,0)
#define IA32_VMX_BASIC_DATA_SIZE __BITS(44,32)
#define IA32_VMX_BASIC_MEM_WIDTH __BIT(48)
#define IA32_VMX_BASIC_DUAL __BIT(49)
#define IA32_VMX_BASIC_MEM_TYPE __BITS(53,50)
#define MEM_TYPE_UC 0
#define MEM_TYPE_WB 6
#define IA32_VMX_BASIC_IO_REPORT __BIT(54)
#define IA32_VMX_BASIC_TRUE_CTLS __BIT(55)

#define MSR_IA32_VMX_PINBASED_CTLS 0x0481
#define MSR_IA32_VMX_PROCBASED_CTLS 0x0482
#define MSR_IA32_VMX_EXIT_CTLS 0x0483
#define MSR_IA32_VMX_ENTRY_CTLS 0x0484
#define MSR_IA32_VMX_PROCBASED_CTLS2 0x048B

#define MSR_IA32_VMX_TRUE_PINBASED_CTLS 0x048D
#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x048E
#define MSR_IA32_VMX_TRUE_EXIT_CTLS 0x048F
#define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x0490

#define MSR_IA32_VMX_CR0_FIXED0 0x0486
#define MSR_IA32_VMX_CR0_FIXED1 0x0487
#define MSR_IA32_VMX_CR4_FIXED0 0x0488
#define MSR_IA32_VMX_CR4_FIXED1 0x0489

#define MSR_IA32_VMX_EPT_VPID_CAP 0x048C
#define IA32_VMX_EPT_VPID_XO __BIT(0)
#define IA32_VMX_EPT_VPID_WALKLENGTH_4 __BIT(6)
#define IA32_VMX_EPT_VPID_UC __BIT(8)
#define IA32_VMX_EPT_VPID_WB __BIT(14)
#define IA32_VMX_EPT_VPID_2MB __BIT(16)
#define IA32_VMX_EPT_VPID_1GB __BIT(17)
#define IA32_VMX_EPT_VPID_INVEPT __BIT(20)
#define IA32_VMX_EPT_VPID_FLAGS_AD __BIT(21)
#define IA32_VMX_EPT_VPID_ADVANCED_VMEXIT_INFO __BIT(22)
#define IA32_VMX_EPT_VPID_SHSTK __BIT(23)
#define IA32_VMX_EPT_VPID_INVEPT_CONTEXT __BIT(25)
#define IA32_VMX_EPT_VPID_INVEPT_ALL __BIT(26)
#define IA32_VMX_EPT_VPID_INVVPID __BIT(32)
#define IA32_VMX_EPT_VPID_INVVPID_ADDR __BIT(40)
#define IA32_VMX_EPT_VPID_INVVPID_CONTEXT __BIT(41)
#define IA32_VMX_EPT_VPID_INVVPID_ALL __BIT(42)
#define IA32_VMX_EPT_VPID_INVVPID_CONTEXT_NOG __BIT(43)

/* -------------------------------------------------------------------------- */
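
/*
 * VMCS field encodings.  The encoding itself describes the field, as per
 * the SDM: bits 14:13 give the width (0 = 16-bit, 1 = 64-bit, 2 = 32-bit,
 * 3 = natural-width) and bits 11:10 the type (0 = control, 1 = read-only
 * data, 2 = guest state, 3 = host state), which is why the constants below
 * cluster by prefix (0x0800, 0x2000, 0x4400, 0x6C00, ...).
 */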
/* 16-bit control fields */
#define VMCS_VPID 0x00000000
#define VMCS_PIR_VECTOR 0x00000002
#define VMCS_EPTP_INDEX 0x00000004
/* 16-bit guest-state fields */
#define VMCS_GUEST_ES_SELECTOR 0x00000800
#define VMCS_GUEST_CS_SELECTOR 0x00000802
#define VMCS_GUEST_SS_SELECTOR 0x00000804
#define VMCS_GUEST_DS_SELECTOR 0x00000806
#define VMCS_GUEST_FS_SELECTOR 0x00000808
#define VMCS_GUEST_GS_SELECTOR 0x0000080A
#define VMCS_GUEST_LDTR_SELECTOR 0x0000080C
#define VMCS_GUEST_TR_SELECTOR 0x0000080E
#define VMCS_GUEST_INTR_STATUS 0x00000810
#define VMCS_PML_INDEX 0x00000812
/* 16-bit host-state fields */
#define VMCS_HOST_ES_SELECTOR 0x00000C00
#define VMCS_HOST_CS_SELECTOR 0x00000C02
#define VMCS_HOST_SS_SELECTOR 0x00000C04
#define VMCS_HOST_DS_SELECTOR 0x00000C06
#define VMCS_HOST_FS_SELECTOR 0x00000C08
#define VMCS_HOST_GS_SELECTOR 0x00000C0A
#define VMCS_HOST_TR_SELECTOR 0x00000C0C
/* 64-bit control fields */
#define VMCS_IO_BITMAP_A 0x00002000
#define VMCS_IO_BITMAP_B 0x00002002
#define VMCS_MSR_BITMAP 0x00002004
#define VMCS_EXIT_MSR_STORE_ADDRESS 0x00002006
#define VMCS_EXIT_MSR_LOAD_ADDRESS 0x00002008
#define VMCS_ENTRY_MSR_LOAD_ADDRESS 0x0000200A
#define VMCS_EXECUTIVE_VMCS 0x0000200C
#define VMCS_PML_ADDRESS 0x0000200E
#define VMCS_TSC_OFFSET 0x00002010
#define VMCS_VIRTUAL_APIC 0x00002012
#define VMCS_APIC_ACCESS 0x00002014
#define VMCS_PIR_DESC 0x00002016
#define VMCS_VM_CONTROL 0x00002018
#define VMCS_EPTP 0x0000201A
#define EPTP_TYPE __BITS(2,0)
#define EPTP_TYPE_UC 0
#define EPTP_TYPE_WB 6
#define EPTP_WALKLEN __BITS(5,3)
#define EPTP_FLAGS_AD __BIT(6)
#define EPTP_SSS __BIT(7)
#define EPTP_PHYSADDR __BITS(63,12)
#define VMCS_EOI_EXIT0 0x0000201C
#define VMCS_EOI_EXIT1 0x0000201E
#define VMCS_EOI_EXIT2 0x00002020
#define VMCS_EOI_EXIT3 0x00002022
#define VMCS_EPTP_LIST 0x00002024
#define VMCS_VMREAD_BITMAP 0x00002026
#define VMCS_VMWRITE_BITMAP 0x00002028
#define VMCS_VIRTUAL_EXCEPTION 0x0000202A
#define VMCS_XSS_EXIT_BITMAP 0x0000202C
#define VMCS_ENCLS_EXIT_BITMAP 0x0000202E
#define VMCS_SUBPAGE_PERM_TABLE_PTR 0x00002030
#define VMCS_TSC_MULTIPLIER 0x00002032
#define VMCS_ENCLV_EXIT_BITMAP 0x00002036
/* 64-bit read-only fields */
#define VMCS_GUEST_PHYSICAL_ADDRESS 0x00002400
/* 64-bit guest-state fields */
#define VMCS_LINK_POINTER 0x00002800
#define VMCS_GUEST_IA32_DEBUGCTL 0x00002802
#define VMCS_GUEST_IA32_PAT 0x00002804
#define VMCS_GUEST_IA32_EFER 0x00002806
#define VMCS_GUEST_IA32_PERF_GLOBAL_CTRL 0x00002808
#define VMCS_GUEST_PDPTE0 0x0000280A
#define VMCS_GUEST_PDPTE1 0x0000280C
#define VMCS_GUEST_PDPTE2 0x0000280E
#define VMCS_GUEST_PDPTE3 0x00002810
#define VMCS_GUEST_BNDCFGS 0x00002812
#define VMCS_GUEST_RTIT_CTL 0x00002814
#define VMCS_GUEST_PKRS 0x00002818
/* 64-bit host-state fields */
#define VMCS_HOST_IA32_PAT 0x00002C00
#define VMCS_HOST_IA32_EFER 0x00002C02
#define VMCS_HOST_IA32_PERF_GLOBAL_CTRL 0x00002C04
#define VMCS_HOST_IA32_PKRS 0x00002C06
/* 32-bit control fields */
#define VMCS_PINBASED_CTLS 0x00004000
#define PIN_CTLS_INT_EXITING __BIT(0)
#define PIN_CTLS_NMI_EXITING __BIT(3)
#define PIN_CTLS_VIRTUAL_NMIS __BIT(5)
#define PIN_CTLS_ACTIVATE_PREEMPT_TIMER __BIT(6)
#define PIN_CTLS_PROCESS_POSTED_INTS __BIT(7)
#define VMCS_PROCBASED_CTLS 0x00004002
#define PROC_CTLS_INT_WINDOW_EXITING __BIT(2)
#define PROC_CTLS_USE_TSC_OFFSETTING __BIT(3)
#define PROC_CTLS_HLT_EXITING __BIT(7)
#define PROC_CTLS_INVLPG_EXITING __BIT(9)
#define PROC_CTLS_MWAIT_EXITING __BIT(10)
#define PROC_CTLS_RDPMC_EXITING __BIT(11)
#define PROC_CTLS_RDTSC_EXITING __BIT(12)
#define PROC_CTLS_RCR3_EXITING __BIT(15)
#define PROC_CTLS_LCR3_EXITING __BIT(16)
#define PROC_CTLS_RCR8_EXITING __BIT(19)
#define PROC_CTLS_LCR8_EXITING __BIT(20)
#define PROC_CTLS_USE_TPR_SHADOW __BIT(21)
#define PROC_CTLS_NMI_WINDOW_EXITING __BIT(22)
#define PROC_CTLS_DR_EXITING __BIT(23)
#define PROC_CTLS_UNCOND_IO_EXITING __BIT(24)
#define PROC_CTLS_USE_IO_BITMAPS __BIT(25)
#define PROC_CTLS_MONITOR_TRAP_FLAG __BIT(27)
#define PROC_CTLS_USE_MSR_BITMAPS __BIT(28)
#define PROC_CTLS_MONITOR_EXITING __BIT(29)
#define PROC_CTLS_PAUSE_EXITING __BIT(30)
#define PROC_CTLS_ACTIVATE_CTLS2 __BIT(31)
#define VMCS_EXCEPTION_BITMAP 0x00004004
#define VMCS_PF_ERROR_MASK 0x00004006
#define VMCS_PF_ERROR_MATCH 0x00004008
#define VMCS_CR3_TARGET_COUNT 0x0000400A
#define VMCS_EXIT_CTLS 0x0000400C
#define EXIT_CTLS_SAVE_DEBUG_CONTROLS __BIT(2)
#define EXIT_CTLS_HOST_LONG_MODE __BIT(9)
#define EXIT_CTLS_LOAD_PERFGLOBALCTRL __BIT(12)
#define EXIT_CTLS_ACK_INTERRUPT __BIT(15)
#define EXIT_CTLS_SAVE_PAT __BIT(18)
#define EXIT_CTLS_LOAD_PAT __BIT(19)
#define EXIT_CTLS_SAVE_EFER __BIT(20)
#define EXIT_CTLS_LOAD_EFER __BIT(21)
#define EXIT_CTLS_SAVE_PREEMPT_TIMER __BIT(22)
#define EXIT_CTLS_CLEAR_BNDCFGS __BIT(23)
#define EXIT_CTLS_CONCEAL_PT __BIT(24)
#define EXIT_CTLS_CLEAR_RTIT_CTL __BIT(25)
#define EXIT_CTLS_LOAD_CET __BIT(28)
#define EXIT_CTLS_LOAD_PKRS __BIT(29)
#define VMCS_EXIT_MSR_STORE_COUNT 0x0000400E
#define VMCS_EXIT_MSR_LOAD_COUNT 0x00004010
#define VMCS_ENTRY_CTLS 0x00004012
#define ENTRY_CTLS_LOAD_DEBUG_CONTROLS __BIT(2)
#define ENTRY_CTLS_LONG_MODE __BIT(9)
#define ENTRY_CTLS_SMM __BIT(10)
#define ENTRY_CTLS_DISABLE_DUAL __BIT(11)
#define ENTRY_CTLS_LOAD_PERFGLOBALCTRL __BIT(13)
#define ENTRY_CTLS_LOAD_PAT __BIT(14)
#define ENTRY_CTLS_LOAD_EFER __BIT(15)
#define ENTRY_CTLS_LOAD_BNDCFGS __BIT(16)
#define ENTRY_CTLS_CONCEAL_PT __BIT(17)
#define ENTRY_CTLS_LOAD_RTIT_CTL __BIT(18)
#define ENTRY_CTLS_LOAD_CET __BIT(20)
#define ENTRY_CTLS_LOAD_PKRS __BIT(22)
#define VMCS_ENTRY_MSR_LOAD_COUNT 0x00004014
#define VMCS_ENTRY_INTR_INFO 0x00004016
#define INTR_INFO_VECTOR __BITS(7,0)
#define INTR_INFO_TYPE __BITS(10,8)
#define INTR_TYPE_EXT_INT 0
#define INTR_TYPE_NMI 2
#define INTR_TYPE_HW_EXC 3
#define INTR_TYPE_SW_INT 4
#define INTR_TYPE_PRIV_SW_EXC 5
#define INTR_TYPE_SW_EXC 6
#define INTR_TYPE_OTHER 7
#define INTR_INFO_ERROR __BIT(11)
#define INTR_INFO_VALID __BIT(31)
#define VMCS_ENTRY_EXCEPTION_ERROR 0x00004018
#define VMCS_ENTRY_INSTRUCTION_LENGTH 0x0000401A
#define VMCS_TPR_THRESHOLD 0x0000401C
#define VMCS_PROCBASED_CTLS2 0x0000401E
#define PROC_CTLS2_VIRT_APIC_ACCESSES __BIT(0)
#define PROC_CTLS2_ENABLE_EPT __BIT(1)
#define PROC_CTLS2_DESC_TABLE_EXITING __BIT(2)
#define PROC_CTLS2_ENABLE_RDTSCP __BIT(3)
#define PROC_CTLS2_VIRT_X2APIC __BIT(4)
#define PROC_CTLS2_ENABLE_VPID __BIT(5)
#define PROC_CTLS2_WBINVD_EXITING __BIT(6)
#define PROC_CTLS2_UNRESTRICTED_GUEST __BIT(7)
#define PROC_CTLS2_APIC_REG_VIRT __BIT(8)
#define PROC_CTLS2_VIRT_INT_DELIVERY __BIT(9)
#define PROC_CTLS2_PAUSE_LOOP_EXITING __BIT(10)
#define PROC_CTLS2_RDRAND_EXITING __BIT(11)
#define PROC_CTLS2_INVPCID_ENABLE __BIT(12)
#define PROC_CTLS2_VMFUNC_ENABLE __BIT(13)
#define PROC_CTLS2_VMCS_SHADOWING __BIT(14)
#define PROC_CTLS2_ENCLS_EXITING __BIT(15)
#define PROC_CTLS2_RDSEED_EXITING __BIT(16)
#define PROC_CTLS2_PML_ENABLE __BIT(17)
#define PROC_CTLS2_EPT_VIOLATION __BIT(18)
#define PROC_CTLS2_CONCEAL_VMX_FROM_PT __BIT(19)
#define PROC_CTLS2_XSAVES_ENABLE __BIT(20)
#define PROC_CTLS2_MODE_BASED_EXEC_EPT __BIT(22)
#define PROC_CTLS2_SUBPAGE_PERMISSIONS __BIT(23)
#define PROC_CTLS2_PT_USES_GPA __BIT(24)
#define PROC_CTLS2_USE_TSC_SCALING __BIT(25)
#define PROC_CTLS2_WAIT_PAUSE_ENABLE __BIT(26)
#define PROC_CTLS2_ENCLV_EXITING __BIT(28)
#define VMCS_PLE_GAP 0x00004020
#define VMCS_PLE_WINDOW 0x00004022
/* 32-bit read-only data fields */
#define VMCS_INSTRUCTION_ERROR 0x00004400
#define VMCS_EXIT_REASON 0x00004402
#define VMCS_EXIT_INTR_INFO 0x00004404
#define VMCS_EXIT_INTR_ERRCODE 0x00004406
#define VMCS_IDT_VECTORING_INFO 0x00004408
#define VMCS_IDT_VECTORING_ERROR 0x0000440A
#define VMCS_EXIT_INSTRUCTION_LENGTH 0x0000440C
#define VMCS_EXIT_INSTRUCTION_INFO 0x0000440E
/* 32-bit guest-state fields */
#define VMCS_GUEST_ES_LIMIT 0x00004800
#define VMCS_GUEST_CS_LIMIT 0x00004802
#define VMCS_GUEST_SS_LIMIT 0x00004804
#define VMCS_GUEST_DS_LIMIT 0x00004806
#define VMCS_GUEST_FS_LIMIT 0x00004808
#define VMCS_GUEST_GS_LIMIT 0x0000480A
#define VMCS_GUEST_LDTR_LIMIT 0x0000480C
#define VMCS_GUEST_TR_LIMIT 0x0000480E
#define VMCS_GUEST_GDTR_LIMIT 0x00004810
#define VMCS_GUEST_IDTR_LIMIT 0x00004812
#define VMCS_GUEST_ES_ACCESS_RIGHTS 0x00004814
#define VMCS_GUEST_CS_ACCESS_RIGHTS 0x00004816
#define VMCS_GUEST_SS_ACCESS_RIGHTS 0x00004818
#define VMCS_GUEST_DS_ACCESS_RIGHTS 0x0000481A
#define VMCS_GUEST_FS_ACCESS_RIGHTS 0x0000481C
#define VMCS_GUEST_GS_ACCESS_RIGHTS 0x0000481E
#define VMCS_GUEST_LDTR_ACCESS_RIGHTS 0x00004820
#define VMCS_GUEST_TR_ACCESS_RIGHTS 0x00004822
#define VMCS_GUEST_INTERRUPTIBILITY 0x00004824
#define INT_STATE_STI __BIT(0)
#define INT_STATE_MOVSS __BIT(1)
#define INT_STATE_SMI __BIT(2)
#define INT_STATE_NMI __BIT(3)
#define INT_STATE_ENCLAVE __BIT(4)
#define VMCS_GUEST_ACTIVITY 0x00004826
#define VMCS_GUEST_SMBASE 0x00004828
#define VMCS_GUEST_IA32_SYSENTER_CS 0x0000482A
#define VMCS_PREEMPTION_TIMER_VALUE 0x0000482E
/* 32-bit host-state fields */
#define VMCS_HOST_IA32_SYSENTER_CS 0x00004C00
/* Natural-Width control fields */
#define VMCS_CR0_MASK 0x00006000
#define VMCS_CR4_MASK 0x00006002
#define VMCS_CR0_SHADOW 0x00006004
#define VMCS_CR4_SHADOW 0x00006006
#define VMCS_CR3_TARGET0 0x00006008
#define VMCS_CR3_TARGET1 0x0000600A
#define VMCS_CR3_TARGET2 0x0000600C
#define VMCS_CR3_TARGET3 0x0000600E
/* Natural-Width read-only fields */
#define VMCS_EXIT_QUALIFICATION 0x00006400
#define VMCS_IO_RCX 0x00006402
#define VMCS_IO_RSI 0x00006404
#define VMCS_IO_RDI 0x00006406
#define VMCS_IO_RIP 0x00006408
#define VMCS_GUEST_LINEAR_ADDRESS 0x0000640A
/* Natural-Width guest-state fields */
#define VMCS_GUEST_CR0 0x00006800
#define VMCS_GUEST_CR3 0x00006802
#define VMCS_GUEST_CR4 0x00006804
#define VMCS_GUEST_ES_BASE 0x00006806
#define VMCS_GUEST_CS_BASE 0x00006808
#define VMCS_GUEST_SS_BASE 0x0000680A
#define VMCS_GUEST_DS_BASE 0x0000680C
#define VMCS_GUEST_FS_BASE 0x0000680E
#define VMCS_GUEST_GS_BASE 0x00006810
#define VMCS_GUEST_LDTR_BASE 0x00006812
#define VMCS_GUEST_TR_BASE 0x00006814
#define VMCS_GUEST_GDTR_BASE 0x00006816
#define VMCS_GUEST_IDTR_BASE 0x00006818
#define VMCS_GUEST_DR7 0x0000681A
#define VMCS_GUEST_RSP 0x0000681C
#define VMCS_GUEST_RIP 0x0000681E
#define VMCS_GUEST_RFLAGS 0x00006820
#define VMCS_GUEST_PENDING_DBG_EXCEPTIONS 0x00006822
#define VMCS_GUEST_IA32_SYSENTER_ESP 0x00006824
#define VMCS_GUEST_IA32_SYSENTER_EIP 0x00006826
#define VMCS_GUEST_IA32_S_CET 0x00006828
#define VMCS_GUEST_SSP 0x0000682A
#define VMCS_GUEST_IA32_INTR_SSP_TABLE 0x0000682C
/* Natural-Width host-state fields */
#define VMCS_HOST_CR0 0x00006C00
#define VMCS_HOST_CR3 0x00006C02
#define VMCS_HOST_CR4 0x00006C04
#define VMCS_HOST_FS_BASE 0x00006C06
#define VMCS_HOST_GS_BASE 0x00006C08
#define VMCS_HOST_TR_BASE 0x00006C0A
#define VMCS_HOST_GDTR_BASE 0x00006C0C
#define VMCS_HOST_IDTR_BASE 0x00006C0E
#define VMCS_HOST_IA32_SYSENTER_ESP 0x00006C10
#define VMCS_HOST_IA32_SYSENTER_EIP 0x00006C12
#define VMCS_HOST_RSP 0x00006C14
#define VMCS_HOST_RIP 0x00006C16
#define VMCS_HOST_IA32_S_CET 0x00006C18
#define VMCS_HOST_SSP 0x00006C1A
#define VMCS_HOST_IA32_INTR_SSP_TABLE 0x00006C1C

/* VMX basic exit reasons. */
#define VMCS_EXITCODE_EXC_NMI 0
#define VMCS_EXITCODE_EXT_INT 1
#define VMCS_EXITCODE_SHUTDOWN 2
#define VMCS_EXITCODE_INIT 3
#define VMCS_EXITCODE_SIPI 4
#define VMCS_EXITCODE_SMI 5
#define VMCS_EXITCODE_OTHER_SMI 6
#define VMCS_EXITCODE_INT_WINDOW 7
#define VMCS_EXITCODE_NMI_WINDOW 8
#define VMCS_EXITCODE_TASK_SWITCH 9
#define VMCS_EXITCODE_CPUID 10
#define VMCS_EXITCODE_GETSEC 11
#define VMCS_EXITCODE_HLT 12
#define VMCS_EXITCODE_INVD 13
#define VMCS_EXITCODE_INVLPG 14
#define VMCS_EXITCODE_RDPMC 15
#define VMCS_EXITCODE_RDTSC 16
#define VMCS_EXITCODE_RSM 17
#define VMCS_EXITCODE_VMCALL 18
#define VMCS_EXITCODE_VMCLEAR 19
#define VMCS_EXITCODE_VMLAUNCH 20
#define VMCS_EXITCODE_VMPTRLD 21
#define VMCS_EXITCODE_VMPTRST 22
#define VMCS_EXITCODE_VMREAD 23
#define VMCS_EXITCODE_VMRESUME 24
#define VMCS_EXITCODE_VMWRITE 25
#define VMCS_EXITCODE_VMXOFF 26
#define VMCS_EXITCODE_VMXON 27
#define VMCS_EXITCODE_CR 28
#define VMCS_EXITCODE_DR 29
#define VMCS_EXITCODE_IO 30
#define VMCS_EXITCODE_RDMSR 31
#define VMCS_EXITCODE_WRMSR 32
#define VMCS_EXITCODE_FAIL_GUEST_INVALID 33
#define VMCS_EXITCODE_FAIL_MSR_INVALID 34
#define VMCS_EXITCODE_MWAIT 36
#define VMCS_EXITCODE_TRAP_FLAG 37
#define VMCS_EXITCODE_MONITOR 39
#define VMCS_EXITCODE_PAUSE 40
#define VMCS_EXITCODE_FAIL_MACHINE_CHECK 41
#define VMCS_EXITCODE_TPR_BELOW 43
#define VMCS_EXITCODE_APIC_ACCESS 44
#define VMCS_EXITCODE_VEOI 45
#define VMCS_EXITCODE_GDTR_IDTR 46
#define VMCS_EXITCODE_LDTR_TR 47
#define VMCS_EXITCODE_EPT_VIOLATION 48
#define VMCS_EXITCODE_EPT_MISCONFIG 49
#define VMCS_EXITCODE_INVEPT 50
#define VMCS_EXITCODE_RDTSCP 51
#define VMCS_EXITCODE_PREEMPT_TIMEOUT 52
#define VMCS_EXITCODE_INVVPID 53
#define VMCS_EXITCODE_WBINVD 54
#define VMCS_EXITCODE_XSETBV 55
#define VMCS_EXITCODE_APIC_WRITE 56
#define VMCS_EXITCODE_RDRAND 57
#define VMCS_EXITCODE_INVPCID 58
#define VMCS_EXITCODE_VMFUNC 59
#define VMCS_EXITCODE_ENCLS 60
#define VMCS_EXITCODE_RDSEED 61
#define VMCS_EXITCODE_PAGE_LOG_FULL 62
#define VMCS_EXITCODE_XSAVES 63
#define VMCS_EXITCODE_XRSTORS 64
#define VMCS_EXITCODE_SPP 66
#define VMCS_EXITCODE_UMWAIT 67
#define VMCS_EXITCODE_TPAUSE 68

/* -------------------------------------------------------------------------- */

static void vmx_vcpu_state_provide(struct nvmm_cpu *, uint64_t);
static void vmx_vcpu_state_commit(struct nvmm_cpu *);

/*
 * These host values are static, they do not change at runtime and are the same
 * on all CPUs. We save them here because they are not saved in the VMCS.
 */

} vmx_global_hstate __cacheline_aligned;

#define VMX_MSRLIST_STAR 0
#define VMX_MSRLIST_LSTAR 1
#define VMX_MSRLIST_CSTAR 2
#define VMX_MSRLIST_SFMASK 3
#define VMX_MSRLIST_KERNELGSBASE 4
#define VMX_MSRLIST_EXIT_NMSR 5
#define VMX_MSRLIST_L1DFLUSH 5

/* On entry, we may do +1 to include L1DFLUSH. */
static size_t vmx_msrlist_entry_nmsr __read_mostly = VMX_MSRLIST_EXIT_NMSR;
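
/*
 * The five syscall-related guest MSRs above live in the automatic MSR
 * load/store lists, so their guest values are switched by hardware on
 * every entry and exit.  The entry list may grow by one extra slot that
 * writes the L1D flush command MSR on entry, as an L1TF mitigation,
 * hence the runtime count above.
 */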

#define VMXON_IDENT_REVISION __BITS(30,0)

	uint8_t data[PAGE_SIZE - 4];

CTASSERT(sizeof(struct vmxon) == PAGE_SIZE);

static struct vmxoncpu vmxoncpu[MAXCPUS];

#define VMCS_IDENT_REVISION __BITS(30,0)
#define VMCS_IDENT_SHADOW __BIT(31)

	uint8_t data[PAGE_SIZE - 8];

CTASSERT(sizeof(struct vmcs) == PAGE_SIZE);

#define VPID_MAX 0xFFFF

/* Make sure we never run out of VPIDs. */
CTASSERT(VPID_MAX-1 >= NVMM_MAX_MACHINES * NVMM_MAX_VCPUS);
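/* VPID 0 is reserved for the host, so only VPID_MAX-1 IDs are usable. */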

static uint64_t vmx_tlb_flush_op __read_mostly;
static uint64_t vmx_ept_flush_op __read_mostly;
static uint64_t vmx_eptp_type __read_mostly;

static uint64_t vmx_pinbased_ctls __read_mostly;
static uint64_t vmx_procbased_ctls __read_mostly;
static uint64_t vmx_procbased_ctls2 __read_mostly;
static uint64_t vmx_entry_ctls __read_mostly;
static uint64_t vmx_exit_ctls __read_mostly;

static uint64_t vmx_cr0_fixed0 __read_mostly;
static uint64_t vmx_cr0_fixed1 __read_mostly;
static uint64_t vmx_cr4_fixed0 __read_mostly;
static uint64_t vmx_cr4_fixed1 __read_mostly;

static bool pmap_ept_has_ad;
static int vmx_change_cpu_count;

#define VMX_PINBASED_CTLS_ONE \
	(PIN_CTLS_INT_EXITING| \
	 PIN_CTLS_NMI_EXITING| \
	 PIN_CTLS_VIRTUAL_NMIS)

#define VMX_PINBASED_CTLS_ZERO 0

#define VMX_PROCBASED_CTLS_ONE \
	(PROC_CTLS_USE_TSC_OFFSETTING| \
	 PROC_CTLS_HLT_EXITING| \
	 PROC_CTLS_MWAIT_EXITING | \
	 PROC_CTLS_RDPMC_EXITING | \
	 PROC_CTLS_RCR8_EXITING | \
	 PROC_CTLS_LCR8_EXITING | \
	 PROC_CTLS_UNCOND_IO_EXITING | /* no I/O bitmap */ \
	 PROC_CTLS_USE_MSR_BITMAPS | \
	 PROC_CTLS_MONITOR_EXITING | \
	 PROC_CTLS_ACTIVATE_CTLS2)

#define VMX_PROCBASED_CTLS_ZERO \
	(PROC_CTLS_RCR3_EXITING| \
	 PROC_CTLS_LCR3_EXITING)

#define VMX_PROCBASED_CTLS2_ONE \
	(PROC_CTLS2_ENABLE_EPT| \
	 PROC_CTLS2_ENABLE_VPID| \
	 PROC_CTLS2_UNRESTRICTED_GUEST)

#define VMX_PROCBASED_CTLS2_ZERO 0

#define VMX_ENTRY_CTLS_ONE \
	(ENTRY_CTLS_LOAD_DEBUG_CONTROLS| \
	 ENTRY_CTLS_LOAD_EFER| \

#define VMX_ENTRY_CTLS_ZERO \
	 ENTRY_CTLS_DISABLE_DUAL)

#define VMX_EXIT_CTLS_ONE \
	(EXIT_CTLS_SAVE_DEBUG_CONTROLS| \
	 EXIT_CTLS_HOST_LONG_MODE| \
	 EXIT_CTLS_SAVE_PAT| \
	 EXIT_CTLS_LOAD_PAT| \
	 EXIT_CTLS_SAVE_EFER| \

#define VMX_EXIT_CTLS_ZERO 0
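
/*
 * The *_ONE masks list the control bits we require to be 1, the *_ZERO
 * masks the bits we require to be 0.  The IA32_VMX_*_CTLS capability MSRs
 * report the allowed settings: a bit set in the low 32 bits must be 1 in
 * the control, a bit clear in the high 32 bits must be 0.  A minimal
 * sketch of the init-time check (variable names hypothetical):
 *
 *	uint64_t cap = rdmsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS);
 *	uint32_t must_be_one = (uint32_t)cap;
 *	uint32_t may_be_one  = (uint32_t)(cap >> 32);
 *	bool ok = ((VMX_PROCBASED_CTLS_ONE & may_be_one) ==
 *	           VMX_PROCBASED_CTLS_ONE) &&
 *	          ((VMX_PROCBASED_CTLS_ZERO & must_be_one) == 0);
 */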

static uint8_t *vmx_asidmap __read_mostly;
static uint32_t vmx_maxasid __read_mostly;
static kmutex_t vmx_asidlock __cacheline_aligned;

#define VMX_XCR0_MASK_DEFAULT (XCR0_X87|XCR0_SSE)
static uint64_t vmx_xcr0_mask __read_mostly;

#define VMX_NCPUIDS 32

#define VMCS_NPAGES 1
#define VMCS_SIZE (VMCS_NPAGES * PAGE_SIZE)

#define MSRBM_NPAGES 1
#define MSRBM_SIZE (MSRBM_NPAGES * PAGE_SIZE)

#define CR0_STATIC_MASK \
	(CR0_ET | CR0_NW | CR0_CD)

	/* CR4_LA57 excluded */ \
	/* CR4_VMXE excluded */ \
	/* CR4_SMXE excluded */ \

	/* CR4_PKE excluded */ \
	/* CR4_CET excluded */ \
	/* CR4_PKS excluded */)
#define CR4_INVALID \
	(0xFFFFFFFFFFFFFFFFULL & ~CR4_VALID)

#define EFER_TLB_FLUSH \
	(EFER_NXE|EFER_LMA|EFER_LME)
#define CR0_TLB_FLUSH \
	(CR0_PG|CR0_WP|CR0_CD|CR0_NW)
#define CR4_TLB_FLUSH \
	(CR4_PSE|CR4_PAE|CR4_PGE|CR4_PCIDE|CR4_SMEP)
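
/*
 * A guest write that flips any bit in these masks can change how linear
 * addresses are translated, so the guest TLB (VPID) entries are flushed
 * on the next VM entry (see gtlb_want_flush).
 */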

/* -------------------------------------------------------------------------- */

struct vmx_machdata {
	volatile uint64_t mach_htlb_gen;

static const size_t vmx_vcpu_conf_sizes[NVMM_X86_VCPU_NCONF] = {
	[NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_CPUID)] =
	    sizeof(struct nvmm_vcpu_conf_cpuid),
	[NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_TPR)] =
	    sizeof(struct nvmm_vcpu_conf_tpr)

	bool gtlb_want_flush;
	bool gtsc_want_update;
	uint64_t vcpu_htlb_gen;
	cpumask_t htlb_want_flush;

	int vmcs_cpu;	/* 'struct cpu_info *vmcs_ci' in NetBSD */

	/* Percpu host state, absent from VMCS. */
	uint64_t kernelgsbase;

	mcontext_t hmctx;	/* TODO: remove this like NetBSD */

	bool int_window_exit;
	bool nmi_window_exit;

	struct msr_entry *gmsr;

	uint64_t gmsr_misc_enable;

	uint64_t gprs[NVMM_X64_NGPR];
	uint64_t drs[NVMM_X64_NDR];
	uint64_t gtsc_offset;

	union savefpu gfpu __aligned(64);

	/* VCPU configuration. */
	bool cpuidpresent[VMX_NCPUIDS];
	struct nvmm_vcpu_conf_cpuid cpuid[VMX_NCPUIDS];
	struct nvmm_vcpu_conf_tpr tpr;

static const struct {

} vmx_guest_segs[NVMM_X64_NSEG] = {
	[NVMM_X64_SEG_ES] = {
		VMCS_GUEST_ES_SELECTOR,
		VMCS_GUEST_ES_ACCESS_RIGHTS,

	[NVMM_X64_SEG_CS] = {
		VMCS_GUEST_CS_SELECTOR,
		VMCS_GUEST_CS_ACCESS_RIGHTS,

	[NVMM_X64_SEG_SS] = {
		VMCS_GUEST_SS_SELECTOR,
		VMCS_GUEST_SS_ACCESS_RIGHTS,

	[NVMM_X64_SEG_DS] = {
		VMCS_GUEST_DS_SELECTOR,
		VMCS_GUEST_DS_ACCESS_RIGHTS,

	[NVMM_X64_SEG_FS] = {
		VMCS_GUEST_FS_SELECTOR,
		VMCS_GUEST_FS_ACCESS_RIGHTS,

	[NVMM_X64_SEG_GS] = {
		VMCS_GUEST_GS_SELECTOR,
		VMCS_GUEST_GS_ACCESS_RIGHTS,

	[NVMM_X64_SEG_GDT] = {
		0, /* doesn't exist */
		0, /* doesn't exist */
		VMCS_GUEST_GDTR_LIMIT,

	[NVMM_X64_SEG_IDT] = {
		0, /* doesn't exist */
		0, /* doesn't exist */
		VMCS_GUEST_IDTR_LIMIT,

	[NVMM_X64_SEG_LDT] = {
		VMCS_GUEST_LDTR_SELECTOR,
		VMCS_GUEST_LDTR_ACCESS_RIGHTS,
		VMCS_GUEST_LDTR_LIMIT,

	[NVMM_X64_SEG_TR] = {
		VMCS_GUEST_TR_SELECTOR,
		VMCS_GUEST_TR_ACCESS_RIGHTS,

/* -------------------------------------------------------------------------- */
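
/*
 * Per the SDM, the revision identifier from IA32_VMX_BASIC[30:0] must be
 * written into the first word of the VMXON region and of every VMCS
 * before they are handed to VMXON/VMPTRLD.
 */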
static uint32_t
vmx_get_revision(void)

	msr = rdmsr(MSR_IA32_VMX_BASIC);
	msr &= IA32_VMX_BASIC_IDENT;

static void
vmx_vmclear_ipi(void *arg1)
{
	paddr_t vmcs_pa = (paddr_t)arg1;
	vmx_vmclear(&vmcs_pa);
}

static void
vmx_vmclear_remote(struct globaldata *ci, paddr_t vmcs_pa)

	KASSERT(kpreempt_disabled());

	bound = curlwp_bind();

	xc = xc_unicast(XC_HIGHPRI, vmx_vmclear_ipi, (void *)vmcs_pa, NULL, ci);

#else /* DragonFly */
	/*
	 * No need to bind the thread, because any normal kernel thread will
	 * not migrate to another CPU or be preempted (except by an interrupt
	 */
	seq = lwkt_send_ipiq(ci, vmx_vmclear_ipi, (void *)vmcs_pa);
	/*
	 * Must wait for completion; otherwise, the VMCS on the remote CPU
	 * would be in a wrong state and cause panics.
	 */
	lwkt_wait_ipiq(ci, seq);
#endif /* __NetBSD__ */
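
/*
 * VMCS enter/leave protocol.  A VMCS may be active on at most one CPU at
 * a time, so before making it current here with VMPTRLD it must be
 * VMCLEARed on whichever CPU last had it loaded.  vmcs_cpu tracks that
 * CPU (-1 means never loaded), and vmcs_refcnt supports nested
 * enter/leave sections on the same CPU.
 */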
static void
vmx_vmcs_enter(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	struct globaldata *vmcs_ci;

	cpudata->vmcs_refcnt++;
	if (cpudata->vmcs_refcnt > 1) {
		KASSERT(kpreempt_disabled());
		KASSERT(vmx_vmptrst() == cpudata->vmcs_pa);

	vmcs_cpu = cpudata->vmcs_cpu;
	cpudata->vmcs_cpu = -2;	/* clobber */

	if (vmcs_cpu == -1) {
		/* This VMCS is loaded for the first time. */
		vmx_vmclear(&cpudata->vmcs_pa);
		cpudata->vmcs_launched = false;
	} else if (vmcs_cpu != mycpuid) {
		/* This VMCS is active on a remote CPU. */
		vmcs_ci = globaldata_find(vmcs_cpu);
		vmx_vmclear_remote(vmcs_ci, cpudata->vmcs_pa);
		cpudata->vmcs_launched = false;
	} else {
		/* This VMCS is active on curcpu, nothing to do. */
	}

	vmx_vmptrld(&cpudata->vmcs_pa);
}

static void
vmx_vmcs_leave(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;

	KASSERT(kpreempt_disabled());
	KASSERT(vmx_vmptrst() == cpudata->vmcs_pa);
	KASSERT(cpudata->vmcs_refcnt > 0);
	cpudata->vmcs_refcnt--;

	if (cpudata->vmcs_refcnt > 0) {

	cpudata->vmcs_cpu = mycpuid;

static void
vmx_vmcs_destroy(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;

	KASSERT(kpreempt_disabled());
	KASSERT(vmx_vmptrst() == cpudata->vmcs_pa);
	KASSERT(cpudata->vmcs_refcnt == 1);
	cpudata->vmcs_refcnt--;

	vmx_vmclear(&cpudata->vmcs_pa);

/* -------------------------------------------------------------------------- */

static void
vmx_event_waitexit_enable(struct nvmm_cpu *vcpu, bool nmi)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;

	ctls1 = vmx_vmread(VMCS_PROCBASED_CTLS);

	if (nmi) {
		// XXX INT_STATE_NMI?
		ctls1 |= PROC_CTLS_NMI_WINDOW_EXITING;
		cpudata->nmi_window_exit = true;
	} else {
		ctls1 |= PROC_CTLS_INT_WINDOW_EXITING;
		cpudata->int_window_exit = true;
	}

	vmx_vmwrite(VMCS_PROCBASED_CTLS, ctls1);

static void
vmx_event_waitexit_disable(struct nvmm_cpu *vcpu, bool nmi)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;

	ctls1 = vmx_vmread(VMCS_PROCBASED_CTLS);

	if (nmi) {
		ctls1 &= ~PROC_CTLS_NMI_WINDOW_EXITING;
		cpudata->nmi_window_exit = false;
	} else {
		ctls1 &= ~PROC_CTLS_INT_WINDOW_EXITING;
		cpudata->int_window_exit = false;
	}

	vmx_vmwrite(VMCS_PROCBASED_CTLS, ctls1);

static bool
vmx_excp_has_rf(uint8_t vector)

static bool
vmx_excp_has_error(uint8_t vector)
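
/*
 * Event injection works through the VM-entry interruption-information
 * field: vector in bits 7:0, event type in bits 10:8, a deliver-error-code
 * flag in bit 11, and a valid bit in bit 31 (see the INTR_INFO_* fields
 * above).  Hardware then delivers the event through the guest IDT on the
 * next VM entry.
 */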
static int
vmx_vcpu_inject(struct nvmm_cpu *vcpu)
{
	struct nvmm_comm_page *comm = vcpu->comm;
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	int type = 0, err = 0, ret = EINVAL;
	uint64_t rflags, info, error;

	evtype = comm->event.type;
	vector = comm->event.vector;
	error = comm->event.u.excp.error;

	vmx_vmcs_enter(vcpu);

	switch (evtype) {
	case NVMM_VCPU_EVENT_EXCP:
		if (vector == 2 || vector >= 32)

		if (vector == 3 || vector == 0)

		if (vmx_excp_has_rf(vector)) {
			rflags = vmx_vmread(VMCS_GUEST_RFLAGS);
			vmx_vmwrite(VMCS_GUEST_RFLAGS, rflags | PSL_RF);
		}
		type = INTR_TYPE_HW_EXC;
		err = vmx_excp_has_error(vector);
		break;
	case NVMM_VCPU_EVENT_INTR:
		type = INTR_TYPE_EXT_INT;

		type = INTR_TYPE_NMI;
		vmx_event_waitexit_enable(vcpu, true);

	info =
	    __SHIFTIN((uint64_t)vector, INTR_INFO_VECTOR) |
	    __SHIFTIN((uint64_t)type, INTR_INFO_TYPE) |
	    __SHIFTIN((uint64_t)err, INTR_INFO_ERROR) |
	    __SHIFTIN((uint64_t)1, INTR_INFO_VALID);
	vmx_vmwrite(VMCS_ENTRY_INTR_INFO, info);
	vmx_vmwrite(VMCS_ENTRY_EXCEPTION_ERROR, error);

	cpudata->evt_pending = true;

	vmx_vmcs_leave(vcpu);

	return ret;
}

static void
vmx_inject_ud(struct nvmm_cpu *vcpu)
{
	struct nvmm_comm_page *comm = vcpu->comm;

	comm->event.type = NVMM_VCPU_EVENT_EXCP;
	comm->event.vector = 6;
	comm->event.u.excp.error = 0;

	ret = vmx_vcpu_inject(vcpu);

static void
vmx_inject_gp(struct nvmm_cpu *vcpu)
{
	struct nvmm_comm_page *comm = vcpu->comm;

	comm->event.type = NVMM_VCPU_EVENT_EXCP;
	comm->event.vector = 13;
	comm->event.u.excp.error = 0;

	ret = vmx_vcpu_inject(vcpu);

static int
vmx_vcpu_event_commit(struct nvmm_cpu *vcpu)
{
	if (__predict_true(!vcpu->comm->event_commit)) {
		return 0;
	}
	vcpu->comm->event_commit = false;
	return vmx_vcpu_inject(vcpu);
}

static inline void
vmx_inkernel_advance(void)
{
	uint64_t rip, inslen, intstate, rflags;

	/*
	 * Maybe we should also apply single-stepping and debug exceptions.
	 * Matters for guest-ring3, because it can execute 'cpuid' under a
	 * debugger.
	 */
	inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
	rip = vmx_vmread(VMCS_GUEST_RIP);
	vmx_vmwrite(VMCS_GUEST_RIP, rip + inslen);

	rflags = vmx_vmread(VMCS_GUEST_RFLAGS);
	vmx_vmwrite(VMCS_GUEST_RFLAGS, rflags & ~PSL_RF);

	intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
	vmx_vmwrite(VMCS_GUEST_INTERRUPTIBILITY,
	    intstate & ~(INT_STATE_STI|INT_STATE_MOVSS));
}

static void
vmx_exit_invalid(struct nvmm_vcpu_exit *exit, uint64_t code)
{
	exit->u.inv.hwcode = code;
	exit->reason = NVMM_VCPU_EXIT_INVALID;
}

static void
vmx_exit_exc_nmi(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)

	qual = vmx_vmread(VMCS_EXIT_INTR_INFO);

	if ((qual & INTR_INFO_VALID) == 0) {

	if (__SHIFTOUT(qual, INTR_INFO_TYPE) != INTR_TYPE_NMI) {

	exit->reason = NVMM_VCPU_EXIT_NONE;

	vmx_exit_invalid(exit, VMCS_EXITCODE_EXC_NMI);

#define VMX_CPUID_MAX_BASIC 0x16
#define VMX_CPUID_MAX_HYPERVISOR 0x40000000
#define VMX_CPUID_MAX_EXTENDED 0x80000008
static uint32_t vmx_cpuid_max_basic __read_mostly;
static uint32_t vmx_cpuid_max_extended __read_mostly;

static void
vmx_inkernel_exec_cpuid(struct vmx_cpudata *cpudata, uint32_t eax, uint32_t ecx)

	x86_cpuid2(eax, ecx, descs);
	cpudata->gprs[NVMM_X64_GPR_RAX] = descs[0];
	cpudata->gprs[NVMM_X64_GPR_RBX] = descs[1];
	cpudata->gprs[NVMM_X64_GPR_RCX] = descs[2];
	cpudata->gprs[NVMM_X64_GPR_RDX] = descs[3];

static void
vmx_inkernel_handle_cpuid(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    uint32_t eax, uint32_t ecx)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;

	if (eax < 0x40000000) {
		if (__predict_false(eax > vmx_cpuid_max_basic)) {
			eax = vmx_cpuid_max_basic;
			vmx_inkernel_exec_cpuid(cpudata, eax, ecx);
		}
	} else if (eax < 0x80000000) {
		if (__predict_false(eax > VMX_CPUID_MAX_HYPERVISOR)) {
			eax = vmx_cpuid_max_basic;
			vmx_inkernel_exec_cpuid(cpudata, eax, ecx);
		}
	} else {
		if (__predict_false(eax > vmx_cpuid_max_extended)) {
			eax = vmx_cpuid_max_basic;
			vmx_inkernel_exec_cpuid(cpudata, eax, ecx);
		}
	}

	switch (eax) {
	case 0x00000000:
		cpudata->gprs[NVMM_X64_GPR_RAX] = vmx_cpuid_max_basic;
		break;
	case 0x00000001:
		cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_00000001.eax;

		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~CPUID_LOCAL_APIC_ID;
		cpudata->gprs[NVMM_X64_GPR_RBX] |= __SHIFTIN(vcpu->cpuid,
		    CPUID_LOCAL_APIC_ID);

		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000001.ecx;
		cpudata->gprs[NVMM_X64_GPR_RCX] |= CPUID2_RAZ;
		if (vmx_procbased_ctls2 & PROC_CTLS2_INVPCID_ENABLE) {
			cpudata->gprs[NVMM_X64_GPR_RCX] |= CPUID2_PCID;
		}

		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000001.edx;

		/* CPUID2_OSXSAVE depends on CR4. */
		cr4 = vmx_vmread(VMCS_GUEST_CR4);
		if (!(cr4 & CR4_OSXSAVE)) {
			cpudata->gprs[NVMM_X64_GPR_RCX] &= ~CPUID2_OSXSAVE;
		}
		break;
	case 0x00000002: /* Empty */
	case 0x00000003: /* Empty */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x00000004: /* Deterministic Cache Parameters */
		ncpus = atomic_load_acq_int(&mach->ncpus);
		clevel = __SHIFTOUT(cpudata->gprs[NVMM_X64_GPR_RAX],
		    CPUID_DCP_CACHELEVEL);

		cpudata->gprs[NVMM_X64_GPR_RAX] &= ~CPUID_DCP_SHARING;
		if (clevel >= 3) {
			/* L3 and above: all CPUs. */
			cpudata->gprs[NVMM_X64_GPR_RAX] |=
			    __SHIFTIN(ncpus - 1, CPUID_DCP_SHARING);
		} else {
			/* L2 and below: one LP per CPU. */
			cpudata->gprs[NVMM_X64_GPR_RAX] |=
			    __SHIFTIN(0, CPUID_DCP_SHARING);
		}

		cpudata->gprs[NVMM_X64_GPR_RAX] &= ~CPUID_DCP_CORE_P_PKG;
		cpudata->gprs[NVMM_X64_GPR_RAX] |=
		    __SHIFTIN(ncpus - 1, CPUID_DCP_CORE_P_PKG);
		break;
	case 0x00000005: /* MONITOR/MWAIT */
	case 0x00000006: /* Thermal and Power Management */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x00000007: /* Structured Extended Feature Flags Enumeration */
		switch (ecx) {
		case 0:
			cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_00000007.ebx;
			cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000007.ecx;
			cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000007.edx;
			if (vmx_procbased_ctls2 & PROC_CTLS2_INVPCID_ENABLE) {
				cpudata->gprs[NVMM_X64_GPR_RBX] |= CPUID_SEF_INVPCID;
			}
			break;
		default:
			cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
			break;
		}
		break;
	case 0x00000008: /* Empty */
	case 0x00000009: /* Direct Cache Access Information */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x0000000A: /* Architectural Performance Monitoring */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x0000000B: /* Extended Topology Enumeration */
		switch (ecx) {
		case 0: /* Threads */
			cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RCX] =
			    __SHIFTIN(ecx, CPUID_TOP_LVLNUM) |
			    __SHIFTIN(CPUID_TOP_LVLTYPE_SMT, CPUID_TOP_LVLTYPE);
			cpudata->gprs[NVMM_X64_GPR_RDX] = vcpu->cpuid;
			break;
		case 1: /* Cores */
			ncpus = atomic_load_acq_int(&mach->ncpus);
			cpudata->gprs[NVMM_X64_GPR_RAX] = ilog2(ncpus);
			cpudata->gprs[NVMM_X64_GPR_RBX] = ncpus;
			cpudata->gprs[NVMM_X64_GPR_RCX] =
			    __SHIFTIN(ecx, CPUID_TOP_LVLNUM) |
			    __SHIFTIN(CPUID_TOP_LVLTYPE_CORE, CPUID_TOP_LVLTYPE);
			cpudata->gprs[NVMM_X64_GPR_RDX] = vcpu->cpuid;
			break;
		default:
			cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RCX] = 0; /* LVLTYPE_INVAL */
			cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
			break;
		}
		break;
	case 0x0000000C: /* Empty */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x0000000D: /* Processor Extended State Enumeration */
		if (vmx_xcr0_mask == 0) {
			break;
		}
		switch (ecx) {
		case 0:
			cpudata->gprs[NVMM_X64_GPR_RAX] = vmx_xcr0_mask & 0xFFFFFFFF;
			if (cpudata->gxcr0 & XCR0_SSE) {
				cpudata->gprs[NVMM_X64_GPR_RBX] =
				    sizeof(struct saveymm64);
			} else {
				cpudata->gprs[NVMM_X64_GPR_RBX] =
				    sizeof(struct save87) + 64; /* XSAVE header */
			}
			cpudata->gprs[NVMM_X64_GPR_RBX] += 64; /* XSAVE header */
			cpudata->gprs[NVMM_X64_GPR_RCX] = sizeof(struct saveymm64);
			cpudata->gprs[NVMM_X64_GPR_RDX] = vmx_xcr0_mask >> 32;
			break;
		case 1:
			cpudata->gprs[NVMM_X64_GPR_RAX] &=
			    (CPUID_PES1_XSAVEOPT | CPUID_PES1_XSAVEC |

			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
			break;
		default:
			cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
			break;
		}
		break;
	case 0x0000000E: /* Empty */
	case 0x0000000F: /* Intel RDT Monitoring Enumeration */
	case 0x00000010: /* Intel RDT Allocation Enumeration */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x00000011: /* Empty */
	case 0x00000012: /* Intel SGX Capability Enumeration */
	case 0x00000013: /* Empty */
	case 0x00000014: /* Intel Processor Trace Enumeration */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x00000015: /* TSC and Nominal Core Crystal Clock Information */
	case 0x00000016: /* Processor Frequency Information */
		break;

	case 0x40000000: /* Hypervisor Information */
		cpudata->gprs[NVMM_X64_GPR_RAX] = VMX_CPUID_MAX_HYPERVISOR;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RBX], "___ ", 4);
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RCX], "NVMM", 4);
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RDX], " ___", 4);
		break;

	case 0x80000000:
		cpudata->gprs[NVMM_X64_GPR_RAX] = vmx_cpuid_max_extended;
		break;
	case 0x80000001:
		cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_80000001.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000001.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000001.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000001.edx;
		break;
	case 0x80000002: /* Processor Brand String */
	case 0x80000003: /* Processor Brand String */
	case 0x80000004: /* Processor Brand String */
	case 0x80000005: /* Reserved Zero */
	case 0x80000006: /* Cache Information */
		break;
	case 0x80000007: /* TSC Information */
		cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_80000007.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000007.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000007.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000007.edx;
		break;
	case 0x80000008: /* Address Sizes */
		cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_80000008.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000008.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000008.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000008.edx;

static void
vmx_exit_insn(struct nvmm_vcpu_exit *exit, uint64_t reason)
{
	uint64_t inslen, rip;

	inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
	rip = vmx_vmread(VMCS_GUEST_RIP);
	exit->u.insn.npc = rip + inslen;
	exit->reason = reason;
}

static void
vmx_exit_cpuid(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	struct nvmm_vcpu_conf_cpuid *cpuid;

	eax = (cpudata->gprs[NVMM_X64_GPR_RAX] & 0xFFFFFFFF);
	ecx = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF);
	vmx_inkernel_exec_cpuid(cpudata, eax, ecx);
	vmx_inkernel_handle_cpuid(mach, vcpu, eax, ecx);

	for (i = 0; i < VMX_NCPUIDS; i++) {
		if (!cpudata->cpuidpresent[i]) {
			continue;
		}
		cpuid = &cpudata->cpuid[i];
		if (cpuid->leaf != eax) {
			continue;
		}

		if (cpuid->exit) {
			vmx_exit_insn(exit, NVMM_VCPU_EXIT_CPUID);
			return;
		}
		KASSERT(cpuid->mask);

		/* del */
		cpudata->gprs[NVMM_X64_GPR_RAX] &= ~cpuid->u.mask.del.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~cpuid->u.mask.del.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= ~cpuid->u.mask.del.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= ~cpuid->u.mask.del.edx;

		/* set */
		cpudata->gprs[NVMM_X64_GPR_RAX] |= cpuid->u.mask.set.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] |= cpuid->u.mask.set.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] |= cpuid->u.mask.set.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] |= cpuid->u.mask.set.edx;

		break;
	}

	vmx_inkernel_advance();
	exit->reason = NVMM_VCPU_EXIT_NONE;
}

static void
vmx_exit_hlt(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t rflags;

	if (cpudata->int_window_exit) {
		rflags = vmx_vmread(VMCS_GUEST_RFLAGS);
		if (rflags & PSL_I) {
			vmx_event_waitexit_disable(vcpu, false);
		}
	}

	vmx_inkernel_advance();
	exit->reason = NVMM_VCPU_EXIT_HALTED;
}

#define VMX_QUAL_CR_NUM __BITS(3,0)
#define VMX_QUAL_CR_TYPE __BITS(5,4)
#define CR_TYPE_WRITE 0
#define CR_TYPE_READ 1
#define CR_TYPE_CLTS 2
#define CR_TYPE_LMSW 3
#define VMX_QUAL_CR_LMSW_OPMEM __BIT(6)
#define VMX_QUAL_CR_GPR __BITS(11,8)
#define VMX_QUAL_CR_LMSW_SRC __BITS(31,16)

static inline int
vmx_check_cr(uint64_t crval, uint64_t fixed0, uint64_t fixed1)
{
	/* Bits set to 1 in fixed0 are fixed to 1. */
	if ((crval & fixed0) != fixed0) {
		return -1;
	}
	/* Bits set to 0 in fixed1 are fixed to 0. */
	if (crval & ~fixed1) {
		return -1;
	}

	return 0;
}

static int
vmx_inkernel_handle_cr0(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    uint64_t qual)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t type, gpr, oldcr0, realcr0, fakecr0;
	uint64_t efer, ctls1;

	type = __SHIFTOUT(qual, VMX_QUAL_CR_TYPE);
	if (type != CR_TYPE_WRITE) {
		return -1;
	}

	gpr = __SHIFTOUT(qual, VMX_QUAL_CR_GPR);

	if (gpr == NVMM_X64_GPR_RSP) {
		fakecr0 = vmx_vmread(VMCS_GUEST_RSP);
	} else {
		fakecr0 = cpudata->gprs[gpr];
	}

	/*
	 * fakecr0 is the value the guest believes is in %cr0. realcr0 is the
	 * actual value in %cr0.
	 *
	 * In fakecr0 we must force CR0_ET to 1.
	 *
	 * In realcr0 we must force CR0_NW and CR0_CD to 0, and CR0_ET and
	 * CR0_NE to 1.
	 */
	realcr0 = (fakecr0 & ~CR0_STATIC_MASK) | CR0_ET | CR0_NE;

	if (vmx_check_cr(realcr0, vmx_cr0_fixed0, vmx_cr0_fixed1) == -1) {
		return -1;
	}

	/*
	 * XXX Handle 32bit PAE paging, need to set PDPTEs, fetched manually
	 * from CR3.
	 */

	if (realcr0 & CR0_PG) {
		ctls1 = vmx_vmread(VMCS_ENTRY_CTLS);
		efer = vmx_vmread(VMCS_GUEST_IA32_EFER);
		if (efer & EFER_LME) {
			ctls1 |= ENTRY_CTLS_LONG_MODE;
			efer |= EFER_LMA;
		} else {
			ctls1 &= ~ENTRY_CTLS_LONG_MODE;
			efer &= ~EFER_LMA;
		}
		vmx_vmwrite(VMCS_GUEST_IA32_EFER, efer);
		vmx_vmwrite(VMCS_ENTRY_CTLS, ctls1);
	}

	oldcr0 = (vmx_vmread(VMCS_CR0_SHADOW) & CR0_STATIC_MASK) |
	    (vmx_vmread(VMCS_GUEST_CR0) & ~CR0_STATIC_MASK);
	if ((oldcr0 ^ fakecr0) & CR0_TLB_FLUSH) {
		cpudata->gtlb_want_flush = true;
	}

	vmx_vmwrite(VMCS_CR0_SHADOW, fakecr0);
	vmx_vmwrite(VMCS_GUEST_CR0, realcr0);
	vmx_inkernel_advance();

	return 0;
}

static int
vmx_inkernel_handle_cr4(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    uint64_t qual)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t type, gpr, oldcr4, cr4;

	type = __SHIFTOUT(qual, VMX_QUAL_CR_TYPE);
	if (type != CR_TYPE_WRITE) {
		return -1;
	}

	gpr = __SHIFTOUT(qual, VMX_QUAL_CR_GPR);

	if (gpr == NVMM_X64_GPR_RSP) {
		gpr = vmx_vmread(VMCS_GUEST_RSP);
	} else {
		gpr = cpudata->gprs[gpr];
	}

	if (gpr & CR4_INVALID) {
		return -1;
	}
	cr4 = gpr | CR4_VMXE;
	if (vmx_check_cr(cr4, vmx_cr4_fixed0, vmx_cr4_fixed1) == -1) {
		return -1;
	}

	oldcr4 = vmx_vmread(VMCS_GUEST_CR4);
	if ((oldcr4 ^ gpr) & CR4_TLB_FLUSH) {
		cpudata->gtlb_want_flush = true;
	}

	vmx_vmwrite(VMCS_GUEST_CR4, cr4);
	vmx_inkernel_advance();

	return 0;
}

static int
vmx_inkernel_handle_cr8(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    uint64_t qual, struct nvmm_vcpu_exit *exit)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;

	type = __SHIFTOUT(qual, VMX_QUAL_CR_TYPE);
	if (type == CR_TYPE_WRITE) {
		write = true;
	} else if (type == CR_TYPE_READ) {
		write = false;
	} else {
		return -1;
	}

	gpr = __SHIFTOUT(qual, VMX_QUAL_CR_GPR);

	if (write) {
		if (gpr == NVMM_X64_GPR_RSP) {
			cpudata->gcr8 = vmx_vmread(VMCS_GUEST_RSP);
		} else {
			cpudata->gcr8 = cpudata->gprs[gpr];
		}
		if (cpudata->tpr.exit_changed) {
			exit->reason = NVMM_VCPU_EXIT_TPR_CHANGED;
		}
	} else {
		if (gpr == NVMM_X64_GPR_RSP) {
			vmx_vmwrite(VMCS_GUEST_RSP, cpudata->gcr8);
		} else {
			cpudata->gprs[gpr] = cpudata->gcr8;
		}
	}

	vmx_inkernel_advance();

	return 0;
}

static void
vmx_exit_cr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)

	exit->reason = NVMM_VCPU_EXIT_NONE;

	qual = vmx_vmread(VMCS_EXIT_QUALIFICATION);

	switch (__SHIFTOUT(qual, VMX_QUAL_CR_NUM)) {
	case 0:
		ret = vmx_inkernel_handle_cr0(mach, vcpu, qual);
		break;
	case 4:
		ret = vmx_inkernel_handle_cr4(mach, vcpu, qual);
		break;
	case 8:
		ret = vmx_inkernel_handle_cr8(mach, vcpu, qual, exit);
		break;
	default:
		ret = -1;
	}

	if (ret == -1) {
		vmx_inject_gp(vcpu);
	}
}

#define VMX_QUAL_IO_SIZE __BITS(2,0)
#define IO_SIZE_8 0
#define IO_SIZE_16 1
#define IO_SIZE_32 3
#define VMX_QUAL_IO_IN __BIT(3)
#define VMX_QUAL_IO_STR __BIT(4)
#define VMX_QUAL_IO_REP __BIT(5)
#define VMX_QUAL_IO_DX __BIT(6)
#define VMX_QUAL_IO_PORT __BITS(31,16)

#define VMX_INFO_IO_ADRSIZE __BITS(9,7)
#define IO_ADRSIZE_16 0
#define IO_ADRSIZE_32 1
#define IO_ADRSIZE_64 2
#define VMX_INFO_IO_SEG __BITS(17,15)

static void
vmx_exit_io(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	uint64_t qual, info, inslen, rip;

	qual = vmx_vmread(VMCS_EXIT_QUALIFICATION);
	info = vmx_vmread(VMCS_EXIT_INSTRUCTION_INFO);

	exit->reason = NVMM_VCPU_EXIT_IO;

	exit->u.io.in = (qual & VMX_QUAL_IO_IN) != 0;
	exit->u.io.port = __SHIFTOUT(qual, VMX_QUAL_IO_PORT);

	KASSERT(__SHIFTOUT(info, VMX_INFO_IO_SEG) < 6);
	exit->u.io.seg = __SHIFTOUT(info, VMX_INFO_IO_SEG);

	if (__SHIFTOUT(info, VMX_INFO_IO_ADRSIZE) == IO_ADRSIZE_64) {
		exit->u.io.address_size = 8;
	} else if (__SHIFTOUT(info, VMX_INFO_IO_ADRSIZE) == IO_ADRSIZE_32) {
		exit->u.io.address_size = 4;
	} else if (__SHIFTOUT(info, VMX_INFO_IO_ADRSIZE) == IO_ADRSIZE_16) {
		exit->u.io.address_size = 2;
	}

	if (__SHIFTOUT(qual, VMX_QUAL_IO_SIZE) == IO_SIZE_32) {
		exit->u.io.operand_size = 4;
	} else if (__SHIFTOUT(qual, VMX_QUAL_IO_SIZE) == IO_SIZE_16) {
		exit->u.io.operand_size = 2;
	} else if (__SHIFTOUT(qual, VMX_QUAL_IO_SIZE) == IO_SIZE_8) {
		exit->u.io.operand_size = 1;
	}

	exit->u.io.rep = (qual & VMX_QUAL_IO_REP) != 0;
	exit->u.io.str = (qual & VMX_QUAL_IO_STR) != 0;

	if (exit->u.io.in && exit->u.io.str) {
		exit->u.io.seg = NVMM_X64_SEG_ES;
	}

	inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
	rip = vmx_vmread(VMCS_GUEST_RIP);
	exit->u.io.npc = rip + inslen;

	vmx_vcpu_state_provide(vcpu,
	    NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS |
	    NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS);

static const uint64_t msr_ignore_list[] = {

	MSR_IA32_PLATFORM_ID
};

static bool
vmx_inkernel_handle_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;

	if (exit->reason == NVMM_VCPU_EXIT_RDMSR) {
		if (exit->u.rdmsr.msr == MSR_CR_PAT) {
			val = vmx_vmread(VMCS_GUEST_IA32_PAT);
			cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
			cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
			goto handled;
		}
		if (exit->u.rdmsr.msr == MSR_MISC_ENABLE) {
			val = cpudata->gmsr_misc_enable;
			cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
			cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
			goto handled;
		}
		if (exit->u.rdmsr.msr == MSR_IA32_ARCH_CAPABILITIES) {

			if (cpuid_level < 7) {

			x86_cpuid(7, descs);
			if (!(descs[3] & CPUID_SEF_ARCH_CAP)) {

			val = rdmsr(MSR_IA32_ARCH_CAPABILITIES);
			val &= (IA32_ARCH_RDCL_NO |

			cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
			cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
			goto handled;
		}
		for (i = 0; i < __arraycount(msr_ignore_list); i++) {
			if (msr_ignore_list[i] != exit->u.rdmsr.msr)
				continue;
			val = 0;
			cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
			cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
			goto handled;
		}
	} else {
		/* All bets are off if MSR_TSC is actually written to. */
		if (exit->u.wrmsr.msr == MSR_TSC) {
			cpudata->gtsc_offset = exit->u.wrmsr.val - rdtsc();
			cpudata->gtsc_want_update = true;
			goto handled;
		}
		if (exit->u.wrmsr.msr == MSR_CR_PAT) {
			val = exit->u.wrmsr.val;
			if (__predict_false(!nvmm_x86_pat_validate(val))) {
				goto error;
			}
			vmx_vmwrite(VMCS_GUEST_IA32_PAT, val);
			goto handled;
		}
		if (exit->u.wrmsr.msr == MSR_MISC_ENABLE) {
			goto handled;
		}

		for (i = 0; i < __arraycount(msr_ignore_list); i++) {
			if (msr_ignore_list[i] != exit->u.wrmsr.msr)
				continue;
			goto handled;
		}
	}

	return false;

handled:
	vmx_inkernel_advance();
	return true;

error:
	vmx_inject_gp(vcpu);
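
/*
 * RDMSR/WRMSR cause exits only for MSRs not granted direct access in the
 * MSR bitmap (PROC_CTLS_USE_MSR_BITMAPS is mandatory above).  Whatever
 * the in-kernel handler does not recognize is forwarded to userland as
 * NVMM_VCPU_EXIT_{RD,WR}MSR.
 */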
static void
vmx_exit_rdmsr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t inslen, rip;

	exit->reason = NVMM_VCPU_EXIT_RDMSR;
	exit->u.rdmsr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF);

	if (vmx_inkernel_handle_msr(mach, vcpu, exit)) {
		exit->reason = NVMM_VCPU_EXIT_NONE;
		return;
	}

	inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
	rip = vmx_vmread(VMCS_GUEST_RIP);
	exit->u.rdmsr.npc = rip + inslen;

	vmx_vcpu_state_provide(vcpu, NVMM_X64_STATE_GPRS);

static void
vmx_exit_wrmsr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t rdx, rax, inslen, rip;

	rdx = cpudata->gprs[NVMM_X64_GPR_RDX];
	rax = cpudata->gprs[NVMM_X64_GPR_RAX];

	exit->reason = NVMM_VCPU_EXIT_WRMSR;
	exit->u.wrmsr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF);
	exit->u.wrmsr.val = (rdx << 32) | (rax & 0xFFFFFFFF);

	if (vmx_inkernel_handle_msr(mach, vcpu, exit)) {
		exit->reason = NVMM_VCPU_EXIT_NONE;
		return;
	}

	inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
	rip = vmx_vmread(VMCS_GUEST_RIP);
	exit->u.wrmsr.npc = rip + inslen;

	vmx_vcpu_state_provide(vcpu, NVMM_X64_STATE_GPRS);

static void
vmx_exit_xsetbv(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t val;

	exit->reason = NVMM_VCPU_EXIT_NONE;

	val = (cpudata->gprs[NVMM_X64_GPR_RDX] << 32) |
	    (cpudata->gprs[NVMM_X64_GPR_RAX] & 0xFFFFFFFF);

	if (__predict_false(cpudata->gprs[NVMM_X64_GPR_RCX] != 0)) {
		goto error;
	} else if (__predict_false((val & ~vmx_xcr0_mask) != 0)) {
		goto error;
	} else if (__predict_false((val & XCR0_X87) == 0)) {
		goto error;
	}

	cpudata->gxcr0 = val;

	vmx_inkernel_advance();
	return;

error:
	vmx_inject_gp(vcpu);
}

#define VMX_EPT_VIOLATION_READ __BIT(0)
#define VMX_EPT_VIOLATION_WRITE __BIT(1)
#define VMX_EPT_VIOLATION_EXECUTE __BIT(2)

static void
vmx_exit_epf(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{

	gpa = vmx_vmread(VMCS_GUEST_PHYSICAL_ADDRESS);

	exit->reason = NVMM_VCPU_EXIT_MEMORY;
	perm = vmx_vmread(VMCS_EXIT_QUALIFICATION);
	if (perm & VMX_EPT_VIOLATION_WRITE)
		exit->u.mem.prot = PROT_WRITE;
	else if (perm & VMX_EPT_VIOLATION_EXECUTE)
		exit->u.mem.prot = PROT_EXEC;
	else
		exit->u.mem.prot = PROT_READ;
	exit->u.mem.gpa = gpa;
	exit->u.mem.inst_len = 0;

	vmx_vcpu_state_provide(vcpu,
	    NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS |
	    NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS);
}

/* -------------------------------------------------------------------------- */

static void
vmx_vcpu_guest_fpu_enter(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;

#ifdef __NetBSD__
	fpu_area_restore(&cpudata->gfpu, vmx_xcr0_mask, true);
#else /* DragonFly */
	/*
	 * NOTE: Host FPU state depends on whether the user program used the
	 * FPU or not. Need to use npxpush()/npxpop() to handle this.
	 */
	npxpush(&cpudata->hstate.hmctx);

	fpurstor(&cpudata->gfpu, vmx_xcr0_mask);
#endif

	if (vmx_xcr0_mask != 0) {
		wrxcr(0, cpudata->gxcr0);
	}
}

static void
vmx_vcpu_guest_fpu_leave(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;

	if (vmx_xcr0_mask != 0) {
		wrxcr(0, vmx_global_hstate.xcr0);
	}

#ifdef __NetBSD__
	fpu_area_save(&cpudata->gfpu, vmx_xcr0_mask, true);
#else /* DragonFly */
	fpusave(&cpudata->gfpu, vmx_xcr0_mask);

	npxpop(&cpudata->hstate.hmctx);
#endif
}

static void
vmx_vcpu_guest_dbregs_enter(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;

	x86_dbregs_save(curlwp);

	ldr0(cpudata->drs[NVMM_X64_DR_DR0]);
	ldr1(cpudata->drs[NVMM_X64_DR_DR1]);
	ldr2(cpudata->drs[NVMM_X64_DR_DR2]);
	ldr3(cpudata->drs[NVMM_X64_DR_DR3]);
	ldr6(cpudata->drs[NVMM_X64_DR_DR6]);
}

static void
vmx_vcpu_guest_dbregs_leave(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;

	cpudata->drs[NVMM_X64_DR_DR0] = rdr0();
	cpudata->drs[NVMM_X64_DR_DR1] = rdr1();
	cpudata->drs[NVMM_X64_DR_DR2] = rdr2();
	cpudata->drs[NVMM_X64_DR_DR3] = rdr3();
	cpudata->drs[NVMM_X64_DR_DR6] = rdr6();

	x86_dbregs_restore(curlwp);
}

static void
vmx_vcpu_guest_misc_enter(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;

	/* This gets restored automatically by the CPU. */
#ifdef __NetBSD__
	vmx_vmwrite(VMCS_HOST_IDTR_BASE, (uint64_t)curcpu()->ci_idtvec.iv_idt);
#else /* DragonFly */
	vmx_vmwrite(VMCS_HOST_IDTR_BASE, (uint64_t)r_idt_arr[mycpuid].rd_base);
#endif
	vmx_vmwrite(VMCS_HOST_FS_BASE, rdmsr(MSR_FSBASE));
	vmx_vmwrite(VMCS_HOST_CR3, rcr3());
	vmx_vmwrite(VMCS_HOST_CR4, rcr4());

	/* Save the percpu host state. */
	cpudata->hstate.kernelgsbase = rdmsr(MSR_KERNELGSBASE);
}

static void
vmx_vcpu_guest_misc_leave(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;

	/* Restore the global host state. */
	wrmsr(MSR_STAR, vmx_global_hstate.star);
	wrmsr(MSR_LSTAR, vmx_global_hstate.lstar);
	wrmsr(MSR_CSTAR, vmx_global_hstate.cstar);
	wrmsr(MSR_SFMASK, vmx_global_hstate.sfmask);

	/* Restore the percpu host state. */
	wrmsr(MSR_KERNELGSBASE, cpudata->hstate.kernelgsbase);
}

/* -------------------------------------------------------------------------- */

#define VMX_INVVPID_ADDRESS 0
#define VMX_INVVPID_CONTEXT 1
#define VMX_INVVPID_ALL 2
#define VMX_INVVPID_CONTEXT_NOGLOBAL 3

#define VMX_INVEPT_CONTEXT 1
#define VMX_INVEPT_ALL 2
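
/*
 * Architected INVVPID/INVEPT invalidation types.  Which types the CPU
 * actually supports is reported by MSR_IA32_VMX_EPT_VPID_CAP; suitable
 * ones are selected at attach time into vmx_tlb_flush_op and
 * vmx_ept_flush_op.
 */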
2182 vmx_gtlb_catchup(struct nvmm_cpu *vcpu, int hcpu)
2184 struct vmx_cpudata *cpudata = vcpu->cpudata;
2186 if (vcpu->hcpu_last != hcpu) {
2187 cpudata->gtlb_want_flush = true;
2192 vmx_htlb_catchup(struct nvmm_cpu *vcpu, int hcpu)
2194 struct vmx_cpudata *cpudata = vcpu->cpudata;
2195 struct ept_desc ept_desc;
2197 if (__predict_true(!CPUMASK_TESTBIT(cpudata->htlb_want_flush, hcpu))) {
2198 return;
2199 }
2201 ept_desc.eptp = vmx_vmread(VMCS_EPTP);
2202 ept_desc.mbz = 0;
2203 vmx_invept(vmx_ept_flush_op, &ept_desc);
2204 ATOMIC_CPUMASK_NANDBIT(cpudata->htlb_want_flush, hcpu);
2207 static inline uint64_t
2208 vmx_htlb_flush(struct nvmm_machine *mach, struct vmx_cpudata *cpudata)
2210 struct ept_desc ept_desc;
2211 uint64_t machgen;
2214 machgen = mach->vm->vm_pmap.pm_invgen;
2215 if (__predict_true(machgen == cpudata->vcpu_htlb_gen)) {
2216 return machgen;
2217 }
2219 ATOMIC_CPUMASK_ORMASK(cpudata->htlb_want_flush, smp_active_mask);
2221 ept_desc.eptp = vmx_vmread(VMCS_EPTP);
2222 ept_desc.mbz = 0;
2223 vmx_invept(vmx_ept_flush_op, &ept_desc);
2225 return machgen;
2229 vmx_htlb_flush_ack(struct vmx_cpudata *cpudata, uint64_t machgen)
2231 cpudata->vcpu_htlb_gen = machgen;
2232 ATOMIC_CPUMASK_NANDBIT(cpudata->htlb_want_flush, mycpuid);
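/*
 * If the last VMEXIT interrupted the delivery of an event (valid
 * IDT-vectoring info), re-inject that event through the VM-entry
 * fields so it is not lost; software interrupts and software
 * exceptions also need the instruction length replayed on entry.
 * evt_pending is reported to userland so that it can avoid injecting
 * a new event on top of the pending one.
 */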
2236 vmx_exit_evt(struct vmx_cpudata *cpudata)
2238 uint64_t info, err, inslen;
2240 cpudata->evt_pending = false;
2242 info = vmx_vmread(VMCS_IDT_VECTORING_INFO);
2243 if (__predict_true((info & INTR_INFO_VALID) == 0)) {
2244 return;
2245 }
2246 err = vmx_vmread(VMCS_IDT_VECTORING_ERROR);
2248 vmx_vmwrite(VMCS_ENTRY_INTR_INFO, info);
2249 vmx_vmwrite(VMCS_ENTRY_EXCEPTION_ERROR, err);
2251 switch (__SHIFTOUT(info, INTR_INFO_TYPE)) {
2252 case INTR_TYPE_SW_INT:
2253 case INTR_TYPE_PRIV_SW_EXC:
2254 case INTR_TYPE_SW_EXC:
2255 inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
2256 vmx_vmwrite(VMCS_ENTRY_INSTRUCTION_LENGTH, inslen);
2257 break;
2258 }
2259 cpudata->evt_pending = true;
2260 }
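/*
 * Enter the guest and run it until an exit that userland must see.
 * The comm page state and any pending event are committed first; the
 * loop then issues VMLAUNCH once and VMRESUME afterwards, handling
 * exits inline when possible (exit->reason stays NVMM_VCPU_EXIT_NONE)
 * and bailing out to userland otherwise, or when nvmm_return_needed()
 * says the host wants the CPU back.
 */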
2263 vmx_vcpu_run(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
2264 struct nvmm_vcpu_exit *exit)
2266 struct nvmm_comm_page *comm = vcpu->comm;
2267 struct vmx_cpudata *cpudata = vcpu->cpudata;
2268 struct vpid_desc vpid_desc;
2269 struct globaldata *gd;
2276 vmx_vmcs_enter(vcpu);
2278 vmx_vcpu_state_commit(vcpu);
2279 comm->state_cached = 0;
2281 if (__predict_false(vmx_vcpu_event_commit(vcpu) != 0)) {
2282 vmx_vmcs_leave(vcpu);
2283 return EINVAL;
2284 }
2286 gd = mycpu;
2287 hcpu = gd->gd_cpuid;
2288 launched = cpudata->vmcs_launched;
2290 vmx_gtlb_catchup(vcpu, hcpu);
2291 vmx_htlb_catchup(vcpu, hcpu);
2293 if (vcpu->hcpu_last != hcpu) {
2294 #ifdef __NetBSD__
2295 vmx_vmwrite(VMCS_HOST_TR_SELECTOR, ci->ci_tss_sel);
2296 vmx_vmwrite(VMCS_HOST_TR_BASE, (uint64_t)ci->ci_tss);
2297 vmx_vmwrite(VMCS_HOST_GDTR_BASE, (uint64_t)ci->ci_gdt);
2298 #else /* DragonFly */
2299 vmx_vmwrite(VMCS_HOST_TR_SELECTOR, GSEL(GPROC0_SEL, SEL_KPL));
2300 vmx_vmwrite(VMCS_HOST_TR_BASE,
2301 (uint64_t)&gd->gd_prvspace->common_tss);
2302 vmx_vmwrite(VMCS_HOST_GDTR_BASE, (uint64_t)&gdt[hcpu * NGDT]);
2303 #endif /* __NetBSD__ */
2304 vmx_vmwrite(VMCS_HOST_GS_BASE, rdmsr(MSR_GSBASE));
2305 cpudata->gtsc_want_update = true;
2306 vcpu->hcpu_last = hcpu;
2308 #ifdef __DragonFly__
2310 * XXX: We aren't tracking overloaded CPUs (multiple vCPUs
2311 * scheduled on the same physical CPU) yet so there are
2312 * currently no calls to pmap_del_cpu().
2314 pmap_add_cpu(mach->vm, hcpu);
2318 vmx_vcpu_guest_dbregs_enter(vcpu);
2319 vmx_vcpu_guest_misc_enter(vcpu);
2322 if (cpudata->gtlb_want_flush) {
2323 vpid_desc.vpid = cpudata->asid;
2324 vpid_desc.addr = 0;
2325 vmx_invvpid(vmx_tlb_flush_op, &vpid_desc);
2326 cpudata->gtlb_want_flush = false;
2327 }
2329 if (__predict_false(cpudata->gtsc_want_update)) {
2330 vmx_vmwrite(VMCS_TSC_OFFSET, cpudata->gtsc_offset);
2331 cpudata->gtsc_want_update = false;
2335 vmx_vcpu_guest_fpu_enter(vcpu);
2336 machgen = vmx_htlb_flush(mach, cpudata);
2338 #ifdef __DragonFly__
2340 * Check for pending host events (e.g., interrupt, AST)
2341 * to make the state safe to VM Entry.
2343 if (__predict_false(gd->gd_reqflags & RQF_HVM_MASK)) {
2344 /* INVEPT executed, so ack hTLB flush. */
2345 vmx_htlb_flush_ack(cpudata, machgen);
2346 vmx_vcpu_guest_fpu_leave(vcpu);
2348 exit->reason = NVMM_VCPU_EXIT_NONE;
2349 break;
2350 }
2351 #endif /* __DragonFly__ */
2353 lcr2(cpudata->gcr2);
2354 if (launched) {
2355 ret = vmx_vmresume(cpudata->gprs);
2356 } else {
2357 ret = vmx_vmlaunch(cpudata->gprs);
2358 }
2359 cpudata->gcr2 = rcr2();
2360 vmx_htlb_flush_ack(cpudata, machgen);
2361 vmx_vcpu_guest_fpu_leave(vcpu);
2364 if (__predict_false(ret != 0)) {
2365 vmx_exit_invalid(exit, -1);
2366 break;
2367 }
2368 vmx_exit_evt(cpudata);
2370 launched = true;
2372 exitcode = vmx_vmread(VMCS_EXIT_REASON);
2373 exitcode &= __BITS(15,0);
2375 switch (exitcode) {
2376 case VMCS_EXITCODE_EXC_NMI:
2377 vmx_exit_exc_nmi(mach, vcpu, exit);
2378 break;
2379 case VMCS_EXITCODE_EXT_INT:
2380 exit->reason = NVMM_VCPU_EXIT_NONE;
2381 break;
2382 case VMCS_EXITCODE_CPUID:
2383 vmx_exit_cpuid(mach, vcpu, exit);
2384 break;
2385 case VMCS_EXITCODE_HLT:
2386 vmx_exit_hlt(mach, vcpu, exit);
2387 break;
2388 case VMCS_EXITCODE_CR:
2389 vmx_exit_cr(mach, vcpu, exit);
2390 break;
2391 case VMCS_EXITCODE_IO:
2392 vmx_exit_io(mach, vcpu, exit);
2393 break;
2394 case VMCS_EXITCODE_RDMSR:
2395 vmx_exit_rdmsr(mach, vcpu, exit);
2396 break;
2397 case VMCS_EXITCODE_WRMSR:
2398 vmx_exit_wrmsr(mach, vcpu, exit);
2399 break;
2400 case VMCS_EXITCODE_SHUTDOWN:
2401 exit->reason = NVMM_VCPU_EXIT_SHUTDOWN;
2402 break;
2403 case VMCS_EXITCODE_MONITOR:
2404 vmx_exit_insn(exit, NVMM_VCPU_EXIT_MONITOR);
2405 break;
2406 case VMCS_EXITCODE_MWAIT:
2407 vmx_exit_insn(exit, NVMM_VCPU_EXIT_MWAIT);
2408 break;
2409 case VMCS_EXITCODE_XSETBV:
2410 vmx_exit_xsetbv(mach, vcpu, exit);
2411 break;
2412 case VMCS_EXITCODE_RDPMC:
2413 case VMCS_EXITCODE_RDTSCP:
2414 case VMCS_EXITCODE_INVVPID:
2415 case VMCS_EXITCODE_INVEPT:
2416 case VMCS_EXITCODE_VMCALL:
2417 case VMCS_EXITCODE_VMCLEAR:
2418 case VMCS_EXITCODE_VMLAUNCH:
2419 case VMCS_EXITCODE_VMPTRLD:
2420 case VMCS_EXITCODE_VMPTRST:
2421 case VMCS_EXITCODE_VMREAD:
2422 case VMCS_EXITCODE_VMRESUME:
2423 case VMCS_EXITCODE_VMWRITE:
2424 case VMCS_EXITCODE_VMXOFF:
2425 case VMCS_EXITCODE_VMXON:
2426 vmx_inject_ud(vcpu);
2427 exit->reason = NVMM_VCPU_EXIT_NONE;
2428 break;
2429 case VMCS_EXITCODE_EPT_VIOLATION:
2430 vmx_exit_epf(mach, vcpu, exit);
2431 break;
2432 case VMCS_EXITCODE_INT_WINDOW:
2433 vmx_event_waitexit_disable(vcpu, false);
2434 exit->reason = NVMM_VCPU_EXIT_INT_READY;
2435 break;
2436 case VMCS_EXITCODE_NMI_WINDOW:
2437 vmx_event_waitexit_disable(vcpu, true);
2438 exit->reason = NVMM_VCPU_EXIT_NMI_READY;
2439 break;
2440 default:
2441 vmx_exit_invalid(exit, exitcode);
2442 break;
2443 }
2445 /* If no reason to return to userland, keep rolling. */
2446 if (nvmm_return_needed()) {
2447 break;
2448 }
2449 if (exit->reason != NVMM_VCPU_EXIT_NONE) {
2450 break;
2451 }
2452 }
2454 cpudata->vmcs_launched = launched;
2456 vmx_vcpu_guest_misc_leave(vcpu);
2457 vmx_vcpu_guest_dbregs_leave(vcpu);
2459 exit->exitstate.rflags = vmx_vmread(VMCS_GUEST_RFLAGS);
2460 exit->exitstate.cr8 = cpudata->gcr8;
2461 intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
2462 exit->exitstate.int_shadow =
2463 (intstate & (INT_STATE_STI|INT_STATE_MOVSS)) != 0;
2464 exit->exitstate.int_window_exiting = cpudata->int_window_exit;
2465 exit->exitstate.nmi_window_exiting = cpudata->nmi_window_exit;
2466 exit->exitstate.evt_pending = cpudata->evt_pending;
2468 vmx_vmcs_leave(vcpu);
2470 return 0;
2471 }
2473 /* -------------------------------------------------------------------------- */
2475 static int
2476 vmx_memalloc(paddr_t *pa, vaddr_t *va, size_t npages)
2477 {
2478 #ifdef __NetBSD__
2479 struct pglist pglist;
2480 paddr_t _pa;
2481 vaddr_t _va;
2482 size_t i;
2483 int ret;
2485 ret = uvm_pglistalloc(npages * PAGE_SIZE, 0, ~0UL, PAGE_SIZE, 0,
2486 &pglist, 1, 0);
2487 if (ret != 0)
2488 return ENOMEM;
2489 _pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
2490 _va = uvm_km_alloc(kernel_map, npages * PAGE_SIZE, 0,
2491 UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
2492 if (_va == 0)
2493 goto error;
2495 for (i = 0; i < npages; i++) {
2496 pmap_kenter_pa(_va + i * PAGE_SIZE, _pa + i * PAGE_SIZE,
2497 VM_PROT_READ | VM_PROT_WRITE, PMAP_WRITE_BACK);
2499 pmap_update(pmap_kernel());
2501 memset((void *)_va, 0, npages * PAGE_SIZE);
2503 *pa = _pa;
2504 *va = _va;
2505 return 0;
2507 error:
2508 for (i = 0; i < npages; i++) {
2509 uvm_pagefree(PHYS_TO_VM_PAGE(_pa + i * PAGE_SIZE));
2510 }
2511 return ENOMEM;
2512 }
2513 #else /* DragonFly */
2514 void *addr;
2516 addr = contigmalloc(npages * PAGE_SIZE, M_NVMM, M_WAITOK | M_ZERO,
2517 0, ~0UL, PAGE_SIZE, 0);
2518 if (addr == NULL)
2519 return ENOMEM;
2521 *va = (vaddr_t)addr;
2522 *pa = vtophys(addr);
2523 return 0;
2524 #endif /* __NetBSD__ */
2525 }
2527 static void
2528 vmx_memfree(paddr_t pa __unused, vaddr_t va, size_t npages)
2529 {
2532 #ifdef __NetBSD__
2533 pmap_kremove(va, npages * PAGE_SIZE);
2534 pmap_update(pmap_kernel());
2535 uvm_km_free(kernel_map, va, npages * PAGE_SIZE, UVM_KMF_VAONLY);
2536 for (i = 0; i < npages; i++) {
2537 uvm_pagefree(PHYS_TO_VM_PAGE(pa + i * PAGE_SIZE));
2539 #else /* DragonFly */
2540 contigfree((void *)va, npages * PAGE_SIZE, M_NVMM);
2541 #endif /* __NetBSD__ */
2544 /* -------------------------------------------------------------------------- */
2547 vmx_vcpu_msr_allow(uint8_t *bitmap, uint64_t msr, bool read, bool write)
2552 if (msr < 0x00002000) {
2554 byte = ((msr - 0x00000000) / 8) + 0;
2555 } else if (msr >= 0xC0000000 && msr < 0xC0002000) {
2557 byte = ((msr - 0xC0000000) / 8) + 1024;
2558 } else {
2559 panic("%s: wrong range", __func__);
2560 }
2562 bitoff = (msr & 0x7);
2564 if (read) {
2565 bitmap[byte] &= ~__BIT(bitoff);
2566 }
2567 if (write) {
2568 bitmap[2048 + byte] &= ~__BIT(bitoff);
2569 }
2570 }
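/*
 * Bitmap layout: bytes 0-1023 cover reads of MSRs 0x00000000-0x00001FFF,
 * bytes 1024-2047 reads of 0xC0000000-0xC0001FFF, and the same two
 * ranges repeat at +2048 for writes. Worked example with MSR_STAR
 * (architecturally 0xC0000081): byte = (0x81 / 8) + 1024 = 1040 and
 * bitoff = 1, so allowing read+write clears bit 1 of bitmap[1040] and
 * of bitmap[3088].
 */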
2572 #define VMX_SEG_ATTRIB_TYPE __BITS(3,0)
2573 #define VMX_SEG_ATTRIB_S __BIT(4)
2574 #define VMX_SEG_ATTRIB_DPL __BITS(6,5)
2575 #define VMX_SEG_ATTRIB_P __BIT(7)
2576 #define VMX_SEG_ATTRIB_AVL __BIT(12)
2577 #define VMX_SEG_ATTRIB_L __BIT(13)
2578 #define VMX_SEG_ATTRIB_DEF __BIT(14)
2579 #define VMX_SEG_ATTRIB_G __BIT(15)
2580 #define VMX_SEG_ATTRIB_UNUSABLE __BIT(16)
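/*
 * For reference, a flat 64-bit code segment (type=11, s=1, dpl=0,
 * p=1, l=1, g=1, avl=def=0) packs into these fields as
 * 0xB | 0x10 | 0x80 | 0x2000 | 0x8000 = 0xA09B.
 */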
2583 vmx_vcpu_setstate_seg(const struct nvmm_x64_state_seg *segs, int idx)
2588 __SHIFTIN(segs[idx].attrib.type, VMX_SEG_ATTRIB_TYPE) |
2589 __SHIFTIN(segs[idx].attrib.s, VMX_SEG_ATTRIB_S) |
2590 __SHIFTIN(segs[idx].attrib.dpl, VMX_SEG_ATTRIB_DPL) |
2591 __SHIFTIN(segs[idx].attrib.p, VMX_SEG_ATTRIB_P) |
2592 __SHIFTIN(segs[idx].attrib.avl, VMX_SEG_ATTRIB_AVL) |
2593 __SHIFTIN(segs[idx].attrib.l, VMX_SEG_ATTRIB_L) |
2594 __SHIFTIN(segs[idx].attrib.def, VMX_SEG_ATTRIB_DEF) |
2595 __SHIFTIN(segs[idx].attrib.g, VMX_SEG_ATTRIB_G) |
2596 (!segs[idx].attrib.p ? VMX_SEG_ATTRIB_UNUSABLE : 0);
2598 if (idx != NVMM_X64_SEG_GDT && idx != NVMM_X64_SEG_IDT) {
2599 vmx_vmwrite(vmx_guest_segs[idx].selector, segs[idx].selector);
2600 vmx_vmwrite(vmx_guest_segs[idx].attrib, attrib);
2602 vmx_vmwrite(vmx_guest_segs[idx].limit, segs[idx].limit);
2603 vmx_vmwrite(vmx_guest_segs[idx].base, segs[idx].base);
2607 vmx_vcpu_getstate_seg(struct nvmm_x64_state_seg *segs, int idx)
2609 uint64_t selector = 0, attrib = 0, base, limit;
2611 if (idx != NVMM_X64_SEG_GDT && idx != NVMM_X64_SEG_IDT) {
2612 selector = vmx_vmread(vmx_guest_segs[idx].selector);
2613 attrib = vmx_vmread(vmx_guest_segs[idx].attrib);
2615 limit = vmx_vmread(vmx_guest_segs[idx].limit);
2616 base = vmx_vmread(vmx_guest_segs[idx].base);
2618 segs[idx].selector = selector;
2619 segs[idx].limit = limit;
2620 segs[idx].base = base;
2621 segs[idx].attrib.type = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_TYPE);
2622 segs[idx].attrib.s = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_S);
2623 segs[idx].attrib.dpl = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_DPL);
2624 segs[idx].attrib.p = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_P);
2625 segs[idx].attrib.avl = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_AVL);
2626 segs[idx].attrib.l = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_L);
2627 segs[idx].attrib.def = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_DEF);
2628 segs[idx].attrib.g = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_G);
2629 if (attrib & VMX_SEG_ATTRIB_UNUSABLE) {
2630 segs[idx].attrib.p = 0;
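/*
 * Writing CRs or MSRs can change the paging configuration, making the
 * translations cached under the VCPU's VPID stale: the CR0
 * protection/paging bits, any CR3 change, the CR4 paging bits and the
 * paging-related EFER bits (masked by the *_TLB_FLUSH constants) all
 * force a gTLB flush before the next entry.
 */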
2635 vmx_state_tlb_flush(const struct nvmm_x64_state *state, uint64_t flags)
2637 uint64_t cr0, cr3, cr4, efer;
2639 if (flags & NVMM_X64_STATE_CRS) {
2640 cr0 = vmx_vmread(VMCS_GUEST_CR0);
2641 if ((cr0 ^ state->crs[NVMM_X64_CR_CR0]) & CR0_TLB_FLUSH) {
2642 return true;
2643 }
2644 cr3 = vmx_vmread(VMCS_GUEST_CR3);
2645 if (cr3 != state->crs[NVMM_X64_CR_CR3]) {
2646 return true;
2647 }
2648 cr4 = vmx_vmread(VMCS_GUEST_CR4);
2649 if ((cr4 ^ state->crs[NVMM_X64_CR_CR4]) & CR4_TLB_FLUSH) {
2650 return true;
2651 }
2652 }
2654 if (flags & NVMM_X64_STATE_MSRS) {
2655 efer = vmx_vmread(VMCS_GUEST_IA32_EFER);
2656 if ((efer ^
2657 state->msrs[NVMM_X64_MSR_EFER]) & EFER_TLB_FLUSH) {
2658 return true;
2659 }
2660 }
2662 return false;
2663 }
2666 vmx_vcpu_setstate(struct nvmm_cpu *vcpu)
2668 struct nvmm_comm_page *comm = vcpu->comm;
2669 const struct nvmm_x64_state *state = &comm->state;
2670 struct vmx_cpudata *cpudata = vcpu->cpudata;
2671 union savefpu *fpustate;
2672 uint64_t ctls1, intstate;
2675 flags = comm->state_wanted;
2677 vmx_vmcs_enter(vcpu);
2679 if (vmx_state_tlb_flush(state, flags)) {
2680 cpudata->gtlb_want_flush = true;
2683 if (flags & NVMM_X64_STATE_SEGS) {
2684 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_CS);
2685 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_DS);
2686 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_ES);
2687 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_FS);
2688 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_GS);
2689 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_SS);
2690 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_GDT);
2691 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_IDT);
2692 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_LDT);
2693 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_TR);
2696 CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs));
2697 if (flags & NVMM_X64_STATE_GPRS) {
2698 memcpy(cpudata->gprs, state->gprs, sizeof(state->gprs));
2700 vmx_vmwrite(VMCS_GUEST_RIP, state->gprs[NVMM_X64_GPR_RIP]);
2701 vmx_vmwrite(VMCS_GUEST_RSP, state->gprs[NVMM_X64_GPR_RSP]);
2702 vmx_vmwrite(VMCS_GUEST_RFLAGS, state->gprs[NVMM_X64_GPR_RFLAGS]);
2705 if (flags & NVMM_X64_STATE_CRS) {
2707 * CR0_ET must be 1 both in the shadow and the real register.
2708 * CR0_NE must be 1 in the real register.
2709 * CR0_NW and CR0_CD must be 0 in the real register.
2711 vmx_vmwrite(VMCS_CR0_SHADOW,
2712 (state->crs[NVMM_X64_CR_CR0] & CR0_STATIC_MASK) |
2713 CR0_ET);
2714 vmx_vmwrite(VMCS_GUEST_CR0,
2715 (state->crs[NVMM_X64_CR_CR0] & ~CR0_STATIC_MASK) |
2716 CR0_NE|CR0_ET);
2718 cpudata->gcr2 = state->crs[NVMM_X64_CR_CR2];
2720 /* XXX We are not handling PDPTE here. */
2721 vmx_vmwrite(VMCS_GUEST_CR3, state->crs[NVMM_X64_CR_CR3]);
2723 /* CR4_VMXE is mandatory. */
2724 vmx_vmwrite(VMCS_GUEST_CR4,
2725 (state->crs[NVMM_X64_CR_CR4] & CR4_VALID) | CR4_VMXE);
2727 cpudata->gcr8 = state->crs[NVMM_X64_CR_CR8];
2729 if (vmx_xcr0_mask != 0) {
2730 /* Clear illegal XCR0 bits, set mandatory X87 bit. */
2731 cpudata->gxcr0 = state->crs[NVMM_X64_CR_XCR0];
2732 cpudata->gxcr0 &= vmx_xcr0_mask;
2733 cpudata->gxcr0 |= XCR0_X87;
2737 CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs));
2738 if (flags & NVMM_X64_STATE_DRS) {
2739 memcpy(cpudata->drs, state->drs, sizeof(state->drs));
2741 cpudata->drs[NVMM_X64_DR_DR6] &= 0xFFFFFFFF;
2742 vmx_vmwrite(VMCS_GUEST_DR7, cpudata->drs[NVMM_X64_DR_DR7]);
2745 if (flags & NVMM_X64_STATE_MSRS) {
2746 cpudata->gmsr[VMX_MSRLIST_STAR].val =
2747 state->msrs[NVMM_X64_MSR_STAR];
2748 cpudata->gmsr[VMX_MSRLIST_LSTAR].val =
2749 state->msrs[NVMM_X64_MSR_LSTAR];
2750 cpudata->gmsr[VMX_MSRLIST_CSTAR].val =
2751 state->msrs[NVMM_X64_MSR_CSTAR];
2752 cpudata->gmsr[VMX_MSRLIST_SFMASK].val =
2753 state->msrs[NVMM_X64_MSR_SFMASK];
2754 cpudata->gmsr[VMX_MSRLIST_KERNELGSBASE].val =
2755 state->msrs[NVMM_X64_MSR_KERNELGSBASE];
2757 vmx_vmwrite(VMCS_GUEST_IA32_EFER,
2758 state->msrs[NVMM_X64_MSR_EFER]);
2759 vmx_vmwrite(VMCS_GUEST_IA32_PAT,
2760 state->msrs[NVMM_X64_MSR_PAT]);
2761 vmx_vmwrite(VMCS_GUEST_IA32_SYSENTER_CS,
2762 state->msrs[NVMM_X64_MSR_SYSENTER_CS]);
2763 vmx_vmwrite(VMCS_GUEST_IA32_SYSENTER_ESP,
2764 state->msrs[NVMM_X64_MSR_SYSENTER_ESP]);
2765 vmx_vmwrite(VMCS_GUEST_IA32_SYSENTER_EIP,
2766 state->msrs[NVMM_X64_MSR_SYSENTER_EIP]);
2768 /*
2769 * QEMU or whatever... probably did NOT want to set the TSC,
2770 * because doing so would destroy tsc mp-synchronization
2771 * across logical cpus. Try to figure out what qemu meant
2772 * to do.
2773 *
2774 * If writing the last TSC value we reported via getstate,
2775 * assume that the hypervisor does not want to write to the
2776 * TSC.
2777 *
2778 * QEMU appears to issue a setstate with the value 0 after
2779 * a 'reboot', so for now also ignore this case.
2780 */
2781 if (state->msrs[NVMM_X64_MSR_TSC] != cpudata->gtsc_match &&
2782 state->msrs[NVMM_X64_MSR_TSC] != 0) {
2783 cpudata->gtsc_offset =
2784 state->msrs[NVMM_X64_MSR_TSC] - rdtsc();
2785 cpudata->gtsc_want_update = true;
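/*
 * The offset arithmetic: gtsc_offset = (TSC value the guest should
 * read now) - rdtsc(). Hardware adds VMCS_TSC_OFFSET to RDTSC in
 * guest mode, and getstate reports rdtsc() + gtsc_offset, so the
 * guest TSC keeps advancing at the host rate from the written value.
 */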
2788 /* ENTRY_CTLS_LONG_MODE must match EFER_LMA. */
2789 ctls1 = vmx_vmread(VMCS_ENTRY_CTLS);
2790 if (state->msrs[NVMM_X64_MSR_EFER] & EFER_LMA) {
2791 ctls1 |= ENTRY_CTLS_LONG_MODE;
2793 ctls1 &= ~ENTRY_CTLS_LONG_MODE;
2795 vmx_vmwrite(VMCS_ENTRY_CTLS, ctls1);
2798 if (flags & NVMM_X64_STATE_INTR) {
2799 intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
2800 intstate &= ~(INT_STATE_STI|INT_STATE_MOVSS);
2801 if (state->intr.int_shadow) {
2802 intstate |= INT_STATE_MOVSS;
2804 vmx_vmwrite(VMCS_GUEST_INTERRUPTIBILITY, intstate);
2806 if (state->intr.int_window_exiting) {
2807 vmx_event_waitexit_enable(vcpu, false);
2809 vmx_event_waitexit_disable(vcpu, false);
2812 if (state->intr.nmi_window_exiting) {
2813 vmx_event_waitexit_enable(vcpu, true);
2815 vmx_event_waitexit_disable(vcpu, true);
2819 CTASSERT(sizeof(cpudata->gfpu) == sizeof(state->fpu));
2820 if (flags & NVMM_X64_STATE_FPU) {
2821 memcpy(&cpudata->gfpu, &state->fpu, sizeof(state->fpu));
2823 fpustate = &cpudata->gfpu;
2824 fpustate->fx_mxcsr_mask &= x86_fpu_mxcsr_mask;
2825 fpustate->fx_mxcsr &= fpustate->fx_mxcsr_mask;
2827 if (vmx_xcr0_mask != 0) {
2828 /* Reset XSTATE_BV, to force a reload. */
2829 cpudata->gfpu.xsh_xstate_bv = vmx_xcr0_mask;
2833 vmx_vmcs_leave(vcpu);
2835 comm->state_wanted = 0;
2836 comm->state_cached |= flags;
2840 vmx_vcpu_getstate(struct nvmm_cpu *vcpu)
2842 struct nvmm_comm_page *comm = vcpu->comm;
2843 struct nvmm_x64_state *state = &comm->state;
2844 struct vmx_cpudata *cpudata = vcpu->cpudata;
2845 uint64_t intstate, flags;
2847 flags = comm->state_wanted;
2849 vmx_vmcs_enter(vcpu);
2851 if (flags & NVMM_X64_STATE_SEGS) {
2852 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_CS);
2853 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_DS);
2854 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_ES);
2855 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_FS);
2856 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_GS);
2857 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_SS);
2858 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_GDT);
2859 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_IDT);
2860 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_LDT);
2861 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_TR);
2864 CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs));
2865 if (flags & NVMM_X64_STATE_GPRS) {
2866 memcpy(state->gprs, cpudata->gprs, sizeof(state->gprs));
2868 state->gprs[NVMM_X64_GPR_RIP] = vmx_vmread(VMCS_GUEST_RIP);
2869 state->gprs[NVMM_X64_GPR_RSP] = vmx_vmread(VMCS_GUEST_RSP);
2870 state->gprs[NVMM_X64_GPR_RFLAGS] = vmx_vmread(VMCS_GUEST_RFLAGS);
2873 if (flags & NVMM_X64_STATE_CRS) {
2874 state->crs[NVMM_X64_CR_CR0] =
2875 (vmx_vmread(VMCS_CR0_SHADOW) & CR0_STATIC_MASK) |
2876 (vmx_vmread(VMCS_GUEST_CR0) & ~CR0_STATIC_MASK);
2877 state->crs[NVMM_X64_CR_CR2] = cpudata->gcr2;
2878 state->crs[NVMM_X64_CR_CR3] = vmx_vmread(VMCS_GUEST_CR3);
2879 state->crs[NVMM_X64_CR_CR4] = vmx_vmread(VMCS_GUEST_CR4);
2880 state->crs[NVMM_X64_CR_CR8] = cpudata->gcr8;
2881 state->crs[NVMM_X64_CR_XCR0] = cpudata->gxcr0;
2884 state->crs[NVMM_X64_CR_CR4] &= ~CR4_VMXE;
2887 CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs));
2888 if (flags & NVMM_X64_STATE_DRS) {
2889 memcpy(state->drs, cpudata->drs, sizeof(state->drs));
2891 state->drs[NVMM_X64_DR_DR7] = vmx_vmread(VMCS_GUEST_DR7);
2894 if (flags & NVMM_X64_STATE_MSRS) {
2895 state->msrs[NVMM_X64_MSR_STAR] =
2896 cpudata->gmsr[VMX_MSRLIST_STAR].val;
2897 state->msrs[NVMM_X64_MSR_LSTAR] =
2898 cpudata->gmsr[VMX_MSRLIST_LSTAR].val;
2899 state->msrs[NVMM_X64_MSR_CSTAR] =
2900 cpudata->gmsr[VMX_MSRLIST_CSTAR].val;
2901 state->msrs[NVMM_X64_MSR_SFMASK] =
2902 cpudata->gmsr[VMX_MSRLIST_SFMASK].val;
2903 state->msrs[NVMM_X64_MSR_KERNELGSBASE] =
2904 cpudata->gmsr[VMX_MSRLIST_KERNELGSBASE].val;
2905 state->msrs[NVMM_X64_MSR_EFER] =
2906 vmx_vmread(VMCS_GUEST_IA32_EFER);
2907 state->msrs[NVMM_X64_MSR_PAT] =
2908 vmx_vmread(VMCS_GUEST_IA32_PAT);
2909 state->msrs[NVMM_X64_MSR_SYSENTER_CS] =
2910 vmx_vmread(VMCS_GUEST_IA32_SYSENTER_CS);
2911 state->msrs[NVMM_X64_MSR_SYSENTER_ESP] =
2912 vmx_vmread(VMCS_GUEST_IA32_SYSENTER_ESP);
2913 state->msrs[NVMM_X64_MSR_SYSENTER_EIP] =
2914 vmx_vmread(VMCS_GUEST_IA32_SYSENTER_EIP);
2915 state->msrs[NVMM_X64_MSR_TSC] = rdtsc() + cpudata->gtsc_offset;
2917 /* Save reported TSC value for later setstate hack. */
2918 cpudata->gtsc_match = state->msrs[NVMM_X64_MSR_TSC];
2921 if (flags & NVMM_X64_STATE_INTR) {
2922 intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
2923 state->intr.int_shadow =
2924 (intstate & (INT_STATE_STI|INT_STATE_MOVSS)) != 0;
2925 state->intr.int_window_exiting = cpudata->int_window_exit;
2926 state->intr.nmi_window_exiting = cpudata->nmi_window_exit;
2927 state->intr.evt_pending = cpudata->evt_pending;
2930 CTASSERT(sizeof(cpudata->gfpu) == sizeof(state->fpu));
2931 if (flags & NVMM_X64_STATE_FPU) {
2932 memcpy(&state->fpu, &cpudata->gfpu, sizeof(state->fpu));
2935 vmx_vmcs_leave(vcpu);
2937 comm->state_wanted = 0;
2938 comm->state_cached |= flags;
2942 vmx_vcpu_state_provide(struct nvmm_cpu *vcpu, uint64_t flags)
2944 vcpu->comm->state_wanted = flags;
2945 vmx_vcpu_getstate(vcpu);
2949 vmx_vcpu_state_commit(struct nvmm_cpu *vcpu)
2951 vcpu->comm->state_wanted = vcpu->comm->state_commit;
2952 vcpu->comm->state_commit = 0;
2953 vmx_vcpu_setstate(vcpu);
2956 /* -------------------------------------------------------------------------- */
2959 vmx_asid_alloc(struct nvmm_cpu *vcpu)
2961 struct vmx_cpudata *cpudata = vcpu->cpudata;
2964 mutex_enter(&vmx_asidlock);
2966 for (i = 0; i < vmx_maxasid; i++) {
2967 oct = i / 8;
2968 bit = i % 8;
2970 if (vmx_asidmap[oct] & __BIT(bit)) {
2971 continue;
2972 }
2974 cpudata->asid = i;
2976 vmx_asidmap[oct] |= __BIT(bit);
2977 vmx_vmwrite(VMCS_VPID, i);
2978 mutex_exit(&vmx_asidlock);
2979 return;
2980 }
2982 mutex_exit(&vmx_asidlock);
2984 panic("%s: impossible", __func__);
2988 vmx_asid_free(struct nvmm_cpu *vcpu)
2993 asid = vmx_vmread(VMCS_VPID);
2995 oct = asid / 8;
2996 bit = asid % 8;
2998 mutex_enter(&vmx_asidlock);
2999 vmx_asidmap[oct] &= ~__BIT(bit);
3000 mutex_exit(&vmx_asidlock);
3004 vmx_vcpu_init(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
3006 struct vmx_cpudata *cpudata = vcpu->cpudata;
3007 struct vmcs *vmcs = cpudata->vmcs;
3008 struct msr_entry *gmsr = cpudata->gmsr;
3011 rev = vmx_get_revision();
3013 memset(vmcs, 0, VMCS_SIZE);
3014 vmcs->ident = __SHIFTIN(rev, VMCS_IDENT_REVISION);
3017 vmx_vmcs_enter(vcpu);
3019 /* No link pointer. */
3020 vmx_vmwrite(VMCS_LINK_POINTER, 0xFFFFFFFFFFFFFFFF);
3022 /* Install the CTLSs. */
3023 vmx_vmwrite(VMCS_PINBASED_CTLS, vmx_pinbased_ctls);
3024 vmx_vmwrite(VMCS_PROCBASED_CTLS, vmx_procbased_ctls);
3025 vmx_vmwrite(VMCS_PROCBASED_CTLS2, vmx_procbased_ctls2);
3026 vmx_vmwrite(VMCS_ENTRY_CTLS, vmx_entry_ctls);
3027 vmx_vmwrite(VMCS_EXIT_CTLS, vmx_exit_ctls);
3029 /* Allow direct access to certain MSRs. */
3030 memset(cpudata->msrbm, 0xFF, MSRBM_SIZE);
3031 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_EFER, true, true);
3032 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_STAR, true, true);
3033 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_LSTAR, true, true);
3034 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_CSTAR, true, true);
3035 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SFMASK, true, true);
3036 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_KERNELGSBASE, true, true);
3037 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_CS, true, true);
3038 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_ESP, true, true);
3039 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_EIP, true, true);
3040 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_FSBASE, true, true);
3041 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_GSBASE, true, true);
3042 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_TSC, true, false);
3043 vmx_vmwrite(VMCS_MSR_BITMAP, (uint64_t)cpudata->msrbm_pa);
3046 * List of Guest MSRs loaded on VMENTRY, saved on VMEXIT. This
3047 * includes the L1D_FLUSH MSR, to mitigate L1TF.
3049 gmsr[VMX_MSRLIST_STAR].msr = MSR_STAR;
3050 gmsr[VMX_MSRLIST_STAR].val = 0;
3051 gmsr[VMX_MSRLIST_LSTAR].msr = MSR_LSTAR;
3052 gmsr[VMX_MSRLIST_LSTAR].val = 0;
3053 gmsr[VMX_MSRLIST_CSTAR].msr = MSR_CSTAR;
3054 gmsr[VMX_MSRLIST_CSTAR].val = 0;
3055 gmsr[VMX_MSRLIST_SFMASK].msr = MSR_SFMASK;
3056 gmsr[VMX_MSRLIST_SFMASK].val = 0;
3057 gmsr[VMX_MSRLIST_KERNELGSBASE].msr = MSR_KERNELGSBASE;
3058 gmsr[VMX_MSRLIST_KERNELGSBASE].val = 0;
3059 gmsr[VMX_MSRLIST_L1DFLUSH].msr = MSR_IA32_FLUSH_CMD;
3060 gmsr[VMX_MSRLIST_L1DFLUSH].val = IA32_FLUSH_CMD_L1D_FLUSH;
3061 vmx_vmwrite(VMCS_ENTRY_MSR_LOAD_ADDRESS, cpudata->gmsr_pa);
3062 vmx_vmwrite(VMCS_EXIT_MSR_STORE_ADDRESS, cpudata->gmsr_pa);
3063 vmx_vmwrite(VMCS_ENTRY_MSR_LOAD_COUNT, vmx_msrlist_entry_nmsr);
3064 vmx_vmwrite(VMCS_EXIT_MSR_STORE_COUNT, VMX_MSRLIST_EXIT_NMSR);
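/*
 * The entry-load count may exceed the exit-store count by one: the
 * trailing IA32_FLUSH_CMD entry (L1TF mitigation, set up at init
 * time) only needs to be loaded on entry, never stored back on exit.
 */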
3066 /* Set the CR0 mask. Any change of these bits causes a VMEXIT. */
3067 vmx_vmwrite(VMCS_CR0_MASK, CR0_STATIC_MASK);
3069 /* Force unsupported CR4 fields to zero. */
3070 vmx_vmwrite(VMCS_CR4_MASK, CR4_INVALID);
3071 vmx_vmwrite(VMCS_CR4_SHADOW, 0);
3073 /* Set the Host state for resuming. */
3074 vmx_vmwrite(VMCS_HOST_RIP, (uint64_t)vmx_resume_rip);
3075 vmx_vmwrite(VMCS_HOST_CS_SELECTOR, GSEL(GCODE_SEL, SEL_KPL));
3076 vmx_vmwrite(VMCS_HOST_SS_SELECTOR, GSEL(GDATA_SEL, SEL_KPL));
3077 vmx_vmwrite(VMCS_HOST_DS_SELECTOR, GSEL(GDATA_SEL, SEL_KPL));
3078 vmx_vmwrite(VMCS_HOST_ES_SELECTOR, GSEL(GDATA_SEL, SEL_KPL));
3079 vmx_vmwrite(VMCS_HOST_FS_SELECTOR, 0);
3080 vmx_vmwrite(VMCS_HOST_GS_SELECTOR, 0);
3081 vmx_vmwrite(VMCS_HOST_IA32_SYSENTER_CS, 0);
3082 vmx_vmwrite(VMCS_HOST_IA32_SYSENTER_ESP, 0);
3083 vmx_vmwrite(VMCS_HOST_IA32_SYSENTER_EIP, 0);
3084 vmx_vmwrite(VMCS_HOST_IA32_PAT, rdmsr(MSR_CR_PAT));
3085 vmx_vmwrite(VMCS_HOST_IA32_EFER, rdmsr(MSR_EFER));
3086 vmx_vmwrite(VMCS_HOST_CR0, rcr0() & ~CR0_TS);
3088 /* Generate ASID. */
3089 vmx_asid_alloc(vcpu);
3091 /* Enable Extended Paging, 4-Level. */
3092 eptp =
3093 __SHIFTIN(vmx_eptp_type, EPTP_TYPE) |
3094 __SHIFTIN(4-1, EPTP_WALKLEN) |
3095 (pmap_ept_has_ad ? EPTP_FLAGS_AD : 0) |
3096 vtophys(vmspace_pmap(mach->vm)->pm_pml4);
3097 vmx_vmwrite(VMCS_EPTP, eptp);
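/*
 * EPTP layout per the Intel SDM: bits 2:0 memory type, bits 5:3
 * page-walk length minus one, bit 6 A/D enable. A write-back 4-level
 * EPTP with A/D is therefore the PML4 physical address OR'ed with
 * 0x06 | 0x18 | 0x40 = 0x5E.
 */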
3099 /* Init IA32_MISC_ENABLE. */
3100 cpudata->gmsr_misc_enable = rdmsr(MSR_MISC_ENABLE);
3101 cpudata->gmsr_misc_enable &=
3102 ~(IA32_MISC_PERFMON_EN|IA32_MISC_EISST_EN|IA32_MISC_MWAIT_EN);
3103 cpudata->gmsr_misc_enable |=
3104 (IA32_MISC_BTS_UNAVAIL|IA32_MISC_PEBS_UNAVAIL);
3106 /* Init XSAVE header. */
3107 cpudata->gfpu.xsh_xstate_bv = vmx_xcr0_mask;
3108 cpudata->gfpu.xsh_xcomp_bv = 0;
3110 /* Install the RESET state. */
3111 memcpy(&vcpu->comm->state, &nvmm_x86_reset_state,
3112 sizeof(nvmm_x86_reset_state));
3113 vcpu->comm->state_wanted = NVMM_X64_STATE_ALL;
3114 vcpu->comm->state_cached = 0;
3115 vmx_vcpu_setstate(vcpu);
3117 vmx_vmcs_leave(vcpu);
3121 vmx_vcpu_create(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
3123 struct vmx_cpudata *cpudata;
3126 /* Allocate the VMX cpudata. */
3127 cpudata = (struct vmx_cpudata *)uvm_km_alloc(kernel_map,
3128 roundup(sizeof(*cpudata), PAGE_SIZE), 0,
3129 UVM_KMF_WIRED|UVM_KMF_ZERO);
3130 if (cpudata == NULL)
3131 return ENOMEM;
3133 cpudata->vmcs_cpu = -1;
3134 vcpu->cpudata = cpudata;
3136 /* VMCS */
3137 error = vmx_memalloc(&cpudata->vmcs_pa, (vaddr_t *)&cpudata->vmcs,
3138 VMCS_NPAGES);
3139 if (error)
3140 goto error;
3142 /* MSR Bitmap */
3143 error = vmx_memalloc(&cpudata->msrbm_pa, (vaddr_t *)&cpudata->msrbm,
3144 MSRBM_NPAGES);
3145 if (error)
3146 goto error;
3148 /* Guest MSR List */
3149 error = vmx_memalloc(&cpudata->gmsr_pa, (vaddr_t *)&cpudata->gmsr, 1);
3150 if (error)
3151 goto error;
3153 CPUMASK_ASSZERO(cpudata->htlb_want_flush);
3155 /* Init the VCPU info. */
3156 vmx_vcpu_init(mach, vcpu);
3158 return 0;
3160 error:
3161 if (cpudata->vmcs_pa) {
3162 vmx_memfree(cpudata->vmcs_pa, (vaddr_t)cpudata->vmcs,
3163 VMCS_NPAGES);
3164 }
3165 if (cpudata->msrbm_pa) {
3166 vmx_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm,
3167 MSRBM_NPAGES);
3168 }
3169 if (cpudata->gmsr_pa) {
3170 vmx_memfree(cpudata->gmsr_pa, (vaddr_t)cpudata->gmsr, 1);
3171 }
3172 uvm_km_free(kernel_map, (vaddr_t)cpudata,
3173 roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
3175 return error;
3178 vmx_vcpu_destroy(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
3180 struct vmx_cpudata *cpudata = vcpu->cpudata;
3182 vmx_vmcs_enter(vcpu);
3183 vmx_asid_free(vcpu);
3184 vmx_vmcs_destroy(vcpu);
3186 #ifdef __NetBSD__
3187 kcpuset_destroy(cpudata->htlb_want_flush);
3188 #endif
3190 vmx_memfree(cpudata->vmcs_pa, (vaddr_t)cpudata->vmcs, VMCS_NPAGES);
3191 vmx_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm, MSRBM_NPAGES);
3192 vmx_memfree(cpudata->gmsr_pa, (vaddr_t)cpudata->gmsr, 1);
3193 uvm_km_free(kernel_map, (vaddr_t)cpudata,
3194 roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
3197 /* -------------------------------------------------------------------------- */
3200 vmx_vcpu_configure_cpuid(struct vmx_cpudata *cpudata, void *data)
3202 struct nvmm_vcpu_conf_cpuid *cpuid = data;
3205 if (__predict_false(cpuid->mask && cpuid->exit)) {
3206 return EINVAL;
3207 }
3208 if (__predict_false(cpuid->mask &&
3209 ((cpuid->u.mask.set.eax & cpuid->u.mask.del.eax) ||
3210 (cpuid->u.mask.set.ebx & cpuid->u.mask.del.ebx) ||
3211 (cpuid->u.mask.set.ecx & cpuid->u.mask.del.ecx) ||
3212 (cpuid->u.mask.set.edx & cpuid->u.mask.del.edx)))) {
3213 return EINVAL;
3214 }
3216 /* If unset, delete, to restore the default behavior. */
3217 if (!cpuid->mask && !cpuid->exit) {
3218 for (i = 0; i < VMX_NCPUIDS; i++) {
3219 if (!cpudata->cpuidpresent[i]) {
3220 continue;
3221 }
3222 if (cpudata->cpuid[i].leaf == cpuid->leaf) {
3223 cpudata->cpuidpresent[i] = false;
3224 break;
3225 }
3226 }
3227 return 0;
3228 }
3229 /* If already here, replace. */
3230 for (i = 0; i < VMX_NCPUIDS; i++) {
3231 if (!cpudata->cpuidpresent[i]) {
3232 continue;
3233 }
3234 if (cpudata->cpuid[i].leaf == cpuid->leaf) {
3235 memcpy(&cpudata->cpuid[i], cpuid,
3236 sizeof(struct nvmm_vcpu_conf_cpuid));
3237 return 0;
3238 }
3239 }
3241 /* Not here, insert. */
3242 for (i = 0; i < VMX_NCPUIDS; i++) {
3243 if (!cpudata->cpuidpresent[i]) {
3244 cpudata->cpuidpresent[i] = true;
3245 memcpy(&cpudata->cpuid[i], cpuid,
3246 sizeof(struct nvmm_vcpu_conf_cpuid));
3247 return 0;
3248 }
3249 }
3251 return ENOBUFS;
3252 }
3255 vmx_vcpu_configure_tpr(struct vmx_cpudata *cpudata, void *data)
3257 struct nvmm_vcpu_conf_tpr *tpr = data;
3259 memcpy(&cpudata->tpr, tpr, sizeof(*tpr));
3261 return 0;
3264 vmx_vcpu_configure(struct nvmm_cpu *vcpu, uint64_t op, void *data)
3266 struct vmx_cpudata *cpudata = vcpu->cpudata;
3268 switch (op) {
3269 case NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_CPUID):
3270 return vmx_vcpu_configure_cpuid(cpudata, data);
3271 case NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_TPR):
3272 return vmx_vcpu_configure_tpr(cpudata, data);
3273 default:
3274 return EINVAL;
3275 }
3276 }
3278 /* -------------------------------------------------------------------------- */
3280 #ifdef __NetBSD__
3281 static void
3282 vmx_tlb_flush(struct pmap *pm)
3284 struct nvmm_machine *mach = pm->pm_data;
3285 struct vmx_machdata *machdata = mach->machdata;
3287 atomic_inc_64(&machdata->mach_htlb_gen);
3289 /* Generates IPIs, which cause #VMEXITs. */
3290 pmap_tlb_shootdown(pmap_kernel(), -1, PTE_G, TLBSHOOT_NVMM);
3292 #endif /* __NetBSD__ */
3295 vmx_machine_create(struct nvmm_machine *mach)
3297 struct pmap *pmap = vmspace_pmap(mach->vm);
3298 struct vmx_machdata *machdata;
3300 /* Convert to EPT. */
3301 pmap_ept_transform(pmap, pmap_ept_has_ad ? 0 : PMAP_EMULATE_AD_BITS);
3303 #ifdef __NetBSD__
3304 /* Fill in pmap info. */
3305 pmap->pm_data = (void *)mach;
3306 pmap->pm_tlb_flush = vmx_tlb_flush;
3307 #endif /* __NetBSD__ */
3309 machdata = kmem_zalloc(sizeof(struct vmx_machdata), KM_SLEEP);
3310 mach->machdata = machdata;
3312 /* Start with an hTLB flush everywhere. */
3313 machdata->mach_htlb_gen = 1;
3317 vmx_machine_destroy(struct nvmm_machine *mach)
3319 struct vmx_machdata *machdata = mach->machdata;
3321 kmem_free(machdata, sizeof(struct vmx_machdata));
3325 vmx_machine_configure(struct nvmm_machine *mach, uint64_t op, void *data)
3327 panic("%s: impossible", __func__);
3330 /* -------------------------------------------------------------------------- */
3332 #define CTLS_ONE_ALLOWED(msrval, bitoff) \
3333 ((msrval & __BIT(32 + bitoff)) != 0)
3334 #define CTLS_ZERO_ALLOWED(msrval, bitoff) \
3335 ((msrval & __BIT(bitoff)) == 0)
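/*
 * Capability MSR semantics: bits 31:0 are the "allowed 0-settings"
 * (if bit X is set there, control X is fixed to 1) and bits 63:32 the
 * "allowed 1-settings" (if bit 32+X is clear, control X is fixed to
 * 0). E.g. a CPU that cannot run with control bit 7 set reports
 * __BIT(32 + 7) as zero, and CTLS_ONE_ALLOWED(msrval, 7) is false.
 */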
3338 vmx_check_ctls(uint64_t msr_ctls, uint64_t msr_true_ctls, uint64_t set_one)
3340 uint64_t basic, val, true_val;
3341 bool has_true;
3342 size_t i;
3344 basic = rdmsr(MSR_IA32_VMX_BASIC);
3345 has_true = (basic & IA32_VMX_BASIC_TRUE_CTLS) != 0;
3347 val = rdmsr(msr_ctls);
3348 if (has_true) {
3349 true_val = rdmsr(msr_true_ctls);
3350 } else {
3351 true_val = val;
3352 }
3354 for (i = 0; i < 32; i++) {
3355 if (!(set_one & __BIT(i))) {
3356 continue;
3357 }
3358 if (!CTLS_ONE_ALLOWED(true_val, i)) {
3359 return false;
3360 }
3361 }
3363 return true;
3364 }
3367 vmx_init_ctls(uint64_t msr_ctls, uint64_t msr_true_ctls,
3368 uint64_t set_one, uint64_t set_zero, uint64_t *res)
3370 uint64_t basic, val, true_val;
3371 bool one_allowed, zero_allowed, has_true;
3372 size_t i;
3374 basic = rdmsr(MSR_IA32_VMX_BASIC);
3375 has_true = (basic & IA32_VMX_BASIC_TRUE_CTLS) != 0;
3377 val = rdmsr(msr_ctls);
3378 if (has_true) {
3379 true_val = rdmsr(msr_true_ctls);
3380 } else {
3381 true_val = val;
3382 }
3384 for (i = 0; i < 32; i++) {
3385 one_allowed = CTLS_ONE_ALLOWED(true_val, i);
3386 zero_allowed = CTLS_ZERO_ALLOWED(true_val, i);
3388 if (zero_allowed && !one_allowed) {
3389 if (set_one & __BIT(i))
3390 return -1;
3391 *res &= ~__BIT(i);
3392 } else if (one_allowed && !zero_allowed) {
3393 if (set_zero & __BIT(i))
3394 return -1;
3395 *res |= __BIT(i);
3396 } else {
3397 if (set_zero & __BIT(i)) {
3398 *res &= ~__BIT(i);
3399 } else if (set_one & __BIT(i)) {
3400 *res |= __BIT(i);
3401 } else if (!has_true) {
3402 *res &= ~__BIT(i);
3403 } else if (CTLS_ZERO_ALLOWED(val, i)) {
3404 *res &= ~__BIT(i);
3405 } else if (CTLS_ONE_ALLOWED(val, i)) {
3406 *res |= __BIT(i);
3407 } else {
3408 return -1;
3409 }
3410 }
3411 }
3413 return 0;
3414 }
3422 if (!(cpu_feature2 & CPUID2_VMX)) {
3423 return false;
3424 }
3426 msr = rdmsr(MSR_IA32_FEATURE_CONTROL);
3427 if ((msr & IA32_FEATURE_CONTROL_LOCK) != 0 &&
3428 (msr & IA32_FEATURE_CONTROL_OUT_SMX) == 0) {
3429 printf("NVMM: VMX disabled in BIOS\n");
3430 return false;
3431 }
3433 msr = rdmsr(MSR_IA32_VMX_BASIC);
3434 if ((msr & IA32_VMX_BASIC_IO_REPORT) == 0) {
3435 printf("NVMM: I/O reporting not supported\n");
3436 return false;
3437 }
3438 if (__SHIFTOUT(msr, IA32_VMX_BASIC_MEM_TYPE) != MEM_TYPE_WB) {
3439 printf("NVMM: WB memory not supported\n");
3440 return false;
3441 }
3443 /* PG and PE are reported, even if Unrestricted Guests is supported. */
3444 vmx_cr0_fixed0 = rdmsr(MSR_IA32_VMX_CR0_FIXED0) & ~(CR0_PG|CR0_PE);
3445 vmx_cr0_fixed1 = rdmsr(MSR_IA32_VMX_CR0_FIXED1) | (CR0_PG|CR0_PE);
3446 ret = vmx_check_cr(rcr0(), vmx_cr0_fixed0, vmx_cr0_fixed1);
3447 if (ret == -1) {
3448 printf("NVMM: CR0 requirements not satisfied\n");
3449 return false;
3450 }
3452 vmx_cr4_fixed0 = rdmsr(MSR_IA32_VMX_CR4_FIXED0);
3453 vmx_cr4_fixed1 = rdmsr(MSR_IA32_VMX_CR4_FIXED1);
3454 ret = vmx_check_cr(rcr4() | CR4_VMXE, vmx_cr4_fixed0, vmx_cr4_fixed1);
3455 if (ret == -1) {
3456 printf("NVMM: CR4 requirements not satisfied\n");
3457 return false;
3458 }
3460 /* Init the CTLSs right now, and check for errors. */
3461 ret = vmx_init_ctls(
3462 MSR_IA32_VMX_PINBASED_CTLS, MSR_IA32_VMX_TRUE_PINBASED_CTLS,
3463 VMX_PINBASED_CTLS_ONE, VMX_PINBASED_CTLS_ZERO,
3464 &vmx_pinbased_ctls);
3465 if (ret == -1) {
3466 printf("NVMM: pin-based-ctls requirements not satisfied\n");
3467 return false;
3468 }
3469 ret = vmx_init_ctls(
3470 MSR_IA32_VMX_PROCBASED_CTLS, MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
3471 VMX_PROCBASED_CTLS_ONE, VMX_PROCBASED_CTLS_ZERO,
3472 &vmx_procbased_ctls);
3473 if (ret == -1) {
3474 printf("NVMM: proc-based-ctls requirements not satisfied\n");
3475 return false;
3476 }
3477 ret = vmx_init_ctls(
3478 MSR_IA32_VMX_PROCBASED_CTLS2, MSR_IA32_VMX_PROCBASED_CTLS2,
3479 VMX_PROCBASED_CTLS2_ONE, VMX_PROCBASED_CTLS2_ZERO,
3480 &vmx_procbased_ctls2);
3481 if (ret == -1) {
3482 printf("NVMM: proc-based-ctls2 requirements not satisfied\n");
3483 return false;
3484 }
3485 ret = vmx_check_ctls(
3486 MSR_IA32_VMX_PROCBASED_CTLS2, MSR_IA32_VMX_PROCBASED_CTLS2,
3487 PROC_CTLS2_INVPCID_ENABLE);
3488 if (ret) {
3489 vmx_procbased_ctls2 |= PROC_CTLS2_INVPCID_ENABLE;
3490 }
3491 ret = vmx_init_ctls(
3492 MSR_IA32_VMX_ENTRY_CTLS, MSR_IA32_VMX_TRUE_ENTRY_CTLS,
3493 VMX_ENTRY_CTLS_ONE, VMX_ENTRY_CTLS_ZERO,
3494 &vmx_entry_ctls);
3495 if (ret == -1) {
3496 printf("NVMM: entry-ctls requirements not satisfied\n");
3497 return false;
3498 }
3499 ret = vmx_init_ctls(
3500 MSR_IA32_VMX_EXIT_CTLS, MSR_IA32_VMX_TRUE_EXIT_CTLS,
3501 VMX_EXIT_CTLS_ONE, VMX_EXIT_CTLS_ZERO,
3502 &vmx_exit_ctls);
3503 if (ret == -1) {
3504 printf("NVMM: exit-ctls requirements not satisfied\n");
3505 return false;
3506 }
3508 msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
3509 if ((msr & IA32_VMX_EPT_VPID_WALKLENGTH_4) == 0) {
3510 printf("NVMM: 4-level page tree not supported\n");
3511 return false;
3512 }
3513 if ((msr & IA32_VMX_EPT_VPID_INVEPT) == 0) {
3514 printf("NVMM: INVEPT not supported\n");
3515 return false;
3516 }
3517 if ((msr & IA32_VMX_EPT_VPID_INVVPID) == 0) {
3518 printf("NVMM: INVVPID not supported\n");
3519 return false;
3520 }
3521 if ((msr & IA32_VMX_EPT_VPID_FLAGS_AD) != 0) {
3522 pmap_ept_has_ad = true;
3524 pmap_ept_has_ad = false;
3526 if (!(msr & IA32_VMX_EPT_VPID_UC) && !(msr & IA32_VMX_EPT_VPID_WB)) {
3527 printf("NVMM: EPT UC/WB memory types not supported\n");
3528 return false;
3529 }
3531 return true;
3532 }
3535 vmx_init_asid(uint32_t maxasid)
3539 mutex_init(&vmx_asidlock, MUTEX_DEFAULT, IPL_NONE);
3541 vmx_maxasid = maxasid;
3542 allocsz = roundup(maxasid, 8) / 8;
3543 vmx_asidmap = kmem_zalloc(allocsz, KM_SLEEP);
3545 /* ASID 0 is reserved for the host. */
3546 vmx_asidmap[0] |= __BIT(0);
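/*
 * One bit per ASID, rounded up to whole bytes: assuming the
 * architectural 16-bit VPID space (VPID_MAX = 0xFFFF), the map is an
 * 8192-byte allocation. Bit 0 stays set forever, so no guest runs
 * with the host's VPID of 0.
 */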
3550 vmx_change_cpu(void *arg1)
3552 bool enable = arg1 != NULL;
3556 msr = rdmsr(MSR_IA32_FEATURE_CONTROL);
3557 if ((msr & IA32_FEATURE_CONTROL_LOCK) == 0) {
3558 /* Lock now, with VMX-outside-SMX enabled. */
3559 wrmsr(MSR_IA32_FEATURE_CONTROL, msr |
3560 IA32_FEATURE_CONTROL_LOCK |
3561 IA32_FEATURE_CONTROL_OUT_SMX);
3578 vmx_vmxon(&vmxoncpu[mycpuid].pa);
3581 #ifdef __DragonFly__
3582 if (atomic_fetchadd_int(&vmx_change_cpu_count, -1) == 1)
3583 wakeup(&vmx_change_cpu_count);
3584 #endif /* __DragonFly__ */
3593 if (cpuid_level < 7) {
3594 return;
3595 }
3597 x86_cpuid(7, descs);
3599 if (descs[3] & CPUID_SEF_ARCH_CAP) {
3600 msr = rdmsr(MSR_IA32_ARCH_CAPABILITIES);
3601 if (msr & IA32_ARCH_SKIP_L1DFL_VMENTRY) {
3602 /* No mitigation needed. */
3603 return;
3604 }
3605 }
3607 if (descs[3] & CPUID_SEF_L1D_FLUSH) {
3608 /* Enable hardware mitigation. */
3609 vmx_msrlist_entry_nmsr += 1;
3610 }
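/*
 * L1TF decision tree: if IA32_ARCH_CAPABILITIES reports that the L1D
 * flush can be skipped at VM-entry, the core is not vulnerable and
 * nothing is installed; otherwise, if the CPU enumerates L1D_FLUSH,
 * the IA32_FLUSH_CMD MSR is appended to the VM-entry MSR-load list
 * (vmx_msrlist_entry_nmsr), so hardware flushes the L1D on every
 * entry.
 */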
3617 struct vmxon *vmxon;
3624 /* Init the ASID bitmap (VPID). */
3625 vmx_init_asid(VPID_MAX);
3627 /* Init the XCR0 mask. */
3628 vmx_xcr0_mask = VMX_XCR0_MASK_DEFAULT & x86_xsave_features;
3630 /* Init the max basic CPUID leaf. */
3631 vmx_cpuid_max_basic = uimin(cpuid_level, VMX_CPUID_MAX_BASIC);
3633 /* Init the max extended CPUID leaf. */
3634 x86_cpuid(0x80000000, descs);
3635 vmx_cpuid_max_extended = uimin(descs[0], VMX_CPUID_MAX_EXTENDED);
3637 /* Init the TLB flush op, the EPT flush op and the EPTP type. */
3638 msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
3639 if ((msr & IA32_VMX_EPT_VPID_INVVPID_CONTEXT) != 0) {
3640 vmx_tlb_flush_op = VMX_INVVPID_CONTEXT;
3642 vmx_tlb_flush_op = VMX_INVVPID_ALL;
3644 if ((msr & IA32_VMX_EPT_VPID_INVEPT_CONTEXT) != 0) {
3645 vmx_ept_flush_op = VMX_INVEPT_CONTEXT;
3647 vmx_ept_flush_op = VMX_INVEPT_ALL;
3649 if ((msr & IA32_VMX_EPT_VPID_WB) != 0) {
3650 vmx_eptp_type = EPTP_TYPE_WB;
3652 vmx_eptp_type = EPTP_TYPE_UC;
3655 /* Init the L1TF mitigation. */
3658 /* Init the global host state. */
3659 if (vmx_xcr0_mask != 0) {
3660 vmx_global_hstate.xcr0 = rdxcr(0);
3662 vmx_global_hstate.star = rdmsr(MSR_STAR);
3663 vmx_global_hstate.lstar = rdmsr(MSR_LSTAR);
3664 vmx_global_hstate.cstar = rdmsr(MSR_CSTAR);
3665 vmx_global_hstate.sfmask = rdmsr(MSR_SFMASK);
3667 memset(vmxoncpu, 0, sizeof(vmxoncpu));
3668 revision = vmx_get_revision();
3670 for (i = 0; i < ncpus; i++) {
3671 error = vmx_memalloc(&pa, &va, 1);
3673 panic("%s: out of memory", __func__);
3675 vmxoncpu[i].pa = pa;
3676 vmxoncpu[i].va = va;
3678 vmxon = (struct vmxon *)vmxoncpu[i].va;
3679 vmxon->ident = __SHIFTIN(revision, VMXON_IDENT_REVISION);
3683 #ifdef __NetBSD__
3684 xc = xc_broadcast(0, vmx_change_cpu, (void *)true, NULL);
3685 xc_wait(xc);
3686 #else /* DragonFly */
3687 atomic_swap_int(&vmx_change_cpu_count, ncpus);
3688 lwkt_send_ipiq_mask(smp_active_mask, vmx_change_cpu, (void *)true);
3690 do {
3691 tsleep_interlock(&vmx_change_cpu_count, 0);
3692 if (vmx_change_cpu_count)
3693 tsleep(&vmx_change_cpu_count, PINTERLOCKED, "vmx", hz);
3694 } while (vmx_change_cpu_count != 0);
3695 #endif /* __NetBSD__ */
3703 allocsz = roundup(vmx_maxasid, 8) / 8;
3704 kmem_free(vmx_asidmap, allocsz);
3706 mutex_destroy(&vmx_asidlock);
3715 #ifdef __NetBSD__
3716 xc = xc_broadcast(0, vmx_change_cpu, (void *)false, NULL);
3717 xc_wait(xc);
3718 #else /* DragonFly */
3719 atomic_swap_int(&vmx_change_cpu_count, ncpus);
3720 lwkt_send_ipiq_mask(smp_active_mask, vmx_change_cpu, (void *)false);
3722 do {
3723 tsleep_interlock(&vmx_change_cpu_count, 0);
3724 if (vmx_change_cpu_count)
3725 tsleep(&vmx_change_cpu_count, PINTERLOCKED, "vmx", hz);
3726 } while (vmx_change_cpu_count != 0);
3727 #endif /* __NetBSD__ */
3729 for (i = 0; i < MAXCPUS; i++) {
3730 if (vmxoncpu[i].pa != 0)
3731 vmx_memfree(vmxoncpu[i].pa, vmxoncpu[i].va, 1);
3738 vmx_capability(struct nvmm_capability *cap)
3740 cap->arch.mach_conf_support = 0;
3741 cap->arch.vcpu_conf_support =
3742 NVMM_CAP_ARCH_VCPU_CONF_CPUID |
3743 NVMM_CAP_ARCH_VCPU_CONF_TPR;
3744 cap->arch.xcr0_mask = vmx_xcr0_mask;
3745 cap->arch.mxcsr_mask = x86_fpu_mxcsr_mask;
3746 cap->arch.conf_cpuid_maxops = VMX_NCPUIDS;
3749 const struct nvmm_impl nvmm_x86_vmx = {
3750 .name = "x86-vmx",
3751 .ident = vmx_ident,
3752 .init = vmx_init,
3753 .fini = vmx_fini,
3754 .capability = vmx_capability,
3755 .mach_conf_max = NVMM_X86_MACH_NCONF,
3756 .mach_conf_sizes = NULL,
3757 .vcpu_conf_max = NVMM_X86_VCPU_NCONF,
3758 .vcpu_conf_sizes = vmx_vcpu_conf_sizes,
3759 .state_size = sizeof(struct nvmm_x64_state),
3760 .machine_create = vmx_machine_create,
3761 .machine_destroy = vmx_machine_destroy,
3762 .machine_configure = vmx_machine_configure,
3763 .vcpu_create = vmx_vcpu_create,
3764 .vcpu_destroy = vmx_vcpu_destroy,
3765 .vcpu_configure = vmx_vcpu_configure,
3766 .vcpu_setstate = vmx_vcpu_setstate,
3767 .vcpu_getstate = vmx_vcpu_getstate,
3768 .vcpu_inject = vmx_vcpu_inject,
3769 .vcpu_run = vmx_vcpu_run
3770 };