/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2021 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Aaron LI <aly@aaronly.me>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Compatibility code to adapt NVMM (ported from NetBSD) for DragonFly.
 */
41 #ifndef _NVMM_COMPAT_H_
42 #define _NVMM_COMPAT_H_
45 #error "This file should not be included by userland programs."
48 #include <sys/param.h>
49 #include <sys/bitops.h>
51 #include <sys/malloc.h>
52 #include <sys/mman.h> /* MADV_RANDOM */
53 #include <sys/proc.h> /* lwp */
54 #include <sys/systm.h>
55 #include <sys/thread2.h>
58 #include <vm/vm_extern.h>
59 #include <vm/vm_map.h>
60 #include <vm/vm_object.h>
61 #include <vm/vm_page.h>
62 #include <vm/vm_pager.h>
63 #include <vm/vm_param.h> /* KERN_SUCCESS, etc. */
65 #include <machine/atomic.h>
66 #include <machine/cpu.h>
67 #include <machine/cpufunc.h>
68 #include <machine/md_var.h> /* cpu_high */
69 #include <machine/npx.h>
70 #include <machine/specialreg.h>
/*
 * On x86_64, map NetBSD's generic bit macros onto the 64-bit variants.
 * (The opening #ifdef was lost in extraction; the trailing #endif comment
 * confirms the guard.)
 */
#ifdef __x86_64__
#define __BIT(__n)		__BIT64(__n)
#define __BITS(__m, __n)	__BITS64(__m, __n)
#endif /* __x86_64__ */
/*
 * CPUID Fn0000_0001 features (NetBSD name -> DragonFly name)
 */
#define CPUID2_MONITOR		CPUID2_MON
#define CPUID2_DEADLINE		CPUID2_TSCDLT
#define CPUID2_RAZ		CPUID2_VMM

/*
 * Intel Deterministic Cache Parameters (CPUID Fn0000_0004)
 */
#define CPUID_DCP_CACHELEVEL	__BITS(7, 5)	/* Cache level (start at 1) */
#define CPUID_DCP_SHARING	__BITS(25, 14)	/* Sharing */
#define CPUID_DCP_CORE_P_PKG	__BITS(31, 26)	/* Cores/package */
/*
 * Intel/AMD Structured Extended Feature (CPUID Fn0000_0007)
 * NetBSD CPUID_SEF_* names mapped to DragonFly CPUID_STDEXT*_* names.
 */
/* %ecx == 0: %ebx (STDEXT) */
#define CPUID_SEF_FSGSBASE	CPUID_STDEXT_FSGSBASE
#define CPUID_SEF_TSC_ADJUST	CPUID_STDEXT_TSC_ADJUST
#define CPUID_SEF_SGX		CPUID_STDEXT_SGX
#define CPUID_SEF_BMI1		CPUID_STDEXT_BMI1
#define CPUID_SEF_HLE		CPUID_STDEXT_HLE
#define CPUID_SEF_AVX2		CPUID_STDEXT_AVX2
#define CPUID_SEF_FDPEXONLY	CPUID_STDEXT_FDP_EXC
#define CPUID_SEF_SMEP		CPUID_STDEXT_SMEP
#define CPUID_SEF_BMI2		CPUID_STDEXT_BMI2
#define CPUID_SEF_ERMS		CPUID_STDEXT_ERMS
#define CPUID_SEF_INVPCID	CPUID_STDEXT_INVPCID
#define CPUID_SEF_RTM		CPUID_STDEXT_RTM
#define CPUID_SEF_QM		CPUID_STDEXT_PQM
#define CPUID_SEF_FPUCSDS	CPUID_STDEXT_NFPUSG
#define CPUID_SEF_MPX		CPUID_STDEXT_MPX
#define CPUID_SEF_PQE		CPUID_STDEXT_PQE
#define CPUID_SEF_AVX512F	CPUID_STDEXT_AVX512F
#define CPUID_SEF_AVX512DQ	CPUID_STDEXT_AVX512DQ
#define CPUID_SEF_RDSEED	CPUID_STDEXT_RDSEED
#define CPUID_SEF_ADX		CPUID_STDEXT_ADX
#define CPUID_SEF_SMAP		CPUID_STDEXT_SMAP
#define CPUID_SEF_AVX512_IFMA	CPUID_STDEXT_AVX512IFMA
#define CPUID_SEF_CLFLUSHOPT	CPUID_STDEXT_CLFLUSHOPT
#define CPUID_SEF_CLWB		CPUID_STDEXT_CLWB
#define CPUID_SEF_PT		CPUID_STDEXT_PROCTRACE
#define CPUID_SEF_AVX512PF	CPUID_STDEXT_AVX512PF
#define CPUID_SEF_AVX512ER	CPUID_STDEXT_AVX512ER
#define CPUID_SEF_AVX512CD	CPUID_STDEXT_AVX512CD
#define CPUID_SEF_SHA		CPUID_STDEXT_SHA
#define CPUID_SEF_AVX512BW	CPUID_STDEXT_AVX512BW
#define CPUID_SEF_AVX512VL	CPUID_STDEXT_AVX512VL

/* %ecx == 0: %ecx (STDEXT2) */
#define CPUID_SEF_PREFETCHWT1	CPUID_STDEXT2_PREFETCHWT1
#define CPUID_SEF_AVX512_VBMI	CPUID_STDEXT2_AVX512VBMI
#define CPUID_SEF_UMIP		CPUID_STDEXT2_UMIP
#define CPUID_SEF_PKU		CPUID_STDEXT2_PKU
#define CPUID_SEF_OSPKE		CPUID_STDEXT2_OSPKE
#define CPUID_SEF_WAITPKG	CPUID_STDEXT2_WAITPKG
#define CPUID_SEF_AVX512_VBMI2	CPUID_STDEXT2_AVX512VBMI2
#define CPUID_SEF_CET_SS	CPUID_STDEXT2_CET_SS
#define CPUID_SEF_GFNI		CPUID_STDEXT2_GFNI
#define CPUID_SEF_VAES		CPUID_STDEXT2_VAES
#define CPUID_SEF_VPCLMULQDQ	CPUID_STDEXT2_VPCLMULQDQ
#define CPUID_SEF_AVX512_VNNI	CPUID_STDEXT2_AVX512VNNI
#define CPUID_SEF_AVX512_BITALG	CPUID_STDEXT2_AVX512BITALG
#define CPUID_SEF_AVX512_VPOPCNTDQ	CPUID_STDEXT2_AVX512VPOPCNTDQ
#define CPUID_SEF_LA57		CPUID_STDEXT2_LA57
#define CPUID_SEF_MAWAU		__BITS(21, 17)	/* MAWAU for BNDLDX/BNDSTX */
#define CPUID_SEF_RDPID		CPUID_STDEXT2_RDPID
#define CPUID_SEF_KL		CPUID_STDEXT2_KL
#define CPUID_SEF_CLDEMOTE	CPUID_STDEXT2_CLDEMOTE
#define CPUID_SEF_MOVDIRI	CPUID_STDEXT2_MOVDIRI
#define CPUID_SEF_MOVDIR64B	CPUID_STDEXT2_MOVDIR64B
#define CPUID_SEF_SGXLC		CPUID_STDEXT2_SGXLC
#define CPUID_SEF_PKS		CPUID_STDEXT2_PKS

/* %ecx == 0: %edx (STDEXT3) */
#define CPUID_SEF_AVX512_4VNNIW	CPUID_STDEXT3_AVX5124VNNIW
#define CPUID_SEF_AVX512_4FMAPS	CPUID_STDEXT3_AVX5124FMAPS
#define CPUID_SEF_FSREP_MOV	CPUID_STDEXT3_FSRM
#define CPUID_SEF_AVX512_VP2INTERSECT	CPUID_STDEXT3_AVX512VP2INTERSECT
#define CPUID_SEF_SRBDS_CTRL	CPUID_STDEXT3_MCUOPT
#define CPUID_SEF_MD_CLEAR	CPUID_STDEXT3_MD_CLEAR
#define CPUID_SEF_TSX_FORCE_ABORT	CPUID_STDEXT3_TSXFA
#define CPUID_SEF_SERIALIZE	CPUID_STDEXT3_SERIALIZE
#define CPUID_SEF_HYBRID	CPUID_STDEXT3_HYBRID
#define CPUID_SEF_TSXLDTRK	CPUID_STDEXT3_TSXLDTRK
#define CPUID_SEF_CET_IBT	CPUID_STDEXT3_CET_IBT
#define CPUID_SEF_IBRS		CPUID_STDEXT3_IBPB
#define CPUID_SEF_STIBP		CPUID_STDEXT3_STIBP
#define CPUID_SEF_L1D_FLUSH	CPUID_STDEXT3_L1D_FLUSH
#define CPUID_SEF_ARCH_CAP	CPUID_STDEXT3_ARCH_CAP
#define CPUID_SEF_CORE_CAP	CPUID_STDEXT3_CORE_CAP
#define CPUID_SEF_SSBD		CPUID_STDEXT3_SSBD
/*
 * Intel CPUID Extended Topology Enumeration (Fn0000_000B)
 */
/* %eax */
#define CPUID_TOP_SHIFTNUM	__BITS(4, 0)	/* Topology ID shift value */
/* %ecx (NOTE(review): register comment reconstructed — confirm) */
#define CPUID_TOP_LVLNUM	__BITS(7, 0)	/* Level number */
#define CPUID_TOP_LVLTYPE	__BITS(15, 8)	/* Level type */
#define CPUID_TOP_LVLTYPE_INVAL	0		/* Invalid */
#define CPUID_TOP_LVLTYPE_SMT	1		/* SMT */
#define CPUID_TOP_LVLTYPE_CORE	2		/* Core */

/*
 * AMD Processor Capacity Parameters and Extended Features (Fn8000_0008)
 */
#define CPUID_CAPEX_NC		__BITS(7, 0)
#define CPUID_CAPEX_ApicIdSize	__BITS(15, 12)
/*
 * XSAVE state components (XCR0 bits), NetBSD names.
 */
#define XCR0_X87	CPU_XFEATURE_X87	/* 0x00000001 */
#define XCR0_SSE	CPU_XFEATURE_SSE	/* 0x00000002 */

/*
 * MSR name aliases (NetBSD name -> DragonFly name)
 */
#define MSR_MISC_ENABLE		MSR_IA32_MISC_ENABLE	/* 0x1a0 */
#define MSR_CR_PAT		MSR_PAT			/* 0x277 */
#define MSR_SFMASK		MSR_SF_MASK		/* 0xc0000084 */
#define MSR_KERNELGSBASE	MSR_KGSBASE		/* 0xc0000102 */
#define MSR_NB_CFG		MSR_AMD_NB_CFG		/* 0xc001001f */
#define MSR_IC_CFG		MSR_AMD_IC_CFG		/* 0xc0011021 */
#define MSR_DE_CFG		MSR_AMD_DE_CFG		/* 0xc0011029 */
#define MSR_UCODE_AMD_PATCHLEVEL	MSR_AMD_PATCH_LEVEL	/* 0x0000008b */

/* MSR_IA32_ARCH_CAPABILITIES (0x10a) */
#define IA32_ARCH_RDCL_NO	IA32_ARCH_CAP_RDCL_NO
#define IA32_ARCH_IBRS_ALL	IA32_ARCH_CAP_IBRS_ALL
#define IA32_ARCH_RSBA		IA32_ARCH_CAP_RSBA
#define IA32_ARCH_SKIP_L1DFL_VMENTRY	IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY
#define IA32_ARCH_SSB_NO	IA32_ARCH_CAP_SSB_NO
#define IA32_ARCH_MDS_NO	IA32_ARCH_CAP_MDS_NO
#define IA32_ARCH_IF_PSCHANGE_MC_NO	IA32_ARCH_CAP_IF_PSCHANGE_MC_NO
#define IA32_ARCH_TSX_CTRL	IA32_ARCH_CAP_TSX_CTRL
#define IA32_ARCH_TAA_NO	IA32_ARCH_CAP_TAA_NO

/* MSR_IA32_FLUSH_CMD (0x10b) */
#define IA32_FLUSH_CMD_L1D_FLUSH	IA32_FLUSH_CMD_L1D

/* AMD SVM virtual machine control register */
#define MSR_VMCR	MSR_AMD_VM_CR	/* 0xc0010114 */
#define VMCR_DPD	VM_CR_DPD
#define VMCR_RINIT	VM_CR_R_INIT
#define VMCR_DISA20	VM_CR_DIS_A20M
#define VMCR_LOCK	VM_CR_LOCK
#define VMCR_SVMED	VM_CR_SVMDIS
/*
 * Constants, functions, etc.
 */
#define DIAGNOSTIC		INVARIANTS
#define MAXCPUS			SMP_MAXCPU

#define curlwp			(curthread->td_lwp)
#define printf			kprintf
#define __cacheline_aligned	__cachealign
#define __diagused		__debugvar

#define __arraycount(arr)	nitems(arr)
#define __insn_barrier()	cpu_ccfence()

#define KASSERT(x)		KKASSERT(x)
/*
 * NOTE(review): ffs()/ffsl() return the index of the LOWEST set bit
 * (1-based), so this is only a true log2 for powers of two, and
 * ilog2(0) yields -1.  It also evaluates 'n' more than once — callers
 * must pass a side-effect-free expression.
 */
#define ilog2(n)	((sizeof(n) > 4 ? ffsl(n) : ffs(n)) - 1)
#define uimin(a, b)	MIN(a, b)

/* Control/debug/extended-control register load wrappers */
#define lcr2(x)		load_cr2(x)
#define lcr4(x)		load_cr4(x)
#define ldr0(x)		load_dr0(x)
#define ldr1(x)		load_dr1(x)
#define ldr2(x)		load_dr2(x)
#define ldr3(x)		load_dr3(x)
#define ldr6(x)		load_dr6(x)
#define ldr7(x)		load_dr7(x)
#define rdxcr(xcr)	rxcr(xcr)
#define wrxcr(xcr, val)	load_xcr(xcr, val)

/*
 * CPUID features/level
 */
#define cpuid_level	cpu_high
#define x86_cpuid(eax, regs)		do_cpuid(eax, regs)
#define x86_cpuid2(eax, ecx, regs)	cpuid_count(eax, ecx, regs)
275 #define kmutex_t struct lock
276 #define mutex_init(lock, type, ipl) lockinit(lock, "nvmmmtx", 0, 0)
277 #define mutex_destroy(lock) lockuninit(lock)
278 #define mutex_enter(lock) lockmgr(lock, LK_EXCLUSIVE)
279 #define mutex_exit(lock) lockmgr(lock, LK_RELEASE)
280 #define mutex_owned(lock) (lockstatus(lock, curthread) == LK_EXCLUSIVE)
286 RW_READER = LK_SHARED,
287 RW_WRITER = LK_EXCLUSIVE,
289 #define krwlock_t struct lock
290 #define rw_init(lock) lockinit(lock, "nvmmrw", 0, 0)
291 #define rw_destroy(lock) lockuninit(lock)
292 #define rw_enter(lock, op) lockmgr(lock, op)
293 #define rw_exit(lock) lockmgr(lock, LK_RELEASE)
294 #define rw_write_held(lock) (lockstatus(lock, curthread) == LK_EXCLUSIVE)
299 MALLOC_DECLARE(M_NVMM);
302 KM_NOSLEEP = M_NOWAIT,
304 #define kmem_alloc(size, flags) \
306 KKASSERT((flags & ~(KM_SLEEP|KM_NOSLEEP)) == 0); \
307 kmalloc(size, M_NVMM, flags); \
309 #define kmem_zalloc(size, flags) \
311 KKASSERT((flags & ~(KM_SLEEP|KM_NOSLEEP)) == 0); \
312 kmalloc(size, M_NVMM, flags|M_ZERO); \
314 #define kmem_free(data, size) kfree(data, M_NVMM)
319 #define atomic_inc_64(p) atomic_add_64(p, 1)
320 #define atomic_inc_uint(p) atomic_add_int(p, 1)
321 #define atomic_dec_uint(p) atomic_subtract_int(p, 1)
/*
 * Preemption / critical sections
 *
 * In DragonFly, a normal kernel thread will not migrate to another CPU or
 * be preempted (except by an interrupt thread), so
 * kpreempt_{disable,enable}() are not needed.  However, we can't use a
 * critical section instead, because that would also prevent
 * interrupt/reschedule flags from being set, which would be a problem for
 * nvmm_return_needed() that's called from the VCPU run loop.
 * (NOTE(review): the tail of this sentence was lost in extraction —
 * confirm wording against upstream.)
 */
#define kpreempt_disable()	/* nothing */
#define kpreempt_enable()	/* nothing */
#define kpreempt_disabled()	true
/*
 * FPU state adapters
 */
/* Adapt NetBSD's 'struct fxsave' to DragonFly's 'union savefpu' */
#define fx_cw		sv_ymm64.sv_env.en_cw
#define fx_sw		sv_ymm64.sv_env.en_sw
#define fx_tw		sv_ymm64.sv_env.en_tw
#define fx_zero		sv_ymm64.sv_env.en_zero
#define fx_mxcsr	sv_ymm64.sv_env.en_mxcsr
#define fx_mxcsr_mask	sv_ymm64.sv_env.en_mxcsr_mask
/* Adapt NetBSD's 'struct xsave_header' to DragonFly's 'union savefpu' */
#define xsh_xstate_bv	sv_ymm64.sv_xstate.sx_hd.xstate_bv
#define xsh_xcomp_bv	sv_ymm64.sv_xstate.sx_hd.xstate_xcomp_bv

#define x86_xsave_features	npx_xcr0_mask
#define x86_fpu_mxcsr_mask	npx_mxcsr_mask

/* Set/clear CR0.TS (task-switched bit) for lazy FPU handling */
#define stts()		load_cr0(rcr0() | CR0_TS)
#define clts()		__asm__("clts")
360 x86_dbregs_save(struct lwp *lp)
364 KKASSERT(lp != NULL && lp->lwp_thread != NULL);
365 pcb = lp->lwp_thread->td_pcb;
367 if (!(pcb->pcb_flags & PCB_DBREGS))
370 pcb->pcb_dr0 = rdr0();
371 pcb->pcb_dr1 = rdr1();
372 pcb->pcb_dr2 = rdr2();
373 pcb->pcb_dr3 = rdr3();
374 pcb->pcb_dr6 = rdr6();
375 pcb->pcb_dr7 = rdr7();
379 x86_dbregs_restore(struct lwp *lp)
383 KKASSERT(lp != NULL && lp->lwp_thread != NULL);
384 pcb = lp->lwp_thread->td_pcb;
386 if (!(pcb->pcb_flags & PCB_DBREGS))
389 load_dr0(pcb->pcb_dr0);
390 load_dr1(pcb->pcb_dr1);
391 load_dr2(pcb->pcb_dr2);
392 load_dr3(pcb->pcb_dr3);
393 load_dr6(pcb->pcb_dr6);
394 load_dr7(pcb->pcb_dr7);
398 * Virtual address space management
400 typedef vm_offset_t vaddr_t;
401 typedef vm_offset_t voff_t;
402 typedef vm_size_t vsize_t;
403 typedef vm_paddr_t paddr_t;
405 #define uvm_object vm_object
407 static __inline struct vmspace *
408 uvmspace_alloc(vaddr_t vmin, vaddr_t vmax, bool topdown)
410 KKASSERT(topdown == false);
411 return vmspace_alloc(vmin, vmax);
415 uvmspace_free(struct vmspace *space)
417 pmap_del_all_cpus(space);
422 uvm_fault(struct vm_map *map, vaddr_t vaddr, vm_prot_t access_type)
426 if (access_type & VM_PROT_WRITE)
427 fault_flags = VM_FAULT_DIRTY;
429 fault_flags = VM_FAULT_NORMAL;
431 return vm_fault(map, trunc_page(vaddr), access_type, fault_flags);
434 /* NetBSD's UVM functions (e.g., uvm_fault()) return 0 on success,
435 * while DragonFly's VM functions return KERN_SUCCESS, which although
436 * defined to be 0 as well, but assert it to be future-proof. */
437 CTASSERT(KERN_SUCCESS == 0);
/*
 * NetBSD-style uvm_map() flag encoding.
 */
/* bits 0x07: protection codes */
#define UVM_PROT_MASK	0x07
#define UVM_PROT_RW	VM_PROT_RW
#define UVM_PROT_RWX	VM_PROT_ALL
/* bits 0x30: inherit codes */
#define UVM_INH_MASK	0x30
#define UVM_INH_SHARE	0x00
#define UVM_INH_NONE	0x20
/* bits 0x700: max protection */
/* bits 0x7000: advice codes */
#define UVM_ADV_MASK	0x7
#define UVM_ADV_RANDOM	MADV_RANDOM
/* bits 0xffff0000: mapping flags */
#define UVM_FLAG_FIXED	0x00010000	/* fixed address: do not search for
					 * space (see uvm_map() below) */
#define UVM_FLAG_UNMAP	0x08000000	/* unmap existing entries */

/* encoding of flags for uvm_map() */
#define UVM_MAPFLAG(prot, maxprot, inherit, advice, flags) \
	(((advice) << 12) | ((maxprot) << 8) | (inherit) | (prot) | (flags))
/* extract info from flags */
#define UVM_PROTECTION(x)	((x) & UVM_PROT_MASK)
#define UVM_INHERIT(x)		(((x) & UVM_INH_MASK) >> 4)
#define UVM_MAXPROTECTION(x)	(((x) >> 8) & UVM_PROT_MASK)
#define UVM_ADVICE(x)		(((x) >> 12) & UVM_ADV_MASK)
464 /* Establish a pageable mapping from $obj at $offset in the map $map.
465 * The start address of the mapping will be returned in $startp.
468 uvm_map(struct vm_map *map, vaddr_t *startp /* IN/OUT */, vsize_t size,
469 struct vm_object *obj, voff_t offset, vsize_t align, int flags)
471 vm_offset_t addr = *startp;
472 vm_inherit_t inherit = (vm_inherit_t)UVM_INHERIT(flags);
473 int advice = UVM_ADVICE(flags);
474 int rv = KERN_SUCCESS;
477 KKASSERT((size & PAGE_MASK) == 0);
478 KKASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0);
479 KKASSERT(powerof2(align));
480 KKASSERT(inherit == VM_INHERIT_SHARE || inherit == VM_INHERIT_NONE);
481 KKASSERT(advice == MADV_RANDOM);
482 KKASSERT(obj != NULL);
485 align = 1; /* any alignment */
487 count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
490 if (flags & UVM_FLAG_FIXED) {
491 KKASSERT(flags & UVM_FLAG_UNMAP);
492 /* Remove any existing entries in the range, so the new
493 * mapping can be created at the requested address. */
494 rv = vm_map_delete(map, addr, addr + size, &count);
496 if (vm_map_findspace(map, addr, size, align, 0, &addr))
499 if (rv != KERN_SUCCESS) {
501 vm_map_entry_release(count);
506 /* obj reference has been bumped prior to calling uvm_map() */
507 rv = vm_map_insert(map, &count, obj, NULL,
508 offset, NULL, addr, addr + size,
509 VM_MAPTYPE_NORMAL, VM_SUBSYS_NVMM,
510 UVM_PROTECTION(flags), UVM_MAXPROTECTION(flags), 0);
513 vm_map_entry_release(count);
514 if (rv != KERN_SUCCESS)
517 vm_map_inherit(map, addr, addr + size, inherit);
518 vm_map_madvise(map, addr, addr + size, advice, 0);
525 uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end,
526 bool new_pageable, int lockflags)
528 KKASSERT(lockflags == 0);
529 return vm_map_wire(map, start, end, new_pageable ? KM_PAGEABLE : 0);
533 uvm_unmap(struct vm_map *map, vaddr_t start, vaddr_t end)
535 vm_map_remove(map, start, end);
539 uvm_deallocate(struct vm_map *map, vaddr_t start, vsize_t size)
541 /* Unwire kernel page before remove, because vm_map_remove() only
542 * handles user wirings.
544 vm_map_wire(map, trunc_page(start), round_page(start + size),
546 vm_map_remove(map, trunc_page(start), round_page(start + size));
549 /* Kernel memory allocation */
552 UVM_KMF_WIRED = 0x01, /* wired memory */
553 UVM_KMF_ZERO = 0x02, /* want zero-filled memory */
556 /* NOTE: DragonFly's kmem_alloc() may return 0 ! */
557 static __inline vaddr_t
558 uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, int flags)
560 KKASSERT(map == kernel_map);
561 KKASSERT(align == 0);
562 KKASSERT(flags == (UVM_KMF_WIRED | UVM_KMF_ZERO));
564 /* Add parentheses around 'kmem_alloc' to avoid macro expansion */
565 return (kmem_alloc)(map, size, VM_SUBSYS_NVMM);
569 uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size, int flags __unused)
571 KKASSERT(map == kernel_map);
573 (kmem_free)(map, addr, size);
576 /* Physical page allocation */
578 struct vm_anon; /* dummy */
580 UVM_PGA_ZERO = VM_ALLOC_ZERO,
583 static __inline struct vm_page *
584 uvm_pagealloc(struct vm_object *obj, vm_offset_t off, struct vm_anon *anon,
587 KKASSERT(anon == NULL);
588 KKASSERT(flags == UVM_PGA_ZERO);
590 return vm_page_alloczwq(OFF_TO_IDX(off),
591 VM_ALLOC_SYSTEM | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
595 uvm_pagefree(struct vm_page *pg)
600 /* Anonymous object allocation */
602 static __inline struct vm_object *
603 uao_create(size_t size, int flags)
605 struct vm_object *object;
607 KKASSERT(flags == 0);
609 /* The object should be pageable (e.g., object of guest physical
610 * memory), so choose default_pager. */
611 object = default_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0);
612 vm_object_set_flag(object, OBJ_NOSPLIT);
617 /* Create an additional reference to the named anonymous memory object. */
619 uao_reference(struct vm_object *object)
621 vm_object_reference_quick(object);
624 /* Remove a reference from the named anonymous memory object, destroying it
625 * if the last reference is removed. */
627 uao_detach(struct vm_object *object)
629 vm_object_deallocate(object);
632 #endif /* _NVMM_COMPAT_H_ */