2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 2021 The DragonFly Project. All rights reserved.
6 * This code is derived from software contributed to The DragonFly Project
7 * by Aaron LI <aly@aaronly.me>
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in
17 * the documentation and/or other materials provided with the
19 * 3. Neither the name of The DragonFly Project nor the names of its
20 * contributors may be used to endorse or promote products derived
21 * from this software without specific, prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
44 #include "nvmm_internal.h"
46 MALLOC_DEFINE(M_NVMM, "nvmm", "NVMM data");
49 * NVMM expects VM functions to return 0 on success, but DragonFly's VM
50 * functions return KERN_SUCCESS. Although it's also defined to be 0,
51 * assert it to be future-proofing.
53 CTASSERT(KERN_SUCCESS == 0);
/*
 * Create a new VM space spanning [vmin, vmax].
 * Thin adapter mapping NVMM's os_vmspace_create() onto DragonFly's
 * vmspace_alloc().
 * NOTE(review): surrounding lines (return type, braces) are elided in
 * this view of the file.
 */
56 os_vmspace_create(vaddr_t vmin, vaddr_t vmax)
58 return vmspace_alloc(vmin, vmax);
/*
 * Destroy a VM space.  Visible step: detach this space's pmap from all
 * CPUs before the space itself is reclaimed.
 * NOTE(review): the remaining teardown statements are elided in this
 * view — presumably a vmspace release call follows; confirm against the
 * full source.
 */
62 os_vmspace_destroy(os_vmspace_t *vm)
64 pmap_del_all_cpus(vm);
/*
 * Handle a page fault in VM space 'vm' at address 'va' for access type
 * 'prot'.  Write accesses request VM_FAULT_DIRTY so the faulted page is
 * dirtied immediately; all other accesses use VM_FAULT_NORMAL.  Returns
 * the vm_fault() status (KERN_SUCCESS == 0 on success, as asserted by
 * the CTASSERT near the top of the file).
 */
69 os_vmspace_fault(os_vmspace_t *vm, vaddr_t va, vm_prot_t prot)
73 if (prot & VM_PROT_WRITE)
74 fault_flags = VM_FAULT_DIRTY;
/* else branch — the 'else' keyword line is elided in this view */
76 fault_flags = VM_FAULT_NORMAL;
/* Fault on the page-aligned address within the space's map. */
78 return vm_fault(&vm->vm_map, trunc_page(va), prot, fault_flags);
/*
 * Create an anonymous VM object of 'size' bytes, backed by the default
 * pager.  OBJ_NOSPLIT is set on the object — presumably to keep it from
 * being split when map entries covering it are clipped; confirm against
 * vm_object(9) semantics.
 * NOTE(review): the return statement is elided in this view.
 */
82 os_vmobj_create(voff_t size)
84 struct vm_object *object;
86 object = default_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0);
87 vm_object_set_flag(object, OBJ_NOSPLIT);
/*
 * Take an additional reference on a VM object.  The object token must be
 * held across vm_object_reference_locked(), hence the hold/drop pair.
 */
93 os_vmobj_ref(os_vmobj_t *vmobj)
95 vm_object_hold(vmobj);
96 vm_object_reference_locked(vmobj);
97 vm_object_drop(vmobj);
/*
 * Release one reference on a VM object; the object is destroyed when the
 * last reference is dropped.
 */
101 os_vmobj_rel(os_vmobj_t *vmobj)
103 vm_object_deallocate(vmobj);
/*
 * Map 'size' bytes of VM object 'vmobj', starting at 'offset', into
 * 'map' at/near *addr with the given protections.
 *
 * Parameters (as visible here):
 *   addr    - in/out: requested start address; updated via findspace in
 *             the non-fixed case.
 *   wired   - whether to wire the mapping (kernel wiring step at the end).
 *   fixed   - whether the mapping must land exactly at *addr; the
 *             fixed/non-fixed branch structure is elided in this view.
 *   shared  - selects VM_INHERIT_SHARE vs VM_INHERIT_NONE for the range.
 *   prot/maxprot - PROT_* flags, converted to VM_PROT_* below.
 *
 * Returns a kernel status code (KERN_SUCCESS == 0 on success).
 * NOTE(review): several control-flow lines (braces, returns, the
 * fixed-vs-findspace if/else, vmprot/vmmaxprot zero-initialization) are
 * elided in this view; comments below describe only the visible steps.
 */
107 os_vmobj_map(struct vm_map *map, vaddr_t *addr, vsize_t size, os_vmobj_t *vmobj,
108 voff_t offset, bool wired, bool fixed, bool shared, int prot, int maxprot)
110 vm_prot_t vmprot, vmmaxprot;
111 vm_inherit_t inherit;
112 vm_offset_t start = *addr;
113 int rv = KERN_SUCCESS;
/* Convert prot (PROT_*) to VM protection bits. */
118 if (prot & PROT_READ)
119 vmprot |= VM_PROT_READ;
120 if (prot & PROT_WRITE)
121 vmprot |= VM_PROT_WRITE;
122 if (prot & PROT_EXEC)
123 vmprot |= VM_PROT_EXECUTE;
125 /* Convert maxprot. */
127 if (maxprot & PROT_READ)
128 vmmaxprot |= VM_PROT_READ;
129 if (maxprot & PROT_WRITE)
130 vmmaxprot |= VM_PROT_WRITE;
131 if (maxprot & PROT_EXEC)
132 vmmaxprot |= VM_PROT_EXECUTE;
/* Pre-reserve map entries so the insert below cannot block on them. */
134 count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
139 * Remove any existing entries in the range, so the new
140 * mapping can be created at the requested address.
/* (fixed case, per the comment above) */
142 rv = vm_map_delete(map, start, start + size, &count);
/* (non-fixed case) find a free range at or after 'start' */
144 if (vm_map_findspace(map, start, size, 1, 0, &start))
/* On failure, give back the reserved entries before returning. */
147 if (rv != KERN_SUCCESS) {
149 vm_map_entry_release(count);
153 /* Get a reference to the object. */
157 * Map the object. This consumes the reference on success only. On
158 * failure we must drop the reference manually.
160 vm_object_hold(vmobj);
161 rv = vm_map_insert(map, &count, vmobj, NULL, offset, NULL,
162 start, start + size, VM_MAPTYPE_NORMAL, VM_SUBSYS_NVMM,
163 vmprot, vmmaxprot, 0);
164 vm_object_drop(vmobj);
166 vm_map_entry_release(count);
167 if (rv != KERN_SUCCESS) {
/* Set inheritance: shared mappings propagate to children, others don't. */
173 inherit = shared ? VM_INHERIT_SHARE : VM_INHERIT_NONE;
174 rv = vm_map_inherit(map, start, start + size, inherit);
175 if (rv != KERN_SUCCESS) {
/* Undo the mapping on failure (not yet wired, hence 'false'). */
176 os_vmobj_unmap(map, start, start + size, false);
/* Wire the range — presumably gated on 'wired'; guard elided here. */
181 rv = vm_map_kernel_wiring(map, start, start + size, 0);
182 if (rv != KERN_SUCCESS) {
183 os_vmobj_unmap(map, start, start + size, false);
/*
 * Remove the mapping [start, end) from 'map'.  Kernel wirings are undone
 * first (KM_PAGEABLE) so vm_map_remove() does not trip over wired pages.
 * NOTE(review): the 'wired' parameter likely gates the unwiring step;
 * the guard line is elided in this view — confirm against full source.
 */
193 os_vmobj_unmap(struct vm_map *map, vaddr_t start, vaddr_t end, bool wired)
196 /* Unwire kernel mappings before removing. */
197 vm_map_kernel_wiring(map, start, end, KM_PAGEABLE);
199 vm_map_remove(map, start, end);
/*
 * Allocate page-aligned, page-rounded kernel memory of at least 'size'
 * bytes from kernel_map.  The allocation flags line is elided in this
 * view — presumably includes a zeroing flag, given the _zalloc name;
 * confirm against full source.
 */
203 os_pagemem_zalloc(size_t size)
207 /* NOTE: kmem_alloc() may return 0 ! */
208 ret = (void *)kmem_alloc(kernel_map, roundup(size, PAGE_SIZE),
/* Sanity: result must be page-aligned. */
211 OS_ASSERT((uintptr_t)ret % PAGE_SIZE == 0);
/*
 * Free memory obtained from os_pagemem_zalloc().  'size' must match the
 * original request; it is rounded up to whole pages the same way.
 */
217 os_pagemem_free(void *ptr, size_t size)
219 kmem_free(kernel_map, (vaddr_t)ptr, roundup(size, PAGE_SIZE));
/*
 * NOTE(review): the enclosing function header is elided in this view —
 * presumably os_pa_zalloc(), given os_pa_free() below.  Allocates one
 * zeroed VM page (VM_ALLOC_ZERO, retrying until it succeeds per
 * VM_ALLOC_RETRY) and returns its physical address.
 */
227 pg = vm_page_alloczwq(0,
228 VM_ALLOC_SYSTEM | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
230 return VM_PAGE_TO_PHYS(pg);
/*
 * Free a single page previously obtained via the os_pa_zalloc() path,
 * identified by its physical address.
 */
234 os_pa_free(paddr_t pa)
236 vm_page_freezwq(PHYS_TO_VM_PAGE(pa));
/*
 * Allocate 'npages' physically contiguous, zeroed, page-aligned pages
 * anywhere in the physical address range [0, ~0UL].  Out-parameters
 * *pa / *va receive the physical and kernel virtual addresses — the
 * assignment and return lines are elided in this view.
 */
240 os_contigpa_zalloc(paddr_t *pa, vaddr_t *va, size_t npages)
244 addr = contigmalloc(npages * PAGE_SIZE, M_NVMM, M_WAITOK | M_ZERO,
245 0, ~0UL, PAGE_SIZE, 0);
/*
 * Free a contiguous allocation made by os_contigpa_zalloc().  Only the
 * virtual address is needed by contigfree(); 'pa' is unused.
 */
255 os_contigpa_free(paddr_t pa __unused, vaddr_t va, size_t npages)
257 contigfree((void *)va, npages * PAGE_SIZE, M_NVMM);
260 /* -------------------------------------------------------------------------- */
262 #include <sys/conf.h>
263 #include <sys/devfs.h>
264 #include <sys/device.h>
265 #include <sys/fcntl.h>
266 #include <sys/module.h>
/* Character-device entry points for /dev/nvmm. */
268 static d_open_t dfbsd_nvmm_open;
269 static d_ioctl_t dfbsd_nvmm_ioctl;
/* Per-open private-data destructor, run when the cdevpriv is released. */
270 static d_priv_dtor_t dfbsd_nvmm_dtor;
/*
 * Device switch: MPSAFE — handlers do their own locking.
 * NOTE(review): the struct's closing brace (and any further members) is
 * elided in this view.
 */
272 static struct dev_ops nvmm_ops = {
273 { "nvmm", 0, D_MPSAFE },
274 .d_open = dfbsd_nvmm_open,
275 .d_ioctl = dfbsd_nvmm_ioctl,
/*
 * open(2) handler for /dev/nvmm.  Visible logic:
 *  - fails when no backend implementation was detected (nvmm_impl NULL);
 *  - requires O_CLOEXEC (the rejection return is elided in this view);
 *  - a write-only open gets the special root owner, otherwise a fresh
 *    per-process owner is allocated and stamped with the caller's pid;
 *  - the owner is attached to this open file via cdevpriv, with
 *    dfbsd_nvmm_dtor as the release callback; on attach failure the
 *    owner is destroyed immediately.
 */
279 dfbsd_nvmm_open(struct dev_open_args *ap)
281 int flags = ap->a_oflags;
282 struct nvmm_owner *owner;
286 if (__predict_false(nvmm_impl == NULL))
288 if (!(flags & O_CLOEXEC))
/* OFLAGS() converts kernel FREAD/FWRITE-style flags back to O_* form. */
291 if (OFLAGS(flags) & O_WRONLY) {
292 owner = &nvmm_root_owner;
294 owner = os_mem_alloc(sizeof(*owner));
295 owner->pid = curthread->td_proc->p_pid;
298 fp = ap->a_fpp ? *ap->a_fpp : NULL;
299 error = devfs_set_cdevpriv(fp, owner, dfbsd_nvmm_dtor);
301 dfbsd_nvmm_dtor(owner);
/*
 * cdevpriv destructor: runs when an open of /dev/nvmm is finally
 * released.  Destroys all virtual machines belonging to this owner,
 * then frees the owner struct — unless it is the shared static
 * nvmm_root_owner, which is never freed.
 */
309 dfbsd_nvmm_dtor(void *arg)
311 struct nvmm_owner *owner = arg;
313 OS_ASSERT(owner != NULL);
314 nvmm_kill_machines(owner);
315 if (owner != &nvmm_root_owner) {
316 os_mem_free(owner, sizeof(*owner));
/*
 * ioctl(2) handler for /dev/nvmm: look up the owner attached to this
 * open file (set in dfbsd_nvmm_open) and forward the request to the
 * MI nvmm_ioctl() dispatcher.
 */
321 dfbsd_nvmm_ioctl(struct dev_ioctl_args *ap)
323 unsigned long cmd = ap->a_cmd;
324 void *data = ap->a_data;
325 struct file *fp = ap->a_fp;
326 struct nvmm_owner *owner = NULL;
/* Owner must exist for any successfully opened fd. */
328 devfs_get_cdevpriv(fp, (void **)&owner);
329 OS_ASSERT(owner != NULL);
331 return nvmm_ioctl(owner, cmd, data);
334 /* -------------------------------------------------------------------------- */
/*
 * NOTE(review): enclosing function header elided in this view —
 * presumably the attach routine.  Visible: panics if initialization
 * reaches an impossible state, then logs the selected backend.
 */
343 panic("%s: impossible", __func__);
344 os_printf("nvmm: attached, using backend %s\n", nvmm_impl->name);
/*
 * NOTE(review): enclosing function header elided in this view —
 * presumably the detach routine.  Refuses to detach while any virtual
 * machine still exists (the error-return line is elided).
 */
352 if (os_atomic_load_uint(&nmachines) > 0)
/*
 * Kernel module event handler (load/unload).  Visible logic:
 *  - load: probe CPU support via nvmm_ident(), attach the backend, and
 *    create the /dev/nvmm character device (root:GID_NVMM, mode 0640);
 *  - unload: detach the backend.
 * The switch skeleton, error returns, and device teardown are elided in
 * this view.  'dev' is static so the created cdev survives across calls.
 */
360 nvmm_modevent(module_t mod __unused, int type, void *data __unused)
362 static cdev_t dev = NULL;
367 if (nvmm_ident() == NULL) {
368 os_printf("nvmm: cpu not supported\n");
371 error = nvmm_attach();
375 dev = make_dev(&nvmm_ops, 0, UID_ROOT, GID_NVMM, 0640, "nvmm");
377 os_printf("nvmm: unable to create device\n");
385 error = nvmm_detach();
/*
 * Module registration: route load/unload events to nvmm_modevent, hook
 * in at the pseudo-device initialization stage, and export the module
 * version for dependency checking.
 */
402 static moduledata_t nvmm_moddata = {
404 .evhand = nvmm_modevent,
408 DECLARE_MODULE(nvmm, nvmm_moddata, SI_SUB_PSEUDO, SI_ORDER_ANY);
409 MODULE_VERSION(nvmm, NVMM_KERN_VERSION);