2 * Copyright (c) 2021 Maxime Villard, m00nbsd.net
3 * Copyright (c) 2021 The DragonFly Project.
6 * This code is part of the NVMM hypervisor.
8 * This code is derived from software contributed to The DragonFly Project
9 * by Aaron LI <aly@aaronly.me>
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
27 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 #error "This file should not be included by userland programs."
40 #if defined(__NetBSD__)
42 #include <uvm/uvm_object.h>
43 #include <uvm/uvm_extern.h>
44 #include <uvm/uvm_page.h>
45 #elif defined(__DragonFly__)
47 #include <sys/malloc.h> /* contigmalloc, contigfree */
48 #include <sys/proc.h> /* LWP_MP_URETMASK */
50 #include <vm/vm_extern.h>
51 #include <vm/vm_map.h>
52 #include <vm/vm_object.h>
53 #include <vm/vm_page.h>
54 #include <vm/vm_pager.h>
55 #include <vm/vm_param.h> /* KERN_SUCCESS, etc. */
56 #include <vm/pmap.h> /* pmap_ept_transform, pmap_npt_transform */
57 #include <machine/cpu.h> /* nvmm_break_wanted */
58 #include <machine/cpufunc.h> /* ffsl, ffs, etc. */
#if defined(__NetBSD__)
/* OS-neutral aliases for the native VM-space, VM-object and lock types. */
typedef struct vmspace os_vmspace_t;
typedef struct uvm_object os_vmobj_t;
typedef krwlock_t os_rwl_t;
typedef kmutex_t os_mtx_t;
#elif defined(__DragonFly__)
typedef struct vmspace os_vmspace_t;
typedef struct vm_object os_vmobj_t;
/* DragonFly has no separate rwlock/mutex types here: both map to lockmgr locks. */
typedef struct lock os_rwl_t;
typedef struct lock os_mtx_t;
/* A few standard types. */
/* NetBSD-style VM scalar types, absent on DragonFly. */
typedef vm_offset_t vaddr_t;
typedef vm_offset_t voff_t;
typedef vm_size_t vsize_t;
typedef vm_paddr_t paddr_t;
#if defined(__DragonFly__)
/* Map NetBSD kernel-option/attribute names to their DragonFly equivalents. */
#define DIAGNOSTIC INVARIANTS
#define __cacheline_aligned __cachealign
#define __diagused __debugvar
#if defined(__DragonFly__)
/* NetBSD helper macros not provided by DragonFly headers. */
#define __arraycount(__x) (sizeof(__x) / sizeof(__x[0]))
#define __insn_barrier() __asm __volatile("":::"memory")
#if defined(__NetBSD__)
#include <sys/bitops.h>
#elif defined(__DragonFly__)
#include <sys/bitops.h>
/*
 * Map the generic __BIT/__BITS names onto the 64-bit variants.
 * NOTE(review): the #endif comment below suggests an #ifdef __x86_64__
 * guard that is not visible in this chunk — confirm against upstream.
 */
#define __BIT(__n) __BIT64(__n)
#define __BITS(__m, __n) __BITS64(__m, __n)
#endif /* __x86_64__ */
/* Kernel map and current process's map, under OS-neutral names. */
#if defined(__NetBSD__) || defined(__DragonFly__)
#define os_kernel_map kernel_map
/* NOTE(review): expansion is unparenthesized; fine as a call argument,
 * but consider (&curproc->p_vmspace->vm_map) for macro hygiene. */
#define os_curproc_map &curproc->p_vmspace->vm_map
112 #if defined(__NetBSD__)
/*
 * Reader/writer lock wrappers (NetBSD: krwlock_t).
 * Fix: os_rwl_rlock/os_rwl_wlock previously expanded with a trailing
 * semicolon, which produces `;;` at call sites and breaks use in
 * unbraced if/else bodies. Callers supply their own semicolon.
 */
#define os_rwl_init(lock) rw_init(lock)
#define os_rwl_destroy(lock) rw_destroy(lock)
#define os_rwl_rlock(lock) rw_enter(lock, RW_READER)
#define os_rwl_wlock(lock) rw_enter(lock, RW_WRITER)
#define os_rwl_unlock(lock) rw_exit(lock)
#define os_rwl_wheld(lock) rw_write_held(lock)
119 #elif defined(__DragonFly__)
/*
 * Reader/writer lock wrappers (DragonFly: lockmgr locks).
 * Fix: os_rwl_rlock/os_rwl_wlock previously expanded with a trailing
 * semicolon, which produces `;;` at call sites and breaks use in
 * unbraced if/else bodies. Callers supply their own semicolon.
 */
#define os_rwl_init(lock) lockinit(lock, "nvmmrw", 0, 0)
#define os_rwl_destroy(lock) lockuninit(lock)
#define os_rwl_rlock(lock) lockmgr(lock, LK_SHARED)
#define os_rwl_wlock(lock) lockmgr(lock, LK_EXCLUSIVE)
#define os_rwl_unlock(lock) lockmgr(lock, LK_RELEASE)
#define os_rwl_wheld(lock) (lockstatus(lock, curthread) == LK_EXCLUSIVE)
/* Mutexes. On DragonFly, emulated with exclusively-held lockmgr locks. */
#if defined(__NetBSD__)
#define os_mtx_init(lock) mutex_init(lock, MUTEX_DEFAULT, IPL_NONE)
#define os_mtx_destroy(lock) mutex_destroy(lock)
#define os_mtx_lock(lock) mutex_enter(lock)
#define os_mtx_unlock(lock) mutex_exit(lock)
#define os_mtx_owned(lock) mutex_owned(lock)
#elif defined(__DragonFly__)
#define os_mtx_init(lock) lockinit(lock, "nvmmmtx", 0, 0)
#define os_mtx_destroy(lock) lockuninit(lock)
#define os_mtx_lock(lock) lockmgr(lock, LK_EXCLUSIVE)
#define os_mtx_unlock(lock) lockmgr(lock, LK_RELEASE)
#define os_mtx_owned(lock) (lockstatus(lock, curthread) == LK_EXCLUSIVE)
/* Sleeping kernel-memory allocators. Callers pass the size to os_mem_free(). */
#if defined(__NetBSD__)
#include <sys/kmem.h>
#define os_mem_alloc(size) kmem_alloc(size, KM_SLEEP)
#define os_mem_zalloc(size) kmem_zalloc(size, KM_SLEEP)
#define os_mem_free(ptr, size) kmem_free(ptr, size)
#elif defined(__DragonFly__)
#include <sys/malloc.h>
MALLOC_DECLARE(M_NVMM);
#define os_mem_alloc(size) kmalloc(size, M_NVMM, M_WAITOK)
#define os_mem_zalloc(size) kmalloc(size, M_NVMM, M_WAITOK | M_ZERO)
/* kfree() does not take a size; the argument is accepted for API symmetry. */
#define os_mem_free(ptr, size) kfree(ptr, M_NVMM)
/* Kernel console printf. */
#if defined(__NetBSD__)
#define os_printf printf
#elif defined(__DragonFly__)
#define os_printf kprintf
/* Atomic counters and loads on unsigned int / 64-bit values. */
#if defined(__NetBSD__)
#include <sys/atomic.h>
#define os_atomic_inc_uint(x) atomic_inc_uint(x)
#define os_atomic_dec_uint(x) atomic_dec_uint(x)
#define os_atomic_load_uint(x) atomic_load_relaxed(x)
#define os_atomic_inc_64(x) atomic_inc_64(x)
#elif defined(__DragonFly__)
#include <machine/atomic.h>
#define os_atomic_inc_uint(x) atomic_add_int(x, 1)
#define os_atomic_dec_uint(x) atomic_subtract_int(x, 1)
/*
 * NOTE(review): NetBSD maps os_atomic_load_uint to a relaxed load, while
 * this is an acquire load — stronger than needed if relaxed semantics are
 * the intended contract. Confirm which ordering callers rely on.
 */
#define os_atomic_load_uint(x) atomic_load_acq_int(x)
#define os_atomic_inc_64(x) atomic_add_64(x, 1)
/* Physical address of a vmspace's top-level page-directory (PML4 on x86-64). */
#if defined(__NetBSD__)
#define os_vmspace_pdirpa(vm) ((vm)->vm_map.pmap->pm_pdirpa[0])
/* NetBSD-only: machine-dependent data hanging off a pmap.
 * NOTE(review): no DragonFly counterpart below — presumably only used in
 * NetBSD-specific code paths; verify at call sites. */
#define os_pmap_mach(pm) ((pm)->pm_data)
#elif defined(__DragonFly__)
#define os_vmspace_pdirpa(vm) (vtophys(vmspace_pmap(vm)->pm_pml4))
/* Per-CPU object type and accessors for CPU id, TSS, GDT and IDT. */
#if defined(__NetBSD__)
typedef struct cpu_info os_cpu_t;
#define OS_MAXCPUS MAXCPUS
#define OS_CPU_FOREACH(cpu) for (CPU_INFO_FOREACH(, cpu))
#define os_cpu_number(cpu) cpu_index(cpu)
#define os_cpu_lookup(idx) cpu_lookup(idx)
#define os_curcpu() curcpu()
#define os_curcpu_number() cpu_number()
#define os_curcpu_tss_sel() curcpu()->ci_tss_sel
#define os_curcpu_tss() curcpu()->ci_tss
#define os_curcpu_gdt() curcpu()->ci_gdt
#define os_curcpu_idt() curcpu()->ci_idtvec.iv_idt
#elif defined(__DragonFly__)
#include <sys/globaldata.h>
#include <machine/segments.h>
typedef struct globaldata os_cpu_t;
#define OS_MAXCPUS SMP_MAXCPU
/* Iterates CPU ids [0, ncpus); declares its own `idx` (requires C99 for-decl). */
#define OS_CPU_FOREACH(cpu) \
for (int idx = 0; idx < ncpus && (cpu = globaldata_find(idx)); idx++)
#define os_cpu_number(cpu) (cpu)->gd_cpuid
#define os_cpu_lookup(idx) globaldata_find(idx)
#define os_curcpu() mycpu
#define os_curcpu_number() mycpuid
#define os_curcpu_tss_sel() GSEL(GPROC0_SEL, SEL_KPL)
#define os_curcpu_tss() &mycpu->gd_prvspace->common_tss
#define os_curcpu_gdt() mdcpu->gd_gdt
#define os_curcpu_idt() r_idt_arr[mycpuid].rd_base
/*
 * CPU sets. Callers pass a pointer to the set; os_cpuset_init() allocates
 * through that pointer (kcpuset_create on NetBSD, kmalloc'ed cpumask_t on
 * DragonFly), and os_cpuset_destroy() frees what init allocated.
 */
#if defined(__NetBSD__)
#include <sys/kcpuset.h>
typedef kcpuset_t os_cpuset_t;
#define os_cpuset_init(s) kcpuset_create(s, true)
#define os_cpuset_destroy(s) kcpuset_destroy(s)
#define os_cpuset_isset(s, c) kcpuset_isset(s, c)
#define os_cpuset_clear(s, c) kcpuset_clear(s, c)
#define os_cpuset_setrunning(s) kcpuset_copy(s, kcpuset_running)
#elif defined(__DragonFly__)
#include <sys/cpumask.h>
#include <machine/smp.h> /* smp_active_mask */
typedef cpumask_t os_cpuset_t;
#define os_cpuset_init(s) \
({ *(s) = kmalloc(sizeof(cpumask_t), M_NVMM, M_WAITOK | M_ZERO); })
#define os_cpuset_destroy(s) kfree((s), M_NVMM)
#define os_cpuset_isset(s, c) CPUMASK_TESTBIT(*(s), c)
/* Bit clear is atomic here; NetBSD's kcpuset_clear provides the equivalent. */
#define os_cpuset_clear(s, c) ATOMIC_CPUMASK_NANDBIT(*(s), c)
#define os_cpuset_setrunning(s) ATOMIC_CPUMASK_ORMASK(*(s), smp_active_mask)
/* Preemption control around per-CPU critical work. */
#if defined(__NetBSD__)
#define os_preempt_disable() kpreempt_disable()
#define os_preempt_enable() kpreempt_enable()
#define os_preempt_disabled() kpreempt_disabled()
#elif defined(__DragonFly__)
/*
 * In DragonFly, a normal kernel thread will not migrate to another CPU or be
 * preempted (except by an interrupt thread), so kpreempt_{disable,enable}()
 * are not needed. However, we can't use a critical section instead,
 * because that would also prevent interrupt/reschedule flags from being
 * set, which would be a problem for nvmm_return_needed() that's called from
 * (NOTE(review): the rest of this sentence is not visible in this chunk.)
 */
#define os_preempt_disable() /* nothing */
#define os_preempt_enable() /* nothing */
#define os_preempt_disabled() true
/* Kernel assertion macro under an OS-neutral name. */
#if defined(__NetBSD__)
#define OS_ASSERT KASSERT
#elif defined(__DragonFly__)
#define OS_ASSERT KKASSERT
266 #if defined(__DragonFly__)
/* Integer log2 via find-first-set; picks the 32/64-bit variant by operand size.
 * Exact only for powers of two (returns the index of the lowest set bit). */
#define ilog2(n) ((sizeof(n) > 4 ? ffsl(n) : ffs(n)) - 1)
/*
 * Unsigned minimum (NetBSD's uimin). Fix: arguments are now parenthesized
 * (CERT PRE01-C); previously the cast bound to only part of an expression
 * argument, e.g. uimin(5, 2 < 1) parsed as ((u_int)5 < (u_int)2) < 1.
 * `unsigned int` is spelled out — identical to BSD u_int.
 */
#define uimin(a, b) \
	((unsigned int)(a) < (unsigned int)(b) ? (unsigned int)(a) : (unsigned int)(b))
/* -------------------------------------------------------------------------- */
/* Prototypes for the OS-specific helpers; implementations live per platform. */

/* VM-space lifecycle and fault handling for guest address spaces. */
os_vmspace_t * os_vmspace_create(vaddr_t, vaddr_t);
void os_vmspace_destroy(os_vmspace_t *);
int os_vmspace_fault(os_vmspace_t *, vaddr_t, vm_prot_t);

/* VM-object lifecycle, reference counting, and (un)mapping into a vm_map. */
os_vmobj_t * os_vmobj_create(voff_t);
void os_vmobj_ref(os_vmobj_t *);
void os_vmobj_rel(os_vmobj_t *);
int os_vmobj_map(struct vm_map *, vaddr_t *, vsize_t, os_vmobj_t *,
    voff_t, bool, bool, bool, int, int);
void os_vmobj_unmap(struct vm_map *map, vaddr_t, vaddr_t, bool);

/* Zeroed page-aligned, single-page and contiguous physical allocations. */
void * os_pagemem_zalloc(size_t);
void os_pagemem_free(void *, size_t);
paddr_t os_pa_zalloc(void);
void os_pa_free(paddr_t);
int os_contigpa_zalloc(paddr_t *, vaddr_t *, size_t);
void os_contigpa_free(paddr_t, vaddr_t, size_t);
/*
 * Checks whether the vCPU loop must return to the host kernel/userland
 * (pending preemption/break request, or pending user-return work on the
 * current thread).
 * NOTE(review): this definition is truncated in this chunk — the return
 * type, braces and return statements are not visible; do not edit blindly.
 */
os_return_needed(void)
#if defined(__NetBSD__)
if (preempt_needed()) {
if (curlwp->l_flag & LW_USERRET) {
#elif defined(__DragonFly__)
if (__predict_false(nvmm_break_wanted())) {
if (__predict_false(curthread->td_lwp->lwp_mpflags & LWP_MP_URETMASK)) {
/* -------------------------------------------------------------------------- */
/*
 * Synchronous IPIs: run `func(arg)` on one CPU (unicast) or all CPUs
 * (broadcast), waiting for completion.
 * NOTE(review): the function definitions below are truncated in this chunk
 * (qualifiers, braces and some statements are not visible); comments only.
 */

#if defined(__NetBSD__)
#include <sys/xcall.h>
/* NetBSD xcall handlers take (arg, unused). */
#define OS_IPI_FUNC(func) void func(void *arg, void *unused)
os_ipi_unicast(os_cpu_t *cpu, void (*func)(void *, void *), void *arg)
xc_wait(xc_unicast(XC_HIGHPRI, func, arg, NULL, cpu));
os_ipi_broadcast(void (*func)(void *, void *), void *arg)
xc_wait(xc_broadcast(0, func, arg, NULL));
 * XXX: this is probably too expensive. NetBSD should have a dummy
 * interrupt handler that just IRETs without doing anything.
pmap_tlb_shootdown(pmap_kernel(), -1, PTE_G, TLBSHOOT_NVMM);
#elif defined(__DragonFly__)
#include <sys/thread2.h>
/* DragonFly LWKT IPI handlers take a single argument. */
#define OS_IPI_FUNC(func) void func(void *arg)
os_ipi_unicast(os_cpu_t *cpu, void (*func)(void *), void *arg)
seq = lwkt_send_ipiq(cpu, func, arg);
lwkt_wait_ipiq(cpu, seq);
os_ipi_broadcast(void (*func)(void *), void *arg)
/* One cpusync per CPU, in id order. */
for (i = 0; i < ncpus; i++) {
CPUMASK_ASSBIT(mask, i);
lwkt_cpusync_simple(mask, func, arg);
 * On DragonFly, no need to bind the thread, because any normal kernel
 * thread will not migrate to another CPU or be preempted (except by an
/* No-op stand-ins for NetBSD's curlwp_bind()/curlwp_bindx(). */
#define curlwp_bind() ((int)0)
#define curlwp_bindx(bound) /* nothing */
#endif /* __NetBSD__ */
383 #endif /* _NVMM_OS_H_ */