From 15553805ce5caf2e101c881cba99393893fab921 Mon Sep 17 00:00:00 2001 From: Matthew Dillon Date: Mon, 29 Dec 2014 16:56:23 -0800 Subject: [PATCH] kernel - Fix a major (pageable) memory leak * Under certain relatively easy-to-reproduce conditions an extra ref_count can be added to a VM object during a fork(), preventing the object from ever being destroyed. Its pages may even be paged out, but the system will eventually run out of swap space too. * The actual fix is to assign 'map_object = object' in vm_map_insert() (see the diff). The rest of this commit is conditionalized debugging code and code documentation. * Because this change implements a relatively esoteric feature in the VM system by allowing an anonymous VM object to be extended to cover an area even though it might have a gap (so a new VM object does not have to be allocated), further testing is needed before we can MFC this to the RELEASE branch. --- sys/vm/vm_map.c | 17 +++ sys/vm/vm_object.c | 259 +++++++++++++++++++++++++-------------------- sys/vm/vm_object.h | 72 ++++++++----- 3 files changed, 208 insertions(+), 140 deletions(-) diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c index 9dc336785b..94952159a4 100644 --- a/sys/vm/vm_map.c +++ b/sys/vm/vm_map.c @@ -604,6 +604,11 @@ vm_map_init(struct vm_map *map, vm_offset_t min, vm_offset_t max, pmap_t pmap) * possible to address offsets beyond the mapped area. Just allocate * a maximally sized object for this case. * + * If addref is non-zero an additional reference is added to the returned + * entry. This mechanic exists because the additional reference might have + * to be added atomically and not after return to prevent a premature + * collapse. + * * The vm_map must be exclusively locked. * No other requirements. */ @@ -1078,6 +1083,8 @@ vm_map_insert(vm_map_t map, int *countp, void *map_object, void *map_aux, * map entry, we have to create a new map entry. We * must bump the ref count on the extended object to * account for it. object may be NULL. + * + * XXX if object is NULL should we set offset to 0 here ? */ object = prev_entry->object.vm_object; offset = prev_entry->offset + @@ -1087,6 +1094,7 @@ vm_map_insert(vm_map_t map, int *countp, void *map_object, void *map_aux, vm_object_chain_wait(object, 0); vm_object_reference_locked(object); must_drop = 1; + map_object = object; } } @@ -3135,11 +3143,17 @@ vm_map_split(vm_map_entry_t entry) useshadowlist = 1; vm_object_hold(bobject); vm_object_chain_wait(bobject, 0); + /* ref for shadowing below */ vm_object_reference_locked(bobject); vm_object_chain_acquire(bobject, 0); KKASSERT(bobject->backing_object == bobject); KKASSERT((bobject->flags & OBJ_DEAD) == 0); } else { + /* + * vnodes are not placed on the shadow list but + * they still get another ref for the backing_object + * reference. + */ vm_object_reference_quick(bobject); } } @@ -3197,6 +3211,9 @@ vm_map_split(vm_map_entry_t entry) /* * nobject shadows bobject (oobject already shadows bobject). + * + * Adding an object to bobject's shadow list requires refing bobject + * which we did above in the useshadowlist case.
*/ if (bobject) { nobject->backing_object_offset = diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c index 1054497355..213555617a 100644 --- a/sys/vm/vm_object.c +++ b/sys/vm/vm_object.c @@ -143,6 +143,42 @@ static struct vm_object vm_objects_init[VM_OBJECTS_INIT]; struct object_q vm_object_lists[VMOBJ_HSIZE]; struct lwkt_token vmobj_tokens[VMOBJ_HSIZE]; +#if defined(DEBUG_LOCKS) + +#define vm_object_vndeallocate(obj, vpp) \ + debugvm_object_vndeallocate(obj, vpp, __FILE__, __LINE__) + +/* + * Debug helper to track hold/drop/ref/deallocate calls. + */ +static void +debugvm_object_add(vm_object_t obj, char *file, int line, int addrem) +{ + int i; + + i = atomic_fetchadd_int(&obj->debug_index, 1); + i = i & (VMOBJ_DEBUG_ARRAY_SIZE - 1); + ksnprintf(obj->debug_hold_thrs[i], + sizeof(obj->debug_hold_thrs[i]), + "%c%d:(%d):%s", + (addrem == -1 ? '-' : (addrem == 1 ? '+' : '=')), + (curthread->td_proc ? curthread->td_proc->p_pid : -1), + obj->ref_count, + curthread->td_comm); + obj->debug_hold_file[i] = file; + obj->debug_hold_line[i] = line; +#if 0 + /* Uncomment for debugging obj refs/derefs in reproducible cases */ + if (strcmp(curthread->td_comm, "sshd") == 0) { + kprintf("%d %p refs=%d ar=%d file: %s/%d\n", + (curthread->td_proc ? curthread->td_proc->p_pid : -1), + obj, obj->ref_count, addrem, file, line); + } +#endif +} + +#endif + /* * Misc low level routines */ @@ -152,10 +188,9 @@ vm_object_lock_init(vm_object_t obj) #if defined(DEBUG_LOCKS) int i; - obj->debug_hold_bitmap = 0; - obj->debug_hold_ovfl = 0; + obj->debug_index = 0; for (i = 0; i < VMOBJ_DEBUG_ARRAY_SIZE; i++) { - obj->debug_hold_thrs[i] = NULL; + obj->debug_hold_thrs[i][0] = 0; obj->debug_hold_file[i] = NULL; obj->debug_hold_line[i] = 0; } @@ -216,11 +251,7 @@ vm_object_assert_held(vm_object_t obj) } void -#ifndef DEBUG_LOCKS -vm_object_hold(vm_object_t obj) -#else -debugvm_object_hold(vm_object_t obj, char *file, int line) -#endif +VMOBJDEBUG(vm_object_hold)(vm_object_t obj VMOBJDBARGS) { KKASSERT(obj != NULL); @@ -234,36 +265,12 @@ debugvm_object_hold(vm_object_t obj, char *file, int line) vm_object_lock(obj); #if defined(DEBUG_LOCKS) - int i; - u_int mask; - - for (;;) { - mask = ~obj->debug_hold_bitmap; - cpu_ccfence(); - if (mask == 0xFFFFFFFFU) { - if (obj->debug_hold_ovfl == 0) - obj->debug_hold_ovfl = 1; - break; - } - i = ffs(mask) - 1; - if (atomic_cmpset_int(&obj->debug_hold_bitmap, ~mask, - ~mask | (1 << i))) { - obj->debug_hold_bitmap |= (1 << i); - obj->debug_hold_thrs[i] = curthread; - obj->debug_hold_file[i] = file; - obj->debug_hold_line[i] = line; - break; - } - } + debugvm_object_add(obj, file, line, 1); #endif } int -#ifndef DEBUG_LOCKS -vm_object_hold_try(vm_object_t obj) -#else -debugvm_object_hold_try(vm_object_t obj, char *file, int line) -#endif +VMOBJDEBUG(vm_object_hold_try)(vm_object_t obj VMOBJDBARGS) { KKASSERT(obj != NULL); @@ -283,37 +290,13 @@ debugvm_object_hold_try(vm_object_t obj, char *file, int line) } #if defined(DEBUG_LOCKS) - int i; - u_int mask; - - for (;;) { - mask = ~obj->debug_hold_bitmap; - cpu_ccfence(); - if (mask == 0xFFFFFFFFU) { - if (obj->debug_hold_ovfl == 0) - obj->debug_hold_ovfl = 1; - break; - } - i = ffs(mask) - 1; - if (atomic_cmpset_int(&obj->debug_hold_bitmap, ~mask, - ~mask | (1 << i))) { - obj->debug_hold_bitmap |= (1 << i); - obj->debug_hold_thrs[i] = curthread; - obj->debug_hold_file[i] = file; - obj->debug_hold_line[i] = line; - break; - } - } + debugvm_object_add(obj, file, line, 1); #endif return(1); } void -#ifndef DEBUG_LOCKS
-vm_object_hold_shared(vm_object_t obj) -#else -debugvm_object_hold_shared(vm_object_t obj, char *file, int line) -#endif +VMOBJDEBUG(vm_object_hold_shared)(vm_object_t obj VMOBJDBARGS) { KKASSERT(obj != NULL); @@ -327,27 +310,7 @@ debugvm_object_hold_shared(vm_object_t obj, char *file, int line) vm_object_lock_shared(obj); #if defined(DEBUG_LOCKS) - int i; - u_int mask; - - for (;;) { - mask = ~obj->debug_hold_bitmap; - cpu_ccfence(); - if (mask == 0xFFFFFFFFU) { - if (obj->debug_hold_ovfl == 0) - obj->debug_hold_ovfl = 1; - break; - } - i = ffs(mask) - 1; - if (atomic_cmpset_int(&obj->debug_hold_bitmap, ~mask, - ~mask | (1 << i))) { - obj->debug_hold_bitmap |= (1 << i); - obj->debug_hold_thrs[i] = curthread; - obj->debug_hold_file[i] = file; - obj->debug_hold_line[i] = line; - break; - } - } + debugvm_object_add(obj, file, line, 1); #endif } @@ -357,37 +320,21 @@ debugvm_object_hold_shared(vm_object_t obj, char *file, int line) * WARNING! Token might be shared. */ void -vm_object_drop(vm_object_t obj) +VMOBJDEBUG(vm_object_drop)(vm_object_t obj VMOBJDBARGS) { if (obj == NULL) return; -#if defined(DEBUG_LOCKS) - int found = 0; - int i; - - for (i = 0; i < VMOBJ_DEBUG_ARRAY_SIZE; i++) { - if ((obj->debug_hold_bitmap & (1 << i)) && - (obj->debug_hold_thrs[i] == curthread)) { - obj->debug_hold_bitmap &= ~(1 << i); - obj->debug_hold_thrs[i] = NULL; - obj->debug_hold_file[i] = NULL; - obj->debug_hold_line[i] = 0; - found = 1; - break; - } - } - - if (found == 0 && obj->debug_hold_ovfl == 0) - panic("vm_object: attempt to drop hold on non-self-held obj"); -#endif - /* * No new holders should be possible once we drop hold_count 1->0 as * there is no longer any way to reference the object. */ KKASSERT(obj->hold_count > 0); if (refcount_release(&obj->hold_count)) { +#if defined(DEBUG_LOCKS) + debugvm_object_add(obj, file, line, -1); +#endif + if (obj->ref_count == 0 && (obj->flags & OBJ_DEAD)) { vm_object_unlock(obj); zfree(obj_zone, obj); @@ -395,6 +342,9 @@ vm_object_drop(vm_object_t obj) vm_object_unlock(obj); } } else { +#if defined(DEBUG_LOCKS) + debugvm_object_add(obj, file, line, -1); +#endif vm_object_unlock(obj); } } @@ -532,7 +482,7 @@ vm_object_allocate_hold(objtype_t type, vm_pindex_t size) * we use an atomic op). */ void -vm_object_reference_locked(vm_object_t object) +VMOBJDEBUG(vm_object_reference_locked)(vm_object_t object VMOBJDBARGS) { KKASSERT(object != NULL); ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); @@ -542,17 +492,23 @@ vm_object_reference_locked(vm_object_t object) vref(object->handle); /* XXX what if the vnode is being destroyed? */ } +#if defined(DEBUG_LOCKS) + debugvm_object_add(object, file, line, 1); +#endif } /* * This version is only allowed for vnode objects. */ void -vm_object_reference_quick(vm_object_t object) +VMOBJDEBUG(vm_object_reference_quick)(vm_object_t object VMOBJDBARGS) { KKASSERT(object->type == OBJT_VNODE); atomic_add_int(&object->ref_count, 1); vref(object->handle); +#if defined(DEBUG_LOCKS) + debugvm_object_add(object, file, line, 1); +#endif } /* @@ -752,7 +708,8 @@ vm_object_chain_release_all(vm_object_t first_object, vm_object_t stopobj) * the vp ourselves. 
*/ static void -vm_object_vndeallocate(vm_object_t object, struct vnode **vpp) +VMOBJDEBUG(vm_object_vndeallocate)(vm_object_t object, struct vnode **vpp + VMOBJDBARGS) { struct vnode *vp = (struct vnode *) object->handle; @@ -783,6 +740,9 @@ vm_object_vndeallocate(vm_object_t object, struct vnode **vpp) } /* retry */ } +#if defined(DEBUG_LOCKS) + debugvm_object_add(object, file, line, -1); +#endif /* * vrele or return the vp to vrele. We can only safely vrele(vp) @@ -813,13 +773,14 @@ vm_object_vndeallocate(vm_object_t object, struct vnode **vpp) * XXX Currently all deallocations require an exclusive lock. */ void -vm_object_deallocate(vm_object_t object) +VMOBJDEBUG(vm_object_deallocate)(vm_object_t object VMOBJDBARGS) { struct vnode *vp; int count; if (object == NULL) return; + for (;;) { count = object->ref_count; cpu_ccfence(); @@ -833,6 +794,9 @@ vm_object_deallocate(vm_object_t object) * For vnode objects we only care about 1->0 transitions. */ if (count <= 3 || (object->type == OBJT_VNODE && count <= 1)) { +#if defined(DEBUG_LOCKS) + debugvm_object_add(object, file, line, 0); +#endif vm_object_hold(object); vm_object_deallocate_locked(object); vm_object_drop(object); @@ -850,6 +814,10 @@ vm_object_deallocate(vm_object_t object) vp = (struct vnode *)object->handle; if (atomic_cmpset_int(&object->ref_count, count, count - 1)) { +#if defined(DEBUG_LOCKS) + debugvm_object_add(object, file, line, -1); +#endif + vrele(vp); break; } @@ -857,6 +825,9 @@ vm_object_deallocate(vm_object_t object) } else { if (atomic_cmpset_int(&object->ref_count, count, count - 1)) { +#if defined(DEBUG_LOCKS) + debugvm_object_add(object, file, line, -1); +#endif break; } /* retry */ @@ -866,7 +837,7 @@ vm_object_deallocate(vm_object_t object) } void -vm_object_deallocate_locked(vm_object_t object) +VMOBJDEBUG(vm_object_deallocate_locked)(vm_object_t object VMOBJDBARGS) { struct vm_object_dealloc_list *dlist = NULL; struct vm_object_dealloc_list *dtmp; @@ -915,6 +886,9 @@ again: } if (object->ref_count > 2) { atomic_add_int(&object->ref_count, -1); +#if defined(DEBUG_LOCKS) + debugvm_object_add(object, file, line, -1); +#endif break; } @@ -933,6 +907,9 @@ again: vm_object_set_flag(object, OBJ_ONEMAPPING); } atomic_add_int(&object->ref_count, -1); +#if defined(DEBUG_LOCKS) + debugvm_object_add(object, file, line, -1); +#endif break; } @@ -998,6 +975,9 @@ again: */ KKASSERT(object->ref_count == 2); atomic_add_int(&object->ref_count, -1); +#if defined(DEBUG_LOCKS) + debugvm_object_add(object, file, line, -1); +#endif /* * If our single parent is not collapseable just @@ -1048,6 +1028,9 @@ skip: KKASSERT(object->ref_count != 0); if (object->ref_count >= 2) { atomic_add_int(&object->ref_count, -1); +#if defined(DEBUG_LOCKS) + debugvm_object_add(object, file, line, -1); +#endif break; } KKASSERT(object->ref_count == 1); @@ -1080,6 +1063,12 @@ skip: /* * It shouldn't be possible for the object to be chain locked * if we're removing the last ref on it. + * + * Removing object from temp's shadow list requires dropping + * temp, which we will do on loop. + * + * NOTE! vnodes do not use the shadow list, but still have + * the backing_object reference. */ KKASSERT((object->chainlk & (CHAINLK_EXCL|CHAINLK_MASK)) == 0); @@ -1790,6 +1779,12 @@ shadowlookup: * range. Replace the pointer and offset that was pointing at the existing * object with the pointer/offset for the new object. * + * If addref is non-zero the returned object is given an additional reference. 
+ * This mechanic exists to avoid the situation where refs might be 1 and + * race against a collapse when the caller intends to bump it. So the + * caller cannot add the ref after the fact. Used when the caller is + * duplicating a vm_map_entry. + * * No other requirements. */ void @@ -1807,10 +1802,15 @@ vm_object_shadow(vm_object_t *objectp, vm_ooffset_t *offset, vm_size_t length, * We have to chain wait before adding the reference to avoid * racing a collapse or deallocation. * - * Add the additional ref to source here to avoid racing a later - * collapse or deallocation. Clear the ONEMAPPING flag whether - * addref is TRUE or not in this case because the original object - * will be shadowed. + * Clear OBJ_ONEMAPPING flag when shadowing. + * + * The caller owns a ref on source via *objectp which we are going + * to replace. This ref is inherited by the backing_object assignment + * from nobject and does not need to be incremented here. + * + * However, we add a temporary extra reference to the original source + * prior to holding nobject in case we block, to avoid races where + * someone else might believe that the source can be collapsed. */ useshadowlist = 0; if (source) { @@ -1824,7 +1824,8 @@ vm_object_shadow(vm_object_t *objectp, vm_ooffset_t *offset, vm_size_t length, source->type == OBJT_SWAP)) { if (addref) { vm_object_reference_locked(source); - vm_object_clear_flag(source, OBJ_ONEMAPPING); + vm_object_clear_flag(source, + OBJ_ONEMAPPING); } vm_object_drop(source); return; } @@ -1846,8 +1847,12 @@ vm_object_shadow(vm_object_t *objectp, vm_ooffset_t *offset, vm_size_t length, * The source object currently has an extra reference to prevent * collapses into it while we mess with its shadow list, which * we will remove later in this routine. + * + * The target object may require a second reference if asked for one + * by the caller. */ - if ((result = vm_object_allocate(OBJT_DEFAULT, length)) == NULL) + result = vm_object_allocate(OBJT_DEFAULT, length); + if (result == NULL) panic("vm_object_shadow: no object for shadowing"); vm_object_hold(result); if (addref) { @@ -1863,6 +1868,12 @@ vm_object_shadow(vm_object_t *objectp, vm_ooffset_t *offset, vm_size_t length, * in order to maintain page coloring consistency in the combined * shadowed object. * + * The backing_object reference to source requires adding a ref to + * source. We simply inherit the ref from the original *objectp + * (which we are replacing) so no additional refs need to be added. + * (we must still clean up the extra ref we added to prevent collapse + * races). + * * SHADOWING IS NOT APPLICABLE TO OBJT_VNODE OBJECTS */ KKASSERT(result->backing_object == NULL); @@ -2141,9 +2152,15 @@ vm_object_qcollapse(vm_object_t object, vm_object_t backing_object) { if (backing_object->ref_count == 1) { atomic_add_int(&backing_object->ref_count, 2); +#if defined(DEBUG_LOCKS) + debugvm_object_add(backing_object, "qcollapse", 1, 2); +#endif vm_object_backing_scan(object, backing_object, OBSC_COLLAPSE_NOWAIT); atomic_add_int(&backing_object->ref_count, -2); +#if defined(DEBUG_LOCKS) + debugvm_object_add(backing_object, "qcollapse", 2, -2); +#endif } } @@ -2289,6 +2306,9 @@ vm_object_collapse(vm_object_t object, struct vm_object_dealloc_list **dlistp) /* * Object now shadows whatever backing_object did. * Remove object from backing_object's shadow_list. + * + * Removing object from backing_object's shadow list + * requires releasing object, which we will do below.
*/ KKASSERT(object->backing_object == backing_object); if (object->flags & OBJ_ONSHADOW) { @@ -2313,6 +2333,12 @@ vm_object_collapse(vm_object_t object, struct vm_object_dealloc_list **dlistp) break; vm_object_drop(bbobj); } + + /* + * We are removing backing_object from bbobj's + * shadow list and adding object to bbobj's shadow + * list, so the ref_count on bbobj is unchanged. + */ if (bbobj) { if (backing_object->flags & OBJ_ONSHADOW) { /* not locked exclusively if vnode */ @@ -2370,6 +2396,9 @@ vm_object_collapse(vm_object_t object, struct vm_object_dealloc_list **dlistp) * of forcing destruction? */ atomic_add_int(&backing_object->ref_count, -1); +#if defined(DEBUG_LOCKS) + debugvm_object_add(backing_object, "collapse", 1, -1); +#endif if ((backing_object->flags & OBJ_DEAD) == 0) vm_object_terminate(backing_object); object_collapses++; @@ -2408,6 +2437,10 @@ vm_object_collapse(vm_object_t object, struct vm_object_dealloc_list **dlistp) * * Deallocating backing_object will not remove * it, since its reference count is at least 2. + * + * Removing object from backing_object's shadow + * list requires releasing a ref, which we do + * below by setting dodealloc to 1. */ KKASSERT(object->backing_object == backing_object); if (object->flags & OBJ_ONSHADOW) { diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h index 76ba0d5e2f..192b19df2b 100644 --- a/sys/vm/vm_object.h +++ b/sys/vm/vm_object.h @@ -168,11 +168,10 @@ struct vm_object { */ #define VMOBJ_DEBUG_ARRAY_SIZE (32) - u_int debug_hold_bitmap; - thread_t debug_hold_thrs[VMOBJ_DEBUG_ARRAY_SIZE]; - char *debug_hold_file[VMOBJ_DEBUG_ARRAY_SIZE]; + char debug_hold_thrs[VMOBJ_DEBUG_ARRAY_SIZE][64]; + const char *debug_hold_file[VMOBJ_DEBUG_ARRAY_SIZE]; int debug_hold_line[VMOBJ_DEBUG_ARRAY_SIZE]; - u_int debug_hold_ovfl; + int debug_index; #endif union { @@ -229,6 +228,10 @@ struct vm_object { #define IDX_TO_OFF(idx) (((vm_ooffset_t)(idx)) << PAGE_SHIFT) #define OFF_TO_IDX(off) ((vm_pindex_t)(((vm_ooffset_t)(off)) >> PAGE_SHIFT)) +#define VMOBJ_HSIZE 64 +#define VMOBJ_HMASK (VMOBJ_HSIZE - 1) +#define VMOBJ_HASH(obj) (((intptr_t)(obj) >> 8) & VMOBJ_HMASK) + #ifdef _KERNEL #define OBJPC_SYNC 0x1 /* sync I/O */ @@ -245,10 +248,6 @@ struct vm_object_dealloc_list { TAILQ_HEAD(object_q, vm_object); -#define VMOBJ_HSIZE 64 -#define VMOBJ_HMASK (VMOBJ_HSIZE - 1) -#define VMOBJ_HASH(obj) (((intptr_t)(obj) >> 8) & VMOBJ_HMASK) - extern struct object_q vm_object_lists[VMOBJ_HSIZE]; extern struct lwkt_token vmobj_tokens[VMOBJ_HSIZE]; @@ -312,8 +311,6 @@ vm_object_t vm_object_allocate_hold (objtype_t, vm_pindex_t); void _vm_object_allocate (objtype_t, vm_pindex_t, vm_object_t); boolean_t vm_object_coalesce (vm_object_t, vm_pindex_t, vm_size_t, vm_size_t); void vm_object_collapse (vm_object_t, struct vm_object_dealloc_list **); -void vm_object_deallocate (vm_object_t); -void vm_object_deallocate_locked (vm_object_t); void vm_object_deallocate_list(struct vm_object_dealloc_list **); void vm_object_terminate (vm_object_t); void vm_object_set_writeable_dirty (vm_object_t); @@ -323,8 +320,6 @@ void vm_object_page_remove (vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t); void vm_object_pmap_copy (vm_object_t, vm_pindex_t, vm_pindex_t); void vm_object_pmap_copy_1 (vm_object_t, vm_pindex_t, vm_pindex_t); void vm_object_pmap_remove (vm_object_t, vm_pindex_t, vm_pindex_t); -void vm_object_reference_quick (vm_object_t); -void vm_object_reference_locked (vm_object_t); void vm_object_chain_wait (vm_object_t object, int shared); void 
vm_object_chain_acquire(vm_object_t object, int shared); void vm_object_chain_release(vm_object_t object); @@ -339,26 +334,49 @@ void vm_object_lock(vm_object_t); void vm_object_lock_shared(vm_object_t); void vm_object_unlock(vm_object_t); -#ifndef DEBUG_LOCKS -void vm_object_hold(vm_object_t); -int vm_object_hold_try(vm_object_t); -void vm_object_hold_shared(vm_object_t); +#if defined(DEBUG_LOCKS) + +#define VMOBJDEBUG(x) debug ## x +#define VMOBJDBARGS , char *file, int line +#define VMOBJDBFWD , file, line + +#define vm_object_hold(obj) \ + debugvm_object_hold(obj, __FILE__, __LINE__) +#define vm_object_hold_try(obj) \ + debugvm_object_hold_try(obj, __FILE__, __LINE__) +#define vm_object_hold_shared(obj) \ + debugvm_object_hold_shared(obj, __FILE__, __LINE__) +#define vm_object_drop(obj) \ + debugvm_object_drop(obj, __FILE__, __LINE__) +#define vm_object_reference_quick(obj) \ + debugvm_object_reference_quick(obj, __FILE__, __LINE__) +#define vm_object_reference_locked(obj) \ + debugvm_object_reference_locked(obj, __FILE__, __LINE__) +#define vm_object_deallocate(obj) \ + debugvm_object_deallocate(obj, __FILE__, __LINE__) +#define vm_object_deallocate_locked(obj) \ + debugvm_object_deallocate_locked(obj, __FILE__, __LINE__) + #else -#define vm_object_hold(obj) \ - debugvm_object_hold(obj, __FILE__, __LINE__) -void debugvm_object_hold(vm_object_t, char *, int); -#define vm_object_hold_try(obj) \ - debugvm_object_hold_try(obj, __FILE__, __LINE__) -int debugvm_object_hold_try(vm_object_t, char *, int); -#define vm_object_hold_shared(obj) \ - debugvm_object_hold_shared(obj, __FILE__, __LINE__) -void debugvm_object_hold_shared(vm_object_t, char *, int); + +#define VMOBJDEBUG(x) x +#define VMOBJDBARGS +#define VMOBJDBFWD + #endif + +void VMOBJDEBUG(vm_object_hold)(vm_object_t object VMOBJDBARGS); +int VMOBJDEBUG(vm_object_hold_try)(vm_object_t object VMOBJDBARGS); +void VMOBJDEBUG(vm_object_hold_shared)(vm_object_t object VMOBJDBARGS); +void VMOBJDEBUG(vm_object_drop)(vm_object_t object VMOBJDBARGS); +void VMOBJDEBUG(vm_object_reference_quick)(vm_object_t object VMOBJDBARGS); +void VMOBJDEBUG(vm_object_reference_locked)(vm_object_t object VMOBJDBARGS); +void VMOBJDEBUG(vm_object_deallocate)(vm_object_t object VMOBJDBARGS); +void VMOBJDEBUG(vm_object_deallocate_locked)(vm_object_t object VMOBJDBARGS); + void vm_object_upgrade(vm_object_t); void vm_object_downgrade(vm_object_t); -void vm_object_drop(vm_object_t); - #endif /* _KERNEL */ #endif /* _VM_VM_OBJECT_H_ */ -- 2.41.0
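To make the reference-accounting bug described in the commit message easier to see, here is a stand-alone sketch. This is not DragonFly code; all names (toy_object, toy_entry, toy_insert) are hypothetical, and it only models the pattern: when an existing object is extended to cover a new mapping, an extra reference is taken, and the new entry must record that same object so the reference is dropped again on unmap. Leaving the local map_object pointer stale is the analogue of the leak addressed by the 'map_object = object' assignment in vm_map_insert().

#include <stdio.h>
#include <stdlib.h>

struct toy_object {
	int	ref_count;
};

struct toy_entry {
	struct toy_object *object;
};

static struct toy_object *
toy_ref(struct toy_object *obj)
{
	if (obj)
		++obj->ref_count;
	return (obj);
}

static void
toy_unref(struct toy_object *obj)
{
	if (obj && --obj->ref_count == 0) {
		printf("object %p destroyed\n", (void *)obj);
		free(obj);
	}
}

/*
 * Insert a new mapping.  If the previous entry's object can be extended
 * to cover the new range, take an extra reference on it instead of
 * allocating a new object.  The leak pattern is forgetting to update
 * map_object to point at the extended object, so the new entry never
 * accounts for (and never releases) that extra reference.
 */
static void
toy_insert(struct toy_entry *prev, struct toy_entry *new_entry,
	   struct toy_object *map_object, int can_extend)
{
	if (can_extend && prev->object) {
		toy_ref(prev->object);		/* extra ref for the new entry */
		map_object = prev->object;	/* the crucial assignment */
	}
	new_entry->object = map_object;
}

int
main(void)
{
	struct toy_object *obj = calloc(1, sizeof(*obj));
	struct toy_entry prev = { toy_ref(obj) };
	struct toy_entry next = { NULL };

	toy_insert(&prev, &next, NULL, 1);	/* extend prev's object */
	toy_unref(next.object);			/* unmap the new entry */
	toy_unref(prev.object);			/* unmap the original entry */
	/* both references dropped -> object destroyed, nothing leaks */
	return (0);
}

Without the marked assignment, next.object stays NULL while the extra reference taken by toy_ref() persists, so the object can never reach ref_count == 0; that is the toy analogue of the un-droppable ref_count on the anonymous VM object described above.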