kernel - VM rework part 1 - Remove shadow_list
author Matthew Dillon <dillon@apollo.backplane.com>
Tue, 7 May 2019 06:14:02 +0000 (23:14 -0700)
committer Matthew Dillon <dillon@apollo.backplane.com>
Fri, 10 May 2019 16:24:31 +0000 (09:24 -0700)
* Remove shadow_head, shadow_list, shadow_count.

* This leaves the kernel operational but without collapse optimizations
  on 'other' processes when a program exits.

sys/vfs/procfs/procfs_map.c
sys/vm/vm_fault.c
sys/vm/vm_map.c
sys/vm/vm_object.c
sys/vm/vm_object.h

index cb09a3d..4a7e88a 100644 (file)
@@ -87,22 +87,19 @@ procfs_domap(struct proc *curp, struct lwp *lp, struct pfsnode *pfs,
 
        RB_FOREACH(entry, vm_map_rb_tree, &map->rb_root) {
                vm_object_t obj, tobj, lobj;
-               int ref_count, shadow_count, flags;
+               int ref_count, flags;
                vm_offset_t e_start, e_end;
                vm_eflags_t e_eflags;
                vm_prot_t e_prot;
-               int resident, privateresident;
+               int resident;
                char *type;
 
-               privateresident = 0;
                switch(entry->maptype) {
                case VM_MAPTYPE_NORMAL:
                case VM_MAPTYPE_VPAGETABLE:
                        obj = entry->object.vm_object;
                        if (obj != NULL) {
                                vm_object_hold(obj);
-                               if (obj->shadow_count == 1)
-                                       privateresident = obj->resident_page_count;
                        }
                        break;
                case VM_MAPTYPE_UKSMAP:
@@ -188,7 +185,6 @@ procfs_domap(struct proc *curp, struct lwp *lp, struct pfsnode *pfs,
                        
                        flags = obj->flags;
                        ref_count = obj->ref_count;
-                       shadow_count = obj->shadow_count;
                        vm_object_drop(obj);
                        if (vp != NULL) {
                                vn_fullpath(p, vp, &fullpath, &freepath, 1);
@@ -197,7 +193,6 @@ procfs_domap(struct proc *curp, struct lwp *lp, struct pfsnode *pfs,
                } else {
                        flags = 0;
                        ref_count = 0;
-                       shadow_count = 0;
                        switch(entry->maptype) {
                        case VM_MAPTYPE_UNSPECIFIED:
                                type = "unspec";
@@ -232,11 +227,11 @@ procfs_domap(struct proc *curp, struct lwp *lp, struct pfsnode *pfs,
 #endif
                          "0x%04x %s%s %s %s\n",
                        (u_long)e_start, (u_long)e_end,
-                       resident, privateresident, obj,
+                       resident, -1, obj,
                        (e_prot & VM_PROT_READ) ? "r" : "-",
                        (e_prot & VM_PROT_WRITE) ? "w" : "-",
                        (e_prot & VM_PROT_EXECUTE) ? "x" : "-",
-                       ref_count, shadow_count, flags,
+                       ref_count, 0, flags,
                        (e_eflags & MAP_ENTRY_COW) ? "COW" : "NCOW",
                        (e_eflags & MAP_ENTRY_NEEDS_COPY) ?" NC" : " NNC",
                        type, fullpath);
index e51af05..d66ea74 100644 (file)
@@ -306,13 +306,7 @@ virtual_copy_test(struct faultstate *fs)
                return 0;
 
        /*
-        * Only one shadow object
-        */
-       if (fs->object->shadow_count != 1)
-               return 0;
-
-       /*
-        * No COW refs, except us
+        * No refs, except us
         */
        if (fs->object->ref_count != 1)
                return 0;
@@ -616,11 +610,14 @@ RetryFault:
         * process is single-threaded we might as well use an exclusive
         * lock/chain right off the bat.
         */
+#if 0
+       /* WORK IN PROGRESS, CODE REMOVED */
        if (fs.first_shared && fs.first_object->backing_object &&
            LIST_EMPTY(&fs.first_object->shadow_head) &&
            td->td_proc && td->td_proc->p_nthreads == 1) {
                fs.first_shared = 0;
        }
+#endif
 
        /*
         * VM_FAULT_UNSWAP - swap_pager_unswapped() needs an exclusive object
index 5231f85..2330f56 100644 (file)
@@ -1120,9 +1120,8 @@ vm_map_insert(vm_map_t map, int *countp, void *map_object, void *map_aux,
                 *       of objects, so a shared object lock is ok for
                 *       VNODE objects.
                 */
-               if ((object->ref_count > 1) || (object->shadow_count != 0)) {
+               if (object->ref_count > 1)
                        vm_object_clear_flag(object, OBJ_ONEMAPPING);
-               }
        }
        else if (prev_entry &&
                 (prev_entry->eflags == protoeflags) &&
@@ -3391,10 +3390,7 @@ vm_map_split(vm_map_entry_t entry, vm_object_t oobject)
                    oobject->backing_object_offset + IDX_TO_OFF(offidxstart);
                nobject->backing_object = bobject;
                if (useshadowlist) {
-                       bobject->shadow_count++;
                        atomic_add_int(&bobject->generation, 1);
-                       LIST_INSERT_HEAD(&bobject->shadow_head,
-                                        nobject, shadow_list);
                        vm_object_clear_flag(bobject, OBJ_ONEMAPPING); /*XXX*/
                        vm_object_set_flag(nobject, OBJ_ONSHADOW);
                }
index 302d8a0..5bd4290 100644 (file)
@@ -388,7 +388,6 @@ _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
        struct vm_object_hash *hash;
 
        RB_INIT(&object->rb_memq);
-       LIST_INIT(&object->shadow_head);
        lwkt_token_init(&object->token, "vmobj");
 
        object->type = type;
@@ -401,7 +400,6 @@ _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
                vm_object_set_flag(object, OBJ_ONEMAPPING);
        object->paging_in_progress = 0;
        object->resident_page_count = 0;
-       object->shadow_count = 0;
        /* cpu localization twist */
        object->pg_color = vm_quickcolor();
        object->handle = NULL;
@@ -497,7 +495,7 @@ vm_object_allocate_hold(objtype_t type, vm_pindex_t size)
  * must NOT be chain locked by anyone at the time the reference is added.
  *
  * Referencing a chain-locked object can blow up the fairly sensitive
- * ref_count and shadow_count tests in the deallocator.  Most callers
+ * ref_count tests in the deallocator.  Most callers
  * will call vm_object_chain_wait() prior to calling
  * vm_object_reference_locked() to avoid the case.  The held token
  * allows the caller to pair the wait and ref.
@@ -936,7 +934,19 @@ again:
                        break;
                }
 
+#if 0
+               /*
+                * CODE REMOVAL IN PROGRESS.
+                *
+                * This code handled setting ONEMAPPING again on a DEFAULT
+                * or SWAP object on the 2->1 transition of ref_count,
+                *
+                * This code also handled collapsing object chains on the
+                * 2->1 transition when the second ref was due to a shadow.
+                */
                /*
+                * The ref_count is either 1 or 2.
+                *
                 * Here on ref_count of one or two, which are special cases for
                 * objects.
                 *
@@ -1063,12 +1073,14 @@ again:
                        must_drop = 1;
                        continue;
                }
+skip:
+               ;
+#endif
 
                /*
                 * Drop the ref and handle termination on the 1->0 transition.
                 * We may have blocked above so we have to recheck.
                 */
-skip:
                KKASSERT(object->ref_count != 0);
                if (object->ref_count >= 2) {
                        atomic_add_int(&object->ref_count, -1);
@@ -1118,8 +1130,6 @@ skip:
 
                if (temp) {
                        if (object->flags & OBJ_ONSHADOW) {
-                               LIST_REMOVE(object, shadow_list);
-                               temp->shadow_count--;
                                atomic_add_int(&temp->generation, 1);
                                vm_object_clear_flag(object, OBJ_ONSHADOW);
                        }
@@ -1972,8 +1982,7 @@ vm_object_shadow(vm_object_t *objectp, vm_ooffset_t *offset, vm_size_t length,
        }
 
        /*
-        * The new object shadows the source object.  Chain wait before
-        * adjusting shadow_count or the shadow list to avoid races.
+        * The new object shadows the source object.
         *
         * Try to optimize the result object's page color when shadowing
         * in order to maintain page coloring consistency in the combined 
@@ -1992,9 +2001,6 @@ vm_object_shadow(vm_object_t *objectp, vm_ooffset_t *offset, vm_size_t length,
        if (source) {
                if (useshadowlist) {
                        vm_object_chain_wait(source, 0);
-                       LIST_INSERT_HEAD(&source->shadow_head,
-                                        result, shadow_list);
-                       source->shadow_count++;
                        atomic_add_int(&source->generation, 1);
                        vm_object_set_flag(result, OBJ_ONSHADOW);
                }
@@ -2415,15 +2421,12 @@ vm_object_collapse(vm_object_t object, struct vm_object_dealloc_list **dlistp)
 
                        /*
                         * Object now shadows whatever backing_object did.
-                        * Remove object from backing_object's shadow_list.
                         *
                         * Removing object from backing_objects shadow list
                         * requires releasing object, which we will do below.
                         */
                        KKASSERT(object->backing_object == backing_object);
                        if (object->flags & OBJ_ONSHADOW) {
-                               LIST_REMOVE(object, shadow_list);
-                               backing_object->shadow_count--;
                                atomic_add_int(&backing_object->generation, 1);
                                vm_object_clear_flag(object, OBJ_ONSHADOW);
                        }
@@ -2453,9 +2456,6 @@ vm_object_collapse(vm_object_t object, struct vm_object_dealloc_list **dlistp)
                                if (backing_object->flags & OBJ_ONSHADOW) {
                                        /* not locked exclusively if vnode */
                                        KKASSERT(bbobj->type != OBJT_VNODE);
-                                       LIST_REMOVE(backing_object,
-                                                   shadow_list);
-                                       bbobj->shadow_count--;
                                        atomic_add_int(&bbobj->generation, 1);
                                        vm_object_clear_flag(backing_object,
                                                             OBJ_ONSHADOW);
@@ -2465,9 +2465,6 @@ vm_object_collapse(vm_object_t object, struct vm_object_dealloc_list **dlistp)
                        object->backing_object = bbobj;
                        if (bbobj) {
                                if (bbobj->type != OBJT_VNODE) {
-                                       LIST_INSERT_HEAD(&bbobj->shadow_head,
-                                                        object, shadow_list);
-                                       bbobj->shadow_count++;
                                        atomic_add_int(&bbobj->generation, 1);
                                        vm_object_set_flag(object,
                                                           OBJ_ONSHADOW);
@@ -2554,8 +2551,6 @@ vm_object_collapse(vm_object_t object, struct vm_object_dealloc_list **dlistp)
                         */
                        KKASSERT(object->backing_object == backing_object);
                        if (object->flags & OBJ_ONSHADOW) {
-                               LIST_REMOVE(object, shadow_list);
-                               backing_object->shadow_count--;
                                atomic_add_int(&backing_object->generation, 1);
                                vm_object_clear_flag(object, OBJ_ONSHADOW);
                        }
@@ -2574,9 +2569,6 @@ vm_object_collapse(vm_object_t object, struct vm_object_dealloc_list **dlistp)
                                if (bbobj->type != OBJT_VNODE) {
                                        vm_object_chain_wait(bbobj, 0);
                                        vm_object_reference_locked(bbobj);
-                                       LIST_INSERT_HEAD(&bbobj->shadow_head,
-                                                        object, shadow_list);
-                                       bbobj->shadow_count++;
                                        atomic_add_int(&bbobj->generation, 1);
                                        vm_object_set_flag(object,
                                                           OBJ_ONSHADOW);
@@ -3157,10 +3149,10 @@ DB_SHOW_COMMAND(object, vm_object_print_static)
        /*
         * XXX no %qd in kernel.  Truncate object->backing_object_offset.
         */
-       db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%lx\n",
-           object->shadow_count, 
-           object->backing_object ? object->backing_object->ref_count : 0,
-           object->backing_object, (long)object->backing_object_offset);
+       db_iprintf(" backing_object(%d)=(%p)+0x%lx\n",
+           (object->backing_object ? object->backing_object->ref_count : 0),
+           object->backing_object,
+           (long)object->backing_object_offset);
 
        if (!full)
                return;
index 1e364bd..21fff14 100644 (file)
@@ -138,20 +138,16 @@ typedef u_char objtype_t;
  */
 struct vm_object {
        TAILQ_ENTRY(vm_object) object_list; /* locked by vmobj_tokens[n] */
-       LIST_HEAD(, vm_object) shadow_head; /* objects we are a shadow for */
-       LIST_ENTRY(vm_object) shadow_list;  /* chain of shadow objects */
        RB_HEAD(vm_page_rb_tree, vm_page) rb_memq;      /* resident pages */
        int generation;                 /* generation ID */
        vm_pindex_t size;               /* Object size */
        int ref_count;
-       int shadow_count;               /* count of objs we are a shadow for */
        vm_memattr_t memattr;           /* default memory attribute for pages */
        objtype_t type;                 /* type of pager */
        u_short flags;                  /* see below */
        u_short pg_color;               /* color of first page in obj */
        u_int paging_in_progress;       /* Paging (in or out) so don't collapse or destroy */
        long resident_page_count;       /* number of resident pages */
-        u_int unused01;
        struct vm_object *backing_object; /* object that I'm a shadow of */
        vm_ooffset_t backing_object_offset;/* Offset in backing object */
        TAILQ_ENTRY(vm_object) pager_object_list; /* list of all objects of this pager type */