The diff below introduces a new macro, UVM_OBJ_IS_AOBJ(), to generalize the
test currently open-coded in uvm_km_pgremove().  It also uses the new check,
as KASSERT()s, in more of the aobj functions to reduce the differences with
NetBSD.
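
For reference, the new test mirrors the existing UVM_OBJ_IS_VNODE() and
UVM_OBJ_IS_DEVICE() pgops checks, and the aobj routines now assert it on
entry:

	#define UVM_OBJ_IS_AOBJ(uobj)					\
		((uobj)->pgops == &aobj_pager)

	KASSERT(UVM_OBJ_IS_AOBJ(uobj));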

This helps me shrink an upcoming vmobjlock diff.

ok?

Index: uvm/uvm_aobj.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_aobj.c,v
retrieving revision 1.96
diff -u -p -r1.96 uvm_aobj.c
--- uvm/uvm_aobj.c      20 May 2021 08:03:35 -0000      1.96
+++ uvm/uvm_aobj.c      14 Jun 2021 09:39:45 -0000
@@ -143,7 +143,7 @@ struct pool uvm_aobj_pool;
 
 static struct uao_swhash_elt   *uao_find_swhash_elt(struct uvm_aobj *, int,
                                     boolean_t);
-static int                      uao_find_swslot(struct uvm_aobj *, int);
+static int                      uao_find_swslot(struct uvm_object *, int);
 static boolean_t                uao_flush(struct uvm_object *, voff_t,
                                     voff_t, int);
 static void                     uao_free(struct uvm_aobj *);
@@ -242,8 +242,11 @@ uao_find_swhash_elt(struct uvm_aobj *aob
  * uao_find_swslot: find the swap slot number for an aobj/pageidx
  */
 inline static int
-uao_find_swslot(struct uvm_aobj *aobj, int pageidx)
+uao_find_swslot(struct uvm_object *uobj, int pageidx)
 {
+       struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
+
+       KASSERT(UVM_OBJ_IS_AOBJ(uobj));
 
        /*
         * if noswap flag is set, then we never return a slot
@@ -284,6 +287,7 @@ uao_set_swslot(struct uvm_object *uobj, 
        int oldslot;
 
        KERNEL_ASSERT_LOCKED();
+       KASSERT(UVM_OBJ_IS_AOBJ(uobj));
 
        /*
         * if noswap flag is set, then we can't set a slot
@@ -353,6 +357,7 @@ uao_free(struct uvm_aobj *aobj)
 {
        struct uvm_object *uobj = &aobj->u_obj;
 
+       KASSERT(UVM_OBJ_IS_AOBJ(uobj));
        uao_dropswap_range(uobj, 0, 0);
 
        if (UAO_USES_SWHASH(aobj)) {
@@ -881,6 +886,7 @@ uao_flush(struct uvm_object *uobj, voff_
        struct vm_page *pp;
        voff_t curoff;
 
+       KASSERT(UVM_OBJ_IS_AOBJ(uobj));
        KERNEL_ASSERT_LOCKED();
 
        if (flags & PGO_ALLPAGES) {
@@ -1007,6 +1013,7 @@ uao_get(struct uvm_object *uobj, voff_t 
        int lcv, gotpages, maxpages, swslot, rv, pageidx;
        boolean_t done;
 
+       KASSERT(UVM_OBJ_IS_AOBJ(uobj));
        KERNEL_ASSERT_LOCKED();
 
        /*
@@ -1036,7 +1043,7 @@ uao_get(struct uvm_object *uobj, voff_t 
                         * if page is new, attempt to allocate the page,
                         * zero-fill'd.
                         */
-                       if (ptmp == NULL && uao_find_swslot(aobj,
+                       if (ptmp == NULL && uao_find_swslot(uobj,
                            current_offset >> PAGE_SHIFT) == 0) {
                                ptmp = uvm_pagealloc(uobj, current_offset,
                                    NULL, UVM_PGA_ZERO);
@@ -1175,7 +1182,7 @@ uao_get(struct uvm_object *uobj, voff_t 
                 * we have a "fake/busy/clean" page that we just allocated.  
                 * do the needed "i/o", either reading from swap or zeroing.
                 */
-               swslot = uao_find_swslot(aobj, pageidx);
+               swslot = uao_find_swslot(uobj, pageidx);
 
                /* just zero the page if there's nothing in swap.  */
                if (swslot == 0) {
@@ -1241,6 +1248,8 @@ uao_dropswap(struct uvm_object *uobj, in
 {
        int slot;
 
+       KASSERT(UVM_OBJ_IS_AOBJ(uobj));
+
        slot = uao_set_swslot(uobj, pageidx, 0);
        if (slot) {
                uvm_swap_free(slot, 1);
@@ -1456,6 +1465,7 @@ uao_dropswap_range(struct uvm_object *uo
        struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
        int swpgonlydelta = 0;
 
+       KASSERT(UVM_OBJ_IS_AOBJ(uobj));
        /* KASSERT(mutex_owned(uobj->vmobjlock)); */
 
        if (end == 0) {
Index: uvm/uvm_km.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_km.c,v
retrieving revision 1.144
diff -u -p -r1.144 uvm_km.c
--- uvm/uvm_km.c        16 May 2021 15:10:20 -0000      1.144
+++ uvm/uvm_km.c        14 Jun 2021 09:40:39 -0000
@@ -246,7 +246,7 @@ uvm_km_pgremove(struct uvm_object *uobj,
        int slot;
        int swpgonlydelta = 0;
 
-       KASSERT(uobj->pgops == &aobj_pager);
+       KASSERT(UVM_OBJ_IS_AOBJ(uobj));
 
        for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
                pp = uvm_pagelookup(uobj, curoff);
Index: uvm/uvm_object.h
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_object.h,v
retrieving revision 1.24
diff -u -p -r1.24 uvm_object.h
--- uvm/uvm_object.h    21 Oct 2020 09:08:14 -0000      1.24
+++ uvm/uvm_object.h    14 Jun 2021 09:34:34 -0000
@@ -82,12 +82,15 @@ RBT_PROTOTYPE(uvm_objtree, vm_page, objt
 #define        UVM_OBJ_IS_VNODE(uobj)                                          \
        ((uobj)->pgops == &uvm_vnodeops)
 
-#define UVM_OBJ_IS_DEVICE(uobj)                                                \
+#define        UVM_OBJ_IS_DEVICE(uobj)                                         \
        ((uobj)->pgops == &uvm_deviceops)
 
 #define        UVM_OBJ_IS_VTEXT(uobj)                                          \
        ((uobj)->pgops == &uvm_vnodeops &&                              \
         ((struct vnode *)uobj)->v_flag & VTEXT)
+
+#define        UVM_OBJ_IS_AOBJ(uobj)                                           \
+       ((uobj)->pgops == &aobj_pager)
 
 void   uvm_objinit(struct uvm_object *, const struct uvm_pagerops *, int);
 int    uvm_objwire(struct uvm_object *, voff_t, voff_t, struct pglist *);
