uao_reference() vs uao_reference_locked()
uao_detach() vs uao_detach_locked()

Reading the code shows that uao_reference() and uao_detach() are thin wrappers
that do nothing but call their _locked counterparts. Fold each pair into the
single function that external callers use: remove the _locked functions (and
their prototypes in uvm_extern.h), preserve their comments, and switch the
internal callers in uao_swap_off() over to the public names. Tested on amd64.
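
For context, the refcount interface seen by external callers is unchanged by
this fold; a minimal hypothetical caller sketch (not taken from the tree, just
to illustrate the create/reference/detach lifetime) looks like:

	struct uvm_object *uobj;

	/* create an anonymous UVM object with an initial reference */
	uobj = uao_create(PAGE_SIZE, 0);

	/* take an additional reference while another holder exists */
	uao_reference(uobj);

	/* ... */

	/* drop the extra reference */
	uao_detach(uobj);

	/* drop the last reference; the aobj may be freed here */
	uao_detach(uobj);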
Thanks


diff --git uvm/uvm_aobj.c uvm/uvm_aobj.c
index 63e6c993fc2..5fb1fbd9b2a 100644
--- uvm/uvm_aobj.c
+++ uvm/uvm_aobj.c
@@ -810,16 +810,6 @@ uao_init(void)
 void
 uao_reference(struct uvm_object *uobj)
 {
-       uao_reference_locked(uobj);
-}
-
-/*
- * uao_reference_locked: add a ref to an aobj
- */
-void
-uao_reference_locked(struct uvm_object *uobj)
-{
-
        /* kernel_object already has plenty of references, leave it alone. */
        if (UVM_OBJ_IS_KERN_OBJECT(uobj))
                return;
@@ -830,21 +820,11 @@ uao_reference_locked(struct uvm_object *uobj)
 
 /*
  * uao_detach: drop a reference to an aobj
- */
-void
-uao_detach(struct uvm_object *uobj)
-{
-       uao_detach_locked(uobj);
-}
-
-
-/*
- * uao_detach_locked: drop a reference to an aobj
  *
  * => aobj may freed upon return.
  */
 void
-uao_detach_locked(struct uvm_object *uobj)
+uao_detach(struct uvm_object *uobj)
 {
        struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
        struct vm_page *pg;
@@ -1286,7 +1266,7 @@ uao_swap_off(int startslot, int endslot)
                 * add a ref to the aobj so it doesn't disappear
                 * while we're working.
                 */
-               uao_reference_locked(&aobj->u_obj);
+               uao_reference(&aobj->u_obj);
 
                /*
                 * now it's safe to unlock the uao list.
@@ -1295,7 +1275,7 @@ uao_swap_off(int startslot, int endslot)
                mtx_leave(&uao_list_lock);
 
                if (prevaobj) {
-                       uao_detach_locked(&prevaobj->u_obj);
+                       uao_detach(&prevaobj->u_obj);
                        prevaobj = NULL;
                }
 
@@ -1305,7 +1285,7 @@ uao_swap_off(int startslot, int endslot)
                 */
                rv = uao_pagein(aobj, startslot, endslot);
                if (rv) {
-                       uao_detach_locked(&aobj->u_obj);
+                       uao_detach(&aobj->u_obj);
                        return rv;
                }
 
@@ -1328,7 +1308,7 @@ uao_swap_off(int startslot, int endslot)
        /* done with traversal, unlock the list */
        mtx_leave(&uao_list_lock);
        if (prevaobj) {
-               uao_detach_locked(&prevaobj->u_obj);
+               uao_detach(&prevaobj->u_obj);
        }
        return FALSE;
 }
diff --git uvm/uvm_extern.h uvm/uvm_extern.h
index a473f251229..9402e41fe10 100644
--- uvm/uvm_extern.h
+++ uvm/uvm_extern.h
@@ -261,9 +261,7 @@ void                        vmapbuf(struct buf *, vsize_t);
 void                   vunmapbuf(struct buf *, vsize_t);
 struct uvm_object      *uao_create(vsize_t, int);
 void                   uao_detach(struct uvm_object *);
-void                   uao_detach_locked(struct uvm_object *);
 void                   uao_reference(struct uvm_object *);
-void                   uao_reference_locked(struct uvm_object *);
 int                    uvm_fault(vm_map_t, vaddr_t, vm_fault_t, vm_prot_t);
 
 vaddr_t                        uvm_uarea_alloc(void);