The diff below switches the map's `ref_count' to atomic operations, which
allows us to stop relying on the KERNEL_LOCK() when checking reference
counts.
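For readers unfamiliar with the pattern, here is a minimal userland sketch
of lock-free reference counting. It uses C11 <stdatomic.h> instead of the
kernel's atomic_inc_int()/atomic_dec_int_nv(), and a hypothetical `struct
obj' standing in for struct vm_map; it is an illustration of the idea, not
the kernel code. Increments need no lock, and whichever caller drops the
count to zero is the one that frees the object.

	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Hypothetical refcounted object, standing in for struct vm_map. */
	struct obj {
		atomic_uint	ref_count;
	};

	/* Take a reference: no lock needed, mirrors uvm_map_reference(). */
	void
	obj_reference(struct obj *o)
	{
		atomic_fetch_add(&o->ref_count, 1);
	}

	/*
	 * Drop a reference: the caller that drops the last one frees the
	 * object, mirroring uvm_map_deallocate().
	 */
	void
	obj_deallocate(struct obj *o)
	{
		/* fetch_sub returns the old value; 1 means we were last. */
		if (atomic_fetch_sub(&o->ref_count, 1) == 1)
			free(o);
	}

	int
	main(void)
	{
		struct obj *o = malloc(sizeof(*o));

		atomic_init(&o->ref_count, 1);
		obj_reference(o);	/* ref_count == 2 */
		obj_deallocate(o);	/* ref_count == 1 */
		obj_deallocate(o);	/* reaches 0, object freed */
		printf("done\n");
		return 0;
	}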

It also reduces differences with NetBSD and shrinks my upcoming `vmobjlock'
diff.

ok?

Index: uvm/uvm_map.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_map.c,v
retrieving revision 1.274
diff -u -p -r1.274 uvm_map.c
--- uvm/uvm_map.c       26 Mar 2021 13:40:05 -0000      1.274
+++ uvm/uvm_map.c       18 May 2021 09:36:55 -0000
@@ -491,12 +491,13 @@ uvm_mapent_addr_remove(struct vm_map *ma
 /*
  * uvm_map_reference: add reference to a map
  *
- * XXX check map reference counter lock
+ * => map need not be locked
  */
-#define uvm_map_reference(_map)                                        \
-       do {                                                            \
-               map->ref_count++;                                       \
-       } while (0)
+void
+uvm_map_reference(struct vm_map *map)
+{
+       atomic_inc_int(&map->ref_count);
+}
 
 /*
  * Calculate the dused delta.
@@ -4292,7 +4293,7 @@ uvm_map_deallocate(vm_map_t map)
        int c;
        struct uvm_map_deadq dead;
 
-       c = --map->ref_count;
+       c = atomic_dec_int_nv(&map->ref_count);
        if (c > 0) {
                return;
        }
Index: uvm/uvm_map.h
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_map.h,v
retrieving revision 1.69
diff -u -p -r1.69 uvm_map.h
--- uvm/uvm_map.h       12 Mar 2021 14:15:49 -0000      1.69
+++ uvm/uvm_map.h       18 May 2021 09:36:36 -0000
@@ -259,6 +259,7 @@ RBT_PROTOTYPE(uvm_map_addr, vm_map_entry
  * read_locks and write_locks are used in lock debugging code.
  *
  *  Locks used to protect struct members in this file:
+ *     a       atomic operations
  *     I       immutable after creation or exec(2)
  *     v       `vm_map_lock' (this map `lock' or `mtx')
  */
@@ -272,7 +273,7 @@ struct vm_map {
        struct uvm_map_addr     addr;           /* [v] Entry tree, by addr */
 
        vsize_t                 size;           /* virtual size */
-       int                     ref_count;      /* Reference count */
+       int                     ref_count;      /* [a] Reference count */
        int                     flags;          /* flags */
        struct mutex            flags_lock;     /* flags lock */
        unsigned int            timestamp;      /* Version number */
