Re: uvm: enable amap per-page refcounting unconditionally

2016-03-29 Thread Martin Pieuchot
On 28/03/16(Mon) 11:28, Stefan Kempf wrote:
> Miod Vallat wrote:
> > 
> > > It seems per-page reference counting is used since forever. I think
> > > there's no reason to ever turn it off (and track referenced pages
> > > with less accuracy, causing leaks).
> > 
> > Actually, assuming the #undef code path works, it might work keeping
> > this and only defining UVM_AMAP_PPREF iff defined(SMALL_KERNEL).
> 
> Doing this saves around 1.6K on bsd.rd/amd64.
> 
> Would that be preferred over removing the #ifdefs?

I'd prefer to get rid of the #ifdefs and have fewer differences between
RAMDISK and GENERIC.

> text	data	bss	dec	hex
> 4736948   2409000 577536  7723484 75d9dc
> 4738636   2409000 577536  7725172 75e074
>  
> diff --git a/uvm/uvm_amap.h b/uvm/uvm_amap.h
> index a98b440..a768e94 100644
> --- a/uvm/uvm_amap.h
> +++ b/uvm/uvm_amap.h
> @@ -119,7 +119,9 @@ boolean_t amap_swap_off(int, int);
>   * ... this is enabled with the "UVM_AMAP_PPREF" define.
>   */
>  
> -#define UVM_AMAP_PPREF   /* track partial references */
> +#ifndef SMALL_KERNEL
> +# define UVM_AMAP_PPREF  /* track partial references */
> +#endif
>  
>  /*
>   * here is the definition of the vm_amap structure for this implementation.
> 



Re: uvm: enable amap per-page refcounting unconditionally

2016-03-28 Thread Stefan Kempf
Miod Vallat wrote:
> 
> > It seems per-page reference counting is used since forever. I think
> > there's no reason to ever turn it off (and track referenced pages
> > with less accuracy, causing leaks).
> 
> Actually, assuming the #undef code path works, it might work keeping
> this and only defining UVM_AMAP_PPREF iff defined(SMALL_KERNEL).

Doing this saves around 1.6K on bsd.rd/amd64.

Would that be preferred over removing the #ifdefs?

text	data	bss	dec	hex
4736948 2409000 577536  7723484 75d9dc
4738636 2409000 577536  7725172 75e074
 
diff --git a/uvm/uvm_amap.h b/uvm/uvm_amap.h
index a98b440..a768e94 100644
--- a/uvm/uvm_amap.h
+++ b/uvm/uvm_amap.h
@@ -119,7 +119,9 @@ boolean_t   amap_swap_off(int, int);
  * ... this is enabled with the "UVM_AMAP_PPREF" define.
  */
 
-#define UVM_AMAP_PPREF /* track partial references */
+#ifndef SMALL_KERNEL
+# define UVM_AMAP_PPREF/* track partial references */
+#endif
 
 /*
  * here is the definition of the vm_amap structure for this implementation.



Re: uvm: enable amap per-page refcounting unconditionally

2016-03-27 Thread Miod Vallat

> It seems per-page reference counting is used since forever. I think
> there's no reason to ever turn it off (and track referenced pages
> with less accuracy, causing leaks).

Actually, assuming the #undef code path works, it might work keeping
this and only defining UVM_AMAP_PPREF iff defined(SMALL_KERNEL).



uvm: enable amap per-page refcounting unconditionally

2016-03-27 Thread Stefan Kempf
It seems per-page reference counting is used since forever. I think
there's no reason to ever turn it off (and track referenced pages
with less accuracy, causing leaks).

So remove those #ifdefs.

ok?

Index: uvm/uvm_amap.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_amap.c,v
retrieving revision 1.63
diff -u -p -r1.63 uvm_amap.c
--- uvm/uvm_amap.c  27 Mar 2016 09:51:37 -  1.63
+++ uvm/uvm_amap.c  27 Mar 2016 12:09:16 -
@@ -81,11 +81,9 @@ amap_list_remove(struct vm_amap *amap)
LIST_REMOVE(amap, am_list);
 }
 
-#ifdef UVM_AMAP_PPREF
 /*
- * what is ppref?   ppref is an _optional_ amap feature which is used
- * to keep track of reference counts on a per-page basis.  it is enabled
- * when UVM_AMAP_PPREF is defined.
+ * what is ppref?   ppref is an amap feature which is used
+ * to keep track of reference counts on a per-page basis.
  *
  * when enabled, an array of ints is allocated for the pprefs.  this
  * array is allocated only when a partial reference is added to the
@@ -147,7 +145,6 @@ pp_setreflen(int *ppref, int offset, int
ppref[offset+1] = len;
}
 }
-#endif
 
 /*
  * amap_init: called at boot time to init global amap data structures
@@ -196,9 +193,7 @@ amap_alloc1(int slots, int padslots, int
 
amap->am_ref = 1;
amap->am_flags = 0;
-#ifdef UVM_AMAP_PPREF
amap->am_ppref = NULL;
-#endif
amap->am_maxslot = totalslots;
amap->am_nslot = slots;
amap->am_nused = 0;
@@ -270,10 +265,8 @@ amap_free(struct vm_amap *amap)
	pool_put(&uvm_amap_slot_pools[amap->am_maxslot - 1],
	    amap->am_slots);
 
-#ifdef UVM_AMAP_PPREF
if (amap->am_ppref && amap->am_ppref != PPREF_NONE)
free(amap->am_ppref, M_UVMAMAP, 0);
-#endif
	pool_put(&uvm_amap_pool, amap);
 
 }
@@ -422,12 +415,10 @@ amap_copy(struct vm_map *map, struct vm_
srcamap->am_ref--;
if (srcamap->am_ref == 1 && (srcamap->am_flags & AMAP_SHARED) != 0)
srcamap->am_flags &= ~AMAP_SHARED;   /* clear shared flag */
-#ifdef UVM_AMAP_PPREF
if (srcamap->am_ppref && srcamap->am_ppref != PPREF_NONE) {
amap_pp_adjref(srcamap, entry->aref.ar_pageoff, 
(entry->end - entry->start) >> PAGE_SHIFT, -1);
}
-#endif
 
/* install new amap. */
entry->aref.ar_pageoff = 0;
@@ -551,19 +542,15 @@ amap_splitref(struct vm_aref *origref, s
if (origref->ar_amap->am_nslot - origref->ar_pageoff - leftslots <= 0)
panic("amap_splitref: map size check failed");
 
-#ifdef UVM_AMAP_PPREF
 /* establish ppref before we add a duplicate reference to the amap */
if (origref->ar_amap->am_ppref == NULL)
amap_pp_establish(origref->ar_amap);
-#endif
 
splitref->ar_amap = origref->ar_amap;
splitref->ar_amap->am_ref++;/* not a share reference */
splitref->ar_pageoff = origref->ar_pageoff + leftslots;
 }
 
-#ifdef UVM_AMAP_PPREF
-
 /*
  * amap_pp_establish: add a ppref array to an amap, if possible
  */
@@ -719,8 +706,6 @@ amap_wiperange(struct vm_amap *amap, int
}
 }
 
-#endif
-
 /*
  * amap_swap_off: pagein anonymous pages in amaps and drop swap slots.
  *
@@ -911,7 +896,6 @@ amap_ref(struct vm_amap *amap, vaddr_t o
amap->am_ref++;
if (flags & AMAP_SHARED)
amap->am_flags |= AMAP_SHARED;
-#ifdef UVM_AMAP_PPREF
if (amap->am_ppref == NULL && (flags & AMAP_REFALL) == 0 &&
len != amap->am_nslot)
amap_pp_establish(amap);
@@ -921,7 +905,6 @@ amap_ref(struct vm_amap *amap, vaddr_t o
else
amap_pp_adjref(amap, offset, len, 1);
}
-#endif
 }
 
 /*
@@ -945,7 +928,6 @@ amap_unref(struct vm_amap *amap, vaddr_t
/* otherwise just drop the reference count(s) */
if (amap->am_ref == 1 && (amap->am_flags & AMAP_SHARED) != 0)
amap->am_flags &= ~AMAP_SHARED; /* clear shared flag */
-#ifdef UVM_AMAP_PPREF
if (amap->am_ppref == NULL && all == 0 && len != amap->am_nslot)
amap_pp_establish(amap);
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
@@ -954,5 +936,4 @@ amap_unref(struct vm_amap *amap, vaddr_t
else
amap_pp_adjref(amap, offset, len, -1);
}
-#endif
 }
Index: uvm/uvm_amap.h
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_amap.h,v
retrieving revision 1.22
diff -u -p -r1.22 uvm_amap.h
--- uvm/uvm_amap.h  27 Mar 2016 09:51:37 -  1.22
+++ uvm/uvm_amap.h  27 Mar 2016 12:09:16 -
@@ -114,13 +114,10 @@ boolean_t amap_swap_off(int, int);
 
 /*
  * we currently provide an array-based amap implementation.  in this
- * implementation we provide the option of tracking split references
- * so that we don't lose track of references during