Re: svn commit: r252226 - head/sys/vm

2013-07-07 Thread Andriy Gapon
on 26/06/2013 03:57 Jeff Roberson said the following:
 + { NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },

Looks like BUCKET_SIZE(4) is zero, if I am not mistaken.
Is this OK / intended?

-- 
Andriy Gapon
___
svn-src-head@freebsd.org mailing list
http://lists.freebsd.org/mailman/listinfo/svn-src-head
To unsubscribe, send any mail to svn-src-head-unsubscr...@freebsd.org


svn commit: r252226 - head/sys/vm

2013-06-25 Thread Jeff Roberson
Author: jeff
Date: Wed Jun 26 00:57:38 2013
New Revision: 252226
URL: http://svnweb.freebsd.org/changeset/base/252226

Log:
   - Resolve bucket recursion issues by passing a cookie with zone flags
 through bucket_alloc() to uma_zalloc_arg() and uma_zfree_arg().
   - Make some smaller buckets for large zones to further reduce memory
 waste.
   - Implement uma_zone_reserve().  This holds aside a number of items only
 for callers who specify M_USE_RESERVE.  buckets will never be filled
 from reserve allocations.
  
  Sponsored by: EMC / Isilon Storage Division

Modified:
  head/sys/vm/uma.h
  head/sys/vm/uma_core.c
  head/sys/vm/uma_int.h

Modified: head/sys/vm/uma.h
==
--- head/sys/vm/uma.h   Wed Jun 26 00:42:45 2013(r252225)
+++ head/sys/vm/uma.h   Wed Jun 26 00:57:38 2013(r252226)
@@ -459,6 +459,12 @@ void uma_reclaim(void);
 void uma_set_align(int align);
 
 /*
+ * Set a reserved number of items to hold for M_USE_RESERVE allocations.  All
+ * other requests must allocate new backing pages.
+ */
+void uma_zone_reserve(uma_zone_t zone, int nitems);
+
+/*
  * Reserves the maximum KVA space required by the zone and configures the zone
  * to use a VM_ALLOC_NOOBJ-based backend allocator.
  *

Modified: head/sys/vm/uma_core.c
==
--- head/sys/vm/uma_core.c  Wed Jun 26 00:42:45 2013(r252225)
+++ head/sys/vm/uma_core.c  Wed Jun 26 00:57:38 2013(r252226)
@@ -206,12 +206,14 @@ struct uma_bucket_zone {
 #define	BUCKET_MAX  BUCKET_SIZE(128)
 
 struct uma_bucket_zone bucket_zones[] = {
+   { NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
+   { NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
+   { NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
{ NULL, NULL, 0}
 };
-static uma_zone_t largebucket;
 
 /*
  * Flags and enumerations to be passed to internal functions.
@@ -246,10 +248,10 @@ static void *zone_alloc_item(uma_zone_t,
 static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
 static void bucket_enable(void);
 static void bucket_init(void);
-static uma_bucket_t bucket_alloc(uma_zone_t zone, int);
-static void bucket_free(uma_zone_t zone, uma_bucket_t);
+static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
+static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
 static void bucket_zone_drain(void);
-static uma_bucket_t zone_alloc_bucket(uma_zone_t zone, int flags);
+static uma_bucket_t zone_alloc_bucket(uma_zone_t zone, void *, int flags);
 static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
 static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int 
flags);
 static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
@@ -304,17 +306,8 @@ bucket_init(void)
	size += sizeof(void *) * ubz->ubz_entries;
	ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
-	    UMA_ZONE_MAXBUCKET | UMA_ZONE_MTXCLASS);
+	    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET);
}
-   /*
-* To avoid recursive bucket allocation loops we disable buckets
-* on the smallest bucket zone and use it for the largest zone.
-* The remainder of the zones all use the largest zone.
-*/
-   ubz--;
-   ubz->ubz_zone->uz_count = bucket_zones[0].ubz_entries;
-   bucket_zones[0].ubz_zone->uz_count = 0;
-   largebucket = ubz->ubz_zone;
 }
 
 /*
@@ -350,7 +343,7 @@ bucket_select(int size)
 }
 
 static uma_bucket_t
-bucket_alloc(uma_zone_t zone, int flags)
+bucket_alloc(uma_zone_t zone, void *udata, int flags)
 {
struct uma_bucket_zone *ubz;
uma_bucket_t bucket;
@@ -363,11 +356,26 @@ bucket_alloc(uma_zone_t zone, int flags)
 */
if (bucketdisable)
return (NULL);
-
-   if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
+   /*
+* To limit bucket recursion we store the original zone flags
+* in a cookie passed via zalloc_arg/zfree_arg.  This allows the
+* NOVM flag to persist even through deep recursions.  We also
+* store ZFLAG_BUCKET once we have recursed attempting to allocate
+* a bucket for a bucket zone so we do not allow infinite bucket
+* recursion.  This cookie will even persist to frees of unused
+* buckets via the allocation path or bucket allocations in the
+* free path.
+*/
+   if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
+       return (NULL);
+   if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
+       udata = (void *)(uintptr_t)zone->uz_flags;
+   else
+       udata = (void *)((uintptr_t)udata |