On Fri, 21 Sep 2007, Pavel Emelyanov wrote:
@@ -1486,7 +1597,7 @@ load_freelist:
 	object = c->page->freelist;
 	if (unlikely(!object))
 		goto another_slab;
-	if (unlikely(SlabDebug(c->page)))
+	if (unlikely(SlabDebug(c->page)) || (s->flags & SLAB_NOTIFY))
[snip]
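To make the effect of this hunk concrete: it folds the new SLAB_NOTIFY test into the existing SlabDebug() check, so flagged caches are simply diverted onto the slow path that debug caches already take, while every other cache keeps the unmodified fast path. Below is a minimal userspace model of that gating pattern; the struct, the SLAB_DEBUG stand-in, and the malloc-based paths are illustrative assumptions, not the kernel's code.

/* Hypothetical userspace model of the per-cache flag gate, not kernel code:
 * caches without SLAB_NOTIFY keep the plain fast path, flagged caches are
 * diverted to the slow path where a notifier could run. */
#include <stdio.h>
#include <stdlib.h>

#define SLAB_NOTIFY 0x1UL   /* "call the notifier" flag, as in the quoted patch */
#define SLAB_DEBUG  0x2UL   /* stand-in for the existing SlabDebug() condition */

struct kmem_cache {
	unsigned long flags;
	const char *name;
};

static void *slow_path_alloc(struct kmem_cache *s, size_t size)
{
	void *object = malloc(size);

	if (object && (s->flags & SLAB_NOTIFY))
		printf("notify: %s allocated %p\n", s->name, object);
	return object;
}

static void *fast_path_alloc(struct kmem_cache *s, size_t size)
{
	/* One combined test, like the patched condition: debug caches and
	 * notified caches share the slow path, everyone else stays fast. */
	if ((s->flags & SLAB_DEBUG) || (s->flags & SLAB_NOTIFY))
		return slow_path_alloc(s, size);
	return malloc(size);
}

int main(void)
{
	struct kmem_cache plain     = { 0, "plain" };
	struct kmem_cache accounted = { SLAB_NOTIFY, "accounted" };

	free(fast_path_alloc(&plain, 64));      /* no notifier involved */
	free(fast_path_alloc(&accounted, 64));  /* routed through the slow path */
	return 0;
}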
@@ -1555,6 +1650,11 @@ static void __always_inline *slab_alloc(
 	}
 	local_irq_restore(flags);
+	if (object && slub_alloc_notify(s, object, gfpflags) < 0) {
+		kmem_cache_free(s, object);
+		return NULL;
+	}
+
 	if (unlikely((gfpflags
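This second hunk adds an allocate-then-notify-then-undo step at the tail of slab_alloc(): the fresh object is handed to slub_alloc_notify(), and if the notifier objects, the object is freed again and the allocation fails. A small self-contained model of that pattern is sketched below; the limit-based notifier and the names alloc_notify()/cache_alloc() are assumptions made for illustration, not the patch's actual accounting logic.

/* Hypothetical model of "allocate, notify, roll back on veto"; the
 * limit-based notifier is an illustration, not the patch's accounting. */
#include <stdio.h>
#include <stdlib.h>

struct kmem_cache {
	const char *name;
	long used;      /* bytes currently charged to this cache */
	long limit;     /* hypothetical accounting limit */
	size_t size;    /* object size */
};

/* Stand-in for slub_alloc_notify(): charge the object, veto if over limit. */
static int alloc_notify(struct kmem_cache *s, void *object)
{
	(void)object;
	if (s->used + (long)s->size > s->limit)
		return -1;              /* caller must undo the allocation */
	s->used += s->size;
	return 0;
}

static void *cache_alloc(struct kmem_cache *s)
{
	void *object = malloc(s->size);

	/* Mirrors the patched tail of slab_alloc(): undo and fail if vetoed. */
	if (object && alloc_notify(s, object) < 0) {
		free(object);
		return NULL;
	}
	return object;
}

int main(void)
{
	struct kmem_cache s = { "demo", 0, 128, 64 };
	void *a = cache_alloc(&s);   /* charged, used = 64 */
	void *b = cache_alloc(&s);   /* charged, used = 128 */
	void *c = cache_alloc(&s);   /* would exceed the limit, returns NULL */

	printf("a=%p b=%p c=%p used=%ld\n", a, b, c, s.used);
	free(a);
	free(b);
	return 0;
}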
On Wed, 19 Sep 2007, Pavel Emelyanov wrote:
so the fast path is still fast, and we have two ways:
1. we keep the checks on the fast path and have zero overhead for
   unaccounted caches and some overhead for accounted ones;

This stuff accumulates. I have a bad experience from SLAB. We are counting
Christoph Lameter wrote:
On Mon, 17 Sep 2007, Pavel Emelyanov wrote:
@@ -1036,7 +1121,10 @@ static struct page *allocate_slab(struct
 	page = alloc_pages_node(node, flags, s->order);
 	if (!page)
-		return NULL;
+		goto out;
+
+	if (slub_newpage_notify(s, page, flags)
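The hunk above is cut off right after the slub_newpage_notify() call, so its failure handling is not visible here. As a rough sketch only, the usual shape of such a page-level hook is allocate, notify, and release the page on veto before falling through to the common exit; the model below assumes that shape (plain malloc/free stand in for alloc_pages_node()/__free_pages(), and the per-cache page budget is hypothetical).

/* Sketch only: the failure handling after slub_newpage_notify() is assumed,
 * since the quoted hunk is truncated.  malloc/free stand in for
 * alloc_pages_node()/__free_pages(); the page budget is hypothetical. */
#include <stdlib.h>

struct kmem_cache {
	long pages_used;
	long pages_limit;    /* hypothetical per-cache page budget */
	size_t page_bytes;
};

/* Stand-in for slub_newpage_notify(): charge one page, veto over budget. */
static int newpage_notify(struct kmem_cache *s, void *page)
{
	(void)page;
	if (s->pages_used + 1 > s->pages_limit)
		return -1;
	s->pages_used++;
	return 0;
}

static void *allocate_slab_model(struct kmem_cache *s)
{
	void *page = malloc(s->page_bytes);

	if (!page)
		goto out;

	/* Assumed veto handling: release the page and fall through to the
	 * common exit, keeping the hunk's goto-out style. */
	if (newpage_notify(s, page) < 0) {
		free(page);
		page = NULL;
	}
out:
	return page;
}

int main(void)
{
	struct kmem_cache s = { 0, 2, 4096 };
	void *p1 = allocate_slab_model(&s);   /* charged */
	void *p2 = allocate_slab_model(&s);   /* charged */
	void *p3 = allocate_slab_model(&s);   /* vetoed: returns NULL */

	free(p1);
	free(p2);
	free(p3);                             /* free(NULL) is a no-op */
	return 0;
}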