Currently we use per-cpu array to hold pointers to preallocated nodes.
Let's replace it with a linked list. On x86_64 it saves 256 bytes in
per-cpu ELF section which may translate into freeing up 2MB of memory
for NR_CPUS==8192.

Signed-off-by: Kirill A. Shutemov <[email protected]>
---
 lib/radix-tree.c | 27 ++++++++++++++++-----------
 1 file changed, 16 insertions(+), 11 deletions(-)

diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 3d2aa27b845b..1f58724a2f58 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -65,7 +65,8 @@ static struct kmem_cache *radix_tree_node_cachep;
  */
 struct radix_tree_preload {
        int nr;
-       struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
+       /* nodes->private_data points to next preallocated node */
+       struct radix_tree_node *nodes;
 };
 static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
 
@@ -197,8 +198,9 @@ radix_tree_node_alloc(struct radix_tree_root *root)
                 */
                rtp = this_cpu_ptr(&radix_tree_preloads);
                if (rtp->nr) {
-                       ret = rtp->nodes[rtp->nr - 1];
-                       rtp->nodes[rtp->nr - 1] = NULL;
+                       ret = rtp->nodes;
+                       rtp->nodes = ret->private_data;
+                       ret->private_data = NULL;
                        rtp->nr--;
                }
                /*
@@ -257,16 +259,18 @@ static int __radix_tree_preload(gfp_t gfp_mask)
 
        preempt_disable();
        rtp = this_cpu_ptr(&radix_tree_preloads);
-       while (rtp->nr < ARRAY_SIZE(rtp->nodes)) {
+       while (rtp->nr < RADIX_TREE_PRELOAD_SIZE) {
                preempt_enable();
                node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
                if (node == NULL)
                        goto out;
                preempt_disable();
                rtp = this_cpu_ptr(&radix_tree_preloads);
-               if (rtp->nr < ARRAY_SIZE(rtp->nodes))
-                       rtp->nodes[rtp->nr++] = node;
-               else
+               if (rtp->nr < RADIX_TREE_PRELOAD_SIZE) {
+                       node->private_data = rtp->nodes;
+                       rtp->nodes = node;
+                       rtp->nr++;
+               } else
                        kmem_cache_free(radix_tree_node_cachep, node);
        }
        ret = 0;
@@ -1463,15 +1467,16 @@ static int radix_tree_callback(struct notifier_block *nfb,
 {
        int cpu = (long)hcpu;
        struct radix_tree_preload *rtp;
+       struct radix_tree_node *node;
 
        /* Free per-cpu pool of perloaded nodes */
        if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
                rtp = &per_cpu(radix_tree_preloads, cpu);
                while (rtp->nr) {
-                       kmem_cache_free(radix_tree_node_cachep,
-                                       rtp->nodes[rtp->nr-1]);
-                       rtp->nodes[rtp->nr-1] = NULL;
-                       rtp->nr--;
+                       node = rtp->nodes;
+                       rtp->nodes = node->private_data;
+                       kmem_cache_free(radix_tree_node_cachep, node);
+                       rtp->nr--;
                }
        }
        return NOTIFY_OK;
-- 
2.1.4

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to