With the introduction of the shared dirty page accounting in .19, NFS should
no longer be able to surprise the VM with all pages dirty. Thus the VM should
always be able to free some memory, and hence the mempools are no longer
needed.

Signed-off-by: Peter Zijlstra <[EMAIL PROTECTED]>
---
 fs/nfs/read.c  |   15 +++------------
 fs/nfs/write.c |   27 +++++----------------------
 2 files changed, 8 insertions(+), 34 deletions(-)
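
For reference, and not part of the patch itself: a minimal sketch of the
allocation pattern being removed versus the one replacing it, using the names
from fs/nfs/read.c (the write.c side is analogous).

	/* Before: a mempool keeps MIN_POOL_READ objects in reserve, so the
	 * allocation can be satisfied even when the page allocator is
	 * completely exhausted. */
	struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, GFP_NOFS);

	/* After: with shared dirty page accounting bounding how much memory
	 * NFS can dirty, reclaim should always be able to make progress, so
	 * a plain slab allocation (which may block in reclaim) is assumed
	 * to suffice. */
	struct nfs_read_data *p = kmem_cache_alloc(nfs_rdata_cachep, GFP_NOFS);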

Index: linux-2.6/fs/nfs/read.c
===================================================================
--- linux-2.6.orig/fs/nfs/read.c
+++ linux-2.6/fs/nfs/read.c
@@ -33,13 +33,10 @@ static const struct rpc_call_ops nfs_rea
 static const struct rpc_call_ops nfs_read_full_ops;
 
 static struct kmem_cache *nfs_rdata_cachep;
-static mempool_t *nfs_rdata_mempool;
-
-#define MIN_POOL_READ  (32)
 
 struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
 {
-       struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, GFP_NOFS);
+       struct nfs_read_data *p = kmem_cache_alloc(nfs_rdata_cachep, GFP_NOFS);
 
        if (p) {
                memset(p, 0, sizeof(*p));
@@ -50,7 +47,7 @@ struct nfs_read_data *nfs_readdata_alloc
                else {
                        p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
                        if (!p->pagevec) {
-                               mempool_free(p, nfs_rdata_mempool);
+                               kmem_cache_free(nfs_rdata_cachep, p);
                                p = NULL;
                        }
                }
@@ -63,7 +60,7 @@ static void nfs_readdata_rcu_free(struct
        struct nfs_read_data *p = container_of(head, struct nfs_read_data, task.u.tk_rcu);
        if (p && (p->pagevec != &p->page_array[0]))
                kfree(p->pagevec);
-       mempool_free(p, nfs_rdata_mempool);
+       kmem_cache_free(nfs_rdata_cachep, p);
 }
 
 static void nfs_readdata_free(struct nfs_read_data *rdata)
@@ -595,16 +592,10 @@ int __init nfs_init_readpagecache(void)
        if (nfs_rdata_cachep == NULL)
                return -ENOMEM;
 
-       nfs_rdata_mempool = mempool_create_slab_pool(MIN_POOL_READ,
-                                                    nfs_rdata_cachep);
-       if (nfs_rdata_mempool == NULL)
-               return -ENOMEM;
-
        return 0;
 }
 
 void nfs_destroy_readpagecache(void)
 {
-       mempool_destroy(nfs_rdata_mempool);
        kmem_cache_destroy(nfs_rdata_cachep);
 }
Index: linux-2.6/fs/nfs/write.c
===================================================================
--- linux-2.6.orig/fs/nfs/write.c
+++ linux-2.6/fs/nfs/write.c
@@ -28,9 +28,6 @@
 
 #define NFSDBG_FACILITY                NFSDBG_PAGECACHE
 
-#define MIN_POOL_WRITE         (32)
-#define MIN_POOL_COMMIT                (4)
-
 /*
  * Local function declarations
  */
@@ -44,12 +41,10 @@ static const struct rpc_call_ops nfs_wri
 static const struct rpc_call_ops nfs_commit_ops;
 
 static struct kmem_cache *nfs_wdata_cachep;
-static mempool_t *nfs_wdata_mempool;
-static mempool_t *nfs_commit_mempool;
 
 struct nfs_write_data *nfs_commit_alloc(void)
 {
-       struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);
+       struct nfs_write_data *p = kmem_cache_alloc(nfs_wdata_cachep, GFP_NOFS);
 
        if (p) {
                memset(p, 0, sizeof(*p));
@@ -63,7 +58,7 @@ static void nfs_commit_rcu_free(struct r
        struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
        if (p && (p->pagevec != &p->page_array[0]))
                kfree(p->pagevec);
-       mempool_free(p, nfs_commit_mempool);
+       kmem_cache_free(nfs_wdata_cachep, p);
 }
 
 void nfs_commit_free(struct nfs_write_data *wdata)
@@ -73,7 +68,7 @@ void nfs_commit_free(struct nfs_write_da
 
 struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
 {
-       struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);
+       struct nfs_write_data *p = kmem_cache_alloc(nfs_wdata_cachep, GFP_NOFS);
 
        if (p) {
                memset(p, 0, sizeof(*p));
@@ -84,7 +79,7 @@ struct nfs_write_data *nfs_writedata_all
                else {
                        p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
                        if (!p->pagevec) {
-                               mempool_free(p, nfs_wdata_mempool);
+                               kmem_cache_free(nfs_wdata_cachep, p);
                                p = NULL;
                        }
                }
@@ -97,7 +92,7 @@ static void nfs_writedata_rcu_free(struc
        struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
        if (p && (p->pagevec != &p->page_array[0]))
                kfree(p->pagevec);
-       mempool_free(p, nfs_wdata_mempool);
+       kmem_cache_free(nfs_wdata_cachep, p);
 }
 
 static void nfs_writedata_free(struct nfs_write_data *wdata)
@@ -1514,16 +1509,6 @@ int __init nfs_init_writepagecache(void)
        if (nfs_wdata_cachep == NULL)
                return -ENOMEM;
 
-       nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
-                                                    nfs_wdata_cachep);
-       if (nfs_wdata_mempool == NULL)
-               return -ENOMEM;
-
-       nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
-                                                     nfs_wdata_cachep);
-       if (nfs_commit_mempool == NULL)
-               return -ENOMEM;
-
        /*
         * NFS congestion size, scale with available memory.
         *
@@ -1549,8 +1534,6 @@ int __init nfs_init_writepagecache(void)
 
 void nfs_destroy_writepagecache(void)
 {
-       mempool_destroy(nfs_commit_mempool);
-       mempool_destroy(nfs_wdata_mempool);
        kmem_cache_destroy(nfs_wdata_cachep);
 }
 
