commit:     1a90a17ec827f5827bbb38024ff5f1e0ac9189f5
Author:     Alexey Shvetsov <alexxy <AT> omrb <DOT> pnpi <DOT> spb <DOT> ru>
AuthorDate: Fri Jun 10 19:06:04 2016 +0000
Commit:     Alexey Shvetsov <alexxy <AT> gentoo <DOT> org>
CommitDate: Fri Jun 10 19:06:04 2016 +0000
URL:        https://gitweb.gentoo.org/proj/sci.git/commit/?id=1a90a17e

sys-cluster/lustre: Update patches

Package-Manager: portage-2.3.0_rc1

 ...cfs-update-zfs-proc-handling-to-seq_files.patch | 204 ------
 ...m-Backport-shrinker-changes-from-upstream.patch | 753 ---------------------
 sys-cluster/lustre/lustre-9999.ebuild              |   5 +-
 3 files changed, 1 insertion(+), 961 deletions(-)

diff --git 
a/sys-cluster/lustre/files/0001-LU-3319-procfs-update-zfs-proc-handling-to-seq_files.patch
 
b/sys-cluster/lustre/files/0001-LU-3319-procfs-update-zfs-proc-handling-to-seq_files.patch
deleted file mode 100644
index 2860b4b..0000000
--- 
a/sys-cluster/lustre/files/0001-LU-3319-procfs-update-zfs-proc-handling-to-seq_files.patch
+++ /dev/null
@@ -1,204 +0,0 @@
-From 4e55f198a630e3beb9daad9bf72133da44ca7242 Mon Sep 17 00:00:00 2001
-From: James Simmons <uja.o...@gmail.com>
-Date: Wed, 23 Apr 2014 09:31:09 -0400
-Subject: [PATCH 1/3] LU-3319 procfs: update zfs proc handling to seq_files
-
-Migrate all zfs proc handling to using strictly seq_files.
-
-Signed-off-by: James Simmons <uja.o...@gmail.com>
-Change-Id: I6dc7e65c3e74e7934a17939815ec3c334fac58c7
----
- lustre/osd-zfs/osd_handler.c  | 13 +++---
- lustre/osd-zfs/osd_internal.h |  3 +-
- lustre/osd-zfs/osd_lproc.c    | 92 +++++++++++++++++++++++++------------------
- 3 files changed, 61 insertions(+), 47 deletions(-)
-
-diff --git a/lustre/osd-zfs/osd_handler.c b/lustre/osd-zfs/osd_handler.c
-index 42e6e20..83b243d 100644
---- a/lustre/osd-zfs/osd_handler.c
-+++ b/lustre/osd-zfs/osd_handler.c
-@@ -761,12 +761,13 @@ static int osd_process_config(const struct lu_env *env,
-               break;
-       case LCFG_PARAM: {
-               LASSERT(&o->od_dt_dev);
--              rc = class_process_proc_param(PARAM_OSD, lprocfs_osd_obd_vars,
--                                            cfg, &o->od_dt_dev);
-+              rc = class_process_proc_seq_param(PARAM_OSD,
-+                                                lprocfs_osd_obd_vars, cfg,
-+                                                &o->od_dt_dev);
-               if (rc > 0 || rc == -ENOSYS)
--                      rc = class_process_proc_param(PARAM_OST,
--                                                    lprocfs_osd_obd_vars,
--                                                    cfg, &o->od_dt_dev);
-+                      rc = class_process_proc_seq_param(PARAM_OST,
-+                                                        lprocfs_osd_obd_vars,
-+                                                        cfg, &o->od_dt_dev);
-               break;
-       }
-       default:
-@@ -923,7 +924,7 @@ int __init osd_init(void)
- 
-       rc = class_register_type(&osd_obd_device_ops, NULL, true, NULL,
- #ifndef HAVE_ONLY_PROCFS_SEQ
--                               lprocfs_osd_module_vars,
-+                               NULL,
- #endif
-                                LUSTRE_OSD_ZFS_NAME, &osd_device_type);
-       if (rc)
-diff --git a/lustre/osd-zfs/osd_internal.h b/lustre/osd-zfs/osd_internal.h
-index c53ad8b..1773313 100644
---- a/lustre/osd-zfs/osd_internal.h
-+++ b/lustre/osd-zfs/osd_internal.h
-@@ -405,8 +405,7 @@ enum {
- };
- 
- /* osd_lproc.c */
--extern struct lprocfs_vars lprocfs_osd_obd_vars[];
--extern struct lprocfs_vars lprocfs_osd_module_vars[];
-+extern struct lprocfs_seq_vars lprocfs_osd_obd_vars[];
- 
- int osd_procfs_init(struct osd_device *osd, const char *name);
- int osd_procfs_fini(struct osd_device *osd);
-diff --git a/lustre/osd-zfs/osd_lproc.c b/lustre/osd-zfs/osd_lproc.c
-index bc8a807..f19ed11 100644
---- a/lustre/osd-zfs/osd_lproc.c
-+++ b/lustre/osd-zfs/osd_lproc.c
-@@ -107,27 +107,27 @@ out:
-       RETURN(result);
- }
- 
--static int lprocfs_osd_rd_fstype(char *page, char **start, off_t off,
--                              int count, int *eof, void *data)
-+static int zfs_osd_fstype_seq_show(struct seq_file *m, void *data)
- {
--      return snprintf(page, count, "zfs\n");
-+      return seq_printf(m, "zfs\n");
- }
-+LPROC_SEQ_FOPS_RO(zfs_osd_fstype);
- 
--static int lprocfs_osd_rd_mntdev(char *page, char **start, off_t off, int 
count,
--                              int *eof, void *data)
-+static int zfs_osd_mntdev_seq_show(struct seq_file *m, void *data)
- {
--      struct osd_device *osd = osd_dt_dev((struct dt_device *)data);
-+      struct osd_device *osd = osd_dt_dev((struct dt_device *)m->private);
- 
-       LASSERT(osd != NULL);
--      *eof = 1;
--
--      return snprintf(page, count, "%s\n", osd->od_mntdev);
-+      return seq_printf(m, "%s\n", osd->od_mntdev);
- }
-+LPROC_SEQ_FOPS_RO(zfs_osd_mntdev);
- 
--static int lprocfs_osd_wr_force_sync(struct file *file, const char *buffer,
--                                      unsigned long count, void *data)
-+static ssize_t
-+lprocfs_osd_force_sync_seq_write(struct file *file, const char __user *buffer,
-+                              size_t count, loff_t *off)
- {
--      struct dt_device  *dt = data;
-+      struct seq_file   *m = file->private_data;
-+      struct dt_device  *dt = m->private;
-       struct lu_env      env;
-       int rc;
- 
-@@ -139,21 +139,23 @@ static int lprocfs_osd_wr_force_sync(struct file *file, 
const char *buffer,
- 
-       return rc == 0 ? count : rc;
- }
-+LPROC_SEQ_FOPS_WO_TYPE(zfs, osd_force_sync);
- 
--static int lprocfs_osd_rd_iused_est(char *page, char **start, off_t off,
--                                  int count, int *eof, void *data)
-+static int zfs_osd_iused_est_seq_show(struct seq_file *m, void *data)
- {
--      struct osd_device *osd = osd_dt_dev((struct dt_device *)data);
-+      struct osd_device *osd = osd_dt_dev((struct dt_device *)m->private);
-       LASSERT(osd != NULL);
- 
--      return snprintf(page, count, "%d\n", osd->od_quota_iused_est);
-+      return seq_printf(m, "%d\n", osd->od_quota_iused_est);
- }
- 
--static int lprocfs_osd_wr_iused_est(struct file *file,
--                                  const char __user *buffer,
--                                  unsigned long count, void *data)
-+static ssize_t
-+zfs_osd_iused_est_seq_write(struct file *file, const char __user *buffer,
-+                           size_t count, loff_t *off)
- {
--      struct osd_device *osd = osd_dt_dev((struct dt_device *)data);
-+      struct seq_file   *m = file->private_data;
-+      struct dt_device  *dt = m->private;
-+      struct osd_device *osd = osd_dt_dev(dt);
-       int                rc, val;
- 
-       LASSERT(osd != NULL);
-@@ -166,24 +168,36 @@ static int lprocfs_osd_wr_iused_est(struct file *file,
- 
-       return count;
- }
--
--struct lprocfs_vars lprocfs_osd_obd_vars[] = {
--      { "blocksize",          lprocfs_dt_rd_blksize,  0, 0 },
--      { "kbytestotal",        lprocfs_dt_rd_kbytestotal,      0, 0 },
--      { "kbytesfree",         lprocfs_dt_rd_kbytesfree,       0, 0 },
--      { "kbytesavail",        lprocfs_dt_rd_kbytesavail,      0, 0 },
--      { "filestotal",         lprocfs_dt_rd_filestotal,       0, 0 },
--      { "filesfree",          lprocfs_dt_rd_filesfree,        0, 0 },
--      { "fstype",          lprocfs_osd_rd_fstype,      0, 0 },
--      { "mntdev",          lprocfs_osd_rd_mntdev,      0, 0 },
--      { "force_sync",      0, lprocfs_osd_wr_force_sync     },
--      { "quota_iused_estimate",  lprocfs_osd_rd_iused_est,
--              lprocfs_osd_wr_iused_est,   0, 0 },
--      { 0 }
--};
--
--struct lprocfs_vars lprocfs_osd_module_vars[] = {
--      { "num_refs",        lprocfs_rd_numrefs,         0, 0 },
-+LPROC_SEQ_FOPS(zfs_osd_iused_est);
-+
-+LPROC_SEQ_FOPS_RO_TYPE(zfs, dt_blksize);
-+LPROC_SEQ_FOPS_RO_TYPE(zfs, dt_kbytestotal);
-+LPROC_SEQ_FOPS_RO_TYPE(zfs, dt_kbytesfree);
-+LPROC_SEQ_FOPS_RO_TYPE(zfs, dt_kbytesavail);
-+LPROC_SEQ_FOPS_RO_TYPE(zfs, dt_filestotal);
-+LPROC_SEQ_FOPS_RO_TYPE(zfs, dt_filesfree);
-+
-+struct lprocfs_seq_vars lprocfs_osd_obd_vars[] = {
-+      { .name =       "blocksize",
-+        .fops =       &zfs_dt_blksize_fops            },
-+      { .name =       "kbytestotal",
-+        .fops =       &zfs_dt_kbytestotal_fops        },
-+      { .name =       "kbytesfree",
-+        .fops =       &zfs_dt_kbytesfree_fops         },
-+      { .name =       "kbytesavail",
-+        .fops =       &zfs_dt_kbytesavail_fops        },
-+      { .name =       "filestotal",
-+        .fops =       &zfs_dt_filestotal_fops         },
-+      { .name =       "filesfree",
-+        .fops =       &zfs_dt_filesfree_fops          },
-+      { .name =       "fstype",
-+        .fops =       &zfs_osd_fstype_fops            },
-+      { .name =       "mntdev",
-+        .fops =       &zfs_osd_mntdev_fops            },
-+      { .name =       "force_sync",
-+        .fops =       &zfs_osd_force_sync_fops        },
-+      { .name =       "quota_iused_estimate",
-+        .fops =       &zfs_osd_iused_est_fops         },
-       { 0 }
- };
- 
-@@ -203,7 +217,7 @@ int osd_procfs_init(struct osd_device *osd, const char 
*name)
-       LASSERT(name != NULL);
-       LASSERT(type != NULL);
- 
--      osd->od_proc_entry = lprocfs_register(name, type->typ_procroot,
-+      osd->od_proc_entry = lprocfs_seq_register(name, type->typ_procroot,
-                       lprocfs_osd_obd_vars, &osd->od_dt_dev);
-       if (IS_ERR(osd->od_proc_entry)) {
-               rc = PTR_ERR(osd->od_proc_entry);
--- 
-1.9.3
-

diff --git 
a/sys-cluster/lustre/files/0003-LU-4416-mm-Backport-shrinker-changes-from-upstream.patch
 
b/sys-cluster/lustre/files/0003-LU-4416-mm-Backport-shrinker-changes-from-upstream.patch
deleted file mode 100644
index ea23076..0000000
--- 
a/sys-cluster/lustre/files/0003-LU-4416-mm-Backport-shrinker-changes-from-upstream.patch
+++ /dev/null
@@ -1,753 +0,0 @@
-From 871751ab815067744064132b76aaf4857c720fc9 Mon Sep 17 00:00:00 2001
-From: Yang Sheng <yang.sh...@intel.com>
-Date: Thu, 24 Apr 2014 11:43:09 +0800
-Subject: [PATCH 3/3] LU-4416 mm: Backport shrinker changes from upstream
-
-Convert shrinker to new count/scan API.
---ptlrpc shrinker
---lu_object shrinker
---ldlm pool shrinker
-
-Signed-off-by: Peng Tao <tao.p...@emc.com>
-Signed-off-by: Andreas Dilger <andreas.dil...@intel.com>
-Signed-off-by: Yang Sheng <yang.sh...@intel.com>
-Change-Id: Idbd7cd3b7488202e5e8f6fdf757ae6d20e28d642
----
- libcfs/autoconf/lustre-libcfs.m4        |  18 +++
- libcfs/include/libcfs/linux/linux-mem.h |  34 ++++-
- libcfs/include/libcfs/posix/libcfs.h    |  13 +-
- lustre/ldlm/ldlm_pool.c                 | 222 ++++++++++++++++++++------------
- lustre/obdclass/lu_object.c             | 134 +++++++++++--------
- lustre/ptlrpc/sec_bulk.c                |  80 +++++++++---
- 6 files changed, 338 insertions(+), 163 deletions(-)
-
-diff --git a/libcfs/autoconf/lustre-libcfs.m4 
b/libcfs/autoconf/lustre-libcfs.m4
-index ac5c413..81c6d7f 100644
---- a/libcfs/autoconf/lustre-libcfs.m4
-+++ b/libcfs/autoconf/lustre-libcfs.m4
-@@ -290,6 +290,22 @@ No crc32c pclmulqdq crypto api found, enable internal 
pclmulqdq based crc32c
- ]) # LIBCFS_ENABLE_CRC32C_ACCEL
- 
- #
-+# FC19 3.12 kernel struct shrinker change
-+#
-+AC_DEFUN([LIBCFS_SHRINKER_COUNT],[
-+LB_CHECK_COMPILE([shrinker has 'count_objects'],
-+shrinker_count_objects, [
-+      #include <linux/mmzone.h>
-+      #include <linux/shrinker.h>
-+],[
-+      ((struct shrinker*)0)->count_objects(NULL, NULL);
-+],[
-+      AC_DEFINE(HAVE_SHRINKER_COUNT, 1,
-+              [shrinker has count_objects memeber])
-+])
-+])
-+
-+#
- # LIBCFS_PROG_LINUX
- #
- # LibCFS linux kernel checks
-@@ -324,6 +340,8 @@ LIBCFS_HAVE_CRC32
- LIBCFS_ENABLE_CRC32_ACCEL
- # 3.10
- LIBCFS_ENABLE_CRC32C_ACCEL
-+# 3.12
-+LIBCFS_SHRINKER_COUNT
- ]) # LIBCFS_PROG_LINUX
- 
- #
-diff --git a/libcfs/include/libcfs/linux/linux-mem.h 
b/libcfs/include/libcfs/linux/linux-mem.h
-index 6109645..ce20cb8 100644
---- a/libcfs/include/libcfs/linux/linux-mem.h
-+++ b/libcfs/include/libcfs/linux/linux-mem.h
-@@ -108,13 +108,16 @@ extern void *cfs_mem_cache_cpt_alloc(struct kmem_cache 
*cachep,
- /*
-  * Shrinker
-  */
--
- #ifdef HAVE_SHRINK_CONTROL
- # define SHRINKER_ARGS(sc, nr_to_scan, gfp_mask)  \
-                        struct shrinker *shrinker, \
-                        struct shrink_control *sc
- # define shrink_param(sc, var) ((sc)->var)
- #else
-+struct shrink_control {
-+      gfp_t gfp_mask;
-+      unsigned long nr_to_scan;
-+};
- # ifdef HAVE_SHRINKER_WANT_SHRINK_PTR
- #  define SHRINKER_ARGS(sc, nr_to_scan, gfp_mask)  \
-                         struct shrinker *shrinker, \
-@@ -123,13 +126,31 @@ extern void *cfs_mem_cache_cpt_alloc(struct kmem_cache 
*cachep,
- #  define SHRINKER_ARGS(sc, nr_to_scan, gfp_mask)  \
-                         int nr_to_scan, gfp_t gfp_mask
- # endif
-+      /* avoid conflict with spl mm_compat.h */
-+# define HAVE_SHRINK_CONTROL_STRUCT 1
- # define shrink_param(sc, var) (var)
- #endif
- 
--typedef int (*shrinker_t)(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask));
-+#ifdef HAVE_SHRINKER_COUNT
-+struct shrinker_var {
-+      unsigned long (*count)(struct shrinker *,
-+                             struct shrink_control *sc);
-+      unsigned long (*scan)(struct shrinker *,
-+                            struct shrink_control *sc);
-+};
-+# define DEF_SHRINKER_VAR(name, shrink, count_obj, scan_obj) \
-+          struct shrinker_var name = { .count = count_obj, .scan = scan_obj }
-+#else
-+struct shrinker_var {
-+      int (*shrink)(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask));
-+};
-+# define DEF_SHRINKER_VAR(name, shrinker, count, scan) \
-+          struct shrinker_var name = { .shrink = shrinker }
-+# define SHRINK_STOP (~0UL)
-+#endif
- 
- static inline
--struct shrinker *set_shrinker(int seek, shrinker_t func)
-+struct shrinker *set_shrinker(int seek, struct shrinker_var *var)
- {
-         struct shrinker *s;
- 
-@@ -137,7 +158,12 @@ struct shrinker *set_shrinker(int seek, shrinker_t func)
-         if (s == NULL)
-                 return (NULL);
- 
--        s->shrink = func;
-+#ifdef HAVE_SHRINKER_COUNT
-+      s->count_objects = var->count;
-+      s->scan_objects = var->scan;
-+#else
-+      s->shrink = var->shrink;
-+#endif
-         s->seeks = seek;
- 
-         register_shrinker(s);
-diff --git a/libcfs/include/libcfs/posix/libcfs.h 
b/libcfs/include/libcfs/posix/libcfs.h
-index 6a16be3..c839486 100644
---- a/libcfs/include/libcfs/posix/libcfs.h
-+++ b/libcfs/include/libcfs/posix/libcfs.h
-@@ -269,12 +269,19 @@ struct shrinker {
- #endif
- };
- 
--#define DEFAULT_SEEKS (0)
-+struct shrinker_var {
-+#ifndef __INTEL_COMPILER
-+      ;
-+#endif
-+};
-+
-+#define DEF_SHRINKER_VAR(name, shrink, count, scan) \
-+              struct shrinker_var name = {};
- 
--typedef int (*shrinker_t)(int, unsigned int);
-+#define DEFAULT_SEEKS (0)
- 
- static inline
--struct shrinker *set_shrinker(int seeks, shrinker_t shrink)
-+struct shrinker *set_shrinker(int seeks, struct shrinker_var *var)
- {
-       return (struct shrinker *)0xdeadbea1; /* Cannot return NULL here */
- }
-diff --git a/lustre/ldlm/ldlm_pool.c b/lustre/ldlm/ldlm_pool.c
-index 6cf50f2..b3eaf1c 100644
---- a/lustre/ldlm/ldlm_pool.c
-+++ b/lustre/ldlm/ldlm_pool.c
-@@ -531,7 +531,7 @@ static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
-                                 int nr, unsigned int gfp_mask)
- {
-         struct ldlm_namespace *ns;
--        int canceled = 0, unused;
-+      int unused;
- 
-         ns = ldlm_pl2ns(pl);
- 
-@@ -550,17 +550,14 @@ static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
-       unused = ns->ns_nr_unused;
-       spin_unlock(&ns->ns_lock);
- 
--        if (nr) {
--              canceled = ldlm_cancel_lru(ns, nr, LCF_ASYNC,
--                                         LDLM_CANCEL_SHRINK);
--        }
- #ifdef __KERNEL__
--        /*
--         * Return the number of potentially reclaimable locks.
--         */
--        return ((unused - canceled) / 100) * sysctl_vfs_cache_pressure;
-+      if (nr == 0)
-+              return (unused / 100) * sysctl_vfs_cache_pressure;
-+      else
-+              return ldlm_cancel_lru(ns, nr, LCF_ASYNC, LDLM_CANCEL_SHRINK);
- #else
--        return unused - canceled;
-+      return unused - (nr ? ldlm_cancel_lru(ns, nr, LCF_ASYNC,
-+                                            LDLM_CANCEL_SHRINK) : 0);
- #endif
- }
- 
-@@ -1045,41 +1042,36 @@ static struct shrinker *ldlm_pools_cli_shrinker;
- static struct completion ldlm_pools_comp;
- 
- /*
-- * Cancel \a nr locks from all namespaces (if possible). Returns number of
-- * cached locks after shrink is finished. All namespaces are asked to
-- * cancel approximately equal amount of locks to keep balancing.
-- */
--static int ldlm_pools_shrink(ldlm_side_t client, int nr,
--                             unsigned int gfp_mask)
-+* count locks from all namespaces (if possible). Returns number of
-+* cached locks.
-+*/
-+static unsigned long ldlm_pools_count(ldlm_side_t client, unsigned int 
gfp_mask)
- {
--      unsigned int total = 0, cached = 0;
--      int nr_ns;
--        struct ldlm_namespace *ns;
-+      int total = 0, nr_ns;
-+      struct ldlm_namespace *ns;
-       struct ldlm_namespace *ns_old = NULL; /* loop detection */
--        void *cookie;
-+      void *cookie;
- 
--        if (client == LDLM_NAMESPACE_CLIENT && nr != 0 &&
--            !(gfp_mask & __GFP_FS))
--                return -1;
-+      if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
-+              return 0;
- 
--        CDEBUG(D_DLMTRACE, "Request to shrink %d %s locks from all pools\n",
--               nr, client == LDLM_NAMESPACE_CLIENT ? "client" : "server");
-+      CDEBUG(D_DLMTRACE, "Request to count %s locks from all pools\n",
-+             client == LDLM_NAMESPACE_CLIENT ? "client" : "server");
- 
--        cookie = cl_env_reenter();
-+      cookie = cl_env_reenter();
- 
--        /*
--         * Find out how many resources we may release.
--         */
-+      /*
-+       * Find out how many resources we may release.
-+       */
-       for (nr_ns = ldlm_namespace_nr_read(client);
--           nr_ns > 0; nr_ns--)
--        {
-+           nr_ns > 0; nr_ns--) {
-               mutex_lock(ldlm_namespace_lock(client));
--                if (cfs_list_empty(ldlm_namespace_list(client))) {
-+              if (list_empty(ldlm_namespace_list(client))) {
-                       mutex_unlock(ldlm_namespace_lock(client));
--                        cl_env_reexit(cookie);
--                        return 0;
--                }
--                ns = ldlm_namespace_first_locked(client);
-+                      cl_env_reexit(cookie);
-+                      return 0;
-+              }
-+              ns = ldlm_namespace_first_locked(client);
- 
-               if (ns == ns_old) {
-                       mutex_unlock(ldlm_namespace_lock(client));
-@@ -1095,57 +1087,117 @@ static int ldlm_pools_shrink(ldlm_side_t client, int 
nr,
-               if (ns_old == NULL)
-                       ns_old = ns;
- 
--                ldlm_namespace_get(ns);
--                ldlm_namespace_move_to_active_locked(ns, client);
-+              ldlm_namespace_get(ns);
-+              ldlm_namespace_move_to_active_locked(ns, client);
-               mutex_unlock(ldlm_namespace_lock(client));
--                total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
--                ldlm_namespace_put(ns);
--        }
-+              total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
-+              ldlm_namespace_put(ns);
-+      }
- 
--        if (nr == 0 || total == 0) {
--                cl_env_reexit(cookie);
--                return total;
--        }
-+      cl_env_reexit(cookie);
-+      return total;
-+}
- 
--        /*
--         * Shrink at least ldlm_namespace_nr(client) namespaces.
--         */
--      for (nr_ns = ldlm_namespace_nr_read(client) - nr_ns;
--           nr_ns > 0; nr_ns--)
--        {
--              __u64 cancel;
--              unsigned int nr_locks;
-+static unsigned long ldlm_pools_scan(ldlm_side_t client, int nr,
-+                                   unsigned int gfp_mask)
-+{
-+      unsigned long freed = 0;
-+      int tmp, nr_ns;
-+      struct ldlm_namespace *ns;
-+      void *cookie;
- 
--                /*
--                 * Do not call shrink under ldlm_namespace_lock(client)
--                 */
-+      if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
-+              return -1;
-+
-+      cookie = cl_env_reenter();
-+
-+      /*
-+       * Shrink at least ldlm_namespace_nr_read(client) namespaces.
-+       */
-+      for (tmp = nr_ns = ldlm_namespace_nr_read(client);
-+           tmp > 0; tmp--) {
-+              int cancel, nr_locks;
-+
-+              /*
-+               * Do not call shrink under ldlm_namespace_lock(client)
-+              */
-               mutex_lock(ldlm_namespace_lock(client));
--                if (cfs_list_empty(ldlm_namespace_list(client))) {
-+              if (list_empty(ldlm_namespace_list(client))) {
-                       mutex_unlock(ldlm_namespace_lock(client));
--                        /*
--                         * If list is empty, we can't return any @cached > 0,
--                         * that probably would cause needless shrinker
--                         * call.
--                         */
--                        cached = 0;
--                        break;
--                }
--                ns = ldlm_namespace_first_locked(client);
--                ldlm_namespace_get(ns);
--                ldlm_namespace_move_to_active_locked(ns, client);
-+                      break;
-+              }
-+              ns = ldlm_namespace_first_locked(client);
-+              ldlm_namespace_get(ns);
-+              ldlm_namespace_move_to_active_locked(ns, client);
-               mutex_unlock(ldlm_namespace_lock(client));
- 
--                nr_locks = ldlm_pool_granted(&ns->ns_pool);
--              cancel = (__u64)nr_locks * nr;
--              do_div(cancel, total);
--              ldlm_pool_shrink(&ns->ns_pool, 1 + cancel, gfp_mask);
--                cached += ldlm_pool_granted(&ns->ns_pool);
--                ldlm_namespace_put(ns);
--        }
--        cl_env_reexit(cookie);
--        /* we only decrease the SLV in server pools shrinker, return -1 to
--         * kernel to avoid needless loop. LU-1128 */
--        return (client == LDLM_NAMESPACE_SERVER) ? -1 : cached;
-+              nr_locks = ldlm_pool_granted(&ns->ns_pool);
-+              /*
-+               * We use to shrink propotionally but with new shrinker API,
-+               * we lost the total number of freeable locks.
-+               */
-+              cancel = 1 + min_t(int, nr_locks, nr / nr_ns);
-+              freed += ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
-+              ldlm_namespace_put(ns);
-+      }
-+      cl_env_reexit(cookie);
-+      /*
-+       * we only decrease the SLV in server pools shrinker, return
-+       * SHRINK_STOP to kernel to avoid needless loop. LU-1128
-+       */
-+      return (client == LDLM_NAMESPACE_SERVER) ? SHRINK_STOP : freed;
-+}
-+
-+#ifdef HAVE_SHRINKER_COUNT
-+static unsigned long ldlm_pools_srv_count(struct shrinker *s,
-+                                        struct shrink_control *sc)
-+{
-+      return ldlm_pools_count(LDLM_NAMESPACE_SERVER, sc->gfp_mask);
-+}
-+
-+static unsigned long ldlm_pools_srv_scan(struct shrinker *s,
-+                                       struct shrink_control *sc)
-+{
-+      return ldlm_pools_scan(LDLM_NAMESPACE_SERVER, sc->nr_to_scan,
-+                             sc->gfp_mask);
-+}
-+
-+static unsigned long ldlm_pools_cli_count(struct shrinker *s, struct 
shrink_control *sc)
-+{
-+      return ldlm_pools_count(LDLM_NAMESPACE_CLIENT, sc->gfp_mask);
-+}
-+
-+static unsigned long ldlm_pools_cli_scan(struct shrinker *s,
-+                                       struct shrink_control *sc)
-+{
-+      return ldlm_pools_scan(LDLM_NAMESPACE_CLIENT, sc->nr_to_scan,
-+                             sc->gfp_mask);
-+}
-+
-+#else
-+/*
-+ * Cancel \a nr locks from all namespaces (if possible). Returns number of
-+ * cached locks after shrink is finished. All namespaces are asked to
-+ * cancel approximately equal amount of locks to keep balancing.
-+ */
-+static int ldlm_pools_shrink(ldlm_side_t client, int nr,
-+                           unsigned int gfp_mask)
-+{
-+      unsigned int total = 0;
-+
-+      if (client == LDLM_NAMESPACE_CLIENT && nr != 0 &&
-+          !(gfp_mask & __GFP_FS))
-+              return -1;
-+
-+      CDEBUG(D_DLMTRACE, "Request to shrink %d %s locks from all pools\n",
-+             nr, client == LDLM_NAMESPACE_CLIENT ? "client" : "server");
-+
-+      total = ldlm_pools_count(client, gfp_mask);
-+
-+      if (nr == 0 || total == 0)
-+              return total;
-+
-+      return ldlm_pools_scan(client, nr, gfp_mask);
- }
- 
- static int ldlm_pools_srv_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
-@@ -1162,6 +1214,8 @@ static int ldlm_pools_cli_shrink(SHRINKER_ARGS(sc, 
nr_to_scan, gfp_mask))
-                                  shrink_param(sc, gfp_mask));
- }
- 
-+#endif /* HAVE_SHRINKER_COUNT */
-+
- int ldlm_pools_recalc(ldlm_side_t client)
- {
-         __u32 nr_l = 0, nr_p = 0, l;
-@@ -1418,16 +1472,18 @@ static void ldlm_pools_thread_stop(void)
- int ldlm_pools_init(void)
- {
-       int rc;
-+      DEF_SHRINKER_VAR(shsvar, ldlm_pools_srv_shrink,
-+                       ldlm_pools_srv_count, ldlm_pools_srv_scan);
-+      DEF_SHRINKER_VAR(shcvar, ldlm_pools_cli_shrink,
-+                       ldlm_pools_cli_count, ldlm_pools_cli_scan);
-       ENTRY;
- 
-       rc = ldlm_pools_thread_start();
-       if (rc == 0) {
-               ldlm_pools_srv_shrinker =
--                      set_shrinker(DEFAULT_SEEKS,
--                                       ldlm_pools_srv_shrink);
-+                      set_shrinker(DEFAULT_SEEKS, &shsvar);
-               ldlm_pools_cli_shrinker =
--                      set_shrinker(DEFAULT_SEEKS,
--                                       ldlm_pools_cli_shrink);
-+                      set_shrinker(DEFAULT_SEEKS, &shcvar);
-       }
-       RETURN(rc);
- }
-diff --git a/lustre/obdclass/lu_object.c b/lustre/obdclass/lu_object.c
-index 1304e95..77aa930 100644
---- a/lustre/obdclass/lu_object.c
-+++ b/lustre/obdclass/lu_object.c
-@@ -1884,6 +1884,69 @@ static void lu_site_stats_get(cfs_hash_t *hs,
- 
- #ifdef __KERNEL__
- 
-+static unsigned long lu_cache_shrink_count(struct shrinker *sk,
-+                                         struct shrink_control *sc)
-+{
-+      lu_site_stats_t stats;
-+      struct lu_site *s;
-+      struct lu_site *tmp;
-+      unsigned long cached = 0;
-+
-+      if (!(sc->gfp_mask & __GFP_FS))
-+              return 0;
-+
-+      mutex_lock(&lu_sites_guard);
-+      list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
-+              memset(&stats, 0, sizeof(stats));
-+              lu_site_stats_get(s->ls_obj_hash, &stats, 0);
-+              cached += stats.lss_total - stats.lss_busy;
-+      }
-+      mutex_unlock(&lu_sites_guard);
-+
-+      cached = (cached / 100) * sysctl_vfs_cache_pressure;
-+      CDEBUG(D_INODE, "%ld objects cached\n", cached);
-+      return cached;
-+}
-+
-+static unsigned long lu_cache_shrink_scan(struct shrinker *sk,
-+                                        struct shrink_control *sc)
-+{
-+      struct lu_site *s;
-+      struct lu_site *tmp;
-+      unsigned long remain = sc->nr_to_scan, freed = remain;
-+      LIST_HEAD(splice);
-+
-+      if (!(sc->gfp_mask & __GFP_FS))
-+              /* We must not take the lu_sites_guard lock when
-+               * __GFP_FS is *not* set because of the deadlock
-+               * possibility detailed above. Additionally,
-+               * since we cannot determine the number of
-+               * objects in the cache without taking this
-+               * lock, we're in a particularly tough spot. As
-+               * a result, we'll just lie and say our cache is
-+               * empty. This _should_ be ok, as we can't
-+               * reclaim objects when __GFP_FS is *not* set
-+               * anyways.
-+               */
-+              return SHRINK_STOP;
-+
-+      mutex_lock(&lu_sites_guard);
-+      list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
-+              freed = lu_site_purge(&lu_shrink_env, s, freed);
-+              remain -= freed;
-+              /*
-+               * Move just shrunk site to the tail of site list to
-+               * assure shrinking fairness.
-+               */
-+              list_move_tail(&s->ls_linkage, &splice);
-+      }
-+      list_splice(&splice, lu_sites.prev);
-+      mutex_unlock(&lu_sites_guard);
-+
-+      return sc->nr_to_scan - remain;
-+}
-+
-+#ifndef HAVE_SHRINKER_COUNT
- /*
-  * There exists a potential lock inversion deadlock scenario when using
-  * Lustre on top of ZFS. This occurs between one of ZFS's
-@@ -1904,59 +1967,29 @@ static void lu_site_stats_get(cfs_hash_t *hs,
-  */
- static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
- {
--        lu_site_stats_t stats;
--        struct lu_site *s;
--        struct lu_site *tmp;
-         int cached = 0;
--        int remain = shrink_param(sc, nr_to_scan);
--        CFS_LIST_HEAD(splice);
--
--      if (!(shrink_param(sc, gfp_mask) & __GFP_FS)) {
--              if (remain != 0)
--                        return -1;
--              else
--                      /* We must not take the lu_sites_guard lock when
--                       * __GFP_FS is *not* set because of the deadlock
--                       * possibility detailed above. Additionally,
--                       * since we cannot determine the number of
--                       * objects in the cache without taking this
--                       * lock, we're in a particularly tough spot. As
--                       * a result, we'll just lie and say our cache is
--                       * empty. This _should_ be ok, as we can't
--                       * reclaim objects when __GFP_FS is *not* set
--                       * anyways.
--                       */
--                      return 0;
--        }
-+      struct shrink_control scv = {
-+               .nr_to_scan = shrink_param(sc, nr_to_scan),
-+               .gfp_mask   = shrink_param(sc, gfp_mask)
-+      };
-+#if !defined(HAVE_SHRINKER_WANT_SHRINK_PTR) && !defined(HAVE_SHRINK_CONTROL)
-+      struct shrinker* shrinker = NULL;
-+#endif
- 
--      CDEBUG(D_INODE, "Shrink %d objects\n", remain);
- 
--      mutex_lock(&lu_sites_guard);
--        cfs_list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
--                if (shrink_param(sc, nr_to_scan) != 0) {
--                        remain = lu_site_purge(&lu_shrink_env, s, remain);
--                        /*
--                         * Move just shrunk site to the tail of site list to
--                         * assure shrinking fairness.
--                         */
--                        cfs_list_move_tail(&s->ls_linkage, &splice);
--                }
-+      CDEBUG(D_INODE, "Shrink %lu objects\n", scv.nr_to_scan);
- 
--                memset(&stats, 0, sizeof(stats));
--                lu_site_stats_get(s->ls_obj_hash, &stats, 0);
--                cached += stats.lss_total - stats.lss_busy;
--                if (shrink_param(sc, nr_to_scan) && remain <= 0)
--                        break;
--        }
--        cfs_list_splice(&splice, lu_sites.prev);
--      mutex_unlock(&lu_sites_guard);
-+      lu_cache_shrink_scan(shrinker, &scv);
- 
--        cached = (cached / 100) * sysctl_vfs_cache_pressure;
--        if (shrink_param(sc, nr_to_scan) == 0)
--                CDEBUG(D_INODE, "%d objects cached\n", cached);
--        return cached;
-+      cached = lu_cache_shrink_count(shrinker, &scv);
-+      if (scv.nr_to_scan == 0)
-+              CDEBUG(D_INODE, "%d objects cached\n", cached);
-+      return cached;
- }
- 
-+#endif /* HAVE_SHRINKER_COUNT */
-+
-+
- /*
-  * Debugging stuff.
-  */
-@@ -2005,11 +2038,6 @@ void lu_context_keys_dump(void)
-         }
- }
- EXPORT_SYMBOL(lu_context_keys_dump);
--#else  /* !__KERNEL__ */
--static int lu_cache_shrink(int nr, unsigned int gfp_mask)
--{
--        return 0;
--}
- #endif /* __KERNEL__ */
- 
- /**
-@@ -2018,6 +2046,8 @@ static int lu_cache_shrink(int nr, unsigned int gfp_mask)
- int lu_global_init(void)
- {
-         int result;
-+      DEF_SHRINKER_VAR(shvar, lu_cache_shrink,
-+                       lu_cache_shrink_count, lu_cache_shrink_scan);
- 
-         CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys);
- 
-@@ -2046,7 +2076,7 @@ int lu_global_init(void)
-          * inode, one for ea. Unfortunately setting this high value results in
-          * lu_object/inode cache consuming all the memory.
-          */
--      lu_site_shrinker = set_shrinker(DEFAULT_SEEKS, lu_cache_shrink);
-+      lu_site_shrinker = set_shrinker(DEFAULT_SEEKS, &shvar);
-         if (lu_site_shrinker == NULL)
-                 return -ENOMEM;
- 
-diff --git a/lustre/ptlrpc/sec_bulk.c b/lustre/ptlrpc/sec_bulk.c
-index bb1e782..9da60ad 100644
---- a/lustre/ptlrpc/sec_bulk.c
-+++ b/lustre/ptlrpc/sec_bulk.c
-@@ -232,30 +232,46 @@ static void enc_pools_release_free_pages(long npages)
- }
- 
- /*
-- * could be called frequently for query (@nr_to_scan == 0).
-  * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
-  */
--static int enc_pools_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
-+static unsigned long enc_pools_shrink_count(struct shrinker *s,
-+                                          struct shrink_control *sc)
- {
--      if (unlikely(shrink_param(sc, nr_to_scan) != 0)) {
-+      /*
-+       * if no pool access for a long time, we consider it's fully idle.
-+       * a little race here is fine.
-+       */
-+      if (unlikely(cfs_time_current_sec() - page_pools.epp_last_access >
-+                   CACHE_QUIESCENT_PERIOD)) {
-               spin_lock(&page_pools.epp_lock);
--                shrink_param(sc, nr_to_scan) = min_t(unsigned long,
--                                                   shrink_param(sc, nr_to_scan),
--                                                   page_pools.epp_free_pages -
--                                                   PTLRPC_MAX_BRW_PAGES);
--                if (shrink_param(sc, nr_to_scan) > 0) {
--                        enc_pools_release_free_pages(shrink_param(sc,
--                                                                  nr_to_scan));
--                        CDEBUG(D_SEC, "released %ld pages, %ld left\n",
--                               (long)shrink_param(sc, nr_to_scan),
--                               page_pools.epp_free_pages);
--
--                        page_pools.epp_st_shrinks++;
--                        page_pools.epp_last_shrink = cfs_time_current_sec();
--                }
-+              page_pools.epp_idle_idx = IDLE_IDX_MAX;
-               spin_unlock(&page_pools.epp_lock);
-       }
- 
-+      LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
-+      return max((int)page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES, 0) *
-+              (IDLE_IDX_MAX - page_pools.epp_idle_idx) / IDLE_IDX_MAX;
-+}
-+
-+/*
-+ * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
-+ */
-+static unsigned long enc_pools_shrink_scan(struct shrinker *s,
-+                                         struct shrink_control *sc)
-+{
-+      spin_lock(&page_pools.epp_lock);
-+      sc->nr_to_scan = min_t(unsigned long, sc->nr_to_scan,
-+                            page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES);
-+      if (sc->nr_to_scan > 0) {
-+              enc_pools_release_free_pages(sc->nr_to_scan);
-+              CDEBUG(D_SEC, "released %ld pages, %ld left\n",
-+                     (long)sc->nr_to_scan, page_pools.epp_free_pages);
-+
-+              page_pools.epp_st_shrinks++;
-+              page_pools.epp_last_shrink = cfs_time_current_sec();
-+      }
-+      spin_unlock(&page_pools.epp_lock);
-+
-       /*
-        * if no pool access for a long time, we consider it's fully idle.
-        * a little race here is fine.
-@@ -268,10 +284,31 @@ static int enc_pools_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
-       }
- 
-       LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
--      return max((int)page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES, 0) *
--              (IDLE_IDX_MAX - page_pools.epp_idle_idx) / IDLE_IDX_MAX;
-+      return sc->nr_to_scan;
-+}
-+
-+#ifndef HAVE_SHRINKER_COUNT
-+/*
-+ * could be called frequently for query (@nr_to_scan == 0).
-+ * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
-+ */
-+static int enc_pools_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
-+{
-+      struct shrink_control scv = {
-+              .nr_to_scan = shrink_param(sc, nr_to_scan),
-+              .gfp_mask   = shrink_param(sc, gfp_mask)
-+      };
-+#if !defined(HAVE_SHRINKER_WANT_SHRINK_PTR) && !defined(HAVE_SHRINK_CONTROL)
-+      struct shrinker* shrinker = NULL;
-+#endif
-+
-+      enc_pools_shrink_scan(shrinker, &scv);
-+
-+      return enc_pools_shrink_count(shrinker, &scv);
- }
- 
-+#endif /* HAVE_SHRINKER_COUNT */
-+
- static inline
- int npages_to_npools(unsigned long npages)
- {
-@@ -706,6 +743,8 @@ static inline void enc_pools_free(void)
- 
- int sptlrpc_enc_pool_init(void)
- {
-+      DEF_SHRINKER_VAR(shvar, enc_pools_shrink,
-+                       enc_pools_shrink_count, enc_pools_shrink_scan);
-       /*
-        * maximum capacity is 1/8 of total physical memory.
-        * is the 1/8 a good number?
-@@ -741,8 +780,7 @@ int sptlrpc_enc_pool_init(void)
-         if (page_pools.epp_pools == NULL)
-                 return -ENOMEM;
- 
--      pools_shrinker = set_shrinker(pools_shrinker_seeks,
--                                          enc_pools_shrink);
-+      pools_shrinker = set_shrinker(pools_shrinker_seeks, &shvar);
-         if (pools_shrinker == NULL) {
-                 enc_pools_free();
-                 return -ENOMEM;
--- 
-1.9.3
-

diff --git a/sys-cluster/lustre/lustre-9999.ebuild b/sys-cluster/lustre/lustre-9999.ebuild
index 695b2f0..3c8af2d 100644
--- a/sys-cluster/lustre/lustre-9999.ebuild
+++ b/sys-cluster/lustre/lustre-9999.ebuild
@@ -1,4 +1,4 @@
-# Copyright 1999-2015 Gentoo Foundation
+# Copyright 1999-2016 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 # $Id$
 
@@ -41,9 +41,6 @@ DEPEND="${RDEPEND}
        virtual/linux-sources"
 
 PATCHES=(
-       "${FILESDIR}/0001-LU-3319-procfs-update-zfs-proc-handling-to-seq_files.patch"
-       "${DISTDIR}/0002-LU-3319-procfs-move-mdd-ofd-proc-handling-to-seq_fil.patch"
-       "${FILESDIR}/0003-LU-4416-mm-Backport-shrinker-changes-from-upstream.patch"
        "${FILESDIR}/lustre-readline6.3_fix.patch"
 )
 

Reply via email to