This patch is a first step toward rgrp sharing. It allows glocks
locked in EX mode to be shared amongst processes on the same node.

As with a SH (shared) glock, multiple processes may hold the lock
in EX mode at the same time, provided they are all on the same
node. This is implemented as a holder flag, meaning "Even though
I've locked the glock in EX, this process will share the lock and
allow multiple holders for other local processes that use the
same flag."
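
To illustrate the intended usage (not part of this patch, and only
an assumption about how a later rgrp patch might apply the flag),
a local process that wants node-wide sharing of an EX glock could
enqueue its holder roughly like this, here against a hypothetical
rgrp glock rgd->rd_gl:

    struct gfs2_holder gh;
    int error;

    /* Hypothetical caller: every cooperating local process passes
     * LM_FLAG_NODE_EX, so may_grant() lets them all hold the glock
     * in EX at the same time on this node. */
    gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, LM_FLAG_NODE_EX, &gh);
    error = gfs2_glock_nq(&gh);
    if (error)
            return error;

    /* ... operate on the resource group ... */

    gfs2_glock_dq_uninit(&gh);

The DLM still sees a single EX lock held by the node; only the
local holder bookkeeping in may_grant() changes.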

For now, the new flag has no users. A future patch will use it to
improve performance with rgrp sharing.

Signed-off-by: Bob Peterson <rpete...@redhat.com>
---
 fs/gfs2/glock.c | 23 +++++++++++++++++++----
 fs/gfs2/glock.h |  6 ++++++
 2 files changed, 25 insertions(+), 4 deletions(-)

diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 097bd3c0f270..bd45df7c0ef4 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -279,10 +279,20 @@ void gfs2_glock_put(struct gfs2_glock *gl)
 
 static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
 {
-       const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
-       if ((gh->gh_state == LM_ST_EXCLUSIVE ||
-            gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
-               return 0;
+       const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next,
+                                                      const struct gfs2_holder,
+                                                      gh_list);
+
+       if (gh != gh_head) {
+               if (gh_head->gh_state == LM_ST_EXCLUSIVE &&
+                   (gh_head->gh_flags & LM_FLAG_NODE_EX) &&
+                   gh->gh_state == LM_ST_EXCLUSIVE &&
+                   (gh->gh_flags & LM_FLAG_NODE_EX))
+                       return 1;
+               if ((gh->gh_state == LM_ST_EXCLUSIVE ||
+                    gh_head->gh_state == LM_ST_EXCLUSIVE))
+                       return 0;
+       }
        if (gl->gl_state == gh->gh_state)
                return 1;
        if (gh->gh_flags & GL_EXACT)
@@ -292,6 +302,9 @@ static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holde
                        return 1;
                if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
                        return 1;
+               if (gh_head->gh_flags & LM_FLAG_NODE_EX &&
+                   gh->gh_flags & LM_FLAG_NODE_EX)
+                       return 1;
        }
        if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
                return 1;
@@ -1680,6 +1693,8 @@ static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
                *p++ = 'A';
        if (flags & LM_FLAG_PRIORITY)
                *p++ = 'p';
+       if (flags & LM_FLAG_NODE_EX)
+               *p++ = 'n';
        if (flags & GL_ASYNC)
                *p++ = 'a';
        if (flags & GL_EXACT)
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 5e12220cc0c2..d3e1bdeefcc1 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -78,6 +78,11 @@ enum {
  * request and directly join the other shared lock.  A shared lock request
  * without the priority flag might be forced to wait until the deferred
  * requested had acquired and released the lock.
+ *
+ * LM_FLAG_NODE_EX
+ * This holder agrees to share the lock within this node only. In other words,
+ * the glock is held in EX mode according to DLM, but local holders on the
+ * same node can share it as if it was held in SH.
  */
 
 #define LM_FLAG_TRY            0x0001
@@ -85,6 +90,7 @@ enum {
 #define LM_FLAG_NOEXP          0x0004
 #define LM_FLAG_ANY            0x0008
 #define LM_FLAG_PRIORITY       0x0010
+#define LM_FLAG_NODE_EX                0x0020
 #define GL_ASYNC               0x0040
 #define GL_EXACT               0x0080
 #define GL_SKIP                        0x0100
-- 
2.17.0
