This patch adds a new function state_machine and some hooks to
call it. For this early version, we've only got two states:
idle and finish_xmote. Later, many more will be added.

Signed-off-by: Bob Peterson <rpete...@redhat.com>
---
 fs/gfs2/glock.c  | 74 +++++++++++++++++++++++++++++++++++++++---------
 fs/gfs2/glock.h  |  5 ++++
 fs/gfs2/incore.h |  1 +
 3 files changed, 66 insertions(+), 14 deletions(-)

diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 6e9d53583b73..5e0eac782c32 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -61,6 +61,7 @@ struct gfs2_glock_iter {
 typedef void (*glock_examiner) (struct gfs2_glock * gl);
 
 static void do_xmote(struct gfs2_glock *gl, unsigned int target);
+static void state_machine(struct gfs2_glock *gl, int new_state);
 
 static struct dentry *gfs2_root;
 static struct workqueue_struct *glock_workqueue;
@@ -442,18 +443,16 @@ static void gfs2_demote_wake(struct gfs2_glock *gl)
 /**
  * finish_xmote - The DLM has replied to one of our lock requests
  * @gl: The glock
- * @ret: The status from the DLM
  *
  */
 
-static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
+static void finish_xmote(struct gfs2_glock *gl)
 {
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh;
-       unsigned state = ret & LM_OUT_ST_MASK;
+       unsigned state = gl->gl_reply & LM_OUT_ST_MASK;
        int rv;
 
-       spin_lock(&gl->gl_lockref.lock);
        trace_gfs2_glock_state_change(gl, state);
        state_change(gl, state);
        gh = find_first_waiter(gl);
@@ -467,18 +466,18 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
        if (unlikely(state != gl->gl_target)) {
                if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
                        /* move to back of queue and try next entry */
-                       if (ret & LM_OUT_CANCELED) {
+                       if (gl->gl_reply & LM_OUT_CANCELED) {
                                if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
                                        list_move_tail(&gh->gh_list, &gl->gl_holders);
                                gh = find_first_waiter(gl);
                                gl->gl_target = gh->gh_state;
                                state = LM_ST_UNLOCKED;
-                       } else if ((ret & LM_OUT_ERROR) ||
+                       } else if ((gl->gl_reply & LM_OUT_ERROR) ||
                                   (gh->gh_flags & (LM_FLAG_TRY |
                                                    LM_FLAG_TRY_1CB))) {
                                /* An error or failed "try lock" - report it */
                                gl->gl_target = gl->gl_state;
-                               do_error(gl, ret);
+                               do_error(gl, gl->gl_reply);
                                goto out;
                        }
                }
@@ -497,7 +496,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
                               gl->gl_target, state);
                        GLOCK_BUG_ON(gl, 1);
                }
-               spin_unlock(&gl->gl_lockref.lock);
                return;
        }
 
@@ -516,12 +514,10 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
                }
                rv = do_promote(gl);
                if (rv == 2)
-                       goto out_locked;
+                       return;
        }
 out:
        clear_bit(GLF_LOCK, &gl->gl_flags);
-out_locked:
-       spin_unlock(&gl->gl_lockref.lock);
 }
 
 /**
@@ -573,7 +569,8 @@ __acquires(&gl->gl_lockref.lock)
                if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
                    target == LM_ST_UNLOCKED &&
                    test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
-                       finish_xmote(gl, target);
+                       gl->gl_reply = target;
+                       state_machine(gl, GL_ST_FINISH_XMOTE);
                        gfs2_glock_queue_work(gl, 0);
                }
                else if (ret) {
@@ -582,7 +579,8 @@ __acquires(&gl->gl_lockref.lock)
                                                   &sdp->sd_flags));
                }
        } else { /* lock_nolock */
-               finish_xmote(gl, target);
+               gl->gl_reply = target;
+               state_machine(gl, GL_ST_FINISH_XMOTE);
                gfs2_glock_queue_work(gl, 0);
        }
 
@@ -662,6 +660,53 @@ __acquires(&gl->gl_lockref.lock)
        return;
 }
 
+/**
+ * __state_machine - the glock state machine
+ * @gl: pointer to the glock we are transitioning
+ * @new_state: The new state we need to execute
+ *
+ * Handles state transitions for glocks; gl_lockref.lock must be held.
+ * When the state machine is called, it's given a new state that needs to be
+ * handled, but only after it becomes idle from the last call. Once called,
+ * it keeps running until the state transitions have all been resolved.
+ * The lock might be released inside some of the states, so we may need to
+ * react to state changes from other calls.
+ */
+static void __state_machine(struct gfs2_glock *gl, int new_state)
+{
+       lockdep_assert_held(&gl->gl_lockref.lock);
+       do {
+               switch (gl->gl_mch) {
+               case GL_ST_IDLE:
+                       gl->gl_mch = new_state;
+                       new_state = GL_ST_IDLE;
+                       break;
+               case GL_ST_FINISH_XMOTE:
+                       gl->gl_mch = GL_ST_IDLE;
+                       finish_xmote(gl);
+                       break;
+               default: /* unknown machine state: a bug, catch it early */
+                       GLOCK_BUG_ON(gl, 1);
+               }
+       } while (gl->gl_mch != GL_ST_IDLE);
+}
+
+/**
+ * state_machine - the glock state machine
+ * @gl: pointer to the glock we are transitioning
+ * @new_state: The new state we need to execute
+ *
+ * Just like __state_machine but it acquires the gl_lockref lock itself.
+ * The spin_lock/spin_unlock pair below is balanced, so no __releases or
+ * __acquires sparse annotation belongs on this function.
+ */
+static void state_machine(struct gfs2_glock *gl, int new_state)
+{
+       spin_lock(&gl->gl_lockref.lock);
+       __state_machine(gl, new_state);
+       spin_unlock(&gl->gl_lockref.lock);
+}
+
 static void delete_work_func(struct work_struct *work)
 {
        struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
@@ -691,7 +736,7 @@ static void glock_work_func(struct work_struct *work)
        unsigned int drop_refs = 1;
 
        if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
-               finish_xmote(gl, gl->gl_reply);
+               state_machine(gl, GL_ST_FINISH_XMOTE);
                drop_refs++;
        }
        spin_lock(&gl->gl_lockref.lock);
@@ -818,6 +863,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
        }
 
        atomic_inc(&sdp->sd_glock_disposal);
+       gl->gl_mch = GL_ST_IDLE;
        gl->gl_node.next = NULL;
        gl->gl_flags = 0;
        gl->gl_name = name;
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 5e12220cc0c2..88043d610d64 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -120,6 +120,11 @@ enum {
 #define GL_GLOCK_HOLD_INCR       (long)(HZ / 20)
 #define GL_GLOCK_HOLD_DECR       (long)(HZ / 40)
 
+enum gl_machine_states {
+       GL_ST_IDLE = 0,         /* State machine idle; no transition needed */
+       GL_ST_FINISH_XMOTE = 1, /* Promotion/demotion complete */
+};
+
 struct lm_lockops {
        const char *lm_proto_name;
        int (*lm_mount) (struct gfs2_sbd *sdp, const char *table);
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 888b62cfd6d1..1ab2ef56123a 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -388,6 +388,7 @@ struct gfs2_glock {
        };
        struct rcu_head gl_rcu;
        struct rhash_head gl_node;
+       int gl_mch; /* state machine state */
 };
 
 #define GFS2_MIN_LVB_SIZE 32   /* Min size of LVB that gfs2 supports */
-- 
2.19.1

Reply via email to