[Ocfs2-devel] [PATCH] ocfs2/dlm: allocate lvb for dlm_lock_resource which needs it -drop2

2010-09-21 Thread Wengang Wang
#reposting with trivial changes

Changes since drop1:
1) adds unsigned reserve_lvb:1 to dlm_lock_resource, indicating whether the lvb
should be reserved. The flag is not needed by this patch itself, but it helps the
coming patch that allocates the lvb for the dlm_locks which need it. The 1-bit
flag takes no extra memory, so putting it here is fine.

2) adds a new parameter to dlm_new_lockres() indicating whether lvb memory should
be reserved. The v1 approach of deciding this by checking the lock name is discarded.

3) adds u8 reserve_lvb:1 to dlm_migratable_lockres, indicating whether the migration
target should reserve an lvb for the dlm_lock_resource in case it needs to create
a new dlm_lock_resource.

4) increments the dlm_protocol major version because of 3).

small changes:
1) moves the real allocation from dlm_init_lockres() to dlm_new_lockres().
Since we now have the added parameter, we know whether to do the allocation in
dlm_new_lockres(), and it is better to do all the allocations in the same place
(see the sketch after this list).

2) adds DLM_LKF_VALBLK to dlm_flags in ocfs2_lock_create.
We were missing the flag, but since that path only creates a new lockres (no race),
the current code still runs without problems.

3) adds some asserts on res->lvb.
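
For readers who only skim the diff below, here is a minimal sketch (not the actual
hunk, and with the error unwinding simplified) of what the reworked dlm_new_lockres()
allocation looks like once the lvb is reserved on demand; dlm_lockres_cache and
dlm_init_lockres() are the existing helpers in dlmmaster.c:

struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
					  const char *name,
					  unsigned int namelen,
					  unsigned int reserve_lvb)
{
	struct dlm_lock_resource *res;

	res = kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS);
	if (!res)
		return NULL;

	if (reserve_lvb) {
		/* only lock resources that carry a value block pay for one */
		res->lvb = kzalloc(DLM_LVB_LEN, GFP_NOFS);
		if (!res->lvb) {
			kmem_cache_free(dlm_lockres_cache, res);
			return NULL;
		}
		res->reserve_lvb = 1;
	}

	/* lockname allocation and the rest of the setup stay as before */
	dlm_init_lockres(dlm, res, name, namelen);
	return res;
}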

Signed-off-by: Wengang Wang wen.gang.w...@oracle.com
---
 fs/ocfs2/dlm/dlmast.c  |2 ++
 fs/ocfs2/dlm/dlmcommon.h   |9 +++--
 fs/ocfs2/dlm/dlmconvert.c  |5 -
 fs/ocfs2/dlm/dlmdebug.c|6 --
 fs/ocfs2/dlm/dlmdomain.c   |6 +-
 fs/ocfs2/dlm/dlmmaster.c   |   16 +---
 fs/ocfs2/dlm/dlmrecovery.c |   34 +++---
 fs/ocfs2/dlm/dlmunlock.c   |2 ++
 fs/ocfs2/dlmglue.c |3 +++
 9 files changed, 67 insertions(+), 16 deletions(-)

diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
index f449991..cbe56c5 100644
--- a/fs/ocfs2/dlm/dlmast.c
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -191,6 +191,8 @@ static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
 		mlog(0, "getting lvb from lockres for %s node\n",
 		     lock->ml.node == dlm->node_num ? "master" :
 		     "remote");
+		mlog_bug_on_msg(!res->lvb, "lockname: %.*s\n",
+				res->lockname.len, res->lockname.name);
 		memcpy(lksb->lvb, res->lvb, DLM_LVB_LEN);
}
/* Do nothing for lvb put requests - they should be done in
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index 4b6ae2c..4b94ede 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -324,12 +324,13 @@ struct dlm_lock_resource
struct dlm_ctxt *dlm;
 
unsigned migration_pending:1;
+   unsigned reserve_lvb:1;
atomic_t asts_reserved;
spinlock_t spinlock;
wait_queue_head_t wq;
u8  owner;  //node which owns the lock resource, or unknown
u16 state;
-   char lvb[DLM_LVB_LEN];
+   char *lvb;
unsigned int inflight_locks;
unsigned long refmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
 };
@@ -395,6 +396,8 @@ enum dlm_lockres_list {
 static inline int dlm_lvb_is_empty(char *lvb)
 {
int i;
+
+   BUG_ON(!lvb);
	for (i=0; i<DLM_LVB_LEN; i++)
if (lvb[i])
return 0;
@@ -564,6 +567,7 @@ struct dlm_migratable_lockres
u8 lockname_len;
u8 num_locks;// locks sent in this structure
u8 flags;
+   u8 reserve_lvb:1;
__be32 total_locks; // locks to be sent for this migration cookie
__be64 mig_cookie;  // cookie for this lockres migration
 // or zero if not needed
@@ -859,7 +863,8 @@ struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
 int flags);
 struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
  const char *name,
- unsigned int namelen);
+ unsigned int namelen,
+ unsigned int reserve_lvb);
 
 #define dlm_lockres_set_refmap_bit(bit,res)  \
__dlm_lockres_set_refmap_bit(bit,res,__FILE__,__LINE__)
diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
index 9f30491..93bb76b 100644
--- a/fs/ocfs2/dlm/dlmconvert.c
+++ b/fs/ocfs2/dlm/dlmconvert.c
@@ -212,8 +212,11 @@ grant:
 	if (lock->ml.node == dlm->node_num)
 		mlog(0, "doing in-place convert for nonlocal lock\n");
 	lock->ml.type = type;
-	if (lock->lksb->flags & DLM_LKSB_PUT_LVB)
+	if (lock->lksb->flags & DLM_LKSB_PUT_LVB) {
+		mlog_bug_on_msg(!res->lvb, "lockname: %.*s\n", res->lockname.len,
+				res->lockname.name);
 		memcpy(res->lvb, lock->lksb->lvb, DLM_LVB_LEN);
+	}
 
status = DLM_NORMAL;
*call_ast = 1;
diff --git a/fs/ocfs2/dlm/dlmdebug.c 

Re: [Ocfs2-devel] [PATCH 1/1] o2dlm: free block mles during shutdown

2010-09-21 Thread Sunil Mushran
On 09/20/2010 09:15 PM, Wengang Wang wrote:
 Hi Srini,

 On 10-09-20 14:59, Srinivas Eeda wrote:

 If a node initiates shutdown after another node initiated the lock mastery
 process, this node might have created block mle but will not release it if it
 doesn't get the assert master from the other node. This causes block mles to
 lie around unfreed.

 This patch frees any block mles that exist on the master list after the node
 has sent DLM_EXIT_DOMAIN_MSG to the other nodes.

 Signed-off-by: Srinivas Eeda srinivas.e...@oracle.com
 ---
   fs/ocfs2/dlm/dlmcommon.h |1 +
   fs/ocfs2/dlm/dlmdomain.c |1 +
   fs/ocfs2/dlm/dlmmaster.c |   33 +
   3 files changed, 35 insertions(+), 0 deletions(-)

 diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
 index 4b6ae2c..48282dd 100644
 --- a/fs/ocfs2/dlm/dlmcommon.h
 +++ b/fs/ocfs2/dlm/dlmcommon.h
 @@ -1030,6 +1030,7 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm,
   struct dlm_lock_resource *res);
   void dlm_clean_master_list(struct dlm_ctxt *dlm,
 u8 dead_node);
 +void dlm_free_block_mles(struct dlm_ctxt *dlm);
   int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock);
   int __dlm_lockres_has_locks(struct dlm_lock_resource *res);
   int __dlm_lockres_unused(struct dlm_lock_resource *res);
 diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
 index 153abb5..8744fff 100644
 --- a/fs/ocfs2/dlm/dlmdomain.c
 +++ b/fs/ocfs2/dlm/dlmdomain.c
 @@ -693,6 +693,7 @@ void dlm_unregister_domain(struct dlm_ctxt *dlm)

  dlm_mark_domain_leaving(dlm);
  dlm_leave_domain(dlm);
 +dlm_free_block_mles(dlm);
  dlm_complete_dlm_shutdown(dlm);
  }
  dlm_put(dlm);
 diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
 index ffb4c68..5f4d6fd 100644
 --- a/fs/ocfs2/dlm/dlmmaster.c
 +++ b/fs/ocfs2/dlm/dlmmaster.c
 @@ -3433,3 +3433,36 @@ void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
  wake_up(&res->wq);
  wake_up(&dlm->migration_wq);
   }
 +
 +void dlm_free_block_mles(struct dlm_ctxt *dlm)
 +{
 +int i;
 +struct hlist_head *bucket;
 +struct dlm_master_list_entry *mle;
 +struct hlist_node *list;
 +
 +spin_lock(&dlm->spinlock);
 +spin_lock(&dlm->master_lock);
 +for (i = 0; i < DLM_HASH_BUCKETS; i++) {
 +bucket = dlm_master_hash(dlm, i);
 +hlist_for_each(list, bucket) {
  
 I guess it can be the last reference on mles?
 If so, don't we need hlist_for_each_safe here since we are removing entries?


Yes. Srini is cleaning up the patch.
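
To spell out the problem with the plain iterator: __dlm_unlink_mle() removes the
node the loop is currently standing on, and __dlm_put_mle() may drop the last
reference and free it, so hlist_for_each() would then follow a freed next pointer.
Roughly (same names as in the patch, sketch only):

	/* unsafe: the entry 'list' points to may be freed before the next step */
	hlist_for_each(list, bucket) {
		mle = hlist_entry(list, struct dlm_master_list_entry,
				  master_hash_node);
		__dlm_unlink_mle(dlm, mle);
		__dlm_put_mle(mle);		/* may be the last reference */
	}

	/* safe: 'tmp' caches list->next before the entry can go away */
	hlist_for_each_safe(list, tmp, bucket) {
		mle = hlist_entry(list, struct dlm_master_list_entry,
				  master_hash_node);
		__dlm_unlink_mle(dlm, mle);
		__dlm_put_mle(mle);
	}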




[Ocfs2-devel] [PATCH] o2dlm: force free mles during dlm exit

2010-09-21 Thread Srinivas Eeda
While umounting, a block mle doesn't get freed if the dlm is shut down after the
master request is received but before the assert master arrives. This results in
an unclean shutdown of the dlm domain.

This patch frees any mles still lying around after the other nodes have been
notified that we are exiting the dlm and the dlm state has been marked as leaving.
Only block mles are expected to remain, so we log an ERROR for other mle types
but still free them.

Signed-off-by: Srinivas Eeda srinivas.e...@oracle.com
---
 fs/ocfs2/dlm/dlmcommon.h |1 +
 fs/ocfs2/dlm/dlmdomain.c |1 +
 fs/ocfs2/dlm/dlmmaster.c |   34 ++
 3 files changed, 36 insertions(+), 0 deletions(-)

diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index 4b6ae2c..7652989 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -1030,6 +1030,7 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm,
 struct dlm_lock_resource *res);
 void dlm_clean_master_list(struct dlm_ctxt *dlm,
   u8 dead_node);
+void dlm_force_free_mles(struct dlm_ctxt *dlm);
 int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock);
 int __dlm_lockres_has_locks(struct dlm_lock_resource *res);
 int __dlm_lockres_unused(struct dlm_lock_resource *res);
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 153abb5..11a5c87 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -693,6 +693,7 @@ void dlm_unregister_domain(struct dlm_ctxt *dlm)
 
dlm_mark_domain_leaving(dlm);
dlm_leave_domain(dlm);
+   dlm_force_free_mles(dlm);
dlm_complete_dlm_shutdown(dlm);
}
dlm_put(dlm);
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index ffb4c68..156f420 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -3433,3 +3433,37 @@ void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
	wake_up(&res->wq);
	wake_up(&dlm->migration_wq);
 }
+
+void dlm_force_free_mles(struct dlm_ctxt *dlm)
+{
+   int i;
+   struct hlist_head *bucket;
+   struct dlm_master_list_entry *mle;
+   struct hlist_node *tmp, *list;
+
+   /* We notified all other nodes that we are exiting the domain and
+* marked the dlm state to DLM_CTXT_LEAVING. If any mles are still
+* around we force free them and wake any processes that are waiting
+* on the mles */
+	spin_lock(&dlm->spinlock);
+	spin_lock(&dlm->master_lock);
+	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
+		bucket = dlm_master_hash(dlm, i);
+		hlist_for_each_safe(list, tmp, bucket) {
+			mle = hlist_entry(list, struct dlm_master_list_entry,
+					  master_hash_node);
+			if (mle->type != DLM_MLE_BLOCK) {
+				mlog(ML_ERROR, "bad mle: %p\n", mle);
+				dlm_print_one_mle(mle);
+			}
+			atomic_set(&mle->woken, 1);
+			wake_up(&mle->wq);
+
+			__dlm_unlink_mle(dlm, mle);
+			__dlm_mle_detach_hb_events(dlm, mle);
+			__dlm_put_mle(mle);
+		}
+	}
+	spin_unlock(&dlm->master_lock);
+	spin_unlock(&dlm->spinlock);
+}
-- 
1.5.6.5




Re: [Ocfs2-devel] [PATCH] o2dlm: force free mles during dlm exit

2010-09-21 Thread Sunil Mushran
Comments inlined.

On 09/21/2010 03:30 PM, Srinivas Eeda wrote:
 While umounting, a block mle doesn't get freed if dlm is shutdown after
 master request is received but before assert master. This results in unclean
 shutdown of dlm domain.

 This patch frees all mles that lie around after other nodes were notified about
 exiting the dlm and marking dlm state as leaving. Only block mles are expected
 to be around, so we log ERROR for other mles but still free them.

 Signed-off-by: Srinivas Eeda srinivas.e...@oracle.com
 ---
   fs/ocfs2/dlm/dlmcommon.h |1 +
   fs/ocfs2/dlm/dlmdomain.c |1 +
   fs/ocfs2/dlm/dlmmaster.c |   34 ++
   3 files changed, 36 insertions(+), 0 deletions(-)

 diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
 index 4b6ae2c..7652989 100644
 --- a/fs/ocfs2/dlm/dlmcommon.h
 +++ b/fs/ocfs2/dlm/dlmcommon.h
 @@ -1030,6 +1030,7 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res);
   void dlm_clean_master_list(struct dlm_ctxt *dlm,
  u8 dead_node);
 +void dlm_force_free_mles(struct dlm_ctxt *dlm);
   int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock);
   int __dlm_lockres_has_locks(struct dlm_lock_resource *res);
   int __dlm_lockres_unused(struct dlm_lock_resource *res);
 diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
 index 153abb5..11a5c87 100644
 --- a/fs/ocfs2/dlm/dlmdomain.c
 +++ b/fs/ocfs2/dlm/dlmdomain.c
 @@ -693,6 +693,7 @@ void dlm_unregister_domain(struct dlm_ctxt *dlm)

   dlm_mark_domain_leaving(dlm);
   dlm_leave_domain(dlm);
 + dlm_force_free_mles(dlm);
   dlm_complete_dlm_shutdown(dlm);
   }
   dlm_put(dlm);
 diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
 index ffb4c68..156f420 100644
 --- a/fs/ocfs2/dlm/dlmmaster.c
 +++ b/fs/ocfs2/dlm/dlmmaster.c
 @@ -3433,3 +3433,37 @@ void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
  wake_up(&res->wq);
  wake_up(&dlm->migration_wq);
   }
 +
 +void dlm_force_free_mles(struct dlm_ctxt *dlm)
 +{
 + int i;
 + struct hlist_head *bucket;
 + struct dlm_master_list_entry *mle;
 + struct hlist_node *tmp, *list;
 +
 + /* We notified all other nodes that we are exiting the domain and
 +  * marked the dlm state to DLM_CTXT_LEAVING. If any mles are still
 +  * around we force free them and wake any processes that are waiting
 +  * on the mles */


Multi-line comments should be in the format below.

/*
  * Add comment here
  * and here.
  */

 + spin_lock(&dlm->spinlock);
 + spin_lock(&dlm->master_lock);


Add:

BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING);
BUG_ON((find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES));

We want to ensure that we don't call this function from some other
context. The BUG_ONs ensure that the state is leaving and that the
message has been sent to all nodes. The latter is important because
we are force freeing the mles with the understanding that the other
nodes are aware that this node has left the domain.

 + for (i = 0; i < DLM_HASH_BUCKETS; i++) {
 + 	bucket = dlm_master_hash(dlm, i);
 + 	hlist_for_each_safe(list, tmp, bucket) {
 + 		mle = hlist_entry(list, struct dlm_master_list_entry,
 + 				  master_hash_node);
 + 		if (mle->type != DLM_MLE_BLOCK) {
 + 			mlog(ML_ERROR, "bad mle: %p\n", mle);
 + 			dlm_print_one_mle(mle);
 + 		}
 + 		atomic_set(&mle->woken, 1);
 + 		wake_up(&mle->wq);
 +
 + 		__dlm_unlink_mle(dlm, mle);
 + 		__dlm_mle_detach_hb_events(dlm, mle);
 + 		__dlm_put_mle(mle);
 + 	}
 + }
 + spin_unlock(&dlm->master_lock);
 + spin_unlock(&dlm->spinlock);
 +}





[Ocfs2-devel] [PATCH 1/1] o2dlm: force free mles during dlm exit

2010-09-21 Thread Srinivas Eeda
While umounting, a block mle doesn't get freed if the dlm is shut down after the
master request is received but before the assert master arrives. This results in
an unclean shutdown of the dlm domain.

This patch frees any mles still lying around after the other nodes have been
notified that we are exiting the dlm and the dlm state has been marked as leaving.
Only block mles are expected to remain, so we log an ERROR for other mle types
but still free them.

Signed-off-by: Srinivas Eeda srinivas.e...@oracle.com
---
 fs/ocfs2/dlm/dlmcommon.h |1 +
 fs/ocfs2/dlm/dlmdomain.c |1 +
 fs/ocfs2/dlm/dlmmaster.c |   40 
 3 files changed, 42 insertions(+), 0 deletions(-)

diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index 4b6ae2c..7652989 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -1030,6 +1030,7 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm,
 struct dlm_lock_resource *res);
 void dlm_clean_master_list(struct dlm_ctxt *dlm,
   u8 dead_node);
+void dlm_force_free_mles(struct dlm_ctxt *dlm);
 int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock);
 int __dlm_lockres_has_locks(struct dlm_lock_resource *res);
 int __dlm_lockres_unused(struct dlm_lock_resource *res);
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 153abb5..11a5c87 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -693,6 +693,7 @@ void dlm_unregister_domain(struct dlm_ctxt *dlm)
 
dlm_mark_domain_leaving(dlm);
dlm_leave_domain(dlm);
+   dlm_force_free_mles(dlm);
dlm_complete_dlm_shutdown(dlm);
}
dlm_put(dlm);
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index ffb4c68..f564b0e 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -3433,3 +3433,43 @@ void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
	wake_up(&res->wq);
	wake_up(&dlm->migration_wq);
 }
+
+void dlm_force_free_mles(struct dlm_ctxt *dlm)
+{
+   int i;
+   struct hlist_head *bucket;
+   struct dlm_master_list_entry *mle;
+   struct hlist_node *tmp, *list;
+
+   /*
+* We notified all other nodes that we are exiting the domain and
+* marked the dlm state to DLM_CTXT_LEAVING. If any mles are still
+* around we force free them and wake any processes that are waiting
+* on the mles
+*/
+	spin_lock(&dlm->spinlock);
+	spin_lock(&dlm->master_lock);
+
+	BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING);
+	BUG_ON((find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES));
+
+	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
+		bucket = dlm_master_hash(dlm, i);
+		hlist_for_each_safe(list, tmp, bucket) {
+			mle = hlist_entry(list, struct dlm_master_list_entry,
+					  master_hash_node);
+			if (mle->type != DLM_MLE_BLOCK) {
+				mlog(ML_ERROR, "bad mle: %p\n", mle);
+				dlm_print_one_mle(mle);
+			}
+			atomic_set(&mle->woken, 1);
+			wake_up(&mle->wq);
+
+			__dlm_unlink_mle(dlm, mle);
+			__dlm_mle_detach_hb_events(dlm, mle);
+			__dlm_put_mle(mle);
+		}
+	}
+	spin_unlock(&dlm->master_lock);
+	spin_unlock(&dlm->spinlock);
+}
-- 
1.5.6.5




Re: [Ocfs2-devel] [PATCH 1/1] o2dlm: force free mles during dlm exit

2010-09-21 Thread Sunil Mushran
I hope this has been tested.

Acked-by: Sunil Mushran sunil.mush...@oracle.com

On 09/21/2010 04:27 PM, Srinivas Eeda wrote:
 While umounting, a block mle doesn't get freed if dlm is shutdown after
 master request is received but before assert master. This results in unclean
 shutdown of dlm domain.

 This patch frees all mles that lie around after other nodes were notified about
 exiting the dlm and marking dlm state as leaving. Only block mles are expected
 to be around, so we log ERROR for other mles but still free them.

 Signed-off-by: Srinivas Eeda srinivas.e...@oracle.com



___
Ocfs2-devel mailing list
Ocfs2-devel@oss.oracle.com
http://oss.oracle.com/mailman/listinfo/ocfs2-devel