[PATCH 15/21] staging: lustre: ptlrpc: list_for_each improvements.
1/ use list_for_each_entry_safe() instead of list_for_each_safe() and similar.
2/ use list_first_entry() and list_last_entry() where appropriate.
3/ When removing everything from a list, use
     while ((x = list_first_entry_or_null()) {
   as it makes the intent clear
4/ No need to take a spinlock in a structure that is about to be freed -
   we must have exclusive access at this stage.

Signed-off-by: NeilBrown
---
 drivers/staging/lustre/lustre/ptlrpc/client.c  | 89 +++-
 drivers/staging/lustre/lustre/ptlrpc/import.c  | 34 +++--
 drivers/staging/lustre/lustre/ptlrpc/pinger.c  |  8 +-
 drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c | 15 +---
 drivers/staging/lustre/lustre/ptlrpc/recover.c | 26 +++
 drivers/staging/lustre/lustre/ptlrpc/service.c | 13 +---
 6 files changed, 59 insertions(+), 126 deletions(-)

diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index d4c641d2480c..ca096fadb9c0 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -504,19 +504,16 @@ void ptlrpc_request_cache_free(struct ptlrpc_request *req)
  */
 void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool)
 {
-	struct list_head *l, *tmp;
 	struct ptlrpc_request *req;

-	spin_lock(&pool->prp_lock);
-	list_for_each_safe(l, tmp, &pool->prp_req_list) {
-		req = list_entry(l, struct ptlrpc_request, rq_list);
+	while ((req = list_first_entry_or_null(&pool->prp_req_list,
+					       struct ptlrpc_request, rq_list))) {
 		list_del(&req->rq_list);
 		LASSERT(req->rq_reqbuf);
 		LASSERT(req->rq_reqbuf_len == pool->prp_rq_size);
 		kvfree(req->rq_reqbuf);
 		ptlrpc_request_cache_free(req);
 	}
-	spin_unlock(&pool->prp_lock);
 	kfree(pool);
 }
 EXPORT_SYMBOL(ptlrpc_free_rq_pool);
@@ -656,16 +653,13 @@ static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request)
 void ptlrpc_add_unreplied(struct ptlrpc_request *req)
 {
 	struct obd_import *imp = req->rq_import;
-	struct list_head *tmp;
 	struct ptlrpc_request *iter;

 	assert_spin_locked(&imp->imp_lock);
 	LASSERT(list_empty(&req->rq_unreplied_list));

 	/* unreplied list is sorted by xid in ascending order */
-	list_for_each_prev(tmp, &imp->imp_unreplied_list) {
-		iter = list_entry(tmp, struct ptlrpc_request,
-				  rq_unreplied_list);
+	list_for_each_entry_reverse(iter, &imp->imp_unreplied_list, rq_unreplied_list) {
 		LASSERT(req->rq_xid != iter->rq_xid);

 		if (req->rq_xid < iter->rq_xid)
@@ -1001,18 +995,14 @@ struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func,
  */
 void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
 {
-	struct list_head *tmp;
-	struct list_head *next;
+	struct ptlrpc_request *req;
 	int expected_phase;
 	int n = 0;

 	/* Requests on the set should either all be completed, or all be new */
 	expected_phase = (atomic_read(&set->set_remaining) == 0) ?
			 RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
-	list_for_each(tmp, &set->set_requests) {
-		struct ptlrpc_request *req =
-			list_entry(tmp, struct ptlrpc_request, rq_set_chain);
-
+	list_for_each_entry(req, &set->set_requests, rq_set_chain) {
 		LASSERT(req->rq_phase == expected_phase);
 		n++;
 	}
@@ -1021,9 +1011,9 @@ void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
		 atomic_read(&set->set_remaining) == n, "%d / %d\n",
		 atomic_read(&set->set_remaining), n);

-	list_for_each_safe(tmp, next, &set->set_requests) {
-		struct ptlrpc_request *req =
-			list_entry(tmp, struct ptlrpc_request, rq_set_chain);
+	while ((req = list_first_entry_or_null(&set->set_requests,
+					       struct ptlrpc_request,
+					       rq_set_chain))) {
 		list_del_init(&req->rq_set_chain);

 		LASSERT(req->rq_phase == expected_phase);
@@ -1640,7 +1630,7 @@ static inline int ptlrpc_set_producer(struct ptlrpc_request_set *set)
  */
 int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
 {
-	struct list_head *tmp, *next;
+	struct ptlrpc_request *req, *next;
 	struct list_head comp_reqs;
 	int force_timer_recalc = 0;

@@ -1648,9 +1638,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
 		return 1;

 	INIT_LIST_HEAD(&comp_reqs);
-	list_for_each_safe(tmp, next, &set->set_requests) {
-		struct ptlrpc_request *req =
-			list_entry(tmp, struct ptlrpc_request, rq_set_chain);
+	list_for_each_entry_safe(req,