From: Bob Peterson <[email protected]>

This is a small patch that optimizes the multiple-glock dequeue operations. It changes the unlock order (the holders are now dequeued in reverse array order, last first), which is more efficient and easier for lock debugging tools to unravel. It also eliminates the need for the temporary variable x, although that would likely have been optimized out anyway.
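(For illustration only, not part of the patch: the while (num_gh--) form walks the holder array from the last element back to the first, so the glocks come off in reverse array order. The standalone sketch below shows the same acquire-ascending/release-descending loop shape using POSIX mutexes as a stand-in for glocks; the names lock_all() and unlock_all() are made up for the example and do not exist in the kernel.)

	/* Illustrative only: release an array of locks in the reverse of
	 * the order they were taken, mirroring the while (n--) pattern
	 * used by the new gfs2_glock_dq_m(). */
	#include <pthread.h>
	#include <stdio.h>

	#define NUM_LOCKS 3

	static pthread_mutex_t locks[NUM_LOCKS] = {
		PTHREAD_MUTEX_INITIALIZER,
		PTHREAD_MUTEX_INITIALIZER,
		PTHREAD_MUTEX_INITIALIZER,
	};

	static void lock_all(unsigned int n, pthread_mutex_t *m)
	{
		unsigned int x;

		for (x = 0; x < n; x++)		/* acquire in ascending order */
			pthread_mutex_lock(&m[x]);
	}

	static void unlock_all(unsigned int n, pthread_mutex_t *m)
	{
		while (n--)			/* release in descending order */
			pthread_mutex_unlock(&m[n]);
	}

	int main(void)
	{
		lock_all(NUM_LOCKS, locks);
		unlock_all(NUM_LOCKS, locks);
		printf("locked and unlocked %d mutexes\n", NUM_LOCKS);
		return 0;
	}

Built with gcc -pthread, this just locks and unlocks the three mutexes; the only point is the loop shape, which matches the two hunks below.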
Signed-off-by: Bob Peterson <[email protected]>
Signed-off-by: Steven Whitehouse <[email protected]>

diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 3f45a14..8648409 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1248,10 +1248,8 @@ int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
 
 void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
 {
-	unsigned int x;
-
-	for (x = 0; x < num_gh; x++)
-		gfs2_glock_dq(&ghs[x]);
+	while (num_gh--)
+		gfs2_glock_dq(&ghs[num_gh]);
 }
 
 /**
@@ -1263,10 +1261,8 @@ void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
 
 void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
 {
-	unsigned int x;
-
-	for (x = 0; x < num_gh; x++)
-		gfs2_glock_dq_uninit(&ghs[x]);
+	while (num_gh--)
+		gfs2_glock_dq_uninit(&ghs[num_gh]);
 }
 
 void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
-- 
1.7.4
