Re: [Cluster-devel] [GFS2 PATCH] GFS2: Made logd daemon take into account log demand

2017-01-06 Thread Steven Whitehouse

Hi,

Both patches look good to me,

Steve.


On 05/01/17 21:11, Bob Peterson wrote:

Hi,

Before this patch, the logd daemon only tried to flush things when
the log blocks pinned exceeded a certain threshold. But when we're
deleting very large files, it may require a huge number of journal
blocks, and that, in turn, may exceed the threshold. This patch
factors that into account.

Signed-off-by: Bob Peterson 
---
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index a6a3389..00d8dc2 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -794,6 +794,7 @@ struct gfs2_sbd {
atomic_t sd_log_thresh1;
atomic_t sd_log_thresh2;
atomic_t sd_log_blks_free;
+   atomic_t sd_log_blks_needed;
wait_queue_head_t sd_log_waitq;
wait_queue_head_t sd_logd_waitq;
  
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c

index e58ccef0..4df349c 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -349,6 +349,7 @@ int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int 
blks)
if (gfs2_assert_warn(sdp, blks) ||
gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
return -EINVAL;
+   atomic_add(blks, &sdp->sd_log_blks_needed);
  retry:
	free_blocks = atomic_read(&sdp->sd_log_blks_free);
	if (unlikely(free_blocks <= wanted)) {
@@ -370,6 +371,7 @@ retry:
	wake_up(&sdp->sd_reserving_log_wait);
	goto retry;
	}
+   atomic_sub(blks, &sdp->sd_log_blks_needed);
	trace_gfs2_log_blocks(sdp, -blks);
  
  	/*

@@ -891,13 +893,16 @@ void gfs2_log_shutdown(struct gfs2_sbd *sdp)
  
  static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)

  {
-   return (atomic_read(&sdp->sd_log_pinned) >= atomic_read(&sdp->sd_log_thresh1));
+   return (atomic_read(&sdp->sd_log_pinned) +
+   atomic_read(&sdp->sd_log_blks_needed) >=
+   atomic_read(&sdp->sd_log_thresh1));
  }
  
  static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)

  {
	unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);
-   return used_blocks >= atomic_read(&sdp->sd_log_thresh2);
+   return used_blocks + atomic_read(&sdp->sd_log_blks_needed) >=
+   atomic_read(&sdp->sd_log_thresh2);
  }
  
  /**

diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index ff72ac6..86281a9 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -683,6 +683,7 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
goto fail_jindex;
}
  
+	atomic_set(&sdp->sd_log_blks_needed, 0);

	if (sdp->sd_args.ar_spectator) {
	sdp->sd_jdesc = gfs2_jdesc_find(sdp, 0);
	atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks);





[Cluster-devel] [GFS2 PATCH] GFS2: Made logd daemon take into account log demand

2017-01-05 Thread Bob Peterson
Hi,

Before this patch, the logd daemon only tried to flush things when
the log blocks pinned exceeded a certain threshold. But when we're
deleting very large files, it may require a huge number of journal
blocks, and that, in turn, may exceed the threshold. This patch
factors that into account.

Signed-off-by: Bob Peterson 
---
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index a6a3389..00d8dc2 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -794,6 +794,7 @@ struct gfs2_sbd {
atomic_t sd_log_thresh1;
atomic_t sd_log_thresh2;
atomic_t sd_log_blks_free;
+   atomic_t sd_log_blks_needed;
wait_queue_head_t sd_log_waitq;
wait_queue_head_t sd_logd_waitq;
 
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index e58ccef0..4df349c 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -349,6 +349,7 @@ int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int 
blks)
if (gfs2_assert_warn(sdp, blks) ||
gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
return -EINVAL;
+   atomic_add(blks, &sdp->sd_log_blks_needed);
 retry:
	free_blocks = atomic_read(&sdp->sd_log_blks_free);
	if (unlikely(free_blocks <= wanted)) {
@@ -370,6 +371,7 @@ retry:
	wake_up(&sdp->sd_reserving_log_wait);
	goto retry;
	}
+   atomic_sub(blks, &sdp->sd_log_blks_needed);
	trace_gfs2_log_blocks(sdp, -blks);
 
/*
@@ -891,13 +893,16 @@ void gfs2_log_shutdown(struct gfs2_sbd *sdp)
 
 static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
 {
-   return (atomic_read(&sdp->sd_log_pinned) >= atomic_read(&sdp->sd_log_thresh1));
+   return (atomic_read(&sdp->sd_log_pinned) +
+   atomic_read(&sdp->sd_log_blks_needed) >=
+   atomic_read(&sdp->sd_log_thresh1));
 }
 
 static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
 {
	unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);
-   return used_blocks >= atomic_read(&sdp->sd_log_thresh2);
+   return used_blocks + atomic_read(&sdp->sd_log_blks_needed) >=
+   atomic_read(&sdp->sd_log_thresh2);
 }
 
 /**
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index ff72ac6..86281a9 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -683,6 +683,7 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
goto fail_jindex;
}
 
+   atomic_set(&sdp->sd_log_blks_needed, 0);
	if (sdp->sd_args.ar_spectator) {
	sdp->sd_jdesc = gfs2_jdesc_find(sdp, 0);
	atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks);