On Fri 22-05-15 17:13:48, Tejun Heo wrote:
> There are several places in fs/fs-writeback.c which queue
> wb_writeback_work without checking whether the target wb
> (bdi_writeback) has dirty inodes or not.  The only thing
> wb_writeback_work does is write back the dirty inodes for the target
> wb, so queueing a work item for a clean wb is essentially a noop.
> There are some side effects, such as bandwidth stats being updated
> and tracepoints being triggered, but these don't affect the operation
> in any meaningful way.
> 
> This patch makes writeback_inodes_sb_nr() and sync_inodes_sb() skip
> wb_queue_work() if the target bdi is clean.  Also, it moves the
> dirtiness check from wakeup_flusher_threads() to
> __wb_start_writeback() so that all its callers benefit from the check.
> 
> While the overhead incurred by scheduling a noop work item isn't
> currently significant, it may become higher with cgroup writeback
> support, as we may end up issuing noop work items to a lot of clean
> wb's.
> 
> Signed-off-by: Tejun Heo <[email protected]>
> Cc: Jens Axboe <[email protected]>
> Cc: Jan Kara <[email protected]>

Looks good. You can add:

Reviewed-by: Jan Kara <[email protected]>

                                                                Honza

> ---
>  fs/fs-writeback.c | 18 ++++++++++--------
>  1 file changed, 10 insertions(+), 8 deletions(-)
> 
> diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
> index c98d392..921a9e4 100644
> --- a/fs/fs-writeback.c
> +++ b/fs/fs-writeback.c
> @@ -189,6 +189,9 @@ static void __wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
>  {
>       struct wb_writeback_work *work;
>  
> +     if (!wb_has_dirty_io(wb))
> +             return;
> +
>       /*
>        * This is WB_SYNC_NONE writeback, so if allocation fails just
>        * wakeup the thread for old dirty data writeback
> @@ -1215,11 +1218,8 @@ void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
>               nr_pages = get_nr_dirty_pages();
>  
>       rcu_read_lock();
> -     list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
> -             if (!bdi_has_dirty_io(bdi))
> -                     continue;
> +     list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
>               __wb_start_writeback(&bdi->wb, nr_pages, false, reason);
> -     }
>       rcu_read_unlock();
>  }
>  
> @@ -1512,11 +1512,12 @@ void writeback_inodes_sb_nr(struct super_block *sb,
>               .nr_pages               = nr,
>               .reason                 = reason,
>       };
> +     struct backing_dev_info *bdi = sb->s_bdi;
>  
> -     if (sb->s_bdi == &noop_backing_dev_info)
> +     if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info)
>               return;
>       WARN_ON(!rwsem_is_locked(&sb->s_umount));
> -     wb_queue_work(&sb->s_bdi->wb, &work);
> +     wb_queue_work(&bdi->wb, &work);
>       wait_for_completion(&done);
>  }
>  EXPORT_SYMBOL(writeback_inodes_sb_nr);
> @@ -1594,13 +1595,14 @@ void sync_inodes_sb(struct super_block *sb)
>               .reason         = WB_REASON_SYNC,
>               .for_sync       = 1,
>       };
> +     struct backing_dev_info *bdi = sb->s_bdi;
>  
>       /* Nothing to do? */
> -     if (sb->s_bdi == &noop_backing_dev_info)
> +     if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info)
>               return;
>       WARN_ON(!rwsem_is_locked(&sb->s_umount));
>  
> -     wb_queue_work(&sb->s_bdi->wb, &work);
> +     wb_queue_work(&bdi->wb, &work);
>       wait_for_completion(&done);
>  
>       wait_sb_inodes(sb);
> -- 
> 2.4.0
> 
-- 
Jan Kara <[email protected]>
SUSE Labs, CR
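
For readers skimming the thread, here is a condensed sketch of the
pattern the patch establishes, with names taken from the diff above and
the unrelated parts of the function bodies elided (illustrative only,
not the literal applied code):

	/*
	 * Bail out before allocating or queueing a work item when the
	 * target wb has nothing dirty, so clean wb's no longer receive
	 * noop work.
	 */
	static void __wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
					 bool range_cyclic, enum wb_reason reason)
	{
		struct wb_writeback_work *work;

		if (!wb_has_dirty_io(wb))
			return;

		/* ... allocate and queue the work item as before ... */
	}

	/*
	 * The sb-level entry points writeback_inodes_sb_nr() and
	 * sync_inodes_sb() apply the same idea before wb_queue_work():
	 */
	struct backing_dev_info *bdi = sb->s_bdi;

	if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info)
		return;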