Change dm kcopyd so that it calls blkdev_issue_copy with an asynchronous
callback. There can be a large number of pending kcopyd requests and holding
a process context for each of them may put too much load on the workqueue
subsystem.

This patch changes it so that blkdev_issue_copy returns after it submitted
the requests and copy_offload_callback is called when the copy operation
finishes.

Signed-off-by: Mikulas Patocka <mpato...@redhat.com>

---
 drivers/md/dm-kcopyd.c |   33 ++++++++++++++-------------------
 1 file changed, 14 insertions(+), 19 deletions(-)

Index: linux-3.16-rc5/drivers/md/dm-kcopyd.c
===================================================================
--- linux-3.16-rc5.orig/drivers/md/dm-kcopyd.c  2014-07-15 19:24:20.000000000 +0200
+++ linux-3.16-rc5/drivers/md/dm-kcopyd.c       2014-07-15 19:24:54.000000000 +0200
@@ -361,8 +361,6 @@ struct kcopyd_job {
        sector_t progress;
 
        struct kcopyd_job *master_job;
-
-       struct work_struct copy_work;
 };
 
 static struct kmem_cache *_job_cache;
@@ -628,8 +626,9 @@ static void segment_complete(int read_er
        struct kcopyd_job *sub_job = (struct kcopyd_job *) context;
        struct kcopyd_job *job = sub_job->master_job;
        struct dm_kcopyd_client *kc = job->kc;
+       unsigned long flags;
 
-       spin_lock(&job->lock);
+       spin_lock_irqsave(&job->lock, flags);
 
        /* update the error */
        if (read_err)
@@ -653,7 +652,7 @@ static void segment_complete(int read_er
                        job->progress += count;
                }
        }
-       spin_unlock(&job->lock);
+       spin_unlock_irqrestore(&job->lock, flags);
 
        if (count) {
                int i;
@@ -714,29 +713,25 @@ static void submit_job(struct kcopyd_job
        }
 }
 
-static void copy_offload_work(struct work_struct *work)
+static void copy_offload_callback(void *ptr, int error)
 {
-       struct kcopyd_job *job = container_of(work, struct kcopyd_job, copy_work);
-       sector_t copied;
+       struct kcopyd_job *job = ptr;
 
-       blkdev_issue_copy(job->source.bdev, job->source.sector,
-                         job->dests[0].bdev, job->dests[0].sector,
-                         job->source.count,
-                         GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN,
-                         NULL, NULL, &copied);
-
-       job->source.sector += copied;
-       job->source.count -= copied;
-       job->dests[0].sector += copied;
-       job->dests[0].count -= copied;
+       job->source.sector += job->progress;
+       job->source.count -= job->progress;
+       job->dests[0].sector += job->progress;
+       job->dests[0].count -= job->progress;
 
        submit_job(job);
 }
 
 static void try_copy_offload(struct kcopyd_job *job)
 {
-       INIT_WORK(&job->copy_work, copy_offload_work);
-       queue_work(job->kc->kcopyd_wq, &job->copy_work);
+       blkdev_issue_copy(job->source.bdev, job->source.sector,
+                         job->dests[0].bdev, job->dests[0].sector,
+                         job->source.count,
+                         GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN,
+                         copy_offload_callback, job, &job->progress);
 }
 
 int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,

--
To unsubscribe from this list: send the line "unsubscribe linux-scsi" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to