From: Zi Yan <z...@nvidia.com>

Add two sysctl knobs to control the DMA page copy path:

vm.use_all_dma_chans: writing 1 grabs all usable DMA_MEMCPY channels up
front; writing 0 releases them again.
vm.limit_dma_chans: limits how many of the grabbed DMA channels a single
page copy is spread across (default: NUM_AVAIL_DMA_CHAN, i.e. 16).
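
For example (the values below are only illustrative; with
CONFIG_PROC_SYSCTL the knobs appear under /proc/sys/vm/):

    echo 1 > /proc/sys/vm/use_all_dma_chans
    echo 4 > /proc/sys/vm/limit_dma_chans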

Signed-off-by: Zi Yan <z...@nvidia.com>
---
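Note (not part of the commit): a minimal sketch of how a migration path
might use the new helper, assuming it falls back to the existing
multithreaded CPU copy when the DMA path returns an error;
copy_huge_page_accel() is a hypothetical name, not something this patch
adds:

	/*
	 * Illustrative only: try the DMA engine copy first and fall back
	 * to the multithreaded CPU copy on any failure.
	 */
	static void copy_huge_page_accel(struct page *dst, struct page *src)
	{
		int nr_pages = hpage_nr_pages(src);

		/* copy_page_dma() returns a negative value on failure */
		if (copy_page_dma(dst, src, nr_pages))
			copy_page_multithread(dst, src, nr_pages);
	}
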
 include/linux/highmem.h      |   1 +
 include/linux/sched/sysctl.h |   3 +
 kernel/sysctl.c              |  19 +++
 mm/copy_page.c               | 291 +++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 314 insertions(+)

diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 0f50dc5..119bb39 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -277,5 +277,6 @@ static inline void copy_highpage(struct page *to, struct page *from)
 #endif
 
 int copy_page_multithread(struct page *to, struct page *from, int nr_pages);
+int copy_page_dma(struct page *to, struct page *from, int nr_pages);
 
 #endif /* _LINUX_HIGHMEM_H */
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 99ce6d7..ce11241 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -90,4 +90,7 @@ extern int sched_energy_aware_handler(struct ctl_table *table, int write,
                                 loff_t *ppos);
 #endif
 
+extern int sysctl_dma_page_migration(struct ctl_table *table, int write,
+                                void __user *buffer, size_t *lenp,
+                                loff_t *ppos);
 #endif /* _LINUX_SCHED_SYSCTL_H */
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 0eae0b8..b8712eb 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -103,6 +103,8 @@
 
 extern int accel_page_copy;
 extern unsigned int limit_mt_num;
+extern int use_all_dma_chans;
+extern int limit_dma_chans;
 
 /* External variables not in a header file. */
 extern int suid_dumpable;
@@ -1451,6 +1453,23 @@ static struct ctl_table vm_table[] = {
                .extra1         = &zero,
        },
         {
+               .procname       = "use_all_dma_chans",
+               .data           = &use_all_dma_chans,
+               .maxlen         = sizeof(use_all_dma_chans),
+               .mode           = 0644,
+               .proc_handler   = sysctl_dma_page_migration,
+               .extra1         = &zero,
+               .extra2         = &one,
+        },
+        {
+               .procname       = "limit_dma_chans",
+               .data           = &limit_dma_chans,
+               .maxlen         = sizeof(limit_dma_chans),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+               .extra1         = &zero,
+        },
+        {
                .procname       = "hugetlb_shm_group",
                .data           = &sysctl_hugetlb_shm_group,
                .maxlen         = sizeof(gid_t),
diff --git a/mm/copy_page.c b/mm/copy_page.c
index 6665e3d..5e7a797 100644
--- a/mm/copy_page.c
+++ b/mm/copy_page.c
@@ -126,3 +126,294 @@ int copy_page_multithread(struct page *to, struct page *from, int nr_pages)
 
        return err;
 }
+/* ======================== DMA copy page ======================== */
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+
+#define NUM_AVAIL_DMA_CHAN 16
+
+
+int use_all_dma_chans = 0;
+int limit_dma_chans = NUM_AVAIL_DMA_CHAN;
+
+
+struct dma_chan *copy_chan[NUM_AVAIL_DMA_CHAN] = {0};
+struct dma_device *copy_dev[NUM_AVAIL_DMA_CHAN] = {0};
+
+
+
+#ifdef CONFIG_PROC_SYSCTL
+extern int proc_dointvec_minmax(struct ctl_table *table, int write,
+                 void __user *buffer, size_t *lenp, loff_t *ppos);
+int sysctl_dma_page_migration(struct ctl_table *table, int write,
+                                void __user *buffer, size_t *lenp,
+                                loff_t *ppos)
+{
+       int err = 0;
+       int use_all_dma_chans_prior_val = use_all_dma_chans;
+       dma_cap_mask_t copy_mask;
+
+       if (write && !capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+       if (err < 0)
+               return err;
+       if (write) {
+               /* Grab all DMA channels  */
+               if (use_all_dma_chans_prior_val == 0 && use_all_dma_chans == 1) {
+                       int i;
+
+                       dma_cap_zero(copy_mask);
+                       dma_cap_set(DMA_MEMCPY, copy_mask);
+
+                       dmaengine_get();
+                       for (i = 0; i < NUM_AVAIL_DMA_CHAN; ++i) {
+                               if (!copy_chan[i]) {
+                                       copy_chan[i] = dma_request_channel(copy_mask, NULL, NULL);
+                               }
+                               if (!copy_chan[i]) {
+                                       pr_err("%s: cannot grab channel: %d\n", __func__, i);
+                                       continue;
+                               }
+
+                               copy_dev[i] = copy_chan[i]->device;
+
+                               if (!copy_dev[i]) {
+                                       pr_err("%s: no device: %d\n", __func__, i);
+                                       continue;
+                               }
+                       }
+
+               }
+               /* Release all DMA channels  */
+               else if (use_all_dma_chans_prior_val == 1 && use_all_dma_chans == 0) {
+                       int i;
+
+                       for (i = 0; i < NUM_AVAIL_DMA_CHAN; ++i) {
+                               if (copy_chan[i]) {
+                                       dma_release_channel(copy_chan[i]);
+                                       copy_chan[i] = NULL;
+                                       copy_dev[i] = NULL;
+                               }
+                       }
+
+                       dmaengine_put();
+               }
+
+               if (err)
+                       use_all_dma_chans = use_all_dma_chans_prior_val;
+       }
+       return err;
+}
+
+#endif
+
+static int copy_page_dma_once(struct page *to, struct page *from, int nr_pages)
+{
+       static struct dma_chan *copy_chan = NULL;
+       struct dma_device *device = NULL;
+       struct dma_async_tx_descriptor *tx = NULL;
+       dma_cookie_t cookie;
+       enum dma_ctrl_flags flags = 0;
+       struct dmaengine_unmap_data *unmap = NULL;
+       dma_cap_mask_t mask;
+       int ret_val = 0;
+
+
+       dma_cap_zero(mask);
+       dma_cap_set(DMA_MEMCPY, mask);
+
+       dmaengine_get();
+
+       copy_chan = dma_request_channel(mask, NULL, NULL);
+
+       if (!copy_chan) {
+               pr_err("%s: cannot get a channel\n", __func__);
+               ret_val = -1;
+               goto no_chan;
+       }
+
+       device = copy_chan->device;
+
+       if (!device) {
+               pr_err("%s: cannot get a device\n", __func__);
+               ret_val = -2;
+               goto release;
+       }
+
+       unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
+
+       if (!unmap) {
+               pr_err("%s: cannot get unmap data\n", __func__);
+               ret_val = -3;
+               goto release;
+       }
+
+       unmap->to_cnt = 1;
+       unmap->addr[0] = dma_map_page(device->dev, from, 0, PAGE_SIZE*nr_pages,
+                                         DMA_TO_DEVICE);
+       unmap->from_cnt = 1;
+       unmap->addr[1] = dma_map_page(device->dev, to, 0, PAGE_SIZE*nr_pages,
+                                         DMA_FROM_DEVICE);
+       unmap->len = PAGE_SIZE*nr_pages;
+
+       tx = device->device_prep_dma_memcpy(copy_chan,
+                                               unmap->addr[1],
+                                               unmap->addr[0], unmap->len,
+                                               flags);
+
+       if (!tx) {
+               pr_err("%s: null tx descriptor\n", __func__);
+               ret_val = -4;
+               goto unmap_dma;
+       }
+
+       cookie = tx->tx_submit(tx);
+
+       if (dma_submit_error(cookie)) {
+               pr_err("%s: submission error\n", __func__);
+               ret_val = -5;
+               goto unmap_dma;
+       }
+
+       if (dma_sync_wait(copy_chan, cookie) != DMA_COMPLETE) {
+               pr_err("%s: dma does not complete properly\n", __func__);
+               ret_val = -6;
+       }
+
+unmap_dma:
+       dmaengine_unmap_put(unmap);
+release:
+       if (copy_chan) {
+               dma_release_channel(copy_chan);
+       }
+no_chan:
+       dmaengine_put();
+
+       return ret_val;
+}
+
+static int copy_page_dma_always(struct page *to, struct page *from, int nr_pages)
+{
+       struct dma_async_tx_descriptor *tx[NUM_AVAIL_DMA_CHAN] = {0};
+       dma_cookie_t cookie[NUM_AVAIL_DMA_CHAN];
+       enum dma_ctrl_flags flags[NUM_AVAIL_DMA_CHAN] = {0};
+       struct dmaengine_unmap_data *unmap[NUM_AVAIL_DMA_CHAN] = {0};
+       int ret_val = 0;
+       int total_available_chans = NUM_AVAIL_DMA_CHAN;
+       int i;
+       size_t page_offset;
+
+       for (i = 0; i < NUM_AVAIL_DMA_CHAN; ++i) {
+               if (!copy_chan[i]) {
+                       total_available_chans = i;
+               }
+       }
+       if (total_available_chans != NUM_AVAIL_DMA_CHAN) {
+               pr_err("%d channels are missing\n", NUM_AVAIL_DMA_CHAN - total_available_chans);
+       }
+
+       total_available_chans = min_t(int, total_available_chans, limit_dma_chans);
+
+       /* round down to closest 2^x value  */
+       total_available_chans = 1<<ilog2(total_available_chans);
+
+       if ((nr_pages != 1) && (nr_pages % total_available_chans != 0))
+               return -5;
+
+       for (i = 0; i < total_available_chans; ++i) {
+               unmap[i] = dmaengine_get_unmap_data(copy_dev[i]->dev, 2, GFP_NOWAIT);
+               if (!unmap[i]) {
+                       pr_err("%s: no unmap data at chan %d\n", __func__, i);
+                       ret_val = -3;
+                       goto unmap_dma;
+               }
+       }
+
+       for (i = 0; i < total_available_chans; ++i) {
+               if (nr_pages == 1) {
+                       page_offset = PAGE_SIZE / total_available_chans;
+
+                       unmap[i]->to_cnt = 1;
+                       unmap[i]->addr[0] = dma_map_page(copy_dev[i]->dev, from, page_offset*i,
+                                                         page_offset,
+                                                         DMA_TO_DEVICE);
+                       unmap[i]->from_cnt = 1;
+                       unmap[i]->addr[1] = dma_map_page(copy_dev[i]->dev, to, page_offset*i,
+                                                         page_offset,
+                                                         DMA_FROM_DEVICE);
+                       unmap[i]->len = page_offset;
+               } else {
+                       page_offset = nr_pages / total_available_chans;
+
+                       unmap[i]->to_cnt = 1;
+                       unmap[i]->addr[0] = dma_map_page(copy_dev[i]->dev,
+                                                               from + page_offset*i,
+                                                               0,
+                                                               PAGE_SIZE*page_offset,
+                                                               DMA_TO_DEVICE);
+                       unmap[i]->from_cnt = 1;
+                       unmap[i]->addr[1] = dma_map_page(copy_dev[i]->dev,
+                                                               to + page_offset*i,
+                                                               0,
+                                                               PAGE_SIZE*page_offset,
+                                                               DMA_FROM_DEVICE);
+                       unmap[i]->len = PAGE_SIZE*page_offset;
+               }
+       }
+
+       for (i = 0; i < total_available_chans; ++i) {
+               tx[i] = copy_dev[i]->device_prep_dma_memcpy(copy_chan[i],
+                                                       unmap[i]->addr[1],
+                                                       unmap[i]->addr[0],
+                                                       unmap[i]->len,
+                                                       flags[i]);
+               if (!tx[i]) {
+                       pr_err("%s: no tx descriptor at chan %d\n", __func__, i);
+                       ret_val = -4;
+                       goto unmap_dma;
+               }
+       }
+
+       for (i = 0; i < total_available_chans; ++i) {
+               cookie[i] = tx[i]->tx_submit(tx[i]);
+
+               if (dma_submit_error(cookie[i])) {
+                       pr_err("%s: submission error at chan %d\n", __func__, i);
+                       ret_val = -5;
+                       goto unmap_dma;
+               }
+
+               dma_async_issue_pending(copy_chan[i]);
+       }
+
+       for (i = 0; i < total_available_chans; ++i) {
+               if (dma_sync_wait(copy_chan[i], cookie[i]) != DMA_COMPLETE) {
+                       ret_val = -6;
+                       pr_err("%s: dma does not complete at chan %d\n", __func__, i);
+               }
+       }
+
+unmap_dma:
+
+       for (i = 0; i < total_available_chans; ++i) {
+               if (unmap[i])
+                       dmaengine_unmap_put(unmap[i]);
+       }
+
+       return ret_val;
+}
+
+int copy_page_dma(struct page *to, struct page *from, int nr_pages)
+{
+       BUG_ON(hpage_nr_pages(from) != nr_pages);
+       BUG_ON(hpage_nr_pages(to) != nr_pages);
+
+       if (!use_all_dma_chans) {
+               return copy_page_dma_once(to, from, nr_pages);
+       }
+
+       return copy_page_dma_always(to, from, nr_pages);
+}
-- 
2.7.4
