From: Zi Yan <z...@nvidia.com>

Add copy_page_lists_mt() to prepare for migrate_page_concur(), which
migrates multiple pages at the same time.
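
As a rough illustration of the splitting policy (not part of this patch;
plan_split() below is a hypothetical userspace helper, shown only for
reference): when there are fewer pages than worker threads, every thread
copies an equal slice of every page; otherwise whole pages are handed
out, with the first (nr_items % nthreads) threads taking one extra page.

  /*
   * Stand-alone userspace sketch of the work-splitting policy used by
   * copy_page_lists_mt(); plan_split() does not exist in the kernel.
   */
  #include <stdio.h>

  static void plan_split(int nr_items, int nthreads)
  {
          if (nr_items < nthreads) {
                  /* Every thread copies a 1/nthreads slice of every page. */
                  printf("%d pages, %d threads: each thread copies 1/%d of each page\n",
                         nr_items, nthreads, nthreads);
                  return;
          }

          for (int t = 0; t < nthreads; t++) {
                  /* The first (nr_items % nthreads) threads take one extra page. */
                  int pages = nr_items / nthreads + (t < nr_items % nthreads ? 1 : 0);

                  printf("thread %d copies %d whole page(s)\n", t, pages);
          }
  }

  int main(void)
  {
          plan_split(3, 8);       /* fewer pages than threads */
          plan_split(10, 4);      /* more pages than threads */
          return 0;
  }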

Signed-off-by: Zi Yan <z...@nvidia.com>
---
 mm/copy_page.c | 123 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 mm/internal.h  |   2 +
 2 files changed, 125 insertions(+)

diff --git a/mm/copy_page.c b/mm/copy_page.c
index 84f1c02..d2fd67e 100644
--- a/mm/copy_page.c
+++ b/mm/copy_page.c
@@ -126,6 +126,129 @@ int copy_page_multithread(struct page *to, struct page *from, int nr_pages)
 
        return err;
 }
+
+int copy_page_lists_mt(struct page **to, struct page **from, int nr_items)
+{
+       int err = 0;
+       unsigned int total_mt_num = limit_mt_num;
+       int to_node = page_to_nid(*to);
+       int i;
+       struct copy_page_info *work_items[NR_CPUS] = {0};
+       const struct cpumask *per_node_cpumask = cpumask_of_node(to_node);
+       int cpu_id_list[NR_CPUS] = {0};
+       int cpu;
+       int max_items_per_thread;
+       int item_idx;
+
+       total_mt_num = min_t(unsigned int, total_mt_num,
+                                                cpumask_weight(per_node_cpumask));
+
+
+       if (total_mt_num > num_online_cpus())
+               return -ENODEV;
+
+       /* Each thread gets part of each page if nr_items < total_mt_num */
+       if (nr_items < total_mt_num)
+               max_items_per_thread = nr_items;
+       else
+               max_items_per_thread = (nr_items / total_mt_num) +
+                               ((nr_items % total_mt_num)?1:0);
+
+
+       for (cpu = 0; cpu < total_mt_num; ++cpu) {
+               work_items[cpu] = kzalloc(sizeof(struct copy_page_info) +
+                                       sizeof(struct copy_item)*max_items_per_thread, GFP_KERNEL);
+               if (!work_items[cpu]) {
+                       err = -ENOMEM;
+                       goto free_work_items;
+               }
+       }
+
+       i = 0;
+       for_each_cpu(cpu, per_node_cpumask) {
+               if (i >= total_mt_num)
+                       break;
+               cpu_id_list[i] = cpu;
+               ++i;
+       }
+
+       if (nr_items < total_mt_num) {
+               for (cpu = 0; cpu < total_mt_num; ++cpu) {
+                       INIT_WORK((struct work_struct *)work_items[cpu],
+                                         copy_page_work_queue_thread);
+                       work_items[cpu]->num_items = max_items_per_thread;
+               }
+
+               for (item_idx = 0; item_idx < nr_items; ++item_idx) {
+                       unsigned long chunk_size = PAGE_SIZE * hpage_nr_pages(from[item_idx]) / total_mt_num;
+                       char *vfrom = kmap(from[item_idx]);
+                       char *vto = kmap(to[item_idx]);
+                       VM_BUG_ON(PAGE_SIZE * hpage_nr_pages(from[item_idx]) % total_mt_num);
+                       BUG_ON(hpage_nr_pages(to[item_idx]) !=
+                                  hpage_nr_pages(from[item_idx]));
+
+                       for (cpu = 0; cpu < total_mt_num; ++cpu) {
+                               work_items[cpu]->item_list[item_idx].to = vto + chunk_size * cpu;
+                               work_items[cpu]->item_list[item_idx].from = vfrom + chunk_size * cpu;
+                               work_items[cpu]->item_list[item_idx].chunk_size =
+                                       chunk_size;
+                       }
+               }
+
+               for (cpu = 0; cpu < total_mt_num; ++cpu)
+                       queue_work_on(cpu_id_list[cpu],
+                                                 system_highpri_wq,
+                                                 (struct work_struct *)work_items[cpu]);
+       } else {
+               item_idx = 0;
+               for (cpu = 0; cpu < total_mt_num; ++cpu) {
+                       int num_xfer_per_thread = nr_items / total_mt_num;
+                       int per_cpu_item_idx;
+
+                       if (cpu < (nr_items % total_mt_num))
+                               num_xfer_per_thread += 1;
+
+                       INIT_WORK((struct work_struct *)work_items[cpu],
+                                         copy_page_work_queue_thread);
+
+                       work_items[cpu]->num_items = num_xfer_per_thread;
+                       for (per_cpu_item_idx = 0; per_cpu_item_idx < work_items[cpu]->num_items;
+                                ++per_cpu_item_idx, ++item_idx) {
+                               work_items[cpu]->item_list[per_cpu_item_idx].to = kmap(to[item_idx]);
+                               work_items[cpu]->item_list[per_cpu_item_idx].from =
+                                       kmap(from[item_idx]);
+                               work_items[cpu]->item_list[per_cpu_item_idx].chunk_size =
+                                       PAGE_SIZE * hpage_nr_pages(from[item_idx]);
+
+                               BUG_ON(hpage_nr_pages(to[item_idx]) !=
+                                          hpage_nr_pages(from[item_idx]));
+                       }
+
+                       queue_work_on(cpu_id_list[cpu],
+                                                 system_highpri_wq,
+                                                 (struct work_struct *)work_items[cpu]);
+               }
+               if (item_idx != nr_items)
+                       pr_err("%s: only %d out of %d pages are transferred\n", __func__,
+                               item_idx, nr_items);
+       }
+
+       /* Wait until it finishes */
+       for (i = 0; i < total_mt_num; ++i)
+               flush_work((struct work_struct *)work_items[i]);
+
+       for (i = 0; i < nr_items; ++i) {
+               kunmap(to[i]);
+               kunmap(from[i]);
+       }
+
+free_work_items:
+       for (cpu = 0; cpu < total_mt_num; ++cpu)
+               if (work_items[cpu])
+                       kfree(work_items[cpu]);
+
+       return err;
+}
 /* ======================== DMA copy page ======================== */
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
diff --git a/mm/internal.h b/mm/internal.h
index cb1a610..51f5e1b 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -558,5 +558,7 @@ extern struct page *alloc_new_node_page(struct page *page, unsigned long node);
 
 extern int copy_page_lists_dma_always(struct page **to,
                        struct page **from, int nr_pages);
+extern int copy_page_lists_mt(struct page **to,
+                       struct page **from, int nr_pages);
 
 #endif /* __MM_INTERNAL_H */
-- 
2.7.4
