From: Zi Yan <z...@nvidia.com>

Add a MIGRATE_DMA migration mode and a matching MPOL_MF_MOVE_DMA flag
for move_pages(2), so page migration can offload the data copy to
copy_page_dma(). Fall back to copy_highpage() when the DMA copy fails.
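
For reference, a minimal userspace sketch of the new flag (not part of
this patch; it assumes libnuma's numaif.h for move_pages(2) and that
node 1 exists, and it defines MPOL_MF_MOVE_DMA locally since distro
headers will not have it yet):

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>
  #include <unistd.h>
  #include <numaif.h>

  /* Value matches the uapi change below. */
  #ifndef MPOL_MF_MOVE_DMA
  #define MPOL_MF_MOVE_DMA (1<<5)
  #endif

  int main(void)
  {
          long psz = sysconf(_SC_PAGESIZE);
          void *page;
          int node = 1;           /* destination node, system-specific */
          int status = -1;

          if (posix_memalign(&page, psz, psz))
                  return 1;
          memset(page, 0, psz);   /* fault the page in so it can migrate */

          /*
           * Ask for a DMA-assisted copy; the kernel falls back to
           * copy_highpage() if copy_page_dma() fails.
           */
          if (move_pages(0, 1, &page, &node, &status,
                         MPOL_MF_MOVE | MPOL_MF_MOVE_DMA))
                  perror("move_pages");
          else
                  printf("page is now on node %d\n", status);

          free(page);
          return 0;
  }

Build with -lnuma. MPOL_MF_MOVE_ALL combines with the new flag in the
same way, subject to the existing CAP_SYS_NICE check.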

Signed-off-by: Zi Yan <z...@nvidia.com>
---
 include/linux/migrate_mode.h   |  1 +
 include/uapi/linux/mempolicy.h |  1 +
 mm/migrate.c                   | 31 +++++++++++++++++++++----------
 3 files changed, 23 insertions(+), 10 deletions(-)
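
A note on mode composition: MIGRATE_SINGLETHREAD is 0, so the ternaries
in do_move_pages_to_node() only ever OR in the optional bits, i.e. the
effective mode reduces to

  MIGRATE_SYNC
          | (migrate_mt  ? MIGRATE_MT  : 0)
          | (migrate_dma ? MIGRATE_DMA : 0)

and since copy_huge_page() tests MIGRATE_MT before MIGRATE_DMA, the
multi-threaded copy takes precedence when userspace passes both
MPOL_MF_MOVE_MT and MPOL_MF_MOVE_DMA.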

diff --git a/include/linux/migrate_mode.h b/include/linux/migrate_mode.h
index 5bc8a77..4f7f5557 100644
--- a/include/linux/migrate_mode.h
+++ b/include/linux/migrate_mode.h
@@ -23,6 +23,7 @@ enum migrate_mode {
        MIGRATE_MODE_MASK = 3,
        MIGRATE_SINGLETHREAD    = 0,
        MIGRATE_MT                              = 1<<4,
+       MIGRATE_DMA                             = 1<<5,
 };
 
 #endif         /* MIGRATE_MODE_H_INCLUDED */
diff --git a/include/uapi/linux/mempolicy.h b/include/uapi/linux/mempolicy.h
index 890269b..49573a6 100644
--- a/include/uapi/linux/mempolicy.h
+++ b/include/uapi/linux/mempolicy.h
@@ -48,6 +48,7 @@ enum {
 #define MPOL_MF_LAZY    (1<<3) /* Modifies '_MOVE:  lazy migrate on fault */
 #define MPOL_MF_INTERNAL (1<<4)        /* Internal flags start here */
 
+#define MPOL_MF_MOVE_DMA (1<<5)        /* Use DMA page copy routine */
 #define MPOL_MF_MOVE_MT  (1<<6)        /* Use multi-threaded page copy routine */
 
 #define MPOL_MF_VALID  (MPOL_MF_STRICT   |     \
diff --git a/mm/migrate.c b/mm/migrate.c
index 8a344e2..09114d3 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -553,15 +553,21 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
  * specialized.
  */
 static void __copy_gigantic_page(struct page *dst, struct page *src,
-                               int nr_pages)
+                               int nr_pages, enum migrate_mode mode)
 {
        int i;
        struct page *dst_base = dst;
        struct page *src_base = src;
+       int rc = -EFAULT;
 
        for (i = 0; i < nr_pages; ) {
                cond_resched();
-               copy_highpage(dst, src);
+
+               if (mode & MIGRATE_DMA)
+                       rc = copy_page_dma(dst, src, 1);
+
+               if (rc)
+                       copy_highpage(dst, src);
 
                i++;
                dst = mem_map_next(dst, dst_base, i);
@@ -582,7 +588,7 @@ static void copy_huge_page(struct page *dst, struct page *src,
                nr_pages = pages_per_huge_page(h);
 
                if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
-                       __copy_gigantic_page(dst, src, nr_pages);
+                       __copy_gigantic_page(dst, src, nr_pages, mode);
                        return;
                }
        } else {
@@ -597,6 +603,8 @@ static void copy_huge_page(struct page *dst, struct page *src,
 
        if (mode & MIGRATE_MT)
                rc = copy_page_multithread(dst, src, nr_pages);
+       else if (mode & MIGRATE_DMA)
+               rc = copy_page_dma(dst, src, nr_pages);
 
        if (rc)
                for (i = 0; i < nr_pages; i++) {
@@ -674,8 +682,9 @@ void migrate_page_copy(struct page *newpage, struct page *page,
 {
        if (PageHuge(page) || PageTransHuge(page))
                copy_huge_page(newpage, page, mode);
-       else
+       else {
                copy_highpage(newpage, page);
+       }
 
        migrate_page_states(newpage, page);
 }
@@ -1511,7 +1520,8 @@ static int store_status(int __user *status, int start, int value, int nr)
 }
 
 static int do_move_pages_to_node(struct mm_struct *mm,
-               struct list_head *pagelist, int node, bool migrate_mt)
+               struct list_head *pagelist, int node,
+               bool migrate_mt, bool migrate_dma)
 {
        int err;
 
@@ -1519,7 +1529,8 @@ static int do_move_pages_to_node(struct mm_struct *mm,
                return 0;
 
        err = migrate_pages(pagelist, alloc_new_node_page, NULL, node,
-                       MIGRATE_SYNC | (migrate_mt ? MIGRATE_MT : MIGRATE_SINGLETHREAD),
+                       MIGRATE_SYNC | (migrate_mt ? MIGRATE_MT : MIGRATE_SINGLETHREAD) |
+                       (migrate_dma ? MIGRATE_DMA : MIGRATE_SINGLETHREAD),
                        MR_SYSCALL);
        if (err)
                putback_movable_pages(pagelist);
@@ -1642,7 +1653,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
                        start = i;
                } else if (node != current_node) {
                        err = do_move_pages_to_node(mm, &pagelist, current_node,
-                               flags & MPOL_MF_MOVE_MT);
+                               flags & MPOL_MF_MOVE_MT, flags & MPOL_MF_MOVE_DMA);
                        if (err)
                                goto out;
                        err = store_status(status, start, current_node, i - start);
@@ -1666,7 +1677,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
                        goto out_flush;
 
                err = do_move_pages_to_node(mm, &pagelist, current_node,
-                               flags & MPOL_MF_MOVE_MT);
+                               flags & MPOL_MF_MOVE_MT, flags & MPOL_MF_MOVE_DMA);
                if (err)
                        goto out;
                if (i > start) {
@@ -1682,7 +1693,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
 
        /* Make sure we do not overwrite the existing error */
        err1 = do_move_pages_to_node(mm, &pagelist, current_node,
-                               flags & MPOL_MF_MOVE_MT);
+                               flags & MPOL_MF_MOVE_MT, flags & MPOL_MF_MOVE_DMA);
        if (!err1)
                err1 = store_status(status, start, current_node, i - start);
        if (!err)
@@ -1778,7 +1789,7 @@ static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
        nodemask_t task_nodes;
 
        /* Check flags */
-       if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL|MPOL_MF_MOVE_MT))
+       if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL|MPOL_MF_MOVE_MT|MPOL_MF_MOVE_DMA))
                return -EINVAL;
 
        if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
-- 
2.7.4
