[PATCH v7 36/61] mm: Convert add_to_swap_cache to XArray

2018-02-19 Thread Matthew Wilcox
From: Matthew Wilcox 

Combine __add_to_swap_cache and add_to_swap_cache into one function
since there is no more need to preload.
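
Where the radix tree forced callers to preallocate nodes up front
(radix_tree_maybe_preload_order() before taking the lock), the XArray
allocates on demand: xas_nomem() runs after the lock has been dropped,
allocates with the caller's gfp flags and asks for a retry.  A minimal
sketch of that idiom (mapping, index, page and gfp are placeholders,
not identifiers from this patch):

	XA_STATE(xas, &mapping->pages, index);

	do {
		xas_lock_irq(&xas);
		xas_store(&xas, page);		/* may fail with -ENOMEM */
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));	/* allocate unlocked, retry */

	return xas_error(&xas);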

Signed-off-by: Matthew Wilcox 
---
 mm/swap_state.c | 93 ++---
 1 file changed, 29 insertions(+), 64 deletions(-)

diff --git a/mm/swap_state.c b/mm/swap_state.c
index 3f95e8fc4cb2..a57b5ad4c503 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -107,14 +107,15 @@ void show_swap_cache_info(void)
 }
 
 /*
- * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
+ * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
  * but sets SwapCache flag and private instead of mapping and index.
  */
-int __add_to_swap_cache(struct page *page, swp_entry_t entry)
+int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
 {
-   int error, i, nr = hpage_nr_pages(page);
-   struct address_space *address_space;
+   struct address_space *address_space = swap_address_space(entry);
pgoff_t idx = swp_offset(entry);
+   XA_STATE(xas, &address_space->pages, idx);
+   unsigned long i, nr = 1UL << compound_order(page);
 
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(PageSwapCache(page), page);
@@ -123,50 +124,30 @@ int __add_to_swap_cache(struct page *page, swp_entry_t entry)
page_ref_add(page, nr);
SetPageSwapCache(page);
 
-   address_space = swap_address_space(entry);
-   xa_lock_irq(&address_space->pages);
-   for (i = 0; i < nr; i++) {
-   set_page_private(page + i, entry.val + i);
-   error = radix_tree_insert(&address_space->pages,
- idx + i, page + i);
-   if (unlikely(error))
-   break;
-   }
-   if (likely(!error)) {
+   do {
+   xas_lock_irq(&xas);
+   xas_create_range(&xas, idx + nr - 1);
+   if (xas_error(&xas))
+   goto unlock;
+   for (i = 0; i < nr; i++) {
+   VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
+   set_page_private(page + i, entry.val + i);
+   xas_store(&xas, page + i);
+   xas_next(&xas);
+   }
address_space->nrpages += nr;
__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
ADD_CACHE_INFO(add_total, nr);
-   } else {
-   /*
-* Only the context which have set SWAP_HAS_CACHE flag
-* would call add_to_swap_cache().
-* So add_to_swap_cache() doesn't returns -EEXIST.
-*/
-   VM_BUG_ON(error == -EEXIST);
-   set_page_private(page + i, 0UL);
-   while (i--) {
-   radix_tree_delete(&address_space->pages, idx + i);
-   set_page_private(page + i, 0UL);
-   }
-   ClearPageSwapCache(page);
-   page_ref_sub(page, nr);
-   }
-   xa_unlock_irq(&address_space->pages);
+unlock:
+   xas_unlock_irq(&xas);
+   } while (xas_nomem(&xas, gfp));
 
-   return error;
-}
-
-
-int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
-{
-   int error;
+   if (!xas_error(&xas))
+   return 0;
 
-   error = radix_tree_maybe_preload_order(gfp_mask, compound_order(page));
-   if (!error) {
-   error = __add_to_swap_cache(page, entry);
-   radix_tree_preload_end();
-   }
-   return error;
+   ClearPageSwapCache(page);
+   page_ref_sub(page, nr);
+   return xas_error(&xas);
 }
 
 /*
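
Note how much simpler the unwind is: xas_create_range() allocates every
node needed for the range before the first xas_store(), so an allocation
failure happens before anything has been inserted and there is no
per-index radix_tree_delete() loop to run.  The entire error path is the
tail of the function:

	ClearPageSwapCache(page);
	page_ref_sub(page, nr);
	return xas_error(&xas);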
@@ -220,7 +201,7 @@ int add_to_swap(struct page *page)
goto fail;
 
/*
-* Radix-tree node allocations from PF_MEMALLOC contexts could
+* XArray node allocations from PF_MEMALLOC contexts could
 * completely exhaust the page allocator. __GFP_NOMEMALLOC
 * stops emergency reserves from being allocated.
 *
@@ -232,7 +213,6 @@ int add_to_swap(struct page *page)
 */
err = add_to_swap_cache(page, entry,
__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
-   /* -ENOMEM radix-tree allocation failure */
if (err)
/*
 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
@@ -400,19 +380,11 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
break;  /* Out of memory */
}
 
-   /*
-* call radix_tree_preload() while we can wait.
-*/
-   err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
-   if (err)
-   break;
-
/*
 * Swap entry may have been freed since our caller observed it.
 */
	err = swapcache_prepare(entry);
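
On the caller side only the gfp plumbing changes; condensed from the
add_to_swap() hunk above (fail is add_to_swap()'s existing label):

	err = add_to_swap_cache(page, entry,
			__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
	if (err)
		/* the XArray path returns 0 or -ENOMEM, never -EEXIST */
		goto fail;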
