Re: [PATCH 1/9] mm: infrastructure for page fault page caching

2018-09-27 Thread Matthew Wilcox
On Wed, Sep 26, 2018 at 05:08:48PM -0400, Josef Bacik wrote:
> We want to be able to cache the result of a previous loop of a page
> fault in the case that we use VM_FAULT_RETRY, so introduce
> handle_mm_fault_cacheable that will take a struct vm_fault directly, add
> a ->cached_page field to vm_fault, and add helpers to init/cleanup the
> struct vm_fault.
> 
> I've converted x86, other arches can follow suit if they so wish, it's
> relatively straightforward.

Here's what I did back in January ... feel free to steal any of it if you
like it better.


diff --git a/mm/memory.c b/mm/memory.c
index 5eb3d2524bdc..403934297a3d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3977,36 +3977,28 @@ static int handle_pte_fault(struct vm_fault *vmf)
  * The mmap_sem may have been released depending on flags and our
  * return value.  See filemap_fault() and __lock_page_or_retry().
  */
-static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
-   unsigned int flags)
+static int __handle_mm_fault(struct vm_fault *vmf)
 {
-   struct vm_fault vmf = {
-   .vma = vma,
-   .address = address & PAGE_MASK,
-   .flags = flags,
-   .pgoff = linear_page_index(vma, address),
-   .gfp_mask = __get_fault_gfp_mask(vma),
-   };
-   unsigned int dirty = flags & FAULT_FLAG_WRITE;
-   struct mm_struct *mm = vma->vm_mm;
+   unsigned int dirty = vmf->flags & FAULT_FLAG_WRITE;
+   struct mm_struct *mm = vmf->vma->vm_mm;
pgd_t *pgd;
p4d_t *p4d;
int ret;
 
-   pgd = pgd_offset(mm, address);
-   p4d = p4d_alloc(mm, pgd, address);
+   pgd = pgd_offset(mm, vmf->address);
+   p4d = p4d_alloc(mm, pgd, vmf->address);
if (!p4d)
return VM_FAULT_OOM;
 
-   vmf.pud = pud_alloc(mm, p4d, address);
-   if (!vmf.pud)
+   vmf->pud = pud_alloc(mm, p4d, vmf->address);
+   if (!vmf->pud)
return VM_FAULT_OOM;
-   if (pud_none(*vmf.pud) && transparent_hugepage_enabled(vma)) {
-   ret = create_huge_pud(&vmf);
+   if (pud_none(*vmf->pud) && transparent_hugepage_enabled(vmf->vma)) {
+   ret = create_huge_pud(vmf);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
} else {
-   pud_t orig_pud = *vmf.pud;
+   pud_t orig_pud = *vmf->pud;
 
barrier();
if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
@@ -4014,50 +4006,51 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
/* NUMA case for anonymous PUDs would go here */
 
if (dirty && !pud_access_permitted(orig_pud, WRITE)) {
-   ret = wp_huge_pud(&vmf, orig_pud);
+   ret = wp_huge_pud(vmf, orig_pud);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
} else {
-   huge_pud_set_accessed(&vmf, orig_pud);
+   huge_pud_set_accessed(vmf, orig_pud);
return 0;
}
}
}
 
-   vmf.pmd = pmd_alloc(mm, vmf.pud, address);
-   if (!vmf.pmd)
+   vmf->pmd = pmd_alloc(mm, vmf->pud, vmf->address);
+   if (!vmf->pmd)
return VM_FAULT_OOM;
-   if (pmd_none(*vmf.pmd) && transparent_hugepage_enabled(vma)) {
-   ret = create_huge_pmd(&vmf);
+   if (pmd_none(*vmf->pmd) && transparent_hugepage_enabled(vmf->vma)) {
+   ret = create_huge_pmd(vmf);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
} else {
-   pmd_t orig_pmd = *vmf.pmd;
+   pmd_t orig_pmd = *vmf->pmd;
 
barrier();
if (unlikely(is_swap_pmd(orig_pmd))) {
VM_BUG_ON(thp_migration_supported() &&
  !is_pmd_migration_entry(orig_pmd));
if (is_pmd_migration_entry(orig_pmd))
-   pmd_migration_entry_wait(mm, vmf.pmd);
+   pmd_migration_entry_wait(mm, vmf->pmd);
return 0;
}
if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
-   if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
-   return do_huge_pmd_numa_page(&vmf, orig_pmd);
+   if (pmd_protnone(orig_pmd) &&
+   vma_is_accessible(vmf->vma))
+   return do_huge_pmd_numa_page(vmf, orig_pmd);
 
if (dirty && !pmd_access_permitted(orig_pmd, WRITE)) {
-   ret = wp_huge_pmd(&vmf, orig_pmd);
+   ret = wp_huge_pmd(vmf, orig_pmd);

Re: [RFC PATCH v2 00/12] get rid of GFP_ZONE_TABLE/BAD

2018-05-25 Thread Matthew Wilcox
On Thu, May 24, 2018 at 05:29:43PM +0200, Michal Hocko wrote:
> > ie if we had more,
> > could we solve our pain by making them more generic?
> 
> Well, if you have more you will consume more bits in the struct pages,
> right?

Not necessarily ... the zone number is stored in the struct page
currently, so either two or three bits are used right now.  In my
proposal, one can infer the zone of a page from its PFN, except for
ZONE_MOVABLE.  So we could trim down to just one bit per struct page
for 32-bit machines while using 3 bits on 64-bit machines, where there
is plenty of space.
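
A purely illustrative sketch of that inference -- zone_boundary_pfn[] and
PageInMovableZone() are made-up names for this example, not existing
kernel interfaces:

/* Hypothetical: derive the zone from the PFN alone; only ZONE_MOVABLE
 * still needs a bit in struct page. */
static inline enum zone_type page_zone_from_pfn(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	enum zone_type z;

	if (PageInMovableZone(page))	/* the one remaining per-page bit */
		return ZONE_MOVABLE;
	for (z = 0; z < MAX_NR_ZONES - 1; z++)
		if (pfn < zone_boundary_pfn[z + 1])
			break;
	return z;
}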

> > it more-or-less sucks that the devices with 28-bit DMA limits are forced
> > to allocate from the low 16MB when they're perfectly capable of using the
> > low 256MB.
> 
> Do we actually care all that much about those? If yes then we should
> probably follow the ZONE_DMA (x86) path and use a CMA region for them.
> I mean most devices should be good with very limited addressability or
> below 4G, no?

Sure.  One other thing I meant to mention was the media devices
(TV capture cards and so on) which want a vmalloc_32() allocation.
On 32-bit machines right now, we allocate from LOWMEM, when we really
should be allocating from the 1GB-4GB region.  32-bit machines generally
don't have a ZONE_DMA32 today.


Re: [RFC PATCH v2 00/12] get rid of GFP_ZONE_TABLE/BAD

2018-05-24 Thread Matthew Wilcox
On Thu, May 24, 2018 at 02:23:23PM +0200, Michal Hocko wrote:
> > If we had eight ZONEs, we could offer:
> 
> No, please no more zones. What we have is quite a maint. burden on its
> own. Ideally we should only have lowmem, highmem and special/device
> zones for directly kernel accessible memory, the one that the kernel
> cannot or must not use and completely special memory managed out of
> the page allocator. All the remaining constrains should better be
> implemented on top.

I believe you when you say that they're a maintenance pain.  Is that
maintenance pain because they're so specialised?  ie if we had more,
could we solve our pain by making them more generic?

> > ZONE_16M// 24 bit
> > ZONE_256M   // 28 bit
> > ZONE_LOWMEM // CONFIG_32BIT only
> > ZONE_4G // 32 bit
> > ZONE_64G// 36 bit
> > ZONE_1T // 40 bit
> > ZONE_ALL// everything larger
> > ZONE_MOVABLE// movable allocations; no physical address guarantees
> > 
> > #ifdef CONFIG_64BIT
> > #define ZONE_NORMAL ZONE_ALL
> > #else
> > #define ZONE_NORMAL ZONE_LOWMEM
> > #endif
> > 
> > This would cover most driver DMA mask allocations; we could tweak the
> > offered zones based on analysis of what people need.
> 
> But those already do have a proper API, IIUC. So do we really need to
> make our GFP_*/Zone API more complicated than it already is?

I don't want to change the driver API (setting the DMA mask, etc),
but we don't actually have a good API to the page allocator for the
implementation of dma_alloc_foo() to request pages.  More or less,
architectures do:

if (mask < 4GB)
alloc_page(GFP_DMA)
else if (mask < 64EB)
alloc_page(GFP_DMA32)
else
alloc_page(GFP_HIGHMEM)

it more-or-less sucks that the devices with 28-bit DMA limits are forced
to allocate from the low 16MB when they're perfectly capable of using the
low 256MB.  Sure, my proposal doesn't help 27 or 26 bit DMA mask devices,
but those are pretty rare.
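
To make that concrete, here's a hedged sketch of what the implementation
of dma_alloc_foo() could do with the zone ladder quoted above; the
gfp_zone_for_mask() and __GFP_ZONE_* names are hypothetical, DMA_BIT_MASK()
is the usual helper:

static gfp_t gfp_zone_for_mask(u64 mask)
{
	if (mask < DMA_BIT_MASK(24))
		return 0;			/* no zone small enough; must bounce */
	if (mask < DMA_BIT_MASK(28))
		return __GFP_ZONE_16M;		/* includes the 26/27-bit masks */
	if (mask < DMA_BIT_MASK(32))
		return __GFP_ZONE_256M;		/* the 28-bit case above */
	if (mask < DMA_BIT_MASK(36))
		return __GFP_ZONE_4G;
	if (mask < DMA_BIT_MASK(40))
		return __GFP_ZONE_64G;
	if (mask < DMA_BIT_MASK(64))
		return __GFP_ZONE_1T;
	return __GFP_ZONE_ALL;
}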

I'm sure you don't need reminding what a mess vmalloc_32 is, and the
implementation of saa7146_vmalloc_build_pgtable() just hurts.

> > #define GFP_HIGHUSER(GFP_USER | ZONE_ALL)
> > #define GFP_HIGHUSER_MOVABLE(GFP_USER | ZONE_MOVABLE)
> > 
> > One other thing I want to see is that fallback from zones happens from
> > highest to lowest normally (ie if you fail to allocate in 1T, then you
> > try to allocate from 64G), but movable allocations happen from lowest
> > to highest.  So ZONE_16M ends up full of page cache pages which are
> > readily evictable for the rare occasions when we need to allocate memory
> > below 16MB.
> > 
> > I'm sure there are lots of good reasons why this won't work, which is
> > why I've been hesitant to propose it before now.
> 
> I am worried you are playing with a can of worms...

Yes.  Me too.


Re: [RFC PATCH v2 00/12] get rid of GFP_ZONE_TABLE/BAD

2018-05-23 Thread Matthew Wilcox
On Tue, May 22, 2018 at 08:37:28PM +0200, Michal Hocko wrote:
> So why is this any better than the current code. Sure I am not a great
> fan of GFP_ZONE_TABLE because of how it is incomprehensible but this
> doesn't look too much better, yet we are losing a check for incompatible
> gfp flags. The diffstat looks really sound but then you just look and
> see that the large part is the comment that at least explained the gfp
> zone modifiers somehow and the debugging code. So what is the selling
> point?

I have a plan, but it's not exactly fully-formed yet.

One of the big problems we have today is that we have a lot of users
who have constraints on the physical memory they want to allocate,
but we have very limited abilities to provide them with what they're
asking for.  The various different ZONEs have different meanings on
different architectures and are generally a mess.

If we had eight ZONEs, we could offer:

ZONE_16M// 24 bit
ZONE_256M   // 28 bit
ZONE_LOWMEM // CONFIG_32BIT only
ZONE_4G // 32 bit
ZONE_64G// 36 bit
ZONE_1T // 40 bit
ZONE_ALL// everything larger
ZONE_MOVABLE// movable allocations; no physical address guarantees

#ifdef CONFIG_64BIT
#define ZONE_NORMAL ZONE_ALL
#else
#define ZONE_NORMAL ZONE_LOWMEM
#endif

This would cover most driver DMA mask allocations; we could tweak the
offered zones based on analysis of what people need.

#define GFP_HIGHUSER(GFP_USER | ZONE_ALL)
#define GFP_HIGHUSER_MOVABLE(GFP_USER | ZONE_MOVABLE)

One other thing I want to see is that fallback from zones happens from
highest to lowest normally (ie if you fail to allocate in 1T, then you
try to allocate from 64G), but movable allocations happen from lowest
to highest.  So ZONE_16M ends up full of page cache pages which are
readily evictable for the rare occasions when we need to allocate memory
below 16MB.
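
A sketch of that policy, with try_alloc_zone() standing in for a
single-zone allocation attempt (illustrative only, not a real interface):

static struct page *alloc_with_fallback(gfp_t gfp, enum zone_type highest)
{
	struct page *page;
	int z;

	if (gfp & __GFP_MOVABLE) {
		/* movable: fill the small zones first, spill upwards */
		for (z = 0; z <= highest; z++)
			if ((page = try_alloc_zone(gfp, z)))
				return page;
	} else {
		/* unmovable: largest permitted zone first, spill downwards */
		for (z = highest; z >= 0; z--)
			if ((page = try_alloc_zone(gfp, z)))
				return page;
	}
	return NULL;
}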

I'm sure there are lots of good reasons why this won't work, which is
why I've been hesitant to propose it before now.


Re: [RFC PATCH v2 10/12] mm/zsmalloc: update usage of address zone modifiers

2018-05-22 Thread Matthew Wilcox
On Mon, May 21, 2018 at 11:20:31PM +0800, Huaisheng Ye wrote:
> @@ -343,7 +343,7 @@ static void destroy_cache(struct zs_pool *pool)
>  static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
>  {
>   return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
> - gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
> + gfp & ~__GFP_ZONE_MOVABLE);
>  }

This should be & ~GFP_ZONEMASK

Actually, we should probably have a function to clear those bits rather
than have every driver manipulating the gfp mask like this.  Maybe

#define gfp_normal(gfp) ((gfp) & ~GFP_ZONEMASK)

return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
-   gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
+   gfp_normal(gfp));



Re: [PATCH] [RFC] bcachefs: SIX locks (shared/intent/exclusive)

2018-05-21 Thread Matthew Wilcox
On Mon, May 21, 2018 at 10:19:51PM -0400, Kent Overstreet wrote:
> New lock for bcachefs, like read/write locks but with a third state,
> intent.
> 
> Intent locks conflict with each other, but not with read locks; taking a
> write lock requires first holding an intent lock.

Can you put something in the description that these are sleeping locks
(like mutexes), not spinning locks (like spinlocks)?  (Yeah, I know
there's the opportunistic spin, but conceptually, they're sleeping locks).

Some other things I'd like documented:

 - Any number of readers can hold the lock
 - Once one thread acquires the lock for intent, further intent acquisitions
   will block.  May new readers acquire the lock?
 - You cannot acquire the lock for write directly, you must acquire it for
   intent first, then upgrade to write.
 - Can you downgrade to read from intent, or downgrade from write back to
   intent?
 - Once you are trying to upgrade from intent to write, are new read
   acquisitions blocked? (can readers starve writers?)
 - When you drop the lock as a writer, do we prefer reader acquisitions
   over intent acquisitions?  That is, if we have a queue of RRIRIRIR,
   and we drop the lock, does the queue look like II or IRIR?
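
As I read the description, the compatibility matrix would be the
following; this is my inference, not the patch's own documentation, so
please confirm it there:

/*
 *          R   I   W      R = shared/read, I = intent, W = write/exclusive
 *      R   Y   Y   n      "Y" = the two states may be held at once
 *      I   Y   n   n           by different threads
 *      W   n   n   n
 *
 * W is never taken directly; a holder of I upgrades to W.
 */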



Re: [PATCH 01/10] mm: pagecache add lock

2018-05-18 Thread Matthew Wilcox
On Fri, May 18, 2018 at 03:49:00AM -0400, Kent Overstreet wrote:
> Add a per address space lock around adding pages to the pagecache - making it
> possible for fallocate INSERT_RANGE/COLLAPSE_RANGE to work correctly, and also
> hopefully making truncate and dio a bit saner.

(moving this section here from the overall description so I can reply
to it in one place)

>  * pagecache add lock
> 
> This is the only one that touches existing code in nontrivial ways.
> The problem it's solving is that there is no existing general mechanism
> for shooting down pages in the page cache and keeping them removed, which is a
> real problem if you're doing anything that modifies file data and isn't
> buffered writes.
> 
> Historically, the only problematic case has been direct IO, and people
> have been willing to say "well, if you mix buffered and direct IO you
> get what you deserve", and that's probably not unreasonable. But now we
> have fallocate insert range and collapse range, and those are broken in
> ways I frankly don't want to think about if they can't ensure consistency
> with the page cache.

ext4 manages collapse-vs-pagefault with the ext4-specific i_mmap_sem.
You may get pushback on the grounds that this ought to be a
filesystem-specific lock rather than one embedded in the generic inode.

> Also, the mechanism truncate uses (i_size and sacrificing a goat) has
> historically been rather fragile, IMO it might be a good thing if we
> switched it to a more general rigorous mechanism.
> 
> I need this solved for bcachefs because without this mechanism, the page
> cache inconsistencies lead to various assertions popping (primarily when
> we didn't think we need to get a disk reservation going by page cache
> state, but then do the actual write and disk space accounting says oops,
> we did need one). And having to reason about what can happen without
> a locking mechanism for this is not something I care to spend brain
> cycles on.
> 
> That said, my patch is kind of ugly, and it requires filesystem changes
> for other filesystems to take advantage of it. And unfortunately, since
> one of the code paths that needs locking is readahead, I don't see any
> realistic way of implementing the locking within just bcachefs code.
> 
> So I'm hoping someone has an idea for something cleaner (I think I recall
> Matthew Wilcox saying he had an idea for how to use xarray to solve this),
> but if not I'll polish up my pagecache add lock patch and see what I can
> do to make it less ugly, and hopefully other people find it palatable
> or at least useful.

My idea with the XArray is that we have a number of reserved entries which
we can use as blocking entries.  I was originally planning on making this
an XArray feature, but I now believe it's a page-cache-special feature.
We can always revisit that decision if it turns out to be useful to
another user.

API:

int filemap_block_range(struct address_space *mapping, loff_t start,
loff_t end);
void filemap_remove_block(struct address_space *mapping, loff_t start,
loff_t end);

 - After removing a block, the pagecache is empty between [start, end].
 - You have to treat the block as a single entity; don't unblock only
   a subrange of the range you originally blocked.
 - Lookups of a page within a blocked range return NULL.
 - Attempts to add a page to a blocked range sleep on one of the
   page_wait_table queues.
 - Attempts to block a blocked range will also sleep on one of the
   page_wait_table queues.  Is this restriction acceptable for your use
   case?  It's clearly not a problem for fallocate insert/collapse.  It
   would only be a problem for Direct I/O if people are doing subpage
   directio from within the same page.  I think that's rare enough to
   not be a problem (but please tell me if I'm wrong!)
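
To make the calling convention concrete, a sketch of fallocate collapse
on top of this; filemap_block_range()/filemap_remove_block() are the
calls proposed above and don't exist yet, truncate_pagecache_range() is
today's helper, and error handling is abbreviated:

static int collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct address_space *mapping = inode->i_mapping;
	int err;

	/* keep pages out of [offset, EOF] while extents move */
	err = filemap_block_range(mapping, offset, LLONG_MAX);
	if (err)
		return err;
	truncate_pagecache_range(inode, offset, LLONG_MAX);

	/* ... the filesystem shifts extents down by 'len' here ... */

	/* unblock exactly the range that was blocked */
	filemap_remove_block(mapping, offset, LLONG_MAX);
	return 0;
}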



Re: vm_fault_t conversion, for real

2018-05-16 Thread Matthew Wilcox
On Wed, May 16, 2018 at 03:03:09PM +0200, Christoph Hellwig wrote:
> On Wed, May 16, 2018 at 04:23:47AM -0700, Matthew Wilcox wrote:
> > On Wed, May 16, 2018 at 07:43:34AM +0200, Christoph Hellwig wrote:
> > > this series tries to actually turn vm_fault_t into a type that can be
> > > typechecked and checks the fallout instead of sprinkling random
> > > annotations without context.
> > 
> > Yes, why should we have small tasks that newcomers can do when the mighty
> > Christoph Hellwig can swoop in and take over from them?  Seriously,
> > can't your talents find a better use than this?
> 
> I've spent less time on this than trying to argue to you and Souptick
> that these changes are only to get ignored and yelled at as an
> "asshole maintainer".  So yes, I could have done more productive things
> if you hadn't forced this escalation.

Perhaps you should try being less of an arsehole if you don't want to
get yelled at?  I don't mind when you're an arsehole towards me, but I
do mind when you're an arsehole towards newcomers.  How are we supposed
to attract and retain new maintainers when you're so rude?



Re: [PATCH 10/14] vgem: separate errno from VM_FAULT_* values

2018-05-16 Thread Matthew Wilcox
On Wed, May 16, 2018 at 03:01:59PM +0200, Christoph Hellwig wrote:
> On Wed, May 16, 2018 at 11:53:03AM +0200, Daniel Vetter wrote:
> > Reviewed-by: Daniel Vetter 
> > 
> > Want me to merge this through drm-misc or plan to pick it up yourself?
> 
> For now I just want a honest discussion if people really actually
> want the vm_fault_t change with the whole picture in place.

That discussion already happened on the -mm mailing list.  And again
at LSFMM.  Both times the answer was yes.


Re: [PATCH 14/14] mm: turn on vm_fault_t type checking

2018-05-16 Thread Matthew Wilcox
On Wed, May 16, 2018 at 07:43:48AM +0200, Christoph Hellwig wrote:
> Switch vm_fault_t to point to an unsigned int with __bitwise annotations.
> This both catches any old ->fault or ->page_mkwrite instance with plain
> compiler type checking, as well as finding more intricate problems with
> sparse.

Come on, Christoph; you know better than this.  This patch is completely
unreviewable.  Split it into one patch per maintainer tree, and in any
event, the patch to convert vm_fault_t to an unsigned int should be
separated from all the trivial conversions.
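
For reference, the sparse-visible core of the quoted change is tiny
(flag values as in <linux/mm.h> of this era; the rest of the series is
annotation churn):

typedef unsigned int __bitwise vm_fault_t;

#define VM_FAULT_OOM	((__force vm_fault_t)0x0001)
#define VM_FAULT_SIGBUS	((__force vm_fault_t)0x0002)

/* sparse now warns when a ->fault handler returns an errno such as
 * -ENOMEM instead of VM_FAULT_OOM, and plain gcc catches handlers
 * that still have the old int-returning signature. */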


Re: vm_fault_t conversion, for real

2018-05-16 Thread Matthew Wilcox
On Wed, May 16, 2018 at 07:43:34AM +0200, Christoph Hellwig wrote:
> this series tries to actually turn vm_fault_t into a type that can be
> typechecked and checks the fallout instead of sprinkling random
> annotations without context.

Yes, why should we have small tasks that newcomers can do when the mighty
Christoph Hellwig can swoop in and take over from them?  Seriously,
can't your talents find a better use than this?

> The first one fixes a real bug in orangefs, the second and third fix
> mismatched existing vm_fault_t annotations on the same function, the
> fourth removes an unused export that was in the chain.  The remainder
> until the last one do some not quite trivial conversions, and the last
> one does the trivial mass annotation and flips vm_fault_t to a __bitwise
> unsigned int - the unsigned means we also get plain compiler type
> checking for the new ->fault signature even without sparse.

Yes, that was (part of) the eventual goal.  Well done.  Would you like
a biscuit?



Re: [PATCH 01/14] orangefs: don't return errno values from ->fault

2018-05-16 Thread Matthew Wilcox
On Wed, May 16, 2018 at 07:43:35AM +0200, Christoph Hellwig wrote:
> + rc = orangefs_inode_getattr(file->f_mapping->host, 0, 1, STATX_SIZE);
>   if (rc) {
>   gossip_err("%s: orangefs_inode_getattr failed, "
>   "rc:%d:.\n", __func__, rc);
> - return rc;
> + return VM_FAULT_SIGBUS;

Nope.  orangefs_inode_getattr can return -ENOMEM.

>   }
>   return filemap_fault(vmf);
>  }
> -- 
> 2.17.0
> 


[PATCH v8 00/63] XArray v8

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

This patchset is, I believe, appropriate for merging for 4.17.
It contains the XArray implementation, to eventually replace the radix
tree, and converts the page cache to use it.

This conversion keeps the radix tree and XArray data structures in sync
at all times.  That allows us to convert the page cache one function at
a time and should allow for easier bisection.  Other than renaming some
elements of the structures, the data structures are fundamentally
unchanged; a radix tree walk and an XArray walk will touch the same
number of cachelines.  I have changes planned to the XArray data
structure, but those will happen in future patches.

Improvements the XArray has over the radix tree:

 - The radix tree provides operations like other trees do; 'insert' and
   'delete'.  But what most users really want is an automatically resizing
   array, and so it makes more sense to give users an API that is like
   an array -- 'load' and 'store'.  We still have an 'insert' operation
   for users that really want that semantic.
 - The XArray considers locking as part of its API.  This simplifies a lot
   of users who formerly had to manage their own locking just for the
   radix tree.  It also improves code generation as we can now tell RCU
   that we're holding a lock and it doesn't need to generate as much
   fencing code.  The other advantage is that tree nodes can be moved
   (not yet implemented).
 - GFP flags are now parameters to calls which may need to allocate
   memory.  The radix tree forced users to decide what the allocation
   flags would be at creation time.  It's much clearer to specify them
   at allocation time.
 - Memory is not preloaded; we don't tie up dozens of pages on the
   off chance that the slab allocator fails.  Instead, we drop the lock,
   allocate a new node and retry the operation.  We have to convert all
   the radix tree, IDA and IDR preload users before we can realise this
   benefit, but I have not yet found a user which cannot be converted.
 - The XArray provides a cmpxchg operation.  The radix tree forces users
   to roll their own (and at least four have).
 - Iterators take a 'max' parameter.  That simplifies many users and
   will reduce the amount of iteration done.
 - Iteration can proceed backwards.  We only have one user for this, but
   since it's called as part of the pagefault readahead algorithm, that
   seemed worth mentioning.
 - RCU-protected pointers are not exposed as part of the API.  There are
   some fun bugs where the page cache forgets to use rcu_dereference()
   in the current codebase.
 - Value entries gain an extra bit compared to radix tree exceptional
   entries.  That gives us the extra bit we need to put huge page swap
   entries in the page cache.
 - Some iterators now take a 'filter' argument instead of having
   separate iterators for tagged/untagged iterations.

The page cache is improved by this:
 - Shorter, easier to read code
 - More efficient iterations
 - Reduction in size of struct address_space
 - Fewer walks from the top of the data structure; the XArray API
   encourages staying at the leaf node and conducting operations there.
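
For readers new to the series, a hedged flavour of the calls named above
(illustrative rather than tested against this exact revision):

void example(struct page *old, struct page *new)
{
	DEFINE_XARRAY(array);

	xa_store(&array, 5, old, GFP_KERNEL);	/* "store": resizes itself */
	BUG_ON(xa_load(&array, 5) != old);	/* "load": no preload, no cursor */
	xa_cmpxchg(&array, 5, old, new, GFP_KERNEL); /* no hand-rolled cmpxchg */
	xa_destroy(&array);
}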

Changes since v7:
 - Added acks from Jeff Layton (thanks!)
 - Renamed the address_space ->pages to ->i_pages
 - Used GFP_ZONEMASK instead of the more obtuse shifting by 4 (Jeff Layton)
 - Realised that page_cache_range_empty() and filemap_range_has_page()
   were essentially the same function, so redid that pair of patches
 - A few checkpatch fixes
 - Added an SPDX tag to a missed file
 - Rebased on next-20180306
   - memfd moved out of shmem
   - nds32 needed its flush_dcache_mmap_lock fixed
   - mac80211_hwsim had added a use of IDA_INIT
 - Improved some documentation and commit messages
 - Split the rearrangement of struct address_space into its own patch (Kirill)
 - address_space documentation improvements had somehow worked their way
   into an unrelated patch; move them into the rearrangement patch.
 - Removed chunks of radix tree functionality that are not used any more.

Matthew Wilcox (63):
  mac80211_hwsim: Use DEFINE_IDA
  radix tree: Use GFP_ZONEMASK bits of gfp_t for flags
  arm64: Turn flush_dcache_mmap_lock into a no-op
  unicore32: Turn flush_dcache_mmap_lock into a no-op
  Export __set_page_dirty
  btrfs: Use filemap_range_has_page()
  xfs: Rename xa_ elements to ail_
  fscache: Use appropriate radix tree accessors
  xarray: Add the xa_lock to the radix_tree_root
  page cache: Use xa_lock
  xarray: Replace exceptional entries
  xarray: Change definition of sibling entries
  xarray: Add definition of struct xarray
  xarray: Define struct xa_node
  xarray: Add documentation
  xarray: Add xa_load
  xarray: Add xa_get_tag, xa_set_tag and xa_clear_tag
  xarray: Add xa_store
  xarray: Add xa_cmpxchg and xa_insert
  xarray: Add xa_for_each
  xarray: Add xa_extract
  xarray: Add xa_destroy
  xarray: Add xas_next and xas_prev
  xarray: Add xas_

[PATCH v8 03/63] arm64: Turn flush_dcache_mmap_lock into a no-op

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

ARM64 doesn't walk the VMA tree in its flush_dcache_page()
implementation, so has no need to take the tree_lock.

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
Reviewed-by: Will Deacon <will.dea...@arm.com>
---
 arch/arm64/include/asm/cacheflush.h | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index bef9f418f089..550b0abea953 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -137,10 +137,8 @@ static inline void __flush_icache_all(void)
dsb(ish);
 }
 
-#define flush_dcache_mmap_lock(mapping) \
-   spin_lock_irq(&(mapping)->tree_lock)
-#define flush_dcache_mmap_unlock(mapping) \
-   spin_unlock_irq(&(mapping)->tree_lock)
+#define flush_dcache_mmap_lock(mapping)do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)  do { } while (0)
 
 /*
  * We don't appear to need to do anything here.  In fact, if we did, we'd
-- 
2.16.1



[PATCH v8 05/63] Export __set_page_dirty

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

XFS currently contains a copy-and-paste of __set_page_dirty().  Export
it from buffer.c instead.

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
Acked-by: Jeff Layton <jlay...@kernel.org>
---
 fs/buffer.c        |  3 ++-
 fs/xfs/xfs_aops.c  | 15 ++-------------
 include/linux/mm.h |  1 +
 3 files changed, 5 insertions(+), 14 deletions(-)

diff --git a/fs/buffer.c b/fs/buffer.c
index 3b2b415f1fcd..a17d47b55de1 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -594,7 +594,7 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
  *
  * The caller must hold lock_page_memcg().
  */
-static void __set_page_dirty(struct page *page, struct address_space *mapping,
+void __set_page_dirty(struct page *page, struct address_space *mapping,
 int warn)
 {
unsigned long flags;
@@ -608,6 +608,7 @@ static void __set_page_dirty(struct page *page, struct address_space *mapping,
}
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
 }
+EXPORT_SYMBOL_GPL(__set_page_dirty);
 
 /*
  * Add a page to the dirty page list.
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 9c6a830da0ee..31f2c4895a46 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -1472,19 +1472,8 @@ xfs_vm_set_page_dirty(
newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);
 
-   if (newly_dirty) {
-   /* sigh - __set_page_dirty() is static, so copy it here, too */
-   unsigned long flags;
-
-   spin_lock_irqsave(&mapping->tree_lock, flags);
-   if (page->mapping) {/* Race with truncate? */
-   WARN_ON_ONCE(!PageUptodate(page));
-   account_page_dirtied(page, mapping);
-   radix_tree_tag_set(&mapping->page_tree,
-   page_index(page), PAGECACHE_TAG_DIRTY);
-   }
-   spin_unlock_irqrestore(&mapping->tree_lock, flags);
-   }
+   if (newly_dirty)
+   __set_page_dirty(page, mapping, 1);
unlock_page_memcg(page);
if (newly_dirty)
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 9f1270360983..8cf4714bfec8 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1454,6 +1454,7 @@ extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
 extern void do_invalidatepage(struct page *page, unsigned int offset,
  unsigned int length);
 
+void __set_page_dirty(struct page *, struct address_space *, int warn);
 int __set_page_dirty_nobuffers(struct page *page);
 int __set_page_dirty_no_writeback(struct page *page);
 int redirty_page_for_writepage(struct writeback_control *wbc,
-- 
2.16.1



[PATCH v8 04/63] unicore32: Turn flush_dcache_mmap_lock into a no-op

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

Unicore doesn't walk the VMA tree in its flush_dcache_page()
implementation, so has no need to take the tree_lock.

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 arch/unicore32/include/asm/cacheflush.h | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/arch/unicore32/include/asm/cacheflush.h b/arch/unicore32/include/asm/cacheflush.h
index a5e08e2d5d6d..1d9132b66039 100644
--- a/arch/unicore32/include/asm/cacheflush.h
+++ b/arch/unicore32/include/asm/cacheflush.h
@@ -170,10 +170,8 @@ extern void flush_cache_page(struct vm_area_struct *vma,
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *);
 
-#define flush_dcache_mmap_lock(mapping)\
-   spin_lock_irq(&(mapping)->tree_lock)
-#define flush_dcache_mmap_unlock(mapping)  \
-   spin_unlock_irq(&(mapping)->tree_lock)
+#define flush_dcache_mmap_lock(mapping)do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)  do { } while (0)
 
 #define flush_icache_user_range(vma, page, addr, len)  \
flush_dcache_page(page)
-- 
2.16.1



[PATCH v8 08/63] fscache: Use appropriate radix tree accessors

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

Don't open-code accesses to data structure internals.

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
Reviewed-by: Jeff Layton <jlay...@redhat.com>
---
 fs/fscache/cookie.c | 2 +-
 fs/fscache/object.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index ff84258132bb..e9054e0c1a49 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -608,7 +608,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
/* Clear pointers back to the netfs */
cookie->netfs_data  = NULL;
cookie->def = NULL;
-   BUG_ON(cookie->stores.rnode);
+   BUG_ON(!radix_tree_empty(&cookie->stores));
 
if (cookie->parent) {
	ASSERTCMP(atomic_read(&cookie->parent->usage), >, 0);
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index 7a182c87f378..aa0e71f02c33 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -956,7 +956,7 @@ static const struct fscache_state *_fscache_invalidate_object(struct fscache_obj
 * retire the object instead.
 */
if (!fscache_use_cookie(object)) {
-   ASSERT(object->cookie->stores.rnode == NULL);
+   ASSERT(radix_tree_empty(&object->cookie->stores));
	set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
_leave(" [no cookie]");
return transit_to(KILL_OBJECT);
-- 
2.16.1



[PATCH v8 06/63] btrfs: Use filemap_range_has_page()

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

The current implementation of btrfs_page_exists_in_range() gives the
wrong answer if the workingset code has stored a shadow entry in the
page cache.  The filemap_range_has_page() function does not have this
problem, and it's shared code, so use it instead.

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 fs/btrfs/btrfs_inode.h |  6 -
 fs/btrfs/inode.c   | 70 --
 2 files changed, 5 insertions(+), 71 deletions(-)

diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index f527e99c9f8d..078a53e01ece 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -364,6 +364,10 @@ static inline void btrfs_print_data_csum_error(struct btrfs_inode *inode,
logical_start, csum, csum_expected, mirror_num);
 }
 
-bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end);
+static inline bool btrfs_page_exists_in_range(struct inode *inode,
+   loff_t start, loff_t end)
+{
+   return filemap_range_has_page(inode->i_mapping, start, end);
+}
 
 #endif
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 1f5b93ecffca..3340de232944 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7476,76 +7476,6 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
return ret;
 }
 
-bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
-{
-   struct radix_tree_root *root = &inode->i_mapping->page_tree;
-   bool found = false;
-   void **pagep = NULL;
-   struct page *page = NULL;
-   unsigned long start_idx;
-   unsigned long end_idx;
-
-   start_idx = start >> PAGE_SHIFT;
-
-   /*
-* end is the last byte in the last page.  end == start is legal
-*/
-   end_idx = end >> PAGE_SHIFT;
-
-   rcu_read_lock();
-
-   /* Most of the code in this while loop is lifted from
-* find_get_page.  It's been modified to begin searching from a
-* page and return just the first page found in that range.  If the
-* found idx is less than or equal to the end idx then we know that
-* a page exists.  If no pages are found or if those pages are
-* outside of the range then we're fine (yay!) */
-   while (page == NULL &&
-  radix_tree_gang_lookup_slot(root, &pagep, NULL, start_idx, 1)) {
-   page = radix_tree_deref_slot(pagep);
-   if (unlikely(!page))
-   break;
-
-   if (radix_tree_exception(page)) {
-   if (radix_tree_deref_retry(page)) {
-   page = NULL;
-   continue;
-   }
-   /*
-* Otherwise, shmem/tmpfs must be storing a swap entry
-* here as an exceptional entry: so return it without
-* attempting to raise page count.
-*/
-   page = NULL;
-   break; /* TODO: Is this relevant for this use case? */
-   }
-
-   if (!page_cache_get_speculative(page)) {
-   page = NULL;
-   continue;
-   }
-
-   /*
-* Has the page moved?
-* This is part of the lockless pagecache protocol. See
-* include/linux/pagemap.h for details.
-*/
-   if (unlikely(page != *pagep)) {
-   put_page(page);
-   page = NULL;
-   }
-   }
-
-   if (page) {
-   if (page->index <= end_idx)
-   found = true;
-   put_page(page);
-   }
-
-   rcu_read_unlock();
-   return found;
-}
-
 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
  struct extent_state **cached_state, int writing)
 {
-- 
2.16.1



[PATCH v8 07/63] xfs: Rename xa_ elements to ail_

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

This is a simple rename, except that xa_ail becomes ail_head.

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
Reviewed-by: Darrick J. Wong <darrick.w...@oracle.com>
---
 fs/xfs/xfs_buf_item.c|  10 ++--
 fs/xfs/xfs_dquot.c   |   4 +-
 fs/xfs/xfs_dquot_item.c  |  11 ++--
 fs/xfs/xfs_inode_item.c  |  22 +++
 fs/xfs/xfs_log.c |   6 +-
 fs/xfs/xfs_log_recover.c |  80 -
 fs/xfs/xfs_trans.c   |  18 +++---
 fs/xfs/xfs_trans_ail.c   | 152 +++
 fs/xfs/xfs_trans_buf.c   |   4 +-
 fs/xfs/xfs_trans_priv.h  |  42 ++---
 10 files changed, 175 insertions(+), 174 deletions(-)

diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 270ddb4d2313..82ad270e390e 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -460,7 +460,7 @@ xfs_buf_item_unpin(
	list_del_init(&bp->b_li_list);
bp->b_iodone = NULL;
} else {
-   spin_lock(&ailp->xa_lock);
+   spin_lock(&ailp->ail_lock);
xfs_trans_ail_delete(ailp, lip, SHUTDOWN_LOG_IO_ERROR);
xfs_buf_item_relse(bp);
ASSERT(bp->b_log_item == NULL);
@@ -1057,12 +1057,12 @@ xfs_buf_do_callbacks_fail(
	lip = list_first_entry(&bp->b_li_list, struct xfs_log_item,
li_bio_list);
ailp = lip->li_ailp;
-   spin_lock(&ailp->xa_lock);
+   spin_lock(&ailp->ail_lock);
	list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
if (lip->li_ops->iop_error)
lip->li_ops->iop_error(lip, bp);
}
-   spin_unlock(&ailp->xa_lock);
+   spin_unlock(&ailp->ail_lock);
 }
 
 static bool
@@ -1226,7 +1226,7 @@ xfs_buf_iodone(
 *
 * Either way, AIL is useless if we're forcing a shutdown.
 */
-   spin_lock(&ailp->xa_lock);
+   spin_lock(&ailp->ail_lock);
xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
xfs_buf_item_free(BUF_ITEM(lip));
 }
@@ -1246,7 +1246,7 @@ xfs_buf_resubmit_failed_buffers(
/*
 * Clear XFS_LI_FAILED flag from all items before resubmit
 *
-* XFS_LI_FAILED set/clear is protected by xa_lock, the caller of this
+* XFS_LI_FAILED set/clear is protected by ail_lock, the caller of this
 * function already has it acquired
 */
	list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 43572f8a1b8e..c4041b676b7b 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -920,7 +920,7 @@ xfs_qm_dqflush_done(
 (lip->li_flags & XFS_LI_FAILED))) {
 
/* xfs_trans_ail_delete() drops the AIL lock. */
-   spin_lock(&ailp->xa_lock);
+   spin_lock(&ailp->ail_lock);
if (lip->li_lsn == qip->qli_flush_lsn) {
xfs_trans_ail_delete(ailp, lip, 
SHUTDOWN_CORRUPT_INCORE);
} else {
@@ -930,7 +930,7 @@ xfs_qm_dqflush_done(
 */
if (lip->li_flags & XFS_LI_FAILED)
xfs_clear_li_failed(lip);
-   spin_unlock(&ailp->xa_lock);
+   spin_unlock(&ailp->ail_lock);
}
}
 
diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
index 96eaa6933709..4b331e354da7 100644
--- a/fs/xfs/xfs_dquot_item.c
+++ b/fs/xfs/xfs_dquot_item.c
@@ -157,8 +157,9 @@ xfs_dquot_item_error(
 STATIC uint
 xfs_qm_dquot_logitem_push(
struct xfs_log_item *lip,
-   struct list_head	*buffer_list) __releases(&lip->li_ailp->xa_lock)
- __acquires(&lip->li_ailp->xa_lock)
+   struct list_head	*buffer_list)
+   __releases(&lip->li_ailp->ail_lock)
+   __acquires(&lip->li_ailp->ail_lock)
 {
struct xfs_dquot*dqp = DQUOT_ITEM(lip)->qli_dquot;
struct xfs_buf  *bp = lip->li_buf;
@@ -205,7 +206,7 @@ xfs_qm_dquot_logitem_push(
goto out_unlock;
}
 
-   spin_unlock(&lip->li_ailp->xa_lock);
+   spin_unlock(&lip->li_ailp->ail_lock);
 
	error = xfs_qm_dqflush(dqp, &bp);
if (error) {
@@ -217,7 +218,7 @@ xfs_qm_dquot_logitem_push(
xfs_buf_relse(bp);
}
 
-   spin_lock(&lip->li_ailp->xa_lock);
+   spin_lock(&lip->li_ailp->ail_lock);
 out_unlock:
xfs_dqunlock(dqp);
return rval;
@@ -400,7 +401,7 @@ xfs_qm_qoffend_logitem_committed(
 * Delete the qoff-start logitem from the AIL.
 * xfs_trans_ail_delete() drops the AIL lock.
 */
-   spin_lock(&ailp->xa_lock);
+   spin_lock(&ailp->ail_lock);
	xfs_trans_ail_delete(ailp, &qfs->qql_item, SHUTDOWN_LOG_IO_ERROR);

[PATCH v8 02/63] radix tree: Use GFP_ZONEMASK bits of gfp_t for flags

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

None of these bits may be used for slab allocations, so we can use them
as radix tree flags as long as we mask them off before passing them
to the slab allocator.  Move the IDR flag from the high bits to the
GFP_ZONEMASK bits.

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
Acked-by: Jeff Layton <jlay...@kernel.org>
---
 include/linux/idr.h  | 3 ++-
 include/linux/radix-tree.h   | 7 ++++---
 lib/radix-tree.c | 3 ++-
 tools/testing/radix-tree/linux/gfp.h | 1 +
 4 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/include/linux/idr.h b/include/linux/idr.h
index 7d6a6313f0ab..913c335054f0 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -29,7 +29,8 @@ struct idr {
 #define IDR_FREE   0
 
 /* Set the IDR flag and the IDR_FREE tag */
-#define IDR_RT_MARKER  ((__force gfp_t)(3 << __GFP_BITS_SHIFT))
+#define IDR_RT_MARKER  (ROOT_IS_IDR | (__force gfp_t)  \
+   (1 << (ROOT_TAG_SHIFT + IDR_FREE)))
 
 #define IDR_INIT_BASE(base) {  \
.idr_rt = RADIX_TREE_INIT(IDR_RT_MARKER),   \
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index fc55ff31eca7..6c4e2e716dac 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -104,9 +104,10 @@ struct radix_tree_node {
unsigned long   tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
 };
 
-/* The top bits of gfp_mask are used to store the root tags and the IDR flag */
-#define ROOT_IS_IDR((__force gfp_t)(1 << __GFP_BITS_SHIFT))
-#define ROOT_TAG_SHIFT (__GFP_BITS_SHIFT + 1)
+/* The IDR tag is stored in the low bits of the GFP flags */
+#define ROOT_IS_IDR((__force gfp_t)4)
+/* The top bits of gfp_mask are used to store the root tags */
+#define ROOT_TAG_SHIFT (__GFP_BITS_SHIFT)
 
 struct radix_tree_root {
gfp_t   gfp_mask;
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 8e00138d593f..da9e10c827df 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -146,7 +146,7 @@ static unsigned int radix_tree_descend(const struct radix_tree_node *parent,
 
 static inline gfp_t root_gfp_mask(const struct radix_tree_root *root)
 {
-   return root->gfp_mask & __GFP_BITS_MASK;
+   return root->gfp_mask & (__GFP_BITS_MASK & ~GFP_ZONEMASK);
 }
 
 static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
@@ -2285,6 +2285,7 @@ void __init radix_tree_init(void)
int ret;
 
BUILD_BUG_ON(RADIX_TREE_MAX_TAGS + __GFP_BITS_SHIFT > 32);
+   BUILD_BUG_ON(ROOT_IS_IDR & ~GFP_ZONEMASK);
radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
sizeof(struct radix_tree_node), 0,
SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
diff --git a/tools/testing/radix-tree/linux/gfp.h b/tools/testing/radix-tree/linux/gfp.h
index e3201ccf54c3..32159c08a52e 100644
--- a/tools/testing/radix-tree/linux/gfp.h
+++ b/tools/testing/radix-tree/linux/gfp.h
@@ -19,6 +19,7 @@
 
 #define __GFP_RECLAIM  (__GFP_DIRECT_RECLAIM|__GFP_KSWAPD_RECLAIM)
 
+#define GFP_ZONEMASK   0x0fu
 #define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
 #define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
 #define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)
-- 
2.16.1



[PATCH v8 12/63] xarray: Change definition of sibling entries

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

Instead of storing a pointer to the slot containing the canonical entry,
store the offset of the slot.  Produces slightly more efficient code
(~300 bytes) and simplifies the implementation.

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 include/linux/xarray.h | 93 ++
 lib/radix-tree.c   | 66 +++
 2 files changed, 112 insertions(+), 47 deletions(-)

diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index f61806fd8002..283beb5aac58 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -22,6 +22,12 @@
  * x1: Value entry
  *
  * Attempting to store internal entries in the XArray is a bug.
+ *
+ * Most internal entries are pointers to the next node in the tree.
+ * The following internal entries have a special meaning:
+ *
+ * 0-62: Sibling entries
+ * 256: Retry entry
  */
 
 #define BITS_PER_XA_VALUE  (BITS_PER_LONG - 1)
@@ -63,6 +69,42 @@ static inline bool xa_is_value(const void *entry)
return (unsigned long)entry & 1;
 }
 
+/*
+ * xa_mk_internal() - Create an internal entry.
+ * @v: Value to turn into an internal entry.
+ *
+ * Context: Any context.
+ * Return: An XArray internal entry corresponding to this value.
+ */
+static inline void *xa_mk_internal(unsigned long v)
+{
+   return (void *)((v << 2) | 2);
+}
+
+/*
+ * xa_to_internal() - Extract the value from an internal entry.
+ * @entry: XArray entry.
+ *
+ * Context: Any context.
+ * Return: The value which was stored in the internal entry.
+ */
+static inline unsigned long xa_to_internal(const void *entry)
+{
+   return (unsigned long)entry >> 2;
+}
+
+/*
+ * xa_is_internal() - Is the entry an internal entry?
+ * @entry: XArray entry.
+ *
+ * Context: Any context.
+ * Return: %true if the entry is an internal entry.
+ */
+static inline bool xa_is_internal(const void *entry)
+{
+   return ((unsigned long)entry & 3) == 2;
+}
+
 #define xa_trylock(xa) spin_trylock(&(xa)->xa_lock)
 #define xa_lock(xa)spin_lock(&(xa)->xa_lock)
 #define xa_unlock(xa)  spin_unlock(&(xa)->xa_lock)
@@ -75,4 +117,55 @@ static inline bool xa_is_value(const void *entry)
 #define xa_unlock_irqrestore(xa, flags) \
spin_unlock_irqrestore(&(xa)->xa_lock, flags)
 
+/* Everything below here is the Advanced API.  Proceed with caution. */
+
+/*
+ * The xarray is constructed out of a set of 'chunks' of pointers.  Choosing
+ * the best chunk size requires some tradeoffs.  A power of two recommends
+ * itself so that we can walk the tree based purely on shifts and masks.
+ * Generally, the larger the better; as the number of slots per level of the
+ * tree increases, the less tall the tree needs to be.  But that needs to be
+ * balanced against the memory consumption of each node.  On a 64-bit system,
+ * xa_node is currently 576 bytes, and we get 7 of them per 4kB page.  If we
+ * doubled the number of slots per node, we'd get only 3 nodes per 4kB page.
+ */
+#ifndef XA_CHUNK_SHIFT
+#define XA_CHUNK_SHIFT (CONFIG_BASE_SMALL ? 4 : 6)
+#endif
+#define XA_CHUNK_SIZE  (1UL << XA_CHUNK_SHIFT)
+#define XA_CHUNK_MASK  (XA_CHUNK_SIZE - 1)
+
+/* Private */
+static inline bool xa_is_node(const void *entry)
+{
+   return xa_is_internal(entry) && (unsigned long)entry > 4096;
+}
+
+/* Private */
+static inline void *xa_mk_sibling(unsigned int offset)
+{
+   return xa_mk_internal(offset);
+}
+
+/* Private */
+static inline unsigned long xa_to_sibling(const void *entry)
+{
+   return xa_to_internal(entry);
+}
+
+/**
+ * xa_is_sibling() - Is the entry a sibling entry?
+ * @entry: Entry retrieved from the XArray
+ *
+ * Return: %true if the entry is a sibling entry.
+ */
+static inline bool xa_is_sibling(const void *entry)
+{
+   return IS_ENABLED(CONFIG_RADIX_TREE_MULTIORDER) &&
+   xa_is_internal(entry) &&
+   (entry < xa_mk_sibling(XA_CHUNK_SIZE - 1));
+}
+
+#define XA_RETRY_ENTRY xa_mk_internal(256)
+
 #endif /* _LINUX_XARRAY_H */
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index c7246de3f367..dd4669b3c30e 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -38,6 +38,7 @@
 #include 
 #include 
 #include 
+#include 
 
 
 /* Number of nodes in fully populated tree of given height */
@@ -98,24 +99,7 @@ static inline void *node_to_entry(void *ptr)
return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE);
 }
 
-#define RADIX_TREE_RETRY   node_to_entry(NULL)
-
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
-/* Sibling slots point directly to another slot in the same node */
-static inline
-bool is_sibling_entry(const struct radix_tree_node *parent, void *node)
-{
-   void __rcu **ptr = node;
-   return (parent->slots <= ptr) &&
-  

[PATCH v8 09/63] xarray: Add the xa_lock to the radix_tree_root

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

This results in no change in structure size on 64-bit machines as it
fits in the padding between the gfp_t and the void *.  32-bit machines
will grow the structure from 8 to 12 bytes.  Almost all radix trees
are protected with (at least) a spinlock, so as they are converted from
radix trees to xarrays, the data structures will shrink again.

Initialising the spinlock requires a name for the benefit of lockdep,
so RADIX_TREE_INIT() now needs to know the name of the radix tree it's
initialising, and so do IDR_INIT() and IDA_INIT().

Also add the xa_lock() and xa_unlock() family of wrappers to make it
easier to use the lock.  If we could rely on -fplan9-extensions in
the compiler, we could avoid all of this syntactic sugar, but that
wasn't added until gcc 4.6.

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
Reviewed-by: Jeff Layton <jlay...@kernel.org>
---
 fs/f2fs/gc.c   |  2 +-
 include/linux/idr.h| 19 ++-
 include/linux/radix-tree.h |  7 +--
 include/linux/xarray.h | 24 
 kernel/pid.c   |  2 +-
 tools/include/linux/spinlock.h |  1 +
 6 files changed, 42 insertions(+), 13 deletions(-)
 create mode 100644 include/linux/xarray.h

diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index aa720cc44509..7aa15134180e 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -1006,7 +1006,7 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
unsigned int init_segno = segno;
struct gc_inode_list gc_list = {
.ilist = LIST_HEAD_INIT(gc_list.ilist),
-   .iroot = RADIX_TREE_INIT(GFP_NOFS),
+   .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
};
 
trace_f2fs_gc_begin(sbi->sb, sync, background,
diff --git a/include/linux/idr.h b/include/linux/idr.h
index 913c335054f0..e856f4e0ab35 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -32,27 +32,28 @@ struct idr {
 #define IDR_RT_MARKER  (ROOT_IS_IDR | (__force gfp_t)  \
(1 << (ROOT_TAG_SHIFT + IDR_FREE)))
 
-#define IDR_INIT_BASE(base) {  \
-   .idr_rt = RADIX_TREE_INIT(IDR_RT_MARKER),   \
+#define IDR_INIT_BASE(name, base) {\
+   .idr_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER), \
.idr_base = (base), \
.idr_next = 0,  \
 }
 
 /**
  * IDR_INIT() - Initialise an IDR.
+ * @name: Name of IDR.
  *
  * A freshly-initialised IDR contains no IDs.
  */
-#define IDR_INIT   IDR_INIT_BASE(0)
+#define IDR_INIT(name) IDR_INIT_BASE(name, 0)
 
 /**
- * DEFINE_IDR() - Define a statically-allocated IDR
- * @name: Name of IDR
+ * DEFINE_IDR() - Define a statically-allocated IDR.
+ * @name: Name of IDR.
  *
  * An IDR defined using this macro is ready for use with no additional
  * initialisation required.  It contains no IDs.
  */
-#define DEFINE_IDR(name)   struct idr name = IDR_INIT
+#define DEFINE_IDR(name)   struct idr name = IDR_INIT(name)
 
 /**
  * idr_get_cursor - Return the current position of the cyclic allocator
@@ -219,10 +220,10 @@ struct ida {
struct radix_tree_root  ida_rt;
 };
 
-#define IDA_INIT   {   \
-   .ida_rt = RADIX_TREE_INIT(IDR_RT_MARKER | GFP_NOWAIT),  \
+#define IDA_INIT(name) {   \
+   .ida_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER | GFP_NOWAIT),\
 }
-#define DEFINE_IDA(name)   struct ida name = IDA_INIT
+#define DEFINE_IDA(name)   struct ida name = IDA_INIT(name)
 
 int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
 int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 6c4e2e716dac..34149e8b5f73 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -110,20 +110,23 @@ struct radix_tree_node {
 #define ROOT_TAG_SHIFT (__GFP_BITS_SHIFT)
 
 struct radix_tree_root {
+   spinlock_t  xa_lock;
gfp_t   gfp_mask;
struct radix_tree_node  __rcu *rnode;
 };
 
-#define RADIX_TREE_INIT(mask)  {   \
+#define RADIX_TREE_INIT(name, mask){   \
+   .xa_lock = __SPIN_LOCK_UNLOCKED(name.xa_lock),  \
.gfp_mask = (mask), \
.rnode = NULL,  \
 }
 
 #define RADIX_TREE(name, mask) \
-   struct radix_tree_root name = RADIX_TREE_INIT(mask)
+   struct radix_tree_root name = RADIX_TREE_INIT(name, mask)
 
 #define INIT_RADIX_TREE(root, mask)  

[PATCH v8 11/63] xarray: Replace exceptional entries

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

Introduce xarray value entries to replace the radix tree exceptional
entry code.  This is a slight change in encoding to allow the use of an
extra bit (we can now store BITS_PER_LONG - 1 bits in a value entry).
It is also a change in emphasis; exceptional entries are intimidating
and different.  As the comment explains, you can choose to store values
or pointers in the xarray and they are both first-class citizens.
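
The new encoding in brief -- a sketch matching the helpers this patch
introduces:

static inline void *xa_mk_value(unsigned long v)
{
	WARN_ON((long)v < 0);		/* the low bit tags a value entry */
	return (void *)((v << 1) | 1);	/* BITS_PER_LONG - 1 payload bits */
}

static inline unsigned long xa_to_value(const void *entry)
{
	return (unsigned long)entry >> 1;
}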

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 arch/powerpc/include/asm/book3s/64/pgtable.h|   4 +-
 arch/powerpc/include/asm/nohash/64/pgtable.h|   4 +-
 drivers/gpu/drm/i915/i915_gem.c |  17 ++--
 drivers/staging/lustre/lustre/mdc/mdc_request.c |   2 +-
 fs/btrfs/compression.c  |   2 +-
 fs/dax.c| 107 
 fs/proc/task_mmu.c  |   2 +-
 include/linux/radix-tree.h  |  36 ++--
 include/linux/swapops.h |  19 ++---
 include/linux/xarray.h  |  54 
 lib/idr.c   |  61 ++
 lib/radix-tree.c|  21 ++---
 mm/filemap.c|  10 +--
 mm/khugepaged.c |   2 +-
 mm/madvise.c|   2 +-
 mm/memcontrol.c |   2 +-
 mm/mincore.c|   2 +-
 mm/readahead.c  |   2 +-
 mm/shmem.c  |  10 +--
 mm/swap.c   |   2 +-
 mm/truncate.c   |  12 +--
 mm/workingset.c |  12 ++-
 tools/testing/radix-tree/idr-test.c |   6 +-
 tools/testing/radix-tree/linux/radix-tree.h |   1 +
 tools/testing/radix-tree/multiorder.c   |  47 +--
 tools/testing/radix-tree/test.c |   2 +-
 26 files changed, 223 insertions(+), 218 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index a6b9f1d74600..c3a5c809779c 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -709,9 +709,7 @@ static inline bool pte_user(pte_t pte)
BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \
BUILD_BUG_ON(_PAGE_HPTEFLAGS & _PAGE_SWP_SOFT_DIRTY);   \
} while (0)
-/*
- * on pte we don't need handle RADIX_TREE_EXCEPTIONAL_SHIFT;
- */
+
 #define SWP_TYPE_BITS 5
 #define __swp_type(x)  (((x).val >> _PAGE_BIT_SWAP_TYPE) \
& ((1UL << SWP_TYPE_BITS) - 1))
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 5c5f75d005ad..22f519687b74 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -330,9 +330,7 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm,
 */ \
BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \
} while (0)
-/*
- * on pte we don't need handle RADIX_TREE_EXCEPTIONAL_SHIFT;
- */
+
 #define SWP_TYPE_BITS 5
 #define __swp_type(x)  (((x).val >> _PAGE_BIT_SWAP_TYPE) \
& ((1UL << SWP_TYPE_BITS) - 1))
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a5bd07338b46..cd3ae7c236ae 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -5771,7 +5771,8 @@ i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
count = __sg_page_count(sg);
 
while (idx + count <= n) {
-   unsigned long exception, i;
+   void *entry;
+   unsigned long i;
int ret;
 
/* If we cannot allocate and insert this entry, or the
@@ -5786,12 +5787,9 @@ i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
if (ret && ret != -EEXIST)
goto scan;
 
-   exception =
-   RADIX_TREE_EXCEPTIONAL_ENTRY |
-   idx << RADIX_TREE_EXCEPTIONAL_SHIFT;
+   entry = xa_mk_value(idx);
for (i = 1; i < count; i++) {
-   ret = radix_tree_insert(&iter->radix, idx + i,
-   (void *)exception);
+   ret = radix_tree_insert(&iter->radix, idx + i, entry);
if (ret && ret != -EEXIST)
goto scan;
}
@@ -5829,15 +5827,14 @@ i915_gem_object_get_sg(struct d

[PATCH v8 10/63] page cache: Use xa_lock

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

Remove the address_space ->tree_lock and use the xa_lock newly added to
the radix_tree_root.  Rename the address_space ->page_tree to ->i_pages,
since we don't really care that it's a tree.
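
The conversion is mechanical; sketched on one caller:

	/* was: spin_lock_irq(&mapping->tree_lock); */
	xa_lock_irq(&mapping->i_pages);
	/* ... manipulate the page cache ... */
	/* was: spin_unlock_irq(&mapping->tree_lock); */
	xa_unlock_irq(&mapping->i_pages);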

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
Acked-by: Jeff Layton <jlay...@redhat.com>
---
 Documentation/cgroup-v1/memory.txt  |   2 +-
 Documentation/vm/page_migration |  14 +--
 arch/arm/include/asm/cacheflush.h   |   6 +-
 arch/nds32/include/asm/cacheflush.h |   4 +-
 arch/nios2/include/asm/cacheflush.h |   6 +-
 arch/parisc/include/asm/cacheflush.h|   6 +-
 drivers/staging/lustre/lustre/llite/glimpse.c   |   2 +-
 drivers/staging/lustre/lustre/mdc/mdc_request.c |   8 +-
 fs/afs/write.c  |   9 +-
 fs/btrfs/compression.c  |   2 +-
 fs/btrfs/extent_io.c|  16 +--
 fs/buffer.c |  13 ++-
 fs/cifs/file.c  |   9 +-
 fs/dax.c| 126 
 fs/f2fs/data.c  |   6 +-
 fs/f2fs/dir.c   |   6 +-
 fs/f2fs/inline.c|   6 +-
 fs/f2fs/node.c  |   8 +-
 fs/fs-writeback.c   |  22 ++---
 fs/inode.c  |  11 +--
 fs/nilfs2/btnode.c  |  20 ++--
 fs/nilfs2/page.c|  22 ++---
 include/linux/backing-dev.h |  14 +--
 include/linux/fs.h  |   8 +-
 include/linux/mm.h  |   2 +-
 include/linux/pagemap.h |   4 +-
 mm/filemap.c|  84 
 mm/huge_memory.c|  10 +-
 mm/khugepaged.c |  49 +
 mm/memcontrol.c |   4 +-
 mm/memfd.c  |  18 ++--
 mm/migrate.c|  32 +++---
 mm/page-writeback.c |  43 
 mm/readahead.c  |   2 +-
 mm/rmap.c   |   4 +-
 mm/shmem.c  |  42 
 mm/swap_state.c |  17 ++--
 mm/truncate.c   |  22 ++---
 mm/vmscan.c |  12 +--
 mm/workingset.c |  22 ++---
 40 files changed, 346 insertions(+), 367 deletions(-)

diff --git a/Documentation/cgroup-v1/memory.txt 
b/Documentation/cgroup-v1/memory.txt
index a4af2e124e24..3682e99234c2 100644
--- a/Documentation/cgroup-v1/memory.txt
+++ b/Documentation/cgroup-v1/memory.txt
@@ -262,7 +262,7 @@ When oom event notifier is registered, event will be 
delivered.
 2.6 Locking
 
lock_page_cgroup()/unlock_page_cgroup() should not be called under
-   mapping->tree_lock.
+   the i_pages lock.
 
Other lock order is following:
PG_locked.
diff --git a/Documentation/vm/page_migration b/Documentation/vm/page_migration
index 0478ae2ad44a..496868072e24 100644
--- a/Documentation/vm/page_migration
+++ b/Documentation/vm/page_migration
@@ -90,7 +90,7 @@ Steps:
 
 1. Lock the page to be migrated
 
-2. Insure that writeback is complete.
+2. Ensure that writeback is complete.
 
 3. Lock the new page that we want to move to. It is locked so that accesses to
this (not yet uptodate) page immediately lock while the move is in progress.
@@ -100,8 +100,8 @@ Steps:
mapcount is not zero then we do not migrate the page. All user space
processes that attempt to access the page will now wait on the page lock.
 
-5. The radix tree lock is taken. This will cause all processes trying
-   to access the page via the mapping to block on the radix tree spinlock.
+5. The i_pages lock is taken. This will cause all processes trying
+   to access the page via the mapping to block on the spinlock.
 
 6. The refcount of the page is examined and we back out if references remain
otherwise we know that we are the only one referencing this page.
@@ -114,12 +114,12 @@ Steps:
 
 9. The radix tree is changed to point to the new page.
 
-10. The reference count of the old page is dropped because the radix tree
+10. The reference count of the old page is dropped because the address space
 reference is gone. A reference to the new page is established because
-the new page is referenced to by the radix tree.
+the new page is referenced by the address space.
 
-11. The radix tree lock is dropped. With that lookups in the mapping
-become possible again. Processes will move from spinning on the tree_lock
+1

[PATCH v8 17/63] xarray: Add xa_get_tag, xa_set_tag and xa_clear_tag

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

XArray tags are slightly more strongly typed than the radix tree tags,
but occupy the same bits.  This commit also adds the xas_ family of tag
operations, for cases where the caller is already holding the lock, and
xa_tagged() to ask whether any array member has a particular tag set.
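
A minimal sketch of the API added here, assuming an xarray 'array' with
an entry already present at 'index':

	xa_set_tag(&array, index, XA_TAG_0);

	/* Ask about one index ... */
	WARN_ON(!xa_get_tag(&array, index, XA_TAG_0));
	/* ... or about the array as a whole. */
	WARN_ON(!xa_tagged(&array, XA_TAG_0));

	xa_clear_tag(&array, index, XA_TAG_0);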

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 include/linux/xarray.h |  41 +++
 lib/xarray.c   | 235 +
 tools/include/linux/spinlock.h |   6 ++
 3 files changed, 282 insertions(+)

diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 5845187c1ce8..1cf012256eab 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -11,6 +11,7 @@
 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -149,6 +150,20 @@ static inline int xa_err(void *entry)
return 0;
 }
 
+typedef unsigned __bitwise xa_tag_t;
+#define XA_TAG_0   ((__force xa_tag_t)0U)
+#define XA_TAG_1   ((__force xa_tag_t)1U)
+#define XA_TAG_2   ((__force xa_tag_t)2U)
+#define XA_PRESENT ((__force xa_tag_t)8U)
+#define XA_TAG_MAX XA_TAG_2
+
+/*
+ * Values for xa_flags.  The radix tree stores its GFP flags in the xa_flags,
+ * and we remain compatible with that.
+ */
+#define XA_FLAGS_TAG(tag)  ((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \
+   (__force unsigned)(tag)))
+
 /**
  * struct xarray - The anchor of the XArray.
  * @xa_lock: Lock that protects the contents of the XArray.
@@ -195,6 +210,9 @@ struct xarray {
 
 void xa_init_flags(struct xarray *, gfp_t flags);
 void *xa_load(struct xarray *, unsigned long index);
+bool xa_get_tag(struct xarray *, unsigned long index, xa_tag_t);
+void xa_set_tag(struct xarray *, unsigned long index, xa_tag_t);
+void xa_clear_tag(struct xarray *, unsigned long index, xa_tag_t);
 
 /**
  * xa_init() - Initialise an empty XArray.
@@ -209,6 +227,19 @@ static inline void xa_init(struct xarray *xa)
xa_init_flags(xa, 0);
 }
 
+/**
+ * xa_tagged() - Inquire whether any entry in this array has a tag set
+ * @xa: Array
+ * @tag: Tag value
+ *
+ * Context: Any context.
+ * Return: %true if any entry has this tag set.
+ */
+static inline bool xa_tagged(const struct xarray *xa, xa_tag_t tag)
+{
+   return xa->xa_flags & XA_FLAGS_TAG(tag);
+}
+
 #define xa_trylock(xa) spin_trylock(&(xa)->xa_lock)
 #define xa_lock(xa)spin_lock(&(xa)->xa_lock)
 #define xa_unlock(xa)  spin_unlock(&(xa)->xa_lock)
@@ -221,6 +252,12 @@ static inline void xa_init(struct xarray *xa)
 #define xa_unlock_irqrestore(xa, flags) \
spin_unlock_irqrestore(&(xa)->xa_lock, flags)
 
+/*
+ * Versions of the normal API which require the caller to hold the xa_lock.
+ */
+void __xa_set_tag(struct xarray *, unsigned long index, xa_tag_t);
+void __xa_clear_tag(struct xarray *, unsigned long index, xa_tag_t);
+
 /* Everything below here is the Advanced API.  Proceed with caution. */
 
 /*
@@ -534,6 +571,10 @@ static inline bool xas_retry(struct xa_state *xas, const 
void *entry)
 
 void *xas_load(struct xa_state *);
 
+bool xas_get_tag(const struct xa_state *, xa_tag_t);
+void xas_set_tag(const struct xa_state *, xa_tag_t);
+void xas_clear_tag(const struct xa_state *, xa_tag_t);
+
 /**
  * xas_reload() - Refetch an entry from the xarray.
  * @xas: XArray operation state.
diff --git a/lib/xarray.c b/lib/xarray.c
index 195cb130d53d..ca25a7a4a4fa 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -5,6 +5,7 @@
  * Author: Matthew Wilcox <mawil...@microsoft.com>
  */
 
+#include 
 #include 
 #include 
 
@@ -24,6 +25,55 @@
  * @entry refers to something stored in a slot in the xarray
  */
 
+static inline struct xa_node *xa_parent(struct xarray *xa,
+   const struct xa_node *node)
+{
+   return rcu_dereference_check(node->parent,
+   lockdep_is_held(&xa->xa_lock));
+}
+
+static inline struct xa_node *xa_parent_locked(struct xarray *xa,
+   const struct xa_node *node)
+{
+   return rcu_dereference_protected(node->parent,
+   lockdep_is_held(&xa->xa_lock));
+}
+
+static inline void xa_tag_set(struct xarray *xa, xa_tag_t tag)
+{
+   if (!(xa->xa_flags & XA_FLAGS_TAG(tag)))
+   xa->xa_flags |= XA_FLAGS_TAG(tag);
+}
+
+static inline void xa_tag_clear(struct xarray *xa, xa_tag_t tag)
+{
+   if (xa->xa_flags & XA_FLAGS_TAG(tag))
+   xa->xa_flags &= ~(XA_FLAGS_TAG(tag));
+}
+
+static inline bool node_get_tag(const struct xa_node *node, unsigned int 
offset,
+   xa_tag_t tag)
+{
+   return test_bit(offset, node->tags[

[PATCH v8 16/63] xarray: Add xa_load

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

This first function in the XArray API brings with it a lot of support
infrastructure.  The advanced API is based around the xa_state which is
a more capable version of the radix_tree_iter.

As the test-suite demonstrates, it is possible to use the xarray and
radix tree APIs on the same data structure.
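
Both calling styles, sketched; xa_load() takes the RCU read lock itself,
while the advanced form lets the caller hold it across several operations:

	void *entry = xa_load(&array, index);	/* normal API */

	XA_STATE(xas, &array, index);		/* advanced API */

	rcu_read_lock();
	entry = xas_load(&xas);
	rcu_read_unlock();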

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 include/linux/xarray.h  | 304 
 lib/radix-tree.c|  43 
 lib/xarray.c| 191 +
 tools/testing/radix-tree/.gitignore |   1 +
 tools/testing/radix-tree/Makefile   |   7 +-
 tools/testing/radix-tree/linux/kernel.h |   1 +
 tools/testing/radix-tree/linux/radix-tree.h |   1 -
 tools/testing/radix-tree/linux/rcupdate.h   |   1 +
 tools/testing/radix-tree/linux/xarray.h |   1 +
 tools/testing/radix-tree/xarray-test.c  |  49 +
 10 files changed, 553 insertions(+), 46 deletions(-)
 create mode 100644 tools/testing/radix-tree/xarray-test.c

diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index b51f354dfbf0..5845187c1ce8 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -12,6 +12,8 @@
 #include 
 #include 
 #include 
+#include 
+#include 
 #include 
 #include 
 
@@ -30,6 +32,10 @@
  *
  * 0-62: Sibling entries
  * 256: Retry entry
+ *
+ * Errors are also represented as internal entries, but use the negative
+ * space (-4094 to -2).  They're never stored in the slots array; only
+ * returned by the normal API.
  */
 
 #define BITS_PER_XA_VALUE  (BITS_PER_LONG - 1)
@@ -107,6 +113,42 @@ static inline bool xa_is_internal(const void *entry)
return ((unsigned long)entry & 3) == 2;
 }
 
+/**
+ * xa_is_err() - Report whether an XArray operation returned an error
+ * @entry: Result from calling an XArray function
+ *
+ * If an XArray operation cannot complete an operation, it will return
+ * a special value indicating an error.  This function tells you
+ * whether an error occurred; xa_err() tells you which error occurred.
+ *
+ * Context: Any context.
+ * Return: %true if the entry indicates an error.
+ */
+static inline bool xa_is_err(const void *entry)
+{
+   return unlikely(xa_is_internal(entry));
+}
+
+/**
+ * xa_err() - Turn an XArray result into an errno.
+ * @entry: Result from calling an XArray function.
+ *
+ * If an XArray operation cannot complete an operation, it will return
+ * a special pointer value which encodes an errno.  This function extracts
+ * the errno from the pointer value, or returns 0 if the pointer does not
+ * represent an errno.
+ *
+ * Context: Any context.
+ * Return: A negative errno or 0.
+ */
+static inline int xa_err(void *entry)
+{
+   /* xa_to_internal() would not do sign extension. */
+   if (xa_is_err(entry))
+   return (long)entry >> 2;
+   return 0;
+}
+
 /**
  * struct xarray - The anchor of the XArray.
  * @xa_lock: Lock that protects the contents of the XArray.
@@ -152,6 +194,7 @@ struct xarray {
struct xarray name = XARRAY_INIT_FLAGS(name, flags)
 
 void xa_init_flags(struct xarray *, gfp_t flags);
+void *xa_load(struct xarray *, unsigned long index);
 
 /**
  * xa_init() - Initialise an empty XArray.
@@ -220,6 +263,62 @@ struct xa_node {
unsigned long   tags[XA_MAX_TAGS][XA_TAG_LONGS];
 };
 
+#ifdef XA_DEBUG
+void xa_dump(const struct xarray *);
+void xa_dump_node(const struct xa_node *);
+#define XA_BUG_ON(xa, x) do { \
+   if (x) \
+   xa_dump(xa); \
+   BUG_ON(x); \
+   } while (0)
+#define XA_NODE_BUG_ON(node, x) do { \
+   if ((x) && (node)) \
+   xa_dump_node(node); \
+   BUG_ON(x); \
+   } while (0)
+#else
+#define XA_BUG_ON(xa, x)   do { } while (0)
+#define XA_NODE_BUG_ON(node, x)do { } while (0)
+#endif
+
+/* Private */
+static inline void *xa_head(struct xarray *xa)
+{
+   return rcu_dereference_check(xa->xa_head,
+   lockdep_is_held(&xa->xa_lock));
+}
+
+/* Private */
+static inline void *xa_head_locked(struct xarray *xa)
+{
+   return rcu_dereference_protected(xa->xa_head,
+   lockdep_is_held(&xa->xa_lock));
+}
+
+/* Private */
+static inline void *xa_entry(struct xarray *xa,
+   const struct xa_node *node, unsigned int offset)
+{
+   XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
+   return rcu_dereference_check(node->slots[offset],
+   lockdep_is_held(&xa->xa_lock));
+}
+
+/* Private */
+static inline void *xa_entry_locked(struct xarray *xa,
+   const struct xa_node *node, unsigned int offset)
+{
+   XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
+

[PATCH v8 15/63] xarray: Add documentation

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

This is documentation on how to use the XArray, not details about its
internal implementation.

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 Documentation/core-api/index.rst  |   1 +
 Documentation/core-api/xarray.rst | 361 ++
 2 files changed, 362 insertions(+)
 create mode 100644 Documentation/core-api/xarray.rst

diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst
index c670a8031786..e4e15f0f608b 100644
--- a/Documentation/core-api/index.rst
+++ b/Documentation/core-api/index.rst
@@ -20,6 +20,7 @@ Core utilities
local_ops
workqueue
genericirq
+   xarray
flexible-arrays
librs
genalloc
diff --git a/Documentation/core-api/xarray.rst 
b/Documentation/core-api/xarray.rst
new file mode 100644
index ..914999c0bf3f
--- /dev/null
+++ b/Documentation/core-api/xarray.rst
@@ -0,0 +1,361 @@
+.. SPDX-License-Identifier: CC-BY-SA-4.0
+
+==
+XArray
+==
+
+:Author: Matthew Wilcox
+
+Overview
+
+
+The XArray is an abstract data type which behaves like a very large array
+of pointers.  It meets many of the same needs as a hash or a conventional
+resizable array.  Unlike a hash, it allows you to sensibly go to the
+next or previous entry in a cache-efficient manner.  In contrast to
+a resizable array, there is no need for copying data or changing MMU
+mappings in order to grow the array.  It is more memory-efficient,
+parallelisable and cache friendly than a doubly-linked list.  It takes
+advantage of RCU to perform lookups without locking.
+
+The XArray implementation is efficient when the indices used are densely
+clustered; hashing the object and using the hash as the index will not
+perform well.  The XArray is optimised for small indices, but still has
+good performance with large indices.  If your index can be larger than
+``ULONG_MAX`` then the XArray is not the data type for you.  The most
+important user of the XArray is the page cache.
+
+A freshly-initialised XArray contains a ``NULL`` pointer at every index.
+Each non-``NULL`` entry in the array has three bits associated with it
+called tags.  Each tag may be set or cleared independently of the others.
+You can iterate over entries which are tagged.
+
+Normal pointers may be stored in the XArray directly.  They must be 4-byte
+aligned, which is true for any pointer returned from :c:func:`kmalloc` and
+:c:func:`alloc_page`.  It isn't true for arbitrary user-space pointers,
+nor for function pointers.  You can store pointers to statically allocated
+objects, as long as those objects have an alignment of at least 4.
+
+You can also store integers between 0 and ``LONG_MAX`` in the XArray.
+You must first convert the integer into an entry using :c:func:`xa_mk_value`.
+When you retrieve an entry from the XArray, you can check whether it is
+a value entry by calling :c:func:`xa_is_value`, and convert it back to
+an integer by calling :c:func:`xa_to_value`.
+
+The XArray does not support storing :c:func:`IS_ERR` pointers as some
+conflict with value entries or internal entries.
+
+An unusual feature of the XArray is the ability to create entries which
+occupy a range of indices.  Once stored to, looking up any index in
+the range will return the same entry as looking up any other index in
+the range.  Setting a tag on one index will set it on all of them.
+Storing to any index will store to all of them.  Multi-index entries can
+be explicitly split into smaller entries, or storing ``NULL`` into any
+entry will cause the XArray to forget about the range.
+
+Normal API
+==
+
+Start by initialising an XArray, either with :c:func:`DEFINE_XARRAY`
+for statically allocated XArrays or :c:func:`xa_init` for dynamically
+allocated ones.
+
+You can then set entries using :c:func:`xa_store` and get entries
+using :c:func:`xa_load`.  xa_store will overwrite any entry with the
+new entry and return the previous entry stored at that index.  You can
+use :c:func:`xa_erase` instead of calling :c:func:`xa_store` with a
+``NULL`` entry.  There is no difference between an entry that has never
+been stored to and one that has most recently had ``NULL`` stored to it.
+
+You can conditionally replace an entry at an index by using
+:c:func:`xa_cmpxchg`.  Like :c:func:`cmpxchg`, it will only succeed if
+the entry at that index has the 'old' value.  It also returns the entry
+which was at that index; if it returns the same entry which was passed as
+'old', then :c:func:`xa_cmpxchg` succeeded.
+
+If you want to only store a new entry to an index if the current entry
+at that index is ``NULL``, you can use :c:func:`xa_insert` which
+returns ``-EEXIST`` if the entry is not empty.
+
+Calling :c:func:`xa_reserve` ensures that there is enough memory allocated
+to store an entry at the specified index.  This is not normally needed,
+but some users have a complicated locking scheme.
+
+You can enquire whether 

[PATCH v8 19/63] xarray: Add xa_cmpxchg and xa_insert

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

Like cmpxchg(), xa_cmpxchg will only store to the index if the current
entry matches the old entry.  It returns the current entry, which is
usually more useful than the errno returned by radix_tree_insert().
For the users who really only want the errno, the xa_insert() wrapper
provides a more convenient calling convention.
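
A usage sketch, assuming 'item' and 'newer' are pointers the caller owns:

	void *curr;

	/* Succeeds only if index 7 is currently empty. */
	if (xa_insert(&array, 7, item, GFP_KERNEL) == -EEXIST) {
		/* another entry was already present at index 7 */
	}

	/* The exchange happened iff the return value equals 'old'. */
	curr = xa_cmpxchg(&array, 7, item, newer, GFP_KERNEL);
	if (curr == item) {
		/* index 7 now holds newer */
	}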

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 include/linux/xarray.h | 60 
 lib/xarray.c   | 71 ++
 tools/testing/radix-tree/xarray-test.c | 10 +
 3 files changed, 141 insertions(+)

diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 38e290df2ff0..e95ebe2488f9 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -218,6 +218,8 @@ struct xarray {
 void xa_init_flags(struct xarray *, gfp_t flags);
 void *xa_load(struct xarray *, unsigned long index);
 void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
+void *xa_cmpxchg(struct xarray *, unsigned long index,
+   void *old, void *entry, gfp_t);
 bool xa_get_tag(struct xarray *, unsigned long index, xa_tag_t);
 void xa_set_tag(struct xarray *, unsigned long index, xa_tag_t);
 void xa_clear_tag(struct xarray *, unsigned long index, xa_tag_t);
@@ -277,6 +279,34 @@ static inline bool xa_tagged(const struct xarray *xa, 
xa_tag_t tag)
return xa->xa_flags & XA_FLAGS_TAG(tag);
 }
 
+/**
+ * xa_insert() - Store this entry in the XArray unless another entry is
+ * already present.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * If you would rather see the existing entry in the array, use xa_cmpxchg().
+ * This function is for users who don't care what the entry is, only that
+ * one is present.
+ *
+ * Context: Process context.  Takes and releases the xa_lock.
+ * May sleep if the @gfp flags permit.
+ * Return: 0 if the store succeeded.  -EEXIST if another entry was present.
+ *-ENOMEM if memory could not be allocated.
+ */
+static inline int xa_insert(struct xarray *xa, unsigned long index,
+   void *entry, gfp_t gfp)
+{
+   void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp);
+   if (!curr)
+   return 0;
+   if (xa_is_err(curr))
+   return xa_err(curr);
+   return -EEXIST;
+}
+
 #define xa_trylock(xa) spin_trylock(&(xa)->xa_lock)
 #define xa_lock(xa)spin_lock(&(xa)->xa_lock)
 #define xa_unlock(xa)  spin_unlock(&(xa)->xa_lock)
@@ -296,9 +326,39 @@ static inline bool xa_tagged(const struct xarray *xa, 
xa_tag_t tag)
  */
 void *__xa_erase(struct xarray *, unsigned long index);
 void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
+void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
+   void *entry, gfp_t);
 void __xa_set_tag(struct xarray *, unsigned long index, xa_tag_t);
 void __xa_clear_tag(struct xarray *, unsigned long index, xa_tag_t);
 
+/**
+ * __xa_insert() - Store this entry in the XArray unless another entry is
+ * already present.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * If you would rather see the existing entry in the array, use __xa_cmpxchg().
+ * This function is for users who don't care what the entry is, only that
+ * one is present.
+ *
+ * Context: Any context.  Expects xa_lock to be held on entry.  May
+ * release and reacquire xa_lock if the @gfp flags permit.
+ * Return: 0 if the store succeeded.  -EEXIST if another entry was present.
+ *-ENOMEM if memory could not be allocated.
+ */
+static inline int __xa_insert(struct xarray *xa, unsigned long index,
+   void *entry, gfp_t gfp)
+{
+   void *curr = __xa_cmpxchg(xa, index, NULL, entry, gfp);
+   if (!curr)
+   return 0;
+   if (xa_is_err(curr))
+   return xa_err(curr);
+   return -EEXIST;
+}
+
 /* Everything below here is the Advanced API.  Proceed with caution. */
 
 /*
diff --git a/lib/xarray.c b/lib/xarray.c
index 9e50804f168c..a231699d894a 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -937,6 +937,77 @@ void *__xa_store(struct xarray *xa, unsigned long index, 
void *entry, gfp_t gfp)
 }
 EXPORT_SYMBOL(__xa_store);
 
+/**
+ * xa_cmpxchg() - Conditionally replace an entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @old: Old value to test against.
+ * @entry: New value to place in array.
+ * @gfp: Memory allocation flags.
+ *
+ * If the entry at @index is the same as @old, replace it with @entry.
+ * If the return value is equal to @old, then the exchange was successful.
+ *
+ * Context: Process context.  Takes and releases the xa_lock.  May sleep
+ * if the @gfp flags permit.
+ * Retur

[PATCH v8 18/63] xarray: Add xa_store

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

xa_store() differs from radix_tree_insert() in that it will overwrite an
existing element in the array rather than returning an error.  This is
the behaviour which most users want, and those that want more complex
behaviour generally want to use the xas family of routines anyway.

For memory allocation, xa_store() will first attempt to request memory
from the slab allocator; if memory is not immediately available, it will
drop the xa_lock and allocate memory, keeping a pointer in the xa_state.
It does not use the per-CPU cache, although those will continue to exist
until all radix tree users are converted to the xarray.

This patch also includes xa_erase() and __xa_erase() for a streamlined
way to store NULL.  Since there is no need to allocate memory in order
to store a NULL in the XArray, we do not need to trouble the user with
deciding what memory allocation flags to use.
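
Sketched, with the return value being the displaced entry:

	void *old = xa_store(&array, 3, item, GFP_KERNEL);

	/* old is whatever was previously at index 3, possibly NULL */
	old = xa_erase(&array, 3);	/* same as storing NULL; no GFP needed */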

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 include/linux/xarray.h| 109 +
 lib/radix-tree.c  |   4 +-
 lib/xarray.c  | 648 ++
 tools/include/linux/spinlock.h|   2 +
 tools/testing/radix-tree/linux/kernel.h   |   4 +
 tools/testing/radix-tree/linux/lockdep.h  |  11 +
 tools/testing/radix-tree/linux/rcupdate.h |   1 +
 tools/testing/radix-tree/test.c   |  32 ++
 tools/testing/radix-tree/test.h   |   5 +
 tools/testing/radix-tree/xarray-test.c| 113 +-
 10 files changed, 925 insertions(+), 4 deletions(-)
 create mode 100644 tools/testing/radix-tree/linux/lockdep.h

diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 1cf012256eab..38e290df2ff0 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -157,10 +157,17 @@ typedef unsigned __bitwise xa_tag_t;
 #define XA_PRESENT ((__force xa_tag_t)8U)
 #define XA_TAG_MAX XA_TAG_2
 
+enum xa_lock_type {
+   XA_LOCK_IRQ = 1,
+   XA_LOCK_BH = 2,
+};
+
 /*
  * Values for xa_flags.  The radix tree stores its GFP flags in the xa_flags,
  * and we remain compatible with that.
  */
+#define XA_FLAGS_LOCK_IRQ  ((__force gfp_t)XA_LOCK_IRQ)
+#define XA_FLAGS_LOCK_BH   ((__force gfp_t)XA_LOCK_BH)
 #define XA_FLAGS_TAG(tag)  ((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \
(__force unsigned)(tag)))
 
@@ -210,6 +217,7 @@ struct xarray {
 
 void xa_init_flags(struct xarray *, gfp_t flags);
 void *xa_load(struct xarray *, unsigned long index);
+void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
 bool xa_get_tag(struct xarray *, unsigned long index, xa_tag_t);
 void xa_set_tag(struct xarray *, unsigned long index, xa_tag_t);
 void xa_clear_tag(struct xarray *, unsigned long index, xa_tag_t);
@@ -227,6 +235,35 @@ static inline void xa_init(struct xarray *xa)
xa_init_flags(xa, 0);
 }
 
+/**
+ * xa_erase() - Erase this entry from the XArray.
+ * @xa: XArray.
+ * @index: Index of entry.
+ *
+ * This function is the equivalent of calling xa_store() with %NULL as
+ * the third argument.  The XArray does not need to allocate memory, so
+ * the user does not need to provide GFP flags.
+ *
+ * Context: Process context.  Takes and releases the xa_lock.
+ * Return: The entry which used to be at this index.
+ */
+static inline void *xa_erase(struct xarray *xa, unsigned long index)
+{
+   return xa_store(xa, index, NULL, 0);
+}
+
+/**
+ * xa_empty() - Determine if an array has any present entries.
+ * @xa: XArray.
+ *
+ * Context: Any context.
+ * Return: %true if the array contains only NULL pointers.
+ */
+static inline bool xa_empty(const struct xarray *xa)
+{
+   return xa->xa_head == NULL;
+}
+
 /**
  * xa_tagged() - Inquire whether any entry in this array has a tag set
  * @xa: Array
@@ -254,7 +291,11 @@ static inline bool xa_tagged(const struct xarray *xa, 
xa_tag_t tag)
 
 /*
  * Versions of the normal API which require the caller to hold the xa_lock.
+ * If the GFP flags allow it, will drop the lock in order to allocate
+ * memory, then reacquire it afterwards.
  */
+void *__xa_erase(struct xarray *, unsigned long index);
+void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
 void __xa_set_tag(struct xarray *, unsigned long index, xa_tag_t);
 void __xa_clear_tag(struct xarray *, unsigned long index, xa_tag_t);
 
@@ -350,6 +391,12 @@ static inline void *xa_entry_locked(struct xarray *xa,
lockdep_is_held(&xa->xa_lock));
 }
 
+/* Private */
+static inline void *xa_mk_node(const struct xa_node *node)
+{
+   return (void *)((unsigned long)node | 2);
+}
+
 /* Private */
 static inline struct xa_node *xa_to_node(const void *entry)
 {
@@ -534,6 +581,12 @@ static inline bool xas_valid(const struct xa_state *xas)
return !xas_invalid(xas);
 }
 
+/* True if th

[PATCH v8 23/63] xarray: Add xas_next and xas_prev

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

These two functions move the xas index by one position, and adjust the
rest of the iterator state to match it.  This is more efficient than
calling xas_set() as it keeps the iterator at the leaves of the tree
instead of walking the iterator from the root each time.
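
Sketch of a forward walk that stays at the leaves (caller handles the
locking; a NULL return here simply means a hole at the next index):

	XA_STATE(xas, &array, 0);
	void *entry;

	rcu_read_lock();
	for (entry = xas_load(&xas); entry; entry = xas_next(&xas)) {
		/* entry is the contents of index xas.xa_index */
	}
	rcu_read_unlock();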

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 include/linux/xarray.h |  67 +
 lib/xarray.c   |  74 ++
 tools/testing/radix-tree/xarray-test.c | 261 -
 3 files changed, 401 insertions(+), 1 deletion(-)

diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 96773f83ae03..c8a0ddc1b3df 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -683,6 +683,12 @@ static inline bool xas_not_node(struct xa_node *node)
return ((unsigned long)node & 3) || !node;
 }
 
+/* True if the node represents RESTART or an error */
+static inline bool xas_frozen(struct xa_node *node)
+{
+   return (unsigned long)node & 2;
+}
+
 /* True if the node represents head-of-tree, RESTART or BOUNDS */
 static inline bool xas_top(struct xa_node *node)
 {
@@ -940,6 +946,67 @@ enum {
for (entry = xas_find_tag(xas, max, tag); entry; \
 entry = xas_next_tag(xas, max, tag))
 
+void *__xas_next(struct xa_state *);
+void *__xas_prev(struct xa_state *);
+
+/**
+ * xas_prev() - Move iterator to previous index.
+ * @xas: XArray operation state.
+ *
+ * If the @xas was in an error state, it will remain in an error state
+ * and this function will return %NULL.  If the @xas has never been walked,
+ * it will have the effect of calling xas_load().  Otherwise one will be
+ * subtracted from the index and the state will be walked to the correct
+ * location in the array for the next operation.
+ *
+ * If the iterator was referencing index 0, this function wraps
+ * around to %ULONG_MAX.
+ *
+ * Return: The entry at the new index.  This may be %NULL or an internal
+ * entry, although it should never be a node entry.
+ */
+static inline void *xas_prev(struct xa_state *xas)
+{
+   struct xa_node *node = xas->xa_node;
+
+   if (unlikely(xas_not_node(node) || node->shift ||
+   xas->xa_offset == 0))
+   return __xas_prev(xas);
+
+   xas->xa_index--;
+   xas->xa_offset--;
+   return xa_entry(xas->xa, node, xas->xa_offset);
+}
+
+/**
+ * xas_next() - Move state to next index.
+ * @xas: XArray operation state.
+ *
+ * If the @xas was in an error state, it will remain in an error state
+ * and this function will return %NULL.  If the @xas has never been walked,
+ * it will have the effect of calling xas_load().  Otherwise one will be
+ * added to the index and the state will be walked to the correct
+ * location in the array for the next operation.
+ *
+ * If the iterator was referencing index %ULONG_MAX, this function wraps
+ * around to 0.
+ *
+ * Return: The entry at the new index.  This may be %NULL or an internal
+ * entry, although it should never be a node entry.
+ */
+static inline void *xas_next(struct xa_state *xas)
+{
+   struct xa_node *node = xas->xa_node;
+
+   if (unlikely(xas_not_node(node) || node->shift ||
+   xas->xa_offset == XA_CHUNK_MASK))
+   return __xas_next(xas);
+
+   xas->xa_index++;
+   xas->xa_offset++;
+   return xa_entry(xas->xa, node, xas->xa_offset);
+}
+
 /* Internal functions, mostly shared between radix-tree.c, xarray.c and idr.c 
*/
 void xas_destroy(struct xa_state *);
 
diff --git a/lib/xarray.c b/lib/xarray.c
index 080ed0fc3feb..7cf195b6e740 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -838,6 +838,80 @@ void xas_pause(struct xa_state *xas)
 }
 EXPORT_SYMBOL_GPL(xas_pause);
 
+/*
+ * __xas_prev() - Find the previous entry in the XArray.
+ * @xas: XArray operation state.
+ *
+ * Helper function for xas_prev() which handles all the complex cases
+ * out of line.
+ */
+void *__xas_prev(struct xa_state *xas)
+{
+   void *entry;
+
+   if (!xas_frozen(xas->xa_node))
+   xas->xa_index--;
+   if (xas_not_node(xas->xa_node))
+   return xas_load(xas);
+
+   if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
+   xas->xa_offset--;
+
+   while (xas->xa_offset == 255) {
+   xas->xa_offset = xas->xa_node->offset - 1;
+   xas->xa_node = xa_parent(xas->xa, xas->xa_node);
+   if (!xas->xa_node)
+   return set_bounds(xas);
+   }
+
+   for (;;) {
+   entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
+   if (!xa_is_node(entry))
+   return entry;
+
+   xas->xa_node = xa_to_node(entry);
+   xas_set_offset(xas);
+   }
+}
+EXPO

[PATCH v8 24/63] xarray: Add xas_create_range

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

This hopefully temporary function is useful for users who have not yet
been converted to multi-index entries.
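
A hedged usage sketch, with 'first' and 'last' chosen by the caller:

	XA_STATE(xas, &array, first);

	xas_lock(&xas);
	xas_create_range(&xas, last);	/* slots for [first, last] now exist */
	xas_unlock(&xas);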

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 include/linux/xarray.h |  2 ++
 lib/xarray.c   | 22 ++
 2 files changed, 24 insertions(+)

diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index c8a0ddc1b3df..387be18d05ba 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -744,6 +744,8 @@ void xas_init_tags(const struct xa_state *);
 bool xas_nomem(struct xa_state *, gfp_t);
 void xas_pause(struct xa_state *);
 
+void xas_create_range(struct xa_state *, unsigned long max);
+
 /**
  * xas_reload() - Refetch an entry from the xarray.
  * @xas: XArray operation state.
diff --git a/lib/xarray.c b/lib/xarray.c
index 7cf195b6e740..1d94ecc2dca3 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -612,6 +612,28 @@ void *xas_create(struct xa_state *xas)
 }
 EXPORT_SYMBOL_GPL(xas_create);
 
+/**
+ * xas_create_range() - Ensure that stores to this range will succeed
+ * @xas: XArray operation state.
+ * @max: The highest index to create a slot for.
+ *
+ * Creates all of the slots in the range between the current position of
+ * @xas and @max.  This is for the benefit of users who have not yet been
+ * converted to multi-index entries.
+ *
+ * The implementation is naive.
+ */
+void xas_create_range(struct xa_state *xas, unsigned long max)
+{
+   XA_STATE(tmp, xas->xa, xas->xa_index);
+
+   do {
+   xas_create(&tmp);
+   xas_set(&tmp, tmp.xa_index + XA_CHUNK_SIZE);
+   } while (tmp.xa_index < max);
+}
+EXPORT_SYMBOL_GPL(xas_create_range);
+
 static void store_siblings(struct xa_state *xas, void *entry, void *curr,
int *countp, int *valuesp)
 {
-- 
2.16.1



[PATCH v8 21/63] xarray: Add xa_extract

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

This function combines the functionality of radix_tree_gang_lookup() and
radix_tree_gang_lookup_tagged().  It extracts entries matching the
specified filter into a normal array.
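
Sketch: copy up to 16 present entries from the first 100 indices into a
plain array:

	void *batch[16];
	unsigned int count;

	count = xa_extract(&array, batch, 0, 99, 16, XA_PRESENT);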

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 include/linux/xarray.h |  2 ++
 lib/xarray.c   | 80 ++
 2 files changed, 82 insertions(+)

diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index cf7966bfdd3e..85dd909586f0 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -227,6 +227,8 @@ void *xa_find(struct xarray *xa, unsigned long *index,
unsigned long max, xa_tag_t) __attribute__((nonnull(2)));
 void *xa_find_after(struct xarray *xa, unsigned long *index,
unsigned long max, xa_tag_t) __attribute__((nonnull(2)));
+unsigned int xa_extract(struct xarray *, void **dst, unsigned long start,
+   unsigned long max, unsigned int n, xa_tag_t);
 
 /**
  * xa_init() - Initialise an empty XArray.
diff --git a/lib/xarray.c b/lib/xarray.c
index 267510e98a57..124bbfec66ae 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -1388,6 +1388,86 @@ void *xa_find_after(struct xarray *xa, unsigned long 
*indexp,
 }
 EXPORT_SYMBOL(xa_find_after);
 
+static unsigned int xas_extract_present(struct xa_state *xas, void **dst,
+   unsigned long max, unsigned int n)
+{
+   void *entry;
+   unsigned int i = 0;
+
+   rcu_read_lock();
+   xas_for_each(xas, entry, max) {
+   if (xas_retry(xas, entry))
+   continue;
+   dst[i++] = entry;
+   if (i == n)
+   break;
+   }
+   rcu_read_unlock();
+
+   return i;
+}
+
+static unsigned int xas_extract_tag(struct xa_state *xas, void **dst,
+   unsigned long max, unsigned int n, xa_tag_t tag)
+{
+   void *entry;
+   unsigned int i = 0;
+
+   rcu_read_lock();
+   xas_for_each_tag(xas, entry, max, tag) {
+   if (xas_retry(xas, entry))
+   continue;
+   dst[i++] = entry;
+   if (i == n)
+   break;
+   }
+   rcu_read_unlock();
+
+   return i;
+}
+
+/**
+ * xa_extract() - Copy selected entries from the XArray into a normal array.
+ * @xa: The source XArray to copy from.
+ * @dst: The buffer to copy entries into.
+ * @start: The first index in the XArray eligible to be selected.
+ * @max: The last index in the XArray eligible to be selected.
+ * @n: The maximum number of entries to copy.
+ * @filter: Selection criterion.
+ *
+ * Copies up to @n entries that match @filter from the XArray.  The
+ * copied entries will have indices between @start and @max, inclusive.
+ *
+ * The @filter may be an XArray tag value, in which case entries which are
+ * tagged with that tag will be copied.  It may also be %XA_PRESENT, in
+ * which case non-NULL entries will be copied.
+ *
+ * The entries returned may not represent a snapshot of the XArray at a
+ * moment in time.  For example, if another thread stores to index 5, then
+ * index 10, calling xa_extract() may return the old contents of index 5
+ * and the new contents of index 10.  Indices not modified while this
+ * function is running will not be skipped.
+ *
+ * If you need stronger guarantees, holding the xa_lock across calls to this
+ * function will prevent concurrent modification.
+ *
+ * Context: Any context.  Takes and releases the RCU lock.
+ * Return: The number of entries copied.
+ */
+unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start,
+   unsigned long max, unsigned int n, xa_tag_t filter)
+{
+   XA_STATE(xas, xa, start);
+
+   if (!n)
+   return 0;
+
+   if ((__force unsigned int)filter < XA_MAX_TAGS)
+   return xas_extract_tag(&xas, dst, max, n, filter);
+   return xas_extract_present(&xas, dst, max, n);
+}
+EXPORT_SYMBOL(xa_extract);
+
 #ifdef XA_DEBUG
 void xa_dump_node(const struct xa_node *node)
 {
-- 
2.16.1



[PATCH v8 20/63] xarray: Add xa_for_each

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

This iterator allows the user to efficiently walk a range of the array,
executing the loop body once for each entry in that range that matches
the filter.  This commit also includes xa_find() and xa_find_after()
which are helper functions for xa_for_each() but may also be useful in
their own right.

In the xas family of functions, we also have xas_for_each(), xas_find(),
xas_next_entry(), xas_for_each_tag(), xas_find_tag(), xas_next_tag()
and xas_pause().
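
The basic loop, sketched:

	unsigned long index = 0;
	void *entry;

	xa_for_each(&array, entry, index, ULONG_MAX, XA_PRESENT) {
		/* runs once per present entry; index tracks its position */
	}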

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 include/linux/xarray.h | 173 +
 lib/xarray.c   | 274 +
 tools/testing/radix-tree/test.c|  13 ++
 tools/testing/radix-tree/test.h|   1 +
 tools/testing/radix-tree/xarray-test.c | 122 +++
 5 files changed, 583 insertions(+)

diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index e95ebe2488f9..cf7966bfdd3e 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -223,6 +223,10 @@ void *xa_cmpxchg(struct xarray *, unsigned long index,
 bool xa_get_tag(struct xarray *, unsigned long index, xa_tag_t);
 void xa_set_tag(struct xarray *, unsigned long index, xa_tag_t);
 void xa_clear_tag(struct xarray *, unsigned long index, xa_tag_t);
+void *xa_find(struct xarray *xa, unsigned long *index,
+   unsigned long max, xa_tag_t) __attribute__((nonnull(2)));
+void *xa_find_after(struct xarray *xa, unsigned long *index,
+   unsigned long max, xa_tag_t) __attribute__((nonnull(2)));
 
 /**
  * xa_init() - Initialise an empty XArray.
@@ -279,6 +283,35 @@ static inline bool xa_tagged(const struct xarray *xa, 
xa_tag_t tag)
return xa->xa_flags & XA_FLAGS_TAG(tag);
 }
 
+/**
+ * xa_for_each() - Iterate over a portion of an XArray.
+ * @xa: XArray.
+ * @entry: Entry retrieved from array.
+ * @index: Index of @entry.
+ * @max: Maximum index to retrieve from array.
+ * @filter: Selection criterion.
+ *
+ * Initialise @index to the minimum index you want to retrieve from
+ * the array.  During the iteration, @entry will have the value of the
+ * entry stored in @xa at @index.  The iteration will skip all entries in
+ * the array which do not match @filter.  You may modify @index during the
+ * iteration if you want to skip or reprocess indices.  It is safe to modify
+ * the array during the iteration.  At the end of the iteration, @entry will
+ * be set to NULL and @index will have a value less than or equal to @max.
+ *
+ * xa_for_each() is O(n.log(n)) while xas_for_each() is O(n).  You have
+ * to handle your own locking with xas_for_each(), and if you have to unlock
+ * after each iteration, it will also end up being O(n.log(n)).  xa_for_each()
+ * will spin if it hits a retry entry; if you intend to see retry entries,
+ * you should use the xas_for_each() iterator instead.  The xas_for_each()
+ * iterator will expand into more inline code than xa_for_each().
+ *
+ * Context: Any context.  Takes and releases the RCU lock.
+ */
+#define xa_for_each(xa, entry, index, max, filter) \
+   for (entry = xa_find(xa, &index, max, filter); entry; \
+entry = xa_find_after(xa, &index, max, filter))
+
 /**
  * xa_insert() - Store this entry in the XArray unless another entry is
  * already present.
@@ -641,6 +674,12 @@ static inline bool xas_valid(const struct xa_state *xas)
return !xas_invalid(xas);
 }
 
+/* True if the pointer is something other than a node */
+static inline bool xas_not_node(struct xa_node *node)
+{
+   return ((unsigned long)node & 3) || !node;
+}
+
 /* True if the node represents head-of-tree, RESTART or BOUNDS */
 static inline bool xas_top(struct xa_node *node)
 {
@@ -685,13 +724,16 @@ static inline bool xas_retry(struct xa_state *xas, const 
void *entry)
 void *xas_load(struct xa_state *);
 void *xas_store(struct xa_state *, void *entry);
 void *xas_create(struct xa_state *);
+void *xas_find(struct xa_state *, unsigned long max);
 
 bool xas_get_tag(const struct xa_state *, xa_tag_t);
 void xas_set_tag(const struct xa_state *, xa_tag_t);
 void xas_clear_tag(const struct xa_state *, xa_tag_t);
+void *xas_find_tag(struct xa_state *, unsigned long max, xa_tag_t);
 void xas_init_tags(const struct xa_state *);
 
 bool xas_nomem(struct xa_state *, gfp_t);
+void xas_pause(struct xa_state *);
 
 /**
  * xas_reload() - Refetch an entry from the xarray.
@@ -764,6 +806,137 @@ static inline void xas_set_update(struct xa_state *xas, 
xa_update_node_t update)
xas->xa_update = update;
 }
 
+/* Skip over any of these entries when iterating */
+static inline bool xa_iter_skip(const void *entry)
+{
+   return unlikely(!entry ||
+   (xa_is_internal(entry) && entry < XA_RETRY_ENTRY));
+}
+
+/**
+ * xas_next_entry() - Advance iterator to next present entry.
+ * @xas: XArray operation sta

[PATCH v8 22/63] xarray: Add xa_destroy

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

This function frees all the internal memory allocated to the xarray
and reinitialises it to be empty.
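
The caller still owns the entries themselves, so a typical teardown
(assuming kmalloc'ed entries) looks like:

	xa_for_each(&array, entry, index, ULONG_MAX, XA_PRESENT)
		kfree(entry);
	xa_destroy(&array);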

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 include/linux/xarray.h |  1 +
 lib/xarray.c   | 28 
 2 files changed, 29 insertions(+)

diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 85dd909586f0..96773f83ae03 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -229,6 +229,7 @@ void *xa_find_after(struct xarray *xa, unsigned long *index,
unsigned long max, xa_tag_t) __attribute__((nonnull(2)));
 unsigned int xa_extract(struct xarray *, void **dst, unsigned long start,
unsigned long max, unsigned int n, xa_tag_t);
+void xa_destroy(struct xarray *);
 
 /**
  * xa_init() - Initialise an empty XArray.
diff --git a/lib/xarray.c b/lib/xarray.c
index 124bbfec66ae..080ed0fc3feb 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -1468,6 +1468,34 @@ unsigned int xa_extract(struct xarray *xa, void **dst, 
unsigned long start,
 }
 EXPORT_SYMBOL(xa_extract);
 
+/**
+ * xa_destroy() - Free all internal data structures.
+ * @xa: XArray.
+ *
+ * After calling this function, the XArray is empty and has freed all memory
+ * allocated for its internal data structures.  You are responsible for
+ * freeing the objects referenced by the XArray.
+ *
+ * Context: Any context.  Takes and releases the xa_lock, interrupt-safe.
+ */
+void xa_destroy(struct xarray *xa)
+{
+   XA_STATE(xas, xa, 0);
+   unsigned long flags;
+   void *entry;
+
+   xas.xa_node = NULL;
+   xas_lock_irqsave(&xas, flags);
+   entry = xa_head_locked(xa);
+   RCU_INIT_POINTER(xa->xa_head, NULL);
+   xas_init_tags(&xas);
+   /* lockdep checks we're still holding the lock in xas_free_nodes() */
+   if (xa_is_node(entry))
+   xas_free_nodes(&xas, xa_to_node(entry));
+   xas_unlock_irqrestore(&xas, flags);
+}
+EXPORT_SYMBOL(xa_destroy);
+
 #ifdef XA_DEBUG
 void xa_dump_node(const struct xa_node *node)
 {
-- 
2.16.1



[PATCH v8 29/63] page cache: Convert page deletion to XArray

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

The code is slightly shorter and simpler.

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 mm/filemap.c | 30 ++
 1 file changed, 14 insertions(+), 16 deletions(-)

diff --git a/mm/filemap.c b/mm/filemap.c
index 0e19ea454cba..bdda1beda932 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -111,30 +111,28 @@
  *   ->tasklist_lock(memory_failure, collect_procs_ao)
  */
 
-static void page_cache_tree_delete(struct address_space *mapping,
+static void page_cache_delete(struct address_space *mapping,
   struct page *page, void *shadow)
 {
-   int i, nr;
+   XA_STATE(xas, &mapping->i_pages, page->index);
+   unsigned int i, nr;
 
-   /* hugetlb pages are represented by one entry in the radix tree */
+   mapping_set_update(&xas, mapping);
+
+   /* hugetlb pages are represented by a single entry in the xarray */
nr = PageHuge(page) ? 1 : hpage_nr_pages(page);
 
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(PageTail(page), page);
VM_BUG_ON_PAGE(nr != 1 && shadow, page);
 
-   for (i = 0; i < nr; i++) {
-   struct radix_tree_node *node;
-   void **slot;
-
-   __radix_tree_lookup(&mapping->i_pages, page->index + i,
-   &node, &slot);
-
-   VM_BUG_ON_PAGE(!node && nr != 1, page);
-
-   radix_tree_clear_tags(&mapping->i_pages, node, slot);
-   __radix_tree_replace(&mapping->i_pages, node, slot, shadow,
-   workingset_lookup_update(mapping));
+   i = nr;
+repeat:
+   xas_store(&xas, shadow);
+   xas_init_tags(&xas);
+   if (--i) {
+   xas_next(&xas);
+   goto repeat;
}
 
page->mapping = NULL;
@@ -234,7 +232,7 @@ void __delete_from_page_cache(struct page *page, void 
*shadow)
trace_mm_filemap_delete_from_page_cache(page);
 
unaccount_page_cache_page(mapping, page);
-   page_cache_tree_delete(mapping, page, shadow);
+   page_cache_delete(mapping, page, shadow);
 }
 
 static void page_cache_free_page(struct address_space *mapping,
-- 
2.16.1



[PATCH v8 26/63] page cache: Rearrange address_space

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

Change i_pages from a radix_tree_root to an xarray, convert the
documentation into kernel-doc format and change the order of the elements
to pack them better on 64-bit systems.

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 include/linux/fs.h | 46 +++---
 1 file changed, 31 insertions(+), 15 deletions(-)

diff --git a/include/linux/fs.h b/include/linux/fs.h
index 75f1f66aec35..785100c2b835 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -389,24 +389,40 @@ int pagecache_write_end(struct file *, struct 
address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata);
 
+/**
+ * struct address_space - Contents of a cacheable, mappable object.
+ * @host: Owner, either the inode or the block_device.
+ * @i_pages: Cached pages.
+ * @gfp_mask: Memory allocation flags to use for allocating pages.
+ * @i_mmap_writable: Number of VM_SHARED mappings.
+ * @i_mmap: Tree of private and shared mappings.
+ * @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable.
+ * @nrpages: Number of page entries, protected by the i_pages lock.
+ * @nrexceptional: Shadow or DAX entries, protected by the i_pages lock.
+ * @writeback_index: Writeback starts here.
+ * @a_ops: Methods.
+ * @flags: Error bits and flags (AS_*).
+ * @wb_err: The most recent error which has occurred.
+ * @private_lock: For use by the owner of the address_space.
+ * @private_list: For use by the owner of the address_space.
+ * @private_data: For use by the owner of the address_space.
+ */
 struct address_space {
-   struct inode*host;  /* owner: inode, block_device */
-   struct radix_tree_root  i_pages;/* cached pages */
-   atomic_ti_mmap_writable;/* count VM_SHARED mappings */
-   struct rb_root_cached   i_mmap; /* tree of private and shared 
mappings */
-   struct rw_semaphore i_mmap_rwsem;   /* protect tree, count, list */
-   /* Protected by the i_pages lock */
-   unsigned long   nrpages;/* number of total pages */
-   /* number of shadow or DAX exceptional entries */
+   struct inode*host;
+   struct xarray   i_pages;
+   gfp_t   gfp_mask;
+   atomic_ti_mmap_writable;
+   struct rb_root_cached   i_mmap;
+   struct rw_semaphore i_mmap_rwsem;
+   unsigned long   nrpages;
unsigned long   nrexceptional;
-   pgoff_t writeback_index;/* writeback starts here */
-   const struct address_space_operations *a_ops;   /* methods */
-   unsigned long   flags;  /* error bits */
-   spinlock_t  private_lock;   /* for use by the address_space 
*/
-   gfp_t   gfp_mask;   /* implicit gfp mask for 
allocations */
-   struct list_headprivate_list;   /* for use by the address_space 
*/
-   void*private_data;  /* ditto */
+   pgoff_t writeback_index;
+   const struct address_space_operations *a_ops;
+   unsigned long   flags;
errseq_twb_err;
+   spinlock_t  private_lock;
+   struct list_headprivate_list;
+   void*private_data;
 } __attribute__((aligned(sizeof(long __randomize_layout;
/*
 * On most architectures that alignment is already the case; but
-- 
2.16.1



[PATCH v8 25/63] xarray: Add MAINTAINERS entry

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

Add myself as XArray and IDR maintainer.

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 MAINTAINERS | 12 
 1 file changed, 12 insertions(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index 6d78237066ab..08613d97a74d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -15293,6 +15293,18 @@ T: git 
git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/vdso
 S: Maintained
 F: arch/x86/entry/vdso/
 
+XARRAY
+M: Matthew Wilcox <mawil...@microsoft.com>
+M: Matthew Wilcox <wi...@infradead.org>
+L: linux-fsde...@vger.kernel.org
+S: Supported
+F: Documentation/core-api/xarray.rst
+F: lib/idr.c
+F: lib/xarray.c
+F: include/linux/idr.h
+F: include/linux/xarray.h
+F: tools/testing/radix-tree
+
 XC2028/3028 TUNER DRIVER
 M: Mauro Carvalho Chehab <mche...@s-opensource.com>
 M: Mauro Carvalho Chehab <mche...@kernel.org>
-- 
2.16.1



[PATCH v8 27/63] page cache: Convert hole search to XArray

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

The page cache offers the ability to search for a miss in the previous or
next N locations.  Rather than teach the XArray about the page cache's
definition of a miss, use xas_prev() and xas_next() to search the page
array.  This should be more efficient as it does not have to start the
lookup from the top for each index.

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 fs/nfs/blocklayout/blocklayout.c |   2 +-
 include/linux/pagemap.h  |   4 +-
 mm/filemap.c | 110 ++-
 mm/readahead.c   |   4 +-
 4 files changed, 55 insertions(+), 65 deletions(-)

diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 7cb5c38c19e4..961901685007 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -895,7 +895,7 @@ static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t 
idx)
end = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
if (end != inode->i_mapping->nrpages) {
rcu_read_lock();
-   end = page_cache_next_hole(mapping, idx + 1, ULONG_MAX);
+   end = page_cache_next_gap(mapping, idx + 1, ULONG_MAX);
rcu_read_unlock();
}
 
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index b1bd2186e6d2..2f5d2d3ebaac 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -241,9 +241,9 @@ static inline gfp_t readahead_gfp_mask(struct address_space 
*x)
 
 typedef int filler_t(void *, struct page *);
 
-pgoff_t page_cache_next_hole(struct address_space *mapping,
+pgoff_t page_cache_next_gap(struct address_space *mapping,
 pgoff_t index, unsigned long max_scan);
-pgoff_t page_cache_prev_hole(struct address_space *mapping,
+pgoff_t page_cache_prev_gap(struct address_space *mapping,
 pgoff_t index, unsigned long max_scan);
 
 #define FGP_ACCESSED   0x0001
diff --git a/mm/filemap.c b/mm/filemap.c
index f2251183a977..efe227940784 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1326,86 +1326,76 @@ int __lock_page_or_retry(struct page *page, struct 
mm_struct *mm,
 }
 
 /**
- * page_cache_next_hole - find the next hole (not-present entry)
- * @mapping: mapping
- * @index: index
- * @max_scan: maximum range to search
- *
- * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the
- * lowest indexed hole.
- *
- * Returns: the index of the hole if found, otherwise returns an index
- * outside of the set specified (in which case 'return - index >=
- * max_scan' will be true). In rare cases of index wrap-around, 0 will
- * be returned.
- *
- * page_cache_next_hole may be called under rcu_read_lock. However,
- * like radix_tree_gang_lookup, this will not atomically search a
- * snapshot of the tree at a single point in time. For example, if a
- * hole is created at index 5, then subsequently a hole is created at
- * index 10, page_cache_next_hole covering both indexes may return 10
- * if called under rcu_read_lock.
+ * page_cache_next_gap() - Find the next gap in the page cache.
+ * @mapping: Mapping.
+ * @index: Index.
+ * @max_scan: Maximum range to search.
+ *
+ * Search the range [index, min(index + max_scan - 1, ULONG_MAX)] for the
+ * gap with the lowest index.
+ *
+ * This function may be called under the rcu_read_lock.  However, this will
+ * not atomically search a snapshot of the cache at a single point in time.
+ * For example, if a gap is created at index 5, then subsequently a gap is
+ * created at index 10, page_cache_next_gap covering both indices may
+ * return 10 if called under the rcu_read_lock.
+ *
+ * Return: The index of the gap if found, otherwise an index outside the
+ * range specified (in which case 'return - index >= max_scan' will be true).
+ * In the rare case of index wrap-around, 0 will be returned.
  */
-pgoff_t page_cache_next_hole(struct address_space *mapping,
+pgoff_t page_cache_next_gap(struct address_space *mapping,
 pgoff_t index, unsigned long max_scan)
 {
-   unsigned long i;
+   XA_STATE(xas, &mapping->i_pages, index);
 
-   for (i = 0; i < max_scan; i++) {
-   struct page *page;
-
-   page = radix_tree_lookup(&mapping->i_pages, index);
-   if (!page || xa_is_value(page))
+   while (max_scan--) {
+   void *entry = xas_next(&xas);
+   if (!entry || xa_is_value(entry))
break;
-   index++;
-   if (index == 0)
+   if (xas.xa_index == 0)
break;
}
 
-   return index;
+   return xas.xa_index;
 }
-EXPORT_SYMBOL(page_cache_next_hole);
+EXPORT_SYMBOL(page_cache_next_gap);
 
 /**
- * page_cache_prev_hole - find the prev hole (not-present entry)
- * @mapping: mapping
- * @index: index
- * @max_scan: maximum range t

[PATCH v8 30/63] page cache: Convert page cache lookups to XArray

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

Introduce page_cache_pin() to factor out the common logic between the
various lookup routines:

find_get_entry
find_get_entries
find_get_pages_range
find_get_pages_contig
find_get_pages_range_tag
find_get_entries_tag
filemap_map_pages

By using the xa_state to control the iteration, we can remove most of
the gotos and just use the normal break/continue loop control flow.
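
For illustration only (this is not part of the diff), the skeleton the
converted lookup routines share now looks roughly like:

    XA_STATE(xas, &mapping->i_pages, start);
    struct page *page;

    rcu_read_lock();
    xas_for_each(&xas, page, ULONG_MAX) {
        if (xas_retry(&xas, page))
            continue;   /* raced with a node being freed */
        if (xa_is_value(page))
            continue;   /* shadow/swap entry, not a page */
        if (!page_cache_pin(&xas, page))
            continue;   /* lost the ref race; xas was reset */
        /* ... use or collect the page, break when done ... */
    }
    rcu_read_unlock();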

Also convert the regression1 read-side to XArray since that simulates
the functions being modified here.

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 include/linux/pagemap.h|   6 +-
 mm/filemap.c   | 380 +
 tools/testing/radix-tree/regression1.c |  68 +++---
 3 files changed, 129 insertions(+), 325 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 2f5d2d3ebaac..442977811b59 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -363,17 +363,17 @@ static inline unsigned find_get_pages(struct address_space *mapping,
 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
   unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
-   pgoff_t end, int tag, unsigned int nr_pages,
+   pgoff_t end, xa_tag_t tag, unsigned int nr_pages,
struct page **pages);
 static inline unsigned find_get_pages_tag(struct address_space *mapping,
-   pgoff_t *index, int tag, unsigned int nr_pages,
+   pgoff_t *index, xa_tag_t tag, unsigned int nr_pages,
struct page **pages)
 {
return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
nr_pages, pages);
 }
 unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
-   int tag, unsigned int nr_entries,
+   xa_tag_t tag, unsigned int nr_entries,
struct page **entries, pgoff_t *indices);
 
 struct page *grab_cache_page_write_begin(struct address_space *mapping,
diff --git a/mm/filemap.c b/mm/filemap.c
index bdda1beda932..5a6c7c874d45 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1374,6 +1374,32 @@ pgoff_t page_cache_prev_gap(struct address_space *mapping,
 }
 EXPORT_SYMBOL(page_cache_prev_gap);
 
+/*
+ * page_cache_pin() - Try to pin a page in the page cache.
+ * @xas: The XArray operation state.
+ * @page: The page which was previously found at this location.
+ *
+ * On success, the page has an elevated refcount, but is not locked.
+ * This implements the lockless pagecache protocol as described in
+ * include/linux/pagemap.h; see page_cache_get_speculative().
+ *
+ * Return: True if the page is still in the cache.
+ */
+static bool page_cache_pin(struct xa_state *xas, struct page *page)
+{
+   struct page *head = compound_head(page);
+   bool got = page_cache_get_speculative(head);
+
+   if (likely(got && (xas_reload(xas) == page) &&
+   (compound_head(page) == head)))
+   return true;
+
+   if (got)
+   put_page(head);
+   xas_reset(xas);
+   return false;
+}
+
 /**
  * find_get_entry - find and get a page cache entry
  * @mapping: the address_space to search
@@ -1389,51 +1415,21 @@ EXPORT_SYMBOL(page_cache_prev_gap);
  */
 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
 {
-   void **pagep;
-   struct page *head, *page;
+   XA_STATE(xas, &mapping->i_pages, offset);
+   struct page *page;
 
rcu_read_lock();
-repeat:
-   page = NULL;
-   pagep = radix_tree_lookup_slot(&mapping->i_pages, offset);
-   if (pagep) {
-   page = radix_tree_deref_slot(pagep);
-   if (unlikely(!page))
-   goto out;
-   if (radix_tree_exception(page)) {
-   if (radix_tree_deref_retry(page))
-   goto repeat;
-   /*
-* A shadow entry of a recently evicted page,
-* or a swap entry from shmem/tmpfs.  Return
-* it without attempting to raise page count.
-*/
-   goto out;
-   }
-
-   head = compound_head(page);
-   if (!page_cache_get_speculative(head))
-   goto repeat;
-
-   /* The page was split under us? */
-   if (compound_head(page) != head) {
-   put_page(head);
-   goto repeat;
-   }
+   do {
+       page = xas_load(&xas);
+       if (xas_retry(&xas, page))
+   continue;
+  

[PATCH v8 28/63] page cache: Add and replace pages using the XArray

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

Use the XArray APIs to add and replace pages in the page cache.  This
removes two uses of the radix tree preload API and is significantly
shorter code.
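
A sketch of the new shape (error handling and accounting elided; see
the diffs below for the real thing):

    XA_STATE(xas, &mapping->i_pages, offset);

    do {
        xas_lock_irq(&xas);
        xas_store(&xas, page);
        xas_unlock_irq(&xas);
    } while (xas_nomem(&xas, gfp_mask));

There is no preload step: if the store needed memory, xas_nomem()
allocates it with the lock dropped and the loop retries.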

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 include/linux/swap.h |   8 ++-
 mm/filemap.c | 143 ++-
 2 files changed, 67 insertions(+), 84 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 1985940af479..a0ebb5deea2d 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -300,8 +300,12 @@ void *workingset_eviction(struct address_space *mapping, 
struct page *page);
 bool workingset_refault(void *shadow);
 void workingset_activation(struct page *page);
 
-/* Do not use directly, use workingset_lookup_update */
-void workingset_update_node(struct radix_tree_node *node);
+/* Only track the nodes of mappings with shadow entries */
+void workingset_update_node(struct xa_node *node);
+#define mapping_set_update(xas, mapping) do {  \
+   if (!dax_mapping(mapping) && !shmem_mapping(mapping))   \
+   xas_set_update(xas, workingset_update_node);\
+} while (0)
 
 /* Returns workingset_update_node() if the mapping has shadow entries. */
 #define workingset_lookup_update(mapping)  \
diff --git a/mm/filemap.c b/mm/filemap.c
index efe227940784..0e19ea454cba 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -111,35 +111,6 @@
  *   ->tasklist_lock(memory_failure, collect_procs_ao)
  */
 
-static int page_cache_tree_insert(struct address_space *mapping,
- struct page *page, void **shadowp)
-{
-   struct radix_tree_node *node;
-   void **slot;
-   int error;
-
-   error = __radix_tree_create(&mapping->i_pages, page->index, 0,
-   &node, &slot);
-   if (error)
-   return error;
-   if (*slot) {
-   void *p;
-
-   p = radix_tree_deref_slot_protected(slot,
-   &mapping->i_pages.xa_lock);
-   if (!xa_is_value(p))
-   return -EEXIST;
-
-   mapping->nrexceptional--;
-   if (shadowp)
-   *shadowp = p;
-   }
-   __radix_tree_replace(&mapping->i_pages, node, slot, page,
-workingset_lookup_update(mapping));
-   mapping->nrpages++;
-   return 0;
-}
-
 static void page_cache_tree_delete(struct address_space *mapping,
   struct page *page, void *shadow)
 {
@@ -775,51 +746,44 @@ EXPORT_SYMBOL(file_write_and_wait_range);
  * locked.  This function does not add the new page to the LRU, the
  * caller must do that.
  *
- * The remove + add is atomic.  The only way this function can fail is
- * memory allocation failure.
+ * The remove + add is atomic.  This function cannot fail.
  */
 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 {
-   int error;
+   struct address_space *mapping = old->mapping;
+   void (*freepage)(struct page *) = mapping->a_ops->freepage;
+   pgoff_t offset = old->index;
+   XA_STATE(xas, &mapping->i_pages, offset);
+   unsigned long flags;
 
VM_BUG_ON_PAGE(!PageLocked(old), old);
VM_BUG_ON_PAGE(!PageLocked(new), new);
VM_BUG_ON_PAGE(new->mapping, new);
 
-   error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
-   if (!error) {
-   struct address_space *mapping = old->mapping;
-   void (*freepage)(struct page *);
-   unsigned long flags;
-
-   pgoff_t offset = old->index;
-   freepage = mapping->a_ops->freepage;
-
-   get_page(new);
-   new->mapping = mapping;
-   new->index = offset;
+   get_page(new);
+   new->mapping = mapping;
+   new->index = offset;
 
-   xa_lock_irqsave(&mapping->i_pages, flags);
-   __delete_from_page_cache(old, NULL);
-   error = page_cache_tree_insert(mapping, new, NULL);
-   BUG_ON(error);
+   xas_lock_irqsave(&xas, flags);
+   xas_store(&xas, new);
 
-   /*
-* hugetlb pages do not participate in page cache accounting.
-*/
-   if (!PageHuge(new))
-   __inc_node_page_state(new, NR_FILE_PAGES);
-   if (PageSwapBacked(new))
-   __inc_node_page_state(new, NR_SHMEM);
-   xa_unlock_irqrestore(>i_pages, flags);
-   mem_cgroup_migrate(old, new);
-   radix_tree_preload_end();
-   if (freepage)
-   freepage(old);
-   put_page(old);
-   }
+   old->mapping = NULL;
+   /* hugetlb pages do not participate in

[PATCH v8 32/63] page cache: Remove stray radix comment

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 mm/filemap.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/filemap.c b/mm/filemap.c
index 0635e9cdbc06..86c83014c909 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2579,7 +2579,7 @@ static struct page *do_read_cache_page(struct address_space *mapping,
put_page(page);
if (err == -EEXIST)
goto repeat;
-   /* Presumably ENOMEM for radix tree node */
+   /* Presumably ENOMEM for xarray node */
return ERR_PTR(err);
}
 
-- 
2.16.1



[PATCH v8 31/63] page cache: Convert delete_batch to XArray

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

Rename the function from page_cache_tree_delete_batch to just
page_cache_delete_batch.

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 mm/filemap.c | 28 +---
 1 file changed, 13 insertions(+), 15 deletions(-)

diff --git a/mm/filemap.c b/mm/filemap.c
index 5a6c7c874d45..0635e9cdbc06 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -275,7 +275,7 @@ void delete_from_page_cache(struct page *page)
 EXPORT_SYMBOL(delete_from_page_cache);
 
 /*
- * page_cache_tree_delete_batch - delete several pages from page cache
+ * page_cache_delete_batch - delete several pages from page cache
  * @mapping: the mapping to which pages belong
  * @pvec: pagevec with pages to delete
  *
@@ -288,23 +288,18 @@ EXPORT_SYMBOL(delete_from_page_cache);
  *
  * The function expects the i_pages lock to be held.
  */
-static void
-page_cache_tree_delete_batch(struct address_space *mapping,
+static void page_cache_delete_batch(struct address_space *mapping,
 struct pagevec *pvec)
 {
-   struct radix_tree_iter iter;
-   void **slot;
+   XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index);
int total_pages = 0;
int i = 0, tail_pages = 0;
struct page *page;
-   pgoff_t start;
 
-   start = pvec->pages[0]->index;
-   radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
+   mapping_set_update(&xas, mapping);
+   xas_for_each(&xas, page, ULONG_MAX) {
if (i >= pagevec_count(pvec) && !tail_pages)
break;
-       page = radix_tree_deref_slot_protected(slot,
-      &mapping->i_pages.xa_lock);
if (xa_is_value(page))
continue;
if (!tail_pages) {
@@ -313,8 +308,11 @@ page_cache_tree_delete_batch(struct address_space *mapping,
 * have our pages locked so they are protected from
 * being removed.
 */
-   if (page != pvec->pages[i])
+   if (page != pvec->pages[i]) {
+   VM_BUG_ON_PAGE(page->index >
+   pvec->pages[i]->index, page);
continue;
+   }
WARN_ON_ONCE(!PageLocked(page));
if (PageTransHuge(page) && !PageHuge(page))
tail_pages = HPAGE_PMD_NR - 1;
@@ -325,11 +323,11 @@ page_cache_tree_delete_batch(struct address_space *mapping,
 */
i++;
} else {
+   VM_BUG_ON_PAGE(page->index + HPAGE_PMD_NR - tail_pages
+   != pvec->pages[i]->index, page);
tail_pages--;
}
-       radix_tree_clear_tags(&mapping->i_pages, iter.node, slot);
-       __radix_tree_replace(&mapping->i_pages, iter.node, slot, NULL,
-       workingset_lookup_update(mapping));
+       xas_store(&xas, NULL);
total_pages++;
}
mapping->nrpages -= total_pages;
@@ -350,7 +348,7 @@ void delete_from_page_cache_batch(struct address_space *mapping,
 
unaccount_page_cache_page(mapping, pvec->pages[i]);
}
-   page_cache_tree_delete_batch(mapping, pvec);
+   page_cache_delete_batch(mapping, pvec);
xa_unlock_irqrestore(&mapping->i_pages, flags);
 
for (i = 0; i < pagevec_count(pvec); i++)
-- 
2.16.1



[PATCH v8 33/63] page cache: Convert filemap_range_has_page to XArray

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

Instead of calling find_get_pages_range() and putting any reference,
use xas_find() to iterate over any entries in the range, skipping the
shadow/swap entries.
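
A hypothetical caller, to show the question the function answers:

    /* sketch: is anything cached in the first megabyte of the file? */
    if (filemap_range_has_page(mapping, 0, SZ_1M - 1))
        /* ... something was here recently; write back or wait ... */;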

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 mm/filemap.c | 26 ++
 1 file changed, 18 insertions(+), 8 deletions(-)

diff --git a/mm/filemap.c b/mm/filemap.c
index 86c83014c909..9bc417913269 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -458,20 +458,30 @@ EXPORT_SYMBOL(filemap_flush);
 bool filemap_range_has_page(struct address_space *mapping,
   loff_t start_byte, loff_t end_byte)
 {
-   pgoff_t index = start_byte >> PAGE_SHIFT;
-   pgoff_t end = end_byte >> PAGE_SHIFT;
struct page *page;
+   XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
+   pgoff_t max = end_byte >> PAGE_SHIFT;
 
if (end_byte < start_byte)
return false;
 
-   if (mapping->nrpages == 0)
-   return false;
+   rcu_read_lock();
+   for (;;) {
+       page = xas_find(&xas, max);
+       if (xas_retry(&xas, page))
+           continue;
+       /* Shadow entries don't count */
+       if (xa_is_value(page))
+           continue;
+       /*
+        * We don't need to try to pin this page; we're about to
+        * release the RCU lock anyway.  It is enough to know that
+        * there was a page here recently.
+        */
+       break;
+   }
+   rcu_read_unlock();
 
-   if (!find_get_pages_range(mapping, &index, end, 1, &page))
-   return false;
-   put_page(page);
-   return true;
+   return page != NULL;
 }
 EXPORT_SYMBOL(filemap_range_has_page);
 
-- 
2.16.1



[PATCH v8 38/63] mm: Convert delete_from_swap_cache to XArray

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

Both callers of __delete_from_swap_cache have the swp_entry_t already,
so pass that in to make constructing the XA_STATE easier.
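
That is, both callers can now set up the walk like this (sketch):

    swp_entry_t entry = { .val = page_private(page) };
    struct address_space *address_space = swap_address_space(entry);
    XA_STATE(xas, &address_space->i_pages, swp_offset(entry));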

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 include/linux/swap.h |  5 +++--
 mm/swap_state.c  | 24 ++--
 mm/vmscan.c  |  2 +-
 3 files changed, 14 insertions(+), 17 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index dab96af23d96..0b6a47a46c55 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -404,7 +404,7 @@ extern void show_swap_cache_info(void);
 extern int add_to_swap(struct page *page);
 extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
 extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
-extern void __delete_from_swap_cache(struct page *);
+extern void __delete_from_swap_cache(struct page *, swp_entry_t entry);
 extern void delete_from_swap_cache(struct page *);
 extern void free_page_and_swap_cache(struct page *);
 extern void free_pages_and_swap_cache(struct page **, int);
@@ -564,7 +564,8 @@ static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
return -1;
 }
 
-static inline void __delete_from_swap_cache(struct page *page)
+static inline void __delete_from_swap_cache(struct page *page,
+   swp_entry_t entry)
 {
 }
 
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 329736723d36..21218f6e438b 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -154,23 +154,22 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
  * This must be called only on pages that have
  * been verified to be in the swap cache.
  */
-void __delete_from_swap_cache(struct page *page)
+void __delete_from_swap_cache(struct page *page, swp_entry_t entry)
 {
-   struct address_space *address_space;
+   struct address_space *address_space = swap_address_space(entry);
int i, nr = hpage_nr_pages(page);
-   swp_entry_t entry;
-   pgoff_t idx;
+   pgoff_t idx = swp_offset(entry);
+   XA_STATE(xas, &address_space->i_pages, idx);
 
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(!PageSwapCache(page), page);
VM_BUG_ON_PAGE(PageWriteback(page), page);
 
-   entry.val = page_private(page);
-   address_space = swap_address_space(entry);
-   idx = swp_offset(entry);
for (i = 0; i < nr; i++) {
-       radix_tree_delete(&address_space->i_pages, idx + i);
+       void *entry = xas_store(&xas, NULL);
+   VM_BUG_ON_PAGE(entry != page + i, entry);
set_page_private(page + i, 0);
+       xas_next(&xas);
}
ClearPageSwapCache(page);
address_space->nrpages -= nr;
@@ -246,14 +245,11 @@ int add_to_swap(struct page *page)
  */
 void delete_from_swap_cache(struct page *page)
 {
-   swp_entry_t entry;
-   struct address_space *address_space;
-
-   entry.val = page_private(page);
+   swp_entry_t entry = { .val = page_private(page) };
+   struct address_space *address_space = swap_address_space(entry);
 
-   address_space = swap_address_space(entry);
xa_lock_irq(&address_space->i_pages);
-   __delete_from_swap_cache(page);
+   __delete_from_swap_cache(page, entry);
xa_unlock_irq(&address_space->i_pages);
 
put_swap_page(page, entry);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 22bd3720b318..728d7e57cf11 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -689,7 +689,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
if (PageSwapCache(page)) {
swp_entry_t swap = { .val = page_private(page) };
mem_cgroup_swapout(page, swap);
-   __delete_from_swap_cache(page);
+   __delete_from_swap_cache(page, swap);
xa_unlock_irqrestore(&mapping->i_pages, flags);
put_swap_page(page, swap);
} else {
-- 
2.16.1



[PATCH v8 37/63] mm: Convert add_to_swap_cache to XArray

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

Combine __add_to_swap_cache and add_to_swap_cache into one function
since there is no more need to preload.
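
The combined function follows the usual xas_nomem() retry shape; a
sketch of the structure (the stores and accounting are elided here):

    do {
        xas_lock_irq(&xas);
        xas_create_range(&xas, idx + nr - 1);
        if (xas_error(&xas))
            goto unlock;
        /* ... store the nr subpages with xas_store()/xas_next() ... */
unlock:
        xas_unlock_irq(&xas);
    } while (xas_nomem(&xas, gfp));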

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 mm/swap_state.c | 93 ++---
 1 file changed, 29 insertions(+), 64 deletions(-)

diff --git a/mm/swap_state.c b/mm/swap_state.c
index b2ba3b1d4727..329736723d36 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -107,14 +107,15 @@ void show_swap_cache_info(void)
 }
 
 /*
- * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
+ * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
  * but sets SwapCache flag and private instead of mapping and index.
  */
-int __add_to_swap_cache(struct page *page, swp_entry_t entry)
+int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
 {
-   int error, i, nr = hpage_nr_pages(page);
-   struct address_space *address_space;
+   struct address_space *address_space = swap_address_space(entry);
pgoff_t idx = swp_offset(entry);
+   XA_STATE(xas, &address_space->i_pages, idx);
+   unsigned long i, nr = 1UL << compound_order(page);
 
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(PageSwapCache(page), page);
@@ -123,50 +124,30 @@ int __add_to_swap_cache(struct page *page, swp_entry_t entry)
page_ref_add(page, nr);
SetPageSwapCache(page);
 
-   address_space = swap_address_space(entry);
-   xa_lock_irq(&address_space->i_pages);
-   for (i = 0; i < nr; i++) {
-   set_page_private(page + i, entry.val + i);
-       error = radix_tree_insert(&address_space->i_pages,
- idx + i, page + i);
-   if (unlikely(error))
-   break;
-   }
-   if (likely(!error)) {
+   do {
+       xas_lock_irq(&xas);
+       xas_create_range(&xas, idx + nr - 1);
+       if (xas_error(&xas))
+   goto unlock;
+   for (i = 0; i < nr; i++) {
+   VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
+   set_page_private(page + i, entry.val + i);
+           xas_store(&xas, page + i);
+           xas_next(&xas);
+   }
address_space->nrpages += nr;
__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
ADD_CACHE_INFO(add_total, nr);
-   } else {
-   /*
-* Only the context which have set SWAP_HAS_CACHE flag
-* would call add_to_swap_cache().
-* So add_to_swap_cache() doesn't returns -EEXIST.
-*/
-   VM_BUG_ON(error == -EEXIST);
-   set_page_private(page + i, 0UL);
-   while (i--) {
-           radix_tree_delete(&address_space->i_pages, idx + i);
-   set_page_private(page + i, 0UL);
-   }
-   ClearPageSwapCache(page);
-   page_ref_sub(page, nr);
-   }
-   xa_unlock_irq(&address_space->i_pages);
+unlock:
+       xas_unlock_irq(&xas);
+   } while (xas_nomem(&xas, gfp));
 
-   return error;
-}
-
-
-int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
-{
-   int error;
+   if (!xas_error(&xas))
+   return 0;
 
-   error = radix_tree_maybe_preload_order(gfp_mask, compound_order(page));
-   if (!error) {
-   error = __add_to_swap_cache(page, entry);
-   radix_tree_preload_end();
-   }
-   return error;
+   ClearPageSwapCache(page);
+   page_ref_sub(page, nr);
+   return xas_error(&xas);
 }
 
 /*
@@ -220,7 +201,7 @@ int add_to_swap(struct page *page)
goto fail;
 
/*
-* Radix-tree node allocations from PF_MEMALLOC contexts could
+* XArray node allocations from PF_MEMALLOC contexts could
 * completely exhaust the page allocator. __GFP_NOMEMALLOC
 * stops emergency reserves from being allocated.
 *
@@ -232,7 +213,6 @@ int add_to_swap(struct page *page)
 */
err = add_to_swap_cache(page, entry,
__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
-   /* -ENOMEM radix-tree allocation failure */
if (err)
/*
 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
@@ -421,19 +401,11 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
break;  /* Out of memory */
}
 
-   /*
-* call radix_tree_preload() while we can wait.
-*/
-   err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
-   if (err)
-   break;
-
/*
 * Swap entry

[PATCH v8 34/63] mm: Convert page-writeback to XArray

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

Includes moving mapping_tagged() to fs.h as a static inline, and
changing it to return bool.
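
Callers are unchanged apart from the type, e.g. (sketch):

    if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
        /* ... at least one page in the mapping is dirty ... */;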

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 include/linux/fs.h  | 17 +--
 mm/page-writeback.c | 63 +++--
 2 files changed, 32 insertions(+), 48 deletions(-)

diff --git a/include/linux/fs.h b/include/linux/fs.h
index 785100c2b835..4bd801b5adc8 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -469,15 +469,18 @@ struct block_device {
struct mutex    bd_fsfreeze_mutex;
 } __randomize_layout;
 
+/* XArray tags, for tagging dirty and writeback pages in the pagecache. */
+#define PAGECACHE_TAG_DIRTY    XA_TAG_0
+#define PAGECACHE_TAG_WRITEBACK    XA_TAG_1
+#define PAGECACHE_TAG_TOWRITE  XA_TAG_2
+
 /*
- * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
- * radix trees
+ * Returns true if any of the pages in the mapping are marked with the tag.
  */
-#define PAGECACHE_TAG_DIRTY    0
-#define PAGECACHE_TAG_WRITEBACK    1
-#define PAGECACHE_TAG_TOWRITE  2
-
-int mapping_tagged(struct address_space *mapping, int tag);
+static inline bool mapping_tagged(struct address_space *mapping, xa_tag_t tag)
+{
+   return xa_tagged(&mapping->i_pages, tag);
+}
 
 static inline void i_mmap_lock_write(struct address_space *mapping)
 {
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 5c1a3279e63f..195ccd0b30c8 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2098,34 +2098,25 @@ void __init page_writeback_init(void)
  * dirty pages in the file (thus it is important for this function to be quick
  * so that it can tag pages faster than a dirtying process can create them).
  */
-/*
- * We tag pages in batches of WRITEBACK_TAG_BATCH to reduce the i_pages lock
- * latency.
- */
 void tag_pages_for_writeback(struct address_space *mapping,
 pgoff_t start, pgoff_t end)
 {
-#define WRITEBACK_TAG_BATCH 4096
-   unsigned long tagged = 0;
-   struct radix_tree_iter iter;
-   void **slot;
+   XA_STATE(xas, &mapping->i_pages, start);
+   unsigned int tagged = 0;
+   void *page;
 
-   xa_lock_irq(&mapping->i_pages);
-   radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter, start,
-   PAGECACHE_TAG_DIRTY) {
-   if (iter.index > end)
-   break;
-       radix_tree_iter_tag_set(&mapping->i_pages, &iter,
-   PAGECACHE_TAG_TOWRITE);
-   tagged++;
-   if ((tagged % WRITEBACK_TAG_BATCH) != 0)
+   xas_lock_irq(&xas);
+   xas_for_each_tag(&xas, page, end, PAGECACHE_TAG_DIRTY) {
+       xas_set_tag(&xas, PAGECACHE_TAG_TOWRITE);
+   if (++tagged % XA_CHECK_SCHED)
continue;
-       slot = radix_tree_iter_resume(slot, &iter);
-       xa_unlock_irq(&mapping->i_pages);
+
+       xas_pause(&xas);
+       xas_unlock_irq(&xas);
cond_resched();
-       xa_lock_irq(&mapping->i_pages);
+       xas_lock_irq(&xas);
}
-   xa_unlock_irq(&mapping->i_pages);
+   xas_unlock_irq(&xas);
 }
 EXPORT_SYMBOL(tag_pages_for_writeback);
 
@@ -2165,7 +2156,7 @@ int write_cache_pages(struct address_space *mapping,
pgoff_t done_index;
int cycled;
int range_whole = 0;
-   int tag;
+   xa_tag_t tag;
 
pagevec_init(&pvec);
if (wbc->range_cyclic) {
@@ -2446,7 +2437,7 @@ void account_page_cleaned(struct page *page, struct address_space *mapping,
 
 /*
  * For address_spaces which do not use buffers.  Just tag the page as dirty in
- * its radix tree.
+ * the xarray.
  *
  * This is also used when a single buffer is being dirtied: we want to set the
  * page dirty in that case, but not all the buffers.  This is a "bottom-up"
@@ -2472,7 +2463,7 @@ int __set_page_dirty_nobuffers(struct page *page)
BUG_ON(page_mapping(page) != mapping);
WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
account_page_dirtied(page, mapping);
-   radix_tree_tag_set(&mapping->i_pages, page_index(page),
+   __xa_set_tag(&mapping->i_pages, page_index(page),
   PAGECACHE_TAG_DIRTY);
xa_unlock_irqrestore(&mapping->i_pages, flags);
unlock_page_memcg(page);
@@ -2635,13 +2626,13 @@ EXPORT_SYMBOL(__cancel_dirty_page);
  * Returns true if the page was previously dirty.
  *
  * This is for preparing to put the page under writeout.  We leave the page
- * tagged as dirty in the radix tree so that a concurrent write-for-sync
+ * tagged as dirty in the xarray so that a concurrent write-for-sync
  * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
  * implementation will run either set_page_writeback() or set_page_dirty(),
- * at which st

[PATCH v8 36/63] mm: Convert truncate to XArray

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

This is essentially xa_cmpxchg() with the locking handled above us,
and it doesn't have to handle replacing a NULL entry.
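
Conceptually (setting aside that the caller already holds the lock),
the operation is:

    xa_cmpxchg(&mapping->i_pages, index, entry, NULL, 0);

i.e. clear the slot only if it still holds the shadow entry we expect,
never allocating memory.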

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 mm/truncate.c | 15 ++-
 1 file changed, 6 insertions(+), 9 deletions(-)

diff --git a/mm/truncate.c b/mm/truncate.c
index ed778555c9f3..45d68e90b703 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -33,15 +33,12 @@
 static inline void __clear_shadow_entry(struct address_space *mapping,
pgoff_t index, void *entry)
 {
-   struct radix_tree_node *node;
-   void **slot;
+   XA_STATE(xas, &mapping->i_pages, index);
 
-   if (!__radix_tree_lookup(&mapping->i_pages, index, &node, &slot))
+   xas_set_update(&xas, workingset_update_node);
+   if (xas_load(&xas) != entry)
return;
-   if (*slot != entry)
-   return;
-   __radix_tree_replace(&mapping->i_pages, node, slot, NULL,
-            workingset_update_node);
+   xas_store(&xas, NULL);
mapping->nrexceptional--;
 }
 
@@ -738,10 +735,10 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
index++;
}
/*
-* For DAX we invalidate page tables after invalidating radix tree.  We
+* For DAX we invalidate page tables after invalidating page cache.  We
 * could invalidate page tables while invalidating each entry however
 * that would be expensive. And doing range unmapping before doesn't
-* work as we have no cheap way to find whether radix tree entry didn't
+* work as we have no cheap way to find whether page cache entry didn't
 * get remapped later.
 */
if (dax_mapping(mapping)) {
-- 
2.16.1

--
To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v8 35/63] mm: Convert workingset to XArray

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

We construct a fake XA_STATE and use it to delete the node with xa_store()
rather than adding a special function for this unique use case.
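
The idea, very roughly (the real code fills the xa_state in by hand
from the xa_node being reclaimed before storing):

    XA_STATE(xas, NULL, 0);

    /* point xas at the node's array, node and offset, then: */
    xas_store(&xas, NULL);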

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 include/linux/swap.h |  9 -
 mm/workingset.c  | 51 ++-
 2 files changed, 22 insertions(+), 38 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index a0ebb5deea2d..dab96af23d96 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -307,15 +307,6 @@ void workingset_update_node(struct xa_node *node);
xas_set_update(xas, workingset_update_node);\
 } while (0)
 
-/* Returns workingset_update_node() if the mapping has shadow entries. */
-#define workingset_lookup_update(mapping)  \
-({ \
-   radix_tree_update_node_t __helper = workingset_update_node; \
-   if (dax_mapping(mapping) || shmem_mapping(mapping)) \
-   __helper = NULL;\
-   __helper;   \
-})
-
 /* linux/mm/page_alloc.c */
 extern unsigned long totalram_pages;
 extern unsigned long totalreserve_pages;
diff --git a/mm/workingset.c b/mm/workingset.c
index bad4e58881cd..564e97bd5934 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -148,7 +148,7 @@
  * and activations is maintained (node->inactive_age).
  *
  * On eviction, a snapshot of this counter (along with some bits to
- * identify the node) is stored in the now empty page cache radix tree
+ * identify the node) is stored in the now empty page cache
  * slot of the evicted page.  This is called a shadow entry.
  *
  * On cache misses for which there are shadow entries, an eligible
@@ -162,7 +162,7 @@
 
 /*
  * Eviction timestamps need to be able to cover the full range of
- * actionable refaults. However, bits are tight in the radix tree
+ * actionable refaults. However, bits are tight in the xarray
  * entry, and after storing the identifier for the lruvec there might
  * not be enough left to represent every single actionable refault. In
  * that case, we have to sacrifice granularity for distance, and group
@@ -338,7 +338,7 @@ void workingset_activation(struct page *page)
 
 static struct list_lru shadow_nodes;
 
-void workingset_update_node(struct radix_tree_node *node)
+void workingset_update_node(struct xa_node *node)
 {
/*
 * Track non-empty nodes that contain only shadow entries;
@@ -370,7 +370,7 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
local_irq_enable();
 
/*
-* Approximate a reasonable limit for the radix tree nodes
+* Approximate a reasonable limit for the nodes
 * containing shadow entries. We don't need to keep more
 * shadow entries than possible pages on the active list,
 * since refault distances bigger than that are dismissed.
@@ -385,11 +385,11 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
 * worst-case density of 1/8th. Below that, not all eligible
 * refaults can be detected anymore.
 *
-* On 64-bit with 7 radix_tree_nodes per page and 64 slots
+* On 64-bit with 7 xa_nodes per page and 64 slots
 * each, this will reclaim shadow entries when they consume
 * ~1.8% of available memory:
 *
-* PAGE_SIZE / radix_tree_nodes / node_entries * 8 / PAGE_SIZE
+* PAGE_SIZE / xa_nodes / node_entries * 8 / PAGE_SIZE
 */
if (sc->memcg) {
cache = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
@@ -398,7 +398,7 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
cache = node_page_state(NODE_DATA(sc->nid), NR_ACTIVE_FILE) +
node_page_state(NODE_DATA(sc->nid), NR_INACTIVE_FILE);
}
-   max_nodes = cache >> (RADIX_TREE_MAP_SHIFT - 3);
+   max_nodes = cache >> (XA_CHUNK_SHIFT - 3);
 
if (nodes <= max_nodes)
return 0;
@@ -408,11 +408,11 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
 static enum lru_status shadow_lru_isolate(struct list_head *item,
  struct list_lru_one *lru,
  spinlock_t *lru_lock,
- void *arg)
+ void *arg) __must_hold(lru_lock)
 {
+   XA_STATE(xas, NULL, 0);
struct address_space *mapping;
-   struct radix_tree_node *node;
-   unsigned int i;
+   struct xa_node *node;
int ret;
 
/*
@@ -420,7 +420,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,

[PATCH v8 40/63] mm: Convert page migration to XArray

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 mm/migrate.c | 41 -
 1 file changed, 16 insertions(+), 25 deletions(-)

diff --git a/mm/migrate.c b/mm/migrate.c
index 740b71857898..9a15d27768a0 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -322,7 +322,7 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t 
*ptep,
page = migration_entry_to_page(entry);
 
/*
-* Once radix-tree replacement of page migration started, page_count
+* Once page cache replacement of page migration started, page_count
 * *must* be zero. And, we don't want to call wait_on_page_locked()
 * against a page without get_page().
 * So, we use get_page_unless_zero(), here. Even failed, page fault
@@ -437,10 +437,10 @@ int migrate_page_move_mapping(struct address_space *mapping,
struct buffer_head *head, enum migrate_mode mode,
int extra_count)
 {
+   XA_STATE(xas, &mapping->i_pages, page_index(page));
struct zone *oldzone, *newzone;
int dirty;
int expected_count = 1 + extra_count;
-   void **pslot;
 
/*
 * Device public or private pages have an extra refcount as they are
@@ -466,21 +466,16 @@ int migrate_page_move_mapping(struct address_space *mapping,
oldzone = page_zone(page);
newzone = page_zone(newpage);
 
-   xa_lock_irq(&mapping->i_pages);
-
-   pslot = radix_tree_lookup_slot(&mapping->i_pages,
-   page_index(page));
+   xas_lock_irq(&xas);
 
expected_count += 1 + page_has_private(page);
-   if (page_count(page) != expected_count ||
-   radix_tree_deref_slot_protected(pslot,
-   &mapping->i_pages.xa_lock) != page) {
-   xa_unlock_irq(&mapping->i_pages);
+   if (page_count(page) != expected_count || xas_load(&xas) != page) {
+       xas_unlock_irq(&xas);
return -EAGAIN;
}
 
if (!page_ref_freeze(page, expected_count)) {
-   xa_unlock_irq(&mapping->i_pages);
+   xas_unlock_irq(&xas);
return -EAGAIN;
}
 
@@ -494,7 +489,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
if (mode == MIGRATE_ASYNC && head &&
!buffer_migrate_lock_buffers(head, mode)) {
page_ref_unfreeze(page, expected_count);
-   xa_unlock_irq(&mapping->i_pages);
+   xas_unlock_irq(&xas);
return -EAGAIN;
}
 
@@ -522,7 +517,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
SetPageDirty(newpage);
}
 
-   radix_tree_replace_slot(&mapping->i_pages, pslot, newpage);
+   xas_store(&xas, newpage);
 
/*
 * Drop cache reference from old page by unfreezing
@@ -531,7 +526,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 */
page_ref_unfreeze(page, expected_count - 1);
 
-   xa_unlock(&mapping->i_pages);
+   xas_unlock(&xas);
/* Leave irq disabled to prevent preemption while updating stats */
 
/*
@@ -571,22 +566,18 @@ EXPORT_SYMBOL(migrate_page_move_mapping);
 int migrate_huge_page_move_mapping(struct address_space *mapping,
   struct page *newpage, struct page *page)
 {
+   XA_STATE(xas, &mapping->i_pages, page_index(page));
int expected_count;
-   void **pslot;
-
-   xa_lock_irq(&mapping->i_pages);
-
-   pslot = radix_tree_lookup_slot(&mapping->i_pages, page_index(page));
-
+   xas_lock_irq(&xas);
expected_count = 2 + page_has_private(page);
-   if (page_count(page) != expected_count ||
-   radix_tree_deref_slot_protected(pslot, &mapping->i_pages.xa_lock) != page) {
-   xa_unlock_irq(&mapping->i_pages);
+   if (page_count(page) != expected_count || xas_load(&xas) != page) {
+       xas_unlock_irq(&xas);
return -EAGAIN;
}
 
if (!page_ref_freeze(page, expected_count)) {
-   xa_unlock_irq(&mapping->i_pages);
+   xas_unlock_irq(&xas);
return -EAGAIN;
}
 
@@ -595,11 +586,11 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
 
get_page(newpage);
 
-   radix_tree_replace_slot(&mapping->i_pages, pslot, newpage);
+   xas_store(&xas, newpage);
 
page_ref_unfreeze(page, expected_count - 1);
 
-   xa_unlock_irq(&mapping->i_pages);
+   xas_unlock_irq(&xas);
 
return MIGRATEPAGE_SUCCESS;
 }
-- 
2.16.1

--
To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v8 13/63] xarray: Add definition of struct xarray

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

This is a direct replacement for struct radix_tree_root.  Some of the
struct members have changed name; convert those, and use a #define so
that radix_tree users continue to work without change.
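
Unconverted users keep compiling thanks to the #define; new code can
embed the structure directly, e.g. (hypothetical user, not from this
patch):

    struct my_cache {
        struct xarray pages;    /* was: struct radix_tree_root */
    };

    xa_init_flags(&cache->pages, GFP_KERNEL);   /* was: INIT_RADIX_TREE */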

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 include/linux/radix-tree.h   | 33 --
 include/linux/xarray.h   | 61 ++
 lib/Makefile |  2 +-
 lib/idr.c|  4 +-
 lib/radix-tree.c | 75 
 lib/xarray.c | 44 +++
 tools/include/linux/spinlock.h   |  3 +-
 tools/testing/radix-tree/.gitignore  |  1 +
 tools/testing/radix-tree/Makefile|  8 +++-
 tools/testing/radix-tree/linux/bug.h |  1 +
 tools/testing/radix-tree/linux/kconfig.h |  1 +
 tools/testing/radix-tree/linux/xarray.h  |  2 +
 tools/testing/radix-tree/multiorder.c|  6 +--
 tools/testing/radix-tree/test.c  |  6 +--
 14 files changed, 173 insertions(+), 74 deletions(-)
 create mode 100644 lib/xarray.c
 create mode 100644 tools/testing/radix-tree/linux/kconfig.h
 create mode 100644 tools/testing/radix-tree/linux/xarray.h

diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 87f35fe00e55..c8a33e9e9a3c 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -30,6 +30,9 @@
 #include 
 #include 
 
+/* Keep unconverted code working */
+#define radix_tree_root    xarray
+
 /*
  * The bottom two bits of the slot determine how the remaining bits in the
  * slot are interpreted:
@@ -59,10 +62,7 @@ static inline bool radix_tree_is_internal_node(void *ptr)
 
 #define RADIX_TREE_MAX_TAGS 3
 
-#ifndef RADIX_TREE_MAP_SHIFT
-#define RADIX_TREE_MAP_SHIFT   (CONFIG_BASE_SMALL ? 4 : 6)
-#endif
-
+#define RADIX_TREE_MAP_SHIFT   XA_CHUNK_SHIFT
 #define RADIX_TREE_MAP_SIZE(1UL << RADIX_TREE_MAP_SHIFT)
 #define RADIX_TREE_MAP_MASK(RADIX_TREE_MAP_SIZE-1)
 
@@ -95,36 +95,21 @@ struct radix_tree_node {
unsigned long   tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
 };
 
-/* The IDR tag is stored in the low bits of the GFP flags */
+/* The IDR tag is stored in the low bits of xa_flags */
#define ROOT_IS_IDR    ((__force gfp_t)4)
-/* The top bits of gfp_mask are used to store the root tags */
+/* The top bits of xa_flags are used to store the root tags */
 #define ROOT_TAG_SHIFT (__GFP_BITS_SHIFT)
 
-struct radix_tree_root {
-   spinlock_t  xa_lock;
-   gfp_t   gfp_mask;
-   struct radix_tree_node  __rcu *rnode;
-};
-
-#define RADIX_TREE_INIT(name, mask){   \
-   .xa_lock = __SPIN_LOCK_UNLOCKED(name.xa_lock),  \
-   .gfp_mask = (mask), \
-   .rnode = NULL,  \
-}
+#define RADIX_TREE_INIT(name, mask)    XARRAY_INIT_FLAGS(name, mask)
 
 #define RADIX_TREE(name, mask) \
struct radix_tree_root name = RADIX_TREE_INIT(name, mask)
 
-#define INIT_RADIX_TREE(root, mask)\
-do {   \
-   spin_lock_init(&(root)->xa_lock);   \
-   (root)->gfp_mask = (mask);  \
-   (root)->rnode = NULL;   \
-} while (0)
+#define INIT_RADIX_TREE(root, mask) xa_init_flags(root, mask)
 
 static inline bool radix_tree_empty(const struct radix_tree_root *root)
 {
-   return root->rnode == NULL;
+   return root->xa_head == NULL;
 }
 
 /**
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 283beb5aac58..9b05b907062b 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -10,6 +10,8 @@
  */
 
 #include 
+#include 
+#include 
 #include 
 #include 
 
@@ -105,6 +107,65 @@ static inline bool xa_is_internal(const void *entry)
return ((unsigned long)entry & 3) == 2;
 }
 
+/**
+ * struct xarray - The anchor of the XArray.
+ * @xa_lock: Lock that protects the contents of the XArray.
+ *
+ * To use the xarray, define it statically or embed it in your data structure.
+ * It is a very small data structure, so it does not usually make sense to
+ * allocate it separately and keep a pointer to it in your data structure.
+ *
+ * You may use the xa_lock to protect your own data structures as well.
+ */
+/*
+ * If all of the entries in the array are NULL, @xa_head is a NULL pointer.
+ * If the only non-NULL entry in the array is at index 0, @xa_head is that
+ * entry.  If any other entry in the array is non-NULL, @xa_head points
+ * to an @xa_node.
+ */
+struct xarray {
+   spinlock_t  xa_lock;
+/* private: The rest of the data structure is not to be used dire

[PATCH v8 39/63] mm: Convert __do_page_cache_readahead to XArray

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

This one is trivial.
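
Trivial because xa_load() takes (and drops) the RCU read lock itself,
so the open-coded rcu_read_lock()/rcu_read_unlock() pair around the
lookup simply disappears:

    page = xa_load(&mapping->i_pages, page_offset);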

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 mm/readahead.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/mm/readahead.c b/mm/readahead.c
index 3ff9763b0461..5f528d649d5e 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -174,9 +174,7 @@ int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
if (page_offset > end_index)
break;
 
-   rcu_read_lock();
-   page = radix_tree_lookup(&mapping->i_pages, page_offset);
-   rcu_read_unlock();
+   page = xa_load(&mapping->i_pages, page_offset);
if (page && !xa_is_value(page))
continue;
 
-- 
2.16.1



[PATCH v8 41/63] mm: Convert huge_memory to XArray

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

Quite a straightforward conversion.

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 mm/huge_memory.c | 17 +++--
 1 file changed, 7 insertions(+), 10 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 89737c0e0d34..354b7f768d0f 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2442,13 +2442,13 @@ static void __split_huge_page(struct page *page, struct list_head *list,
ClearPageCompound(head);
/* See comment in __split_huge_page_tail() */
if (PageAnon(head)) {
-   /* Additional pin to radix tree of swap cache */
+   /* Additional pin to swap cache */
if (PageSwapCache(head))
page_ref_add(head, 2);
else
page_ref_inc(head);
} else {
-   /* Additional pin to radix tree */
+   /* Additional pin to page cache */
page_ref_add(head, 2);
xa_unlock(&head->mapping->i_pages);
}
@@ -2560,7 +2560,7 @@ bool can_split_huge_page(struct page *page, int *pextra_pins)
 {
int extra_pins;
 
-   /* Additional pins from radix tree */
+   /* Additional pins from page cache */
if (PageAnon(page))
extra_pins = PageSwapCache(page) ? HPAGE_PMD_NR : 0;
else
@@ -2656,17 +2656,14 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
spin_lock_irqsave(zone_lru_lock(page_zone(head)), flags);
 
if (mapping) {
-   void **pslot;
+   XA_STATE(xas, &mapping->i_pages, page_index(head));
 
-   xa_lock(&mapping->i_pages);
-   pslot = radix_tree_lookup_slot(&mapping->i_pages,
-   page_index(head));
/*
-* Check if the head page is present in radix tree.
+* Check if the head page is present in page cache.
 * We assume all tail are present too, if head is there.
 */
-   if (radix_tree_deref_slot_protected(pslot,
-   &mapping->i_pages.xa_lock) != head)
+   xa_lock(&mapping->i_pages);
+   if (xas_load(&xas) != head)
goto fail;
}
 
-- 
2.16.1



[PATCH v8 42/63] mm: Convert collapse_shmem to XArray

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

I found another victim of the radix tree being hard to use.  Because
there was no call to radix_tree_preload(), khugepaged was allocating
radix_tree_nodes using GFP_ATOMIC.
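
With the XArray the allocation can sleep: xas_nomem() runs with the
lock dropped, so GFP_KERNEL is fine.  The shape of the fix (see the
diff below):

    do {
        xas_lock_irq(&xas);
        xas_create_range(&xas, end - 1);
        if (!xas_error(&xas))
            break;
        xas_unlock_irq(&xas);
        if (!xas_nomem(&xas, GFP_KERNEL))   /* sleeping allocation */
            goto out;
    } while (1);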

I also converted a local_irq_save()/restore() pair to
disable()/enable().

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 mm/khugepaged.c | 158 +++-
 1 file changed, 65 insertions(+), 93 deletions(-)

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 69545692155f..3685c8e2b3dc 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1282,17 +1282,17 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
  *
  * Basic scheme is simple, details are more complex:
  *  - allocate and freeze a new huge page;
- *  - scan over radix tree replacing old pages the new one
+ *  - scan page cache replacing old pages with the new one
  *+ swap in pages if necessary;
  *+ fill in gaps;
- *+ keep old pages around in case if rollback is required;
- *  - if replacing succeed:
+ *+ keep old pages around in case rollback is required;
+ *  - if replacing succeeds:
  *+ copy data over;
  *+ free old pages;
  *+ unfreeze huge page;
  *  - if replacing failed;
  *+ put all pages back and unfreeze them;
- *+ restore gaps in the radix-tree;
+ *+ restore gaps in the page cache;
  *+ free huge page;
  */
 static void collapse_shmem(struct mm_struct *mm,
@@ -1300,12 +1300,11 @@ static void collapse_shmem(struct mm_struct *mm,
struct page **hpage, int node)
 {
gfp_t gfp;
-   struct page *page, *new_page, *tmp;
+   struct page *new_page;
struct mem_cgroup *memcg;
pgoff_t index, end = start + HPAGE_PMD_NR;
LIST_HEAD(pagelist);
-   struct radix_tree_iter iter;
-   void **slot;
+   XA_STATE(xas, &mapping->i_pages, start);
int nr_none = 0, result = SCAN_SUCCEED;
 
VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
@@ -1330,48 +1329,48 @@ static void collapse_shmem(struct mm_struct *mm,
__SetPageLocked(new_page);
BUG_ON(!page_ref_freeze(new_page, 1));
 
-
/*
-* At this point the new_page is 'frozen' (page_count() is zero), locked
-* and not up-to-date. It's safe to insert it into radix tree, because
-* nobody would be able to map it or use it in other way until we
-* unfreeze it.
+* At this point the new_page is 'frozen' (page_count() is zero),
+* locked and not up-to-date. It's safe to insert it into the page
+* cache, because nobody would be able to map it or use it in other
+* way until we unfreeze it.
 */
 
-   index = start;
-   xa_lock_irq(&mapping->i_pages);
-   radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
-   int n = min(iter.index, end) - index;
-
-   /*
-* Handle holes in the radix tree: charge it from shmem and
-* insert relevant subpage of new_page into the radix-tree.
-*/
-   if (n && !shmem_charge(mapping->host, n)) {
-   result = SCAN_FAIL;
+   /* This will be less messy when we use multi-index entries */
+   do {
+       xas_lock_irq(&xas);
+       xas_create_range(&xas, end - 1);
+       if (!xas_error(&xas))
break;
-   }
-   nr_none += n;
-   for (; index < min(iter.index, end); index++) {
-           radix_tree_insert(&mapping->i_pages, index,
-   new_page + (index % HPAGE_PMD_NR));
-   }
+   xas_unlock_irq();
+       if (!xas_nomem(&xas, GFP_KERNEL))
+   goto out;
+   } while (1);
 
-   /* We are done. */
-   if (index >= end)
-   break;
+   for (index = start; index < end; index++) {
+       struct page *page = xas_next(&xas);
+
+   VM_BUG_ON(index != xas.xa_index);
+   if (!page) {
+   if (!shmem_charge(mapping->host, 1)) {
+   result = SCAN_FAIL;
+   break;
+   }
+           xas_store(&xas, new_page + (index % HPAGE_PMD_NR));
+   nr_none++;
+   continue;
+   }
 
-   page = radix_tree_deref_slot_protected(slot,
-   &mapping->i_pages.xa_lock);
if (xa_is_value(page) || !PageUptodate(page)) {
-           xa_unlock_irq(&mapping->i_pages);
+           xas_unlock_irq(&xas);
/* swap in or instantiate fallocated page */
if (shmem_getpage(mapping->host, index, ,
SGP_N

[PATCH v8 43/63] mm: Convert khugepaged_scan_shmem to XArray

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

Slightly shorter and easier to read code.

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 mm/khugepaged.c | 17 +
 1 file changed, 5 insertions(+), 12 deletions(-)

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 3685c8e2b3dc..39e260a0639c 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1533,8 +1533,7 @@ static void khugepaged_scan_shmem(struct mm_struct *mm,
pgoff_t start, struct page **hpage)
 {
struct page *page = NULL;
-   struct radix_tree_iter iter;
-   void **slot;
+   XA_STATE(xas, &mapping->i_pages, start);
int present, swap;
int node = NUMA_NO_NODE;
int result = SCAN_SUCCEED;
@@ -1543,17 +1542,11 @@ static void khugepaged_scan_shmem(struct mm_struct *mm,
swap = 0;
memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
rcu_read_lock();
-   radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
-   if (iter.index >= start + HPAGE_PMD_NR)
-   break;
-
-   page = radix_tree_deref_slot(slot);
-   if (radix_tree_deref_retry(page)) {
-           slot = radix_tree_iter_retry(&iter);
+   xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
+       if (xas_retry(&xas, page))
continue;
-   }
 
-   if (radix_tree_exception(page)) {
+   if (xa_is_value(page)) {
if (++swap > khugepaged_max_ptes_swap) {
result = SCAN_EXCEED_SWAP_PTE;
break;
@@ -1592,7 +1585,7 @@ static void khugepaged_scan_shmem(struct mm_struct *mm,
present++;
 
if (need_resched()) {
-           slot = radix_tree_iter_resume(slot, &iter);
+           xas_pause(&xas);
cond_resched_rcu();
}
}
-- 
2.16.1



[PATCH v8 44/63] pagevec: Use xa_tag_t

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

Removes sparse warnings.

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 fs/btrfs/extent_io.c| 4 ++--
 fs/ext4/inode.c | 2 +-
 fs/f2fs/data.c  | 2 +-
 fs/gfs2/aops.c  | 2 +-
 include/linux/pagevec.h | 8 +---
 mm/swap.c   | 4 ++--
 6 files changed, 12 insertions(+), 10 deletions(-)

diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 4e22edd04457..bcc24ee5a2c9 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3789,7 +3789,7 @@ int btree_write_cache_pages(struct address_space *mapping,
pgoff_t index;
pgoff_t end;/* Inclusive */
int scanned = 0;
-   int tag;
+   xa_tag_t tag;
 
pagevec_init(&pvec);
if (wbc->range_cyclic) {
@@ -3914,7 +3914,7 @@ static int extent_write_cache_pages(struct address_space *mapping,
pgoff_t done_index;
int range_whole = 0;
int scanned = 0;
-   int tag;
+   xa_tag_t tag;
 
/*
 * We have to hold onto the inode so that ordered extents can do their
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 2a47c2f715bb..71cb9d7fd9c2 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2615,7 +2615,7 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
long left = mpd->wbc->nr_to_write;
pgoff_t index = mpd->first_page;
pgoff_t end = mpd->last_page;
-   int tag;
+   xa_tag_t tag;
int i, err = 0;
int blkbits = mpd->inode->i_blkbits;
ext4_lblk_t lblk;
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 412e9f650dac..dfbccf884d4f 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -1848,7 +1848,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
pgoff_t last_idx = ULONG_MAX;
int cycled;
int range_whole = 0;
-   int tag;
+   xa_tag_t tag;
 
pagevec_init(&pvec);
 
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index f58716567972..8376d1358379 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -371,7 +371,7 @@ static int gfs2_write_cache_jdata(struct address_space *mapping,
pgoff_t done_index;
int cycled;
int range_whole = 0;
-   int tag;
+   xa_tag_t tag;
 
pagevec_init(&pvec);
if (wbc->range_cyclic) {
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index 6dc456ac6136..955bd6425903 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -9,6 +9,8 @@
 #ifndef _LINUX_PAGEVEC_H
 #define _LINUX_PAGEVEC_H
 
+#include <linux/xarray.h>
+
 /* 15 pointers + header align the pagevec structure to a power of two */
 #define PAGEVEC_SIZE   15
 
@@ -40,12 +42,12 @@ static inline unsigned pagevec_lookup(struct pagevec *pvec,
 
 unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
struct address_space *mapping, pgoff_t *index, pgoff_t end,
-   int tag);
+   xa_tag_t tag);
 unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec,
struct address_space *mapping, pgoff_t *index, pgoff_t end,
-   int tag, unsigned max_pages);
+   xa_tag_t tag, unsigned max_pages);
 static inline unsigned pagevec_lookup_tag(struct pagevec *pvec,
-   struct address_space *mapping, pgoff_t *index, int tag)
+   struct address_space *mapping, pgoff_t *index, xa_tag_t tag)
 {
return pagevec_lookup_range_tag(pvec, mapping, index, (pgoff_t)-1, tag);
 }
diff --git a/mm/swap.c b/mm/swap.c
index 65d3ecbd5958..b9072d77bb82 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -1002,7 +1002,7 @@ EXPORT_SYMBOL(pagevec_lookup_range);
 
 unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
struct address_space *mapping, pgoff_t *index, pgoff_t end,
-   int tag)
+   xa_tag_t tag)
 {
pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
PAGEVEC_SIZE, pvec->pages);
@@ -1012,7 +1012,7 @@ EXPORT_SYMBOL(pagevec_lookup_range_tag);
 
 unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec,
struct address_space *mapping, pgoff_t *index, pgoff_t end,
-   int tag, unsigned max_pages)
+   xa_tag_t tag, unsigned max_pages)
 {
pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
min_t(unsigned int, max_pages, PAGEVEC_SIZE), pvec->pages);
-- 
2.16.1



[PATCH v8 47/63] shmem: Convert find_swap_entry to XArray

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

This is a 1:1 conversion.

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 mm/shmem.c | 23 +++
 1 file changed, 11 insertions(+), 12 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index 0af8a439dfad..49f42dc9e1dc 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1076,28 +1076,27 @@ static void shmem_evict_inode(struct inode *inode)
clear_inode(inode);
 }
 
-static unsigned long find_swap_entry(struct radix_tree_root *root, void *item)
+static unsigned long find_swap_entry(struct xarray *xa, void *item)
 {
-   struct radix_tree_iter iter;
-   void **slot;
-   unsigned long found = -1;
+   XA_STATE(xas, xa, 0);
unsigned int checked = 0;
+   void *entry;
 
rcu_read_lock();
-   radix_tree_for_each_slot(slot, root, &iter, 0) {
-   if (*slot == item) {
-   found = iter.index;
+   xas_for_each(&xas, entry, ULONG_MAX) {
+       if (xas_retry(&xas, entry))
+   continue;
+   if (entry == item)
break;
-   }
checked++;
-   if ((checked % 4096) != 0)
+   if ((checked % XA_CHECK_SCHED) != 0)
continue;
-       slot = radix_tree_iter_resume(slot, &iter);
+       xas_pause(&xas);
cond_resched_rcu();
}
-
rcu_read_unlock();
-   return found;
+
+   return xas_invalid(&xas) ? -1 : xas.xa_index;
 }
 
 /*
-- 
2.16.1



[PATCH v8 53/63] memfd: Convert shmem_wait_for_pins to XArray

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

As with shmem_tag_pins(), hold the lock around the entire loop instead
of acquiring & dropping it for each entry we're going to untag.

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 mm/memfd.c | 61 +
 1 file changed, 25 insertions(+), 36 deletions(-)

diff --git a/mm/memfd.c b/mm/memfd.c
index 3b299d72df78..0e0835e63af2 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -64,9 +64,7 @@ static void shmem_tag_pins(struct address_space *mapping)
  */
 static int shmem_wait_for_pins(struct address_space *mapping)
 {
-   struct radix_tree_iter iter;
-   void __rcu **slot;
-   pgoff_t start;
+   XA_STATE(xas, &mapping->i_pages, 0);
struct page *page;
int error, scan;
 
@@ -74,7 +72,9 @@ static int shmem_wait_for_pins(struct address_space *mapping)
 
error = 0;
for (scan = 0; scan <= LAST_SCAN; scan++) {
-       if (!radix_tree_tagged(&mapping->i_pages, SHMEM_TAG_PINNED))
+   unsigned int tagged = 0;
+
+       if (!xas_tagged(&xas, SHMEM_TAG_PINNED))
break;
 
if (!scan)
@@ -82,45 +82,34 @@ static int shmem_wait_for_pins(struct address_space *mapping)
else if (schedule_timeout_killable((HZ << scan) / 200))
scan = LAST_SCAN;
 
-   start = 0;
-   rcu_read_lock();
-       radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter,
-      start, SHMEM_TAG_PINNED) {
-
-   page = radix_tree_deref_slot(slot);
-   if (radix_tree_exception(page)) {
-   if (radix_tree_deref_retry(page)) {
-                   slot = radix_tree_iter_retry(&iter);
-   continue;
-   }
-
-   page = NULL;
-   }
-
-   if (page &&
-   page_count(page) - page_mapcount(page) != 1) {
-   if (scan < LAST_SCAN)
-   goto continue_resched;
-
+       xas_set(&xas, 0);
+       xas_lock_irq(&xas);
+       xas_for_each_tag(&xas, page, ULONG_MAX, SHMEM_TAG_PINNED) {
+   bool clear = true;
+   if (xa_is_value(page))
+   continue;
+   if (page_count(page) - page_mapcount(page) != 1) {
/*
 * On the last scan, we clean up all those tags
 * we inserted; but make a note that we still
 * found pages pinned.
 */
-   error = -EBUSY;
-   }
-
-   xa_lock_irq(&mapping->i_pages);
-   radix_tree_tag_clear(&mapping->i_pages,
-iter.index, SHMEM_TAG_PINNED);
-   xa_unlock_irq(&mapping->i_pages);
-continue_resched:
-   if (need_resched()) {
-   slot = radix_tree_iter_resume(slot, &iter);
-   cond_resched_rcu();
+   if (scan == LAST_SCAN)
+   error = -EBUSY;
+   else
+   clear = false;
}
+   if (clear)
+   xas_clear_tag(&xas, SHMEM_TAG_PINNED);
+   if (++tagged % XA_CHECK_SCHED)
+   continue;
+
+   xas_pause(&xas);
+   xas_unlock_irq(&xas);
+   cond_resched();
+   xas_lock_irq(&xas);
}
-   rcu_read_unlock();
+   xas_unlock_irq(&xas);
}
 
return error;
-- 
2.16.1



[PATCH v8 52/63] memfd: Convert shmem_tag_pins to XArray

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

Simplify the locking by taking the spinlock while we walk the tree on
the assumption that many acquires and releases of the lock will be
worse than holding the lock for a (potentially) long time.

We could replicate the same locking behaviour with the xarray, but would
have to be careful that the xa_node wasn't RCU-freed under us before we
took the lock.

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 mm/memfd.c | 43 ++++++++++++++++++-------------------------
 1 file changed, 18 insertions(+), 25 deletions(-)

diff --git a/mm/memfd.c b/mm/memfd.c
index 4cf7401cb09c..3b299d72df78 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -21,7 +21,7 @@
 #include 
 
 /*
- * We need a tag: a new tag would expand every radix_tree_node by 8 bytes,
+ * We need a tag: a new tag would expand every xa_node by 8 bytes,
  * so reuse a tag which we firmly believe is never set or cleared on shmem.
  */
 #define SHMEM_TAG_PINNED   PAGECACHE_TAG_TOWRITE
@@ -29,35 +29,28 @@
 
 static void shmem_tag_pins(struct address_space *mapping)
 {
-   struct radix_tree_iter iter;
-   void __rcu **slot;
-   pgoff_t start;
+   XA_STATE(xas, &mapping->i_pages, 0);
struct page *page;
+   unsigned int tagged = 0;
 
lru_add_drain();
-   start = 0;
-   rcu_read_lock();
-
-   radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
-   page = radix_tree_deref_slot(slot);
-   if (!page || radix_tree_exception(page)) {
-   if (radix_tree_deref_retry(page)) {
-   slot = radix_tree_iter_retry(&iter);
-   continue;
-   }
-   } else if (page_count(page) - page_mapcount(page) > 1) {
-   xa_lock_irq(&mapping->i_pages);
-   radix_tree_tag_set(&mapping->i_pages, iter.index,
-  SHMEM_TAG_PINNED);
-   xa_unlock_irq(&mapping->i_pages);
-   }
 
-   if (need_resched()) {
-   slot = radix_tree_iter_resume(slot, &iter);
-   cond_resched_rcu();
-   }
+   xas_lock_irq(&xas);
+   xas_for_each(&xas, page, ULONG_MAX) {
+   if (xa_is_value(page))
+   continue;
+   if (page_count(page) - page_mapcount(page) > 1)
+   xas_set_tag(&xas, SHMEM_TAG_PINNED);
+
+   if (++tagged % XA_CHECK_SCHED)
+   continue;
+
+   xas_pause(&xas);
+   xas_unlock_irq(&xas);
+   cond_resched();
+   xas_lock_irq(&xas);
}
-   rcu_read_unlock();
+   xas_unlock_irq(&xas);
 }
 
 /*
-- 
2.16.1



[PATCH v8 50/63] shmem: Convert shmem_free_swap to XArray

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

This is a perfect use for xa_cmpxchg().  Note the use of 0 for GFP
flags; we won't be allocating memory.
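
For illustration, the whole remove-if-unchanged operation collapses to
one call (a sketch; GFP flags of 0 are safe because storing NULL never
needs to allocate):

    /* Delete the entry at @index only if it is still @radswap. */
    void *old = xa_cmpxchg(&mapping->i_pages, index, radswap, NULL, 0);

    if (old != radswap)
        return -ENOENT;     /* raced with a concurrent update */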

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 mm/shmem.c | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index a0a354a87f3b..cfbffb4b47a2 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -635,16 +635,13 @@ static void shmem_delete_from_page_cache(struct page *page, void *radswap)
 }
 
 /*
- * Remove swap entry from radix tree, free the swap and its page cache.
+ * Remove swap entry from page cache, free the swap and its page cache.
  */
 static int shmem_free_swap(struct address_space *mapping,
   pgoff_t index, void *radswap)
 {
-   void *old;
+   void *old = xa_cmpxchg(&mapping->i_pages, index, radswap, NULL, 0);
 
-   xa_lock_irq(&mapping->i_pages);
-   old = radix_tree_delete_item(&mapping->i_pages, index, radswap);
-   xa_unlock_irq(&mapping->i_pages);
if (old != radswap)
return -ENOENT;
free_swap_and_cache(radix_to_swp_entry(radswap));
-- 
2.16.1



[PATCH v8 49/63] shmem: Convert shmem_alloc_hugepage to XArray

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

xa_find() is a slightly easier API to use than
radix_tree_gang_lookup_slot() because it contains its own RCU locking.
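
Usage sketch (mirroring the hunk below): ask whether anything at all is
present in the pgoff range the huge page would occupy, with no explicit
rcu_read_lock() in the caller:

    pgoff_t index = hindex;

    /* Any present entry in [hindex, hindex + HPAGE_PMD_NR)? */
    if (xa_find(&mapping->i_pages, &index,
            hindex + HPAGE_PMD_NR - 1, XA_PRESENT))
        return NULL;    /* range occupied; no huge page here */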

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 mm/shmem.c | 14 ++++----------
 1 file changed, 4 insertions(+), 10 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index fb06fb3e644a..a0a354a87f3b 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1416,23 +1416,17 @@ static struct page *shmem_alloc_hugepage(gfp_t gfp,
struct shmem_inode_info *info, pgoff_t index)
 {
struct vm_area_struct pvma;
-   struct inode *inode = &info->vfs_inode;
-   struct address_space *mapping = inode->i_mapping;
-   pgoff_t idx, hindex;
-   void __rcu **results;
+   struct address_space *mapping = info->vfs_inode.i_mapping;
+   pgoff_t hindex;
struct page *page;
 
if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
return NULL;
 
hindex = round_down(index, HPAGE_PMD_NR);
-   rcu_read_lock();
-   if (radix_tree_gang_lookup_slot(&mapping->i_pages, &results, &idx,
-   hindex, 1) && idx < hindex + HPAGE_PMD_NR) {
-   rcu_read_unlock();
+   if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,
+   XA_PRESENT))
return NULL;
-   }
-   rcu_read_unlock();
 
shmem_pseudo_vma_init(&pvma, info, hindex);
page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
-- 
2.16.1



[PATCH v8 51/63] shmem: Convert shmem_partial_swap_usage to XArray

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

Simpler code because the xarray takes care of things like the limit and
dereferencing the slot.

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 mm/shmem.c | 18 +++---------------
 1 file changed, 3 insertions(+), 15 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index cfbffb4b47a2..707430003ec7 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -658,29 +658,17 @@ static int shmem_free_swap(struct address_space *mapping,
 unsigned long shmem_partial_swap_usage(struct address_space *mapping,
pgoff_t start, pgoff_t end)
 {
-   struct radix_tree_iter iter;
-   void **slot;
+   XA_STATE(xas, &mapping->i_pages, start);
struct page *page;
unsigned long swapped = 0;
 
rcu_read_lock();
-
-   radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
-   if (iter.index >= end)
-   break;
-
-   page = radix_tree_deref_slot(slot);
-
-   if (radix_tree_deref_retry(page)) {
-   slot = radix_tree_iter_retry(&iter);
-   continue;
-   }
-
+   xas_for_each(&xas, page, end - 1) {
if (xa_is_value(page))
swapped++;
 
if (need_resched()) {
-   slot = radix_tree_iter_resume(slot, &iter);
+   xas_pause(&xas);
cond_resched_rcu();
}
}
-- 
2.16.1



[PATCH v8 54/63] shmem: Comment fixups

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

Remove the last mentions of radix tree from various comments.

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 mm/shmem.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index 707430003ec7..6b044cb6c8b5 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -743,7 +743,7 @@ void shmem_unlock_mapping(struct address_space *mapping)
 }
 
 /*
- * Remove range of pages and swap entries from radix tree, and free them.
+ * Remove range of pages and swap entries from page cache, and free them.
  * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
  */
 static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
@@ -1118,10 +1118,10 @@ static int shmem_unuse_inode(struct shmem_inode_info *info,
 * We needed to drop mutex to make that restrictive page
 * allocation, but the inode might have been freed while we
 * dropped it: although a racing shmem_evict_inode() cannot
-* complete without emptying the radix_tree, our page lock
+* complete without emptying the page cache, our page lock
 * on this swapcache page is not enough to prevent that -
 * free_swap_and_cache() of our swap entry will only
-* trylock_page(), removing swap from radix_tree whatever.
+* trylock_page(), removing swap from page cache whatever.
 *
 * We must not proceed to shmem_add_to_page_cache() if the
 * inode has been freed, but of course we cannot rely on
@@ -1187,7 +1187,7 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
false);
if (error)
goto out;
-   /* No radix_tree_preload: swap entry keeps a place for page in tree */
+   /* No memory allocation: swap entry occupies the slot for the page */
error = -EAGAIN;
 
mutex_lock(_swaplist_mutex);
@@ -1866,7 +1866,7 @@ alloc_nohuge: page = shmem_alloc_and_acct_page(gfp, inode,
spin_unlock_irq(&info->lock);
goto repeat;
}
-   if (error == -EEXIST)   /* from above or from radix_tree_insert */
+   if (error == -EEXIST)
goto repeat;
return error;
 }
@@ -2478,7 +2478,7 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 }
 
 /*
- * llseek SEEK_DATA or SEEK_HOLE through the radix_tree.
+ * llseek SEEK_DATA or SEEK_HOLE through the page cache.
  */
 static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
pgoff_t index, pgoff_t end, int whence)
-- 
2.16.1



[PATCH v8 59/63] f2fs: Convert to XArray

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

This is a straightforward conversion.
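
One point worth calling out when reading the hunks (an observation on
the diff, not new API documentation): xa_set_tag()/xa_clear_tag() take
the array's own xa_lock internally, which is why the
xa_lock_irqsave()/xa_unlock_irqrestore() pairs and their flags
variables disappear below, while the one call site that already holds
the lock keeps it and uses the __-prefixed variant:

    /* Lock not held: the helper acquires xa_lock itself. */
    xa_clear_tag(&mapping->i_pages, page_index(page), PAGECACHE_TAG_DIRTY);

    /* Lock already held (e.g. inside xa_lock_irqsave()): */
    __xa_set_tag(&mapping->i_pages, page_index(page), PAGECACHE_TAG_DIRTY);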

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 fs/f2fs/data.c   |  3 +--
 fs/f2fs/dir.c    |  5 +----
 fs/f2fs/inline.c |  6 +-----
 fs/f2fs/node.c   | 10 ++--------
 4 files changed, 5 insertions(+), 19 deletions(-)

diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index dfbccf884d4f..8deac207fbc3 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2384,8 +2384,7 @@ void f2fs_set_page_dirty_nobuffers(struct page *page)
xa_lock_irqsave(>i_pages, flags);
WARN_ON_ONCE(!PageUptodate(page));
account_page_dirtied(page, mapping);
-   radix_tree_tag_set(&mapping->i_pages,
-   page_index(page), PAGECACHE_TAG_DIRTY);
+   __xa_set_tag(&mapping->i_pages, page_index(page), PAGECACHE_TAG_DIRTY);
xa_unlock_irqrestore(>i_pages, flags);
unlock_page_memcg(page);
 
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index a0aa4dd5a7d4..bf7f33f97fdf 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -708,7 +708,6 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
unsigned int bit_pos;
int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
struct address_space *mapping = page_mapping(page);
-   unsigned long flags;
int i;
 
f2fs_update_time(F2FS_I_SB(dir), REQ_TIME);
@@ -741,10 +740,8 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
 
if (bit_pos == NR_DENTRY_IN_BLOCK &&
!truncate_hole(dir, page->index, page->index + 1)) {
-   xa_lock_irqsave(&mapping->i_pages, flags);
-   radix_tree_tag_clear(&mapping->i_pages, page_index(page),
+   xa_clear_tag(&mapping->i_pages, page_index(page),
 PAGECACHE_TAG_DIRTY);
-   xa_unlock_irqrestore(&mapping->i_pages, flags);
 
clear_page_dirty_for_io(page);
ClearPagePrivate(page);
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index eb82891e4ab6..de06efb53cef 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -204,7 +204,6 @@ int f2fs_write_inline_data(struct inode *inode, struct page *page)
void *src_addr, *dst_addr;
struct dnode_of_data dn;
struct address_space *mapping = page_mapping(page);
-   unsigned long flags;
int err;
 
set_new_dnode(, inode, NULL, NULL, 0);
@@ -226,10 +225,7 @@ int f2fs_write_inline_data(struct inode *inode, struct page *page)
kunmap_atomic(src_addr);
set_page_dirty(dn.inode_page);
 
-   xa_lock_irqsave(&mapping->i_pages, flags);
-   radix_tree_tag_clear(&mapping->i_pages, page_index(page),
-PAGECACHE_TAG_DIRTY);
-   xa_unlock_irqrestore(&mapping->i_pages, flags);
+   xa_clear_tag(&mapping->i_pages, page_index(page), PAGECACHE_TAG_DIRTY);
 
set_inode_flag(inode, FI_APPEND_WRITE);
set_inode_flag(inode, FI_DATA_EXIST);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index ce912c33b11e..57b444ba988b 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -88,14 +88,10 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
 static void clear_node_page_dirty(struct page *page)
 {
struct address_space *mapping = page->mapping;
-   unsigned int long flags;
 
if (PageDirty(page)) {
-   xa_lock_irqsave(&mapping->i_pages, flags);
-   radix_tree_tag_clear(&mapping->i_pages,
-   page_index(page),
+   xa_clear_tag(&mapping->i_pages, page_index(page),
PAGECACHE_TAG_DIRTY);
-   xa_unlock_irqrestore(&mapping->i_pages, flags);
 
clear_page_dirty_for_io(page);
dec_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
@@ -1139,9 +1135,7 @@ void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
return;
f2fs_bug_on(sbi, check_nid_range(sbi, nid));
 
-   rcu_read_lock();
-   apage = radix_tree_lookup(&NODE_MAPPING(sbi)->i_pages, nid);
-   rcu_read_unlock();
+   apage = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
if (apage)
return;
 
-- 
2.16.1



[PATCH v8 58/63] nilfs2: Convert to XArray

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

I'm not 100% convinced that the rewrite of nilfs_copy_back_pages is
correct, but it will at least have different bugs from the current
version.

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 fs/nilfs2/btnode.c | 37 +++-
 fs/nilfs2/page.c   | 72 +++---
 2 files changed, 56 insertions(+), 53 deletions(-)

diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index dec98cab729d..68797603bc08 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -177,42 +177,36 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc,
ctxt->newbh = NULL;
 
if (inode->i_blkbits == PAGE_SHIFT) {
-   lock_page(obh->b_page);
-   /*
-* We cannot call radix_tree_preload for the kernels older
-* than 2.6.23, because it is not exported for modules.
-*/
+   void *entry;
+   struct page *opage = obh->b_page;
+   lock_page(opage);
 retry:
-   err = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
-   if (err)
-   goto failed_unlock;
/* BUG_ON(oldkey != obh->b_page->index); */
-   if (unlikely(oldkey != obh->b_page->index))
-   NILFS_PAGE_BUG(obh->b_page,
+   if (unlikely(oldkey != opage->index))
+   NILFS_PAGE_BUG(opage,
   "invalid oldkey %lld (newkey=%lld)",
   (unsigned long long)oldkey,
   (unsigned long long)newkey);
 
-   xa_lock_irq(&btnc->i_pages);
-   err = radix_tree_insert(&btnc->i_pages, newkey, obh->b_page);
-   xa_unlock_irq(&btnc->i_pages);
+   entry = xa_cmpxchg(&btnc->i_pages, newkey, NULL, opage, GFP_NOFS);
/*
 * Note: page->index will not change to newkey until
 * nilfs_btnode_commit_change_key() will be called.
 * To protect the page in intermediate state, the page lock
 * is held.
 */
-   radix_tree_preload_end();
-   if (!err)
+   if (!entry)
return 0;
-   else if (err != -EEXIST)
+   if (xa_is_err(entry)) {
+   err = xa_err(entry);
goto failed_unlock;
+   }
 
err = invalidate_inode_pages2_range(btnc, newkey, newkey);
if (!err)
goto retry;
/* fallback to copy mode */
-   unlock_page(obh->b_page);
+   unlock_page(opage);
}
 
nbh = nilfs_btnode_create_block(btnc, newkey);
@@ -252,9 +246,8 @@ void nilfs_btnode_commit_change_key(struct address_space *btnc,
mark_buffer_dirty(obh);
 
xa_lock_irq(&btnc->i_pages);
-   radix_tree_delete(&btnc->i_pages, oldkey);
-   radix_tree_tag_set(&btnc->i_pages, newkey,
-  PAGECACHE_TAG_DIRTY);
+   __xa_erase(&btnc->i_pages, oldkey);
+   __xa_set_tag(&btnc->i_pages, newkey, PAGECACHE_TAG_DIRTY);
xa_unlock_irq(&btnc->i_pages);
 
opage->index = obh->b_blocknr = newkey;
@@ -283,9 +276,7 @@ void nilfs_btnode_abort_change_key(struct address_space *btnc,
return;
 
if (nbh == NULL) {  /* blocksize == pagesize */
-   xa_lock_irq(&btnc->i_pages);
-   radix_tree_delete(&btnc->i_pages, newkey);
-   xa_unlock_irq(&btnc->i_pages);
+   xa_erase(&btnc->i_pages, newkey);
unlock_page(ctxt->bh->b_page);
} else
brelse(nbh);
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 4cb850a6f1c2..a3995406d5d3 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -304,10 +304,10 @@ int nilfs_copy_dirty_pages(struct address_space *dmap,
 void nilfs_copy_back_pages(struct address_space *dmap,
   struct address_space *smap)
 {
+   XA_STATE(xas, &dmap->i_pages, 0);
struct pagevec pvec;
unsigned int i, n;
pgoff_t index = 0;
-   int err;
 
pagevec_init(&pvec);
 repeat:
@@ -317,43 +317,56 @@ void nilfs_copy_back_pages(struct address_space *dmap,
 
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i], *dpage;
-   pgoff_t offset = page->index;
+   xas_set(&xas, page->index);
 
lock_page(page);
-   dpage = find_lock_page(dmap, offset);
+   do {
+   xas_lock_irq(&xas);
+   dpage = xas_create(&xas);
+   if (!xas_error(&xas))

[PATCH v8 55/63] btrfs: Convert page cache to XArray

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 fs/btrfs/compression.c | 4 +---
 fs/btrfs/extent_io.c   | 8 +++-----
 2 files changed, 4 insertions(+), 8 deletions(-)

diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index ad330af89eef..c2286f436571 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -457,9 +457,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
if (pg_index > end_index)
break;
 
-   rcu_read_lock();
-   page = radix_tree_lookup(&mapping->i_pages, pg_index);
-   rcu_read_unlock();
+   page = xa_load(>i_pages, pg_index);
if (page && !xa_is_value(page)) {
misses++;
if (misses > 4)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index bcc24ee5a2c9..edc472b41037 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -5170,11 +5170,9 @@ void clear_extent_buffer_dirty(struct extent_buffer *eb)
 
clear_page_dirty_for_io(page);
xa_lock_irq(&page->mapping->i_pages);
-   if (!PageDirty(page)) {
-   radix_tree_tag_clear(&page->mapping->i_pages,
-   page_index(page),
-   PAGECACHE_TAG_DIRTY);
-   }
+   if (!PageDirty(page))
+   __xa_clear_tag(&page->mapping->i_pages,
+   page_index(page), PAGECACHE_TAG_DIRTY);
xa_unlock_irq(&page->mapping->i_pages);
ClearPageError(page);
unlock_page(page);
-- 
2.16.1



[PATCH v8 57/63] fs: Convert writeback to XArray

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

A couple of short loops.

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 fs/fs-writeback.c | 25 +++++++++++----------------
 1 file changed, 9 insertions(+), 16 deletions(-)

diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 091577edc497..98e5e08274a2 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -339,9 +339,9 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
struct address_space *mapping = inode->i_mapping;
struct bdi_writeback *old_wb = inode->i_wb;
struct bdi_writeback *new_wb = isw->new_wb;
-   struct radix_tree_iter iter;
+   XA_STATE(xas, &mapping->i_pages, 0);
+   struct page *page;
bool switched = false;
-   void **slot;
 
/*
 * By the time control reaches here, RCU grace period has passed
@@ -375,25 +375,18 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
 * to possibly dirty pages while PAGECACHE_TAG_WRITEBACK points to
 * pages actually under writeback.
 */
-   radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter, 0,
-  PAGECACHE_TAG_DIRTY) {
-   struct page *page = radix_tree_deref_slot_protected(slot,
-   &mapping->i_pages.xa_lock);
-   if (likely(page) && PageDirty(page)) {
+   xas_for_each_tag(&xas, page, ULONG_MAX, PAGECACHE_TAG_DIRTY) {
+   if (PageDirty(page)) {
dec_wb_stat(old_wb, WB_RECLAIMABLE);
inc_wb_stat(new_wb, WB_RECLAIMABLE);
}
}
 
-   radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter, 0,
-  PAGECACHE_TAG_WRITEBACK) {
-   struct page *page = radix_tree_deref_slot_protected(slot,
-   &mapping->i_pages.xa_lock);
-   if (likely(page)) {
-   WARN_ON_ONCE(!PageWriteback(page));
-   dec_wb_stat(old_wb, WB_WRITEBACK);
-   inc_wb_stat(new_wb, WB_WRITEBACK);
-   }
+   xas_set(&xas, 0);
+   xas_for_each_tag(&xas, page, ULONG_MAX, PAGECACHE_TAG_WRITEBACK) {
+   WARN_ON_ONCE(!PageWriteback(page));
+   dec_wb_stat(old_wb, WB_WRITEBACK);
+   inc_wb_stat(new_wb, WB_WRITEBACK);
}
 
wb_get(new_wb);
-- 
2.16.1



[PATCH v8 56/63] fs: Convert buffer to XArray

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

Mostly comment fixes, but one use of __xa_set_tag.

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 fs/buffer.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/fs/buffer.c b/fs/buffer.c
index 3ee82c056d85..70af8fbc64cf 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -585,7 +585,7 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
 EXPORT_SYMBOL(mark_buffer_dirty_inode);
 
 /*
- * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
+ * Mark the page dirty, and set it dirty in the page cache, and mark the inode
  * dirty.
  *
  * If warn is true, then emit a warning if the page is not uptodate and has
@@ -602,8 +602,8 @@ void __set_page_dirty(struct page *page, struct address_space *mapping,
if (page->mapping) {/* Race with truncate? */
WARN_ON_ONCE(warn && !PageUptodate(page));
account_page_dirtied(page, mapping);
-   radix_tree_tag_set(&mapping->i_pages,
-   page_index(page), PAGECACHE_TAG_DIRTY);
+   __xa_set_tag(&mapping->i_pages, page_index(page),
+   PAGECACHE_TAG_DIRTY);
}
xa_unlock_irqrestore(&mapping->i_pages, flags);
 }
@@ -1066,7 +1066,7 @@ __getblk_slow(struct block_device *bdev, sector_t block,
  * The relationship between dirty buffers and dirty pages:
  *
  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
- * the page is tagged dirty in its radix tree.
+ * the page is tagged dirty in the page cache.
  *
  * At all times, the dirtiness of the buffers represents the dirtiness of
  * subsections of the page.  If the page has buffers, the page dirty bit is
@@ -1089,9 +1089,9 @@ __getblk_slow(struct block_device *bdev, sector_t block,
  * mark_buffer_dirty - mark a buffer_head as needing writeout
  * @bh: the buffer_head to mark dirty
  *
- * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
- * backing page dirty, then tag the page as dirty in its address_space's radix
- * tree and then attach the address_space's inode to its superblock's dirty
+ * mark_buffer_dirty() will set the dirty bit against the buffer, then set
+ * its backing page dirty, then tag the page as dirty in the page cache
+ * and then attach the address_space's inode to its superblock's dirty
  * inode list.
  *
  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
-- 
2.16.1



[PATCH v8 61/63] dax: Convert to XArray

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

The DAX code (by its nature) is deeply interwoven with the radix tree
infrastructure, doing operations directly on the radix tree slots.
Convert the whole file to use XArray concepts; mostly passing around
xa_state instead of address_space, index or slot.
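
The shape of that refactoring, reduced to a sketch (dax_get_entry is a
hypothetical helper name standing in for the real ones in the diff):
the caller builds one xa_state and hands it down, so helpers no longer
need (mapping, index, slot) triples:

    /* Hypothetical helper shape after the conversion. */
    static void *dax_get_entry(struct xa_state *xas)
    {
        /* xas carries the array, the index and the cursor. */
        return xas_load(xas);
    }

    XA_STATE(xas, &mapping->i_pages, index);
    void *entry;

    xas_lock_irq(&xas);
    entry = dax_get_entry(&xas);
    xas_unlock_irq(&xas);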

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 fs/dax.c | 369 +--
 1 file changed, 143 insertions(+), 226 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index ae70bebdb835..d14c2d931377 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -45,6 +45,7 @@
 /* The 'colour' (ie low bits) within a PMD of a page offset.  */
 #define PG_PMD_COLOUR  ((PMD_SIZE >> PAGE_SHIFT) - 1)
 #define PG_PMD_NR  (PMD_SIZE >> PAGE_SHIFT)
+#define PMD_ORDER  (PMD_SHIFT - PAGE_SHIFT)
 
 static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
 
@@ -74,21 +75,26 @@ fs_initcall(init_dax_wait_table);
 #define DAX_ZERO_PAGE  (1UL << 2)
 #define DAX_EMPTY  (1UL << 3)
 
-static unsigned long dax_radix_sector(void *entry)
+static bool xa_is_dax_locked(void *entry)
+{
+   return xa_to_value(entry) & DAX_ENTRY_LOCK;
+}
+
+static unsigned long xa_to_dax_sector(void *entry)
 {
return xa_to_value(entry) >> DAX_SHIFT;
 }
 
-static void *dax_radix_locked_entry(sector_t sector, unsigned long flags)
+static void *xa_mk_dax_locked(sector_t sector, unsigned long flags)
 {
return xa_mk_value(flags | ((unsigned long)sector << DAX_SHIFT) |
DAX_ENTRY_LOCK);
 }
 
-static unsigned int dax_radix_order(void *entry)
+static unsigned int dax_entry_order(void *entry)
 {
if (xa_to_value(entry) & DAX_PMD)
-   return PMD_SHIFT - PAGE_SHIFT;
+   return PMD_ORDER;
return 0;
 }
 
@@ -113,10 +119,10 @@ static int dax_is_empty_entry(void *entry)
 }
 
 /*
- * DAX radix tree locking
+ * DAX page cache entry locking
  */
 struct exceptional_entry_key {
-   struct address_space *mapping;
+   struct xarray *xa;
pgoff_t entry_start;
 };
 
@@ -125,9 +131,10 @@ struct wait_exceptional_entry_queue {
struct exceptional_entry_key key;
 };
 
-static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
-   pgoff_t index, void *entry, struct exceptional_entry_key *key)
+static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
+   void *entry, struct exceptional_entry_key *key)
 {
+   unsigned long index = xas->xa_index;
unsigned long hash;
 
/*
@@ -138,10 +145,10 @@ static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
if (dax_is_pmd_entry(entry))
index &= ~PG_PMD_COLOUR;
 
-   key->mapping = mapping;
+   key->xa = xas->xa;
key->entry_start = index;
 
-   hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
+   hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
return wait_table + hash;
 }
 
@@ -152,7 +159,7 @@ static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mo
struct wait_exceptional_entry_queue *ewait =
container_of(wait, struct wait_exceptional_entry_queue, wait);
 
-   if (key->mapping != ewait->key.mapping ||
+   if (key->xa != ewait->key.xa ||
key->entry_start != ewait->key.entry_start)
return 0;
return autoremove_wake_function(wait, mode, sync, NULL);
@@ -163,13 +170,12 @@ static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mo
  * The important information it's conveying is whether the entry at
  * this index used to be a PMD entry.
  */
-static void dax_wake_mapping_entry_waiter(struct address_space *mapping,
-   pgoff_t index, void *entry, bool wake_all)
+static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
 {
struct exceptional_entry_key key;
wait_queue_head_t *wq;
 
-   wq = dax_entry_waitqueue(mapping, index, entry, &key);
+   wq = dax_entry_waitqueue(xas, entry, &key);
 
/*
 * Checking for locked entry and prepare_to_wait_exclusive() happens
@@ -182,53 +188,27 @@ static void dax_wake_mapping_entry_waiter(struct address_space *mapping,
 }
 
 /*
- * Check whether the given slot is locked.  Must be called with the i_pages
- * lock held.
- */
-static inline int slot_locked(struct address_space *mapping, void **slot)
-{
-   unsigned long entry = xa_to_value(
-   radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock));
-   return entry & DAX_ENTRY_LOCK;
-}
-
-/*
- * Mark the given slot as locked.  Must be called with the i_pages lock held.
+ * Mark the given entry as locked.  Must be called with the i_pages lock held.
  */
-static inline void *lock_slot(struct address_space *mapping, void

[PATCH v8 60/63] lustre: Convert to XArray

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 drivers/staging/lustre/lustre/llite/glimpse.c   | 12 +++++-------
 drivers/staging/lustre/lustre/mdc/mdc_request.c | 16 ++++++++--------
 2 files changed, 13 insertions(+), 15 deletions(-)

diff --git a/drivers/staging/lustre/lustre/llite/glimpse.c b/drivers/staging/lustre/lustre/llite/glimpse.c
index 3075358f3f08..014035be5ac7 100644
--- a/drivers/staging/lustre/lustre/llite/glimpse.c
+++ b/drivers/staging/lustre/lustre/llite/glimpse.c
@@ -57,7 +57,7 @@ static const struct cl_lock_descr whole_file = {
 };
 
 /*
- * Check whether file has possible unwriten pages.
+ * Check whether file has possible unwritten pages.
  *
 * \retval 1    file is mmap-ed or has dirty pages
 *         0    otherwise
@@ -66,16 +66,14 @@ blkcnt_t dirty_cnt(struct inode *inode)
 {
blkcnt_t cnt = 0;
struct vvp_object *vob = cl_inode2vvp(inode);
-   void  *results[1];
 
-   if (inode->i_mapping)
-   cnt += radix_tree_gang_lookup_tag(&inode->i_mapping->i_pages,
- results, 0, 1,
- PAGECACHE_TAG_DIRTY);
+   if (inode->i_mapping && xa_tagged(&inode->i_mapping->i_pages,
+   PAGECACHE_TAG_DIRTY))
+   cnt = 1;
if (cnt == 0 && atomic_read(&vob->vob_mmap_cnt) > 0)
cnt = 1;
 
-   return (cnt > 0) ? 1 : 0;
+   return cnt;
 }
 
 int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index 6950cb21638e..dbda8a9e351d 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -931,17 +931,18 @@ static struct page *mdc_page_locate(struct address_space *mapping, __u64 *hash,
 * hash _smaller_ than one we are looking for.
 */
unsigned long offset = hash_x_index(*hash, hash64);
+   XA_STATE(xas, &mapping->i_pages, offset);
struct page *page;
-   int found;
 
-   xa_lock_irq(&mapping->i_pages);
-   found = radix_tree_gang_lookup(&mapping->i_pages,
-  (void **)&page, offset, 1);
-   if (found > 0 && !xa_is_value(page)) {
+   xas_lock_irq(&xas);
+   page = xas_find(&xas, ULONG_MAX);
+   if (xa_is_value(page))
+   page = NULL;
+   if (page) {
struct lu_dirpage *dp;
 
get_page(page);
-   xa_unlock_irq(&mapping->i_pages);
+   xas_unlock_irq(&xas);
/*
 * In contrast to find_lock_page() we are sure that directory
 * page cannot be truncated (while DLM lock is held) and,
@@ -989,8 +990,7 @@ static struct page *mdc_page_locate(struct address_space *mapping, __u64 *hash,
page = ERR_PTR(-EIO);
}
} else {
-   xa_unlock_irq(&mapping->i_pages);
-   page = NULL;
+   xas_unlock_irq(&xas);
}
return page;
 }
-- 
2.16.1



[PATCH v8 63/63] radix tree: Remove unused functions

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

The following functions are (now) unused:
 - __radix_tree_delete_node
 - radix_tree_gang_lookup_slot
 - radix_tree_join
 - radix_tree_maybe_preload_order
 - radix_tree_split
 - radix_tree_split_preload

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 include/linux/radix-tree.h |  16 +---------------
 lib/radix-tree.c   | 294 +
 2 files changed, 4 insertions(+), 306 deletions(-)

diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index f64beb9ba175..eb2ae901f2ec 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -147,12 +147,11 @@ static inline unsigned int iter_shift(const struct radix_tree_iter *iter)
  * radix_tree_lookup_slot
  * radix_tree_tag_get
  * radix_tree_gang_lookup
- * radix_tree_gang_lookup_slot
  * radix_tree_gang_lookup_tag
  * radix_tree_gang_lookup_tag_slot
  * radix_tree_tagged
  *
- * The first 8 functions are able to be called locklessly, using RCU. The
+ * The first 7 functions are able to be called locklessly, using RCU. The
  * caller must ensure calls to these functions are made within rcu_read_lock()
  * regions. Other readers (lock-free or otherwise) and modifications may be
  * running concurrently.
@@ -254,9 +253,6 @@ void radix_tree_iter_replace(struct radix_tree_root *,
const struct radix_tree_iter *, void __rcu **slot, void *entry);
 void radix_tree_replace_slot(struct radix_tree_root *,
 void __rcu **slot, void *entry);
-void __radix_tree_delete_node(struct radix_tree_root *,
- struct radix_tree_node *,
- radix_tree_update_node_t update_node);
 void radix_tree_iter_delete(struct radix_tree_root *,
struct radix_tree_iter *iter, void __rcu **slot);
 void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
@@ -266,12 +262,8 @@ void radix_tree_clear_tags(struct radix_tree_root *, struct radix_tree_node *,
 unsigned int radix_tree_gang_lookup(const struct radix_tree_root *,
void **results, unsigned long first_index,
unsigned int max_items);
-unsigned int radix_tree_gang_lookup_slot(const struct radix_tree_root *,
-   void __rcu ***results, unsigned long *indices,
-   unsigned long first_index, unsigned int max_items);
 int radix_tree_preload(gfp_t gfp_mask);
 int radix_tree_maybe_preload(gfp_t gfp_mask);
-int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order);
 void radix_tree_init(void);
 void *radix_tree_tag_set(struct radix_tree_root *,
unsigned long index, unsigned int tag);
@@ -296,12 +288,6 @@ static inline void radix_tree_preload_end(void)
preempt_enable();
 }
 
-int radix_tree_split_preload(unsigned old_order, unsigned new_order, gfp_t);
-int radix_tree_split(struct radix_tree_root *, unsigned long index,
-   unsigned new_order);
-int radix_tree_join(struct radix_tree_root *, unsigned long index,
-   unsigned new_order, void *);
-
 void __rcu **idr_get_free(struct radix_tree_root *root,
  struct radix_tree_iter *iter, gfp_t gfp,
  unsigned long max);
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 71697bd25140..20858120ac0b 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -41,9 +41,6 @@
 #include 
 
 
-/* Number of nodes in fully populated tree of given height */
-static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly;
-
 /*
  * Radix tree node cache.
  */
@@ -463,73 +460,6 @@ int radix_tree_maybe_preload(gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(radix_tree_maybe_preload);
 
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
-/*
- * Preload with enough objects to ensure that we can split a single entry
- * of order @old_order into many entries of size @new_order
- */
-int radix_tree_split_preload(unsigned int old_order, unsigned int new_order,
-   gfp_t gfp_mask)
-{
-   unsigned top = 1 << (old_order % RADIX_TREE_MAP_SHIFT);
-   unsigned layers = (old_order / RADIX_TREE_MAP_SHIFT) -
-   (new_order / RADIX_TREE_MAP_SHIFT);
-   unsigned nr = 0;
-
-   WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
-   BUG_ON(new_order >= old_order);
-
-   while (layers--)
-   nr = nr * RADIX_TREE_MAP_SIZE + 1;
-   return __radix_tree_preload(gfp_mask, top * nr);
-}
-#endif
-
-/*
- * The same as function above, but preload number of nodes required to insert
- * (1 << order) continuous naturally-aligned elements.
- */
-int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
-{
-   unsigned long nr_subtrees;
-   int nr_nodes, subtree_height;
-
-   /* Preloading doesn't help anythi

[PATCH v8 62/63] page cache: Finish XArray conversion

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

With no more radix tree API users left, we can drop the GFP flags
and use xa_init_flags() instead of INIT_RADIX_TREE().

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 fs/inode.c  | 2 +-
 mm/swap_state.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/fs/inode.c b/fs/inode.c
index 14d938e2c9a2..6dcbb1b47fb7 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -349,7 +349,7 @@ EXPORT_SYMBOL(inc_nlink);
 void address_space_init_once(struct address_space *mapping)
 {
memset(mapping, 0, sizeof(*mapping));
-   INIT_RADIX_TREE(&mapping->i_pages, GFP_ATOMIC | __GFP_ACCOUNT);
+   xa_init_flags(&mapping->i_pages, XA_FLAGS_LOCK_IRQ);
init_rwsem(&mapping->i_mmap_rwsem);
INIT_LIST_HEAD(&mapping->private_list);
spin_lock_init(&mapping->private_lock);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 21218f6e438b..ebf837d13d2d 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -595,7 +595,7 @@ int init_swap_address_space(unsigned int type, unsigned long nr_pages)
return -ENOMEM;
for (i = 0; i < nr; i++) {
space = spaces + i;
-   INIT_RADIX_TREE(&space->i_pages, GFP_ATOMIC|__GFP_NOWARN);
+   xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
atomic_set(&space->i_mmap_writable, 0);
space->a_ops = _aops;
/* swap cache doesn't use writeback related tags */
-- 
2.16.1



[PATCH v8 46/63] shmem: Convert shmem_confirm_swap to XArray

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

xa_load has its own RCU locking, so we can eliminate it here.
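
Sketch of the before/after (as in the hunk below):

    /* Before: the caller provides the RCU critical section. */
    rcu_read_lock();
    item = radix_tree_lookup(&mapping->i_pages, index);
    rcu_read_unlock();

    /* After: xa_load() takes and drops the RCU read lock itself. */
    item = xa_load(&mapping->i_pages, index);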

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 mm/shmem.c | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index 5813808965cd..0af8a439dfad 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -348,12 +348,7 @@ static int shmem_xa_replace(struct address_space *mapping,
 static bool shmem_confirm_swap(struct address_space *mapping,
   pgoff_t index, swp_entry_t swap)
 {
-   void *item;
-
-   rcu_read_lock();
-   item = radix_tree_lookup(&mapping->i_pages, index);
-   rcu_read_unlock();
-   return item == swp_to_radix_entry(swap);
+   return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
 }
 
 /*
-- 
2.16.1



[PATCH v8 48/63] shmem: Convert shmem_add_to_page_cache to XArray

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

This removes the last caller of radix_tree_maybe_preload_order().
Simpler code, unless we run out of memory for new xa_nodes partway through
inserting entries into the xarray.  Hopefully we can support multi-index
entries in the page cache soon and all the awful code goes away.
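
The allocation story deserves spelling out, since it replaces the old
preload dance; the canonical retry loop looks like this (sketch):

    XA_STATE(xas, &mapping->i_pages, index);

    do {
        xas_lock_irq(&xas);
        xas_store(&xas, page);      /* may record -ENOMEM in xas */
        xas_unlock_irq(&xas);
    } while (xas_nomem(&xas, gfp)); /* allocate outside the lock, retry */

    if (xas_error(&xas))
        return xas_error(&xas);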

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 mm/shmem.c | 87 --
 1 file changed, 39 insertions(+), 48 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index 49f42dc9e1dc..fb06fb3e644a 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -558,9 +558,10 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
  */
 static int shmem_add_to_page_cache(struct page *page,
   struct address_space *mapping,
-  pgoff_t index, void *expected)
+  pgoff_t index, void *expected, gfp_t gfp)
 {
-   int error, nr = hpage_nr_pages(page);
+   XA_STATE(xas, &mapping->i_pages, index);
+   unsigned long i, nr = 1UL << compound_order(page);
 
VM_BUG_ON_PAGE(PageTail(page), page);
VM_BUG_ON_PAGE(index != round_down(index, nr), page);
@@ -569,49 +570,47 @@ static int shmem_add_to_page_cache(struct page *page,
VM_BUG_ON(expected && PageTransHuge(page));
 
page_ref_add(page, nr);
-   page->mapping = mapping;
page->index = index;
+   page->mapping = mapping;
 
-   xa_lock_irq(&mapping->i_pages);
-   if (PageTransHuge(page)) {
-   void __rcu **results;
-   pgoff_t idx;
-   int i;
-
-   error = 0;
-   if (radix_tree_gang_lookup_slot(&mapping->i_pages,
-   &results, &idx, index, 1) &&
-   idx < index + HPAGE_PMD_NR) {
-   error = -EEXIST;
+   do {
+   xas_lock_irq(&xas);
+   xas_create_range(&xas, index + nr - 1);
+   if (xas_error(&xas))
+   goto unlock;
+   for (i = 0; i < nr; i++) {
+   void *entry = xas_load(&xas);
+   if (entry != expected)
+   xas_set_err(&xas, -ENOENT);
+   if (xas_error(&xas))
+   goto undo;
+   xas_store(&xas, page + i);
+   xas_next(&xas);
}
-
-   if (!error) {
-   for (i = 0; i < HPAGE_PMD_NR; i++) {
-   error = radix_tree_insert(&mapping->i_pages,
-   index + i, page + i);
-   VM_BUG_ON(error);
-   }
+   if (PageTransHuge(page)) {
count_vm_event(THP_FILE_ALLOC);
+   __inc_node_page_state(page, NR_SHMEM_THPS);
}
-   } else if (!expected) {
-   error = radix_tree_insert(&mapping->i_pages, index, page);
-   } else {
-   error = shmem_xa_replace(mapping, index, expected, page);
-   }
-
-   if (!error) {
mapping->nrpages += nr;
-   if (PageTransHuge(page))
-   __inc_node_page_state(page, NR_SHMEM_THPS);
__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
__mod_node_page_state(page_pgdat(page), NR_SHMEM, nr);
-   xa_unlock_irq(&mapping->i_pages);
-   } else {
+   goto unlock;
+undo:
+   while (i-- > 0) {
+   xas_store(&xas, NULL);
+   xas_prev(&xas);
+   }
+unlock:
+   xas_unlock_irq(&xas);
+   } while (xas_nomem(&xas, gfp));
+
+   if (xas_error(&xas)) {
page->mapping = NULL;
-   xa_unlock_irq(&mapping->i_pages);
page_ref_sub(page, nr);
+   return xas_error(&xas);
}
-   return error;
+
+   return 0;
 }
 
 /*
@@ -1159,7 +1158,7 @@ static int shmem_unuse_inode(struct shmem_inode_info *info,
 */
if (!error)
error = shmem_add_to_page_cache(*pagep, mapping, index,
-   radswap);
+   radswap, gfp);
if (error != -ENOMEM) {
/*
 * Truncation and eviction use free_swap_and_cache(), which
@@ -1680,7 +1679,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
false);
if (!error) {
error = shmem_add_to_page_cache(page, mapping, index,
-   swp_to_radix_entry(swap));
+   swp_to_radix_entry(swap), gfp);
/*
 * We alr

[PATCH v8 14/63] xarray: Define struct xa_node

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

This is a direct replacement for struct radix_tree_node.  A couple of
struct members have changed name, so convert those.  Use a #define so
that radix tree users continue to work without change.

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 include/linux/radix-tree.h            | 29 +++--
 include/linux/xarray.h                | 24 ++
 lib/radix-tree.c                      | 48 +--
 mm/workingset.c                       | 16 ++--
 tools/testing/radix-tree/multiorder.c | 30 +++---
 5 files changed, 74 insertions(+), 73 deletions(-)

diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index c8a33e9e9a3c..f64beb9ba175 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -32,6 +32,7 @@
 
 /* Keep unconverted code working */
 #define radix_tree_root    xarray
+#define radix_tree_node    xa_node
 
 /*
  * The bottom two bits of the slot determine how the remaining bits in the
@@ -60,41 +61,17 @@ static inline bool radix_tree_is_internal_node(void *ptr)
 
 /*** radix-tree API starts here ***/
 
-#define RADIX_TREE_MAX_TAGS 3
-
 #define RADIX_TREE_MAP_SHIFT   XA_CHUNK_SHIFT
 #define RADIX_TREE_MAP_SIZE(1UL << RADIX_TREE_MAP_SHIFT)
 #define RADIX_TREE_MAP_MASK(RADIX_TREE_MAP_SIZE-1)
 
-#define RADIX_TREE_TAG_LONGS   \
-   ((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG)
+#define RADIX_TREE_MAX_TAGS    XA_MAX_TAGS
+#define RADIX_TREE_TAG_LONGS   XA_TAG_LONGS
 
 #define RADIX_TREE_INDEX_BITS  (8 /* CHAR_BIT */ * sizeof(unsigned long))
 #define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
  RADIX_TREE_MAP_SHIFT))
 
-/*
- * @count is the count of every non-NULL element in the ->slots array
- * whether that is a data entry, a retry entry, a user pointer,
- * a sibling entry or a pointer to the next level of the tree.
- * @exceptional is the count of every element in ->slots which is
- * either a data entry or a sibling entry for data.
- */
-struct radix_tree_node {
-   unsigned char   shift;  /* Bits remaining in each slot */
-   unsigned char   offset; /* Slot offset in parent */
-   unsigned char   count;  /* Total entry count */
-   unsigned char   exceptional;/* Exceptional entry count */
-   struct radix_tree_node *parent; /* Used when ascending tree */
-   struct radix_tree_root *root;   /* The tree we belong to */
-   union {
-   struct list_head private_list;  /* For tree user */
-   struct rcu_head rcu_head;   /* Used when freeing node */
-   };
-   void __rcu  *slots[RADIX_TREE_MAP_SIZE];
-   unsigned long   tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
-};
-
 /* The IDR tag is stored in the low bits of xa_flags */
 #define ROOT_IS_IDR((__force gfp_t)4)
 /* The top bits of xa_flags are used to store the root tags */
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 9b05b907062b..b51f354dfbf0 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -195,6 +195,30 @@ static inline void xa_init(struct xarray *xa)
 #endif
 #define XA_CHUNK_SIZE  (1UL << XA_CHUNK_SHIFT)
 #define XA_CHUNK_MASK  (XA_CHUNK_SIZE - 1)
+#define XA_MAX_TAGS    3
+#define XA_TAG_LONGS   DIV_ROUND_UP(XA_CHUNK_SIZE, BITS_PER_LONG)
+
+/*
+ * @count is the count of every non-NULL element in the ->slots array
+ * whether that is a value entry, a retry entry, a user pointer,
+ * a sibling entry or a pointer to the next level of the tree.
+ * @nr_values is the count of every element in ->slots which is
+ * either a value entry or a sibling entry to a value entry.
+ */
+struct xa_node {
+   unsigned char   shift;  /* Bits remaining in each slot */
+   unsigned char   offset; /* Slot offset in parent */
+   unsigned char   count;  /* Total entry count */
+   unsigned char   nr_values;  /* Value entry count */
+   struct xa_node __rcu *parent;   /* NULL at top of tree */
+   struct xarray   *array; /* The array we belong to */
+   union {
+   struct list_head private_list;  /* For tree user */
+   struct rcu_head rcu_head;   /* Used when freeing node */
+   };
+   void __rcu  *slots[XA_CHUNK_SIZE];
+   unsigned long   tags[XA_MAX_TAGS][XA_TAG_LONGS];
+};
 
 /* Private */
 static inline bool xa_is_node(const void *entry)
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index c9ae6e6579f8..e98de16b1648 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -260,11 +260,11 @@ static void dump_node(struct radix_tree_node *node, unsigned long index)
 {
unsigned long i;
 
-   pr_debug("radix node: %p offset %d indices %lu-%lu p

[PATCH v8 45/63] shmem: Convert replace to XArray

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

shmem_radix_tree_replace() is renamed to shmem_xa_replace() and
converted to use the XArray API.

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 mm/shmem.c | 22 ++++++++--------------
 1 file changed, 8 insertions(+), 14 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index ac53cae5d3a7..5813808965cd 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -321,24 +321,20 @@ void shmem_uncharge(struct inode *inode, long pages)
 }
 
 /*
- * Replace item expected in radix tree by a new item, while holding tree lock.
+ * Replace item expected in xarray by a new item, while holding xa_lock.
  */
-static int shmem_radix_tree_replace(struct address_space *mapping,
+static int shmem_xa_replace(struct address_space *mapping,
pgoff_t index, void *expected, void *replacement)
 {
-   struct radix_tree_node *node;
-   void **pslot;
+   XA_STATE(xas, &mapping->i_pages, index);
void *item;
 
VM_BUG_ON(!expected);
VM_BUG_ON(!replacement);
-   item = __radix_tree_lookup(&mapping->i_pages, index, &node, &pslot);
-   if (!item)
-   return -ENOENT;
+   item = xas_load(&xas);
if (item != expected)
return -ENOENT;
-   __radix_tree_replace(&mapping->i_pages, node, pslot,
-    replacement, NULL);
+   xas_store(&xas, replacement);
return 0;
 }
 
@@ -605,8 +601,7 @@ static int shmem_add_to_page_cache(struct page *page,
} else if (!expected) {
-   error = radix_tree_insert(&mapping->i_pages, index, page);
} else {
-   error = shmem_radix_tree_replace(mapping, index, expected,
-page);
+   error = shmem_xa_replace(mapping, index, expected, page);
}
 
if (!error) {
@@ -635,7 +630,7 @@ static void shmem_delete_from_page_cache(struct page *page, void *radswap)
VM_BUG_ON_PAGE(PageCompound(page), page);
 
xa_lock_irq(&mapping->i_pages);
-   error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
+   error = shmem_xa_replace(mapping, page->index, page, radswap);
page->mapping = NULL;
mapping->nrpages--;
__dec_node_page_state(page, NR_FILE_PAGES);
@@ -1553,8 +1548,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
 * a nice clean interface for us to replace oldpage by newpage there.
 */
xa_lock_irq(&swap_mapping->i_pages);
-   error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
-  newpage);
+   error = shmem_xa_replace(swap_mapping, swap_index, oldpage, newpage);
if (!error) {
__inc_node_page_state(newpage, NR_FILE_PAGES);
__dec_node_page_state(oldpage, NR_FILE_PAGES);
-- 
2.16.1



[PATCH v8 01/63] mac80211_hwsim: Use DEFINE_IDA

2018-03-06 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

This is preferred to opencoding an IDA_INIT.
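
Sketch of the idiom (my_ida is a placeholder name):

    static DEFINE_IDA(my_ida);  /* replaces: static struct ida my_ida = IDA_INIT; */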

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 drivers/net/wireless/mac80211_hwsim.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 7b6c3640a94f..8bffd6ebc03e 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -253,7 +253,7 @@ static inline void hwsim_clear_chanctx_magic(struct ieee80211_chanctx_conf *c)
 
 static unsigned int hwsim_net_id;
 
-static struct ida hwsim_netgroup_ida = IDA_INIT;
+static DEFINE_IDA(hwsim_netgroup_ida);
 
 struct hwsim_net {
int netgroup;
-- 
2.16.1



Re: [PATCH v6 00/99] XArray version 6

2018-01-18 Thread Matthew Wilcox
On Thu, Jan 18, 2018 at 05:56:12PM +0100, David Sterba wrote:
> On Thu, Jan 18, 2018 at 08:48:43AM -0800, Matthew Wilcox wrote:
> > Thank you!  I shall attempt to debug.  Was this with a btrfs root
> > filesystem?  I'm most suspicious of those patches right now, since they've
> > received next to no testing.  I'm going to put together a smaller patchset
> > which just does the page cache conversion and nothing else in the hope
> > that we can get that merged this year.
> 
> No, the root is ext3 and there was no btrfs filesytem mounted at the
> time.

Found it; I was missing a prerequisite patch.  New (smaller) patch series
coming soon.


Re: [PATCH v6 00/99] XArray version 6

2018-01-18 Thread Matthew Wilcox
On Thu, Jan 18, 2018 at 05:07:50PM +0100, David Sterba wrote:
> On Wed, Jan 17, 2018 at 12:20:24PM -0800, Matthew Wilcox wrote:
> > From: Matthew Wilcox <mawil...@microsoft.com>
> > 
> > This version of the XArray has no known bugs.
> 
> I've booted this patchset on 2 boxes, both had random problems during
> boot. On one I was not able to diagnose what went wrong. On the other
> one the system booted up to userspace and failed to set up networking.
> Serial console worked and the network service complained about wrong
> format of /usr/share/wicked/schema/team.xml . That's supposed to be a
> text file, though hexdump showed me lots of zeros. Trimmed output:
> 
>   00 00 00 00 00 00 00 00  00 00 00 00 00 00 00 00  |................|
> *
> (similar output here)
> *
> 0a10  00 00 00 00 00 00 00 00  11 03 00 00 00 00 00 00  |................|
> 0a20  20 8b 7f 01 00 00 00 00  a0 84 7d 01 00 00 00 00  | .........}.....|
> 0a30  00 00 00 00 00 00 00 00  10 89 7f 01 00 00 00 00  |................|
> 0a40  a0 84 7d 01 00 00 00 00  00 00 00 00 00 00 00 00  |..}.............|
> 0a50  80 8a 7f 01 00 00 00 00  e0 cf 7d 01 00 00 00 00  |..........}.....|
> 0a60  00 00 00 00 00 00 00 00  60 8a 7f 01 00 00 00 00  |........`.......|
> 0a70  a0 84 7d 01 00 00 00 00  00 00 00 00 00 00 00 00  |..}.............|
> 0a80  30 89 7f 01 00 00 00 00  a0 84 7d 01 00 00 00 00  |0.........}.....|
> 0a90  00 00 00 00 00 00 00 00  60 f2 7f 01 00 00 00 00  |........`.......|
> 0aa0  40 fd 7e 01 00 00 00 00  00 00 00 00 00 00 00 00  |@.~.............|
> 0ab0  00 00 00 00 00 00 00 00  00 00 00 00 00 00 00 00  |................|
> *
> 1000  3e 0a 20 20 3c 2f 6d 65  74 68 6f 64 3e 0a 3c 2f  |>.  </method>.</|
> 1010  73 65 72 76 69 63 65 3e  0a                       |service>.|
> 
> There's something at the end of the file that does look like a xml fragment.
> The file size is 4121. This looks to me like exactly the first page of
> the file was not read correctly.
> 
> The xml file is supposed to be read-only during startup, so there was no write
> in flight. 'rpm -Vv' reported only this file corrupted. Booting to other
> kernels was fine, network up, and the file was ok again. So the
> corruption happened only in memory, which leads me to conclusion that
> there is an unknown bug in your patchset.

Thank you!  I shall attempt to debug.  Was this with a btrfs root
filesystem?  I'm most suspicious of those patches right now, since they've
received next to no testing.  I'm going to put together a smaller patchset
which just does the page cache conversion and nothing else in the hope
that we can get that merged this year.


[PATCH v6 05/99] xarray: Add definition of struct xarray

2018-01-17 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

This is a direct replacement for struct radix_tree_root.  Some of the
struct members have changed name; convert those, and use a #define so
that radix_tree users continue to work without change.

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 include/linux/radix-tree.h               | 33 --
 include/linux/xarray.h                   | 59 +
 lib/Makefile                             |  2 +-
 lib/idr.c                                |  4 +-
 lib/radix-tree.c                         | 75 
 lib/xarray.c                             | 42 ++
 tools/include/linux/spinlock.h           |  1 +
 tools/testing/radix-tree/.gitignore      |  1 +
 tools/testing/radix-tree/Makefile        |  8 +++-
 tools/testing/radix-tree/linux/bug.h     |  1 +
 tools/testing/radix-tree/linux/kconfig.h |  1 +
 tools/testing/radix-tree/linux/xarray.h  |  2 +
 tools/testing/radix-tree/multiorder.c    |  6 +--
 tools/testing/radix-tree/test.c          |  6 +--
 14 files changed, 168 insertions(+), 73 deletions(-)
 create mode 100644 lib/xarray.c
 create mode 100644 tools/testing/radix-tree/linux/kconfig.h
 create mode 100644 tools/testing/radix-tree/linux/xarray.h

diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 87f35fe00e55..c8a33e9e9a3c 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -30,6 +30,9 @@
 #include 
 #include 
 
+/* Keep unconverted code working */
+#define radix_tree_root    xarray
+
 /*
  * The bottom two bits of the slot determine how the remaining bits in the
  * slot are interpreted:
@@ -59,10 +62,7 @@ static inline bool radix_tree_is_internal_node(void *ptr)
 
 #define RADIX_TREE_MAX_TAGS 3
 
-#ifndef RADIX_TREE_MAP_SHIFT
-#define RADIX_TREE_MAP_SHIFT   (CONFIG_BASE_SMALL ? 4 : 6)
-#endif
-
+#define RADIX_TREE_MAP_SHIFT   XA_CHUNK_SHIFT
 #define RADIX_TREE_MAP_SIZE(1UL << RADIX_TREE_MAP_SHIFT)
 #define RADIX_TREE_MAP_MASK(RADIX_TREE_MAP_SIZE-1)
 
@@ -95,36 +95,21 @@ struct radix_tree_node {
unsigned long   tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
 };
 
-/* The IDR tag is stored in the low bits of the GFP flags */
+/* The IDR tag is stored in the low bits of xa_flags */
 #define ROOT_IS_IDR((__force gfp_t)4)
-/* The top bits of gfp_mask are used to store the root tags */
+/* The top bits of xa_flags are used to store the root tags */
 #define ROOT_TAG_SHIFT (__GFP_BITS_SHIFT)
 
-struct radix_tree_root {
-   spinlock_t  xa_lock;
-   gfp_t   gfp_mask;
-   struct radix_tree_node  __rcu *rnode;
-};
-
-#define RADIX_TREE_INIT(name, mask){   \
-   .xa_lock = __SPIN_LOCK_UNLOCKED(name.xa_lock),  \
-   .gfp_mask = (mask), \
-   .rnode = NULL,  \
-}
+#define RADIX_TREE_INIT(name, mask)    XARRAY_INIT_FLAGS(name, mask)
 
 #define RADIX_TREE(name, mask) \
struct radix_tree_root name = RADIX_TREE_INIT(name, mask)
 
-#define INIT_RADIX_TREE(root, mask)\
-do {   \
-   spin_lock_init(&(root)->xa_lock);   \
-   (root)->gfp_mask = (mask);  \
-   (root)->rnode = NULL;   \
-} while (0)
+#define INIT_RADIX_TREE(root, mask) xa_init_flags(root, mask)
 
 static inline bool radix_tree_empty(const struct radix_tree_root *root)
 {
-   return root->rnode == NULL;
+   return root->xa_head == NULL;
 }
 
 /**
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index c308152fde7f..3d2f1fafb7ec 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -10,6 +10,8 @@
  */
 
 #include 
+#include 
+#include 
 #include 
 #include 
 
@@ -99,6 +101,63 @@ static inline bool xa_is_internal(const void *entry)
return ((unsigned long)entry & 3) == 2;
 }
 
+/**
+ * struct xarray - The anchor of the XArray.
+ * @xa_lock: Lock that protects the contents of the XArray.
+ *
+ * To use the xarray, define it statically or embed it in your data structure.
+ * It is a very small data structure, so it does not usually make sense to
+ * allocate it separately and keep a pointer to it in your data structure.
+ *
+ * You may use the xa_lock to protect your own data structures as well.
+ */
+/*
+ * If all of the entries in the array are NULL, @xa_head is a NULL pointer.
+ * If the only non-NULL entry in the array is at index 0, @xa_head is that
+ * entry.  If any other entry in the array is non-NULL, @xa_head points
+ * to an @xa_node.
+ */
+struct xarray {
+   spinlock_t  xa_lock;
+/* private: The rest of the data structure is not to be used directly. */

[PATCH v6 02/99] page cache: Use xa_lock

2018-01-17 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

Remove the address_space ->tree_lock and use the xa_lock newly added to
the radix_tree_root.  Rename the address_space ->page_tree to ->pages,
since we don't really care that it's a tree.  Take the opportunity to
rearrange the elements of address_space to pack them better on 64-bit,
and make the comments more useful.
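
Concretely, callers swap the tree lock for the xarray lock embedded in
the mapping; a before/after sketch (assuming the xa_lock_irq() wrappers
that accompany the xa_lock definitions):

	/* before */
	spin_lock_irq(&mapping->tree_lock);
	/* ... modify the page cache ... */
	spin_unlock_irq(&mapping->tree_lock);

	/* after */
	xa_lock_irq(&mapping->pages);
	/* ... modify the page cache ... */
	xa_unlock_irq(&mapping->pages);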

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 Documentation/cgroup-v1/memory.txt  |   2 +-
 Documentation/vm/page_migration |  14 +--
 arch/arm/include/asm/cacheflush.h   |   6 +-
 arch/nios2/include/asm/cacheflush.h |   6 +-
 arch/parisc/include/asm/cacheflush.h|   6 +-
 drivers/staging/lustre/lustre/llite/glimpse.c   |   2 +-
 drivers/staging/lustre/lustre/mdc/mdc_request.c |   8 +-
 fs/afs/write.c  |   9 +-
 fs/btrfs/compression.c  |   2 +-
 fs/btrfs/extent_io.c|  16 +--
 fs/btrfs/inode.c|   2 +-
 fs/buffer.c |  13 ++-
 fs/cifs/file.c  |   9 +-
 fs/dax.c| 123 
 fs/f2fs/data.c  |   6 +-
 fs/f2fs/dir.c   |   6 +-
 fs/f2fs/inline.c|   6 +-
 fs/f2fs/node.c  |   8 +-
 fs/fs-writeback.c   |  20 ++--
 fs/inode.c  |  11 +--
 fs/nilfs2/btnode.c  |  20 ++--
 fs/nilfs2/page.c|  22 ++---
 include/linux/backing-dev.h |  12 +--
 include/linux/fs.h  |  17 ++--
 include/linux/mm.h  |   2 +-
 include/linux/pagemap.h |   4 +-
 mm/filemap.c|  84 
 mm/huge_memory.c|  10 +-
 mm/khugepaged.c |  49 +-
 mm/memcontrol.c |   4 +-
 mm/migrate.c|  32 +++---
 mm/page-writeback.c |  42 
 mm/readahead.c  |   2 +-
 mm/rmap.c   |   4 +-
 mm/shmem.c  |  60 ++--
 mm/swap_state.c |  17 ++--
 mm/truncate.c   |  22 ++---
 mm/vmscan.c |  12 +--
 mm/workingset.c |  22 ++---
 39 files changed, 344 insertions(+), 368 deletions(-)

diff --git a/Documentation/cgroup-v1/memory.txt 
b/Documentation/cgroup-v1/memory.txt
index cefb63639070..f0ba3fc6f2d8 100644
--- a/Documentation/cgroup-v1/memory.txt
+++ b/Documentation/cgroup-v1/memory.txt
@@ -262,7 +262,7 @@ When oom event notifier is registered, event will be delivered.
 2.6 Locking
 
lock_page_cgroup()/unlock_page_cgroup() should not be called under
-   mapping->tree_lock.
+   the mapping's xa_lock.
 
Other lock order is following:
PG_locked.
diff --git a/Documentation/vm/page_migration b/Documentation/vm/page_migration
index 0478ae2ad44a..faf849596a85 100644
--- a/Documentation/vm/page_migration
+++ b/Documentation/vm/page_migration
@@ -90,7 +90,7 @@ Steps:
 
 1. Lock the page to be migrated
 
-2. Insure that writeback is complete.
+2. Ensure that writeback is complete.
 
 3. Lock the new page that we want to move to. It is locked so that accesses to
this (not yet uptodate) page immediately lock while the move is in progress.
@@ -100,8 +100,8 @@ Steps:
mapcount is not zero then we do not migrate the page. All user space
processes that attempt to access the page will now wait on the page lock.
 
-5. The radix tree lock is taken. This will cause all processes trying
-   to access the page via the mapping to block on the radix tree spinlock.
+5. The address space xa_lock is taken. This will cause all processes trying
+   to access the page via the mapping to block on the spinlock.
 
 6. The refcount of the page is examined and we back out if references remain
otherwise we know that we are the only one referencing this page.
@@ -114,12 +114,12 @@ Steps:
 
 9. The radix tree is changed to point to the new page.
 
-10. The reference count of the old page is dropped because the radix tree
+10. The reference count of the old page is dropped because the address space
 reference is gone. A reference to the new page is established because
-the new page is referenced to by the radix tree.
+the new page is referenced by the address space.
 
-11. The radix tree lock is dropped. With that lookups in the mapping
-become possible again. Processes

[PATCH v6 04/99] xarray: Change definition of sibling entries

2018-01-17 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

Instead of storing a pointer to the slot containing the canonical entry,
store the offset of the slot.  This produces slightly more efficient code
(~300 bytes smaller) and simplifies the implementation.
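
A worked example of the new encoding (offset 5 in a node):

	void *sib = xa_mk_sibling(5);	/* xa_mk_internal(5) == (void *)0x16 */

	BUG_ON(!xa_is_sibling(sib));	/* true with CONFIG_RADIX_TREE_MULTIORDER=y */
	BUG_ON(xa_to_sibling(sib) != 5);	/* the offset comes straight back */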

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 include/linux/xarray.h | 90 ++
 lib/radix-tree.c   | 66 +++-
 2 files changed, 109 insertions(+), 47 deletions(-)

diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 1aa4ff0c19b6..c308152fde7f 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -22,6 +22,12 @@
  * x1: Value entry
  *
  * Attempting to store internal entries in the XArray is a bug.
+ *
+ * Most internal entries are pointers to the next node in the tree.
+ * The following internal entries have a special meaning:
+ *
+ * 0-62: Sibling entries
+ * 256: Retry entry
  */
 
 #define BITS_PER_XA_VALUE  (BITS_PER_LONG - 1)
@@ -60,6 +66,39 @@ static inline bool xa_is_value(const void *entry)
return (unsigned long)entry & 1;
 }
 
+/*
+ * xa_mk_internal() - Create an internal entry.
+ * @v: Value to turn into an internal entry.
+ *
+ * Return: An XArray internal entry corresponding to this value.
+ */
+static inline void *xa_mk_internal(unsigned long v)
+{
+   return (void *)((v << 2) | 2);
+}
+
+/*
+ * xa_to_internal() - Extract the value from an internal entry.
+ * @entry: XArray entry.
+ *
+ * Return: The value which was stored in the internal entry.
+ */
+static inline unsigned long xa_to_internal(const void *entry)
+{
+   return (unsigned long)entry >> 2;
+}
+
+/*
+ * xa_is_internal() - Is the entry an internal entry?
+ * @entry: XArray entry.
+ *
+ * Return: %true if the entry is an internal entry.
+ */
+static inline bool xa_is_internal(const void *entry)
+{
+   return ((unsigned long)entry & 3) == 2;
+}
+
 #define xa_trylock(xa) spin_trylock(&(xa)->xa_lock)
 #define xa_lock(xa)	spin_lock(&(xa)->xa_lock)
 #define xa_unlock(xa)  spin_unlock(&(xa)->xa_lock)
@@ -72,4 +111,55 @@ static inline bool xa_is_value(const void *entry)
 #define xa_unlock_irqrestore(xa, flags) \
spin_unlock_irqrestore(&(xa)->xa_lock, flags)
 
+/* Everything below here is the Advanced API.  Proceed with caution. */
+
+/*
+ * The xarray is constructed out of a set of 'chunks' of pointers.  Choosing
+ * the best chunk size requires some tradeoffs.  A power of two recommends
+ * itself so that we can walk the tree based purely on shifts and masks.
+ * Generally, the larger the better; as the number of slots per level of the
+ * tree increases, the less tall the tree needs to be.  But that needs to be
+ * balanced against the memory consumption of each node.  On a 64-bit system,
+ * xa_node is currently 576 bytes, and we get 7 of them per 4kB page.  If we
+ * doubled the number of slots per node, we'd get only 3 nodes per 4kB page.
+ */
+#ifndef XA_CHUNK_SHIFT
+#define XA_CHUNK_SHIFT (CONFIG_BASE_SMALL ? 4 : 6)
+#endif
+#define XA_CHUNK_SIZE  (1UL << XA_CHUNK_SHIFT)
+#define XA_CHUNK_MASK  (XA_CHUNK_SIZE - 1)
+
+/* Private */
+static inline bool xa_is_node(const void *entry)
+{
+   return xa_is_internal(entry) && (unsigned long)entry > 4096;
+}
+
+/* Private */
+static inline void *xa_mk_sibling(unsigned int offset)
+{
+   return xa_mk_internal(offset);
+}
+
+/* Private */
+static inline unsigned long xa_to_sibling(const void *entry)
+{
+   return xa_to_internal(entry);
+}
+
+/**
+ * xa_is_sibling() - Is the entry a sibling entry?
+ * @entry: Entry retrieved from the XArray
+ *
+ * Return: %true if the entry is a sibling entry.
+ */
+static inline bool xa_is_sibling(const void *entry)
+{
+   return IS_ENABLED(CONFIG_RADIX_TREE_MULTIORDER) &&
+   xa_is_internal(entry) &&
+   (entry < xa_mk_sibling(XA_CHUNK_SIZE - 1));
+}
+
+#define XA_RETRY_ENTRY xa_mk_internal(256)
+
 #endif /* _LINUX_XARRAY_H */
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 012e4869f99b..f16f63d15edc 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -37,6 +37,7 @@
 #include 
 #include 
 #include 
+#include 
 
 
 /* Number of nodes in fully populated tree of given height */
@@ -97,24 +98,7 @@ static inline void *node_to_entry(void *ptr)
return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE);
 }
 
-#define RADIX_TREE_RETRY   node_to_entry(NULL)
-
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
-/* Sibling slots point directly to another slot in the same node */
-static inline
-bool is_sibling_entry(const struct radix_tree_node *parent, void *node)
-{
-   void __rcu **ptr = node;
-   return (parent->slots <= ptr) &&
-   (ptr < parent->slots + RADIX_TREE_MAP_SIZE);
-}
-#else
-static inline
-

[PATCH v6 03/99] xarray: Replace exceptional entries

2018-01-17 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

Introduce xarray value entries to replace the radix tree exceptional
entry code.  This is a slight change in encoding to allow the use of an
extra bit (we can now store BITS_PER_LONG - 1 bits in a value entry).
It is also a change in emphasis; exceptional entries are intimidating
and different.  As the comment explains, you can choose to store values
or pointers in the xarray and they are both first-class citizens.
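
A round-trip with the new helpers, as a sketch; a value v is encoded as
(v << 1) | 1, which is where the spare bit comes from:

	void *entry = xa_mk_value(42);	/* stored as (42 << 1) | 1 */

	BUG_ON(!xa_is_value(entry));
	BUG_ON(xa_to_value(entry) != 42);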

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 arch/powerpc/include/asm/book3s/64/pgtable.h|   4 +-
 arch/powerpc/include/asm/nohash/64/pgtable.h|   4 +-
 drivers/gpu/drm/i915/i915_gem.c |  17 ++--
 drivers/staging/lustre/lustre/mdc/mdc_request.c |   2 +-
 fs/btrfs/compression.c  |   2 +-
 fs/btrfs/inode.c|   4 +-
 fs/dax.c| 107 
 fs/proc/task_mmu.c  |   2 +-
 include/linux/fs.h  |  48 +++
 include/linux/radix-tree.h  |  36 ++--
 include/linux/swapops.h |  19 ++---
 include/linux/xarray.h  |  51 +++
 lib/idr.c   |  63 ++
 lib/radix-tree.c|  21 ++---
 mm/filemap.c|  10 +--
 mm/khugepaged.c |   2 +-
 mm/madvise.c|   2 +-
 mm/memcontrol.c |   2 +-
 mm/mincore.c|   2 +-
 mm/readahead.c  |   2 +-
 mm/shmem.c  |  10 +--
 mm/swap.c   |   2 +-
 mm/truncate.c   |  12 +--
 mm/workingset.c |  12 ++-
 tools/testing/radix-tree/idr-test.c |   6 +-
 tools/testing/radix-tree/linux/radix-tree.h |   1 +
 tools/testing/radix-tree/multiorder.c   |  47 +--
 tools/testing/radix-tree/test.c |   2 +-
 28 files changed, 256 insertions(+), 236 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h 
b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 44697817ccc6..5025c26f1acd 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -649,9 +649,7 @@ static inline bool pte_user(pte_t pte)
BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \
BUILD_BUG_ON(_PAGE_HPTEFLAGS & _PAGE_SWP_SOFT_DIRTY);   \
} while (0)
-/*
- * on pte we don't need handle RADIX_TREE_EXCEPTIONAL_SHIFT;
- */
+
 #define SWP_TYPE_BITS 5
 #define __swp_type(x)  (((x).val >> _PAGE_BIT_SWAP_TYPE) \
& ((1UL << SWP_TYPE_BITS) - 1))
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h 
b/arch/powerpc/include/asm/nohash/64/pgtable.h
index abddf5830ad5..f711773568d7 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -329,9 +329,7 @@ static inline void __ptep_set_access_flags(struct mm_struct 
*mm,
 */ \
BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \
} while (0)
-/*
- * on pte we don't need handle RADIX_TREE_EXCEPTIONAL_SHIFT;
- */
+
 #define SWP_TYPE_BITS 5
 #define __swp_type(x)  (((x).val >> _PAGE_BIT_SWAP_TYPE) \
& ((1UL << SWP_TYPE_BITS) - 1))
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5cfba89ed586..25ce7bcf9988 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -5369,7 +5369,8 @@ i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
count = __sg_page_count(sg);
 
while (idx + count <= n) {
-   unsigned long exception, i;
+   void *entry;
+   unsigned long i;
int ret;
 
/* If we cannot allocate and insert this entry, or the
@@ -5384,12 +5385,9 @@ i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
if (ret && ret != -EEXIST)
goto scan;
 
-   exception =
-   RADIX_TREE_EXCEPTIONAL_ENTRY |
-   idx << RADIX_TREE_EXCEPTIONAL_SHIFT;
+   entry = xa_mk_value(idx);
for (i = 1; i < count; i++) {
-   ret = radix_tree_insert(&iter->radix, idx + i,
-   (void *)exception);
+   ret = radix_tree_insert(&iter->radix, idx + i, entry);
if (ret &&

[PATCH v6 07/99] xarray: Add documentation

2018-01-17 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

This is documentation on how to use the XArray, not details about its
internal implementation.

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 Documentation/core-api/index.rst  |   1 +
 Documentation/core-api/xarray.rst | 361 ++
 2 files changed, 362 insertions(+)
 create mode 100644 Documentation/core-api/xarray.rst

diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst
index d5bbe035316d..eb16ba30aeb6 100644
--- a/Documentation/core-api/index.rst
+++ b/Documentation/core-api/index.rst
@@ -18,6 +18,7 @@ Core utilities
local_ops
workqueue
genericirq
+   xarray
flexible-arrays
librs
genalloc
diff --git a/Documentation/core-api/xarray.rst 
b/Documentation/core-api/xarray.rst
new file mode 100644
index ..914999c0bf3f
--- /dev/null
+++ b/Documentation/core-api/xarray.rst
@@ -0,0 +1,361 @@
+.. SPDX-License-Identifier: CC-BY-SA-4.0
+
+==
+XArray
+==
+
+:Author: Matthew Wilcox
+
+Overview
+
+
+The XArray is an abstract data type which behaves like a very large array
+of pointers.  It meets many of the same needs as a hash or a conventional
+resizable array.  Unlike a hash, it allows you to sensibly go to the
+next or previous entry in a cache-efficient manner.  In contrast to
+a resizable array, there is no need for copying data or changing MMU
+mappings in order to grow the array.  It is more memory-efficient,
+parallelisable and cache friendly than a doubly-linked list.  It takes
+advantage of RCU to perform lookups without locking.
+
+The XArray implementation is efficient when the indices used are densely
+clustered; hashing the object and using the hash as the index will not
+perform well.  The XArray is optimised for small indices, but still has
+good performance with large indices.  If your index can be larger than
+``ULONG_MAX`` then the XArray is not the data type for you.  The most
+important user of the XArray is the page cache.
+
+A freshly-initialised XArray contains a ``NULL`` pointer at every index.
+Each non-``NULL`` entry in the array has three bits associated with it
+called tags.  Each tag may be set or cleared independently of the others.
+You can iterate over entries which are tagged.
+
+Normal pointers may be stored in the XArray directly.  They must be 4-byte
+aligned, which is true for any pointer returned from :c:func:`kmalloc` and
+:c:func:`alloc_page`.  It isn't true for arbitrary user-space pointers,
+nor for function pointers.  You can store pointers to statically allocated
+objects, as long as those objects have an alignment of at least 4.
+
+You can also store integers between 0 and ``LONG_MAX`` in the XArray.
+You must first convert them into entries using :c:func:`xa_mk_value`.
+When you retrieve an entry from the XArray, you can check whether it is
+a value entry by calling :c:func:`xa_is_value`, and convert it back to
+an integer by calling :c:func:`xa_to_value`.
+
+The XArray does not support storing :c:func:`IS_ERR` pointers as some
+conflict with value entries or internal entries.
+
+An unusual feature of the XArray is the ability to create entries which
+occupy a range of indices.  Once stored to, looking up any index in
+the range will return the same entry as looking up any other index in
+the range.  Setting a tag on one index will set it on all of them.
+Storing to any index will store to all of them.  Multi-index entries can
+be explicitly split into smaller entries, or storing ``NULL`` into any
+entry will cause the XArray to forget about the range.
+
+Normal API
+==
+
+Start by initialising an XArray, either with :c:func:`DEFINE_XARRAY`
+for statically allocated XArrays or :c:func:`xa_init` for dynamically
+allocated ones.
+
+You can then set entries using :c:func:`xa_store` and get entries
+using :c:func:`xa_load`.  xa_store will overwrite any entry with the
+new entry and return the previous entry stored at that index.  You can
+use :c:func:`xa_erase` instead of calling :c:func:`xa_store` with a
+%NULL entry.  There is no difference between an entry that has never
+been stored to and one that has most recently had ``NULL`` stored to it.
+
+You can conditionally replace an entry at an index by using
+:c:func:`xa_cmpxchg`.  Like :c:func:`cmpxchg`, it will only succeed if
+the entry at that index has the 'old' value.  It also returns the entry
+which was at that index; if it returns the same entry which was passed as
+'old', then :c:func:`xa_cmpxchg` succeeded.
+
+If you want to only store a new entry to an index if the current entry
+at that index is ``NULL``, you can use :c:func:`xa_insert` which
+returns ``-EEXIST`` if the entry is not empty.
+
+Calling :c:func:`xa_reserve` ensures that there is enough memory allocated
+to store an entry at the specified index.  This is not normally needed,
+but some users have a complicated locking scheme.
+
+You can enquire whether 

[PATCH v6 10/99] xarray: Add xa_store

2018-01-17 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

xa_store() differs from radix_tree_insert() in that it will overwrite an
existing element in the array rather than returning an error.  This is
the behaviour which most users want, and those that want more complex
behaviour generally want to use the xas family of routines anyway.

For memory allocation, xa_store() will first attempt to request memory
from the slab allocator; if memory is not immediately available, it will
drop the xa_lock and allocate memory, keeping a pointer in the xa_state.
It does not use the per-CPU cache, although those will continue to exist
until all radix tree users are converted to the xarray.

This patch also includes xa_erase() and __xa_erase() for a streamlined
way to store NULL.  Since there is no need to allocate memory in order
to store a NULL in the XArray, we do not need to trouble the user with
deciding what memory allocation flags to use.
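
A minimal caller-side sketch (`item' is a placeholder pointer, inside a
function returning int):

	DEFINE_XARRAY(array);

	void *old = xa_store(&array, 7, item, GFP_KERNEL);
	if (xa_is_err(old))
		return xa_err(old);	/* e.g. -ENOMEM */

	xa_erase(&array, 7);		/* never allocates; no GFP flags */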

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 include/linux/xarray.h| 107 +
 lib/radix-tree.c  |   4 +-
 lib/xarray.c  | 642 ++
 tools/include/linux/spinlock.h|   2 +
 tools/testing/radix-tree/linux/kernel.h   |   4 +
 tools/testing/radix-tree/linux/lockdep.h  |  11 +
 tools/testing/radix-tree/linux/rcupdate.h |   1 +
 tools/testing/radix-tree/test.c   |  32 ++
 tools/testing/radix-tree/test.h   |   5 +
 tools/testing/radix-tree/xarray-test.c| 113 +-
 10 files changed, 917 insertions(+), 4 deletions(-)
 create mode 100644 tools/testing/radix-tree/linux/lockdep.h

diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index ddeb49b8bfc1..139b1c1fd022 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -149,10 +149,17 @@ typedef unsigned __bitwise xa_tag_t;
 #define XA_PRESENT ((__force xa_tag_t)8U)
 #define XA_TAG_MAX XA_TAG_2
 
+enum xa_lock_type {
+   XA_LOCK_IRQ = 1,
+   XA_LOCK_BH = 2,
+};
+
 /*
  * Values for xa_flags.  The radix tree stores its GFP flags in the xa_flags,
  * and we remain compatible with that.
  */
+#define XA_FLAGS_LOCK_IRQ  ((__force gfp_t)XA_LOCK_IRQ)
+#define XA_FLAGS_LOCK_BH   ((__force gfp_t)XA_LOCK_BH)
 #define XA_FLAGS_TAG(tag)  ((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \
(__force unsigned)(tag)))
 
@@ -202,6 +209,7 @@ struct xarray {
 
 void xa_init_flags(struct xarray *, gfp_t flags);
 void *xa_load(struct xarray *, unsigned long index);
+void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
 bool xa_get_tag(struct xarray *, unsigned long index, xa_tag_t);
 void xa_set_tag(struct xarray *, unsigned long index, xa_tag_t);
 void xa_clear_tag(struct xarray *, unsigned long index, xa_tag_t);
@@ -217,6 +225,33 @@ static inline void xa_init(struct xarray *xa)
xa_init_flags(xa, 0);
 }
 
+/**
+ * xa_erase() - Erase this entry from the XArray.
+ * @xa: XArray.
+ * @index: Index of entry.
+ *
+ * This function is the equivalent of calling xa_store() with %NULL as
+ * the third argument.  The XArray does not need to allocate memory, so
+ * the user does not need to provide GFP flags.
+ *
+ * Return: The entry which used to be at this index.
+ */
+static inline void *xa_erase(struct xarray *xa, unsigned long index)
+{
+   return xa_store(xa, index, NULL, 0);
+}
+
+/**
+ * xa_empty() - Determine if an array has any present entries.
+ * @xa: XArray.
+ *
+ * Return: %true if the array contains only NULL pointers.
+ */
+static inline bool xa_empty(const struct xarray *xa)
+{
+   return xa->xa_head == NULL;
+}
+
 /**
  * xa_tagged() - Inquire whether any entry in this array has a tag set
  * @xa: Array
@@ -243,7 +278,11 @@ static inline bool xa_tagged(const struct xarray *xa, 
xa_tag_t tag)
 
 /*
  * Versions of the normal API which require the caller to hold the xa_lock.
+ * If the GFP flags allow it, will drop the lock in order to allocate
+ * memory, then reacquire it afterwards.
  */
+void *__xa_erase(struct xarray *, unsigned long index);
+void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
 void __xa_set_tag(struct xarray *, unsigned long index, xa_tag_t);
 void __xa_clear_tag(struct xarray *, unsigned long index, xa_tag_t);
 
@@ -339,6 +378,12 @@ static inline void *xa_entry_locked(struct xarray *xa,
   lockdep_is_held(&xa->xa_lock));
 }
 
+/* Private */
+static inline void *xa_mk_node(const struct xa_node *node)
+{
+   return (void *)((unsigned long)node | 2);
+}
+
 /* Private */
 static inline struct xa_node *xa_to_node(const void *entry)
 {
@@ -519,6 +564,12 @@ static inline bool xas_valid(const struct xa_state *xas)
return !xas_invalid(xas);
 }
 
+/* True if the node represents head-of-tree, RESTART or BOUNDS */
+static inline bool xas_top(struct xa_node *node)

[PATCH v6 09/99] xarray: Add xa_get_tag, xa_set_tag and xa_clear_tag

2018-01-17 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

XArray tags are slightly more strongly typed than the radix tree tags,
but occupy the same bits.  This commit also adds the xas_ family of tag
operations, for cases where the caller is already holding the lock, and
xa_tagged() to ask whether any array member has a particular tag set.
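
In use (sketch; assumes an entry is already stored at `index'):

	xa_set_tag(&array, index, XA_TAG_0);
	WARN_ON(!xa_get_tag(&array, index, XA_TAG_0));
	WARN_ON(!xa_tagged(&array, XA_TAG_0));	/* any entry tagged at all? */
	xa_clear_tag(&array, index, XA_TAG_0);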

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 include/linux/xarray.h |  40 +++
 lib/xarray.c   | 229 +
 tools/include/linux/spinlock.h |   6 ++
 3 files changed, 275 insertions(+)

diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 54c694e5c33f..ddeb49b8bfc1 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -11,6 +11,7 @@
 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -141,6 +142,20 @@ static inline int xa_err(void *entry)
return 0;
 }
 
+typedef unsigned __bitwise xa_tag_t;
+#define XA_TAG_0   ((__force xa_tag_t)0U)
+#define XA_TAG_1   ((__force xa_tag_t)1U)
+#define XA_TAG_2   ((__force xa_tag_t)2U)
+#define XA_PRESENT ((__force xa_tag_t)8U)
+#define XA_TAG_MAX XA_TAG_2
+
+/*
+ * Values for xa_flags.  The radix tree stores its GFP flags in the xa_flags,
+ * and we remain compatible with that.
+ */
+#define XA_FLAGS_TAG(tag)  ((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \
+   (__force unsigned)(tag)))
+
 /**
  * struct xarray - The anchor of the XArray.
  * @xa_lock: Lock that protects the contents of the XArray.
@@ -187,6 +202,9 @@ struct xarray {
 
 void xa_init_flags(struct xarray *, gfp_t flags);
 void *xa_load(struct xarray *, unsigned long index);
+bool xa_get_tag(struct xarray *, unsigned long index, xa_tag_t);
+void xa_set_tag(struct xarray *, unsigned long index, xa_tag_t);
+void xa_clear_tag(struct xarray *, unsigned long index, xa_tag_t);
 
 /**
  * xa_init() - Initialise an empty XArray.
@@ -199,6 +217,18 @@ static inline void xa_init(struct xarray *xa)
xa_init_flags(xa, 0);
 }
 
+/**
+ * xa_tagged() - Inquire whether any entry in this array has a tag set
+ * @xa: Array
+ * @tag: Tag value
+ *
+ * Return: %true if any entry has this tag set.
+ */
+static inline bool xa_tagged(const struct xarray *xa, xa_tag_t tag)
+{
+   return xa->xa_flags & XA_FLAGS_TAG(tag);
+}
+
 #define xa_trylock(xa) spin_trylock(&(xa)->xa_lock)
 #define xa_lock(xa)	spin_lock(&(xa)->xa_lock)
 #define xa_unlock(xa)  spin_unlock(&(xa)->xa_lock)
@@ -211,6 +241,12 @@ static inline void xa_init(struct xarray *xa)
 #define xa_unlock_irqrestore(xa, flags) \
spin_unlock_irqrestore(&(xa)->xa_lock, flags)
 
+/*
+ * Versions of the normal API which require the caller to hold the xa_lock.
+ */
+void __xa_set_tag(struct xarray *, unsigned long index, xa_tag_t);
+void __xa_clear_tag(struct xarray *, unsigned long index, xa_tag_t);
+
 /* Everything below here is the Advanced API.  Proceed with caution. */
 
 /*
@@ -504,6 +540,10 @@ static inline bool xas_retry(struct xa_state *xas, const 
void *entry)
 
 void *xas_load(struct xa_state *);
 
+bool xas_get_tag(const struct xa_state *, xa_tag_t);
+void xas_set_tag(const struct xa_state *, xa_tag_t);
+void xas_clear_tag(const struct xa_state *, xa_tag_t);
+
 /**
  * xas_reload() - Refetch an entry from the xarray.
  * @xas: XArray operation state.
diff --git a/lib/xarray.c b/lib/xarray.c
index 83b9c25de415..59b57e6f80de 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -5,6 +5,7 @@
  * Author: Matthew Wilcox <mawil...@microsoft.com>
  */
 
+#include 
 #include 
 #include 
 
@@ -24,6 +25,55 @@
  * @entry refers to something stored in a slot in the xarray
  */
 
+static inline struct xa_node *xa_parent(struct xarray *xa,
+   const struct xa_node *node)
+{
+   return rcu_dereference_check(node->parent,
+   lockdep_is_held(&xa->xa_lock));
+}
+
+static inline struct xa_node *xa_parent_locked(struct xarray *xa,
+   const struct xa_node *node)
+{
+   return rcu_dereference_protected(node->parent,
+   lockdep_is_held(&xa->xa_lock));
+}
+
+static inline void xa_tag_set(struct xarray *xa, xa_tag_t tag)
+{
+   if (!(xa->xa_flags & XA_FLAGS_TAG(tag)))
+   xa->xa_flags |= XA_FLAGS_TAG(tag);
+}
+
+static inline void xa_tag_clear(struct xarray *xa, xa_tag_t tag)
+{
+   if (xa->xa_flags & XA_FLAGS_TAG(tag))
+   xa->xa_flags &= ~(XA_FLAGS_TAG(tag));
+}
+
+static inline bool node_get_tag(const struct xa_node *node, unsigned int 
offset,
+   xa_tag_t tag)
+{
+   return test_bit(offset, node->tags[(__force unsigned)tag]);
+}
+
+static inli

[PATCH v6 08/99] xarray: Add xa_load

2018-01-17 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

This first function in the XArray API brings with it a lot of support
infrastructure.  The advanced API is based around the xa_state which is
a more capable version of the radix_tree_iter.

As the test-suite demonstrates, it is possible to use the xarray and
radix tree APIs on the same data structure.
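
The same lookup through both APIs, sketched:

	/* normal API */
	void *entry = xa_load(&array, index);

	/* advanced API; the caller supplies the RCU protection */
	XA_STATE(xas, &array, index);

	rcu_read_lock();
	entry = xas_load(&xas);
	rcu_read_unlock();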

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 include/linux/xarray.h  | 282 
 lib/radix-tree.c|  43 -
 lib/xarray.c| 190 +++
 tools/testing/radix-tree/.gitignore |   1 +
 tools/testing/radix-tree/Makefile   |   7 +-
 tools/testing/radix-tree/linux/kernel.h |   1 +
 tools/testing/radix-tree/linux/radix-tree.h |   1 -
 tools/testing/radix-tree/linux/rcupdate.h   |   1 +
 tools/testing/radix-tree/linux/xarray.h |   1 +
 tools/testing/radix-tree/xarray-test.c  |  56 ++
 10 files changed, 537 insertions(+), 46 deletions(-)
 create mode 100644 tools/testing/radix-tree/xarray-test.c

diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 3d5f7804ef45..54c694e5c33f 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -12,6 +12,8 @@
 #include 
 #include 
 #include 
+#include 
+#include 
 #include 
 #include 
 
@@ -30,6 +32,10 @@
  *
  * 0-62: Sibling entries
  * 256: Retry entry
+ *
+ * Errors are also represented as internal entries, but use the negative
+ * space (-4094 to -2).  They're never stored in the slots array; only
+ * returned by the normal API.
  */
 
 #define BITS_PER_XA_VALUE  (BITS_PER_LONG - 1)
@@ -101,6 +107,40 @@ static inline bool xa_is_internal(const void *entry)
return ((unsigned long)entry & 3) == 2;
 }
 
+/**
+ * xa_is_err() - Report whether an XArray operation returned an error
+ * @entry: Result from calling an XArray function
+ *
+ * If an XArray operation cannot complete an operation, it will return
+ * a special value indicating an error.  This function tells you
+ * whether an error occurred; xa_err() tells you which error occurred.
+ *
+ * Return: %true if the entry indicates an error.
+ */
+static inline bool xa_is_err(const void *entry)
+{
+   return unlikely(xa_is_internal(entry));
+}
+
+/**
+ * xa_err() - Turn an XArray result into an errno.
+ * @entry: Result from calling an XArray function.
+ *
+ * If an XArray operation cannot complete an operation, it will return
+ * a special pointer value which encodes an errno.  This function extracts
+ * the errno from the pointer value, or returns 0 if the pointer does not
+ * represent an errno.
+ *
+ * Return: A negative errno or 0.
+ */
+static inline int xa_err(void *entry)
+{
+   /* xa_to_internal() would not do sign extension. */
+   if (xa_is_err(entry))
+   return (long)entry >> 2;
+   return 0;
+}
+
 /**
  * struct xarray - The anchor of the XArray.
  * @xa_lock: Lock that protects the contents of the XArray.
@@ -146,6 +186,7 @@ struct xarray {
struct xarray name = XARRAY_INIT_FLAGS(name, flags)
 
 void xa_init_flags(struct xarray *, gfp_t flags);
+void *xa_load(struct xarray *, unsigned long index);
 
 /**
  * xa_init() - Initialise an empty XArray.
@@ -212,6 +253,62 @@ struct xa_node {
unsigned long   tags[XA_MAX_TAGS][XA_TAG_LONGS];
 };
 
+#ifdef XA_DEBUG
+void xa_dump(const struct xarray *);
+void xa_dump_node(const struct xa_node *);
+#define XA_BUG_ON(xa, x) do { \
+   if (x) \
+   xa_dump(xa); \
+   BUG_ON(x); \
+   } while (0)
+#define XA_NODE_BUG_ON(node, x) do { \
+   if ((x) && (node)) \
+   xa_dump_node(node); \
+   BUG_ON(x); \
+   } while (0)
+#else
+#define XA_BUG_ON(xa, x)   do { } while (0)
+#define XA_NODE_BUG_ON(node, x)do { } while (0)
+#endif
+
+/* Private */
+static inline void *xa_head(struct xarray *xa)
+{
+   return rcu_dereference_check(xa->xa_head,
+   lockdep_is_held(&xa->xa_lock));
+}
+
+/* Private */
+static inline void *xa_head_locked(struct xarray *xa)
+{
+   return rcu_dereference_protected(xa->xa_head,
+   lockdep_is_held(&xa->xa_lock));
+}
+
+/* Private */
+static inline void *xa_entry(struct xarray *xa,
+   const struct xa_node *node, unsigned int offset)
+{
+   XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
+   return rcu_dereference_check(node->slots[offset],
+   lockdep_is_held(&xa->xa_lock));
+}
+
+/* Private */
+static inline void *xa_entry_locked(struct xarray *xa,
+   const struct xa_node *node, unsigned int offset)
+{
+   XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
+   return rcu_dereference_protected(node->slots[offset],
+   lockdep_is_held(&xa->xa_lock));
+}

[PATCH v6 00/99] XArray version 6

2018-01-17 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

This version of the XArray has no known bugs.  I have converted the
radix tree test suite entirely over to the XArray and fixed all bugs
that it has uncovered.  There are additional tests in the test suite for
the XArray, so I now claim the XArray has better test coverage than the
Radix Tree did.  Of course, that is not the same thing as fewer bugs,
but it now stands up to the tender embraces of Trinity without crashing.

You can get this version from my git tree here:
http://git.infradead.org/users/willy/linux-dax.git/shortlog/refs/heads/xarray-2018-01-09
which includes a number of other patches that are at least tangentially
related to this patch set.

Most of the work I've done recently has been converting additional users
from the radix tree to the XArray.  That's going pretty well; still 24
radix tree users left to convert.  It's been worth doing because I've
spotted several common patterns that have led to changes (the lock_type,
reserve/release) and some common patterns that I'll add support for
later (chaining multiple entries from a single index, wanting to use
64-bit indices on 32-bit machines, having an array of XArrays, various
workarounds for not having range entries yet).

As far as line count goes, for the whole git tree, we're at:
 212 files changed, 7764 insertions(+), 7002 deletions(-)
with another 1376 lines to delete from radix-tree.[ch].  That doesn't take
into account the 371 lines of xarray.rst, the 587 lines of xarray-test.c,
and the fact that almost half of lib/xarray.c and include/linux/xarray.h
is documentation.

Changes since version 5:

 - Rebased to 4.15-rc8

API changes:
 - Renamed __xa_init() to xa_init_flags().
 - Added DEFINE_XARRAY_FLAGS().
 - Renamed xa_ctx to xa_lock_type; store it in the XA_FLAGS and use separate
   locking classes for each type so that lockdep doesn't emit spurious
   warnings.  It also reduces the amount of boilerplate.
 - Combined __xa_store_bh, __xa_store_irq and __xa_store into __xa_store().
 - Ditto for __xa_cmpxchg().
 - Renamed xa_store_empty() to xa_insert().
 - Added __xa_insert().
 - Added xa_reserve() and xa_release().
 - Renamed XA_NO_TAG to XA_PRESENT.
 - Combined xa_get_entries(), xa_get_tagged() and xa_get_maybe_tag()
   into xa_extract().
 - Added 'filter' argument to xa_find(), xa_find_after() and xa_for_each()
   to match xa_extract() and provide the functionality that would
   have otherwise had to be added in the form of xa_find_tag(),
   xa_find_tag_after() and xa_for_each_tag().
 - Replaced workingset_lookup_update() with mapping_set_update().
 - Renamed page_cache_tree_delete() to page_cache_delete().

New xarray users:
 - Converted SuperH interrupt controller radix tree to XArray.
 - Converted blk-cgroup radix tree to XArray.
 - Converted blk-ioc radix tree to XArray.
 - Converted i915 handles_vma radix tree to XArray.
 - Converted s390 gmap radix trees to XArray.
 - Converted hwspinlock to XArray.
 - Converted btrfs fs_roots to XArray.
 - Converted btrfs reada_zones to XArray.
 - Converted btrfs reada_extents to XArray.
 - Converted btrfs reada_tree to XArray.
 - Converted btrfs buffer_radix to XArray.
 - Converted btrfs delayed_nodes to XArray.
 - Converted btrfs name_cache to XArray.
 - Converted f2fs pids radix tree to XArray.
 - Converted f2fs ino_root radix tree to XArray.
 - Converted f2fs extent_tree to XArray.
 - Converted f2fs gclist radix tree to XArray.
 - Converted dma-debug active cacheline radix tree to XArray.
 - Converted Xen pvcalls-back socketpass_mappings to XArray.
 - Converted net/qrtr radix tree to XArray.
 - Converted null_blk radix trees to XArray.

Documentation:
 - Added a bit more internals documentation.
 - Rewrote xa_init_flags documentation.
 - Added the __xa_ functions to the locking table.
 - Rewrote the section on using the __xa_ functions.

Internal changes:
 - Free up the bottom four bits of the xa_flags, since these are not
   valid GFP flags to pass to kmem_cache_alloc().
 - Moved the XA_FLAGS_TRACK_FREE bit to the bottom bits of the flags to leave
   space for more tags (later).
 - Fixed multiple bugs in xas_find() and xas_find_tag().
 - Fixed bug in shrinking XArray (and add a test case that exercises it).
 - Fixed bug in erasing multi-index entries.
 - Fixed a compile warning with CONFIG_RADIX_TREE_MULTIORDER=n.
 - Added an xas_update() helper.
 - Use ->array to track an xa_node's state through its lifecycle
   (allocated -> rcu_free -> actually free).
 - Made XA_BUG_ON dump the entire tree while XA_NODE_BUG_ON dumps only the
   node that appears suspect.
 - Fixed debugging printks to use %px and pr_cont/pr_info etc.
 - Renamed some internal tag functions.
 - Moved xa_track_free() from xarray.h to xarray.c.

Test suite:
 - Added new tests for xas_find() and xas_find_tag().
 - Added new tests for the update_node functionality.
 - Converted the radix tree test suite to the xarray API.

Matthew Wilcox (99):
  xarray: 

[PATCH v6 12/99] xarray: Add xa_for_each

2018-01-17 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

This iterator allows the user to efficiently walk a range of the array,
executing the loop body once for each entry in that range that matches
the filter.  This commit also includes xa_find() and xa_find_after()
which are helper functions for xa_for_each() but may also be useful in
their own right.

In the xas family of functions, we also have xas_for_each(), xas_find(),
xas_next_entry(), xas_for_each_tag(), xas_find_tag(), xas_next_tag()
and xas_pause().
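
Typical use, sketched (process() is a placeholder):

	void *entry;
	unsigned long index = 0;

	xa_for_each(&array, entry, index, ULONG_MAX, XA_PRESENT)
		process(index, entry);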

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 include/linux/xarray.h | 171 +
 lib/xarray.c   | 272 +
 tools/testing/radix-tree/test.c|  13 ++
 tools/testing/radix-tree/test.h|   1 +
 tools/testing/radix-tree/xarray-test.c | 122 +++
 5 files changed, 579 insertions(+)

diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index fc9ab3b13e60..fcd7ef68933a 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -215,6 +215,10 @@ void *xa_cmpxchg(struct xarray *, unsigned long index,
 bool xa_get_tag(struct xarray *, unsigned long index, xa_tag_t);
 void xa_set_tag(struct xarray *, unsigned long index, xa_tag_t);
 void xa_clear_tag(struct xarray *, unsigned long index, xa_tag_t);
+void *xa_find(struct xarray *xa, unsigned long *index,
+   unsigned long max, xa_tag_t) __attribute__((nonnull(2)));
+void *xa_find_after(struct xarray *xa, unsigned long *index,
+   unsigned long max, xa_tag_t) __attribute__((nonnull(2)));
 
 /**
  * xa_init() - Initialise an empty XArray.
@@ -266,6 +270,33 @@ static inline bool xa_tagged(const struct xarray *xa, 
xa_tag_t tag)
return xa->xa_flags & XA_FLAGS_TAG(tag);
 }
 
+/**
+ * xa_for_each() - Iterate over a portion of an XArray.
+ * @xa: XArray.
+ * @entry: Entry retrieved from array.
+ * @index: Index of @entry.
+ * @max: Maximum index to retrieve from array.
+ * @filter: Selection criterion.
+ *
+ * Initialise @index to the minimum index you want to retrieve from
+ * the array.  During the iteration, @entry will have the value of the
+ * entry stored in @xa at @index.  The iteration will skip all entries in
+ * the array which do not match @filter.  You may modify @index during the
+ * iteration if you want to skip or reprocess indices.  It is safe to modify
+ * the array during the iteration.  At the end of the iteration, @entry will
+ * be set to NULL and @index will have a value less than or equal to @max.
+ *
+ * xa_for_each() is O(n.log(n)) while xas_for_each() is O(n).  You have
+ * to handle your own locking with xas_for_each(), and if you have to unlock
+ * after each iteration, it will also end up being O(n.log(n)).  xa_for_each()
+ * will spin if it hits a retry entry; if you intend to see retry entries,
+ * you should use the xas_for_each() iterator instead.  The xas_for_each()
+ * iterator will expand into more inline code than xa_for_each().
+ */
+#define xa_for_each(xa, entry, index, max, filter) \
+   for (entry = xa_find(xa, &index, max, filter); entry; \
+        entry = xa_find_after(xa, &index, max, filter))
+
 /**
  * xa_insert() - Store this entry in the XArray unless another entry is
  * already present.
@@ -620,6 +651,12 @@ static inline bool xas_valid(const struct xa_state *xas)
return !xas_invalid(xas);
 }
 
+/* True if the pointer is something other than a node */
+static inline bool xas_not_node(struct xa_node *node)
+{
+   return ((unsigned long)node & 3) || !node;
+}
+
 /* True if the node represents head-of-tree, RESTART or BOUNDS */
 static inline bool xas_top(struct xa_node *node)
 {
@@ -648,13 +685,16 @@ static inline bool xas_retry(struct xa_state *xas, const 
void *entry)
 void *xas_load(struct xa_state *);
 void *xas_store(struct xa_state *, void *entry);
 void *xas_create(struct xa_state *);
+void *xas_find(struct xa_state *, unsigned long max);
 
 bool xas_get_tag(const struct xa_state *, xa_tag_t);
 void xas_set_tag(const struct xa_state *, xa_tag_t);
 void xas_clear_tag(const struct xa_state *, xa_tag_t);
+void *xas_find_tag(struct xa_state *, unsigned long max, xa_tag_t);
 void xas_init_tags(const struct xa_state *);
 
 bool xas_nomem(struct xa_state *, gfp_t);
+void xas_pause(struct xa_state *);
 
 /**
  * xas_reload() - Refetch an entry from the xarray.
@@ -727,6 +767,137 @@ static inline void xas_set_update(struct xa_state *xas, 
xa_update_node_t update)
xas->xa_update = update;
 }
 
+/* Skip over any of these entries when iterating */
+static inline bool xa_iter_skip(const void *entry)
+{
+   return unlikely(!entry ||
+   (xa_is_internal(entry) && entry < XA_RETRY_ENTRY));
+}
+
+/**
+ * xas_next_entry() - Advance iterator to next present entry.
+ * @xas: XArray operation state.
+ * @max: Highest index to return.
+ *
+ * xas_next_entry() is an inline 

[PATCH v6 11/99] xarray: Add xa_cmpxchg and xa_insert

2018-01-17 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

Like cmpxchg(), xa_cmpxchg will only store to the index if the current
entry matches the old entry.  It returns the current entry, which is
usually more useful than the errno returned by radix_tree_insert().
For the users who really only want the errno, the xa_insert() wrapper
provides a more convenient calling convention.
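
Both calling conventions, sketched (`item' is a placeholder, inside a
function returning int):

	/* NULL back from xa_cmpxchg() means our store went in: */
	void *curr = xa_cmpxchg(&array, index, NULL, item, GFP_KERNEL);
	if (xa_is_err(curr))
		return xa_err(curr);	/* e.g. -ENOMEM */

	/* When only the errno matters: */
	int err = xa_insert(&array, index, item, GFP_KERNEL);	/* -EEXIST if occupied */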

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 include/linux/xarray.h | 56 
 lib/xarray.c   | 68 ++
 tools/testing/radix-tree/xarray-test.c | 10 +
 3 files changed, 134 insertions(+)

diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 139b1c1fd022..fc9ab3b13e60 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -210,6 +210,8 @@ struct xarray {
 void xa_init_flags(struct xarray *, gfp_t flags);
 void *xa_load(struct xarray *, unsigned long index);
 void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
+void *xa_cmpxchg(struct xarray *, unsigned long index,
+   void *old, void *entry, gfp_t);
 bool xa_get_tag(struct xarray *, unsigned long index, xa_tag_t);
 void xa_set_tag(struct xarray *, unsigned long index, xa_tag_t);
 void xa_clear_tag(struct xarray *, unsigned long index, xa_tag_t);
@@ -264,6 +266,32 @@ static inline bool xa_tagged(const struct xarray *xa, 
xa_tag_t tag)
return xa->xa_flags & XA_FLAGS_TAG(tag);
 }
 
+/**
+ * xa_insert() - Store this entry in the XArray unless another entry is
+ * already present.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * If you would rather see the existing entry in the array, use xa_cmpxchg().
+ * This function is for users who don't care what the entry is, only that
+ * one is present.
+ *
+ * Return: -EEXIST if another entry was present, 0 if the store succeeded,
+ * or another negative errno if a different error happened (eg -ENOMEM).
+ */
+static inline int xa_insert(struct xarray *xa, unsigned long index,
+   void *entry, gfp_t gfp)
+{
+   void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp);
+   if (!curr)
+   return 0;
+   if (xa_is_err(curr))
+   return xa_err(curr);
+   return -EEXIST;
+}
+
 #define xa_trylock(xa) spin_trylock(&(xa)->xa_lock)
 #define xa_lock(xa)	spin_lock(&(xa)->xa_lock)
 #define xa_unlock(xa)  spin_unlock(&(xa)->xa_lock)
@@ -283,9 +311,37 @@ static inline bool xa_tagged(const struct xarray *xa, 
xa_tag_t tag)
  */
 void *__xa_erase(struct xarray *, unsigned long index);
 void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
+void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
+   void *entry, gfp_t);
 void __xa_set_tag(struct xarray *, unsigned long index, xa_tag_t);
 void __xa_clear_tag(struct xarray *, unsigned long index, xa_tag_t);
 
+/**
+ * __xa_insert() - Store this entry in the XArray unless another entry is
+ * already present.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * If you would rather see the existing entry in the array, use __xa_cmpxchg().
+ * This function is for users who don't care what the entry is, only that
+ * one is present.
+ *
+ * Return: -EEXIST if another entry was present, 0 if the store succeeded,
+ * or another negative errno if a different error happened (eg -ENOMEM).
+ */
+static inline int __xa_insert(struct xarray *xa, unsigned long index,
+   void *entry, gfp_t gfp)
+{
+   void *curr = __xa_cmpxchg(xa, index, NULL, entry, gfp);
+   if (!curr)
+   return 0;
+   if (xa_is_err(curr))
+   return xa_err(curr);
+   return -EEXIST;
+}
+
 /* Everything below here is the Advanced API.  Proceed with caution. */
 
 /*
diff --git a/lib/xarray.c b/lib/xarray.c
index 45b70e622bf1..d925a98fb9b8 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -928,6 +928,74 @@ void *__xa_store(struct xarray *xa, unsigned long index, 
void *entry, gfp_t gfp)
 }
 EXPORT_SYMBOL(__xa_store);
 
+/**
+ * xa_cmpxchg() - Conditionally replace an entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @old: Old value to test against.
+ * @entry: New value to place in array.
+ * @gfp: Memory allocation flags.
+ *
+ * If the entry at @index is the same as @old, replace it with @entry.
+ * If the return value is equal to @old, then the exchange was successful.
+ *
+ * Return: The old value at this index or xa_err() if an error happened.
+ */
+void *xa_cmpxchg(struct xarray *xa, unsigned long index,
+   void *old, void *entry, gfp_t gfp)
+{
+   XA_STATE(xas, xa, index);
+   void *curr;
+
+   if (WARN_ON_ONCE(xa_is_internal(entry)))
+ 

[PATCH v6 13/99] xarray: Add xa_extract

2018-01-17 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

This function combines the functionality of radix_tree_gang_lookup() and
radix_tree_gang_lookup_tagged().  It extracts entries matching the
specified filter into a normal array.
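
For example (sketch):

	void *batch[16];
	unsigned int n;

	/* Copy up to 16 present entries with indices in [0, ULONG_MAX]: */
	n = xa_extract(&array, batch, 0, ULONG_MAX, 16, XA_PRESENT);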

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 include/linux/xarray.h |  2 ++
 lib/xarray.c   | 80 ++
 2 files changed, 82 insertions(+)

diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index fcd7ef68933a..d79fd48e4957 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -219,6 +219,8 @@ void *xa_find(struct xarray *xa, unsigned long *index,
unsigned long max, xa_tag_t) __attribute__((nonnull(2)));
 void *xa_find_after(struct xarray *xa, unsigned long *index,
unsigned long max, xa_tag_t) __attribute__((nonnull(2)));
+unsigned int xa_extract(struct xarray *, void **dst, unsigned long start,
+   unsigned long max, unsigned int n, xa_tag_t);
 
 /**
  * xa_init() - Initialise an empty XArray.
diff --git a/lib/xarray.c b/lib/xarray.c
index 3e6be0a07525..be276618f81b 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -1368,6 +1368,86 @@ void *xa_find_after(struct xarray *xa, unsigned long 
*indexp,
 }
 EXPORT_SYMBOL(xa_find_after);
 
+static unsigned int xas_extract_present(struct xa_state *xas, void **dst,
+   unsigned long max, unsigned int n)
+{
+   void *entry;
+   unsigned int i = 0;
+
+   rcu_read_lock();
+   xas_for_each(xas, entry, max) {
+   if (xas_retry(xas, entry))
+   continue;
+   dst[i++] = entry;
+   if (i == n)
+   break;
+   }
+   rcu_read_unlock();
+
+   return i;
+}
+
+static unsigned int xas_extract_tag(struct xa_state *xas, void **dst,
+   unsigned long max, unsigned int n, xa_tag_t tag)
+{
+   void *entry;
+   unsigned int i = 0;
+
+   rcu_read_lock();
+   xas_for_each_tag(xas, entry, max, tag) {
+   if (xas_retry(xas, entry))
+   continue;
+   dst[i++] = entry;
+   if (i == n)
+   break;
+   }
+   rcu_read_unlock();
+
+   return i;
+}
+
+/**
+ * xa_extract() - Copy selected entries from the XArray into a normal array.
+ * @xa: The source XArray to copy from.
+ * @dst: The buffer to copy entries into.
+ * @start: The first index in the XArray eligible to be selected.
+ * @max: The last index in the XArray eligible to be selected.
+ * @n: The maximum number of entries to copy.
+ * @filter: Selection criterion.
+ *
+ * Copies up to @n entries that match @filter from the XArray.  The
+ * copied entries will have indices between @start and @max, inclusive.
+ *
+ * The @filter may be an XArray tag value, in which case entries which are
+ * tagged with that tag will be copied.  It may also be %XA_PRESENT, in
+ * which case non-NULL entries will be copied.
+ *
+ * This function uses the RCU lock to protect itself.  That means that the
+ * entries returned may not represent a snapshot of the XArray at a moment
+ * in time.  For example, if index 5 is stored to, then index 10 is stored to,
+ * calling xa_extract() may return the old contents of index 5 and the
+ * new contents of index 10.  Indices not modified while this function is
+ * running will not be skipped.
+ *
+ * If you need stronger guarantees, holding the xa_lock across calls to this
+ * function will prevent concurrent modification.
+ *
+ * Return: The number of entries copied.
+ */
+unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start,
+   unsigned long max, unsigned int n, xa_tag_t filter)
+{
+   XA_STATE(xas, xa, start);
+
+   if (!n)
+   return 0;
+
+   if ((__force unsigned int)filter < XA_MAX_TAGS)
+   return xas_extract_tag(&xas, dst, max, n, filter);
+   return xas_extract_present(&xas, dst, max, n);
+}
+EXPORT_SYMBOL(xa_extract);
+
 #ifdef XA_DEBUG
 void xa_dump_node(const struct xa_node *node)
 {
-- 
2.15.1



[PATCH v6 15/99] xarray: Add xas_next and xas_prev

2018-01-17 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

These two functions move the xas index by one position, and adjust the
rest of the iterator state to match it.  This is more efficient than
calling xas_set() as it keeps the iterator at the leaves of the tree
instead of walking the iterator from the root each time.
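
A sketch of the stepping behaviour:

	XA_STATE(xas, &array, index);
	void *entry, *next, *back;

	rcu_read_lock();
	entry = xas_load(&xas);	/* walks from the root to @index */
	next = xas_next(&xas);	/* entry at index + 1; stays at the leaf */
	back = xas_prev(&xas);	/* back at @index without re-walking */
	rcu_read_unlock();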

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 include/linux/xarray.h |  67 +
 lib/xarray.c   |  74 ++
 tools/testing/radix-tree/xarray-test.c | 259 +
 3 files changed, 400 insertions(+)

diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index d106b2fe4cec..01ce313fc00e 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -660,6 +660,12 @@ static inline bool xas_not_node(struct xa_node *node)
return ((unsigned long)node & 3) || !node;
 }
 
+/* True if the node represents RESTART or an error */
+static inline bool xas_frozen(struct xa_node *node)
+{
+   return (unsigned long)node & 2;
+}
+
 /* True if the node represents head-of-tree, RESTART or BOUNDS */
 static inline bool xas_top(struct xa_node *node)
 {
@@ -901,6 +907,67 @@ enum {
for (entry = xas_find_tag(xas, max, tag); entry; \
 entry = xas_next_tag(xas, max, tag))
 
+void *__xas_next(struct xa_state *);
+void *__xas_prev(struct xa_state *);
+
+/**
+ * xas_prev() - Move iterator to previous index.
+ * @xas: XArray operation state.
+ *
+ * If the @xas was in an error state, it will remain in an error state
+ * and this function will return %NULL.  If the @xas has never been walked,
+ * it will have the effect of calling xas_load().  Otherwise one will be
+ * subtracted from the index and the state will be walked to the correct
+ * location in the array for the next operation.
+ *
+ * If the iterator was referencing index 0, this function wraps
+ * around to %ULONG_MAX.
+ *
+ * Return: The entry at the new index.  This may be %NULL or an internal
+ * entry, although it should never be a node entry.
+ */
+static inline void *xas_prev(struct xa_state *xas)
+{
+   struct xa_node *node = xas->xa_node;
+
+   if (unlikely(xas_not_node(node) || node->shift ||
+   xas->xa_offset == 0))
+   return __xas_prev(xas);
+
+   xas->xa_index--;
+   xas->xa_offset--;
+   return xa_entry(xas->xa, node, xas->xa_offset);
+}
+
+/**
+ * xas_next() - Move state to next index.
+ * @xas: XArray operation state.
+ *
+ * If the @xas was in an error state, it will remain in an error state
+ * and this function will return %NULL.  If the @xas has never been walked,
+ * it will have the effect of calling xas_load().  Otherwise one will be
+ * added to the index and the state will be walked to the correct
+ * location in the array for the next operation.
+ *
+ * If the iterator was referencing index %ULONG_MAX, this function wraps
+ * around to 0.
+ *
+ * Return: The entry at the new index.  This may be %NULL or an internal
+ * entry, although it should never be a node entry.
+ */
+static inline void *xas_next(struct xa_state *xas)
+{
+   struct xa_node *node = xas->xa_node;
+
+   if (unlikely(xas_not_node(node) || node->shift ||
+   xas->xa_offset == XA_CHUNK_MASK))
+   return __xas_next(xas);
+
+   xas->xa_index++;
+   xas->xa_offset++;
+   return xa_entry(xas->xa, node, xas->xa_offset);
+}
+
 /* Internal functions, mostly shared between radix-tree.c, xarray.c and idr.c 
*/
 void xas_destroy(struct xa_state *);
 
diff --git a/lib/xarray.c b/lib/xarray.c
index af81d4bf9ae1..e8ece1fff9fd 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -838,6 +838,80 @@ void xas_pause(struct xa_state *xas)
 }
 EXPORT_SYMBOL_GPL(xas_pause);
 
+/*
+ * __xas_prev() - Find the previous entry in the XArray.
+ * @xas: XArray operation state.
+ *
+ * Helper function for xas_prev() which handles all the complex cases
+ * out of line.
+ */
+void *__xas_prev(struct xa_state *xas)
+{
+   void *entry;
+
+   if (!xas_frozen(xas->xa_node))
+   xas->xa_index--;
+   if (xas_not_node(xas->xa_node))
+   return xas_load(xas);
+
+   if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
+   xas->xa_offset--;
+
+   while (xas->xa_offset == 255) {
+   xas->xa_offset = xas->xa_node->offset - 1;
+   xas->xa_node = xa_parent(xas->xa, xas->xa_node);
+   if (!xas->xa_node)
+   return set_bounds(xas);
+   }
+
+   for (;;) {
+   entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
+   if (!xa_is_node(entry))
+   return entry;
+
+   xas->xa_node = xa_to_node(entry);
+   xas_set_offset(xas);
+   }
+}
+EXPORT_SYMBOL_GPL(__xas_prev);

[PATCH v6 14/99] xarray: Add xa_destroy

2018-01-17 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

This function frees all the internal memory allocated to the xarray
and reinitialises it to be empty.
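
A sketch of the expected usage (assuming kmalloc'ed entries):

	void *entry;
	unsigned long index = 0;

	/* The stored objects remain the caller's to free: */
	xa_for_each(&array, entry, index, ULONG_MAX, XA_PRESENT)
		kfree(entry);

	xa_destroy(&array);	/* frees the xa_nodes; the array is now empty */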

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 include/linux/xarray.h |  1 +
 lib/xarray.c   | 26 ++
 2 files changed, 27 insertions(+)

diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index d79fd48e4957..d106b2fe4cec 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -221,6 +221,7 @@ void *xa_find_after(struct xarray *xa, unsigned long *index,
unsigned long max, xa_tag_t) __attribute__((nonnull(2)));
 unsigned int xa_extract(struct xarray *, void **dst, unsigned long start,
unsigned long max, unsigned int n, xa_tag_t);
+void xa_destroy(struct xarray *);
 
 /**
  * xa_init() - Initialise an empty XArray.
diff --git a/lib/xarray.c b/lib/xarray.c
index be276618f81b..af81d4bf9ae1 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -1448,6 +1448,32 @@ unsigned int xa_extract(struct xarray *xa, void **dst, 
unsigned long start,
 }
 EXPORT_SYMBOL(xa_extract);
 
+/**
+ * xa_destroy() - Free all internal data structures.
+ * @xa: XArray.
+ *
+ * After calling this function, the XArray is empty and has freed all memory
+ * allocated for its internal data structures.  You are responsible for
+ * freeing the objects referenced by the XArray.
+ */
+void xa_destroy(struct xarray *xa)
+{
+   XA_STATE(xas, xa, 0);
+   unsigned long flags;
+   void *entry;
+
+   xas.xa_node = NULL;
+   xas_lock_irqsave(&xas, flags);
+   entry = xa_head_locked(xa);
+   RCU_INIT_POINTER(xa->xa_head, NULL);
+   xas_init_tags(&xas);
+   /* lockdep checks we're still holding the lock in xas_free_nodes() */
+   if (xa_is_node(entry))
+   xas_free_nodes(&xas, xa_to_node(entry));
+   xas_unlock_irqrestore(&xas, flags);
+}
+EXPORT_SYMBOL(xa_destroy);
+
 #ifdef XA_DEBUG
 void xa_dump_node(const struct xa_node *node)
 {
-- 
2.15.1



[PATCH v6 17/99] xarray: Add MAINTAINERS entry

2018-01-17 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

Add myself as XArray and IDR maintainer.

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 MAINTAINERS | 12 
 1 file changed, 12 insertions(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index 18994806e441..55ae4c0b38d5 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -14893,6 +14893,18 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/vdso
 S: Maintained
 F: arch/x86/entry/vdso/
 
+XARRAY
+M: Matthew Wilcox <mawil...@microsoft.com>
+M: Matthew Wilcox <wi...@infradead.org>
+L: linux-fsde...@vger.kernel.org
+S: Supported
+F: Documentation/core-api/xarray.rst
+F: lib/idr.c
+F: lib/xarray.c
+F: include/linux/idr.h
+F: include/linux/xarray.h
+F: tools/testing/radix-tree
+
 XC2028/3028 TUNER DRIVER
 M: Mauro Carvalho Chehab <mche...@s-opensource.com>
 M: Mauro Carvalho Chehab <mche...@kernel.org>
-- 
2.15.1



[PATCH v6 19/99] idr: Convert to XArray

2018-01-17 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

The IDR distinguishes between unallocated entries (read as NULL) and
entries where the user has chosen to store NULL.  The radix tree was
modified to consider NULL entries which had tag 0 _clear_ as being
allocated, but it added a lot of complexity.

Instead, the XArray has a 'zero entry', which the normal API will treat
as NULL, but is distinct from NULL when using the advanced API.  The IDR
code converts between NULL and zero entries.

The idr_for_each_entry_ul() iterator becomes an alias for xa_for_each(),
so we drop the idr_get_next_ul() function as it has no users.

The exported IDR API was a weird mix of GPL-only and general symbols;
I converted them all to GPL as there was no way to use the IDR API
without being GPL.
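
A sketch of the visible behaviour (the bounds and WARN_ONs are
illustrative only):

	int id = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL);

	/* Allocating an ID for NULL creates a zero entry internally ... */
	WARN_ON(id < 0);
	/* ... which the normal API reads back as NULL ... */
	WARN_ON(idr_find(idr, id) != NULL);
	/* ... yet the ID stays allocated until it is removed. */
	idr_remove(idr, id);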

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 Documentation/core-api/xarray.rst   |   6 +
 include/linux/idr.h | 156 ---
 include/linux/xarray.h  |  29 +++-
 lib/idr.c   | 298 ++--
 lib/radix-tree.c|  77 +-
 lib/xarray.c|  32 
 tools/testing/radix-tree/idr-test.c |  34 
 7 files changed, 419 insertions(+), 213 deletions(-)

diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst
index 0172c7d9e6ea..1dea1c522506 100644
--- a/Documentation/core-api/xarray.rst
+++ b/Documentation/core-api/xarray.rst
@@ -284,6 +284,12 @@ to :c:func:`xas_retry`, and retry the operation if it returns ``true``.
this RCU period.  You should restart the lookup from the head of the
array.
 
+   * - Zero
+ - :c:func:`xa_is_zero`
+ - Zero entries appear as ``NULL`` through the Normal API, but occupy an
+   entry in the XArray which can be tagged or otherwise used to reserve
+   the index.
+
 Other internal entries may be added in the future.  As far as possible, they
 will be handled by :c:func:`xas_retry`.
 
diff --git a/include/linux/idr.h b/include/linux/idr.h
index 11eea38b9629..9064ae5f0abc 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -9,35 +9,35 @@
  * tables.
  */
 
-#ifndef __IDR_H__
-#define __IDR_H__
+#ifndef _LINUX_IDR_H
+#define _LINUX_IDR_H
 
 #include 
 #include 
 #include 
-#include 
+#include 
 
 struct idr {
-   struct radix_tree_root  idr_rt;
-   unsigned intidr_next;
+   struct xarray   idr_xa;
+   unsigned intidr_next;
 };
 
-/*
- * The IDR API does not expose the tagging functionality of the radix tree
- * to users.  Use tag 0 to track whether a node has free space below it.
- */
-#define IDR_FREE   0
-
-/* Set the IDR flag and the IDR_FREE tag */
-#define IDR_RT_MARKER  (ROOT_IS_IDR | (__force gfp_t)  \
-   (1 << (ROOT_TAG_SHIFT + IDR_FREE)))
+#define IDR_INIT_FLAGS (XA_FLAGS_TRACK_FREE | XA_FLAGS_LOCK_IRQ |  \
+XA_FLAGS_TAG(XA_FREE_TAG))
 
 #define IDR_INIT(name) \
 {  \
-   .idr_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER)  \
+   .idr_xa = XARRAY_INIT_FLAGS(name.idr_xa, IDR_INIT_FLAGS),   \
+   .idr_next = 0,  \
 }
 #define DEFINE_IDR(name)   struct idr name = IDR_INIT(name)
 
+static inline void idr_init(struct idr *idr)
+{
+   xa_init_flags(&idr->idr_xa, IDR_INIT_FLAGS);
+   idr->idr_next = 0;
+}
+
 /**
  * idr_get_cursor - Return the current position of the cyclic allocator
  * @idr: idr handle
@@ -66,62 +66,83 @@ static inline void idr_set_cursor(struct idr *idr, unsigned int val)
 
 /**
  * DOC: idr sync
- * idr synchronization (stolen from radix-tree.h)
+ * idr synchronization
  *
- * idr_find() is able to be called locklessly, using RCU. The caller must
- * ensure calls to this function are made within rcu_read_lock() regions.
- * Other readers (lock-free or otherwise) and modifications may be running
- * concurrently.
+ * The IDR manages its own locking, using irqsafe spinlocks for operations
+ * which modify the IDR and RCU for operations which do not.  The user of
+ * the IDR may choose to wrap accesses to it in a lock if it needs to
+ * guarantee the IDR does not change during a read access.  The easiest way
+ * to do this is to grab the same lock the IDR uses for write accesses
+ * using one of the idr_lock() wrappers.
  *
- * It is still required that the caller manage the synchronization and
- * lifetimes of the items. So if RCU lock-free lookups are used, typically
- * this would mean that the items have their own locks, or are amenable to
- * lock-free access; and that the items are freed by RCU (or only freed after
- * having been deleted from the idr tree *and* a synchronize_rcu() grace
- * period).
+ * The caller must still manage the synchronization and lifetimes of the items.

[PATCH v6 18/99] xarray: Add ability to store errno values

2018-01-17 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

While the radix tree offers no way to store IS_ERR pointers, documenting
that the XArray does not support them either led to some concern.  Here
is a sanctioned way to store errnos in the XArray.  I'm concerned that it
will confuse people who can't tell the difference between xa_is_err()
and xa_is_errno(), so I've added copious kernel-doc to help them tell
the difference.
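
A sketch of the two error namespaces side by side (index 7 and -EIO are
arbitrary):

	void *entry;
	int ret;

	/* xa_err() reports a failure of the store operation itself. */
	ret = xa_err(xa_store(xa, 7, xa_mk_errno(-EIO), GFP_KERNEL));
	if (ret)
		return ret;		/* e.g. -ENOMEM */

	/* xa_is_errno() recognises an errno stored as the entry. */
	entry = xa_load(xa, 7);
	if (xa_is_errno(entry))
		pr_info("stored errno %ld\n", xa_to_errno(entry)); /* -EIO */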

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 Documentation/core-api/xarray.rst  |  8 +--
 include/linux/xarray.h | 44 ++
 tools/testing/radix-tree/xarray-test.c |  8 ++-
 3 files changed, 57 insertions(+), 3 deletions(-)

diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst
index 914999c0bf3f..0172c7d9e6ea 100644
--- a/Documentation/core-api/xarray.rst
+++ b/Documentation/core-api/xarray.rst
@@ -42,8 +42,12 @@ When you retrieve an entry from the XArray, you can check 
whether it is
 a value entry by calling :c:func:`xa_is_value`, and convert it back to
 an integer by calling :c:func:`xa_to_value`.
 
-The XArray does not support storing :c:func:`IS_ERR` pointers as some
-conflict with value entries or internal entries.
+The XArray does not support storing :c:func:`IS_ERR` pointers because
+some conflict with value entries or internal entries.  If you need
+to store error numbers in the array, you can encode them into error
+entries with :c:func:`xa_mk_errno`, check whether a returned entry is
+an error with :c:func:`xa_is_errno` and convert it back into an errno
+with :c:func:`xa_to_errno`.
 
 An unusual feature of the XArray is the ability to create entries which
 occupy a range of indices.  Once stored to, looking up any index in
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index acb6d02ff194..ca6af6dd42c4 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -75,6 +75,50 @@ static inline bool xa_is_value(const void *entry)
return (unsigned long)entry & 1;
 }
 
+/**
+ * xa_mk_errno() - Create an XArray entry from an error number.
+ * @error: Error number to store in XArray.
+ *
+ * Return: An entry suitable for storing in the XArray.
+ */
+static inline void *xa_mk_errno(long error)
+{
+   return (void *)(error << 2);
+}
+
+/**
+ * xa_to_errno() - Get error number stored in an XArray entry.
+ * @entry: XArray entry.
+ *
+ * Calling this function on an entry which is not an xa_is_errno() will
+ * yield unpredictable results.  Do not confuse this function with xa_err();
+ * this function is for errnos which have been stored in the XArray, and
+ * that function is for errors returned from the XArray implementation.
+ *
+ * Return: The error number stored in the XArray entry.
+ */
+static inline long xa_to_errno(const void *entry)
+{
+   return (long)entry >> 2;
+}
+
+/**
+ * xa_is_errno() - Determine if an entry is an errno.
+ * @entry: XArray entry.
+ *
+ * Do not confuse this function with xa_is_err(); that function tells you
+ * whether the XArray implementation returned an error; this function
+ * tells you whether the entry you successfully stored in the XArray
+ * represented an errno.  If you have never stored an errno in the XArray,
+ * you do not have to check this.
+ *
+ * Return: True if the entry is an errno, false if it is a pointer.
+ */
+static inline bool xa_is_errno(const void *entry)
+{
+   return (((unsigned long)entry & 3) == 0) && (entry > (void *)-4096);
+}
+
 /*
  * xa_mk_internal() - Create an internal entry.
  * @v: Value to turn into an internal entry.
diff --git a/tools/testing/radix-tree/xarray-test.c b/tools/testing/radix-tree/xarray-test.c
index 2ad460c1febf..4d3541ac31e9 100644
--- a/tools/testing/radix-tree/xarray-test.c
+++ b/tools/testing/radix-tree/xarray-test.c
@@ -29,7 +29,13 @@ void check_xa_err(struct xarray *xa)
assert(xa_err(xa_store(xa, 1, xa_mk_value(0), GFP_KERNEL)) == 0);
assert(xa_err(xa_store(xa, 1, NULL, 0)) == 0);
 // kills the test-suite :-(
-// assert(xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) == -EINVAL);
+// assert(xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) == -EINVAL);
+
+   assert(xa_err(xa_store(xa, 0, xa_mk_errno(-ENOMEM), GFP_KERNEL)) == 0);
+   assert(xa_err(xa_load(xa, 0)) == 0);
+   assert(xa_is_errno(xa_load(xa, 0)) == true);
+   assert(xa_to_errno(xa_load(xa, 0)) == -ENOMEM);
+   xa_erase(xa, 0);
 }
 
 void check_xa_tag(struct xarray *xa)
-- 
2.15.1



[PATCH v6 01/99] xarray: Add the xa_lock to the radix_tree_root

2018-01-17 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

This results in no change in structure size on 64-bit x86 as it fits in
the padding between the gfp_t and the void *.

Initialising the spinlock requires a name for the benefit of lockdep,
so RADIX_TREE_INIT() now needs to know the name of the radix tree it's
initialising, and so do IDR_INIT() and IDA_INIT().

Also add the xa_lock() and xa_unlock() family of wrappers to make it
easier to use the lock.  If we could rely on -fplan9-extensions in
the compiler, we could avoid all of this syntactic sugar, but that
wasn't added until gcc 4.6.
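
A sketch of the effect at a call site (the 'pages' tree and the separate
'tree_lock' it replaces are illustrative):

	/* Before: a separate spinlock guards the tree ... */
	spin_lock_irq(&mapping->tree_lock);
	radix_tree_delete(&mapping->pages, index);
	spin_unlock_irq(&mapping->tree_lock);

	/* ... after: the lock embedded in the root, via the wrappers. */
	xa_lock_irq(&mapping->pages);
	radix_tree_delete(&mapping->pages, index);
	xa_unlock_irq(&mapping->pages);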

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 fs/f2fs/gc.c   |  2 +-
 include/linux/idr.h| 12 ++--
 include/linux/radix-tree.h |  7 +--
 include/linux/xarray.h | 24 
 kernel/pid.c   |  2 +-
 tools/include/linux/spinlock.h |  1 +
 6 files changed, 38 insertions(+), 10 deletions(-)
 create mode 100644 include/linux/xarray.h

diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index d844dcb80570..aac1e02f75df 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -991,7 +991,7 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
unsigned int init_segno = segno;
struct gc_inode_list gc_list = {
.ilist = LIST_HEAD_INIT(gc_list.ilist),
-   .iroot = RADIX_TREE_INIT(GFP_NOFS),
+   .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
};
 
trace_f2fs_gc_begin(sbi->sb, sync, background,
diff --git a/include/linux/idr.h b/include/linux/idr.h
index ed1459d36b9d..11eea38b9629 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -32,11 +32,11 @@ struct idr {
 #define IDR_RT_MARKER  (ROOT_IS_IDR | (__force gfp_t)  \
(1 << (ROOT_TAG_SHIFT + IDR_FREE)))
 
-#define IDR_INIT   \
+#define IDR_INIT(name) \
 {  \
-   .idr_rt = RADIX_TREE_INIT(IDR_RT_MARKER)\
+   .idr_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER)  \
 }
-#define DEFINE_IDR(name)   struct idr name = IDR_INIT
+#define DEFINE_IDR(name)   struct idr name = IDR_INIT(name)
 
 /**
  * idr_get_cursor - Return the current position of the cyclic allocator
@@ -195,10 +195,10 @@ struct ida {
struct radix_tree_root  ida_rt;
 };
 
-#define IDA_INIT   {   \
-   .ida_rt = RADIX_TREE_INIT(IDR_RT_MARKER | GFP_NOWAIT),  \
+#define IDA_INIT(name) {   \
+   .ida_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER | GFP_NOWAIT),\
 }
-#define DEFINE_IDA(name)   struct ida name = IDA_INIT
+#define DEFINE_IDA(name)   struct ida name = IDA_INIT(name)
 
 int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
 int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 6c4e2e716dac..34149e8b5f73 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -110,20 +110,23 @@ struct radix_tree_node {
 #define ROOT_TAG_SHIFT (__GFP_BITS_SHIFT)
 
 struct radix_tree_root {
+   spinlock_t  xa_lock;
gfp_t   gfp_mask;
struct radix_tree_node  __rcu *rnode;
 };
 
-#define RADIX_TREE_INIT(mask)  {   \
+#define RADIX_TREE_INIT(name, mask){   \
+   .xa_lock = __SPIN_LOCK_UNLOCKED(name.xa_lock),  \
.gfp_mask = (mask), \
.rnode = NULL,  \
 }
 
 #define RADIX_TREE(name, mask) \
-   struct radix_tree_root name = RADIX_TREE_INIT(mask)
+   struct radix_tree_root name = RADIX_TREE_INIT(name, mask)
 
 #define INIT_RADIX_TREE(root, mask)\
 do {   \
+   spin_lock_init(&(root)->xa_lock);   \
(root)->gfp_mask = (mask);  \
(root)->rnode = NULL;   \
 } while (0)
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
new file mode 100644
index ..2dfc8006fe64
--- /dev/null
+++ b/include/linux/xarray.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef _LINUX_XARRAY_H
+#define _LINUX_XARRAY_H
+/*
+ * eXtensible Arrays
+ * Copyright (c) 2017 Microsoft Corporation
+ * Author: Matthew Wilcox <mawil...@microsoft.com>
+ */
+
+#include <linux/spinlock.h>
+
+#define xa_trylock(xa) spin_trylock(&(xa)->xa_lock)
+#define xa_lock(xa) spin_lock(&(xa)->xa_lock)

[PATCH v6 20/99] ida: Convert to XArray

2018-01-17 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

Use the XArray infrastructure like we used the radix tree infrastructure.
This lets us get rid of idr_get_free() from the radix tree code.
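
The user-visible API is unchanged by the conversion; for reference,
allocation still looks like this (illustrative):

	struct ida ida;
	int id;

	ida_init(&ida);
	id = ida_simple_get(&ida, 0, 0, GFP_KERNEL);	/* any free ID */
	if (id < 0)
		return id;
	ida_simple_remove(&ida, id);
	ida_destroy(&ida);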

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 include/linux/idr.h|   8 +-
 include/linux/radix-tree.h |   4 -
 lib/idr.c  | 320 ++---
 lib/radix-tree.c   | 119 -
 4 files changed, 187 insertions(+), 264 deletions(-)

diff --git a/include/linux/idr.h b/include/linux/idr.h
index 9064ae5f0abc..ad4199247301 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -232,11 +232,11 @@ struct ida_bitmap {
 DECLARE_PER_CPU(struct ida_bitmap *, ida_bitmap);
 
 struct ida {
-   struct radix_tree_root  ida_rt;
+   struct xarray   ida_xa;
 };
 
 #define IDA_INIT(name) {   \
-   .ida_rt = RADIX_TREE_INIT(name, IDR_INIT_FLAGS | GFP_NOWAIT),   \
+   .ida_xa = XARRAY_INIT_FLAGS(name.ida_xa, IDR_INIT_FLAGS)\
 }
 #define DEFINE_IDA(name)   struct ida name = IDA_INIT(name)
 
@@ -251,7 +251,7 @@ void ida_simple_remove(struct ida *ida, unsigned int id);
 
 static inline void ida_init(struct ida *ida)
 {
-   INIT_RADIX_TREE(&ida->ida_rt, IDR_INIT_FLAGS | GFP_NOWAIT);
+   xa_init_flags(&ida->ida_xa, IDR_INIT_FLAGS);
 }
 
 /**
@@ -268,6 +268,6 @@ static inline int ida_get_new(struct ida *ida, int *p_id)
 
 static inline bool ida_is_empty(const struct ida *ida)
 {
-   return radix_tree_empty(&ida->ida_rt);
+   return xa_empty(&ida->ida_xa);
 }
 #endif /* _LINUX_IDR_H */
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index f64beb9ba175..4c5c36414a80 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -302,10 +302,6 @@ int radix_tree_split(struct radix_tree_root *, unsigned long index,
 int radix_tree_join(struct radix_tree_root *, unsigned long index,
unsigned new_order, void *);
 
-void __rcu **idr_get_free(struct radix_tree_root *root,
- struct radix_tree_iter *iter, gfp_t gfp,
- unsigned long max);
-
 enum {
RADIX_TREE_ITER_TAG_MASK = 0x0f,/* tag index in lower nybble */
RADIX_TREE_ITER_TAGGED   = 0x10,/* lookup tagged slots */
diff --git a/lib/idr.c b/lib/idr.c
index 379eaa8cb75b..7e9a8850b613 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -13,7 +13,6 @@
 #include 
 
 DEFINE_PER_CPU(struct ida_bitmap *, ida_bitmap);
-static DEFINE_SPINLOCK(simple_ida_lock);
 
 /* In radix-tree.c temporarily */
 extern bool idr_nomem(struct xa_state *, gfp_t);
@@ -337,26 +336,23 @@ EXPORT_SYMBOL_GPL(idr_replace);
 /*
  * Developer's notes:
  *
- * The IDA uses the functionality provided by the IDR & radix tree to store
- * bitmaps in each entry.  The XA_FREE_TAG tag means there is at least one bit
- * free, unlike the IDR where it means at least one entry is free.
- *
- * I considered telling the radix tree that each slot is an order-10 node
- * and storing the bit numbers in the radix tree, but the radix tree can't
- * allow a single multiorder entry at index 0, which would significantly
- * increase memory consumption for the IDA.  So instead we divide the index
- * by the number of bits in the leaf bitmap before doing a radix tree lookup.
- *
- * As an optimisation, if there are only a few low bits set in any given
- * leaf, instead of allocating a 128-byte bitmap, we store the bits
+ * The IDA uses the functionality provided by the IDR & XArray to store
+ * bitmaps in each entry.  The XA_FREE_TAG tag is used to mean that there
+ * is at least one bit free, unlike the IDR where it means at least one
+ * array entry is free.
+ *
+ * The XArray supports multi-index entries, so I considered teaching the
+ * XArray that each slot is an order-10 node and indexing the XArray by the
+ * ID.  The XArray has the significant optimisation of storing the first
+ * entry in the struct xarray and avoiding allocating an xa_node.
+ * Unfortunately, it can't do that for multi-order entries.
+ * So instead the XArray index is the ID divided by the number of bits in
+ * the bitmap.
+ *
+ * As a further optimisation, if there are only a few low bits set in any
+ * given leaf, instead of allocating a 128-byte bitmap, we store the bits
  * directly in the entry.
  *
- * We allow the radix tree 'exceptional' count to get out of date.  Nothing
- * in the IDA nor the radix tree code checks it.  If it becomes important
- * to maintain an accurate exceptional count, switch the rcu_assign_pointer()
- * calls to radix_tree_iter_replace() which will correct the exceptional
- * count.
- *
  * The IDA always requires a lock to alloc/free.  If we add a 'test_bit'
  * equivalent, it will still need locking.  Going to RCU lookup would require
  * using RCU to free bitmaps, and that's not trivial without embedding an
@@ -366,104 +362,114 

[PATCH v6 21/99] xarray: Add xa_reserve and xa_release

2018-01-17 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

This function simply creates a slot in the XArray for users who need
to acquire multiple locks before storing their entry in the array and
so cannot use a plain xa_store().
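
A sketch of the intended calling pattern (the two locks, 'obj' and
'still_wanted' are placeholders):

	/* Allocate the slot up front, while we may still sleep. */
	int err = xa_reserve(xa, index, GFP_KERNEL);

	if (err)
		return err;

	spin_lock(&a->lock);
	spin_lock(&b->lock);
	if (still_wanted)
		xa_store(xa, index, obj, 0);	/* slot exists; no allocation */
	else
		xa_release(xa, index);		/* give back the reservation */
	spin_unlock(&b->lock);
	spin_unlock(&a->lock);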

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 include/linux/xarray.h | 14 ++
 lib/xarray.c   | 51 ++
 tools/testing/radix-tree/xarray-test.c | 25 +
 3 files changed, 90 insertions(+)

diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 6f59f1f60205..c3f7405c5517 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -259,6 +259,7 @@ void *xa_load(struct xarray *, unsigned long index);
 void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
 void *xa_cmpxchg(struct xarray *, unsigned long index,
void *old, void *entry, gfp_t);
+int xa_reserve(struct xarray *, unsigned long index, gfp_t);
 bool xa_get_tag(struct xarray *, unsigned long index, xa_tag_t);
 void xa_set_tag(struct xarray *, unsigned long index, xa_tag_t);
 void xa_clear_tag(struct xarray *, unsigned long index, xa_tag_t);
@@ -373,6 +374,19 @@ static inline int xa_insert(struct xarray *xa, unsigned long index,
return -EEXIST;
 }
 
+/**
+ * xa_release() - Release a reserved entry.
+ * @xa: XArray.
+ * @index: Index of entry.
+ *
+ * After calling xa_reserve(), you can call this function to release the
+ * reservation.  It is harmless to call this function if the entry was used.
+ */
+static inline void xa_release(struct xarray *xa, unsigned long index)
+{
+   xa_cmpxchg(xa, index, NULL, NULL, 0);
+}
+
 #define xa_trylock(xa) spin_trylock(&(xa)->xa_lock)
 #define xa_lock(xa)spin_lock(&(xa)->xa_lock)
 #define xa_unlock(xa)  spin_unlock(&(xa)->xa_lock)
diff --git a/lib/xarray.c b/lib/xarray.c
index ace309cc9253..b4dec8e2d202 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -1275,6 +1275,8 @@ void *xa_cmpxchg(struct xarray *xa, unsigned long index,
do {
xas_lock(&xas);
curr = xas_load(&xas);
+   if (curr == XA_ZERO_ENTRY)
+   curr = NULL;
if (curr == old)
xas_store(&xas, entry);
xas_unlock(&xas);
@@ -1310,6 +1312,8 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
 
do {
curr = xas_load(&xas);
+   if (curr == XA_ZERO_ENTRY)
+   curr = NULL;
if (curr == old)
xas_store(&xas, entry);
} while (__xas_nomem(&xas, gfp));
@@ -1318,6 +1322,53 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
 }
 EXPORT_SYMBOL(__xa_cmpxchg);
 
+/**
+ * xa_reserve() - Reserve this index in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @gfp: Memory allocation flags.
+ *
+ * Ensures there is somewhere to store an entry at @index in the array.
+ * If there is already something stored at @index, this function does
+ * nothing.  If there was nothing there, the entry is marked as reserved.
+ * Loads from @index will continue to see a %NULL pointer until a
+ * subsequent store to @index.
+ *
+ * If you do not use the entry that you have reserved, call xa_release()
+ * or xa_erase() to free any unnecessary memory.
+ *
+ * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
+ */
+int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
+{
+   XA_STATE(xas, xa, index);
+   unsigned int lock_type = xa_lock_type(xa);
+   void *curr;
+
+   do {
+   if (lock_type == XA_LOCK_IRQ)
+   xas_lock_irq(&xas);
+   else if (lock_type == XA_LOCK_BH)
+   xas_lock_bh(&xas);
+   else
+   xas_lock(&xas);
+
+   curr = xas_create(&xas);
+   if (!curr)
+   xas_store(&xas, XA_ZERO_ENTRY);
+
+   if (lock_type == XA_LOCK_IRQ)
+   xas_unlock_irq(&xas);
+   else if (lock_type == XA_LOCK_BH)
+   xas_unlock_bh(&xas);
+   else
+   xas_unlock(&xas);
+   } while (xas_nomem(&xas, gfp));
+
+   return xas_error(&xas);
+}
+EXPORT_SYMBOL(xa_reserve);
+
 /**
  * __xa_set_tag() - Set this tag on this entry while locked.
  * @xa: XArray.
diff --git a/tools/testing/radix-tree/xarray-test.c b/tools/testing/radix-tree/xarray-test.c
index 4d3541ac31e9..fe38b53df2ab 100644
--- a/tools/testing/radix-tree/xarray-test.c
+++ b/tools/testing/radix-tree/xarray-test.c
@@ -502,6 +502,29 @@ void check_move(struct xarray *xa)
} while (i < (1 << 16));
 }
 
+void check_reserve(struct xarray *xa)
+{
+   assert(xa_empty(xa));
+   xa_reserve(xa, 12345678, GFP_KERNEL);
+   assert(!xa_empty(xa));
+   assert(!xa_load(xa, 12345678));
+   xa_release

[PATCH v6 24/99] page cache: Add and replace pages using the XArray

2018-01-17 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

Use the XArray APIs to add and replace pages in the page cache.  This
removes two uses of the radix tree preload API and makes the code
significantly shorter.
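
The shape of the replacement is roughly this (a sketch using only the
xas_* calls from earlier in the series; error handling trimmed):

	XA_STATE(xas, &mapping->pages, offset);
	void *curr;

	do {
		xas_lock_irq(&xas);
		curr = xas_load(&xas);
		if (!curr)
			xas_store(&xas, page);	/* may require a node */
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));	/* allocate unlocked */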

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 include/linux/swap.h |   8 ++-
 mm/filemap.c | 143 ++-
 2 files changed, 67 insertions(+), 84 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index c2b8128799c1..394957963c4b 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -299,8 +299,12 @@ void *workingset_eviction(struct address_space *mapping, struct page *page);
 bool workingset_refault(void *shadow);
 void workingset_activation(struct page *page);
 
-/* Do not use directly, use workingset_lookup_update */
-void workingset_update_node(struct radix_tree_node *node);
+/* Only track the nodes of mappings with shadow entries */
+void workingset_update_node(struct xa_node *node);
+#define mapping_set_update(xas, mapping) do {  \
+   if (!dax_mapping(mapping) && !shmem_mapping(mapping))   \
+   xas_set_update(xas, workingset_update_node);\
+} while (0)
 
 /* Returns workingset_update_node() if the mapping has shadow entries. */
 #define workingset_lookup_update(mapping)  \
diff --git a/mm/filemap.c b/mm/filemap.c
index f1b4480723dd..e6371b551de1 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -112,35 +112,6 @@
  *   ->tasklist_lock(memory_failure, collect_procs_ao)
  */
 
-static int page_cache_tree_insert(struct address_space *mapping,
- struct page *page, void **shadowp)
-{
-   struct radix_tree_node *node;
-   void **slot;
-   int error;
-
-   error = __radix_tree_create(&mapping->pages, page->index, 0,
-   &node, &slot);
-   if (error)
-   return error;
-   if (*slot) {
-   void *p;
-
-   p = radix_tree_deref_slot_protected(slot,
-   &mapping->pages.xa_lock);
-   if (!xa_is_value(p))
-   return -EEXIST;
-
-   mapping->nrexceptional--;
-   if (shadowp)
-   *shadowp = p;
-   }
-   __radix_tree_replace(&mapping->pages, node, slot, page,
-workingset_lookup_update(mapping));
-   mapping->nrpages++;
-   return 0;
-}
-
 static void page_cache_tree_delete(struct address_space *mapping,
   struct page *page, void *shadow)
 {
@@ -776,51 +747,44 @@ EXPORT_SYMBOL(file_write_and_wait_range);
  * locked.  This function does not add the new page to the LRU, the
  * caller must do that.
  *
- * The remove + add is atomic.  The only way this function can fail is
- * memory allocation failure.
+ * The remove + add is atomic.  This function cannot fail.
  */
 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 {
-   int error;
+   struct address_space *mapping = old->mapping;
+   void (*freepage)(struct page *) = mapping->a_ops->freepage;
+   pgoff_t offset = old->index;
+   XA_STATE(xas, &mapping->pages, offset);
+   unsigned long flags;
 
VM_BUG_ON_PAGE(!PageLocked(old), old);
VM_BUG_ON_PAGE(!PageLocked(new), new);
VM_BUG_ON_PAGE(new->mapping, new);
 
-   error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
-   if (!error) {
-   struct address_space *mapping = old->mapping;
-   void (*freepage)(struct page *);
-   unsigned long flags;
-
-   pgoff_t offset = old->index;
-   freepage = mapping->a_ops->freepage;
-
-   get_page(new);
-   new->mapping = mapping;
-   new->index = offset;
+   get_page(new);
+   new->mapping = mapping;
+   new->index = offset;
 
-   xa_lock_irqsave(&mapping->pages, flags);
-   __delete_from_page_cache(old, NULL);
-   error = page_cache_tree_insert(mapping, new, NULL);
-   BUG_ON(error);
+   xas_lock_irqsave(&xas, flags);
+   xas_store(&xas, new);
 
-   /*
-* hugetlb pages do not participate in page cache accounting.
-*/
-   if (!PageHuge(new))
-   __inc_node_page_state(new, NR_FILE_PAGES);
-   if (PageSwapBacked(new))
-   __inc_node_page_state(new, NR_SHMEM);
-   xa_unlock_irqrestore(&mapping->pages, flags);
-   mem_cgroup_migrate(old, new);
-   radix_tree_preload_end();
-   if (freepage)
-   freepage(old);
-   put_page(old);
-   }
+   old->mapping = NULL;
+   /* hugetlb pages do not participate in page cache acc

[PATCH v6 23/99] page cache: Add page_cache_range_empty function

2018-01-17 Thread Matthew Wilcox
From: Matthew Wilcox <mawil...@microsoft.com>

btrfs has its own custom function for determining whether the page cache
has any pages in a particular range.  Move this functionality to the
page cache, and call it from btrfs.

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 fs/btrfs/btrfs_inode.h  |  7 -
 fs/btrfs/inode.c| 70 -
 include/linux/pagemap.h |  2 ++
 mm/filemap.c| 26 ++
 4 files changed, 34 insertions(+), 71 deletions(-)

diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 63f0ccc92a71..a48bd6e0a0bb 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -365,6 +365,11 @@ static inline void btrfs_print_data_csum_error(struct btrfs_inode *inode,
logical_start, csum, csum_expected, mirror_num);
 }
 
-bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end);
+static inline bool btrfs_page_exists_in_range(struct inode *inode,
+   loff_t start, loff_t end)
+{
+   return page_cache_range_empty(inode->i_mapping, start >> PAGE_SHIFT,
+   end >> PAGE_SHIFT);
+}
 
 #endif
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index dbdb5bf6bca1..d7d2c556d5a2 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7541,76 +7541,6 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
return ret;
 }
 
-bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
-{
-   struct radix_tree_root *root = &inode->i_mapping->pages;
-   bool found = false;
-   void **pagep = NULL;
-   struct page *page = NULL;
-   unsigned long start_idx;
-   unsigned long end_idx;
-
-   start_idx = start >> PAGE_SHIFT;
-
-   /*
-* end is the last byte in the last page.  end == start is legal
-*/
-   end_idx = end >> PAGE_SHIFT;
-
-   rcu_read_lock();
-
-   /* Most of the code in this while loop is lifted from
-* find_get_page.  It's been modified to begin searching from a
-* page and return just the first page found in that range.  If the
-* found idx is less than or equal to the end idx then we know that
-* a page exists.  If no pages are found or if those pages are
-* outside of the range then we're fine (yay!) */
-   while (page == NULL &&
-  radix_tree_gang_lookup_slot(root, &pagep, NULL, start_idx, 1)) {
-   page = radix_tree_deref_slot(pagep);
-   if (unlikely(!page))
-   break;
-
-   if (radix_tree_exception(page)) {
-   if (radix_tree_deref_retry(page)) {
-   page = NULL;
-   continue;
-   }
-   /*
-* Otherwise, shmem/tmpfs must be storing a swap entry
-* here so return it without attempting to raise page
-* count.
-*/
-   page = NULL;
-   break; /* TODO: Is this relevant for this use case? */
-   }
-
-   if (!page_cache_get_speculative(page)) {
-   page = NULL;
-   continue;
-   }
-
-   /*
-* Has the page moved?
-* This is part of the lockless pagecache protocol. See
-* include/linux/pagemap.h for details.
-*/
-   if (unlikely(page != *pagep)) {
-   put_page(page);
-   page = NULL;
-   }
-   }
-
-   if (page) {
-   if (page->index <= end_idx)
-   found = true;
-   put_page(page);
-   }
-
-   rcu_read_unlock();
-   return found;
-}
-
 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
  struct extent_state **cached_state, int writing)
 {
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 0db127c3ccac..34d4fa3ad1c5 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -245,6 +245,8 @@ pgoff_t page_cache_next_gap(struct address_space *mapping,
 pgoff_t index, unsigned long max_scan);
 pgoff_t page_cache_prev_gap(struct address_space *mapping,
 pgoff_t index, unsigned long max_scan);
+bool page_cache_range_empty(struct address_space *mapping,
+   pgoff_t index, pgoff_t max);
 
 #define FGP_ACCESSED   0x0001
 #define FGP_LOCK   0x0002
diff --git a/mm/filemap.c b/mm/filemap.c
index 146e8ec16ec0..f1b4480723dd 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1398,6 +1398,32 @@
