Re: [PATCH-v3 1/4] idr: Percpu ida

2013-08-28 Thread Kent Overstreet
On Tue, Aug 20, 2013 at 02:31:57PM -0700, Andrew Morton wrote:
 On Fri, 16 Aug 2013 23:09:06 +0000 Nicholas A. Bellinger n...@linux-iscsi.org wrote:
  +   /*
  +* Bitmap of cpus that (may) have tags on their percpu freelists:
  +* steal_tags() uses this to decide when to steal tags, and which cpus
  +* to try stealing from.
  +*
  +* It's ok for a freelist to be empty when its bit is set - steal_tags()
  +* will just keep looking - but the bitmap _must_ be set whenever a
  +* percpu freelist does have tags.
  +*/
  +   unsigned long   *cpus_have_tags;
 
 Why not cpumask_t?

I hadn't encountered it before - looks like it's probably what I want.

I don't see any explanation for the parallel set of operations for
working on cpumasks - e.g. next_cpu()/cpumask_next(). For now I'm going
with the cpumask_* versions, is that what I want?

If you can have a look at the fixup patch that'll be most appreciated.
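
Something along these lines, maybe - a minimal sketch of the conversion,
assuming cpus_have_tags becomes a cpumask_t embedded in the pool (the
actual fixup patch is what counts):

#include <linux/cpumask.h>

struct percpu_ida_sketch {
	cpumask_t	cpus_have_tags;		/* was unsigned long * */
	unsigned	cpu_last_stolen;
};

/* percpu_ida_free() side: was set_bit(cpu, pool->cpus_have_tags) */
static void mark_cpu_has_tags(struct percpu_ida_sketch *pool, int cpu)
{
	cpumask_set_cpu(cpu, &pool->cpus_have_tags);
}

/* steal_tags() side: cycle through candidate cpus, wrapping around */
static int next_steal_candidate(struct percpu_ida_sketch *pool)
{
	int cpu = cpumask_next(pool->cpu_last_stolen,
			       &pool->cpus_have_tags);

	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(&pool->cpus_have_tags);
	return cpu;
}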

  +   struct {
  +   spinlock_t  lock;
  +   /*
  +* When we go to steal tags from another cpu (see steal_tags()),
  +* we want to pick a cpu at random. Cycling through them every
  +* time we steal is a bit easier and more or less equivalent:
  +*/
  +   unsigned	cpu_last_stolen;
  +
  +   /* For sleeping on allocation failure */
  +   wait_queue_head_t   wait;
  +
  +   /*
  +* Global freelist - it's a stack where nr_free points to the
  +* top
  +*/
  +   unsigned	nr_free;
  +   unsigned	*freelist;
  +   } ____cacheline_aligned_in_smp;
 
 Why the cacheline_aligned_in_smp?

It's separating the RW stuff that isn't always touched from the RO stuff
that's used on every allocation.
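
As a sketch of the idiom (field names borrowed from the patch, the exact
split is illustrative):

#include <linux/cache.h>
#include <linux/spinlock.h>

struct percpu_ida_cpu;

struct example_pool {
	/* read-mostly fields, touched on every fast-path allocation */
	unsigned			nr_tags;
	struct percpu_ida_cpu __percpu	*tag_cpu;

	/*
	 * Contended, write-heavy state pushed onto its own cache line(s)
	 * so global-freelist traffic doesn't bounce the hot fields above
	 * between cpus.
	 */
	struct {
		spinlock_t	lock;
		unsigned	nr_free;
		unsigned	*freelist;
	} ____cacheline_aligned_in_smp;
};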

 
  +};
  
  ...
 
  +
  +/* Percpu IDA */
  +
  +/*
  + * Number of tags we move between the percpu freelist and the global
  + * freelist at a time
 
 "between a percpu freelist" would be more accurate?

No, because when we're stealing tags we always grab all of the remote
percpu freelist's tags - IDA_PCPU_BATCH_MOVE is only used when moving
to/from the global freelist.
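
To make the distinction concrete (a sketch reusing move_tags() from the
patch; the cpus_have_tags bookkeeping steal_tags() also does is omitted):

/* refill from the global freelist: at most IDA_PCPU_BATCH_MOVE tags */
move_tags(tags->freelist, &tags->nr_free,
	  pool->freelist, &pool->nr_free,
	  min(pool->nr_free, IDA_PCPU_BATCH_MOVE));

/* steal from a remote cpu: take its *entire* freelist */
move_tags(tags->freelist, &tags->nr_free,
	  remote->freelist, &remote->nr_free,
	  remote->nr_free);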

 
  + */
  +#define IDA_PCPU_BATCH_MOVE	32U
  +
  +/* Max size of percpu freelist, */
  +#define IDA_PCPU_SIZE  ((IDA_PCPU_BATCH_MOVE * 3) / 2)
  +
  +struct percpu_ida_cpu {
  +   spinlock_t  lock;
  +   unsigned	nr_free;
  +   unsigned	freelist[];
  +};
 
 Data structure needs documentation.  There's one of these per cpu.  I
 guess nr_free and freelist are clear enough.  The presence of a lock
 in a percpu data structure is a surprise.  It's for cross-cpu stealing,
 I assume?

Yeah, I'll add some comments.
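
Something like this, perhaps (a sketch of the promised comments, not the
final wording):

struct percpu_ida_cpu {
	/*
	 * Protects nr_free/freelist. Mostly uncontended - the owning cpu
	 * takes it on alloc/free, but steal_tags() takes it from a remote
	 * cpu, which is why a percpu structure needs a lock at all.
	 */
	spinlock_t	lock;

	/* nr_free/freelist form a stack of this cpu's free tags */
	unsigned	nr_free;
	unsigned	freelist[];
};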

  +static inline void alloc_global_tags(struct percpu_ida *pool,
  +				     struct percpu_ida_cpu *tags)
  +{
  +	move_tags(tags->freelist, &tags->nr_free,
  +		  pool->freelist, &pool->nr_free,
  +		  min(pool->nr_free, IDA_PCPU_BATCH_MOVE));
  +}
 
 Document this function?

Will do
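
Roughly (a sketch, not the final comment):

/*
 * Try to refill a cpu's local freelist from the global freelist, moving
 * at most IDA_PCPU_BATCH_MOVE tags. May move nothing if the global
 * freelist is empty; the caller then falls back to steal_tags().
 *
 * Must be called with pool->lock held and irqs disabled.
 */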

  +	while (1) {
  +		spin_lock(&pool->lock);
  +
  +		/*
  +		 * prepare_to_wait() must come before steal_tags(), in case
  +		 * percpu_ida_free() on another cpu flips a bit in
  +		 * cpus_have_tags
  +		 *
  +		 * global lock held and irqs disabled, don't need percpu lock
  +		 */
  +		prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
  +
  +		if (!tags->nr_free)
  +			alloc_global_tags(pool, tags);
  +		if (!tags->nr_free)
  +			steal_tags(pool, tags);
  +
  +		if (tags->nr_free) {
  +			tag = tags->freelist[--tags->nr_free];
  +			if (tags->nr_free)
  +				set_bit(smp_processor_id(),
  +					pool->cpus_have_tags);
  +		}
  +
  +		spin_unlock(&pool->lock);
  +		local_irq_restore(flags);
  +
  +		if (tag >= 0 || !(gfp & __GFP_WAIT))
  +			break;
  +
  +		schedule();
  +
  +		local_irq_save(flags);
  +		tags = this_cpu_ptr(pool->tag_cpu);
  +	}
 
 What guarantees that this wait will terminate?

It seems fairly clear to me from the break statement a couple lines up;
if we were passed __GFP_WAIT we terminate iff we successfully allocated a
tag. If we weren't passed __GFP_WAIT we never actually sleep.

I can add a comment if you think it needs one.
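
For example, annotating the exit conditions directly (a sketch):

if (tag >= 0 || !(gfp & __GFP_WAIT))
	break;	/* got a tag, or the caller can't sleep - done either way */

/* only reached with __GFP_WAIT: sleep until percpu_ida_free() wakes us */
schedule();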

  +	finish_wait(&pool->wait, &wait);
  +   return tag;
  +}
  +EXPORT_SYMBOL_GPL(percpu_ida_alloc);
  +
  +/**
  + * percpu_ida_free - free a tag
  + * @pool: pool @tag was allocated from
  + * @tag: a tag previously allocated with percpu_ida_alloc()
  + *
  + * Safe to be called from interrupt context.
  + */
  +void percpu_ida_free(struct percpu_ida *pool, unsigned tag)

Re: [PATCH-v3 1/4] idr: Percpu ida

2013-08-28 Thread Andrew Morton
On Wed, 28 Aug 2013 12:53:17 -0700 Kent Overstreet k...@daterainc.com wrote:

   + while (1) {
   + spin_lock(&pool->lock);
   +
   + /*
   +  * prepare_to_wait() must come before steal_tags(), in case
   +  * percpu_ida_free() on another cpu flips a bit in
   +  * cpus_have_tags
   +  *
   +  * global lock held and irqs disabled, don't need percpu lock
   +  */
   + prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
   +
   + if (!tags->nr_free)
   + alloc_global_tags(pool, tags);
   + if (!tags->nr_free)
   + steal_tags(pool, tags);
   +
   + if (tags->nr_free) {
   + tag = tags->freelist[--tags->nr_free];
   + if (tags->nr_free)
   + set_bit(smp_processor_id(),
   + pool->cpus_have_tags);
   + }
   +
   + spin_unlock(&pool->lock);
   + local_irq_restore(flags);
   +
   + if (tag >= 0 || !(gfp & __GFP_WAIT))
   + break;
   +
   + schedule();
   +
   + local_irq_save(flags);
   + tags = this_cpu_ptr(pool->tag_cpu);
   + }
  
  What guarantees that this wait will terminate?
 
 It seems fairly clear to me from the break statement a couple lines up;
 if we were passed __GFP_WAIT we terminate iff we successfully allocated a
 tag. If we weren't passed __GFP_WAIT we never actually sleep.

OK ;)  Let me rephrase.  What guarantees that a tag will become available?

If what we have here is an open-coded __GFP_NOFAIL then that is
potentially problematic.


Re: [PATCH-v3 1/4] idr: Percpu ida

2013-08-28 Thread Kent Overstreet
On Wed, Aug 28, 2013 at 01:23:32PM -0700, Andrew Morton wrote:
 On Wed, 28 Aug 2013 12:53:17 -0700 Kent Overstreet k...@daterainc.com wrote:
 
 +   while (1) {
 +   spin_lock(&pool->lock);
 +
 +   /*
 +* prepare_to_wait() must come before steal_tags(), in case
 +* percpu_ida_free() on another cpu flips a bit in
 +* cpus_have_tags
 +*
 +* global lock held and irqs disabled, don't need percpu lock
 +*/
 +   prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
 +
 +   if (!tags->nr_free)
 +   alloc_global_tags(pool, tags);
 +   if (!tags->nr_free)
 +   steal_tags(pool, tags);
 +
 +   if (tags->nr_free) {
 +   tag = tags->freelist[--tags->nr_free];
 +   if (tags->nr_free)
 +   set_bit(smp_processor_id(),
 +   pool->cpus_have_tags);
 +   }
 +
 +   spin_unlock(&pool->lock);
 +   local_irq_restore(flags);
 +
 +   if (tag >= 0 || !(gfp & __GFP_WAIT))
 +   break;
 +
 +   schedule();
 +
 +   local_irq_save(flags);
 +   tags = this_cpu_ptr(pool->tag_cpu);
 +   }
   
   What guarantees that this wait will terminate?
  
  It seems fairly clear to me from the break statement a couple lines up;
  if we were passed __GFP_WAIT we terminate iff we successfully allocated a
  tag. If we weren't passed __GFP_WAIT we never actually sleep.
 
 OK ;)  Let me rephrase.  What guarantees that a tag will become available?
 
 If what we have here is an open-coded __GFP_NOFAIL then that is
 potentially problematic.

It's the same semantics as a mempool, really - it'll succeed when a tag
gets freed.  If we are sleeping then there isn't really anything else we
can do, there isn't anything we're trying in the __GFP_WAIT case that
we're not trying in the GFP_NOWAIT case.
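
From the caller's side the two contracts look like this (a sketch, assuming
an initialized pool):

int tag;

/* can't sleep: may return -ENOSPC if nothing is free right now */
tag = percpu_ida_alloc(pool, GFP_NOWAIT);
if (tag < 0)
	return tag;

/*
 * __GFP_WAIT (e.g. GFP_KERNEL): never fails, but sleeps until a tag is
 * freed - so some other context must eventually call percpu_ida_free().
 */
tag = percpu_ida_alloc(pool, GFP_KERNEL);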


Re: [PATCH-v3 1/4] idr: Percpu ida

2013-08-28 Thread Andrew Morton
On Wed, 28 Aug 2013 13:44:54 -0700 Kent Overstreet k...@daterainc.com wrote:

What guarantees that this wait will terminate?
   
    It seems fairly clear to me from the break statement a couple lines up;
    if we were passed __GFP_WAIT we terminate iff we successfully allocated a
    tag. If we weren't passed __GFP_WAIT we never actually sleep.
  
  OK ;)  Let me rephrase.  What guarantees that a tag will become available?
  
  If what we have here is an open-coded __GFP_NOFAIL then that is
  potentially problematic.
 
 It's the same semantics as a mempool, really - it'll succeed when a tag
 gets freed.

OK, that's reasonable if the code is being used to generate IO tags -
we expect the in-flight tags to eventually be returned.

But if a client of this code is using the allocator for something
totally different, there is no guarantee that the act of waiting will
result in any tags being returned.

(These are core design principles/constraints which should be
explicitly documented in a place where future readers will see them!)



Re: [PATCH-v3 1/4] idr: Percpu ida

2013-08-28 Thread Kent Overstreet
On Wed, Aug 28, 2013 at 01:50:42PM -0700, Andrew Morton wrote:
 On Wed, 28 Aug 2013 13:44:54 -0700 Kent Overstreet k...@daterainc.com wrote:
 
 What guarantees that this wait will terminate?

It seems fairly clear to me from the break statement a couple lines up;
if we were passed __GFP_WAIT we terminate iff we successfully allocated a
tag. If we weren't passed __GFP_WAIT we never actually sleep.
   
   OK ;)  Let me rephrase.  What guarantees that a tag will become available?
   
   If what we have here is an open-coded __GFP_NOFAIL then that is
   potentially problematic.
  
  It's the same semantics as a mempool, really - it'll succeed when a tag
  gets freed.
 
 OK, that's reasonable if the code is being used to generate IO tags -
 we expect the in-flight tags to eventually be returned.
 
 But if a client of this code is using the allocator for something
 totally different, there is no guarantee that the act of waiting will
 result in any tags being returned.

Yeah, and I did wonder a bit whether the waiting mechanism belonged in
the percpu ida code; arguably (certainly just looking at this code, not
any of the users) if it belongs in this code it should be common to
regular ida, not specific to percpu ida.

I've decided to punt on changing that for now, since all the percpu ida
users I've come across do want the waiting mechanism, but none of the
regular ida users that I've looked at want it. There's probably a reason
for that I haven't thought of yet.

 (These are core design principles/constraints which should be
 explicitly documented in a place where future readers will see them!)

*nod* I suppose it should be said explicitly that the gfp_t parameter
indicates whether or not to wait until a _tag_ is available, and not
some internal memory allocation or something.

How's this look?

diff --git a/lib/idr.c b/lib/idr.c
index 15c021c..a3f8e9a 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -1288,6 +1288,11 @@ static inline unsigned alloc_local_tag(struct percpu_ida *pool,
  * Safe to be called from interrupt context (assuming it isn't passed
  * __GFP_WAIT, of course).
  *
+ * @gfp indicates whether or not to wait until a free id is available (it's not
+ * used for internal memory allocations); thus if passed __GFP_WAIT we may sleep
+ * however long it takes until another thread frees an id (same semantics as a
+ * mempool).
+ *
  * Will not fail if passed __GFP_WAIT.
  */
 int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)


Re: [PATCH-v3 1/4] idr: Percpu ida

2013-08-28 Thread Andrew Morton
On Wed, 28 Aug 2013 14:12:17 -0700 Kent Overstreet k...@daterainc.com wrote:

 How's this look?
 
 diff --git a/lib/idr.c b/lib/idr.c
 index 15c021c..a3f8e9a 100644
 --- a/lib/idr.c
 +++ b/lib/idr.c
 @@ -1288,6 +1288,11 @@ static inline unsigned alloc_local_tag(struct percpu_ida *pool,
   * Safe to be called from interrupt context (assuming it isn't passed
   * __GFP_WAIT, of course).
   *
 + * @gfp indicates whether or not to wait until a free id is available (it's not
 + * used for internal memory allocations); thus if passed __GFP_WAIT we may sleep
 + * however long it takes until another thread frees an id (same semantics as a
 + * mempool).

Looks good.  Mentioning the mempool thing is effective - people
understand that.


Re: [PATCH-v3 1/4] idr: Percpu ida

2013-08-26 Thread Kent Overstreet
On Tue, Aug 20, 2013 at 02:31:57PM -0700, Andrew Morton wrote:
 On Fri, 16 Aug 2013 23:09:06 +0000 Nicholas A. Bellinger n...@linux-iscsi.org wrote:
 
  From: Kent Overstreet k...@daterainc.com
  
  Percpu frontend for allocating ids. With percpu allocation (that works),
  it's impossible to guarantee it will always be possible to allocate all
  nr_tags - typically, some will be stuck on a remote percpu freelist
  where the current job can't get to them.
  
  We do guarantee that it will always be possible to allocate at least
  (nr_tags / 2) tags - this is done by keeping track of which and how many
  cpus have tags on their percpu freelists. On allocation failure if
  enough cpus have tags that there could potentially be (nr_tags / 2) tags
  stuck on remote percpu freelists, we then pick a remote cpu at random to
  steal from.
  
  Note that there's no cpu hotplug notifier - we don't care, because
  steal_tags() will eventually get the down cpu's tags. We _could_ satisfy
  more allocations if we had a notifier - but we'll still meet our
  guarantees and it's absolutely not a correctness issue, so I don't think
  it's worth the extra code.
 
  ...
 
   include/linux/idr.h |   53 +
   lib/idr.c   |  316 +--
 
 I don't think this should be in idr.[ch] at all.  It has no
 relationship with the existing code.  Apart from duplicating its
 functionality :(

Well, in the full patch series it does make use of the non-percpu ida.
I'm still hoping to get the ida/idr rewrites in.


Re: [PATCH-v3 1/4] idr: Percpu ida

2013-08-26 Thread Kent Overstreet
On Wed, Aug 21, 2013 at 06:25:58PM +, Christoph Lameter wrote:
 On Fri, 16 Aug 2013, Nicholas A. Bellinger wrote:
 
  +   spinlock_t  lock;
 
 Remove the spinlock.

As Andrew noted, the spinlock is needed because of tag stealing. (You
don't think I'd stick a spinlock on a percpu data structure without a
real reason, would you?)
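
To spell it out, the lock serializes two paths that can run on different
cpus against the same percpu structure (sketch):

/* owning cpu: alloc/free fast path */
spin_lock(&tags->lock);
if (tags->nr_free)
	tag = tags->freelist[--tags->nr_free];
spin_unlock(&tags->lock);

/* a different cpu, in steal_tags(): empty the victim's freelist */
spin_lock(&remote->lock);
if (remote->nr_free)
	move_tags(tags->freelist, &tags->nr_free,
		  remote->freelist, &remote->nr_free,
		  remote->nr_free);
spin_unlock(&remote->lock);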

  +   unsignednr_free;
  +   unsignedfreelist[];
  +};
  +
  +static inline void move_tags(unsigned *dst, unsigned *dst_nr,
  +unsigned *src, unsigned *src_nr,
  +unsigned nr)
  +{
  +   *src_nr -= nr;
  +   memcpy(dst + *dst_nr, src + *src_nr, sizeof(unsigned) * nr);
  +   *dst_nr += nr;
  +}
  +
 
  +static inline unsigned alloc_local_tag(struct percpu_ida *pool,
  +  struct percpu_ida_cpu *tags)
 
 Pass the __percpu offset and not the tags pointer.

Why? It just changes where the this_cpu_ptr() call happens.

 
  +{
  +   int tag = -ENOSPC;
  +
  +   spin_lock(&tags->lock);
 
 Interrupts are already disabled. Drop the spinlock.
 
  +   if (tags->nr_free)
  +   tag = tags->freelist[--tags->nr_free];
 
 You can keep this or avoid address calculation through segment prefixes.
 F.e.
 
 if (__this_cpu_read(tags->nr_free)) {
 	int n = __this_cpu_dec_return(tags->nr_free);
 	tag = __this_cpu_read(tags->freelist[n]);
 }

Can you explain what the point of that change would be? It sounds like
it's preferable to do it that way and avoid this_cpu_ptr() for some
reason, but you're not explaining why.


Re: [PATCH-v3 1/4] idr: Percpu ida

2013-08-21 Thread Christoph Lameter
On Fri, 16 Aug 2013, Nicholas A. Bellinger wrote:

 + spinlock_t  lock;

Remove the spinlock.

 + unsigned	nr_free;
 + unsigned	freelist[];
 +};
 +
 +static inline void move_tags(unsigned *dst, unsigned *dst_nr,
 +  unsigned *src, unsigned *src_nr,
 +  unsigned nr)
 +{
 + *src_nr -= nr;
 + memcpy(dst + *dst_nr, src + *src_nr, sizeof(unsigned) * nr);
 + *dst_nr += nr;
 +}
 +

 +static inline unsigned alloc_local_tag(struct percpu_ida *pool,
 +struct percpu_ida_cpu *tags)

Pass the __percpu offset and not the tags pointer.

 +{
 + int tag = -ENOSPC;
 +
 + spin_lock(&tags->lock);

Interrupts are already disabled. Drop the spinlock.

 + if (tags->nr_free)
 + tag = tags->freelist[--tags->nr_free];

You can keep this or avoid address calculation through segment prefixes.
F.e.

if (__this_cpu_read(tags->nr_free)) {
	int n = __this_cpu_dec_return(tags->nr_free);
	tag = __this_cpu_read(tags->freelist[n]);
}

 + spin_unlock(&tags->lock);

Drop.

 + * Returns a tag - an integer in the range [0..nr_tags) (passed to
 + * tag_pool_init()), or otherwise -ENOSPC on allocation failure.
 + *
 + * Safe to be called from interrupt context (assuming it isn't passed
 + * __GFP_WAIT, of course).
 + *
 + * Will not fail if passed __GFP_WAIT.
 + */
 +int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
 +{
 + DEFINE_WAIT(wait);
 + struct percpu_ida_cpu *tags;
 + unsigned long flags;
 + int tag;
 +
 + local_irq_save(flags);
 + tags = this_cpu_ptr(pool->tag_cpu);

You could drop this_cpu_ptr if you pass pool->tag_cpu to alloc_local_tag.

 +/**
 + * percpu_ida_free - free a tag
 + * @pool: pool @tag was allocated from
 + * @tag: a tag previously allocated with percpu_ida_alloc()
 + *
 + * Safe to be called from interrupt context.
 + */
 +void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
 +{
 + struct percpu_ida_cpu *tags;
 + unsigned long flags;
 + unsigned nr_free;
 +
 + BUG_ON(tag >= pool->nr_tags);
 +
 + local_irq_save(flags);
 + tags = this_cpu_ptr(pool->tag_cpu);
 +
 + spin_lock(&tags->lock);

No need for spinlocking
 + tags->freelist[tags->nr_free++] = tag;

nr_free = __this_cpu_inc_return(pool->tag_cpu.nr_free) ?

__this_cpu_write(pool->tag_cpu.freelist[nr_free], tag)




Re: [PATCH-v3 1/4] idr: Percpu ida

2013-08-20 Thread Andrew Morton
On Fri, 16 Aug 2013 23:09:06 +0000 Nicholas A. Bellinger n...@linux-iscsi.org wrote:

 From: Kent Overstreet k...@daterainc.com
 
 Percpu frontend for allocating ids. With percpu allocation (that works),
 it's impossible to guarantee it will always be possible to allocate all
 nr_tags - typically, some will be stuck on a remote percpu freelist
 where the current job can't get to them.
 
 We do guarantee that it will always be possible to allocate at least
 (nr_tags / 2) tags - this is done by keeping track of which and how many
 cpus have tags on their percpu freelists. On allocation failure if
 enough cpus have tags that there could potentially be (nr_tags / 2) tags
 stuck on remote percpu freelists, we then pick a remote cpu at random to
 steal from.
 
 Note that there's no cpu hotplug notifier - we don't care, because
 steal_tags() will eventually get the down cpu's tags. We _could_ satisfy
 more allocations if we had a notifier - but we'll still meet our
 guarantees and it's absolutely not a correctness issue, so I don't think
 it's worth the extra code.

 ...

  include/linux/idr.h |   53 +
  lib/idr.c   |  316 +--

I don't think this should be in idr.[ch] at all.  It has no
relationship with the existing code.  Apart from duplicating its
functionality :(

 
 ...

 @@ -243,4 +245,55 @@ static inline int ida_get_new(struct ida *ida, int *p_id)
  
  void __init idr_init_cache(void);
  
 +/* Percpu IDA/tag allocator */
 +
 +struct percpu_ida_cpu;
 +
 +struct percpu_ida {
 + /*
 +  * number of tags available to be allocated, as passed to
 +  * percpu_ida_init()
 +  */
 + unsigned	nr_tags;
 +
 + struct percpu_ida_cpu __percpu  *tag_cpu;
 +
 + /*
 +  * Bitmap of cpus that (may) have tags on their percpu freelists:
 +  * steal_tags() uses this to decide when to steal tags, and which cpus
 +  * to try stealing from.
 +  *
 +  * It's ok for a freelist to be empty when its bit is set - steal_tags()
 +  * will just keep looking - but the bitmap _must_ be set whenever a
 +  * percpu freelist does have tags.
 +  */
 + unsigned long   *cpus_have_tags;

Why not cpumask_t?

 + struct {
 + spinlock_t  lock;
 + /*
 +  * When we go to steal tags from another cpu (see steal_tags()),
 +  * we want to pick a cpu at random. Cycling through them every
 +  * time we steal is a bit easier and more or less equivalent:
 +  */
 + unsigned	cpu_last_stolen;
 +
 + /* For sleeping on allocation failure */
 + wait_queue_head_t   wait;
 +
 + /*
 +  * Global freelist - it's a stack where nr_free points to the
 +  * top
 +  */
 + unsigned	nr_free;
 + unsigned	*freelist;
 + } ____cacheline_aligned_in_smp;

Why the cacheline_aligned_in_smp?

 +};
 
 ...

 +
 +/* Percpu IDA */
 +
 +/*
 + * Number of tags we move between the percpu freelist and the global
 + * freelist at a time

"between a percpu freelist" would be more accurate?

 + */
 +#define IDA_PCPU_BATCH_MOVE  32U
 +
 +/* Max size of percpu freelist, */
 +#define IDA_PCPU_SIZE	((IDA_PCPU_BATCH_MOVE * 3) / 2)
 +
 +struct percpu_ida_cpu {
 + spinlock_t  lock;
 + unsigned	nr_free;
 + unsigned	freelist[];
 +};

Data structure needs documentation.  There's one of these per cpu.  I
guess nr_free and freelist are clear enough.  The presence of a lock
in a percpu data structure is a surprise.  It's for cross-cpu stealing,
I assume?

 +static inline void move_tags(unsigned *dst, unsigned *dst_nr,
 +  unsigned *src, unsigned *src_nr,
 +  unsigned nr)
 +{
 + *src_nr -= nr;
 + memcpy(dst + *dst_nr, src + *src_nr, sizeof(unsigned) * nr);
 + *dst_nr += nr;
 +}
 +
 
 ...

 +static inline void alloc_global_tags(struct percpu_ida *pool,
 +  struct percpu_ida_cpu *tags)
 +{
 + move_tags(tags->freelist, &tags->nr_free,
 +   pool->freelist, &pool->nr_free,
 +   min(pool->nr_free, IDA_PCPU_BATCH_MOVE));
 +}

Document this function?

 +static inline unsigned alloc_local_tag(struct percpu_ida *pool,
 +struct percpu_ida_cpu *tags)
 +{
 + int tag = -ENOSPC;
 +
 + spin_lock(&tags->lock);
 + if (tags->nr_free)
 + tag = tags->freelist[--tags->nr_free];
 + spin_unlock(&tags->lock);
 +
 + return tag;
 +}

I guess this one's clear enough, if the data structure relationships are
understood.

 +/**
 + * percpu_ida_alloc - allocate a tag
 + * @pool: pool to allocate from
 + * @gfp: gfp flags
 + *
 + * Returns a tag - an integer in the