[PATCH v8 7/8] block: Consolidate bio_alloc_bioset(), bio_kmalloc()

2012-09-06 Thread Kent Overstreet
Previously, bio_kmalloc() and bio_alloc_bioset() behaved slightly
differently because there was some almost-duplicated code - this fixes
some of that.

The important change is that previously bio_kmalloc() always set
bi_io_vec = bi_inline_vecs, even if nr_iovecs == 0 - unlike
bio_alloc_bioset(). This would cause bio_has_data() to return true; I
don't know if this resulted in any actual bugs but it was certainly
wrong.
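
For reference, bio_has_data() at this point is roughly the following
(reconstructed from memory, not quoted from this series):

	static inline bool bio_has_data(struct bio *bio)
	{
		return bio && bio->bi_io_vec != NULL;
	}

so a zero-iovec bio whose bi_io_vec points at bi_inline_vecs looks like it
carries data even though bi_vcnt and bi_max_vecs are 0.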

bio_kmalloc() and bio_alloc_bioset() also have different arbitrary
limits on nr_iovecs - 1024 (UIO_MAXIOV) for bio_kmalloc(), 256
(BIO_MAX_PAGES) for bio_alloc_bioset(). This patch doesn't fix that, but
at least they're enforced closer together and hopefully they will be
fixed in a later patch.

This'll also help with some future cleanups - there are a fair number of
functions that allocate bios (e.g. bio_clone()), and now they don't have
to be duplicated for bio_alloc(), bio_alloc_bioset(), and bio_kmalloc().
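
(The include/linux/bio.h half of the diff is not shown below; presumably
bio_alloc() and bio_kmalloc() become thin wrappers along these lines - a
sketch, and also the reason fs_bio_set gets exported:)

	static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
	{
		return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
	}

	static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
	{
		return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
	}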

Signed-off-by: Kent Overstreet koverstr...@google.com
CC: Jens Axboe ax...@kernel.dk
v7: Re-add dropped comments, improve patch description
---
 fs/bio.c            | 110 ++--
 include/linux/bio.h |  16 ++--
 2 files changed, 49 insertions(+), 77 deletions(-)

diff --git a/fs/bio.c b/fs/bio.c
index d807fe2..357a3af 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -55,6 +55,7 @@ static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
  * IO code that does not need private memory pools.
  */
 struct bio_set *fs_bio_set;
+EXPORT_SYMBOL(fs_bio_set);
 
 /*
  * Our slab pool management
@@ -291,39 +292,58 @@ EXPORT_SYMBOL(bio_reset);
  * @bs:	the bio_set to allocate from.
  *
  * Description:
- *   bio_alloc_bioset will try its own mempool to satisfy the allocation.
- *   If %__GFP_WAIT is set then we will block on the internal pool waiting
- *   for a struct bio to become free.
- **/
+ *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
+ *   backed by the @bs's mempool.
+ *
+ *   When @bs is not NULL, if %__GFP_WAIT is set then bio_alloc will always be
+ *   able to allocate a bio. This is due to the mempool guarantees. To make this
+ *   work, callers must never allocate more than 1 bio at a time from this pool.
+ *   Callers that need to allocate more than 1 bio must always submit the
+ *   previously allocated bio for IO before attempting to allocate a new one.
+ *   Failure to do so can cause deadlocks under memory pressure.
+ *
+ *   RETURNS:
+ *   Pointer to new bio on success, NULL on failure.
+ */
 struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 {
+   unsigned front_pad;
+   unsigned inline_vecs;
unsigned long idx = BIO_POOL_NONE;
struct bio_vec *bvl = NULL;
struct bio *bio;
void *p;
 
-   p = mempool_alloc(bs->bio_pool, gfp_mask);
+   if (!bs) {
+   if (nr_iovecs > UIO_MAXIOV)
+   return NULL;
+
+   p = kmalloc(sizeof(struct bio) +
+   nr_iovecs * sizeof(struct bio_vec),
+   gfp_mask);
+   front_pad = 0;
+   inline_vecs = nr_iovecs;
+   } else {
+   p = mempool_alloc(bs->bio_pool, gfp_mask);
+   front_pad = bs->front_pad;
+   inline_vecs = BIO_INLINE_VECS;
+   }
+
if (unlikely(!p))
return NULL;
-   bio = p + bs->front_pad;
 
+   bio = p + front_pad;
bio_init(bio);
-   bio->bi_pool = bs;
-
-   if (unlikely(!nr_iovecs))
-   goto out_set;
 
-   if (nr_iovecs <= BIO_INLINE_VECS) {
-   bvl = bio->bi_inline_vecs;
-   nr_iovecs = BIO_INLINE_VECS;
-   } else {
+   if (nr_iovecs > inline_vecs) {
bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
if (unlikely(!bvl))
goto err_free;
-
-   nr_iovecs = bvec_nr_vecs(idx);
+   } else if (nr_iovecs) {
+   bvl = bio->bi_inline_vecs;
}
-out_set:
+
+   bio->bi_pool = bs;
bio->bi_flags |= idx << BIO_POOL_OFFSET;
bio->bi_max_vecs = nr_iovecs;
bio->bi_io_vec = bvl;
@@ -335,62 +355,6 @@ err_free:
 }
 EXPORT_SYMBOL(bio_alloc_bioset);
 
-/**
- * bio_alloc - allocate a new bio, memory pool backed
- * @gfp_mask: allocation mask to use
- * @nr_iovecs: number of iovecs
- *
- * bio_alloc will allocate a bio and associated bio_vec array that can hold
- * at least @nr_iovecs entries. Allocations will be done from the
- * fs_bio_set. Also see @bio_alloc_bioset and @bio_kmalloc.
- *
- * If %__GFP_WAIT is set, then bio_alloc will always be able to allocate
- * a bio. This is due to the mempool guarantees. To make this work, callers
- * must never allocate more than 1 bio at a time from this pool. Callers
- * that need to allocate more than 1 bio must always submit the previously
- * allocated bio for IO before attempting to allocate a new one.
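
(To illustrate the one-bio-at-a-time rule from the comments above - a
hypothetical caller, not part of this patch; have_work() and ctx are made up:)

	/*
	 * Correct pattern: each mempool-backed bio is submitted before the
	 * next one is allocated, so the mempool can make forward progress.
	 */
	while (have_work(ctx)) {
		struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, bs);

		/* ... set up bi_sector, bi_bdev, add pages ... */
		submit_bio(WRITE, bio);	/* submit before allocating the next bio */
	}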
