[PATCH RESEND 1/5] aio: Export symbols and struct kiocb_batch for in kernel aio usage

2012-07-13 Thread Asias He
This is useful for people who want to use aio in kernel, e.g. vhost-blk.

Cc: Benjamin LaHaise 
Cc: Alexander Viro 
Cc: Jeff Moyer 
Cc: James Bottomley 
Cc: Michael S. Tsirkin 
Cc: linux-...@kvack.org
Cc: linux-fsde...@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: k...@vger.kernel.org
Cc: virtualizat...@lists.linux-foundation.org
Signed-off-by: Asias He 
---
 fs/aio.c|   37 ++---
 include/linux/aio.h |   21 +
 2 files changed, 39 insertions(+), 19 deletions(-)

diff --git a/fs/aio.c b/fs/aio.c
index 55c4c76..93dfbdd 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -224,22 +224,24 @@ static void __put_ioctx(struct kioctx *ctx)
	call_rcu(&ctx->rcu_head, ctx_rcu_free);
 }
 
-static inline int try_get_ioctx(struct kioctx *kioctx)
+inline int try_get_ioctx(struct kioctx *kioctx)
 {
	return atomic_inc_not_zero(&kioctx->users);
 }
+EXPORT_SYMBOL(try_get_ioctx);
 
-static inline void put_ioctx(struct kioctx *kioctx)
+inline void put_ioctx(struct kioctx *kioctx)
 {
	BUG_ON(atomic_read(&kioctx->users) <= 0);
	if (unlikely(atomic_dec_and_test(&kioctx->users)))
__put_ioctx(kioctx);
 }
+EXPORT_SYMBOL(put_ioctx);
 
 /* ioctx_alloc
  * Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
  */
-static struct kioctx *ioctx_alloc(unsigned nr_events)
+struct kioctx *ioctx_alloc(unsigned nr_events)
 {
struct mm_struct *mm;
struct kioctx *ctx;
@@ -303,6 +305,7 @@ out_freectx:
dprintk("aio: error allocating ioctx %d\n", err);
return ERR_PTR(err);
 }
+EXPORT_SYMBOL(ioctx_alloc);
 
 /* kill_ctx
  * Cancels all outstanding aio requests on an aio context.  Used 
@@ -436,23 +439,14 @@ static struct kiocb *__aio_get_req(struct kioctx *ctx)
return req;
 }
 
-/*
- * struct kiocb's are allocated in batches to reduce the number of
- * times the ctx lock is acquired and released.
- */
-#define KIOCB_BATCH_SIZE   32L
-struct kiocb_batch {
-   struct list_head head;
-   long count; /* number of requests left to allocate */
-};
-
-static void kiocb_batch_init(struct kiocb_batch *batch, long total)
+void kiocb_batch_init(struct kiocb_batch *batch, long total)
 {
	INIT_LIST_HEAD(&batch->head);
batch->count = total;
 }
+EXPORT_SYMBOL(kiocb_batch_init);
 
-static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
+void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
 {
struct kiocb *req, *n;
 
@@ -470,6 +464,7 @@ static void kiocb_batch_free(struct kioctx *ctx, struct 
kiocb_batch *batch)
	wake_up_all(&ctx->wait);
	spin_unlock_irq(&ctx->ctx_lock);
 }
+EXPORT_SYMBOL(kiocb_batch_free);
 
 /*
  * Allocate a batch of kiocbs.  This avoids taking and dropping the
@@ -540,7 +535,7 @@ out:
return allocated;
 }
 
-static inline struct kiocb *aio_get_req(struct kioctx *ctx,
+inline struct kiocb *aio_get_req(struct kioctx *ctx,
struct kiocb_batch *batch)
 {
struct kiocb *req;
@@ -552,6 +547,7 @@ static inline struct kiocb *aio_get_req(struct kioctx *ctx,
	list_del(&req->ki_batch);
return req;
 }
+EXPORT_SYMBOL(aio_get_req);
 
 static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
 {
@@ -721,7 +717,7 @@ static inline int __queue_kicked_iocb(struct kiocb *iocb)
  * simplifies the coding of individual aio operations as
  * it avoids various potential races.
  */
-static ssize_t aio_run_iocb(struct kiocb *iocb)
+ssize_t aio_run_iocb(struct kiocb *iocb)
 {
struct kioctx   *ctx = iocb->ki_ctx;
ssize_t (*retry)(struct kiocb *);
@@ -815,6 +811,7 @@ out:
}
return ret;
 }
+EXPORT_SYMBOL(aio_run_iocb);
 
 /*
  * __aio_run_iocbs:
@@ -1136,7 +1133,7 @@ static inline void clear_timeout(struct aio_timeout *to)
	del_singleshot_timer_sync(&to->timer);
 }
 
-static int read_events(struct kioctx *ctx,
+int read_events(struct kioctx *ctx,
long min_nr, long nr,
struct io_event __user *event,
struct timespec __user *timeout)
@@ -1252,6 +1249,7 @@ out:
	destroy_timer_on_stack(&to.timer);
return i ? i : ret;
 }
+EXPORT_SYMBOL(read_events);
 
 /* Take an ioctx and remove it from the list of ioctx's.  Protects 
  * against races with itself via ->dead.
@@ -1492,7 +1490,7 @@ static ssize_t aio_setup_single_vector(int type, struct 
file * file, struct kioc
  * Performs the initial checks and aio retry method
  * setup for the kiocb at the time of io submission.
  */
-static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
+ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
 {
struct file *file = kiocb->ki_filp;
ssize_t ret = 0;
@@ -1570,6 +1568,7 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool 
compat)
 
return 0;
 }
+EXPORT_SYMBOL(aio_setup_iocb);
 
 static int io_submit_one(struct kioctx *ctx, struct iocb __user 

[PATCH RESEND 1/5] aio: Export symbols and struct kiocb_batch for in kernel aio usage

2012-07-13 Thread Asias He
This is useful for people who want to use aio in kernel, e.g. vhost-blk.

Cc: Benjamin LaHaise b...@kvack.org
Cc: Alexander Viro v...@zeniv.linux.org.uk
Cc: Jeff Moyer jmo...@redhat.com
Cc: James Bottomley james.bottom...@hansenpartnership.com
Cc: Michael S. Tsirkin m...@redhat.com
Cc: linux-...@kvack.org
Cc: linux-fsde...@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: k...@vger.kernel.org
Cc: virtualizat...@lists.linux-foundation.org
Signed-off-by: Asias He as...@redhat.com
---
 fs/aio.c|   37 ++---
 include/linux/aio.h |   21 +
 2 files changed, 39 insertions(+), 19 deletions(-)

diff --git a/fs/aio.c b/fs/aio.c
index 55c4c76..93dfbdd 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -224,22 +224,24 @@ static void __put_ioctx(struct kioctx *ctx)
	call_rcu(&ctx->rcu_head, ctx_rcu_free);
 }
 
-static inline int try_get_ioctx(struct kioctx *kioctx)
+inline int try_get_ioctx(struct kioctx *kioctx)
 {
	return atomic_inc_not_zero(&kioctx->users);
 }
+EXPORT_SYMBOL(try_get_ioctx);
 
-static inline void put_ioctx(struct kioctx *kioctx)
+inline void put_ioctx(struct kioctx *kioctx)
 {
	BUG_ON(atomic_read(&kioctx->users) <= 0);
	if (unlikely(atomic_dec_and_test(&kioctx->users)))
__put_ioctx(kioctx);
 }
+EXPORT_SYMBOL(put_ioctx);
 
 /* ioctx_alloc
  * Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
  */
-static struct kioctx *ioctx_alloc(unsigned nr_events)
+struct kioctx *ioctx_alloc(unsigned nr_events)
 {
struct mm_struct *mm;
struct kioctx *ctx;
@@ -303,6 +305,7 @@ out_freectx:
	dprintk("aio: error allocating ioctx %d\n", err);
return ERR_PTR(err);
 }
+EXPORT_SYMBOL(ioctx_alloc);
 
 /* kill_ctx
  * Cancels all outstanding aio requests on an aio context.  Used 
@@ -436,23 +439,14 @@ static struct kiocb *__aio_get_req(struct kioctx *ctx)
return req;
 }
 
-/*
- * struct kiocb's are allocated in batches to reduce the number of
- * times the ctx lock is acquired and released.
- */
-#define KIOCB_BATCH_SIZE   32L
-struct kiocb_batch {
-   struct list_head head;
-   long count; /* number of requests left to allocate */
-};
-
-static void kiocb_batch_init(struct kiocb_batch *batch, long total)
+void kiocb_batch_init(struct kiocb_batch *batch, long total)
 {
	INIT_LIST_HEAD(&batch->head);
	batch->count = total;
 }
+EXPORT_SYMBOL(kiocb_batch_init);
 
-static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
+void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
 {
struct kiocb *req, *n;
 
@@ -470,6 +464,7 @@ static void kiocb_batch_free(struct kioctx *ctx, struct 
kiocb_batch *batch)
	wake_up_all(&ctx->wait);
	spin_unlock_irq(&ctx->ctx_lock);
 }
+EXPORT_SYMBOL(kiocb_batch_free);
 
 /*
  * Allocate a batch of kiocbs.  This avoids taking and dropping the
@@ -540,7 +535,7 @@ out:
return allocated;
 }
 
-static inline struct kiocb *aio_get_req(struct kioctx *ctx,
+inline struct kiocb *aio_get_req(struct kioctx *ctx,
struct kiocb_batch *batch)
 {
struct kiocb *req;
@@ -552,6 +547,7 @@ static inline struct kiocb *aio_get_req(struct kioctx *ctx,
	list_del(&req->ki_batch);
return req;
 }
+EXPORT_SYMBOL(aio_get_req);
 
 static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
 {
@@ -721,7 +717,7 @@ static inline int __queue_kicked_iocb(struct kiocb *iocb)
  * simplifies the coding of individual aio operations as
  * it avoids various potential races.
  */
-static ssize_t aio_run_iocb(struct kiocb *iocb)
+ssize_t aio_run_iocb(struct kiocb *iocb)
 {
	struct kioctx	*ctx = iocb->ki_ctx;
ssize_t (*retry)(struct kiocb *);
@@ -815,6 +811,7 @@ out:
}
return ret;
 }
+EXPORT_SYMBOL(aio_run_iocb);
 
 /*
  * __aio_run_iocbs:
@@ -1136,7 +1133,7 @@ static inline void clear_timeout(struct aio_timeout *to)
	del_singleshot_timer_sync(&to->timer);
 }
 
-static int read_events(struct kioctx *ctx,
+int read_events(struct kioctx *ctx,
long min_nr, long nr,
struct io_event __user *event,
struct timespec __user *timeout)
@@ -1252,6 +1249,7 @@ out:
	destroy_timer_on_stack(&to.timer);
return i ? i : ret;
 }
+EXPORT_SYMBOL(read_events);
 
 /* Take an ioctx and remove it from the list of ioctx's.  Protects 
 * against races with itself via ->dead.
@@ -1492,7 +1490,7 @@ static ssize_t aio_setup_single_vector(int type, struct 
file * file, struct kioc
  * Performs the initial checks and aio retry method
  * setup for the kiocb at the time of io submission.
  */
-static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
+ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
 {
	struct file *file = kiocb->ki_filp;
ssize_t ret = 0;
@@ -1570,6 +1568,7 @@ static ssize_t