Re: [PATCH 03/36] aio: refactor read/write iocb setup

2018-03-19 Thread Darrick J. Wong
On Mon, Mar 05, 2018 at 01:27:10PM -0800, Christoph Hellwig wrote:
> Don't reference the kiocb structure from the common aio code, and move
> any use of it into helpers specific to the read/write path.  This is in
> preparation for aio_poll support that wants to use the space for different
> fields.
> 
> Signed-off-by: Christoph Hellwig 
> Acked-by: Jeff Moyer 

Looks straightforward enough to me,
Reviewed-by: Darrick J. Wong 

--D

> ---
>  fs/aio.c | 171 +++++++++++++++++++++++++++++++++++++-----------------------------
>  1 file changed, 97 insertions(+), 74 deletions(-)
> 
> diff --git a/fs/aio.c b/fs/aio.c
> index 41fc8ce6bc7f..6295fc00f104 100644
> --- a/fs/aio.c
> +++ b/fs/aio.c
> @@ -170,7 +170,9 @@ struct kioctx {
>  #define KIOCB_CANCELLED  ((void *) (~0ULL))
>  
>  struct aio_kiocb {
> - struct kiocb common;
> + union {
> + struct kiocb rw;
> + };
>  
>   struct kioctx   *ki_ctx;
>   kiocb_cancel_fn *ki_cancel;
> @@ -549,7 +551,7 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
>  
>  void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
>  {
> - struct aio_kiocb *req = container_of(iocb, struct aio_kiocb, common);
> + struct aio_kiocb *req = container_of(iocb, struct aio_kiocb, rw);
>   struct kioctx *ctx = req->ki_ctx;
>   unsigned long flags;
>  
> @@ -582,7 +584,7 @@ static int kiocb_cancel(struct aio_kiocb *kiocb)
>   cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED);
>   } while (cancel != old);
>  
> - return cancel(&kiocb->common);
> + return cancel(&kiocb->rw);
>  }
>  
>  static void free_ioctx(struct work_struct *work)
> @@ -1040,15 +1042,6 @@ static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
>   return NULL;
>  }
>  
> -static void kiocb_free(struct aio_kiocb *req)
> -{
> - if (req->common.ki_filp)
> - fput(req->common.ki_filp);
> - if (req->ki_eventfd != NULL)
> - eventfd_ctx_put(req->ki_eventfd);
> - kmem_cache_free(kiocb_cachep, req);
> -}
> -
>  static struct kioctx *lookup_ioctx(unsigned long ctx_id)
>  {
>   struct aio_ring __user *ring  = (void __user *)ctx_id;
> @@ -1079,29 +1072,14 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
>  /* aio_complete
>   *   Called when the io request on the given iocb is complete.
>   */
> -static void aio_complete(struct kiocb *kiocb, long res, long res2)
> +static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
>  {
> - struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, common);
>   struct kioctx   *ctx = iocb->ki_ctx;
>   struct aio_ring *ring;
>   struct io_event *ev_page, *event;
>   unsigned tail, pos, head;
>   unsigned long   flags;
>  
> - BUG_ON(is_sync_kiocb(kiocb));
> -
> - if (kiocb->ki_flags & IOCB_WRITE) {
> - struct file *file = kiocb->ki_filp;
> -
> - /*
> -  * Tell lockdep we inherited freeze protection from submission
> -  * thread.
> -  */
> - if (S_ISREG(file_inode(file)->i_mode))
> - __sb_writers_acquired(file_inode(file)->i_sb, SB_FREEZE_WRITE);
> - file_end_write(file);
> - }
> -
>   if (iocb->ki_list.next) {
>   unsigned long flags;
>  
> @@ -1163,11 +1141,12 @@ static void aio_complete(struct kiocb *kiocb, long res, long res2)
>* eventfd. The eventfd_signal() function is safe to be called
>* from IRQ context.
>*/
> - if (iocb->ki_eventfd != NULL)
> + if (iocb->ki_eventfd) {
>   eventfd_signal(iocb->ki_eventfd, 1);
> + eventfd_ctx_put(iocb->ki_eventfd);
> + }
>  
> - /* everything turned out well, dispose of the aiocb. */
> - kiocb_free(iocb);
> + kmem_cache_free(kiocb_cachep, iocb);
>  
>   /*
>* We have to order our ring_info tail store above and test
> @@ -1430,6 +1409,47 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
>   return -EINVAL;
>  }
>  
> +static void aio_complete_rw(struct kiocb *kiocb, long res, long res2)
> +{
> + struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, rw);
> +
> + WARN_ON_ONCE(is_sync_kiocb(kiocb));
> +
> + if (kiocb->ki_flags & IOCB_WRITE) {
> + struct inode *inode = file_inode(kiocb->ki_filp);
> +
> + /*
> +  * Tell lockdep we inherited freeze protection from submission
> +  * thread.
> +  */
> + if (S_ISREG(inode->i_mode))
> + __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
> + file_end_write(kiocb->ki_filp);
> + }
> +
> + fput(kiocb->ki_filp);
> + aio_complete(iocb, res, res2);
> +}
> +
> +static int aio_prep_rw(struct kiocb *req, struct iocb *iocb)
> +{
> + int ret;
> +
> + 

[PATCH 03/36] aio: refactor read/write iocb setup

2018-03-05 Thread Christoph Hellwig
Don't reference the kiocb structure from the common aio code, and move
any use of it into helpers specific to the read/write path.  This is in
preparation for aio_poll support that wants to use the space for different
fields.

Signed-off-by: Christoph Hellwig 
Acked-by: Jeff Moyer 
---
 fs/aio.c | 171 +++++++++++++++++++++++++++++++++++++-----------------------------
 1 file changed, 97 insertions(+), 74 deletions(-)

diff --git a/fs/aio.c b/fs/aio.c
index 41fc8ce6bc7f..6295fc00f104 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -170,7 +170,9 @@ struct kioctx {
 #define KIOCB_CANCELLED ((void *) (~0ULL))
 
 struct aio_kiocb {
-   struct kiocb common;
+   union {
+   struct kiocb rw;
+   };
 
struct kioctx   *ki_ctx;
kiocb_cancel_fn *ki_cancel;
@@ -549,7 +551,7 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
 
 void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
 {
-   struct aio_kiocb *req = container_of(iocb, struct aio_kiocb, common);
+   struct aio_kiocb *req = container_of(iocb, struct aio_kiocb, rw);
struct kioctx *ctx = req->ki_ctx;
unsigned long flags;
 
@@ -582,7 +584,7 @@ static int kiocb_cancel(struct aio_kiocb *kiocb)
	cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED);
} while (cancel != old);
 
-   return cancel(&kiocb->common);
+   return cancel(&kiocb->rw);
 }
 
 static void free_ioctx(struct work_struct *work)
@@ -1040,15 +1042,6 @@ static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
return NULL;
 }
 
-static void kiocb_free(struct aio_kiocb *req)
-{
-   if (req->common.ki_filp)
-   fput(req->common.ki_filp);
-   if (req->ki_eventfd != NULL)
-   eventfd_ctx_put(req->ki_eventfd);
-   kmem_cache_free(kiocb_cachep, req);
-}
-
 static struct kioctx *lookup_ioctx(unsigned long ctx_id)
 {
struct aio_ring __user *ring  = (void __user *)ctx_id;
@@ -1079,29 +1072,14 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
 /* aio_complete
  * Called when the io request on the given iocb is complete.
  */
-static void aio_complete(struct kiocb *kiocb, long res, long res2)
+static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
 {
-   struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, common);
struct kioctx   *ctx = iocb->ki_ctx;
struct aio_ring *ring;
struct io_event *ev_page, *event;
unsigned tail, pos, head;
unsigned long   flags;
 
-   BUG_ON(is_sync_kiocb(kiocb));
-
-   if (kiocb->ki_flags & IOCB_WRITE) {
-   struct file *file = kiocb->ki_filp;
-
-   /*
-* Tell lockdep we inherited freeze protection from submission
-* thread.
-*/
-   if (S_ISREG(file_inode(file)->i_mode))
-   __sb_writers_acquired(file_inode(file)->i_sb, SB_FREEZE_WRITE);
-   file_end_write(file);
-   }
-
if (iocb->ki_list.next) {
unsigned long flags;
 
@@ -1163,11 +1141,12 @@ static void aio_complete(struct kiocb *kiocb, long res, long res2)
 * eventfd. The eventfd_signal() function is safe to be called
 * from IRQ context.
 */
-   if (iocb->ki_eventfd != NULL)
+   if (iocb->ki_eventfd) {
eventfd_signal(iocb->ki_eventfd, 1);
+   eventfd_ctx_put(iocb->ki_eventfd);
+   }
 
-   /* everything turned out well, dispose of the aiocb. */
-   kiocb_free(iocb);
+   kmem_cache_free(kiocb_cachep, iocb);
 
/*
 * We have to order our ring_info tail store above and test
@@ -1430,6 +1409,47 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
return -EINVAL;
 }
 
+static void aio_complete_rw(struct kiocb *kiocb, long res, long res2)
+{
+   struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, rw);
+
+   WARN_ON_ONCE(is_sync_kiocb(kiocb));
+
+   if (kiocb->ki_flags & IOCB_WRITE) {
+   struct inode *inode = file_inode(kiocb->ki_filp);
+
+   /*
+* Tell lockdep we inherited freeze protection from submission
+* thread.
+*/
+   if (S_ISREG(inode->i_mode))
+   __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
+   file_end_write(kiocb->ki_filp);
+   }
+
+   fput(kiocb->ki_filp);
+   aio_complete(iocb, res, res2);
+}
+
+static int aio_prep_rw(struct kiocb *req, struct iocb *iocb)
+{
+   int ret;
+
+   req->ki_filp = fget(iocb->aio_fildes);
+   if (unlikely(!req->ki_filp))
+   return -EBADF;
+   req->ki_complete = aio_complete_rw;
+   req->ki_pos = iocb->aio_offset;
+   req->ki_flags = iocb_flags(req->ki_filp);
+   if (iocb->aio_flags & IOCB_FLAG_RESFD)
+   req->ki_flags |= IOCB_EVENTFD;
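
A minimal sketch of the idea the union prepares for, in plain userspace C (all toy_* names below are invented stand-ins, not kernel API and not part of the patch): request-type-specific state shares one allocation inside the request object, and completion runs through a per-type callback such as aio_complete_rw() instead of common code poking at kiocb fields.

/* Toy sketch only: mirrors the union + per-type completion dispatch
 * that this patch prepares for.  Not kernel code. */
#include <stdio.h>

struct toy_rw {                         /* stand-in for struct kiocb */
	long pos;
	void (*complete)(struct toy_rw *rw, long res);
};

struct toy_poll {                       /* stand-in for a future poll request */
	unsigned int events;
};

struct toy_aio_kiocb {
	union {                         /* request types share this storage */
		struct toy_rw   rw;
		struct toy_poll poll;
	};
};

static void toy_complete_rw(struct toy_rw *rw, long res)
{
	/* analogue of aio_complete_rw(): rw-specific teardown would happen
	 * here before the common completion path runs */
	printf("rw request at pos %ld completed with %ld\n", rw->pos, res);
}

int main(void)
{
	struct toy_aio_kiocb req;

	req.rw.pos = 4096;
	req.rw.complete = toy_complete_rw;
	req.rw.complete(&req.rw, 512);  /* invoked by whoever finishes the I/O */
	return 0;
}

The analogy stops at the layout: in the real code the union member is chosen at submission time from the iocb opcode, and the rw case still embeds a full struct kiocb so the existing read/write machinery keeps working unchanged.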
