Backport from mainline:

commit 0d8e84b0432beb6d11a1c82deeb9dc1a7bee02c0
Author: Miklos Szeredi <[email protected]>
Date:   Wed Jul 1 16:25:58 2015 +0200

    fuse: simplify request abort

     - don't end the request while req->locked is true

     - make unlock_request() return an error if the connection was aborted

    Signed-off-by: Miklos Szeredi <[email protected]>
    Reviewed-by: Ashish Samant <[email protected]>

Signed-off-by: Maxim Patlasov <[email protected]>
---
 fs/fuse/dev.c |  119 ++++++++++++++++++++++-----------------------------------
 1 file changed, 46 insertions(+), 73 deletions(-)
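
The heart of the change is the abort handshake: instead of making the
requester sleep until req->locked clears, the abort path now leaves
locked requests alone, and the copy path ends them itself after it
notices req->aborted through the new unlock_request() return value.
Below is a minimal userspace sketch of that handshake, with a pthread
mutex standing in for fc->lock; the names and the toy -1 error code
are illustrative, not the kernel's (the kernel returns -ENOENT):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct req {
        pthread_mutex_t lock;   /* stands in for fc->lock */
        bool locked;            /* data being copied to/from req buffers */
        bool aborted;           /* connection abort requested */
};

/* Mirrors the patched unlock_request(): on abort, fail and leave the
 * request marked locked instead of waking a waiting requester. */
static int unlock_request(struct req *r)
{
        int err = 0;

        pthread_mutex_lock(&r->lock);
        if (r->aborted)
                err = -1;               /* kernel: -ENOENT */
        else
                r->locked = false;
        pthread_mutex_unlock(&r->lock);
        return err;
}

/* Abort side: only a request that is NOT locked may be ended here;
 * a locked one will be ended by the copier when its unlock fails. */
static bool abort_may_end(struct req *r)
{
        bool end_now;

        pthread_mutex_lock(&r->lock);
        r->aborted = true;
        end_now = !r->locked;
        pthread_mutex_unlock(&r->lock);
        return end_now;
}

int main(void)
{
        struct req r = { PTHREAD_MUTEX_INITIALIZER, true, false };

        if (!abort_may_end(&r))
                printf("abort: request locked, copier will end it\n");
        if (unlock_request(&r))
                printf("copier: unlock failed, ending request here\n");
        return 0;
}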

diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index d50f48f..66ea344 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -365,8 +365,8 @@ __releases(fc->lock)
 {
        void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
        req->end = NULL;
-       list_del(&req->list);
-       list_del(&req->intr_entry);
+       list_del_init(&req->list);
+       list_del_init(&req->intr_entry);
        req->state = FUSE_REQ_FINISHED;
        if (req->background) {
                req->background = 0;
@@ -435,8 +435,6 @@ __acquires(fc->lock)
                /* Any signal may interrupt this */
                wait_answer_interruptible(fc, req);
 
-               if (req->aborted)
-                       goto aborted;
                if (req->state == FUSE_REQ_FINISHED)
                        return;
 
@@ -449,8 +447,6 @@ __acquires(fc->lock)
                /* Only fatal signals may interrupt this */
                wait_answer_killable(fc, req);
 
-               if (req->aborted)
-                       goto aborted;
                if (req->state == FUSE_REQ_FINISHED)
                        return;
 
@@ -470,22 +466,6 @@ __acquires(fc->lock)
        spin_unlock(&fc->lock);
        wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
        spin_lock(&fc->lock);
-
-       if (!req->aborted)
-               return;
-
- aborted:
-       BUG_ON(req->state != FUSE_REQ_FINISHED);
-       if (req->locked) {
-               /* This is uninterruptible sleep, because data is
-                  being copied to/from the buffers of req.  During
-                  locked state, there mustn't be any filesystem
-                  operation (e.g. page fault), since that could lead
-                  to deadlock */
-               spin_unlock(&fc->lock);
-               wait_event(req->waitq, !req->locked);
-               spin_lock(&fc->lock);
-       }
 }
 
 static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req,
@@ -632,19 +612,21 @@ static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
 }
 
 /*
- * Unlock request.  If it was aborted during being locked, the
- * requester thread is currently waiting for it to be unlocked, so
- * wake it up.
+ * Unlock request.  If it was aborted while locked, caller is responsible
+ * for unlocking and ending the request.
  */
-static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
+static int unlock_request(struct fuse_conn *fc, struct fuse_req *req)
 {
+       int err = 0;
        if (req) {
                spin_lock(&fc->lock);
-               req->locked = 0;
                if (req->aborted)
-                       wake_up(&req->waitq);
+                       err = -ENOENT;
+               else
+                       req->locked = 0;
                spin_unlock(&fc->lock);
        }
+       return err;
 }
 
 struct fuse_copy_state {
@@ -710,7 +692,10 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
        unsigned long offset;
        int err;
 
-       unlock_request(cs->fc, cs->req);
+       err = unlock_request(cs->fc, cs->req);
+       if (err)
+               return err;
+
        fuse_copy_finish(cs);
        if (cs->pipebufs) {
                struct pipe_buffer *buf = cs->pipebufs;
@@ -814,7 +799,10 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
        struct page *newpage;
        struct pipe_buffer *buf = cs->pipebufs;
 
-       unlock_request(cs->fc, cs->req);
+       err = unlock_request(cs->fc, cs->req);
+       if (err)
+               return err;
+
        fuse_copy_finish(cs);
 
        err = buf->ops->confirm(cs->pipe, buf);
@@ -904,11 +892,15 @@ static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
                         unsigned offset, unsigned count)
 {
        struct pipe_buffer *buf;
+       int err;
 
        if (cs->nr_segs == cs->pipe->buffers)
                return -EIO;
 
-       unlock_request(cs->fc, cs->req);
+       err = unlock_request(cs->fc, cs->req);
+       if (err)
+               return err;
+
        fuse_copy_finish(cs);
 
        buf = cs->pipebufs;
@@ -1293,7 +1285,7 @@ static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
        fuse_copy_finish(cs);
        spin_lock(&fc->lock);
        req->locked = 0;
-       if (req->aborted) {
+       if (!fc->connected) {
                request_end(fc, req);
                return -ENODEV;
        }
@@ -1919,13 +1911,6 @@ static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
        if (!req)
                goto err_unlock;
 
-       if (req->aborted) {
-               spin_unlock(&fc->lock);
-               fuse_copy_finish(cs);
-               spin_lock(&fc->lock);
-               request_end(fc, req);
-               return -ENOENT;
-       }
        /* Is it an interrupt reply? */
        if (req->intr_unique == oh.unique) {
                err = -EINVAL;
@@ -1956,10 +1941,9 @@ static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
 
        spin_lock(&fc->lock);
        req->locked = 0;
-       if (!err) {
-               if (req->aborted)
-                       err = -ENOENT;
-       } else if (!req->aborted)
+       if (!fc->connected)
+               err = -ENOENT;
+       else if (err)
                req->out.h.error = -EIO;
        request_end(fc, req);
 
@@ -2103,37 +2087,31 @@ __acquires(fc->lock)
 /*
  * Abort requests under I/O
  *
- * The requests are set to aborted and finished, and the request
- * waiter is woken up.  This will make request_wait_answer() wait
- * until the request is unlocked and then return.
+ * Separate out unlocked requests, they should be finished off immediately.
+ * Locked requests will be finished after unlock; see unlock_request().
  *
- * If the request is asynchronous, then the end function needs to be
- * called after waiting for the request to be unlocked (if it was
- * locked).
+ * Next finish off the unlocked requests.  It is possible that some request will
+ * finish before we can.  This is OK, the request will in that case be removed
+ * from the list before we touch it.
  */
 static void end_io_requests(struct fuse_conn *fc)
 __releases(fc->lock)
 __acquires(fc->lock)
 {
-       while (!list_empty(&fc->io)) {
-               struct fuse_req *req =
-                       list_entry(fc->io.next, struct fuse_req, list);
-               void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
+       struct fuse_req *req, *next;
+       LIST_HEAD(to_end);
 
-               req->aborted = 1;
+       list_for_each_entry_safe(req, next, &fc->io, list) {
                req->out.h.error = -ECONNABORTED;
-               req->state = FUSE_REQ_FINISHED;
-               list_del_init(&req->list);
-               wake_up(&req->waitq);
-               if (end) {
-                       req->end = NULL;
-                       __fuse_get_request(req);
-                       spin_unlock(&fc->lock);
-                       wait_event(req->waitq, !req->locked);
-                       end(fc, req);
-                       fuse_put_request(fc, req);
-                       spin_lock(&fc->lock);
-               }
+               req->aborted = 1;
+               if (!req->locked)
+                       list_move(&req->list, &to_end);
+       }
+       while (!list_empty(&to_end)) {
+               req = list_first_entry(&to_end, struct fuse_req, list);
+               __fuse_get_request(req);
+               request_end(fc, req);
+               spin_lock(&fc->lock);
        }
 }
 
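A side note on the rewritten end_io_requests(): it is a two-phase
pattern.  Under fc->lock, every in-flight request is marked aborted
and only the unlocked ones are moved to a private to_end list; that
list is then drained with request_end(), which drops fc->lock, hence
the spin_lock() at the bottom of the while loop.  A toy model of the
same shape, with an array standing in for list_head (purely
illustrative, not kernel code):

#include <stdio.h>

enum { NREQ = 4 };

struct mreq { int locked, aborted, ended; };

int main(void)
{
        struct mreq io[NREQ] = { {1,0,0}, {0,0,0}, {1,0,0}, {0,0,0} };
        int to_end[NREQ], n = 0, i;

        /* Phase 1 (kernel: under fc->lock): mark everything aborted,
         * collect only the unlocked requests onto a private list. */
        for (i = 0; i < NREQ; i++) {
                io[i].aborted = 1;
                if (!io[i].locked)
                        to_end[n++] = i;
        }

        /* Phase 2: end the collected requests.  In the kernel,
         * request_end() releases fc->lock, so the lock is retaken
         * each iteration; the private list itself needs no lock. */
        for (i = 0; i < n; i++) {
                io[to_end[i]].ended = 1;
                printf("ended request %d\n", to_end[i]);
        }

        /* Requests 0 and 2 stay locked; their copiers will see
         * req->aborted via unlock_request() and end them later. */
        return 0;
}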
@@ -2175,13 +2153,8 @@ static void end_polls(struct fuse_conn *fc)
  * is the combination of an asynchronous request and the tricky
  * deadlock (see Documentation/filesystems/fuse.txt).
  *
- * During the aborting, progression of requests from the pending and
- * processing lists onto the io list, and progression of new requests
- * onto the pending list is prevented by req->connected being false.
- *
- * Progression of requests under I/O to the processing list is
- * prevented by the req->aborted flag being true for these requests.
- * For this reason requests on the io list must be aborted first.
+ * Request progression from one list to the next is prevented by
+ * fc->connected being false.
  */
 void fuse_abort_conn(struct fuse_conn *fc)
 {
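
With req->aborted reduced to a flag for requests already under I/O,
fc->connected becomes the single gate for list transitions: every move
(pending -> io -> processing) rechecks it under fc->lock.  A sketch of
that gating, again as a userspace model with invented names:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool connected = true;

/* Models fuse_dev_do_read() claiming a request (pending -> io):
 * the move only happens if the connection is still up. */
static int claim_request(void)
{
        int err = 0;

        pthread_mutex_lock(&lock);
        if (!connected)
                err = -1;       /* kernel: -ENODEV */
        /* else: list_move(&req->list, &fc->io); copy it out; ... */
        pthread_mutex_unlock(&lock);
        return err;
}

static void abort_conn(void)
{
        pthread_mutex_lock(&lock);
        connected = false;      /* one flag stops all progression */
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        abort_conn();
        if (claim_request())
                printf("reader: aborted, no new requests claimed\n");
        return 0;
}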
