The branch, master has been updated
       via  36813a4 vfs_glusterfs: Move vfs_gluster_write and vfs_gluster_pwrite.
       via  8d3b900 vfs_glusterfs: Add white space so vfs_glusterfs_pread_send and vfs_glusterfs_pwrite_send match.
       via  640ecbb vfs_glusterfs: Fix AIO crash on smb.conf reload.
       via  79df4ca docs: Fix typos in man vfs_gpfs.
      from  defa49e s4-torture: Remove obsolte code in backupkey_heimdal rpc test

https://git.samba.org/?p=samba.git;a=shortlog;h=master


- Log -----------------------------------------------------------------
commit 36813a42036af906e2a02a90e42fd10aebf3d6a3
Author: Ira Cooper <[email protected]>
Date:   Fri Dec 11 06:27:17 2015 -0500

    vfs_glusterfs: Move vfs_gluster_write and vfs_gluster_pwrite.
    
    Move the functions to a more logical location.
    
    Signed-off-by: Ira Cooper <[email protected]>
    Reviewed-by: Jeremy Allison <[email protected]>
    
    Autobuild-User(master): Jeremy Allison <[email protected]>
    Autobuild-Date(master): Sat Dec 12 01:03:40 CET 2015 on sn-devel-104

commit 8d3b9009e5dd9fe1b55bba92d3d69423eed3c649
Author: Ira Cooper <[email protected]>
Date:   Fri Dec 11 07:37:53 2015 -0500

    vfs_glusterfs: Add white space so vfs_glusterfs_pread_send and vfs_glusterfs_pwrite_send match.
    
    These two functions are basically the same thing, so they should be
    formatted the same.
    
    Signed-off-by: Ira Cooper <[email protected]>
    Reviewed-by: Jeremy Allison <[email protected]>

commit 640ecbbcd04e8dcc96dcfae7f22e457df024b6cc
Author: Ira Cooper <[email protected]>
Date:   Wed Nov 18 11:09:06 2015 -0500

    vfs_glusterfs: Fix AIO crash on smb.conf reload.
    
    This fixes an issue where we could not handle cancellation properly,
    so we crashed when smb.conf was reloaded.
    
    Signed-off-by: Ira Cooper <[email protected]>
    Reviewed-by: Jeremy Allison <[email protected]>
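
    The fix decouples the AIO completion state from the tevent_req: the
    glusterfs_aio_state is allocated on the NULL talloc context so that it
    can outlive the request, while a small glusterfs_aio_wrapper hanging off
    the request carries a talloc destructor whose only job is to mark that
    state as cancelled. A minimal sketch of the pattern using plain libtalloc
    follows; the struct names and the main() driver are illustrative, not
    the module code itself.

    #include <stdbool.h>
    #include <stdio.h>
    #include <talloc.h>

    struct aio_state {              /* on the NULL context, outlives the request */
            bool cancelled;
    };

    struct aio_wrapper {            /* private data hanging off the request */
            struct aio_state *state;
    };

    static int aio_wrapper_destructor(struct aio_wrapper *wrap)
    {
            /* The request is being torn down; flag the shared state. */
            wrap->state->cancelled = true;
            return 0;
    }

    int main(void)
    {
            TALLOC_CTX *req = talloc_new(NULL); /* stand-in for the tevent_req */
            struct aio_wrapper *wrap = talloc_zero(req, struct aio_wrapper);
            struct aio_state *state = talloc_zero(NULL, struct aio_state);

            if (req == NULL || wrap == NULL || state == NULL) {
                    return 1;
            }

            wrap->state = state;
            talloc_set_destructor(wrap, aio_wrapper_destructor);

            TALLOC_FREE(req);       /* e.g. an smb.conf reload frees the request */

            if (state->cancelled) {
                    /* completion side: no request left, just drop the state */
                    TALLOC_FREE(state);
                    printf("request cancelled, state cleaned up\n");
            }
            return 0;
    }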

commit 79df4caed3b819cc1cd67c4033f3ac700086ab99
Author: Karolin Seeger <[email protected]>
Date:   Fri Dec 11 10:23:28 2015 +0100

    docs: Fix typos in man vfs_gpfs.
    
    BUG: https://bugzilla.samba.org/show_bug.cgi?id=11641
    Duplicate "acl map full control" entry in man vfs_gpfs
    
    Signed-off-by: Karolin Seeger <[email protected]>
    Reviewed-by: Jeremy Allison <[email protected]>

-----------------------------------------------------------------------

Summary of changes:
 docs-xml/manpages/vfs_gpfs.8.xml |   6 +-
 source3/modules/vfs_glusterfs.c  | 165 ++++++++++++++++++++++++++++++---------
 2 files changed, 130 insertions(+), 41 deletions(-)


Changeset truncated at 500 lines:

diff --git a/docs-xml/manpages/vfs_gpfs.8.xml b/docs-xml/manpages/vfs_gpfs.8.xml
index 7bb4e90..e0c5951 100644
--- a/docs-xml/manpages/vfs_gpfs.8.xml
+++ b/docs-xml/manpages/vfs_gpfs.8.xml
@@ -42,14 +42,14 @@
        </itemizedlist>
        </para>
 
-       <para><command>NOTE:</command>This module follows the posix-acl behaviour
+       <para><command>NOTE:</command> This module follows the posix-acl behaviour
        and hence allows permission stealing via chown. Samba might allow at a later
        point in time, to restrict the chown via this module as such restrictions
        are the responsibility of the underlying filesystem than of Samba.
        </para>
 
        <para>This module makes use of the smb.conf parameter
-       <smbconfoption name="acl map full control">acl map full control</smbconfoption>
+       <smbconfoption name="acl map full control"></smbconfoption>.
        When set to yes (the default), this parameter will add in the FILE_DELETE_CHILD
        bit on a returned ACE entry for a file (not a directory) that already
        contains all file permissions except for FILE_DELETE and FILE_DELETE_CHILD.
@@ -471,7 +471,7 @@
        </para>
        <para>
        At build time, only the header file <command>gpfs_gpl.h</command>
-       is required , which is a symlink to <command>gpfs.h</command> in
+       is required, which is a symlink to <command>gpfs.h</command> in
        gpfs versions newer than 3.2.1 PTF8.
        </para>
 </refsect1>
diff --git a/source3/modules/vfs_glusterfs.c b/source3/modules/vfs_glusterfs.c
index cf8066e..8025cd6 100644
--- a/source3/modules/vfs_glusterfs.c
+++ b/source3/modules/vfs_glusterfs.c
@@ -487,11 +487,28 @@ static ssize_t vfs_gluster_pread(struct vfs_handle_struct *handle,
        return glfs_pread(*(glfs_fd_t **)VFS_FETCH_FSP_EXTENSION(handle, fsp), data, n, offset, 0);
 }
 
+struct glusterfs_aio_state;
+
+struct glusterfs_aio_wrapper {
+       struct glusterfs_aio_state *state;
+};
+
 struct glusterfs_aio_state {
        ssize_t ret;
        int err;
+       struct tevent_req *req;
+       bool cancelled;
 };
 
+static int aio_wrapper_destructor(void *ptr)
+{
+       struct glusterfs_aio_wrapper *wrap = (struct glusterfs_aio_wrapper *)ptr;
+
+       wrap->state->cancelled = true;
+
+       return 0;
+}
+
 /*
  * This function is the callback that will be called on glusterfs
  * threads once the async IO submitted is complete. To notify
@@ -499,12 +516,10 @@ struct glusterfs_aio_state {
  */
 static void aio_glusterfs_done(glfs_fd_t *fd, ssize_t ret, void *data)
 {
-       struct tevent_req *req = NULL;
        struct glusterfs_aio_state *state = NULL;
        int sts = 0;
 
-       req = talloc_get_type_abort(data, struct tevent_req);
-       state = tevent_req_data(req, struct glusterfs_aio_state);
+       state = (struct glusterfs_aio_state *)data;
 
        if (ret < 0) {
                state->ret = -1;
@@ -515,10 +530,10 @@ static void aio_glusterfs_done(glfs_fd_t *fd, ssize_t ret, void *data)
        }
 
        /*
-        * Write the pointer to each req that needs to be completed
-        * by calling tevent_req_done(). tevent_req_done() cannot
-        * be called here, as it is not designed to be executed
-        * in the multithread environment, tevent_req_done() must be
+        * Write the state pointer to glusterfs_aio_state to the
+        * pipe, so we can call tevent_req_done() from the main thread,
+        * because tevent_req_done() is not designed to be executed in
+        * the multithread environment, so tevent_req_done() must be
         * executed from the smbd main thread.
         *
         * write(2) on pipes with sizes under _POSIX_PIPE_BUF
@@ -529,7 +544,7 @@ static void aio_glusterfs_done(glfs_fd_t *fd, ssize_t ret, void *data)
         * that we can trust it here.
         */
 
-       sts = sys_write(write_fd, &req, sizeof(struct tevent_req *));
+       sts = sys_write(write_fd, &state, sizeof(struct glusterfs_aio_state *));
        if (sts < 0) {
                DEBUG(0,("\nWrite to pipe failed (%s)", strerror(errno)));
        }
@@ -545,6 +560,7 @@ static void aio_tevent_fd_done(struct tevent_context *event_ctx,
                                uint16_t flags, void *data)
 {
        struct tevent_req *req = NULL;
+       struct glusterfs_aio_state *state = NULL;
        int sts = 0;
 
        /*
@@ -557,11 +573,24 @@ static void aio_tevent_fd_done(struct tevent_context *event_ctx,
         * can trust it here.
         */
 
-       sts = sys_read(read_fd, &req, sizeof(struct tevent_req *));
+       sts = sys_read(read_fd, &state, sizeof(struct glusterfs_aio_state *));
+
        if (sts < 0) {
                DEBUG(0,("\nRead from pipe failed (%s)", strerror(errno)));
        }
 
+       /* if we've cancelled the op, there is no req, so just clean up. */
+       if (state->cancelled == true) {
+               TALLOC_FREE(state);
+               return;
+       }
+
+       req = state->req;
+
        if (req) {
                tevent_req_done(req);
        }
@@ -610,28 +639,62 @@ fail:
        return false;
 }
 
-static struct tevent_req *vfs_gluster_pread_send(struct vfs_handle_struct
-                                                *handle, TALLOC_CTX *mem_ctx,
-                                                struct tevent_context *ev,
-                                                files_struct *fsp, void *data,
-                                                size_t n, off_t offset)
+static struct glusterfs_aio_state *aio_state_create(TALLOC_CTX *mem_ctx)
 {
        struct tevent_req *req = NULL;
        struct glusterfs_aio_state *state = NULL;
-       int ret = 0;
+       struct glusterfs_aio_wrapper *wrapper = NULL;
+
+       req = tevent_req_create(mem_ctx, &wrapper, struct glusterfs_aio_wrapper);
 
-       req = tevent_req_create(mem_ctx, &state, struct glusterfs_aio_state);
        if (req == NULL) {
                return NULL;
        }
 
+       state = talloc(NULL, struct glusterfs_aio_state);
+
+       if (state == NULL) {
+               TALLOC_FREE(req);
+               return NULL;
+       }
+
+       state->cancelled = false;
+       state->ret = 0;
+       state->err = 0;
+       state->req = req;
+
+       wrapper->state = state;
+
+       return state;
+}
+
+static struct tevent_req *vfs_gluster_pread_send(struct vfs_handle_struct
+                                                 *handle, TALLOC_CTX *mem_ctx,
+                                                 struct tevent_context *ev,
+                                                 files_struct *fsp,
+                                                 void *data, size_t n,
+                                                 off_t offset)
+{
+       struct glusterfs_aio_state *state = NULL;
+       struct tevent_req *req = NULL;
+       int ret = 0;
+
+       state = aio_state_create(mem_ctx);
+
+       if (state == NULL) {
+               return NULL;
+       }
+
+       req = state->req;
+
        if (!init_gluster_aio(handle)) {
                tevent_req_error(req, EIO);
                return tevent_req_post(req, ev);
        }
+
        ret = glfs_pread_async(*(glfs_fd_t **)VFS_FETCH_FSP_EXTENSION(handle,
                                fsp), data, n, offset, 0, aio_glusterfs_done,
-                               req);
+                               state);
        if (ret < 0) {
                tevent_req_error(req, -ret);
                return tevent_req_post(req, ev);
@@ -640,19 +703,6 @@ static struct tevent_req *vfs_gluster_pread_send(struct vfs_handle_struct
        return req;
 }
 
-static ssize_t vfs_gluster_write(struct vfs_handle_struct *handle,
-                                files_struct *fsp, const void *data, size_t n)
-{
-       return glfs_write(*(glfs_fd_t **)VFS_FETCH_FSP_EXTENSION(handle, fsp), data, n, 0);
-}
-
-static ssize_t vfs_gluster_pwrite(struct vfs_handle_struct *handle,
-                                 files_struct *fsp, const void *data,
-                                 size_t n, off_t offset)
-{
-       return glfs_pwrite(*(glfs_fd_t **)VFS_FETCH_FSP_EXTENSION(handle, fsp), data, n, offset, 0);
-}
-
 static struct tevent_req *vfs_gluster_pwrite_send(struct vfs_handle_struct
                                                  *handle, TALLOC_CTX *mem_ctx,
                                                  struct tevent_context *ev,
@@ -660,33 +710,48 @@ static struct tevent_req *vfs_gluster_pwrite_send(struct vfs_handle_struct
                                                  const void *data, size_t n,
                                                  off_t offset)
 {
-       struct tevent_req *req = NULL;
        struct glusterfs_aio_state *state = NULL;
+       struct tevent_req *req = NULL;
        int ret = 0;
 
-       req = tevent_req_create(mem_ctx, &state, struct glusterfs_aio_state);
-       if (req == NULL) {
+       state = aio_state_create(mem_ctx);
+
+       if (state == NULL) {
                return NULL;
        }
+
+       req = state->req;
+
        if (!init_gluster_aio(handle)) {
                tevent_req_error(req, EIO);
                return tevent_req_post(req, ev);
        }
+
        ret = glfs_pwrite_async(*(glfs_fd_t **)VFS_FETCH_FSP_EXTENSION(handle,
                                fsp), data, n, offset, 0, aio_glusterfs_done,
-                               req);
+                               state);
        if (ret < 0) {
                tevent_req_error(req, -ret);
                return tevent_req_post(req, ev);
        }
+
        return req;
 }
 
 static ssize_t vfs_gluster_recv(struct tevent_req *req, int *err)
 {
        struct glusterfs_aio_state *state = NULL;
+       struct glusterfs_aio_wrapper *wrapper = NULL;
+       int ret = 0;
+
+       wrapper = tevent_req_data(req, struct glusterfs_aio_wrapper);
+
+       if (wrapper == NULL) {
+               return -1;
+       }
+
+       state = wrapper->state;
 
-       state = tevent_req_data(req, struct glusterfs_aio_state);
        if (state == NULL) {
                return -1;
        }
@@ -697,7 +762,27 @@ static ssize_t vfs_gluster_recv(struct tevent_req *req, int *err)
        if (state->ret == -1) {
                *err = state->err;
        }
-       return state->ret;
+
+       ret = state->ret;
+
+       /* Clean up the state, it is in a NULL context. */
+
+       TALLOC_FREE(state);
+
+       return ret;
+}
+
+static ssize_t vfs_gluster_write(struct vfs_handle_struct *handle,
+                                files_struct *fsp, const void *data, size_t n)
+{
+       return glfs_write(*(glfs_fd_t **)VFS_FETCH_FSP_EXTENSION(handle, fsp), data, n, 0);
+}
+
+static ssize_t vfs_gluster_pwrite(struct vfs_handle_struct *handle,
+                                 files_struct *fsp, const void *data,
+                                 size_t n, off_t offset)
+{
+       return glfs_pwrite(*(glfs_fd_t **)VFS_FETCH_FSP_EXTENSION(handle, fsp), data, n, offset, 0);
 }
 
 static off_t vfs_gluster_lseek(struct vfs_handle_struct *handle,
@@ -746,10 +831,14 @@ static struct tevent_req *vfs_gluster_fsync_send(struct vfs_handle_struct
        struct glusterfs_aio_state *state = NULL;
        int ret = 0;
 
-       req = tevent_req_create(mem_ctx, &state, struct glusterfs_aio_state);
-       if (req == NULL) {
+       state = aio_state_create(mem_ctx);
+
+       if (state == NULL) {
                return NULL;
        }
+
+       req = state->req;
+
        if (!init_gluster_aio(handle)) {
                tevent_req_error(req, EIO);
                return tevent_req_post(req, ev);


-- 
Samba Shared Repository
