The branch, master has been updated
       via  051b81a vfs_glusterfs: Implement AIO support
       via  26b3544 vfs_glusterfs: Change sys_get_acl_file/fd to return ACLs corresponding to mode bits when there are no ACLs set.
      from  770f222 script/autobuild.py: use --picky-developer for the samba-ctdb target

https://git.samba.org/?p=samba.git;a=shortlog;h=master


- Log -----------------------------------------------------------------
commit 051b81aac81c7ccb234da221e9f8272b70b265ce
Author: Poornima G <[email protected]>
Date:   Thu Dec 11 07:35:10 2014 +0530

    vfs_glusterfs: Implement AIO support
    
    Signed-off-by: Poornima G <[email protected]>
    
    Reviewed-by: Guenther Deschner <[email protected]>
    Reviewed-by: Michael Adam <[email protected]>
    
    Autobuild-User(master): Günther Deschner <[email protected]>
    Autobuild-Date(master): Wed Dec 17 16:35:37 CET 2014 on sn-devel-104
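
    For context: this patch hands reads, writes and fsyncs to libgfapi,
    whose completion callbacks run on glusterfs-owned threads. Because
    tevent_req_done() may only be called from the smbd main thread,
    completed requests are parked on a list and the main event loop is
    woken through an eventfd. Below is a minimal standalone sketch of
    that eventfd handshake (hypothetical demo code, not part of this
    changeset):

#include <sys/eventfd.h>
#include <pthread.h>
#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int event_fd = -1;

static void *worker(void *arg)
{
        uint64_t u = 1;

        /* ... the async IO would complete here ... */

        /* Wake the main loop; the kernel adds u to the counter. */
        if (write(event_fd, &u, sizeof(u)) != sizeof(u)) {
                perror("write(eventfd)");
        }
        return NULL;
}

int main(void)
{
        pthread_t t;
        struct pollfd pfd;
        uint64_t u = 0;

        event_fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
        if (event_fd == -1) {
                perror("eventfd");
                return 1;
        }

        pthread_create(&t, NULL, worker, NULL);

        /* tevent_add_fd() plays this role in the module. */
        pfd.fd = event_fd;
        pfd.events = POLLIN;
        poll(&pfd, 1, -1);

        /* One read drains the counter, however many writes happened. */
        if (read(event_fd, &u, sizeof(u)) == sizeof(u)) {
                printf("%llu completion(s) pending\n",
                       (unsigned long long)u);
        }

        pthread_join(t, NULL);
        close(event_fd);
        return 0;
}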

commit 26b3544251babdfcdf5ada338a4ed39ff18bc47a
Author: Poornima G <[email protected]>
Date:   Fri Dec 12 14:11:49 2014 +0100

    vfs_glusterfs: Change sys_get_acl_file/fd to return ACLs corresponding to mode bits when there are no ACLs set.
    
    Signed-off-by: Poornima G <[email protected]>
    
    Reviewed-by: Guenther Deschner <[email protected]>
    Reviewed-by: Michael Adam <[email protected]>
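
    This patch synthesizes a three-entry ACL (USER_OBJ, GROUP_OBJ, OTHER)
    from the file mode when no POSIX ACL xattr is stored. The bit
    arithmetic is the usual rwx-triplet extraction; a tiny standalone
    check of the shifts (hypothetical demo code, not part of this
    changeset):

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
        mode_t mode = 0640;     /* rw-r----- */

        /* The same shifts mode_to_smb_acls() uses for its entries. */
        printf("user:  %o\n", (unsigned)((mode & S_IRWXU) >> 6)); /* 6 */
        printf("group: %o\n", (unsigned)((mode & S_IRWXG) >> 3)); /* 4 */
        printf("other: %o\n", (unsigned)(mode & S_IRWXO));        /* 0 */
        return 0;
}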

-----------------------------------------------------------------------

Summary of changes:
 source3/modules/vfs_glusterfs.c | 313 ++++++++++++++++++++++++++++++++++++++--
 source3/wscript                 |   1 +
 2 files changed, 302 insertions(+), 12 deletions(-)


Changeset truncated at 500 lines:

diff --git a/source3/modules/vfs_glusterfs.c b/source3/modules/vfs_glusterfs.c
index c222164..68aea40 100644
--- a/source3/modules/vfs_glusterfs.c
+++ b/source3/modules/vfs_glusterfs.c
@@ -26,8 +26,6 @@
  * @brief  Samba VFS module for glusterfs
  *
  * @todo
- *   - AIO support\n
- *     See, for example \c vfs_aio_linux.c in the \c sourc3/modules directory
  *   - sendfile/recvfile support
  *
  * A Samba VFS module for GlusterFS, based on Gluster's libgfapi.
@@ -42,9 +40,24 @@
 #include <stdio.h>
 #include "api/glfs.h"
 #include "lib/util/dlinklist.h"
+#include "lib/util/tevent_unix.h"
+#ifdef HAVE_SYS_EVENTFD_H
+#include <sys/eventfd.h>
+#endif
+#include <pthread.h>
+#include "smbd/globals.h"
 
 #define DEFAULT_VOLFILE_SERVER "localhost"
 
+#ifdef HAVE_EVENTFD
+static pthread_mutex_t lock_req_list = PTHREAD_MUTEX_INITIALIZER;
+static int event_fd = -1;
+static struct tevent_fd *aio_read_event = NULL;
+static struct tevent_req **req_producer_list = NULL;
+static struct tevent_req **req_consumer_list = NULL;
+static uint64_t req_counter = 0;
+#endif
+
 /**
  * Helper to convert struct stat to struct stat_ex.
  */
@@ -482,20 +495,174 @@ static ssize_t vfs_gluster_pread(struct vfs_handle_struct *handle,
        return glfs_pread(*(glfs_fd_t **)VFS_FETCH_FSP_EXTENSION(handle, fsp), data, n, offset, 0);
 }
 
+struct glusterfs_aio_state {
+       ssize_t ret;
+       int err;
+};
+
+/*
+ * This function is the callback that will be called on glusterfs
+ * threads once the submitted async IO is complete. To notify Samba
+ * of the completion, we use the eventfd mechanism.
+ */
+static void aio_glusterfs_done(glfs_fd_t *fd, ssize_t ret, void *data)
+{
+#ifdef HAVE_EVENTFD
+       struct tevent_req *req = NULL;
+       struct glusterfs_aio_state *state = NULL;
+       int i, sts = 0;
+       uint64_t u = 1;
+
+       req = talloc_get_type_abort(data, struct tevent_req);
+       state = tevent_req_data(req, struct glusterfs_aio_state);
+
+       if (ret < 0) {
+               state->ret = -1;
+               state->err = errno;
+       } else {
+               state->ret = ret;
+               state->err = 0;
+       }
+
+       /*
+        * Store the reqs that need to be completed by calling
+        * tevent_req_done(). tevent_req_done() cannot be called
+        * here, as it is not designed to be executed in a
+        * multithreaded environment; it must be called from the
+        * smbd main thread.
+        */
+       pthread_mutex_lock (&lock_req_list);
+       {
+               for (i = 0 ; i < aio_pending_size ; i++) {
+                       if(!req_producer_list[i]) {
+                               req_producer_list[i] = req;
+                               req_counter = req_counter + 1;
+                               break;
+                       }
+               }
+       }
+       pthread_mutex_unlock (&lock_req_list);
+
+       /*
+        * Notify only once for a batch of fops.
+        */
+       if (req_counter == 1) {
+               sts = write (event_fd, &u, sizeof(uint64_t));
+               if (sts < 0 && errno == EAGAIN)
+                       DEBUG(0,("\nWRITE: reached max value"));
+       }
+       return;
+#endif
+}
+
+#ifdef HAVE_EVENTFD
+static void aio_tevent_fd_done(struct tevent_context *event_ctx,
+                               struct tevent_fd *fde,
+                               uint16 flags, void *data)
+{
+       struct tevent_req *req = NULL;
+       struct tevent_req **temp = NULL;
+       int i = 0, sts = 0;
+       uint64_t u = 0;
+
+       sts = read (event_fd, &u, sizeof(uint64_t));
+       if (sts < 0 && errno == EAGAIN)
+               DEBUG(0,("\nREAD: eventfd read failed (%s)",strerror(errno)));
+
+       pthread_mutex_lock (&lock_req_list);
+       {
+               temp = req_producer_list;
+               req_producer_list = req_consumer_list;
+               req_consumer_list = temp;
+               req_counter = 0;
+       }
+       pthread_mutex_unlock (&lock_req_list);
+
+       for (i = 0 ; i < aio_pending_size ; i++) {
+               req = req_consumer_list[i];
+               if (req) {
+                       tevent_req_done(req);
+                       req_consumer_list[i] = 0;
+               }
+       }
+       return;
+}
+#endif
+
+static bool init_gluster_aio(struct vfs_handle_struct *handle)
+{
+#ifdef HAVE_EVENTFD
+       if (event_fd != -1) {
+               /*
+                * Already initialized.
+                */
+               return true;
+       }
+
+       event_fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
+       if (event_fd == -1) {
+               goto fail;
+       }
+
+       aio_read_event = tevent_add_fd(handle->conn->sconn->ev_ctx,
+                                       NULL,
+                                       event_fd,
+                                       TEVENT_FD_READ,
+                                       aio_tevent_fd_done,
+                                       NULL);
+       if (aio_read_event == NULL) {
+               goto fail;
+       }
+
+       req_producer_list = talloc_zero_array(NULL, struct tevent_req *,
+                                               aio_pending_size);
+       req_consumer_list = talloc_zero_array(NULL, struct tevent_req *,
+                                               aio_pending_size);
+
+       return true;
+fail:
+       TALLOC_FREE(aio_read_event);
+       if (event_fd != -1) {
+               close(event_fd);
+               event_fd = -1;
+       }
+#endif
+       return false;
+}
+
 static struct tevent_req *vfs_gluster_pread_send(struct vfs_handle_struct
                                                 *handle, TALLOC_CTX *mem_ctx,
                                                 struct tevent_context *ev,
                                                 files_struct *fsp, void *data,
                                                 size_t n, off_t offset)
 {
+       struct tevent_req *req = NULL;
+       struct glusterfs_aio_state *state = NULL;
+       int ret = 0;
+
+#ifndef HAVE_EVENTFD
        errno = ENOTSUP;
        return NULL;
-}
+#endif
 
-static ssize_t vfs_gluster_pread_recv(struct tevent_req *req, int *err)
-{
-       errno = ENOTSUP;
-       return -1;
+       req = tevent_req_create(mem_ctx, &state, struct glusterfs_aio_state);
+       if (req == NULL) {
+               return NULL;
+       }
+
+       if (!init_gluster_aio(handle)) {
+               tevent_req_error(req, EIO);
+               return tevent_req_post(req, ev);
+       }
+       ret = glfs_pread_async(*(glfs_fd_t **)VFS_FETCH_FSP_EXTENSION(handle,
+                               fsp), data, n, offset, 0, aio_glusterfs_done,
+                               req);
+       if (ret < 0) {
+               tevent_req_error(req, -ret);
+               return tevent_req_post(req, ev);
+       }
+
+       return req;
 }
 
 static ssize_t vfs_gluster_write(struct vfs_handle_struct *handle,
@@ -518,14 +685,53 @@ static struct tevent_req *vfs_gluster_pwrite_send(struct vfs_handle_struct
                                                  const void *data, size_t n,
                                                  off_t offset)
 {
+       struct tevent_req *req = NULL;
+       struct glusterfs_aio_state *state = NULL;
+       int ret = 0;
+
+#ifndef HAVE_EVENTFD
        errno = ENOTSUP;
        return NULL;
+#endif
+
+       req = tevent_req_create(mem_ctx, &state, struct glusterfs_aio_state);
+       if (req == NULL) {
+               return NULL;
+       }
+       if (!init_gluster_aio(handle)) {
+               tevent_req_error(req, EIO);
+               return tevent_req_post(req, ev);
+       }
+       ret = glfs_pwrite_async(*(glfs_fd_t **)VFS_FETCH_FSP_EXTENSION(handle,
+                               fsp), data, n, offset, 0, aio_glusterfs_done,
+                               req);
+       if (ret < 0) {
+               tevent_req_error(req, -ret);
+               return tevent_req_post(req, ev);
+       }
+       return req;
 }
 
-static ssize_t vfs_gluster_pwrite_recv(struct tevent_req *req, int *err)
+static ssize_t vfs_gluster_recv(struct tevent_req *req, int *err)
 {
+       struct glusterfs_aio_state *state = NULL;
+
+#ifndef HAVE_EVENTFD
        errno = ENOTSUP;
        return -1;
+#endif
+       state = tevent_req_data(req, struct glusterfs_aio_state);
+       if (state == NULL) {
+               return -1;
+       }
+
+       if (tevent_req_is_unix_error(req, err)) {
+               return -1;
+       }
+       if (state->ret == -1) {
+               *err = state->err;
+       }
+       return state->ret;
 }
 
 static off_t vfs_gluster_lseek(struct vfs_handle_struct *handle,
@@ -570,14 +776,38 @@ static struct tevent_req *vfs_gluster_fsync_send(struct vfs_handle_struct
                                                 struct tevent_context *ev,
                                                 files_struct *fsp)
 {
+       struct tevent_req *req = NULL;
+       struct glusterfs_aio_state *state = NULL;
+       int ret = 0;
+
+#ifndef HAVE_EVENTFD
        errno = ENOTSUP;
        return NULL;
+#endif
+
+       req = tevent_req_create(mem_ctx, &state, struct glusterfs_aio_state);
+       if (req == NULL) {
+               return NULL;
+       }
+       if (!init_gluster_aio(handle)) {
+               tevent_req_error(req, EIO);
+               return tevent_req_post(req, ev);
+       }
+       ret = glfs_fsync_async(*(glfs_fd_t **)VFS_FETCH_FSP_EXTENSION(handle,
+                               fsp), aio_glusterfs_done, req);
+       if (ret < 0) {
+               tevent_req_error(req, -ret);
+               return tevent_req_post(req, ev);
+       }
+       return req;
 }
 
 static int vfs_gluster_fsync_recv(struct tevent_req *req, int *err)
 {
-       errno = ENOTSUP;
-       return -1;
+       /*
+        * Use implicit conversion ssize_t->int
+        */
+       return vfs_gluster_recv(req, err);
 }
 
 static int vfs_gluster_stat(struct vfs_handle_struct *handle,
@@ -1029,6 +1259,39 @@ static int vfs_gluster_set_offline(struct vfs_handle_struct *handle,
 
 #define GLUSTER_ACL_SIZE(n)       (GLUSTER_ACL_HEADER_SIZE + (n * GLUSTER_ACL_ENTRY_SIZE))
 
+static SMB_ACL_T mode_to_smb_acls(const struct stat *mode, TALLOC_CTX *mem_ctx)
+{
+       struct smb_acl_t *result;
+       int count;
+
+       count = 3;
+       result = sys_acl_init(mem_ctx);
+       if (!result) {
+               errno = ENOMEM;
+               return NULL;
+       }
+
+       result->acl = talloc_array(result, struct smb_acl_entry, count);
+       if (!result->acl) {
+               errno = ENOMEM;
+               talloc_free(result);
+               return NULL;
+       }
+
+       result->count = count;
+
+       result->acl[0].a_type = SMB_ACL_USER_OBJ;
+       result->acl[0].a_perm = (mode->st_mode & S_IRWXU) >> 6;
+
+       result->acl[1].a_type = SMB_ACL_GROUP_OBJ;
+       result->acl[1].a_perm = (mode->st_mode & S_IRWXG) >> 3;
+
+       result->acl[2].a_type = SMB_ACL_OTHER;
+       result->acl[2].a_perm = mode->st_mode & S_IRWXO;
+
+       return result;
+}
+
 static SMB_ACL_T gluster_to_smb_acl(const char *buf, size_t xattr_size,
                                    TALLOC_CTX *mem_ctx)
 {
@@ -1296,6 +1559,7 @@ static SMB_ACL_T vfs_gluster_sys_acl_get_file(struct vfs_handle_struct *handle,
                                              TALLOC_CTX *mem_ctx)
 {
        struct smb_acl_t *result;
+       struct stat st;
        char *buf;
        const char *key;
        ssize_t ret, size = GLUSTER_ACL_SIZE(20);
@@ -1328,6 +1592,18 @@ static SMB_ACL_T vfs_gluster_sys_acl_get_file(struct vfs_handle_struct *handle,
                        ret = glfs_getxattr(handle->data, path_p, key, buf, ret);
                }
        }
+
+       /* Retrieving the ACL from the xattr has ultimately failed;
+        * fall back to a mode-to-ACL mapping. */
+
+       if (ret == -1 && errno == ENODATA) {
+               ret = glfs_stat(handle->data, path_p, &st);
+               if (ret == 0) {
+                       result = mode_to_smb_acls(&st, mem_ctx);
+                       return result;
+               }
+       }
+
        if (ret <= 0) {
                return NULL;
        }
@@ -1342,6 +1618,7 @@ static SMB_ACL_T vfs_gluster_sys_acl_get_fd(struct vfs_handle_struct *handle,
                                            TALLOC_CTX *mem_ctx)
 {
        struct smb_acl_t *result;
+       struct stat st;
        ssize_t ret, size = GLUSTER_ACL_SIZE(20);
        char *buf;
        glfs_fd_t *glfd;
@@ -1365,6 +1642,18 @@ static SMB_ACL_T vfs_gluster_sys_acl_get_fd(struct vfs_handle_struct *handle,
                                             buf, ret);
                }
        }
+
+       /* Retrieving the ACL from the xattr has ultimately failed;
+        * fall back to a mode-to-ACL mapping. */
+
+       if (ret == -1 && errno == ENODATA) {
+               ret = glfs_fstat(glfd, &st);
+               if (ret == 0) {
+                       result = mode_to_smb_acls(&st, mem_ctx);
+                       return result;
+               }
+       }
+
        if (ret <= 0) {
                return NULL;
        }
@@ -1471,11 +1760,11 @@ static struct vfs_fn_pointers glusterfs_fns = {
        .read_fn = vfs_gluster_read,
        .pread_fn = vfs_gluster_pread,
        .pread_send_fn = vfs_gluster_pread_send,
-       .pread_recv_fn = vfs_gluster_pread_recv,
+       .pread_recv_fn = vfs_gluster_recv,
        .write_fn = vfs_gluster_write,
        .pwrite_fn = vfs_gluster_pwrite,
        .pwrite_send_fn = vfs_gluster_pwrite_send,
-       .pwrite_recv_fn = vfs_gluster_pwrite_recv,
+       .pwrite_recv_fn = vfs_gluster_recv,
        .lseek_fn = vfs_gluster_lseek,
        .sendfile_fn = vfs_gluster_sendfile,
        .recvfile_fn = vfs_gluster_recvfile,
diff --git a/source3/wscript b/source3/wscript
index f61c049..bb57db5 100644
--- a/source3/wscript
+++ b/source3/wscript
@@ -551,6 +551,7 @@ return acl_get_perm_np(permset_d, perm);
         conf.DEFINE('HAVE_NO_AIO', '1')
 
     if host_os.rfind('linux') > -1:
+       conf.CHECK_FUNCS('eventfd')
        conf.CHECK_FUNCS_IN('io_submit', 'aio')
        conf.CHECK_CODE('''
 struct io_event ioev;

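For completeness, the new send/recv pairs follow the standard tevent
request pattern. A hypothetical caller (sketch only, assuming the module's
context; real smbd code goes through the SMB_VFS_PREAD_SEND/RECV wrappers
rather than calling the module directly) would drive them roughly like
this:

static void my_read_done(struct tevent_req *req)
{
        int err = 0;
        ssize_t nread;

        /* One recv function is shared by pread and pwrite here. */
        nread = vfs_gluster_recv(req, &err);
        TALLOC_FREE(req);
        if (nread == -1) {
                DEBUG(1, ("pread failed: %s\n", strerror(err)));
                return;
        }
        /* ... consume nread bytes ... */
}

static void start_read(struct vfs_handle_struct *handle,
                       struct tevent_context *ev,
                       files_struct *fsp, void *buf, size_t n, off_t ofs)
{
        struct tevent_req *req;

        req = vfs_gluster_pread_send(handle, talloc_tos(), ev,
                                     fsp, buf, n, ofs);
        if (req == NULL) {
                return; /* errno is set, e.g. ENOTSUP without eventfd */
        }
        tevent_req_set_callback(req, my_read_done, NULL);
}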

-- 
Samba Shared Repository
