The branch, master has been updated
       via  c952e11 smbd: Remove byte_range_lock->read_only
       via  8c435cd smbd: Remove the brl_get_locks wrapper
       via  440e331 smbd: brl_get_locks_internal is always called r/w now
       via  5d8f64c smbd: Restructure brl_get_locks_readonly
       via  2b3c5be smbd: Avoid an if-statement per read/write in the non-clustered case
      from  f650bb9 smbd: Remove unused "brl->key" struct element

http://gitweb.samba.org/?p=samba.git;a=shortlog;h=master


- Log -----------------------------------------------------------------
commit c952e11859d786418f82204e6cabc6c424e71bb9
Author: Volker Lendecke <v...@samba.org>
Date:   Wed Sep 11 11:54:37 2013 +0000

    smbd: Remove byte_range_lock->read_only
    
    With the rewritten brl_get_locks_readonly we only set the destructor for
    r/w lock records anyway.
    
    Signed-off-by: Volker Lendecke <v...@samba.org>
    Reviewed-by: Stefan Metzmacher <me...@samba.org>
    
    Autobuild-User(master): Stefan Metzmacher <me...@samba.org>
    Autobuild-Date(master): Sun Oct  6 22:20:05 CEST 2013 on sn-devel-104

commit 8c435cd588bd15f444eb4d2fcd687eee02204c88
Author: Volker Lendecke <v...@samba.org>
Date:   Wed Sep 11 11:53:26 2013 +0000

    smbd: Remove the brl_get_locks wrapper
    
    Signed-off-by: Volker Lendecke <v...@samba.org>
    Reviewed-by: Stefan Metzmacher <me...@samba.org>

commit 440e331949fe8da5c09ce9ef6cf79f6e8656abe2
Author: Volker Lendecke <v...@samba.org>
Date:   Wed Sep 11 11:51:44 2013 +0000

    smbd: brl_get_locks_internal is always called r/w now
    
    Signed-off-by: Volker Lendecke <v...@samba.org>
    Reviewed-by: Stefan Metzmacher <me...@samba.org>

commit 5d8f64c47d02c2aa58f3f0c87903bbd41d086aa0
Author: Volker Lendecke <v...@samba.org>
Date:   Wed Sep 11 11:36:54 2013 +0000

    smbd: Restructure brl_get_locks_readonly
    
    This is step 1 towards getting rid of brl_get_locks_internal with its
    complex readonly business. It also optimizes two things: first, it uses
    dbwrap_parse_record to avoid a talloc and memcpy, and second, it uses
    talloc_pooled_object.
    
    Hopefully this also makes the caching logic with fsp->brlock_rec and the
    clustering escape easier to understand.
    
    Signed-off-by: Volker Lendecke <v...@samba.org>
    Reviewed-by: Stefan Metzmacher <me...@samba.org>
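
    [Editor's note: the talloc_pooled_object optimization mentioned above
    pre-sizes one allocation that covers the parent object plus its child
    buffers, so the later talloc_memdup of the lock array is served from
    that pool instead of a second malloc. Below is a minimal standalone
    sketch of the pattern, not the Samba code itself: the example_lock /
    example_locks structs are hypothetical stand-ins; build with
    "gcc demo.c -ltalloc".]

    #include <stdio.h>
    #include <string.h>
    #include <talloc.h>

    /* Hypothetical stand-ins for Samba's lock_struct / byte_range_lock. */
    struct example_lock {
            unsigned start;
            unsigned size;
    };

    struct example_locks {
            unsigned num_locks;
            struct example_lock *lock_data;
    };

    int main(void)
    {
            struct example_lock src[2] = { { 0, 10 }, { 20, 5 } };
            size_t blob_size = sizeof(src);

            /*
             * One allocation serves the parent object plus one child of
             * blob_size bytes; the talloc_memdup() below draws from this
             * pool instead of calling malloc() a second time.
             */
            struct example_locks *locks = talloc_pooled_object(
                    NULL, struct example_locks, 1, blob_size);
            if (locks == NULL) {
                    return 1;
            }

            locks->lock_data = talloc_memdup(locks, src, blob_size);
            if (locks->lock_data == NULL) {
                    TALLOC_FREE(locks);
                    return 1;
            }
            locks->num_locks = blob_size / sizeof(struct example_lock);

            printf("copied %u locks\n", locks->num_locks);

            TALLOC_FREE(locks);
            return 0;
    }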

commit 2b3c5bea1c1512bc250481690b2d968491738629
Author: Volker Lendecke <v...@samba.org>
Date:   Wed Sep 11 10:17:05 2013 +0000

    smbd: Avoid an if-statement per read/write in the non-clustered case
    
    Without clustering, fsp->brlock_rec will never be set anyway. In the
    clustering case we can't use the seqnum trick, so that path is slow
    enough that the additional if-statement does not matter. In the
    non-clustered case it might. I have not measured it, but every little
    bit helps, I guess.
    
    Signed-off-by: Volker Lendecke <v...@samba.org>
    Reviewed-by: Stefan Metzmacher <me...@samba.org>
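
    [Editor's note: the "seqnum trick" referred to here reuses the cached
    fsp->brlock_rec only while dbwrap_get_seqnum() still returns the value
    remembered at caching time, so the hot path costs one integer compare
    instead of a database fetch. Below is a self-contained sketch of that
    idea with hypothetical names (db_write, cached_read, struct cache) and
    a simulated in-memory database; it is not the dbwrap API.]

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t db_seqnum;      /* bumped on every simulated write */
    static int db_value;            /* the "record" stored in the fake db */
    static unsigned fetch_count;    /* counts real fetches for the demo */

    static void db_write(int value)
    {
            db_value = value;
            db_seqnum++;            /* invalidates all cached copies */
    }

    struct cache {
            int have_value;
            int value;
            uint64_t seqnum;        /* seqnum observed when cached */
    };

    static int cached_read(struct cache *c)
    {
            if (c->have_value && c->seqnum == db_seqnum) {
                    return c->value;        /* hit: one integer compare */
            }
            c->value = db_value;            /* miss: real fetch */
            c->seqnum = db_seqnum;
            c->have_value = 1;
            fetch_count++;
            return c->value;
    }

    int main(void)
    {
            struct cache c = {0};

            db_write(42);
            cached_read(&c);        /* fetches */
            cached_read(&c);        /* served from cache */
            db_write(43);           /* bumps seqnum */
            cached_read(&c);        /* fetches again */

            printf("fetches: %u, last value: %d\n", fetch_count, c.value);
            return 0;
    }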

-----------------------------------------------------------------------

Summary of changes:
 source3/locking/brlock.c |  168 ++++++++++++++++++++++++++++++----------------
 1 files changed, 109 insertions(+), 59 deletions(-)


Changeset truncated at 500 lines:

diff --git a/source3/locking/brlock.c b/source3/locking/brlock.c
index ee4354c..0d45501 100644
--- a/source3/locking/brlock.c
+++ b/source3/locking/brlock.c
@@ -47,7 +47,6 @@ struct byte_range_lock {
        struct files_struct *fsp;
        unsigned int num_locks;
        bool modified;
-       bool read_only;
        struct lock_struct *lock_data;
        struct db_record *record;
 };
@@ -1879,10 +1878,6 @@ int brl_forall(void (*fn)(struct file_id id, struct server_id pid,
 
 static void byte_range_lock_flush(struct byte_range_lock *br_lck)
 {
-       if (br_lck->read_only) {
-               SMB_ASSERT(!br_lck->modified);
-       }
-
        if (!br_lck->modified) {
                goto done;
        }
@@ -1910,10 +1905,7 @@ static void byte_range_lock_flush(struct byte_range_lock *br_lck)
        }
 
  done:
-
-       br_lck->read_only = true;
        br_lck->modified = false;
-
        TALLOC_FREE(br_lck->record);
 }
 
@@ -1929,12 +1921,10 @@ static int byte_range_lock_destructor(struct byte_range_lock *br_lck)
  TALLOC_FREE(brl) will release the lock in the destructor.
 ********************************************************************/
 
-static struct byte_range_lock *brl_get_locks_internal(TALLOC_CTX *mem_ctx,
-                                       files_struct *fsp, bool read_only)
+struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx, files_struct *fsp)
 {
        TDB_DATA key, data;
        struct byte_range_lock *br_lck = talloc(mem_ctx, struct byte_range_lock);
-       bool do_read_only = read_only;
 
        if (br_lck == NULL) {
                return NULL;
@@ -1947,40 +1937,22 @@ static struct byte_range_lock *brl_get_locks_internal(TALLOC_CTX *mem_ctx,
        key.dptr = (uint8 *)&fsp->file_id;
        key.dsize = sizeof(struct file_id);
 
-       if (!fsp->lockdb_clean) {
-               /* We must be read/write to clean
-                  the dead entries. */
-               do_read_only = false;
-       }
-
-       if (do_read_only) {
-               NTSTATUS status;
-               status = dbwrap_fetch(brlock_db, br_lck, key, &data);
-               if (!NT_STATUS_IS_OK(status)) {
-                       DEBUG(3, ("Could not fetch byte range lock record\n"));
-                       TALLOC_FREE(br_lck);
-                       return NULL;
-               }
-               br_lck->record = NULL;
-       } else {
-               br_lck->record = dbwrap_fetch_locked(brlock_db, br_lck, key);
-
-               if (br_lck->record == NULL) {
-                       DEBUG(3, ("Could not lock byte range lock entry\n"));
-                       TALLOC_FREE(br_lck);
-                       return NULL;
-               }
+       br_lck->record = dbwrap_fetch_locked(brlock_db, br_lck, key);
 
-               data = dbwrap_record_get_value(br_lck->record);
+       if (br_lck->record == NULL) {
+               DEBUG(3, ("Could not lock byte range lock entry\n"));
+               TALLOC_FREE(br_lck);
+               return NULL;
        }
 
+       data = dbwrap_record_get_value(br_lck->record);
+
        if ((data.dsize % sizeof(struct lock_struct)) != 0) {
                DEBUG(3, ("Got invalid brlock data\n"));
                TALLOC_FREE(br_lck);
                return NULL;
        }
 
-       br_lck->read_only = do_read_only;
        br_lck->lock_data = NULL;
 
        talloc_set_destructor(br_lck, byte_range_lock_destructor);
@@ -2041,47 +2013,125 @@ static struct byte_range_lock *brl_get_locks_internal(TALLOC_CTX *mem_ctx,
                }
        }
 
-       if (do_read_only != read_only) {
-               /*
-                * this stores the record and gets rid of
-                * the write lock that is needed for a cleanup
-                */
-               byte_range_lock_flush(br_lck);
-       }
-
        return br_lck;
 }
 
-struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx,
-                                       files_struct *fsp)
-{
-       return brl_get_locks_internal(mem_ctx, fsp, False);
-}
+struct brl_get_locks_readonly_state {
+       TALLOC_CTX *mem_ctx;
+       struct byte_range_lock **br_lock;
+};
 
-struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp)
+static void brl_get_locks_readonly_parser(TDB_DATA key, TDB_DATA data,
+                                         void *private_data)
 {
+       struct brl_get_locks_readonly_state *state =
+               (struct brl_get_locks_readonly_state *)private_data;
        struct byte_range_lock *br_lock;
 
-       if (lp_clustering()) {
-               return brl_get_locks_internal(talloc_tos(), fsp, true);
+       br_lock = talloc_pooled_object(
+               state->mem_ctx, struct byte_range_lock, 1, data.dsize);
+       if (br_lock == NULL) {
+               *state->br_lock = NULL;
+               return;
        }
+       br_lock->lock_data = (struct lock_struct *)talloc_memdup(
+               br_lock, data.dptr, data.dsize);
+       br_lock->num_locks = data.dsize / sizeof(struct lock_struct);
+
+       *state->br_lock = br_lock;
+}
+
+struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp)
+{
+       struct byte_range_lock *br_lock = NULL;
+       struct byte_range_lock *rw = NULL;
 
        if ((fsp->brlock_rec != NULL)
            && (dbwrap_get_seqnum(brlock_db) == fsp->brlock_seqnum)) {
+               /*
+                * We have cached the brlock_rec and the database did not
+                * change.
+                */
                return fsp->brlock_rec;
        }
 
-       TALLOC_FREE(fsp->brlock_rec);
+       if (!fsp->lockdb_clean) {
+               /*
+                * Fetch the record in R/W mode to give validate_lock_entries
+                * a chance to kick in once.
+                */
+               rw = brl_get_locks(talloc_tos(), fsp);
+               if (rw == NULL) {
+                       return NULL;
+               }
+               fsp->lockdb_clean = true;
+       }
 
-       br_lock = brl_get_locks_internal(talloc_tos(), fsp, true);
-       if (br_lock == NULL) {
-               return NULL;
+       if (rw != NULL) {
+               size_t lock_data_size;
+
+               /*
+                * Make a copy of the already retrieved and sanitized rw record
+                */
+               lock_data_size = rw->num_locks * sizeof(struct lock_struct);
+               br_lock = talloc_pooled_object(
+                       fsp, struct byte_range_lock, 1, lock_data_size);
+               if (br_lock == NULL) {
+                       goto fail;
+               }
+               br_lock->num_locks = rw->num_locks;
+               br_lock->lock_data = (struct lock_struct *)talloc_memdup(
+                       br_lock, rw->lock_data, lock_data_size);
+       } else {
+               struct brl_get_locks_readonly_state state;
+               NTSTATUS status;
+
+               /*
+                * Parse the record fresh from the database
+                */
+
+               state.mem_ctx = fsp;
+               state.br_lock = &br_lock;
+
+               status = dbwrap_parse_record(
+                       brlock_db,
+                       make_tdb_data((uint8_t *)&fsp->file_id,
+                                     sizeof(fsp->file_id)),
+                       brl_get_locks_readonly_parser, &state);
+               if (!NT_STATUS_IS_OK(status)) {
+                       DEBUG(3, ("Could not parse byte range lock record: "
+                                 "%s\n", nt_errstr(status)));
+                       goto fail;
+               }
+               if (br_lock == NULL) {
+                       goto fail;
+               }
        }
-       fsp->brlock_seqnum = dbwrap_get_seqnum(brlock_db);
 
-       fsp->brlock_rec = talloc_move(fsp, &br_lock);
+       br_lock->fsp = fsp;
+       br_lock->modified = false;
+       br_lock->record = NULL;
+
+       if (lp_clustering()) {
+               /*
+                * In the cluster case we can't cache the brlock struct
+                * because dbwrap_get_seqnum does not work reliably over
+                * ctdb. Thus we have to throw away the brlock struct soon.
+                */
+               talloc_steal(talloc_tos(), br_lock);
+       } else {
+               /*
+                * Cache the brlock struct, invalidated when the dbwrap_seqnum
+                * changes. See beginning of this routine.
+                */
+               TALLOC_FREE(fsp->brlock_rec);
+               fsp->brlock_rec = br_lock;
+               fsp->brlock_seqnum = dbwrap_get_seqnum(brlock_db);
+       }
 
-       return fsp->brlock_rec;
+fail:
+       TALLOC_FREE(rw);
+       return br_lock;
 }
 
 struct brl_revalidate_state {


-- 
Samba Shared Repository
