The branch, v3-6-test, has been updated
       via  0418a43 s3:locking: simplify the non cluster case of brl_get_locks_readonly()
       via  419730e s3:locking: brl_get_locks_internal() should not expose a write lock if the caller wants read_only
      from  bb5bd2b s4-smbtorture: add netsessiongetinfo() test.

http://gitweb.samba.org/?p=samba.git;a=shortlog;h=v3-6-test


- Log -----------------------------------------------------------------
commit 0418a43aecb654588429ca76c25cf453b8607bc5
Author: Stefan Metzmacher <[email protected]>
Date:   Thu Mar 17 10:08:56 2011 +0100

    s3:locking: simplify the non cluster case of brl_get_locks_readonly()
    
    Pair-Programmed-With: Michael Adam <[email protected]>
    
    metze
    
    Autobuild-User: Stefan Metzmacher <[email protected]>
    Autobuild-Date: Fri Mar 18 13:00:51 CET 2011 on sn-devel-104
    (cherry picked from commit 2f048e458bbd47307949293cf8436c2d8f879439)
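
    For reference, a minimal standalone sketch of the talloc_move() idiom this
    simplification relies on. The struct, context names and values below are
    made up for illustration only and are not Samba code; build with -ltalloc.

    #include <stdio.h>
    #include <talloc.h>

    struct locks {
            int num_locks;
    };

    int main(void)
    {
            TALLOC_CTX *tmp_ctx = talloc_new(NULL); /* short-lived, like talloc_tos() */
            TALLOC_CTX *fsp_ctx = talloc_new(NULL); /* long-lived owner, like fsp     */

            /* Build the result on the temporary context first. */
            struct locks *br_lock = talloc_zero(tmp_ctx, struct locks);
            if (br_lock == NULL) {
                    return 1;
            }
            br_lock->num_locks = 3;

            /*
             * talloc_move() reparents the allocation onto fsp_ctx and NULLs
             * the old pointer, so no talloc_memdup() copy and no separate
             * failure path are needed.
             */
            struct locks *cached = talloc_move(fsp_ctx, &br_lock);

            printf("moved: num_locks=%d, old pointer=%p\n",
                   cached->num_locks, (void *)br_lock);

            talloc_free(tmp_ctx);
            talloc_free(fsp_ctx);
            return 0;
    }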

commit 419730ed864508708a0dd6eab484b24c1b7c0234
Author: Stefan Metzmacher <[email protected]>
Date:   Thu Mar 17 10:04:12 2011 +0100

    s3:locking: brl_get_locks_internal() should not expose a write lock if the caller wants read_only
    
    This triggered deadlocks in the cluster case of brl_get_locks_readonly().
    
    Pair-Programmed-With: Michael Adam <[email protected]>
    
    metze
    (cherry picked from commit e8411d6f9f1827ea88754fa97a2671cdf27a5554)
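
    For reference, a rough standalone model of the control flow this fix
    introduces. br_handle, get_locks() and flush_and_unlock() are simplified
    stand-ins invented for illustration, not the real brlock/dbwrap API: the
    point is only that a temporarily-needed write lock is stored and released
    before a read-only handle is returned.

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-in for struct byte_range_lock. */
    struct br_handle {
            bool read_only;    /* what the caller asked for          */
            bool have_record;  /* true while the write lock is held  */
    };

    /* Stand-in for the flush step: store the record, drop the write lock. */
    static void flush_and_unlock(struct br_handle *h)
    {
            if (h->have_record) {
                    /* ... write the record back to the database here ... */
                    h->have_record = false;
            }
            h->read_only = true;
    }

    static struct br_handle *get_locks(bool read_only, bool db_needs_cleanup)
    {
            struct br_handle *h = calloc(1, sizeof(*h));
            bool do_read_only = read_only;

            if (h == NULL) {
                    return NULL;
            }
            if (db_needs_cleanup) {
                    /* Cleaning dead entries needs write access. */
                    do_read_only = false;
            }

            h->read_only = do_read_only;
            h->have_record = !do_read_only;   /* fetch_locked() analogue */

            /* ... cleanup / validation of the entries would happen here ... */

            if (do_read_only != read_only) {
                    /*
                     * The caller only wanted read access: store the record
                     * now and release the write lock, so the returned handle
                     * cannot block other users (the deadlock described above).
                     */
                    flush_and_unlock(h);
            }
            return h;
    }

    int main(void)
    {
            struct br_handle *h = get_locks(true, true);
            if (h != NULL) {
                    printf("read_only=%d have_record=%d\n",
                           h->read_only, h->have_record);
                    free(h);
            }
            return 0;
    }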

-----------------------------------------------------------------------

Summary of changes:
 source3/locking/brlock.c |   54 ++++++++++++++++++++++-----------------------
 1 files changed, 26 insertions(+), 28 deletions(-)


Changeset truncated at 500 lines:

diff --git a/source3/locking/brlock.c b/source3/locking/brlock.c
index 3cf72cc..95194da 100644
--- a/source3/locking/brlock.c
+++ b/source3/locking/brlock.c
@@ -1763,7 +1763,7 @@ int brl_forall(void (*fn)(struct file_id id, struct server_id pid,
  Unlock the record.
 ********************************************************************/
 
-static int byte_range_lock_destructor(struct byte_range_lock *br_lck)
+static void byte_range_lock_flush(struct byte_range_lock *br_lck)
 {
        if (br_lck->read_only) {
                SMB_ASSERT(!br_lck->modified);
@@ -1798,8 +1798,16 @@ static int byte_range_lock_destructor(struct byte_range_lock *br_lck)
 
  done:
 
-       SAFE_FREE(br_lck->lock_data);
+       br_lck->read_only = true;
+       br_lck->modified = false;
+
        TALLOC_FREE(br_lck->record);
+}
+
+static int byte_range_lock_destructor(struct byte_range_lock *br_lck)
+{
+       byte_range_lock_flush(br_lck);
+       SAFE_FREE(br_lck->lock_data);
        return 0;
 }
 
@@ -1814,6 +1822,7 @@ static struct byte_range_lock *brl_get_locks_internal(TALLOC_CTX *mem_ctx,
 {
        TDB_DATA key, data;
        struct byte_range_lock *br_lck = TALLOC_P(mem_ctx, struct byte_range_lock);
+       bool do_read_only = read_only;
 
        if (br_lck == NULL) {
                return NULL;
@@ -1830,18 +1839,17 @@ static struct byte_range_lock *brl_get_locks_internal(TALLOC_CTX *mem_ctx,
        if (!fsp->lockdb_clean) {
                /* We must be read/write to clean
                   the dead entries. */
-               read_only = False;
+               do_read_only = false;
        }
 
-       if (read_only) {
+       if (do_read_only) {
                if (brlock_db->fetch(brlock_db, br_lck, key, &data) == -1) {
                        DEBUG(3, ("Could not fetch byte range lock record\n"));
                        TALLOC_FREE(br_lck);
                        return NULL;
                }
                br_lck->record = NULL;
-       }
-       else {
+       } else {
                br_lck->record = brlock_db->fetch_locked(brlock_db, br_lck, key);
 
                if (br_lck->record == NULL) {
@@ -1853,7 +1861,7 @@ static struct byte_range_lock *brl_get_locks_internal(TALLOC_CTX *mem_ctx,
                data = br_lck->record->value;
        }
 
-       br_lck->read_only = read_only;
+       br_lck->read_only = do_read_only;
        br_lck->lock_data = NULL;
 
        talloc_set_destructor(br_lck, byte_range_lock_destructor);
@@ -1905,6 +1913,15 @@ static struct byte_range_lock *brl_get_locks_internal(TALLOC_CTX *mem_ctx,
                        print_lock_struct(i, &locks[i]);
                }
        }
+
+       if (do_read_only != read_only) {
+               /*
+                * this stores the record and gets rid of
+                * the write lock that is needed for a cleanup
+                */
+               byte_range_lock_flush(br_lck);
+       }
+
        return br_lck;
 }
 
@@ -1929,34 +1946,15 @@ struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp)
 
        TALLOC_FREE(fsp->brlock_rec);
 
-       br_lock = brl_get_locks_internal(talloc_tos(), fsp, false);
+       br_lock = brl_get_locks_internal(talloc_tos(), fsp, true);
        if (br_lock == NULL) {
                return NULL;
        }
        fsp->brlock_seqnum = brlock_db->get_seqnum(brlock_db);
 
-       fsp->brlock_rec = talloc_zero(fsp, struct byte_range_lock);
-       if (fsp->brlock_rec == NULL) {
-               goto fail;
-       }
-       fsp->brlock_rec->fsp = fsp;
-       fsp->brlock_rec->num_locks = br_lock->num_locks;
-       fsp->brlock_rec->read_only = true;
-       fsp->brlock_rec->key = br_lock->key;
-
-       fsp->brlock_rec->lock_data = (struct lock_struct *)
-               talloc_memdup(fsp->brlock_rec, br_lock->lock_data,
-                             sizeof(struct lock_struct) * br_lock->num_locks);
-       if (fsp->brlock_rec->lock_data == NULL) {
-               goto fail;
-       }
+       fsp->brlock_rec = talloc_move(fsp, &br_lock);
 
-       TALLOC_FREE(br_lock);
        return fsp->brlock_rec;
-fail:
-       TALLOC_FREE(br_lock);
-       TALLOC_FREE(fsp->brlock_rec);
-       return NULL;
 }
 
 struct brl_revalidate_state {


-- 
Samba Shared Repository
