The branch, master has been updated
       via  4fa5125 tdb: enable VALGRIND to remove valgrind noise.
      from  4afe426 s4-ipv6: fill in pdc_ip in DsRGetDCNameEx2

http://gitweb.samba.org/?p=samba.git;a=shortlog;h=master


- Log -----------------------------------------------------------------
commit 4fa51257b283c2e8bb415cc7f9c001d64c8a2669
Author: Rusty Russell <ru...@rustcorp.com.au>
Date:   Wed Jun 8 17:20:48 2011 +0930

    tdb: enable VALGRIND to remove valgrind noise.
    
    Andrew Bartlett complained that valgrind needs --partial-loads-ok=yes otherwise
    the Jenkins hash makes it complain.
    
    My benchmarking here revealed that at least with modern gcc (4.5) and CPU
    (Intel i5 32 bit) there's no measurable performance penalty for the
    "correct" code, so rip out the optimized one.
    
    Signed-off-by: Rusty Russell <ru...@rustcorp.com.au>
    
    Autobuild-User: Rusty Russell <ru...@rustcorp.com.au>
    Autobuild-Date: Wed Jun  8 11:05:47 CEST 2011 on sn-devel-104

-----------------------------------------------------------------------

Summary of changes:
 lib/tdb/common/hash.c |   35 -----------------------------------
 1 files changed, 0 insertions(+), 35 deletions(-)


Changeset truncated at 500 lines:

diff --git a/lib/tdb/common/hash.c b/lib/tdb/common/hash.c
index 2472ed1..1eed722 100644
--- a/lib/tdb/common/hash.c
+++ b/lib/tdb/common/hash.c
@@ -214,9 +214,7 @@ static uint32_t hashlittle( const void *key, size_t length )
   u.ptr = key;
   if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
     const uint32_t *k = (const uint32_t *)key;         /* read 32-bit chunks */
-#ifdef VALGRIND
     const uint8_t  *k8;
-#endif
 
     /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
     while (length > 12)
@@ -230,36 +228,6 @@ static uint32_t hashlittle( const void *key, size_t length )
     }
 
     /*----------------------------- handle the last (probably partial) block */
-    /*
-     * "k[2]&0xffffff" actually reads beyond the end of the string, but
-     * then masks off the part it's not allowed to read.  Because the
-     * string is aligned, the masked-off tail is in the same word as the
-     * rest of the string.  Every machine with memory protection I've seen
-     * does it on word boundaries, so is OK with this.  But VALGRIND will
-     * still catch it and complain.  The masking trick does make the hash
-     * noticably faster for short strings (like English words).
-     */
-#ifndef VALGRIND
-
-    switch(length)
-    {
-    case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
-    case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break;
-    case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break;
-    case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break;
-    case 8 : b+=k[1]; a+=k[0]; break;
-    case 7 : b+=k[1]&0xffffff; a+=k[0]; break;
-    case 6 : b+=k[1]&0xffff; a+=k[0]; break;
-    case 5 : b+=k[1]&0xff; a+=k[0]; break;
-    case 4 : a+=k[0]; break;
-    case 3 : a+=k[0]&0xffffff; break;
-    case 2 : a+=k[0]&0xffff; break;
-    case 1 : a+=k[0]&0xff; break;
-    case 0 : return c;              /* zero length strings require no mixing */
-    }
-
-#else /* make valgrind happy */
-
     k8 = (const uint8_t *)k;
     switch(length)
     {
@@ -277,9 +245,6 @@ static uint32_t hashlittle( const void *key, size_t length )
     case 1 : a+=k8[0]; break;
     case 0 : return c;
     }
-
-#endif /* !valgrind */
-
   } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
     const uint16_t *k = (const uint16_t *)key;         /* read 16-bit chunks */
     const uint8_t  *k8;


-- 
Samba Shared Repository

Reply via email to