ChangeSet 1.2181.1.13, 2005/03/30 13:49:13-08:00, [EMAIL PROTECTED]

        [SPARC64]: Simplified csum_partial() implementation.
        
        There is no need to make this routine use VIS et al.
        A simple, straightforward prefetching integer version
        is fine.  Most of the time this routine is run to
        compute checksums over small protocol headers or
        similarly short buffers.
        
        Signed-off-by: David S. Miller <[EMAIL PROTECTED]>



 Makefile   |    2 
 checksum.S |  157 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 158 insertions(+), 1 deletion(-)
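
As background for the patch below, here is a minimal C sketch of the
accumulate-then-fold idea the new code uses: 32-bit words are added into a
64-bit accumulator with plain adds (so no add-with-carry instructions are
needed), and the accumulated carries are folded back down to 16 bits at the
end.  The names fold_sum() and csum_words_sketch() are illustrative only,
not the kernel's csum_partial() interface, and the sketch ignores alignment
fixups and trailing bytes.

#include <stdint.h>
#include <stddef.h>

/* Fold a 64-bit accumulator down to a 16-bit value, mirroring the
 * fold 64-->32 and fold 32-->16 sequences in the assembly below.
 * Each step is done twice because the first add can itself carry.
 */
static uint32_t fold_sum(uint64_t sum)
{
	sum = (sum >> 32) + (uint32_t)sum;	/* fold 64 --> 32 */
	sum = (sum >> 32) + (uint32_t)sum;
	sum = (sum >> 16) + (sum & 0xffff);	/* fold 32 --> 16 */
	sum = (sum >> 16) + (sum & 0xffff);
	return (uint32_t)sum;
}

static uint32_t csum_words_sketch(const uint32_t *buf, size_t nwords)
{
	uint64_t sum = 0;
	size_t i;

	/* Plain adds; carries out of bit 31 pile up in the upper half
	 * of the 64-bit accumulator instead of being lost.
	 */
	for (i = 0; i < nwords; i++)
		sum += buf[i];

	return fold_sum(sum);
}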


diff -Nru a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile
--- a/arch/sparc64/lib/Makefile 2005-04-03 21:19:20 -07:00
+++ b/arch/sparc64/lib/Makefile 2005-04-03 21:19:20 -07:00
@@ -7,7 +7,7 @@
 
 lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
         memscan.o strncpy_from_user.o strlen_user.o memcmp.o checksum.o \
-        bzero.o VIScsum.o VIScsumcopy.o \
+        bzero.o VIScsumcopy.o \
         VIScsumcopyusr.o VISsave.o atomic.o bitops.o \
         U1memcpy.o U1copy_from_user.o U1copy_to_user.o \
         U3memcpy.o U3copy_from_user.o U3copy_to_user.o U3patch.o \
diff -Nru a/arch/sparc64/lib/checksum.S b/arch/sparc64/lib/checksum.S
--- a/arch/sparc64/lib/checksum.S       2005-04-03 21:19:20 -07:00
+++ b/arch/sparc64/lib/checksum.S       2005-04-03 21:19:20 -07:00
@@ -506,6 +506,163 @@
        retl
         ldx    [%g6 + TI_TASK], %g4
 
+csum_partial_fix_alignment:
+       /* We checked for zero length already, so there must be
+        * at least one byte.
+        */
+       be,pt           %icc, 1f
+        nop
+       ldub            [%o0 + 0x00], %o4
+       add             %o0, 1, %o0
+       sub             %o1, 1, %o1
+1:     andcc           %o0, 0x2, %g0
+       be,pn           %icc, csum_partial_post_align
+        cmp            %o1, 2
+       blu,pn          %icc, csum_partial_end_cruft
+        nop
+       lduh            [%o0 + 0x00], %o5
+       add             %o0, 2, %o0
+       sub             %o1, 2, %o1
+       ba,pt           %xcc, csum_partial_post_align
+        add            %o5, %o4, %o4
+
+       .align          32
+       .globl          csum_partial
+csum_partial:          /* %o0=buff, %o1=len, %o2=sum */
+       prefetch        [%o0 + 0x000], #one_read
+       clr             %o4
+       prefetch        [%o0 + 0x040], #one_read
+       brz,pn          %o1, csum_partial_finish
+        andcc          %o0, 0x3, %g0
+
+       /* We "remember" in %g7 whether the lowest bit of the
+        * address was set.  If it was, we have to swap the
+        * upper and lower 8-bit halves of the sum we calculate.
+        */
+       bne,pn          %icc, csum_partial_fix_alignment
+        andcc          %o0, 0x1, %g7
+
+csum_partial_post_align:
+       prefetch        [%o0 + 0x080], #one_read
+       andncc          %o1, 0x3f, %o3
+
+       prefetch        [%o0 + 0x0c0], #one_read
+       sub             %o1, %o3, %o1
+       brz,pn          %o3, 2f
+        prefetch       [%o0 + 0x100], #one_read
+
+       /* So that we don't need to use the non-pairing
+        * add-with-carry instructions, we accumulate 32-bit
+        * values into a 64-bit register.  At the end of the
+        * loop we fold it down to 32 bits and then to 16.
+        */
+       prefetch        [%o0 + 0x140], #one_read
+1:     lduw            [%o0 + 0x00], %o5
+       lduw            [%o0 + 0x04], %g1
+       lduw            [%o0 + 0x08], %g2
+       add             %o4, %o5, %o4
+       lduw            [%o0 + 0x0c], %g3
+       add             %o4, %g1, %o4
+       lduw            [%o0 + 0x10], %o5
+       add             %o4, %g2, %o4
+       lduw            [%o0 + 0x14], %g1
+       add             %o4, %g3, %o4
+       lduw            [%o0 + 0x18], %g2
+       add             %o4, %o5, %o4
+       lduw            [%o0 + 0x1c], %g3
+       add             %o4, %g1, %o4
+       lduw            [%o0 + 0x20], %o5
+       add             %o4, %g2, %o4
+       lduw            [%o0 + 0x24], %g1
+       add             %o4, %g3, %o4
+       lduw            [%o0 + 0x28], %g2
+       add             %o4, %o5, %o4
+       lduw            [%o0 + 0x2c], %g3
+       add             %o4, %g1, %o4
+       lduw            [%o0 + 0x30], %o5
+       add             %o4, %g2, %o4
+       lduw            [%o0 + 0x34], %g1
+       add             %o4, %g3, %o4
+       lduw            [%o0 + 0x38], %g2
+       add             %o4, %o5, %o4
+       lduw            [%o0 + 0x3c], %g3
+       add             %o4, %g1, %o4
+       prefetch        [%o0 + 0x180], #one_read
+       add             %o4, %g2, %o4
+       subcc           %o3, 0x40, %o3
+       add             %o0, 0x40, %o0
+       bne,pt          %icc, 1b
+        add            %o4, %g3, %o4
+
+2:     and             %o1, 0x3c, %o3
+       brz,pn          %o3, 2f
+        sub            %o1, %o3, %o1
+1:     lduw            [%o0 + 0x00], %o5
+       subcc           %o3, 0x4, %o3
+       add             %o0, 0x4, %o0
+       bne,pt          %icc, 1b
+        add            %o4, %o5, %o4
+
+2:
+       /* fold 64-->32 */
+       srlx            %o4, 32, %o5
+       srl             %o4, 0, %o4
+       add             %o4, %o5, %o4
+       srlx            %o4, 32, %o5
+       srl             %o4, 0, %o4
+       add             %o4, %o5, %o4
+
+       /* fold 32-->16 */
+       sethi           %hi(0xffff0000), %g1
+       srl             %o4, 16, %o5
+       andn            %o4, %g1, %g2
+       add             %o5, %g2, %o4
+       srl             %o4, 16, %o5
+       andn            %o4, %g1, %g2
+       add             %o5, %g2, %o4
+
+csum_partial_end_cruft:
+       /* %o4 has the 16-bit sum we have calculated so far.  */
+       cmp             %o1, 2
+       blu,pt          %icc, 1f
+        nop
+       lduh            [%o0 + 0x00], %o5
+       sub             %o1, 2, %o1
+       add             %o0, 2, %o0
+       add             %o4, %o5, %o4
+1:     brz,pt          %o1, 1f
+        nop
+       ldub            [%o0 + 0x00], %o5
+       sub             %o1, 1, %o1
+       add             %o0, 1, %o0
+       sllx            %o5, 8, %o5
+       add             %o4, %o5, %o4
+1:
+       /* fold 32-->16 */
+       sethi           %hi(0xffff0000), %g1
+       srl             %o4, 16, %o5
+       andn            %o4, %g1, %g2
+       add             %o5, %g2, %o4
+       srl             %o4, 16, %o5
+       andn            %o4, %g1, %g2
+       add             %o5, %g2, %o4
+
+1:     brz,pt          %g7, 1f
+        nop
+
+       /* We started on an odd byte; byte-swap the result.  */
+       srl             %o4, 8, %o5
+       and             %o4, 0xff, %g1
+       sll             %g1, 8, %g1
+       or              %o5, %g1, %o4
+
+1:     add             %o2, %o4, %o2
+
+csum_partial_finish:
+       retl
+        mov            %o2, %o0
+
+
        .section __ex_table
        .align  4
        .word   cpc_start, 0, cpc_end, cpc_handler
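
The byte swap at the end of the odd-alignment path above relies on a
standard property of the ones'-complement sum: when accumulation starts one
byte into the buffer, every byte lands in the opposite half of its 16-bit
word, and the folded result is simply the byte-swapped version of the
aligned sum, so one swap at the end is enough.  The following self-contained
C check of that property uses made-up helper names and an arbitrary example
buffer; it is an illustration, not kernel code.

#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Fold a 32-bit accumulator to 16 bits with end-around carry. */
static uint16_t fold16(uint32_t sum)
{
	sum = (sum >> 16) + (sum & 0xffff);
	sum = (sum >> 16) + (sum & 0xffff);
	return (uint16_t)sum;
}

/* Normal big-endian pairing: bytes 0, 2, 4, ... in the high halves. */
static uint16_t sum_even_start(const uint8_t *p, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += ((uint32_t)p[i] << 8) | p[i + 1];
	if (len & 1)
		sum += (uint32_t)p[len - 1] << 8;
	return fold16(sum);
}

/* "Odd start" pairing: the first byte lands in the low half, as the
 * ldub at csum_partial_fix_alignment leaves it, and the final result
 * is byte-swapped, as csum_partial does when %g7 is set.
 */
static uint16_t sum_odd_start(const uint8_t *p, size_t len)
{
	uint32_t sum = p[0];
	uint16_t folded;
	size_t i;

	for (i = 1; i + 1 < len; i += 2)
		sum += ((uint32_t)p[i] << 8) | p[i + 1];
	if (!(len & 1))
		sum += (uint32_t)p[len - 1] << 8;
	folded = fold16(sum);
	return (uint16_t)((folded >> 8) | (folded << 8));
}

int main(void)
{
	uint8_t buf[] = { 0x45, 0x00, 0x00, 0x54, 0xa6, 0xf2, 0x40, 0x00, 0x40 };

	assert(sum_even_start(buf, sizeof(buf)) == sum_odd_start(buf, sizeof(buf)));
	printf("0x%04x\n", (unsigned)sum_even_start(buf, sizeof(buf)));
	return 0;
}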