On Tue, Oct 17, 2006 at 11:12:56PM -0400, Adam Kropelin wrote:
...
> Write the code in userspace to prove it to yourself if you don't believe 
> me. In fact, here. I did it for you. (x86 32 bit assumed; adjust to 
> taste)

Thanks for the test code.
I'll resubmit with a patch that works for 32-bit extracts/implements
and integrate the more useful comments from this thread.

With just 1ULL and the original test, I get:
[EMAIL PROTECTED]:~$ ./test_extract 
broken=0x61626364, working=0x00000000

The test code didn't attempt to correct for endianness... so the bytes
are backwards on gsyprf11. But 32-bit extract fails for offsets
1-7 like I expected.

I've rewritten the test to cover a few more cases.
And I've fixed the algorithm to handle 32-bit extracts in the test
below and will submit the same code in the next patch.

thanks,
grant

----------------------------------------------------------------------
/* test_extract.c */
#include <stdio.h>

typedef unsigned long u32;
typedef unsigned long long u64;
typedef unsigned char u8;
typedef unsigned long long __le64;
#define get_unaligned(x) (*(x)) /* kernel handles this though it might complain 
*/

#ifdef __hppa__
/* copied from include/asm-parisc/byteorder.h */

#define __u32 u32
#define __u64 u64
static __u32 ___arch__swab32(__u32 x)
{
        unsigned int temp;
        __asm__("shd %0, %0, 16, %1\n\t"        /* shift abcdabcd -> cdab */
                "dep %1, 15, 8, %1\n\t"         /* deposit cdab -> cbab */
                "shd %0, %1, 8, %0"             /* shift abcdcbab -> dcba */
                : "=r" (x), "=&r" (temp)
                : "0" (x));
        return x;
}

static __inline__ __u64 le64_to_cpu(__u64 x)
{
        __u32 t1 = ___arch__swab32((__u32) x);
        __u32 t2 = ___arch__swab32((__u32) (x >> 32));
        return (((__u64) t1 << 32) | t2);
}
#else
#define le64_to_cpu(x) (x)
#endif

static u32 working(u8 *report, unsigned offset, unsigned n)
{
        report += (offset >> 5) << 2; offset &= 31;
        return (le64_to_cpu(get_unaligned((__le64*)report)) >> offset) & 
((1ULL << n) - 1);
}

static u32 broken(u8 *report, unsigned offset, unsigned n)
{
        u64 x;

        report += offset >> 3;  /* adjust byte index */
        offset &= 8 - 1;
        x = get_unaligned((u64 *) report);
        x = le64_to_cpu(x);
        x = (x >> offset) & ((1ULL << n) - 1);
        return x;
}

int main(int argc, char* argv)
{
        unsigned int i;
        u8 t[10] = { 0x10, 0x32, 0x54, 0x76, 0x98, 0xba, 0xdc, 0xfe, 0x10, 0x32 
};

        for (i = 0; i <= 40; i++)
                printf("%2d : new 0x%08x  orig 0x%08x\n", i,
                                broken(t, i, 32),
                                working(t, i, 32) );

        return 0;
}

-------------------------------------------------------------------------
Using Tomcat but need to do more? Need to support web services, security?
Get stuff done quickly with pre-integrated technology to make your job easier
Download IBM WebSphere Application Server v.1.0.1 based on Apache Geronimo
http://sel.as-us.falkag.net/sel?cmd=lnk&kid=120709&bid=263057&dat=121642
_______________________________________________
linux-usb-devel@lists.sourceforge.net
To unsubscribe, use the last form field at:
https://lists.sourceforge.net/lists/listinfo/linux-usb-devel

Reply via email to