On 13/05/2025 6:05 pm, Sergii Dmytruk wrote:
> diff --git a/xen/include/xen/sha1.h b/xen/include/xen/sha1.h
> new file mode 100644
> index 0000000000..085f750a6a
> --- /dev/null
> +++ b/xen/include/xen/sha1.h
> @@ -0,0 +1,12 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +
> +#ifndef XEN__SHA1_H
> +#define XEN__SHA1_H
> +
> +#include <xen/inttypes.h>

Please crib from sha2.h as much as you can.  Use xen/types.h, drop the
double underscore in the guard, and provide a link to the spec.

I think it's https://csrc.nist.gov/pubs/fips/180-1/final

The rest of the header is fine; I don't think we need split-update()
support yet.
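
i.e. something along these lines (a sketch only; copy whatever sha2.h
actually does for the licence line and overall shape):

    /* SPDX-License-Identifier: GPL-2.0 */
    /*
     * SHA-1, as specified in FIPS 180-1:
     * https://csrc.nist.gov/pubs/fips/180-1/final
     */
    #ifndef XEN_SHA1_H
    #define XEN_SHA1_H

    #include <xen/types.h>

    #define SHA1_DIGEST_SIZE  20

    void sha1_hash(uint8_t digest[SHA1_DIGEST_SIZE],
                   const void *msg, size_t len);

    #endif /* XEN_SHA1_H */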

> +
> +#define SHA1_DIGEST_SIZE  20
> +
> +void sha1_hash(uint8_t digest[SHA1_DIGEST_SIZE], const void *msg, size_t len);
> +
> +#endif /* XEN__SHA1_H */
> diff --git a/xen/lib/Makefile b/xen/lib/Makefile
> index 5ccb1e5241..fd4b9ece63 100644
> --- a/xen/lib/Makefile
> +++ b/xen/lib/Makefile
> @@ -17,6 +17,7 @@ lib-y += memset.o
>  lib-y += muldiv64.o
>  lib-y += parse-size.o
>  lib-y += rbtree.o
> +lib-$(CONFIG_X86) += sha1.o
>  lib-$(CONFIG_X86) += sha2-256.o
>  lib-y += sort.o
>  lib-y += strcasecmp.o
> diff --git a/xen/lib/sha1.c b/xen/lib/sha1.c
> new file mode 100644
> index 0000000000..c7a464e2cf
> --- /dev/null
> +++ b/xen/lib/sha1.c
> @@ -0,0 +1,218 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/*
> + * SHA1 routine optimized to do word accesses rather than byte accesses,
> + * and to avoid unnecessary copies into the context array.
> + *
> + * This was based on the git SHA1 implementation.
> + */
> +
> +#include <xen/bitops.h>
> +#include <xen/sha1.h>
> +#include <xen/string.h>
> +#include <xen/types.h>
> +#include <xen/unaligned.h>
> +
> +/*
> + * If you have 32 registers or more, the compiler can (and should)
> + * try to change the array[] accesses into registers. However, on
> + * machines with less than ~25 registers, that won't really work,
> + * and at least GCC will make an unholy mess of it.
> + *
> + * So to avoid that mess which just slows things down, we force
> + * the stores to memory to actually happen (we might be better off
> + * with a 'W(t)=(val);asm("":"+m" (W(t))' there instead, as
> + * suggested by Artur Skawina - that will also make GCC unable to
> + * try to do the silly "optimize away loads" part because it won't
> + * see what the value will be).
> + *
> + * Ben Herrenschmidt reports that on PPC, the C version comes close
> + * to the optimized asm with this (ie on PPC you don't want that
> + * 'volatile', since there are lots of registers).
> + *
> + * On ARM we get the best code generation by forcing a full memory barrier
> + * between each SHA round, otherwise GCC happily gets wild with spilling and
> + * the stack frame size simply explode and performance goes down the drain.
> + */
> +
> +#define SHA1_BLOCK_SIZE         64
> +#define SHA1_WORKSPACE_WORDS    16
> +#define SHA1_WORKSPACE_MASK     (SHA1_WORKSPACE_WORDS - 1)
> +
> +struct sha1_state {
> +    uint32_t state[SHA1_DIGEST_SIZE / 4];
> +    uint64_t count;
> +    uint8_t buffer[SHA1_BLOCK_SIZE];
> +};

As it's uint64_t, the count field needs to be first to avoid padding.
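
i.e.:

    struct sha1_state {
        uint64_t count;
        uint32_t state[SHA1_DIGEST_SIZE / 4];
        uint8_t buffer[SHA1_BLOCK_SIZE];
    };

which leaves no hole between state[] and count.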

> +
> +/* This "rolls" over the 512-bit array */
> +static void set_w(uint32_t w[SHA1_WORKSPACE_WORDS], size_t i, uint32_t val)
> +{
> +#ifdef CONFIG_X86
> +    *(volatile uint32_t *)&w[i & SHA1_WORKSPACE_MASK] = val;
> +#else
> +    w[i & SHA1_WORKSPACE_MASK] = val;
> +# ifdef CONFIG_ARM
> +    barrier();
> +# endif
> +#endif

This is horrible.  I think the problems discussed are created by the
loops in sha1_transform() being broken up in a wrong (read: unhelpful)
way.  The 5-way shuffle of the chaining variables is probably beyond
the compilers' ability to unroll given the multiples of 4 currently
used.

See the implementation in SKL, where I spent a while optimising the
code generation.  Admittedly that was optimising for size rather than
speed, but the end result looks to be good for both.
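
As a sketch of what I mean, with F2/K2 standing in for the round-2
boolean function and constant, and W() for the message schedule:
unrolling by 5 turns the chaining-variable shuffle into pure register
renaming, at which point neither the volatile store nor the barrier in
set_w() ought to be necessary.

    #define SHA1_ROUND(a, b, c, d, e, f, k, w)              \
        do {                                                \
            (e) += rol32(a, 5) + f(b, c, d) + (k) + (w);    \
            (b) = ror32(b, 2);                              \
        } while ( 0 )

    /* e.g. Round 2, iterations 20-39 (a multiple of 5): */
    for ( ; i < 40; i += 5 )
    {
        SHA1_ROUND(a, b, c, d, e, F2, K2, W(i + 0));
        SHA1_ROUND(e, a, b, c, d, F2, K2, W(i + 1));
        SHA1_ROUND(d, e, a, b, c, F2, K2, W(i + 2));
        SHA1_ROUND(c, d, e, a, b, F2, K2, W(i + 3));
        SHA1_ROUND(b, c, d, e, a, F2, K2, W(i + 4));
    }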

> +}
> +
> +static uint32_t blend(const uint32_t w[SHA1_WORKSPACE_WORDS], size_t i)
> +{
> +/* This "rolls" over the 512-bit array */
> +#define w(i) w[(i) & SHA1_WORKSPACE_MASK]
> +
> +    return rol32(w(i + 13) ^ w(i + 8) ^ w(i + 2) ^ w(i), 1);
> +
> +#undef w
> +}
> +
> +/**
> + * sha1_transform - single block SHA1 transform
> + *
> + * @digest: 160 bit digest to update
> + * @data:   512 bits of data to hash
> + *
> + * This function executes SHA-1's internal compression function.  It updates the
> + * 160-bit internal state (@digest) with a single 512-bit data block (@data).
> + */
> +static void sha1_transform(uint32_t *digest, const uint8_t *data)
> +{
> +    uint32_t a, b, c, d, e, t;
> +    uint32_t workspace[SHA1_WORKSPACE_WORDS];
> +    unsigned int i = 0;
> +
> +    a = digest[0];
> +    b = digest[1];
> +    c = digest[2];
> +    d = digest[3];
> +    e = digest[4];
> +
> +    /* Round 1 - iterations 0-16 take their input from 'data' */
> +    for ( ; i < 16; ++i ) {

Xen style has this { on the next line.
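
i.e.:

    for ( ; i < 16; ++i )
    {
        ...
    }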

> +        t = get_unaligned_be32((uint32_t *)data + i);
> +        set_w(workspace, i, t);
> +        e += t + rol32(a, 5) + (((c ^ d) & b) ^ d) + 0x5a827999U;
> +        b = ror32(b, 2);
> +        t = e; e = d; d = c; c = b; b = a; a = t;
> +    }
> +
> +    /* Round 1 - tail. Input from 512-bit mixing array */
> +    for ( ; i < 20; ++i ) {
> +        t = blend(workspace, i);
> +        set_w(workspace, i, t);
> +        e += t + rol32(a, 5) + (((c ^ d) & b) ^ d) + 0x5a827999U;
> +        b = ror32(b, 2);
> +        t = e; e = d; d = c; c = b; b = a; a = t;
> +    }
> +
> +    /* Round 2 */
> +    for ( ; i < 40; ++i ) {
> +        t = blend(workspace, i);
> +        set_w(workspace, i, t);
> +        e += t + rol32(a, 5) + (b ^ c ^ d) + 0x6ed9eba1U;
> +        b = ror32(b, 2);
> +        t = e; e = d; d = c; c = b; b = a; a = t;
> +    }
> +
> +    /* Round 3 */
> +    for ( ; i < 60; ++i ) {
> +        t = blend(workspace, i);
> +        set_w(workspace, i, t);
> +        e += t + rol32(a, 5) + ((b & c) + (d & (b ^ c))) + 0x8f1bbcdcU;
> +        b = ror32(b, 2);
> +        t = e; e = d; d = c; c = b; b = a; a = t;
> +    }
> +
> +    /* Round 4 */
> +    for ( ; i < 80; ++i ) {
> +        t = blend(workspace, i);
> +        set_w(workspace, i, t);
> +        e += t + rol32(a, 5) + (b ^ c ^ d) + 0xca62c1d6U;
> +        b = ror32(b, 2);
> +        t = e; e = d; d = c; c = b; b = a; a = t;
> +    }
> +
> +    digest[0] += a;
> +    digest[1] += b;
> +    digest[2] += c;
> +    digest[3] += d;
> +    digest[4] += e;
> +}
> +
> +static void sha1_init(struct sha1_state *sctx)
> +{
> +    sctx->state[0] = 0x67452301UL;
> +    sctx->state[1] = 0xefcdab89UL;
> +    sctx->state[2] = 0x98badcfeUL;
> +    sctx->state[3] = 0x10325476UL;
> +    sctx->state[4] = 0xc3d2e1f0UL;
> +    sctx->count = 0;
> +}
> +
> +static void sha1_update(struct sha1_state *sctx, const uint8_t *msg, size_t len)
> +{
> +    unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
> +
> +    sctx->count += len;
> +
> +    if ( (partial + len) >= SHA1_BLOCK_SIZE )
> +    {
> +        if ( partial )
> +        {
> +            int rem = SHA1_BLOCK_SIZE - partial;

Unsigned int please.
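
i.e.:

    unsigned int rem = SHA1_BLOCK_SIZE - partial;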

> +
> +            memcpy(sctx->buffer + partial, msg, rem);
> +            msg += rem;
> +            len -= rem;
> +
> +            sha1_transform(sctx->state, sctx->buffer);
> +        }
> +
> +        for ( ; len >= SHA1_BLOCK_SIZE; len -= SHA1_BLOCK_SIZE )
> +        {
> +            sha1_transform(sctx->state, msg);
> +            msg += SHA1_BLOCK_SIZE;
> +        }
> +        partial = 0;
> +    }
> +
> +    /* Remaining data becomes partial. */
> +    memcpy(sctx->buffer + partial, msg, len);
> +}
> +
> +static void sha1_final(struct sha1_state *sctx, void *out)

Please make this uint8_t digest[SHA1_DIGEST_SIZE] straight away.  This
was an oversight of mine in sha2-256.c which was fixed when exposing the
function (c/s aea52ce607fe).
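
i.e. presumably:

    static void sha1_final(struct sha1_state *sctx,
                           uint8_t digest[SHA1_DIGEST_SIZE])

matching sha1_hash()'s declaration in the header.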

~Andrew
