Add a primitive for SubBytes + ShiftRows + MixColumns + AddRoundKey.

Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 host/include/generic/host/crypto/aes-round.h |  3 +
 include/crypto/aes-round.h                   | 21 +++++++
 crypto/aes.c                                 | 58 ++++++++++++++++++++
 3 files changed, 82 insertions(+)
diff --git a/host/include/generic/host/crypto/aes-round.h b/host/include/generic/host/crypto/aes-round.h
index 335ec3f11e..9886e81e50 100644
--- a/host/include/generic/host/crypto/aes-round.h
+++ b/host/include/generic/host/crypto/aes-round.h
@@ -14,6 +14,9 @@ void aesenc_MC_accel(AESState *, const AESState *, bool)
 void aesenc_SB_SR_AK_accel(AESState *, const AESState *,
                            const AESState *, bool)
     QEMU_ERROR("unsupported accel");
+void aesenc_SB_SR_MC_AK_accel(AESState *, const AESState *,
+                              const AESState *, bool)
+    QEMU_ERROR("unsupported accel");
 
 void aesdec_IMC_accel(AESState *, const AESState *, bool)
     QEMU_ERROR("unsupported accel");
diff --git a/include/crypto/aes-round.h b/include/crypto/aes-round.h
index e1a9c24cca..6c744b299d 100644
--- a/include/crypto/aes-round.h
+++ b/include/crypto/aes-round.h
@@ -59,6 +59,27 @@ static inline void aesenc_SB_SR_AK(AESState *r, const AESState *st,
     }
 }
 
+/*
+ * Perform SubBytes + ShiftRows + MixColumns + AddRoundKey.
+ */
+
+void aesenc_SB_SR_MC_AK_gen(AESState *ret, const AESState *st,
+                            const AESState *rk);
+void aesenc_SB_SR_MC_AK_genrev(AESState *ret, const AESState *st,
+                               const AESState *rk);
+
+static inline void aesenc_SB_SR_MC_AK(AESState *r, const AESState *st,
+                                      const AESState *rk, bool be)
+{
+    if (HAVE_AES_ACCEL) {
+        aesenc_SB_SR_MC_AK_accel(r, st, rk, be);
+    } else if (HOST_BIG_ENDIAN == be) {
+        aesenc_SB_SR_MC_AK_gen(r, st, rk);
+    } else {
+        aesenc_SB_SR_MC_AK_genrev(r, st, rk);
+    }
+}
+
 /*
  * Perform InvMixColumns.
  */
diff --git a/crypto/aes.c b/crypto/aes.c
index 31028609d6..c0350c0b76 100644
--- a/crypto/aes.c
+++ b/crypto/aes.c
@@ -1356,6 +1356,64 @@ void aesenc_SB_SR_AK_genrev(AESState *r, const AESState *s, const AESState *k)
     aesenc_SB_SR_AK_swap(r, s, k, true);
 }
 
+/*
+ * Perform SubBytes + ShiftRows + MixColumns + AddRoundKey.
+ */
+static inline void
+aesenc_SB_SR_MC_AK_swap(AESState *r, const AESState *st,
+                        const AESState *rk, bool swap)
+{
+    int swap_b = swap * 0xf;
+    int swap_w = swap * 0x3;
+    bool be = HOST_BIG_ENDIAN ^ swap;
+    uint32_t w0, w1, w2, w3;
+
+    w0 = (AES_Te0[st->b[swap_b ^ AES_SH_0]] ^
+          AES_Te1[st->b[swap_b ^ AES_SH_1]] ^
+          AES_Te2[st->b[swap_b ^ AES_SH_2]] ^
+          AES_Te3[st->b[swap_b ^ AES_SH_3]]);
+
+    w1 = (AES_Te0[st->b[swap_b ^ AES_SH_4]] ^
+          AES_Te1[st->b[swap_b ^ AES_SH_5]] ^
+          AES_Te2[st->b[swap_b ^ AES_SH_6]] ^
+          AES_Te3[st->b[swap_b ^ AES_SH_7]]);
+
+    w2 = (AES_Te0[st->b[swap_b ^ AES_SH_8]] ^
+          AES_Te1[st->b[swap_b ^ AES_SH_9]] ^
+          AES_Te2[st->b[swap_b ^ AES_SH_A]] ^
+          AES_Te3[st->b[swap_b ^ AES_SH_B]]);
+
+    w3 = (AES_Te0[st->b[swap_b ^ AES_SH_C]] ^
+          AES_Te1[st->b[swap_b ^ AES_SH_D]] ^
+          AES_Te2[st->b[swap_b ^ AES_SH_E]] ^
+          AES_Te3[st->b[swap_b ^ AES_SH_F]]);
+
+    /* Note that AES_TeX is encoded for big-endian. */
+    if (!be) {
+        w0 = bswap32(w0);
+        w1 = bswap32(w1);
+        w2 = bswap32(w2);
+        w3 = bswap32(w3);
+    }
+
+    r->w[swap_w ^ 0] = rk->w[swap_w ^ 0] ^ w0;
+    r->w[swap_w ^ 1] = rk->w[swap_w ^ 1] ^ w1;
+    r->w[swap_w ^ 2] = rk->w[swap_w ^ 2] ^ w2;
+    r->w[swap_w ^ 3] = rk->w[swap_w ^ 3] ^ w3;
+}
+
+void aesenc_SB_SR_MC_AK_gen(AESState *r, const AESState *st,
+                            const AESState *rk)
+{
+    aesenc_SB_SR_MC_AK_swap(r, st, rk, false);
+}
+
+void aesenc_SB_SR_MC_AK_genrev(AESState *r, const AESState *st,
+                               const AESState *rk)
+{
+    aesenc_SB_SR_MC_AK_swap(r, st, rk, true);
+}
+
 /*
  * Perform InvMixColumns.
  */
-- 
2.34.1
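
A hypothetical usage sketch, not part of the patch: the helper below, aes128_encrypt_sketch(), shows how the new aesenc_SB_SR_MC_AK() composes with the existing aesenc_SB_SR_AK() into a full AES-128 block encryption, assuming the caller already holds the 11 expanded round keys as AESState values laid out in the byte order that the "be" flag describes for the state.

#include "qemu/osdep.h"
#include "crypto/aes-round.h"

/*
 * Hypothetical example, not part of this patch: encrypt one block of
 * AES-128 with the round primitives.  "rk" is assumed to hold the 11
 * expanded round keys, stored in the same byte order that "be"
 * describes for the state.
 */
static void aes128_encrypt_sketch(AESState *out, const AESState *in,
                                  const AESState rk[11], bool be)
{
    AESState t = *in;

    /* Initial AddRoundKey; plain XOR, so byte order does not matter. */
    t.d[0] ^= rk[0].d[0];
    t.d[1] ^= rk[0].d[1];

    /* Rounds 1..9: SubBytes + ShiftRows + MixColumns + AddRoundKey. */
    for (int round = 1; round < 10; round++) {
        aesenc_SB_SR_MC_AK(&t, &t, &rk[round], be);
    }

    /* Final round omits MixColumns. */
    aesenc_SB_SR_AK(out, &t, &rk[10], be);
}

The middle nine rounds map directly onto the new primitive, while the final round reuses aesenc_SB_SR_AK() since it omits MixColumns; the in-place call is safe at least on the generic path, which reads the whole source state into w0..w3 before writing the result.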