Module Name:    src
Committed By:   riastradh
Date:           Mon Jun 29 23:27:52 UTC 2020

Modified Files:
        src/sys/conf: files
        src/sys/rump/kern/lib/libcrypto: Makefile
Added Files:
        src/sys/crypto/aes: aes.h aes_bear.c aes_bear.h aes_ct.c aes_ct_dec.c
            aes_ct_enc.c aes_impl.c aes_rijndael.c aes_selftest.c files.aes
Removed Files:
        src/sys/crypto/rijndael: files.rijndael rijndael-alg-fst.c
            rijndael-api-fst.c rijndael.c rijndael_local.h

Log Message:
Rework AES in kernel to finally address CVE-2005-1797.

1. Rip out old variable-time reference implementation.
2. Replace it by BearSSL's constant-time 32-bit logic.
   => Obtained from commit dda1f8a0c46e15b4a235163470ff700b2f13dcc5.
   => We could conditionally adopt the 64-bit logic too, which would
      likely give a modest performance boost on 64-bit platforms
      without AES-NI, but that's a bit more trouble.
3. Select the AES implementation at boot-time; allow an MD override.
   => Use self-tests to verify basic correctness at boot.
   => The implementation selection policy is rather rudimentary at
      the moment but it is isolated to one place so it's easy to
      change later on.

This (a) plugs a host of timing attacks on, e.g., cgd, and (b) paves
the way to take advantage of CPU support for AES -- both things we
should've done a decade ago.  Downside: Computing AES takes 2-3x the
CPU time.  But that's what hardware support will be coming for.

Rudimentary measurement of performance impact done by:

mount -t tmpfs tmpfs /tmp
dd if=/dev/zero of=/tmp/disk bs=1m count=512
vnconfig -cv vnd0 /tmp/disk
cgdconfig -s cgd0 /dev/vnd0 aes-cbc 256 < /dev/zero
dd if=/dev/rcgd0d of=/dev/null bs=64k
dd if=/dev/zero of=/dev/rcgd0d bs=64k

The AES-CBC encryption performance impact is closer to 3x because it
is inherently sequential; the AES-CBC decryption impact is closer to
2x because the bitsliced AES logic can process two blocks at once.

Discussed on tech-kern:

https://mail-index.NetBSD.org/tech-kern/2020/06/18/msg026505.html


To generate a diff of this commit:
cvs rdiff -u -r1.1268 -r1.1269 src/sys/conf/files
cvs rdiff -u -r0 -r1.1 src/sys/crypto/aes/aes.h src/sys/crypto/aes/aes_bear.c \
    src/sys/crypto/aes/aes_bear.h src/sys/crypto/aes/aes_ct.c \
    src/sys/crypto/aes/aes_ct_dec.c src/sys/crypto/aes/aes_ct_enc.c \
    src/sys/crypto/aes/aes_impl.c src/sys/crypto/aes/aes_rijndael.c \
    src/sys/crypto/aes/aes_selftest.c src/sys/crypto/aes/files.aes
cvs rdiff -u -r1.7 -r0 src/sys/crypto/rijndael/files.rijndael \
    src/sys/crypto/rijndael/rijndael-alg-fst.c
cvs rdiff -u -r1.25 -r0 src/sys/crypto/rijndael/rijndael-api-fst.c
cvs rdiff -u -r1.8 -r0 src/sys/crypto/rijndael/rijndael.c
cvs rdiff -u -r1.6 -r0 src/sys/crypto/rijndael/rijndael_local.h
cvs rdiff -u -r1.6 -r1.7 src/sys/rump/kern/lib/libcrypto/Makefile

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/conf/files
diff -u src/sys/conf/files:1.1268 src/sys/conf/files:1.1269
--- src/sys/conf/files:1.1268	Sun Jun  7 09:45:19 2020
+++ src/sys/conf/files	Mon Jun 29 23:27:52 2020
@@ -1,4 +1,4 @@
-#	$NetBSD: files,v 1.1268 2020/06/07 09:45:19 maxv Exp $
+#	$NetBSD: files,v 1.1269 2020/06/29 23:27:52 riastradh Exp $
 #	@(#)files.newconf	7.5 (Berkeley) 5/10/93
 
 version 	20171118
@@ -200,10 +200,10 @@ defflag	opt_machdep.h		MACHDEP
 # use it.
 
 # Individual crypto transforms
+include "crypto/aes/files.aes"
 include "crypto/des/files.des"
 include "crypto/blowfish/files.blowfish"
 include "crypto/cast128/files.cast128"
-include "crypto/rijndael/files.rijndael"
 include "crypto/skipjack/files.skipjack"
 include "crypto/camellia/files.camellia"
 # General-purpose crypto processing framework.

Index: src/sys/rump/kern/lib/libcrypto/Makefile
diff -u src/sys/rump/kern/lib/libcrypto/Makefile:1.6 src/sys/rump/kern/lib/libcrypto/Makefile:1.7
--- src/sys/rump/kern/lib/libcrypto/Makefile:1.6	Thu Dec  5 03:57:55 2019
+++ src/sys/rump/kern/lib/libcrypto/Makefile	Mon Jun 29 23:27:52 2020
@@ -1,11 +1,11 @@
-#	$NetBSD: Makefile,v 1.6 2019/12/05 03:57:55 riastradh Exp $
+#	$NetBSD: Makefile,v 1.7 2020/06/29 23:27:52 riastradh Exp $
 #
 
-.PATH:	${.CURDIR}/../../../../crypto/blowfish				\
+.PATH:	${.CURDIR}/../../../../crypto/aes				\
+	${.CURDIR}/../../../../crypto/blowfish				\
 	${.CURDIR}/../../../../crypto/camellia				\
 	${.CURDIR}/../../../../crypto/cast128				\
 	${.CURDIR}/../../../../crypto/des				\
-	${.CURDIR}/../../../../crypto/rijndael				\
 	${.CURDIR}/../../../../crypto/skipjack
 
 LIB=	rumpkern_crypto
@@ -23,8 +23,14 @@ SRCS+=	cast128.c
 # DES
 SRCS+=	des_ecb.c des_setkey.c des_enc.c des_cbc.c des_module.c
 
-# rijndael
-SRCS+=	rijndael-alg-fst.c rijndael-api-fst.c rijndael.c
+# AES
+SRCS+=	aes_bear.c
+SRCS+=	aes_ct.c
+SRCS+=	aes_ct_dec.c
+SRCS+=	aes_ct_enc.c
+SRCS+=	aes_impl.c
+SRCS+=	aes_rijndael.c
+SRCS+=	aes_selftest.c
 
 # skipjack
 SRCS+=	skipjack.c

Added files:

Index: src/sys/crypto/aes/aes.h
diff -u /dev/null src/sys/crypto/aes/aes.h:1.1
--- /dev/null	Mon Jun 29 23:27:52 2020
+++ src/sys/crypto/aes/aes.h	Mon Jun 29 23:27:52 2020
@@ -0,0 +1,101 @@
+/*	$NetBSD: aes.h,v 1.1 2020/06/29 23:27:52 riastradh Exp $	*/
+
+/*-
+ * Copyright (c) 2020 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef	_CRYPTO_AES_AES_H
+#define	_CRYPTO_AES_AES_H
+
+#include <sys/types.h>
+#include <sys/cdefs.h>
+
+/*
+ * struct aes
+ *
+ *	Expanded round keys.
+ */
+struct aes {
+	uint32_t	aes_rk[60];
+} __aligned(16);
+
+#define	AES_128_NROUNDS	10
+#define	AES_192_NROUNDS	12
+#define	AES_256_NROUNDS	14
+
+struct aesenc {
+	struct aes	aese_aes;
+};
+
+struct aesdec {
+	struct aes	aesd_aes;
+};
+
+struct aes_impl {
+	const char *ai_name;
+	int	(*ai_probe)(void);
+	void	(*ai_setenckey)(struct aesenc *, const uint8_t *, uint32_t);
+	void	(*ai_setdeckey)(struct aesdec *, const uint8_t *, uint32_t);
+	void	(*ai_enc)(const struct aesenc *, const uint8_t[static 16],
+		    uint8_t[static 16], uint32_t);
+	void	(*ai_dec)(const struct aesdec *, const uint8_t[static 16],
+		    uint8_t[static 16], uint32_t);
+	void	(*ai_cbc_enc)(const struct aesenc *, const uint8_t[static 16],
+		    uint8_t[static 16], size_t, uint8_t[static 16], uint32_t);
+	void	(*ai_cbc_dec)(const struct aesdec *, const uint8_t[static 16],
+		    uint8_t[static 16], size_t, uint8_t[static 16], uint32_t);
+	void	(*ai_xts_enc)(const struct aesenc *, const uint8_t[static 16],
+		    uint8_t[static 16], size_t, uint8_t[static 16], uint32_t);
+	void	(*ai_xts_dec)(const struct aesdec *, const uint8_t[static 16],
+		    uint8_t[static 16], size_t, uint8_t[static 16], uint32_t);
+};
+
+int	aes_selftest(const struct aes_impl *);
+
+uint32_t aes_setenckey128(struct aesenc *, const uint8_t[static 16]);
+uint32_t aes_setenckey192(struct aesenc *, const uint8_t[static 24]);
+uint32_t aes_setenckey256(struct aesenc *, const uint8_t[static 32]);
+uint32_t aes_setdeckey128(struct aesdec *, const uint8_t[static 16]);
+uint32_t aes_setdeckey192(struct aesdec *, const uint8_t[static 24]);
+uint32_t aes_setdeckey256(struct aesdec *, const uint8_t[static 32]);
+
+void	aes_enc(const struct aesenc *, const uint8_t[static 16],
+	    uint8_t[static 16], uint32_t);
+void	aes_dec(const struct aesdec *, const uint8_t[static 16],
+	    uint8_t[static 16], uint32_t);
+
+void	aes_cbc_enc(struct aesenc *, const uint8_t[static 16],
+	    uint8_t[static 16], size_t, uint8_t[static 16], uint32_t);
+void	aes_cbc_dec(struct aesdec *, const uint8_t[static 16],
+	    uint8_t[static 16], size_t, uint8_t[static 16], uint32_t);
+
+void	aes_xts_enc(struct aesenc *, const uint8_t[static 16],
+	    uint8_t[static 16], size_t, uint8_t[static 16], uint32_t);
+void	aes_xts_dec(struct aesdec *, const uint8_t[static 16],
+	    uint8_t[static 16], size_t, uint8_t[static 16], uint32_t);
+
+void	aes_md_init(const struct aes_impl *);
+
+#endif	/* _CRYPTO_AES_AES_H */
Index: src/sys/crypto/aes/aes_bear.c
diff -u /dev/null src/sys/crypto/aes/aes_bear.c:1.1
--- /dev/null	Mon Jun 29 23:27:52 2020
+++ src/sys/crypto/aes/aes_bear.c	Mon Jun 29 23:27:52 2020
@@ -0,0 +1,617 @@
+/*	$NetBSD: aes_bear.c,v 1.1 2020/06/29 23:27:52 riastradh Exp $	*/
+
+/*-
+ * Copyright (c) 2020 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__KERNEL_RCSID(1, "$NetBSD: aes_bear.c,v 1.1 2020/06/29 23:27:52 riastradh Exp $");
+
+#include <sys/types.h>
+#include <sys/endian.h>
+#include <sys/systm.h>
+
+#include <crypto/aes/aes.h>
+#include <crypto/aes/aes_bear.h>
+
+static void
+aesbear_setkey(uint32_t rk[static 60], const void *key, uint32_t nrounds)
+{
+	size_t key_len;
+
+	switch (nrounds) {
+	case 10:
+		key_len = 16;
+		break;
+	case 12:
+		key_len = 24;
+		break;
+	case 14:
+		key_len = 32;
+		break;
+	default:
+		panic("invalid AES nrounds: %u", nrounds);
+	}
+
+	br_aes_ct_keysched(rk, key, key_len);
+}
+
+static void
+aesbear_setenckey(struct aesenc *enc, const uint8_t *key, uint32_t nrounds)
+{
+
+	aesbear_setkey(enc->aese_aes.aes_rk, key, nrounds);
+}
+
+static void
+aesbear_setdeckey(struct aesdec *dec, const uint8_t *key, uint32_t nrounds)
+{
+
+	/*
+	 * BearSSL computes InvMixColumns on the fly -- no need for
+	 * distinct decryption round keys.
+	 */
+	aesbear_setkey(dec->aesd_aes.aes_rk, key, nrounds);
+}
+
+static void
+aesbear_enc(const struct aesenc *enc, const uint8_t in[static 16],
+    uint8_t out[static 16], uint32_t nrounds)
+{
+	uint32_t sk_exp[120];
+	uint32_t q[8];
+
+	/* Expand round keys for bitslicing.  */
+	br_aes_ct_skey_expand(sk_exp, nrounds, enc->aese_aes.aes_rk);
+
+	/* Load input block interleaved with garbage block.  */
+	q[2*0] = le32dec(in + 4*0);
+	q[2*1] = le32dec(in + 4*1);
+	q[2*2] = le32dec(in + 4*2);
+	q[2*3] = le32dec(in + 4*3);
+	q[1] = q[3] = q[5] = q[7] = 0;
+
+	/* Transform to bitslice, encrypt, transform from bitslice.  */
+	br_aes_ct_ortho(q);
+	br_aes_ct_bitslice_encrypt(nrounds, sk_exp, q);
+	br_aes_ct_ortho(q);
+
+	/* Store output block.  */
+	le32enc(out + 4*0, q[2*0]);
+	le32enc(out + 4*1, q[2*1]);
+	le32enc(out + 4*2, q[2*2]);
+	le32enc(out + 4*3, q[2*3]);
+
+	/* Paranoia: Zero temporary buffers.  */
+	explicit_memset(sk_exp, 0, sizeof sk_exp);
+	explicit_memset(q, 0, sizeof q);
+}
+
+static void
+aesbear_dec(const struct aesdec *dec, const uint8_t in[static 16],
+    uint8_t out[static 16], uint32_t nrounds)
+{
+	uint32_t sk_exp[120];
+	uint32_t q[8];
+
+	/* Expand round keys for bitslicing.  */
+	br_aes_ct_skey_expand(sk_exp, nrounds, dec->aesd_aes.aes_rk);
+
+	/* Load input block interleaved with garbage.  */
+	q[2*0] = le32dec(in + 4*0);
+	q[2*1] = le32dec(in + 4*1);
+	q[2*2] = le32dec(in + 4*2);
+	q[2*3] = le32dec(in + 4*3);
+	q[1] = q[3] = q[5] = q[7] = 0;
+
+	/* Transform to bitslice, decrypt, transform from bitslice.  */
+	br_aes_ct_ortho(q);
+	br_aes_ct_bitslice_decrypt(nrounds, sk_exp, q);
+	br_aes_ct_ortho(q);
+
+	/* Store output block.  */
+	le32enc(out + 4*0, q[2*0]);
+	le32enc(out + 4*1, q[2*1]);
+	le32enc(out + 4*2, q[2*2]);
+	le32enc(out + 4*3, q[2*3]);
+
+	/* Paranoia: Zero temporary buffers.  */
+	explicit_memset(sk_exp, 0, sizeof sk_exp);
+	explicit_memset(q, 0, sizeof q);
+}
+
+static void
+aesbear_cbc_enc(const struct aesenc *enc, const uint8_t in[static 16],
+    uint8_t out[static 16], size_t nbytes, uint8_t iv[static 16],
+    uint32_t nrounds)
+{
+	uint32_t sk_exp[120];
+	uint32_t q[8];
+	uint32_t cv0, cv1, cv2, cv3;
+
+	KASSERT(nbytes % 16 == 0);
+
+	/* Skip if there's nothing to do.  */
+	if (nbytes == 0)
+		return;
+
+	/* Expand round keys for bitslicing.  */
+	br_aes_ct_skey_expand(sk_exp, nrounds, enc->aese_aes.aes_rk);
+
+	/* Initialize garbage block.  */
+	q[1] = q[3] = q[5] = q[7] = 0;
+
+	/* Load IV.  */
+	cv0 = le32dec(iv + 4*0);
+	cv1 = le32dec(iv + 4*1);
+	cv2 = le32dec(iv + 4*2);
+	cv3 = le32dec(iv + 4*3);
+
+	for (; nbytes; nbytes -= 16, in += 16, out += 16) {
+		/* Load input block and apply CV.  */
+		q[2*0] = cv0 ^ le32dec(in + 4*0);
+		q[2*1] = cv1 ^ le32dec(in + 4*1);
+		q[2*2] = cv2 ^ le32dec(in + 4*2);
+		q[2*3] = cv3 ^ le32dec(in + 4*3);
+
+		/* Transform to bitslice, encrypt, transform from bitslice.  */
+		br_aes_ct_ortho(q);
+		br_aes_ct_bitslice_encrypt(nrounds, sk_exp, q);
+		br_aes_ct_ortho(q);
+
+		/* Remember ciphertext as CV and store output block.  */
+		cv0 = q[2*0];
+		cv1 = q[2*1];
+		cv2 = q[2*2];
+		cv3 = q[2*3];
+		le32enc(out + 4*0, cv0);
+		le32enc(out + 4*1, cv1);
+		le32enc(out + 4*2, cv2);
+		le32enc(out + 4*3, cv3);
+	}
+
+	/* Store updated IV.  */
+	le32enc(iv + 4*0, cv0);
+	le32enc(iv + 4*1, cv1);
+	le32enc(iv + 4*2, cv2);
+	le32enc(iv + 4*3, cv3);
+
+	/* Paranoia: Zero temporary buffers.  */
+	explicit_memset(sk_exp, 0, sizeof sk_exp);
+	explicit_memset(q, 0, sizeof q);
+}
+
+static void
+aesbear_cbc_dec(const struct aesdec *dec, const uint8_t in[static 16],
+    uint8_t out[static 16], size_t nbytes, uint8_t iv[static 16],
+    uint32_t nrounds)
+{
+	uint32_t sk_exp[120];
+	uint32_t q[8];
+	uint32_t cv0, cv1, cv2, cv3, iv0, iv1, iv2, iv3;
+
+	KASSERT(nbytes % 16 == 0);
+
+	/* Skip if there's nothing to do.  */
+	if (nbytes == 0)
+		return;
+
+	/* Expand round keys for bitslicing.  */
+	br_aes_ct_skey_expand(sk_exp, nrounds, dec->aesd_aes.aes_rk);
+
+	/* Load the IV.  */
+	iv0 = le32dec(iv + 4*0);
+	iv1 = le32dec(iv + 4*1);
+	iv2 = le32dec(iv + 4*2);
+	iv3 = le32dec(iv + 4*3);
+
+	/* Load the last cipher block.  */
+	cv0 = le32dec(in + nbytes - 16 + 4*0);
+	cv1 = le32dec(in + nbytes - 16 + 4*1);
+	cv2 = le32dec(in + nbytes - 16 + 4*2);
+	cv3 = le32dec(in + nbytes - 16 + 4*3);
+
+	/* Store the updated IV.  */
+	le32enc(iv + 4*0, cv0);
+	le32enc(iv + 4*1, cv1);
+	le32enc(iv + 4*2, cv2);
+	le32enc(iv + 4*3, cv3);
+
+	/* Handle the last cipher block separately if odd number.  */
+	if (nbytes % 32) {
+		KASSERT(nbytes % 32 == 16);
+
+		/* Set up the last cipher block and a garbage block.  */
+		q[2*0] = cv0;
+		q[2*1] = cv1;
+		q[2*2] = cv2;
+		q[2*3] = cv3;
+		q[1] = q[3] = q[5] = q[7] = 0;
+
+		/* Decrypt.  */
+		br_aes_ct_ortho(q);
+		br_aes_ct_bitslice_decrypt(nrounds, sk_exp, q);
+		br_aes_ct_ortho(q);
+
+		/* If this was the only cipher block, we're done.  */
+		nbytes -= 16;
+		if (nbytes == 0)
+			goto out;
+
+		/*
+		 * Otherwise, load up the penultimate cipher block, and
+		 * store the output block.
+		 */
+		cv0 = le32dec(in + nbytes - 16 + 4*0);
+		cv1 = le32dec(in + nbytes - 16 + 4*1);
+		cv2 = le32dec(in + nbytes - 16 + 4*2);
+		cv3 = le32dec(in + nbytes - 16 + 4*3);
+		le32enc(out + nbytes + 4*0, cv0 ^ q[2*0]);
+		le32enc(out + nbytes + 4*1, cv1 ^ q[2*1]);
+		le32enc(out + nbytes + 4*2, cv2 ^ q[2*2]);
+		le32enc(out + nbytes + 4*3, cv3 ^ q[2*3]);
+	}
+
+	for (;;) {
+		KASSERT(nbytes >= 32);
+
+		/*
+		 * 1. Set up upper cipher block from cvN.
+		 * 2. Load lower cipher block into cvN and set it up.
+		 * 3. Decrypt.
+		 */
+		q[2*0 + 1] = cv0;
+		q[2*1 + 1] = cv1;
+		q[2*2 + 1] = cv2;
+		q[2*3 + 1] = cv3;
+		cv0 = q[2*0] = le32dec(in + nbytes - 32 + 4*0);
+		cv1 = q[2*1] = le32dec(in + nbytes - 32 + 4*1);
+		cv2 = q[2*2] = le32dec(in + nbytes - 32 + 4*2);
+		cv3 = q[2*3] = le32dec(in + nbytes - 32 + 4*3);
+
+		br_aes_ct_ortho(q);
+		br_aes_ct_bitslice_decrypt(nrounds, sk_exp, q);
+		br_aes_ct_ortho(q);
+
+		/* Store the upper output block.  */
+		le32enc(out + nbytes - 16 + 4*0, q[2*0 + 1] ^ cv0);
+		le32enc(out + nbytes - 16 + 4*1, q[2*1 + 1] ^ cv1);
+		le32enc(out + nbytes - 16 + 4*2, q[2*2 + 1] ^ cv2);
+		le32enc(out + nbytes - 16 + 4*3, q[2*3 + 1] ^ cv3);
+
+		/* Stop if we've reached the first output block.  */
+		nbytes -= 32;
+		if (nbytes == 0)
+			goto out;
+
+		/*
+		 * Load the preceding cipher block, and apply it as the
+		 * chaining value to this one.
+		 */
+		cv0 = le32dec(in + nbytes - 16 + 4*0);
+		cv1 = le32dec(in + nbytes - 16 + 4*1);
+		cv2 = le32dec(in + nbytes - 16 + 4*2);
+		cv3 = le32dec(in + nbytes - 16 + 4*3);
+		le32enc(out + nbytes + 4*0, q[2*0] ^ cv0);
+		le32enc(out + nbytes + 4*1, q[2*1] ^ cv1);
+		le32enc(out + nbytes + 4*2, q[2*2] ^ cv2);
+		le32enc(out + nbytes + 4*3, q[2*3] ^ cv3);
+	}
+
+out:	/* Store the first output block.  */
+	le32enc(out + 4*0, q[2*0] ^ iv0);
+	le32enc(out + 4*1, q[2*1] ^ iv1);
+	le32enc(out + 4*2, q[2*2] ^ iv2);
+	le32enc(out + 4*3, q[2*3] ^ iv3);
+
+	/* Paranoia: Zero temporary buffers.  */
+	explicit_memset(sk_exp, 0, sizeof sk_exp);
+	explicit_memset(q, 0, sizeof q);
+}
+
+static inline void
+aesbear_xts_update(uint32_t *t0, uint32_t *t1, uint32_t *t2, uint32_t *t3)
+{
+	uint32_t s0, s1, s2, s3;
+
+	s0 = *t0 >> 31;
+	s1 = *t1 >> 31;
+	s2 = *t2 >> 31;
+	s3 = *t3 >> 31;
+	*t0 = (*t0 << 1) ^ (-s3 & 0x87);
+	*t1 = (*t1 << 1) ^ s0;
+	*t2 = (*t2 << 1) ^ s1;
+	*t3 = (*t3 << 1) ^ s2;
+}
+
+static int
+aesbear_xts_update_selftest(void)
+{
+	static const struct {
+		uint32_t in[4], out[4];
+	} cases[] = {
+		{ {1}, {2} },
+		{ {0x80000000U,0,0,0}, {0,1,0,0} },
+		{ {0,0x80000000U,0,0}, {0,0,1,0} },
+		{ {0,0,0x80000000U,0}, {0,0,0,1} },
+		{ {0,0,0,0x80000000U}, {0x87,0,0,0} },
+		{ {0,0x80000000U,0,0x80000000U}, {0x87,0,1,0} },
+	};
+	unsigned i;
+	uint32_t t0, t1, t2, t3;
+
+	for (i = 0; i < sizeof(cases)/sizeof(cases[0]); i++) {
+		t0 = cases[i].in[0];
+		t1 = cases[i].in[1];
+		t2 = cases[i].in[2];
+		t3 = cases[i].in[3];
+		aesbear_xts_update(&t0, &t1, &t2, &t3);
+		if (t0 != cases[i].out[0] ||
+		    t1 != cases[i].out[1] ||
+		    t2 != cases[i].out[2] ||
+		    t3 != cases[i].out[3])
+			return -1;
+	}
+
+	/* Success!  */
+	return 0;
+}
+
+static void
+aesbear_xts_enc(const struct aesenc *enc, const uint8_t in[static 16],
+    uint8_t out[static 16], size_t nbytes, uint8_t tweak[static 16],
+    uint32_t nrounds)
+{
+	uint32_t sk_exp[120];
+	uint32_t q[8];
+	uint32_t t0, t1, t2, t3, u0, u1, u2, u3;
+
+	KASSERT(nbytes % 16 == 0);
+
+	/* Skip if there's nothing to do.  */
+	if (nbytes == 0)
+		return;
+
+	/* Expand round keys for bitslicing.  */
+	br_aes_ct_skey_expand(sk_exp, nrounds, enc->aese_aes.aes_rk);
+
+	/* Load tweak.  */
+	t0 = le32dec(tweak + 4*0);
+	t1 = le32dec(tweak + 4*1);
+	t2 = le32dec(tweak + 4*2);
+	t3 = le32dec(tweak + 4*3);
+
+	/* Handle the first block separately if odd number.  */
+	if (nbytes % 32) {
+		KASSERT(nbytes % 32 == 16);
+
+		/* Load up the first block and a garbage block.  */
+		q[2*0] = le32dec(in + 4*0) ^ t0;
+		q[2*1] = le32dec(in + 4*1) ^ t1;
+		q[2*2] = le32dec(in + 4*2) ^ t2;
+		q[2*3] = le32dec(in + 4*3) ^ t3;
+		q[1] = q[3] = q[5] = q[7] = 0;
+
+		/* Encrypt two blocks.  */
+		br_aes_ct_ortho(q);
+		br_aes_ct_bitslice_encrypt(nrounds, sk_exp, q);
+		br_aes_ct_ortho(q);
+
+		/* Store the first cipher block.  */
+		le32enc(out + 4*0, q[2*0] ^ t0);
+		le32enc(out + 4*1, q[2*1] ^ t1);
+		le32enc(out + 4*2, q[2*2] ^ t2);
+		le32enc(out + 4*3, q[2*3] ^ t3);
+
+		/* Advance to the next block.  */
+		aesbear_xts_update(&t0, &t1, &t2, &t3);
+		if ((nbytes -= 16) == 0)
+			goto out;
+		in += 16;
+		out += 16;
+	}
+
+	do {
+		KASSERT(nbytes >= 32);
+
+		/* Compute the upper tweak.  */
+		u0 = t0; u1 = t1; u2 = t2; u3 = t3;
+		aesbear_xts_update(&u0, &u1, &u2, &u3);
+
+		/* Load lower and upper blocks.  */
+		q[2*0] = le32dec(in + 4*0) ^ t0;
+		q[2*1] = le32dec(in + 4*1) ^ t1;
+		q[2*2] = le32dec(in + 4*2) ^ t2;
+		q[2*3] = le32dec(in + 4*3) ^ t3;
+		q[2*0 + 1] = le32dec(in + 16 + 4*0) ^ u0;
+		q[2*1 + 1] = le32dec(in + 16 + 4*1) ^ u1;
+		q[2*2 + 1] = le32dec(in + 16 + 4*2) ^ u2;
+		q[2*3 + 1] = le32dec(in + 16 + 4*3) ^ u3;
+
+		/* Encrypt two blocks.  */
+		br_aes_ct_ortho(q);
+		br_aes_ct_bitslice_encrypt(nrounds, sk_exp, q);
+		br_aes_ct_ortho(q);
+
+		/* Store lower and upper blocks.  */
+		le32enc(out + 4*0, q[2*0] ^ t0);
+		le32enc(out + 4*1, q[2*1] ^ t1);
+		le32enc(out + 4*2, q[2*2] ^ t2);
+		le32enc(out + 4*3, q[2*3] ^ t3);
+		le32enc(out + 16 + 4*0, q[2*0 + 1] ^ u0);
+		le32enc(out + 16 + 4*1, q[2*1 + 1] ^ u1);
+		le32enc(out + 16 + 4*2, q[2*2 + 1] ^ u2);
+		le32enc(out + 16 + 4*3, q[2*3 + 1] ^ u3);
+
+		/* Advance to the next pair of blocks.  */
+		t0 = u0; t1 = u1; t2 = u2; t3 = u3;
+		aesbear_xts_update(&t0, &t1, &t2, &t3);
+		in += 32;
+		out += 32;
+	} while (nbytes -= 32, nbytes);
+
+out:	/* Store the updated tweak.  */
+	le32enc(tweak + 4*0, t0);
+	le32enc(tweak + 4*1, t1);
+	le32enc(tweak + 4*2, t2);
+	le32enc(tweak + 4*3, t3);
+
+	/* Paranoia: Zero temporary buffers.  */
+	explicit_memset(sk_exp, 0, sizeof sk_exp);
+	explicit_memset(q, 0, sizeof q);
+}
+
+static void
+aesbear_xts_dec(const struct aesdec *dec, const uint8_t in[static 16],
+    uint8_t out[static 16], size_t nbytes, uint8_t tweak[static 16],
+    uint32_t nrounds)
+{
+	uint32_t sk_exp[120];
+	uint32_t q[8];
+	uint32_t t0, t1, t2, t3, u0, u1, u2, u3;
+
+	KASSERT(nbytes % 16 == 0);
+
+	/* Skip if there's nothing to do.  */
+	if (nbytes == 0)
+		return;
+
+	/* Expand round keys for bitslicing.  */
+	br_aes_ct_skey_expand(sk_exp, nrounds, dec->aesd_aes.aes_rk);
+
+	/* Load tweak.  */
+	t0 = le32dec(tweak + 4*0);
+	t1 = le32dec(tweak + 4*1);
+	t2 = le32dec(tweak + 4*2);
+	t3 = le32dec(tweak + 4*3);
+
+	/* Handle the first block separately if odd number.  */
+	if (nbytes % 32) {
+		KASSERT(nbytes % 32 == 16);
+
+		/* Load up the first block and a garbage block.  */
+		q[2*0] = le32dec(in + 4*0) ^ t0;
+		q[2*1] = le32dec(in + 4*1) ^ t1;
+		q[2*2] = le32dec(in + 4*2) ^ t2;
+		q[2*3] = le32dec(in + 4*3) ^ t3;
+		q[1] = q[3] = q[5] = q[7] = 0;
+
+		/* Decrypt two blocks.  */
+		br_aes_ct_ortho(q);
+		br_aes_ct_bitslice_decrypt(nrounds, sk_exp, q);
+		br_aes_ct_ortho(q);
+
+		/* Store the first plaintext block.  */
+		le32enc(out + 4*0, q[2*0] ^ t0);
+		le32enc(out + 4*1, q[2*1] ^ t1);
+		le32enc(out + 4*2, q[2*2] ^ t2);
+		le32enc(out + 4*3, q[2*3] ^ t3);
+
+		/* Advance to the next block.  */
+		aesbear_xts_update(&t0, &t1, &t2, &t3);
+		if ((nbytes -= 16) == 0)
+			goto out;
+		in += 16;
+		out += 16;
+	}
+
+	do {
+		KASSERT(nbytes >= 32);
+
+		/* Compute the upper tweak.  */
+		u0 = t0; u1 = t1; u2 = t2; u3 = t3;
+		aesbear_xts_update(&u0, &u1, &u2, &u3);
+
+		/* Load lower and upper blocks.  */
+		q[2*0] = le32dec(in + 4*0) ^ t0;
+		q[2*1] = le32dec(in + 4*1) ^ t1;
+		q[2*2] = le32dec(in + 4*2) ^ t2;
+		q[2*3] = le32dec(in + 4*3) ^ t3;
+		q[2*0 + 1] = le32dec(in + 16 + 4*0) ^ u0;
+		q[2*1 + 1] = le32dec(in + 16 + 4*1) ^ u1;
+		q[2*2 + 1] = le32dec(in + 16 + 4*2) ^ u2;
+		q[2*3 + 1] = le32dec(in + 16 + 4*3) ^ u3;
+
+		/* Decrypt two blocks.  */
+		br_aes_ct_ortho(q);
+		br_aes_ct_bitslice_decrypt(nrounds, sk_exp, q);
+		br_aes_ct_ortho(q);
+
+		/* Store lower and upper blocks.  */
+		le32enc(out + 4*0, q[2*0] ^ t0);
+		le32enc(out + 4*1, q[2*1] ^ t1);
+		le32enc(out + 4*2, q[2*2] ^ t2);
+		le32enc(out + 4*3, q[2*3] ^ t3);
+		le32enc(out + 16 + 4*0, q[2*0 + 1] ^ u0);
+		le32enc(out + 16 + 4*1, q[2*1 + 1] ^ u1);
+		le32enc(out + 16 + 4*2, q[2*2 + 1] ^ u2);
+		le32enc(out + 16 + 4*3, q[2*3 + 1] ^ u3);
+
+		/* Advance to the next pair of blocks.  */
+		t0 = u0; t1 = u1; t2 = u2; t3 = u3;
+		aesbear_xts_update(&t0, &t1, &t2, &t3);
+		in += 32;
+		out += 32;
+	} while (nbytes -= 32, nbytes);
+
+out:	/* Store the updated tweak.  */
+	le32enc(tweak + 4*0, t0);
+	le32enc(tweak + 4*1, t1);
+	le32enc(tweak + 4*2, t2);
+	le32enc(tweak + 4*3, t3);
+
+	/* Paranoia: Zero temporary buffers.  */
+	explicit_memset(sk_exp, 0, sizeof sk_exp);
+	explicit_memset(q, 0, sizeof q);
+}
+
+static int
+aesbear_probe(void)
+{
+
+	if (aesbear_xts_update_selftest())
+		return -1;
+
+	/* XXX test br_aes_ct_bitslice_decrypt */
+	/* XXX test br_aes_ct_bitslice_encrypt */
+	/* XXX test br_aes_ct_keysched */
+	/* XXX test br_aes_ct_ortho */
+	/* XXX test br_aes_ct_skey_expand */
+
+	return 0;
+}
+
+struct aes_impl aes_bear_impl = {
+	.ai_name = "BearSSL aes_ct",
+	.ai_probe = aesbear_probe,
+	.ai_setenckey = aesbear_setenckey,
+	.ai_setdeckey = aesbear_setdeckey,
+	.ai_enc = aesbear_enc,
+	.ai_dec = aesbear_dec,
+	.ai_cbc_enc = aesbear_cbc_enc,
+	.ai_cbc_dec = aesbear_cbc_dec,
+	.ai_xts_enc = aesbear_xts_enc,
+	.ai_xts_dec = aesbear_xts_dec,
+};
Index: src/sys/crypto/aes/aes_bear.h
diff -u /dev/null src/sys/crypto/aes/aes_bear.h:1.1
--- /dev/null	Mon Jun 29 23:27:52 2020
+++ src/sys/crypto/aes/aes_bear.h	Mon Jun 29 23:27:52 2020
@@ -0,0 +1,50 @@
+/*	$NetBSD: aes_bear.h,v 1.1 2020/06/29 23:27:52 riastradh Exp $	*/
+
+/*-
+ * Copyright (c) 2020 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef	_CRYPTO_AES_AES_BEAR_H
+#define	_CRYPTO_AES_AES_BEAR_H
+
+#include <sys/types.h>
+#include <sys/endian.h>
+
+#include <crypto/aes/aes.h>
+
+#define	br_dec32le	le32dec
+#define	br_enc32le	le32enc
+
+void	br_aes_ct_bitslice_Sbox(uint32_t *);
+void	br_aes_ct_bitslice_invSbox(uint32_t *);
+void	br_aes_ct_ortho(uint32_t *);
+u_int	br_aes_ct_keysched(uint32_t *, const void *, size_t);
+void	br_aes_ct_skey_expand(uint32_t *, unsigned, const uint32_t *);
+void	br_aes_ct_bitslice_encrypt(unsigned, const uint32_t *, uint32_t *);
+void	br_aes_ct_bitslice_decrypt(unsigned, const uint32_t *, uint32_t *);
+
+extern struct aes_impl	aes_bear_impl;
+
+#endif	/* _CRYPTO_AES_AES_BEAR_H */
Index: src/sys/crypto/aes/aes_ct.c
diff -u /dev/null src/sys/crypto/aes/aes_ct.c:1.1
--- /dev/null	Mon Jun 29 23:27:52 2020
+++ src/sys/crypto/aes/aes_ct.c	Mon Jun 29 23:27:52 2020
@@ -0,0 +1,335 @@
+/*	$NetBSD: aes_ct.c,v 1.1 2020/06/29 23:27:52 riastradh Exp $	*/
+
+/*
+ * Copyright (c) 2016 Thomas Pornin <por...@bolet.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining 
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be 
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <sys/cdefs.h>
+__KERNEL_RCSID(1, "$NetBSD: aes_ct.c,v 1.1 2020/06/29 23:27:52 riastradh Exp $");
+
+#include <sys/types.h>
+
+#include <crypto/aes/aes_bear.h>
+
+/* see inner.h */
+void
+br_aes_ct_bitslice_Sbox(uint32_t *q)
+{
+	/*
+	 * This S-box implementation is a straightforward translation of
+	 * the circuit described by Boyar and Peralta in "A new
+	 * combinational logic minimization technique with applications
+	 * to cryptology" (https://eprint.iacr.org/2009/191.pdf).
+	 *
+	 * Note that variables x* (input) and s* (output) are numbered
+	 * in "reverse" order (x0 is the high bit, x7 is the low bit).
+	 */
+	/*
+	 * The state is bitsliced (see br_aes_ct_ortho): each q[i] is a
+	 * 32-bit lane carrying one bit position of many state bytes at
+	 * once, so the boolean circuit below evaluates all of those
+	 * S-box lookups in parallel using only AND/XOR/NOT -- constant
+	 * time, with no data-dependent memory access (the timing leak
+	 * of table-based AES, cf. CVE-2005-1797).
+	 */
+
+	uint32_t x0, x1, x2, x3, x4, x5, x6, x7;
+	uint32_t y1, y2, y3, y4, y5, y6, y7, y8, y9;
+	uint32_t y10, y11, y12, y13, y14, y15, y16, y17, y18, y19;
+	uint32_t y20, y21;
+	uint32_t z0, z1, z2, z3, z4, z5, z6, z7, z8, z9;
+	uint32_t z10, z11, z12, z13, z14, z15, z16, z17;
+	uint32_t t0, t1, t2, t3, t4, t5, t6, t7, t8, t9;
+	uint32_t t10, t11, t12, t13, t14, t15, t16, t17, t18, t19;
+	uint32_t t20, t21, t22, t23, t24, t25, t26, t27, t28, t29;
+	uint32_t t30, t31, t32, t33, t34, t35, t36, t37, t38, t39;
+	uint32_t t40, t41, t42, t43, t44, t45, t46, t47, t48, t49;
+	uint32_t t50, t51, t52, t53, t54, t55, t56, t57, t58, t59;
+	uint32_t t60, t61, t62, t63, t64, t65, t66, t67;
+	uint32_t s0, s1, s2, s3, s4, s5, s6, s7;
+
+	x0 = q[7];
+	x1 = q[6];
+	x2 = q[5];
+	x3 = q[4];
+	x4 = q[3];
+	x5 = q[2];
+	x6 = q[1];
+	x7 = q[0];
+
+	/*
+	 * Top linear transformation.
+	 */
+	y14 = x3 ^ x5;
+	y13 = x0 ^ x6;
+	y9 = x0 ^ x3;
+	y8 = x0 ^ x5;
+	t0 = x1 ^ x2;
+	y1 = t0 ^ x7;
+	y4 = y1 ^ x3;
+	y12 = y13 ^ y14;
+	y2 = y1 ^ x0;
+	y5 = y1 ^ x6;
+	y3 = y5 ^ y8;
+	t1 = x4 ^ y12;
+	y15 = t1 ^ x5;
+	y20 = t1 ^ x1;
+	y6 = y15 ^ x7;
+	y10 = y15 ^ t0;
+	y11 = y20 ^ y9;
+	y7 = x7 ^ y11;
+	y17 = y10 ^ y11;
+	y19 = y10 ^ y8;
+	y16 = t0 ^ y11;
+	y21 = y13 ^ y16;
+	y18 = x0 ^ y16;
+
+	/*
+	 * Non-linear section.
+	 */
+	t2 = y12 & y15;
+	t3 = y3 & y6;
+	t4 = t3 ^ t2;
+	t5 = y4 & x7;
+	t6 = t5 ^ t2;
+	t7 = y13 & y16;
+	t8 = y5 & y1;
+	t9 = t8 ^ t7;
+	t10 = y2 & y7;
+	t11 = t10 ^ t7;
+	t12 = y9 & y11;
+	t13 = y14 & y17;
+	t14 = t13 ^ t12;
+	t15 = y8 & y10;
+	t16 = t15 ^ t12;
+	t17 = t4 ^ t14;
+	t18 = t6 ^ t16;
+	t19 = t9 ^ t14;
+	t20 = t11 ^ t16;
+	t21 = t17 ^ y20;
+	t22 = t18 ^ y19;
+	t23 = t19 ^ y21;
+	t24 = t20 ^ y18;
+
+	t25 = t21 ^ t22;
+	t26 = t21 & t23;
+	t27 = t24 ^ t26;
+	t28 = t25 & t27;
+	t29 = t28 ^ t22;
+	t30 = t23 ^ t24;
+	t31 = t22 ^ t26;
+	t32 = t31 & t30;
+	t33 = t32 ^ t24;
+	t34 = t23 ^ t33;
+	t35 = t27 ^ t33;
+	t36 = t24 & t35;
+	t37 = t36 ^ t34;
+	t38 = t27 ^ t36;
+	t39 = t29 & t38;
+	t40 = t25 ^ t39;
+
+	t41 = t40 ^ t37;
+	t42 = t29 ^ t33;
+	t43 = t29 ^ t40;
+	t44 = t33 ^ t37;
+	t45 = t42 ^ t41;
+	z0 = t44 & y15;
+	z1 = t37 & y6;
+	z2 = t33 & x7;
+	z3 = t43 & y16;
+	z4 = t40 & y1;
+	z5 = t29 & y7;
+	z6 = t42 & y11;
+	z7 = t45 & y17;
+	z8 = t41 & y10;
+	z9 = t44 & y12;
+	z10 = t37 & y3;
+	z11 = t33 & y4;
+	z12 = t43 & y13;
+	z13 = t40 & y5;
+	z14 = t29 & y2;
+	z15 = t42 & y9;
+	z16 = t45 & y14;
+	z17 = t41 & y8;
+
+	/*
+	 * Bottom linear transformation.
+	 */
+	t46 = z15 ^ z16;
+	t47 = z10 ^ z11;
+	t48 = z5 ^ z13;
+	t49 = z9 ^ z10;
+	t50 = z2 ^ z12;
+	t51 = z2 ^ z5;
+	t52 = z7 ^ z8;
+	t53 = z0 ^ z3;
+	t54 = z6 ^ z7;
+	t55 = z16 ^ z17;
+	t56 = z12 ^ t48;
+	t57 = t50 ^ t53;
+	t58 = z4 ^ t46;
+	t59 = z3 ^ t54;
+	t60 = t46 ^ t57;
+	t61 = z14 ^ t57;
+	t62 = t52 ^ t58;
+	t63 = t49 ^ t58;
+	t64 = z4 ^ t59;
+	t65 = t61 ^ t62;
+	t66 = z1 ^ t63;
+	s0 = t59 ^ t63;
+	s6 = t56 ^ ~t62;
+	s7 = t48 ^ ~t60;
+	t67 = t64 ^ t65;
+	s3 = t53 ^ t66;
+	s4 = t51 ^ t66;
+	s5 = t47 ^ t65;
+	s1 = t64 ^ ~s3;
+	s2 = t55 ^ ~t67;
+
+	/* Store outputs back in the same reversed-bit-order layout. */
+	q[7] = s0;
+	q[6] = s1;
+	q[5] = s2;
+	q[4] = s3;
+	q[3] = s4;
+	q[2] = s5;
+	q[1] = s6;
+	q[0] = s7;
+}
+
+/* see inner.h */
+void
+br_aes_ct_ortho(uint32_t *q)
+{
+	/*
+	 * Orthogonalize q[0..7]: transpose the 8x8 bit matrices so the
+	 * state moves between the normal byte-oriented layout and the
+	 * bitsliced layout used by the Sbox/round functions.  The
+	 * transform is its own inverse (applying it twice restores the
+	 * original layout -- cf. sub_word(), which relies on this).
+	 */
+	/* Swap s-bit groups between x and y: cl/ch mask the low/high halves. */
+#define SWAPN(cl, ch, s, x, y)   do { \
+		uint32_t a, b; \
+		a = (x); \
+		b = (y); \
+		(x) = (a & (uint32_t)cl) | ((b & (uint32_t)cl) << (s)); \
+		(y) = ((a & (uint32_t)ch) >> (s)) | (b & (uint32_t)ch); \
+	} while (0)
+
+#define SWAP2(x, y)   SWAPN(0x55555555, 0xAAAAAAAA, 1, x, y)
+#define SWAP4(x, y)   SWAPN(0x33333333, 0xCCCCCCCC, 2, x, y)
+#define SWAP8(x, y)   SWAPN(0x0F0F0F0F, 0xF0F0F0F0, 4, x, y)
+
+	SWAP2(q[0], q[1]);
+	SWAP2(q[2], q[3]);
+	SWAP2(q[4], q[5]);
+	SWAP2(q[6], q[7]);
+
+	SWAP4(q[0], q[2]);
+	SWAP4(q[1], q[3]);
+	SWAP4(q[4], q[6]);
+	SWAP4(q[5], q[7]);
+
+	SWAP8(q[0], q[4]);
+	SWAP8(q[1], q[5]);
+	SWAP8(q[2], q[6]);
+	SWAP8(q[3], q[7]);
+}
+
+/* AES key-schedule round constants: successive powers of x in GF(2^8). */
+static const unsigned char Rcon[] = {
+	0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1B, 0x36
+};
+
+/*
+ * Apply the AES S-box to each byte of x (FIPS-197 SubWord), in
+ * constant time: replicate x across all eight bitslice lanes, convert
+ * to bitsliced form, run the S-box circuit, convert back (ortho is an
+ * involution), and read out one lane.
+ */
+static uint32_t
+sub_word(uint32_t x)
+{
+	uint32_t q[8];
+	int i;
+
+	for (i = 0; i < 8; i ++) {
+		q[i] = x;
+	}
+	br_aes_ct_ortho(q);
+	br_aes_ct_bitslice_Sbox(q);
+	br_aes_ct_ortho(q);
+	return q[0];
+}
+
+/* see inner.h */
+/*
+ * Expand `key' (16, 24, or 32 bytes) into the compressed bitsliced key
+ * schedule `comp_skey' per FIPS-197.  Returns the number of rounds
+ * (10/12/14), or 0 if key_len is invalid.
+ */
+unsigned
+br_aes_ct_keysched(uint32_t *comp_skey, const void *key, size_t key_len)
+{
+	unsigned num_rounds;
+	int i, j, k, nk, nkf;
+	uint32_t tmp;
+	/* Two bitslice lanes per subkey word; 60 words max (AES-256). */
+	uint32_t skey[120];
+
+	switch (key_len) {
+	case 16:
+		num_rounds = 10;
+		break;
+	case 24:
+		num_rounds = 12;
+		break;
+	case 32:
+		num_rounds = 14;
+		break;
+	default:
+		/* abort(); */
+		return 0;
+	}
+	nk = (int)(key_len >> 2);		/* key words (Nk) */
+	nkf = (int)((num_rounds + 1) << 2);	/* total subkey words */
+	tmp = 0;
+	/* Load the raw key, duplicating each word into both lanes. */
+	for (i = 0; i < nk; i ++) {
+		tmp = br_dec32le((const unsigned char *)key + (i << 2));
+		skey[(i << 1) + 0] = tmp;
+		skey[(i << 1) + 1] = tmp;
+	}
+	/* Standard key expansion: RotWord/SubWord/Rcon every Nk words. */
+	for (i = nk, j = 0, k = 0; i < nkf; i ++) {
+		if (j == 0) {
+			tmp = (tmp << 24) | (tmp >> 8);
+			tmp = sub_word(tmp) ^ Rcon[k];
+		} else if (nk > 6 && j == 4) {
+			/* Extra SubWord for AES-256 (Nk = 8). */
+			tmp = sub_word(tmp);
+		}
+		tmp ^= skey[(i - nk) << 1];
+		skey[(i << 1) + 0] = tmp;
+		skey[(i << 1) + 1] = tmp;
+		if (++ j == nk) {
+			j = 0;
+			k ++;
+		}
+	}
+	/* Bitslice each round key (4 words = 8 lanes at a time). */
+	for (i = 0; i < nkf; i += 4) {
+		br_aes_ct_ortho(skey + (i << 1));
+	}
+	/*
+	 * Compress: since both lanes held the same word, keep the even
+	 * bits of lane 0 and the odd bits of lane 1 in a single word.
+	 * br_aes_ct_skey_expand() undoes this.
+	 */
+	for (i = 0, j = 0; i < nkf; i ++, j += 2) {
+		comp_skey[i] = (skey[j + 0] & 0x55555555)
+			| (skey[j + 1] & 0xAAAAAAAA);
+	}
+	return num_rounds;
+}
+
+/* see inner.h */
+/*
+ * Expand the compressed key schedule from br_aes_ct_keysched() back to
+ * full bitsliced subkeys: each compressed word yields two lane words,
+ * with every bit duplicated into its neighboring position.
+ */
+void
+br_aes_ct_skey_expand(uint32_t *skey,
+	unsigned num_rounds, const uint32_t *comp_skey)
+{
+	unsigned u, v, n;
+
+	n = (num_rounds + 1) << 2;
+	for (u = 0, v = 0; u < n; u ++, v += 2) {
+		uint32_t x, y;
+
+		x = y = comp_skey[u];
+		x &= 0x55555555;
+		skey[v + 0] = x | (x << 1);	/* even bits, doubled */
+		y &= 0xAAAAAAAA;
+		skey[v + 1] = y | (y >> 1);	/* odd bits, doubled */
+	}
+}
Index: src/sys/crypto/aes/aes_ct_dec.c
diff -u /dev/null src/sys/crypto/aes/aes_ct_dec.c:1.1
--- /dev/null	Mon Jun 29 23:27:52 2020
+++ src/sys/crypto/aes/aes_ct_dec.c	Mon Jun 29 23:27:52 2020
@@ -0,0 +1,177 @@
+/*	$NetBSD: aes_ct_dec.c,v 1.1 2020/06/29 23:27:52 riastradh Exp $	*/
+
+/*
+ * Copyright (c) 2016 Thomas Pornin <por...@bolet.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining 
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be 
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <sys/cdefs.h>
+__KERNEL_RCSID(1, "$NetBSD: aes_ct_dec.c,v 1.1 2020/06/29 23:27:52 riastradh Exp $");
+
+#include <sys/types.h>
+
+#include <crypto/aes/aes_bear.h>
+
+/* see inner.h */
+void
+br_aes_ct_bitslice_invSbox(uint32_t *q)
+{
+	/*
+	 * AES S-box is:
+	 *   S(x) = A(I(x)) ^ 0x63
+	 * where I() is inversion in GF(256), and A() is a linear
+	 * transform (0 is formally defined to be its own inverse).
+	 * Since inversion is an involution, the inverse S-box can be
+	 * computed from the S-box as:
+	 *   iS(x) = B(S(B(x ^ 0x63)) ^ 0x63)
+	 * where B() is the inverse of A(). Indeed, for any y in GF(256):
+	 *   iS(S(y)) = B(A(I(B(A(I(y)) ^ 0x63 ^ 0x63))) ^ 0x63 ^ 0x63) = y
+	 *
+	 * Note: we reuse the implementation of the forward S-box,
+	 * instead of duplicating it here, so that total code size is
+	 * lower. By merging the B() transforms into the S-box circuit
+	 * we could make faster CBC decryption, but CBC decryption is
+	 * already quite faster than CBC encryption because we can
+	 * process two blocks in parallel.
+	 */
+	uint32_t q0, q1, q2, q3, q4, q5, q6, q7;
+
+	/*
+	 * First pre-transform: bitsliced B(x ^ 0x63).  The ~ on lanes
+	 * 0, 1, 5, 6 folds in the 0x63 constant.
+	 */
+	q0 = ~q[0];
+	q1 = ~q[1];
+	q2 = q[2];
+	q3 = q[3];
+	q4 = q[4];
+	q5 = ~q[5];
+	q6 = ~q[6];
+	q7 = q[7];
+	q[7] = q1 ^ q4 ^ q6;
+	q[6] = q0 ^ q3 ^ q5;
+	q[5] = q7 ^ q2 ^ q4;
+	q[4] = q6 ^ q1 ^ q3;
+	q[3] = q5 ^ q0 ^ q2;
+	q[2] = q4 ^ q7 ^ q1;
+	q[1] = q3 ^ q6 ^ q0;
+	q[0] = q2 ^ q5 ^ q7;
+
+	br_aes_ct_bitslice_Sbox(q);
+
+	/* Post-transform: same B(x ^ 0x63) circuit applied again. */
+	q0 = ~q[0];
+	q1 = ~q[1];
+	q2 = q[2];
+	q3 = q[3];
+	q4 = q[4];
+	q5 = ~q[5];
+	q6 = ~q[6];
+	q7 = q[7];
+	q[7] = q1 ^ q4 ^ q6;
+	q[6] = q0 ^ q3 ^ q5;
+	q[5] = q7 ^ q2 ^ q4;
+	q[4] = q6 ^ q1 ^ q3;
+	q[3] = q5 ^ q0 ^ q2;
+	q[2] = q4 ^ q7 ^ q1;
+	q[1] = q3 ^ q6 ^ q0;
+	q[0] = q2 ^ q5 ^ q7;
+}
+
+/* XOR the bitsliced round key into the bitsliced state (AddRoundKey). */
+static void
+add_round_key(uint32_t *q, const uint32_t *sk)
+{
+	int i;
+
+	for (i = 0; i < 8; i ++) {
+		q[i] ^= sk[i];
+	}
+}
+
+/*
+ * Inverse ShiftRows on the bitsliced state: each lane packs the rows
+ * as bit fields, so the row rotations become the mask-and-shift
+ * rearrangement below, applied identically to every lane.
+ */
+static void
+inv_shift_rows(uint32_t *q)
+{
+	int i;
+
+	for (i = 0; i < 8; i ++) {
+		uint32_t x;
+
+		x = q[i];
+		q[i] = (x & 0x000000FF)
+			| ((x & 0x00003F00) << 2) | ((x & 0x0000C000) >> 6)
+			| ((x & 0x000F0000) << 4) | ((x & 0x00F00000) >> 4)
+			| ((x & 0x03000000) << 6) | ((x & 0xFC000000) >> 2);
+	}
+}
+
+/* Rotate a 32-bit word by 16 (its own inverse). */
+static inline uint32_t
+rotr16(uint32_t x)
+{
+	return (x << 16) | (x >> 16);
+}
+
+/*
+ * Inverse MixColumns on the bitsliced state.  The r* values are the
+ * q* lanes rotated by 8 bits; the XOR combinations below implement
+ * the GF(2^8) multiplications by 0x0E/0x0B/0x0D/0x09 bit-plane by
+ * bit-plane, in constant time.
+ */
+static void
+inv_mix_columns(uint32_t *q)
+{
+	uint32_t q0, q1, q2, q3, q4, q5, q6, q7;
+	uint32_t r0, r1, r2, r3, r4, r5, r6, r7;
+
+	q0 = q[0];
+	q1 = q[1];
+	q2 = q[2];
+	q3 = q[3];
+	q4 = q[4];
+	q5 = q[5];
+	q6 = q[6];
+	q7 = q[7];
+	r0 = (q0 >> 8) | (q0 << 24);
+	r1 = (q1 >> 8) | (q1 << 24);
+	r2 = (q2 >> 8) | (q2 << 24);
+	r3 = (q3 >> 8) | (q3 << 24);
+	r4 = (q4 >> 8) | (q4 << 24);
+	r5 = (q5 >> 8) | (q5 << 24);
+	r6 = (q6 >> 8) | (q6 << 24);
+	r7 = (q7 >> 8) | (q7 << 24);
+
+	q[0] = q5 ^ q6 ^ q7 ^ r0 ^ r5 ^ r7 ^ rotr16(q0 ^ q5 ^ q6 ^ r0 ^ r5);
+	q[1] = q0 ^ q5 ^ r0 ^ r1 ^ r5 ^ r6 ^ r7 ^ rotr16(q1 ^ q5 ^ q7 ^ r1 ^ r5 ^ r6);
+	q[2] = q0 ^ q1 ^ q6 ^ r1 ^ r2 ^ r6 ^ r7 ^ rotr16(q0 ^ q2 ^ q6 ^ r2 ^ r6 ^ r7);
+	q[3] = q0 ^ q1 ^ q2 ^ q5 ^ q6 ^ r0 ^ r2 ^ r3 ^ r5 ^ rotr16(q0 ^ q1 ^ q3 ^ q5 ^ q6 ^ q7 ^ r0 ^ r3 ^ r5 ^ r7);
+	q[4] = q1 ^ q2 ^ q3 ^ q5 ^ r1 ^ r3 ^ r4 ^ r5 ^ r6 ^ r7 ^ rotr16(q1 ^ q2 ^ q4 ^ q5 ^ q7 ^ r1 ^ r4 ^ r5 ^ r6);
+	q[5] = q2 ^ q3 ^ q4 ^ q6 ^ r2 ^ r4 ^ r5 ^ r6 ^ r7 ^ rotr16(q2 ^ q3 ^ q5 ^ q6 ^ r2 ^ r5 ^ r6 ^ r7);
+	q[6] = q3 ^ q4 ^ q5 ^ q7 ^ r3 ^ r5 ^ r6 ^ r7 ^ rotr16(q3 ^ q4 ^ q6 ^ q7 ^ r3 ^ r6 ^ r7);
+	q[7] = q4 ^ q5 ^ q6 ^ r4 ^ r6 ^ r7 ^ rotr16(q4 ^ q5 ^ q7 ^ r4 ^ r7);
+}
+
+/* see inner.h */
+/*
+ * Decrypt the bitsliced state q in place: the AES decryption round
+ * sequence (FIPS-197 InvCipher) over the expanded bitsliced schedule
+ * skey, walking the round keys from last to first.
+ */
+void
+br_aes_ct_bitslice_decrypt(unsigned num_rounds,
+	const uint32_t *skey, uint32_t *q)
+{
+	unsigned u;
+
+	add_round_key(q, skey + (num_rounds << 3));
+	for (u = num_rounds - 1; u > 0; u --) {
+		inv_shift_rows(q);
+		br_aes_ct_bitslice_invSbox(q);
+		add_round_key(q, skey + (u << 3));
+		inv_mix_columns(q);
+	}
+	/* Final round: no InvMixColumns. */
+	inv_shift_rows(q);
+	br_aes_ct_bitslice_invSbox(q);
+	add_round_key(q, skey);
+}
Index: src/sys/crypto/aes/aes_ct_enc.c
diff -u /dev/null src/sys/crypto/aes/aes_ct_enc.c:1.1
--- /dev/null	Mon Jun 29 23:27:52 2020
+++ src/sys/crypto/aes/aes_ct_enc.c	Mon Jun 29 23:27:52 2020
@@ -0,0 +1,119 @@
+/*	$NetBSD: aes_ct_enc.c,v 1.1 2020/06/29 23:27:52 riastradh Exp $	*/
+
+/*
+ * Copyright (c) 2016 Thomas Pornin <por...@bolet.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining 
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be 
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <sys/cdefs.h>
+__KERNEL_RCSID(1, "$NetBSD: aes_ct_enc.c,v 1.1 2020/06/29 23:27:52 riastradh Exp $");
+
+#include <sys/types.h>
+
+#include <crypto/aes/aes_bear.h>
+
+/* XOR the bitsliced round key into the bitsliced state (AddRoundKey). */
+static inline void
+add_round_key(uint32_t *q, const uint32_t *sk)
+{
+	q[0] ^= sk[0];
+	q[1] ^= sk[1];
+	q[2] ^= sk[2];
+	q[3] ^= sk[3];
+	q[4] ^= sk[4];
+	q[5] ^= sk[5];
+	q[6] ^= sk[6];
+	q[7] ^= sk[7];
+}
+
+/*
+ * ShiftRows on the bitsliced state: the rows are packed as bit fields
+ * within each lane, so the row rotations become this fixed
+ * mask-and-shift rearrangement (the mirror image of inv_shift_rows in
+ * aes_ct_dec.c).
+ */
+static inline void
+shift_rows(uint32_t *q)
+{
+	int i;
+
+	for (i = 0; i < 8; i ++) {
+		uint32_t x;
+
+		x = q[i];
+		q[i] = (x & 0x000000FF)
+			| ((x & 0x0000FC00) >> 2) | ((x & 0x00000300) << 6)
+			| ((x & 0x00F00000) >> 4) | ((x & 0x000F0000) << 4)
+			| ((x & 0xC0000000) >> 6) | ((x & 0x3F000000) << 2);
+	}
+}
+
+/* Rotate a 32-bit word by 16 (its own inverse). */
+static inline uint32_t
+rotr16(uint32_t x)
+{
+	return (x << 16) | (x >> 16);
+}
+
+/*
+ * MixColumns on the bitsliced state.  r* are the q* lanes rotated by
+ * 8 bits; the XOR patterns implement the GF(2^8) multiplications by
+ * 0x02 and 0x03 bit-plane by bit-plane, in constant time.
+ */
+static inline void
+mix_columns(uint32_t *q)
+{
+	uint32_t q0, q1, q2, q3, q4, q5, q6, q7;
+	uint32_t r0, r1, r2, r3, r4, r5, r6, r7;
+
+	q0 = q[0];
+	q1 = q[1];
+	q2 = q[2];
+	q3 = q[3];
+	q4 = q[4];
+	q5 = q[5];
+	q6 = q[6];
+	q7 = q[7];
+	r0 = (q0 >> 8) | (q0 << 24);
+	r1 = (q1 >> 8) | (q1 << 24);
+	r2 = (q2 >> 8) | (q2 << 24);
+	r3 = (q3 >> 8) | (q3 << 24);
+	r4 = (q4 >> 8) | (q4 << 24);
+	r5 = (q5 >> 8) | (q5 << 24);
+	r6 = (q6 >> 8) | (q6 << 24);
+	r7 = (q7 >> 8) | (q7 << 24);
+
+	q[0] = q7 ^ r7 ^ r0 ^ rotr16(q0 ^ r0);
+	q[1] = q0 ^ r0 ^ q7 ^ r7 ^ r1 ^ rotr16(q1 ^ r1);
+	q[2] = q1 ^ r1 ^ r2 ^ rotr16(q2 ^ r2);
+	q[3] = q2 ^ r2 ^ q7 ^ r7 ^ r3 ^ rotr16(q3 ^ r3);
+	q[4] = q3 ^ r3 ^ q7 ^ r7 ^ r4 ^ rotr16(q4 ^ r4);
+	q[5] = q4 ^ r4 ^ r5 ^ rotr16(q5 ^ r5);
+	q[6] = q5 ^ r5 ^ r6 ^ rotr16(q6 ^ r6);
+	q[7] = q6 ^ r6 ^ r7 ^ rotr16(q7 ^ r7);
+}
+
+/* see inner.h */
+/*
+ * Encrypt the bitsliced state q in place: the AES encryption round
+ * sequence (FIPS-197 Cipher) over the expanded bitsliced schedule
+ * skey (8 lane words per round key).
+ */
+void
+br_aes_ct_bitslice_encrypt(unsigned num_rounds,
+	const uint32_t *skey, uint32_t *q)
+{
+	unsigned u;
+
+	add_round_key(q, skey);
+	for (u = 1; u < num_rounds; u ++) {
+		br_aes_ct_bitslice_Sbox(q);
+		shift_rows(q);
+		mix_columns(q);
+		add_round_key(q, skey + (u << 3));
+	}
+	/* Final round: no MixColumns. */
+	br_aes_ct_bitslice_Sbox(q);
+	shift_rows(q);
+	add_round_key(q, skey + (num_rounds << 3));
+}
Index: src/sys/crypto/aes/aes_impl.c
diff -u /dev/null src/sys/crypto/aes/aes_impl.c:1.1
--- /dev/null	Mon Jun 29 23:27:52 2020
+++ src/sys/crypto/aes/aes_impl.c	Mon Jun 29 23:27:52 2020
@@ -0,0 +1,256 @@
+/*	$NetBSD: aes_impl.c,v 1.1 2020/06/29 23:27:52 riastradh Exp $	*/
+
+/*-
+ * Copyright (c) 2020 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__KERNEL_RCSID(1, "$NetBSD: aes_impl.c,v 1.1 2020/06/29 23:27:52 riastradh Exp $");
+
+#include <sys/types.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/once.h>
+#include <sys/systm.h>
+
+#include <crypto/aes/aes.h>
+#include <crypto/aes/aes_bear.h> /* default implementation */
+
+static const struct aes_impl	*aes_md_impl	__read_mostly;
+static const struct aes_impl	*aes_impl	__read_mostly;
+
+/*
+ * The timing of AES implementation selection is finicky:
+ *
+ *	1. It has to be done _after_ cpu_attach for implementations,
+ *	   such as AES-NI, that rely on fpu initialization done by
+ *	   fpu_attach.
+ *
+ *	2. It has to be done _before_ the cgd self-tests or anything
+ *	   else that might call AES.
+ *
+ * For the moment, doing it in module init works.  However, if a
+ * driver-class module depended on the aes module, that would break.
+ */
+
+/*
+ * Choose the AES implementation for this boot: prefer the MD
+ * implementation offered via aes_md_init() if it passes the
+ * self-tests, otherwise fall back to the BearSSL bitsliced code.
+ * Panics if no implementation passes -- AES must work before cgd &c.
+ * run.  Returns 0 (modcmd convention).
+ */
+static int
+aes_select(void)
+{
+
+	KASSERT(aes_impl == NULL);
+
+	if (aes_md_impl) {
+		if (aes_selftest(aes_md_impl))
+			aprint_error("aes: self-test failed: %s\n",
+			    aes_md_impl->ai_name);
+		else
+			aes_impl = aes_md_impl;
+	}
+	if (aes_impl == NULL) {
+		if (aes_selftest(&aes_bear_impl))
+			aprint_error("aes: self-test failed: %s\n",
+			    aes_bear_impl.ai_name);
+		else
+			aes_impl = &aes_bear_impl;
+	}
+	if (aes_impl == NULL)
+		panic("AES self-tests failed");
+
+	aprint_normal("aes: %s\n", aes_impl->ai_name);
+	return 0;
+}
+
+MODULE(MODULE_CLASS_MISC, aes, NULL);
+
+/*
+ * Module control: implementation selection runs at module init (see
+ * the timing note above); FINI is allowed because selection leaves no
+ * state that needs teardown.
+ */
+static int
+aes_modcmd(modcmd_t cmd, void *opaque)
+{
+
+	switch (cmd) {
+	case MODULE_CMD_INIT:
+		return aes_select();
+	case MODULE_CMD_FINI:
+		return 0;
+	default:
+		return ENOTTY;
+	}
+}
+
+/*
+ * Called on every AES entry point before dispatching through aes_impl.
+ * Currently a deliberate no-op (#if 0): selection happens in module
+ * init, which runs before any caller.  The RUN_ONCE fallback is kept
+ * in case that ordering guarantee changes.
+ */
+static void
+aes_guarantee_selected(void)
+{
+#if 0
+	static once_t once;
+	int error;
+
+	error = RUN_ONCE(&once, aes_select);
+	KASSERT(error == 0);
+#endif
+}
+
+/*
+ * MD hook: offer a machine-dependent AES implementation (e.g. AES-NI).
+ * Must be called during early (cold) boot, at most once, and before
+ * an implementation has been selected; aes_select() will prefer the
+ * offered implementation if it passes the self-tests.
+ */
+void
+aes_md_init(const struct aes_impl *impl)
+{
+
+	KASSERT(cold);
+	KASSERTMSG(aes_impl == NULL,
+	    "AES implementation `%s' already chosen, can't offer `%s'",
+	    aes_impl->ai_name, impl->ai_name);
+	KASSERTMSG(aes_md_impl == NULL,
+	    "AES implementation `%s' already offered, can't offer `%s'",
+	    aes_md_impl->ai_name, impl->ai_name);
+
+	aes_md_impl = impl;
+}
+
+/* Dispatch encryption key expansion to the selected implementation. */
+static void
+aes_setenckey(struct aesenc *enc, const uint8_t key[static 16],
+    uint32_t nrounds)
+{
+
+	aes_guarantee_selected();
+	aes_impl->ai_setenckey(enc, key, nrounds);
+}
+
+/* Expand a 128-bit encryption key; returns the round count (10). */
+uint32_t
+aes_setenckey128(struct aesenc *enc, const uint8_t key[static 16])
+{
+	uint32_t nrounds = AES_128_NROUNDS;
+
+	aes_setenckey(enc, key, nrounds);
+	return nrounds;
+}
+
+/* Expand a 192-bit encryption key; returns the round count (12). */
+uint32_t
+aes_setenckey192(struct aesenc *enc, const uint8_t key[static 24])
+{
+	uint32_t nrounds = AES_192_NROUNDS;
+
+	aes_setenckey(enc, key, nrounds);
+	return nrounds;
+}
+
+/* Expand a 256-bit encryption key; returns the round count (14). */
+uint32_t
+aes_setenckey256(struct aesenc *enc, const uint8_t key[static 32])
+{
+	uint32_t nrounds = AES_256_NROUNDS;
+
+	aes_setenckey(enc, key, nrounds);
+	return nrounds;
+}
+
+/* Dispatch decryption key expansion to the selected implementation. */
+static void
+aes_setdeckey(struct aesdec *dec, const uint8_t key[static 16],
+    uint32_t nrounds)
+{
+
+	aes_guarantee_selected();
+	aes_impl->ai_setdeckey(dec, key, nrounds);
+}
+
+/* Expand a 128-bit decryption key; returns the round count (10). */
+uint32_t
+aes_setdeckey128(struct aesdec *dec, const uint8_t key[static 16])
+{
+	uint32_t nrounds = AES_128_NROUNDS;
+
+	aes_setdeckey(dec, key, nrounds);
+	return nrounds;
+}
+
+/* Expand a 192-bit decryption key; returns the round count (12). */
+uint32_t
+aes_setdeckey192(struct aesdec *dec, const uint8_t key[static 24])
+{
+	uint32_t nrounds = AES_192_NROUNDS;
+
+	aes_setdeckey(dec, key, nrounds);
+	return nrounds;
+}
+
+/* Expand a 256-bit decryption key; returns the round count (14). */
+uint32_t
+aes_setdeckey256(struct aesdec *dec, const uint8_t key[static 32])
+{
+	uint32_t nrounds = AES_256_NROUNDS;
+
+	aes_setdeckey(dec, key, nrounds);
+	return nrounds;
+}
+
+/* Encrypt one 16-byte block with the selected implementation. */
+void
+aes_enc(const struct aesenc *enc, const uint8_t in[static 16],
+    uint8_t out[static 16], uint32_t nrounds)
+{
+
+	aes_guarantee_selected();
+	aes_impl->ai_enc(enc, in, out, nrounds);
+}
+
+/* Decrypt one 16-byte block with the selected implementation. */
+void
+aes_dec(const struct aesdec *dec, const uint8_t in[static 16],
+    uint8_t out[static 16], uint32_t nrounds)
+{
+
+	aes_guarantee_selected();
+	aes_impl->ai_dec(dec, in, out, nrounds);
+}
+
+/*
+ * CBC encrypt nbytes (a multiple of 16) from in to out; iv is updated
+ * in place so consecutive calls can continue the chain.
+ */
+void
+aes_cbc_enc(struct aesenc *enc, const uint8_t in[static 16],
+    uint8_t out[static 16], size_t nbytes, uint8_t iv[static 16],
+    uint32_t nrounds)
+{
+
+	aes_guarantee_selected();
+	aes_impl->ai_cbc_enc(enc, in, out, nbytes, iv, nrounds);
+}
+
+/* CBC decrypt; same contract as aes_cbc_enc. */
+void
+aes_cbc_dec(struct aesdec *dec, const uint8_t in[static 16],
+    uint8_t out[static 16], size_t nbytes, uint8_t iv[static 16],
+    uint32_t nrounds)
+{
+
+	aes_guarantee_selected();
+	aes_impl->ai_cbc_dec(dec, in, out, nbytes, iv, nrounds);
+}
+
+/*
+ * XTS encrypt nbytes (a multiple of 16); tweak is updated in place so
+ * consecutive calls can continue the sector.
+ */
+void
+aes_xts_enc(struct aesenc *enc, const uint8_t in[static 16],
+    uint8_t out[static 16], size_t nbytes, uint8_t tweak[static 16],
+    uint32_t nrounds)
+{
+
+	aes_guarantee_selected();
+	aes_impl->ai_xts_enc(enc, in, out, nbytes, tweak, nrounds);
+}
+
+/* XTS decrypt; same contract as aes_xts_enc. */
+void
+aes_xts_dec(struct aesdec *dec, const uint8_t in[static 16],
+    uint8_t out[static 16], size_t nbytes, uint8_t tweak[static 16],
+    uint32_t nrounds)
+{
+
+	aes_guarantee_selected();
+	aes_impl->ai_xts_dec(dec, in, out, nbytes, tweak, nrounds);
+}
Index: src/sys/crypto/aes/aes_rijndael.c
diff -u /dev/null src/sys/crypto/aes/aes_rijndael.c:1.1
--- /dev/null	Mon Jun 29 23:27:52 2020
+++ src/sys/crypto/aes/aes_rijndael.c	Mon Jun 29 23:27:52 2020
@@ -0,0 +1,306 @@
+/*	$NetBSD: aes_rijndael.c,v 1.1 2020/06/29 23:27:52 riastradh Exp $	*/
+
+/*-
+ * Copyright (c) 2020 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Legacy `Rijndael' API
+ *
+ *	rijndael_set_key
+ *	rijndael_encrypt
+ *	rijndael_decrypt
+ *
+ *	rijndaelKeySetupEnc
+ *	rijndaelKeySetupDec
+ *	rijndaelEncrypt
+ *	rijndaelDecrypt
+ *	rijndael_makeKey
+ *	rijndael_cipherInit
+ *	rijndael_blockEncrypt
+ *	rijndael_blockDecrypt
+ */
+
+#include <sys/cdefs.h>
+__KERNEL_RCSID(1, "$NetBSD: aes_rijndael.c,v 1.1 2020/06/29 23:27:52 riastradh Exp $");
+
+#include <sys/types.h>
+#include <sys/systm.h>
+
+#include <crypto/aes/aes.h>
+#include <crypto/rijndael/rijndael.h>
+#include <crypto/rijndael/rijndael-alg-fst.h>
+#include <crypto/rijndael/rijndael-api-fst.h>
+
+/* Legacy API: expand key into both encryption and decryption schedules. */
+void
+rijndael_set_key(rijndael_ctx *ctx, const uint8_t *key, int keybits)
+{
+
+	ctx->Nr = rijndaelKeySetupEnc(ctx->ek, key, keybits);
+	rijndaelKeySetupDec(ctx->dk, key, keybits);
+}
+
+/* Legacy API: encrypt one block with a rijndael_set_key'd context. */
+void
+rijndael_encrypt(const rijndael_ctx *ctx, const uint8_t *in, uint8_t *out)
+{
+
+	rijndaelEncrypt(ctx->ek, ctx->Nr, in, out);
+}
+
+/* Legacy API: decrypt one block with a rijndael_set_key'd context. */
+void
+rijndael_decrypt(const rijndael_ctx *ctx, const u_char *in, uint8_t *out)
+{
+
+	rijndaelDecrypt(ctx->dk, ctx->Nr, in, out);
+}
+
+/*
+ * Legacy API: expand an encryption key into the caller's flat round
+ * key array via the new AES interface, zeroizing the intermediate
+ * schedule.  Returns the number of rounds; panics on a key size other
+ * than 128/192/256 bits.
+ */
+int
+rijndaelKeySetupEnc(uint32_t *rk, const uint8_t *key, int keybits)
+{
+	struct aesenc enc;
+	unsigned nrounds;
+
+	switch (keybits) {
+	case 128:
+		nrounds = aes_setenckey128(&enc, key);
+		break;
+	case 192:
+		nrounds = aes_setenckey192(&enc, key);
+		break;
+	case 256:
+		nrounds = aes_setenckey256(&enc, key);
+		break;
+	default:
+		panic("invalid AES key bits: %d", keybits);
+	}
+
+	/* 4 words per round key, nrounds + 1 round keys. */
+	memcpy(rk, enc.aese_aes.aes_rk, 4*(nrounds + 1)*sizeof(rk[0]));
+	explicit_memset(&enc, 0, sizeof enc);
+
+	return nrounds;
+}
+
+/* Decryption-schedule counterpart of rijndaelKeySetupEnc. */
+int
+rijndaelKeySetupDec(uint32_t *rk, const uint8_t *key, int keybits)
+{
+	struct aesdec dec;
+	unsigned nrounds;
+
+	switch (keybits) {
+	case 128:
+		nrounds = aes_setdeckey128(&dec, key);
+		break;
+	case 192:
+		nrounds = aes_setdeckey192(&dec, key);
+		break;
+	case 256:
+		nrounds = aes_setdeckey256(&dec, key);
+		break;
+	default:
+		panic("invalid AES key bits: %d", keybits);
+	}
+
+	memcpy(rk, dec.aesd_aes.aes_rk, 4*(nrounds + 1)*sizeof(rk[0]));
+	explicit_memset(&dec, 0, sizeof dec);
+
+	return nrounds;
+}
+
+/*
+ * Legacy API: encrypt one block given a flat round key array, by
+ * reconstructing a struct aesenc around it (and zeroizing the copy
+ * afterward, since it holds key material).
+ */
+void
+rijndaelEncrypt(const uint32_t *rk, int nrounds, const uint8_t in[16],
+    uint8_t out[16])
+{
+	struct aesenc enc;
+
+	memcpy(enc.aese_aes.aes_rk, rk, 4*(nrounds + 1)*sizeof(rk[0]));
+	aes_enc(&enc, in, out, nrounds);
+	explicit_memset(&enc, 0, sizeof enc);
+}
+
+/* Decryption counterpart of rijndaelEncrypt. */
+void
+rijndaelDecrypt(const uint32_t *rk, int nrounds, const uint8_t in[16],
+    uint8_t out[16])
+{
+	struct aesdec dec;
+
+	memcpy(dec.aesd_aes.aes_rk, rk, 4*(nrounds + 1)*sizeof(rk[0]));
+	aes_dec(&dec, in, out, nrounds);
+	explicit_memset(&dec, 0, sizeof dec);
+}
+
+/*
+ * Legacy keyInstance setup: record direction and key length, copy the
+ * key material, and expand the direction-appropriate schedule into
+ * key->rk.  The encryption schedule is additionally expanded into
+ * key->ek in both directions (legacy consumers read it from there).
+ * Returns 1 on success or a negative BAD_* code.
+ */
+int
+rijndael_makeKey(keyInstance *key, BYTE direction, int keybits,
+    const char *keyp)
+{
+
+	if (key == NULL)
+		return BAD_KEY_INSTANCE;
+
+	/* Poison the whole structure so stale fields can't be misread. */
+	memset(key, 0x1a, sizeof(*key));
+
+	switch (direction) {
+	case DIR_ENCRYPT:
+	case DIR_DECRYPT:
+		key->direction = direction;
+		break;
+	default:
+		return BAD_KEY_DIR;
+	}
+
+	switch (keybits) {
+	case 128:
+	case 192:
+	case 256:
+		key->keyLen = keybits;
+		break;
+	default:
+		return BAD_KEY_MAT;
+	}
+
+	if (keyp)
+		memcpy(key->keyMaterial, keyp, keybits/8);
+
+	switch (direction) {
+	case DIR_ENCRYPT:
+		key->Nr = rijndaelKeySetupEnc(key->rk,
+		    (const uint8_t *)key->keyMaterial, keybits);
+		break;
+	case DIR_DECRYPT:
+		key->Nr = rijndaelKeySetupDec(key->rk,
+		    (const uint8_t *)key->keyMaterial, keybits);
+		break;
+	default:
+		panic("unknown encryption direction %d", direction);
+	}
+	rijndaelKeySetupEnc(key->ek, (const uint8_t *)key->keyMaterial,
+	    keybits);
+
+	return 1;
+}
+
+/*
+ * Legacy cipherInstance setup: record the block mode and initialize
+ * the IV (zero if none supplied).  Only ECB, CBC, and XTS are
+ * supported; returns 1 on success or BAD_CIPHER_MODE.
+ */
+int
+rijndael_cipherInit(cipherInstance *cipher, BYTE mode, const char *iv)
+{
+
+	switch (mode) {
+	case MODE_ECB:		/* used only for encrypting one block */
+	case MODE_CBC:
+	case MODE_XTS:
+		cipher->mode = mode;
+		break;
+	case MODE_CFB1:		/* unused */
+	default:
+		return BAD_CIPHER_MODE;
+	}
+
+	if (iv)
+		memcpy(cipher->IV, iv, RIJNDAEL_MAX_IV_SIZE);
+	else
+		memset(cipher->IV, 0, RIJNDAEL_MAX_IV_SIZE);
+
+	return 1;
+}
+
+/*
+ * Legacy block-encrypt entry point: validate the cipher/key state,
+ * rebuild a struct aesenc from the stored round keys, and dispatch to
+ * the new ECB/CBC/XTS routines.  nbits is the input length in bits
+ * and must be a multiple of 128 (exactly 128 for ECB); the IV/tweak
+ * in *cipher is updated in place.  Returns nbits processed, 0 for
+ * empty input, or a BAD_CIPHER_STATE code.
+ */
+int
+rijndael_blockEncrypt(cipherInstance *cipher, keyInstance *key,
+    const BYTE *in, int nbits, BYTE *out)
+{
+	struct aesenc enc;
+
+	if (cipher == NULL)
+		return BAD_CIPHER_STATE;
+	if (key == NULL)
+		return BAD_CIPHER_STATE;
+	if (key->direction != DIR_ENCRYPT)
+		return BAD_CIPHER_STATE;
+
+	if (in == NULL || nbits <= 0)
+		return 0;
+
+	memcpy(enc.aese_aes.aes_rk, key->rk,
+	    4*(key->Nr + 1)*sizeof(key->rk[0]));
+	switch (cipher->mode) {
+	case MODE_ECB:
+		KASSERT(nbits == 128);
+		aes_enc(&enc, in, out, key->Nr);
+		break;
+	case MODE_CBC:
+		KASSERT(nbits % 128 == 0);
+		aes_cbc_enc(&enc, in, out, nbits/8, (uint8_t *)cipher->IV,
+		    key->Nr);
+		break;
+	case MODE_XTS:
+		KASSERT(nbits % 128 == 0);
+		aes_xts_enc(&enc, in, out, nbits/8, (uint8_t *)cipher->IV,
+		    key->Nr);
+		break;
+	default:
+		panic("invalid AES mode: %d", cipher->mode);
+	}
+	/* Round keys are secret; wipe the local copy. */
+	explicit_memset(&enc, 0, sizeof enc);
+
+	return nbits;
+}
+
+/*
+ * Legacy block-decrypt entry point; mirror image of
+ * rijndael_blockEncrypt (requires a DIR_DECRYPT key).
+ */
+int
+rijndael_blockDecrypt(cipherInstance *cipher, keyInstance *key,
+    const BYTE *in, int nbits, BYTE *out)
+{
+	struct aesdec dec;
+
+	if (cipher == NULL)
+		return BAD_CIPHER_STATE;
+	if (key == NULL)
+		return BAD_CIPHER_STATE;
+	if (key->direction != DIR_DECRYPT)
+		return BAD_CIPHER_STATE;
+
+	if (in == NULL || nbits <= 0)
+		return 0;
+
+	memcpy(dec.aesd_aes.aes_rk, key->rk,
+	    4*(key->Nr + 1)*sizeof(key->rk[0]));
+	switch (cipher->mode) {
+	case MODE_ECB:
+		KASSERT(nbits == 128);
+		aes_dec(&dec, in, out, key->Nr);
+		break;
+	case MODE_CBC:
+		KASSERT(nbits % 128 == 0);
+		aes_cbc_dec(&dec, in, out, nbits/8, (uint8_t *)cipher->IV,
+		    key->Nr);
+		break;
+	case MODE_XTS:
+		KASSERT(nbits % 128 == 0);
+		aes_xts_dec(&dec, in, out, nbits/8, (uint8_t *)cipher->IV,
+		    key->Nr);
+		break;
+	default:
+		panic("invalid AES mode: %d", cipher->mode);
+	}
+	/* Round keys are secret; wipe the local copy. */
+	explicit_memset(&dec, 0, sizeof dec);
+
+	return nbits;
+}
Index: src/sys/crypto/aes/aes_selftest.c
diff -u /dev/null src/sys/crypto/aes/aes_selftest.c:1.1
--- /dev/null	Mon Jun 29 23:27:52 2020
+++ src/sys/crypto/aes/aes_selftest.c	Mon Jun 29 23:27:52 2020
@@ -0,0 +1,387 @@
+/*	$NetBSD: aes_selftest.c,v 1.1 2020/06/29 23:27:52 riastradh Exp $	*/
+
+/*-
+ * Copyright (c) 2020 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__KERNEL_RCSID(1, "$NetBSD: aes_selftest.c,v 1.1 2020/06/29 23:27:52 riastradh Exp $");
+
+#include <sys/types.h>
+#include <sys/systm.h>
+
+#include <lib/libkern/libkern.h>
+
+#include <crypto/aes/aes.h>
+
+/* Per-key-size parameters, indexed 0..2 for AES-128/192/256.  */
+static const unsigned aes_keybytes[] __unused = { 16, 24, 32 };
+static const unsigned aes_keybits[] __unused = { 128, 192, 256 };
+static const unsigned aes_nrounds[] = { 10, 12, 14 };
+
+/*
+ * Report a self-test failure: print which implementation and test
+ * failed, hexdump actual vs expected bytes, and evaluate to -1 (GCC
+ * statement-expression) so callers can `return aes_selftest_fail(...)'.
+ */
+#define	aes_selftest_fail(impl, actual, expected, nbytes, fmt, args...)	      \
+({									      \
+	printf("%s "fmt": self-test failed\n", (impl)->ai_name, ##args);      \
+	hexdump(printf, "was", (actual), (nbytes));			      \
+	hexdump(printf, "expected", (expected), (nbytes));		      \
+	-1;								      \
+})
+
+/*
+ * aes_selftest_encdec(impl)
+ *
+ *	Known-answer test for single-block AES-{128,192,256} ECB
+ *	encryption and decryption, with 0x1a canary bytes bracketing
+ *	the output buffer to detect overruns.  Returns 0 on success,
+ *	-1 on failure after printing a diagnostic.
+ */
+static int
+aes_selftest_encdec(const struct aes_impl *impl)
+{
+	/*
+	 * head -c 16 < /dev/zero | openssl enc -aes-{128,192,256}-ecb
+	 *     -nopad -K 000102030405060708090a0b0c0d... | hexdump -C
+	 */
+	static const uint8_t expected[3][16] = {
+		[0] = {
+			0xc6,0xa1,0x3b,0x37,0x87,0x8f,0x5b,0x82,
+			0x6f,0x4f,0x81,0x62,0xa1,0xc8,0xd8,0x79,
+		},
+		[1] = {
+			0x91,0x62,0x51,0x82,0x1c,0x73,0xa5,0x22,
+			0xc3,0x96,0xd6,0x27,0x38,0x01,0x96,0x07,
+		},
+		[2] = {
+			0xf2,0x90,0x00,0xb6,0x2a,0x49,0x9f,0xd0,
+			0xa9,0xf3,0x9a,0x6a,0xdd,0x2e,0x77,0x80,
+		},
+	};
+	struct aesenc enc;
+	struct aesdec dec;
+	uint8_t key[32];
+	uint8_t in[16];
+	/* Canaries at outbuf[0] and outbuf[17]; out points between them.  */
+	uint8_t outbuf[18] = { [0] = 0x1a, [17] = 0x1a }, *out = outbuf + 1;
+	unsigned i;
+
+	/* Key bytes 00 01 02 ...; all-zero plaintext block.  */
+	for (i = 0; i < 32; i++)
+		key[i] = i;
+	for (i = 0; i < 16; i++)
+		in[i] = 0;
+
+	for (i = 0; i < 3; i++) {
+		impl->ai_setenckey(&enc, key, aes_nrounds[i]);
+		impl->ai_setdeckey(&dec, key, aes_nrounds[i]);
+		impl->ai_enc(&enc, in, out, aes_nrounds[i]);
+		if (memcmp(out, expected[i], 16))
+			return aes_selftest_fail(impl, out, expected[i], 16,
+			    "AES-%u enc", aes_keybits[i]);
+		/* Decrypt in place; must round-trip back to the input.  */
+		impl->ai_dec(&dec, out, out, aes_nrounds[i]);
+		if (memcmp(out, in, 16))
+			return aes_selftest_fail(impl, out, in, 16,
+			    "AES-%u dec", aes_keybits[i]);
+	}
+
+	/* Verify the canaries survived all six operations.  */
+	if (outbuf[0] != 0x1a)
+		return aes_selftest_fail(impl, outbuf,
+		    (const uint8_t[1]){0x1a}, 1,
+		    "AES overrun preceding");
+	if (outbuf[17] != 0x1a)
+		return aes_selftest_fail(impl, outbuf + 17,
+		    (const uint8_t[1]){0x1a}, 1,
+		    "AES overrun following");
+
+	/* Success!  */
+	return 0;
+}
+
+/*
+ * aes_selftest_encdec_cbc(impl)
+ *
+ *	Known-answer test for AES-{128,192,256}-CBC over 9 blocks
+ *	(144 bytes), both in one call and incrementally across two
+ *	calls to verify the chained IV is updated correctly.  Canary
+ *	bytes bracket the output buffer to detect overruns.  Returns
+ *	0 on success, -1 on failure after printing a diagnostic.
+ */
+static int
+aes_selftest_encdec_cbc(const struct aes_impl *impl)
+{
+	static const uint8_t expected[3][144] = {
+		[0] = {
+			0xfe,0xf1,0xa8,0xb6,0x25,0xf0,0xc4,0x3a,
+			0x71,0x08,0xb6,0x23,0xa6,0xfb,0x90,0xca,
+			0x9e,0x64,0x6d,0x95,0xb5,0xf5,0x41,0x24,
+			0xd2,0xe6,0x60,0xda,0x6c,0x69,0xc4,0xa0,
+			0x4d,0xaa,0x94,0xf6,0x66,0x1e,0xaa,0x85,
+			0x68,0xc5,0x6b,0x2e,0x77,0x7a,0x68,0xff,
+			0x45,0x15,0x45,0xc5,0x9c,0xbb,0x3a,0x23,
+			0x08,0x3a,0x06,0xdd,0xc0,0x52,0xd2,0xb7,
+			0x47,0xaa,0x1c,0xc7,0xb5,0xa9,0x7d,0x04,
+			0x60,0x67,0x78,0xf6,0xb9,0xba,0x26,0x84,
+			0x45,0x72,0x44,0xed,0xa3,0xd3,0xa0,0x3f,
+			0x19,0xee,0x3f,0x94,0x59,0x52,0x4b,0x13,
+			0xfd,0x81,0xcc,0xf9,0xf2,0x29,0xd7,0xec,
+			0xde,0x03,0x56,0x01,0x4a,0x19,0x86,0xc0,
+			0x87,0xce,0xe1,0xcc,0x13,0xf1,0x2e,0xda,
+			0x3f,0xfe,0xa4,0x64,0xe7,0x48,0xb4,0x7b,
+			0x73,0x62,0x5a,0x80,0x5e,0x01,0x20,0xa5,
+			0x0a,0xd7,0x98,0xa7,0xd9,0x8b,0xff,0xc2,
+		},
+		[1] = {
+			0xa6,0x87,0xf0,0x92,0x68,0xc8,0xd6,0x42,
+			0xa8,0x83,0x1c,0x92,0x65,0x8c,0xd9,0xfe,
+			0x0b,0x1a,0xc6,0x96,0x27,0x44,0xd4,0x14,
+			0xfc,0xe7,0x85,0xb2,0x71,0xc7,0x11,0x39,
+			0xed,0x36,0xd3,0x5c,0xa7,0xf7,0x3d,0xc9,
+			0xa2,0x54,0x8b,0xb4,0xfa,0xe8,0x21,0xf9,
+			0xfd,0x6a,0x42,0x85,0xde,0x66,0xd4,0xc0,
+			0xa7,0xd3,0x5b,0xe1,0xe6,0xac,0xea,0xf9,
+			0xa3,0x15,0x68,0xf4,0x66,0x4c,0x23,0x75,
+			0x58,0xba,0x7f,0xca,0xbf,0x40,0x56,0x79,
+			0x2f,0xbf,0xdf,0x5f,0x56,0xcb,0xa0,0xe4,
+			0x22,0x65,0x6a,0x8f,0x4f,0xff,0x11,0x6b,
+			0x57,0xeb,0x45,0xeb,0x9d,0x7f,0xfe,0x9c,
+			0x8b,0x30,0xa8,0xb0,0x7e,0x27,0xf8,0xbc,
+			0x1f,0xf8,0x15,0x34,0x36,0x4f,0x46,0x73,
+			0x81,0x90,0x4b,0x4b,0x46,0x4d,0x01,0x45,
+			0xa1,0xc3,0x0b,0xa8,0x5a,0xab,0xc1,0x88,
+			0x66,0xc8,0x1a,0x94,0x17,0x64,0x6f,0xf4,
+		},
+		[2] = {
+			0x22,0x4c,0x27,0xf4,0xba,0x37,0x8b,0x27,
+			0xd3,0xd6,0x88,0x8a,0xdc,0xed,0x64,0x42,
+			0x19,0x60,0x31,0x09,0xf3,0x72,0xd2,0xc2,
+			0xd3,0xe3,0xff,0xce,0xc5,0x03,0x9f,0xce,
+			0x99,0x49,0x8a,0xf2,0xe1,0xba,0xe2,0xa8,
+			0xd7,0x32,0x07,0x2d,0xb0,0xb3,0xbc,0x67,
+			0x32,0x9a,0x3e,0x7d,0x16,0x23,0xe7,0x24,
+			0x84,0xe1,0x15,0x03,0x9c,0xa2,0x7a,0x95,
+			0x34,0xa8,0x04,0x4e,0x79,0x31,0x50,0x26,
+			0x76,0xd1,0x10,0xce,0xec,0x13,0xf7,0xfb,
+			0x94,0x6b,0x76,0x50,0x5f,0xb2,0x3e,0x7c,
+			0xbe,0x97,0xe7,0x13,0x06,0x9e,0x2d,0xc4,
+			0x46,0x65,0xa7,0x69,0x37,0x07,0x25,0x37,
+			0xe5,0x48,0x51,0xa8,0x58,0xe8,0x4d,0x7c,
+			0xb5,0xbe,0x25,0x13,0xbc,0x11,0xc2,0xde,
+			0xdb,0x00,0xef,0x1c,0x1d,0xeb,0xe3,0x49,
+			0x1c,0xc0,0x78,0x29,0x76,0xc0,0xde,0x3a,
+			0x0e,0x96,0x8f,0xea,0xd7,0x42,0x4e,0xb4,
+		},
+	};
+	struct aesenc enc;
+	struct aesdec dec;
+	uint8_t key[32];
+	uint8_t in[144];
+	/* Canaries at outbuf[0] and outbuf[145]; out points between them.  */
+	uint8_t outbuf[146] = { [0] = 0x1a, [145] = 0x1a }, *out = outbuf + 1;
+	uint8_t iv0[16], iv[16];
+	unsigned i;
+
+	/* Deterministic key, IV, and plaintext patterns.  */
+	for (i = 0; i < 32; i++)
+		key[i] = i;
+	for (i = 0; i < 16; i++)
+		iv0[i] = 0x20 ^ i;
+	for (i = 0; i < 144; i++)
+		in[i] = 0x80 ^ i;
+
+	for (i = 0; i < 3; i++) {
+		impl->ai_setenckey(&enc, key, aes_nrounds[i]);
+		impl->ai_setdeckey(&dec, key, aes_nrounds[i]);
+
+		/* Try one swell foop.  */
+		memcpy(iv, iv0, 16);
+		impl->ai_cbc_enc(&enc, in, out, 144, iv, aes_nrounds[i]);
+		if (memcmp(out, expected[i], 144))
+			return aes_selftest_fail(impl, out, expected[i], 144,
+			    "AES-%u-CBC enc", aes_keybits[i]);
+
+		memcpy(iv, iv0, 16);
+		impl->ai_cbc_dec(&dec, out, out, 144, iv, aes_nrounds[i]);
+		if (memcmp(out, in, 144))
+			return aes_selftest_fail(impl, out, in, 144,
+			    "AES-%u-CBC dec", aes_keybits[i]);
+
+		/* Try incrementally, with IV update.  */
+		memcpy(iv, iv0, 16);
+		impl->ai_cbc_enc(&enc, in, out, 16, iv, aes_nrounds[i]);
+		impl->ai_cbc_enc(&enc, in + 16, out + 16, 128, iv,
+		    aes_nrounds[i]);
+		if (memcmp(out, expected[i], 144))
+			return aes_selftest_fail(impl, out, expected[i], 144,
+			    "AES-%u-CBC enc incremental", aes_keybits[i]);
+
+		memcpy(iv, iv0, 16);
+		impl->ai_cbc_dec(&dec, out, out, 128, iv, aes_nrounds[i]);
+		impl->ai_cbc_dec(&dec, out + 128, out + 128, 16, iv,
+		    aes_nrounds[i]);
+		if (memcmp(out, in, 144))
+			return aes_selftest_fail(impl, out, in, 144,
+			    "AES-%u-CBC dec incremental", aes_keybits[i]);
+	}
+
+	/* Verify the canaries survived.  */
+	if (outbuf[0] != 0x1a)
+		return aes_selftest_fail(impl, outbuf,
+		    (const uint8_t[1]){0x1a}, 1,
+		    "AES-CBC overrun preceding");
+	if (outbuf[145] != 0x1a)
+		return aes_selftest_fail(impl, outbuf + 145,
+		    (const uint8_t[1]){0x1a}, 1,
+		    "AES-CBC overrun following");
+
+	/* Success!  */
+	return 0;
+}
+
+/*
+ * aes_selftest_encdec_xts(impl)
+ *
+ *	Known-answer test for AES-{128,256}-XTS (no published AES-192
+ *	vector) against IEEE P1619-D16 vectors truncated to 144 bytes,
+ *	both in one call and incrementally across two calls to verify
+ *	the tweak is updated correctly.  Canary bytes bracket the
+ *	output buffer to detect overruns.  Returns 0 on success, -1 on
+ *	failure after printing a diagnostic.
+ */
+static int
+aes_selftest_encdec_xts(const struct aes_impl *impl)
+{
+	/* Data unit sequence numbers from the P1619 vectors.  */
+	uint64_t blkno[3] = { 0, 1, 0xff };
+	static const uint8_t expected[3][144] = {
+		[0] = {
+			/* IEEE P1619-D16, XTS-AES-128, Vector 4, truncated */
+			0x27,0xa7,0x47,0x9b,0xef,0xa1,0xd4,0x76,
+			0x48,0x9f,0x30,0x8c,0xd4,0xcf,0xa6,0xe2,
+			0xa9,0x6e,0x4b,0xbe,0x32,0x08,0xff,0x25,
+			0x28,0x7d,0xd3,0x81,0x96,0x16,0xe8,0x9c,
+			0xc7,0x8c,0xf7,0xf5,0xe5,0x43,0x44,0x5f,
+			0x83,0x33,0xd8,0xfa,0x7f,0x56,0x00,0x00,
+			0x05,0x27,0x9f,0xa5,0xd8,0xb5,0xe4,0xad,
+			0x40,0xe7,0x36,0xdd,0xb4,0xd3,0x54,0x12,
+			0x32,0x80,0x63,0xfd,0x2a,0xab,0x53,0xe5,
+			0xea,0x1e,0x0a,0x9f,0x33,0x25,0x00,0xa5,
+			0xdf,0x94,0x87,0xd0,0x7a,0x5c,0x92,0xcc,
+			0x51,0x2c,0x88,0x66,0xc7,0xe8,0x60,0xce,
+			0x93,0xfd,0xf1,0x66,0xa2,0x49,0x12,0xb4,
+			0x22,0x97,0x61,0x46,0xae,0x20,0xce,0x84,
+			0x6b,0xb7,0xdc,0x9b,0xa9,0x4a,0x76,0x7a,
+			0xae,0xf2,0x0c,0x0d,0x61,0xad,0x02,0x65,
+			0x5e,0xa9,0x2d,0xc4,0xc4,0xe4,0x1a,0x89,
+			0x52,0xc6,0x51,0xd3,0x31,0x74,0xbe,0x51,
+		},
+		[1] = {
+		},
+		[2] = {
+			/* IEEE P1619-D16, XTS-AES-256, Vector 10, truncated */
+			0x1c,0x3b,0x3a,0x10,0x2f,0x77,0x03,0x86,
+			0xe4,0x83,0x6c,0x99,0xe3,0x70,0xcf,0x9b,
+			0xea,0x00,0x80,0x3f,0x5e,0x48,0x23,0x57,
+			0xa4,0xae,0x12,0xd4,0x14,0xa3,0xe6,0x3b,
+			0x5d,0x31,0xe2,0x76,0xf8,0xfe,0x4a,0x8d,
+			0x66,0xb3,0x17,0xf9,0xac,0x68,0x3f,0x44,
+			0x68,0x0a,0x86,0xac,0x35,0xad,0xfc,0x33,
+			0x45,0xbe,0xfe,0xcb,0x4b,0xb1,0x88,0xfd,
+			0x57,0x76,0x92,0x6c,0x49,0xa3,0x09,0x5e,
+			0xb1,0x08,0xfd,0x10,0x98,0xba,0xec,0x70,
+			0xaa,0xa6,0x69,0x99,0xa7,0x2a,0x82,0xf2,
+			0x7d,0x84,0x8b,0x21,0xd4,0xa7,0x41,0xb0,
+			0xc5,0xcd,0x4d,0x5f,0xff,0x9d,0xac,0x89,
+			0xae,0xba,0x12,0x29,0x61,0xd0,0x3a,0x75,
+			0x71,0x23,0xe9,0x87,0x0f,0x8a,0xcf,0x10,
+			0x00,0x02,0x08,0x87,0x89,0x14,0x29,0xca,
+			0x2a,0x3e,0x7a,0x7d,0x7d,0xf7,0xb1,0x03,
+			0x55,0x16,0x5c,0x8b,0x9a,0x6d,0x0a,0x7d,
+		},
+	};
+	/* key1 encrypts the data; key2 encrypts the tweak.  */
+	static const uint8_t key1[32] = {
+		0x27,0x18,0x28,0x18,0x28,0x45,0x90,0x45,
+		0x23,0x53,0x60,0x28,0x74,0x71,0x35,0x26,
+		0x62,0x49,0x77,0x57,0x24,0x70,0x93,0x69,
+		0x99,0x59,0x57,0x49,0x66,0x96,0x76,0x27,
+	};
+	static const uint8_t key2[32] = {
+		0x31,0x41,0x59,0x26,0x53,0x58,0x97,0x93,
+		0x23,0x84,0x62,0x64,0x33,0x83,0x27,0x95,
+		0x02,0x88,0x41,0x97,0x16,0x93,0x99,0x37,
+		0x51,0x05,0x82,0x09,0x74,0x94,0x45,0x92,
+	};
+	struct aesenc enc;
+	struct aesdec dec;
+	uint8_t in[144];
+	/* Canaries at outbuf[0] and outbuf[145]; out points between them.  */
+	uint8_t outbuf[146] = { [0] = 0x1a, [145] = 0x1a }, *out = outbuf + 1;
+	uint8_t blkno_buf[16];
+	uint8_t iv0[16], iv[16];
+	unsigned i;
+
+	/* Plaintext bytes 00 01 02 ... as in the P1619 vectors.  */
+	for (i = 0; i < 144; i++)
+		in[i] = i;
+
+	for (i = 0; i < 3; i++) {
+		if (i == 1)	/* XXX missing AES-192 test vector */
+			continue;
+
+		/* Format the data unit sequence number.  */
+		memset(blkno_buf, 0, sizeof blkno_buf);
+		le64enc(blkno_buf, blkno[i]);
+
+		/* Generate the tweak.  */
+		impl->ai_setenckey(&enc, key2, aes_nrounds[i]);
+		impl->ai_enc(&enc, blkno_buf, iv0, aes_nrounds[i]);
+
+		/* Load the data encryption key.  */
+		impl->ai_setenckey(&enc, key1, aes_nrounds[i]);
+		impl->ai_setdeckey(&dec, key1, aes_nrounds[i]);
+
+		/* Try one swell foop.  */
+		memcpy(iv, iv0, 16);
+		impl->ai_xts_enc(&enc, in, out, 144, iv, aes_nrounds[i]);
+		if (memcmp(out, expected[i], 144))
+			return aes_selftest_fail(impl, out, expected[i], 144,
+			    "AES-%u-XTS enc", aes_keybits[i]);
+
+		memcpy(iv, iv0, 16);
+		impl->ai_xts_dec(&dec, out, out, 144, iv, aes_nrounds[i]);
+		if (memcmp(out, in, 144))
+			return aes_selftest_fail(impl, out, in, 144,
+			    "AES-%u-XTS dec", aes_keybits[i]);
+
+		/* Try incrementally, with IV update.  */
+		memcpy(iv, iv0, 16);
+		impl->ai_xts_enc(&enc, in, out, 16, iv, aes_nrounds[i]);
+		impl->ai_xts_enc(&enc, in + 16, out + 16, 128, iv,
+		    aes_nrounds[i]);
+		if (memcmp(out, expected[i], 144))
+			return aes_selftest_fail(impl, out, expected[i], 144,
+			    "AES-%u-XTS enc incremental", aes_keybits[i]);
+
+		memcpy(iv, iv0, 16);
+		impl->ai_xts_dec(&dec, out, out, 128, iv, aes_nrounds[i]);
+		impl->ai_xts_dec(&dec, out + 128, out + 128, 16, iv,
+		    aes_nrounds[i]);
+		if (memcmp(out, in, 144))
+			return aes_selftest_fail(impl, out, in, 144,
+			    "AES-%u-XTS dec incremental", aes_keybits[i]);
+	}
+
+	/* Verify the canaries survived.  */
+	if (outbuf[0] != 0x1a)
+		return aes_selftest_fail(impl, outbuf,
+		    (const uint8_t[1]){0x1a}, 1,
+		    "AES-XTS overrun preceding");
+	if (outbuf[145] != 0x1a)
+		return aes_selftest_fail(impl, outbuf + 145,
+		    (const uint8_t[1]){0x1a}, 1,
+		    "AES-XTS overrun following");
+
+	/* Success!  */
+	return 0;
+}
+
+/*
+ * aes_selftest(impl)
+ *
+ *	Run all self-tests for the given AES implementation: the
+ *	implementation's probe routine, then the ECB, CBC, and XTS
+ *	known-answer tests.  Returns 0 if everything passes, -1
+ *	otherwise.
+ */
+int
+aes_selftest(const struct aes_impl *impl)
+{
+	int result = 0;
+
+	/*
+	 * If the probe fails, bail out before running any vectors.
+	 * NOTE(review): presumably ai_probe checks for required CPU
+	 * features -- confirm against the implementations.
+	 */
+	if (impl->ai_probe())
+		return -1;
+
+	/* Run every test suite even if an earlier one fails.  */
+	if (aes_selftest_encdec(impl))
+		result = -1;
+	if (aes_selftest_encdec_cbc(impl))
+		result = -1;
+	if (aes_selftest_encdec_xts(impl))
+		result = -1;
+
+	return result;
+}
Index: src/sys/crypto/aes/files.aes
diff -u /dev/null src/sys/crypto/aes/files.aes:1.1
--- /dev/null	Mon Jun 29 23:27:52 2020
+++ src/sys/crypto/aes/files.aes	Mon Jun 29 23:27:52 2020
@@ -0,0 +1,12 @@
+#	$NetBSD: files.aes,v 1.1 2020/06/29 23:27:52 riastradh Exp $
+
+define	aes
+define	rijndael: aes	# legacy Rijndael API
+
+file	crypto/aes/aes_bear.c			aes
+file	crypto/aes/aes_ct.c			aes
+file	crypto/aes/aes_ct_dec.c			aes
+file	crypto/aes/aes_ct_enc.c			aes
+file	crypto/aes/aes_impl.c			aes
+file	crypto/aes/aes_rijndael.c		rijndael
+file	crypto/aes/aes_selftest.c		aes

Reply via email to