Currently the AES-GCM crypt and hash parts are performed in two separate
functions. Each can be replaced with an arch-specific optimized assembly
routine. This makes it difficult to introduce an arch-specific routine
implementing the combination of both parts in a single function.

Rework the existing gcm_{en,de}crypt() functions to instead call a new
gcm_aes_{en,de}crypt_wrap() function which calls out to a (for now) stub
gcm_aes_{en,de}crypt(). This stub can then be overridden either via FAT
or statically during build.

Signed-off-by: Christopher M. Riedl <[email protected]>
---
 configure.ac |   8 ++-
 gcm.c        | 147 +++++++++++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 149 insertions(+), 6 deletions(-)

diff --git a/configure.ac b/configure.ac
index 026ae99d..ba85a313 100644
--- a/configure.ac
+++ b/configure.ac
@@ -538,7 +538,7 @@ asm_nettle_optional_list="gcm-hash.asm gcm-hash8.asm cpuid.asm \
   salsa20-2core.asm salsa20-core-internal-2.asm \
   sha1-compress-2.asm sha256-compress-2.asm \
   sha3-permute-2.asm sha512-compress-2.asm \
-  umac-nh-n-2.asm umac-nh-2.asm"
+  umac-nh-n-2.asm umac-nh-2.asm gcm-aes-encrypt.asm gcm-aes-decrypt.asm"
 
 asm_hogweed_optional_list=""
 if test "x$enable_public_key" = "xyes" ; then
@@ -674,7 +674,11 @@ AH_VERBATIM([HAVE_NATIVE],
 #undef HAVE_NATIVE_sha512_compress
 #undef HAVE_NATIVE_sha3_permute
 #undef HAVE_NATIVE_umac_nh
-#undef HAVE_NATIVE_umac_nh_n])
+#undef HAVE_NATIVE_umac_nh_n
+#undef HAVE_NATIVE_gcm_aes_decrypt
+#undef HAVE_NATIVE_gcm_aes_encrypt
+#undef HAVE_NATIVE_fat_gcm_aes_decrypt
+#undef HAVE_NATIVE_fat_gcm_aes_encrypt])
 
 if test "x$enable_pic" = xyes; then
     LSH_CCPIC
diff --git a/gcm.c b/gcm.c
index d1f21d3a..6fe25a01 100644
--- a/gcm.c
+++ b/gcm.c
@@ -423,28 +423,167 @@ gcm_fill(uint8_t *ctr, size_t blocks, union nettle_block16 *buffer)
 }
 #endif
 
+enum gcm_aes_rounds {
+    NOT_AES = 0,
+    AES_128 = _AES128_ROUNDS,
+    AES_192 = _AES192_ROUNDS,
+    AES_256 = _AES256_ROUNDS
+};
+
+static enum gcm_aes_rounds
+_nettle_gcm_get_aes_rounds(nettle_cipher_func *f)
+{
+  if (f == (nettle_cipher_func *)nettle_aes128_encrypt ||
+      f == (nettle_cipher_func *)nettle_aes128_decrypt)
+    {
+      return AES_128;
+    }
+  else if (f == (nettle_cipher_func *)nettle_aes192_encrypt ||
+          f == (nettle_cipher_func *)nettle_aes192_decrypt)
+    {
+      return AES_192;
+    }
+  else if (f == (nettle_cipher_func *)nettle_aes256_encrypt ||
+          f == (nettle_cipher_func *)nettle_aes256_decrypt)
+    {
+      return AES_256;
+    }
+  else
+    {
+      return NOT_AES;
+    }
+}
+
+#if !HAVE_NATIVE_gcm_aes_encrypt
+# if !HAVE_NATIVE_fat_gcm_aes_encrypt
+#   define _nettle_gcm_aes_encrypt _nettle_gcm_aes_encrypt_c
+static
+#endif /* !HAVE_NATIVE_fat_gcm_aes_encrypt */
+int
+_nettle_gcm_aes_encrypt_c (const struct gcm_key *key, union nettle_block16 *x,
+                          size_t length, const uint8_t *src, unsigned rounds,
+                          const uint32_t *keys, uint8_t *dst, uint8_t* ctr)
+{
+  (void)key;
+  (void)x;
+  (void)length;
+  (void)src;
+  (void)rounds;
+  (void)keys;
+  (void)dst;
+  (void)ctr;
+
+  return -1; /* Not implemented */
+}
+#endif /* !HAVE_NATIVE_gcm_aes_encrypt */
+
+static int
+_nettle_gcm_aes_encrypt_wrap (struct gcm_ctx *ctx, const struct gcm_key *key,
+                             const void *cipher, size_t length, uint8_t *dst,
+                             const uint8_t *src, enum gcm_aes_rounds rounds)
+{
+  switch (rounds) {
+    default:
+      abort();
+    case AES_128:
+      return _nettle_gcm_aes_encrypt(key, &ctx->x, length, src, rounds,
+                                    ((struct aes128_ctx*)cipher)->keys, dst,
+                                    ctx->ctr.b);
+    case AES_192:
+      return _nettle_gcm_aes_encrypt(key, &ctx->x, length, src, rounds,
+                                    ((struct aes192_ctx*)cipher)->keys, dst,
+                                    ctx->ctr.b);
+    case AES_256:
+      return _nettle_gcm_aes_encrypt(key, &ctx->x, length, src, rounds,
+                                    ((struct aes256_ctx*)cipher)->keys, dst,
+                                    ctx->ctr.b);
+  }
+}
+
 void
 gcm_encrypt (struct gcm_ctx *ctx, const struct gcm_key *key,
             const void *cipher, nettle_cipher_func *f,
             size_t length, uint8_t *dst, const uint8_t *src)
 {
+  enum gcm_aes_rounds rounds;
   assert(ctx->data_size % GCM_BLOCK_SIZE == 0);
 
-  _nettle_ctr_crypt16(cipher, f, gcm_fill, ctx->ctr.b, length, dst, src);
-  _nettle_gcm_hash(key, &ctx->x, length, dst);
+  rounds = _nettle_gcm_get_aes_rounds(f);
+
+  if (rounds == NOT_AES ||
+      _nettle_gcm_aes_encrypt_wrap(ctx, key, cipher, length,
+                                  dst, src, rounds) == -1)
+    {
+      _nettle_ctr_crypt16(cipher, f, gcm_fill, ctx->ctr.b, length, dst, src);
+      _nettle_gcm_hash(key, &ctx->x, length, dst);
+    }
 
   ctx->data_size += length;
 }
 
+#if !HAVE_NATIVE_gcm_aes_decrypt
+# if !HAVE_NATIVE_fat_gcm_aes_decrypt
+#   define _nettle_gcm_aes_decrypt _nettle_gcm_aes_decrypt_c
+static
+#endif /* !HAVE_NATIVE_fat_gcm_aes_decrypt */
+int
+_nettle_gcm_aes_decrypt_c (const struct gcm_key *key, union nettle_block16 *x,
+                          size_t length, const uint8_t *src, unsigned rounds,
+                          const uint32_t *keys, uint8_t *dst, uint8_t *ctr)
+{
+  (void)key;
+  (void)x;
+  (void)length;
+  (void)src;
+  (void)rounds;
+  (void)keys;
+  (void)dst;
+  (void)ctr;
+
+  return -1; /* Not implemented */
+}
+#endif /* !HAVE_NATIVE_gcm_aes_decrypt */
+
+static int
+_nettle_gcm_aes_decrypt_wrap (struct gcm_ctx *ctx, const struct gcm_key *key,
+                             const void *cipher, size_t length, uint8_t *dst,
+                             const uint8_t *src, enum gcm_aes_rounds rounds)
+{
+  switch (rounds) {
+    default:
+      abort();
+    case AES_128:
+      return _nettle_gcm_aes_decrypt(key, &ctx->x, length, src, rounds,
+                                    ((struct aes128_ctx*)cipher)->keys, dst,
+                                    ctx->ctr.b);
+    case AES_192:
+      return _nettle_gcm_aes_decrypt(key, &ctx->x, length, src, rounds,
+                                    ((struct aes192_ctx*)cipher)->keys, dst,
+                                    ctx->ctr.b);
+    case AES_256:
+      return _nettle_gcm_aes_decrypt(key, &ctx->x, length, src, rounds,
+                                    ((struct aes256_ctx*)cipher)->keys, dst,
+                                    ctx->ctr.b);
+  }
+}
+
 void
 gcm_decrypt(struct gcm_ctx *ctx, const struct gcm_key *key,
            const void *cipher, nettle_cipher_func *f,
            size_t length, uint8_t *dst, const uint8_t *src)
 {
+  enum gcm_aes_rounds rounds;
   assert(ctx->data_size % GCM_BLOCK_SIZE == 0);
 
-  _nettle_gcm_hash(key, &ctx->x, length, src);
-  _nettle_ctr_crypt16(cipher, f, gcm_fill, ctx->ctr.b, length, dst, src);
+  rounds = _nettle_gcm_get_aes_rounds(f);
+
+  if (rounds == NOT_AES ||
+      _nettle_gcm_aes_decrypt_wrap(ctx, key, cipher, length,
+                                  dst, src, rounds) == -1)
+    {
+      _nettle_gcm_hash(key, &ctx->x, length, src);
+      _nettle_ctr_crypt16(cipher, f, gcm_fill, ctx->ctr.b, length, dst, src);
+    }
 
   ctx->data_size += length;
 }
-- 
2.26.1

_______________________________________________
nettle-bugs mailing list
[email protected]
http://lists.lysator.liu.se/mailman/listinfo/nettle-bugs

Reply via email to