The aes-ctr mode uses one preallocated page without any concurrency
protection. When multiple threads run aes-ctr encryption or decryption
concurrently, this can lead to data corruption.

This patch introduces a lock for the preallocated page. If the page is
already in use by another thread, a temporary page is allocated and freed
for the duration of the operation; if that allocation fails, the thread
waits until the preallocated page becomes available again.
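
The reservation scheme can be summarized by the following helper pair
(a minimal sketch for illustration only; get_ctr_buffer() and
put_ctr_buffer() are made-up names and are not part of the patch):

	/*
	 * Sketch of the ctrblk reservation scheme used in the patch below
	 * (illustrative only; these helpers do not exist in the driver).
	 */
	#include <linux/gfp.h>
	#include <linux/mutex.h>
	#include <linux/types.h>

	static u8 *ctrblk;			/* preallocated shared page */
	static DEFINE_MUTEX(ctrblk_lock);	/* protects ctrblk */

	static u8 *get_ctr_buffer(void)
	{
		u8 *page;

		if (mutex_trylock(&ctrblk_lock))
			return ctrblk;		/* shared page reserved for us */
		page = (u8 *) __get_free_page(GFP_ATOMIC);
		if (page)
			return page;		/* private temporary page */
		mutex_lock(&ctrblk_lock);	/* no memory, wait for ctrblk */
		return ctrblk;
	}

	static void put_ctr_buffer(u8 *page)
	{
		if (page == ctrblk)
			mutex_unlock(&ctrblk_lock);	/* release the reservation */
		else
			free_page((unsigned long) page);	/* drop the temp page */
	}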
---
 arch/s390/crypto/aes_s390.c |   55 ++++++++++++++++++++++++++++++++++++++-------------
 1 files changed, 42 insertions(+), 13 deletions(-)
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 4363528..fcb5297 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -25,6 +25,7 @@
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/mutex.h>
 #include "crypt_s390.h"
 
 #define AES_KEYLEN_128		1
@@ -32,6 +33,7 @@
 #define AES_KEYLEN_256		4
 
 static u8 *ctrblk;
+static DEFINE_MUTEX(ctrblk_lock);
 static char keylen_flag;
 
 struct s390_aes_ctx {
@@ -762,11 +764,25 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
 	unsigned int i, n, nbytes;
 	u8 buf[AES_BLOCK_SIZE];
 	u8 *out, *in;
+	u8 *ctrpage;
 
 	if (!walk->nbytes)
 		return ret;
 
-	memcpy(ctrblk, walk->iv, AES_BLOCK_SIZE);
+	if (mutex_trylock(&ctrblk_lock)) {
+		/* ctrblk is now reserved for us */
+		ctrpage = ctrblk;
+	} else {
+		/* ctrblk is in use by someone else, alloc our own page */
+		ctrpage = (u8 *) __get_free_page(GFP_ATOMIC);
+		if (!ctrpage) {
+			/* gfp failed, wait until ctrblk becomes available */
+			mutex_lock(&ctrblk_lock);
+			ctrpage = ctrblk;
+		}
+	}
+
+	memcpy(ctrpage, walk->iv, AES_BLOCK_SIZE);
 	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
 		out = walk->dst.virt.addr;
 		in = walk->src.virt.addr;
@@ -775,17 +791,19 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
 			n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
 						 nbytes & ~(AES_BLOCK_SIZE - 1);
 			for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
-				memcpy(ctrblk + i, ctrblk + i - AES_BLOCK_SIZE,
+				memcpy(ctrpage + i, ctrpage + i - AES_BLOCK_SIZE,
 				       AES_BLOCK_SIZE);
-				crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
+				crypto_inc(ctrpage + i, AES_BLOCK_SIZE);
+			}
+			ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrpage);
+			if (ret < 0 || ret != n) {
+				ret = -EIO;
+				goto out;
 			}
-			ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
-			if (ret < 0 || ret != n)
-				return -EIO;
 			if (n > AES_BLOCK_SIZE)
-				memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
+				memcpy(ctrpage, ctrpage + n - AES_BLOCK_SIZE,
 				       AES_BLOCK_SIZE);
-			crypto_inc(ctrblk, AES_BLOCK_SIZE);
+			crypto_inc(ctrpage, AES_BLOCK_SIZE);
 			out += n;
 			in += n;
 			nbytes -= n;
@@ -799,14 +817,25 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
 	if (nbytes) {
 		out = walk->dst.virt.addr;
 		in = walk->src.virt.addr;
 		ret = crypt_s390_kmctr(func, sctx->key, buf, in,
-				       AES_BLOCK_SIZE, ctrblk);
-		if (ret < 0 || ret != AES_BLOCK_SIZE)
-			return -EIO;
+				       AES_BLOCK_SIZE, ctrpage);
+		if (ret < 0 || ret != AES_BLOCK_SIZE) {
+			ret = -EIO;
+			goto out;
+		}
 		memcpy(out, buf, nbytes);
-		crypto_inc(ctrblk, AES_BLOCK_SIZE);
+		crypto_inc(ctrpage, AES_BLOCK_SIZE);
 		ret = blkcipher_walk_done(desc, walk, 0);
 	}
-	memcpy(walk->iv, ctrblk, AES_BLOCK_SIZE);
+	memcpy(walk->iv, ctrpage, AES_BLOCK_SIZE);
+
+out:
+	if (ctrpage == ctrblk) {
+		/* free the reservation for ctrblk now */
+		mutex_unlock(&ctrblk_lock);
+	} else {
+		/* free the page allocated above */
+		free_page((unsigned long) ctrpage);
+	}
 	return ret;
 }
--
1.7.0.4