Re: [RFC PATCH] KEYS: Use individual pages in big_key for crypto buffers [ver #2]

2018-02-16 Thread David Howells
Eric Biggers  wrote:

> memset() after vunmap(), and also when buf->virt can be NULL?  I had 
> suggested:
> 
> if (buf->virt) {
> memset(buf->virt, 0, buf->nr_pages * PAGE_SIZE);
> vunmap(buf->virt);
> }

Sorry, yes.  I don't know why the change I made doesn't oops.

David


Re: [RFC PATCH] KEYS: Use individual pages in big_key for crypto buffers [ver #2]

2018-02-15 Thread Eric Biggers
Hi David,

On Thu, Feb 15, 2018 at 10:53:49PM +0000, David Howells wrote:
>  /*
> + * Free up the buffer.
> + */
> +static void big_key_free_buffer(struct big_key_buf *buf)
> +{
> + unsigned int i;
> +
> + vunmap(buf->virt);
> + for (i = 0; i < buf->nr_pages; i++)
> + if (buf->pages[i])
> + __free_page(buf->pages[i]);
> +
> + memset(buf->virt, 0, buf->nr_pages * PAGE_SIZE);
> + kfree(buf);
> +}

memset() after vunmap(), and also when buf->virt can be NULL?  I had suggested:

if (buf->virt) {
memset(buf->virt, 0, buf->nr_pages * PAGE_SIZE);
vunmap(buf->virt);
}

- Eric


[RFC PATCH] KEYS: Use individual pages in big_key for crypto buffers [ver #2]

2018-02-15 Thread David Howells
kmalloc() can't always allocate large enough buffers for big_key to use for
crypto (1MB + some metadata) so we cannot use that to allocate the buffer.
Further, vmalloc'd pages can't be passed to sg_init_one() and the aead
crypto accessors cannot be called progressively and must be passed all the
data in one go (which means we can't pass the data in one block at a time).

Fix this by allocating the buffer pages individually and passing them
through a multientry scatterlist to the crypto layer.  This has the bonus
advantage that we don't have to allocate a contiguous series of pages.

We then vmap() the page list and pass that through to the VFS read/write
routines.

This can trigger a warning:

WARNING: CPU: 0 PID: 60912 at mm/page_alloc.c:3883 
__alloc_pages_nodemask+0xb7c/0x15f8
([<002acbb6>] __alloc_pages_nodemask+0x1ee/0x15f8)
 [<002dd356>] kmalloc_order+0x46/0x90
 [<002dd3e0>] kmalloc_order_trace+0x40/0x1f8
 [<00326a10>] __kmalloc+0x430/0x4c0
 [<004343e4>] big_key_preparse+0x7c/0x210
 [<0042c040>] key_create_or_update+0x128/0x420
 [<0042e52c>] SyS_add_key+0x124/0x220
 [<007bba2c>] system_call+0xc4/0x2b0

from the keyctl/padd/useradd test of the keyutils testsuite on s390x.

Note that it might be better to shovel data through in page-sized lumps
instead as there's no particular need to use a monolithic buffer unless the
kernel itself wants to access the data.

Fixes: 13100a72f40f ("Security: Keys: Big keys stored encrypted")
Reported-by: Paul Bunyan 
Signed-off-by: David Howells 
cc: Kirill Marinushkin 
---

 security/keys/big_key.c |  107 +--
 1 file changed, 84 insertions(+), 23 deletions(-)

diff --git a/security/keys/big_key.c b/security/keys/big_key.c
index 929e14978c42..4deb7df56ec8 100644
--- a/security/keys/big_key.c
+++ b/security/keys/big_key.c
@@ -22,6 +22,13 @@
 #include <keys/big_key-type.h>
 #include <crypto/aead.h>
 
+struct big_key_buf {
+   unsigned intnr_pages;
+   void*virt;
+   struct scatterlist  *sg;
+   struct page *pages[];
+};
+
 /*
  * Layout of key payload words.
  */
@@ -91,10 +98,9 @@ static DEFINE_MUTEX(big_key_aead_lock);
 /*
  * Encrypt/decrypt big_key data
  */
-static int big_key_crypt(enum big_key_op op, u8 *data, size_t datalen, u8 *key)
+static int big_key_crypt(enum big_key_op op, struct big_key_buf *buf, size_t 
datalen, u8 *key)
 {
int ret;
-   struct scatterlist sgio;
struct aead_request *aead_req;
/* We always use a zero nonce. The reason we can get away with this is
 * because we're using a different randomly generated key for every
@@ -109,8 +115,7 @@ static int big_key_crypt(enum big_key_op op, u8 *data, 
size_t datalen, u8 *key)
return -ENOMEM;
 
memset(zero_nonce, 0, sizeof(zero_nonce));
-   sg_init_one(&sgio, data, datalen + (op == BIG_KEY_ENC ? 
ENC_AUTHTAG_SIZE : 0));
-   aead_request_set_crypt(aead_req, &sgio, &sgio, datalen, zero_nonce);
+   aead_request_set_crypt(aead_req, buf->sg, buf->sg, datalen, zero_nonce);
aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, 
NULL);
aead_request_set_ad(aead_req, 0);
 
@@ -130,21 +135,78 @@ static int big_key_crypt(enum big_key_op op, u8 *data, 
size_t datalen, u8 *key)
 }
 
 /*
+ * Free up the buffer.
+ */
+static void big_key_free_buffer(struct big_key_buf *buf)
+{
+   unsigned int i;
+
+   vunmap(buf->virt);
+   for (i = 0; i < buf->nr_pages; i++)
+   if (buf->pages[i])
+   __free_page(buf->pages[i]);
+
+   memset(buf->virt, 0, buf->nr_pages * PAGE_SIZE);
+   kfree(buf);
+}
+
+/*
+ * Allocate a buffer consisting of a set of pages with a virtual mapping
+ * applied over them.
+ */
+static void *big_key_alloc_buffer(size_t len)
+{
+   struct big_key_buf *buf;
+   unsigned int npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+   unsigned int i, l;
+
+   buf = kzalloc(sizeof(struct big_key_buf) +
+ sizeof(struct page) * npg +
+ sizeof(struct scatterlist) * npg,
+ GFP_KERNEL);
+   if (!buf)
+   return NULL;
+
+   buf->nr_pages = npg;
+   buf->sg = (void *)(buf->pages + npg);
+   sg_init_table(buf->sg, npg);
+
+   for (i = 0; i < buf->nr_pages; i++) {
+   buf->pages[i] = alloc_page(GFP_KERNEL);
+   if (!buf->pages[i])
+   goto nomem;
+
+   l = min(len, PAGE_SIZE);
+   sg_set_page(&buf->sg[i], buf->pages[i], l, 0);
+   len -= l;
+   }
+
+   buf->virt = vmap(buf->pages, buf->nr_pages, VM_MAP, PAGE_KERNEL);
+   if (!buf->virt)
+   goto nomem;
+
+   return buf;
+
+nomem:
+   big_key_free_buffer(buf);
+