RE: [PATCH] tls: Add support for encryption using async offload accelerator
Forgot to add 'v2' in subject line. I will be resending. -Original Message- From: Vakul Garg Sent: Wednesday, January 31, 2018 9:29 PM To: linux-crypto@vger.kernel.org Cc: il...@mellanox.com; avia...@mellanox.com; davejwat...@fb.com; da...@davemloft.net; net...@vger.kernel.org; Vakul Garg Subject: [PATCH] tls: Add support for encryption using async offload accelerator Async crypto accelerators (e.g. drivers/crypto/caam) support offloading GCM operation. If they are enabled, crypto_aead_encrypt() return error code -EINPROGRESS. In this case tls_do_encryption() needs to wait on a completion till the time the response for crypto offload request is received. Signed-off-by: Vakul Garg --- v1-v2: - Used crypto_wait_req() to wait for async operation completion - Passed CRYPTO_TFM_REQ_MAY_BACKLOG to crypto_aead_encrypt include/net/tls.h | 2 ++ net/tls/tls_sw.c | 8 +++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/include/net/tls.h b/include/net/tls.h index 936cfc5cab7d..8a8e1256caee 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -36,6 +36,7 @@ #include #include +#include #include #include #include @@ -57,6 +58,7 @@ struct tls_sw_context { struct crypto_aead *aead_send; + struct crypto_wait async_wait; /* Sending context */ char aad_space[TLS_AAD_SPACE_SIZE]; diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 73d19210dd49..31a9f0ef8af6 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -214,7 +214,11 @@ static int tls_do_encryption(struct tls_context *tls_ctx, aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE); aead_request_set_crypt(aead_req, ctx->sg_aead_in, ctx->sg_aead_out, data_len, tls_ctx->iv); - rc = crypto_aead_encrypt(aead_req); + + aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG, + crypto_req_done, &ctx->async_wait); + + rc = crypto_wait_req(crypto_aead_encrypt(aead_req), &ctx->async_wait); ctx->sg_encrypted_data[0].offset -= tls_ctx->prepend_size; ctx->sg_encrypted_data[0].length += tls_ctx->prepend_size; @@ 
-663,6 +667,8 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx) goto out; } + crypto_init_wait(&sw_ctx->async_wait); + ctx->priv_ctx = (struct tls_offload_context *)sw_ctx; crypto_info = &ctx->crypto_send; -- 2.13.6
[PATCH] tls: Add support for encryption using async offload accelerator
Async crypto accelerators (e.g. drivers/crypto/caam) support offloading GCM operation. If they are enabled, crypto_aead_encrypt() return error code -EINPROGRESS. In this case tls_do_encryption() needs to wait on a completion till the time the response for crypto offload request is received. Signed-off-by: Vakul Garg --- v1-v2: - Used crypto_wait_req() to wait for async operation completion - Passed CRYPTO_TFM_REQ_MAY_BACKLOG to crypto_aead_encrypt include/net/tls.h | 2 ++ net/tls/tls_sw.c | 8 +++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/include/net/tls.h b/include/net/tls.h index 936cfc5cab7d..8a8e1256caee 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -36,6 +36,7 @@ #include #include +#include #include #include #include @@ -57,6 +58,7 @@ struct tls_sw_context { struct crypto_aead *aead_send; + struct crypto_wait async_wait; /* Sending context */ char aad_space[TLS_AAD_SPACE_SIZE]; diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 73d19210dd49..31a9f0ef8af6 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -214,7 +214,11 @@ static int tls_do_encryption(struct tls_context *tls_ctx, aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE); aead_request_set_crypt(aead_req, ctx->sg_aead_in, ctx->sg_aead_out, data_len, tls_ctx->iv); - rc = crypto_aead_encrypt(aead_req); + + aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG, + crypto_req_done, &ctx->async_wait); + + rc = crypto_wait_req(crypto_aead_encrypt(aead_req), &ctx->async_wait); ctx->sg_encrypted_data[0].offset -= tls_ctx->prepend_size; ctx->sg_encrypted_data[0].length += tls_ctx->prepend_size; @@ -663,6 +667,8 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx) goto out; } + crypto_init_wait(&sw_ctx->async_wait); + ctx->priv_ctx = (struct tls_offload_context *)sw_ctx; crypto_info = &ctx->crypto_send; -- 2.13.6
[PATCH] tls: Add support for encryption using async offload accelerator
Async crypto accelerators (e.g. drivers/crypto/caam) support offloading GCM operation. If they are enabled, crypto_aead_encrypt() return error code -EINPROGRESS. In this case tls_do_encryption() needs to wait on a completion till the time the response for crypto offload request is received. Signed-off-by: Vakul Garg --- net/tls/tls_sw.c | 22 ++ 1 file changed, 22 insertions(+) diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 73d19210dd49..390e6dc7b135 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -39,6 +39,11 @@ #include +struct crypto_completion { + struct completion comp; + int result; +}; + static void trim_sg(struct sock *sk, struct scatterlist *sg, int *sg_num_elem, unsigned int *sg_size, int target_size) { @@ -194,6 +199,14 @@ static void tls_free_both_sg(struct sock *sk) &ctx->sg_plaintext_size); } +static void crypto_complete(struct crypto_async_request *req, int err) +{ + struct crypto_completion *x = req->data; + + x->result = err; + complete(&x->comp); +} + static int tls_do_encryption(struct tls_context *tls_ctx, struct tls_sw_context *ctx, size_t data_len, gfp_t flags) @@ -202,6 +215,7 @@ static int tls_do_encryption(struct tls_context *tls_ctx, crypto_aead_reqsize(ctx->aead_send); struct aead_request *aead_req; int rc; + struct crypto_completion crypto_done; aead_req = kzalloc(req_size, flags); if (!aead_req) @@ -214,7 +228,15 @@ static int tls_do_encryption(struct tls_context *tls_ctx, aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE); aead_request_set_crypt(aead_req, ctx->sg_aead_in, ctx->sg_aead_out, data_len, tls_ctx->iv); + aead_request_set_callback(aead_req, 0, crypto_complete, &crypto_done); + + init_completion(&crypto_done.comp); + rc = crypto_aead_encrypt(aead_req); + if (rc == -EINPROGRESS) { + wait_for_completion(&crypto_done.comp); + rc = crypto_done.result; + } ctx->sg_encrypted_data[0].offset -= tls_ctx->prepend_size; ctx->sg_encrypted_data[0].length += tls_ctx->prepend_size; -- 2.13.6
Re: [PATCH] tls: Add support for encryption using async offload accelerator
On Wed, Jan 31, 2018 at 8:10 AM, Gilad Ben-Yossef wrote: > Hi Vakul, > > On Wed, Jan 31, 2018 at 12:36 PM, Vakul Garg wrote: >> Async crypto accelerators (e.g. drivers/crypto/caam) support offloading >> GCM operation. If they are enabled, crypto_aead_encrypt() return error >> code -EINPROGRESS. In this case tls_do_encryption() needs to wait on a >> completion till the time the response for crypto offload request is >> received. >> > > Thank you for this patch. I think it is actually a bug fix and should > probably go into stable. On second thought, in stable we should probably just disable async tfm allocations. It's simpler. But this approach is still good for -next. Gilad -- Gilad Ben-Yossef Chief Coffee Drinker "If you take a class in large-scale robotics, can you end up in a situation where the homework eats your dog?" -- Jean-Baptiste Queru
Re: [PATCH] tls: Add support for encryption using async offload accelerator
Hi Vakul, On Wed, Jan 31, 2018 at 12:36 PM, Vakul Garg wrote: > Async crypto accelerators (e.g. drivers/crypto/caam) support offloading > GCM operation. If they are enabled, crypto_aead_encrypt() return error > code -EINPROGRESS. In this case tls_do_encryption() needs to wait on a > completion till the time the response for crypto offload request is > received. > Thank you for this patch. I think it is actually a bug fix and should probably go into stable since I believe current code is broken on machines where any async GCM transformation is registered with a high enough priority as it will try to call a NULL callback. But, please use the new crypto_wait_req() and friends to implement the waiting. Take a look at the code example in Documentation/crypto/api-samples.rst to see how. Also, maybe add an explanation why CRYPTO_TFM_REQ_MAY_BACKLOG flag should not be used here, if this was on purpose? I mean I'm not sure using backlog here is a good idea but probably should explain why not. Thanks! 
Gilad > Signed-off-by: Vakul Garg > --- > net/tls/tls_sw.c | 22 ++ > 1 file changed, 22 insertions(+) > > diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c > index 73d19210dd49..390e6dc7b135 100644 > --- a/net/tls/tls_sw.c > +++ b/net/tls/tls_sw.c > @@ -39,6 +39,11 @@ > > #include > > +struct crypto_completion { > + struct completion comp; > + int result; > +}; > + > static void trim_sg(struct sock *sk, struct scatterlist *sg, > int *sg_num_elem, unsigned int *sg_size, int target_size) > { > @@ -194,6 +199,14 @@ static void tls_free_both_sg(struct sock *sk) > &ctx->sg_plaintext_size); > } > > +static void crypto_complete(struct crypto_async_request *req, int err) > +{ > + struct crypto_completion *x = req->data; > + > + x->result = err; > + complete(&x->comp); > +} > + > static int tls_do_encryption(struct tls_context *tls_ctx, > struct tls_sw_context *ctx, size_t data_len, > gfp_t flags) > @@ -202,6 +215,7 @@ static int tls_do_encryption(struct tls_context *tls_ctx, > crypto_aead_reqsize(ctx->aead_send); > struct aead_request *aead_req; > int rc; > + struct crypto_completion crypto_done; > > aead_req = kzalloc(req_size, flags); > if (!aead_req) > @@ -214,7 +228,15 @@ static int tls_do_encryption(struct tls_context *tls_ctx, > aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE); > aead_request_set_crypt(aead_req, ctx->sg_aead_in, ctx->sg_aead_out, >data_len, tls_ctx->iv); > + aead_request_set_callback(aead_req, 0, crypto_complete, &crypto_done); > + > + init_completion(&crypto_done.comp); > + > rc = crypto_aead_encrypt(aead_req); > + if (rc == -EINPROGRESS) { > + wait_for_completion(&crypto_done.comp); > + rc = crypto_done.result; > + } > > ctx->sg_encrypted_data[0].offset -= tls_ctx->prepend_size; > ctx->sg_encrypted_data[0].length += tls_ctx->prepend_size; > -- > 2.13.6 > -- Gilad Ben-Yossef Chief Coffee Drinker "If you take a class in large-scale robotics, can you end up in a situation where the homework eats your dog?" -- Jean-Baptiste Queru
[PATCH] tls: Add support for encryption using async offload accelerator
Async crypto accelerators (e.g. drivers/crypto/caam) support offloading GCM operation. If they are enabled, crypto_aead_encrypt() return error code -EINPROGRESS. In this case tls_do_encryption() needs to wait on a completion till the time the response for crypto offload request is received. Signed-off-by: Vakul Garg --- net/tls/tls_sw.c | 22 ++ 1 file changed, 22 insertions(+) diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 73d19210dd49..390e6dc7b135 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -39,6 +39,11 @@ #include +struct crypto_completion { + struct completion comp; + int result; +}; + static void trim_sg(struct sock *sk, struct scatterlist *sg, int *sg_num_elem, unsigned int *sg_size, int target_size) { @@ -194,6 +199,14 @@ static void tls_free_both_sg(struct sock *sk) &ctx->sg_plaintext_size); } +static void crypto_complete(struct crypto_async_request *req, int err) +{ + struct crypto_completion *x = req->data; + + x->result = err; + complete(&x->comp); +} + static int tls_do_encryption(struct tls_context *tls_ctx, struct tls_sw_context *ctx, size_t data_len, gfp_t flags) @@ -202,6 +215,7 @@ static int tls_do_encryption(struct tls_context *tls_ctx, crypto_aead_reqsize(ctx->aead_send); struct aead_request *aead_req; int rc; + struct crypto_completion crypto_done; aead_req = kzalloc(req_size, flags); if (!aead_req) @@ -214,7 +228,15 @@ static int tls_do_encryption(struct tls_context *tls_ctx, aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE); aead_request_set_crypt(aead_req, ctx->sg_aead_in, ctx->sg_aead_out, data_len, tls_ctx->iv); + aead_request_set_callback(aead_req, 0, crypto_complete, &crypto_done); + + init_completion(&crypto_done.comp); + rc = crypto_aead_encrypt(aead_req); + if (rc == -EINPROGRESS) { + wait_for_completion(&crypto_done.comp); + rc = crypto_done.result; + } ctx->sg_encrypted_data[0].offset -= tls_ctx->prepend_size; ctx->sg_encrypted_data[0].length += tls_ctx->prepend_size; -- 2.13.6