[PATCH 4.14 052/115] crypto: inside-secure - per request invalidation

2018-03-02, Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Ofer Heifetz 


[ Upstream commit 1eb7b40386c97f6c4d1c62931bf306f4535a4bd6 ]

When an invalidation request is needed we currently override the context
.send and .handle_result helpers. This is wrong as under high load other
requests can already be queued and overriding the context helpers will
make them execute the wrong .send and .handle_result functions.

This commit fixes this by adding a needs_inv flag in the request to
choose the action to perform when sending requests or handling their
results. This flag will be set when needed (i.e. when the context flag
will be set).
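
To make the race concrete, here is a small self-contained sketch (illustrative only, not the driver code): the buggy pattern dispatches through a function pointer swapped on the shared transform context, while the fixed pattern dispatches on a flag carried by each request, so requests already sitting in the queue cannot be misrouted.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative request structure; in the driver this role is played by
 * struct safexcel_cipher_req, reached via skcipher_request_ctx(). */
struct request {
	bool needs_inv;		/* set per request, never shared */
};

static int send_inv(struct request *r)  { (void)r; return puts("invalidate"); }
static int send_data(struct request *r) { (void)r; return puts("process"); }

/* Dispatch on per-request state instead of swapping a function pointer
 * on the shared context, which already-queued requests would follow. */
static int send(struct request *r)
{
	return r->needs_inv ? send_inv(r) : send_data(r);
}

int main(void)
{
	struct request inv  = { .needs_inv = true };
	struct request data = { .needs_inv = false };

	send(&inv);	/* prints "invalidate" */
	send(&data);	/* prints "process" */
	return 0;
}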

Fixes: 1b44c5a60c13 ("crypto: inside-secure - add SafeXcel EIP197 crypto engine driver")
Signed-off-by: Ofer Heifetz 
[Antoine: commit message, and removed non related changes from the
original commit]
Signed-off-by: Antoine Tenart 
Signed-off-by: Herbert Xu 

Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/crypto/inside-secure/safexcel_cipher.c |   71 -
 drivers/crypto/inside-secure/safexcel_hash.c   |   67 ++-
 2 files changed, 111 insertions(+), 27 deletions(-)

--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -14,6 +14,7 @@
 
#include <crypto/aes.h>
#include <crypto/skcipher.h>
+#include <crypto/internal/skcipher.h>
 
 #include "safexcel.h"
 
@@ -33,6 +34,10 @@ struct safexcel_cipher_ctx {
unsigned int key_len;
 };
 
+struct safexcel_cipher_req {
+   bool needs_inv;
+};
+
 static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
  struct crypto_async_request *async,
  struct safexcel_command_desc *cdesc,
@@ -126,9 +131,9 @@ static int safexcel_context_control(stru
return 0;
 }
 
-static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
- struct crypto_async_request *async,
- bool *should_complete, int *ret)
+static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
+ struct crypto_async_request *async,
+ bool *should_complete, int *ret)
 {
struct skcipher_request *req = skcipher_request_cast(async);
struct safexcel_result_desc *rdesc;
@@ -265,7 +270,6 @@ static int safexcel_aes_send(struct cryp
spin_unlock_bh(&priv->ring[ring].egress_lock);
 
request->req = &req->base;
-   ctx->base.handle_result = safexcel_handle_result;
 
*commands = n_cdesc;
*results = n_rdesc;
@@ -341,8 +345,6 @@ static int safexcel_handle_inv_result(st
 
ring = safexcel_select_ring(priv);
ctx->base.ring = ring;
-   ctx->base.needs_inv = false;
-   ctx->base.send = safexcel_aes_send;
 
spin_lock_bh(&priv->ring[ring].queue_lock);
enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
@@ -359,6 +361,26 @@ static int safexcel_handle_inv_result(st
return ndesc;
 }
 
+static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
+ struct crypto_async_request *async,
+ bool *should_complete, int *ret)
+{
+   struct skcipher_request *req = skcipher_request_cast(async);
+   struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+   int err;
+
+   if (sreq->needs_inv) {
+   sreq->needs_inv = false;
+   err = safexcel_handle_inv_result(priv, ring, async,
+                                    should_complete, ret);
+   } else {
+   err = safexcel_handle_req_result(priv, ring, async,
+                                    should_complete, ret);
+   }
+
+   return err;
+}
+
 static int safexcel_cipher_send_inv(struct crypto_async_request *async,
int ring, struct safexcel_request *request,
int *commands, int *results)
@@ -368,8 +390,6 @@ static int safexcel_cipher_send_inv(stru
struct safexcel_crypto_priv *priv = ctx->priv;
int ret;
 
-   ctx->base.handle_result = safexcel_handle_inv_result;
-
ret = safexcel_invalidate_cache(async, &ctx->base, priv,
                                ctx->base.ctxr_dma, ring, request);
if (unlikely(ret))
@@ -381,11 +401,29 @@ static int safexcel_cipher_send_inv(stru
return 0;
 }
 
+static int safexcel_send(struct crypto_async_request *async,
+                        int ring, struct safexcel_request *request,
+                        int *commands, int *results)
+{
+   struct skcipher_request *req = skcipher_request_cast(async);
+   struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+   int ret;
+
+   if (sreq->needs_inv)
+           ret = safexcel_cipher_send_inv(async, ring, request,
+                                          commands, results);
+   else
+           ret = safexcel_aes_send(async, ring, request,
+                                   commands, results);
+   return ret;
+}
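
For completeness, note that skcipher_request_ctx() only returns usable per-request storage if the transform declares its request context size at init time; the hunk doing that is not shown above. A minimal sketch of the registration pattern, with a hypothetical function name:

#include <crypto/internal/skcipher.h>

/* Hypothetical init hook: reserve per-request room so that
 * skcipher_request_ctx() yields a struct safexcel_cipher_req. */
static int example_skcipher_init(struct crypto_skcipher *tfm)
{
	crypto_skcipher_set_reqsize(tfm, sizeof(struct safexcel_cipher_req));
	return 0;
}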
