This patch updates the dequeue function of the inside-secure safexcel
driver so that failed requests aren't requeued, whatever the reason for
the failure (which can be because the hw ring is full).

Signed-off-by: Antoine Tenart <antoine.ten...@free-electrons.com>
---
 drivers/crypto/inside-secure/safexcel.c | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 8956b23803a8..8ae133a9e3f2 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -439,20 +439,22 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
                        goto finalize;
 
                request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req));
-               if (!request)
-                       goto requeue;
+               if (!request) {
+                       spin_lock_bh(&priv->ring[ring].queue_lock);
+                       crypto_enqueue_request(&priv->ring[ring].queue, req);
+                       spin_unlock_bh(&priv->ring[ring].queue_lock);
+
+                       priv->ring[ring].need_dequeue = true;
+                       goto finalize;
+               }
 
                ctx = crypto_tfm_ctx(req->tfm);
                ret = ctx->send(req, ring, request, &commands, &results);
                if (ret) {
                        kfree(request);
-requeue:
-                       spin_lock_bh(&priv->ring[ring].queue_lock);
-                       crypto_enqueue_request(&priv->ring[ring].queue, req);
-                       spin_unlock_bh(&priv->ring[ring].queue_lock);
-
+                       req->complete(req, ret);
                        priv->ring[ring].need_dequeue = true;
-                       continue;
+                       goto finalize;
                }
 
                if (backlog)
-- 
2.9.4

Reply via email to