Replace the per-context spinlock in the CCM encrypt and decrypt paths
with a per-request copy of the crypto context, so concurrent requests
no longer serialize on the lock and a possible contention bottleneck
is avoided.

Signed-off-by: Leonidas Da Silva Barbosa <leosi...@linux.vnet.ibm.com>
---
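Note for reviewers: the nx_copy_ctx() helper called below is introduced
elsewhere in this series and is not part of this diff. Working only from
its call sites here (a nonzero return mapped to -ENOMEM, the copy freed
back to nx_driver.slab through curr_nx_ctx.kmem, and curr_nx_ctx.csbcpb
used in place of nx_ctx->csbcpb), a minimal sketch of what it might look
like follows; the kmem_len field, the pointer re-basing, and the GFP flag
choice are assumptions, not code from this patch:

/* Hypothetical sketch, not part of this diff.  Assumes the series adds
 * a kmem_cache (nx_driver.slab) sized to hold one context's kmem
 * backing buffer, and that csbcpb/in_sg/out_sg point into that buffer
 * and therefore must be re-based after the copy.
 */
static int nx_copy_ctx(struct nx_crypto_ctx *dst,
		       struct nx_crypto_ctx *src)
{
	unsigned long delta;

	*dst = *src;	/* copy the scalar fields (ap, op, priv, ...) */

	/* GFP_ATOMIC is the conservative choice: the spin_lock_irqsave()
	 * being removed implies callers may not be allowed to sleep.
	 */
	dst->kmem = kmem_cache_alloc(nx_driver.slab, GFP_ATOMIC);
	if (!dst->kmem)
		return -ENOMEM;

	memcpy(dst->kmem, src->kmem, src->kmem_len);

	/* re-base the pointers that point into the old kmem buffer */
	delta = (unsigned long)dst->kmem - (unsigned long)src->kmem;
	dst->csbcpb = (struct nx_csbcpb *)((u8 *)src->csbcpb + delta);
	dst->in_sg = (struct nx_sg *)((u8 *)src->in_sg + delta);
	dst->out_sg = (struct nx_sg *)((u8 *)src->out_sg + delta);

	/* the hcall descriptor carries the csbcpb's physical address,
	 * as nx_ctx_init() sets it up
	 */
	dst->op.csbcpb = __pa(dst->csbcpb);

	return 0;
}

The trade-off is one slab allocation plus a context-sized memcpy per
request in exchange for dropping the lock; whether that wins depends on
how often requests actually contend.
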
 drivers/crypto/nx/nx-aes-ccm.c |   42 +++++++++++++++++++++++----------------
 1 files changed, 25 insertions(+), 17 deletions(-)

diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index 5ecd4c2..f1000ad 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -341,12 +341,16 @@ static int ccm_nx_decrypt(struct aead_request   *req,
        unsigned int nbytes = req->cryptlen;
        unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
        struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
-       unsigned long irq_flags;
        unsigned int processed = 0, to_process;
        u32 max_sg_len;
        int rc = -1;
 
-       spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+       struct nx_crypto_ctx curr_nx_ctx;
+
+       if (nx_copy_ctx(&curr_nx_ctx, nx_ctx))
+               return -ENOMEM;
+
+       csbcpb = curr_nx_ctx.csbcpb;
 
        nbytes -= authsize;
 
@@ -355,14 +359,14 @@ static int ccm_nx_decrypt(struct aead_request   *req,
                                 req->src, nbytes, authsize,
                                 SCATTERWALK_FROM_SG);
 
-       rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
+       rc = generate_pat(desc->info, req, &curr_nx_ctx, authsize, nbytes,
                          csbcpb->cpb.aes_ccm.in_pat_or_b0);
        if (rc)
                goto out;
 
        /* page_limit: number of sg entries that fit on one page */
        max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
-                          nx_ctx->ap->sglen);
+                          curr_nx_ctx.ap->sglen);
 
        do {
 
@@ -370,7 +374,7 @@ static int ccm_nx_decrypt(struct aead_request   *req,
                 * update. This value is bound by sg list limits.
                 */
                to_process = min_t(u64, nbytes - processed,
-                                  nx_ctx->ap->databytelen);
+                                  curr_nx_ctx.ap->databytelen);
                to_process = min_t(u64, to_process,
                                   NX_PAGE_SIZE * (max_sg_len - 1));
 
@@ -379,15 +383,15 @@ static int ccm_nx_decrypt(struct aead_request   *req,
                else
                        NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
 
-               NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
+               NX_CPB_FDM(curr_nx_ctx.csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
 
-               rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
+               rc = nx_build_sg_lists(&curr_nx_ctx, desc, req->dst, req->src,
                                        to_process, processed,
                                        csbcpb->cpb.aes_ccm.iv_or_ctr);
                if (rc)
                        goto out;
 
-               rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+               rc = nx_hcall_sync(&curr_nx_ctx, &curr_nx_ctx.op,
                           req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                if (rc)
                        goto out;
@@ -414,7 +418,7 @@ static int ccm_nx_decrypt(struct aead_request   *req,
        rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
                    authsize) ? -EBADMSG : 0;
 out:
-       spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
+       kmem_cache_free(nx_driver.slab, curr_nx_ctx.kmem);
        return rc;
 }
 
@@ -425,28 +429,32 @@ static int ccm_nx_encrypt(struct aead_request   *req,
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        unsigned int nbytes = req->cryptlen;
        unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
-       unsigned long irq_flags;
        unsigned int processed = 0, to_process;
        u32 max_sg_len;
        int rc = -1;
 
-       spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+       struct nx_crypto_ctx curr_nx_ctx;
+
+       if (nx_copy_ctx(&curr_nx_ctx, nx_ctx))
+               return -ENOMEM;
 
-       rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
+       csbcpb = curr_nx_ctx.csbcpb;
+
+       rc = generate_pat(desc->info, req, &curr_nx_ctx, authsize, nbytes,
                          csbcpb->cpb.aes_ccm.in_pat_or_b0);
        if (rc)
                goto out;
 
        /* page_limit: number of sg entries that fit on one page */
        max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
-                          nx_ctx->ap->sglen);
+                          curr_nx_ctx.ap->sglen);
 
        do {
                /* to process: the AES_BLOCK_SIZE data chunk to process in this
                 * update. This value is bound by sg list limits.
                 */
                to_process = min_t(u64, nbytes - processed,
-                                  nx_ctx->ap->databytelen);
+                                  curr_nx_ctx.ap->databytelen);
                to_process = min_t(u64, to_process,
                                   NX_PAGE_SIZE * (max_sg_len - 1));
 
@@ -457,13 +465,13 @@ static int ccm_nx_encrypt(struct aead_request   *req,
 
                NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
 
-               rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
+               rc = nx_build_sg_lists(&curr_nx_ctx, desc, req->dst, req->src,
                                        to_process, processed,
                                       csbcpb->cpb.aes_ccm.iv_or_ctr);
                if (rc)
                        goto out;
 
-               rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+               rc = nx_hcall_sync(&curr_nx_ctx, &curr_nx_ctx.op,
                                   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                if (rc)
                        goto out;
@@ -494,7 +502,7 @@ static int ccm_nx_encrypt(struct aead_request   *req,
                                 SCATTERWALK_TO_SG);
 
 out:
-       spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
+       kmem_cache_free(nx_driver.slab, curr_nx_ctx.kmem);
        return rc;
 }
 
-- 
1.7.1
