On Sun, Nov 25, 2007 at 08:31:41PM +0800, Herbert Xu wrote:
> 
> OK, one night I suddenly had this idea that we can postpone the
> uncommon collision case to process context.  Here's the patch.

Small improvement: set the may-sleep flag on the subrequest when it
gets postponed.  So now it can sleep where it couldn't before, ironic
huh :)
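
Purely as an illustration (not part of the patch), here is a rough
user-space sketch of the same scheme, with pthreads and C11 atomics
standing in for test_and_set_bit()/schedule_work().  All names in it
are made up, the tiny LIFO array is nothing like crypto_queue, and the
ordering check the patch does on the fast path is left out; it is only
meant to show the flag/queue handoff and why only the postponed path
is allowed to sleep:

/*
 * Sketch of the postponement scheme: the fast path handles a request
 * inline only when it wins an "in use" flag; on a collision the
 * request is queued and handed to a worker thread, which runs in
 * process context and may therefore sleep -- the analogue of setting
 * CRYPTO_TFM_REQ_MAY_SLEEP on the postponed subrequest.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define QMAX 16

struct ctx {
        atomic_bool inuse;              /* stands in for CHAINIV_STATE_INUSE */
        pthread_mutex_t lock;           /* protects the queue */
        int queue[QMAX];                /* tiny LIFO, unlike crypto_queue */
        int qlen;
};

static void kick_worker(struct ctx *c);

static void do_request(int req, bool may_sleep)
{
        if (may_sleep)
                usleep(1000);           /* only the postponed path may block */
        printf("request %d done (may_sleep=%d)\n", req, may_sleep);
}

/* After a request: re-kick the worker if more is queued, else release. */
static void finish(struct ctx *c)
{
        pthread_mutex_lock(&c->lock);
        int pending = c->qlen;
        pthread_mutex_unlock(&c->lock);
        if (!pending) {
                atomic_store(&c->inuse, false);
                /* recheck: a request may have slipped in before the clear */
                pthread_mutex_lock(&c->lock);
                pending = c->qlen;
                pthread_mutex_unlock(&c->lock);
                if (!pending || atomic_exchange(&c->inuse, true))
                        return;         /* nothing left, or a new owner took over */
        }
        kick_worker(c);
}

/* Postponed path: process context, one request per invocation. */
static void *worker(void *arg)
{
        struct ctx *c = arg;
        int req = -1;

        pthread_mutex_lock(&c->lock);
        if (c->qlen)
                req = c->queue[--c->qlen];
        pthread_mutex_unlock(&c->lock);
        if (req >= 0)
                do_request(req, true);
        finish(c);
        return NULL;
}

static void kick_worker(struct ctx *c)  /* analogue of schedule_work() */
{
        pthread_t t;
        pthread_create(&t, NULL, worker, c);
        pthread_detach(t);
}

/* Fast path, falling back to postponement on a collision. */
static void submit(struct ctx *c, int req)
{
        if (atomic_exchange(&c->inuse, true))
                goto postpone;          /* someone else owns the ctx */

        do_request(req, false);         /* inline, "atomic context": no sleeping */
        finish(c);
        return;

postpone:
        pthread_mutex_lock(&c->lock);
        if (c->qlen < QMAX)
                c->queue[c->qlen++] = req;
        pthread_mutex_unlock(&c->lock);
        if (!atomic_exchange(&c->inuse, true))
                kick_worker(c);
}

int main(void)
{
        static struct ctx c = { .lock = PTHREAD_MUTEX_INITIALIZER };

        for (int i = 0; i < 8; i++)
                submit(&c, i);
        sleep(1);                       /* crude: let the workers drain */
        return 0;
}

(Compile with -pthread; the sleep() at the end is just a crude way of
letting the detached workers drain before exit.)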

Cheers,
-- 
Visit Openswan at http://www.openswan.org/
Email: Herbert Xu ~{PmV>HI~} <[EMAIL PROTECTED]>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
--
diff --git a/crypto/chainiv.c b/crypto/chainiv.c
index 25aa244..49457fa 100644
--- a/crypto/chainiv.c
+++ b/crypto/chainiv.c
@@ -20,45 +20,108 @@
 #include <linux/random.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
+#include <linux/workqueue.h>
+
+enum {
+       CHAINIV_STATE_INUSE = 0,
+};
 
 struct chainiv_ctx {
        struct crypto_ablkcipher *cipher;
+       unsigned long state;
+
        spinlock_t lock;
+       struct crypto_queue queue;
+
+       struct work_struct postponed;
+       int err;
+
        char iv[];
 };
 
-static int chainiv_givcrypt(struct ablkcipher_request *req)
+static int chainiv_schedule_work(struct chainiv_ctx *ctx)
+{
+       int queued;
+
+       if (!ctx->queue.qlen) {
+               smp_mb__before_clear_bit();
+               clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
+
+               if (!ctx->queue.qlen ||
+                   test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
+                       goto out;
+       }
+
+       queued = schedule_work(&ctx->postponed);
+       BUG_ON(!queued);
+
+out:
+       return ctx->err;
+}
+
+static int chainiv_postpone_request(struct ablkcipher_request *req)
 {
        struct crypto_ablkcipher *geniv = crypto_ablkcipher_reqtfm(req);
        struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
-       struct ablkcipher_request *subreq = ablkcipher_request_ctx(req);
-       unsigned int ivsize;
        int err;
 
-       ablkcipher_request_set_tfm(subreq, ctx->cipher);
-       ablkcipher_request_set_callback(subreq, req->base.flags &
-                                               ~CRYPTO_TFM_REQ_MAY_SLEEP,
-                                       req->base.complete, req->base.data);
-       ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->nbytes,
-                                    req->info);
-
        spin_lock_bh(&ctx->lock);
+       err = ablkcipher_enqueue_request(&ctx->queue, req);
+       spin_unlock_bh(&ctx->lock);
+
+       if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
+               return err;
+
+       ctx->err = err;
+       return chainiv_schedule_work(ctx);
+}
+
+static int chainiv_givcrypt_tail(struct ablkcipher_request *req)
+{
+       struct crypto_ablkcipher *geniv = crypto_ablkcipher_reqtfm(req);
+       struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
+       struct ablkcipher_request *subreq = ablkcipher_request_ctx(req);
+       unsigned int ivsize;
 
        ivsize = crypto_ablkcipher_ivsize(geniv);
 
        memcpy(req->giv, ctx->iv, ivsize);
        memcpy(req->info, ctx->iv, ivsize);
 
-       err = crypto_ablkcipher_encrypt(subreq);
-       if (err)
-               goto unlock;
+       ctx->err = crypto_ablkcipher_encrypt(subreq);
+       if (ctx->err)
+               goto out;
 
        memcpy(ctx->iv, req->info, ivsize);
 
-unlock:
-       spin_unlock_bh(&ctx->lock);
+out:
+       return chainiv_schedule_work(ctx);
+}
+
+static int chainiv_givcrypt(struct ablkcipher_request *req)
+{
+       struct crypto_ablkcipher *geniv = crypto_ablkcipher_reqtfm(req);
+       struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
+       struct ablkcipher_request *subreq = ablkcipher_request_ctx(req);
+
+       ablkcipher_request_set_tfm(subreq, ctx->cipher);
+       ablkcipher_request_set_callback(subreq, req->base.flags,
+                                       req->base.complete, req->base.data);
+       ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->nbytes,
+                                    req->info);
+
+       if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
+               goto postpone;
 
-       return err;
+       if (ctx->queue.qlen) {
+               clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
+               goto postpone;
+       }
+
+       return chainiv_givcrypt_tail(req);
+
+postpone:
+       return chainiv_postpone_request(req);
 }
 
 static int chainiv_givcrypt_first(struct ablkcipher_request *req)
@@ -67,19 +130,47 @@ static int chainiv_givcrypt_first(struct ablkcipher_request *req)
        struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
        struct crypto_ablkcipher *cipher = ctx->cipher;
 
-       spin_lock_bh(&ctx->lock);
+       if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
+               goto out;
+
        if (crypto_ablkcipher_crt(cipher)->givcrypt != chainiv_givcrypt_first)
                goto unlock;
 
        crypto_ablkcipher_crt(cipher)->givcrypt = chainiv_givcrypt;
        get_random_bytes(ctx->iv, crypto_ablkcipher_ivsize(geniv));
 
+       return chainiv_givcrypt_tail(req);
+
 unlock:
-       spin_unlock_bh(&ctx->lock);
+       clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
 
+out:
        return chainiv_givcrypt(req);
 }
 
+static void chainiv_do_postponed(struct work_struct *work)
+{
+       struct chainiv_ctx *ctx = container_of(work, struct chainiv_ctx,
+                                              postponed);
+       struct ablkcipher_request *req;
+       struct ablkcipher_request *subreq;
+
+       /* Only handle one request to avoid hogging keventd. */
+       spin_lock_bh(&ctx->lock);
+       req = ablkcipher_dequeue_request(&ctx->queue);
+       spin_unlock_bh(&ctx->lock);
+
+       if (!req) {
+               chainiv_schedule_work(ctx);
+               return;
+       }
+
+       subreq = ablkcipher_request_ctx(req);
+       subreq->base.flags |= CRYPTO_TFM_REQ_MAY_SLEEP;
+
+       chainiv_givcrypt_tail(req);
+}
+
 static int chainiv_init(struct crypto_tfm *tfm)
 {
        struct crypto_instance *inst = (void *)tfm->__crt_alg;
@@ -94,6 +185,9 @@ static int chainiv_init(struct crypto_tfm *tfm)
        ctx->cipher = cipher;
        spin_lock_init(&ctx->lock);
 
+       crypto_init_queue(&ctx->queue, 100);
+       INIT_WORK(&ctx->postponed, chainiv_do_postponed);
+
        tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
                                      crypto_ablkcipher_reqsize(cipher);
 
@@ -103,6 +197,9 @@ static int chainiv_init(struct crypto_tfm *tfm)
 static void chainiv_exit(struct crypto_tfm *tfm)
 {
        struct chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       BUG_ON(test_bit(CHAINIV_STATE_INUSE, &ctx->state) || ctx->queue.qlen);
+
        crypto_free_ablkcipher(ctx->cipher);
 }
 
@@ -117,6 +214,8 @@ static struct crypto_instance *chainiv_alloc(struct rtattr **tb)
        if (IS_ERR(inst))
                goto out;
 
+       inst->alg.cra_flags |= CRYPTO_ALG_ASYNC;
+
        inst->alg.cra_ablkcipher.givcrypt = chainiv_givcrypt_first;
 
        inst->alg.cra_init = chainiv_init;