Hi, Nice job (horoshaya rabota). Just two issues: -> I saw that this patch uses a global variable which counts the number of pending requests: dm_async_pending. I think this variable should be allocated per dm-crypt session, i.e. for each crypt_config, since there can be more than 2 consumers reading through dm-crypt. -> I think we might have a problem if a write operation is processed in parallel with a read operation: the read might wait for the write to complete, and dm_async_pending might also end up with wrong values.
Regards Ronen Shitrit Marvell Semiconductor Israel Ltd -----Original Message----- From: [EMAIL PROTECTED] [mailto:[EMAIL PROTECTED] On Behalf Of Evgeniy Polyakov Sent: Friday, September 02, 2005 9:09 AM To: linux-crypto@vger.kernel.org Subject: [ACRYPTO] dm-crypt ported to acrypto. Hello, developers. dm-crypt from device mapper package has been ported to acrypto. Patch against 2.6.13 for dm-crypt attached. [1] I did not run any performance test, since I have no hardware setup currently which can show any diference between sync/async dm-crypt. dm-crypt is the second storage management tool working with acrypto. BD is the first and the fastest one, specially designed for asynchronous operations, it can be fount at [2]. 1. http://tservice.net.ru/~s0mbre/archive/acrypto/drivers 2. http://tservice.net.ru/~s0mbre/?section=projects&item=bd diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -230,6 +230,173 @@ static struct crypt_iv_operations crypt_ .generator = crypt_iv_essiv_gen }; +#if defined CONFIG_ACRYPTO || defined CONFIG_ACRYPTO_MODULE + +static DECLARE_WAIT_QUEUE_HEAD(dm_async_queue); +static int dm_async_pending; + +#include <linux/acrypto.h> +#include <linux/crypto_def.h> + +static inline u16 crypto_tfm_get_type(struct crypto_tfm *tfm) { + u16 type; + char *name; + + name = (char *)crypto_tfm_alg_name(tfm); + + if (!strncmp(name, "aes", 3)) { + switch (crypto_tfm_alg_blocksize(tfm) << 3) { + case 128: + type = CRYPTO_TYPE_AES_128; + break; + case 192: + type = CRYPTO_TYPE_AES_192; + break; + case 256: + type = CRYPTO_TYPE_AES_256; + break; + default: + type = 0xffff; + break; + } + } else if (!strncmp(name, "des3_ede", 3)) + type = CRYPTO_TYPE_3DES; + else + type = 0xffff; + + return type; +} + +static inline u16 crypto_tfm_get_mode(struct crypto_tfm *tfm) { + u16 mode = tfm->crt_cipher.cit_mode & CRYPTO_TFM_MODE_MASK; + + switch (mode) { + case CRYPTO_TFM_MODE_ECB: + mode = CRYPTO_MODE_ECB; + 
break; + case CRYPTO_TFM_MODE_CBC: + mode = CRYPTO_MODE_CBC; + break; + case CRYPTO_TFM_MODE_CFB: + mode = CRYPTO_MODE_CFB; + break; + case CRYPTO_TFM_MODE_CTR: + mode = CRYPTO_MODE_CTR; + break; + default: + mode = 0xffff; + break; + } + + return mode; +} + +static void dm_callback(struct crypto_session_initializer *ci, struct +crypto_data *data) { + dm_async_pending--; + + kfree((void *)(data->priv)); + wake_up(&dm_async_queue); +} + +static inline int acrypto_process(struct crypt_config *cc, struct scatterlist *out, + struct scatterlist *in, unsigned int len, u8 *iv, int iv_size, int +write) { + struct crypto_session *s; + struct crypto_session_initializer ci; + struct crypto_data data; + struct scatterlist *sg; + int sg_size, err = 0; + u8 *local_key; + + memset(&ci, 0, sizeof(ci)); + memset(&data, 0, sizeof(data)); + + ci.operation = (write)?CRYPTO_OP_ENCRYPT:CRYPTO_OP_DECRYPT; + ci.priority = 0; + ci.callback = &dm_callback; + ci.type = crypto_tfm_get_type(cc->tfm); + ci.mode = crypto_tfm_get_mode(cc->tfm); + + if (ci.type == 0xffff || ci.mode == 0xffff) { + err = -EINVAL; + goto err_out_exit; + } + + data.sg_src_num = data.sg_dst_num = data.sg_key_num = 1; + data.sg_iv_num = (iv)?1:0; + + sg_size = sizeof(*sg) * (data.sg_src_num + data.sg_dst_num + data.sg_iv_num + data.sg_key_num + iv_size + cc->key_size); + sg = kmalloc(sg_size, GFP_KERNEL); + if (!sg) { + err = -ENOMEM; + goto err_out_exit; + } + + memset(sg, 0, sg_size); +#if 0 + printk("%s: key_size=%u, iv=%p, iv_size=%d, write=%d, priv=%p.\n", + __func__, cc->key_size, iv, iv_size, write, sg); #endif + data.priv = sg; + + data.sg_src = &sg[0]; + data.sg_dst = &sg[1]; + + data.sg_src[0].page = in->page; + data.sg_src[0].offset = in->offset; + data.sg_src[0].length = in->length; + + data.sg_dst[0].page = out->page; + data.sg_dst[0].offset = out->offset; + data.sg_dst[0].length = out->length; + + data.sg_key = &sg[2]; + local_key = (u8 *)&sg[3]; + + memcpy(local_key, cc->key, cc->key_size); + + 
data.sg_key[0].page = virt_to_page(local_key); + data.sg_key[0].offset = offset_in_page(local_key); + data.sg_key[0].length = cc->key_size; + + if (iv) { + u8 *local_iv; + + data.sg_iv = (struct scatterlist *)(local_key + cc->key_size); + local_iv = (u8 *)(local_key + cc->key_size + sizeof(*sg)); + + data.sg_iv[0].page = virt_to_page(local_iv); + data.sg_iv[0].offset = offset_in_page(local_iv); + data.sg_iv[0].length = iv_size; + + memcpy(local_iv, iv, iv_size); + } + + s = crypto_session_alloc(&ci, &data); + if (!s) { + err = -ENOMEM; + goto err_out_free; + } + + return 0; + +err_out_free: + kfree(sg); +err_out_exit: + dm_async_pending--; + + return err; +} +#else +static inline int acrypto_process(struct crypt_config *cc, struct scatterlist *out, + struct scatterlist *in, unsigned int len, u8 *iv, int iv_size, int write) +{ + return 0; +} +#endif static inline int crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out, @@ -244,11 +411,19 @@ crypt_convert_scatterlist(struct crypt_c if (r < 0) return r; + r = acrypto_process(cc, out, in, length, iv, cc->iv_size, write); + if (r == 0) + return r; + if (write) r = crypto_cipher_encrypt_iv(cc->tfm, out, in, length, iv); else r = crypto_cipher_decrypt_iv(cc->tfm, out, in, length, iv); } else { + r = acrypto_process(cc, out, in, length, NULL, 0, write); + if (r == 0) + return r; + if (write) r = crypto_cipher_encrypt(cc->tfm, out, in, length); else @@ -281,6 +456,42 @@ static int crypt_convert(struct crypt_co { int r = 0; +#if defined CONFIG_ACRYPTO || defined CONFIG_ACRYPTO_MODULE + { + unsigned int idx_in, idx_out, offset_in, offset_out; + int num = 0; + + idx_in = ctx->idx_in; + idx_out = ctx->idx_out; + offset_in = ctx->offset_in; + offset_out = ctx->offset_out; + + while(idx_in < ctx->bio_in->bi_vcnt && + idx_out < ctx->bio_out->bi_vcnt) { + + struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, idx_in); + struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, idx_out); + + offset_in += 1 << 
SECTOR_SHIFT; + offset_out += 1 << SECTOR_SHIFT; + + if (offset_in >= bv_in->bv_len) { + offset_in = 0; + idx_in++; + } + + if (offset_out >= bv_out->bv_len) { + offset_out = 0; + idx_out++; + } + + num++; + } + + dm_async_pending = num; + } +#endif + while(ctx->idx_in < ctx->bio_in->bi_vcnt && ctx->idx_out < ctx->bio_out->bi_vcnt) { struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in); @@ -470,6 +681,17 @@ static void kcryptd_do_work(void *data) io->bio->bi_sector - io->target->begin, 0); r = crypt_convert(cc, &ctx); +#if defined CONFIG_ACRYPTO || defined CONFIG_ACRYPTO_MODULE + { + long timeout = 1000; + long tm; + + tm = wait_event_timeout(dm_async_queue, dm_async_pending == 0, msecs_to_jiffies(timeout)); + if (!tm) + printk("%s: bug: work was not finished in %ld msecs, %d users remains.\n", + __func__, timeout, dm_async_pending); + } +#endif dec_pending(io, r); } -- Evgeniy Polyakov - To unsubscribe from this list: send the line "unsubscribe linux-crypto" in the body of a message to [EMAIL PROTECTED] More majordomo info at http://vger.kernel.org/majordomo-info.html - To unsubscribe from this list: send the line "unsubscribe linux-crypto" in the body of a message to [EMAIL PROTECTED] More majordomo info at http://vger.kernel.org/majordomo-info.html