--- a/drivers/md/dm-crypt.c	2005-06-17 22:48:29.000000000 +0300
+++ b/drivers/md/dm-crypt.c	2005-09-06 21:17:15.414188768 +0300
@@ -18,6 +18,19 @@
 #include <asm/scatterlist.h>
 #include <asm/page.h>
 
+#if defined(CONFIG_OCF_DM_CRYPT)
+
+#undef DM_DEBUG
+#ifdef DM_DEBUG
+#define dmprintk printk
+#else
+#define dmprintk(fmt,args...) 
+#endif
+
+#include <linux/cryptodev.h>
+
+#endif /* CONFIG_OCF_DM_CRYPT */
+
 #include "dm.h"
 
 #define PFX	"crypt: "
@@ -82,7 +95,13 @@
 	sector_t iv_offset;
 	unsigned int iv_size;
 
+#if defined(CONFIG_OCF_DM_CRYPT)
+	struct cryptoini 	cr_dm;    		/* OCF session */
+	char  			ocf_alg_name[CRYPTO_MAX_ALG_NAME];
+	uint64_t 	 	ocf_cryptoid;		/* OCF session ID */
+#else
 	struct crypto_tfm *tfm;
+#endif
 	unsigned int key_size;
 	u8 key[0];
 };
@@ -171,15 +190,24 @@
 	crypto_free_tfm(hash_tfm);
 
 	/* Setup the essiv_tfm with the given salt */
+#if defined(CONFIG_OCF_DM_CRYPT)
+	essiv_tfm = crypto_alloc_tfm(cc->ocf_alg_name,
+	                             CRYPTO_TFM_MODE_ECB);
+#else
 	essiv_tfm = crypto_alloc_tfm(crypto_tfm_alg_name(cc->tfm),
 	                             CRYPTO_TFM_MODE_ECB);
+#endif
 	if (essiv_tfm == NULL) {
 		ti->error = PFX "Error allocating crypto tfm for ESSIV";
 		kfree(salt);
 		return -EINVAL;
 	}
+#if  defined(CONFIG_OCF_DM_CRYPT)
+	if (crypto_tfm_alg_blocksize(essiv_tfm) != cc->iv_size) {
+#else
 	if (crypto_tfm_alg_blocksize(essiv_tfm)
 	    != crypto_tfm_alg_ivsize(cc->tfm)) {
+#endif
 		ti->error = PFX "Block size of ESSIV cipher does "
 			        "not match IV size of block cipher";
 		crypto_free_tfm(essiv_tfm);
@@ -231,6 +259,219 @@
 };
 
 
+#if defined(CONFIG_OCF_DM_CRYPT)
+static void dec_pending(struct crypt_io *io, int error);
+
+struct ocf_wr_priv {
+	u32 		 	dm_ocf_wr_completed;	/* Num of wr completions */
+	u32 		 	dm_ocf_wr_pending;	/* Num of wr pendings */
+	wait_queue_head_t	dm_ocf_wr_queue;	/* waiting Q, for wr completion */
+};
+
+static int dm_ocf_wr_cb(struct cryptop *crp)
+{
+	struct ocf_wr_priv *ocf_wr_priv;
+
+	if(crp == NULL) {
+		printk("dm_ocf_wr_cb: crp is NULL!! \n");
+		return 0;
+	}
+
+	ocf_wr_priv = (struct ocf_wr_priv*)crp->crp_opaque;
+
+	ocf_wr_priv->dm_ocf_wr_completed++;
+	
+	/* if all pending writes have completed, wake up the waiting writer. */
+	if(ocf_wr_priv->dm_ocf_wr_completed == ocf_wr_priv->dm_ocf_wr_pending)
+		wake_up(&ocf_wr_priv->dm_ocf_wr_queue);
+
+	crypto_freereq(crp);
+	return 0;
+}
+
+static int dm_ocf_rd_cb(struct cryptop *crp)
+{
+	struct crypt_io *io;
+
+	if(crp == NULL) {
+		printk("dm_ocf_rd_cb: crp is NULL!! \n");
+		return 0;
+	}
+
+	io = (struct crypt_io *)crp->crp_opaque;
+
+	crypto_freereq(crp);
+
+	if(io != NULL)
+		dec_pending(io, 0);
+
+	return 0;
+}
+
+static inline int dm_ocf_process(struct crypt_config *cc, struct scatterlist *out, 
+		struct scatterlist *in, unsigned int len, u8 *iv, int iv_size, int write, void *priv)
+{
+	struct cryptop *crp;
+	struct cryptodesc *crda = NULL;
+
+	if(!iv) {
+		printk("dm_ocf_process: only CBC mode is supported\n");
+		return -EPERM;	
+	}
+
+	crp = crypto_getreq(1);	 /* only encryption/decryption */
+	if (!crp) {
+		printk("dm_ocf_process: crypto_getreq failed!!\n");
+		return -ENOMEM;
+	}
+	
+	crda = crp->crp_desc;
+
+	crda->crd_flags  = (write)? CRD_F_ENCRYPT: 0;	
+	crda->crd_alg    = cc->cr_dm.cri_alg;
+	crda->crd_skip   = 0;
+	crda->crd_len    = len;
+	crda->crd_inject = 0; /* NA */
+	crda->crd_klen   = cc->cr_dm.cri_klen;
+	crda->crd_key    = cc->cr_dm.cri_key;
+
+	if (iv) {
+		crda->crd_flags |= (CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT);
+		if( iv_size > EALG_MAX_BLOCK_LEN ) { /* crd_iv is only EALG_MAX_BLOCK_LEN bytes */
+			printk("dm_ocf_process: iv is too big!!\n");
+			crypto_freereq(crp); return -EINVAL; }
+		memcpy(&crda->crd_iv, iv, iv_size);		
+	}
+
+	/* according to the current implementation the in and the out are the same buffer for read, and different for write*/
+	if((page_address(out->page) + out->offset) != (page_address(in->page) + in->offset)) {
+		memcpy((page_address(out->page) + out->offset) , (page_address(in->page) + in->offset) , len);
+		dmprintk("dm_ocf_process: copy buffers!! \n");
+	}
+
+	dmprintk("len: %d",len);
+	crp->crp_ilen = len; /* Total input length */
+        crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_BATCH;
+        crp->crp_buf = page_address(out->page) + out->offset;
+	crp->crp_opaque = priv;
+	if(write) {
+        	crp->crp_callback = dm_ocf_wr_cb;
+	}
+	else {
+		crp->crp_callback = dm_ocf_rd_cb;
+	}
+        crp->crp_sid = cc->ocf_cryptoid;
+        if(crypto_dispatch(crp) != 0) {
+		printk("dm_ocf_process: crypto_dispatch failed!!\n");
+	}
+
+	return 0;
+	
+}
+
+static inline int
+ocf_crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
+                          struct scatterlist *in, unsigned int length,
+                          int write, sector_t sector, void *priv)
+{
+	u8 iv[cc->iv_size];
+	int r;
+
+	if (cc->iv_gen_ops) {
+		r = cc->iv_gen_ops->generator(cc, iv, sector);
+		if (r < 0)
+			return r;
+		r = dm_ocf_process(cc, out, in, length, iv, cc->iv_size, write, priv);
+	} else {
+		r = dm_ocf_process(cc, out, in, length, NULL, 0, write, priv);
+	}
+
+	return r;
+}
+
+/*
+ * Encrypt / decrypt data from one bio to another one (can be the same one)
+ */
+static int ocf_crypt_convert(struct crypt_config *cc,
+                         struct convert_context *ctx, struct crypt_io *io)
+{
+	int r = 0;
+	long wr_timeout = 2000;
+	long wr_tm;
+	int num = 0;
+	void *priv = NULL;
+	struct ocf_wr_priv *ocf_wr_priv = NULL;
+
+	if(ctx->write) {
+		ocf_wr_priv = kmalloc(sizeof(struct ocf_wr_priv),GFP_KERNEL);
+		if (ocf_wr_priv == NULL) /* was dereferenced unchecked: NULL deref on OOM */
+			return -ENOMEM;
+		ocf_wr_priv->dm_ocf_wr_pending = 0; ocf_wr_priv->dm_ocf_wr_completed = 0;
+		init_waitqueue_head(&ocf_wr_priv->dm_ocf_wr_queue); priv = ocf_wr_priv;
+	}
+
+	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
+	      ctx->idx_out < ctx->bio_out->bi_vcnt) {
+		struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
+		struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
+		struct scatterlist sg_in = {
+			.page = bv_in->bv_page,
+			.offset = bv_in->bv_offset + ctx->offset_in,
+			.length = 1 << SECTOR_SHIFT
+		};
+		struct scatterlist sg_out = {
+			.page = bv_out->bv_page,
+			.offset = bv_out->bv_offset + ctx->offset_out,
+			.length = 1 << SECTOR_SHIFT
+		};
+
+		ctx->offset_in += sg_in.length;
+		if (ctx->offset_in >= bv_in->bv_len) {
+			ctx->offset_in = 0;
+			ctx->idx_in++;
+		}
+
+		ctx->offset_out += sg_out.length;
+		if (ctx->offset_out >= bv_out->bv_len) {
+			ctx->offset_out = 0;
+			ctx->idx_out++;
+		}
+
+		if(ctx->write) {
+			num++;
+		}
+		/* if last read in the context - send the io, so the OCF read callback will release the IO. */
+		else if(!(ctx->idx_in < ctx->bio_in->bi_vcnt && ctx->idx_out < ctx->bio_out->bi_vcnt)) {
+			priv = io;
+		}
+
+		r = ocf_crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length,
+		                              ctx->write, ctx->sector, priv);
+		if (r < 0){
+			printk("ocf_crypt_convert: ocf_crypt_convert_scatterlist failed \n");
+			break;
+		}
+
+		ctx->sector++;
+	}
+
+	if(ctx->write) {
+		ocf_wr_priv->dm_ocf_wr_pending += num;
+		wr_tm = wait_event_timeout(ocf_wr_priv->dm_ocf_wr_queue, 
+				(ocf_wr_priv->dm_ocf_wr_pending == ocf_wr_priv->dm_ocf_wr_completed)
+									, msecs_to_jiffies(wr_timeout) );
+		if (!wr_tm) {
+			printk("ocf_crypt_convert: wr work was not finished in %ld msecs, %d pending %d completed.\n", 
+				wr_timeout, ocf_wr_priv->dm_ocf_wr_pending, ocf_wr_priv->dm_ocf_wr_completed);
+		}
+		kfree(ocf_wr_priv);
+	}
+
+	return r;
+}
+
+#else
+
 static inline int
 crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
                           struct scatterlist *in, unsigned int length,
@@ -258,21 +499,6 @@
 	return r;
 }
 
-static void
-crypt_convert_init(struct crypt_config *cc, struct convert_context *ctx,
-                   struct bio *bio_out, struct bio *bio_in,
-                   sector_t sector, int write)
-{
-	ctx->bio_in = bio_in;
-	ctx->bio_out = bio_out;
-	ctx->offset_in = 0;
-	ctx->offset_out = 0;
-	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
-	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
-	ctx->sector = sector + cc->iv_offset;
-	ctx->write = write;
-}
-
 /*
  * Encrypt / decrypt data from one bio to another one (can be the same one)
  */
@@ -319,6 +545,24 @@
 	return r;
 }
 
+#endif
+
+static void
+crypt_convert_init(struct crypt_config *cc, struct convert_context *ctx,
+                   struct bio *bio_out, struct bio *bio_in,
+                   sector_t sector, int write)
+{
+	ctx->bio_in = bio_in;
+	ctx->bio_out = bio_out;
+	ctx->offset_in = 0;
+	ctx->offset_out = 0;
+	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
+	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
+	ctx->sector = sector + cc->iv_offset;
+	ctx->write = write;
+}
+
+
 /*
  * Generate a new unfragmented bio with the given size
  * This should never violate the device limitations
@@ -468,9 +712,16 @@
 
 	crypt_convert_init(cc, &ctx, io->bio, io->bio,
 	                   io->bio->bi_sector - io->target->begin, 0);
+#if defined(CONFIG_OCF_DM_CRYPT)
+	r = ocf_crypt_convert(cc, &ctx, io);
+
+	if(r < 0)
+		dec_pending(io, r);
+#else
 	r = crypt_convert(cc, &ctx);
 
 	dec_pending(io, r);
+#endif
 }
 
 static void kcryptd_queue_io(struct crypt_io *io)
@@ -527,13 +778,17 @@
 static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 {
 	struct crypt_config *cc;
+	unsigned int crypto_flags;
+#if defined(CONFIG_OCF_DM_CRYPT)
+	struct cryptoini cr_dm;
+#else
 	struct crypto_tfm *tfm;
+#endif
 	char *tmp;
 	char *cipher;
 	char *chainmode;
 	char *ivmode;
 	char *ivopts;
-	unsigned int crypto_flags;
 	unsigned int key_size;
 
 	if (argc != 5) {
@@ -587,6 +842,34 @@
 		goto bad1;
 	}
 
+#if defined(CONFIG_OCF_DM_CRYPT)
+	/* prepare a new OCF session */
+        memset(&cc->cr_dm, 0, sizeof(cc->cr_dm));
+
+	if((strcmp(cipher,"aes") == 0) && (strcmp(chainmode, "cbc") == 0))
+        	cc->cr_dm.cri_alg  = CRYPTO_AES_CBC;
+	else if((strcmp(cipher,"des") == 0) && (strcmp(chainmode, "cbc") == 0))
+        	cc->cr_dm.cri_alg  = CRYPTO_DES_CBC;
+	else if((strcmp(cipher,"3des") == 0) && (strcmp(chainmode, "cbc") == 0))
+        	cc->cr_dm.cri_alg  = CRYPTO_3DES_CBC; 
+	else {
+		ti->error = PFX "using OCF: unknown cipher or bad chain mode";
+		goto bad1;
+	}
+
+	strcpy(cc->ocf_alg_name, cipher);
+	dmprintk("key size is %d\n",cc->key_size);
+        cc->cr_dm.cri_klen = cc->key_size*8;
+        cc->cr_dm.cri_key  = cc->key;
+        cc->cr_dm.cri_next = NULL;
+
+        if(crypto_newsession(&cc->ocf_cryptoid, &cc->cr_dm, 0)){
+		dmprintk("crypt_ctr: crypto_newsession failed\n");
+                ti->error = PFX "crypto_newsession failed";
+                goto bad1; /* no OCF session was created, nothing to free at bad2 */
+        }
+
+#else
 	tfm = crypto_alloc_tfm(cipher, crypto_flags);
 	if (!tfm) {
 		ti->error = PFX "Error allocating crypto tfm";
@@ -598,7 +881,7 @@
 	}
 
 	cc->tfm = tfm;
-
+#endif
 	/*
 	 * Choose ivmode. Valid modes: "plain", "essiv:<esshash>".
 	 * See comments at iv code
@@ -619,6 +902,16 @@
 	    cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
 		goto bad2;
 
+#if defined(CONFIG_OCF_DM_CRYPT)
+	switch (cc->cr_dm.cri_alg) { /* local cr_dm is never initialized; use the session copy */
+		case CRYPTO_AES_CBC:
+			cc->iv_size = 16;
+			break;
+		default:
+			cc->iv_size = 8;
+			break;
+	}
+#else
 	if (tfm->crt_cipher.cit_decrypt_iv && tfm->crt_cipher.cit_encrypt_iv)
 		/* at least a 64 bit sector number should fit in our buffer */
 		cc->iv_size = max(crypto_tfm_alg_ivsize(tfm),
@@ -632,6 +925,7 @@
 			cc->iv_gen_ops = NULL;
 		}
 	}
+#endif
 
 	cc->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
 				     mempool_free_slab, _crypt_io_pool);
@@ -647,10 +941,12 @@
 		goto bad4;
 	}
 
+#if !defined(CONFIG_OCF_DM_CRYPT)
 	if (tfm->crt_cipher.cit_setkey(tfm, cc->key, key_size) < 0) {
 		ti->error = PFX "Error setting key";
 		goto bad5;
 	}
+#endif
 
 	if (sscanf(argv[2], SECTOR_FORMAT, &cc->iv_offset) != 1) {
 		ti->error = PFX "Invalid iv_offset sector";
@@ -691,7 +987,11 @@
 	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
 		cc->iv_gen_ops->dtr(cc);
 bad2:
+#if defined(CONFIG_OCF_DM_CRYPT)
+	crypto_freesession(cc->ocf_cryptoid);
+#else
 	crypto_free_tfm(tfm);
+#endif
 bad1:
 	kfree(cc);
 	return -EINVAL;
@@ -708,7 +1008,11 @@
 		kfree(cc->iv_mode);
 	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
 		cc->iv_gen_ops->dtr(cc);
+#if defined(CONFIG_OCF_DM_CRYPT)
+	crypto_freesession(cc->ocf_cryptoid);
+#else
 	crypto_free_tfm(cc->tfm);
+#endif
 	dm_put_device(ti, cc->dev);
 	kfree(cc);
 }
@@ -756,7 +1060,11 @@
                                  io->first_clone, bvec_idx);
 		if (clone) {
 			ctx->bio_out = clone;
+#if defined(CONFIG_OCF_DM_CRYPT)
+			if (ocf_crypt_convert(cc, ctx, io) < 0) {
+#else
 			if (crypt_convert(cc, ctx) < 0) {
+#endif
 				crypt_free_buffer_pages(cc, clone,
 				                        clone->bi_size);
 				bio_put(clone);
@@ -870,6 +1178,11 @@
 		break;
 
 	case STATUSTYPE_TABLE:
+
+#if  defined(CONFIG_OCF_DM_CRYPT)
+		cipher = cc->ocf_alg_name;
+		chainmode = "cbc";
+#else
 		cipher = crypto_tfm_alg_name(cc->tfm);
 
 		switch(cc->tfm->crt_cipher.cit_mode) {
@@ -882,6 +1195,7 @@
 		default:
 			BUG();
 		}
+#endif
 
 		if (cc->iv_mode)
 			DMEMIT("%s-%s-%s ", cipher, chainmode, cc->iv_mode);
@@ -940,6 +1254,9 @@
 		goto bad2;
 	}
 
+#ifdef CONFIG_OCF_DM_CRYPT
+	printk(KERN_INFO "dm_crypt using the OCF package.\n");
+#endif
 	return 0;
 
 bad2:
--- a/crypto/ocf/Kconfig	2005-06-14 03:42:51.000000000 +0300
+++ b/crypto/ocf/Kconfig	2005-09-06 21:13:20.858846600 +0300
@@ -25,6 +25,16 @@
 	  A software driver for the OCF framework that uses
 	  the kernel CryptoAPI.
 
+config OCF_DM_CRYPT
+	bool "OCF dm_crypt"
+	depends on OCF_OCF && DM_CRYPT
+	help
+	  The dm_crypt device mapper will use the OCF for encryption/decryption,
+	  in case of essiv, the essiv generation will use the kernel crypto APIs.
+	  When using the OCF dm_crypt, only the following encryption algorithms 
+	  are supported:
+		DES-CBC, 3DES-CBC and AES-CBC.
+
 config OCF_SAFE
 	tristate "safenet (HW crypto engine)"
 	depends on OCF_OCF
