- keep dma functions away from chained scatterlists.
Use the existing scatterlist iteration inside the driver
to call dma_map_single() for each chunk and avoid dma_map_sg().
Signed-off-by: Christian Hohnstaedt <chohnstaedt@innominate.com>
Tested-By: Karl Hiramoto <karl@hiramoto.org>
---
drivers/crypto/ixp4xx_crypto.c | 182 ++--
1 files changed, 63 insertions(+), 119 deletions(-)
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 2d637e0..fdcd0ab 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -101,6 +101,7 @@ struct buffer_desc {
u32 phys_addr;
u32 __reserved[4];
struct buffer_desc *next;
+ enum dma_data_direction dir;
};
struct crypt_ctl {
@@ -132,14 +133,10 @@ struct crypt_ctl {
struct ablk_ctx {
struct buffer_desc *src;
struct buffer_desc *dst;
- unsigned src_nents;
- unsigned dst_nents;
};
struct aead_ctx {
struct buffer_desc *buffer;
- unsigned short assoc_nents;
- unsigned short src_nents;
struct scatterlist ivlist;
/* used when the hmac is not on one sg entry */
u8 *hmac_virt;
@@ -312,7 +309,7 @@ static struct crypt_ctl *get_crypt_desc_emerg(void)
}
}
-static void free_buf_chain(struct buffer_desc *buf, u32 phys)
+static void free_buf_chain(struct device *dev, struct buffer_desc *buf, u32 phys)
{
while (buf) {
struct buffer_desc *buf1;
@@ -320,6 +317,7 @@ static void free_buf_chain(struct buffer_desc *buf, u32 phys)
buf1 = buf->next;
phys1 = buf->phys_next;
+ dma_unmap_single(dev, buf->phys_next, buf->buf_len, buf->dir);
dma_pool_free(buffer_pool, buf, phys);
buf = buf1;
phys = phys1;
@@ -348,7 +346,6 @@ static void one_packet(dma_addr_t phys)
struct crypt_ctl *crypt;
struct ixp_ctx *ctx;
int failed;
- enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
failed = phys & 0x1 ? -EBADMSG : 0;
phys &= ~0x3;
@@ -358,13 +355,8 @@ static void one_packet(dma_addr_t phys)
case CTL_FLAG_PERFORM_AEAD: {
struct aead_request *req = crypt->data.aead_req;
struct aead_ctx *req_ctx = aead_request_ctx(req);
- dma_unmap_sg(dev, req->assoc, req_ctx->assoc_nents,
- DMA_TO_DEVICE);
- dma_unmap_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
- dma_unmap_sg(dev, req->src, req_ctx->src_nents,
- DMA_BIDIRECTIONAL);
- free_buf_chain(req_ctx->buffer, crypt->src_buf);
+ free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
if (req_ctx->hmac_virt) {
finish_scattered_hmac(crypt);
}
@@ -374,16 +366,11 @@ static void one_packet(dma_addr_t phys)
case CTL_FLAG_PERFORM_ABLK: {
struct ablkcipher_request *req = crypt->data.ablk_req;
struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
- int nents;
+
if (req_ctx->dst) {
- nents = req_ctx->dst_nents;
- dma_unmap_sg(dev, req->dst, nents, DMA_FROM_DEVICE);
- free_buf_chain(req_ctx->dst, crypt->dst_buf);
- src_direction = DMA_TO_DEVICE;
+ free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
}
- nents = req_ctx->src_nents;
- dma_unmap_sg(dev, req->src, nents, src_direction);
- free_buf_chain(req_ctx->src, crypt->src_buf);
+ free_buf_chain(dev, req_ctx->src, crypt->src_buf);
req->base.complete(&req->base, failed);
break;
}
@@ -748,56 +735,35 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
return 0;
}
-static int count_sg(struct scatterlist *sg, int nbytes)
+static struct buffer_desc *chainup_buffers(struct device *dev,
+ struct scatterlist *sg, unsigned nbytes,
+ struct buffer_desc *buf, gfp_t flags,
+ enum dma_data_direction dir)
{
- int i;
- for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
- nbytes -= sg->length;
- return i;
-}
-
-static struct buffer_desc *chainup_buffers(struct scatterlist *sg,
- unsigned nbytes, struct buffer_desc *buf, gfp_t flags)
-{
- int nents = 0;
-
- while (nbytes > 0) {
+ for (;nbytes > 0; sg = scatterwalk_sg_next(sg)) {
+ unsigned len = min(nbytes, sg->length);
struct buffer_desc *next_buf;
u32 next_buf_phys;
- unsigned len = min(nbytes, sg_dma_len(sg));
+ void *ptr;
- nents++;
nbytes -= len;
- if (!buf->phys_addr) {
-