[PATCH] dcp: aes: Move the AES operation type from actx to rctx
Move the AES operation type and mode from async crypto context to crypto request context. This allows for recycling of the async crypto context for different kinds of operations. I found this problem when I used dm-crypt, which uses the same async crypto context (actx) for both encryption and decryption requests. Since the requests are enqueued into the processing queue, immediately storing the type of operation into async crypto context (actx) caused corruption of this information when encryption and decryption operations followed immediately one after the other. When the first operation was dequeued, the second operation was already enqueued and overwrote the type of operation in actx, thus causing incorrect result of the first operation. Fix this problem by storing the type of operation into the crypto request context. Signed-off-by: Marek Vasut ma...@denx.de Cc: David S. Miller da...@davemloft.net Cc: Fabio Estevam fabio.este...@freescale.com Cc: Herbert Xu herb...@gondor.apana.org.au Cc: Shawn Guo shawn@linaro.org Cc: Tom Lendacky thomas.lenda...@amd.com --- drivers/crypto/mxs-dcp.c | 27 +-- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c index d41917c..48f12dd 100644 --- a/drivers/crypto/mxs-dcp.c +++ b/drivers/crypto/mxs-dcp.c @@ -83,13 +83,16 @@ struct dcp_async_ctx { unsigned inthot:1; /* Crypto-specific context */ - unsigned intenc:1; - unsigned intecb:1; struct crypto_ablkcipher*fallback; unsigned intkey_len; uint8_t key[AES_KEYSIZE_128]; }; +struct dcp_aes_req_ctx { + unsigned intenc:1; + unsigned intecb:1; +}; + struct dcp_sha_req_ctx { unsigned intinit:1; unsigned intfini:1; @@ -190,10 +193,12 @@ static int mxs_dcp_start_dma(struct dcp_async_ctx *actx) /* * Encryption (AES128) */ -static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, int init) +static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, + struct ablkcipher_request *req, int init) { struct dcp *sdcp = global_sdcp; struct 
dcp_dma_desc *desc = sdcp-coh-desc[actx-chan]; + struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); int ret; dma_addr_t key_phys = dma_map_single(sdcp-dev, sdcp-coh-aes_key, @@ -212,14 +217,14 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, int init) /* Payload contains the key. */ desc-control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY; - if (actx-enc) + if (rctx-enc) desc-control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT; if (init) desc-control0 |= MXS_DCP_CONTROL0_CIPHER_INIT; desc-control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128; - if (actx-ecb) + if (rctx-ecb) desc-control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB; else desc-control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC; @@ -247,6 +252,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq) struct ablkcipher_request *req = ablkcipher_request_cast(arq); struct dcp_async_ctx *actx = crypto_tfm_ctx(arq-tfm); + struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); struct scatterlist *dst = req-dst; struct scatterlist *src = req-src; @@ -271,7 +277,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq) /* Copy the key from the temporary location. */ memcpy(key, actx-key, actx-key_len); - if (!actx-ecb) { + if (!rctx-ecb) { /* Copy the CBC IV just past the key. */ memcpy(key + AES_KEYSIZE_128, req-info, AES_KEYSIZE_128); /* CBC needs the INIT set. */ @@ -300,7 +306,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq) * submit the buffer. 
*/ if (actx-fill == out_off || sg_is_last(src)) { - ret = mxs_dcp_run_aes(actx, init); + ret = mxs_dcp_run_aes(actx, req, init); if (ret) return ret; init = 0; @@ -391,13 +397,14 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb) struct dcp *sdcp = global_sdcp; struct crypto_async_request *arq = req-base; struct dcp_async_ctx *actx = crypto_tfm_ctx(arq-tfm); + struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); int ret; if (unlikely(actx-key_len != AES_KEYSIZE_128)) return mxs_dcp_block_fallback(req, enc); - actx-enc = enc; - actx-ecb = ecb; + rctx-enc = enc; + rctx-ecb =
[PATCH 2/3] crypto: Pull out the functions to save/restore request
The functions to save the original request within a newly adjusted request and its counterpart to restore the original request can be re-used by more code in the crypto/ahash.c file. Pull these functions out from the code so they're available. Signed-off-by: Marek Vasut ma...@denx.de Cc: David S. Miller da...@davemloft.net Cc: Fabio Estevam fabio.este...@freescale.com Cc: Herbert Xu herb...@gondor.apana.org.au Cc: Shawn Guo shawn@linaro.org Cc: Tom Lendacky thomas.lenda...@amd.com --- crypto/ahash.c | 112 + 1 file changed, 65 insertions(+), 47 deletions(-) diff --git a/crypto/ahash.c b/crypto/ahash.c index 5ca8ede..635cd49 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -191,58 +191,14 @@ static inline unsigned int ahash_align_buffer_size(unsigned len, return len + (mask ~(crypto_tfm_ctx_alignment() - 1)); } -static void ahash_op_unaligned_finish(struct ahash_request *req, int err) -{ - struct ahash_request_priv *priv = req-priv; - - if (err == -EINPROGRESS) - return; - - if (!err) - memcpy(priv-result, req-result, - crypto_ahash_digestsize(crypto_ahash_reqtfm(req))); -} +static void ahash_op_unaligned_done(struct crypto_async_request *areq, int err); -static void ahash_op_unaligned_done(struct crypto_async_request *areq, int err) -{ - struct ahash_request *req = areq-data; - struct ahash_request_priv *priv = req-priv; - struct crypto_async_request *data; - - /* -* Restore the original request, see ahash_op_unaligned() for what -* goes where. -* -* The struct ahash_request *req here is in fact the req.base -* from the ADJUSTED request from ahash_op_unaligned(), thus as it -* is a pointer to self, it is also the ADJUSTED req . -*/ - - /* First copy req-result into req-priv.result */ - ahash_op_unaligned_finish(req, err); - - /* Restore the original crypto request. */ - req-result = priv-result; - req-base.complete = priv-complete; - req-base.data = priv-data; - req-priv = priv-priv; - - /* Free the req-priv.priv from the ADJUSTED request. 
*/ - kzfree(priv); - - /* Complete the ORIGINAL request. */ - data = req-base.data; - req-base.complete(data, err); -} - -static int ahash_op_unaligned(struct ahash_request *req, - int (*op)(struct ahash_request *)) +static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); unsigned long alignmask = crypto_ahash_alignmask(tfm); unsigned int ds = crypto_ahash_digestsize(tfm); struct ahash_request_priv *priv; - int err; priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask), (req-base.flags CRYPTO_TFM_REQ_MAY_SLEEP) ? @@ -282,10 +238,72 @@ static int ahash_op_unaligned(struct ahash_request *req, priv-priv = req-priv; req-result = PTR_ALIGN((u8 *)priv-ubuf, alignmask + 1); - req-base.complete = ahash_op_unaligned_done; + req-base.complete = cplt; req-base.data = req; req-priv = priv; + return 0; +} + +static void ahash_restore_req(struct ahash_request *req) +{ + struct ahash_request_priv *priv = req-priv; + + /* Restore the original crypto request. */ + req-result = priv-result; + req-base.complete = priv-complete; + req-base.data = priv-data; + req-priv = priv-priv; + + /* Free the req-priv.priv from the ADJUSTED request. */ + kzfree(priv); +} + +static void ahash_op_unaligned_finish(struct ahash_request *req, int err) +{ + struct ahash_request_priv *priv = req-priv; + + if (err == -EINPROGRESS) + return; + + if (!err) + memcpy(priv-result, req-result, + crypto_ahash_digestsize(crypto_ahash_reqtfm(req))); +} + +static void ahash_op_unaligned_done(struct crypto_async_request *areq, int err) +{ + struct ahash_request *req = areq-data; + struct crypto_async_request *data; + + /* +* Restore the original request, see ahash_op_unaligned() for what +* goes where. +* +* The struct ahash_request *req here is in fact the req.base +* from the ADJUSTED request from ahash_op_unaligned(), thus as it +* is a pointer to self, it is also the ADJUSTED req . 
+*/ + + /* First copy req-result into req-priv.result */ + ahash_op_unaligned_finish(req, err); + + ahash_restore_req(req); + + /* Complete the ORIGINAL request. */ + data = req-base.data; + req-base.complete(data, err); +} + +static int ahash_op_unaligned(struct ahash_request *req, + int (*op)(struct ahash_request *)) +{ + int err; + + err =
[PATCH 3/3] crypto: Simplify the ahash_finup implementation
The ahash_def_finup() can make use of the request save/restore functions, thus make it so. This simplifies the code a little and unifies the code paths. Note that the same remark about free()ing the req-priv applies here, the req-priv can only be free()'d after the original request was restored. Signed-off-by: Marek Vasut ma...@denx.de Cc: David S. Miller da...@davemloft.net Cc: Fabio Estevam fabio.este...@freescale.com Cc: Herbert Xu herb...@gondor.apana.org.au Cc: Shawn Guo shawn@linaro.org Cc: Tom Lendacky thomas.lenda...@amd.com --- crypto/ahash.c | 53 - 1 file changed, 20 insertions(+), 33 deletions(-) diff --git a/crypto/ahash.c b/crypto/ahash.c index 635cd49..561ebaf 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -350,20 +350,19 @@ static void ahash_def_finup_finish2(struct ahash_request *req, int err) if (!err) memcpy(priv-result, req-result, crypto_ahash_digestsize(crypto_ahash_reqtfm(req))); - - kzfree(priv); } -static void ahash_def_finup_done2(struct crypto_async_request *req, int err) +static void ahash_def_finup_done2(struct crypto_async_request *areq, int err) { - struct ahash_request *areq = req-data; - struct ahash_request_priv *priv = areq-priv; - crypto_completion_t complete = priv-complete; - void *data = priv-data; + struct ahash_request *req = areq-data; + struct crypto_async_request *data; - ahash_def_finup_finish2(areq, err); + ahash_def_finup_finish2(req, err); - complete(data, err); + ahash_restore_req(req); + + data = req-base.data; + req-base.complete(data, err); } static int ahash_def_finup_finish1(struct ahash_request *req, int err) @@ -380,39 +379,27 @@ out: return err; } -static void ahash_def_finup_done1(struct crypto_async_request *req, int err) +static void ahash_def_finup_done1(struct crypto_async_request *areq, int err) { - struct ahash_request *areq = req-data; - struct ahash_request_priv *priv = areq-priv; - crypto_completion_t complete = priv-complete; - void *data = priv-data; + struct ahash_request *req = areq-data; + 
struct crypto_async_request *data; - err = ahash_def_finup_finish1(areq, err); + err = ahash_def_finup_finish1(req, err); + + ahash_restore_req(req); - complete(data, err); + data = req-base.data; + req-base.complete(data, err); } static int ahash_def_finup(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - unsigned long alignmask = crypto_ahash_alignmask(tfm); - unsigned int ds = crypto_ahash_digestsize(tfm); - struct ahash_request_priv *priv; - - priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask), - (req-base.flags CRYPTO_TFM_REQ_MAY_SLEEP) ? - GFP_KERNEL : GFP_ATOMIC); - if (!priv) - return -ENOMEM; - - priv-result = req-result; - priv-complete = req-base.complete; - priv-data = req-base.data; + int err; - req-result = PTR_ALIGN((u8 *)priv-ubuf, alignmask + 1); - req-base.complete = ahash_def_finup_done1; - req-base.data = req; - req-priv = priv; + err = ahash_save_req(req, ahash_def_finup_done1); + if (err) + return err; return ahash_def_finup_finish1(req, tfm-update(req)); } -- 1.8.5.2 -- To unsubscribe from this list: send the line unsubscribe linux-crypto in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
[PATCH 1/3] crypto: Fix the pointer voodoo in unaligned ahash
Add documentation for the pointer voodoo that is happening in crypto/ahash.c in ahash_op_unaligned(). This code is quite confusing, so add a beefy chunk of documentation. Moreover, make sure the mangled request is completely restored after finishing this unaligned operation. This means restoring all of .result, .priv, .base.data and .base.complete . Also, remove the crypto_completion_t complete = ... line present in the ahash_op_unaligned_done() function. This type actually declares a function pointer, which is very confusing. Finally, yet very important nonetheless, make sure the req-priv is free()'d only after the original request is restored in ahash_op_unaligned_done(). The req-priv data must not be free()'d before that in ahash_op_unaligned_finish(), since we would be accessing previously free()'d data in ahash_op_unaligned_done() and cause corruption. Signed-off-by: Marek Vasut ma...@denx.de Cc: David S. Miller da...@davemloft.net Cc: Fabio Estevam fabio.este...@freescale.com Cc: Herbert Xu herb...@gondor.apana.org.au Cc: Shawn Guo shawn@linaro.org Cc: Tom Lendacky thomas.lenda...@amd.com --- crypto/ahash.c | 65 -- 1 file changed, 54 insertions(+), 11 deletions(-) diff --git a/crypto/ahash.c b/crypto/ahash.c index a92dc38..5ca8ede 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -29,6 +29,7 @@ struct ahash_request_priv { crypto_completion_t complete; void *data; + void *priv; u8 *result; void *ubuf[] CRYPTO_MINALIGN_ATTR; }; @@ -200,23 +201,38 @@ static void ahash_op_unaligned_finish(struct ahash_request *req, int err) if (!err) memcpy(priv-result, req-result, crypto_ahash_digestsize(crypto_ahash_reqtfm(req))); - - kzfree(priv); } -static void ahash_op_unaligned_done(struct crypto_async_request *req, int err) +static void ahash_op_unaligned_done(struct crypto_async_request *areq, int err) { - struct ahash_request *areq = req-data; - struct ahash_request_priv *priv = areq-priv; - crypto_completion_t complete = priv-complete; - void *data = priv-data; + 
struct ahash_request *req = areq-data; + struct ahash_request_priv *priv = req-priv; + struct crypto_async_request *data; + + /* +* Restore the original request, see ahash_op_unaligned() for what +* goes where. +* +* The struct ahash_request *req here is in fact the req.base +* from the ADJUSTED request from ahash_op_unaligned(), thus as it +* is a pointer to self, it is also the ADJUSTED req . +*/ + + /* First copy req-result into req-priv.result */ + ahash_op_unaligned_finish(req, err); - ahash_op_unaligned_finish(areq, err); + /* Restore the original crypto request. */ + req-result = priv-result; + req-base.complete = priv-complete; + req-base.data = priv-data; + req-priv = priv-priv; - areq-base.complete = complete; - areq-base.data = data; + /* Free the req-priv.priv from the ADJUSTED request. */ + kzfree(priv); - complete(areq-base, err); + /* Complete the ORIGINAL request. */ + data = req-base.data; + req-base.complete(data, err); } static int ahash_op_unaligned(struct ahash_request *req, @@ -234,9 +250,36 @@ static int ahash_op_unaligned(struct ahash_request *req, if (!priv) return -ENOMEM; + /* +* WARNING: Voodoo programming below! +* +* The code below is obscure and hard to understand, thus explanation +* is necessary. See include/crypto/hash.h and include/linux/crypto.h +* to understand the layout of structures used here! +* +* The code here will replace portions of the ORIGINAL request with +* pointers to new code and buffers so the hashing operation can store +* the result in aligned buffer. We will call the modified request +* an ADJUSTED request. 
+* +* The newly mangled request will look as such: +* +* req { +* .result= ADJUSTED[new aligned buffer] +* .base.complete = ADJUSTED[pointer to completion function] +* .base.data = ADJUSTED[*req (pointer to self)] +* .priv = ADJUSTED[new priv] { +* .result = ORIGINAL(result) +* .complete = ORIGINAL(base.complete) +* .data = ORIGINAL(base.data) +* .priv = ORIGINAL(priv) +* } +*/ + priv-result = req-result; priv-complete = req-base.complete; priv-data = req-base.data; + priv-priv = req-priv; req-result = PTR_ALIGN((u8 *)priv-ubuf, alignmask + 1); req-base.complete = ahash_op_unaligned_done; -- 1.8.5.2 -- To unsubscribe from this list: send the line unsubscribe linux-crypto in
[PATCH 0/3] crypto: Clean up ahash handling confusion
This set of patches shall clean up the confusion in restoring the ahash request context in crypto/ahash.c . The code was a bit refactored to make it easier to understand as well. Please, make sure the code is well tested before applying. Also, please review very thoroughly. Marek Vasut (3): crypto: Fix the pointer voodoo in unaligned ahash crypto: Pull out the functions to save/restore request crypto: Simplify the ahash_finup implementation crypto/ahash.c | 172 - 1 file changed, 110 insertions(+), 62 deletions(-) Cc: David S. Miller da...@davemloft.net Cc: Fabio Estevam fabio.este...@freescale.com Cc: Herbert Xu herb...@gondor.apana.org.au Cc: Shawn Guo shawn@linaro.org Cc: Tom Lendacky thomas.lenda...@amd.com -- 1.8.5.2 -- To unsubscribe from this list: send the line unsubscribe linux-crypto in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
Re: [PATCH 1/3] crypto: Fix the pointer voodoo in unaligned ahash
On Tuesday, January 14, 2014 06:33:47 PM Marek Vasut wrote: Add documentation for the pointer voodoo that is happening in crypto/ahash.c in ahash_op_unaligned(). This code is quite confusing, so add a beefy chunk of documentation. Moreover, make sure the mangled request is completely restored after finishing this unaligned operation. This means restoring all of .result, .priv, .base.data and .base.complete . Also, remove the crypto_completion_t complete = ... line present in the ahash_op_unaligned_done() function. This type actually declares a function pointer, which is very confusing. Finally, yet very important nonetheless, make sure the req-priv is free()'d only after the original request is restored in ahash_op_unaligned_done(). The req-priv data must not be free()'d before that in ahash_op_unaligned_finish(), since we would be accessing previously free()'d data in ahash_op_unaligned_done() and cause corruption. Signed-off-by: Marek Vasut ma...@denx.de Cc: David S. Miller da...@davemloft.net Cc: Fabio Estevam fabio.este...@freescale.com Cc: Herbert Xu herb...@gondor.apana.org.au Cc: Shawn Guo shawn@linaro.org Cc: Tom Lendacky thomas.lenda...@amd.com --- crypto/ahash.c | 65 -- 1 file changed, 54 insertions(+), 11 deletions(-) diff --git a/crypto/ahash.c b/crypto/ahash.c index a92dc38..5ca8ede 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -29,6 +29,7 @@ struct ahash_request_priv { crypto_completion_t complete; void *data; + void *priv; u8 *result; void *ubuf[] CRYPTO_MINALIGN_ATTR; }; @@ -200,23 +201,38 @@ static void ahash_op_unaligned_finish(struct ahash_request *req, int err) if (!err) memcpy(priv-result, req-result, crypto_ahash_digestsize(crypto_ahash_reqtfm(req))); - - kzfree(priv); You can't move/remove this kzfree since a synchronous operation will not take the ahash_op_unaligned_done path. A synchronous operation will never return -EINPROGRESS and the effect will be to never free the priv structure. 
} -static void ahash_op_unaligned_done(struct crypto_async_request *req, int err) +static void ahash_op_unaligned_done(struct crypto_async_request *areq, int err) { - struct ahash_request *areq = req-data; - struct ahash_request_priv *priv = areq-priv; - crypto_completion_t complete = priv-complete; - void *data = priv-data; + struct ahash_request *req = areq-data; + struct ahash_request_priv *priv = req-priv; + struct crypto_async_request *data; + + /* + * Restore the original request, see ahash_op_unaligned() for what + * goes where. + * + * The struct ahash_request *req here is in fact the req.base + * from the ADJUSTED request from ahash_op_unaligned(), thus as it + * is a pointer to self, it is also the ADJUSTED req . + */ + + /* First copy req-result into req-priv.result */ + ahash_op_unaligned_finish(req, err); Given the above comment on the kzfree, you'll need to save all the priv values as was done previously. Thanks, Tom - ahash_op_unaligned_finish(areq, err); + /* Restore the original crypto request. */ + req-result = priv-result; + req-base.complete = priv-complete; + req-base.data = priv-data; + req-priv = priv-priv; - areq-base.complete = complete; - areq-base.data = data; + /* Free the req-priv.priv from the ADJUSTED request. */ + kzfree(priv); - complete(areq-base, err); + /* Complete the ORIGINAL request. */ + data = req-base.data; + req-base.complete(data, err); } static int ahash_op_unaligned(struct ahash_request *req, @@ -234,9 +250,36 @@ static int ahash_op_unaligned(struct ahash_request *req, if (!priv) return -ENOMEM; + /* + * WARNING: Voodoo programming below! + * + * The code below is obscure and hard to understand, thus explanation + * is necessary. See include/crypto/hash.h and include/linux/crypto.h + * to understand the layout of structures used here! + * + * The code here will replace portions of the ORIGINAL request with + * pointers to new code and buffers so the hashing operation can store + * the result in aligned buffer. 
We will call the modified request + * an ADJUSTED request. + * + * The newly mangled request will look as such: + * + * req { + * .result= ADJUSTED[new aligned buffer] + * .base.complete = ADJUSTED[pointer to completion function] + * .base.data = ADJUSTED[*req (pointer to self)] + * .priv = ADJUSTED[new priv] { + * .result = ORIGINAL(result) + * .complete = ORIGINAL(base.complete) + * .data = ORIGINAL(base.data) + *
Re: [PATCH -next] crypto: mxs - Fix sparse non static symbol warning
On Thu, Jan 09, 2014 at 03:43:01PM +0100, Marek Vasut wrote: On Wednesday, January 08, 2014 at 02:48:56 PM, Wei Yongjun wrote: From: Wei Yongjun yongjun_...@trendmicro.com.cn Fixes the following sparse warning: drivers/crypto/mxs-dcp.c:103:1: warning: symbol 'global_mutex' was not declared. Should it be static? Signed-off-by: Wei Yongjun yongjun_...@trendmicro.com.cn --- drivers/crypto/mxs-dcp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c index d41917c..a6db7fa 100644 --- a/drivers/crypto/mxs-dcp.c +++ b/drivers/crypto/mxs-dcp.c @@ -100,7 +100,7 @@ struct dcp_sha_req_ctx { * design of Linux Crypto API. */ static struct dcp *global_sdcp; -DEFINE_MUTEX(global_mutex); +static DEFINE_MUTEX(global_mutex); /* DCP register layout. */ #define MXS_DCP_CTRL 0x00 Thank you. Acked-by: Marek Vasut ma...@denx.de Patch applied. -- Email: Herbert Xu herb...@gondor.apana.org.au Home Page: http://gondor.apana.org.au/~herbert/ PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt -- To unsubscribe from this list: send the line unsubscribe linux-crypto in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
Re: [PATCH 0/6] crypto: ccp - more code fixes/cleanup
On Mon, Jan 06, 2014 at 01:33:53PM -0600, Tom Lendacky wrote: The following series implements a fix to hash length wrapping as well as some additional fixes and cleanups (proper gfp_t type on some memory allocations, scatterlist usage improvements, null request result field checks and driver enabled/disabled changes). This patch series is based on the cryptodev-2.6 kernel tree. All applied. Thanks! -- Email: Herbert Xu herb...@gondor.apana.org.au Home Page: http://gondor.apana.org.au/~herbert/ PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt -- To unsubscribe from this list: send the line unsubscribe linux-crypto in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
Re: [PATCH v1] crypto: aesni - fix build on x86 (32bit)
On Thu, Jan 09, 2014 at 08:57:42AM -0800, Tim Chen wrote: From 41656afcbd63ccb92357d4937a75629499f4fd4f Mon Sep 17 00:00:00 2001 From: Tim Chen tim.c.c...@linux.intel.com Date: Mon, 6 Jan 2014 07:23:52 -0800 Subject: [PATCH] crypto: Rename aesni-intel_avx.S to indicate it only supports x86_64 To: Herbert Xu herb...@gondor.apana.org.au, H. Peter Anvin h...@zytor.com Cc: Borislav Petkov b...@alien8.de, Andy Shevchenko andriy.shevche...@linux.intel.com, linux-crypto@vger.kernel.org We rename aesni-intel_avx.S to aesni-intel_avx-x86_64.S to indicate that it is only used by x86_64 architecture. --- arch/x86/crypto/Makefile| 2 +- arch/x86/crypto/{aesni-intel_avx.S = aesni-intel_avx-x86_64.S} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename arch/x86/crypto/{aesni-intel_avx.S = aesni-intel_avx-x86_64.S} (100%) Signed-off-by: Tim Chen tim.c.c...@linux.intel.com Patch applied. -- Email: Herbert Xu herb...@gondor.apana.org.au Home Page: http://gondor.apana.org.au/~herbert/ PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt -- To unsubscribe from this list: send the line unsubscribe linux-crypto in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
Re: [PATCH 7/8 v3] crypto:s5p-sss: validate iv before memcpy
Hello Tomasz, On 10 January 2014 21:33, Tomasz Figa t.f...@samsung.com wrote: Hi Naveen, On 10.01.2014 12:45, Naveen Krishna Chatradhi wrote: This patch adds code to validate iv buffer before trying to memcpy the contents Signed-off-by: Naveen Krishna Chatradhi ch.nav...@samsung.com --- Changes since v2: None drivers/crypto/s5p-sss.c |5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c index f274f5f..7058bb6 100644 --- a/drivers/crypto/s5p-sss.c +++ b/drivers/crypto/s5p-sss.c @@ -381,8 +381,9 @@ static void s5p_set_aes(struct s5p_aes_dev *dev, struct samsung_aes_variant *var = dev-variant; void __iomem *keystart; - memcpy(dev-ioaddr + SSS_REG_AES_IV_DATA - (var-aes_offset, 0), iv, 0x10); + if (iv) + memcpy(dev-ioaddr + SSS_REG_AES_IV_DATA + (var-aes_offset, 0), iv, 0x10); In what conditions can the iv end up being NULL? req-info is the initialization vector in our case, which comes from user space. It's good to have a check to avoid any crashes. Also AES ECB mode does not use IV. Best regards, Tomasz -- Shine bright, (: Nav :) -- To unsubscribe from this list: send the line unsubscribe linux-crypto in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html