[patch -next] crypto: algif - signedness bug in skcipher_recvmsg_async()

2015-03-26 Thread Dan Carpenter
This needs to be signed because af_alg_make_sg() returns negative error
codes.

Fixes: a596999b7ddf ('crypto: algif - change algif_skcipher to be asynchronous')
Signed-off-by: Dan Carpenter <dan.carpen...@oracle.com>

diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 506eb5f..142430f 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -537,7 +537,7 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
 
while (iov_iter_count(&msg->msg_iter)) {
struct skcipher_async_rsgl *rsgl;
-   unsigned long used;
+   long used;
 
if (!ctx->used) {
err = skcipher_wait_for_data(sk, flags);
--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v6 4/4] crypto: Add Allwinner Security System crypto accelerator

2015-03-26 Thread Boris Brezillon
Hi Corentin,

Here is a quick review, there surely are a lot of other things I didn't
spot.

On Mon, 16 Mar 2015 20:01:22 +0100
LABBE Corentin <clabbe.montj...@gmail.com> wrote:

 Add support for the Security System included in Allwinner SoC A20.
 The Security System is a hardware cryptographic accelerator that support:
 - MD5 and SHA1 hash algorithms
 - AES block cipher in CBC mode with 128/196/256bits keys.
 - DES and 3DES block cipher in CBC mode
 
 Signed-off-by: LABBE Corentin <clabbe.montj...@gmail.com>
 ---

[...]

 +static int sunxi_ss_cipher(struct ablkcipher_request *areq, u32 mode)
 +{
 + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
 + struct sunxi_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
 + const char *cipher_type;
 + struct sunxi_ss_ctx *ss = op->ss;
 +
 + if (areq->nbytes == 0)
 + return 0;
 +
 + if (areq->info == NULL) {
 + dev_err(ss->dev, "ERROR: Empty IV\n");
 + return -EINVAL;
 + }
 +
 + if (areq->src == NULL || areq->dst == NULL) {
 + dev_err(ss->dev, "ERROR: Some SGs are NULL\n");
 + return -EINVAL;
 + }
 +
 + cipher_type = crypto_tfm_alg_name(crypto_ablkcipher_tfm(tfm));
 +
 + if (strcmp("cbc(aes)", cipher_type) == 0) {
 + mode |= SS_OP_AES | SS_CBC | SS_ENABLED | op->keymode;
 + return sunxi_ss_aes_poll(areq, mode);
 + }
 +
 + if (strcmp("cbc(des)", cipher_type) == 0) {
 + mode |= SS_OP_DES | SS_CBC | SS_ENABLED | op->keymode;
 + return sunxi_ss_des_poll(areq, mode);
 + }
 +
 + if (strcmp("cbc(des3_ede)", cipher_type) == 0) {
 + mode |= SS_OP_3DES | SS_CBC | SS_ENABLED | op->keymode;
 + return sunxi_ss_des_poll(areq, mode);
 + }

Hm, I'm not sure doing these string comparisons in the crypto operation
path is a good idea.
Moreover, you're doing 3 string comparisons, even though only one can
be valid at a time (using 'else if' would have been a bit better).

Anyway, IMHO this function should be split into 3 functions, and
referenced by your alg template definitions.
Something like:

int sunxi_ss_xxx_encrypt(struct ablkcipher_request *areq)
{
/* put your cipher specific intialization here */

return sunxi_ss_xxx_poll(areq, SS_ENCRYPTION);
}

int sunxi_ss_xxx_decrypt(struct ablkcipher_request *areq)
{
/* put your cipher specific intialization here */

return sunxi_ss_xxx_poll(areq, SS_DECRYPTION);
}


 +
 +int sunxi_ss_cipher_init(struct crypto_tfm *tfm)
 +{
 + const char *name = crypto_tfm_alg_name(tfm);
 + struct sunxi_tfm_ctx *op = crypto_tfm_ctx(tfm);
 + struct crypto_alg *alg = tfm->__crt_alg;
 + struct sunxi_ss_alg_template *algt;
 + struct sunxi_ss_ctx *ss;
 +
 + memset(op, 0, sizeof(struct sunxi_tfm_ctx));
 +
 + algt = container_of(alg, struct sunxi_ss_alg_template, alg.crypto);
 + ss = algt->ss;
 + op->ss = algt->ss;
 +
 + /* fallback is needed only for DES/3DES */
 + if (strcmp("cbc(des)", name) == 0 ||
 + strcmp("cbc(des3_ede)", name) == 0) {
 + op->fallback = crypto_alloc_ablkcipher(name, 0,
 + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
 + if (IS_ERR(op->fallback)) {
 + dev_err(ss->dev, "ERROR: allocating fallback algo %s\n",
 + name);
 + return PTR_ERR(op->fallback);
 + }
 + }

Ditto: just create a specific init function for the des case:

int sunxi_ss_cbc_des_init(struct crypto_tfm *tfm)
{
sunxi_ss_cipher_init(tfm);

op->fallback = crypto_alloc_ablkcipher(name, 0,
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(op->fallback)) {
dev_err(ss->dev, "ERROR: allocating fallback algo %s\n",
name);
return PTR_ERR(op->fallback);
}

return 0;
}


[..]

 +/*
 + * Optimized function for the case where we have only one SG,
 + * so we can use kmap_atomic
 + */
 +static int sunxi_ss_aes_poll_atomic(struct ablkcipher_request *areq)
 +{
 + u32 spaces;
 + struct scatterlist *in_sg = areq->src;
 + struct scatterlist *out_sg = areq->dst;
 + void *src_addr;
 + void *dst_addr;
 + unsigned int ileft = areq->nbytes;
 + unsigned int oleft = areq->nbytes;
 + unsigned int todo;
 + u32 *src32;
 + u32 *dst32;
 + u32 rx_cnt = 32;
 + u32 tx_cnt = 0;
 + int i;
 + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
 + struct sunxi_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
 + struct sunxi_ss_ctx *ss = op->ss;
 +
 + src_addr = kmap_atomic(sg_page(in_sg)) + in_sg->offset;
 + if (src_addr == NULL) {
 + dev_err(ss->dev, "kmap_atomic error for src SG\n");
 + return -EINVAL;
 + }
 +
 + dst_addr = kmap_atomic(sg_page(out_sg)) +