[PATCH 2/2] crypto: ctr - Propagate NEED_FALLBACK bit
When requesting a fallback algorithm, we should propagate the
NEED_FALLBACK bit when searching for the underlying algorithm. This
prevents drivers from allocating unnecessary fallbacks that are never
called. For instance, currently the vmx-crypto driver will use the
following chain of calls when calling the fallback implementation:

p8_aes_ctr -> ctr(p8_aes) -> aes-generic

However, p8_aes will always delegate its calls to aes-generic. With
this patch, p8_aes_ctr will be able to use ctr(aes-generic) directly
as its fallback. The same applies to aes_s390.

Signed-off-by: Marcelo Henrique Cerri
---
 crypto/ctr.c | 13 +++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/crypto/ctr.c b/crypto/ctr.c
index a4f4a89..3afe21a 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -181,15 +181,24 @@ static void crypto_ctr_exit_tfm(struct crypto_tfm *tfm)
 static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb)
 {
 	struct crypto_instance *inst;
+	struct crypto_attr_type *algt;
 	struct crypto_alg *alg;
+	u32 mask;
 	int err;
 
 	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
 	if (err)
 		return ERR_PTR(err);
 
-	alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER,
-			      CRYPTO_ALG_TYPE_MASK);
+	algt = crypto_get_attr_type(tb);
+	if (IS_ERR(algt))
+		return PTR_ERR(algt);
+
+	mask = CRYPTO_ALG_TYPE_MASK |
+		crypto_requires_off(algt->type, algt->mask,
+				    CRYPTO_ALG_NEED_FALLBACK);
+
+	alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER, mask);
 	if (IS_ERR(alg))
 		return ERR_CAST(alg);
-- 
2.7.4
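The selection change above relies on the underlying hardware cipher advertising that it itself needs a fallback. As a rough, illustrative sketch (the driver name, priority, and the whole registration are made up for illustration, not copied from the vmx or s390 drivers), a cipher in that situation registers itself along these lines, which is what lets the propagated mask exclude it:

    #include <linux/module.h>
    #include <linux/crypto.h>
    #include <crypto/aes.h>

    /* A hardware cipher that cannot serve every request marks itself with
     * CRYPTO_ALG_NEED_FALLBACK.  Once ctr()/cbc() propagate that bit in
     * their lookup mask, this cipher is skipped when the template is
     * instantiated for a fallback, and aes-generic is picked instead. */
    static struct crypto_alg example_aes_alg = {
            .cra_name        = "aes",
            .cra_driver_name = "example-hw-aes",
            .cra_priority    = 1000,
            .cra_flags       = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_NEED_FALLBACK,
            .cra_blocksize   = AES_BLOCK_SIZE,
            .cra_module      = THIS_MODULE,
    };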
[PATCH 1/2] crypto: cbc - Propagate NEED_FALLBACK bit
When requesting a fallback algorithm, we should propagate the
NEED_FALLBACK bit when searching for the underlying algorithm. This
prevents drivers from allocating unnecessary fallbacks that are never
called. For instance, currently the vmx-crypto driver will use the
following chain of calls when calling the fallback implementation:

p8_aes_cbc -> cbc(p8_aes) -> aes-generic

However, p8_aes will always delegate its calls to aes-generic. With
this patch, p8_aes_cbc will be able to use cbc(aes-generic) directly
as its fallback. The same applies to aes_s390.

Signed-off-by: Marcelo Henrique Cerri
---
 crypto/cbc.c | 20 ++--
 1 file changed, 14 insertions(+), 6 deletions(-)

diff --git a/crypto/cbc.c b/crypto/cbc.c
index bc160a3..7147842 100644
--- a/crypto/cbc.c
+++ b/crypto/cbc.c
@@ -108,24 +108,32 @@ static void crypto_cbc_free(struct skcipher_instance *inst)
 static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
 	struct skcipher_instance *inst;
+	struct crypto_attr_type *algt;
 	struct crypto_spawn *spawn;
 	struct crypto_alg *alg;
+	u32 mask;
 	int err;
 
 	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER);
 	if (err)
 		return err;
 
+	algt = crypto_get_attr_type(tb);
+	if (IS_ERR(algt))
+		return PTR_ERR(algt);
+
+	mask = CRYPTO_ALG_TYPE_MASK |
+		crypto_requires_off(algt->type, algt->mask,
+				    CRYPTO_ALG_NEED_FALLBACK);
+
+	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask);
+	if (IS_ERR(alg))
+		return PTR_ERR(alg);
+
 	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
 	if (!inst)
 		return -ENOMEM;
 
-	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
-				  CRYPTO_ALG_TYPE_MASK);
-	err = PTR_ERR(alg);
-	if (IS_ERR(alg))
-		goto err_free_inst;
-
 	spawn = skcipher_instance_ctx(inst);
 	err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
 				CRYPTO_ALG_TYPE_MASK);
-- 
2.7.4
Re: [PATCH v2 1/2] crypto: vmx - Use skcipher for cbc fallback
On Fri, Feb 24, 2017 at 11:23:54AM -0300, Paulo Flabiano Smorigo wrote:
> Signed-off-by: Paulo Flabiano Smorigo
> ---
>  drivers/crypto/vmx/aes_cbc.c | 44 ++--
>  1 file changed, 22 insertions(+), 22 deletions(-)
> 
> diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
> index 94ad5c0..2bb5910 100644
> --- a/drivers/crypto/vmx/aes_cbc.c
> +++ b/drivers/crypto/vmx/aes_cbc.c
> @@ -27,11 +27,12 @@
>  #include <...>
>  #include <...>
>  #include <...>
> +#include <crypto/skcipher.h>
> 
>  #include "aesp8-ppc.h"
> 
>  struct p8_aes_cbc_ctx {
> -	struct crypto_blkcipher *fallback;
> +	struct crypto_skcipher *fallback;
>  	struct aes_key enc_key;
>  	struct aes_key dec_key;
>  };
> 
> @@ -39,7 +40,7 @@ struct p8_aes_cbc_ctx {
>  static int p8_aes_cbc_init(struct crypto_tfm *tfm)
>  {
>  	const char *alg;
> -	struct crypto_blkcipher *fallback;
> +	struct crypto_skcipher *fallback;
>  	struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
> 
>  	if (!(alg = crypto_tfm_alg_name(tfm))) {
> @@ -47,8 +48,9 @@ static int p8_aes_cbc_init(struct crypto_tfm *tfm)
>  		return -ENOENT;
>  	}
> 
> -	fallback =
> -	    crypto_alloc_blkcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
> +	fallback = crypto_alloc_skcipher(alg, 0,
> +			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
> +
>  	if (IS_ERR(fallback)) {
>  		printk(KERN_ERR
>  		       "Failed to allocate transformation for '%s': %ld\n",
> @@ -58,9 +60,9 @@ static int p8_aes_cbc_init(struct crypto_tfm *tfm)
>  	printk(KERN_INFO "Using '%s' as fallback implementation.\n",
>  		crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));

You need to update that to use crypto_skcipher_tfm(). The same is valid
for the xts patch.

> 
> -	crypto_blkcipher_set_flags(
> +	crypto_skcipher_set_flags(
>  		fallback,
> -		crypto_blkcipher_get_flags((struct crypto_blkcipher *)tfm));
> +		crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
>  	ctx->fallback = fallback;
> 
>  	return 0;
> @@ -71,7 +73,7 @@ static void p8_aes_cbc_exit(struct crypto_tfm *tfm)
>  	struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
> 
>  	if (ctx->fallback) {
> -		crypto_free_blkcipher(ctx->fallback);
> +		crypto_free_skcipher(ctx->fallback);
>  		ctx->fallback = NULL;
>  	}
>  }
> 
> @@ -91,7 +93,7 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
>  	pagefault_enable();
>  	preempt_enable();
> 
> -	ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
> +	ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
>  	return ret;
>  }
> 
> @@ -103,15 +105,14 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
>  	struct blkcipher_walk walk;
>  	struct p8_aes_cbc_ctx *ctx =
>  		crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
> -	struct blkcipher_desc fallback_desc = {
> -		.tfm = ctx->fallback,
> -		.info = desc->info,
> -		.flags = desc->flags
> -	};
> 
>  	if (in_interrupt()) {
> -		ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src,
> -					       nbytes);
> +		SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
> +		skcipher_request_set_tfm(req, ctx->fallback);
> +		skcipher_request_set_callback(req, desc->flags, NULL, NULL);
> +		skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
> +		ret = crypto_skcipher_encrypt(req);
> +		skcipher_request_zero(req);
>  	} else {
>  		preempt_disable();
>  		pagefault_disable();
> @@ -144,15 +145,14 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
>  	struct blkcipher_walk walk;
>  	struct p8_aes_cbc_ctx *ctx =
>  		crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
> -	struct blkcipher_desc fallback_desc = {
> -		.tfm = ctx->fallback,
> -		.info = desc->info,
> -		.flags = desc->flags
> -	};
> 
>  	if (in_interrupt()) {
> -		ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src,
> -					       nbytes);
> +		SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
> +		skcipher_request_set_tfm(req, ctx->fallback);
> +		skcipher_request_set_callback(req, desc->flags, NULL, NULL);
> +		skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
> +		ret = crypto_skcipher_decrypt(req);
> +		skcipher_request_zero(req);
>  	} else {
>  		preempt_disable();
>  		pagefault_disable();
> -- 
> 2.7.4
> 

-- 
Regards,
Marcelo
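To make the crypto_skcipher_tfm() remark concrete, a minimal sketch of the adjusted line could look as follows (only the printk is shown; the surrounding code and variable names are those of the quoted patch, and the final form is of course up to the author):

    /* Obtain the underlying struct crypto_tfm via crypto_skcipher_tfm()
     * instead of casting the skcipher handle. */
    printk(KERN_INFO "Using '%s' as fallback implementation.\n",
           crypto_tfm_alg_driver_name(crypto_skcipher_tfm(fallback)));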
Re: crypto: hang in crypto_larval_lookup
On 02/26/2017 05:22 AM, Herbert Xu wrote:
> On Sat, Feb 25, 2017 at 04:20:22PM -0300, Marcelo Cerri wrote:
>> Yeah, I agree. This should work as long as the module aliases are
>> correct, which is enough.
>>
>> Other templates will not trigger the same error since they don't have
>> to try more than one underlying algorithm. But I think this is still
>> desirable for the remaining templates to avoid a long chain of unused
>> fallbacks as in the example I gave in my previous email.
>>
>> Probably a helper function to return the correct mask might be useful
>> for readability and to avoid duplicate code.
>
> You're right. Here is a patch to add a helper for this.
> Thanks!
>
> ---8<---
> Subject: crypto: api - Add crypto_requires_off helper
>
> This patch adds crypto_requires_off which is an extension of
> crypto_requires_sync for similar bits such as NEED_FALLBACK.
>
> Suggested-by: Marcelo Cerri
> Signed-off-by: Herbert Xu
>
> diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
> index ebe4ded..436c4c2 100644
> --- a/include/crypto/algapi.h
> +++ b/include/crypto/algapi.h
> @@ -360,13 +360,18 @@ static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb,
>  	return crypto_attr_alg(tb[1], type, mask);
>  }
> 
> +static inline int crypto_requires_off(u32 type, u32 mask, u32 off)
> +{
> +	return (type ^ off) & mask & off;
> +}
> +
>  /*
>   * Returns CRYPTO_ALG_ASYNC if type/mask requires the use of sync algorithms.
>   * Otherwise returns zero.
>   */
>  static inline int crypto_requires_sync(u32 type, u32 mask)
>  {
> -	return (type ^ CRYPTO_ALG_ASYNC) & mask & CRYPTO_ALG_ASYNC;
> +	return crypto_requires_off(type, mask, CRYPTO_ALG_ASYNC);
>  }
> 
>  noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);

Applied the xts.c create patch v2 and the helper patch, built and
installed. Now the aes_s390 module loads perfectly without any hang,
there are no complaints in syslog, and /proc/crypto shows that all
selftests for the algs in the module passed successfully.

Thanks all for your help :-)

regards, Harald Freudenberger
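For anyone checking the helper's bit logic by hand, the standalone userspace sketch below reproduces crypto_requires_off() from Herbert's patch with plain unsigned ints; the flag value is copied from include/linux/crypto.h of that era, and the two printed cases are purely illustrative, not part of the thread:

    #include <stdio.h>

    /* Value from include/linux/crypto.h, reproduced so the sketch builds
     * outside the kernel tree. */
    #define CRYPTO_ALG_NEED_FALLBACK 0x00000100

    static int crypto_requires_off(unsigned int type, unsigned int mask,
                                   unsigned int off)
    {
            return (type ^ off) & mask & off;
    }

    int main(void)
    {
            /* A caller that wants an algorithm with NEED_FALLBACK *clear*
             * passes type = 0 and sets the bit in mask; the helper returns
             * the bit so a template can add it to its own lookup mask. */
            printf("%#x\n", crypto_requires_off(0, CRYPTO_ALG_NEED_FALLBACK,
                                                CRYPTO_ALG_NEED_FALLBACK)); /* 0x100 */

            /* A caller that does not constrain the bit (mask = 0) gets 0,
             * so nothing extra is propagated. */
            printf("%#x\n", crypto_requires_off(0, 0,
                                                CRYPTO_ALG_NEED_FALLBACK)); /* 0 */
            return 0;
    }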