From: Eric Biggers <ebigg...@google.com>

To improve responsiveness, disable preemption for each step of the walk
(which is at most PAGE_SIZE) rather than for the entire
encryption/decryption operation.  Since the walk is no longer done with
preemption disabled, pass atomic=false to skcipher_walk_virt().

Suggested-by: Ard Biesheuvel <ard.biesheu...@linaro.org>
Signed-off-by: Eric Biggers <ebigg...@google.com>
---
 arch/arm/crypto/chacha20-neon-glue.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
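
Note for reviewers: a minimal sketch of the resulting per-step pattern,
for reference.  do_neon_crypt() and state are placeholders for an
algorithm's actual NEON routine and its context; the skcipher walk and
kernel_neon_*() calls are the real APIs:

	err = skcipher_walk_virt(&walk, req, false);
	while (walk.nbytes > 0) {
		unsigned int nbytes = walk.nbytes;

		/* All steps except the last handle a whole number of strides. */
		if (nbytes < walk.total)
			nbytes = round_down(nbytes, walk.stride);

		/* Preemption is disabled only for this one step, which the
		 * walk bounds at a page, rather than for the whole request. */
		kernel_neon_begin();
		do_neon_crypt(state, walk.dst.virt.addr,
			      walk.src.virt.addr, nbytes);
		kernel_neon_end();

		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}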

diff --git a/arch/arm/crypto/chacha20-neon-glue.c b/arch/arm/crypto/chacha20-neon-glue.c
index 7386eb1c1889..2bc035cb8f23 100644
--- a/arch/arm/crypto/chacha20-neon-glue.c
+++ b/arch/arm/crypto/chacha20-neon-glue.c
@@ -68,22 +68,22 @@ static int chacha20_neon(struct skcipher_request *req)
        if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
                return crypto_chacha_crypt(req);
 
-       err = skcipher_walk_virt(&walk, req, true);
+       err = skcipher_walk_virt(&walk, req, false);
 
        crypto_chacha_init(state, ctx, walk.iv);
 
-       kernel_neon_begin();
        while (walk.nbytes > 0) {
                unsigned int nbytes = walk.nbytes;
 
                if (nbytes < walk.total)
                        nbytes = round_down(nbytes, walk.stride);
 
+               kernel_neon_begin();
                chacha20_doneon(state, walk.dst.virt.addr, walk.src.virt.addr,
                                nbytes);
+               kernel_neon_end();
                err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
        }
-       kernel_neon_end();
 
        return err;
 }
-- 
2.19.1.930.g4563a0d9d0-goog
