Unrolling the LOAD and BLEND loops improves performance by ~8% on x86_64
(tested on a Broadwell Xeon) with only a modest increase in code size.

Signed-off-by: Arvind Sankar <nived...@alum.mit.edu>
---
 lib/crypto/sha256.c | 24 ++++++++++++++++++++----
 1 file changed, 20 insertions(+), 4 deletions(-)
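
For reference, the LOAD_OP and BLEND_OP helpers being unrolled are defined
earlier in lib/crypto/sha256.c, roughly as sketched below (paraphrased, not
copied verbatim; s0/s1 are the SHA-256 small sigma functions, ror32() is the
kernel's 32-bit rotate, and get_unaligned_be32() is the unaligned big-endian
load helper):

	#define s0(x)	(ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3))
	#define s1(x)	(ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10))

	static inline void LOAD_OP(int I, u32 *W, const u8 *input)
	{
		/* word I of the message schedule is the I-th big-endian
		 * 32-bit word of the 64-byte input block */
		W[I] = get_unaligned_be32((__u32 *)input + I);
	}

	static inline void BLEND_OP(int I, u32 *W)
	{
		/* message schedule expansion for rounds 16..63 */
		W[I] = s1(W[I - 2]) + W[I - 7] + s0(W[I - 15]) + W[I - 16];
	}

Each step only touches W[] at fixed offsets from i, so unrolling by 8 removes
the per-iteration loop overhead and gives the compiler a fixed access pattern
to schedule, which is presumably where the ~8% comes from.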

diff --git a/lib/crypto/sha256.c b/lib/crypto/sha256.c
index 5efd390706c6..3a8802d5f747 100644
--- a/lib/crypto/sha256.c
+++ b/lib/crypto/sha256.c
@@ -68,12 +68,28 @@ static void sha256_transform(u32 *state, const u8 *input, u32 *W)
        int i;
 
        /* load the input */
-       for (i = 0; i < 16; i++)
-               LOAD_OP(i, W, input);
+       for (i = 0; i < 16; i += 8) {
+               LOAD_OP(i + 0, W, input);
+               LOAD_OP(i + 1, W, input);
+               LOAD_OP(i + 2, W, input);
+               LOAD_OP(i + 3, W, input);
+               LOAD_OP(i + 4, W, input);
+               LOAD_OP(i + 5, W, input);
+               LOAD_OP(i + 6, W, input);
+               LOAD_OP(i + 7, W, input);
+       }
 
        /* now blend */
-       for (i = 16; i < 64; i++)
-               BLEND_OP(i, W);
+       for (i = 16; i < 64; i += 8) {
+               BLEND_OP(i + 0, W);
+               BLEND_OP(i + 1, W);
+               BLEND_OP(i + 2, W);
+               BLEND_OP(i + 3, W);
+               BLEND_OP(i + 4, W);
+               BLEND_OP(i + 5, W);
+               BLEND_OP(i + 6, W);
+               BLEND_OP(i + 7, W);
+       }
 
        /* load the state into our registers */
        a = state[0];  b = state[1];  c = state[2];  d = state[3];
-- 
2.26.2
