[PATCH 3.12 42/56] crypto: ux500 - make interrupt mode plausible

2014-09-03 Thread Jiri Slaby
From: Arnd Bergmann 

3.12-stable review patch.  If anyone has any objections, please let me know.

===

commit e1f8859ee265fc89bd21b4dca79e8e983a044892 upstream.

The interrupt handler in the ux500 crypto driver has an obviously
incorrect way to access the data buffer, which for a while has
caused this build warning:

../ux500/cryp/cryp_core.c: In function 'cryp_interrupt_handler':
../ux500/cryp/cryp_core.c:234:5: warning: passing argument 1 of '__fswab32' 
makes integer from pointer without a cast [enabled by default]
 writel_relaxed(ctx->indata,
 ^
In file included from ../include/linux/swab.h:4:0,
 from ../include/uapi/linux/byteorder/big_endian.h:12,
 from ../include/linux/byteorder/big_endian.h:4,
 from ../arch/arm/include/uapi/asm/byteorder.h:19,
 from ../include/asm-generic/bitops/le.h:5,
 from ../arch/arm/include/asm/bitops.h:340,
 from ../include/linux/bitops.h:33,
 from ../include/linux/kernel.h:10,
 from ../include/linux/clk.h:16,
 from ../drivers/crypto/ux500/cryp/cryp_core.c:12:
../include/uapi/linux/swab.h:57:119: note: expected '__u32' but argument is of 
type 'const u8 *'
 static inline __attribute_const__ __u32 __fswab32(__u32 val)

There are at least two, possibly three problems here:
a) when writing into the FIFO, we copy the pointer rather than the
   actual data we want to give to the hardware
b) the data pointer is an array of 8-bit values, while the FIFO
   is 32-bit wide, so both the read and write access fail to do
   a proper type conversion
c) this seems incorrect for big-endian kernels, on which we need to
   byte-swap any register access, but not normally FIFO accesses;
   at least the DMA case doesn't do it either.

This converts the bogus loop to use the same readsl/writesl pair
that we use for the two other modes (DMA and polling). This is
more efficient and consistent, and probably correct for endianness.
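
As a rough illustration of the difference, here is a minimal sketch (the
register layout and function name below are made up for the example; this
is not the real driver code): readsl()/writesl() move whole 32-bit FIFO
words between memory and a single register address, whereas the old loop
handed the pointer value itself to writel_relaxed() and stepped through
the buffer one byte at a time:

#include <linux/io.h>
#include <linux/types.h>

/* hypothetical register block, only to show the calling convention */
struct fifo_regs {
	u32 din;	/* input FIFO, 32 bits wide */
	u32 dout;	/* output FIFO, 32 bits wide */
};

static void fifo_xfer_block(struct fifo_regs __iomem *regs,
			    u8 *outdata, const u8 *indata,
			    unsigned int blocksize)
{
	unsigned int count = blocksize / 4;	/* FIFO words per block */

	/* push count 32-bit words from indata into the din register ... */
	writesl(&regs->din, indata, count);
	/* ... and pull count words back out of dout, with no byte swapping */
	readsl(&regs->dout, outdata, count);
}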

The bug has existed since the driver was first merged, and was
probably never detected because nobody tried to use interrupt mode.
It might make sense to backport this fix to stable kernels, depending
on how the crypto maintainers feel about that.

Signed-off-by: Arnd Bergmann 
Cc: linux-crypto@vger.kernel.org
Cc: Fabio Baltieri 
Cc: Linus Walleij 
Cc: Herbert Xu 
Cc: "David S. Miller" 
Signed-off-by: Herbert Xu 
Signed-off-by: Jiri Slaby 
---
 drivers/crypto/ux500/cryp/cryp_core.c | 25 ++++++++++++-------------
 1 file changed, 12 insertions(+), 13 deletions(-)

diff --git a/drivers/crypto/ux500/cryp/cryp_core.c 
b/drivers/crypto/ux500/cryp/cryp_core.c
index a999f537228f..92105f3dc8e0 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -190,7 +190,7 @@ static void add_session_id(struct cryp_ctx *ctx)
 static irqreturn_t cryp_interrupt_handler(int irq, void *param)
 {
struct cryp_ctx *ctx;
-   int i;
+   int count;
struct cryp_device_data *device_data;
 
if (param == NULL) {
@@ -215,12 +215,11 @@ static irqreturn_t cryp_interrupt_handler(int irq, void 
*param)
if (cryp_pending_irq_src(device_data,
 CRYP_IRQ_SRC_OUTPUT_FIFO)) {
if (ctx->outlen / ctx->blocksize > 0) {
-   for (i = 0; i < ctx->blocksize / 4; i++) {
-   *(ctx->outdata) = readl_relaxed(
-   &device_data->base->dout);
-   ctx->outdata += 4;
-   ctx->outlen -= 4;
-   }
+   count = ctx->blocksize / 4;
+
+   readsl(&device_data->base->dout, ctx->outdata, count);
+   ctx->outdata += count;
+   ctx->outlen -= count;
 
if (ctx->outlen == 0) {
cryp_disable_irq_src(device_data,
@@ -230,12 +229,12 @@ static irqreturn_t cryp_interrupt_handler(int irq, void 
*param)
} else if (cryp_pending_irq_src(device_data,
CRYP_IRQ_SRC_INPUT_FIFO)) {
if (ctx->datalen / ctx->blocksize > 0) {
-   for (i = 0 ; i < ctx->blocksize / 4; i++) {
-   writel_relaxed(ctx->indata,
-   &device_data->base->din);
-   ctx->indata += 4;
-   ctx->datalen -= 4;
-   }
+   count = ctx->blocksize / 4;
+
+   writesl(&device_data->base->din, ctx->indata, count);
+
+   ctx->indata += count;
+   

[patch added to the 3.12 stable tree] crypto: ux500 - make interrupt mode plausible

2014-09-03 Thread Jiri Slaby
From: Arnd Bergmann 

This patch has been added to the 3.12 stable tree. If you have any
objections, please let us know.

===

commit e1f8859ee265fc89bd21b4dca79e8e983a044892 upstream.

The interrupt handler in the ux500 crypto driver has an obviously
incorrect way to access the data buffer, which for a while has
caused this build warning:

../ux500/cryp/cryp_core.c: In function 'cryp_interrupt_handler':
../ux500/cryp/cryp_core.c:234:5: warning: passing argument 1 of '__fswab32' 
makes integer from pointer without a cast [enabled by default]
 writel_relaxed(ctx->indata,
 ^
In file included from ../include/linux/swab.h:4:0,
 from ../include/uapi/linux/byteorder/big_endian.h:12,
 from ../include/linux/byteorder/big_endian.h:4,
 from ../arch/arm/include/uapi/asm/byteorder.h:19,
 from ../include/asm-generic/bitops/le.h:5,
 from ../arch/arm/include/asm/bitops.h:340,
 from ../include/linux/bitops.h:33,
 from ../include/linux/kernel.h:10,
 from ../include/linux/clk.h:16,
 from ../drivers/crypto/ux500/cryp/cryp_core.c:12:
../include/uapi/linux/swab.h:57:119: note: expected '__u32' but argument is of 
type 'const u8 *'
 static inline __attribute_const__ __u32 __fswab32(__u32 val)

There are at least two, possibly three problems here:
a) when writing into the FIFO, we copy the pointer rather than the
   actual data we want to give to the hardware
b) the data pointer is an array of 8-bit values, while the FIFO
   is 32-bit wide, so both the read and write access fail to do
   a proper type conversion
c) this seems incorrect for big-endian kernels, on which we need to
   byte-swap any register access, but not normally FIFO accesses;
   at least the DMA case doesn't do it either.

This converts the bogus loop to use the same readsl/writesl pair
that we use for the two other modes (DMA and polling). This is
more efficient and consistent, and probably correct for endianness.

The bug has existed since the driver was first merged, and was
probably never detected because nobody tried to use interrupt mode.
It might make sense to backport this fix to stable kernels, depending
on how the crypto maintainers feel about that.

Signed-off-by: Arnd Bergmann 
Cc: linux-crypto@vger.kernel.org
Cc: Fabio Baltieri 
Cc: Linus Walleij 
Cc: Herbert Xu 
Cc: "David S. Miller" 
Signed-off-by: Herbert Xu 
Signed-off-by: Jiri Slaby 
---
 drivers/crypto/ux500/cryp/cryp_core.c | 25 ++++++++++++-------------
 1 file changed, 12 insertions(+), 13 deletions(-)

diff --git a/drivers/crypto/ux500/cryp/cryp_core.c 
b/drivers/crypto/ux500/cryp/cryp_core.c
index a999f537228f..92105f3dc8e0 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -190,7 +190,7 @@ static void add_session_id(struct cryp_ctx *ctx)
 static irqreturn_t cryp_interrupt_handler(int irq, void *param)
 {
struct cryp_ctx *ctx;
-   int i;
+   int count;
struct cryp_device_data *device_data;
 
if (param == NULL) {
@@ -215,12 +215,11 @@ static irqreturn_t cryp_interrupt_handler(int irq, void 
*param)
if (cryp_pending_irq_src(device_data,
 CRYP_IRQ_SRC_OUTPUT_FIFO)) {
if (ctx->outlen / ctx->blocksize > 0) {
-   for (i = 0; i < ctx->blocksize / 4; i++) {
-   *(ctx->outdata) = readl_relaxed(
-   &device_data->base->dout);
-   ctx->outdata += 4;
-   ctx->outlen -= 4;
-   }
+   count = ctx->blocksize / 4;
+
+   readsl(&device_data->base->dout, ctx->outdata, count);
+   ctx->outdata += count;
+   ctx->outlen -= count;
 
if (ctx->outlen == 0) {
cryp_disable_irq_src(device_data,
@@ -230,12 +229,12 @@ static irqreturn_t cryp_interrupt_handler(int irq, void 
*param)
} else if (cryp_pending_irq_src(device_data,
CRYP_IRQ_SRC_INPUT_FIFO)) {
if (ctx->datalen / ctx->blocksize > 0) {
-   for (i = 0 ; i < ctx->blocksize / 4; i++) {
-   writel_relaxed(ctx->indata,
-   &device_data->base->din);
-   ctx->indata += 4;
-   ctx->datalen -= 4;
-   }
+   count = ctx->blocksize / 4;
+
+   writesl(&device_data->base->din, ctx->indata, count);
+
+   ctx->indata += count;
+

[PATCH -resend with CC] crypto: algif_hash, avoid zero-sized array

2016-12-15 Thread Jiri Slaby
With this reproducer:
  struct sockaddr_alg alg = {
  .salg_family = 0x26,
  .salg_type = "hash",
  .salg_feat = 0xf,
  .salg_mask = 0x5,
  .salg_name = "digest_null",
  };
  int sock, sock2;

  sock = socket(AF_ALG, SOCK_SEQPACKET, 0);
  bind(sock, (struct sockaddr *)&alg, sizeof(alg));
  sock2 = accept(sock, NULL, NULL);
  setsockopt(sock, SOL_ALG, ALG_SET_KEY, "\x9b\xca", 2);
  accept(sock2, NULL, NULL);

 8<  8<  8<  8< 

one can immediately see an UBSAN warning:
UBSAN: Undefined behaviour in crypto/algif_hash.c:187:7
variable length array bound value 0 <= 0
CPU: 0 PID: 15949 Comm: syz-executor Tainted: GE  
4.4.30-0-default #1
...
Call Trace:
...
 [] ? __ubsan_handle_vla_bound_not_positive+0x13d/0x188
 [] ? __ubsan_handle_out_of_bounds+0x1bc/0x1bc
 [] ? hash_accept+0x5bd/0x7d0 [algif_hash]
 [] ? hash_accept_nokey+0x3f/0x51 [algif_hash]
 [] ? hash_accept_parent_nokey+0x4a0/0x4a0 [algif_hash]
 [] ? SyS_accept+0x2b/0x40

It is a correct warning: the hash state size propagated to accept() is
zero here, yet creating a zero-length variable-length array is not
allowed in C.

Fix this as proposed by Herbert -- do "?: 1" on that site. No sizeof or
similar happens in the code there, so we just allocate one byte even
though we do not use the array.
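
As a stand-alone illustration (plain userspace C, not the kernel code):
"x ? : y" is the GNU shorthand for "x ? x : y" with x evaluated only
once, so a reported state size of zero yields a one-byte array instead
of a zero-length VLA:

#include <stdio.h>

/* stand-in for crypto_ahash_statesize(); digest_null would report 0 */
static unsigned int statesize(void)
{
	return 0;
}

int main(void)
{
	char state[statesize() ? : 1];	/* bound is at least 1 at run time */

	printf("VLA bound: %zu\n", sizeof(state));	/* prints 1 */
	return 0;
}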

Signed-off-by: Jiri Slaby 
Cc: Herbert Xu 
Cc: "David S. Miller"  (maintainer:CRYPTO API)
Reported-by: Sasha Levin 
---
 crypto/algif_hash.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index d19b09cdf284..54fc90e8339c 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -245,7 +245,7 @@ static int hash_accept(struct socket *sock, struct socket 
*newsock, int flags)
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
struct ahash_request *req = &ctx->req;
-   char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req))];
+   char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req)) ? : 1];
struct sock *sk2;
struct alg_sock *ask2;
struct hash_ctx *ctx2;
-- 
2.11.0



[PATCH 06/10] x86: crypto, annotate local functions

2017-02-17 Thread Jiri Slaby
Use the newly added ENTRY_LOCAL to annotate starts of all functions
which do not have ".globl" annotation, but their ends are annotated by
ENDPROC. This is needed to balance ENDPROC for tools that are about to
generate debuginfo.

Signed-off-by: Jiri Slaby 
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: 
Cc: 
---
 arch/x86/crypto/aesni-intel_asm.S| 29 ++--
 arch/x86/crypto/camellia-aesni-avx-asm_64.S  | 10 +-
 arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 10 +-
 arch/x86/crypto/cast5-avx-x86_64-asm_64.S|  4 ++--
 arch/x86/crypto/cast6-avx-x86_64-asm_64.S|  4 ++--
 arch/x86/crypto/ghash-clmulni-intel_asm.S|  2 +-
 arch/x86/crypto/serpent-avx-x86_64-asm_64.S  |  4 ++--
 arch/x86/crypto/serpent-avx2-asm_64.S|  4 ++--
 arch/x86/crypto/twofish-avx-x86_64-asm_64.S  |  4 ++--
 9 files changed, 31 insertions(+), 40 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_asm.S 
b/arch/x86/crypto/aesni-intel_asm.S
index 3c465184ff8a..624e4303d0fb 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1746,7 +1746,7 @@ ENDPROC(aesni_gcm_enc)
 
 .align 4
 _key_expansion_128:
-_key_expansion_256a:
+ENTRY_LOCAL(_key_expansion_256a)
pshufd $0b11111111, %xmm1, %xmm1
shufps $0b00010000, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1759,8 +1759,7 @@ _key_expansion_256a:
 ENDPROC(_key_expansion_128)
 ENDPROC(_key_expansion_256a)
 
-.align 4
-_key_expansion_192a:
+ENTRY_LOCAL(_key_expansion_192a)
pshufd $0b01010101, %xmm1, %xmm1
shufps $0b00010000, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1784,8 +1783,7 @@ _key_expansion_192a:
ret
 ENDPROC(_key_expansion_192a)
 
-.align 4
-_key_expansion_192b:
+ENTRY_LOCAL(_key_expansion_192b)
pshufd $0b01010101, %xmm1, %xmm1
shufps $0b00010000, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1804,8 +1802,7 @@ _key_expansion_192b:
ret
 ENDPROC(_key_expansion_192b)
 
-.align 4
-_key_expansion_256b:
+ENTRY_LOCAL(_key_expansion_256b)
pshufd $0b10101010, %xmm1, %xmm1
shufps $0b00010000, %xmm2, %xmm4
pxor %xmm4, %xmm2
@@ -1968,8 +1965,7 @@ ENDPROC(aesni_enc)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_enc1:
+ENTRY_LOCAL(_aesni_enc1)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE # round 0
@@ -2032,8 +2028,7 @@ ENDPROC(_aesni_enc1)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_enc4:
+ENTRY_LOCAL(_aesni_enc4)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE1# round 0
@@ -2160,8 +2155,7 @@ ENDPROC(aesni_dec)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_dec1:
+ENTRY_LOCAL(_aesni_dec1)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE # round 0
@@ -2224,8 +2218,7 @@ ENDPROC(_aesni_dec1)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_dec4:
+ENTRY_LOCAL(_aesni_dec4)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE1# round 0
@@ -2591,8 +2584,7 @@ ENDPROC(aesni_cbc_dec)
  * INC:== 1, in little endian
  * BSWAP_MASK == endian swapping mask
  */
-.align 4
-_aesni_inc_init:
+ENTRY_LOCAL(_aesni_inc_init)
movaps .Lbswap_mask, BSWAP_MASK
movaps IV, CTR
PSHUFB_XMM BSWAP_MASK CTR
@@ -2617,8 +2609,7 @@ ENDPROC(_aesni_inc_init)
  * CTR:== output IV, in little endian
  * TCTR_LOW: == lower qword of CTR
  */
-.align 4
-_aesni_inc:
+ENTRY_LOCAL(_aesni_inc)
paddq INC, CTR
add $1, TCTR_LOW
jnc .Linc_low
diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S 
b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
index f7c495e2863c..f203607da5d0 100644
--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
@@ -188,7 +188,7 @@
  * larger and would only be 0.5% faster (on sandy-bridge).
  */
 .align 8
-roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
+ENTRY_LOCAL(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
  %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
  %rcx, (%r9));
@@ -196,7 +196,7 @@ 
roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
 
 .align 8
-roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
+ENTRY_LOCAL(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
  %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
  %rax, (%r9));
@@ -721,7 +721,7 @@ 
ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y

[PATCH 08/10] x86: assembly, annotate aliases

2017-02-17 Thread Jiri Slaby
_key_expansion_128 is an alias to _key_expansion_256a, __memcpy to
memcpy, xen_syscall32_target to xen_sysenter_target, and so on. Annotate
them all using the new ENTRY_ALIAS and ENTRY_LOCAL_ALIAS. This will make
the tools generating the debuginfo happy.

Signed-off-by: Jiri Slaby 
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: 
Cc: 
---
 arch/x86/crypto/aesni-intel_asm.S | 5 ++---
 arch/x86/lib/memcpy_64.S  | 4 ++--
 arch/x86/lib/memmove_64.S | 4 ++--
 arch/x86/lib/memset_64.S  | 4 ++--
 arch/x86/xen/xen-asm_64.S | 4 ++--
 5 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_asm.S 
b/arch/x86/crypto/aesni-intel_asm.S
index 624e4303d0fb..6a0f25be1a56 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1744,8 +1744,7 @@ ENDPROC(aesni_gcm_enc)
 #endif
 
 
-.align 4
-_key_expansion_128:
+ENTRY_LOCAL_ALIAS(_key_expansion_128)
 ENTRY_LOCAL(_key_expansion_256a)
pshufd $0b11111111, %xmm1, %xmm1
shufps $0b00010000, %xmm0, %xmm4
@@ -1756,8 +1755,8 @@ ENTRY_LOCAL(_key_expansion_256a)
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_128)
 ENDPROC(_key_expansion_256a)
+END_ALIAS(_key_expansion_128)
 
 ENTRY_LOCAL(_key_expansion_192a)
pshufd $0b01010101, %xmm1, %xmm1
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 779782f58324..c9ac54822e87 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -26,7 +26,7 @@
  * Output:
  * rax original destination
  */
-ENTRY(__memcpy)
+ENTRY_ALIAS(__memcpy)
 ENTRY(memcpy)
ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
  "jmp memcpy_erms", X86_FEATURE_ERMS
@@ -40,7 +40,7 @@ ENTRY(memcpy)
rep movsb
ret
 ENDPROC(memcpy)
-ENDPROC(__memcpy)
+END_ALIAS(__memcpy)
 EXPORT_SYMBOL(memcpy)
 EXPORT_SYMBOL(__memcpy)
 
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index 15de86cd15b0..76f54ba3dd26 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -25,7 +25,7 @@
  */
 .weak memmove
 
-ENTRY(memmove)
+ENTRY_ALIAS(memmove)
 ENTRY(__memmove)
 
/* Handle more 32 bytes in loop */
@@ -207,6 +207,6 @@ ENTRY(__memmove)
 13:
retq
 ENDPROC(__memmove)
-ENDPROC(memmove)
+END_ALIAS(memmove)
 EXPORT_SYMBOL(__memmove)
 EXPORT_SYMBOL(memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 55b95db30a61..be6c4705ec51 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -18,7 +18,7 @@
  *
  * rax   original destination
  */
-ENTRY(memset)
+ENTRY_ALIAS(memset)
 ENTRY(__memset)
/*
 * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
@@ -42,8 +42,8 @@ ENTRY(__memset)
rep stosb
movq %r9,%rax
ret
-ENDPROC(memset)
 ENDPROC(__memset)
+END_ALIAS(memset)
 EXPORT_SYMBOL(memset)
 EXPORT_SYMBOL(__memset)
 
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index d617bea76039..4b0fe749f10c 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -117,13 +117,13 @@ ENDPROC(xen_sysenter_target)
 
 #else /* !CONFIG_IA32_EMULATION */
 
-ENTRY(xen_syscall32_target)
+ENTRY_ALIAS(xen_syscall32_target)
 ENTRY(xen_sysenter_target)
lea 16(%rsp), %rsp  /* strip %rcx, %r11 */
mov $-ENOSYS, %rax
pushq $0
jmp hypercall_iret
-ENDPROC(xen_syscall32_target)
 ENDPROC(xen_sysenter_target)
+END_ALIAS(xen_syscall32_target)
 
 #endif /* CONFIG_IA32_EMULATION */
-- 
2.11.1



[PATCH v4 23/27] x86_64: assembly, change all ENTRY+ENDPROC to SYM_FUNC_*

2017-10-02 Thread Jiri Slaby
These are all functions which are invoked from elsewhere, so we annotate
them as global using the new SYM_FUNC_START. And their ENDPROC's by
SYM_FUNC_END.

And make sure ENTRY/ENDPROC is not defined on X86_64, given these were
the last users.

Signed-off-by: Jiri Slaby 
Cc: "H. Peter Anvin" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: x...@kernel.org
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: "Rafael J. Wysocki" 
Cc: Len Brown 
Cc: Pavel Machek 
Cc: Matt Fleming 
Cc: Ard Biesheuvel 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: linux-crypto@vger.kernel.org
Cc: linux...@vger.kernel.org
Cc: linux-...@vger.kernel.org
Cc: xen-de...@lists.xenproject.org
---
 arch/x86/boot/compressed/efi_thunk_64.S|  4 +-
 arch/x86/boot/compressed/head_64.S | 16 
 arch/x86/crypto/aes-i586-asm_32.S  |  8 ++--
 arch/x86/crypto/aes-x86_64-asm_64.S|  4 +-
 arch/x86/crypto/aes_ctrby8_avx-x86_64.S| 12 +++---
 arch/x86/crypto/aesni-intel_asm.S  | 44 +++---
 arch/x86/crypto/aesni-intel_avx-x86_64.S   | 24 ++--
 arch/x86/crypto/blowfish-x86_64-asm_64.S   | 16 
 arch/x86/crypto/camellia-aesni-avx-asm_64.S| 24 ++--
 arch/x86/crypto/camellia-aesni-avx2-asm_64.S   | 24 ++--
 arch/x86/crypto/camellia-x86_64-asm_64.S   | 16 
 arch/x86/crypto/cast5-avx-x86_64-asm_64.S  | 16 
 arch/x86/crypto/cast6-avx-x86_64-asm_64.S  | 24 ++--
 arch/x86/crypto/chacha20-avx2-x86_64.S |  4 +-
 arch/x86/crypto/chacha20-ssse3-x86_64.S|  8 ++--
 arch/x86/crypto/crc32-pclmul_asm.S |  4 +-
 arch/x86/crypto/crc32c-pcl-intel-asm_64.S  |  4 +-
 arch/x86/crypto/crct10dif-pcl-asm_64.S |  4 +-
 arch/x86/crypto/des3_ede-asm_64.S  |  8 ++--
 arch/x86/crypto/ghash-clmulni-intel_asm.S  |  8 ++--
 arch/x86/crypto/poly1305-avx2-x86_64.S |  4 +-
 arch/x86/crypto/poly1305-sse2-x86_64.S |  8 ++--
 arch/x86/crypto/salsa20-x86_64-asm_64.S| 12 +++---
 arch/x86/crypto/serpent-avx-x86_64-asm_64.S| 24 ++--
 arch/x86/crypto/serpent-avx2-asm_64.S  | 24 ++--
 arch/x86/crypto/serpent-sse2-x86_64-asm_64.S   |  8 ++--
 arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S   |  8 ++--
 arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S  |  4 +-
 arch/x86/crypto/sha1-mb/sha1_x8_avx2.S |  4 +-
 arch/x86/crypto/sha1_avx2_x86_64_asm.S |  4 +-
 arch/x86/crypto/sha1_ni_asm.S  |  4 +-
 arch/x86/crypto/sha1_ssse3_asm.S   |  4 +-
 arch/x86/crypto/sha256-avx-asm.S   |  4 +-
 arch/x86/crypto/sha256-avx2-asm.S  |  4 +-
 .../crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S|  8 ++--
 .../crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S   |  4 +-
 arch/x86/crypto/sha256-mb/sha256_x8_avx2.S |  4 +-
 arch/x86/crypto/sha256-ssse3-asm.S |  4 +-
 arch/x86/crypto/sha256_ni_asm.S|  4 +-
 arch/x86/crypto/sha512-avx-asm.S   |  4 +-
 arch/x86/crypto/sha512-avx2-asm.S  |  4 +-
 .../crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S|  8 ++--
 .../crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S   |  4 +-
 arch/x86/crypto/sha512-mb/sha512_x4_avx2.S |  4 +-
 arch/x86/crypto/sha512-ssse3-asm.S |  4 +-
 arch/x86/crypto/twofish-avx-x86_64-asm_64.S| 24 ++--
 arch/x86/crypto/twofish-x86_64-asm_64-3way.S   |  8 ++--
 arch/x86/crypto/twofish-x86_64-asm_64.S|  8 ++--
 arch/x86/entry/entry_64.S  | 10 ++---
 arch/x86/entry/entry_64_compat.S   |  8 ++--
 arch/x86/kernel/acpi/wakeup_64.S   |  8 ++--
 arch/x86/kernel/head_64.S  | 12 +++---
 arch/x86/lib/checksum_32.S |  8 ++--
 arch/x86/lib/clear_page_64.S   | 12 +++---
 arch/x86/lib/cmpxchg16b_emu.S  |  4 +-
 arch/x86/lib/cmpxchg8b_emu.S   |  4 +-
 arch/x86/lib/copy_page_64.S|  4 +-
 arch/x86/lib/copy_user_64.S| 16 
 arch/x86/lib/csum-copy_64.S|  4 +-
 arch/x86/lib/getuser.S | 16 
 arch/x86/lib/hweight.S |  8 ++--
 arch/x86/lib/iomap_copy_64.S   |  4 +-
 arch/x86/lib/memcpy_64.S   |  4 +-
 arch/x86/lib/memmove_64.S  |  4 +-
 arch/x86/lib/memset_64.S   |  4 +-
 arch/x86/lib/msr-reg.S |  8 ++--
 arch/x86/lib/putuser.S | 16 
 arch/x86/lib/rwsem.S   | 20 +-
 arch/x86/mm/mem_encrypt_boot.S 

[PATCH v4 26/27] x86_32: assembly, change all ENTRY+ENDPROC to SYM_FUNC_*

2017-10-02 Thread Jiri Slaby
These are all functions which are invoked from elsewhere, so we annotate
them as global using the new SYM_FUNC_START (and their ENDPROC's by
SYM_FUNC_END.)

Now, we can finally force ENTRY/ENDPROC to be undefined on X86.

Signed-off-by: Jiri Slaby 
Cc: "H. Peter Anvin" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: x...@kernel.org
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Bill Metzenthen 
Cc: Matt Fleming 
Cc: Ard Biesheuvel 
Cc: linux-crypto@vger.kernel.org
Cc: linux-...@vger.kernel.org
---
 arch/x86/boot/compressed/efi_stub_32.S |  4 ++--
 arch/x86/boot/compressed/head_32.S | 12 +--
 arch/x86/crypto/salsa20-i586-asm_32.S  | 12 +--
 arch/x86/crypto/serpent-sse2-i586-asm_32.S |  8 
 arch/x86/crypto/twofish-i586-asm_32.S  |  8 
 arch/x86/entry/entry_32.S  | 24 +++---
 arch/x86/kernel/head_32.S  | 16 +++
 arch/x86/lib/atomic64_386_32.S |  4 ++--
 arch/x86/lib/atomic64_cx8_32.S | 32 +++---
 arch/x86/lib/checksum_32.S |  8 
 arch/x86/math-emu/div_Xsig.S   |  4 ++--
 arch/x86/math-emu/div_small.S  |  4 ++--
 arch/x86/math-emu/mul_Xsig.S   | 12 +--
 arch/x86/math-emu/polynom_Xsig.S   |  4 ++--
 arch/x86/math-emu/reg_norm.S   |  8 
 arch/x86/math-emu/reg_round.S  |  4 ++--
 arch/x86/math-emu/reg_u_add.S  |  4 ++--
 arch/x86/math-emu/reg_u_div.S  |  4 ++--
 arch/x86/math-emu/reg_u_mul.S  |  4 ++--
 arch/x86/math-emu/reg_u_sub.S  |  4 ++--
 arch/x86/math-emu/round_Xsig.S |  8 
 arch/x86/math-emu/shr_Xsig.S   |  4 ++--
 arch/x86/math-emu/wm_shrx.S|  8 
 arch/x86/math-emu/wm_sqrt.S|  4 ++--
 arch/x86/platform/efi/efi_stub_32.S|  4 ++--
 include/linux/linkage.h|  8 +++-
 26 files changed, 107 insertions(+), 109 deletions(-)

diff --git a/arch/x86/boot/compressed/efi_stub_32.S 
b/arch/x86/boot/compressed/efi_stub_32.S
index a53440e81d52..4ceff75b0d2a 100644
--- a/arch/x86/boot/compressed/efi_stub_32.S
+++ b/arch/x86/boot/compressed/efi_stub_32.S
@@ -23,7 +23,7 @@
  */
 
 .text
-ENTRY(efi_call_phys)
+SYM_FUNC_START(efi_call_phys)
/*
 * 0. The function can only be called in Linux kernel. So CS has been
 * set to 0x0010, DS and SS have been set to 0x0018. In EFI, I found
@@ -76,7 +76,7 @@ ENTRY(efi_call_phys)
movlsaved_return_addr(%edx), %ecx
pushl   %ecx
ret
-ENDPROC(efi_call_phys)
+SYM_FUNC_END(efi_call_phys)
 .previous
 
 .data
diff --git a/arch/x86/boot/compressed/head_32.S 
b/arch/x86/boot/compressed/head_32.S
index d832ddb78ea2..86484c3788f8 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -60,7 +60,7 @@
.hidden _egot
 
__HEAD
-ENTRY(startup_32)
+SYM_FUNC_START(startup_32)
cld
/*
 * Test KEEP_SEGMENTS flag to see if the bootloader is asking
@@ -141,14 +141,14 @@ ENTRY(startup_32)
  */
lealrelocated(%ebx), %eax
jmp *%eax
-ENDPROC(startup_32)
+SYM_FUNC_END(startup_32)
 
 #ifdef CONFIG_EFI_STUB
 /*
  * We don't need the return address, so set up the stack so efi_main() can find
  * its arguments.
  */
-ENTRY(efi_pe_entry)
+SYM_FUNC_START(efi_pe_entry)
add $0x4, %esp
 
call1f
@@ -173,9 +173,9 @@ ENTRY(efi_pe_entry)
pushl   %eax
pushl   %ecx
jmp 2f  /* Skip efi_config initialization */
-ENDPROC(efi_pe_entry)
+SYM_FUNC_END(efi_pe_entry)
 
-ENTRY(efi32_stub_entry)
+SYM_FUNC_START(efi32_stub_entry)
add $0x4, %esp
popl%ecx
popl%edx
@@ -204,7 +204,7 @@ fail:
movlBP_code32_start(%esi), %eax
lealstartup_32(%eax), %eax
jmp *%eax
-ENDPROC(efi32_stub_entry)
+SYM_FUNC_END(efi32_stub_entry)
 #endif
 
.text
diff --git a/arch/x86/crypto/salsa20-i586-asm_32.S 
b/arch/x86/crypto/salsa20-i586-asm_32.S
index 329452b8f794..e9a6703056fc 100644
--- a/arch/x86/crypto/salsa20-i586-asm_32.S
+++ b/arch/x86/crypto/salsa20-i586-asm_32.S
@@ -7,7 +7,7 @@
 .text
 
 # enter salsa20_encrypt_bytes
-ENTRY(salsa20_encrypt_bytes)
+SYM_FUNC_START(salsa20_encrypt_bytes)
mov %esp,%eax
and $31,%eax
add $256,%eax
@@ -934,10 +934,10 @@ ENTRY(salsa20_encrypt_bytes)
add $64,%esi
# goto bytesatleast1
jmp ._bytesatleast1
-ENDPROC(salsa20_encrypt_bytes)
+SYM_FUNC_END(salsa20_encrypt_bytes)
 
 # enter salsa20_keysetup
-ENTRY(salsa20_keysetup)
+SYM_FUNC_START(salsa20_keysetup)
mov %esp,%eax
and $31,%eax
add $256,%eax
@@ -1060,10 +1060,10 @@ ENTRY(salsa20_keysetup)
# leave
add %eax,%esp
ret
-ENDPROC(s

[PATCH v4 08/27] x86: assembly, annotate aliases

2017-10-02 Thread Jiri Slaby
_key_expansion_128 is an alias to _key_expansion_256a, __memcpy to
memcpy, xen_syscall32_target to xen_sysenter_target, and so on. Annotate
them all using the new SYM_FUNC_START_ALIAS, SYM_FUNC_START_LOCAL_ALIAS,
and SYM_FUNC_END_ALIAS. This will make the tools generating the
debuginfo happy.

Signed-off-by: Jiri Slaby 
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Reviewed-by: Juergen Gross  [xen parts]
Cc: 
Cc: 
---
 arch/x86/crypto/aesni-intel_asm.S | 5 ++---
 arch/x86/lib/memcpy_64.S  | 4 ++--
 arch/x86/lib/memmove_64.S | 4 ++--
 arch/x86/lib/memset_64.S  | 4 ++--
 arch/x86/xen/xen-asm_64.S | 4 ++--
 5 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_asm.S 
b/arch/x86/crypto/aesni-intel_asm.S
index 1d34d5a14682..426a22192d69 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1873,8 +1873,7 @@ ENDPROC(aesni_gcm_enc)
 #endif
 
 
-.align 4
-_key_expansion_128:
+SYM_FUNC_START_LOCAL_ALIAS(_key_expansion_128)
 SYM_FUNC_START_LOCAL(_key_expansion_256a)
pshufd $0b11111111, %xmm1, %xmm1
shufps $0b00010000, %xmm0, %xmm4
@@ -1885,8 +1884,8 @@ SYM_FUNC_START_LOCAL(_key_expansion_256a)
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_128)
 SYM_FUNC_END(_key_expansion_256a)
+SYM_FUNC_END_ALIAS(_key_expansion_128)
 
 SYM_FUNC_START_LOCAL(_key_expansion_192a)
pshufd $0b01010101, %xmm1, %xmm1
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 9a53a06e5a3e..4911b1c61aa8 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -26,7 +26,7 @@
  * Output:
  * rax original destination
  */
-ENTRY(__memcpy)
+SYM_FUNC_START_ALIAS(__memcpy)
 ENTRY(memcpy)
ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
  "jmp memcpy_erms", X86_FEATURE_ERMS
@@ -40,7 +40,7 @@ ENTRY(memcpy)
rep movsb
ret
 ENDPROC(memcpy)
-ENDPROC(__memcpy)
+SYM_FUNC_END_ALIAS(__memcpy)
 EXPORT_SYMBOL(memcpy)
 EXPORT_SYMBOL(__memcpy)
 
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index 15de86cd15b0..d22af97e5b27 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -25,7 +25,7 @@
  */
 .weak memmove
 
-ENTRY(memmove)
+SYM_FUNC_START_ALIAS(memmove)
 ENTRY(__memmove)
 
/* Handle more 32 bytes in loop */
@@ -207,6 +207,6 @@ ENTRY(__memmove)
 13:
retq
 ENDPROC(__memmove)
-ENDPROC(memmove)
+SYM_FUNC_END_ALIAS(memmove)
 EXPORT_SYMBOL(__memmove)
 EXPORT_SYMBOL(memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 55b95db30a61..0d3a1d341e60 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -18,7 +18,7 @@
  *
  * rax   original destination
  */
-ENTRY(memset)
+SYM_FUNC_START_ALIAS(memset)
 ENTRY(__memset)
/*
 * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
@@ -42,8 +42,8 @@ ENTRY(__memset)
rep stosb
movq %r9,%rax
ret
-ENDPROC(memset)
 ENDPROC(__memset)
+SYM_FUNC_END_ALIAS(memset)
 EXPORT_SYMBOL(memset)
 EXPORT_SYMBOL(__memset)
 
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index dae2cc33afb5..f7e9a8344910 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -149,13 +149,13 @@ ENDPROC(xen_sysenter_target)
 
 #else /* !CONFIG_IA32_EMULATION */
 
-ENTRY(xen_syscall32_target)
+SYM_FUNC_START_ALIAS(xen_syscall32_target)
 ENTRY(xen_sysenter_target)
lea 16(%rsp), %rsp  /* strip %rcx, %r11 */
mov $-ENOSYS, %rax
pushq $0
jmp hypercall_iret
-ENDPROC(xen_syscall32_target)
 ENDPROC(xen_sysenter_target)
+SYM_FUNC_END_ALIAS(xen_syscall32_target)
 
 #endif /* CONFIG_IA32_EMULATION */
-- 
2.14.2



[PATCH v4 06/27] x86: crypto, annotate local functions

2017-10-02 Thread Jiri Slaby
Use the newly added SYM_FUNC_START_LOCAL to annotate starts of all
functions which do not have ".globl" annotation, but their ends are
annotated by ENDPROC. This is needed to balance ENDPROC for tools that
are about to generate debuginfo.

To be symmetric, we also convert their ENDPROCs to the new SYM_FUNC_END.

Signed-off-by: Jiri Slaby 
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: 
Cc: 
---
 arch/x86/crypto/aesni-intel_asm.S| 49 
 arch/x86/crypto/camellia-aesni-avx-asm_64.S  | 20 ++--
 arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 20 ++--
 arch/x86/crypto/cast5-avx-x86_64-asm_64.S|  8 ++---
 arch/x86/crypto/cast6-avx-x86_64-asm_64.S|  8 ++---
 arch/x86/crypto/ghash-clmulni-intel_asm.S|  4 +--
 arch/x86/crypto/serpent-avx-x86_64-asm_64.S  |  8 ++---
 arch/x86/crypto/serpent-avx2-asm_64.S|  8 ++---
 arch/x86/crypto/twofish-avx-x86_64-asm_64.S  |  8 ++---
 9 files changed, 62 insertions(+), 71 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_asm.S 
b/arch/x86/crypto/aesni-intel_asm.S
index 16627fec80b2..1d34d5a14682 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1875,7 +1875,7 @@ ENDPROC(aesni_gcm_enc)
 
 .align 4
 _key_expansion_128:
-_key_expansion_256a:
+SYM_FUNC_START_LOCAL(_key_expansion_256a)
pshufd $0b11111111, %xmm1, %xmm1
shufps $0b00010000, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1886,10 +1886,9 @@ _key_expansion_256a:
add $0x10, TKEYP
ret
 ENDPROC(_key_expansion_128)
-ENDPROC(_key_expansion_256a)
+SYM_FUNC_END(_key_expansion_256a)
 
-.align 4
-_key_expansion_192a:
+SYM_FUNC_START_LOCAL(_key_expansion_192a)
pshufd $0b01010101, %xmm1, %xmm1
shufps $0b00010000, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1911,10 +1910,9 @@ _key_expansion_192a:
movaps %xmm1, 0x10(TKEYP)
add $0x20, TKEYP
ret
-ENDPROC(_key_expansion_192a)
+SYM_FUNC_END(_key_expansion_192a)
 
-.align 4
-_key_expansion_192b:
+SYM_FUNC_START_LOCAL(_key_expansion_192b)
pshufd $0b01010101, %xmm1, %xmm1
shufps $0b00010000, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1931,10 +1929,9 @@ _key_expansion_192b:
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_192b)
+SYM_FUNC_END(_key_expansion_192b)
 
-.align 4
-_key_expansion_256b:
+SYM_FUNC_START_LOCAL(_key_expansion_256b)
pshufd $0b10101010, %xmm1, %xmm1
shufps $0b00010000, %xmm2, %xmm4
pxor %xmm4, %xmm2
@@ -1944,7 +1941,7 @@ _key_expansion_256b:
movaps %xmm2, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_256b)
+SYM_FUNC_END(_key_expansion_256b)
 
 /*
  * int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
@@ -2097,8 +2094,7 @@ ENDPROC(aesni_enc)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_enc1:
+SYM_FUNC_START_LOCAL(_aesni_enc1)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE # round 0
@@ -2141,7 +2137,7 @@ _aesni_enc1:
movaps 0x70(TKEYP), KEY
AESENCLAST KEY STATE
ret
-ENDPROC(_aesni_enc1)
+SYM_FUNC_END(_aesni_enc1)
 
 /*
  * _aesni_enc4:internal ABI
@@ -2161,8 +2157,7 @@ ENDPROC(_aesni_enc1)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_enc4:
+SYM_FUNC_START_LOCAL(_aesni_enc4)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE1# round 0
@@ -2250,7 +2245,7 @@ _aesni_enc4:
AESENCLAST KEY STATE3
AESENCLAST KEY STATE4
ret
-ENDPROC(_aesni_enc4)
+SYM_FUNC_END(_aesni_enc4)
 
 /*
  * void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
@@ -2289,8 +2284,7 @@ ENDPROC(aesni_dec)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_dec1:
+SYM_FUNC_START_LOCAL(_aesni_dec1)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE # round 0
@@ -2333,7 +2327,7 @@ _aesni_dec1:
movaps 0x70(TKEYP), KEY
AESDECLAST KEY STATE
ret
-ENDPROC(_aesni_dec1)
+SYM_FUNC_END(_aesni_dec1)
 
 /*
  * _aesni_dec4:internal ABI
@@ -2353,8 +2347,7 @@ ENDPROC(_aesni_dec1)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_dec4:
+SYM_FUNC_START_LOCAL(_aesni_dec4)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE1# round 0
@@ -2442,7 +2435,7 @@ _aesni_dec4:
AESDECLAST KEY STATE3
AESDECLAST KEY STATE4
ret
-ENDPROC(_aesni_dec4)
+SYM_FUNC_END(_aesni_dec4)
 
 /*
  * void aesni_ecb_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
@@ -2720,8 +2713,7 @@ ENDPROC(aesni_cbc_dec)
  * INC:== 1, in little endian
  * BSWAP_MASK == endian swapping mask
  */
-.align 4
-_aesni_inc_init:
+SYM_FUNC_START_LOCAL(_aesni_inc_init)
movaps .Lbswap_mask, BSW

[PATCH v5 26/27] x86_32: assembly, change all ENTRY+ENDPROC to SYM_FUNC_*

2017-11-30 Thread Jiri Slaby
These are all functions which are invoked from elsewhere, so we annotate
them as global using the new SYM_FUNC_START (and their ENDPROC's by
SYM_FUNC_END.)

Now, we can finally force ENTRY/ENDPROC to be undefined on X86.

Signed-off-by: Jiri Slaby 
Cc: "H. Peter Anvin" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: x...@kernel.org
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Bill Metzenthen 
Cc: Matt Fleming 
Cc: Ard Biesheuvel 
Cc: linux-crypto@vger.kernel.org
Cc: linux-...@vger.kernel.org
---
 arch/x86/boot/compressed/efi_stub_32.S |  4 ++--
 arch/x86/boot/compressed/head_32.S | 12 +--
 arch/x86/crypto/salsa20-i586-asm_32.S  | 12 +--
 arch/x86/crypto/serpent-sse2-i586-asm_32.S |  8 
 arch/x86/crypto/twofish-i586-asm_32.S  |  8 
 arch/x86/entry/entry_32.S  | 24 +++---
 arch/x86/kernel/head_32.S  | 16 +++
 arch/x86/lib/atomic64_386_32.S |  4 ++--
 arch/x86/lib/atomic64_cx8_32.S | 32 +++---
 arch/x86/lib/checksum_32.S |  8 
 arch/x86/math-emu/div_Xsig.S   |  4 ++--
 arch/x86/math-emu/div_small.S  |  4 ++--
 arch/x86/math-emu/mul_Xsig.S   | 12 +--
 arch/x86/math-emu/polynom_Xsig.S   |  4 ++--
 arch/x86/math-emu/reg_norm.S   |  8 
 arch/x86/math-emu/reg_round.S  |  4 ++--
 arch/x86/math-emu/reg_u_add.S  |  4 ++--
 arch/x86/math-emu/reg_u_div.S  |  4 ++--
 arch/x86/math-emu/reg_u_mul.S  |  4 ++--
 arch/x86/math-emu/reg_u_sub.S  |  4 ++--
 arch/x86/math-emu/round_Xsig.S |  8 
 arch/x86/math-emu/shr_Xsig.S   |  4 ++--
 arch/x86/math-emu/wm_shrx.S|  8 
 arch/x86/math-emu/wm_sqrt.S|  4 ++--
 arch/x86/platform/efi/efi_stub_32.S|  4 ++--
 include/linux/linkage.h|  8 +++-
 26 files changed, 107 insertions(+), 109 deletions(-)

diff --git a/arch/x86/boot/compressed/efi_stub_32.S 
b/arch/x86/boot/compressed/efi_stub_32.S
index 257e341fd2c8..ed6c351d34ed 100644
--- a/arch/x86/boot/compressed/efi_stub_32.S
+++ b/arch/x86/boot/compressed/efi_stub_32.S
@@ -24,7 +24,7 @@
  */
 
 .text
-ENTRY(efi_call_phys)
+SYM_FUNC_START(efi_call_phys)
/*
 * 0. The function can only be called in Linux kernel. So CS has been
 * set to 0x0010, DS and SS have been set to 0x0018. In EFI, I found
@@ -77,7 +77,7 @@ ENTRY(efi_call_phys)
movlsaved_return_addr(%edx), %ecx
pushl   %ecx
ret
-ENDPROC(efi_call_phys)
+SYM_FUNC_END(efi_call_phys)
 .previous
 
 .data
diff --git a/arch/x86/boot/compressed/head_32.S 
b/arch/x86/boot/compressed/head_32.S
index 7e8ab0bb6968..3fa36496af12 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -61,7 +61,7 @@
.hidden _egot
 
__HEAD
-ENTRY(startup_32)
+SYM_FUNC_START(startup_32)
cld
/*
 * Test KEEP_SEGMENTS flag to see if the bootloader is asking
@@ -142,14 +142,14 @@ ENTRY(startup_32)
  */
lealrelocated(%ebx), %eax
jmp *%eax
-ENDPROC(startup_32)
+SYM_FUNC_END(startup_32)
 
 #ifdef CONFIG_EFI_STUB
 /*
  * We don't need the return address, so set up the stack so efi_main() can find
  * its arguments.
  */
-ENTRY(efi_pe_entry)
+SYM_FUNC_START(efi_pe_entry)
add $0x4, %esp
 
call1f
@@ -174,9 +174,9 @@ ENTRY(efi_pe_entry)
pushl   %eax
pushl   %ecx
jmp 2f  /* Skip efi_config initialization */
-ENDPROC(efi_pe_entry)
+SYM_FUNC_END(efi_pe_entry)
 
-ENTRY(efi32_stub_entry)
+SYM_FUNC_START(efi32_stub_entry)
add $0x4, %esp
popl%ecx
popl%edx
@@ -205,7 +205,7 @@ fail:
movlBP_code32_start(%esi), %eax
lealstartup_32(%eax), %eax
jmp *%eax
-ENDPROC(efi32_stub_entry)
+SYM_FUNC_END(efi32_stub_entry)
 #endif
 
.text
diff --git a/arch/x86/crypto/salsa20-i586-asm_32.S 
b/arch/x86/crypto/salsa20-i586-asm_32.S
index 329452b8f794..e9a6703056fc 100644
--- a/arch/x86/crypto/salsa20-i586-asm_32.S
+++ b/arch/x86/crypto/salsa20-i586-asm_32.S
@@ -7,7 +7,7 @@
 .text
 
 # enter salsa20_encrypt_bytes
-ENTRY(salsa20_encrypt_bytes)
+SYM_FUNC_START(salsa20_encrypt_bytes)
mov %esp,%eax
and $31,%eax
add $256,%eax
@@ -934,10 +934,10 @@ ENTRY(salsa20_encrypt_bytes)
add $64,%esi
# goto bytesatleast1
jmp ._bytesatleast1
-ENDPROC(salsa20_encrypt_bytes)
+SYM_FUNC_END(salsa20_encrypt_bytes)
 
 # enter salsa20_keysetup
-ENTRY(salsa20_keysetup)
+SYM_FUNC_START(salsa20_keysetup)
mov %esp,%eax
and $31,%eax
add $256,%eax
@@ -1060,10 +1060,10 @@ ENTRY(salsa20_keysetup)
# leave
add %eax,%esp
ret
-ENDPROC(s

[PATCH v5 23/27] x86_64: assembly, change all ENTRY+ENDPROC to SYM_FUNC_*

2017-11-30 Thread Jiri Slaby
These are all functions which are invoked from elsewhere, so we annotate
them as global using the new SYM_FUNC_START. And their ENDPROC's by
SYM_FUNC_END.

And make sure ENTRY/ENDPROC is not defined on X86_64, given these were
the last users.

Signed-off-by: Jiri Slaby 
Reviewed-by: Rafael J. Wysocki  [hibernate]
Reviewed-by: Boris Ostrovsky  [xen bits]
Cc: "H. Peter Anvin" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: x...@kernel.org
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: "Rafael J. Wysocki" 
Cc: Len Brown 
Cc: Pavel Machek 
Cc: Matt Fleming 
Cc: Ard Biesheuvel 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: linux-crypto@vger.kernel.org
Cc: linux...@vger.kernel.org
Cc: linux-...@vger.kernel.org
Cc: xen-de...@lists.xenproject.org
---
 arch/x86/boot/compressed/efi_thunk_64.S|  4 +-
 arch/x86/boot/compressed/head_64.S | 16 
 arch/x86/boot/compressed/mem_encrypt.S |  8 ++--
 arch/x86/crypto/aes-i586-asm_32.S  |  8 ++--
 arch/x86/crypto/aes-x86_64-asm_64.S|  4 +-
 arch/x86/crypto/aes_ctrby8_avx-x86_64.S| 12 +++---
 arch/x86/crypto/aesni-intel_asm.S  | 44 +++---
 arch/x86/crypto/aesni-intel_avx-x86_64.S   | 24 ++--
 arch/x86/crypto/blowfish-x86_64-asm_64.S   | 16 
 arch/x86/crypto/camellia-aesni-avx-asm_64.S| 24 ++--
 arch/x86/crypto/camellia-aesni-avx2-asm_64.S   | 24 ++--
 arch/x86/crypto/camellia-x86_64-asm_64.S   | 16 
 arch/x86/crypto/cast5-avx-x86_64-asm_64.S  | 16 
 arch/x86/crypto/cast6-avx-x86_64-asm_64.S  | 24 ++--
 arch/x86/crypto/chacha20-avx2-x86_64.S |  4 +-
 arch/x86/crypto/chacha20-ssse3-x86_64.S|  8 ++--
 arch/x86/crypto/crc32-pclmul_asm.S |  4 +-
 arch/x86/crypto/crc32c-pcl-intel-asm_64.S  |  4 +-
 arch/x86/crypto/crct10dif-pcl-asm_64.S |  4 +-
 arch/x86/crypto/des3_ede-asm_64.S  |  8 ++--
 arch/x86/crypto/ghash-clmulni-intel_asm.S  |  8 ++--
 arch/x86/crypto/poly1305-avx2-x86_64.S |  4 +-
 arch/x86/crypto/poly1305-sse2-x86_64.S |  8 ++--
 arch/x86/crypto/salsa20-x86_64-asm_64.S| 12 +++---
 arch/x86/crypto/serpent-avx-x86_64-asm_64.S| 24 ++--
 arch/x86/crypto/serpent-avx2-asm_64.S  | 24 ++--
 arch/x86/crypto/serpent-sse2-x86_64-asm_64.S   |  8 ++--
 arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S   |  8 ++--
 arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S  |  4 +-
 arch/x86/crypto/sha1-mb/sha1_x8_avx2.S |  4 +-
 arch/x86/crypto/sha1_avx2_x86_64_asm.S |  4 +-
 arch/x86/crypto/sha1_ni_asm.S  |  4 +-
 arch/x86/crypto/sha1_ssse3_asm.S   |  4 +-
 arch/x86/crypto/sha256-avx-asm.S   |  4 +-
 arch/x86/crypto/sha256-avx2-asm.S  |  4 +-
 .../crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S|  8 ++--
 .../crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S   |  4 +-
 arch/x86/crypto/sha256-mb/sha256_x8_avx2.S |  4 +-
 arch/x86/crypto/sha256-ssse3-asm.S |  4 +-
 arch/x86/crypto/sha256_ni_asm.S|  4 +-
 arch/x86/crypto/sha512-avx-asm.S   |  4 +-
 arch/x86/crypto/sha512-avx2-asm.S  |  4 +-
 .../crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S|  8 ++--
 .../crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S   |  4 +-
 arch/x86/crypto/sha512-mb/sha512_x4_avx2.S |  4 +-
 arch/x86/crypto/sha512-ssse3-asm.S |  4 +-
 arch/x86/crypto/twofish-avx-x86_64-asm_64.S| 24 ++--
 arch/x86/crypto/twofish-x86_64-asm_64-3way.S   |  8 ++--
 arch/x86/crypto/twofish-x86_64-asm_64.S|  8 ++--
 arch/x86/entry/entry_64.S  | 10 ++---
 arch/x86/entry/entry_64_compat.S   |  8 ++--
 arch/x86/kernel/acpi/wakeup_64.S   |  8 ++--
 arch/x86/kernel/head_64.S  | 12 +++---
 arch/x86/lib/checksum_32.S |  8 ++--
 arch/x86/lib/clear_page_64.S   | 12 +++---
 arch/x86/lib/cmpxchg16b_emu.S  |  4 +-
 arch/x86/lib/cmpxchg8b_emu.S   |  4 +-
 arch/x86/lib/copy_page_64.S|  4 +-
 arch/x86/lib/copy_user_64.S| 16 
 arch/x86/lib/csum-copy_64.S|  4 +-
 arch/x86/lib/getuser.S | 16 
 arch/x86/lib/hweight.S |  8 ++--
 arch/x86/lib/iomap_copy_64.S   |  4 +-
 arch/x86/lib/memcpy_64.S   |  4 +-
 arch/x86/lib/memmove_64.S  |  4 +-
 arch/x86/lib/memset_64.S   |  4 +-
 arch/x86/lib/msr-reg.S |  8 ++--
 arch/x86/lib/putuser

[PATCH v5 08/27] x86: assembly, annotate aliases

2017-11-30 Thread Jiri Slaby
_key_expansion_128 is an alias to _key_expansion_256a, __memcpy to
memcpy, xen_syscall32_target to xen_sysenter_target, and so on. Annotate
them all using the new SYM_FUNC_START_ALIAS, SYM_FUNC_START_LOCAL_ALIAS,
and SYM_FUNC_END_ALIAS. This will make the tools generating the
debuginfo happy.

Signed-off-by: Jiri Slaby 
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Reviewed-by: Juergen Gross  [xen parts]
Cc: 
Cc: 
---
 arch/x86/crypto/aesni-intel_asm.S | 5 ++---
 arch/x86/lib/memcpy_64.S  | 4 ++--
 arch/x86/lib/memmove_64.S | 4 ++--
 arch/x86/lib/memset_64.S  | 4 ++--
 arch/x86/xen/xen-asm_64.S | 4 ++--
 5 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_asm.S 
b/arch/x86/crypto/aesni-intel_asm.S
index 1d34d5a14682..426a22192d69 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1873,8 +1873,7 @@ ENDPROC(aesni_gcm_enc)
 #endif
 
 
-.align 4
-_key_expansion_128:
+SYM_FUNC_START_LOCAL_ALIAS(_key_expansion_128)
 SYM_FUNC_START_LOCAL(_key_expansion_256a)
pshufd $0b11111111, %xmm1, %xmm1
shufps $0b00010000, %xmm0, %xmm4
@@ -1885,8 +1884,8 @@ SYM_FUNC_START_LOCAL(_key_expansion_256a)
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_128)
 SYM_FUNC_END(_key_expansion_256a)
+SYM_FUNC_END_ALIAS(_key_expansion_128)
 
 SYM_FUNC_START_LOCAL(_key_expansion_192a)
pshufd $0b01010101, %xmm1, %xmm1
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 9a53a06e5a3e..4911b1c61aa8 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -26,7 +26,7 @@
  * Output:
  * rax original destination
  */
-ENTRY(__memcpy)
+SYM_FUNC_START_ALIAS(__memcpy)
 ENTRY(memcpy)
ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
  "jmp memcpy_erms", X86_FEATURE_ERMS
@@ -40,7 +40,7 @@ ENTRY(memcpy)
rep movsb
ret
 ENDPROC(memcpy)
-ENDPROC(__memcpy)
+SYM_FUNC_END_ALIAS(__memcpy)
 EXPORT_SYMBOL(memcpy)
 EXPORT_SYMBOL(__memcpy)
 
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index bbec69d8223b..50c1648311b3 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -26,7 +26,7 @@
  */
 .weak memmove
 
-ENTRY(memmove)
+SYM_FUNC_START_ALIAS(memmove)
 ENTRY(__memmove)
 
/* Handle more 32 bytes in loop */
@@ -208,6 +208,6 @@ ENTRY(__memmove)
 13:
retq
 ENDPROC(__memmove)
-ENDPROC(memmove)
+SYM_FUNC_END_ALIAS(memmove)
 EXPORT_SYMBOL(__memmove)
 EXPORT_SYMBOL(memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 9bc861c71e75..927ac44d34aa 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -19,7 +19,7 @@
  *
  * rax   original destination
  */
-ENTRY(memset)
+SYM_FUNC_START_ALIAS(memset)
 ENTRY(__memset)
/*
 * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
@@ -43,8 +43,8 @@ ENTRY(__memset)
rep stosb
movq %r9,%rax
ret
-ENDPROC(memset)
 ENDPROC(__memset)
+SYM_FUNC_END_ALIAS(memset)
 EXPORT_SYMBOL(memset)
 EXPORT_SYMBOL(__memset)
 
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 8a10c9a9e2b5..0820db159053 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -150,13 +150,13 @@ ENDPROC(xen_sysenter_target)
 
 #else /* !CONFIG_IA32_EMULATION */
 
-ENTRY(xen_syscall32_target)
+SYM_FUNC_START_ALIAS(xen_syscall32_target)
 ENTRY(xen_sysenter_target)
lea 16(%rsp), %rsp  /* strip %rcx, %r11 */
mov $-ENOSYS, %rax
pushq $0
jmp hypercall_iret
-ENDPROC(xen_syscall32_target)
 ENDPROC(xen_sysenter_target)
+SYM_FUNC_END_ALIAS(xen_syscall32_target)
 
 #endif /* CONFIG_IA32_EMULATION */
-- 
2.15.0



[PATCH v5 06/27] x86: crypto, annotate local functions

2017-11-30 Thread Jiri Slaby
Use the newly added SYM_FUNC_START_LOCAL to annotate starts of all
functions which do not have ".globl" annotation, but their ends are
annotated by ENDPROC. This is needed to balance ENDPROC for tools that
are about to generate debuginfo.

To be symmetric, we also convert their ENDPROCs to the new SYM_FUNC_END.

Signed-off-by: Jiri Slaby 
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: 
Cc: 
---
 arch/x86/crypto/aesni-intel_asm.S| 49 
 arch/x86/crypto/camellia-aesni-avx-asm_64.S  | 20 ++--
 arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 20 ++--
 arch/x86/crypto/cast5-avx-x86_64-asm_64.S|  8 ++---
 arch/x86/crypto/cast6-avx-x86_64-asm_64.S|  8 ++---
 arch/x86/crypto/ghash-clmulni-intel_asm.S|  4 +--
 arch/x86/crypto/serpent-avx-x86_64-asm_64.S  |  8 ++---
 arch/x86/crypto/serpent-avx2-asm_64.S|  8 ++---
 arch/x86/crypto/twofish-avx-x86_64-asm_64.S  |  8 ++---
 9 files changed, 62 insertions(+), 71 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_asm.S 
b/arch/x86/crypto/aesni-intel_asm.S
index 16627fec80b2..1d34d5a14682 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1875,7 +1875,7 @@ ENDPROC(aesni_gcm_enc)
 
 .align 4
 _key_expansion_128:
-_key_expansion_256a:
+SYM_FUNC_START_LOCAL(_key_expansion_256a)
pshufd $0b11111111, %xmm1, %xmm1
shufps $0b00010000, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1886,10 +1886,9 @@ _key_expansion_256a:
add $0x10, TKEYP
ret
 ENDPROC(_key_expansion_128)
-ENDPROC(_key_expansion_256a)
+SYM_FUNC_END(_key_expansion_256a)
 
-.align 4
-_key_expansion_192a:
+SYM_FUNC_START_LOCAL(_key_expansion_192a)
pshufd $0b01010101, %xmm1, %xmm1
shufps $0b00010000, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1911,10 +1910,9 @@ _key_expansion_192a:
movaps %xmm1, 0x10(TKEYP)
add $0x20, TKEYP
ret
-ENDPROC(_key_expansion_192a)
+SYM_FUNC_END(_key_expansion_192a)
 
-.align 4
-_key_expansion_192b:
+SYM_FUNC_START_LOCAL(_key_expansion_192b)
pshufd $0b01010101, %xmm1, %xmm1
shufps $0b00010000, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1931,10 +1929,9 @@ _key_expansion_192b:
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_192b)
+SYM_FUNC_END(_key_expansion_192b)
 
-.align 4
-_key_expansion_256b:
+SYM_FUNC_START_LOCAL(_key_expansion_256b)
pshufd $0b10101010, %xmm1, %xmm1
shufps $0b00010000, %xmm2, %xmm4
pxor %xmm4, %xmm2
@@ -1944,7 +1941,7 @@ _key_expansion_256b:
movaps %xmm2, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_256b)
+SYM_FUNC_END(_key_expansion_256b)
 
 /*
  * int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
@@ -2097,8 +2094,7 @@ ENDPROC(aesni_enc)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_enc1:
+SYM_FUNC_START_LOCAL(_aesni_enc1)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE # round 0
@@ -2141,7 +2137,7 @@ _aesni_enc1:
movaps 0x70(TKEYP), KEY
AESENCLAST KEY STATE
ret
-ENDPROC(_aesni_enc1)
+SYM_FUNC_END(_aesni_enc1)
 
 /*
  * _aesni_enc4:internal ABI
@@ -2161,8 +2157,7 @@ ENDPROC(_aesni_enc1)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_enc4:
+SYM_FUNC_START_LOCAL(_aesni_enc4)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE1# round 0
@@ -2250,7 +2245,7 @@ _aesni_enc4:
AESENCLAST KEY STATE3
AESENCLAST KEY STATE4
ret
-ENDPROC(_aesni_enc4)
+SYM_FUNC_END(_aesni_enc4)
 
 /*
  * void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
@@ -2289,8 +2284,7 @@ ENDPROC(aesni_dec)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_dec1:
+SYM_FUNC_START_LOCAL(_aesni_dec1)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE # round 0
@@ -2333,7 +2327,7 @@ _aesni_dec1:
movaps 0x70(TKEYP), KEY
AESDECLAST KEY STATE
ret
-ENDPROC(_aesni_dec1)
+SYM_FUNC_END(_aesni_dec1)
 
 /*
  * _aesni_dec4:internal ABI
@@ -2353,8 +2347,7 @@ ENDPROC(_aesni_dec1)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_dec4:
+SYM_FUNC_START_LOCAL(_aesni_dec4)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE1# round 0
@@ -2442,7 +2435,7 @@ _aesni_dec4:
AESDECLAST KEY STATE3
AESDECLAST KEY STATE4
ret
-ENDPROC(_aesni_dec4)
+SYM_FUNC_END(_aesni_dec4)
 
 /*
  * void aesni_ecb_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
@@ -2720,8 +2713,7 @@ ENDPROC(aesni_cbc_dec)
  * INC:== 1, in little endian
  * BSWAP_MASK == endian swapping mask
  */
-.align 4
-_aesni_inc_init:
+SYM_FUNC_START_LOCAL(_aesni_inc_init)
movaps .Lbswap_mask, BSW

Re: Broken userspace crypto in linux-4.1.18

2016-02-18 Thread Jiri Slaby
Hi,

On 02/18/2016, 09:17 AM, Stephan Mueller wrote:
> On Thursday, 18 February 2016 at 00:49:57, Thomas D. wrote:
>> Willy Tarreau wrote:
>>>> Is there a dependency I missed in 4.1? I don't really see anything that
>>>> could have gone wrong there.
>>>
>>> Or maybe Thomas can run a bisect ?
>>
>> I cannot follow. I did a bisect between 4.1.7 and 4.1.8 as I have written
>>
>> in my first mail. The bad commit was:
> 
> That breakage was expected and forcasted by Milan Broz a couple of days ago 
> on 
> this mailing list.

Could you be more specific? What is "this mailing list"? stable or
crypto? I cannot see anything from Milan on this mailing list aka stable.

> The referenced patch covered a bug that must have been 
> fixed but introduced a regression for backwards compatibility. Since then, 
> this regression was fixed.

By what? Please use SHAs, links, whatever.

thanks,
-- 
js
suse labs


Re: [PATCH] Re: Broken userspace crypto in linux-4.1.18

2016-02-24 Thread Jiri Slaby
On 02/21/2016, 05:40 PM, Milan Broz wrote:
> On 02/20/2016 03:33 PM, Thomas D. wrote:
>> Hi,
>>
>> FYI: v3.10.97, v3.14.61 and 3.18.27 are also affected.
>>
>> v4.3.6 works. Looks like the patch set is only compatible with >=linux-4.3.
>>
>> v3.12.54 works because it doesn't contain the patch in question.
> 
> Hi,
> 
> indeed, because whoever backported this patchset skipped two patches
> from the series (because the skcipher interface file was introduced later).
> 
> I tried to backport it (on 4.1.18 tree, see patch below) and it fixes the 
> problem
> for me.
> 
> Anyway, this is just a quick test of what the problem is; the patch needs proper review or
> backport from someone who knows the code better.
> 
> I will release stable cryptsetup soon that will work with new kernel even 
> without
> this patch but I would strongly recommend that stable kernels get these 
> compatibility
> backports as well to avoid breaking existing userspace.
> 
> Thanks,
> Milan
> -
> 
> This patch backports missing parts of crypto API compatibility changes:
> 
>   dd504589577d8e8e70f51f997ad487a4cb6c026f
>   crypto: algif_skcipher - Require setkey before accept(2)
> 
>   a0fa2d037129a9849918a92d91b79ed6c7bd2818
>   crypto: algif_skcipher - Add nokey compatibility path
> 
> --- crypto/algif_skcipher.c.orig  2016-02-21 16:44:27.172312990 +0100
> +++ crypto/algif_skcipher.c   2016-02-21 17:03:58.555785347 +0100
...
> @@ -790,24 +912,50 @@
>   af_alg_release_parent(sk);

This,

>  }
>  
> -static int skcipher_accept_parent(void *private, struct sock *sk)
> +static void skcipher_sock_destruct(struct sock *sk)
> +{
> + skcipher_sock_destruct_common(sk);
> + af_alg_release_parent(sk);

this,

> +}
> +
> +static void skcipher_release_parent_nokey(struct sock *sk)
> +{
> + struct alg_sock *ask = alg_sk(sk);
> +
> + if (!ask->refcnt) {
> + sock_put(ask->parent);
> + return;
> + }
> +
> + af_alg_release_parent(sk);

and this looks to me like a double release?

thanks,
-- 
js
suse labs


[PATCH] hw_random: core, sleep interruptible in read

2015-11-26 Thread Jiri Slaby
hwrng kthread can be waiting via hwrng_fillfn for some data from a rng
like virtio-rng:
hwrng   D 880093e17798 0   382  2 0x
...
Call Trace:
 [] wait_for_completion_killable+0x96/0x210
 [] virtio_read+0x57/0xf0 [virtio_rng]
 [] hwrng_fillfn+0x75/0x130
 [] kthread+0xf3/0x110

And when some user program tries to read the /dev node in this state,
we get:
rngdD 880093e17798 0   762  1 0x0004
...
Call Trace:
 [] mutex_lock_nested+0x15c/0x3e0
 [] rng_dev_read+0x6e/0x240
 [] __vfs_read+0x28/0xe0
 [] vfs_read+0x83/0x130

And this is indeed unkillable. So use mutex_lock_interruptible
instead of mutex_lock in rng_dev_read and exit immediately when
interrupted. And possibly return already read data, if any (as POSIX
allows).

Signed-off-by: Jiri Slaby 
Cc: Matt Mackall 
Cc: Herbert Xu 
Cc: 
---
 drivers/char/hw_random/core.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index a064237ff362..f003df162e09 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -238,7 +238,10 @@ static ssize_t rng_dev_read(struct file *filp, char __user 
*buf,
goto out;
}
 
-   mutex_lock(&reading_mutex);
+   if (mutex_lock_interruptible(&reading_mutex)) {
+   err = -EINTR;
+   goto out_put;
+   }
if (!data_avail) {
bytes_read = rng_get_data(rng, rng_buffer,
rng_buffer_size(),
@@ -288,6 +291,7 @@ out:
 
 out_unlock_reading:
mutex_unlock(&reading_mutex);
+out_put:
put_rng(rng);
goto out;
 }
-- 
2.6.3



Re: [PATCH] hw_random: core, sleep interruptible in read

2015-11-27 Thread Jiri Slaby
On 11/27/2015, 02:06 PM, Herbert Xu wrote:
> On Thu, Nov 26, 2015 at 08:56:29PM +0100, Jiri Slaby wrote:
>>
>> diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
>> index a064237ff362..f003df162e09 100644
>> --- a/drivers/char/hw_random/core.c
>> +++ b/drivers/char/hw_random/core.c
>> @@ -238,7 +238,10 @@ static ssize_t rng_dev_read(struct file *filp, char 
>> __user *buf,
>>  goto out;
>>  }
>>  
>> -mutex_lock(&reading_mutex);
>> +if (mutex_lock_interruptible(&reading_mutex)) {
>> +err = -EINTR;
> 
> Shouldn't this be ERESTARTSYS?

Yes, it can actually be ERESTARTSYS, given that the error value is returned
only if no data have been read yet.
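
To spell out why that is safe, a minimal sketch of the return-value logic
being relied on here (not the exact rng_dev_read() code; the helper name is
made up): the error from the interrupted lock only reaches userspace when
nothing has been copied yet, so the syscall can be restarted transparently;
otherwise the partial byte count wins, as POSIX allows.

#include <sys/types.h>

/*
 * Hedged sketch: 'copied' is the number of bytes already handed to
 * userspace, 'err' the last error (e.g. -ERESTARTSYS after an
 * interrupted mutex_lock_interruptible()).
 */
static ssize_t read_result(ssize_t copied, int err)
{
	if (copied)
		return copied;	/* partial data takes precedence over the error */
	return err;		/* safe to restart: nothing was consumed yet */
}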

thanks,
-- 
js
suse labs


[PATCH v2] hw_random: core, sleep interruptible in read

2015-11-27 Thread Jiri Slaby
hwrng kthread can be waiting via hwrng_fillfn for some data from a rng
like virtio-rng:
hwrng   D 880093e17798 0   382  2 0x
...
Call Trace:
 [] wait_for_completion_killable+0x96/0x210
 [] virtio_read+0x57/0xf0 [virtio_rng]
 [] hwrng_fillfn+0x75/0x130
 [] kthread+0xf3/0x110

And when some user program tries to read the /dev node in this state,
we get:
rngdD 880093e17798 0   762  1 0x0004
...
Call Trace:
 [] mutex_lock_nested+0x15c/0x3e0
 [] rng_dev_read+0x6e/0x240
 [] __vfs_read+0x28/0xe0
 [] vfs_read+0x83/0x130

And this is indeed unkillable. So use mutex_lock_interruptible
instead of mutex_lock in rng_dev_read and exit immediately when
interrupted. And possibly return already read data, if any (as POSIX
allows).

v2: use ERESTARTSYS instead of EINTR

Signed-off-by: Jiri Slaby 
Cc: Matt Mackall 
Cc: Herbert Xu 
Cc: 
---
 drivers/char/hw_random/core.c | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index a064237ff362..ca5a5325eb6f 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -238,7 +238,10 @@ static ssize_t rng_dev_read(struct file *filp, char __user 
*buf,
goto out;
}
 
-   mutex_lock(&reading_mutex);
+   if (mutex_lock_interruptible(&reading_mutex)) {
+   err = -ERESTARTSYS;
+   goto out_put;
+   }
if (!data_avail) {
bytes_read = rng_get_data(rng, rng_buffer,
rng_buffer_size(),
@@ -288,6 +291,7 @@ out:
 
 out_unlock_reading:
mutex_unlock(&reading_mutex);
+out_put:
put_rng(rng);
goto out;
 }
-- 
2.6.3



[PATCH v2 07/10] x86: assembly, annotate aliases

2017-03-20 Thread Jiri Slaby
_key_expansion_128 is an alias to _key_expansion_256a, __memcpy to
memcpy, xen_syscall32_target to xen_sysenter_target, and so on. Annotate
them all using the new SYM_FUNC_START_ALIAS, SYM_FUNC_START_LOCAL_ALIAS,
and SYM_FUNC_END_ALIAS. This will make the tools generating the
debuginfo happy.

Signed-off-by: Jiri Slaby 
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Reviewed-by: Juergen Gross  [xen parts]
Cc: 
Cc: 
---
 arch/x86/crypto/aesni-intel_asm.S | 5 ++---
 arch/x86/lib/memcpy_64.S  | 4 ++--
 arch/x86/lib/memmove_64.S | 4 ++--
 arch/x86/lib/memset_64.S  | 4 ++--
 arch/x86/xen/xen-asm_64.S | 4 ++--
 5 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_asm.S 
b/arch/x86/crypto/aesni-intel_asm.S
index 8913ea70323c..ea247bfed89d 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1744,8 +1744,7 @@ ENDPROC(aesni_gcm_enc)
 #endif
 
 
-.align 4
-_key_expansion_128:
+SYM_FUNC_START_LOCAL_ALIAS(_key_expansion_128)
 SYM_FUNC_START_LOCAL(_key_expansion_256a)
pshufd $0b, %xmm1, %xmm1
shufps $0b0001, %xmm0, %xmm4
@@ -1756,8 +1755,8 @@ SYM_FUNC_START_LOCAL(_key_expansion_256a)
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_128)
 ENDPROC(_key_expansion_256a)
+SYM_FUNC_END_ALIAS(_key_expansion_128)
 
 SYM_FUNC_START_LOCAL(_key_expansion_192a)
pshufd $0b01010101, %xmm1, %xmm1
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 779782f58324..2d6518b4dbd6 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -26,7 +26,7 @@
  * Output:
  * rax original destination
  */
-ENTRY(__memcpy)
+SYM_FUNC_START_ALIAS(__memcpy)
 ENTRY(memcpy)
ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
  "jmp memcpy_erms", X86_FEATURE_ERMS
@@ -40,7 +40,7 @@ ENTRY(memcpy)
rep movsb
ret
 ENDPROC(memcpy)
-ENDPROC(__memcpy)
+SYM_FUNC_END_ALIAS(__memcpy)
 EXPORT_SYMBOL(memcpy)
 EXPORT_SYMBOL(__memcpy)
 
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index 15de86cd15b0..d22af97e5b27 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -25,7 +25,7 @@
  */
 .weak memmove
 
-ENTRY(memmove)
+SYM_FUNC_START_ALIAS(memmove)
 ENTRY(__memmove)
 
/* Handle more 32 bytes in loop */
@@ -207,6 +207,6 @@ ENTRY(__memmove)
 13:
retq
 ENDPROC(__memmove)
-ENDPROC(memmove)
+SYM_FUNC_END_ALIAS(memmove)
 EXPORT_SYMBOL(__memmove)
 EXPORT_SYMBOL(memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 55b95db30a61..0d3a1d341e60 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -18,7 +18,7 @@
  *
  * rax   original destination
  */
-ENTRY(memset)
+SYM_FUNC_START_ALIAS(memset)
 ENTRY(__memset)
/*
 * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
@@ -42,8 +42,8 @@ ENTRY(__memset)
rep stosb
movq %r9,%rax
ret
-ENDPROC(memset)
 ENDPROC(__memset)
+SYM_FUNC_END_ALIAS(memset)
 EXPORT_SYMBOL(memset)
 EXPORT_SYMBOL(__memset)
 
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 2a968c087269..b3bf662a4f6a 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -117,13 +117,13 @@ ENDPROC(xen_sysenter_target)
 
 #else /* !CONFIG_IA32_EMULATION */
 
-ENTRY(xen_syscall32_target)
+SYM_FUNC_START_ALIAS(xen_syscall32_target)
 ENTRY(xen_sysenter_target)
lea 16(%rsp), %rsp  /* strip %rcx, %r11 */
mov $-ENOSYS, %rax
pushq $0
jmp hypercall_iret
-ENDPROC(xen_syscall32_target)
 ENDPROC(xen_sysenter_target)
+SYM_FUNC_END_ALIAS(xen_syscall32_target)
 
 #endif /* CONFIG_IA32_EMULATION */
-- 
2.12.0



[PATCH v2 06/10] x86: crypto, annotate local functions

2017-03-20 Thread Jiri Slaby
Use the newly added SYM_FUNC_START_LOCAL to annotate starts of all
functions which do not have ".globl" annotation, but their ends are
annotated by ENDPROC. This is needed to balance ENDPROC for tools that
are about to generate debuginfo.

Signed-off-by: Jiri Slaby 
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: 
Cc: 
---
 arch/x86/crypto/aesni-intel_asm.S| 29 ++--
 arch/x86/crypto/camellia-aesni-avx-asm_64.S  | 10 +-
 arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 10 +-
 arch/x86/crypto/cast5-avx-x86_64-asm_64.S|  4 ++--
 arch/x86/crypto/cast6-avx-x86_64-asm_64.S|  4 ++--
 arch/x86/crypto/ghash-clmulni-intel_asm.S|  2 +-
 arch/x86/crypto/serpent-avx-x86_64-asm_64.S  |  4 ++--
 arch/x86/crypto/serpent-avx2-asm_64.S|  4 ++--
 arch/x86/crypto/twofish-avx-x86_64-asm_64.S  |  4 ++--
 9 files changed, 31 insertions(+), 40 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_asm.S 
b/arch/x86/crypto/aesni-intel_asm.S
index 3c465184ff8a..8913ea70323c 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1746,7 +1746,7 @@ ENDPROC(aesni_gcm_enc)
 
 .align 4
 _key_expansion_128:
-_key_expansion_256a:
+SYM_FUNC_START_LOCAL(_key_expansion_256a)
pshufd $0b, %xmm1, %xmm1
shufps $0b0001, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1759,8 +1759,7 @@ _key_expansion_256a:
 ENDPROC(_key_expansion_128)
 ENDPROC(_key_expansion_256a)
 
-.align 4
-_key_expansion_192a:
+SYM_FUNC_START_LOCAL(_key_expansion_192a)
pshufd $0b01010101, %xmm1, %xmm1
shufps $0b0001, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1784,8 +1783,7 @@ _key_expansion_192a:
ret
 ENDPROC(_key_expansion_192a)
 
-.align 4
-_key_expansion_192b:
+SYM_FUNC_START_LOCAL(_key_expansion_192b)
pshufd $0b01010101, %xmm1, %xmm1
shufps $0b0001, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1804,8 +1802,7 @@ _key_expansion_192b:
ret
 ENDPROC(_key_expansion_192b)
 
-.align 4
-_key_expansion_256b:
+SYM_FUNC_START_LOCAL(_key_expansion_256b)
pshufd $0b10101010, %xmm1, %xmm1
shufps $0b0001, %xmm2, %xmm4
pxor %xmm4, %xmm2
@@ -1968,8 +1965,7 @@ ENDPROC(aesni_enc)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_enc1:
+SYM_FUNC_START_LOCAL(_aesni_enc1)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE # round 0
@@ -2032,8 +2028,7 @@ ENDPROC(_aesni_enc1)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_enc4:
+SYM_FUNC_START_LOCAL(_aesni_enc4)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE1# round 0
@@ -2160,8 +2155,7 @@ ENDPROC(aesni_dec)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_dec1:
+SYM_FUNC_START_LOCAL(_aesni_dec1)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE # round 0
@@ -2224,8 +2218,7 @@ ENDPROC(_aesni_dec1)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_dec4:
+SYM_FUNC_START_LOCAL(_aesni_dec4)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE1# round 0
@@ -2591,8 +2584,7 @@ ENDPROC(aesni_cbc_dec)
  * INC:== 1, in little endian
  * BSWAP_MASK == endian swapping mask
  */
-.align 4
-_aesni_inc_init:
+SYM_FUNC_START_LOCAL(_aesni_inc_init)
movaps .Lbswap_mask, BSWAP_MASK
movaps IV, CTR
PSHUFB_XMM BSWAP_MASK CTR
@@ -2617,8 +2609,7 @@ ENDPROC(_aesni_inc_init)
  * CTR:== output IV, in little endian
  * TCTR_LOW: == lower qword of CTR
  */
-.align 4
-_aesni_inc:
+SYM_FUNC_START_LOCAL(_aesni_inc)
paddq INC, CTR
add $1, TCTR_LOW
jnc .Linc_low
diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S 
b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
index f7c495e2863c..5964d98c4448 100644
--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
@@ -188,7 +188,7 @@
  * larger and would only be 0.5% faster (on sandy-bridge).
  */
 .align 8
-roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
+SYM_FUNC_START_LOCAL(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
  %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
  %rcx, (%r9));
@@ -196,7 +196,7 @@ 
roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
 
 .align 8
-roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
+SYM_FUNC_START_LOCAL(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
  %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11

[PATCH v3 27/29] x86_32: assembly, change all ENTRY to SYM_FUNC_START

2017-04-21 Thread Jiri Slaby
These are all functions which are invoked from elsewhere, so we annotate
them as global using the new SYM_FUNC_START (and their ENDPROCs by
SYM_FUNC_END).

Signed-off-by: Jiri Slaby 
Cc: "H. Peter Anvin" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: x...@kernel.org
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: "Rafael J. Wysocki" 
Cc: Len Brown 
Cc: Pavel Machek 
Cc: Rusty Russell 
Cc: Bill Metzenthen 
Cc: Matt Fleming 
Cc: Ard Biesheuvel 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: linux-crypto@vger.kernel.org
Cc: linux...@vger.kernel.org
Cc: lgu...@lists.ozlabs.org
Cc: linux-...@vger.kernel.org
Cc: xen-de...@lists.xenproject.org
---
 arch/x86/boot/compressed/efi_stub_32.S |   4 +-
 arch/x86/boot/compressed/head_32.S |  12 +--
 arch/x86/crypto/salsa20-i586-asm_32.S  |  12 +--
 arch/x86/crypto/serpent-sse2-i586-asm_32.S |   8 +-
 arch/x86/crypto/twofish-i586-asm_32.S  |   8 +-
 arch/x86/entry/entry_32.S  | 132 ++---
 arch/x86/kernel/acpi/wakeup_32.S   |   8 +-
 arch/x86/kernel/ftrace_32.S|  20 ++---
 arch/x86/kernel/head_32.S  |  16 ++--
 arch/x86/lguest/head_32.S  |  16 ++--
 arch/x86/lib/atomic64_386_32.S |   4 +-
 arch/x86/lib/atomic64_cx8_32.S |  32 +++
 arch/x86/lib/checksum_32.S |   8 +-
 arch/x86/math-emu/div_Xsig.S   |   4 +-
 arch/x86/math-emu/div_small.S  |   4 +-
 arch/x86/math-emu/mul_Xsig.S   |  12 +--
 arch/x86/math-emu/polynom_Xsig.S   |   4 +-
 arch/x86/math-emu/reg_norm.S   |   8 +-
 arch/x86/math-emu/reg_round.S  |   4 +-
 arch/x86/math-emu/reg_u_add.S  |   4 +-
 arch/x86/math-emu/reg_u_div.S  |   4 +-
 arch/x86/math-emu/reg_u_mul.S  |   4 +-
 arch/x86/math-emu/reg_u_sub.S  |   4 +-
 arch/x86/math-emu/round_Xsig.S |   8 +-
 arch/x86/math-emu/shr_Xsig.S   |   4 +-
 arch/x86/math-emu/wm_shrx.S|   8 +-
 arch/x86/math-emu/wm_sqrt.S|   4 +-
 arch/x86/platform/efi/efi_stub_32.S|   4 +-
 arch/x86/power/hibernate_asm_32.S  |   8 +-
 arch/x86/realmode/rm/trampoline_32.S   |   8 +-
 arch/x86/xen/xen-asm_32.S  |   8 +-
 drivers/lguest/x86/switcher_32.S   |   4 +-
 32 files changed, 194 insertions(+), 194 deletions(-)

diff --git a/arch/x86/boot/compressed/efi_stub_32.S 
b/arch/x86/boot/compressed/efi_stub_32.S
index a53440e81d52..4ceff75b0d2a 100644
--- a/arch/x86/boot/compressed/efi_stub_32.S
+++ b/arch/x86/boot/compressed/efi_stub_32.S
@@ -23,7 +23,7 @@
  */
 
 .text
-ENTRY(efi_call_phys)
+SYM_FUNC_START(efi_call_phys)
/*
 * 0. The function can only be called in Linux kernel. So CS has been
 * set to 0x0010, DS and SS have been set to 0x0018. In EFI, I found
@@ -76,7 +76,7 @@ ENTRY(efi_call_phys)
movlsaved_return_addr(%edx), %ecx
pushl   %ecx
ret
-ENDPROC(efi_call_phys)
+SYM_FUNC_END(efi_call_phys)
 .previous
 
 .data
diff --git a/arch/x86/boot/compressed/head_32.S 
b/arch/x86/boot/compressed/head_32.S
index d832ddb78ea2..86484c3788f8 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -60,7 +60,7 @@
.hidden _egot
 
__HEAD
-ENTRY(startup_32)
+SYM_FUNC_START(startup_32)
cld
/*
 * Test KEEP_SEGMENTS flag to see if the bootloader is asking
@@ -141,14 +141,14 @@ ENTRY(startup_32)
  */
lealrelocated(%ebx), %eax
jmp *%eax
-ENDPROC(startup_32)
+SYM_FUNC_END(startup_32)
 
 #ifdef CONFIG_EFI_STUB
 /*
  * We don't need the return address, so set up the stack so efi_main() can find
  * its arguments.
  */
-ENTRY(efi_pe_entry)
+SYM_FUNC_START(efi_pe_entry)
add $0x4, %esp
 
call1f
@@ -173,9 +173,9 @@ ENTRY(efi_pe_entry)
pushl   %eax
pushl   %ecx
jmp 2f  /* Skip efi_config initialization */
-ENDPROC(efi_pe_entry)
+SYM_FUNC_END(efi_pe_entry)
 
-ENTRY(efi32_stub_entry)
+SYM_FUNC_START(efi32_stub_entry)
add $0x4, %esp
popl%ecx
popl%edx
@@ -204,7 +204,7 @@ fail:
movlBP_code32_start(%esi), %eax
lealstartup_32(%eax), %eax
jmp *%eax
-ENDPROC(efi32_stub_entry)
+SYM_FUNC_END(efi32_stub_entry)
 #endif
 
.text
diff --git a/arch/x86/crypto/salsa20-i586-asm_32.S 
b/arch/x86/crypto/salsa20-i586-asm_32.S
index 329452b8f794..e9a6703056fc 100644
--- a/arch/x86/crypto/salsa20-i586-asm_32.S
+++ b/arch/x86/crypto/salsa20-i586-asm_32.S
@@ -7,7 +7,7 @@
 .text
 
 # enter salsa20_encrypt_bytes
-ENTRY(salsa20_encrypt_bytes)
+SYM_FUNC_START(salsa20_encrypt_bytes)
mov %esp,%eax
and $31,%eax
add $256,%eax
@@ -934,10 +934,10 @@ ENTRY(salsa20_encrypt_bytes)
add $64,%esi
# goto bytes

[PATCH v3 15/29] x86: assembly, annotate aliases

2017-04-21 Thread Jiri Slaby
_key_expansion_128 is an alias to _key_expansion_256a, __memcpy to
memcpy, xen_syscall32_target to xen_sysenter_target, and so on. Annotate
them all using the new SYM_FUNC_START_ALIAS, SYM_FUNC_START_LOCAL_ALIAS,
and SYM_FUNC_END_ALIAS. This will make the tools generating the
debuginfo happy.

Signed-off-by: Jiri Slaby 
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Reviewed-by: Juergen Gross  [xen parts]
Cc: 
Cc: 
---
 arch/x86/crypto/aesni-intel_asm.S | 5 ++---
 arch/x86/lib/memcpy_64.S  | 4 ++--
 arch/x86/lib/memmove_64.S | 4 ++--
 arch/x86/lib/memset_64.S  | 4 ++--
 arch/x86/xen/xen-asm_64.S | 4 ++--
 5 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_asm.S 
b/arch/x86/crypto/aesni-intel_asm.S
index da76ae01e791..3469670df832 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1744,8 +1744,7 @@ ENDPROC(aesni_gcm_enc)
 #endif
 
 
-.align 4
-_key_expansion_128:
+SYM_FUNC_START_LOCAL_ALIAS(_key_expansion_128)
 SYM_FUNC_START_LOCAL(_key_expansion_256a)
pshufd $0b, %xmm1, %xmm1
shufps $0b0001, %xmm0, %xmm4
@@ -1756,8 +1755,8 @@ SYM_FUNC_START_LOCAL(_key_expansion_256a)
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_128)
 SYM_FUNC_END(_key_expansion_256a)
+SYM_FUNC_END_ALIAS(_key_expansion_128)
 
 SYM_FUNC_START_LOCAL(_key_expansion_192a)
pshufd $0b01010101, %xmm1, %xmm1
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 9a53a06e5a3e..4911b1c61aa8 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -26,7 +26,7 @@
  * Output:
  * rax original destination
  */
-ENTRY(__memcpy)
+SYM_FUNC_START_ALIAS(__memcpy)
 ENTRY(memcpy)
ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
  "jmp memcpy_erms", X86_FEATURE_ERMS
@@ -40,7 +40,7 @@ ENTRY(memcpy)
rep movsb
ret
 ENDPROC(memcpy)
-ENDPROC(__memcpy)
+SYM_FUNC_END_ALIAS(__memcpy)
 EXPORT_SYMBOL(memcpy)
 EXPORT_SYMBOL(__memcpy)
 
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index 15de86cd15b0..d22af97e5b27 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -25,7 +25,7 @@
  */
 .weak memmove
 
-ENTRY(memmove)
+SYM_FUNC_START_ALIAS(memmove)
 ENTRY(__memmove)
 
/* Handle more 32 bytes in loop */
@@ -207,6 +207,6 @@ ENTRY(__memmove)
 13:
retq
 ENDPROC(__memmove)
-ENDPROC(memmove)
+SYM_FUNC_END_ALIAS(memmove)
 EXPORT_SYMBOL(__memmove)
 EXPORT_SYMBOL(memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 55b95db30a61..0d3a1d341e60 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -18,7 +18,7 @@
  *
  * rax   original destination
  */
-ENTRY(memset)
+SYM_FUNC_START_ALIAS(memset)
 ENTRY(__memset)
/*
 * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
@@ -42,8 +42,8 @@ ENTRY(__memset)
rep stosb
movq %r9,%rax
ret
-ENDPROC(memset)
 ENDPROC(__memset)
+SYM_FUNC_END_ALIAS(memset)
 EXPORT_SYMBOL(memset)
 EXPORT_SYMBOL(__memset)
 
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index d617bea76039..e1174171ab57 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -117,13 +117,13 @@ ENDPROC(xen_sysenter_target)
 
 #else /* !CONFIG_IA32_EMULATION */
 
-ENTRY(xen_syscall32_target)
+SYM_FUNC_START_ALIAS(xen_syscall32_target)
 ENTRY(xen_sysenter_target)
lea 16(%rsp), %rsp  /* strip %rcx, %r11 */
mov $-ENOSYS, %rax
pushq $0
jmp hypercall_iret
-ENDPROC(xen_syscall32_target)
 ENDPROC(xen_sysenter_target)
+SYM_FUNC_END_ALIAS(xen_syscall32_target)
 
 #endif /* CONFIG_IA32_EMULATION */
-- 
2.12.2



[PATCH v3 26/29] x86_64: assembly, change all ENTRY to SYM_FUNC_START

2017-04-21 Thread Jiri Slaby
These are all functions which are invoked from elsewhere, so we annotate
them as global using the new SYM_FUNC_START (and their ENDPROCs by
SYM_FUNC_END).

And make sure ENTRY/ENDPROC is not defined on X86_64.

Signed-off-by: Jiri Slaby 
Cc: "H. Peter Anvin" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: x...@kernel.org
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: "Rafael J. Wysocki" 
Cc: Len Brown 
Cc: Pavel Machek 
Cc: Bill Metzenthen 
Cc: Matt Fleming 
Cc: Ard Biesheuvel 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: linux-crypto@vger.kernel.org
Cc: linux...@vger.kernel.org
Cc: linux-...@vger.kernel.org
Cc: xen-de...@lists.xenproject.org
Cc: "David S. Miller" 
Cc: Alexey Kuznetsov 
Cc: James Morris 
Cc: Hideaki YOSHIFUJI 
Cc: Patrick McHardy 
Cc: net...@vger.kernel.org
---
 arch/x86/boot/compressed/efi_thunk_64.S|  4 +-
 arch/x86/boot/compressed/head_64.S | 20 
 arch/x86/boot/copy.S   | 16 +++---
 arch/x86/boot/pmjump.S |  4 +-
 arch/x86/crypto/aes-i586-asm_32.S  |  8 +--
 arch/x86/crypto/aes-x86_64-asm_64.S|  4 +-
 arch/x86/crypto/aes_ctrby8_avx-x86_64.S| 12 ++---
 arch/x86/crypto/aesni-intel_asm.S  | 44 
 arch/x86/crypto/aesni-intel_avx-x86_64.S   | 24 -
 arch/x86/crypto/blowfish-x86_64-asm_64.S   | 16 +++---
 arch/x86/crypto/camellia-aesni-avx-asm_64.S| 24 -
 arch/x86/crypto/camellia-aesni-avx2-asm_64.S   | 24 -
 arch/x86/crypto/camellia-x86_64-asm_64.S   | 16 +++---
 arch/x86/crypto/cast5-avx-x86_64-asm_64.S  | 16 +++---
 arch/x86/crypto/cast6-avx-x86_64-asm_64.S  | 24 -
 arch/x86/crypto/chacha20-avx2-x86_64.S |  4 +-
 arch/x86/crypto/chacha20-ssse3-x86_64.S|  8 +--
 arch/x86/crypto/crc32-pclmul_asm.S |  4 +-
 arch/x86/crypto/crc32c-pcl-intel-asm_64.S  |  4 +-
 arch/x86/crypto/crct10dif-pcl-asm_64.S |  4 +-
 arch/x86/crypto/des3_ede-asm_64.S  |  8 +--
 arch/x86/crypto/ghash-clmulni-intel_asm.S  |  8 +--
 arch/x86/crypto/poly1305-avx2-x86_64.S |  4 +-
 arch/x86/crypto/poly1305-sse2-x86_64.S |  8 +--
 arch/x86/crypto/salsa20-x86_64-asm_64.S| 12 ++---
 arch/x86/crypto/serpent-avx-x86_64-asm_64.S| 24 -
 arch/x86/crypto/serpent-avx2-asm_64.S  | 24 -
 arch/x86/crypto/serpent-sse2-x86_64-asm_64.S   |  8 +--
 arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S   |  8 +--
 arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S  |  4 +-
 arch/x86/crypto/sha1-mb/sha1_x8_avx2.S |  4 +-
 arch/x86/crypto/sha1_avx2_x86_64_asm.S |  4 +-
 arch/x86/crypto/sha1_ni_asm.S  |  4 +-
 arch/x86/crypto/sha1_ssse3_asm.S   |  4 +-
 arch/x86/crypto/sha256-avx-asm.S   |  4 +-
 arch/x86/crypto/sha256-avx2-asm.S  |  4 +-
 .../crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S|  8 +--
 .../crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S   |  4 +-
 arch/x86/crypto/sha256-mb/sha256_x8_avx2.S |  4 +-
 arch/x86/crypto/sha256-ssse3-asm.S |  4 +-
 arch/x86/crypto/sha256_ni_asm.S|  4 +-
 arch/x86/crypto/sha512-avx-asm.S   |  4 +-
 arch/x86/crypto/sha512-avx2-asm.S  |  4 +-
 .../crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S|  8 +--
 .../crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S   |  4 +-
 arch/x86/crypto/sha512-mb/sha512_x4_avx2.S |  4 +-
 arch/x86/crypto/sha512-ssse3-asm.S |  4 +-
 arch/x86/crypto/twofish-avx-x86_64-asm_64.S| 24 -
 arch/x86/crypto/twofish-x86_64-asm_64-3way.S   |  8 +--
 arch/x86/crypto/twofish-x86_64-asm_64.S|  8 +--
 arch/x86/entry/entry_64.S  | 58 +++---
 arch/x86/entry/entry_64_compat.S   | 16 +++---
 arch/x86/kernel/acpi/wakeup_64.S   |  8 +--
 arch/x86/kernel/ftrace_64.S| 24 -
 arch/x86/kernel/head_64.S  | 16 +++---
 arch/x86/lib/checksum_32.S |  8 +--
 arch/x86/lib/clear_page_64.S   | 12 ++---
 arch/x86/lib/cmpxchg16b_emu.S  |  4 +-
 arch/x86/lib/cmpxchg8b_emu.S   |  4 +-
 arch/x86/lib/copy_page_64.S|  4 +-
 arch/x86/lib/copy_user_64.S| 16 +++---
 arch/x86/lib/csum-copy_64.S|  4 +-
 arch/x86/lib/getuser.S | 16 +++---
 arch/x86/lib/hweight.S |  8 +--
 arch/x86/lib/iomap_copy_64.S   |  4 +-
 arch/x86/lib/memcpy_64.S   |  4 +-
 arch/x86/lib/memmove_64.S  

[PATCH v3 13/29] x86: crypto, annotate local functions

2017-04-21 Thread Jiri Slaby
Use the newly added SYM_FUNC_START_LOCAL to annotate starts of all
functions which do not have ".globl" annotation, but their ends are
annotated by ENDPROC. This is needed to balance ENDPROC for tools that
are about to generate debuginfo.

We also convert their ENDPROCs to the new SYM_FUNC_END.

Signed-off-by: Jiri Slaby 
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: 
Cc: 
---
 arch/x86/crypto/aesni-intel_asm.S| 49 
 arch/x86/crypto/camellia-aesni-avx-asm_64.S  | 20 ++--
 arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 20 ++--
 arch/x86/crypto/cast5-avx-x86_64-asm_64.S|  8 ++---
 arch/x86/crypto/cast6-avx-x86_64-asm_64.S|  8 ++---
 arch/x86/crypto/ghash-clmulni-intel_asm.S|  4 +--
 arch/x86/crypto/serpent-avx-x86_64-asm_64.S  |  8 ++---
 arch/x86/crypto/serpent-avx2-asm_64.S|  8 ++---
 arch/x86/crypto/twofish-avx-x86_64-asm_64.S  |  8 ++---
 9 files changed, 62 insertions(+), 71 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_asm.S 
b/arch/x86/crypto/aesni-intel_asm.S
index 3c465184ff8a..da76ae01e791 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1746,7 +1746,7 @@ ENDPROC(aesni_gcm_enc)
 
 .align 4
 _key_expansion_128:
-_key_expansion_256a:
+SYM_FUNC_START_LOCAL(_key_expansion_256a)
pshufd $0b, %xmm1, %xmm1
shufps $0b0001, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1757,10 +1757,9 @@ _key_expansion_256a:
add $0x10, TKEYP
ret
 ENDPROC(_key_expansion_128)
-ENDPROC(_key_expansion_256a)
+SYM_FUNC_END(_key_expansion_256a)
 
-.align 4
-_key_expansion_192a:
+SYM_FUNC_START_LOCAL(_key_expansion_192a)
pshufd $0b01010101, %xmm1, %xmm1
shufps $0b0001, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1782,10 +1781,9 @@ _key_expansion_192a:
movaps %xmm1, 0x10(TKEYP)
add $0x20, TKEYP
ret
-ENDPROC(_key_expansion_192a)
+SYM_FUNC_END(_key_expansion_192a)
 
-.align 4
-_key_expansion_192b:
+SYM_FUNC_START_LOCAL(_key_expansion_192b)
pshufd $0b01010101, %xmm1, %xmm1
shufps $0b0001, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1802,10 +1800,9 @@ _key_expansion_192b:
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_192b)
+SYM_FUNC_END(_key_expansion_192b)
 
-.align 4
-_key_expansion_256b:
+SYM_FUNC_START_LOCAL(_key_expansion_256b)
pshufd $0b10101010, %xmm1, %xmm1
shufps $0b0001, %xmm2, %xmm4
pxor %xmm4, %xmm2
@@ -1815,7 +1812,7 @@ _key_expansion_256b:
movaps %xmm2, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_256b)
+SYM_FUNC_END(_key_expansion_256b)
 
 /*
  * int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
@@ -1968,8 +1965,7 @@ ENDPROC(aesni_enc)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_enc1:
+SYM_FUNC_START_LOCAL(_aesni_enc1)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE # round 0
@@ -2012,7 +2008,7 @@ _aesni_enc1:
movaps 0x70(TKEYP), KEY
AESENCLAST KEY STATE
ret
-ENDPROC(_aesni_enc1)
+SYM_FUNC_END(_aesni_enc1)
 
 /*
  * _aesni_enc4:internal ABI
@@ -2032,8 +2028,7 @@ ENDPROC(_aesni_enc1)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_enc4:
+SYM_FUNC_START_LOCAL(_aesni_enc4)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE1# round 0
@@ -2121,7 +2116,7 @@ _aesni_enc4:
AESENCLAST KEY STATE3
AESENCLAST KEY STATE4
ret
-ENDPROC(_aesni_enc4)
+SYM_FUNC_END(_aesni_enc4)
 
 /*
  * void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
@@ -2160,8 +2155,7 @@ ENDPROC(aesni_dec)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_dec1:
+SYM_FUNC_START_LOCAL(_aesni_dec1)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE # round 0
@@ -2204,7 +2198,7 @@ _aesni_dec1:
movaps 0x70(TKEYP), KEY
AESDECLAST KEY STATE
ret
-ENDPROC(_aesni_dec1)
+SYM_FUNC_END(_aesni_dec1)
 
 /*
  * _aesni_dec4:internal ABI
@@ -2224,8 +2218,7 @@ ENDPROC(_aesni_dec1)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_dec4:
+SYM_FUNC_START_LOCAL(_aesni_dec4)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE1# round 0
@@ -2313,7 +2306,7 @@ _aesni_dec4:
AESDECLAST KEY STATE3
AESDECLAST KEY STATE4
ret
-ENDPROC(_aesni_dec4)
+SYM_FUNC_END(_aesni_dec4)
 
 /*
  * void aesni_ecb_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
@@ -2591,8 +2584,7 @@ ENDPROC(aesni_cbc_dec)
  * INC:== 1, in little endian
  * BSWAP_MASK == endian swapping mask
  */
-.align 4
-_aesni_inc_init:
+SYM_FUNC_START_LOCAL(_aesni_inc_init)
movaps .Lbswap_mask, BSWAP_MASK
mov

Re: [PATCH v8 06/28] x86/asm/crypto: annotate local functions

2019-09-05 Thread Jiri Slaby
On 17. 08. 19, 11:17, Borislav Petkov wrote:
> On Thu, Aug 08, 2019 at 12:38:32PM +0200, Jiri Slaby wrote:
>> Use the newly added SYM_FUNC_START_LOCAL to annotate starts of all
>> functions which do not have ".globl" annotation, but their ends are
>> annotated by ENDPROC. This is needed to balance ENDPROC for tools that
>> generate debuginfo.
>>
>> To be symmetric, we also convert their ENDPROCs to the new SYM_FUNC_END.
> 
> All those functions look like they could be made local symbols by
> prepending their names with ".L" so that they disappear from the
> vmlinux symtable too.

In that case, objtool won't see them and won't generate ORC info for
them. That means not only do they disappear from stacktraces, but it also
becomes impossible to unwind below them. Note that objtool currently reads
symtables and looks for STT_FUNC entries in them.

We can make them local (.L*) when the patchset is applied and all
functions are annotated properly -- then we won't need entries in the
global symtable. We can store the info in some private table and delete
it after objtool is done with the object.
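
As a standalone illustration of what such tools see (a hedged sketch, not
objtool's code), the following lists the STT_FUNC entries in an object's
symbol table via libelf; assembler-local .L* labels never appear there at
all, which is exactly why we keep these functions as annotated symbols for
now.

/* Hedged sketch, not objtool: list STT_FUNC symbols of an ELF object.
 * Build with: cc listfuncs.c -lelf  (file name arbitrary) */
#include <err.h>
#include <fcntl.h>
#include <gelf.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	if (argc < 2)
		errx(1, "usage: %s <object.o>", argv[0]);
	if (elf_version(EV_CURRENT) == EV_NONE)
		errx(1, "libelf initialization failed");

	int fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		err(1, "%s", argv[1]);

	Elf *elf = elf_begin(fd, ELF_C_READ, NULL);
	if (!elf)
		errx(1, "elf_begin failed");

	Elf_Scn *scn = NULL;
	GElf_Shdr shdr;

	while ((scn = elf_nextscn(elf, scn))) {
		if (!gelf_getshdr(scn, &shdr) || shdr.sh_type != SHT_SYMTAB)
			continue;

		Elf_Data *data = elf_getdata(scn, NULL);
		size_t nsyms = shdr.sh_size / shdr.sh_entsize;

		for (size_t i = 0; i < nsyms; i++) {
			GElf_Sym sym;

			if (!gelf_getsym(data, i, &sym))
				continue;
			if (GELF_ST_TYPE(sym.st_info) != STT_FUNC)
				continue;
			/* .L* assembler labels are never emitted here */
			printf("%s\n", elf_strptr(elf, shdr.sh_link, sym.st_name));
		}
	}

	elf_end(elf);
	close(fd);
	return 0;
}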

thanks,
-- 
js
suse labs


[PATCH v9 24/28] x86_64/asm: Change all ENTRY+ENDPROC to SYM_FUNC_*

2019-10-11 Thread Jiri Slaby
These are all functions which are invoked from elsewhere, so annotate
them as global using the new SYM_FUNC_START. And their ENDPROCs by
SYM_FUNC_END.

And make sure ENTRY/ENDPROC is not defined on X86_64, given these were
the last users.

Signed-off-by: Jiri Slaby 
Reviewed-by: Rafael J. Wysocki  [hibernate]
Reviewed-by: Boris Ostrovsky  [xen bits]
Cc: "H. Peter Anvin" 
Cc: Borislav Petkov 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: x...@kernel.org
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: "Rafael J. Wysocki" 
Cc: Len Brown 
Cc: Pavel Machek 
Cc: Matt Fleming 
Cc: Ard Biesheuvel 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: linux-crypto@vger.kernel.org
Cc: linux...@vger.kernel.org
Cc: linux-...@vger.kernel.org
Cc: xen-de...@lists.xenproject.org
---
 arch/x86/boot/compressed/efi_thunk_64.S  |  4 +-
 arch/x86/boot/compressed/head_64.S   | 16 +++---
 arch/x86/boot/compressed/mem_encrypt.S   |  8 +--
 arch/x86/crypto/aegis128-aesni-asm.S | 28 -
 arch/x86/crypto/aes_ctrby8_avx-x86_64.S  | 12 ++--
 arch/x86/crypto/aesni-intel_asm.S| 60 ++--
 arch/x86/crypto/aesni-intel_avx-x86_64.S | 32 +--
 arch/x86/crypto/blowfish-x86_64-asm_64.S | 16 +++---
 arch/x86/crypto/camellia-aesni-avx-asm_64.S  | 24 
 arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 24 
 arch/x86/crypto/camellia-x86_64-asm_64.S | 16 +++---
 arch/x86/crypto/cast5-avx-x86_64-asm_64.S| 16 +++---
 arch/x86/crypto/cast6-avx-x86_64-asm_64.S| 24 
 arch/x86/crypto/chacha-avx2-x86_64.S | 12 ++--
 arch/x86/crypto/chacha-avx512vl-x86_64.S | 12 ++--
 arch/x86/crypto/chacha-ssse3-x86_64.S| 12 ++--
 arch/x86/crypto/crc32-pclmul_asm.S   |  4 +-
 arch/x86/crypto/crc32c-pcl-intel-asm_64.S|  4 +-
 arch/x86/crypto/crct10dif-pcl-asm_64.S   |  4 +-
 arch/x86/crypto/des3_ede-asm_64.S|  8 +--
 arch/x86/crypto/ghash-clmulni-intel_asm.S|  8 +--
 arch/x86/crypto/nh-avx2-x86_64.S |  4 +-
 arch/x86/crypto/nh-sse2-x86_64.S |  4 +-
 arch/x86/crypto/poly1305-avx2-x86_64.S   |  4 +-
 arch/x86/crypto/poly1305-sse2-x86_64.S   |  8 +--
 arch/x86/crypto/serpent-avx-x86_64-asm_64.S  | 24 
 arch/x86/crypto/serpent-avx2-asm_64.S| 24 
 arch/x86/crypto/serpent-sse2-x86_64-asm_64.S |  8 +--
 arch/x86/crypto/sha1_avx2_x86_64_asm.S   |  4 +-
 arch/x86/crypto/sha1_ni_asm.S|  4 +-
 arch/x86/crypto/sha1_ssse3_asm.S |  4 +-
 arch/x86/crypto/sha256-avx-asm.S |  4 +-
 arch/x86/crypto/sha256-avx2-asm.S|  4 +-
 arch/x86/crypto/sha256-ssse3-asm.S   |  4 +-
 arch/x86/crypto/sha256_ni_asm.S  |  4 +-
 arch/x86/crypto/sha512-avx-asm.S |  4 +-
 arch/x86/crypto/sha512-avx2-asm.S|  4 +-
 arch/x86/crypto/sha512-ssse3-asm.S   |  4 +-
 arch/x86/crypto/twofish-avx-x86_64-asm_64.S  | 24 
 arch/x86/crypto/twofish-x86_64-asm_64-3way.S |  8 +--
 arch/x86/crypto/twofish-x86_64-asm_64.S  |  8 +--
 arch/x86/entry/entry_64.S| 10 ++--
 arch/x86/entry/entry_64_compat.S |  4 +-
 arch/x86/kernel/acpi/wakeup_64.S |  8 +--
 arch/x86/kernel/ftrace_64.S  | 20 +++
 arch/x86/kernel/irqflags.S   |  8 +--
 arch/x86/kvm/vmx/vmenter.S   | 12 ++--
 arch/x86/lib/checksum_32.S   |  8 +--
 arch/x86/lib/clear_page_64.S | 12 ++--
 arch/x86/lib/cmpxchg16b_emu.S|  4 +-
 arch/x86/lib/cmpxchg8b_emu.S |  4 +-
 arch/x86/lib/copy_page_64.S  |  4 +-
 arch/x86/lib/copy_user_64.S  | 16 +++---
 arch/x86/lib/csum-copy_64.S  |  4 +-
 arch/x86/lib/getuser.S   | 16 +++---
 arch/x86/lib/hweight.S   |  8 +--
 arch/x86/lib/iomap_copy_64.S |  4 +-
 arch/x86/lib/memcpy_64.S |  4 +-
 arch/x86/lib/memmove_64.S|  4 +-
 arch/x86/lib/memset_64.S |  4 +-
 arch/x86/lib/msr-reg.S   |  8 +--
 arch/x86/lib/putuser.S   | 16 +++---
 arch/x86/lib/retpoline.S |  4 +-
 arch/x86/mm/mem_encrypt_boot.S   |  8 +--
 arch/x86/platform/efi/efi_stub_64.S  |  4 +-
 arch/x86/platform/efi/efi_thunk_64.S |  4 +-
 arch/x86/power/hibernate_asm_64.S|  8 +--
 arch/x86/xen/xen-asm.S   | 28 -
 arch/x86/xen/xen-asm_64.S| 16 +++---
 include/linux/linkage.h  |  4 ++
 70 files changed, 379 insertions(+), 375 deletions(-)

diff --git a/arch/x86/boot/compressed/efi_thunk_64.S 
b/arch/x86/boot/compressed/efi_thunk_64.S
index 31312070db22..593913692d16 100644
--- a/arch/x86/boot/compressed/efi_thunk_64.S
+++ b/arch/x86/boot/compr

[PATCH v9 27/28] x86_32/asm: Change all ENTRY+ENDPROC to SYM_FUNC_*

2019-10-11 Thread Jiri Slaby
These are all functions which are invoked from elsewhere, so they are
annotated as global using the new SYM_FUNC_START. And their ENDPROCs by
SYM_FUNC_END.

Now, ENTRY/ENDPROC can be forced to be undefined on X86, so do so.

Signed-off-by: Jiri Slaby 
Cc: "H. Peter Anvin" 
Cc: Borislav Petkov 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: x...@kernel.org
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Bill Metzenthen 
Cc: Matt Fleming 
Cc: Ard Biesheuvel 
Cc: linux-crypto@vger.kernel.org
Cc: linux-...@vger.kernel.org
---
 arch/x86/boot/compressed/efi_stub_32.S |  4 +--
 arch/x86/boot/compressed/head_32.S | 12 
 arch/x86/crypto/serpent-sse2-i586-asm_32.S |  8 +++---
 arch/x86/crypto/twofish-i586-asm_32.S  |  8 +++---
 arch/x86/entry/entry_32.S  | 24 
 arch/x86/kernel/head_32.S  | 16 +--
 arch/x86/lib/atomic64_386_32.S |  4 +--
 arch/x86/lib/atomic64_cx8_32.S | 32 +++---
 arch/x86/lib/checksum_32.S |  8 +++---
 arch/x86/math-emu/div_Xsig.S   |  4 +--
 arch/x86/math-emu/div_small.S  |  4 +--
 arch/x86/math-emu/mul_Xsig.S   | 12 
 arch/x86/math-emu/polynom_Xsig.S   |  4 +--
 arch/x86/math-emu/reg_norm.S   |  8 +++---
 arch/x86/math-emu/reg_round.S  |  4 +--
 arch/x86/math-emu/reg_u_add.S  |  4 +--
 arch/x86/math-emu/reg_u_div.S  |  4 +--
 arch/x86/math-emu/reg_u_mul.S  |  4 +--
 arch/x86/math-emu/reg_u_sub.S  |  4 +--
 arch/x86/math-emu/round_Xsig.S |  8 +++---
 arch/x86/math-emu/shr_Xsig.S   |  4 +--
 arch/x86/math-emu/wm_shrx.S|  8 +++---
 arch/x86/math-emu/wm_sqrt.S|  4 +--
 arch/x86/platform/efi/efi_stub_32.S|  4 +--
 arch/x86/power/hibernate_asm_32.S  |  8 +++---
 include/linux/linkage.h|  8 ++
 26 files changed, 104 insertions(+), 108 deletions(-)

diff --git a/arch/x86/boot/compressed/efi_stub_32.S 
b/arch/x86/boot/compressed/efi_stub_32.S
index 257e341fd2c8..ed6c351d34ed 100644
--- a/arch/x86/boot/compressed/efi_stub_32.S
+++ b/arch/x86/boot/compressed/efi_stub_32.S
@@ -24,7 +24,7 @@
  */
 
 .text
-ENTRY(efi_call_phys)
+SYM_FUNC_START(efi_call_phys)
/*
 * 0. The function can only be called in Linux kernel. So CS has been
 * set to 0x0010, DS and SS have been set to 0x0018. In EFI, I found
@@ -77,7 +77,7 @@ ENTRY(efi_call_phys)
movlsaved_return_addr(%edx), %ecx
pushl   %ecx
ret
-ENDPROC(efi_call_phys)
+SYM_FUNC_END(efi_call_phys)
 .previous
 
 .data
diff --git a/arch/x86/boot/compressed/head_32.S 
b/arch/x86/boot/compressed/head_32.S
index f9e2a80bd699..f2dfd6d083ef 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -61,7 +61,7 @@
.hidden _egot
 
__HEAD
-ENTRY(startup_32)
+SYM_FUNC_START(startup_32)
cld
/*
 * Test KEEP_SEGMENTS flag to see if the bootloader is asking
@@ -142,14 +142,14 @@ ENTRY(startup_32)
  */
leal.Lrelocated(%ebx), %eax
jmp *%eax
-ENDPROC(startup_32)
+SYM_FUNC_END(startup_32)
 
 #ifdef CONFIG_EFI_STUB
 /*
  * We don't need the return address, so set up the stack so efi_main() can find
  * its arguments.
  */
-ENTRY(efi_pe_entry)
+SYM_FUNC_START(efi_pe_entry)
add $0x4, %esp
 
call1f
@@ -174,9 +174,9 @@ ENTRY(efi_pe_entry)
pushl   %eax
pushl   %ecx
jmp 2f  /* Skip efi_config initialization */
-ENDPROC(efi_pe_entry)
+SYM_FUNC_END(efi_pe_entry)
 
-ENTRY(efi32_stub_entry)
+SYM_FUNC_START(efi32_stub_entry)
add $0x4, %esp
popl%ecx
popl%edx
@@ -205,7 +205,7 @@ fail:
movlBP_code32_start(%esi), %eax
lealstartup_32(%eax), %eax
jmp *%eax
-ENDPROC(efi32_stub_entry)
+SYM_FUNC_END(efi32_stub_entry)
 #endif
 
.text
diff --git a/arch/x86/crypto/serpent-sse2-i586-asm_32.S 
b/arch/x86/crypto/serpent-sse2-i586-asm_32.S
index e5c4a4690ca9..6379b99cb722 100644
--- a/arch/x86/crypto/serpent-sse2-i586-asm_32.S
+++ b/arch/x86/crypto/serpent-sse2-i586-asm_32.S
@@ -497,7 +497,7 @@
pxor t0,x3; \
movdqu x3,  (3*4*4)(out);
 
-ENTRY(__serpent_enc_blk_4way)
+SYM_FUNC_START(__serpent_enc_blk_4way)
/* input:
 *  arg_ctx(%esp): ctx, CTX
 *  arg_dst(%esp): dst
@@ -559,9 +559,9 @@ ENTRY(__serpent_enc_blk_4way)
xor_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE);
 
ret;
-ENDPROC(__serpent_enc_blk_4way)
+SYM_FUNC_END(__serpent_enc_blk_4way)
 
-ENTRY(serpent_dec_blk_4way)
+SYM_FUNC_START(serpent_dec_blk_4way)
/* input:
 *  arg_ctx(%esp): ctx, CTX
 *  arg_dst(%esp): dst
@@ -613,4 +613,4 @@ ENTRY(serpent_dec_blk_4way)
write_blocks(

[PATCH v9 06/28] x86/asm/crypto: Annotate local functions

2019-10-11 Thread Jiri Slaby
Use the newly added SYM_FUNC_START_LOCAL to annotate starts of all
functions which do not have ".globl" annotation, but their ends are
annotated by ENDPROC. This is needed to balance ENDPROC for tools that
generate debuginfo.

These functions are not prepended with ".L" as they might appear in
call traces and they would not be visible after such a change.

To be symmetric, the functions' ENDPROCs are converted to the new
SYM_FUNC_END.

Signed-off-by: Jiri Slaby 
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: Borislav Petkov 
Cc: "H. Peter Anvin" 
Cc: 
Cc: 
---
 arch/x86/crypto/aegis128-aesni-asm.S |  8 ++--
 arch/x86/crypto/aesni-intel_asm.S| 49 
 arch/x86/crypto/camellia-aesni-avx-asm_64.S  | 20 
 arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 20 
 arch/x86/crypto/cast5-avx-x86_64-asm_64.S|  8 ++--
 arch/x86/crypto/cast6-avx-x86_64-asm_64.S|  8 ++--
 arch/x86/crypto/chacha-ssse3-x86_64.S|  4 +-
 arch/x86/crypto/ghash-clmulni-intel_asm.S|  4 +-
 arch/x86/crypto/serpent-avx-x86_64-asm_64.S  |  8 ++--
 arch/x86/crypto/serpent-avx2-asm_64.S|  8 ++--
 arch/x86/crypto/twofish-avx-x86_64-asm_64.S  |  8 ++--
 11 files changed, 68 insertions(+), 77 deletions(-)

diff --git a/arch/x86/crypto/aegis128-aesni-asm.S 
b/arch/x86/crypto/aegis128-aesni-asm.S
index 4434607e366d..b7026fdef4ff 100644
--- a/arch/x86/crypto/aegis128-aesni-asm.S
+++ b/arch/x86/crypto/aegis128-aesni-asm.S
@@ -71,7 +71,7 @@
  *   %r8
  *   %r9
  */
-__load_partial:
+SYM_FUNC_START_LOCAL(__load_partial)
xor %r9d, %r9d
pxor MSG, MSG
 
@@ -123,7 +123,7 @@ __load_partial:
 
 .Lld_partial_8:
ret
-ENDPROC(__load_partial)
+SYM_FUNC_END(__load_partial)
 
 /*
  * __store_partial: internal ABI
@@ -137,7 +137,7 @@ ENDPROC(__load_partial)
  *   %r9
  *   %r10
  */
-__store_partial:
+SYM_FUNC_START_LOCAL(__store_partial)
mov LEN, %r8
mov DST, %r9
 
@@ -181,7 +181,7 @@ __store_partial:
 
 .Lst_partial_1:
ret
-ENDPROC(__store_partial)
+SYM_FUNC_END(__store_partial)
 
 /*
  * void crypto_aegis128_aesni_init(void *state, const void *key, const void 
*iv);
diff --git a/arch/x86/crypto/aesni-intel_asm.S 
b/arch/x86/crypto/aesni-intel_asm.S
index e40bdf024ba7..e0a5fb462a0a 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1759,7 +1759,7 @@ ENDPROC(aesni_gcm_finalize)
 
 .align 4
 _key_expansion_128:
-_key_expansion_256a:
+SYM_FUNC_START_LOCAL(_key_expansion_256a)
pshufd $0b, %xmm1, %xmm1
shufps $0b0001, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1770,10 +1770,9 @@ _key_expansion_256a:
add $0x10, TKEYP
ret
 ENDPROC(_key_expansion_128)
-ENDPROC(_key_expansion_256a)
+SYM_FUNC_END(_key_expansion_256a)
 
-.align 4
-_key_expansion_192a:
+SYM_FUNC_START_LOCAL(_key_expansion_192a)
pshufd $0b01010101, %xmm1, %xmm1
shufps $0b0001, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1795,10 +1794,9 @@ _key_expansion_192a:
movaps %xmm1, 0x10(TKEYP)
add $0x20, TKEYP
ret
-ENDPROC(_key_expansion_192a)
+SYM_FUNC_END(_key_expansion_192a)
 
-.align 4
-_key_expansion_192b:
+SYM_FUNC_START_LOCAL(_key_expansion_192b)
pshufd $0b01010101, %xmm1, %xmm1
shufps $0b0001, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1815,10 +1813,9 @@ _key_expansion_192b:
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_192b)
+SYM_FUNC_END(_key_expansion_192b)
 
-.align 4
-_key_expansion_256b:
+SYM_FUNC_START_LOCAL(_key_expansion_256b)
pshufd $0b10101010, %xmm1, %xmm1
shufps $0b0001, %xmm2, %xmm4
pxor %xmm4, %xmm2
@@ -1828,7 +1825,7 @@ _key_expansion_256b:
movaps %xmm2, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_256b)
+SYM_FUNC_END(_key_expansion_256b)
 
 /*
  * int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
@@ -1981,8 +1978,7 @@ ENDPROC(aesni_enc)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_enc1:
+SYM_FUNC_START_LOCAL(_aesni_enc1)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE # round 0
@@ -2025,7 +2021,7 @@ _aesni_enc1:
movaps 0x70(TKEYP), KEY
AESENCLAST KEY STATE
ret
-ENDPROC(_aesni_enc1)
+SYM_FUNC_END(_aesni_enc1)
 
 /*
  * _aesni_enc4:internal ABI
@@ -2045,8 +2041,7 @@ ENDPROC(_aesni_enc1)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_enc4:
+SYM_FUNC_START_LOCAL(_aesni_enc4)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE1# round 0
@@ -2134,7 +2129,7 @@ _aesni_enc4:
AESENCLAST KEY STATE3
AESENCLAST KEY STATE4
ret
-ENDPROC(_aesni_enc4)
+SYM_FUNC_END(_aesni_enc4)
 
 /*
  * void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
@@ -2173,8 +2168

[PATCH v9 09/28] x86/asm: Annotate aliases

2019-10-11 Thread Jiri Slaby
_key_expansion_128 is an alias to _key_expansion_256a, __memcpy to
memcpy, xen_syscall32_target to xen_sysenter_target, and so on. Annotate
them all using the new SYM_FUNC_START_ALIAS, SYM_FUNC_START_LOCAL_ALIAS,
and SYM_FUNC_END_ALIAS. This will make the tools generating the
debuginfo happy as it avoids nesting and double symbols.

Signed-off-by: Jiri Slaby 
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Borislav Petkov 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: 
Cc: Boris Ostrovsky 
Reviewed-by: Juergen Gross  [xen parts]
Cc: 
Cc: 
---
 arch/x86/crypto/aesni-intel_asm.S | 5 ++---
 arch/x86/lib/memcpy_64.S  | 4 ++--
 arch/x86/lib/memmove_64.S | 4 ++--
 arch/x86/lib/memset_64.S  | 4 ++--
 arch/x86/xen/xen-asm_64.S | 4 ++--
 5 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_asm.S 
b/arch/x86/crypto/aesni-intel_asm.S
index e0a5fb462a0a..9d8d5f2296e1 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1757,8 +1757,7 @@ ENDPROC(aesni_gcm_finalize)
 #endif
 
 
-.align 4
-_key_expansion_128:
+SYM_FUNC_START_LOCAL_ALIAS(_key_expansion_128)
 SYM_FUNC_START_LOCAL(_key_expansion_256a)
pshufd $0b, %xmm1, %xmm1
shufps $0b0001, %xmm0, %xmm4
@@ -1769,8 +1768,8 @@ SYM_FUNC_START_LOCAL(_key_expansion_256a)
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_128)
 SYM_FUNC_END(_key_expansion_256a)
+SYM_FUNC_END_ALIAS(_key_expansion_128)
 
 SYM_FUNC_START_LOCAL(_key_expansion_192a)
pshufd $0b01010101, %xmm1, %xmm1
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 92748660ba51..57a64266ba69 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -28,7 +28,7 @@
  * Output:
  * rax original destination
  */
-ENTRY(__memcpy)
+SYM_FUNC_START_ALIAS(__memcpy)
 ENTRY(memcpy)
ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
  "jmp memcpy_erms", X86_FEATURE_ERMS
@@ -42,7 +42,7 @@ ENTRY(memcpy)
rep movsb
ret
 ENDPROC(memcpy)
-ENDPROC(__memcpy)
+SYM_FUNC_END_ALIAS(__memcpy)
 EXPORT_SYMBOL(memcpy)
 EXPORT_SYMBOL(__memcpy)
 
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index bbec69d8223b..50c1648311b3 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -26,7 +26,7 @@
  */
 .weak memmove
 
-ENTRY(memmove)
+SYM_FUNC_START_ALIAS(memmove)
 ENTRY(__memmove)
 
/* Handle more 32 bytes in loop */
@@ -208,6 +208,6 @@ ENTRY(__memmove)
 13:
retq
 ENDPROC(__memmove)
-ENDPROC(memmove)
+SYM_FUNC_END_ALIAS(memmove)
 EXPORT_SYMBOL(__memmove)
 EXPORT_SYMBOL(memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 9bc861c71e75..927ac44d34aa 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -19,7 +19,7 @@
  *
  * rax   original destination
  */
-ENTRY(memset)
+SYM_FUNC_START_ALIAS(memset)
 ENTRY(__memset)
/*
 * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
@@ -43,8 +43,8 @@ ENTRY(__memset)
rep stosb
movq %r9,%rax
ret
-ENDPROC(memset)
 ENDPROC(__memset)
+SYM_FUNC_END_ALIAS(memset)
 EXPORT_SYMBOL(memset)
 EXPORT_SYMBOL(__memset)
 
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index ebf610b49c06..45c1249f370d 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -167,13 +167,13 @@ ENDPROC(xen_sysenter_target)
 
 #else /* !CONFIG_IA32_EMULATION */
 
-ENTRY(xen_syscall32_target)
+SYM_FUNC_START_ALIAS(xen_syscall32_target)
 ENTRY(xen_sysenter_target)
lea 16(%rsp), %rsp  /* strip %rcx, %r11 */
mov $-ENOSYS, %rax
pushq $0
jmp hypercall_iret
-ENDPROC(xen_syscall32_target)
 ENDPROC(xen_sysenter_target)
+SYM_FUNC_END_ALIAS(xen_syscall32_target)
 
 #endif /* CONFIG_IA32_EMULATION */
-- 
2.23.0



[PATCH -resend 23/27] x86_64: assembly, change all ENTRY+ENDPROC to SYM_FUNC_*

2018-05-10 Thread Jiri Slaby
These are all functions which are invoked from elsewhere, so we annotate
them as global using the new SYM_FUNC_START. And their ENDPROCs by
SYM_FUNC_END.

And make sure ENTRY/ENDPROC is not defined on X86_64, given these were
the last users.

Signed-off-by: Jiri Slaby 
Reviewed-by: Rafael J. Wysocki  [hibernate]
Reviewed-by: Boris Ostrovsky  [xen bits]
Cc: "H. Peter Anvin" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: x...@kernel.org
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: "Rafael J. Wysocki" 
Cc: Len Brown 
Cc: Pavel Machek 
Cc: Matt Fleming 
Cc: Ard Biesheuvel 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: linux-crypto@vger.kernel.org
Cc: linux...@vger.kernel.org
Cc: linux-...@vger.kernel.org
Cc: xen-de...@lists.xenproject.org
---
 arch/x86/boot/compressed/efi_thunk_64.S|  4 +-
 arch/x86/boot/compressed/head_64.S | 16 +++---
 arch/x86/boot/compressed/mem_encrypt.S |  8 +--
 arch/x86/crypto/aes-i586-asm_32.S  |  8 +--
 arch/x86/crypto/aes-x86_64-asm_64.S|  4 +-
 arch/x86/crypto/aes_ctrby8_avx-x86_64.S| 12 ++---
 arch/x86/crypto/aesni-intel_asm.S  | 60 +++---
 arch/x86/crypto/aesni-intel_avx-x86_64.S   | 24 -
 arch/x86/crypto/blowfish-x86_64-asm_64.S   | 16 +++---
 arch/x86/crypto/camellia-aesni-avx-asm_64.S| 24 -
 arch/x86/crypto/camellia-aesni-avx2-asm_64.S   | 24 -
 arch/x86/crypto/camellia-x86_64-asm_64.S   | 16 +++---
 arch/x86/crypto/cast5-avx-x86_64-asm_64.S  | 16 +++---
 arch/x86/crypto/cast6-avx-x86_64-asm_64.S  | 24 -
 arch/x86/crypto/chacha20-avx2-x86_64.S |  4 +-
 arch/x86/crypto/chacha20-ssse3-x86_64.S|  8 +--
 arch/x86/crypto/crc32-pclmul_asm.S |  4 +-
 arch/x86/crypto/crc32c-pcl-intel-asm_64.S  |  4 +-
 arch/x86/crypto/crct10dif-pcl-asm_64.S |  4 +-
 arch/x86/crypto/des3_ede-asm_64.S  |  8 +--
 arch/x86/crypto/ghash-clmulni-intel_asm.S  |  8 +--
 arch/x86/crypto/poly1305-avx2-x86_64.S |  4 +-
 arch/x86/crypto/poly1305-sse2-x86_64.S |  8 +--
 arch/x86/crypto/salsa20-x86_64-asm_64.S|  4 +-
 arch/x86/crypto/serpent-avx-x86_64-asm_64.S| 24 -
 arch/x86/crypto/serpent-avx2-asm_64.S  | 24 -
 arch/x86/crypto/serpent-sse2-x86_64-asm_64.S   |  8 +--
 arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S   |  8 +--
 arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S  |  4 +-
 arch/x86/crypto/sha1-mb/sha1_x8_avx2.S |  4 +-
 arch/x86/crypto/sha1_avx2_x86_64_asm.S |  4 +-
 arch/x86/crypto/sha1_ni_asm.S  |  4 +-
 arch/x86/crypto/sha1_ssse3_asm.S   |  4 +-
 arch/x86/crypto/sha256-avx-asm.S   |  4 +-
 arch/x86/crypto/sha256-avx2-asm.S  |  4 +-
 .../crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S|  8 +--
 .../crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S   |  4 +-
 arch/x86/crypto/sha256-mb/sha256_x8_avx2.S |  4 +-
 arch/x86/crypto/sha256-ssse3-asm.S |  4 +-
 arch/x86/crypto/sha256_ni_asm.S|  4 +-
 arch/x86/crypto/sha512-avx-asm.S   |  4 +-
 arch/x86/crypto/sha512-avx2-asm.S  |  4 +-
 .../crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S|  8 +--
 .../crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S   |  4 +-
 arch/x86/crypto/sha512-mb/sha512_x4_avx2.S |  4 +-
 arch/x86/crypto/sha512-ssse3-asm.S |  4 +-
 arch/x86/crypto/twofish-avx-x86_64-asm_64.S| 24 -
 arch/x86/crypto/twofish-x86_64-asm_64-3way.S   |  8 +--
 arch/x86/crypto/twofish-x86_64-asm_64.S|  8 +--
 arch/x86/entry/entry_64.S  | 10 ++--
 arch/x86/entry/entry_64_compat.S   |  4 +-
 arch/x86/kernel/acpi/wakeup_64.S   |  8 +--
 arch/x86/kernel/ftrace_64.S| 20 
 arch/x86/kernel/head_64.S  | 12 ++---
 arch/x86/lib/checksum_32.S |  8 +--
 arch/x86/lib/clear_page_64.S   | 12 ++---
 arch/x86/lib/cmpxchg16b_emu.S  |  4 +-
 arch/x86/lib/cmpxchg8b_emu.S   |  4 +-
 arch/x86/lib/copy_page_64.S|  4 +-
 arch/x86/lib/copy_user_64.S| 16 +++---
 arch/x86/lib/csum-copy_64.S|  4 +-
 arch/x86/lib/getuser.S | 16 +++---
 arch/x86/lib/hweight.S |  8 +--
 arch/x86/lib/iomap_copy_64.S   |  4 +-
 arch/x86/lib/memcpy_64.S   |  4 +-
 arch/x86/lib/memmove_64.S  |  4 +-
 arch/x86/lib/memset_64.S   |  4 +-
 arch/x86/lib/msr-reg.S  

[PATCH -resend 26/27] x86_32: assembly, change all ENTRY+ENDPROC to SYM_FUNC_*

2018-05-10 Thread Jiri Slaby
These are all functions which are invoked from elsewhere, so we annotate
them as global using the new SYM_FUNC_START (and their ENDPROCs by
SYM_FUNC_END).

Now, we can finally force ENTRY/ENDPROC to be undefined on X86.

Signed-off-by: Jiri Slaby 
Cc: "H. Peter Anvin" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: x...@kernel.org
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Bill Metzenthen 
Cc: Matt Fleming 
Cc: Ard Biesheuvel 
Cc: linux-crypto@vger.kernel.org
Cc: linux-...@vger.kernel.org
---
 arch/x86/boot/compressed/efi_stub_32.S |  4 ++--
 arch/x86/boot/compressed/head_32.S | 12 +--
 arch/x86/crypto/salsa20-i586-asm_32.S  |  4 ++--
 arch/x86/crypto/serpent-sse2-i586-asm_32.S |  8 
 arch/x86/crypto/twofish-i586-asm_32.S  |  8 
 arch/x86/entry/entry_32.S  | 24 +++---
 arch/x86/kernel/head_32.S  | 16 +++
 arch/x86/lib/atomic64_386_32.S |  4 ++--
 arch/x86/lib/atomic64_cx8_32.S | 32 +++---
 arch/x86/lib/checksum_32.S |  8 
 arch/x86/math-emu/div_Xsig.S   |  4 ++--
 arch/x86/math-emu/div_small.S  |  4 ++--
 arch/x86/math-emu/mul_Xsig.S   | 12 +--
 arch/x86/math-emu/polynom_Xsig.S   |  4 ++--
 arch/x86/math-emu/reg_norm.S   |  8 
 arch/x86/math-emu/reg_round.S  |  4 ++--
 arch/x86/math-emu/reg_u_add.S  |  4 ++--
 arch/x86/math-emu/reg_u_div.S  |  4 ++--
 arch/x86/math-emu/reg_u_mul.S  |  4 ++--
 arch/x86/math-emu/reg_u_sub.S  |  4 ++--
 arch/x86/math-emu/round_Xsig.S |  8 
 arch/x86/math-emu/shr_Xsig.S   |  4 ++--
 arch/x86/math-emu/wm_shrx.S|  8 
 arch/x86/math-emu/wm_sqrt.S|  4 ++--
 arch/x86/platform/efi/efi_stub_32.S|  4 ++--
 include/linux/linkage.h|  8 +++-
 26 files changed, 103 insertions(+), 105 deletions(-)

diff --git a/arch/x86/boot/compressed/efi_stub_32.S 
b/arch/x86/boot/compressed/efi_stub_32.S
index 257e341fd2c8..ed6c351d34ed 100644
--- a/arch/x86/boot/compressed/efi_stub_32.S
+++ b/arch/x86/boot/compressed/efi_stub_32.S
@@ -24,7 +24,7 @@
  */
 
 .text
-ENTRY(efi_call_phys)
+SYM_FUNC_START(efi_call_phys)
/*
 * 0. The function can only be called in Linux kernel. So CS has been
 * set to 0x0010, DS and SS have been set to 0x0018. In EFI, I found
@@ -77,7 +77,7 @@ ENTRY(efi_call_phys)
movlsaved_return_addr(%edx), %ecx
pushl   %ecx
ret
-ENDPROC(efi_call_phys)
+SYM_FUNC_END(efi_call_phys)
 .previous
 
 .data
diff --git a/arch/x86/boot/compressed/head_32.S 
b/arch/x86/boot/compressed/head_32.S
index 7e8ab0bb6968..3fa36496af12 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -61,7 +61,7 @@
.hidden _egot
 
__HEAD
-ENTRY(startup_32)
+SYM_FUNC_START(startup_32)
cld
/*
 * Test KEEP_SEGMENTS flag to see if the bootloader is asking
@@ -142,14 +142,14 @@ ENTRY(startup_32)
  */
lealrelocated(%ebx), %eax
jmp *%eax
-ENDPROC(startup_32)
+SYM_FUNC_END(startup_32)
 
 #ifdef CONFIG_EFI_STUB
 /*
  * We don't need the return address, so set up the stack so efi_main() can find
  * its arguments.
  */
-ENTRY(efi_pe_entry)
+SYM_FUNC_START(efi_pe_entry)
add $0x4, %esp
 
call1f
@@ -174,9 +174,9 @@ ENTRY(efi_pe_entry)
pushl   %eax
pushl   %ecx
jmp 2f  /* Skip efi_config initialization */
-ENDPROC(efi_pe_entry)
+SYM_FUNC_END(efi_pe_entry)
 
-ENTRY(efi32_stub_entry)
+SYM_FUNC_START(efi32_stub_entry)
add $0x4, %esp
popl%ecx
popl%edx
@@ -205,7 +205,7 @@ fail:
movlBP_code32_start(%esi), %eax
lealstartup_32(%eax), %eax
jmp *%eax
-ENDPROC(efi32_stub_entry)
+SYM_FUNC_END(efi32_stub_entry)
 #endif
 
.text
diff --git a/arch/x86/crypto/salsa20-i586-asm_32.S 
b/arch/x86/crypto/salsa20-i586-asm_32.S
index 6014b7b9e52a..edeb4c3e7389 100644
--- a/arch/x86/crypto/salsa20-i586-asm_32.S
+++ b/arch/x86/crypto/salsa20-i586-asm_32.S
@@ -8,7 +8,7 @@
 .text
 
 # enter salsa20_encrypt_bytes
-ENTRY(salsa20_encrypt_bytes)
+SYM_FUNC_START(salsa20_encrypt_bytes)
mov %esp,%eax
and $31,%eax
add $256,%eax
@@ -935,4 +935,4 @@ ENTRY(salsa20_encrypt_bytes)
add $64,%esi
# goto bytesatleast1
jmp ._bytesatleast1
-ENDPROC(salsa20_encrypt_bytes)
+SYM_FUNC_END(salsa20_encrypt_bytes)
diff --git a/arch/x86/crypto/serpent-sse2-i586-asm_32.S 
b/arch/x86/crypto/serpent-sse2-i586-asm_32.S
index d348f1553a79..f3cebd3c6739 100644
--- a/arch/x86/crypto/serpent-sse2-i586-asm_32.S
+++ b/arch/x86/crypto/serpent-sse2-i586-asm_32.S
@@ -512,7 +512,7 @@
pxor t0, 

[PATCH -resend 06/27] x86: crypto, annotate local functions

2018-05-10 Thread Jiri Slaby
Use the newly added SYM_FUNC_START_LOCAL to annotate starts of all
functions which do not have ".globl" annotation, but their ends are
annotated by ENDPROC. This is needed to balance ENDPROC for tools that
are about to generate debuginfo.

To be symmetric, we also convert their ENDPROCs to the new SYM_FUNC_END.

Signed-off-by: Jiri Slaby 
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: 
Cc: 
---
 arch/x86/crypto/aesni-intel_asm.S| 49 
 arch/x86/crypto/camellia-aesni-avx-asm_64.S  | 20 ++--
 arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 20 ++--
 arch/x86/crypto/cast5-avx-x86_64-asm_64.S|  8 ++---
 arch/x86/crypto/cast6-avx-x86_64-asm_64.S|  8 ++---
 arch/x86/crypto/ghash-clmulni-intel_asm.S|  4 +--
 arch/x86/crypto/serpent-avx-x86_64-asm_64.S  |  8 ++---
 arch/x86/crypto/serpent-avx2-asm_64.S|  8 ++---
 arch/x86/crypto/twofish-avx-x86_64-asm_64.S  |  8 ++---
 9 files changed, 62 insertions(+), 71 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_asm.S 
b/arch/x86/crypto/aesni-intel_asm.S
index e762ef417562..b482ac1a1fb3 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1763,7 +1763,7 @@ ENDPROC(aesni_gcm_finalize)
 
 .align 4
 _key_expansion_128:
-_key_expansion_256a:
+SYM_FUNC_START_LOCAL(_key_expansion_256a)
pshufd $0b, %xmm1, %xmm1
shufps $0b0001, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1774,10 +1774,9 @@ _key_expansion_256a:
add $0x10, TKEYP
ret
 ENDPROC(_key_expansion_128)
-ENDPROC(_key_expansion_256a)
+SYM_FUNC_END(_key_expansion_256a)
 
-.align 4
-_key_expansion_192a:
+SYM_FUNC_START_LOCAL(_key_expansion_192a)
pshufd $0b01010101, %xmm1, %xmm1
shufps $0b0001, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1799,10 +1798,9 @@ _key_expansion_192a:
movaps %xmm1, 0x10(TKEYP)
add $0x20, TKEYP
ret
-ENDPROC(_key_expansion_192a)
+SYM_FUNC_END(_key_expansion_192a)
 
-.align 4
-_key_expansion_192b:
+SYM_FUNC_START_LOCAL(_key_expansion_192b)
pshufd $0b01010101, %xmm1, %xmm1
shufps $0b0001, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1819,10 +1817,9 @@ _key_expansion_192b:
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_192b)
+SYM_FUNC_END(_key_expansion_192b)
 
-.align 4
-_key_expansion_256b:
+SYM_FUNC_START_LOCAL(_key_expansion_256b)
pshufd $0b10101010, %xmm1, %xmm1
shufps $0b0001, %xmm2, %xmm4
pxor %xmm4, %xmm2
@@ -1832,7 +1829,7 @@ _key_expansion_256b:
movaps %xmm2, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_256b)
+SYM_FUNC_END(_key_expansion_256b)
 
 /*
  * int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
@@ -1985,8 +1982,7 @@ ENDPROC(aesni_enc)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_enc1:
+SYM_FUNC_START_LOCAL(_aesni_enc1)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE # round 0
@@ -2029,7 +2025,7 @@ _aesni_enc1:
movaps 0x70(TKEYP), KEY
AESENCLAST KEY STATE
ret
-ENDPROC(_aesni_enc1)
+SYM_FUNC_END(_aesni_enc1)
 
 /*
  * _aesni_enc4:internal ABI
@@ -2049,8 +2045,7 @@ ENDPROC(_aesni_enc1)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_enc4:
+SYM_FUNC_START_LOCAL(_aesni_enc4)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE1# round 0
@@ -2138,7 +2133,7 @@ _aesni_enc4:
AESENCLAST KEY STATE3
AESENCLAST KEY STATE4
ret
-ENDPROC(_aesni_enc4)
+SYM_FUNC_END(_aesni_enc4)
 
 /*
  * void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
@@ -2177,8 +2172,7 @@ ENDPROC(aesni_dec)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_dec1:
+SYM_FUNC_START_LOCAL(_aesni_dec1)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE # round 0
@@ -2221,7 +2215,7 @@ _aesni_dec1:
movaps 0x70(TKEYP), KEY
AESDECLAST KEY STATE
ret
-ENDPROC(_aesni_dec1)
+SYM_FUNC_END(_aesni_dec1)
 
 /*
  * _aesni_dec4:internal ABI
@@ -2241,8 +2235,7 @@ ENDPROC(_aesni_dec1)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_dec4:
+SYM_FUNC_START_LOCAL(_aesni_dec4)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE1# round 0
@@ -2330,7 +2323,7 @@ _aesni_dec4:
AESDECLAST KEY STATE3
AESDECLAST KEY STATE4
ret
-ENDPROC(_aesni_dec4)
+SYM_FUNC_END(_aesni_dec4)
 
 /*
  * void aesni_ecb_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
@@ -2608,8 +2601,7 @@ ENDPROC(aesni_cbc_dec)
  * INC:== 1, in little endian
  * BSWAP_MASK == endian swapping mask
  */
-.align 4
-_aesni_inc_init:
+SYM_FUNC_START_LOCAL(_aesni_inc_init)
movaps .Lbswap_mask, BSW

[PATCH -resend 08/27] x86: assembly, annotate aliases

2018-05-10 Thread Jiri Slaby
_key_expansion_128 is an alias to _key_expansion_256a, __memcpy to
memcpy, xen_syscall32_target to xen_sysenter_target, and so on. Annotate
them all using the new SYM_FUNC_START_ALIAS, SYM_FUNC_START_LOCAL_ALIAS,
and SYM_FUNC_END_ALIAS. This will make the tools generating the
debuginfo happy.
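
Condensed from the aesni hunk below (unchanged body elided), the alias
annotation turns the old interleaved label/ENDPROC pairs into properly
nested START/END markers:

    before:
        .align 4
        _key_expansion_128:
        SYM_FUNC_START_LOCAL(_key_expansion_256a)
                ...
                ret
        ENDPROC(_key_expansion_128)
        SYM_FUNC_END(_key_expansion_256a)

    after:
        SYM_FUNC_START_LOCAL_ALIAS(_key_expansion_128)
        SYM_FUNC_START_LOCAL(_key_expansion_256a)
                ...
                ret
        SYM_FUNC_END(_key_expansion_256a)
        SYM_FUNC_END_ALIAS(_key_expansion_128)

Note the END markers mirror the STARTs, so the alias now fully encloses the
function it aliases instead of overlapping with it.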

Signed-off-by: Jiri Slaby 
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Reviewed-by: Juergen Gross  [xen parts]
Cc: 
Cc: 
---
 arch/x86/crypto/aesni-intel_asm.S | 5 ++---
 arch/x86/lib/memcpy_64.S  | 4 ++--
 arch/x86/lib/memmove_64.S | 4 ++--
 arch/x86/lib/memset_64.S  | 4 ++--
 arch/x86/xen/xen-asm_64.S | 4 ++--
 5 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_asm.S 
b/arch/x86/crypto/aesni-intel_asm.S
index b482ac1a1fb3..c85ecb163c78 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1761,8 +1761,7 @@ ENDPROC(aesni_gcm_finalize)
 #endif
 
 
-.align 4
-_key_expansion_128:
+SYM_FUNC_START_LOCAL_ALIAS(_key_expansion_128)
 SYM_FUNC_START_LOCAL(_key_expansion_256a)
pshufd $0b11111111, %xmm1, %xmm1
shufps $0b00010001, %xmm0, %xmm4
@@ -1773,8 +1772,8 @@ SYM_FUNC_START_LOCAL(_key_expansion_256a)
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_128)
 SYM_FUNC_END(_key_expansion_256a)
+SYM_FUNC_END_ALIAS(_key_expansion_128)
 
 SYM_FUNC_START_LOCAL(_key_expansion_192a)
pshufd $0b01010101, %xmm1, %xmm1
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 9a53a06e5a3e..4911b1c61aa8 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -26,7 +26,7 @@
  * Output:
  * rax original destination
  */
-ENTRY(__memcpy)
+SYM_FUNC_START_ALIAS(__memcpy)
 ENTRY(memcpy)
ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
  "jmp memcpy_erms", X86_FEATURE_ERMS
@@ -40,7 +40,7 @@ ENTRY(memcpy)
rep movsb
ret
 ENDPROC(memcpy)
-ENDPROC(__memcpy)
+SYM_FUNC_END_ALIAS(__memcpy)
 EXPORT_SYMBOL(memcpy)
 EXPORT_SYMBOL(__memcpy)
 
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index bbec69d8223b..50c1648311b3 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -26,7 +26,7 @@
  */
 .weak memmove
 
-ENTRY(memmove)
+SYM_FUNC_START_ALIAS(memmove)
 ENTRY(__memmove)
 
/* Handle more 32 bytes in loop */
@@ -208,6 +208,6 @@ ENTRY(__memmove)
 13:
retq
 ENDPROC(__memmove)
-ENDPROC(memmove)
+SYM_FUNC_END_ALIAS(memmove)
 EXPORT_SYMBOL(__memmove)
 EXPORT_SYMBOL(memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 9bc861c71e75..927ac44d34aa 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -19,7 +19,7 @@
  *
  * rax   original destination
  */
-ENTRY(memset)
+SYM_FUNC_START_ALIAS(memset)
 ENTRY(__memset)
/*
 * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
@@ -43,8 +43,8 @@ ENTRY(__memset)
rep stosb
movq %r9,%rax
ret
-ENDPROC(memset)
 ENDPROC(__memset)
+SYM_FUNC_END_ALIAS(memset)
 EXPORT_SYMBOL(memset)
 EXPORT_SYMBOL(__memset)
 
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 417b339e5c8e..e8f6f482bb20 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -164,13 +164,13 @@ ENDPROC(xen_sysenter_target)
 
 #else /* !CONFIG_IA32_EMULATION */
 
-ENTRY(xen_syscall32_target)
+SYM_FUNC_START_ALIAS(xen_syscall32_target)
 ENTRY(xen_sysenter_target)
lea 16(%rsp), %rsp  /* strip %rcx, %r11 */
mov $-ENOSYS, %rax
pushq $0
jmp hypercall_iret
-ENDPROC(xen_syscall32_target)
 ENDPROC(xen_sysenter_target)
+SYM_FUNC_END_ALIAS(xen_syscall32_target)
 
 #endif /* CONFIG_IA32_EMULATION */
-- 
2.16.3



[PATCH v6 27/28] x86_32/asm: change all ENTRY+ENDPROC to SYM_FUNC_*

2018-05-18 Thread Jiri Slaby
These are all functions which are invoked from elsewhere, so we annotate
them as global using the new SYM_FUNC_START. And their ENDPROCs by
SYM_FUNC_END.

Now, we can finally force ENTRY/ENDPROC to be undefined on X86.
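
The per-function change is mechanical; a minimal sketch (my_asm_func is a
placeholder name, not taken from any of the files touched here):

    before:
        ENTRY(my_asm_func)
                ...
                ret
        ENDPROC(my_asm_func)

    after:
        SYM_FUNC_START(my_asm_func)
                ...
                ret
        SYM_FUNC_END(my_asm_func)

Both spellings are intended to produce the same global, aligned STT_FUNC
symbol; only the annotation macros change, not the emitted code.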

Signed-off-by: Jiri Slaby 
Cc: "H. Peter Anvin" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: x...@kernel.org
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Bill Metzenthen 
Cc: Matt Fleming 
Cc: Ard Biesheuvel 
Cc: linux-crypto@vger.kernel.org
Cc: linux-...@vger.kernel.org
---
 arch/x86/boot/compressed/efi_stub_32.S |  4 ++--
 arch/x86/boot/compressed/head_32.S | 12 +--
 arch/x86/crypto/salsa20-i586-asm_32.S  |  4 ++--
 arch/x86/crypto/serpent-sse2-i586-asm_32.S |  8 
 arch/x86/crypto/twofish-i586-asm_32.S  |  8 
 arch/x86/entry/entry_32.S  | 24 +++---
 arch/x86/kernel/head_32.S  | 16 +++
 arch/x86/lib/atomic64_386_32.S |  4 ++--
 arch/x86/lib/atomic64_cx8_32.S | 32 +++---
 arch/x86/lib/checksum_32.S |  8 
 arch/x86/math-emu/div_Xsig.S   |  4 ++--
 arch/x86/math-emu/div_small.S  |  4 ++--
 arch/x86/math-emu/mul_Xsig.S   | 12 +--
 arch/x86/math-emu/polynom_Xsig.S   |  4 ++--
 arch/x86/math-emu/reg_norm.S   |  8 
 arch/x86/math-emu/reg_round.S  |  4 ++--
 arch/x86/math-emu/reg_u_add.S  |  4 ++--
 arch/x86/math-emu/reg_u_div.S  |  4 ++--
 arch/x86/math-emu/reg_u_mul.S  |  4 ++--
 arch/x86/math-emu/reg_u_sub.S  |  4 ++--
 arch/x86/math-emu/round_Xsig.S |  8 
 arch/x86/math-emu/shr_Xsig.S   |  4 ++--
 arch/x86/math-emu/wm_shrx.S|  8 
 arch/x86/math-emu/wm_sqrt.S|  4 ++--
 arch/x86/platform/efi/efi_stub_32.S|  4 ++--
 include/linux/linkage.h|  8 +++-
 26 files changed, 103 insertions(+), 105 deletions(-)

diff --git a/arch/x86/boot/compressed/efi_stub_32.S 
b/arch/x86/boot/compressed/efi_stub_32.S
index 257e341fd2c8..ed6c351d34ed 100644
--- a/arch/x86/boot/compressed/efi_stub_32.S
+++ b/arch/x86/boot/compressed/efi_stub_32.S
@@ -24,7 +24,7 @@
  */
 
 .text
-ENTRY(efi_call_phys)
+SYM_FUNC_START(efi_call_phys)
/*
 * 0. The function can only be called in Linux kernel. So CS has been
 * set to 0x0010, DS and SS have been set to 0x0018. In EFI, I found
@@ -77,7 +77,7 @@ ENTRY(efi_call_phys)
movlsaved_return_addr(%edx), %ecx
pushl   %ecx
ret
-ENDPROC(efi_call_phys)
+SYM_FUNC_END(efi_call_phys)
 .previous
 
 .data
diff --git a/arch/x86/boot/compressed/head_32.S 
b/arch/x86/boot/compressed/head_32.S
index 7e8ab0bb6968..3fa36496af12 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -61,7 +61,7 @@
.hidden _egot
 
__HEAD
-ENTRY(startup_32)
+SYM_FUNC_START(startup_32)
cld
/*
 * Test KEEP_SEGMENTS flag to see if the bootloader is asking
@@ -142,14 +142,14 @@ ENTRY(startup_32)
  */
lealrelocated(%ebx), %eax
jmp *%eax
-ENDPROC(startup_32)
+SYM_FUNC_END(startup_32)
 
 #ifdef CONFIG_EFI_STUB
 /*
  * We don't need the return address, so set up the stack so efi_main() can find
  * its arguments.
  */
-ENTRY(efi_pe_entry)
+SYM_FUNC_START(efi_pe_entry)
add $0x4, %esp
 
call1f
@@ -174,9 +174,9 @@ ENTRY(efi_pe_entry)
pushl   %eax
pushl   %ecx
jmp 2f  /* Skip efi_config initialization */
-ENDPROC(efi_pe_entry)
+SYM_FUNC_END(efi_pe_entry)
 
-ENTRY(efi32_stub_entry)
+SYM_FUNC_START(efi32_stub_entry)
add $0x4, %esp
popl%ecx
popl%edx
@@ -205,7 +205,7 @@ fail:
movlBP_code32_start(%esi), %eax
lealstartup_32(%eax), %eax
jmp *%eax
-ENDPROC(efi32_stub_entry)
+SYM_FUNC_END(efi32_stub_entry)
 #endif
 
.text
diff --git a/arch/x86/crypto/salsa20-i586-asm_32.S 
b/arch/x86/crypto/salsa20-i586-asm_32.S
index 6014b7b9e52a..edeb4c3e7389 100644
--- a/arch/x86/crypto/salsa20-i586-asm_32.S
+++ b/arch/x86/crypto/salsa20-i586-asm_32.S
@@ -8,7 +8,7 @@
 .text
 
 # enter salsa20_encrypt_bytes
-ENTRY(salsa20_encrypt_bytes)
+SYM_FUNC_START(salsa20_encrypt_bytes)
mov %esp,%eax
and $31,%eax
add $256,%eax
@@ -935,4 +935,4 @@ ENTRY(salsa20_encrypt_bytes)
add $64,%esi
# goto bytesatleast1
jmp ._bytesatleast1
-ENDPROC(salsa20_encrypt_bytes)
+SYM_FUNC_END(salsa20_encrypt_bytes)
diff --git a/arch/x86/crypto/serpent-sse2-i586-asm_32.S 
b/arch/x86/crypto/serpent-sse2-i586-asm_32.S
index d348f1553a79..f3cebd3c6739 100644
--- a/arch/x86/crypto/serpent-sse2-i586-asm_32.S
+++ b/arch/x86/crypto/serpent-sse2-i586-asm_32.S
@@ -512,7 +512,7 @@
pxor t0, 

[PATCH v6 24/28] x86_64/asm: change all ENTRY+ENDPROC to SYM_FUNC_*

2018-05-18 Thread Jiri Slaby
These are all functions which are invoked from elsewhere, so we annotate
them as global using the new SYM_FUNC_START. And their ENDPROCs by
SYM_FUNC_END.

And make sure ENTRY/ENDPROC is not defined on X86_64, given these were
the last users.
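
The linkage.h hunk in the diffstat is what enforces "no new users": once no
x86_64 .S file references the old macros, ENTRY/ENDPROC can be hidden behind
a config guard. A rough, illustrative sketch of the idea only; the exact
guard and wording in the patch may differ:

    /* include/linux/linkage.h -- sketch only */
    #ifndef CONFIG_X86_64
    #ifndef ENTRY
    #define ENTRY(name)    SYM_FUNC_START(name)
    #endif
    #ifndef ENDPROC
    #define ENDPROC(name)  SYM_FUNC_END(name)
    #endif
    #endif /* CONFIG_X86_64 */

so that other architectures keep the old names while an x86_64 build fails
to assemble if a stray ENTRY slips back in.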

Signed-off-by: Jiri Slaby 
Reviewed-by: Rafael J. Wysocki  [hibernate]
Reviewed-by: Boris Ostrovsky  [xen bits]
Cc: "H. Peter Anvin" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: x...@kernel.org
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: "Rafael J. Wysocki" 
Cc: Len Brown 
Cc: Pavel Machek 
Cc: Matt Fleming 
Cc: Ard Biesheuvel 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: linux-crypto@vger.kernel.org
Cc: linux...@vger.kernel.org
Cc: linux-...@vger.kernel.org
Cc: xen-de...@lists.xenproject.org
---
 arch/x86/boot/compressed/efi_thunk_64.S|  4 +-
 arch/x86/boot/compressed/head_64.S | 16 +++---
 arch/x86/boot/compressed/mem_encrypt.S |  8 +--
 arch/x86/crypto/aes-i586-asm_32.S  |  8 +--
 arch/x86/crypto/aes-x86_64-asm_64.S|  4 +-
 arch/x86/crypto/aes_ctrby8_avx-x86_64.S| 12 ++---
 arch/x86/crypto/aesni-intel_asm.S  | 60 +++---
 arch/x86/crypto/aesni-intel_avx-x86_64.S   | 24 -
 arch/x86/crypto/blowfish-x86_64-asm_64.S   | 16 +++---
 arch/x86/crypto/camellia-aesni-avx-asm_64.S| 24 -
 arch/x86/crypto/camellia-aesni-avx2-asm_64.S   | 24 -
 arch/x86/crypto/camellia-x86_64-asm_64.S   | 16 +++---
 arch/x86/crypto/cast5-avx-x86_64-asm_64.S  | 16 +++---
 arch/x86/crypto/cast6-avx-x86_64-asm_64.S  | 24 -
 arch/x86/crypto/chacha20-avx2-x86_64.S |  4 +-
 arch/x86/crypto/chacha20-ssse3-x86_64.S|  8 +--
 arch/x86/crypto/crc32-pclmul_asm.S |  4 +-
 arch/x86/crypto/crc32c-pcl-intel-asm_64.S  |  4 +-
 arch/x86/crypto/crct10dif-pcl-asm_64.S |  4 +-
 arch/x86/crypto/des3_ede-asm_64.S  |  8 +--
 arch/x86/crypto/ghash-clmulni-intel_asm.S  |  8 +--
 arch/x86/crypto/poly1305-avx2-x86_64.S |  4 +-
 arch/x86/crypto/poly1305-sse2-x86_64.S |  8 +--
 arch/x86/crypto/salsa20-x86_64-asm_64.S|  4 +-
 arch/x86/crypto/serpent-avx-x86_64-asm_64.S| 24 -
 arch/x86/crypto/serpent-avx2-asm_64.S  | 24 -
 arch/x86/crypto/serpent-sse2-x86_64-asm_64.S   |  8 +--
 arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S   |  8 +--
 arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S  |  4 +-
 arch/x86/crypto/sha1-mb/sha1_x8_avx2.S |  4 +-
 arch/x86/crypto/sha1_avx2_x86_64_asm.S |  4 +-
 arch/x86/crypto/sha1_ni_asm.S  |  4 +-
 arch/x86/crypto/sha1_ssse3_asm.S   |  4 +-
 arch/x86/crypto/sha256-avx-asm.S   |  4 +-
 arch/x86/crypto/sha256-avx2-asm.S  |  4 +-
 .../crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S|  8 +--
 .../crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S   |  4 +-
 arch/x86/crypto/sha256-mb/sha256_x8_avx2.S |  4 +-
 arch/x86/crypto/sha256-ssse3-asm.S |  4 +-
 arch/x86/crypto/sha256_ni_asm.S|  4 +-
 arch/x86/crypto/sha512-avx-asm.S   |  4 +-
 arch/x86/crypto/sha512-avx2-asm.S  |  4 +-
 .../crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S|  8 +--
 .../crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S   |  4 +-
 arch/x86/crypto/sha512-mb/sha512_x4_avx2.S |  4 +-
 arch/x86/crypto/sha512-ssse3-asm.S |  4 +-
 arch/x86/crypto/twofish-avx-x86_64-asm_64.S| 24 -
 arch/x86/crypto/twofish-x86_64-asm_64-3way.S   |  8 +--
 arch/x86/crypto/twofish-x86_64-asm_64.S|  8 +--
 arch/x86/entry/entry_64.S  | 10 ++--
 arch/x86/entry/entry_64_compat.S   |  4 +-
 arch/x86/kernel/acpi/wakeup_64.S   |  8 +--
 arch/x86/kernel/ftrace_64.S| 20 
 arch/x86/kernel/head_64.S  | 12 ++---
 arch/x86/lib/checksum_32.S |  8 +--
 arch/x86/lib/clear_page_64.S   | 12 ++---
 arch/x86/lib/cmpxchg16b_emu.S  |  4 +-
 arch/x86/lib/cmpxchg8b_emu.S   |  4 +-
 arch/x86/lib/copy_page_64.S|  4 +-
 arch/x86/lib/copy_user_64.S| 16 +++---
 arch/x86/lib/csum-copy_64.S|  4 +-
 arch/x86/lib/getuser.S | 16 +++---
 arch/x86/lib/hweight.S |  8 +--
 arch/x86/lib/iomap_copy_64.S   |  4 +-
 arch/x86/lib/memcpy_64.S   |  4 +-
 arch/x86/lib/memmove_64.S  |  4 +-
 arch/x86/lib/memset_64.S   |  4 +-
 arch/x86/lib/msr-reg.S  

[PATCH v6 09/28] x86/asm: annotate aliases

2018-05-18 Thread Jiri Slaby
_key_expansion_128 is an alias to _key_expansion_256a, __memcpy to
memcpy, xen_syscall32_target to xen_sysenter_target, and so on. Annotate
them all using the new SYM_FUNC_START_ALIAS, SYM_FUNC_START_LOCAL_ALIAS,
and SYM_FUNC_END_ALIAS. This will make the tools generating the
debuginfo happy.

Signed-off-by: Jiri Slaby 
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Reviewed-by: Juergen Gross  [xen parts]
Cc: 
Cc: 
---
 arch/x86/crypto/aesni-intel_asm.S | 5 ++---
 arch/x86/lib/memcpy_64.S  | 4 ++--
 arch/x86/lib/memmove_64.S | 4 ++--
 arch/x86/lib/memset_64.S  | 4 ++--
 arch/x86/xen/xen-asm_64.S | 4 ++--
 5 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_asm.S 
b/arch/x86/crypto/aesni-intel_asm.S
index b482ac1a1fb3..c85ecb163c78 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1761,8 +1761,7 @@ ENDPROC(aesni_gcm_finalize)
 #endif
 
 
-.align 4
-_key_expansion_128:
+SYM_FUNC_START_LOCAL_ALIAS(_key_expansion_128)
 SYM_FUNC_START_LOCAL(_key_expansion_256a)
pshufd $0b11111111, %xmm1, %xmm1
shufps $0b00010001, %xmm0, %xmm4
@@ -1773,8 +1772,8 @@ SYM_FUNC_START_LOCAL(_key_expansion_256a)
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_128)
 SYM_FUNC_END(_key_expansion_256a)
+SYM_FUNC_END_ALIAS(_key_expansion_128)
 
 SYM_FUNC_START_LOCAL(_key_expansion_192a)
pshufd $0b01010101, %xmm1, %xmm1
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 9a53a06e5a3e..4911b1c61aa8 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -26,7 +26,7 @@
  * Output:
  * rax original destination
  */
-ENTRY(__memcpy)
+SYM_FUNC_START_ALIAS(__memcpy)
 ENTRY(memcpy)
ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
  "jmp memcpy_erms", X86_FEATURE_ERMS
@@ -40,7 +40,7 @@ ENTRY(memcpy)
rep movsb
ret
 ENDPROC(memcpy)
-ENDPROC(__memcpy)
+SYM_FUNC_END_ALIAS(__memcpy)
 EXPORT_SYMBOL(memcpy)
 EXPORT_SYMBOL(__memcpy)
 
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index bbec69d8223b..50c1648311b3 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -26,7 +26,7 @@
  */
 .weak memmove
 
-ENTRY(memmove)
+SYM_FUNC_START_ALIAS(memmove)
 ENTRY(__memmove)
 
/* Handle more 32 bytes in loop */
@@ -208,6 +208,6 @@ ENTRY(__memmove)
 13:
retq
 ENDPROC(__memmove)
-ENDPROC(memmove)
+SYM_FUNC_END_ALIAS(memmove)
 EXPORT_SYMBOL(__memmove)
 EXPORT_SYMBOL(memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 9bc861c71e75..927ac44d34aa 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -19,7 +19,7 @@
  *
  * rax   original destination
  */
-ENTRY(memset)
+SYM_FUNC_START_ALIAS(memset)
 ENTRY(__memset)
/*
 * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
@@ -43,8 +43,8 @@ ENTRY(__memset)
rep stosb
movq %r9,%rax
ret
-ENDPROC(memset)
 ENDPROC(__memset)
+SYM_FUNC_END_ALIAS(memset)
 EXPORT_SYMBOL(memset)
 EXPORT_SYMBOL(__memset)
 
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 417b339e5c8e..e8f6f482bb20 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -164,13 +164,13 @@ ENDPROC(xen_sysenter_target)
 
 #else /* !CONFIG_IA32_EMULATION */
 
-ENTRY(xen_syscall32_target)
+SYM_FUNC_START_ALIAS(xen_syscall32_target)
 ENTRY(xen_sysenter_target)
lea 16(%rsp), %rsp  /* strip %rcx, %r11 */
mov $-ENOSYS, %rax
pushq $0
jmp hypercall_iret
-ENDPROC(xen_syscall32_target)
 ENDPROC(xen_sysenter_target)
+SYM_FUNC_END_ALIAS(xen_syscall32_target)
 
 #endif /* CONFIG_IA32_EMULATION */
-- 
2.16.3



[PATCH v6 07/28] x86/asm/crypto: annotate local functions

2018-05-18 Thread Jiri Slaby
Use the newly added SYM_FUNC_START_LOCAL to annotate starts of all
functions which do not have ".globl" annotation, but their ends are
annotated by ENDPROC. This is needed to balance ENDPROC for tools that
generate debuginfo.

To be symmetric, we also convert their ENDPROCs to the new SYM_FUNC_END.

Signed-off-by: Jiri Slaby 
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: 
Cc: 
---
 arch/x86/crypto/aesni-intel_asm.S| 49 
 arch/x86/crypto/camellia-aesni-avx-asm_64.S  | 20 ++--
 arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 20 ++--
 arch/x86/crypto/cast5-avx-x86_64-asm_64.S|  8 ++---
 arch/x86/crypto/cast6-avx-x86_64-asm_64.S|  8 ++---
 arch/x86/crypto/ghash-clmulni-intel_asm.S|  4 +--
 arch/x86/crypto/serpent-avx-x86_64-asm_64.S  |  8 ++---
 arch/x86/crypto/serpent-avx2-asm_64.S|  8 ++---
 arch/x86/crypto/twofish-avx-x86_64-asm_64.S  |  8 ++---
 9 files changed, 62 insertions(+), 71 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_asm.S 
b/arch/x86/crypto/aesni-intel_asm.S
index e762ef417562..b482ac1a1fb3 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1763,7 +1763,7 @@ ENDPROC(aesni_gcm_finalize)
 
 .align 4
 _key_expansion_128:
-_key_expansion_256a:
+SYM_FUNC_START_LOCAL(_key_expansion_256a)
pshufd $0b11111111, %xmm1, %xmm1
shufps $0b00010001, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1774,10 +1774,9 @@ _key_expansion_256a:
add $0x10, TKEYP
ret
 ENDPROC(_key_expansion_128)
-ENDPROC(_key_expansion_256a)
+SYM_FUNC_END(_key_expansion_256a)
 
-.align 4
-_key_expansion_192a:
+SYM_FUNC_START_LOCAL(_key_expansion_192a)
pshufd $0b01010101, %xmm1, %xmm1
shufps $0b00010001, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1799,10 +1798,9 @@ _key_expansion_192a:
movaps %xmm1, 0x10(TKEYP)
add $0x20, TKEYP
ret
-ENDPROC(_key_expansion_192a)
+SYM_FUNC_END(_key_expansion_192a)
 
-.align 4
-_key_expansion_192b:
+SYM_FUNC_START_LOCAL(_key_expansion_192b)
pshufd $0b01010101, %xmm1, %xmm1
shufps $0b00010001, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1819,10 +1817,9 @@ _key_expansion_192b:
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_192b)
+SYM_FUNC_END(_key_expansion_192b)
 
-.align 4
-_key_expansion_256b:
+SYM_FUNC_START_LOCAL(_key_expansion_256b)
pshufd $0b10101010, %xmm1, %xmm1
shufps $0b00010001, %xmm2, %xmm4
pxor %xmm4, %xmm2
@@ -1832,7 +1829,7 @@ _key_expansion_256b:
movaps %xmm2, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_256b)
+SYM_FUNC_END(_key_expansion_256b)
 
 /*
  * int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
@@ -1985,8 +1982,7 @@ ENDPROC(aesni_enc)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_enc1:
+SYM_FUNC_START_LOCAL(_aesni_enc1)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE # round 0
@@ -2029,7 +2025,7 @@ _aesni_enc1:
movaps 0x70(TKEYP), KEY
AESENCLAST KEY STATE
ret
-ENDPROC(_aesni_enc1)
+SYM_FUNC_END(_aesni_enc1)
 
 /*
  * _aesni_enc4:internal ABI
@@ -2049,8 +2045,7 @@ ENDPROC(_aesni_enc1)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_enc4:
+SYM_FUNC_START_LOCAL(_aesni_enc4)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE1# round 0
@@ -2138,7 +2133,7 @@ _aesni_enc4:
AESENCLAST KEY STATE3
AESENCLAST KEY STATE4
ret
-ENDPROC(_aesni_enc4)
+SYM_FUNC_END(_aesni_enc4)
 
 /*
  * void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
@@ -2177,8 +2172,7 @@ ENDPROC(aesni_dec)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_dec1:
+SYM_FUNC_START_LOCAL(_aesni_dec1)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE # round 0
@@ -2221,7 +2215,7 @@ _aesni_dec1:
movaps 0x70(TKEYP), KEY
AESDECLAST KEY STATE
ret
-ENDPROC(_aesni_dec1)
+SYM_FUNC_END(_aesni_dec1)
 
 /*
  * _aesni_dec4:internal ABI
@@ -2241,8 +2235,7 @@ ENDPROC(_aesni_dec1)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_dec4:
+SYM_FUNC_START_LOCAL(_aesni_dec4)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE1# round 0
@@ -2330,7 +2323,7 @@ _aesni_dec4:
AESDECLAST KEY STATE3
AESDECLAST KEY STATE4
ret
-ENDPROC(_aesni_dec4)
+SYM_FUNC_END(_aesni_dec4)
 
 /*
  * void aesni_ecb_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
@@ -2608,8 +2601,7 @@ ENDPROC(aesni_cbc_dec)
  * INC:== 1, in little endian
  * BSWAP_MASK == endian swapping mask
  */
-.align 4
-_aesni_inc_init:
+SYM_FUNC_START_LOCAL(_aesni_inc_init)
movaps .Lbswap_mask, BSW

[PATCH v8 24/28] x86_64/asm: change all ENTRY+ENDPROC to SYM_FUNC_*

2019-08-08 Thread Jiri Slaby
These are all functions which are invoked from elsewhere, so we annotate
them as global using the new SYM_FUNC_START. And their ENDPROCs by
SYM_FUNC_END.

And make sure ENTRY/ENDPROC is not defined on X86_64, given these were
the last users.

Signed-off-by: Jiri Slaby 
Reviewed-by: Rafael J. Wysocki  [hibernate]
Reviewed-by: Boris Ostrovsky  [xen bits]
Cc: "H. Peter Anvin" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: x...@kernel.org
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: "Rafael J. Wysocki" 
Cc: Len Brown 
Cc: Pavel Machek 
Cc: Matt Fleming 
Cc: Ard Biesheuvel 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: linux-crypto@vger.kernel.org
Cc: linux...@vger.kernel.org
Cc: linux-...@vger.kernel.org
Cc: xen-de...@lists.xenproject.org
---
 arch/x86/boot/compressed/efi_thunk_64.S  |  4 +-
 arch/x86/boot/compressed/head_64.S   | 16 +++---
 arch/x86/boot/compressed/mem_encrypt.S   |  8 +--
 arch/x86/crypto/aegis128-aesni-asm.S | 28 -
 arch/x86/crypto/aes_ctrby8_avx-x86_64.S  | 12 ++--
 arch/x86/crypto/aesni-intel_asm.S| 60 ++--
 arch/x86/crypto/aesni-intel_avx-x86_64.S | 32 +--
 arch/x86/crypto/blowfish-x86_64-asm_64.S | 16 +++---
 arch/x86/crypto/camellia-aesni-avx-asm_64.S  | 24 
 arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 24 
 arch/x86/crypto/camellia-x86_64-asm_64.S | 16 +++---
 arch/x86/crypto/cast5-avx-x86_64-asm_64.S| 16 +++---
 arch/x86/crypto/cast6-avx-x86_64-asm_64.S| 24 
 arch/x86/crypto/chacha-avx2-x86_64.S | 12 ++--
 arch/x86/crypto/chacha-avx512vl-x86_64.S | 12 ++--
 arch/x86/crypto/chacha-ssse3-x86_64.S| 12 ++--
 arch/x86/crypto/crc32-pclmul_asm.S   |  4 +-
 arch/x86/crypto/crc32c-pcl-intel-asm_64.S|  4 +-
 arch/x86/crypto/crct10dif-pcl-asm_64.S   |  4 +-
 arch/x86/crypto/des3_ede-asm_64.S|  8 +--
 arch/x86/crypto/ghash-clmulni-intel_asm.S|  8 +--
 arch/x86/crypto/nh-avx2-x86_64.S |  4 +-
 arch/x86/crypto/nh-sse2-x86_64.S |  4 +-
 arch/x86/crypto/poly1305-avx2-x86_64.S   |  4 +-
 arch/x86/crypto/poly1305-sse2-x86_64.S   |  8 +--
 arch/x86/crypto/serpent-avx-x86_64-asm_64.S  | 24 
 arch/x86/crypto/serpent-avx2-asm_64.S| 24 
 arch/x86/crypto/serpent-sse2-x86_64-asm_64.S |  8 +--
 arch/x86/crypto/sha1_avx2_x86_64_asm.S   |  4 +-
 arch/x86/crypto/sha1_ni_asm.S|  4 +-
 arch/x86/crypto/sha1_ssse3_asm.S |  4 +-
 arch/x86/crypto/sha256-avx-asm.S |  4 +-
 arch/x86/crypto/sha256-avx2-asm.S|  4 +-
 arch/x86/crypto/sha256-ssse3-asm.S   |  4 +-
 arch/x86/crypto/sha256_ni_asm.S  |  4 +-
 arch/x86/crypto/sha512-avx-asm.S |  4 +-
 arch/x86/crypto/sha512-avx2-asm.S|  4 +-
 arch/x86/crypto/sha512-ssse3-asm.S   |  4 +-
 arch/x86/crypto/twofish-avx-x86_64-asm_64.S  | 24 
 arch/x86/crypto/twofish-x86_64-asm_64-3way.S |  8 +--
 arch/x86/crypto/twofish-x86_64-asm_64.S  |  8 +--
 arch/x86/entry/entry_64.S| 10 ++--
 arch/x86/entry/entry_64_compat.S |  4 +-
 arch/x86/kernel/acpi/wakeup_64.S |  8 +--
 arch/x86/kernel/ftrace_64.S  | 20 +++
 arch/x86/kernel/irqflags.S   |  8 +--
 arch/x86/kvm/vmx/vmenter.S   | 12 ++--
 arch/x86/lib/checksum_32.S   |  8 +--
 arch/x86/lib/clear_page_64.S | 12 ++--
 arch/x86/lib/cmpxchg16b_emu.S|  4 +-
 arch/x86/lib/cmpxchg8b_emu.S |  4 +-
 arch/x86/lib/copy_page_64.S  |  4 +-
 arch/x86/lib/copy_user_64.S  | 16 +++---
 arch/x86/lib/csum-copy_64.S  |  4 +-
 arch/x86/lib/getuser.S   | 16 +++---
 arch/x86/lib/hweight.S   |  8 +--
 arch/x86/lib/iomap_copy_64.S |  4 +-
 arch/x86/lib/memcpy_64.S |  4 +-
 arch/x86/lib/memmove_64.S|  4 +-
 arch/x86/lib/memset_64.S |  4 +-
 arch/x86/lib/msr-reg.S   |  8 +--
 arch/x86/lib/putuser.S   | 16 +++---
 arch/x86/lib/retpoline.S |  4 +-
 arch/x86/mm/mem_encrypt_boot.S   |  8 +--
 arch/x86/platform/efi/efi_stub_64.S  |  4 +-
 arch/x86/platform/efi/efi_thunk_64.S |  4 +-
 arch/x86/power/hibernate_asm_64.S|  8 +--
 arch/x86/xen/xen-asm.S   | 28 -
 arch/x86/xen/xen-asm_64.S| 16 +++---
 include/linux/linkage.h  |  4 ++
 70 files changed, 379 insertions(+), 375 deletions(-)

diff --git a/arch/x86/boot/compressed/efi_thunk_64.S 
b/arch/x86/boot/compressed/efi_thunk_64.S
index 31312070db22..593913692d16 100644
--- a/arch/x86/boot/compressed/efi_thunk_64.S
+++ b/arch/x86/boot/compressed/efi_thunk_64.S
@@ -23,7 +23,7

[PATCH v8 27/28] x86_32/asm: change all ENTRY+ENDPROC to SYM_FUNC_*

2019-08-08 Thread Jiri Slaby
These are all functions which are invoked from elsewhere, so we annotate
them as global using the new SYM_FUNC_START. And their ENDPROCs by
SYM_FUNC_END.

Now, we can finally force ENTRY/ENDPROC to be undefined on X86.

Signed-off-by: Jiri Slaby 
Cc: "H. Peter Anvin" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: x...@kernel.org
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Bill Metzenthen 
Cc: Matt Fleming 
Cc: Ard Biesheuvel 
Cc: linux-crypto@vger.kernel.org
Cc: linux-...@vger.kernel.org
---
 arch/x86/boot/compressed/efi_stub_32.S |  4 +--
 arch/x86/boot/compressed/head_32.S | 12 
 arch/x86/crypto/serpent-sse2-i586-asm_32.S |  8 +++---
 arch/x86/crypto/twofish-i586-asm_32.S  |  8 +++---
 arch/x86/entry/entry_32.S  | 24 
 arch/x86/kernel/head_32.S  | 16 +--
 arch/x86/lib/atomic64_386_32.S |  4 +--
 arch/x86/lib/atomic64_cx8_32.S | 32 +++---
 arch/x86/lib/checksum_32.S |  8 +++---
 arch/x86/math-emu/div_Xsig.S   |  4 +--
 arch/x86/math-emu/div_small.S  |  4 +--
 arch/x86/math-emu/mul_Xsig.S   | 12 
 arch/x86/math-emu/polynom_Xsig.S   |  4 +--
 arch/x86/math-emu/reg_norm.S   |  8 +++---
 arch/x86/math-emu/reg_round.S  |  4 +--
 arch/x86/math-emu/reg_u_add.S  |  4 +--
 arch/x86/math-emu/reg_u_div.S  |  4 +--
 arch/x86/math-emu/reg_u_mul.S  |  4 +--
 arch/x86/math-emu/reg_u_sub.S  |  4 +--
 arch/x86/math-emu/round_Xsig.S |  8 +++---
 arch/x86/math-emu/shr_Xsig.S   |  4 +--
 arch/x86/math-emu/wm_shrx.S|  8 +++---
 arch/x86/math-emu/wm_sqrt.S|  4 +--
 arch/x86/platform/efi/efi_stub_32.S|  4 +--
 arch/x86/power/hibernate_asm_32.S  |  8 +++---
 include/linux/linkage.h|  8 ++
 26 files changed, 104 insertions(+), 108 deletions(-)

diff --git a/arch/x86/boot/compressed/efi_stub_32.S 
b/arch/x86/boot/compressed/efi_stub_32.S
index 257e341fd2c8..ed6c351d34ed 100644
--- a/arch/x86/boot/compressed/efi_stub_32.S
+++ b/arch/x86/boot/compressed/efi_stub_32.S
@@ -24,7 +24,7 @@
  */
 
 .text
-ENTRY(efi_call_phys)
+SYM_FUNC_START(efi_call_phys)
/*
 * 0. The function can only be called in Linux kernel. So CS has been
 * set to 0x0010, DS and SS have been set to 0x0018. In EFI, I found
@@ -77,7 +77,7 @@ ENTRY(efi_call_phys)
movlsaved_return_addr(%edx), %ecx
pushl   %ecx
ret
-ENDPROC(efi_call_phys)
+SYM_FUNC_END(efi_call_phys)
 .previous
 
 .data
diff --git a/arch/x86/boot/compressed/head_32.S 
b/arch/x86/boot/compressed/head_32.S
index 7e8ab0bb6968..3fa36496af12 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -61,7 +61,7 @@
.hidden _egot
 
__HEAD
-ENTRY(startup_32)
+SYM_FUNC_START(startup_32)
cld
/*
 * Test KEEP_SEGMENTS flag to see if the bootloader is asking
@@ -142,14 +142,14 @@ ENTRY(startup_32)
  */
lealrelocated(%ebx), %eax
jmp *%eax
-ENDPROC(startup_32)
+SYM_FUNC_END(startup_32)
 
 #ifdef CONFIG_EFI_STUB
 /*
  * We don't need the return address, so set up the stack so efi_main() can find
  * its arguments.
  */
-ENTRY(efi_pe_entry)
+SYM_FUNC_START(efi_pe_entry)
add $0x4, %esp
 
call1f
@@ -174,9 +174,9 @@ ENTRY(efi_pe_entry)
pushl   %eax
pushl   %ecx
jmp 2f  /* Skip efi_config initialization */
-ENDPROC(efi_pe_entry)
+SYM_FUNC_END(efi_pe_entry)
 
-ENTRY(efi32_stub_entry)
+SYM_FUNC_START(efi32_stub_entry)
add $0x4, %esp
popl%ecx
popl%edx
@@ -205,7 +205,7 @@ fail:
movlBP_code32_start(%esi), %eax
lealstartup_32(%eax), %eax
jmp *%eax
-ENDPROC(efi32_stub_entry)
+SYM_FUNC_END(efi32_stub_entry)
 #endif
 
.text
diff --git a/arch/x86/crypto/serpent-sse2-i586-asm_32.S 
b/arch/x86/crypto/serpent-sse2-i586-asm_32.S
index e5c4a4690ca9..6379b99cb722 100644
--- a/arch/x86/crypto/serpent-sse2-i586-asm_32.S
+++ b/arch/x86/crypto/serpent-sse2-i586-asm_32.S
@@ -497,7 +497,7 @@
pxor t0,x3; \
movdqu x3,  (3*4*4)(out);
 
-ENTRY(__serpent_enc_blk_4way)
+SYM_FUNC_START(__serpent_enc_blk_4way)
/* input:
 *  arg_ctx(%esp): ctx, CTX
 *  arg_dst(%esp): dst
@@ -559,9 +559,9 @@ ENTRY(__serpent_enc_blk_4way)
xor_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE);
 
ret;
-ENDPROC(__serpent_enc_blk_4way)
+SYM_FUNC_END(__serpent_enc_blk_4way)
 
-ENTRY(serpent_dec_blk_4way)
+SYM_FUNC_START(serpent_dec_blk_4way)
/* input:
 *  arg_ctx(%esp): ctx, CTX
 *  arg_dst(%esp): dst
@@ -613,4 +613,4 @@ ENTRY(serpent_dec_blk_4way)
write_blocks(%eax, RC, RD, RB, RE, RT0, RT1

[PATCH v8 09/28] x86/asm: annotate aliases

2019-08-08 Thread Jiri Slaby
_key_expansion_128 is an alias to _key_expansion_256a, __memcpy to
memcpy, xen_syscall32_target to xen_sysenter_target, and so on. Annotate
them all using the new SYM_FUNC_START_ALIAS, SYM_FUNC_START_LOCAL_ALIAS,
and SYM_FUNC_END_ALIAS. This will make the tools generating the
debuginfo happy.

Signed-off-by: Jiri Slaby 
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Reviewed-by: Juergen Gross  [xen parts]
Cc: 
Cc: 
---
 arch/x86/crypto/aesni-intel_asm.S | 5 ++---
 arch/x86/lib/memcpy_64.S  | 4 ++--
 arch/x86/lib/memmove_64.S | 4 ++--
 arch/x86/lib/memset_64.S  | 4 ++--
 arch/x86/xen/xen-asm_64.S | 4 ++--
 5 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_asm.S 
b/arch/x86/crypto/aesni-intel_asm.S
index e0a5fb462a0a..9d8d5f2296e1 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1757,8 +1757,7 @@ ENDPROC(aesni_gcm_finalize)
 #endif
 
 
-.align 4
-_key_expansion_128:
+SYM_FUNC_START_LOCAL_ALIAS(_key_expansion_128)
 SYM_FUNC_START_LOCAL(_key_expansion_256a)
pshufd $0b11111111, %xmm1, %xmm1
shufps $0b00010001, %xmm0, %xmm4
@@ -1769,8 +1768,8 @@ SYM_FUNC_START_LOCAL(_key_expansion_256a)
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_128)
 SYM_FUNC_END(_key_expansion_256a)
+SYM_FUNC_END_ALIAS(_key_expansion_128)
 
 SYM_FUNC_START_LOCAL(_key_expansion_192a)
pshufd $0b01010101, %xmm1, %xmm1
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 92748660ba51..57a64266ba69 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -28,7 +28,7 @@
  * Output:
  * rax original destination
  */
-ENTRY(__memcpy)
+SYM_FUNC_START_ALIAS(__memcpy)
 ENTRY(memcpy)
ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
  "jmp memcpy_erms", X86_FEATURE_ERMS
@@ -42,7 +42,7 @@ ENTRY(memcpy)
rep movsb
ret
 ENDPROC(memcpy)
-ENDPROC(__memcpy)
+SYM_FUNC_END_ALIAS(__memcpy)
 EXPORT_SYMBOL(memcpy)
 EXPORT_SYMBOL(__memcpy)
 
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index bbec69d8223b..50c1648311b3 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -26,7 +26,7 @@
  */
 .weak memmove
 
-ENTRY(memmove)
+SYM_FUNC_START_ALIAS(memmove)
 ENTRY(__memmove)
 
/* Handle more 32 bytes in loop */
@@ -208,6 +208,6 @@ ENTRY(__memmove)
 13:
retq
 ENDPROC(__memmove)
-ENDPROC(memmove)
+SYM_FUNC_END_ALIAS(memmove)
 EXPORT_SYMBOL(__memmove)
 EXPORT_SYMBOL(memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 9bc861c71e75..927ac44d34aa 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -19,7 +19,7 @@
  *
  * rax   original destination
  */
-ENTRY(memset)
+SYM_FUNC_START_ALIAS(memset)
 ENTRY(__memset)
/*
 * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
@@ -43,8 +43,8 @@ ENTRY(__memset)
rep stosb
movq %r9,%rax
ret
-ENDPROC(memset)
 ENDPROC(__memset)
+SYM_FUNC_END_ALIAS(memset)
 EXPORT_SYMBOL(memset)
 EXPORT_SYMBOL(__memset)
 
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index ebf610b49c06..45c1249f370d 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -167,13 +167,13 @@ ENDPROC(xen_sysenter_target)
 
 #else /* !CONFIG_IA32_EMULATION */
 
-ENTRY(xen_syscall32_target)
+SYM_FUNC_START_ALIAS(xen_syscall32_target)
 ENTRY(xen_sysenter_target)
lea 16(%rsp), %rsp  /* strip %rcx, %r11 */
mov $-ENOSYS, %rax
pushq $0
jmp hypercall_iret
-ENDPROC(xen_syscall32_target)
 ENDPROC(xen_sysenter_target)
+SYM_FUNC_END_ALIAS(xen_syscall32_target)
 
 #endif /* CONFIG_IA32_EMULATION */
-- 
2.22.0



[PATCH v8 06/28] x86/asm/crypto: annotate local functions

2019-08-08 Thread Jiri Slaby
Use the newly added SYM_FUNC_START_LOCAL to annotate starts of all
functions which do not have ".globl" annotation, but their ends are
annotated by ENDPROC. This is needed to balance ENDPROC for tools that
generate debuginfo.

To be symmetric, we also convert their ENDPROCs to the new SYM_FUNC_END.

Signed-off-by: Jiri Slaby 
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: 
Cc: 
---
 arch/x86/crypto/aegis128-aesni-asm.S |  8 ++--
 arch/x86/crypto/aesni-intel_asm.S| 49 
 arch/x86/crypto/camellia-aesni-avx-asm_64.S  | 20 
 arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 20 
 arch/x86/crypto/cast5-avx-x86_64-asm_64.S|  8 ++--
 arch/x86/crypto/cast6-avx-x86_64-asm_64.S|  8 ++--
 arch/x86/crypto/chacha-ssse3-x86_64.S|  4 +-
 arch/x86/crypto/ghash-clmulni-intel_asm.S|  4 +-
 arch/x86/crypto/serpent-avx-x86_64-asm_64.S  |  8 ++--
 arch/x86/crypto/serpent-avx2-asm_64.S|  8 ++--
 arch/x86/crypto/twofish-avx-x86_64-asm_64.S  |  8 ++--
 11 files changed, 68 insertions(+), 77 deletions(-)

diff --git a/arch/x86/crypto/aegis128-aesni-asm.S 
b/arch/x86/crypto/aegis128-aesni-asm.S
index 4434607e366d..b7026fdef4ff 100644
--- a/arch/x86/crypto/aegis128-aesni-asm.S
+++ b/arch/x86/crypto/aegis128-aesni-asm.S
@@ -71,7 +71,7 @@
  *   %r8
  *   %r9
  */
-__load_partial:
+SYM_FUNC_START_LOCAL(__load_partial)
xor %r9d, %r9d
pxor MSG, MSG
 
@@ -123,7 +123,7 @@ __load_partial:
 
 .Lld_partial_8:
ret
-ENDPROC(__load_partial)
+SYM_FUNC_END(__load_partial)
 
 /*
  * __store_partial: internal ABI
@@ -137,7 +137,7 @@ ENDPROC(__load_partial)
  *   %r9
  *   %r10
  */
-__store_partial:
+SYM_FUNC_START_LOCAL(__store_partial)
mov LEN, %r8
mov DST, %r9
 
@@ -181,7 +181,7 @@ __store_partial:
 
 .Lst_partial_1:
ret
-ENDPROC(__store_partial)
+SYM_FUNC_END(__store_partial)
 
 /*
  * void crypto_aegis128_aesni_init(void *state, const void *key, const void 
*iv);
diff --git a/arch/x86/crypto/aesni-intel_asm.S 
b/arch/x86/crypto/aesni-intel_asm.S
index e40bdf024ba7..e0a5fb462a0a 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1759,7 +1759,7 @@ ENDPROC(aesni_gcm_finalize)
 
 .align 4
 _key_expansion_128:
-_key_expansion_256a:
+SYM_FUNC_START_LOCAL(_key_expansion_256a)
pshufd $0b11111111, %xmm1, %xmm1
shufps $0b00010001, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1770,10 +1770,9 @@ _key_expansion_256a:
add $0x10, TKEYP
ret
 ENDPROC(_key_expansion_128)
-ENDPROC(_key_expansion_256a)
+SYM_FUNC_END(_key_expansion_256a)
 
-.align 4
-_key_expansion_192a:
+SYM_FUNC_START_LOCAL(_key_expansion_192a)
pshufd $0b01010101, %xmm1, %xmm1
shufps $0b00010001, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1795,10 +1794,9 @@ _key_expansion_192a:
movaps %xmm1, 0x10(TKEYP)
add $0x20, TKEYP
ret
-ENDPROC(_key_expansion_192a)
+SYM_FUNC_END(_key_expansion_192a)
 
-.align 4
-_key_expansion_192b:
+SYM_FUNC_START_LOCAL(_key_expansion_192b)
pshufd $0b01010101, %xmm1, %xmm1
shufps $0b00010001, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1815,10 +1813,9 @@ _key_expansion_192b:
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_192b)
+SYM_FUNC_END(_key_expansion_192b)
 
-.align 4
-_key_expansion_256b:
+SYM_FUNC_START_LOCAL(_key_expansion_256b)
pshufd $0b10101010, %xmm1, %xmm1
shufps $0b00010001, %xmm2, %xmm4
pxor %xmm4, %xmm2
@@ -1828,7 +1825,7 @@ _key_expansion_256b:
movaps %xmm2, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_256b)
+SYM_FUNC_END(_key_expansion_256b)
 
 /*
  * int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
@@ -1981,8 +1978,7 @@ ENDPROC(aesni_enc)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_enc1:
+SYM_FUNC_START_LOCAL(_aesni_enc1)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE # round 0
@@ -2025,7 +2021,7 @@ _aesni_enc1:
movaps 0x70(TKEYP), KEY
AESENCLAST KEY STATE
ret
-ENDPROC(_aesni_enc1)
+SYM_FUNC_END(_aesni_enc1)
 
 /*
  * _aesni_enc4:internal ABI
@@ -2045,8 +2041,7 @@ ENDPROC(_aesni_enc1)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_enc4:
+SYM_FUNC_START_LOCAL(_aesni_enc4)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE1# round 0
@@ -2134,7 +2129,7 @@ _aesni_enc4:
AESENCLAST KEY STATE3
AESENCLAST KEY STATE4
ret
-ENDPROC(_aesni_enc4)
+SYM_FUNC_END(_aesni_enc4)
 
 /*
  * void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
@@ -2173,8 +2168,7 @@ ENDPROC(aesni_dec)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_dec1:
+SYM_FUNC_START_LOCAL(_aesni_dec1)
movaps (KEYP), KEY  # key
mo

[PATCH v7 07/28] x86/asm/crypto: annotate local functions

2019-01-30 Thread Jiri Slaby
Use the newly added SYM_FUNC_START_LOCAL to annotate starts of all
functions which do not have ".globl" annotation, but their ends are
annotated by ENDPROC. This is needed to balance ENDPROC for tools that
generate debuginfo.

To be symmetric, we also convert their ENDPROCs to the new SYM_FUNC_END.

Signed-off-by: Jiri Slaby 
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: 
Cc: 
---
 arch/x86/crypto/aegis128-aesni-asm.S |  8 ++--
 arch/x86/crypto/aegis128l-aesni-asm.S|  8 ++--
 arch/x86/crypto/aegis256-aesni-asm.S |  8 ++--
 arch/x86/crypto/aesni-intel_asm.S| 49 
 arch/x86/crypto/camellia-aesni-avx-asm_64.S  | 20 
 arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 20 
 arch/x86/crypto/cast5-avx-x86_64-asm_64.S|  8 ++--
 arch/x86/crypto/cast6-avx-x86_64-asm_64.S|  8 ++--
 arch/x86/crypto/chacha-ssse3-x86_64.S|  4 +-
 arch/x86/crypto/ghash-clmulni-intel_asm.S|  4 +-
 arch/x86/crypto/morus1280-avx2-asm.S | 16 +++
 arch/x86/crypto/morus1280-sse2-asm.S | 16 +++
 arch/x86/crypto/morus640-sse2-asm.S  | 16 +++
 arch/x86/crypto/serpent-avx-x86_64-asm_64.S  |  8 ++--
 arch/x86/crypto/serpent-avx2-asm_64.S|  8 ++--
 arch/x86/crypto/twofish-avx-x86_64-asm_64.S  |  8 ++--
 16 files changed, 100 insertions(+), 109 deletions(-)

diff --git a/arch/x86/crypto/aegis128-aesni-asm.S 
b/arch/x86/crypto/aegis128-aesni-asm.S
index 5f7e43d4f64a..87b94664296a 100644
--- a/arch/x86/crypto/aegis128-aesni-asm.S
+++ b/arch/x86/crypto/aegis128-aesni-asm.S
@@ -74,7 +74,7 @@
  *   %r8
  *   %r9
  */
-__load_partial:
+SYM_FUNC_START_LOCAL(__load_partial)
xor %r9d, %r9d
pxor MSG, MSG
 
@@ -126,7 +126,7 @@ __load_partial:
 
 .Lld_partial_8:
ret
-ENDPROC(__load_partial)
+SYM_FUNC_END(__load_partial)
 
 /*
  * __store_partial: internal ABI
@@ -140,7 +140,7 @@ ENDPROC(__load_partial)
  *   %r9
  *   %r10
  */
-__store_partial:
+SYM_FUNC_START_LOCAL(__store_partial)
mov LEN, %r8
mov DST, %r9
 
@@ -184,7 +184,7 @@ __store_partial:
 
 .Lst_partial_1:
ret
-ENDPROC(__store_partial)
+SYM_FUNC_END(__store_partial)
 
 /*
  * void crypto_aegis128_aesni_init(void *state, const void *key, const void 
*iv);
diff --git a/arch/x86/crypto/aegis128l-aesni-asm.S 
b/arch/x86/crypto/aegis128l-aesni-asm.S
index 491dd61c845c..9f79a2c6752a 100644
--- a/arch/x86/crypto/aegis128l-aesni-asm.S
+++ b/arch/x86/crypto/aegis128l-aesni-asm.S
@@ -65,7 +65,7 @@
  *   %r8
  *   %r9
  */
-__load_partial:
+SYM_FUNC_START_LOCAL(__load_partial)
xor %r9d, %r9d
pxor MSG0, MSG0
pxor MSG1, MSG1
@@ -126,7 +126,7 @@ __load_partial:
 
 .Lld_partial_16:
ret
-ENDPROC(__load_partial)
+SYM_FUNC_END(__load_partial)
 
 /*
  * __store_partial: internal ABI
@@ -141,7 +141,7 @@ ENDPROC(__load_partial)
  *   %r9
  *   %r10
  */
-__store_partial:
+SYM_FUNC_START_LOCAL(__store_partial)
mov LEN, %r8
mov DST, %r9
 
@@ -195,7 +195,7 @@ __store_partial:
 
 .Lst_partial_1:
ret
-ENDPROC(__store_partial)
+SYM_FUNC_END(__store_partial)
 
 .macro update
movdqa STATE7, T0
diff --git a/arch/x86/crypto/aegis256-aesni-asm.S 
b/arch/x86/crypto/aegis256-aesni-asm.S
index 8870c7c5d9a4..e974a876c627 100644
--- a/arch/x86/crypto/aegis256-aesni-asm.S
+++ b/arch/x86/crypto/aegis256-aesni-asm.S
@@ -58,7 +58,7 @@
  *   %r8
  *   %r9
  */
-__load_partial:
+SYM_FUNC_START_LOCAL(__load_partial)
xor %r9d, %r9d
pxor MSG, MSG
 
@@ -110,7 +110,7 @@ __load_partial:
 
 .Lld_partial_8:
ret
-ENDPROC(__load_partial)
+SYM_FUNC_END(__load_partial)
 
 /*
  * __store_partial: internal ABI
@@ -124,7 +124,7 @@ ENDPROC(__load_partial)
  *   %r9
  *   %r10
  */
-__store_partial:
+SYM_FUNC_START_LOCAL(__store_partial)
mov LEN, %r8
mov DST, %r9
 
@@ -168,7 +168,7 @@ __store_partial:
 
 .Lst_partial_1:
ret
-ENDPROC(__store_partial)
+SYM_FUNC_END(__store_partial)
 
 .macro update
movdqa STATE5, T0
diff --git a/arch/x86/crypto/aesni-intel_asm.S 
b/arch/x86/crypto/aesni-intel_asm.S
index cb2deb61c5d9..6c349e844581 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1763,7 +1763,7 @@ ENDPROC(aesni_gcm_finalize)
 
 .align 4
 _key_expansion_128:
-_key_expansion_256a:
+SYM_FUNC_START_LOCAL(_key_expansion_256a)
pshufd $0b11111111, %xmm1, %xmm1
shufps $0b00010001, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1774,10 +1774,9 @@ _key_expansion_256a:
add $0x10, TKEYP
ret
 ENDPROC(_key_expansion_128)
-ENDPROC(_key_expansion_256a)
+SYM_FUNC_END(_key_expansion_256a)
 
-.align 4
-_key_expansion_192a:
+SYM_FUNC_START_LOCAL(_key_expansion_192a)
pshufd $0b01010101, %xmm1, %xmm1
shufps $0b00010001, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1799,10 +1798,9 @@ _key_expansion_192a:
movaps 

[PATCH v7 24/28] x86_64/asm: change all ENTRY+ENDPROC to SYM_FUNC_*

2019-01-30 Thread Jiri Slaby
These are all functions which are invoked from elsewhere, so we annotate
them as global using the new SYM_FUNC_START. And their ENDPROCs by
SYM_FUNC_END.

And make sure ENTRY/ENDPROC is not defined on X86_64, given these were
the last users.

Signed-off-by: Jiri Slaby 
Reviewed-by: Rafael J. Wysocki  [hibernate]
Reviewed-by: Boris Ostrovsky  [xen bits]
Cc: "H. Peter Anvin" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: x...@kernel.org
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: "Rafael J. Wysocki" 
Cc: Len Brown 
Cc: Pavel Machek 
Cc: Matt Fleming 
Cc: Ard Biesheuvel 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: linux-crypto@vger.kernel.org
Cc: linux...@vger.kernel.org
Cc: linux-...@vger.kernel.org
Cc: xen-de...@lists.xenproject.org
---
 arch/x86/boot/compressed/efi_thunk_64.S  |  4 +-
 arch/x86/boot/compressed/head_64.S   | 16 +++---
 arch/x86/boot/compressed/mem_encrypt.S   |  8 +--
 arch/x86/crypto/aegis128-aesni-asm.S | 28 -
 arch/x86/crypto/aegis128l-aesni-asm.S| 28 -
 arch/x86/crypto/aegis256-aesni-asm.S | 28 -
 arch/x86/crypto/aes-i586-asm_32.S|  8 +--
 arch/x86/crypto/aes-x86_64-asm_64.S  |  4 +-
 arch/x86/crypto/aes_ctrby8_avx-x86_64.S  | 12 ++--
 arch/x86/crypto/aesni-intel_asm.S| 60 ++--
 arch/x86/crypto/aesni-intel_avx-x86_64.S | 32 +--
 arch/x86/crypto/blowfish-x86_64-asm_64.S | 16 +++---
 arch/x86/crypto/camellia-aesni-avx-asm_64.S  | 24 
 arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 24 
 arch/x86/crypto/camellia-x86_64-asm_64.S | 16 +++---
 arch/x86/crypto/cast5-avx-x86_64-asm_64.S| 16 +++---
 arch/x86/crypto/cast6-avx-x86_64-asm_64.S| 24 
 arch/x86/crypto/chacha-avx2-x86_64.S | 12 ++--
 arch/x86/crypto/chacha-avx512vl-x86_64.S | 12 ++--
 arch/x86/crypto/chacha-ssse3-x86_64.S| 12 ++--
 arch/x86/crypto/crc32-pclmul_asm.S   |  4 +-
 arch/x86/crypto/crc32c-pcl-intel-asm_64.S|  4 +-
 arch/x86/crypto/crct10dif-pcl-asm_64.S   |  4 +-
 arch/x86/crypto/des3_ede-asm_64.S|  8 +--
 arch/x86/crypto/ghash-clmulni-intel_asm.S|  8 +--
 arch/x86/crypto/morus1280-avx2-asm.S | 28 -
 arch/x86/crypto/morus1280-sse2-asm.S | 28 -
 arch/x86/crypto/morus640-sse2-asm.S  | 28 -
 arch/x86/crypto/nh-avx2-x86_64.S |  4 +-
 arch/x86/crypto/nh-sse2-x86_64.S |  4 +-
 arch/x86/crypto/poly1305-avx2-x86_64.S   |  4 +-
 arch/x86/crypto/poly1305-sse2-x86_64.S   |  8 +--
 arch/x86/crypto/serpent-avx-x86_64-asm_64.S  | 24 
 arch/x86/crypto/serpent-avx2-asm_64.S| 24 
 arch/x86/crypto/serpent-sse2-x86_64-asm_64.S |  8 +--
 arch/x86/crypto/sha1_avx2_x86_64_asm.S   |  4 +-
 arch/x86/crypto/sha1_ni_asm.S|  4 +-
 arch/x86/crypto/sha1_ssse3_asm.S |  4 +-
 arch/x86/crypto/sha256-avx-asm.S |  4 +-
 arch/x86/crypto/sha256-avx2-asm.S|  4 +-
 arch/x86/crypto/sha256-ssse3-asm.S   |  4 +-
 arch/x86/crypto/sha256_ni_asm.S  |  4 +-
 arch/x86/crypto/sha512-avx-asm.S |  4 +-
 arch/x86/crypto/sha512-avx2-asm.S|  4 +-
 arch/x86/crypto/sha512-ssse3-asm.S   |  4 +-
 arch/x86/crypto/twofish-avx-x86_64-asm_64.S  | 24 
 arch/x86/crypto/twofish-x86_64-asm_64-3way.S |  8 +--
 arch/x86/crypto/twofish-x86_64-asm_64.S  |  8 +--
 arch/x86/entry/entry_64.S| 10 ++--
 arch/x86/entry/entry_64_compat.S |  4 +-
 arch/x86/kernel/acpi/wakeup_64.S |  8 +--
 arch/x86/kernel/ftrace_64.S  | 20 +++
 arch/x86/kernel/head_64.S| 12 ++--
 arch/x86/kernel/irqflags.S   |  8 +--
 arch/x86/kvm/vmx/vmenter.S   |  8 +--
 arch/x86/lib/checksum_32.S   |  8 +--
 arch/x86/lib/clear_page_64.S | 12 ++--
 arch/x86/lib/cmpxchg16b_emu.S|  4 +-
 arch/x86/lib/cmpxchg8b_emu.S |  4 +-
 arch/x86/lib/copy_page_64.S  |  4 +-
 arch/x86/lib/copy_user_64.S  | 16 +++---
 arch/x86/lib/csum-copy_64.S  |  4 +-
 arch/x86/lib/getuser.S   | 16 +++---
 arch/x86/lib/hweight.S   |  8 +--
 arch/x86/lib/iomap_copy_64.S |  4 +-
 arch/x86/lib/memcpy_64.S |  4 +-
 arch/x86/lib/memmove_64.S|  4 +-
 arch/x86/lib/memset_64.S |  4 +-
 arch/x86/lib/msr-reg.S   |  8 +--
 arch/x86/lib/putuser.S   | 16 +++---
 arch/x86/lib/retpoline.S |  4 +-
 arch/x86/lib/rwsem.S | 24 
 arch/x86/mm/mem_encrypt_boot.S   |  8 +--
 arch/x86/platform/efi/efi_stub_64.S  |  4 +-
 arch/x86/platform/efi/efi_thunk_64.S |  4 

[PATCH v7 27/28] x86_32/asm: change all ENTRY+ENDPROC to SYM_FUNC_*

2019-01-30 Thread Jiri Slaby
These are all functions which are invoked from elsewhere, so we annotate
them as global using the new SYM_FUNC_START. And their ENDPROCs by
SYM_FUNC_END.

Now, we can finally force ENTRY/ENDPROC to be undefined on X86.

Signed-off-by: Jiri Slaby 
Cc: "H. Peter Anvin" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: x...@kernel.org
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Bill Metzenthen 
Cc: Matt Fleming 
Cc: Ard Biesheuvel 
Cc: linux-crypto@vger.kernel.org
Cc: linux-...@vger.kernel.org
---
 arch/x86/boot/compressed/efi_stub_32.S |  4 +--
 arch/x86/boot/compressed/head_32.S | 12 
 arch/x86/crypto/serpent-sse2-i586-asm_32.S |  8 +++---
 arch/x86/crypto/twofish-i586-asm_32.S  |  8 +++---
 arch/x86/entry/entry_32.S  | 24 
 arch/x86/kernel/head_32.S  | 16 +--
 arch/x86/lib/atomic64_386_32.S |  4 +--
 arch/x86/lib/atomic64_cx8_32.S | 32 +++---
 arch/x86/lib/checksum_32.S |  8 +++---
 arch/x86/math-emu/div_Xsig.S   |  4 +--
 arch/x86/math-emu/div_small.S  |  4 +--
 arch/x86/math-emu/mul_Xsig.S   | 12 
 arch/x86/math-emu/polynom_Xsig.S   |  4 +--
 arch/x86/math-emu/reg_norm.S   |  8 +++---
 arch/x86/math-emu/reg_round.S  |  4 +--
 arch/x86/math-emu/reg_u_add.S  |  4 +--
 arch/x86/math-emu/reg_u_div.S  |  4 +--
 arch/x86/math-emu/reg_u_mul.S  |  4 +--
 arch/x86/math-emu/reg_u_sub.S  |  4 +--
 arch/x86/math-emu/round_Xsig.S |  8 +++---
 arch/x86/math-emu/shr_Xsig.S   |  4 +--
 arch/x86/math-emu/wm_shrx.S|  8 +++---
 arch/x86/math-emu/wm_sqrt.S|  4 +--
 arch/x86/platform/efi/efi_stub_32.S|  4 +--
 arch/x86/power/hibernate_asm_32.S  |  8 +++---
 include/linux/linkage.h|  8 ++
 26 files changed, 104 insertions(+), 108 deletions(-)

diff --git a/arch/x86/boot/compressed/efi_stub_32.S 
b/arch/x86/boot/compressed/efi_stub_32.S
index 257e341fd2c8..ed6c351d34ed 100644
--- a/arch/x86/boot/compressed/efi_stub_32.S
+++ b/arch/x86/boot/compressed/efi_stub_32.S
@@ -24,7 +24,7 @@
  */
 
 .text
-ENTRY(efi_call_phys)
+SYM_FUNC_START(efi_call_phys)
/*
 * 0. The function can only be called in Linux kernel. So CS has been
 * set to 0x0010, DS and SS have been set to 0x0018. In EFI, I found
@@ -77,7 +77,7 @@ ENTRY(efi_call_phys)
movlsaved_return_addr(%edx), %ecx
pushl   %ecx
ret
-ENDPROC(efi_call_phys)
+SYM_FUNC_END(efi_call_phys)
 .previous
 
 .data
diff --git a/arch/x86/boot/compressed/head_32.S 
b/arch/x86/boot/compressed/head_32.S
index 7e8ab0bb6968..3fa36496af12 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -61,7 +61,7 @@
.hidden _egot
 
__HEAD
-ENTRY(startup_32)
+SYM_FUNC_START(startup_32)
cld
/*
 * Test KEEP_SEGMENTS flag to see if the bootloader is asking
@@ -142,14 +142,14 @@ ENTRY(startup_32)
  */
lealrelocated(%ebx), %eax
jmp *%eax
-ENDPROC(startup_32)
+SYM_FUNC_END(startup_32)
 
 #ifdef CONFIG_EFI_STUB
 /*
  * We don't need the return address, so set up the stack so efi_main() can find
  * its arguments.
  */
-ENTRY(efi_pe_entry)
+SYM_FUNC_START(efi_pe_entry)
add $0x4, %esp
 
call1f
@@ -174,9 +174,9 @@ ENTRY(efi_pe_entry)
pushl   %eax
pushl   %ecx
jmp 2f  /* Skip efi_config initialization */
-ENDPROC(efi_pe_entry)
+SYM_FUNC_END(efi_pe_entry)
 
-ENTRY(efi32_stub_entry)
+SYM_FUNC_START(efi32_stub_entry)
add $0x4, %esp
popl%ecx
popl%edx
@@ -205,7 +205,7 @@ fail:
movlBP_code32_start(%esi), %eax
lealstartup_32(%eax), %eax
jmp *%eax
-ENDPROC(efi32_stub_entry)
+SYM_FUNC_END(efi32_stub_entry)
 #endif
 
.text
diff --git a/arch/x86/crypto/serpent-sse2-i586-asm_32.S 
b/arch/x86/crypto/serpent-sse2-i586-asm_32.S
index d348f1553a79..f3cebd3c6739 100644
--- a/arch/x86/crypto/serpent-sse2-i586-asm_32.S
+++ b/arch/x86/crypto/serpent-sse2-i586-asm_32.S
@@ -512,7 +512,7 @@
pxor t0,x3; \
movdqu x3,  (3*4*4)(out);
 
-ENTRY(__serpent_enc_blk_4way)
+SYM_FUNC_START(__serpent_enc_blk_4way)
/* input:
 *  arg_ctx(%esp): ctx, CTX
 *  arg_dst(%esp): dst
@@ -574,9 +574,9 @@ ENTRY(__serpent_enc_blk_4way)
xor_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE);
 
ret;
-ENDPROC(__serpent_enc_blk_4way)
+SYM_FUNC_END(__serpent_enc_blk_4way)
 
-ENTRY(serpent_dec_blk_4way)
+SYM_FUNC_START(serpent_dec_blk_4way)
/* input:
 *  arg_ctx(%esp): ctx, CTX
 *  arg_dst(%esp): dst
@@ -628,4 +628,4 @@ ENTRY(serpent_dec_blk_4way)
write_blocks(%eax, RC, RD, RB, RE, RT0, RT1

[PATCH v7 09/28] x86/asm: annotate aliases

2019-01-30 Thread Jiri Slaby
_key_expansion_128 is an alias to _key_expansion_256a, __memcpy to
memcpy, xen_syscall32_target to xen_sysenter_target, and so on. Annotate
them all using the new SYM_FUNC_START_ALIAS, SYM_FUNC_START_LOCAL_ALIAS,
and SYM_FUNC_END_ALIAS. This will make the tools generating the
debuginfo happy.

Signed-off-by: Jiri Slaby 
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Reviewed-by: Juergen Gross  [xen parts]
Cc: 
Cc: 
---
 arch/x86/crypto/aesni-intel_asm.S | 5 ++---
 arch/x86/lib/memcpy_64.S  | 4 ++--
 arch/x86/lib/memmove_64.S | 4 ++--
 arch/x86/lib/memset_64.S  | 4 ++--
 arch/x86/xen/xen-asm_64.S | 4 ++--
 5 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_asm.S 
b/arch/x86/crypto/aesni-intel_asm.S
index 6c349e844581..19effbf9ce35 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1761,8 +1761,7 @@ ENDPROC(aesni_gcm_finalize)
 #endif
 
 
-.align 4
-_key_expansion_128:
+SYM_FUNC_START_LOCAL_ALIAS(_key_expansion_128)
 SYM_FUNC_START_LOCAL(_key_expansion_256a)
pshufd $0b11111111, %xmm1, %xmm1
shufps $0b00010001, %xmm0, %xmm4
@@ -1773,8 +1772,8 @@ SYM_FUNC_START_LOCAL(_key_expansion_256a)
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_128)
 SYM_FUNC_END(_key_expansion_256a)
+SYM_FUNC_END_ALIAS(_key_expansion_128)
 
 SYM_FUNC_START_LOCAL(_key_expansion_192a)
pshufd $0b01010101, %xmm1, %xmm1
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 3b24dc05251c..68fcd8f9a48b 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -27,7 +27,7 @@
  * Output:
  * rax original destination
  */
-ENTRY(__memcpy)
+SYM_FUNC_START_ALIAS(__memcpy)
 ENTRY(memcpy)
ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
  "jmp memcpy_erms", X86_FEATURE_ERMS
@@ -41,7 +41,7 @@ ENTRY(memcpy)
rep movsb
ret
 ENDPROC(memcpy)
-ENDPROC(__memcpy)
+SYM_FUNC_END_ALIAS(__memcpy)
 EXPORT_SYMBOL(memcpy)
 EXPORT_SYMBOL(__memcpy)
 
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index bbec69d8223b..50c1648311b3 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -26,7 +26,7 @@
  */
 .weak memmove
 
-ENTRY(memmove)
+SYM_FUNC_START_ALIAS(memmove)
 ENTRY(__memmove)
 
/* Handle more 32 bytes in loop */
@@ -208,6 +208,6 @@ ENTRY(__memmove)
 13:
retq
 ENDPROC(__memmove)
-ENDPROC(memmove)
+SYM_FUNC_END_ALIAS(memmove)
 EXPORT_SYMBOL(__memmove)
 EXPORT_SYMBOL(memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 9bc861c71e75..927ac44d34aa 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -19,7 +19,7 @@
  *
  * rax   original destination
  */
-ENTRY(memset)
+SYM_FUNC_START_ALIAS(memset)
 ENTRY(__memset)
/*
 * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
@@ -43,8 +43,8 @@ ENTRY(__memset)
rep stosb
movq %r9,%rax
ret
-ENDPROC(memset)
 ENDPROC(__memset)
+SYM_FUNC_END_ALIAS(memset)
 EXPORT_SYMBOL(memset)
 EXPORT_SYMBOL(__memset)
 
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 1e9ef0ba30a5..30dcc311f566 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -168,13 +168,13 @@ ENDPROC(xen_sysenter_target)
 
 #else /* !CONFIG_IA32_EMULATION */
 
-ENTRY(xen_syscall32_target)
+SYM_FUNC_START_ALIAS(xen_syscall32_target)
 ENTRY(xen_sysenter_target)
lea 16(%rsp), %rsp  /* strip %rcx, %r11 */
mov $-ENOSYS, %rax
pushq $0
jmp hypercall_iret
-ENDPROC(xen_syscall32_target)
 ENDPROC(xen_sysenter_target)
+SYM_FUNC_END_ALIAS(xen_syscall32_target)
 
 #endif /* CONFIG_IA32_EMULATION */
-- 
2.20.1



[tip: x86/asm] x86/asm/32: Change all ENTRY+ENDPROC to SYM_FUNC_*

2019-10-18 Thread tip-bot2 for Jiri Slaby
The following commit has been merged into the x86/asm branch of tip:

Commit-ID: 6d685e5318e51b843ca50adeca50dc6300bf2cbb
Gitweb:
https://git.kernel.org/tip/6d685e5318e51b843ca50adeca50dc6300bf2cbb
Author:        Jiri Slaby 
AuthorDate:    Fri, 11 Oct 2019 13:51:07 +02:00
Committer: Borislav Petkov 
CommitterDate: Fri, 18 Oct 2019 12:03:43 +02:00

x86/asm/32: Change all ENTRY+ENDPROC to SYM_FUNC_*

These are all functions which are invoked from elsewhere, so annotate
them as global using the new SYM_FUNC_START and their ENDPROCs by
SYM_FUNC_END.

Now, ENTRY/ENDPROC can be forced to be undefined on X86, so do so.

Signed-off-by: Jiri Slaby 
Signed-off-by: Borislav Petkov 
Cc: Allison Randal 
Cc: Andrey Ryabinin 
Cc: Andy Lutomirski 
Cc: Andy Shevchenko 
Cc: Ard Biesheuvel 
Cc: Bill Metzenthen 
Cc: Boris Ostrovsky 
Cc: Darren Hart 
Cc: "David S. Miller" 
Cc: Greg Kroah-Hartman 
Cc: Herbert Xu 
Cc: "H. Peter Anvin" 
Cc: Ingo Molnar 
Cc: linux-a...@vger.kernel.org
Cc: linux-crypto@vger.kernel.org
Cc: linux-efi 
Cc: linux-...@vger.kernel.org
Cc: linux...@vger.kernel.org
Cc: Mark Rutland 
Cc: Matt Fleming 
Cc: Pavel Machek 
Cc: platform-driver-...@vger.kernel.org
Cc: "Rafael J. Wysocki" 
Cc: Thomas Gleixner 
Cc: Will Deacon 
Cc: x86-ml 
Link: https://lkml.kernel.org/r/2019105108.12392-28-jsl...@suse.cz
---
 arch/x86/boot/compressed/efi_stub_32.S |  4 +--
 arch/x86/boot/compressed/head_32.S | 12 
 arch/x86/crypto/serpent-sse2-i586-asm_32.S |  8 ++---
 arch/x86/crypto/twofish-i586-asm_32.S  |  8 ++---
 arch/x86/entry/entry_32.S  | 24 
 arch/x86/kernel/head_32.S  | 16 +--
 arch/x86/lib/atomic64_386_32.S |  4 +--
 arch/x86/lib/atomic64_cx8_32.S | 32 ++---
 arch/x86/lib/checksum_32.S |  8 ++---
 arch/x86/math-emu/div_Xsig.S   |  4 +--
 arch/x86/math-emu/div_small.S  |  4 +--
 arch/x86/math-emu/mul_Xsig.S   | 12 
 arch/x86/math-emu/polynom_Xsig.S   |  4 +--
 arch/x86/math-emu/reg_norm.S   |  8 ++---
 arch/x86/math-emu/reg_round.S  |  4 +--
 arch/x86/math-emu/reg_u_add.S  |  4 +--
 arch/x86/math-emu/reg_u_div.S  |  4 +--
 arch/x86/math-emu/reg_u_mul.S  |  4 +--
 arch/x86/math-emu/reg_u_sub.S  |  4 +--
 arch/x86/math-emu/round_Xsig.S |  8 ++---
 arch/x86/math-emu/shr_Xsig.S   |  4 +--
 arch/x86/math-emu/wm_shrx.S|  8 ++---
 arch/x86/math-emu/wm_sqrt.S|  4 +--
 arch/x86/platform/efi/efi_stub_32.S|  4 +--
 arch/x86/power/hibernate_asm_32.S  |  8 ++---
 include/linux/linkage.h|  8 +
 26 files changed, 104 insertions(+), 108 deletions(-)

diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
index 257e341..ed6c351 100644
--- a/arch/x86/boot/compressed/efi_stub_32.S
+++ b/arch/x86/boot/compressed/efi_stub_32.S
@@ -24,7 +24,7 @@
  */
 
 .text
-ENTRY(efi_call_phys)
+SYM_FUNC_START(efi_call_phys)
/*
 * 0. The function can only be called in Linux kernel. So CS has been
 * set to 0x0010, DS and SS have been set to 0x0018. In EFI, I found
@@ -77,7 +77,7 @@ ENTRY(efi_call_phys)
movl	saved_return_addr(%edx), %ecx
pushl   %ecx
ret
-ENDPROC(efi_call_phys)
+SYM_FUNC_END(efi_call_phys)
 .previous
 
 .data
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index f9e2a80..f2dfd6d 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -61,7 +61,7 @@
.hidden _egot
 
__HEAD
-ENTRY(startup_32)
+SYM_FUNC_START(startup_32)
cld
/*
 * Test KEEP_SEGMENTS flag to see if the bootloader is asking
@@ -142,14 +142,14 @@ ENTRY(startup_32)
  */
leal	.Lrelocated(%ebx), %eax
jmp *%eax
-ENDPROC(startup_32)
+SYM_FUNC_END(startup_32)
 
 #ifdef CONFIG_EFI_STUB
 /*
  * We don't need the return address, so set up the stack so efi_main() can find
  * its arguments.
  */
-ENTRY(efi_pe_entry)
+SYM_FUNC_START(efi_pe_entry)
add $0x4, %esp
 
call	1f
@@ -174,9 +174,9 @@ ENTRY(efi_pe_entry)
pushl   %eax
pushl   %ecx
jmp 2f  /* Skip efi_config initialization */
-ENDPROC(efi_pe_entry)
+SYM_FUNC_END(efi_pe_entry)
 
-ENTRY(efi32_stub_entry)
+SYM_FUNC_START(efi32_stub_entry)
add $0x4, %esp
popl	%ecx
popl	%edx
@@ -205,7 +205,7 @@ fail:
movl	BP_code32_start(%esi), %eax
leal	startup_32(%eax), %eax
jmp *%eax
-ENDPROC(efi32_stub_entry)
+SYM_FUNC_END(efi32_stub_entry)
 #endif
 
.text
diff --git a/arch/x86/crypto/serpent-sse2-i586-asm_32.S b/arch/x86/crypto/serpent-sse2-i586-asm_32.S
index e5c4a46..6379b99 100644

[tip: x86/asm] x86/asm: Annotate aliases

2019-10-18 Thread tip-bot2 for Jiri Slaby
The following commit has been merged into the x86/asm branch of tip:

Commit-ID: e9b9d020c4873d5e90d9986cfd137afbafbc5bfa
Gitweb: https://git.kernel.org/tip/e9b9d020c4873d5e90d9986cfd137afbafbc5bfa
Author: Jiri Slaby 
AuthorDate: Fri, 11 Oct 2019 13:50:49 +02:00
Committer: Borislav Petkov 
CommitterDate: Fri, 18 Oct 2019 10:38:23 +02:00

x86/asm: Annotate aliases

_key_expansion_128 is an alias of _key_expansion_256a, __memcpy of
memcpy, xen_syscall32_target of xen_sysenter_target, and so on. Annotate
them all using the new SYM_FUNC_START_ALIAS, SYM_FUNC_START_LOCAL_ALIAS,
and SYM_FUNC_END_ALIAS. This keeps the tools that generate debuginfo
happy, as it avoids nested and duplicate symbols.
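
A minimal sketch of the alias pattern with hypothetical names (at this
point in the series the non-alias symbols still carry ENTRY/ENDPROC and
are converted separately):

	/* two names, one body: the *_ALIAS markers wrap the real function */
	SYM_FUNC_START_ALIAS(alias_name)
	ENTRY(real_name)
		ret
	ENDPROC(real_name)
	SYM_FUNC_END_ALIAS(alias_name)

	/* file-local variant, as used for _key_expansion_128 below */
	SYM_FUNC_START_LOCAL_ALIAS(local_alias)
	SYM_FUNC_START_LOCAL(local_real)
		ret
	SYM_FUNC_END(local_real)
	SYM_FUNC_END_ALIAS(local_alias)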

Signed-off-by: Jiri Slaby 
Signed-off-by: Borislav Petkov 
Reviewed-by: Juergen Gross  [xen parts]
Cc: Boris Ostrovsky 
Cc: "David S. Miller" 
Cc: Greg Kroah-Hartman 
Cc: Herbert Xu 
Cc: "H. Peter Anvin" 
Cc: Ingo Molnar 
Cc: linux-a...@vger.kernel.org
Cc: linux-crypto@vger.kernel.org
Cc: Peter Zijlstra 
Cc: Stefano Stabellini 
Cc: Thomas Gleixner 
Cc: x86-ml 
Cc: xen-de...@lists.xenproject.org
Link: https://lkml.kernel.org/r/20191011115108.12392-10-jsl...@suse.cz
---
 arch/x86/crypto/aesni-intel_asm.S | 5 ++---
 arch/x86/lib/memcpy_64.S  | 4 ++--
 arch/x86/lib/memmove_64.S | 4 ++--
 arch/x86/lib/memset_64.S  | 4 ++--
 arch/x86/xen/xen-asm_64.S | 4 ++--
 5 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index e0a5fb4..9d8d5f2 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1757,8 +1757,7 @@ ENDPROC(aesni_gcm_finalize)
 #endif
 
 
-.align 4
-_key_expansion_128:
+SYM_FUNC_START_LOCAL_ALIAS(_key_expansion_128)
 SYM_FUNC_START_LOCAL(_key_expansion_256a)
pshufd $0b11111111, %xmm1, %xmm1
shufps $0b00010000, %xmm0, %xmm4
@@ -1769,8 +1768,8 @@ SYM_FUNC_START_LOCAL(_key_expansion_256a)
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_128)
 SYM_FUNC_END(_key_expansion_256a)
+SYM_FUNC_END_ALIAS(_key_expansion_128)
 
 SYM_FUNC_START_LOCAL(_key_expansion_192a)
pshufd $0b01010101, %xmm1, %xmm1
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 9274866..57a6426 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -28,7 +28,7 @@
  * Output:
  * rax original destination
  */
-ENTRY(__memcpy)
+SYM_FUNC_START_ALIAS(__memcpy)
 ENTRY(memcpy)
ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
  "jmp memcpy_erms", X86_FEATURE_ERMS
@@ -42,7 +42,7 @@ ENTRY(memcpy)
rep movsb
ret
 ENDPROC(memcpy)
-ENDPROC(__memcpy)
+SYM_FUNC_END_ALIAS(__memcpy)
 EXPORT_SYMBOL(memcpy)
 EXPORT_SYMBOL(__memcpy)
 
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index bbec69d..50c1648 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -26,7 +26,7 @@
  */
 .weak memmove
 
-ENTRY(memmove)
+SYM_FUNC_START_ALIAS(memmove)
 ENTRY(__memmove)
 
/* Handle more 32 bytes in loop */
@@ -208,6 +208,6 @@ ENTRY(__memmove)
 13:
retq
 ENDPROC(__memmove)
-ENDPROC(memmove)
+SYM_FUNC_END_ALIAS(memmove)
 EXPORT_SYMBOL(__memmove)
 EXPORT_SYMBOL(memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 9bc861c..927ac44 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -19,7 +19,7 @@
  *
  * rax   original destination
  */
-ENTRY(memset)
+SYM_FUNC_START_ALIAS(memset)
 ENTRY(__memset)
/*
 * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
@@ -43,8 +43,8 @@ ENTRY(__memset)
rep stosb
movq %r9,%rax
ret
-ENDPROC(memset)
 ENDPROC(__memset)
+SYM_FUNC_END_ALIAS(memset)
 EXPORT_SYMBOL(memset)
 EXPORT_SYMBOL(__memset)
 
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index ebf610b..45c1249 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -167,13 +167,13 @@ ENDPROC(xen_sysenter_target)
 
 #else /* !CONFIG_IA32_EMULATION */
 
-ENTRY(xen_syscall32_target)
+SYM_FUNC_START_ALIAS(xen_syscall32_target)
 ENTRY(xen_sysenter_target)
lea 16(%rsp), %rsp  /* strip %rcx, %r11 */
mov $-ENOSYS, %rax
pushq $0
jmp hypercall_iret
-ENDPROC(xen_syscall32_target)
 ENDPROC(xen_sysenter_target)
+SYM_FUNC_END_ALIAS(xen_syscall32_target)
 
 #endif /* CONFIG_IA32_EMULATION */


[tip: x86/asm] x86/asm/crypto: Annotate local functions

2019-10-18 Thread tip-bot2 for Jiri Slaby
The following commit has been merged into the x86/asm branch of tip:

Commit-ID: 74d8b90a889022e306b543ff2147a6941c99b354
Gitweb: https://git.kernel.org/tip/74d8b90a889022e306b543ff2147a6941c99b354
Author: Jiri Slaby 
AuthorDate: Fri, 11 Oct 2019 13:50:46 +02:00
Committer: Borislav Petkov 
CommitterDate: Fri, 18 Oct 2019 10:07:15 +02:00

x86/asm/crypto: Annotate local functions

Use the newly added SYM_FUNC_START_LOCAL to annotate the beginnings of
all functions which do not have a ".globl" annotation but whose endings
are annotated by ENDPROC. This is needed to balance ENDPROC for tools
that generate debuginfo.

These function names are not prepended with ".L", as they might appear
in call traces and would no longer be visible after such a change.

To be symmetric, the functions' ENDPROCs are converted to the new
SYM_FUNC_END.
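
A minimal sketch of the resulting pattern on a hypothetical local helper
(note the name deliberately has no ".L" prefix, so it remains visible in
call traces):

	/* file-local function: no .globl, but properly bracketed for debuginfo */
	SYM_FUNC_START_LOCAL(__hypothetical_helper)
		ret
	SYM_FUNC_END(__hypothetical_helper)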

Signed-off-by: Jiri Slaby 
Signed-off-by: Borislav Petkov 
Cc: "David S. Miller" 
Cc: Herbert Xu 
Cc: "H. Peter Anvin" 
Cc: Ingo Molnar 
Cc: linux-a...@vger.kernel.org
Cc: linux-crypto@vger.kernel.org
Cc: Thomas Gleixner 
Cc: x86-ml 
Link: https://lkml.kernel.org/r/20191011115108.12392-7-jsl...@suse.cz
---
 arch/x86/crypto/aegis128-aesni-asm.S |  8 +--
 arch/x86/crypto/aesni-intel_asm.S| 49 +++
 arch/x86/crypto/camellia-aesni-avx-asm_64.S  | 20 
 arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 20 
 arch/x86/crypto/cast5-avx-x86_64-asm_64.S|  8 +--
 arch/x86/crypto/cast6-avx-x86_64-asm_64.S|  8 +--
 arch/x86/crypto/chacha-ssse3-x86_64.S|  4 +-
 arch/x86/crypto/ghash-clmulni-intel_asm.S|  4 +-
 arch/x86/crypto/serpent-avx-x86_64-asm_64.S  |  8 +--
 arch/x86/crypto/serpent-avx2-asm_64.S|  8 +--
 arch/x86/crypto/twofish-avx-x86_64-asm_64.S  |  8 +--
 11 files changed, 68 insertions(+), 77 deletions(-)

diff --git a/arch/x86/crypto/aegis128-aesni-asm.S b/arch/x86/crypto/aegis128-aesni-asm.S
index 4434607..b7026fd 100644
--- a/arch/x86/crypto/aegis128-aesni-asm.S
+++ b/arch/x86/crypto/aegis128-aesni-asm.S
@@ -71,7 +71,7 @@
  *   %r8
  *   %r9
  */
-__load_partial:
+SYM_FUNC_START_LOCAL(__load_partial)
xor %r9d, %r9d
pxor MSG, MSG
 
@@ -123,7 +123,7 @@ __load_partial:
 
 .Lld_partial_8:
ret
-ENDPROC(__load_partial)
+SYM_FUNC_END(__load_partial)
 
 /*
  * __store_partial: internal ABI
@@ -137,7 +137,7 @@ ENDPROC(__load_partial)
  *   %r9
  *   %r10
  */
-__store_partial:
+SYM_FUNC_START_LOCAL(__store_partial)
mov LEN, %r8
mov DST, %r9
 
@@ -181,7 +181,7 @@ __store_partial:
 
 .Lst_partial_1:
ret
-ENDPROC(__store_partial)
+SYM_FUNC_END(__store_partial)
 
 /*
 * void crypto_aegis128_aesni_init(void *state, const void *key, const void *iv);
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index e40bdf0..e0a5fb4 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1759,7 +1759,7 @@ ENDPROC(aesni_gcm_finalize)
 
 .align 4
 _key_expansion_128:
-_key_expansion_256a:
+SYM_FUNC_START_LOCAL(_key_expansion_256a)
pshufd $0b11111111, %xmm1, %xmm1
shufps $0b00010000, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1770,10 +1770,9 @@ _key_expansion_256a:
add $0x10, TKEYP
ret
 ENDPROC(_key_expansion_128)
-ENDPROC(_key_expansion_256a)
+SYM_FUNC_END(_key_expansion_256a)
 
-.align 4
-_key_expansion_192a:
+SYM_FUNC_START_LOCAL(_key_expansion_192a)
pshufd $0b01010101, %xmm1, %xmm1
shufps $0b00010000, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1795,10 +1794,9 @@ _key_expansion_192a:
movaps %xmm1, 0x10(TKEYP)
add $0x20, TKEYP
ret
-ENDPROC(_key_expansion_192a)
+SYM_FUNC_END(_key_expansion_192a)
 
-.align 4
-_key_expansion_192b:
+SYM_FUNC_START_LOCAL(_key_expansion_192b)
pshufd $0b01010101, %xmm1, %xmm1
shufps $0b00010000, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1815,10 +1813,9 @@ _key_expansion_192b:
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_192b)
+SYM_FUNC_END(_key_expansion_192b)
 
-.align 4
-_key_expansion_256b:
+SYM_FUNC_START_LOCAL(_key_expansion_256b)
pshufd $0b10101010, %xmm1, %xmm1
shufps $0b00010000, %xmm2, %xmm4
pxor %xmm4, %xmm2
@@ -1828,7 +1825,7 @@ _key_expansion_256b:
movaps %xmm2, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_256b)
+SYM_FUNC_END(_key_expansion_256b)
 
 /*
  * int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
@@ -1981,8 +1978,7 @@ ENDPROC(aesni_enc)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_enc1:
+SYM_FUNC_START_LOCAL(_aesni_enc1)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE # round 0
@@ -2025,7 +2021,7 @@ _aesni_enc1:
movaps 0x70(TKEYP), KEY
AESENCLAST KEY STATE
ret
-ENDPROC(_aesni_enc1)
+SYM_FUNC_END(_aesni_enc1)
 
 /*
  * _