Re: [PATCH net-next] ipv6: Implement optimized IPv6 masked address comparison for ARM64

2017-03-17 Thread Subash Abhinov Kasiviswanathan



>> That's clearly not right - I'm not sure quite what undefined behaviour
>> assumption convinces GCC to optimise the whole thing away
>
> While the pointer casting is a bit ghastly, I don't actually think that
> GCC is taking advantage of undefined behaviour here, rather it looks like
> you have a simple typo on line 3:
>
>>   const __uint128_t *ul1 = (const __uint128_t *)a1;
>>   const __uint128_t *ulm = (const __uint128_t *)m;
>>   const __uint128_t *ul2 = (const __uint128_t *)a1;
>
> ul2 = a2, surely?
>
> As it is (stripping casts) you have a1 ^ a1, which will get you to 0
> pretty quickly. Fixing that up for you:

>   bool
>   ipv6_masked_addr_cmp_new(const struct in6_addr *a1, const struct in6_addr *m,
>                            const struct in6_addr *a2)
>   {
>     const __uint128_t *ul1 = (const __uint128_t *)a1;
>     const __uint128_t *ulm = (const __uint128_t *)m;
>     const __uint128_t *ul2 = (const __uint128_t *)a2;
>
>     return !!((*ul1 ^ *ul2) & *ulm);
>   }
>
> $ gcc -O2
>
>   ipv6_masked_addr_cmp_new:
>     ldp  x4, x3, [x0]
>     ldp  x5, x2, [x2]
>     ldp  x0, x1, [x1]
>     eor  x4, x4, x5
>     eor  x2, x3, x2
>     and  x0, x0, x4
>     and  x1, x1, x2
>     orr  x0, x0, x1
>     cmp  x0, 0
>     cset w0, ne
>     ret
>
> Which at least looks like it might calculate something useful :-)


Hi Robin / James

Thanks for checking and sorry for the confusion. I'll retest this.

--
Qualcomm Innovation Center, Inc.
The Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum, a 
Linux Foundation Collaborative Project


Re: [PATCH net-next] ipv6: Implement optimized IPv6 masked address comparison for ARM64

2017-03-17 Thread James Greenhalgh
On Fri, Mar 17, 2017 at 12:00:42PM +, Robin Murphy wrote:
> On 17/03/17 04:42, Subash Abhinov Kasiviswanathan wrote:
> > Android devices use multiple ip[6]tables for statistics, UID matching
> > and other functionality. Perf output indicated that ip6_do_table
> > was taking a considerable amount of CPU and more than ip_do_table
> > for an equivalent rate. ipv6_masked_addr_cmp was chosen for
> > optimization as there are more instructions required than the
> > equivalent operation in ip_packet_match.
> > 
> > Using 128 bit operations helps to reduce the number of instructions
> > for the match on an ARM64 system. This helps to improve UDPv6 DL
> > performance by 40Mbps (860Mbps -> 900Mbps) on a CPU limited system.
> 
> After trying to have a look at the codegen difference it makes, I think
> I may have found why it's faster ;)
> 
> --
> [root@space-channel-5 ~]# cat > ip.c
> #include 
> #include 
>   
> bool
> ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m,
>const struct in6_addr *a2)
> {
>   const unsigned long *ul1 = (const unsigned long *)a1;
>   const unsigned long *ulm = (const unsigned long *)m;
>   const unsigned long *ul2 = (const unsigned long *)a2;
> 
>   return !!(((ul1[0] ^ ul2[0]) & ulm[0]) |
> ((ul1[1] ^ ul2[1]) & ulm[1]));
> }
> 
> bool
> ipv6_masked_addr_cmp_new(const struct in6_addr *a1, const struct in6_addr *m,
>                          const struct in6_addr *a2)
> {
>   const __uint128_t *ul1 = (const __uint128_t *)a1;
>   const __uint128_t *ulm = (const __uint128_t *)m;
>   const __uint128_t *ul2 = (const __uint128_t *)a1;
> 
>   return !!((*ul1 ^ *ul2) & *ulm);
> }



> That's clearly not right - I'm not sure quite what undefined behaviour
> assumption convinces GCC to optimise the whole thing away

While the pointer casting is a bit ghastly, I don't actually think that
GCC is taking advantage of undefined behaviour here, rather it looks like
you have a simple typo on line 3:

>   const __uint128_t *ul1 = (const __uint128_t *)a1;
>   const __uint128_t *ulm = (const __uint128_t *)m;
>   const __uint128_t *ul2 = (const __uint128_t *)a1;

ul2 = a2, surely?

As it is (stripping casts) you have a1 ^ a1, which will get you to 0
pretty quickly. Fixing that up for you:

  bool
  ipv6_masked_addr_cmp_new(const struct in6_addr *a1, const struct in6_addr *m,
                           const struct in6_addr *a2)
  {
const __uint128_t *ul1 = (const __uint128_t *)a1;
const __uint128_t *ulm = (const __uint128_t *)m;
const __uint128_t *ul2 = (const __uint128_t *)a2;

return !!((*ul1 ^ *ul2) & *ulm);
  }

$ gcc -O2

  ipv6_masked_addr_cmp_new:
ldp x4, x3, [x0]
ldp x5, x2, [x2]
ldp x0, x1, [x1]
eor x4, x4, x5
eor x2, x3, x2
and x0, x0, x4
and x1, x1, x2
orr x0, x0, x1
cmp x0, 0
cset w0, ne
ret

Which at least looks like it might calculate something useful :-)

Cheers,
James
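
A hedged aside on the casting: one aliasing-safe way to express the same
single 128-bit compare is to go through a fixed-size memcpy and let the
compiler fold the copies into plain loads. The sketch below is illustrative
only (userspace headers and GCC/Clang's __uint128_t are assumed), not what
the posted patch does:

  #include <netinet/in.h>   /* struct in6_addr (userspace header, assumed) */
  #include <stdbool.h>
  #include <string.h>

  static inline bool
  ipv6_masked_addr_cmp_u128(const struct in6_addr *a1, const struct in6_addr *m,
                            const struct in6_addr *a2)
  {
        __uint128_t v1, v2, vm;

        /* A fixed-size memcpy avoids casting to __uint128_t *, so there is
         * no strict-aliasing or alignment question to argue about; at -O2
         * the copies should fold into plain loads (e.g. ldp on AArch64).
         */
        memcpy(&v1, a1, sizeof(v1));
        memcpy(&v2, a2, sizeof(v2));
        memcpy(&vm, m, sizeof(vm));

        return !!((v1 ^ v2) & vm);
  }

In a kernel build the same idea could be expressed with get_unaligned()-style
helpers rather than raw casts, but that is a separate discussion.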



Re: [PATCH net-next] ipv6: Implement optimized IPv6 masked address comparison for ARM64

2017-03-17 Thread Robin Murphy
On 17/03/17 04:42, Subash Abhinov Kasiviswanathan wrote:
> Android devices use multiple ip[6]tables for statistics, UID matching
> and other functionality. Perf output indicated that ip6_do_table
> was taking a considerable amount of CPU and more than ip_do_table
> for an equivalent rate. ipv6_masked_addr_cmp was chosen for
> optimization as there are more instructions required than the
> equivalent operation in ip_packet_match.
> 
> Using 128 bit operations helps to reduce the number of instructions
> for the match on an ARM64 system. This helps to improve UDPv6 DL
> performance by 40Mbps (860Mbps -> 900Mbps) on a CPU limited system.

After trying to have a look at the codegen difference it makes, I think
I may have found why it's faster ;)

--
[root@space-channel-5 ~]# cat > ip.c
#include 
#include 

bool
ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m,
 const struct in6_addr *a2)
{
const unsigned long *ul1 = (const unsigned long *)a1;
const unsigned long *ulm = (const unsigned long *)m;
const unsigned long *ul2 = (const unsigned long *)a2;

return !!(((ul1[0] ^ ul2[0]) & ulm[0]) |
  ((ul1[1] ^ ul2[1]) & ulm[1]));
}

bool
ipv6_masked_addr_cmp_new(const struct in6_addr *a1, const struct in6_addr *m,
                         const struct in6_addr *a2)
{
const __uint128_t *ul1 = (const __uint128_t *)a1;
const __uint128_t *ulm = (const __uint128_t *)m;
const __uint128_t *ul2 = (const __uint128_t *)a1;

return !!((*ul1 ^ *ul2) & *ulm);
}
[root@space-channel-5 ~]# gcc -c -O2 ip.c
[root@space-channel-5 ~]# objdump -d ip.o

ip.o: file format elf64-littleaarch64


Disassembly of section .text:

0000000000000000 <ipv6_masked_addr_cmp>:
   0:   a9401847        ldp     x7, x6, [x2]
   4:   a9401003        ldp     x3, x4, [x0]
   8:   f9400025        ldr     x5, [x1]
   c:   f9400422        ldr     x2, [x1, #8]
  10:   ca070060        eor     x0, x3, x7
  14:   ca060081        eor     x1, x4, x6
  18:   8a050000        and     x0, x0, x5
  1c:   8a020021        and     x1, x1, x2
  20:   aa010000        orr     x0, x0, x1
  24:   f100001f        cmp     x0, #0x0
  28:   1a9f07e0        cset    w0, ne   // ne = any
  2c:   d65f03c0        ret

0000000000000030 <ipv6_masked_addr_cmp_new>:
  30:   52800000        mov     w0, #0x0 // #0
  34:   d65f03c0        ret
[root@space-channel-5 ~]# gcc --version
gcc (GCC) 6.3.1 20170306
Copyright (C) 2016 Free Software Foundation, Inc.
This is free software; see the source for copying conditions.  There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.

--

That's clearly not right - I'm not sure quite what undefined behaviour
assumption convinces GCC to optimise the whole thing away, but I do note
that the generic 64-bit version really isn't far off optimal already.
Even if it happens to work out in practice due to inlining behaviour, I
don't think that's something we'd want to rely on.

Robin.
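
A hedged sketch of the kind of host-side sanity check that would have caught
the a1/a1 typo immediately (the broken variant constant-folds to 0, as the
objdump above shows): build it with -fno-strict-aliasing to mirror the kernel,
since both comparison routines cast struct in6_addr pointers, and note it
assumes a 64-bit host. The harness itself -- <netinet/in.h>, rand()-filled
addresses, a million iterations -- is assumed for illustration, not taken from
the thread.

  #include <assert.h>
  #include <netinet/in.h>   /* struct in6_addr (userspace header, assumed) */
  #include <stdbool.h>
  #include <stdio.h>
  #include <stdlib.h>

  /* Reference: the generic 64-bit comparison quoted from include/net/ipv6.h
   * (assumes unsigned long is 8 bytes, i.e. an LP64 host).
   */
  static bool cmp_generic(const struct in6_addr *a1, const struct in6_addr *m,
                          const struct in6_addr *a2)
  {
        const unsigned long *ul1 = (const unsigned long *)a1;
        const unsigned long *ulm = (const unsigned long *)m;
        const unsigned long *ul2 = (const unsigned long *)a2;

        return !!(((ul1[0] ^ ul2[0]) & ulm[0]) |
                  ((ul1[1] ^ ul2[1]) & ulm[1]));
  }

  /* Candidate: the 128-bit variant, with ul2 taken from a2 as intended. */
  static bool cmp_u128(const struct in6_addr *a1, const struct in6_addr *m,
                       const struct in6_addr *a2)
  {
        const __uint128_t *ul1 = (const __uint128_t *)a1;
        const __uint128_t *ulm = (const __uint128_t *)m;
        const __uint128_t *ul2 = (const __uint128_t *)a2;

        return !!((*ul1 ^ *ul2) & *ulm);
  }

  static void randomize(struct in6_addr *a)
  {
        unsigned char *p = (unsigned char *)a;

        for (size_t i = 0; i < sizeof(*a); i++)
                p[i] = (unsigned char)rand();
  }

  int main(void)
  {
        struct in6_addr a1, m, a2;

        for (int i = 0; i < 1000000; i++) {
                randomize(&a1);
                randomize(&m);
                if (i & 1)
                        a2 = a1;   /* exercise the "addresses match" path too */
                else
                        randomize(&a2);
                assert(cmp_generic(&a1, &m, &a2) == cmp_u128(&a1, &m, &a2));
        }
        printf("generic and 128-bit comparisons agree\n");
        return 0;
  }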

> Tested on x86_64 UML to check if generic version is used and ARM64
> to verify that ARM64 version is used.
> 
> Signed-off-by: Subash Abhinov Kasiviswanathan 
> ---
>  arch/alpha/include/asm/Kbuild  |  1 +
>  arch/arc/include/asm/Kbuild|  1 +
>  arch/arm/include/asm/Kbuild|  1 +
>  arch/arm64/include/asm/ipv6.h  | 29 +
>  arch/avr32/include/asm/Kbuild  |  1 +
>  arch/blackfin/include/asm/Kbuild   |  1 +
>  arch/c6x/include/asm/Kbuild|  1 +
>  arch/cris/include/asm/Kbuild   |  1 +
>  arch/frv/include/asm/Kbuild|  1 +
>  arch/h8300/include/asm/Kbuild  |  1 +
>  arch/hexagon/include/asm/Kbuild|  1 +
>  arch/ia64/include/asm/Kbuild   |  1 +
>  arch/m32r/include/asm/Kbuild   |  1 +
>  arch/m68k/include/asm/Kbuild   |  1 +
>  arch/metag/include/asm/Kbuild  |  1 +
>  arch/microblaze/include/asm/Kbuild |  1 +
>  arch/mips/include/asm/Kbuild   |  1 +
>  arch/mn10300/include/asm/Kbuild|  1 +
>  arch/nios2/include/asm/Kbuild  |  1 +
>  arch/openrisc/include/asm/Kbuild   |  1 +
>  arch/parisc/include/asm/Kbuild |  1 +
>  arch/powerpc/include/asm/Kbuild|  1 +
>  arch/s390/include/asm/Kbuild   |  1 +
>  arch/score/include/asm/Kbuild  |  1 +
>  arch/sh/include/asm/Kbuild |  1 +
>  arch/sparc/include/asm/Kbuild  |  1 +
>  arch/tile/include/asm/Kbuild   |  1 +
>  arch/um/include/asm/Kbuild |  1 +
>  arch/unicore32/include/asm/Kbuild  |  1 +
>  arch/x86/include/asm/Kbuild|  1 +
>  arch/xtensa/include/asm/Kbuild |  1 +
>  include/asm-generic/ipv6.h | 32 
>  include/net/ipv6.h | 20 +---
>  33 files changed, 92 insertions(+), 19 deletions(-)
>  create mode 100644 arch/arm64/include/asm/ipv6.h
>  create mode 100644 include/asm-generic/ipv6.h
> 
> diff --git a/arch/alpha/include/asm/Kbuild 

[PATCH net-next] ipv6: Implement optimized IPv6 masked address comparison for ARM64

2017-03-16 Thread Subash Abhinov Kasiviswanathan
Android devices use multiple ip[6]tables for statistics, UID matching
and other functionality. Perf output indicated that ip6_do_table
was taking a considerable amount of CPU and more than ip_do_table
for an equivalent rate. ipv6_masked_addr_cmp was chosen for
optimization as there are more instructions required than the
equivalent operation in ip_packet_match.

Using 128 bit operations helps to reduce the number of instructions
for the match on an ARM64 system. This helps to improve UDPv6 DL
performance by 40Mbps (860Mbps -> 900Mbps) on a CPU limited system.

Tested on x86_64 UML to check if generic version is used and ARM64
to verify that ARM64 version is used.

Signed-off-by: Subash Abhinov Kasiviswanathan 
---
 arch/alpha/include/asm/Kbuild  |  1 +
 arch/arc/include/asm/Kbuild|  1 +
 arch/arm/include/asm/Kbuild|  1 +
 arch/arm64/include/asm/ipv6.h  | 29 +
 arch/avr32/include/asm/Kbuild  |  1 +
 arch/blackfin/include/asm/Kbuild   |  1 +
 arch/c6x/include/asm/Kbuild|  1 +
 arch/cris/include/asm/Kbuild   |  1 +
 arch/frv/include/asm/Kbuild|  1 +
 arch/h8300/include/asm/Kbuild  |  1 +
 arch/hexagon/include/asm/Kbuild|  1 +
 arch/ia64/include/asm/Kbuild   |  1 +
 arch/m32r/include/asm/Kbuild   |  1 +
 arch/m68k/include/asm/Kbuild   |  1 +
 arch/metag/include/asm/Kbuild  |  1 +
 arch/microblaze/include/asm/Kbuild |  1 +
 arch/mips/include/asm/Kbuild   |  1 +
 arch/mn10300/include/asm/Kbuild|  1 +
 arch/nios2/include/asm/Kbuild  |  1 +
 arch/openrisc/include/asm/Kbuild   |  1 +
 arch/parisc/include/asm/Kbuild |  1 +
 arch/powerpc/include/asm/Kbuild|  1 +
 arch/s390/include/asm/Kbuild   |  1 +
 arch/score/include/asm/Kbuild  |  1 +
 arch/sh/include/asm/Kbuild |  1 +
 arch/sparc/include/asm/Kbuild  |  1 +
 arch/tile/include/asm/Kbuild   |  1 +
 arch/um/include/asm/Kbuild |  1 +
 arch/unicore32/include/asm/Kbuild  |  1 +
 arch/x86/include/asm/Kbuild|  1 +
 arch/xtensa/include/asm/Kbuild |  1 +
 include/asm-generic/ipv6.h | 32 
 include/net/ipv6.h | 20 +---
 33 files changed, 92 insertions(+), 19 deletions(-)
 create mode 100644 arch/arm64/include/asm/ipv6.h
 create mode 100644 include/asm-generic/ipv6.h

diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild
index d103db5..5b7e92b 100644
--- a/arch/alpha/include/asm/Kbuild
+++ b/arch/alpha/include/asm/Kbuild
@@ -3,6 +3,7 @@
 generic-y += clkdev.h
 generic-y += exec.h
 generic-y += export.h
+generic-y += ipv6.h
 generic-y += irq_work.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index 63a0401..99f1456 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -14,6 +14,7 @@ generic-y += hw_irq.h
 generic-y += ioctl.h
 generic-y += ioctls.h
 generic-y += ipcbuf.h
+generic-y += ipv6.h
 generic-y += irq_regs.h
 generic-y += irq_work.h
 generic-y += kmap_types.h
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index b14e8c7..a0ba9ac 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -9,6 +9,7 @@ generic-y += errno.h
 generic-y += exec.h
 generic-y += ioctl.h
 generic-y += ipcbuf.h
+generic-y += ipv6.h
 generic-y += irq_regs.h
 generic-y += kdebug.h
 generic-y += local.h
diff --git a/arch/arm64/include/asm/ipv6.h b/arch/arm64/include/asm/ipv6.h
new file mode 100644
index 000..d49dec6
--- /dev/null
+++ b/arch/arm64/include/asm/ipv6.h
@@ -0,0 +1,29 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_IPV6_H
+#define __ASM_IPV6_H
+
+#include 
+
+static inline bool
+ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m,
+const struct in6_addr *a2)
+{
+   const __uint128_t *ul1 = (const __uint128_t *)a1;
+   const __uint128_t *ulm = (const __uint128_t *)m;
+   const __uint128_t *ul2 = (const __uint128_t *)a1;
+
+   return !!((*ul1 ^ *ul2) & *ulm);
+}
+#define ipv6_masked_addr_cmp ipv6_masked_addr_cmp
+#endif /* __ASM_IPV6_H */
diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild
index 3d7ef2c..fd6a964 100644
--- a/arch/avr32/include/asm/Kbuild
+++ b/arch/avr32/include/asm/Kbuild
@@ -6,6 +6,7 @@ generic-y += div64.h
 generic-y += emergency-restart.h
 generic-y += exec.h