This avoids potential "bleeding" when size == 0: the prefetch-for-write
would dirty the cache line (and possibly fetch it from other cores) even
though nothing is ever written. For the same reason, skipping the
prefetch in that case is also more optimal.
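
For illustration, a minimal C sketch of the same ordering (not part of
the patch; memset_sketch and the byte loop are hypothetical, and GCC's
__builtin_prefetch with rw=1 stands in for the ARC PREFETCHW
instruction):

	#include <stddef.h>

	void *memset_sketch(void *dst, int c, size_t n)
	{
		unsigned char *p = dst;

		/* Early return first: a zero-length memset must not
		 * touch the destination line at all, otherwise the
		 * prefetch-for-write would dirty it (and pull it
		 * exclusively from other cores) for nothing.
		 */
		if (n == 0)
			return dst;

		/* Now that we know a store will follow, prefetching
		 * the first write location is safe and profitable.
		 * rw=1 requests a prefetch-for-write.
		 */
		__builtin_prefetch(p, 1);

		while (n--)
			*p++ = (unsigned char)c;

		return dst;
	}

The ordering, not the loop, is the point: the prefetch sits strictly
after the zero-size bail-out, exactly as the diff below moves
PREFETCHW past the jz.d early return.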

Signed-off-by: Vineet Gupta <vgu...@kernel.org>
---
 arch/arc/lib/memset-archs.S | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S
index d2e09fece5bc..d0a5cec4cdca 100644
--- a/arch/arc/lib/memset-archs.S
+++ b/arch/arc/lib/memset-archs.S
@@ -36,12 +36,13 @@
 #endif
 
 ENTRY_CFI(memset)
-       PREFETCHW_INSTR r0, 0   ; Prefetch the first write location
        mov.f   0, r2
 ;;; if size is zero
        jz.d    [blink]
        mov     r3, r0          ; don't clobber ret val
 
+       PREFETCHW_INSTR r0, 0   ; Prefetch the first write location
+
 ;;; if length < 8
        brls.d.nt       r2, 8, .Lsmallchunk
        mov.f   lp_count,r2
-- 
2.34.1

