This is an automated email from the ASF dual-hosted git repository.

xiaoxiang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-nuttx.git

commit 1b3005accf0bea1178a1b288f80d84b4008008c5
Author: zhuyanlin <[email protected]>
AuthorDate: Tue Nov 23 20:27:32 2021 +0800

    arch:cache_invalidate: fix unaligned cacheline invalidate
    
    An invalidate-only operation may corrupt data in the unaligned
    start and end cache lines. Use writeback-and-invalidate for those
    partial lines instead.
    
    Signed-off-by: zhuyanlin <[email protected]>
---
 arch/arm/src/arm/arm_cache.S          | 11 ++++++-
 arch/arm/src/armv7-m/arm_cache.c      | 16 +++++++--
 arch/arm/src/armv8-m/arm_cache.c      | 16 +++++++--
 arch/xtensa/src/common/xtensa_cache.c | 61 +++++++++++++++++++++--------------
 4 files changed, 72 insertions(+), 32 deletions(-)

diff --git a/arch/arm/src/arm/arm_cache.S b/arch/arm/src/arm/arm_cache.S
index 92c32d9..53dd580 100644
--- a/arch/arm/src/arm/arm_cache.S
+++ b/arch/arm/src/arm/arm_cache.S
@@ -123,7 +123,16 @@ up_invalidate_icache_all:
        .type   up_invalidate_dcache, function
 
 up_invalidate_dcache:
-       bic             r0, r0, #CACHE_DLINESIZE - 1
+       mov             r2, #CACHE_DLINESIZE - 1
+
+       tst             r0, r2
+       bic             r0, r0, r2                      /* R0=aligned start address */
+       mcrne           p15, 0, r0, c7, c14, 1          /* Clean & invalidate D entry */
+
+       tst             r1, r2
+       bic             r1, r1, r2                      /* R1=aligned end address */
+       mcrne           p15, 0, r1, c7, c14, 1          /* Clean & invalidate D entry */
+
 1:     mcr             p15, 0, r0, c7, c6, 1           /* Invalidate D entry */
        add             r0, r0, #CACHE_DLINESIZE
        cmp             r0, r1
diff --git a/arch/arm/src/armv7-m/arm_cache.c b/arch/arm/src/armv7-m/arm_cache.c
index f1629bb..f640286 100644
--- a/arch/arm/src/armv7-m/arm_cache.c
+++ b/arch/arm/src/armv7-m/arm_cache.c
@@ -404,10 +404,16 @@ void up_invalidate_dcache(uintptr_t start, uintptr_t end)
    *   (ssize - 1)  = 0x007f : Mask of the set field
    */
 
-  start &= ~(ssize - 1);
   ARM_DSB();
 
-  do
+  if (start & (ssize - 1))
+    {
+      start &= ~(ssize - 1);
+      putreg32(start, NVIC_DCCIMVAC);
+      start += ssize;
+    }
+
+  while (start + ssize <= end)
     {
       /* The below store causes the cache to check its directory and
        * determine if this address is contained in the cache. If so, it
@@ -422,7 +428,11 @@ void up_invalidate_dcache(uintptr_t start, uintptr_t end)
 
       start += ssize;
     }
-  while (start < end);
+
+  if (start < end)
+    {
+      putreg32(start, NVIC_DCCIMVAC);
+    }
 
   ARM_DSB();
   ARM_ISB();
diff --git a/arch/arm/src/armv8-m/arm_cache.c b/arch/arm/src/armv8-m/arm_cache.c
index 56b963e..4b0e67d 100644
--- a/arch/arm/src/armv8-m/arm_cache.c
+++ b/arch/arm/src/armv8-m/arm_cache.c
@@ -404,10 +404,16 @@ void up_invalidate_dcache(uintptr_t start, uintptr_t end)
    *   (ssize - 1)  = 0x007f : Mask of the set field
    */
 
-  start &= ~(ssize - 1);
   ARM_DSB();
 
-  do
+  if (start & (ssize - 1))
+    {
+      start &= ~(ssize - 1);
+      putreg32(start, NVIC_DCCIMVAC);
+      start += ssize;
+    }
+
+  while (start + ssize <= end)
     {
       /* The below store causes the cache to check its directory and
        * determine if this address is contained in the cache. If so, it
@@ -422,7 +428,11 @@ void up_invalidate_dcache(uintptr_t start, uintptr_t end)
 
       start += ssize;
     }
-  while (start < end);
+
+  if (start < end)
+    {
+      putreg32(start, NVIC_DCCIMVAC);
+    }
 
   ARM_DSB();
   ARM_ISB();
diff --git a/arch/xtensa/src/common/xtensa_cache.c b/arch/xtensa/src/common/xtensa_cache.c
index 5a0b123..3ad30ce 100644
--- a/arch/xtensa/src/common/xtensa_cache.c
+++ b/arch/xtensa/src/common/xtensa_cache.c
@@ -116,11 +116,11 @@ void up_invalidate_icache(uintptr_t start, uintptr_t end)
 {
   /* align to XCHAL_ICACHE_LINESIZE */
 
-  uint32_t addr = start - (start & (XCHAL_ICACHE_LINESIZE - 1));
+  start &= ~(XCHAL_ICACHE_LINESIZE - 1);
 
-  for (; addr < end; addr += XCHAL_ICACHE_LINESIZE)
+  for (; start < end; start += XCHAL_ICACHE_LINESIZE)
     {
-      __asm__ __volatile__ ("ihi %0, 0\n" : : "r"(addr));
+      __asm__ __volatile__ ("ihi %0, 0\n" : : "r"(start));
     }
 
   __asm__ __volatile__ ("isync\n");
@@ -175,11 +175,11 @@ void up_lock_icache(uintptr_t start, uintptr_t end)
 {
   /* align to XCHAL_ICACHE_LINESIZE */
 
-  uint32_t addr = start - (start & (XCHAL_ICACHE_LINESIZE - 1));
+  start &= ~(XCHAL_ICACHE_LINESIZE - 1);
 
-  for (; addr < end; addr += XCHAL_ICACHE_LINESIZE)
+  for (; start < end; start += XCHAL_ICACHE_LINESIZE)
     {
-      __asm__ __volatile__ ("ipfl %0, 0\n": : "r"(addr));
+      __asm__ __volatile__ ("ipfl %0, 0\n": : "r"(start));
     };
 
   __asm__ __volatile__ ("isync\n");
@@ -206,11 +206,11 @@ void up_unlock_icache(uintptr_t start, uintptr_t end)
 {
   /* align to XCHAL_ICACHE_LINESIZE */
 
-  uint32_t addr = start - (start & (XCHAL_ICACHE_LINESIZE - 1));
+  start &= ~(XCHAL_ICACHE_LINESIZE - 1);
 
-  for (; addr < end; addr += XCHAL_ICACHE_LINESIZE)
+  for (; start < end; start += XCHAL_ICACHE_LINESIZE)
     {
-      __asm__ __volatile__ ("ihu %0, 0\n": : "r"(addr));
+      __asm__ __volatile__ ("ihu %0, 0\n": : "r"(start));
     };
 
   __asm__ __volatile__ ("isync\n");
@@ -335,13 +335,24 @@ void up_disable_dcache(void)
 #ifdef CONFIG_XTENSA_DCACHE
 void up_invalidate_dcache(uintptr_t start, uintptr_t end)
 {
-  /* Align to XCHAL_DCACHE_LINESIZE */
+  if (start & (XCHAL_DCACHE_LINESIZE - 1))
+    {
+      /* Align to XCHAL_DCACHE_LINESIZE */
+
+      start &= ~(XCHAL_DCACHE_LINESIZE - 1);
+      __asm__ __volatile__ ("dhwbi %0, 0\n" : : "r"(start));
+      start += XCHAL_DCACHE_LINESIZE;
+    }
 
-  uint32_t addr = start - (start & (XCHAL_DCACHE_LINESIZE - 1));
+  for (; start + XCHAL_DCACHE_LINESIZE <= end;
+       start += XCHAL_DCACHE_LINESIZE)
+    {
+      __asm__ __volatile__ ("dhi %0, 0\n" : : "r"(start));
+    }
 
-  for (; addr < end; addr += XCHAL_DCACHE_LINESIZE)
+  if (start != end)
     {
-      __asm__ __volatile__ ("dhi %0, 0\n" : : "r"(addr));
+      __asm__ __volatile__ ("dhwbi %0, 0\n" : : "r"(start));
     }
 
   __asm__ __volatile__ ("dsync\n");
@@ -405,11 +416,11 @@ void up_clean_dcache(uintptr_t start, uintptr_t end)
 {
   /* Align to XCHAL_DCACHE_SIZE */
 
-  uint32_t addr = start - (start & (XCHAL_DCACHE_LINESIZE - 1));
+  start &= ~(XCHAL_DCACHE_LINESIZE - 1);
 
-  for (; addr < end; addr += XCHAL_DCACHE_LINESIZE)
+  for (; start < end; start += XCHAL_DCACHE_LINESIZE)
     {
-      __asm__ __volatile__ ("dhwb %0, 0\n" : : "r"(addr));
+      __asm__ __volatile__ ("dhwb %0, 0\n" : : "r"(start));
     }
 
   __asm__ __volatile__ ("dsync\n");
@@ -482,11 +493,11 @@ void up_flush_dcache(uintptr_t start, uintptr_t end)
 {
   /* Align to XCHAL_DCACHE_LINESIZE */
 
-  uint32_t addr = start - (start & (XCHAL_DCACHE_LINESIZE - 1));
+  start &= ~(XCHAL_DCACHE_LINESIZE - 1);
 
-  for (; addr < end; addr += XCHAL_DCACHE_LINESIZE)
+  for (; start < end; start += XCHAL_DCACHE_LINESIZE)
     {
-      __asm__ __volatile__ ("dhwbi %0, 0\n" : : "r"(addr));
+      __asm__ __volatile__ ("dhwbi %0, 0\n" : : "r"(start));
     }
 
   __asm__ __volatile__ ("dsync\n");
@@ -549,11 +560,11 @@ void up_lock_dcache(uintptr_t start, uintptr_t end)
 {
   /* align to XCHAL_DCACHE_LINESIZE */
 
-  uint32_t addr = start - (start & (XCHAL_DCACHE_LINESIZE - 1));
+  start &= ~(XCHAL_DCACHE_LINESIZE - 1);
 
-  for (; addr < end; addr += XCHAL_DCACHE_LINESIZE)
+  for (; start < end; start += XCHAL_DCACHE_LINESIZE)
     {
-      __asm__ __volatile__ ("dpfl %0, 0\n": : "r"(addr));
+      __asm__ __volatile__ ("dpfl %0, 0\n": : "r"(start));
     };
 
   __asm__ __volatile__ ("dsync\n");
@@ -580,11 +591,11 @@ void up_unlock_dcache(uintptr_t start, uintptr_t end)
 {
   /* align to XCHAL_DCACHE_LINESIZE */
 
-  uint32_t addr = start - (start & (XCHAL_DCACHE_LINESIZE - 1));
+  start &= ~(XCHAL_DCACHE_LINESIZE - 1);
 
-  for (; addr < end; addr += XCHAL_DCACHE_LINESIZE)
+  for (; start < end; start += XCHAL_DCACHE_LINESIZE)
     {
-      __asm__ __volatile__ ("dhu %0, 0\n": : "r"(addr));
+      __asm__ __volatile__ ("dhu %0, 0\n": : "r"(start));
     };
 
   __asm__ __volatile__ ("dsync\n");

Reply via email to