This is an automated email from the ASF dual-hosted git repository.

xiaoxiang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/nuttx.git

commit 05e620d12bad8a7b6915f5b3ba100b28df5b733c
Author: David Sidrane <david.sidr...@nscdg.com>
AuthorDate: Mon Nov 20 08:16:03 2023 -0800

    imxrt:Serial Use smart invalidate
---
 arch/arm/src/imxrt/imxrt_serial.c | 55 ++++++++++++++++++++++++++++++++++-----
 1 file changed, 48 insertions(+), 7 deletions(-)

diff --git a/arch/arm/src/imxrt/imxrt_serial.c b/arch/arm/src/imxrt/imxrt_serial.c
index 6926cd8da5..ec8ee84abd 100644
--- a/arch/arm/src/imxrt/imxrt_serial.c
+++ b/arch/arm/src/imxrt/imxrt_serial.c
@@ -770,6 +770,10 @@ struct imxrt_uart_s
   DMACH_HANDLE       rxdma;         /* currently-open receive DMA stream */
   bool               rxenable;      /* DMA-based reception en/disable */
   uint32_t           rxdmanext;     /* Next byte in the DMA buffer to be read */
+#ifdef CONFIG_ARMV7M_DCACHE
+  uint32_t          rxdmaavail;     /* Number of bytes available without need
+                                     * to invalidate the data cache */
+#endif
   char *const        rxfifo;        /* Receive DMA buffer */
 #endif
 };
@@ -2086,6 +2090,9 @@ static int imxrt_dma_setup(struct uart_dev_s *dev)
        */
 
       priv->rxdmanext = 0;
+#ifdef CONFIG_ARMV7M_DCACHE
+      priv->rxdmaavail = 0;
+#endif
 
       /* Enable receive Rx DMA for the UART */
 
@@ -2907,7 +2914,6 @@ static bool imxrt_rxflowcontrol(struct uart_dev_s *dev,
 static int imxrt_dma_receive(struct uart_dev_s *dev, unsigned int *status)
 {
   struct imxrt_uart_s *priv   = (struct imxrt_uart_s *)dev;
-  static uint32_t last_nextrx = -1;
   uint32_t nextrx             = imxrt_dma_nextrx(priv);
   int c                       = 0;
 
@@ -2915,17 +2921,49 @@ static int imxrt_dma_receive(struct uart_dev_s *dev, unsigned int *status)
 
   if (nextrx != priv->rxdmanext)
     {
-      /* Now we must ensure the cache is updated if the DMA has
-       * updated again.
+#ifdef CONFIG_ARMV7M_DCACHE
+      /* If the data cache is enabled, then we will also need to manage
+       * cache coherency.  Are any bytes available in the currently coherent
+       * region of the data cache?
        */
 
-      if (last_nextrx != nextrx)
+      if (priv->rxdmaavail == 0)
         {
-          up_invalidate_dcache((uintptr_t)priv->rxfifo,
-                               (uintptr_t)priv->rxfifo + RXDMA_BUFFER_SIZE);
-          last_nextrx = nextrx;
+          uint32_t rxdmaavail;
+          uintptr_t addr;
+
+          /* No.. then we will have to invalidate additional space in the Rx
+           * DMA buffer.
+           */
+
+          if (nextrx > priv->rxdmanext)
+            {
+              /* Number of available bytes */
+
+              rxdmaavail = nextrx - priv->rxdmanext;
+            }
+          else
+            {
+              /* Number of available bytes up to the end of RXDMA buffer */
+
+              rxdmaavail = RXDMA_BUFFER_SIZE - priv->rxdmanext;
+            }
+
+          /* Invalidate the DMA buffer range */
+
+          addr = (uintptr_t)&priv->rxfifo[priv->rxdmanext];
+          up_invalidate_dcache(addr, addr + rxdmaavail);
+
+          /* We don't need to invalidate the data cache for the next
+           * rxdmaavail bytes.
+           */
+
+          priv->rxdmaavail = rxdmaavail;
         }
 
+      priv->rxdmaavail--;
+#endif
+
       /* Now read from the DMA buffer */
 
       c = priv->rxfifo[priv->rxdmanext];
@@ -2990,6 +3028,9 @@ static void imxrt_dma_reenable(struct imxrt_uart_s *priv)
    */
 
   priv->rxdmanext = 0;
+#ifdef CONFIG_ARMV7M_DCACHE
+  priv->rxdmaavail = 0;
+#endif
 
   /* Start the DMA channel, and arrange for callbacks at the half and
    * full points in the FIFO.  This ensures that we have half a FIFO
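
For context, the sketch below shows the "smart invalidate" pattern this patch
applies, in isolation: instead of invalidating the whole Rx DMA buffer every
time the DMA write index moves, the reader invalidates only the span of newly
written bytes once, remembers how many coherent bytes that produced, and then
consumes them without further cache maintenance. This is a simplified model
under assumed names; rx_state, dma_next_index(), cache_invalidate() and
RXBUF_SIZE are illustrative stand-ins, not the NuttX APIs (the real driver
uses imxrt_dma_nextrx() and up_invalidate_dcache()).

/* Minimal sketch of smart invalidation over a circular Rx DMA buffer.
 * All names here are hypothetical stand-ins for the driver's own helpers.
 */

#include <stdint.h>

#define RXBUF_SIZE 128

struct rx_state
{
  uint8_t  buf[RXBUF_SIZE];  /* Circular DMA receive buffer */
  uint32_t next;             /* Next index software will read */
  uint32_t avail;            /* Bytes already known cache-coherent */
};

extern uint32_t dma_next_index(void);  /* Hypothetical: DMA write index */
extern void cache_invalidate(uintptr_t start, uintptr_t end);

static int rx_get_byte(struct rx_state *rx)
{
  uint32_t next = dma_next_index();
  int c = -1;

  if (next != rx->next)
    {
      if (rx->avail == 0)
        {
          uint32_t avail;
          uintptr_t addr;

          /* No coherent bytes left: invalidate only the span the DMA has
           * newly written, stopping at the end of the circular buffer if
           * the write index has wrapped around.
           */

          if (next > rx->next)
            {
              avail = next - rx->next;        /* Contiguous new data */
            }
          else
            {
              avail = RXBUF_SIZE - rx->next;  /* New data up to the wrap */
            }

          addr = (uintptr_t)&rx->buf[rx->next];
          cache_invalidate(addr, addr + avail);

          rx->avail = avail;                  /* Coherent from here on */
        }

      /* Consume one byte from the already-coherent region */

      rx->avail--;
      c = rx->buf[rx->next];
      rx->next = (rx->next + 1) % RXBUF_SIZE;
    }

  return c;
}

Compared with the removed code, which re-invalidated the entire buffer
whenever the DMA write index changed, this invalidates each region at most
once before it is consumed, which matters when bytes arrive one at a time.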
