Author: mmel
Date: Mon Nov  2 08:26:19 2020
New Revision: 367268
URL: https://svnweb.freebsd.org/changeset/base/367268

Log:
  Improve loading of multi-page-aligned buffers.
  
  Multi-page alignment requests are incompatible with many aspects of
  the current busdma code, mainly with partially bounced buffer segments
  and with the per-page loop in bus_dmamap_load_buffer(). Because a
  proper implementation would require a major restructuring of the code,
  fix only the already known uses and add a KASSERT for all other cases.
  
  For this reason, bus_dmamap_load_buffer() should take memory allocated
  by bus_dmamem_alloc() as a single segment, bypassing the per-page
  segmentation. We can do this because such memory is guaranteed to be
  physically contiguous (see the sketch below).
  
  Reviewed by:  bz
  Tested by:    imp, mv, daniel.engberg.lists_pyret.net, kjopek_gmail.com
  Differential Revision: https://reviews.freebsd.org/D26735
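
  Below is a minimal sketch (written for this note, not code from the
  tree) of the driver-side pattern this change targets: a tag requesting
  alignment larger than PAGE_SIZE whose buffer comes from
  bus_dmamem_alloc(9), so the resulting map is loaded as one contiguous
  segment. The softc layout, MY_RING_SIZE, and function names are
  hypothetical.

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/bus.h>
    #include <machine/bus.h>

    /* Hypothetical driver state; the field names are illustrative only. */
    struct my_softc {
            bus_dma_tag_t   ring_tag;
            bus_dmamap_t    ring_map;
            void            *ring_va;
            bus_addr_t      ring_pa;
    };

    #define MY_RING_SIZE    (128 * 1024)

    static void
    my_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
    {
            if (error == 0)
                    *(bus_addr_t *)arg = segs[0].ds_addr;   /* one segment */
    }

    static int
    my_ring_alloc(device_t dev, struct my_softc *sc)
    {
            int error;

            /*
             * Request 64 KiB alignment (> PAGE_SIZE); with this change it
             * is supported only for maps backed by bus_dmamem_alloc().
             */
            error = bus_dma_tag_create(bus_get_dma_tag(dev),
                64 * 1024, 0,                   /* alignment, boundary */
                BUS_SPACE_MAXADDR,              /* lowaddr */
                BUS_SPACE_MAXADDR,              /* highaddr */
                NULL, NULL,                     /* filter, filterarg */
                MY_RING_SIZE, 1, MY_RING_SIZE,  /* maxsize, nseg, maxsegsz */
                0, NULL, NULL, &sc->ring_tag);
            if (error != 0)
                    return (error);
            error = bus_dmamem_alloc(sc->ring_tag, &sc->ring_va,
                BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->ring_map);
            if (error != 0)
                    return (error);
            /*
             * The map comes from bus_dmamem_alloc(), so the buffer is
             * loaded as a single physically contiguous segment instead
             * of being split per page.
             */
            return (bus_dmamap_load(sc->ring_tag, sc->ring_map, sc->ring_va,
                MY_RING_SIZE, my_dma_cb, &sc->ring_pa, BUS_DMA_NOWAIT));
    }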

Modified:
  head/sys/arm64/arm64/busdma_bounce.c

Modified: head/sys/arm64/arm64/busdma_bounce.c
==============================================================================
--- head/sys/arm64/arm64/busdma_bounce.c        Mon Nov  2 06:16:11 2020        (r367267)
+++ head/sys/arm64/arm64/busdma_bounce.c        Mon Nov  2 08:26:19 2020        (r367268)
@@ -501,13 +501,6 @@ static int
 bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
     bus_dmamap_t *mapp)
 {
-       /*
-        * XXX ARM64TODO:
-        * This bus_dma implementation requires IO-Coherent architecutre.
-        * If IO-Coherency is not guaranteed, the BUS_DMA_COHERENT flag has
-        * to be implented using non-cacheable memory.
-        */
-
        vm_memattr_t attr;
        int mflags;
 
@@ -830,7 +823,19 @@ bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dm
                sgsize = MIN(buflen, dmat->common.maxsegsz);
                if (map->pagesneeded != 0 &&
                    must_bounce(dmat, map, curaddr, sgsize)) {
-                       sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
+                       /*
+                        * The attempt to split a physically contiguous buffer
+                        * seems very controversial; it's unclear whether we
+                        * can do this in all cases. Also, memory for bounced
+                        * buffers is allocated as pages, so we cannot
+                        * guarantee multipage alignment.
+                        */
+                       KASSERT(dmat->common.alignment <= PAGE_SIZE,
+                           ("bounced buffer cannot have alignment bigger "
+                           "than PAGE_SIZE: %lu", dmat->common.alignment));
+                       sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
+                       sgsize = roundup2(sgsize, dmat->common.alignment);
+                       sgsize = MIN(sgsize, dmat->common.maxsegsz);
                        curaddr = add_bounce_page(dmat, map, 0, curaddr,
                            sgsize);
                } else if ((map->flags & DMAMAP_COHERENT) == 0) {
@@ -843,11 +848,11 @@ bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dm
                                sl++;
                                sl->vaddr = 0;
                                sl->paddr = curaddr;
-                               sl->datacount = sgsize;
                                sl->pages = PHYS_TO_VM_PAGE(curaddr);
                                KASSERT(sl->pages != NULL,
                                    ("%s: page at PA:0x%08lx is not in "
                                    "vm_page_array", __func__, curaddr));
+                               sl->datacount = sgsize;
                        } else
                                sl->datacount += sgsize;
                }
@@ -880,6 +885,11 @@ bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_
        vm_offset_t kvaddr, vaddr, sl_vend;
        int error;
 
+       KASSERT((map->flags & DMAMAP_FROM_DMAMEM) != 0 ||
+           dmat->common.alignment <= PAGE_SIZE,
+           ("loading user buffer with alignment bigger than PAGE_SIZE is not "
+           "supported"));
+
        if (segs == NULL)
                segs = dmat->segments;
 
@@ -895,6 +905,11 @@ bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_
                }
        }
 
+       /*
+        * XXX Optimally, we should parse the input buffer for physically
+        * contiguous segments first and then pass these segments into the
+        * load loop.
+        */
        sl = map->slist + map->sync_count - 1;
        vaddr = (vm_offset_t)buf;
        sl_pend = 0;
@@ -916,15 +931,25 @@ bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_
                 * Compute the segment size, and adjust counts.
                 */
                max_sgsize = MIN(buflen, dmat->common.maxsegsz);
-               sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
+               if ((map->flags & DMAMAP_FROM_DMAMEM) != 0) {
+                       sgsize = max_sgsize;
+               } else {
+                       sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
+                       sgsize = MIN(sgsize, max_sgsize);
+               }
+
                if (map->pagesneeded != 0 &&
                    must_bounce(dmat, map, curaddr, sgsize)) {
+                       /* See comment in bounce_bus_dmamap_load_phys */
+                       KASSERT(dmat->common.alignment <= PAGE_SIZE,
+                           ("bounced buffer cannot have alignment bigger "
+                           "than PAGE_SIZE: %lu", dmat->common.alignment));
+                       sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
                        sgsize = roundup2(sgsize, dmat->common.alignment);
                        sgsize = MIN(sgsize, max_sgsize);
                        curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
                            sgsize);
                } else if ((map->flags & DMAMAP_COHERENT) == 0) {
-                       sgsize = MIN(sgsize, max_sgsize);
                        if (map->sync_count > 0) {
                                sl_pend = sl->paddr + sl->datacount;
                                sl_vend = sl->vaddr + sl->datacount;
@@ -934,7 +959,7 @@ bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_
                            (kvaddr != 0 && kvaddr != sl_vend) ||
                            (curaddr != sl_pend)) {
                                if (++map->sync_count > dmat->common.nsegments)
-                                       goto cleanup;
+                                       break;
                                sl++;
                                sl->vaddr = kvaddr;
                                sl->paddr = curaddr;
@@ -950,8 +975,6 @@ bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_
                                sl->datacount = sgsize;
                        } else
                                sl->datacount += sgsize;
-               } else {
-                       sgsize = MIN(sgsize, max_sgsize);
                }
                sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
                    segp);
@@ -961,7 +984,6 @@ bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_
                buflen -= sgsize;
        }
 
-cleanup:
        /*
         * Did we fit?
         */
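
For reference, here is a small standalone sketch (an illustration written
for this note, not code from the tree) that replays the bounced-path
segment sizing from the hunks above, with sys/param.h's roundup2() and a
4 KiB PAGE_SIZE hardcoded:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (PAGE_SIZE - 1)
    /* As in sys/param.h: round x up to the power-of-two boundary y. */
    #define roundup2(x, y)  (((x) + ((y) - 1)) & ~((y) - 1))
    #define MIN(a, b)       ((a) < (b) ? (a) : (b))

    /* The three-step sizing used on the bounced path above. */
    static unsigned long
    bounce_sgsize(uint64_t curaddr, unsigned long alignment,
        unsigned long maxsegsz)
    {
            unsigned long sgsize;

            sgsize = PAGE_SIZE - (curaddr & PAGE_MASK); /* to end of page */
            sgsize = roundup2(sgsize, alignment);   /* keep tag alignment */
            return (MIN(sgsize, maxsegsz));
    }

    int
    main(void)
    {
            /* 2 KiB-aligned tag, buffer starting mid-page at 0x1800. */
            printf("%lu\n", bounce_sgsize(0x1800, 2048, 65536)); /* 2048 */
            return (0);
    }

Since the KASSERTs bound alignment by PAGE_SIZE on this path (and busdma
alignments are powers of two), roundup2() can never produce a size larger
than PAGE_SIZE here.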