I like this approach better than the alternative, although I might put
BROKEN in the flag name or something.

On Saturday, 2 April 2016, Mark Kettenis <[email protected]> wrote:

> It is pretty clear that the DMA engine on the Davicom dc(4) hardware
> is broken and will read beyond the end of the buffer that we pass it.
> This is bad news for hardware that uses an IOMMU, as it will detect
> the DMA overrun and (at least on sparc64) signal an unrecoverable
> error.
>
> It is somewhat difficult to fix this in the driver.  We could copy
> mbufs that we want to transmit into a buffer that has enough trailing
> space, but then we have to make sure that the buffer is physically
> contiguous and that the unused trailing space is mapped into the
> IOMMU.
>
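For comparison, a rough sketch of what the copy-based alternative would
look like in dc_start() (the sc_txbounce_* names here are invented; the
idea is one physically contiguous buffer per TX slot, allocated at attach
time with bus_dmamem_alloc()/bus_dmamem_map() and sized MCLBYTES +
PAGE_SIZE so there is always mapped slack after the packet):

	/* copy the outgoing mbuf chain into the padded bounce buffer */
	m_copydata(m, 0, m->m_pkthdr.len, sc->sc_txbounce_kva);

	/*
	 * Load the whole padded area, not just the packet, so the
	 * trailing slack ends up mapped in the IOMMU as well.
	 */
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_txbounce_map,
	    sc->sc_txbounce_kva, MCLBYTES + PAGE_SIZE, NULL,
	    BUS_DMA_NOWAIT) != 0)
		break;

That's an extra copy of every packet plus the bookkeeping above, which is
exactly what the flag avoids.
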
> The diff below takes a different approach.  It introduces a new
> BUS_DMA_OVERRUN flag.  On sparc64 this flag will make sure that we map
> an additional scratch page into the IOMMU.  This fixes the DMA errors
> that people reported ever since dlg@ changed the pools.
>
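If I'm reading iommu_iomap_load_map() right, the effect is that the
scratch page simply becomes the last page of the DVMA mapping, roughly:

	dvma:  [ buf page 0 ][ buf page 1 ] ... [ buf page n ][ scratch ]
	phys:    mbuf data     mbuf data          mbuf data     is_scratch

so when the dc(4) DMA engine runs past the end of the packet, the stray
reads land in the shared scratch page instead of faulting in the IOMMU.
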
> If people agree with this approach, I will add a
>
> #define BUS_DMA_OVERRUN 0
>
> to all the other bus_dma implementations in the tree.
>
> Thoughts?
>
>
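One nice property of defining the flag to 0 elsewhere is that the driver
side stays completely MI: the

	bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_NOWAIT | BUS_DMA_OVERRUN)

calls in the dc(4) chunk below compile to exactly what they do today on
architectures where the flag is 0, so no #ifdef is needed anywhere.
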
> Index: dev/ic/dc.c
> ===================================================================
> RCS file: /cvs/src/sys/dev/ic/dc.c,v
> retrieving revision 1.149
> diff -u -p -r1.149 dc.c
> --- dev/ic/dc.c 28 Nov 2015 22:57:43 -0000      1.149
> +++ dev/ic/dc.c 2 Apr 2016 15:42:15 -0000
> @@ -2584,13 +2584,13 @@ dc_start(struct ifnet *ifp)
>
>                 map = sc->sc_tx_sparemap;
>                 switch (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
> -                   BUS_DMA_NOWAIT)) {
> +                   BUS_DMA_NOWAIT | BUS_DMA_OVERRUN)) {
>                 case 0:
>                         break;
>                 case EFBIG:
>                         if (m_defrag(m, M_DONTWAIT) == 0 &&
>                             bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
> -                            BUS_DMA_NOWAIT) == 0)
> +                            BUS_DMA_NOWAIT | BUS_DMA_OVERRUN) == 0)
>                                 break;
>
>                         /* FALLTHROUGH */
> Index: arch/sparc64/dev/iommu.c
> ===================================================================
> RCS file: /cvs/src/sys/arch/sparc64/dev/iommu.c,v
> retrieving revision 1.72
> diff -u -p -r1.72 iommu.c
> --- arch/sparc64/dev/iommu.c    9 Jan 2015 14:23:25 -0000       1.72
> +++ arch/sparc64/dev/iommu.c    2 Apr 2016 15:42:17 -0000
> @@ -195,6 +195,13 @@ iommu_init(char *name, struct iommu_stat
>         pmap_update(pmap_kernel());
>         memset(is->is_tsb, 0, size);
>
> +       TAILQ_INIT(&mlist);
> +       if (uvm_pglistalloc(PAGE_SIZE, (paddr_t)0, (paddr_t)-1,
> +           (paddr_t)PAGE_SIZE, (paddr_t)0, &mlist, 1, UVM_PLA_NOWAIT) != 0)
> +               panic("iommu_init: no memory");
> +       m = TAILQ_FIRST(&mlist);
> +       is->is_scratch = VM_PAGE_TO_PHYS(m);
> +
>  #ifdef DEBUG
>         if (iommudebug & IDB_INFO) {
>                 /* Probe the iommu */
> @@ -734,6 +741,13 @@ iommu_dvmamap_load(bus_dma_tag_t t, bus_
>                         }
>                 }
>         }
> +       if (flags & BUS_DMA_OVERRUN) {
> +               err = iommu_iomap_insert_page(ims, is->is_scratch);
> +               if (err) {
> +                       iommu_iomap_clear_pages(ims);
> +                       return (EFBIG);
> +               }
> +       }
>         sgsize = ims->ims_map.ipm_pagecnt * PAGE_SIZE;
>
>         mtx_enter(&is->is_mtx);
> @@ -939,6 +953,13 @@ iommu_dvmamap_load_raw(bus_dma_tag_t t,
>                         }
>
>                         left -= seg_len;
> +               }
> +       }
> +       if (flags & BUS_DMA_OVERRUN) {
> +               err = iommu_iomap_insert_page(ims, is->is_scratch);
> +               if (err) {
> +                       iommu_iomap_clear_pages(ims);
> +                       return (EFBIG);
>                 }
>         }
>         sgsize = ims->ims_map.ipm_pagecnt * PAGE_SIZE;
> Index: arch/sparc64/dev/iommuvar.h
> ===================================================================
> RCS file: /cvs/src/sys/arch/sparc64/dev/iommuvar.h,v
> retrieving revision 1.16
> diff -u -p -r1.16 iommuvar.h
> --- arch/sparc64/dev/iommuvar.h 22 Jan 2014 10:52:35 -0000      1.16
> +++ arch/sparc64/dev/iommuvar.h 2 Apr 2016 15:42:17 -0000
> @@ -117,6 +117,8 @@ struct iommu_state {
>
>         struct strbuf_ctl       *is_sb[2];      /* Streaming buffers if any */
>
> +       paddr_t                 is_scratch;     /* Scratch page */
> +
>         /* copies of our parents state, to allow us to be self contained */
>         bus_space_tag_t         is_bustag;      /* our bus tag */
>         bus_space_handle_t      is_iommu;       /* IOMMU registers */
> Index: arch/sparc64/dev/viommu.c
> ===================================================================
> RCS file: /cvs/src/sys/arch/sparc64/dev/viommu.c,v
> retrieving revision 1.16
> diff -u -p -r1.16 viommu.c
> --- arch/sparc64/dev/viommu.c   9 Jan 2015 14:23:25 -0000       1.16
> +++ arch/sparc64/dev/viommu.c   2 Apr 2016 15:42:17 -0000
> @@ -106,6 +106,9 @@ void
>  viommu_init(char *name, struct iommu_state *is, int tsbsize,
>      u_int32_t iovabase)
>  {
> +       struct vm_page *m;
> +       struct pglist mlist;
> +
>         /*
>          * Setup the iommu.
>          *
> @@ -121,6 +124,13 @@ viommu_init(char *name, struct iommu_sta
>                 is->is_dvmaend = iovabase + IOTSB_VSIZE(tsbsize) - 1;
>         }
>
> +       TAILQ_INIT(&mlist);
> +       if (uvm_pglistalloc(PAGE_SIZE, (paddr_t)0, (paddr_t)-1,
> +           (paddr_t)PAGE_SIZE, (paddr_t)0, &mlist, 1, UVM_PLA_NOWAIT) != 0)
> +               panic("%s: no memory", __func__);
> +       m = TAILQ_FIRST(&mlist);
> +       is->is_scratch = VM_PAGE_TO_PHYS(m);
> +
>         /*
>          * Allocate a dvma map.
>          */
> @@ -341,6 +351,13 @@ viommu_dvmamap_load(bus_dma_tag_t t, bus
>                         }
>                 }
>         }
> +       if (flags & BUS_DMA_OVERRUN) {
> +               err = iommu_iomap_insert_page(ims, is->is_scratch);
> +               if (err) {
> +                       iommu_iomap_clear_pages(ims);
> +                       return (EFBIG);
> +               }
> +       }
>         sgsize = ims->ims_map.ipm_pagecnt * PAGE_SIZE;
>
>         mtx_enter(&is->is_mtx);
> @@ -522,6 +539,13 @@ viommu_dvmamap_load_raw(bus_dma_tag_t t,
>                         }
>
>                         left -= seg_len;
> +               }
> +       }
> +       if (flags & BUS_DMA_OVERRUN) {
> +               err = iommu_iomap_insert_page(ims, is->is_scratch);
> +               if (err) {
> +                       iommu_iomap_clear_pages(ims);
> +                       return (EFBIG);
>                 }
>         }
>         sgsize = ims->ims_map.ipm_pagecnt * PAGE_SIZE;
> Index: arch/sparc64/include/bus.h
> ===================================================================
> RCS file: /cvs/src/sys/arch/sparc64/include/bus.h,v
> retrieving revision 1.29
> diff -u -p -r1.29 bus.h
> --- arch/sparc64/include/bus.h  13 May 2013 17:46:42 -0000      1.29
> +++ arch/sparc64/include/bus.h  2 Apr 2016 15:42:17 -0000
> @@ -356,19 +356,20 @@ bus_space_barrier(t, h, o, s, f)
>  /*
>   * Flags used in various bus DMA methods.
>   */
> -#define        BUS_DMA_WAITOK          0x000   /* safe to sleep (pseudo-flag) */
> -#define        BUS_DMA_NOWAIT          0x001   /* not safe to sleep */
> -#define        BUS_DMA_ALLOCNOW        0x002   /* perform resource allocation now */
> -#define        BUS_DMA_COHERENT        0x004   /* hint: map memory DMA coherent */
> -#define        BUS_DMA_NOWRITE         0x008   /* I suppose the following two should default on */
> -#define        BUS_DMA_BUS1            0x010   /* placeholders for bus functions... */
> -#define        BUS_DMA_BUS2            0x020
> -#define        BUS_DMA_BUS3            0x040
> -#define        BUS_DMA_BUS4            0x080
> -#define        BUS_DMA_STREAMING       0x100   /* hint: sequential, unidirectional */
> -#define        BUS_DMA_READ            0x200   /* mapping is device -> memory only */
> -#define        BUS_DMA_WRITE           0x400   /* mapping is memory -> device only */
> -#define        BUS_DMA_ZERO            0x800   /* zero memory in dmamem_alloc */
> +#define        BUS_DMA_WAITOK          0x0000  /* safe to sleep (pseudo-flag) */
> +#define        BUS_DMA_NOWAIT          0x0001  /* not safe to sleep */
> +#define        BUS_DMA_ALLOCNOW        0x0002  /* perform resource allocation now */
> +#define        BUS_DMA_COHERENT        0x0004  /* hint: map memory DMA coherent */
> +#define        BUS_DMA_NOWRITE         0x0008  /* I suppose the following two should default on */
> +#define        BUS_DMA_BUS1            0x0010  /* placeholders for bus functions... */
> +#define        BUS_DMA_BUS2            0x0020
> +#define        BUS_DMA_BUS3            0x0040
> +#define        BUS_DMA_BUS4            0x0080
> +#define        BUS_DMA_STREAMING       0x0100  /* hint: sequential, unidirectional */
> +#define        BUS_DMA_READ            0x0200  /* mapping is device -> memory only */
> +#define        BUS_DMA_WRITE           0x0400  /* mapping is memory -> device only */
> +#define        BUS_DMA_ZERO            0x0800  /* zero memory in dmamem_alloc */
> +#define BUS_DMA_OVERRUN                0x1000  /* tolerate DMA overruns */
>
>  #define        BUS_DMA_NOCACHE         BUS_DMA_BUS1
>  #define        BUS_DMA_DVMA            BUS_DMA_BUS2    /* Don't bother with alignment */
>
>
