Re: [RFC 1/5] RISC-V: Implement arch_sync_dma* functions

2021-09-11 Thread Guo Ren
On Tue, Jul 27, 2021 at 5:53 AM Atish Patra  wrote:
>
> On Sun, Jul 25, 2021 at 11:57 PM Christoph Hellwig  wrote:
> >
> > > +#ifdef CONFIG_RISCV_DMA_NONCOHERENT
> > > +struct riscv_dma_cache_sync {
> > > + void (*cache_invalidate)(phys_addr_t paddr, size_t size);
> > > + void (*cache_clean)(phys_addr_t paddr, size_t size);
> > > + void (*cache_flush)(phys_addr_t paddr, size_t size);
> > > +};
> > > +
> > > +void riscv_dma_cache_sync_set(struct riscv_dma_cache_sync *ops);
> > > +#endif
> >
> > As told a bunch of times before: doing indirect calls here is a
> > performance nightmare.  Use something that actually doesn't perform
> > horribly, like alternatives.  Or even delay implementing that until
> > we need it and do a plain direct call for now.
> >
>
> I was initially planning to replace this with alternatives in the
> future versions. But there is no point in doing it
> until we have CMO spec finalized. We also don't have any other
> platform using these apart from sifive l2
> cache controllers for now.
>
> I will change these to direct for now.
I think alternatives would be helpful; I've rebased on your patchset, thanks.
https://lore.kernel.org/linux-riscv/20210911092139.79607-6-guo...@kernel.org/

>
> > static void __dma_sync(phys_addr_t paddr, size_t size, enum 
> > dma_data_direction dir)
> > > +{
> > > + if ((dir == DMA_FROM_DEVICE) && (dma_cache_sync->cache_invalidate))
> > > + dma_cache_sync->cache_invalidate(paddr, size);
> > > + else if ((dir == DMA_TO_DEVICE) && (dma_cache_sync->cache_clean))
> > > + dma_cache_sync->cache_clean(paddr, size);
> > > + else if ((dir == DMA_BIDIRECTIONAL) && dma_cache_sync->cache_flush)
> > > + dma_cache_sync->cache_flush(paddr, size);
> > > +}
> >
> > The selection of flush types is completely broken.  Take a look at the
> > comment in arch/arc/mm/dma.c above arch_sync_dma_for_device for a good
> > explanation.
> >
>
> Thanks. I will fix it.
>
> > > +void arch_dma_prep_coherent(struct page *page, size_t size)
> > > +{
> > > + void *flush_addr = page_address(page);
> > > +
> > > + memset(flush_addr, 0, size);
> >
> > arch_dma_prep_coherent is not supposed to modify the content of
> > the data.
>
> Sorry. This was a leftover from some experimental code. It shouldn't
> have been included.
>
> > ___
> > iommu mailing list
> > iommu@lists.linux-foundation.org
> > https://lists.linuxfoundation.org/mailman/listinfo/iommu
>
>
>
> --
> Regards,
> Atish
> ___
> iommu mailing list
> iommu@lists.linux-foundation.org
> https://lists.linuxfoundation.org/mailman/listinfo/iommu



-- 
Best Regards
 Guo Ren

ML: https://lore.kernel.org/linux-csky/
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [RFC 1/5] RISC-V: Implement arch_sync_dma* functions

2021-08-17 Thread Guo Ren
On Tue, Aug 17, 2021 at 11:24 AM Atish Patra  wrote:
>
> On Mon, Aug 16, 2021 at 6:48 PM Guo Ren  wrote:
> >
> > On Sat, Jul 24, 2021 at 5:40 AM Atish Patra  wrote:
> > >
> > > To facilitate streaming DMA APIs, this patch introduces a set of generic
> > > cache operations related dma sync. Any platform can use the generic ops
> > > to provide platform specific cache management operations. Once the
> > > standard RISC-V CMO extension is available, it can be built on top of it.
> > >
> > > Signed-off-by: Atish Patra 
> > > ---
> > >  arch/riscv/include/asm/dma-noncoherent.h | 19 +++
> > >  arch/riscv/mm/Makefile   |  1 +
> > >  arch/riscv/mm/dma-noncoherent.c  | 66 
> > >  3 files changed, 86 insertions(+)
> > >  create mode 100644 arch/riscv/include/asm/dma-noncoherent.h
> > >  create mode 100644 arch/riscv/mm/dma-noncoherent.c
> > >
> > > diff --git a/arch/riscv/include/asm/dma-noncoherent.h 
> > > b/arch/riscv/include/asm/dma-noncoherent.h
> > > new file mode 100644
> > > index ..5bdb03c9c427
> > > --- /dev/null
> > > +++ b/arch/riscv/include/asm/dma-noncoherent.h
> > > @@ -0,0 +1,19 @@
> > > +/* SPDX-License-Identifier: GPL-2.0-only */
> > > +/*
> > > + * Copyright (c) 2021 Western Digital Corporation or its affiliates.
> > > + */
> > > +
> > > +#ifndef __ASM_RISCV_DMA_NON_COHERENT_H
> > > +#define __ASM_RISCV_DMA_NON_COHERENT_H
> > > +
> > > +#ifdef CONFIG_RISCV_DMA_NONCOHERENT
> > > +struct riscv_dma_cache_sync {
> > > +   void (*cache_invalidate)(phys_addr_t paddr, size_t size);
> > > +   void (*cache_clean)(phys_addr_t paddr, size_t size);
> > > +   void (*cache_flush)(phys_addr_t paddr, size_t size);
> > > +};
> > I like the style like this than my previous patch which using
> > sbi_call. The c906 has custom instructions that could be called in
> > S-mode directly.
> >
>
> How are you going to include the custom instructions in the upstream kernel ?
In the errata, call set_ops? That issue gives me a headache.

>
> > Hope the patch could be merged soon, after correcting the
> > DMA_FROM/TO_DEVICE/BIDIRECTIONAL handling and the alternatives ops_set.
> >
> > > +
> > > +void riscv_dma_cache_sync_set(struct riscv_dma_cache_sync *ops);
> > > +#endif
> > > +
> > > +#endif
> > > diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
> > > index 7ebaef10ea1b..959bef49098b 100644
> > > --- a/arch/riscv/mm/Makefile
> > > +++ b/arch/riscv/mm/Makefile
> > > @@ -27,3 +27,4 @@ KASAN_SANITIZE_init.o := n
> > >  endif
> > >
> > >  obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o
> > > +obj-$(CONFIG_RISCV_DMA_NONCOHERENT) += dma-noncoherent.o
> > > diff --git a/arch/riscv/mm/dma-noncoherent.c 
> > > b/arch/riscv/mm/dma-noncoherent.c
> > > new file mode 100644
> > > index ..2f6e9627c4aa
> > > --- /dev/null
> > > +++ b/arch/riscv/mm/dma-noncoherent.c
> > > @@ -0,0 +1,66 @@
> > > +// SPDX-License-Identifier: GPL-2.0-only
> > > +/*
> > > + * RISC-V specific functions to support DMA for non-coherent devices
> > > + *
> > > + * Copyright (c) 2021 Western Digital Corporation or its affiliates.
> > > + */
> > > +
> > > +#include 
> > > +#include 
> > > +#include 
> > > +#include 
> > > +#include 
> > > +#include 
> > > +#include 
> > > +#include 
> > > +#include 
> > > +
> > > +static struct riscv_dma_cache_sync *dma_cache_sync;
> > > +unsigned long riscv_dma_uc_offset;
> > > +
> > > +static void __dma_sync(phys_addr_t paddr, size_t size, enum 
> > > dma_data_direction dir)
> > > +{
> > > +   if ((dir == DMA_FROM_DEVICE) && 
> > > (dma_cache_sync->cache_invalidate))
> > > +   dma_cache_sync->cache_invalidate(paddr, size);
> > > +   else if ((dir == DMA_TO_DEVICE) && (dma_cache_sync->cache_clean))
> > > +   dma_cache_sync->cache_clean(paddr, size);
> > > +   else if ((dir == DMA_BIDIRECTIONAL) && 
> > > dma_cache_sync->cache_flush)
> > > +   dma_cache_sync->cache_flush(paddr, size);
> > > +}
> > > +
> > > +void arch_sync_dma_for_device(phys_addr_t paddr, size_t size, enum 
> > > dma_data_direction dir)
> > > +{
> > > +   if (!dma_cache_sync)
> > > +   return;
> > > +
> > > +   __dma_sync(paddr, size, dir);
> > > +}
> > > +
> > > +void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, enum 
> > > dma_data_direction dir)
> > > +{
> > > +   if (!dma_cache_sync)
> > > +   return;
> > > +
> > > +   __dma_sync(paddr, size, dir);
> > > +}
> > > +
> > > +void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
> > > +   const struct iommu_ops *iommu, bool coherent)
> > > +{
> > > +   /* If a specific device is dma-coherent, set it here */
> > > +   dev->dma_coherent = coherent;
> > > +}
> > > +
> > > +void arch_dma_prep_coherent(struct page *page, size_t size)
> > > +{
> > > +   void *flush_addr = page_address(page);
> > > +
> > > +   memset(flush_addr, 0, size);
> > > +   if (dma_cache_sync && dma_cache_sync->cache_flush)
> > > +   

Re: [RFC 1/5] RISC-V: Implement arch_sync_dma* functions

2021-08-16 Thread Atish Patra
On Mon, Aug 16, 2021 at 6:48 PM Guo Ren  wrote:
>
> On Sat, Jul 24, 2021 at 5:40 AM Atish Patra  wrote:
> >
> > To facilitate streaming DMA APIs, this patch introduces a set of generic
> > cache operations related dma sync. Any platform can use the generic ops
> > to provide platform specific cache management operations. Once the
> > standard RISC-V CMO extension is available, it can be built on top of it.
> >
> > Signed-off-by: Atish Patra 
> > ---
> >  arch/riscv/include/asm/dma-noncoherent.h | 19 +++
> >  arch/riscv/mm/Makefile   |  1 +
> >  arch/riscv/mm/dma-noncoherent.c  | 66 
> >  3 files changed, 86 insertions(+)
> >  create mode 100644 arch/riscv/include/asm/dma-noncoherent.h
> >  create mode 100644 arch/riscv/mm/dma-noncoherent.c
> >
> > diff --git a/arch/riscv/include/asm/dma-noncoherent.h 
> > b/arch/riscv/include/asm/dma-noncoherent.h
> > new file mode 100644
> > index ..5bdb03c9c427
> > --- /dev/null
> > +++ b/arch/riscv/include/asm/dma-noncoherent.h
> > @@ -0,0 +1,19 @@
> > +/* SPDX-License-Identifier: GPL-2.0-only */
> > +/*
> > + * Copyright (c) 2021 Western Digital Corporation or its affiliates.
> > + */
> > +
> > +#ifndef __ASM_RISCV_DMA_NON_COHERENT_H
> > +#define __ASM_RISCV_DMA_NON_COHERENT_H
> > +
> > +#ifdef CONFIG_RISCV_DMA_NONCOHERENT
> > +struct riscv_dma_cache_sync {
> > +   void (*cache_invalidate)(phys_addr_t paddr, size_t size);
> > +   void (*cache_clean)(phys_addr_t paddr, size_t size);
> > +   void (*cache_flush)(phys_addr_t paddr, size_t size);
> > +};
> I like the style like this than my previous patch which using
> sbi_call. The c906 has custom instructions that could be called in
> S-mode directly.
>

How are you going to include the custom instructions in the upstream kernel ?

> Hope the patch could be merged soon, after correcting the
> DMA_FROM/TO_DEVICE/BIDIRECTIONAL handling and the alternatives ops_set.
>
> > +
> > +void riscv_dma_cache_sync_set(struct riscv_dma_cache_sync *ops);
> > +#endif
> > +
> > +#endif
> > diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
> > index 7ebaef10ea1b..959bef49098b 100644
> > --- a/arch/riscv/mm/Makefile
> > +++ b/arch/riscv/mm/Makefile
> > @@ -27,3 +27,4 @@ KASAN_SANITIZE_init.o := n
> >  endif
> >
> >  obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o
> > +obj-$(CONFIG_RISCV_DMA_NONCOHERENT) += dma-noncoherent.o
> > diff --git a/arch/riscv/mm/dma-noncoherent.c 
> > b/arch/riscv/mm/dma-noncoherent.c
> > new file mode 100644
> > index ..2f6e9627c4aa
> > --- /dev/null
> > +++ b/arch/riscv/mm/dma-noncoherent.c
> > @@ -0,0 +1,66 @@
> > +// SPDX-License-Identifier: GPL-2.0-only
> > +/*
> > + * RISC-V specific functions to support DMA for non-coherent devices
> > + *
> > + * Copyright (c) 2021 Western Digital Corporation or its affiliates.
> > + */
> > +
> > +#include 
> > +#include 
> > +#include 
> > +#include 
> > +#include 
> > +#include 
> > +#include 
> > +#include 
> > +#include 
> > +
> > +static struct riscv_dma_cache_sync *dma_cache_sync;
> > +unsigned long riscv_dma_uc_offset;
> > +
> > +static void __dma_sync(phys_addr_t paddr, size_t size, enum 
> > dma_data_direction dir)
> > +{
> > +   if ((dir == DMA_FROM_DEVICE) && (dma_cache_sync->cache_invalidate))
> > +   dma_cache_sync->cache_invalidate(paddr, size);
> > +   else if ((dir == DMA_TO_DEVICE) && (dma_cache_sync->cache_clean))
> > +   dma_cache_sync->cache_clean(paddr, size);
> > +   else if ((dir == DMA_BIDIRECTIONAL) && dma_cache_sync->cache_flush)
> > +   dma_cache_sync->cache_flush(paddr, size);
> > +}
> > +
> > +void arch_sync_dma_for_device(phys_addr_t paddr, size_t size, enum 
> > dma_data_direction dir)
> > +{
> > +   if (!dma_cache_sync)
> > +   return;
> > +
> > +   __dma_sync(paddr, size, dir);
> > +}
> > +
> > +void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, enum 
> > dma_data_direction dir)
> > +{
> > +   if (!dma_cache_sync)
> > +   return;
> > +
> > +   __dma_sync(paddr, size, dir);
> > +}
> > +
> > +void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
> > +   const struct iommu_ops *iommu, bool coherent)
> > +{
> > +   /* If a specific device is dma-coherent, set it here */
> > +   dev->dma_coherent = coherent;
> > +}
> > +
> > +void arch_dma_prep_coherent(struct page *page, size_t size)
> > +{
> > +   void *flush_addr = page_address(page);
> > +
> > +   memset(flush_addr, 0, size);
> > +   if (dma_cache_sync && dma_cache_sync->cache_flush)
> > +   dma_cache_sync->cache_flush(__pa(flush_addr), size);
> > +}
> > +
> > +void riscv_dma_cache_sync_set(struct riscv_dma_cache_sync *ops)
> > +{
> > +   dma_cache_sync = ops;
> > +}
> > --
> > 2.31.1
> >
> > ___
> > iommu mailing list
> > iommu@lists.linux-foundation.org
> > https://lists.linuxfoundation.org/mailman/listinfo/iommu
>

Re: [RFC 1/5] RISC-V: Implement arch_sync_dma* functions

2021-08-16 Thread Guo Ren
On Sat, Jul 24, 2021 at 5:40 AM Atish Patra  wrote:
>
> To facilitate streaming DMA APIs, this patch introduces a set of generic
> cache operations related dma sync. Any platform can use the generic ops
> to provide platform specific cache management operations. Once the
> standard RISC-V CMO extension is available, it can be built on top of it.
>
> Signed-off-by: Atish Patra 
> ---
>  arch/riscv/include/asm/dma-noncoherent.h | 19 +++
>  arch/riscv/mm/Makefile   |  1 +
>  arch/riscv/mm/dma-noncoherent.c  | 66 
>  3 files changed, 86 insertions(+)
>  create mode 100644 arch/riscv/include/asm/dma-noncoherent.h
>  create mode 100644 arch/riscv/mm/dma-noncoherent.c
>
> diff --git a/arch/riscv/include/asm/dma-noncoherent.h 
> b/arch/riscv/include/asm/dma-noncoherent.h
> new file mode 100644
> index ..5bdb03c9c427
> --- /dev/null
> +++ b/arch/riscv/include/asm/dma-noncoherent.h
> @@ -0,0 +1,19 @@
> +/* SPDX-License-Identifier: GPL-2.0-only */
> +/*
> + * Copyright (c) 2021 Western Digital Corporation or its affiliates.
> + */
> +
> +#ifndef __ASM_RISCV_DMA_NON_COHERENT_H
> +#define __ASM_RISCV_DMA_NON_COHERENT_H
> +
> +#ifdef CONFIG_RISCV_DMA_NONCOHERENT
> +struct riscv_dma_cache_sync {
> +   void (*cache_invalidate)(phys_addr_t paddr, size_t size);
> +   void (*cache_clean)(phys_addr_t paddr, size_t size);
> +   void (*cache_flush)(phys_addr_t paddr, size_t size);
> +};
I like the style like this than my previous patch which using
sbi_call. The c906 has custom instructions that could be called in
S-mode directly.

Hope the patch could be merged soon, after correcting the
DMA_FROM/TO_DEVICE/BIDIRECTIONAL handling and the alternatives ops_set.

> +
> +void riscv_dma_cache_sync_set(struct riscv_dma_cache_sync *ops);
> +#endif
> +
> +#endif
> diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
> index 7ebaef10ea1b..959bef49098b 100644
> --- a/arch/riscv/mm/Makefile
> +++ b/arch/riscv/mm/Makefile
> @@ -27,3 +27,4 @@ KASAN_SANITIZE_init.o := n
>  endif
>
>  obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o
> +obj-$(CONFIG_RISCV_DMA_NONCOHERENT) += dma-noncoherent.o
> diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c
> new file mode 100644
> index ..2f6e9627c4aa
> --- /dev/null
> +++ b/arch/riscv/mm/dma-noncoherent.c
> @@ -0,0 +1,66 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/*
> + * RISC-V specific functions to support DMA for non-coherent devices
> + *
> + * Copyright (c) 2021 Western Digital Corporation or its affiliates.
> + */
> +
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +
> +static struct riscv_dma_cache_sync *dma_cache_sync;
> +unsigned long riscv_dma_uc_offset;
> +
> +static void __dma_sync(phys_addr_t paddr, size_t size, enum 
> dma_data_direction dir)
> +{
> +   if ((dir == DMA_FROM_DEVICE) && (dma_cache_sync->cache_invalidate))
> +   dma_cache_sync->cache_invalidate(paddr, size);
> +   else if ((dir == DMA_TO_DEVICE) && (dma_cache_sync->cache_clean))
> +   dma_cache_sync->cache_clean(paddr, size);
> +   else if ((dir == DMA_BIDIRECTIONAL) && dma_cache_sync->cache_flush)
> +   dma_cache_sync->cache_flush(paddr, size);
> +}
> +
> +void arch_sync_dma_for_device(phys_addr_t paddr, size_t size, enum 
> dma_data_direction dir)
> +{
> +   if (!dma_cache_sync)
> +   return;
> +
> +   __dma_sync(paddr, size, dir);
> +}
> +
> +void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, enum 
> dma_data_direction dir)
> +{
> +   if (!dma_cache_sync)
> +   return;
> +
> +   __dma_sync(paddr, size, dir);
> +}
> +
> +void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
> +   const struct iommu_ops *iommu, bool coherent)
> +{
> +   /* If a specific device is dma-coherent, set it here */
> +   dev->dma_coherent = coherent;
> +}
> +
> +void arch_dma_prep_coherent(struct page *page, size_t size)
> +{
> +   void *flush_addr = page_address(page);
> +
> +   memset(flush_addr, 0, size);
> +   if (dma_cache_sync && dma_cache_sync->cache_flush)
> +   dma_cache_sync->cache_flush(__pa(flush_addr), size);
> +}
> +
> +void riscv_dma_cache_sync_set(struct riscv_dma_cache_sync *ops)
> +{
> +   dma_cache_sync = ops;
> +}
> --
> 2.31.1
>
> ___
> iommu mailing list
> iommu@lists.linux-foundation.org
> https://lists.linuxfoundation.org/mailman/listinfo/iommu



-- 
Best Regards
 Guo Ren

ML: https://lore.kernel.org/linux-csky/
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [RFC 1/5] RISC-V: Implement arch_sync_dma* functions

2021-07-26 Thread Atish Patra
On Sun, Jul 25, 2021 at 11:57 PM Christoph Hellwig  wrote:
>
> > +#ifdef CONFIG_RISCV_DMA_NONCOHERENT
> > +struct riscv_dma_cache_sync {
> > + void (*cache_invalidate)(phys_addr_t paddr, size_t size);
> > + void (*cache_clean)(phys_addr_t paddr, size_t size);
> > + void (*cache_flush)(phys_addr_t paddr, size_t size);
> > +};
> > +
> > +void riscv_dma_cache_sync_set(struct riscv_dma_cache_sync *ops);
> > +#endif
>
> As told a bunch of times before: doing indirect calls here is a
> performance nightmare.  Use something that actually doesn't perform
> horribly, like alternatives.  Or even delay implementing that until
> we need it and do a plain direct call for now.
>

I was initially planning to replace this with alternatives in the
future versions. But there is no point in doing it
until we have CMO spec finalized. We also don't have any other
platform using these apart from sifive l2
cache controllers for now.

I will change these to direct for now.

> static void __dma_sync(phys_addr_t paddr, size_t size, enum 
> dma_data_direction dir)
> > +{
> > + if ((dir == DMA_FROM_DEVICE) && (dma_cache_sync->cache_invalidate))
> > + dma_cache_sync->cache_invalidate(paddr, size);
> > + else if ((dir == DMA_TO_DEVICE) && (dma_cache_sync->cache_clean))
> > + dma_cache_sync->cache_clean(paddr, size);
> > + else if ((dir == DMA_BIDIRECTIONAL) && dma_cache_sync->cache_flush)
> > + dma_cache_sync->cache_flush(paddr, size);
> > +}
>
> The selection of flush types is completely broken.  Take a look at the
> comment in arch/arc/mm/dma.c above arch_sync_dma_for_device for a good
> explanation.
>

Thanks. I will fix it.

> > +void arch_dma_prep_coherent(struct page *page, size_t size)
> > +{
> > + void *flush_addr = page_address(page);
> > +
> > + memset(flush_addr, 0, size);
>
> arch_dma_prep_coherent is not supposed to modify the content of
> the data.

Sorry. This was a leftover from some experimental code. It shouldn't
have been included.

> ___
> iommu mailing list
> iommu@lists.linux-foundation.org
> https://lists.linuxfoundation.org/mailman/listinfo/iommu



-- 
Regards,
Atish
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [RFC 1/5] RISC-V: Implement arch_sync_dma* functions

2021-07-26 Thread Christoph Hellwig
> +#ifdef CONFIG_RISCV_DMA_NONCOHERENT
> +struct riscv_dma_cache_sync {
> + void (*cache_invalidate)(phys_addr_t paddr, size_t size);
> + void (*cache_clean)(phys_addr_t paddr, size_t size);
> + void (*cache_flush)(phys_addr_t paddr, size_t size);
> +};
> +
> +void riscv_dma_cache_sync_set(struct riscv_dma_cache_sync *ops);
> +#endif

As told a bunch of times before: doing indirect calls here is a
performance nightmare.  Use something that actually doesn't perform
horribly, like alternatives.  Or even delay implementing that until
we need it and do a plain direct call for now.

static void __dma_sync(phys_addr_t paddr, size_t size, enum dma_data_direction 
dir)
> +{
> + if ((dir == DMA_FROM_DEVICE) && (dma_cache_sync->cache_invalidate))
> + dma_cache_sync->cache_invalidate(paddr, size);
> + else if ((dir == DMA_TO_DEVICE) && (dma_cache_sync->cache_clean))
> + dma_cache_sync->cache_clean(paddr, size);
> + else if ((dir == DMA_BIDIRECTIONAL) && dma_cache_sync->cache_flush)
> + dma_cache_sync->cache_flush(paddr, size);
> +}

The selection of flush types is completely broken.  Take a look at the
comment in arch/arc/mm/dma.c above arch_sync_dma_for_device for a good
explanation.

> +void arch_dma_prep_coherent(struct page *page, size_t size)
> +{
> + void *flush_addr = page_address(page);
> +
> + memset(flush_addr, 0, size);

arch_dma_prep_coherent is not supposed to modify the content of
the data.
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[RFC 1/5] RISC-V: Implement arch_sync_dma* functions

2021-07-23 Thread Atish Patra
To facilitate streaming DMA APIs, this patch introduces a set of generic
cache operations related dma sync. Any platform can use the generic ops
to provide platform specific cache management operations. Once the
standard RISC-V CMO extension is available, it can be built on top of it.

Signed-off-by: Atish Patra 
---
 arch/riscv/include/asm/dma-noncoherent.h | 19 +++
 arch/riscv/mm/Makefile   |  1 +
 arch/riscv/mm/dma-noncoherent.c  | 66 
 3 files changed, 86 insertions(+)
 create mode 100644 arch/riscv/include/asm/dma-noncoherent.h
 create mode 100644 arch/riscv/mm/dma-noncoherent.c

diff --git a/arch/riscv/include/asm/dma-noncoherent.h 
b/arch/riscv/include/asm/dma-noncoherent.h
new file mode 100644
index ..5bdb03c9c427
--- /dev/null
+++ b/arch/riscv/include/asm/dma-noncoherent.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ */
+
+#ifndef __ASM_RISCV_DMA_NON_COHERENT_H
+#define __ASM_RISCV_DMA_NON_COHERENT_H
+
+#ifdef CONFIG_RISCV_DMA_NONCOHERENT
+struct riscv_dma_cache_sync {
+   void (*cache_invalidate)(phys_addr_t paddr, size_t size);
+   void (*cache_clean)(phys_addr_t paddr, size_t size);
+   void (*cache_flush)(phys_addr_t paddr, size_t size);
+};
+
+void riscv_dma_cache_sync_set(struct riscv_dma_cache_sync *ops);
+#endif
+
+#endif
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index 7ebaef10ea1b..959bef49098b 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -27,3 +27,4 @@ KASAN_SANITIZE_init.o := n
 endif
 
 obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o
+obj-$(CONFIG_RISCV_DMA_NONCOHERENT) += dma-noncoherent.o
diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c
new file mode 100644
index ..2f6e9627c4aa
--- /dev/null
+++ b/arch/riscv/mm/dma-noncoherent.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * RISC-V specific functions to support DMA for non-coherent devices
+ *
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+static struct riscv_dma_cache_sync *dma_cache_sync;
+unsigned long riscv_dma_uc_offset;
+
+static void __dma_sync(phys_addr_t paddr, size_t size, enum dma_data_direction 
dir)
+{
+   if ((dir == DMA_FROM_DEVICE) && (dma_cache_sync->cache_invalidate))
+   dma_cache_sync->cache_invalidate(paddr, size);
+   else if ((dir == DMA_TO_DEVICE) && (dma_cache_sync->cache_clean))
+   dma_cache_sync->cache_clean(paddr, size);
+   else if ((dir == DMA_BIDIRECTIONAL) && dma_cache_sync->cache_flush)
+   dma_cache_sync->cache_flush(paddr, size);
+}
+
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size, enum 
dma_data_direction dir)
+{
+   if (!dma_cache_sync)
+   return;
+
+   __dma_sync(paddr, size, dir);
+}
+
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, enum 
dma_data_direction dir)
+{
+   if (!dma_cache_sync)
+   return;
+
+   __dma_sync(paddr, size, dir);
+}
+
+void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+   const struct iommu_ops *iommu, bool coherent)
+{
+   /* If a specific device is dma-coherent, set it here */
+   dev->dma_coherent = coherent;
+}
+
+void arch_dma_prep_coherent(struct page *page, size_t size)
+{
+   void *flush_addr = page_address(page);
+
+   memset(flush_addr, 0, size);
+   if (dma_cache_sync && dma_cache_sync->cache_flush)
+   dma_cache_sync->cache_flush(__pa(flush_addr), size);
+}
+
+void riscv_dma_cache_sync_set(struct riscv_dma_cache_sync *ops)
+{
+   dma_cache_sync = ops;
+}
-- 
2.31.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu