Re: [Freedreno] [PATCH v11 09/12] drm/msm: Add support to create a local pagetable

2020-08-07 Thread Rob Clark

[PATCH v11 09/12] drm/msm: Add support to create a local pagetable

2020-08-03 Thread Jordan Crouse
Add support to create an io-pgtable for use by targets that support
per-instance pagetables. In order to support per-instance pagetables, the
GPU SMMU device needs to have the qcom,adreno-smmu compatible string and
split pagetables enabled.

Signed-off-by: Jordan Crouse 
---
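
For context, not part of the patch: a rough sketch of how a GPU target could
consume this interface. It assumes the msm_iommu_pagetable_create() helper
added further down in msm_iommu.c (below the quoted hunks); the example
function name and the final "program the SMMU" step are hypothetical
placeholders, not code from this series.

/*
 * Illustrative sketch only (not from this patch): create a per-instance
 * pagetable on top of the GPU's global address space and read back the
 * values a target would program into the SMMU when switching contexts.
 */
static int example_setup_private_pagetable(struct msm_gpu *gpu)
{
	struct msm_mmu *pt;
	phys_addr_t ttbr;
	int asid;

	/* The parent is the regular IOMMU-backed MMU behind the GPU aspace */
	pt = msm_iommu_pagetable_create(gpu->aspace->mmu);
	if (IS_ERR(pt))
		return PTR_ERR(pt);

	/* TTBR0 address and ASID for this pagetable */
	msm_iommu_pagetable_params(pt, &ttbr, &asid);

	/* ... hand ttbr/asid to target-specific context switch code ... */

	return 0;
}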

 drivers/gpu/drm/msm/msm_gpummu.c |   2 +-
 drivers/gpu/drm/msm/msm_iommu.c  | 191 ++-
 drivers/gpu/drm/msm/msm_mmu.h   |  16 ++-
 3 files changed, 206 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/msm/msm_gpummu.c b/drivers/gpu/drm/msm/msm_gpummu.c
index 310a31b05faa..aab121f4beb7 100644
--- a/drivers/gpu/drm/msm/msm_gpummu.c
+++ b/drivers/gpu/drm/msm/msm_gpummu.c
@@ -102,7 +102,7 @@ struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu)
}
 
gpummu->gpu = gpu;
-   msm_mmu_init(&gpummu->base, dev, &funcs);
+   msm_mmu_init(&gpummu->base, dev, &funcs, MSM_MMU_GPUMMU);
 
 return &gpummu->base;
 }
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 1b6635504069..bc6a4bbc904a 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -4,15 +4,202 @@
  * Author: Rob Clark 
  */
 
+#include <linux/io-pgtable.h>
 #include "msm_drv.h"
 #include "msm_mmu.h"
 
 struct msm_iommu {
struct msm_mmu base;
struct iommu_domain *domain;
+   atomic_t pagetables;
 };
+
 #define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
 
+struct msm_iommu_pagetable {
+   struct msm_mmu base;
+   struct msm_mmu *parent;
+   struct io_pgtable_ops *pgtbl_ops;
+   phys_addr_t ttbr;
+   u32 asid;
+};
+static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
+{
+   return container_of(mmu, struct msm_iommu_pagetable, base);
+}
+
+static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
+   size_t size)
+{
+   struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
+   struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
+   size_t unmapped = 0;
+
+   /* Unmap the block one page at a time */
+   while (size) {
+   unmapped += ops->unmap(ops, iova, 4096, NULL);
+   iova += 4096;
+   size -= 4096;
+   }
+
+   iommu_flush_tlb_all(to_msm_iommu(pagetable->parent)->domain);
+
+   return (unmapped == size) ? 0 : -EINVAL;
+}
+
+static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
+   struct sg_table *sgt, size_t len, int prot)
+{
+   struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
+   struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
+   struct scatterlist *sg;
+   size_t mapped = 0;
+   u64 addr = iova;
+   unsigned int i;
+
+   for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+   size_t size = sg->length;
+   phys_addr_t phys = sg_phys(sg);
+
+   /* Map the block one page at a time */
+   while (size) {
+   if (ops->map(ops, addr, phys, 4096, prot, GFP_KERNEL)) {
+   msm_iommu_pagetable_unmap(mmu, iova, mapped);
+   return -EINVAL;
+   }
+
+   phys += 4096;
+   addr += 4096;
+   size -= 4096;
+   mapped += 4096;
+   }
+   }
+
+   return 0;
+}
+
+static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
+{
+   struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
+   struct msm_iommu *iommu = to_msm_iommu(pagetable->parent);
+
+   /*
+* If this is the last attached pagetable for the parent,
+* disable TTBR0 in the arm-smmu driver
+*/
+   if (atomic_dec_return(&iommu->pagetables) == 0)
+   iommu_domain_set_attr(iommu->domain,
+   DOMAIN_ATTR_PGTABLE_CFG, NULL);
+
+   free_io_pgtable_ops(pagetable->pgtbl_ops);
+   kfree(pagetable);
+}
+
+int msm_iommu_pagetable_params(struct msm_mmu *mmu,
+   phys_addr_t *ttbr, int *asid)
+{
+   struct msm_iommu_pagetable *pagetable;
+
+   if (mmu->type != MSM_MMU_IOMMU_PAGETABLE)
+   return -EINVAL;
+
+   pagetable = to_pagetable(mmu);
+
+   if (ttbr)
+   *ttbr = pagetable->ttbr;
+
+   if (asid)
+   *asid = pagetable->asid;
+
+   return 0;
+}
+
+static const struct msm_mmu_funcs pagetable_funcs = {
+   .map = msm_iommu_pagetable_map,
+   .unmap = msm_iommu_pagetable_unmap,
+   .destroy = msm_iommu_pagetable_destroy,
+};
+
+static void msm_iommu_tlb_flush_all(void *cookie)
+{
+}
+
+static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size,
+   size_t granule, void *cookie)
+{
+}
+
+static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
+   unsigned long iova, size_t granule, void *cookie)
+{
+}
+
+static const struct iommu_flush_ops null_tlb_ops = {
+