>
> WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
>
>@@ -570,6 +577,9 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
>
> clk_bulk_disable(iommu->num_clocks, iommu->clocks);
>
>+ if (need_runtime_put)
>+ pm_runtime_put(iommu->dev);
>+
> return ret;
> }
>
>@@ -611,10 +621,20 @@ static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
> spin_lock_irqsave(&rk_domain->iommus_lock, flags);
> list_for_each(pos, &rk_domain->iommus) {
> struct rk_iommu *iommu;
>+ int ret;
>+
> iommu = list_entry(pos, struct rk_iommu, node);
>- WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
>- rk_iommu_zap_lines(iommu, iova, size);
>- clk_bulk_disable(iommu->num_clocks, iommu->clocks);
>+
>+ /* Only zap TLBs of IOMMUs that are powered on. */
>+ ret = pm_runtime_get_if_in_use(iommu->dev);
>+ if (ret > 0 || ret == -EINVAL) {
>+ WARN_ON(clk_bulk_enable(iommu->num_clocks,
>+ iommu->clocks));
>+ rk_iommu_zap_lines(iommu, iova, size);
>+ clk_bulk_disable(iommu->num_clocks, iommu->clocks);
>+ }
>+ if (ret > 0)
>+ pm_runtime_put(iommu->dev);
> }
> spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
> }
>@@ -817,22 +837,30 @@ static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
> return data ? data->iommu : NULL;
> }
>
>-static int rk_iommu_attach_device(struct iommu_domain *domain,
>- struct device *dev)
>+/* Must be called with iommu powered on and attached */
>+static void rk_iommu_shutdown(struct rk_iommu *iommu)
> {
>- struct rk_iommu *iommu;
>+ int i;
>+
>+ /* Ignore error while disabling, just keep going */
>+ WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
>+ rk_iommu_enable_stall(iommu);
>+ rk_iommu_disable_paging(iommu);
>+ for (i = 0; i < iommu->num_mmu; i++) {
>+ rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
>+ rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
>+ }
>+ rk_iommu_disable_stall(iommu);
>+ clk_bulk_disable(iommu->num_clocks, iommu->clocks);
>+}
>+
>+/* Must be called with iommu powered on and attached */
>+static int rk_iommu_startup(struct rk_iommu *iommu)
>+{
>+ struct iommu_domain *domain = iommu->domain;
> struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
>- unsigned long flags;
> int ret, i;
>
>- /*
>- * Allow 'virtual devices' (e.g., drm) to attach to domain.
>- * Such a device does not belong to an iommu group.
>- */
>- iommu = rk_iommu_from_dev(dev);
>- if (!iommu)
>- return 0;
>-
> ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks);
> if (ret)
> return ret;
>@@ -845,8 +873,6 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
> if (ret)
> goto out_disable_stall;
>
>- iommu->domain = domain;
>-
> for (i = 0; i < iommu->num_mmu; i++) {
> rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
> rk_domain->dt_dma);
>@@ -855,14 +881,6 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
> }
>
> ret = rk_iommu_enable_paging(iommu);
>- if (ret)
>- goto out_disable_stall;
>-
>- spin_lock_irqsave(&rk_domain->iommus_lock, flags);
>- list_add_tail(&iommu->node, &rk_domain->iommus);
>- spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
>-
>- dev_dbg(dev, "Attached to iommu domain\n");
>
> out_disable_stall:
> rk_iommu_disable_stall(iommu);
>@@ -877,31 +895,76 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
> struct rk_iommu *iommu;
> struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
> unsigned long flags;
>- int i;
>+ int ret;
>
> /* Allow 'virtual devices' (eg drm) to detach from domain */
> iommu = rk_iommu_from_dev(dev);
> if (!iommu)
> return;
>
>+ dev_dbg(dev, "Detaching from iommu domain\n");
>+
>+ /* iommu already detached */
>+ if (iommu->domain != domain)
>+ return;
>+
>+ iommu->domain = NULL;
>+
> spin_lock_irqsave(&rk_domain->iommus_lock, flags);
> list_del_init(&iommu->node);
> spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
>
>- /* Ignore error while disabling, just keep going */
>- WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
>- rk_iommu_enable_stall(iommu);
>- rk_iommu_disable_paging(iommu);
>- for (i = 0; i < iommu->num_mmu; i++) {
>- rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
>- rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
>- }
>- rk_iommu_disable_stall(iommu);
>- clk_bulk_disable(iommu->num_clocks, iommu->clocks);
>+ ret = pm_runtime_get_if_in_use(iommu->dev);
>+ if (ret > 0 || ret == -EINVAL)
>+ rk_iommu_shutdown(iommu);
>+ if (ret > 0)
>+ pm_runtime_put(iommu->dev);
>+}
>
>- iommu->domain = NULL;
>+static int rk_iommu_attach_device(struct iommu_domain *domain,
>+ struct device *dev)
>+{
>+ struct rk_iommu *iommu;
>+ struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
>+ unsigned long flags;
>+ int ret, need_runtime_put;