From: Ashok Raj <[email protected]>

When a kernel client calls intel_svm_bind_mm() and gets a valid
supervisor PASID, the memory mapping of init_mm will be shared
between CPUs and device. IOMMU has to track the changes to this
memory mapping, and get notified whenever a TLB flush is needed.
Otherwise, the device TLB will be stale compared to that on the
cpu for kernel mappings. This is similar to what has been done
for user space registrations via the mmu_notifier_register() APIs.

To: Alex Williamson <[email protected]>
To: [email protected]
To: Joerg Roedel <[email protected]>
Cc: Ashok Raj <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Huang Ying <[email protected]>
Cc: CQ Tang <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Vegard Nossum <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: David Woodhouse <[email protected]>
Cc: Jean-Philippe Brucker <[email protected]>

Signed-off-by: Ashok Raj <[email protected]>
Signed-off-by: Lu Baolu <[email protected]>
---
 drivers/iommu/intel-svm.c   | 27 +++++++++++++++++++++++++--
 include/linux/intel-iommu.h |  5 ++++-
 2 files changed, 29 insertions(+), 3 deletions(-)

diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index ed1cf7c..1456092 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -283,6 +283,24 @@ static const struct mmu_notifier_ops intel_mmuops = {
 
 static DEFINE_MUTEX(pasid_mutex);
 
+static int intel_init_mm_inval_range(struct notifier_block *nb,
+                                    unsigned long action, void *data)
+{
+       struct kernel_mmu_address_range *range;
+       struct intel_svm *svm = container_of(nb, struct intel_svm, init_mm_nb);
+       unsigned long start, end;
+
+       if (action == KERNEL_MMU_INVALIDATE_RANGE) {
+               range = data;
+               start = range->start;
+               end = range->end;
+
+               intel_flush_svm_range(svm, start,
+                       (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0, 0);
+       }
+       return 0;
+}
+
 int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct 
svm_dev_ops *ops)
 {
        struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
@@ -382,12 +400,12 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int 
flags, struct svm_dev_
                        goto out;
                }
                svm->pasid = ret;
-               svm->notifier.ops = &intel_mmuops;
                svm->mm = mm;
                svm->flags = flags;
                INIT_LIST_HEAD_RCU(&svm->devs);
                ret = -ENOMEM;
                if (mm) {
+                       svm->notifier.ops = &intel_mmuops;
                        ret = mmu_notifier_register(&svm->notifier, mm);
                        if (ret) {
                                idr_remove(&svm->iommu->pasid_idr, svm->pasid);
@@ -396,8 +414,11 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int 
flags, struct svm_dev_
                                goto out;
                        }
                        iommu->pasid_table[svm->pasid].val = (u64)__pa(mm->pgd) 
| 1;
-               } else
+               } else {
+                       svm->init_mm_nb.notifier_call = 
intel_init_mm_inval_range;
+                       kernel_mmu_notifier_register(&svm->init_mm_nb);
                        iommu->pasid_table[svm->pasid].val = 
(u64)__pa(init_mm.pgd) | 1 | (1ULL << 11);
+               }
                wmb();
                /* In caching mode, we still have to flush with PASID 0 when
                 * a PASID table entry becomes present. Not entirely clear
@@ -464,6 +485,8 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
                                        idr_remove(&svm->iommu->pasid_idr, 
svm->pasid);
                                        if (svm->mm)
                                                
mmu_notifier_unregister(&svm->notifier, svm->mm);
+                                       else
+                                               
kernel_mmu_notifier_unregister(&svm->init_mm_nb);
 
                                        /* We mandate that no page faults may 
be outstanding
                                         * for the PASID when 
intel_svm_unbind_mm() is called.
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index f3274d9..5cf83db 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -478,7 +478,10 @@ struct intel_svm_dev {
 };
 
 struct intel_svm {
-       struct mmu_notifier notifier;
+       union {
+               struct mmu_notifier notifier;
+               struct notifier_block init_mm_nb;
+       };
        struct mm_struct *mm;
        struct intel_iommu *iommu;
        int flags;
-- 
2.7.4

_______________________________________________
iommu mailing list
[email protected]
https://lists.linuxfoundation.org/mailman/listinfo/iommu

Reply via email to