From: Jérôme Glisse <[email protected]>

When a range of virtual addresses is updated to read only and the
corresponding user ptr objects are already read only, it is pointless to
do anything. Optimize this case out.

Signed-off-by: Jérôme Glisse <[email protected]>
Cc: Christian König <[email protected]>
Cc: Jan Kara <[email protected]>
Cc: Felix Kuehling <[email protected]>
Cc: Jason Gunthorpe <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Ross Zwisler <[email protected]>
Cc: Dan Williams <[email protected]>
Cc: Paolo Bonzini <[email protected]>
Cc: Radim Krčmář <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Ralph Campbell <[email protected]>
Cc: John Hubbard <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: Arnd Bergmann <[email protected]>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 3e6823fdd939..7880eda064cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -294,6 +294,7 @@ static int amdgpu_mn_invalidate_range_start_hsa(struct 
mmu_notifier *mn,
 {
        struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
        struct interval_tree_node *it;
+       bool update_to_read_only;
        unsigned long end;
 
        /* notification is exclusive, but interval is inclusive */
@@ -302,6 +303,8 @@ static int amdgpu_mn_invalidate_range_start_hsa(struct 
mmu_notifier *mn,
        if (amdgpu_mn_read_lock(amn, range->blockable))
                return -EAGAIN;
 
+       update_to_read_only = mmu_notifier_range_update_to_read_only(range);
+
        it = interval_tree_iter_first(&amn->objects, range->start, end);
        while (it) {
                struct amdgpu_mn_node *node;
@@ -317,6 +320,16 @@ static int amdgpu_mn_invalidate_range_start_hsa(struct 
mmu_notifier *mn,
 
                list_for_each_entry(bo, &node->bos, mn_list) {
                        struct kgd_mem *mem = bo->kfd_bo;
+                       bool read_only;
+
+                       /*
+                        * If it is already read only and we are updating to
+                        * read only then we do not need to change anything.
+                        * So save time and skip this one.
+                        */
+                       read_only = amdgpu_ttm_tt_is_readonly(bo->tbo.ttm);
+                       if (update_to_read_only && read_only)
+                               continue;
 
                        if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
                                                         range->start,
-- 
2.17.2

Reply via email to