From: Honglei Huang <[email protected]>

Add interval tree support for efficient lookup of the affected userptr
ranges during MMU notifier callbacks.
Add:
- Include linux/interval_tree.h
- mark_invalid_ranges(), which walks the interval tree to identify and
  mark the ranges affected by a given invalidation event

Signed-off-by: Honglei Huang <[email protected]>
---
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 21 +++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index a32b46355..3b7fc6d15 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -25,6 +25,7 @@
 #include <linux/pagemap.h>
 #include <linux/sched/mm.h>
 #include <linux/sched/task.h>
+#include <linux/interval_tree.h>
 #include <drm/ttm/ttm_tt.h>
 
 #include <drm/drm_exec.h>
@@ -1122,6 +1123,26 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
 	return ret;
 }
 
+static bool mark_invalid_ranges(struct kgd_mem *mem,
+				unsigned long inv_start, unsigned long inv_end)
+{
+	struct interval_tree_node *node;
+	struct user_range_info *range_info;
+	bool any_invalid = false;
+
+	for (node = interval_tree_iter_first(&mem->user_ranges_itree, inv_start, inv_end - 1);
+	     node;
+	     node = interval_tree_iter_next(node, inv_start, inv_end - 1)) {
+		range_info = container_of(node, struct user_range_info, it_node);
+		range_info->invalid++;
+		any_invalid = true;
+		pr_debug("Range [0x%llx-0x%llx) marked invalid (count=%u)\n",
+			 range_info->start, range_info->start + range_info->size,
+			 range_info->invalid);
+	}
+	return any_invalid;
+}
+
 /* Reserving a BO and its page table BOs must happen atomically to
  * avoid deadlocks. Some operations update multiple VMs at once. Track
  * all the reservation info in a context structure. Optionally a sync
-- 
2.34.1
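
Not part of the patch, just a sketch for context on how the new tree is
expected to be fed and consumed. The struct user_range_info field types,
the kgd_mem::notifier member and the helper/callback names below are
assumptions for illustration only; interval_tree_insert(),
interval_tree_iter_first()/_next() and the mmu_interval_notifier
invalidate hook are the existing kernel interfaces.

/* Layout assumed from the fields mark_invalid_ranges() touches. */
struct user_range_info {
	struct interval_tree_node it_node;	/* start/last are inclusive */
	u64 start;
	u64 size;
	unsigned int invalid;			/* invalidation count */
};

/* Registering a userptr range so invalidations can find it (assumed helper). */
static void track_user_range(struct kgd_mem *mem, struct user_range_info *ri,
			     u64 start, u64 size)
{
	ri->start = start;
	ri->size = size;
	ri->invalid = 0;
	ri->it_node.start = start;
	ri->it_node.last = start + size - 1;	/* interval tree uses an inclusive last */
	interval_tree_insert(&ri->it_node, &mem->user_ranges_itree);
}

/* MMU interval notifier invalidate callback (assumed name and wiring). */
static bool kfd_userptr_invalidate(struct mmu_interval_notifier *mni,
				   const struct mmu_notifier_range *range,
				   unsigned long cur_seq)
{
	struct kgd_mem *mem = container_of(mni, struct kgd_mem, notifier);

	mmu_interval_set_seq(mni, cur_seq);
	if (mark_invalid_ranges(mem, range->start, range->end)) {
		/* Queue eviction/restore work for the affected BO here. */
		pr_debug("userptr invalidation hit tracked ranges\n");
	}
	return true;
}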
