From: Honglei Huang <[email protected]>

Add ioctl entry point and fault handling:

amdgpu_svm.c additions:
- amdgpu_svm_copy_attrs: memdup_user for attribute array
- amdgpu_svm_garbage_collector: dequeue and remove unmapped ranges
- amdgpu_svm_range_clean_queue: drain work list releasing refs
- amdgpu_svm_garbage_collector_work_func: GC work handler
- amdgpu_svm_gc_init/fini/flush: workqueue lifecycle
- amdgpu_gem_svm_ioctl: validate args, lazy-init SVM, dispatch
  SET_ATTR/GET_ATTR/RESET_ATTR with copy_to_user for GET (see the
  usage sketch below)
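
For reference, a minimal userspace sketch of the SET_ATTR path. The
args struct, operation names, and field layout match this patch; the
attribute members (.type/.value) and the DRM_AMDGPU_GEM_SVM command
name are assumptions from the series' uapi and may differ:

	/* hypothetical usage, error handling omitted */
	struct drm_amdgpu_svm_attribute attr = {
		.type = AMDGPU_SVM_ATTR_PREFERRED_LOC,	/* assumed name */
		.value = AMDGPU_SVM_LOCATION_SYSMEM,
	};
	struct drm_amdgpu_gem_svm args = {
		.operation = AMDGPU_SVM_OP_SET_ATTR,
		.start_addr = va,	/* page aligned, non-zero */
		.size = len,		/* page aligned, non-zero */
		.nattr = 1,
		.attrs_ptr = (__u64)(uintptr_t)&attr,
	};
	ret = drmCommandWriteRead(fd, DRM_AMDGPU_GEM_SVM,
				  &args, sizeof(args));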

amdgpu_svm_fault.c (new):
- svm_check_fault_allowed: VMA permission check for fault address
- amdgpu_svm_range_get_unregistered_attrs: create default attr range
  for faulting address not covered by explicit attributes, bounded
  by VMA and granularity alignment
- amdgpu_svm_range_map_fault: fault mapping pipeline with retry
  (GC, permission check, find/insert range, timestamp dedup,
  validity check, get_pages, update_mapping)
- amdgpu_svm_handle_fault: top-level entry point from VM fault path
  (PASID lookup, exiting check, xnack gate, checkpoint timestamp
  filtering, attr lookup with bounds, dispatch to map_fault); the
  expected call site is sketched below
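
A rough sketch of the expected caller, for orientation only (the
actual wiring into the GMC interrupt handler is outside this patch;
entry fields are the usual amdgpu_iv_entry ones):

	/* in a gmc_v* retry-fault handler, illustrative only */
	if (!amdgpu_svm_handle_fault(adev, entry->pasid,
				     addr >> PAGE_SHIFT, /* page number */
				     entry->timestamp, write_fault))
		return 1;	/* fault resolved or safely dropped */

Note that amdgpu_svm_handle_fault() takes fault_addr as a page number
and shifts it up to a byte address internally.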

Signed-off-by: Honglei Huang <[email protected]>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_svm.c       | 148 +++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_svm_fault.c | 386 ++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_svm_fault.h |  39 ++
 3 files changed, 573 insertions(+)
 create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_svm_fault.c
 create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_svm_fault.h

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_svm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_svm.c
index f88bad1d6..1fd09a1bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_svm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_svm.c
@@ -434,4 +434,152 @@ bool amdgpu_svm_is_enabled(struct amdgpu_vm *vm)
        return vm->svm != NULL;
 }
 
+static int amdgpu_svm_copy_attrs(const struct drm_amdgpu_gem_svm *args,
+                                 struct drm_amdgpu_svm_attribute **attrs,
+                                 size_t *size)
+{
+       if (!args->nattr || args->nattr > AMDGPU_SVM_MAX_ATTRS)
+               return -EINVAL;
+       if (!args->attrs_ptr)
+               return -EINVAL;
+
+       *size = args->nattr * sizeof(**attrs);
+       *attrs = memdup_user(u64_to_user_ptr(args->attrs_ptr), *size);
+
+       return PTR_ERR_OR_ZERO(*attrs);
+}
+
+int amdgpu_svm_garbage_collector(struct amdgpu_svm *svm)
+{
+       int ret;
+       struct amdgpu_svm_range_op_ctx op_ctx;
+
+       amdgpu_svm_assert_locked(svm);
+
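+       /*
+        * The work lock is dropped for each dequeued entry so that
+        * range removal runs without holding the spinlock.
+        */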
+       spin_lock(&svm->work_lock);
+       while (amdgpu_svm_range_dequeue_locked(svm, &svm->gc.list, &op_ctx)) {
+               spin_unlock(&svm->work_lock);
+
+               WARN_ON(!UNMAP_WORK(op_ctx.pending_ops));
+
+               drm_gpusvm_range_remove(&svm->gpusvm,
+                                       &op_ctx.range->base);
+
+               amdgpu_svm_range_put_if_dequeued(svm, op_ctx.range);
+               spin_lock(&svm->work_lock);
+       }
+       spin_unlock(&svm->work_lock);
+       return 0;
+}
+
+void
+amdgpu_svm_range_clean_queue(struct amdgpu_svm *svm,
+                            struct list_head *work_list)
+{
+       struct amdgpu_svm_range_op_ctx op_ctx;
+
+       spin_lock(&svm->work_lock);
+       while (amdgpu_svm_range_dequeue_locked(svm, work_list, &op_ctx)) {
+               spin_unlock(&svm->work_lock);
+               amdgpu_svm_range_put_if_dequeued(svm, op_ctx.range);
+               spin_lock(&svm->work_lock);
+       }
+       spin_unlock(&svm->work_lock);
+}
+
+static void amdgpu_svm_garbage_collector_work_func(struct work_struct *w)
+{
+       struct amdgpu_svm_gc *gc = container_of(w, struct amdgpu_svm_gc, work);
+       struct amdgpu_svm *svm = container_of(gc, struct amdgpu_svm, gc);
+
+       amdgpu_svm_lock(svm);
+       amdgpu_svm_garbage_collector(svm);
+       amdgpu_svm_unlock(svm);
+}
+
+int amdgpu_svm_gc_init(struct amdgpu_svm *svm)
+{
+       svm->gc.wq = alloc_workqueue(AMDGPU_SVM_GC_WQ_NAME,
+                                    WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
+                                    0);
+       if (!svm->gc.wq)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&svm->gc.list);
+       INIT_WORK(&svm->gc.work, amdgpu_svm_garbage_collector_work_func);
+
+       return 0;
+}
+
+void amdgpu_svm_gc_fini(struct amdgpu_svm *svm)
+{
+       flush_work(&svm->gc.work);
+       amdgpu_svm_range_clean_queue(svm, &svm->gc.list);
+       destroy_workqueue(svm->gc.wq);
+       svm->gc.wq = NULL;
+}
+
+void amdgpu_svm_gc_flush(struct amdgpu_svm *svm)
+{
+       flush_work(&svm->gc.work);
+}
+
+int amdgpu_gem_svm_ioctl(struct drm_device *dev, void *data,
+                        struct drm_file *filp)
+{
+       struct amdgpu_fpriv *fpriv = filp->driver_priv;
+       struct amdgpu_device *adev = drm_to_adev(dev);
+       struct drm_amdgpu_gem_svm *args = data;
+       struct drm_amdgpu_svm_attribute *attrs = NULL;
+       struct amdgpu_vm *vm;
+       size_t attrs_size = 0;
+       int ret = 0;
+
+       AMDGPU_SVM_TRACE("ioctl op=%u va:[0x%llx-0x%llx)-0x%llx nattr=%u\n",
+                        args->operation, args->start_addr, args->start_addr + 
args->size,
+                        args->size, args->nattr);
+
+       if ((args->start_addr & ~PAGE_MASK) || (args->size & ~PAGE_MASK))
+               return -EINVAL;
+
+       if (!args->start_addr || !args->size ||
+           args->start_addr + args->size < args->start_addr)
+               return -EINVAL;
+
+       vm = &fpriv->vm;
+       if (!amdgpu_svm_is_enabled(vm)) {
+               ret = amdgpu_svm_init(adev, vm);
+               if (ret)
+                       return ret;
+       }
+
+       if (args->operation != AMDGPU_SVM_OP_RESET_ATTR) {
+               ret = amdgpu_svm_copy_attrs(args, &attrs, &attrs_size);
+               if (ret)
+                       return ret;
+       }
+
+       switch (args->operation) {
+       case AMDGPU_SVM_OP_SET_ATTR:
+               ret = amdgpu_svm_op_set_attr(vm, args->start_addr, args->size,
+                                            args->nattr, attrs);
+               break;
+       case AMDGPU_SVM_OP_GET_ATTR:
+               ret = amdgpu_svm_op_get_attr(vm, args->start_addr, args->size,
+                                            args->nattr, attrs);
+               if (!ret && copy_to_user(u64_to_user_ptr(args->attrs_ptr),
+                                        attrs, attrs_size))
+                       ret = -EFAULT;
+               break;
+       case AMDGPU_SVM_OP_RESET_ATTR:
+               ret = amdgpu_svm_op_reset_attr(vm, args->start_addr,
+                                              args->size);
+               break;
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+       kvfree(attrs);
+       return ret;
+}
+
 #endif /* CONFIG_DRM_AMDGPU_SVM */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_svm_fault.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_svm_fault.c
new file mode 100644
index 000000000..7763eb029
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_svm_fault.c
@@ -0,0 +1,386 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright 2026 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu_svm.h"
+#include "amdgpu_svm_attr.h"
+#include "amdgpu_svm_fault.h"
+#include "amdgpu_svm_range.h"
+#include "amdgpu.h"
+#include "amdgpu_vm.h"
+#include "amdgpu_gmc.h"
+#include "amdgpu_ih.h"
+
+#include <drm/drm_exec.h>
+#include <drm/drm_gpusvm.h>
+
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/sched/mm.h>
+
+#if IS_ENABLED(CONFIG_DRM_AMDGPU_SVM)
+
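+/*
+ * Faults that land within this window of a range's last successful
+ * validate are treated as already-serviced hardware retries and skipped.
+ */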
+#define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING   (2UL * NSEC_PER_MSEC)
+
+static int amdgpu_svm_range_get_unregistered_attrs(struct amdgpu_svm *svm,
+                                           unsigned long fault_addr,
+                                           unsigned long attr_start_page,
+                                           unsigned long attr_last_page,
+                                           struct amdgpu_svm_attr_range **out)
+{
+       struct amdgpu_svm_attr_tree *attr_tree = svm->attr_tree;
+       struct amdgpu_svm_attr_range *range;
+       struct amdgpu_svm_attrs attrs;
+       struct mm_struct *mm = svm->gpusvm.mm;
+       struct vm_area_struct *vma;
+       unsigned long fault_page = fault_addr >> PAGE_SHIFT;
+       unsigned long start_page, last_page;
+       unsigned long vma_start_page, vma_last_page;
+       unsigned long bo_start = 0, bo_last = 0;
+       int r;
+
+       amdgpu_svm_attr_set_default(svm, &attrs);
+
+       mmap_read_lock(mm);
+
+       vma = amdgpu_svm_check_vma(mm, fault_addr);
+       if (IS_ERR(vma)) {
+               mmap_read_unlock(mm);
+               AMDGPU_SVM_ERR("get_unregistered_attrs: invalid VMA for fault_addr=0x%lx\n",
+                              fault_addr);
+               return PTR_ERR(vma);
+       }
+       vma_start_page = vma->vm_start >> PAGE_SHIFT;
+       vma_last_page = (vma->vm_end >> PAGE_SHIFT) - 1;
+
+       if (vma_is_initial_heap(vma) || vma_is_initial_stack(vma))
+               attrs.preferred_loc = AMDGPU_SVM_LOCATION_SYSMEM;
+
+       mmap_read_unlock(mm);
+
+       start_page = max(vma_start_page,
+                        (unsigned long)ALIGN_DOWN(fault_page,
+                                                  1UL << attrs.granularity));
+       last_page = min(vma_last_page,
+                       (unsigned long)ALIGN(fault_page + 1,
+                                            1UL << attrs.granularity) - 1);
+
+       start_page = max(start_page, attr_start_page);
+       last_page = min(last_page, attr_last_page);
+
+       r = amdgpu_svm_attr_check_vm_bo(attr_tree, start_page, last_page,
+                                       &bo_start, &bo_last);
+       if (r == -EADDRINUSE) {
+               if (fault_page >= bo_start && fault_page <= bo_last)
+                       return -EFAULT;
+
+               /* Narrow to single page if expanded range overlaps BO */
+               start_page = fault_page;
+               last_page = fault_page;
+       } else if (r) {
+               return r;
+       }
+
+       mutex_lock(&attr_tree->lock);
+       range = amdgpu_svm_attr_range_alloc(start_page, last_page, &attrs);
+       if (!range) {
+               mutex_unlock(&attr_tree->lock);
+               return -ENOMEM;
+       }
+       amdgpu_svm_attr_range_insert_locked(attr_tree, range);
+       mutex_unlock(&attr_tree->lock);
+
+       AMDGPU_SVM_TRACE(
+               "Created unregistered range for fault_addr=0x%lx: attr range=[0x%lx-0x%lx) size=0x%lx attrs={preferred_loc=%d, prefetch_loc=%d, flags=0x%x, granularity=%u, access=%u}\n",
+               fault_addr, amdgpu_svm_attr_start_page(range),
+               amdgpu_svm_attr_last_page(range) + 1,
+               amdgpu_svm_attr_last_page(range) -
+                       amdgpu_svm_attr_start_page(range) + 1,
+               range->attrs.preferred_loc, range->attrs.prefetch_loc,
+               range->attrs.flags, range->attrs.granularity,
+               range->attrs.access);
+
+       *out = range;
+       return 0;
+}
+
+static int svm_check_fault_allowed(struct amdgpu_svm *svm,
+                                  unsigned long fault_addr, bool write_fault)
+{
+       struct mm_struct *mm = svm->gpusvm.mm;
+       struct vm_area_struct *vma;
+       unsigned long requested = VM_READ;
+       int ret = 0;
+
+       if (write_fault)
+               requested |= VM_WRITE;
+
+       mmap_read_lock(mm);
+       vma = vma_lookup(mm, fault_addr);
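+       /*
+        * A missing VMA is tolerated here: the subsequent range lookup
+        * fails with -ENOENT and the caller handles that case.
+        */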
+       if (vma && (vma->vm_flags & requested) != requested) {
+               AMDGPU_SVM_ERR("fault addr 0x%lx no %s permission\n",
+                        fault_addr, write_fault ? "write" : "read");
+               ret = -EPERM;
+       }
+       mmap_read_unlock(mm);
+
+       return ret;
+}
+
+static int amdgpu_svm_range_map_fault(struct amdgpu_svm *svm,
+                              unsigned long fault_addr,
+                              const struct amdgpu_svm_attr_range *attr_range,
+                              bool write_fault)
+{
+       const struct amdgpu_svm_attrs *attrs = &attr_range->attrs;
+       bool need_vram_migration = amdgpu_svm_attr_prefer_vram(svm, attrs);
+       /*
+        * TODO: add migration; until then devmem is force-disabled rather
+        * than taking amdgpu_svm_attr_devmem_possible(svm, attrs).
+        */
+       bool devmem_possible = false;
+       struct drm_gpusvm_ctx map_ctx = {
+               .read_only = !!(attrs->flags & AMDGPU_SVM_ATTR_BIT_GPU_RO),
+               .devmem_possible = devmem_possible,
+               .check_pages_threshold = devmem_possible ? SZ_64K : 0,
+               .devmem_only = need_vram_migration && devmem_possible,
+               .timeslice_ms = need_vram_migration && devmem_possible ? 5 : 0,
+       };
+       struct amdgpu_svm_range *range;
+       ktime_t timestamp = ktime_get_boottime();
+       uint64_t range_pte_flags;
+       int retry_count = 3;
+       int ret;
+
+       amdgpu_svm_assert_locked(svm);
+       WARN_ON(!svm->xnack_enabled);
+
+retry:
+       ret = amdgpu_svm_garbage_collector(svm);
+       if (ret) {
+               AMDGPU_SVM_ERR("fault garbage collector failed: ret=%d, fault_addr=0x%lx\n",
+                              ret, fault_addr);
+       }
+
+       ret = svm_check_fault_allowed(svm, fault_addr, write_fault);
+       if (ret)
+               return ret;
+
+       range = amdgpu_svm_range_find_or_insert(
+               svm, fault_addr,
+               amdgpu_svm_attr_start(attr_range),
+               amdgpu_svm_attr_end(attr_range),
+               &map_ctx);
+       if (IS_ERR(range)) {
+               ret = PTR_ERR(range);
+               AMDGPU_SVM_ERR("map_fault: range_find_or_insert failed: 
fault=0x%lx ret=%d\n",
+                                fault_addr, ret);
+               /*
+                * -EINVAL: fault_addr out of gpusvm range, or no chunk size
+                *          fits within VMA/notifier/attr_range bounds.
+                * -EFAULT: mmget_not_zero failed.
+                * -ENOENT: No VMA at fault_addr.
+                * -ENOMEM: Notifier or range allocation failed.
+                */
+               if (ret == -EFAULT || ret == -ENOENT) {
+                       AMDGPU_SVM_ERR("no vma or mm is dying: 0x%lx, ret=%d\n",
+                                        fault_addr, ret);
+                       ret = 0;
+               }
+
+               return ret;
+       }
+
+       if (ktime_before(timestamp, ktime_add_ns(range->validate_timestamp,
+                                AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING))) {
+               AMDGPU_SVM_TRACE("already restored, skip: fault=0x%lx range=[0x%lx-0x%lx)\n",
+                                fault_addr, drm_gpusvm_range_start(&range->base),
+                                drm_gpusvm_range_end(&range->base));
+               goto out;
+       }
+
+       range_pte_flags = amdgpu_svm_range_attr_pte_flags(
+                                       svm, attrs, map_ctx.read_only);
+
+       if (!(write_fault && map_ctx.read_only) &&
+           amdgpu_svm_range_is_valid(svm, range, attrs, range_pte_flags)) {
+               AMDGPU_SVM_TRACE("valid range, skip: fault=0x%lx 
range=[0x%lx-0x%lx)\n",
+                                fault_addr, 
drm_gpusvm_range_start(&range->base),
+                                drm_gpusvm_range_end(&range->base));
+               goto out;
+       }
+
+       AMDGPU_SVM_RANGE_DEBUG(range, "PAGE FAULT");
+       /* TODO: add migration */
+
+       AMDGPU_SVM_RANGE_DEBUG(range, "GET PAGES");
+       ret = amdgpu_svm_range_get_pages(svm, &range->base, &map_ctx);
+       if (ret == -EOPNOTSUPP || ret == -EFAULT) {
+               /*
+                * -EOPNOTSUPP  Mixed page types within range.
+                * -EFAULT      (a) mm is dying.
+                *              (b) range was unmapped.
+                *              (c) DMA mapping failed.
+                *              (d) devmem_only requested but system page
+                *                  encountered.
+                *              (e) hmm_range_fault: no VMA, page fault error,
+                *                  bad pte/pmd.
+                * -EBUSY       HMM retry loop timed out.
+                * -ENOMEM      PFN or DMA address array allocation failed.
+                * -EINVAL      hmm_range_fault: invalid VMA type.
+                */
+               map_ctx.timeslice_ms <<= 1;
+               if (!map_ctx.devmem_only && --retry_count > 0) {
+                       AMDGPU_SVM_ERR("start retry: get_pages failed with %d, retries_left=%d: fault=0x%lx range=[0x%lx-0x%lx)\n",
+                                      ret, retry_count, fault_addr,
+                                      drm_gpusvm_range_start(&range->base),
+                                      drm_gpusvm_range_end(&range->base));
+                       goto retry;
+               } else {
+                       AMDGPU_SVM_ERR("map_fault: get_pages failed with %d, retries exhausted or devmem required: fault=0x%lx range=[0x%lx-0x%lx)\n",
+                                      ret, fault_addr,
+                                      drm_gpusvm_range_start(&range->base),
+                                      drm_gpusvm_range_end(&range->base));
+               }
+       }
+
+       if (ret == -EPERM) {
+               AMDGPU_SVM_ERR("get_pages -EPERM: fault=0x%lx range=[0x%lx-0x%lx)\n",
+                              fault_addr, drm_gpusvm_range_start(&range->base),
+                              drm_gpusvm_range_end(&range->base));
+               return ret;
+       }
+
+       if (ret) {
+               AMDGPU_SVM_RANGE_DEBUG(range, "PAGE FAULT - FAIL PAGE COLLECT");
+               goto out;
+       }
+
+       AMDGPU_SVM_RANGE_DEBUG(range, "PAGE FAULT - GPU MAP");
+
+       ret = amdgpu_svm_range_update_mapping(svm, range,
+                                             range_pte_flags, attrs->flags,
+                                             false, false, false);
+       if (ret)
+               goto err_out;
+
+out:
+       return 0;
+
+err_out:
+       if (ret == -EAGAIN && --retry_count > 0) {
+               map_ctx.timeslice_ms <<= 1;
+               AMDGPU_SVM_RANGE_DEBUG(range, "PAGE FAULT - RETRY GPU MAP");
+               goto retry;
+       }
+
+       return ret;
+}
+
+int amdgpu_svm_handle_fault(struct amdgpu_device *adev, uint32_t pasid,
+                           uint64_t fault_addr, uint64_t ts,
+                           bool write_fault)
+{
+       struct amdgpu_svm *svm;
+       struct amdgpu_svm_attr_range *attr_range;
+       unsigned long attr_start_page, attr_last_page;
+       unsigned long fault_page;
+       uint64_t ckpt;
+       int ret;
+
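+       /* fault_addr arrives from the IH as a page number */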
+       fault_addr = fault_addr << PAGE_SHIFT;
+       fault_page = fault_addr >> PAGE_SHIFT;
+
+       svm = amdgpu_svm_lookup_by_pasid(adev, pasid);
+       if (!svm) {
+               AMDGPU_SVM_ERR("handle_fault: no SVM context for pasid %u\n", 
pasid);
+               return -EOPNOTSUPP;
+       }
+
+       if (atomic_read(&svm->exiting)) {
+               AMDGPU_SVM_ERR("handle_fault: SVM context is exiting for pasid 
%u\n", pasid);
+               ret = -EAGAIN;
+               goto out_put;
+       }
+
+       if (!svm->xnack_enabled) {
+               AMDGPU_SVM_ERR("handle_fault: SVM context does not have xnack 
enabled for pasid %u\n", pasid);
+               ret = -EOPNOTSUPP;
+               goto out_put;
+       }
+
+       ckpt = READ_ONCE(svm->checkpoint_ts);
+       if (ckpt != 0) {
+               if (amdgpu_ih_ts_after_or_equal(ts, ckpt)) {
+                       AMDGPU_SVM_TRACE("handle_fault: draining stale retry fault, drop fault 0x%llx ts=%llu checkpoint=%llu\n",
+                                        fault_addr, ts, ckpt);
+                       amdgpu_gmc_filter_faults_remove(adev, fault_page, pasid);
+                       ret = 0;
+                       goto out_put;
+               } else {
+                       WRITE_ONCE(svm->checkpoint_ts, 0);
+               }
+       }
+
+       amdgpu_svm_lock(svm);
+
+       mutex_lock(&svm->attr_tree->lock);
+       attr_range = amdgpu_svm_attr_get_bounds_locked(svm->attr_tree,
+                                                      fault_page,
+                                                      &attr_start_page,
+                                                      &attr_last_page);
+       mutex_unlock(&svm->attr_tree->lock);
+       if (!attr_range) {
+               ret = amdgpu_svm_range_get_unregistered_attrs(svm, fault_addr,
+                                                             attr_start_page,
+                                                             attr_last_page,
+                                                             &attr_range);
+               if (ret) {
+                       if (ret == -EFAULT)
+                               goto out_no_vma;
+                       goto out_unlock;
+               }
+       }
+       ret = amdgpu_svm_range_map_fault(svm, fault_addr, attr_range,
+                                        write_fault);
+
+       if (ret == -EAGAIN) {
+               AMDGPU_SVM_ERR("handle_fault: got -EAGAIN: fault=0x%llx\n",
+                              fault_addr);
+               amdgpu_gmc_filter_faults_remove(adev, fault_page, pasid);
+               ret = 0;
+       }
+
+       goto out_unlock;
+
+out_no_vma:
+       AMDGPU_SVM_ERR("handle_fault: no VMA for fault=0x%llx (stale retry or 
GPU NULL deref)\n",
+                fault_addr);
+       ret = 0;
+
+out_unlock:
+       amdgpu_svm_unlock(svm);
+
+out_put:
+       amdgpu_svm_put(svm);
+       return ret;
+}
+
+#endif /* CONFIG_DRM_AMDGPU_SVM */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_svm_fault.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_svm_fault.h
new file mode 100644
index 000000000..1c8f6c15e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_svm_fault.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright 2026 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_SVM_FAULT_H__
+#define __AMDGPU_SVM_FAULT_H__
+
+#include <linux/types.h>
+
+struct amdgpu_device;
+struct amdgpu_svm;
+struct amdgpu_svm_attr_range;
+struct amdgpu_svm_attrs;
+
+int amdgpu_svm_handle_fault(struct amdgpu_device *adev, uint32_t pasid,
+                           uint64_t fault_addr, uint64_t ts,
+                           bool write_fault);
+
+#endif /* __AMDGPU_SVM_FAULT_H__ */
-- 
2.34.1
