On 2024-01-11 11:19, Felix Kuehling wrote:
> On 2024-01-11 02:22, Lang Yu wrote:
>> Fixes: 410f08516e0f ("drm/amdkfd: Move dma unmapping after TLB flush")
>>
>> [ 41.708711] WARNING: CPU: 0 PID: 1463 at
>> drivers/gpu/drm/ttm/ttm_bo.c:846 ttm_bo_validate+0x146/0x1b0 [ttm]
>> [ 41.708989] Call Trace:
>> [ 41.708992] <TASK>
>> [ 41.708996] ? show_regs+0x6c/0x80
>> [ 41.709000] ? ttm_bo_validate+0x146/0x1b0 [ttm]
>> [ 41.709008] ? __warn+0x93/0x190
>> [ 41.709014] ? ttm_bo_validate+0x146/0x1b0 [ttm]
>> [ 41.709024] ? report_bug+0x1f9/0x210
>> [ 41.709035] ? handle_bug+0x46/0x80
>> [ 41.709041] ? exc_invalid_op+0x1d/0x80
>> [ 41.709048] ? asm_exc_invalid_op+0x1f/0x30
>> [ 41.709057] ? amdgpu_amdkfd_gpuvm_dmaunmap_mem+0x2c/0x80 [amdgpu]
>> [ 41.709185] ? ttm_bo_validate+0x146/0x1b0 [ttm]
>> [ 41.709197] ? amdgpu_amdkfd_gpuvm_dmaunmap_mem+0x2c/0x80 [amdgpu]
>> [ 41.709337] ? srso_alias_return_thunk+0x5/0x7f
>> [ 41.709346] kfd_mem_dmaunmap_attachment+0x9e/0x1e0 [amdgpu]
>> [ 41.709467] amdgpu_amdkfd_gpuvm_dmaunmap_mem+0x56/0x80 [amdgpu]
>> [ 41.709586] kfd_ioctl_unmap_memory_from_gpu+0x1b7/0x300 [amdgpu]
>> [ 41.709710] kfd_ioctl+0x1ec/0x650 [amdgpu]
>> [ 41.709822] ? __pfx_kfd_ioctl_unmap_memory_from_gpu+0x10/0x10 [amdgpu]
>> [ 41.709945] ? srso_alias_return_thunk+0x5/0x7f
>> [ 41.709949] ? tomoyo_file_ioctl+0x20/0x30
>> [ 41.709959] __x64_sys_ioctl+0x9c/0xd0
>> [ 41.709967] do_syscall_64+0x3f/0x90
>> [ 41.709973] entry_SYSCALL_64_after_hwframe+0x6e/0xd8
>>
>> Signed-off-by: Lang Yu <[email protected]>
>> ---
>> drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 7 ++++++-
>> 1 file changed, 6 insertions(+), 1 deletion(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
>> index 48697b789342..f5542a4ab8ed 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
>> @@ -2095,8 +2095,13 @@ void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
>>  	mutex_lock(&mem->lock);
>>  	list_for_each_entry(entry, &mem->attachments, list) {
>> -		if (entry->bo_va->base.vm == vm)
>> +		if (entry->bo_va->base.vm != vm)
>> +			continue;
>> +
>> +		if (!WARN_ON(amdgpu_bo_reserve(entry->bo_va->base.bo, true))) {
>>  			kfd_mem_dmaunmap_attachment(mem, entry);
>> +			amdgpu_bo_unreserve(entry->bo_va->base.bo);
>> +		}
>
> I'm pretty sure someone else worked on a fix for this before. This is
> not a good solution. We need to handle failed reservations (due to
> ERESTARTSYS) and make sure that the unmap ioctl can be restarted
> correctly in that case.
>
> See
> https://lore.kernel.org/amd-gfx/[email protected]/
>
> David, do you have any update on this work?
>
I tried to solve this same problem. After the feedback on the November
posting, I updated my patch. In testing, I found that the updated patch
(attached, to avoid cluttering this thread and to make clear that it is
not ready for primetime) causes lockdep to emit possible
circular-dependency warnings, so I haven't pursued it any further.
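
To make the discussion easier to follow without opening the attachment,
the core of the change is roughly the following condensed, untested
sketch (not the patch itself; is_dmamapped is the new flag the patch
adds, everything else already exists in amdgpu_amdkfd_gpuvm.c):

int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
{
	struct amdgpu_vm *vm = drm_priv_to_vm(drm_priv);
	struct kfd_mem_attachment *entry;
	int ret = 0;

	mutex_lock(&mem->lock);
	list_for_each_entry(entry, &mem->attachments, list) {
		if (entry->bo_va->base.vm != vm || !entry->is_dmamapped)
			continue;

		/* Interruptible reservation: can fail with -ERESTARTSYS
		 * if a signal is pending; bail out and let the ioctl be
		 * restarted.
		 */
		ret = amdgpu_bo_reserve(entry->bo_va->base.bo, false);
		if (ret)
			break;

		kfd_mem_dmaunmap_attachment(mem, entry);
		entry->is_dmamapped = false;
		amdgpu_bo_unreserve(entry->bo_va->base.bo);
	}
	mutex_unlock(&mem->lock);

	return ret;
}

The intent is what Felix describes: -ERESTARTSYS propagates unchanged
back through kfd_ioctl_unmap_memory_from_gpu() so the ioctl can be
restarted (transparently on signal return, or as EINTR that userspace
retries), and anything already dmaunmapped stays marked, so a restarted
call only touches the remaining attachments. The reservation nested
inside mem->lock is presumably what lockdep objects to; I haven't
tracked down the exact cycle.
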
> Regards,
> Felix
>
>
>>  	}
>>  	mutex_unlock(&mem->lock);
From 5f00ef4518ae1971f2cd31a89a647380b49fbe43 Mon Sep 17 00:00:00 2001
From: David Francis <[email protected]>
Date: Thu, 5 Oct 2023 11:36:06 -0400
Subject: [PATCH] drm/amdgpu: Acquire ttm locks for dmaunmap

dmaunmap can call ttm_bo_validate, which expects the
ttm dma_resv to be held.

Acquire the locks in amdgpu_amdkfd_gpuvm_dmaunmap_mem.

Because the dmaunmap step can now fail, it is now necessary to
track which attachments have and have not been dmaunmapped.

This failure can also cause the sync_memory step of the ioctl
to be repeated; it is idempotent, so this should not cause any issues.

Signed-off-by: David Francis <[email protected]>
---
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 3 ++-
.../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 18 +++++++++++++++---
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 7 ++++++-
3 files changed, 23 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 3ad8dc523b42..aeed6a893902 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -58,6 +58,7 @@ struct kfd_mem_attachment {
 	struct list_head list;
 	enum kfd_mem_attachment_type type;
 	bool is_mapped;
+	bool is_dmamapped;
 	struct amdgpu_bo_va *bo_va;
 	struct amdgpu_device *adev;
 	uint64_t va;
@@ -302,7 +303,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct amdgpu_device *adev,
 					  struct kgd_mem *mem, void *drm_priv);
 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
-void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
+int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
 int amdgpu_amdkfd_gpuvm_sync_memory(
 		struct amdgpu_device *adev, struct kgd_mem *mem, bool intr);
 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 54f31a420229..c05acd99c25a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -2081,6 +2081,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 		}
 		entry->is_mapped = true;
+		entry->is_dmamapped = true;
 		mem->mapped_to_gpu_memory++;
 		pr_debug("\t INC mapping count %d\n",
 			 mem->mapped_to_gpu_memory);
@@ -2102,21 +2103,32 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 	return ret;
 }
-void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
+int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
 {
 	struct kfd_mem_attachment *entry;
 	struct amdgpu_vm *vm;
+	int ret = 0;
 	vm = drm_priv_to_vm(drm_priv);
 	mutex_lock(&mem->lock);
 	list_for_each_entry(entry, &mem->attachments, list) {
-		if (entry->bo_va->base.vm == vm)
+		if (entry->bo_va->base.vm == vm && entry->is_dmamapped) {
+			ret = amdgpu_bo_reserve(entry->bo_va->base.bo, false);
+			if (ret)
+				goto out;
+
 			kfd_mem_dmaunmap_attachment(mem, entry);
-	}
+			entry->is_dmamapped = false;
+			amdgpu_bo_unreserve(entry->bo_va->base.bo);
+		}
+	}
+out:
 	mutex_unlock(&mem->lock);
+	return ret;
 }
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 06988cf1db51..d75c181cb744 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -1442,7 +1442,11 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
 			kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
 		/* Remove dma mapping after tlb flush to avoid IO_PAGE_FAULT */
-		amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);
+		err = amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);
+		if (err) {
+			pr_debug("DMA unmapping failed, acquire interrupted by user signal\n");
+			goto dmaunmap_failed;
+		}
 	}
 	mutex_unlock(&p->mutex);
@@ -1455,6 +1459,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
 get_mem_obj_from_handle_failed:
 unmap_memory_from_gpu_failed:
 sync_memory_failed:
+dmaunmap_failed:
 	mutex_unlock(&p->mutex);
 copy_from_user_failed:
 	kfree(devices_arr);
--
2.34.1