RE: [PATCH v2] drm/amdgpu: add codes to clear AccVGPR for arcturus

2020-03-12 Thread Chen, Guchun
[AMD Official Use Only - Internal Distribution Only]

Looks like the file mode was changed.
old mode 100644
new mode 100755

Regards,
Guchun

-Original Message-
From: Zhang, Hawking  
Sent: Friday, March 13, 2020 11:35 AM
To: Li, Dennis ; amd-gfx@lists.freedesktop.org; Deucher, 
Alexander ; Zhou1, Tao ; Chen, 
Guchun 
Cc: Li, Dennis 
Subject: RE: [PATCH v2] drm/amdgpu: add codes to clear AccVGPR for arcturus

[AMD Official Use Only - Internal Distribution Only]

Reviewed-by: Hawking Zhang 

Regards,
Hawking

-Original Message-
From: Dennis Li  
Sent: Friday, March 13, 2020 11:22
To: amd-gfx@lists.freedesktop.org; Deucher, Alexander 
; Zhou1, Tao ; Zhang, Hawking 
; Chen, Guchun 
Cc: Li, Dennis 
Subject: [PATCH v2] drm/amdgpu: add codes to clear AccVGPR for arcturus

AccVGPRs are newly added in arcturus. Before reading these registers, they 
should be initialized. Otherwise an EDC error happens when RAS is enabled.

v2: reuse the existing logic to calculate the register size

Change-Id: I4ed384f0cc4b781a10cfd6ad1e3a132445bdc261
Signed-off-by: Dennis Li 

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
old mode 100644
new mode 100755
index c78ffdc51373..324838baa71c
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -4144,6 +4144,101 @@ static const u32 sgpr_init_compute_shader[] =
	0xbe800080, 0xbf810000,
 };
 
+static const u32 vgpr_init_compute_shader_arcturus[] = {
+	[...]

RE: [PATCH v2] drm/amdgpu: add codes to clear AccVGPR for arcturus

2020-03-12 Thread Zhang, Hawking
[AMD Official Use Only - Internal Distribution Only]

Reviewed-by: Hawking Zhang 

Regards,
Hawking

-Original Message-
From: Dennis Li  
Sent: Friday, March 13, 2020 11:22
To: amd-gfx@lists.freedesktop.org; Deucher, Alexander 
; Zhou1, Tao ; Zhang, Hawking 
; Chen, Guchun 
Cc: Li, Dennis 
Subject: [PATCH v2] drm/amdgpu: add codes to clear AccVGPR for arcturus

AccVGPRs are newly added in arcturus. Before reading these registers, they 
should be initialized. Otherwise an EDC error happens when RAS is enabled.

v2: reuse the existing logic to calculate the register size

Change-Id: I4ed384f0cc4b781a10cfd6ad1e3a132445bdc261
Signed-off-by: Dennis Li 

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
old mode 100644
new mode 100755
index c78ffdc51373..324838baa71c
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -4144,6 +4144,101 @@ static const u32 sgpr_init_compute_shader[] =
	0xbe800080, 0xbf810000,
 };
 
+static const u32 vgpr_init_compute_shader_arcturus[] = {
+	[...]

[PATCH v2] drm/amdgpu: add codes to clear AccVGPR for arcturus

2020-03-12 Thread Dennis Li
AccVGPRs are newly added in arcturus. Before reading these
registers, they should be initialized. Otherwise an EDC error
happens when RAS is enabled.

v2: reuse the existing logic to calculate the register size

Change-Id: I4ed384f0cc4b781a10cfd6ad1e3a132445bdc261
Signed-off-by: Dennis Li 

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
old mode 100644
new mode 100755
index c78ffdc51373..324838baa71c
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -4144,6 +4144,101 @@ static const u32 sgpr_init_compute_shader[] =
	0xbe800080, 0xbf810000,
 };
 
+static const u32 vgpr_init_compute_shader_arcturus[] = {
+	0xd3d94000, 0x18000080, 0xd3d94001, 0x18000080, 0xd3d94002, 0x18000080,
+	0xd3d94003, 0x18000080, 0xd3d94004, 0x18000080, 0xd3d94005, 0x18000080,
+	0xd3d94006, 0x18000080, 0xd3d94007, 0x18000080, 0xd3d94008, 0x18000080,
+	0xd3d94009, 0x18000080, 0xd3d9400a, 0x18000080, 0xd3d9400b, 0x18000080,
+	0xd3d9400c, 0x18000080, 0xd3d9400d, 0x18000080, 0xd3d9400e, 0x18000080,
+	0xd3d9400f, 0x18000080, 0xd3d94010, 0x18000080, 0xd3d94011, 0x18000080,
+	0xd3d94012, 0x18000080, 0xd3d94013, 0x18000080, 0xd3d94014, 0x18000080,
+	0xd3d94015, 0x18000080, 0xd3d94016, 0x18000080, 0xd3d94017, 0x18000080,
+	0xd3d94018, 0x18000080, 0xd3d94019, 0x18000080, 0xd3d9401a, 0x18000080,
+	0xd3d9401b, 0x18000080, 0xd3d9401c, 0x18000080, 0xd3d9401d, 0x18000080,
+	0xd3d9401e, 0x18000080, 0xd3d9401f, 0x18000080, 0xd3d94020, 0x18000080,
+	0xd3d94021, 0x18000080, 0xd3d94022, 0x18000080, 0xd3d94023, 0x18000080,
+	0xd3d94024, 0x18000080, 0xd3d94025, 0x18000080, 0xd3d94026, 0x18000080,
+	0xd3d94027, 0x18000080, 0xd3d94028, 0x18000080, 0xd3d94029, 0x18000080,
+	0xd3d9402a, 0x18000080, 0xd3d9402b, 0x18000080, 0xd3d9402c, 0x18000080,
+	0xd3d9402d, 0x18000080, 0xd3d9402e, 0x18000080, 0xd3d9402f, 0x18000080,
+	0xd3d94030, 0x18000080, 0xd3d94031, 0x18000080, 0xd3d94032, 0x18000080,
+	0xd3d94033, 0x18000080, 0xd3d94034, 0x18000080, 0xd3d94035, 0x18000080,
+	0xd3d94036, 0x18000080, 0xd3d94037, 0x18000080, 0xd3d94038, 0x18000080,
+	0xd3d94039, 0x18000080, 0xd3d9403a, 0x18000080, 0xd3d9403b, 0x18000080,
+	0xd3d9403c, 0x18000080, 0xd3d9403d, 0x18000080, 0xd3d9403e, 0x18000080,
+	0xd3d9403f, 0x18000080, 0xd3d94040, 0x18000080, 0xd3d94041, 0x18000080,
+	0xd3d94042, 0x18000080, 0xd3d94043, 0x18000080, 0xd3d94044, 0x18000080,
+	0xd3d94045, 0x18000080, 0xd3d94046, 0x18000080, 0xd3d94047, 0x18000080,
+	0xd3d94048, 0x18000080, 0xd3d94049, 0x18000080, 0xd3d9404a, 0x18000080,
+	0xd3d9404b, 0x18000080, 0xd3d9404c, 0x18000080, 0xd3d9404d, 0x18000080,
+	0xd3d9404e, 0x18000080, 0xd3d9404f, 0x18000080, 0xd3d94050, 0x18000080,
+	0xd3d94051, 0x18000080, 0xd3d94052, 0x18000080, 0xd3d94053, 0x18000080,
+	0xd3d94054, 0x18000080, 0xd3d94055, 0x18000080, 0xd3d94056, 0x18000080,
+	0xd3d94057, 0x18000080, 0xd3d94058, 0x18000080, 0xd3d94059, 0x18000080,
+	0xd3d9405a, 0x18000080, 0xd3d9405b, 0x18000080, 0xd3d9405c, 0x18000080,
+	0xd3d9405d, 0x18000080, 0xd3d9405e, 0x18000080, 0xd3d9405f, 0x18000080,
+	0xd3d94060, 0x18000080, 0xd3d94061, 0x18000080, 0xd3d94062, 0x18000080,
+	0xd3d94063, 0x18000080, 0xd3d94064, 0x18000080, 0xd3d94065, 0x18000080,
+	0xd3d94066, 0x18000080, 0xd3d94067, 0x18000080, 0xd3d94068, 0x18000080,
+	0xd3d94069, 0x18000080, 0xd3d9406a, 0x18000080, 0xd3d9406b, 0x18000080,
+	0xd3d9406c, 0x18000080, 0xd3d9406d, 0x18000080, 0xd3d9406e, 0x18000080,
+	0xd3d9406f, 0x18000080, 0xd3d94070, 0x18000080, 0xd3d94071, 0x18000080,
+	0xd3d94072, 0x18000080, 0xd3d94073, 0x18000080, 0xd3d94074, 0x18000080,
+	0xd3d94075, 0x18000080, 0xd3d94076, 0x18000080, 0xd3d94077, 0x18000080,
+	0xd3d94078, 0x18000080, 0xd3d94079, 0x18000080, 0xd3d9407a, 0x18000080,
+	0xd3d9407b, 0x18000080, 0xd3d9407c, 0x18000080, 0xd3d9407d, 0x18000080,
+	0xd3d9407e, 0x18000080, 0xd3d9407f, 0x18000080, 0xd3d94080, 0x18000080,
+	0xd3d94081, 0x18000080, 0xd3d94082, 0x18000080, 0xd3d94083, 0x18000080,
+	0xd3d94084, 0x18000080, 0xd3d94085, 0x18000080, 0xd3d94086, 0x18000080,
+	0xd3d94087, 0x18000080, 0xd3d94088, 0x18000080, 0xd3d94089, 0x18000080,
+	0xd3d9408a, 0x18000080, 0xd3d9408b, 0x18000080, 0xd3d9408c, 0x18000080,
+	0xd3d9408d, 0x18000080, 0xd3d9408e, 0x18000080, 0xd3d9408f, 0x18000080,
+	0xd3d94090, 0x18000080, 0xd3d94091, 0x18000080, 0xd3d94092, 0x18000080,
+	0xd3d94093, 0x18000080, 0xd3d94094, 0x18000080, 0xd3d94095, 0x18000080,
+	0xd3d94096, 0x18000080, 0xd3d94097, 0x18000080, 0xd3d94098, 0x18000080,
+	0xd3d94099, 0x18000080, 0xd3d9409a, 0x18000080, 0xd3d9409b, 0x18000080,
+	0xd3d9409c, 0x18000080, 0xd3d9409d, 0x18000080, 0xd3d9409e, 0x18000080,
+	0xd3d9409f, 0x18000080, 0xd3d940a0, 0x18000080,

Re: [PATCH hmm 9/8] mm/hmm: do not check pmd_protnone twice in hmm_vma_handle_pmd()

2020-03-12 Thread Ralph Campbell



On 3/12/20 12:33 PM, Jason Gunthorpe wrote:

pmd_to_hmm_pfn_flags() already checks it and makes the cpu flags 0. If no
fault is requested then the pfns should be returned with the not-valid
flags.

It should not unconditionally fault if faulting is not requested.

Fixes: 2aee09d8c116 ("mm/hmm: change hmm_vma_fault() to allow write fault on page 
basis")
Signed-off-by: Jason Gunthorpe 


Looks good to me.
Reviewed-by: Ralph Campbell 


---
  mm/hmm.c | 2 +-
  1 file changed, 1 insertion(+), 1 deletion(-)

Bonus patch, this one was found after I made the series.

diff --git a/mm/hmm.c b/mm/hmm.c
index ca33d086bdc190..6d9da4b0f0a9f8 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -226,7 +226,7 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk, 
unsigned long addr,
hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);
  
-	if (pmd_protnone(pmd) || fault || write_fault)
+	if (fault || write_fault)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
  
  	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);





Re: [PATCH hmm 9/8] mm/hmm: do not check pmd_protnone twice in hmm_vma_handle_pmd()

2020-03-12 Thread Jason Gunthorpe
pmd_to_hmm_pfn_flags() already checks it and makes the cpu flags 0. If no
fault is requested then the pfns should be returned with the not-valid
flags.

It should not unconditionally fault if faulting is not requested.

Fixes: 2aee09d8c116 ("mm/hmm: change hmm_vma_fault() to allow write fault on 
page basis")
Signed-off-by: Jason Gunthorpe 
---
 mm/hmm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

Bonus patch, this one was found after I made the series.

diff --git a/mm/hmm.c b/mm/hmm.c
index ca33d086bdc190..6d9da4b0f0a9f8 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -226,7 +226,7 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk, 
unsigned long addr,
hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);
 
-   if (pmd_protnone(pmd) || fault || write_fault)
+   if (fault || write_fault)
return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
 
pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
-- 
2.25.1
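
For context, the helper the commit message refers to looks roughly like this in
mm/hmm.c at the time of this series (a paraphrased sketch, not part of the
patch):

	static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range,
						    pmd_t pmd)
	{
		if (pmd_protnone(pmd))
			return 0;
		return pmd_write(pmd) ?
			range->flags[HMM_PFN_VALID] | range->flags[HMM_PFN_WRITE] :
			range->flags[HMM_PFN_VALID];
	}

Since pmd_protnone() already yields cpu_flags == 0 here, repeating the check in
hmm_vma_handle_pmd() was redundant, which is what the patch removes.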



Re: [PATCH] mm/hmm: Simplify hmm_vma_walk_pud slightly

2020-03-12 Thread Jason Gunthorpe
On Thu, Mar 12, 2020 at 05:02:18PM +, Steven Price wrote:

> > Having the walker deref the pointer and pass the value it into the ops
> > for use rather than repeatedly de-refing an unlocked value seems like
> > a much safer design to me.
> 
> Yeah that sounds like a good idea.

Ok.. let's see when I get this hmm & odp stuff cleared off
 
> > I also didn't quite understand why walk_pte_range() skipped locking
> > the pte in the no_vma case - I don't get why vma would be related to
> > locking here.
> 
> The no_vma case is for walking the kernel's page tables and they may have
> entries that are not backed by struct page, so there isn't (reliably) a PTE
> lock to take.

Oh, that is an interesting bit of insight..

> > I also saw that hmm open coded the pte walk, presumably for
> > performance, so I was thinking of adding some kind of pte_range()
> > callback to avoid the expensive indirect function call per pte, but
> > hmm also can't have the pmd locked...
> 
> Yeah the callback per PTE is a bit heavy because of the indirect function
> call. I'm not sure how to optimise it beyond open coding at the PMD level.
> One option would be to provide helper functions to make it a bit more
> generic.
> 
> Do you have an idea of what pte_range() would look like?

Basically just pass in the already mapped pte array just like is
already done at the tail of the pmd

The reason to do it like this is so that the common code in the walker
can correctly prove the pmd is pointing at a pte before trying to map
it.
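
A hypothetical shape for such a callback (illustrative only; pte_range() is not
an existing mm_walk_ops member, and the signature here is an assumption):

	/* Called once per mapped PTE table chunk instead of once per PTE,
	 * with the walker having already validated and mapped the table. */
	int (*pte_range)(pte_t *ptep, unsigned long addr,
			 unsigned long npages, struct mm_walk *walk);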

This is complicated, and hmm at least already got it wrong when trying
to open code at the PMD level.

Jason


Re: [PATCH] mm/hmm: Simplify hmm_vma_walk_pud slightly

2020-03-12 Thread Steven Price

On 12/03/2020 16:37, Jason Gunthorpe wrote:

On Thu, Mar 12, 2020 at 04:16:33PM +, Steven Price wrote:

Actually, while you are looking at this, do you think we should be
adding at least READ_ONCE in the pagewalk.c walk_* functions? The
multiple references of pmd, pud, etc without locking seems sketchy to
me.


I agree it seems worrying. I'm not entirely sure whether the holding of
mmap_sem is sufficient,


I looked at this question, and at least for PMD, mmap_sem is not
sufficient. I didn't easily figure it out for the other ones

I'm guessing if PMD is not safe then none of them are.


this isn't something that I changed so I've just
been hoping that it's sufficient since it seems to have been working
(whether that's by chance because the compiler didn't generate multiple
reads I've no idea). For walking the kernel's page tables the lack of
READ_ONCE is also not great, but at least for PTDUMP we don't care too much
about accuracy and it should be crash proof because there's no RCU grace
period. And again the code I was replacing didn't have any special
protection.

I can't see any harm in updating the code to include READ_ONCE and I'm happy
to review a patch.


The reason I ask is because hmm's walkers often have this pattern
where they get the pointer and then de-ref it (again) then
immediately have to recheck the 'again' conditions of the walker
itself because the re-read may have given a different value.

Having the walker deref the pointer and pass the value it into the ops
for use rather than repeatedly de-refing an unlocked value seems like
a much safer design to me.


Yeah that sounds like a good idea.


If this also implicitly relies on a RCU grace period then it is also
missing RCU locking...


True - I'm not 100% sure in what situations a page table entry can be 
freed. Anshuman has added locking to deal with memory hotplug[1]. I 
believe this is sufficient.


[1] bf2b59f60ee1 ("arm64/mm: Hold memory hotplug lock while walking for 
kernel page table dump")



I also didn't quite understand why walk_pte_range() skipped locking
the pte in the no_vma case - I don't get why vma would be related to
locking here.


The no_vma case is for walking the kernel's page tables and they may 
have entries that are not backed by struct page, so there isn't 
(reliably) a PTE lock to take.



I also saw that hmm open coded the pte walk, presumably for
performance, so I was thinking of adding some kind of pte_range()
callback to avoid the expensive indirect function call per pte, but
hmm also can't have the pmd locked...


Yeah the callback per PTE is a bit heavy because of the indirect 
function call. I'm not sure how to optimise it beyond open coding at the 
PMD level. One option would be to provide helper functions to make it a 
bit more generic.


Do you have an idea of what pte_range() would look like?

Steve


Re: [PATCH] mm/hmm: Simplify hmm_vma_walk_pud slightly

2020-03-12 Thread Steven Price

On 12/03/2020 15:11, Jason Gunthorpe wrote:

On Thu, Mar 12, 2020 at 02:40:08PM +, Steven Price wrote:

On 12/03/2020 14:27, Jason Gunthorpe wrote:

On Thu, Mar 12, 2020 at 10:28:13AM +, Steven Price wrote:

By refactoring to deal with the !pud_huge(pud) || !pud_devmap(pud)
condition early it's possible to remove the 'ret' variable and remove a
level of indentation from half the function making the code easier to
read.

No functional change.

Signed-off-by: Steven Price 
Thanks to Jason's changes there were only two code paths left using
the out_unlock label so it seemed like a good opportunity to
refactor.


Yes, I made something very similar, what do you think of this:

https://github.com/jgunthorpe/linux/commit/93f0ed42ab3f9ceb27b58fb7c7c3ecaf60f16b36


Even better! Sorry I didn't realise you'd already done this. I just saw that
the function was needlessly complicated after your fix, so I thought I'd do
a drive-by cleanup since part of the mess was my fault! :)


No worries, I've got a lot of patches for hmm_range_fault right now,
just trying to organize them, test them and post them. Haven't posted
that one yet.

Actually, while you are looking at this, do you think we should be
adding at least READ_ONCE in the pagewalk.c walk_* functions? The
multiple references of pmd, pud, etc without locking seems sketchy to
me.


I agree it seems worrying. I'm not entirely sure whether the holding of 
mmap_sem is sufficient, this isn't something that I changed so I've just 
been hoping that it's sufficient since it seems to have been working 
(whether that's by chance because the compiler didn't generate multiple 
reads I've no idea). For walking the kernel's page tables the lack of 
READ_ONCE is also not great, but at least for PTDUMP we don't care too 
much about accuracy and it should be crash proof because there's no RCU 
grace period. And again the code I was replacing didn't have any special 
protection.


I can't see any harm in updating the code to include READ_ONCE and I'm 
happy to review a patch.
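
(A minimal sketch of the kind of change being discussed, for one of the
pagewalk.c walk_* functions; untested, shown only to make the idea concrete:)

	pmd_t pmdval = READ_ONCE(*pmd);	/* snapshot the entry once */

	if (pmd_none(pmdval)) {
		/* handle the hole using the snapshot */
	}
	/* ... all later tests and derefs use pmdval, never *pmd again ... */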


Thanks,

Steve


Re: [PATCH] drm/amdgpu: fix and cleanup amdgpu_gem_object_close

2020-03-12 Thread Christian König

On 12.03.20 at 16:03, Liu, Monk wrote:

The problem is that dma_resv_test_signaled_rcu() tests only the shared fence if 
one is present.

Okay, I got the point now, but why can't we modify dma_resv_test_signaled_rcu() 
to let it wait for both the exclusive and shared lists?


That is exactly what I and Xinhui said as well and what we also both 
proposed in patches, but the Intel guys are against that.


I also already proposed an extension to the dma_resv infrastructure 
where you get a list of "other" fences for stuff like this, but it 
wasn't too well received either, and I can't dedicate more time to this.
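
For reference, the approach taken in the patch quoted below boils down to
roughly this (a sketch against the dma_resv API of this kernel version, not
the literal diff):

	/* Wait briefly for a potential exclusive fence first... */
	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, true,
				      msecs_to_jiffies(10));
	if (r > 0) {
		/* ...so adding the clear fence as shared keeps the
		 * "shared completes after exclusive" invariant. */
		amdgpu_bo_fence(bo, fence, true);
		dma_fence_put(fence);
	}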


Regards,
Christian.




Ack-by: Monk Liu 
_
Monk Liu|GPU Virtualization Team |AMD


-Original Message-
From: Koenig, Christian 
Sent: Thursday, March 12, 2020 9:42 PM
To: Liu, Monk ; Pan, Xinhui ; 
amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH] drm/amdgpu: fix and cleanup amdgpu_gem_object_close

The problem is that dma_resv_test_signaled_rcu() tests only the shared fence if 
one is present.

Now what happened is that the clear fence completed before the exclusive one, 
and that in turn caused TTM to think that the BO is unused and freed it.

Christian.

On 12.03.20 at 14:25, Liu, Monk wrote:

without your patch, the clear fence is also hooked in the shared list
of bo's reservation obj,  no matter the exclusive fence of that BO
signaled before clear fence or not

since the clear fence is always kept in the bo's resv object, can you tell me 
what's the problem then? are we going to lose this clear fence and still use 
it during the VM pt/pd clearing?

thanks
_
Monk Liu|GPU Virtualization Team |AMD


-Original Message-
From: Christian König 
Sent: Thursday, March 12, 2020 8:50 PM
To: Liu, Monk ; Pan, Xinhui ;
amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH] drm/amdgpu: fix and cleanup
amdgpu_gem_object_close

   From the semantics, the dma_resv object contains a single exclusive fence and 
multiple shared fences, and it is mandatory that the shared fences complete 
after the exclusive one.

Now what happens is that clearing the VM page tables runs asynchronously to the 
exclusive fence which moves the buffer around.

And because of this Xinhui ran into a rare problem that TTM thought it could 
reuse the memory while in reality it was still in use.

Regards,
Christian.

On 12.03.20 at 12:30, Liu, Monk wrote:

Can you give more details about "we can't guarantee that the clear fence will 
complete after the exclusive one"?

Thanks

_
Monk Liu|GPU Virtualization Team |AMD


-Original Message-
From: amd-gfx  On Behalf Of
Christian König
Sent: Thursday, March 12, 2020 7:12 PM
To: Pan, Xinhui ; amd-gfx@lists.freedesktop.org
Subject: [PATCH] drm/amdgpu: fix and cleanup amdgpu_gem_object_close

The problem is that we can't add the clear fence to the BO when there is an 
exclusive fence on it, since we can't guarantee that the clear fence will 
complete after the exclusive one.

To fix this refactor the function and wait for any potential exclusive fence 
with a small timeout before adding the shared clear fence.

Signed-off-by: Christian König 
---
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 41 +++--
1 file changed, 25 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 4277125a79ee..0782162fb5f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -161,10 +161,11 @@ void amdgpu_gem_object_close(struct
drm_gem_object *obj,

	struct amdgpu_bo_list_entry vm_pd;

struct list_head list, duplicates;
+   struct dma_fence *fence = NULL;
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct amdgpu_bo_va *bo_va;
-   int r;
+   long r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);
@@ -182,24 +183,32 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
return;
}
bo_va = amdgpu_vm_bo_find(vm, bo);
-	if (bo_va && --bo_va->ref_count == 0) {
-		amdgpu_vm_bo_rmv(adev, bo_va);
+	if (!bo_va || --bo_va->ref_count)
+		goto out_unlock;
 
-		if (amdgpu_vm_ready(vm)) {
-			struct dma_fence *fence = NULL;
+	amdgpu_vm_bo_rmv(adev, bo_va);
+	if (!amdgpu_vm_ready(vm))
+		goto out_unlock;
 
-			r = amdgpu_vm_clear_freed(adev, vm, &fence);
-			if (unlikely(r)) {
-				dev_err(adev->dev, "failed to clear page "
-					"tables on GEM object close (%d)\n", r);
-			}
-
-			if (fence) {
-				amdgpu_bo_fence(bo, fence, true);
-				dma_fence_put(fence);
-			}
-		}
-	}
+	r = amdgpu_vm_clear_freed(adev, vm, &fence);

Re: [PATCH] mm/hmm: Simplify hmm_vma_walk_pud slightly

2020-03-12 Thread Jason Gunthorpe
On Thu, Mar 12, 2020 at 02:40:08PM +, Steven Price wrote:
> On 12/03/2020 14:27, Jason Gunthorpe wrote:
> > On Thu, Mar 12, 2020 at 10:28:13AM +, Steven Price wrote:
> > > By refactoring to deal with the !pud_huge(pud) || !pud_devmap(pud)
> > > condition early it's possible to remove the 'ret' variable and remove a
> > > level of indentation from half the function making the code easier to
> > > read.
> > > 
> > > No functional change.
> > > 
> > > Signed-off-by: Steven Price 
> > > Thanks to Jason's changes there were only two code paths left using
> > > the out_unlock label so it seemed like a good opportunity to
> > > refactor.
> > 
> > Yes, I made something very similar, what do you think of this:
> > 
> > https://github.com/jgunthorpe/linux/commit/93f0ed42ab3f9ceb27b58fb7c7c3ecaf60f16b36
> 
> Even better! Sorry I didn't realise you'd already done this. I just saw that
> the function was needlessly complicated after your fix, so I thought I'd do
> a drive-by cleanup since part of the mess was my fault! :)

No worries, I've got a lot of patches for hmm_range_fault right now,
just trying to organize them, test them and post them. Haven't posted
that one yet.

Actually, while you are looking at this, do you think we should be
adding at least READ_ONCE in the pagewalk.c walk_* functions? The
multiple references of pmd, pud, etc without locking seems sketchy to
me.

Jason


RE: [PATCH] drm/amdgpu: fix and cleanup amdgpu_gem_object_close

2020-03-12 Thread Liu, Monk
>>> The problem is that dma_resv_test_signaled_rcu() tests only the shared 
>>> fence if one is present.
Okay, I got the point now, but why can't we modify dma_resv_test_signaled_rcu() 
to let it wait for both the exclusive and shared lists?


Ack-by: Monk Liu 
_
Monk Liu|GPU Virtualization Team |AMD


-Original Message-
From: Koenig, Christian  
Sent: Thursday, March 12, 2020 9:42 PM
To: Liu, Monk ; Pan, Xinhui ; 
amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH] drm/amdgpu: fix and cleanup amdgpu_gem_object_close

The problem is that dma_resv_test_signaled_rcu() tests only the shared fence if 
one is present.

Now what happened is that the clear fence completed before the exclusive one, 
and that in turn caused TTM to think that the BO is unused and freed it.

Christian.

On 12.03.20 at 14:25, Liu, Monk wrote:
> without your patch, the clear fence is also hooked in the shared list 
> of bo's reservation obj,  no matter the exclusive fence of that BO 
> signaled before clear fence or not
>
> since the clear fence is always kept in the bo's resv object, can you tell me 
> what's the problem than ? are we going to loose this clear fence and still 
> use it during the  VM pt/pd clearing ?
>
> thanks
> _
> Monk Liu|GPU Virtualization Team |AMD
>
>
> -Original Message-
> From: Christian König 
> Sent: Thursday, March 12, 2020 8:50 PM
> To: Liu, Monk ; Pan, Xinhui ; 
> amd-gfx@lists.freedesktop.org
> Subject: Re: [PATCH] drm/amdgpu: fix and cleanup 
> amdgpu_gem_object_close
>
>   From the semantics, the dma_resv object contains a single exclusive fence and 
> multiple shared fences, and it is mandatory that the shared fences complete 
> after the exclusive one.
>
> Now what happens is that clearing the VM page tables runs asynchronously to 
> the exclusive fence which moves the buffer around.
>
> And because of this Xinhui ran into a rare problem that TTM thought it could 
> reuse the memory while in reality it was still in use.
>
> Regards,
> Christian.
>
On 12.03.20 at 12:30, Liu, Monk wrote:
>> Can you give more details about "we can't guarantee that the clear fence 
>> will complete after the exclusive one"?
>>
>> Thanks
>>
>> _
>> Monk Liu|GPU Virtualization Team |AMD
>>
>>
>> -Original Message-
>> From: amd-gfx  On Behalf Of 
>> Christian König
>> Sent: Thursday, March 12, 2020 7:12 PM
>> To: Pan, Xinhui ; amd-gfx@lists.freedesktop.org
>> Subject: [PATCH] drm/amdgpu: fix and cleanup amdgpu_gem_object_close
>>
>> The problem is that we can't add the clear fence to the BO when there is an 
>> exclusive fence on it, since we can't guarantee that the clear fence will 
>> complete after the exclusive one.
>>
>> To fix this refactor the function and wait for any potential exclusive fence 
>> with a small timeout before adding the shared clear fence.
>>
>> Signed-off-by: Christian König 
>> ---
>>drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 41 +++--
>>1 file changed, 25 insertions(+), 16 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>> index 4277125a79ee..0782162fb5f3 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>> @@ -161,10 +161,11 @@ void amdgpu_gem_object_close(struct 
>> drm_gem_object *obj,
>>
>>  struct amdgpu_bo_list_entry vm_pd;
>>  struct list_head list, duplicates;
>> +struct dma_fence *fence = NULL;
>>  struct ttm_validate_buffer tv;
>>  struct ww_acquire_ctx ticket;
>>  struct amdgpu_bo_va *bo_va;
>> -int r;
>> +long r;
>>
>>  INIT_LIST_HEAD(&list);
>>  INIT_LIST_HEAD(&duplicates);
>> @@ -182,24 +183,32 @@ void amdgpu_gem_object_close(struct drm_gem_object 
>> *obj,
>>  return;
>>  }
>>  bo_va = amdgpu_vm_bo_find(vm, bo);
>> -if (bo_va && --bo_va->ref_count == 0) {
>> -amdgpu_vm_bo_rmv(adev, bo_va);
>> +if (!bo_va || --bo_va->ref_count)
>> +goto out_unlock;
>>
>> -if (amdgpu_vm_ready(vm)) {
>> -struct dma_fence *fence = NULL;
>> +amdgpu_vm_bo_rmv(adev, bo_va);
>> +if (!amdgpu_vm_ready(vm))
>> +goto out_unlock;
>>
>> -r = amdgpu_vm_clear_freed(adev, vm, &fence);
>> -if (unlikely(r)) {
>> -dev_err(adev->dev, "failed to clear page "
>> -"tables on GEM object close (%d)\n", r);
>> -}
>>
>> -if (fence) {
>> -amdgpu_bo_fence(bo, fence, true);
>> -dma_fence_put(fence);
>> -}
>> -}
>> -}
>> +r = amdgpu_vm_clear_freed(adev, vm, &fence);
>> +if (r || !fence)
>> +goto out_unlock;
>> +
>> +r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, 

Re: [PATCH] mm/hmm: Simplify hmm_vma_walk_pud slightly

2020-03-12 Thread Steven Price

On 12/03/2020 14:27, Jason Gunthorpe wrote:

On Thu, Mar 12, 2020 at 10:28:13AM +, Steven Price wrote:

By refactoring to deal with the !pud_huge(pud) || !pud_devmap(pud)
condition early it's possible to remove the 'ret' variable and remove a
level of indentation from half the function making the code easier to
read.

No functional change.

Signed-off-by: Steven Price 
---
Thanks to Jason's changes there were only two code paths left using
the out_unlock label so it seemed like a good opportunity to
refactor.


Yes, I made something very similar, what do you think of this:

https://github.com/jgunthorpe/linux/commit/93f0ed42ab3f9ceb27b58fb7c7c3ecaf60f16b36


Even better! Sorry I didn't realise you'd already done this. I just saw 
that the function was needlessly complicated after your fix, so I 
thought I'd do a drive-by cleanup since part of the mess was my fault! :)


Thanks,

Steve


Re: [PATCH hmm 7/8] mm/hmm: return -EFAULT when setting HMM_PFN_ERROR on requested valid pages

2020-03-12 Thread Jason Gunthorpe
On Wed, Mar 11, 2020 at 06:36:47PM -0700, Ralph Campbell wrote:
> > @@ -390,8 +384,15 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
> > return -EBUSY;
> > }
> > return hmm_pfns_fill(start, end, range, HMM_PFN_NONE);
> > -   } else if (!pmd_present(pmd))
> > +   }
> > +
> > +   if (!pmd_present(pmd)) {
> > +   hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0, &fault,
> > +&write_fault);
> > +   if (fault || write_fault)
> > +   return -EFAULT;
> > return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
> 
> Shouldn't this fill with HMM_PFN_NONE instead of HMM_PFN_ERROR?
> Otherwise, when a THP is swapped out, you will get a different
> value than if a PTE is swapped out and you are prefetching/snapshotting.

If this is the case then the problem is that the return -EFAULT path
needs to do something else.. ie since the above code can't trigger
swap in, it is correct to return PFN_ERROR.

I'm completely guessing, but do we need to call pmd_to_swp_entry() and
handle things similarly to the pte? What swp_entries are valid for a
pmd?

Do you understand this better, or know how to trigger a !pmd_present
for test?

I suppose another option would be this:

if (!pmd_present(pmd)) {
		hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0, &fault,
				     &write_fault);
/* We can't handle this. Cause the PMD to be split and
 * handle it in the pte handler. */
if (fault || write_fault)
return 0;
return hmm_pfns_fill(start, end, range, HMM_PFN_NONE);
}

Which, I think, must be correct, but inefficient?
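
If the swap-entry route is what's needed, mirroring the pte path would look
something like this (a sketch only; it assumes migration entries are the only
non-present PMD case, which is exactly the open question above):

	if (!pmd_present(pmd)) {
		swp_entry_t entry = pmd_to_swp_entry(pmd);

		/* Assumed: only THP migration entries show up here. */
		if (is_migration_entry(entry)) {
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;
		}
	}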

Jason


Re: [PATCH] mm/hmm: Simplify hmm_vma_walk_pud slightly

2020-03-12 Thread Jason Gunthorpe
On Thu, Mar 12, 2020 at 10:28:13AM +, Steven Price wrote:
> By refactoring to deal with the !pud_huge(pud) || !pud_devmap(pud)
> condition early it's possible to remove the 'ret' variable and remove a
> level of indentation from half the function making the code easier to
> read.
> 
> No functional change.
> 
> Signed-off-by: Steven Price 
> ---
> Thanks to Jason's changes there were only two code paths left using
> the out_unlock label so it seemed like a good opportunity to
> refactor.

Yes, I made something very similar, what do you think of this:

https://github.com/jgunthorpe/linux/commit/93f0ed42ab3f9ceb27b58fb7c7c3ecaf60f16b36

From 93f0ed42ab3f9ceb27b58fb7c7c3ecaf60f16b36 Mon Sep 17 00:00:00 2001
From: Jason Gunthorpe 
Date: Wed, 4 Mar 2020 17:11:10 -0400
Subject: [PATCH] mm/hmm: rework hmm_vma_walk_pud()

At least since commit 3afc423632a1 ("mm: pagewalk: add p4d_entry() and
pgd_entry()") this code has developed a number of strange control flows.

The purpose of the routine is to copy the pfns of a huge devmap PUD into
the pfns output array, without splitting the PUD. Everything that is not a
huge devmap PUD should go back to the walker for splitting.

Rework the logic to show this goal and remove redundant stuff:

- If pud_trans_huge_lock returns !NULL then this is already
  'pud_trans_huge() || pud_devmap()' and 'pud_huge() || pud_devmap()'
  so some of the logic is redundant.

- Hitting pud_none() is a race, treat it as such and return back to the
  walker using ACTION_AGAIN

- !pud_present() gives 0 cpu_flags, so the extra checks are redundant

- Once the *pudp is read there is no need to continue holding the pud
  lock, so drop it. The only thing the following code cares about is the
  pfn from the devmap, and if there is racing then the notifiers will
  resolve everything. Perhaps the unlocked READ_ONCE in an earlier version
  was correct

Signed-off-by: Jason Gunthorpe 
---
 mm/hmm.c | 79 +++-
 1 file changed, 33 insertions(+), 46 deletions(-)

diff --git a/mm/hmm.c b/mm/hmm.c
index 8fec801a33c9e2..87a376659b5ad4 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -455,66 +455,53 @@ static int hmm_vma_walk_pud(pud_t *pudp, unsigned long 
start, unsigned long end,
 {
struct hmm_vma_walk *hmm_vma_walk = walk->private;
struct hmm_range *range = hmm_vma_walk->range;
-   unsigned long addr = start;
+   unsigned long i, npages, pfn;
+   unsigned int required_fault;
+   uint64_t cpu_flags;
+   uint64_t *pfns;
+   spinlock_t *ptl;
pud_t pud;
-   int ret = 0;
-   spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);
 
+   /*
+* This only handles huge devmap pages, the default return is
+* ACTION_SUBTREE, so everything else is split by the walker and passed
+* to the other routines.
+*/
+   ptl = pud_trans_huge_lock(pudp, walk->vma);
if (!ptl)
return 0;
+   pud = *pudp;
+   spin_unlock(ptl);
 
-   /* Normally we don't want to split the huge page */
-   walk->action = ACTION_CONTINUE;
-
-   pud = READ_ONCE(*pudp);
if (pud_none(pud)) {
-   spin_unlock(ptl);
-   return hmm_vma_walk_hole(start, end, -1, walk);
+   walk->action = ACTION_AGAIN;
+   return 0;
}
 
-   if (pud_huge(pud) && pud_devmap(pud)) {
-   unsigned long i, npages, pfn;
-   unsigned int required_flags;
-   uint64_t *pfns, cpu_flags;
-
-   if (!pud_present(pud)) {
-   spin_unlock(ptl);
-   return hmm_vma_walk_hole(start, end, -1, walk);
-   }
-
-   i = (addr - range->start) >> PAGE_SHIFT;
-   npages = (end - addr) >> PAGE_SHIFT;
-		pfns = &range->pfns[i];
+   if (!pud_devmap(pud))
+   return 0;
 
-   cpu_flags = pud_to_hmm_pfn_flags(range, pud);
+	pfns = &range->pfns[(start - range->start) >> PAGE_SHIFT];
+   cpu_flags = pud_to_hmm_pfn_flags(range, pud);
+	required_fault =
+		hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags);
-   if (required_flags) {
-   spin_unlock(ptl);
-   return hmm_vma_walk_hole_(addr, end, required_flags,
- walk);
-   }
+   if (required_fault)
+   return hmm_vma_walk_hole_(start, end, required_fault, walk);
 
-   pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
-   for (i = 0; i < npages; ++i, ++pfn) {
-   hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
- hmm_vma_walk->pgmap);
-   if (unlikely(!hmm_vma_walk->pgmap)) {
-   ret = -EBUSY;
-   goto out_unlock;
-   }
-   

Re: [PATCH] drm/amd/display: Fix pageflip event race condition for DCN. (v2)

2020-03-12 Thread Alex Deucher
On Thu, Mar 5, 2020 at 4:21 PM Mario Kleiner  wrote:
>
> Commit '16f17eda8bad ("drm/amd/display: Send vblank and user
> events at vsartup for DCN")' introduces a new way of pageflip
> completion handling for DCN, and some trouble.
>
> The current implementation introduces a race condition, which
> can cause pageflip completion events to be sent out one vblank
> too early, thereby confusing userspace and causing flicker:
>
> prepare_flip_isr():
>
> 1. Pageflip programming takes the ddev->event_lock.
> 2. Sets acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED
> 3. Releases ddev->event_lock.
>
> --> Deadline for surface address regs double-buffering passes on
> target pipe.
>
> 4. dc_commit_updates_for_stream() MMIO programs the new pageflip
>into hw, but too late for current vblank.
>
> => pflip_status == AMDGPU_FLIP_SUBMITTED, but flip won't complete
>in current vblank due to missing the double-buffering deadline
>by a tiny bit.
>
> 5. VSTARTUP trigger point in vblank is reached, VSTARTUP irq fires,
>dm_dcn_crtc_high_irq() gets called.
>
> 6. Detects pflip_status == AMDGPU_FLIP_SUBMITTED and assumes the
>pageflip has been completed/will complete in this vblank and
>sends out pageflip completion event to userspace and resets
>pflip_status = AMDGPU_FLIP_NONE.
>
> => Flip completion event sent out one vblank too early.
>
> This behaviour has been observed during my testing with measurement
> hardware a couple of times.
>
> The commit message says that the extra flip event code was added to
> dm_dcn_crtc_high_irq() to prevent missing to send out pageflip events
> in case the pflip irq doesn't fire, because the "DCH HUBP" component
> is clock gated and doesn't fire pflip irqs in that state. Also that
> this clock gating may happen if no planes are active. According to
> Nicholas, the clock gating can also happen if psr is active, and the
> gating is controlled independently by the hardware, so difficult to
> detect if and when the completion code in above commit is needed.
>
> This patch tries the following solution: It only executes the extra pflip
> completion code in dm_dcn_crtc_high_irq() iff the hardware reports
> that there aren't any surface updated pending in the double-buffered
> surface scanout address registers. Otherwise it leaves pflip completion
> to the pflip irq handler, for a more race-free experience.
>
> This would only guard against the order of events mentioned above.
> If Step 5 (VSTARTUP trigger) happens before step 4 then this won't help
> at all, because 1-3 + 5 might happen even without the hw being programmed
> at all, ie. no surface update pending because none yet programmed into hw.
>
> Therefore this patch also changes locking in amdgpu_dm_commit_planes(),
> so that prepare_flip_isr() and dc_commit_updates_for_stream() are done
> under event_lock protection within the same critical section.
>
> v2: Take Nicholas comments into account, try a different solution.
>
> Lightly tested on Polaris (locking) and Raven (the whole DCN stuff).
> Seems to work without causing obvious new trouble.
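
The locking change described above amounts to roughly the following shape in
amdgpu_dm_commit_planes() (a simplified sketch using names from the patch
description, not the literal diff):

	unsigned long flags;

	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	/* pflip_status = AMDGPU_FLIP_SUBMITTED */
	prepare_flip_isr(acrtc);
	/* MMIO-program the flip inside the same critical section, so
	 * VSTARTUP can't observe SUBMITTED before the hw is programmed. */
	dc_commit_updates_for_stream(dm->dc, surface_updates, planes_count,
				     acrtc_state->stream, &stream_update, state);
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);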

Nick, any comments on this?  Can we get this committed or do you think
it needs additional rework?

Thanks,

Alex

>
> Fixes: 16f17eda8bad ("drm/amd/display: Send vblank and user events at vsartup 
> for DCN")
> Signed-off-by: Mario Kleiner 
> ---
>  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 80 ---
>  1 file changed, 67 insertions(+), 13 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
> b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> index d7df1a85e72f..aa4e941b276f 100644
> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> @@ -287,6 +287,28 @@ static inline bool amdgpu_dm_vrr_active(struct 
> dm_crtc_state *dm_state)
>dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
>  }
>
> +/**
> + * dm_crtc_is_flip_pending() - Is a pageflip pending on this crtc?
> + *
> + * Returns true if any plane on the crtc has a flip pending, false otherwise.
> + */
> +static bool dm_crtc_is_flip_pending(struct dm_crtc_state *acrtc_state)
> +{
> +   struct dc_stream_status *status = 
> dc_stream_get_status(acrtc_state->stream);
> +   const struct dc_plane_status *plane_status;
> +   int i;
> +   bool pending = false;
> +
> +   for (i = 0; i < status->plane_count; i++) {
> +   plane_status = dc_plane_get_status(status->plane_states[i]);
> +   pending |= plane_status->is_flip_pending;
> +   DRM_DEBUG_DRIVER("plane:%d, flip_pending=%d\n",
> +i, plane_status->is_flip_pending);
> +   }
> +
> +   return pending;
> +}
> +
>  /**
>   * dm_pflip_high_irq() - Handle pageflip interrupt
>   * @interrupt_params: ignored
> @@ -435,6 +457,11 @@ static void dm_vupdate_high_irq(void *interrupt_params)
> 
> 

Re: [PATCH hmm 1/8] mm/hmm: add missing unmaps of the ptep during hmm_vma_handle_pte()

2020-03-12 Thread Jason Gunthorpe
On Wed, Mar 11, 2020 at 06:28:30PM -0700, Ralph Campbell wrote:
> >   mm/hmm.c | 8 ++--
> >   1 file changed, 6 insertions(+), 2 deletions(-)
> > 
> > diff --git a/mm/hmm.c b/mm/hmm.c
> > index 72e5a6d9a41756..35f85424176d14 100644
> > +++ b/mm/hmm.c
> > @@ -325,6 +325,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, 
> > unsigned long addr,
> > }
> > /* Report error for everything else */
> > +   pte_unmap(ptep);
> > *pfn = range->values[HMM_PFN_ERROR];
> > return -EFAULT;
> > } else {
> > @@ -339,10 +340,13 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, 
> > unsigned long addr,
> > if (pte_devmap(pte)) {
> > hmm_vma_walk->pgmap = get_dev_pagemap(pte_pfn(pte),
> >   hmm_vma_walk->pgmap);
> > -   if (unlikely(!hmm_vma_walk->pgmap))
> > +   if (unlikely(!hmm_vma_walk->pgmap)) {
> > +   pte_unmap(ptep);
> > return -EBUSY;
> > +   }
> > } else if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pte_special(pte)) 
> > {
> > if (!is_zero_pfn(pte_pfn(pte))) {
> > +   pte_unmap(ptep);
> > *pfn = range->values[HMM_PFN_SPECIAL];
> > return -EFAULT;
> > }
> > @@ -437,7 +441,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
> > 	r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
> > if (r) {
> > -   /* hmm_vma_handle_pte() did unmap pte directory */
> > +   /* hmm_vma_handle_pte() did pte_unmap() */
> > hmm_vma_walk->last = addr;
> > return r;
> > }
> > 
> 
> I think there is a case where hmm_vma_handle_pte() is called, a fault is 
> requested,
> pte_unmap() and hmm_vma_walk_hole_() are called, the latter returns zero (the 
> fault
> was handled OK)

Not quite, hmm_vma_walk_hole_() never returns 0 if called with fault:

return (fault || write_fault) ? -EBUSY : 0;

And all the call sites of hmm_vma_walk_hole_() in hmm_vma_handle_pte()
are structured as:

if (fault || write_fault)
goto fault;
fault:
return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

So, it never returns 0.

I already made a patch making this less twisty while fixing something
else:

https://github.com/jgunthorpe/linux/commit/078e10ca5919f2c263c245784fb5fe63ddbb61f4

Thanks,
Jason


Re: [PATCH] drm/amd/display: Add link_rate quirk for Apple 15" MBP 2017

2020-03-12 Thread Alex Deucher
Applied.  Thanks!

Alex

On Fri, Feb 28, 2020 at 4:36 PM Mario Kleiner
 wrote:
>
> This fixes a problem found on the MacBookPro 2017 Retina panel:
>
> The panel reports 10 bpc color depth in its EDID, and the
> firmware chooses link settings at boot which support enough
> bandwidth for 10 bpc (324000 kbit/sec aka LINK_RATE_RBR2
> aka 0xc), but the DP_MAX_LINK_RATE dpcd register only reports
> 2.7 Gbps (multiplier value 0xa) as possible, in direct
> contradiction of what the firmware successfully set up.
>
> This restricts the panel to 8 bpc, not providing the full
> color depth of the panel on Linux <= 5.5. Additionally, commit
> '4a8ca46bae8a ("drm/amd/display: Default max bpc to 16 for eDP")'
> introduced into Linux 5.6-rc1 will unclamp panel depth to
> its full 10 bpc, thereby requiring a eDP bandwidth for all
> modes that exceeds the bandwidth available and causes all modes
> to fail validation -> No modes for the laptop panel -> failure
> to set any mode -> Panel goes dark.
>
> This patch adds a quirk specific to the MBP 2017 15" Retina
> panel to override reported max link rate to the correct maximum
> of 0xc = LINK_RATE_RBR2 to fix the darkness and reduced display
> precision.
>
> Please apply for Linux 5.6+ to avoid regressing Apple MBP panel
> support.
>
> Signed-off-by: Mario Kleiner 
> ---
>  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 11 +++
>  1 file changed, 11 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c 
> b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
> index cb731c1d30b1..fd9e69634c50 100644
> --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
> +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
> @@ -3401,6 +3401,17 @@ static bool retrieve_link_cap(struct dc_link *link)
> sink_id.ieee_device_id,
> sizeof(sink_id.ieee_device_id));
>
> +   /* Quirk Apple MBP 2017 15" Retina panel: Wrong DP_MAX_LINK_RATE */
> +   {
> +   uint8_t str_mbp_2017[] = { 101, 68, 21, 101, 98, 97 };
> +
> +   if ((link->dpcd_caps.sink_dev_id == 0x0010fa) &&
> +   !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2017,
> +   sizeof(str_mbp_2017))) {
> +   link->reported_link_cap.link_rate = 0x0c;
> +   }
> +   }
> +
> core_link_read_dpcd(
> link,
> DP_SINK_HW_REVISION_START,
> --
> 2.20.1
>
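
For reference, DP_MAX_LINK_RATE values are multiples of 0.27 Gbps per lane,
which makes the mismatch described above concrete:

	0x0a = 10 * 0.27 Gbps = 2.70 Gbps  (what the dpcd register reports)
	0x0c = 12 * 0.27 Gbps = 3.24 Gbps  (LINK_RATE_RBR2, what the firmware set up)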


Re: [PATCH] drm/amdgpu: Stop using the DRIVER debugging flag for vblank debugging messages

2020-03-12 Thread Alex Deucher
Applied.  thanks!

Alex

On Fri, Jan 24, 2020 at 9:48 AM Harry Wentland  wrote:
>
> On 2020-01-23 8:07 p.m., Lyude Paul wrote:
> > These are some very loud debug statements that get printed on every
> > vblank when driver level debug printing is enabled in DRM, and doesn't
> > really tell us anything that isn't related to vblanks. So let's move
> > this over to the proper debug flag to be a little less spammy with our
> > debug output.
> >
> > Signed-off-by: Lyude Paul 
>
> Thanks. Great change.
>
> Reviewed-by: Harry Wentland 
>
> Harry
>
> > ---
> >  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 14 --
> >  1 file changed, 8 insertions(+), 6 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
> > b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> > index 9402374d2466..3675e1c32707 100644
> > --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> > +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> > @@ -407,8 +407,9 @@ static void dm_vupdate_high_irq(void *interrupt_params)
> >   if (acrtc) {
> >   acrtc_state = to_dm_crtc_state(acrtc->base.state);
> >
> > - DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
> > -  amdgpu_dm_vrr_active(acrtc_state));
> > + DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
> > +   acrtc->crtc_id,
> > +   amdgpu_dm_vrr_active(acrtc_state));
> >
> >   /* Core vblank handling is done here after end of front-porch 
> > in
> >* vrr mode, as vblank timestamping will give valid results
> > @@ -458,8 +459,9 @@ static void dm_crtc_high_irq(void *interrupt_params)
> >   if (acrtc) {
> >   acrtc_state = to_dm_crtc_state(acrtc->base.state);
> >
> > - DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
> > -  amdgpu_dm_vrr_active(acrtc_state));
> > + DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
> > +   acrtc->crtc_id,
> > +   amdgpu_dm_vrr_active(acrtc_state));
> >
> >   /* Core vblank handling at start of front-porch is only 
> > possible
> >* in non-vrr mode, as only there vblank timestamping will 
> > give
> > @@ -522,8 +524,8 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
> >
> >   acrtc_state = to_dm_crtc_state(acrtc->base.state);
> >
> > - DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
> > - amdgpu_dm_vrr_active(acrtc_state));
> > + DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
> > +   amdgpu_dm_vrr_active(acrtc_state));
> >
> >   amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
> >   drm_crtc_handle_vblank(&acrtc->base);
> >


Re: [PATCH] drm/amd/display: Possible divide by zero in set_speed()

2020-03-12 Thread Alex Deucher
Applied.  Thanks!

Alex

On Thu, Jan 30, 2020 at 11:58 PM Dan Carpenter  wrote:
>
> If "speed" is zero then we use it as a divisor to find "prescale".  It's
> better to move the check for zero to the very start of the function.
>
> Fixes: 9eeec26a1339 ("drm/amd/display: Refine i2c frequency calculating 
> sequence")
> Signed-off-by: Dan Carpenter 
> ---
>  drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c | 23 ---
>  1 file changed, 12 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c 
> b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
> index 066188ba7949..24adec407972 100644
> --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
> +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
> @@ -267,6 +267,9 @@ static void set_speed(
> uint32_t xtal_ref_div = 0;
> uint32_t prescale = 0;
>
> +   if (speed == 0)
> +   return;
> +
> REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div);
>
> if (xtal_ref_div == 0)
> @@ -274,17 +277,15 @@ static void set_speed(
>
> prescale = ((dce_i2c_hw->reference_frequency * 2) / xtal_ref_div) / 
> speed;
>
> -   if (speed) {
> -   if (dce_i2c_hw->masks->DC_I2C_DDC1_START_STOP_TIMING_CNTL)
> -   REG_UPDATE_N(SPEED, 3,
> -FN(DC_I2C_DDC1_SPEED, 
> DC_I2C_DDC1_PRESCALE), prescale,
> -FN(DC_I2C_DDC1_SPEED, 
> DC_I2C_DDC1_THRESHOLD), 2,
> -FN(DC_I2C_DDC1_SPEED, 
> DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 2:1);
> -   else
> -   REG_UPDATE_N(SPEED, 2,
> -FN(DC_I2C_DDC1_SPEED, 
> DC_I2C_DDC1_PRESCALE), prescale,
> -FN(DC_I2C_DDC1_SPEED, 
> DC_I2C_DDC1_THRESHOLD), 2);
> -   }
> +   if (dce_i2c_hw->masks->DC_I2C_DDC1_START_STOP_TIMING_CNTL)
> +   REG_UPDATE_N(SPEED, 3,
> +FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), 
> prescale,
> +FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2,
> +FN(DC_I2C_DDC1_SPEED, 
> DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 2:1);
> +   else
> +   REG_UPDATE_N(SPEED, 2,
> +FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), 
> prescale,
> +FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2);
>  }
>
>  static bool setup_engine(
> --
> 2.11.0
>


RE: [PATCH] drm/amdgpu: add codes to clear AccVGPR for arcturus

2020-03-12 Thread Zhang, Hawking
[AMD Official Use Only - Internal Distribution Only]

Per offline discussion, we can re-use the existing logic, gpr_reg_size = 
compute_dim_x / 16 + 6. For vega20 there are 10 configuration registers in 
total, while for Arcturus there are 14. That's fine.
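
Worked through, the formula matches those counts if compute_dim_x is 64 on
vega20 and 128 on Arcturus (the doubled value is an assumption consistent with
the added AccVGPRs):

	vega20:   64 / 16 + 6 = 10 configuration registers
	arcturus: 128 / 16 + 6 = 14 configuration registers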

We just need to create a separate vgpr_init_registers list that is specific to 
Arcturus, since the shader requires different settings than the vega20 one.

Please create a v2 to re-use code as much as possible. Thanks.

Regards,
Hawking

-Original Message-
From: amd-gfx  On Behalf Of Zhang, 
Hawking
Sent: Thursday, March 12, 2020 22:11
To: Li, Dennis ; amd-gfx@lists.freedesktop.org; Deucher, 
Alexander ; Zhou1, Tao ; Chen, 
Guchun 
Cc: Li, Dennis 
Subject: RE: [PATCH] drm/amdgpu: add codes to clear AccVGPR for arcturus

[AMD Official Use Only - Internal Distribution Only]

Since we separated the MI100 vgpr shader program configuration register list 
from the Vega20 one, it's better to remove the SE4 ~ SE7 thread registers from 
the vega20 list.

Other than that, the patch is

Reviewed-by: Hawking Zhang 

Regards,
Hawking
-Original Message-
From: Dennis Li  
Sent: Thursday, March 12, 2020 22:02
To: amd-gfx@lists.freedesktop.org; Deucher, Alexander 
; Zhou1, Tao ; Zhang, Hawking 
; Chen, Guchun 
Cc: Li, Dennis 
Subject: [PATCH] drm/amdgpu: add codes to clear AccVGPR for arcturus

AccVGPRs are newly added in arcturus. Before reading these registers, they 
should be initialized. Otherwise an EDC error happens when RAS is enabled.

Change-Id: I4ed384f0cc4b781a10cfd6ad1e3a132445bdc261
Signed-off-by: Dennis Li 

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
old mode 100644
new mode 100755
index c78ffdc51373..d5dd754bfb85
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -4144,6 +4144,101 @@ static const u32 sgpr_init_compute_shader[] =
0xbe800080, 0xbf81,
 };
 
+static const u32 vgpr_init_compute_shader_arcturus[] = {
+   0xd3d94000, 0x1880, 0xd3d94001, 0x1880, 0xd3d94002, 0x1880,
+   0xd3d94003, 0x1880, 0xd3d94004, 0x1880, 0xd3d94005, 0x1880,
+   0xd3d94006, 0x1880, 0xd3d94007, 0x1880, 0xd3d94008, 0x1880,
+   0xd3d94009, 0x1880, 0xd3d9400a, 0x1880, 0xd3d9400b, 0x1880,
+   0xd3d9400c, 0x1880, 0xd3d9400d, 0x1880, 0xd3d9400e, 0x1880,
+   0xd3d9400f, 0x1880, 0xd3d94010, 0x1880, 0xd3d94011, 0x1880,
+   0xd3d94012, 0x1880, 0xd3d94013, 0x1880, 0xd3d94014, 0x1880,
+   0xd3d94015, 0x1880, 0xd3d94016, 0x1880, 0xd3d94017, 0x1880,
+   0xd3d94018, 0x1880, 0xd3d94019, 0x1880, 0xd3d9401a, 0x1880,
+   0xd3d9401b, 0x1880, 0xd3d9401c, 0x1880, 0xd3d9401d, 0x1880,
+   0xd3d9401e, 0x1880, 0xd3d9401f, 0x1880, 0xd3d94020, 0x1880,
+   0xd3d94021, 0x1880, 0xd3d94022, 0x1880, 0xd3d94023, 0x1880,
+   0xd3d94024, 0x1880, 0xd3d94025, 0x1880, 0xd3d94026, 0x1880,
+   0xd3d94027, 0x1880, 0xd3d94028, 0x1880, 0xd3d94029, 0x1880,
+   0xd3d9402a, 0x1880, 0xd3d9402b, 0x1880, 0xd3d9402c, 0x1880,
+   0xd3d9402d, 0x1880, 0xd3d9402e, 0x1880, 0xd3d9402f, 0x1880,
+   0xd3d94030, 0x1880, 0xd3d94031, 0x1880, 0xd3d94032, 0x1880,
+   0xd3d94033, 0x1880, 0xd3d94034, 0x1880, 0xd3d94035, 0x1880,
+   0xd3d94036, 0x1880, 0xd3d94037, 0x1880, 0xd3d94038, 0x1880,
+   0xd3d94039, 0x1880, 0xd3d9403a, 0x1880, 0xd3d9403b, 0x1880,
+   0xd3d9403c, 0x1880, 0xd3d9403d, 0x1880, 0xd3d9403e, 0x1880,
+   0xd3d9403f, 0x1880, 0xd3d94040, 0x1880, 0xd3d94041, 0x1880,
+   0xd3d94042, 0x1880, 0xd3d94043, 0x1880, 0xd3d94044, 0x1880,
+   0xd3d94045, 0x1880, 0xd3d94046, 0x1880, 0xd3d94047, 0x1880,
+   0xd3d94048, 0x1880, 0xd3d94049, 0x1880, 0xd3d9404a, 0x1880,
+   0xd3d9404b, 0x1880, 0xd3d9404c, 0x1880, 0xd3d9404d, 0x1880,
+   0xd3d9404e, 0x1880, 0xd3d9404f, 0x1880, 0xd3d94050, 0x1880,
+   0xd3d94051, 0x1880, 0xd3d94052, 0x1880, 0xd3d94053, 0x1880,
+   0xd3d94054, 0x1880, 0xd3d94055, 0x1880, 0xd3d94056, 0x1880,
+   0xd3d94057, 0x1880, 0xd3d94058, 0x1880, 0xd3d94059, 0x1880,
+   0xd3d9405a, 0x1880, 0xd3d9405b, 0x1880, 0xd3d9405c, 0x1880,
+   0xd3d9405d, 0x1880, 0xd3d9405e, 0x1880, 0xd3d9405f, 0x1880,
+   0xd3d94060, 0x1880, 0xd3d94061, 0x1880, 0xd3d94062, 0x1880,
+   0xd3d94063, 0x1880, 0xd3d94064, 0x1880, 0xd3d94065, 0x1880,
+   0xd3d94066, 0x1880, 0xd3d94067, 0x1880, 0xd3d94068, 0x1880,
+   0xd3d94069, 0x1880, 0xd3d9406a, 0x1880, 0xd3d9406b, 0x1880,
+   0xd3d9406c, 0x1880, 0xd3d9406d, 0x1880, 0xd3d9406e, 0x1880,
+   0xd3d9406f, 0x1880, 

Re: [PATCH] drm/amdgpu/display: clean up some indenting

2020-03-12 Thread Alex Deucher
On Mon, Feb 24, 2020 at 5:31 AM Dan Carpenter  wrote:
>
> These lines were accidentally indented 4 spaces more than they should
> be.
>
> Signed-off-by: Dan Carpenter 

Applied.  thanks!

Alex

> ---
>  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 8 ++++----
>  1 file changed, 4 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
> b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> index 4cb3eb7c6745..408405d9f30c 100644
> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> @@ -2138,10 +2138,10 @@ static void handle_hpd_rx_irq(void *param)
> }
> }
>  #ifdef CONFIG_DRM_AMD_DC_HDCP
> -   if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
> -   if (adev->dm.hdcp_workqueue)
> -   hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  
> aconnector->base.index);
> -   }
> +   if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
> +   if (adev->dm.hdcp_workqueue)
> +   hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  
> aconnector->base.index);
> +   }
>  #endif
> if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
> (dc_link->type == dc_connection_mst_branch))
> --
> 2.11.0
>
> ___
> dri-devel mailing list
> dri-de...@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amd/display: clean up a condition in dmub_psr_copy_settings()

2020-03-12 Thread Alex Deucher
On Thu, Mar 12, 2020 at 7:32 AM Dan Carpenter  wrote:
>
> We can remove the NULL check for "res_ctx" and
> "res_ctx->pipe_ctx[i].stream->link".  Also it's nicer to align the
> conditions using spaces so I re-indented a bit.
>
> Longer explanation: The "res_ctx" pointer points to an address in the
> middle of a struct so it can't be NULL.  For
> "res_ctx->pipe_ctx[i].stream->link" we know that it is equal to "link"
> and "link" is non-NULL.
>
> Signed-off-by: Dan Carpenter 

Applied.  thanks!

Alex

> ---
>  drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c | 8 +++-----
>  1 file changed, 3 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c 
> b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
> index 9c88a92bd96a..bc109d4fc6e6 100644
> --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
> +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
> @@ -134,11 +134,9 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
> int i = 0;
>
> for (i = 0; i < MAX_PIPES; i++) {
> -   if (res_ctx &&
> -   res_ctx->pipe_ctx[i].stream &&
> -   res_ctx->pipe_ctx[i].stream->link &&
> -   res_ctx->pipe_ctx[i].stream->link == link &&
> -   res_ctx->pipe_ctx[i].stream->link->connector_signal 
> == SIGNAL_TYPE_EDP) {
> +   if (res_ctx->pipe_ctx[i].stream &&
> +   res_ctx->pipe_ctx[i].stream->link == link &&
> +   res_ctx->pipe_ctx[i].stream->link->connector_signal == 
> SIGNAL_TYPE_EDP) {
> 			pipe_ctx = &res_ctx->pipe_ctx[i];
> break;
> }
> --
> 2.20.1
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm: amd/acp: fix broken menu structure

2020-03-12 Thread Alex Deucher
Applied.  thanks!

Alex

On Thu, Mar 12, 2020 at 4:09 AM Randy Dunlap  wrote:
>
> From: Randy Dunlap 
>
> Fix the Kconfig dependencies so that the menu is presented
> correctly by adding a dependency on DRM_AMDGPU to the "menu"
> Kconfig statement.  This makes a continuous dependency on
> DRM_AMDGPU in the DRM AMD menus and eliminates a broken menu
> structure.
>
> Fixes: a8fe58cec351 ("drm/amd: add ACP driver support")
> Signed-off-by: Randy Dunlap 
> Cc: Alex Deucher 
> Cc: Christian König 
> Cc: David (ChunMing) Zhou 
> Cc: Maruthi Bayyavarapu 
> Cc: amd-gfx@lists.freedesktop.org
> ---
>  drivers/gpu/drm/amd/acp/Kconfig |1 +
>  1 file changed, 1 insertion(+)
>
> --- linux-next.orig/drivers/gpu/drm/amd/acp/Kconfig
> +++ linux-next/drivers/gpu/drm/amd/acp/Kconfig
> @@ -1,5 +1,6 @@
>  # SPDX-License-Identifier: MIT
>  menu "ACP (Audio CoProcessor) Configuration"
> +   depends on DRM_AMDGPU
>
>  config DRM_AMD_ACP
> bool "Enable AMD Audio CoProcessor IP support"
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH -next 023/491] AMD KFD: Use fallthrough;

2020-03-12 Thread Alex Deucher
Applied.  Thanks.  Link fixed locally.

Alex


On Wed, Mar 11, 2020 at 6:11 PM Joe Perches  wrote:
>
> On Wed, 2020-03-11 at 17:50 -0400, Felix Kuehling wrote:
> > On 2020-03-11 12:51 a.m., Joe Perches wrote:
> > > Convert the various uses of fallthrough comments to fallthrough;
> > >
> > > Done via script
> > > Link: 
> > > https://lore.kernel.org/lkml/b56602fcf79f849e733e7b521bb0e17895d390fa.1582230379.git.joe.com/
> >
> > The link seems to be broken. This one works:
> > https://lore.kernel.org/lkml/b56602fcf79f849e733e7b521bb0e17895d390fa.1582230379.git@perches.com/
>
> Thanks.
>
> I neglected to use a backslash on the generating script.
> In the script in 0/491,
>
> Link: 
> https://lore.kernel.org/lkml/b56602fcf79f849e733e7b521bb0e17895d390fa.1582230379.git@perches.com/
>
> likely should have been:
>
> Link: 
> https://lore.kernel.org/lkml/b56602fcf79f849e733e7b521bb0e17895d390fa.1582230379.git.joe\@perches.com/
>
>
> ___
> dri-devel mailing list
> dri-de...@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH] drm/amdgpu: add codes to clear AccVGPR for arcturus

2020-03-12 Thread Zhang, Hawking
[AMD Official Use Only - Internal Distribution Only]

Since we separated the MI100 vgpr shader program configuration register list 
from the Vega20 one, it's better to remove the SE4 ~ SE7 thread registers from 
the Vega20 list.

Other than that, the patch is

Reviewed-by: Hawking Zhang 

Regards,
Hawking
-Original Message-
From: Dennis Li  
Sent: Thursday, March 12, 2020 22:02
To: amd-gfx@lists.freedesktop.org; Deucher, Alexander 
; Zhou1, Tao ; Zhang, Hawking 
; Chen, Guchun 
Cc: Li, Dennis 
Subject: [PATCH] drm/amdgpu: add codes to clear AccVGPR for arcturus

AccVGPRs are newly added on Arcturus. Before reading these registers, they 
should be initialized; otherwise an EDC error is raised when RAS is enabled.

Change-Id: I4ed384f0cc4b781a10cfd6ad1e3a132445bdc261
Signed-off-by: Dennis Li 

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
old mode 100644
new mode 100755
index c78ffdc51373..d5dd754bfb85
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -4144,6 +4144,101 @@ static const u32 sgpr_init_compute_shader[] =
0xbe800080, 0xbf81,
 };
 
+static const u32 vgpr_init_compute_shader_arcturus[] = {
+   0xd3d94000, 0x1880, 0xd3d94001, 0x1880, 0xd3d94002, 0x1880,
+   0xd3d94003, 0x1880, 0xd3d94004, 0x1880, 0xd3d94005, 0x1880,
+   0xd3d94006, 0x1880, 0xd3d94007, 0x1880, 0xd3d94008, 0x1880,
+   0xd3d94009, 0x1880, 0xd3d9400a, 0x1880, 0xd3d9400b, 0x1880,
+   0xd3d9400c, 0x1880, 0xd3d9400d, 0x1880, 0xd3d9400e, 0x1880,
+   0xd3d9400f, 0x1880, 0xd3d94010, 0x1880, 0xd3d94011, 0x1880,
+   0xd3d94012, 0x1880, 0xd3d94013, 0x1880, 0xd3d94014, 0x1880,
+   0xd3d94015, 0x1880, 0xd3d94016, 0x1880, 0xd3d94017, 0x1880,
+   0xd3d94018, 0x1880, 0xd3d94019, 0x1880, 0xd3d9401a, 0x1880,
+   0xd3d9401b, 0x1880, 0xd3d9401c, 0x1880, 0xd3d9401d, 0x1880,
+   0xd3d9401e, 0x1880, 0xd3d9401f, 0x1880, 0xd3d94020, 0x1880,
+   0xd3d94021, 0x1880, 0xd3d94022, 0x1880, 0xd3d94023, 0x1880,
+   0xd3d94024, 0x1880, 0xd3d94025, 0x1880, 0xd3d94026, 0x1880,
+   0xd3d94027, 0x1880, 0xd3d94028, 0x1880, 0xd3d94029, 0x1880,
+   0xd3d9402a, 0x1880, 0xd3d9402b, 0x1880, 0xd3d9402c, 0x1880,
+   0xd3d9402d, 0x1880, 0xd3d9402e, 0x1880, 0xd3d9402f, 0x1880,
+   0xd3d94030, 0x1880, 0xd3d94031, 0x1880, 0xd3d94032, 0x1880,
+   0xd3d94033, 0x1880, 0xd3d94034, 0x1880, 0xd3d94035, 0x1880,
+   0xd3d94036, 0x1880, 0xd3d94037, 0x1880, 0xd3d94038, 0x1880,
+   0xd3d94039, 0x1880, 0xd3d9403a, 0x1880, 0xd3d9403b, 0x1880,
+   0xd3d9403c, 0x1880, 0xd3d9403d, 0x1880, 0xd3d9403e, 0x1880,
+   0xd3d9403f, 0x1880, 0xd3d94040, 0x1880, 0xd3d94041, 0x1880,
+   0xd3d94042, 0x1880, 0xd3d94043, 0x1880, 0xd3d94044, 0x1880,
+   0xd3d94045, 0x1880, 0xd3d94046, 0x1880, 0xd3d94047, 0x1880,
+   0xd3d94048, 0x1880, 0xd3d94049, 0x1880, 0xd3d9404a, 0x1880,
+   0xd3d9404b, 0x1880, 0xd3d9404c, 0x1880, 0xd3d9404d, 0x1880,
+   0xd3d9404e, 0x1880, 0xd3d9404f, 0x1880, 0xd3d94050, 0x1880,
+   0xd3d94051, 0x1880, 0xd3d94052, 0x1880, 0xd3d94053, 0x1880,
+   0xd3d94054, 0x1880, 0xd3d94055, 0x1880, 0xd3d94056, 0x1880,
+   0xd3d94057, 0x1880, 0xd3d94058, 0x1880, 0xd3d94059, 0x1880,
+   0xd3d9405a, 0x1880, 0xd3d9405b, 0x1880, 0xd3d9405c, 0x1880,
+   0xd3d9405d, 0x1880, 0xd3d9405e, 0x1880, 0xd3d9405f, 0x1880,
+   0xd3d94060, 0x1880, 0xd3d94061, 0x1880, 0xd3d94062, 0x1880,
+   0xd3d94063, 0x1880, 0xd3d94064, 0x1880, 0xd3d94065, 0x1880,
+   0xd3d94066, 0x1880, 0xd3d94067, 0x1880, 0xd3d94068, 0x1880,
+   0xd3d94069, 0x1880, 0xd3d9406a, 0x1880, 0xd3d9406b, 0x1880,
+   0xd3d9406c, 0x1880, 0xd3d9406d, 0x1880, 0xd3d9406e, 0x1880,
+   0xd3d9406f, 0x1880, 0xd3d94070, 0x1880, 0xd3d94071, 0x1880,
+   0xd3d94072, 0x1880, 0xd3d94073, 0x1880, 0xd3d94074, 0x1880,
+   0xd3d94075, 0x1880, 0xd3d94076, 0x1880, 0xd3d94077, 0x1880,
+   0xd3d94078, 0x1880, 0xd3d94079, 0x1880, 0xd3d9407a, 0x1880,
+   0xd3d9407b, 0x1880, 0xd3d9407c, 0x1880, 0xd3d9407d, 0x1880,
+   0xd3d9407e, 0x1880, 0xd3d9407f, 0x1880, 0xd3d94080, 0x1880,
+   0xd3d94081, 0x1880, 0xd3d94082, 0x1880, 0xd3d94083, 0x1880,
+   0xd3d94084, 0x1880, 0xd3d94085, 0x1880, 0xd3d94086, 0x1880,
+   0xd3d94087, 0x1880, 0xd3d94088, 0x1880, 0xd3d94089, 0x1880,
+   0xd3d9408a, 0x1880, 0xd3d9408b, 0x1880, 0xd3d9408c, 0x1880,
+   0xd3d9408d, 0x1880, 

Re: [PATCH -next 024/491] AMD DISPLAY CORE: Use fallthrough;

2020-03-12 Thread Alex Deucher
Applied.  thanks! (link fixed locally).

Alex

On Wed, Mar 11, 2020 at 1:07 AM Joe Perches  wrote:
>
> Convert the various uses of fallthrough comments to fallthrough;
>
> Done via script
> Link: 
> https://lore.kernel.org/lkml/b56602fcf79f849e733e7b521bb0e17895d390fa.1582230379.git.joe.com/
>
> Signed-off-by: Joe Perches 
> ---
>  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 4 ++--
>  drivers/gpu/drm/amd/display/dc/dce/dce_aux.c   | 2 +-
>  drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c | 2 +-
>  3 files changed, 4 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c 
> b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
> index 2f1c958..37fa7b 100644
> --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
> +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
> @@ -267,7 +267,7 @@ static struct atom_display_object_path_v2 
> *get_bios_object(
> && id.enum_id == obj_id.enum_id)
> 				return &bp->object_info_tbl.v1_4->display_path[i];
> }
> -   /* fall through */
> +   fallthrough;
> case OBJECT_TYPE_CONNECTOR:
> case OBJECT_TYPE_GENERIC:
> /* Both Generic and Connector Object ID
> @@ -280,7 +280,7 @@ static struct atom_display_object_path_v2 
> *get_bios_object(
> && id.enum_id == obj_id.enum_id)
> 				return &bp->object_info_tbl.v1_4->display_path[i];
> }
> -   /* fall through */
> +   fallthrough;
> default:
> return NULL;
> }
> diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c 
> b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
> index 68c4049..743042 100644
> --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
> +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
> @@ -645,7 +645,7 @@ bool dce_aux_transfer_with_retries(struct ddc_service 
> *ddc,
> case AUX_TRANSACTION_REPLY_AUX_DEFER:
> case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER:
> retry_on_defer = true;
> -   /* fall through */
> +   fallthrough;
> case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
> if (++aux_defer_retries >= 
> AUX_MAX_DEFER_RETRIES) {
> goto fail;
> diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c 
> b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
> index 8aa937f..51481e 100644
> --- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
> +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
> @@ -479,7 +479,7 @@ static void program_grph_pixel_format(
> case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
> sign = 1;
> floating = 1;
> -   /* fall through */
> +   fallthrough;
> case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: /* shouldn't this get 
> float too? */
> case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
> grph_depth = 3;
> --
> 2.24.0
>
> ___
> dri-devel mailing list
> dri-de...@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
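
For context, fallthrough; is the pseudo-keyword defined in
include/linux/compiler_attributes.h; where the compiler supports it, it
expands to __attribute__((__fallthrough__)), which lets
-Wimplicit-fallthrough distinguish deliberate fall-through from a missing
break. A minimal sketch with made-up case names:

    switch (reply) {
    case EXAMPLE_REPLY_DEFER:
            retry_on_defer = true;
            fallthrough;    /* deliberately share the NACK retry path */
    case EXAMPLE_REPLY_NACK:
            if (++retries >= EXAMPLE_MAX_RETRIES)
                    goto fail;
            break;
    default:
            break;
    }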


Re: [PATCH -next 025/491] AMD POWERPLAY: Use fallthrough;

2020-03-12 Thread Alex Deucher
Applied.  thanks! (link fixed locally).

Alex

On Wed, Mar 11, 2020 at 1:07 AM Joe Perches  wrote:
>
> Convert the various uses of fallthrough comments to fallthrough;
>
> Done via script
> Link: 
> https://lore.kernel.org/lkml/b56602fcf79f849e733e7b521bb0e17895d390fa.1582230379.git.joe.com/
>
> Signed-off-by: Joe Perches 
> ---
>  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 6 +++---
>  1 file changed, 3 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c 
> b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
> index bf04cf..fc5236c 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
> @@ -1250,7 +1250,7 @@ static void smu7_set_dpm_event_sources(struct pp_hwmgr 
> *hwmgr, uint32_t sources)
> switch (sources) {
> default:
> pr_err("Unknown throttling event sources.");
> -   /* fall through */
> +   fallthrough;
> case 0:
> protection = false;
> /* src is unused */
> @@ -3698,12 +3698,12 @@ static int 
> smu7_request_link_speed_change_before_state_change(
> data->force_pcie_gen = PP_PCIEGen2;
> if (current_link_speed == PP_PCIEGen2)
> break;
> -   /* fall through */
> +   fallthrough;
> case PP_PCIEGen2:
> if (0 == 
> amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN2, false))
> break;
>  #endif
> -   /* fall through */
> +   fallthrough;
> default:
> data->force_pcie_gen = 
> smu7_get_current_pcie_speed(hwmgr);
> break;
> --
> 2.24.0
>
> ___
> dri-devel mailing list
> dri-de...@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amdgpu: add codes to clear AccVGPR for arcturus

2020-03-12 Thread Dennis Li
AccVGPRs are newly added on Arcturus. Before reading these
registers, they should be initialized; otherwise an EDC error
is raised when RAS is enabled.

Change-Id: I4ed384f0cc4b781a10cfd6ad1e3a132445bdc261
Signed-off-by: Dennis Li 

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
old mode 100644
new mode 100755
index c78ffdc51373..d5dd754bfb85
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -4144,6 +4144,101 @@ static const u32 sgpr_init_compute_shader[] =
0xbe800080, 0xbf81,
 };
 
+static const u32 vgpr_init_compute_shader_arcturus[] = {
+   0xd3d94000, 0x1880, 0xd3d94001, 0x1880, 0xd3d94002, 0x1880,
+   0xd3d94003, 0x1880, 0xd3d94004, 0x1880, 0xd3d94005, 0x1880,
+   0xd3d94006, 0x1880, 0xd3d94007, 0x1880, 0xd3d94008, 0x1880,
+   0xd3d94009, 0x1880, 0xd3d9400a, 0x1880, 0xd3d9400b, 0x1880,
+   0xd3d9400c, 0x1880, 0xd3d9400d, 0x1880, 0xd3d9400e, 0x1880,
+   0xd3d9400f, 0x1880, 0xd3d94010, 0x1880, 0xd3d94011, 0x1880,
+   0xd3d94012, 0x1880, 0xd3d94013, 0x1880, 0xd3d94014, 0x1880,
+   0xd3d94015, 0x1880, 0xd3d94016, 0x1880, 0xd3d94017, 0x1880,
+   0xd3d94018, 0x1880, 0xd3d94019, 0x1880, 0xd3d9401a, 0x1880,
+   0xd3d9401b, 0x1880, 0xd3d9401c, 0x1880, 0xd3d9401d, 0x1880,
+   0xd3d9401e, 0x1880, 0xd3d9401f, 0x1880, 0xd3d94020, 0x1880,
+   0xd3d94021, 0x1880, 0xd3d94022, 0x1880, 0xd3d94023, 0x1880,
+   0xd3d94024, 0x1880, 0xd3d94025, 0x1880, 0xd3d94026, 0x1880,
+   0xd3d94027, 0x1880, 0xd3d94028, 0x1880, 0xd3d94029, 0x1880,
+   0xd3d9402a, 0x1880, 0xd3d9402b, 0x1880, 0xd3d9402c, 0x1880,
+   0xd3d9402d, 0x1880, 0xd3d9402e, 0x1880, 0xd3d9402f, 0x1880,
+   0xd3d94030, 0x1880, 0xd3d94031, 0x1880, 0xd3d94032, 0x1880,
+   0xd3d94033, 0x1880, 0xd3d94034, 0x1880, 0xd3d94035, 0x1880,
+   0xd3d94036, 0x1880, 0xd3d94037, 0x1880, 0xd3d94038, 0x1880,
+   0xd3d94039, 0x1880, 0xd3d9403a, 0x1880, 0xd3d9403b, 0x1880,
+   0xd3d9403c, 0x1880, 0xd3d9403d, 0x1880, 0xd3d9403e, 0x1880,
+   0xd3d9403f, 0x1880, 0xd3d94040, 0x1880, 0xd3d94041, 0x1880,
+   0xd3d94042, 0x1880, 0xd3d94043, 0x1880, 0xd3d94044, 0x1880,
+   0xd3d94045, 0x1880, 0xd3d94046, 0x1880, 0xd3d94047, 0x1880,
+   0xd3d94048, 0x1880, 0xd3d94049, 0x1880, 0xd3d9404a, 0x1880,
+   0xd3d9404b, 0x1880, 0xd3d9404c, 0x1880, 0xd3d9404d, 0x1880,
+   0xd3d9404e, 0x1880, 0xd3d9404f, 0x1880, 0xd3d94050, 0x1880,
+   0xd3d94051, 0x1880, 0xd3d94052, 0x1880, 0xd3d94053, 0x1880,
+   0xd3d94054, 0x1880, 0xd3d94055, 0x1880, 0xd3d94056, 0x1880,
+   0xd3d94057, 0x1880, 0xd3d94058, 0x1880, 0xd3d94059, 0x1880,
+   0xd3d9405a, 0x1880, 0xd3d9405b, 0x1880, 0xd3d9405c, 0x1880,
+   0xd3d9405d, 0x1880, 0xd3d9405e, 0x1880, 0xd3d9405f, 0x1880,
+   0xd3d94060, 0x1880, 0xd3d94061, 0x1880, 0xd3d94062, 0x1880,
+   0xd3d94063, 0x1880, 0xd3d94064, 0x1880, 0xd3d94065, 0x1880,
+   0xd3d94066, 0x1880, 0xd3d94067, 0x1880, 0xd3d94068, 0x1880,
+   0xd3d94069, 0x1880, 0xd3d9406a, 0x1880, 0xd3d9406b, 0x1880,
+   0xd3d9406c, 0x1880, 0xd3d9406d, 0x1880, 0xd3d9406e, 0x1880,
+   0xd3d9406f, 0x1880, 0xd3d94070, 0x1880, 0xd3d94071, 0x1880,
+   0xd3d94072, 0x1880, 0xd3d94073, 0x1880, 0xd3d94074, 0x1880,
+   0xd3d94075, 0x1880, 0xd3d94076, 0x1880, 0xd3d94077, 0x1880,
+   0xd3d94078, 0x1880, 0xd3d94079, 0x1880, 0xd3d9407a, 0x1880,
+   0xd3d9407b, 0x1880, 0xd3d9407c, 0x1880, 0xd3d9407d, 0x1880,
+   0xd3d9407e, 0x1880, 0xd3d9407f, 0x1880, 0xd3d94080, 0x1880,
+   0xd3d94081, 0x1880, 0xd3d94082, 0x1880, 0xd3d94083, 0x1880,
+   0xd3d94084, 0x1880, 0xd3d94085, 0x1880, 0xd3d94086, 0x1880,
+   0xd3d94087, 0x1880, 0xd3d94088, 0x1880, 0xd3d94089, 0x1880,
+   0xd3d9408a, 0x1880, 0xd3d9408b, 0x1880, 0xd3d9408c, 0x1880,
+   0xd3d9408d, 0x1880, 0xd3d9408e, 0x1880, 0xd3d9408f, 0x1880,
+   0xd3d94090, 0x1880, 0xd3d94091, 0x1880, 0xd3d94092, 0x1880,
+   0xd3d94093, 0x1880, 0xd3d94094, 0x1880, 0xd3d94095, 0x1880,
+   0xd3d94096, 0x1880, 0xd3d94097, 0x1880, 0xd3d94098, 0x1880,
+   0xd3d94099, 0x1880, 0xd3d9409a, 0x1880, 0xd3d9409b, 0x1880,
+   0xd3d9409c, 0x1880, 0xd3d9409d, 0x1880, 0xd3d9409e, 0x1880,
+   0xd3d9409f, 0x1880, 0xd3d940a0, 0x1880, 0xd3d940a1, 0x1880,
+   0xd3d940a2, 0x1880, 

Re: [PATCH] drm/amdgpu: fix and cleanup amdgpu_gem_object_close

2020-03-12 Thread Christian König
The problem is that dma_resv_test_signaled_rcu() tests only the shared 
fences when any are present.


Now what happened is that the clear fence completed before the exclusive 
one, and that in turn made TTM think that the BO was unused and free it.


Christian.
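
The fix pattern in the patch quoted below boils down to: before publishing
the clear fence as a shared fence, briefly wait for any exclusive fence so
that the mandatory ordering holds. A reduced sketch (the 10 ms timeout and
the shared flag mirror the patch; error handling is omitted here):

    long r;

    /* Wait only for the exclusive fence (wait_all = false),
     * non-interruptible, bounded by a small timeout. */
    r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
                                  msecs_to_jiffies(10));
    if (r > 0) {
            /* Safe now: the shared clear fence can no longer complete
             * before the already-signaled exclusive fence. */
            amdgpu_bo_fence(bo, fence, true /* shared */);
            dma_fence_put(fence);
    }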

Am 12.03.20 um 14:25 schrieb Liu, Monk:

Without your patch, the clear fence is also hooked into the shared list of the 
BO's reservation object, no matter whether the exclusive fence of that BO 
signals before the clear fence or not.

Since the clear fence is always kept in the BO's resv object, can you tell me 
what the problem is then? Are we going to lose this clear fence and still use 
it during the VM pt/pd clearing?

thanks
_
Monk Liu|GPU Virtualization Team |AMD


-Original Message-
From: Christian König 
Sent: Thursday, March 12, 2020 8:50 PM
To: Liu, Monk ; Pan, Xinhui ; 
amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH] drm/amdgpu: fix and cleanup amdgpu_gem_object_close

Semantically, the dma_resv object contains a single exclusive fence and 
multiple shared fences, and it is mandatory that the shared fences complete 
after the exclusive one.

Now what happens is that clearing the VM page tables runs asynchronously to the 
exclusive fence which moves the buffer around.

And because of this Xinhui ran into a rare problem that TTM thought it could 
reuse the memory while in reality it was still in use.

Regards,
Christian.

Am 12.03.20 um 12:30 schrieb Liu, Monk:

Can you give more details about "we can't guarantee that the clear fence will 
complete after the exclusive one"?

Thanks

_
Monk Liu|GPU Virtualization Team |AMD


-Original Message-
From: amd-gfx  On Behalf Of
Christian K?nig
Sent: Thursday, March 12, 2020 7:12 PM
To: Pan, Xinhui ; amd-gfx@lists.freedesktop.org
Subject: [PATCH] drm/amdgpu: fix and cleanup amdgpu_gem_object_close

The problem is that we can't add the clear fence to the BO when there is an 
exclusive fence on it, since we can't guarantee that the clear fence will 
complete after the exclusive one.

To fix this, refactor the function and wait for any potential exclusive fence 
with a small timeout before adding the shared clear fence.

Signed-off-by: Christian König 
---
   drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 41 +++++++++++++++++++++++++----------------
   1 file changed, 25 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 4277125a79ee..0782162fb5f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -161,10 +161,11 @@ void amdgpu_gem_object_close(struct
drm_gem_object *obj,
   
   	struct amdgpu_bo_list_entry vm_pd;

struct list_head list, duplicates;
+   struct dma_fence *fence = NULL;
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct amdgpu_bo_va *bo_va;
-   int r;
+   long r;
   
	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);
@@ -182,24 +183,32 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
return;
}
bo_va = amdgpu_vm_bo_find(vm, bo);
-   if (bo_va && --bo_va->ref_count == 0) {
-   amdgpu_vm_bo_rmv(adev, bo_va);
+   if (!bo_va || --bo_va->ref_count)
+   goto out_unlock;
   
-		if (amdgpu_vm_ready(vm)) {

-   struct dma_fence *fence = NULL;
+   amdgpu_vm_bo_rmv(adev, bo_va);
+   if (!amdgpu_vm_ready(vm))
+   goto out_unlock;
   
-			r = amdgpu_vm_clear_freed(adev, vm, &fence);

-   if (unlikely(r)) {
-   dev_err(adev->dev, "failed to clear page "
-   "tables on GEM object close (%d)\n", r);
-   }
   
-			if (fence) {

-   amdgpu_bo_fence(bo, fence, true);
-   dma_fence_put(fence);
-   }
-   }
-   }
+	r = amdgpu_vm_clear_freed(adev, vm, &fence);
+   if (r || !fence)
+   goto out_unlock;
+
+   r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
+ msecs_to_jiffies(10));
+   if (r == 0)
+   r = -ETIMEDOUT;
+   if (r)
+   goto out_unlock;
+
+   amdgpu_bo_fence(bo, fence, true);
+   dma_fence_put(fence);
+
+out_unlock:
+   if (unlikely(r))
+   dev_err(adev->dev, "failed to clear page "
+   "tables on GEM object close (%d)\n", r);
	ttm_eu_backoff_reservation(&ticket, &list);  }
   
--

2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH] drm/amdgpu/display: Fix an error handling path in 'dm_update_crtc_state()'

2020-03-12 Thread Dan Carpenter
On Mon, Mar 09, 2020 at 08:24:04AM +0000, Walter Harms wrote:
> 
> 
> From: kernel-janitors-ow...@vger.kernel.org 
>  on behalf of Christophe JAILLET 
> 
> Sent: Sunday, March 8, 2020 10:26
> To: harry.wentl...@amd.com; sunpeng...@amd.com; alexander.deuc...@amd.com; 
> christian.koe...@amd.com; david1.z...@amd.com; airl...@linux.ie; 
> dan...@ffwll.ch; nicholas.kazlaus...@amd.com; bhawanpreet.la...@amd.com; 
> mario.kleiner...@gmail.com; david.fran...@amd.com
> Cc: amd-gfx@lists.freedesktop.org; dri-de...@lists.freedesktop.org; 
> linux-ker...@vger.kernel.org; kernel-janit...@vger.kernel.org; Christophe 
> JAILLET
> Subject: [PATCH] drm/amdgpu/display: Fix an error handling path in 
> 'dm_update_crtc_state()'
> 
> 'dc_stream_release()' may be called twice. Once here, and once below in the
> error handling path if we branch to the 'fail' label.
> 
> Set 'new_stream' to NULL, once released to avoid the duplicated release
> function call.
> 
> Signed-off-by: Christophe JAILLET 
> ---
> Maybe the 'goto fail' at line 7745 should be turned into a 'return ret'
> instead. Could be clearer.
> 
> No Fixes tag provided because I've not been able to dig deep enough in the
> git history.
> ---
>  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 6 ++++--
>  1 file changed, 4 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
> b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> index 97c1b01c0fc1..9d7773a77c4f 100644
> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> @@ -7704,8 +7704,10 @@ static int dm_update_crtc_state(struct 
> amdgpu_display_manager *dm,
> 
>  skip_modeset:
> /* Release extra reference */
> -   if (new_stream)
> -dc_stream_release(new_stream);
> +   if (new_stream) {
> +   dc_stream_release(new_stream);
> +   new_stream = NULL;
> +   }
> 
> 
> dc_stream_release() is NULL-checked, so the if can be dropped.
> 
> re,
>  wh
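
The idiom being discussed, reduced to plain C: NULL the pointer after the
first release so that a shared error path can release unconditionally. It
works precisely because the release function treats NULL as a no-op, which
is also why dc_stream_release() being NULL-checked matters here. A minimal
stand-alone sketch:

    #include <stdlib.h>

    static int demo(void)
    {
            void *buf = malloc(64);
            int ret = -1;

            if (!buf)
                    goto fail;
            /* ... use buf ... */
            free(buf);
            buf = NULL;     /* arm against the unconditional free below */
            ret = 0;
    fail:
            free(buf);      /* free(NULL) is defined to do nothing */
            return ret;
    }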

Walter, it's really hard to separate your reply from the quoted email.
What's going on with that?  Could you configure your email client to
use "> " for the quoted bit?

regards,
dan carpenter

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu/swsmu: clean up unused header in swsmu

2020-03-12 Thread Deucher, Alexander
[AMD Public Use]

Reviewed-by: Alex Deucher 

From: Wang, Kevin(Yang) 
Sent: Thursday, March 12, 2020 5:50 AM
To: amd-gfx@lists.freedesktop.org 
Cc: Feng, Kenneth ; Deucher, Alexander 
; Wang, Kevin(Yang) 
Subject: [PATCH] drm/amdgpu/swsmu: clean up unused header in swsmu

clean up unused headers in the swsmu driver stack:
1. pp_debug.h
2. amd_pcie.h
3. soc15_common.h

Signed-off-by: Kevin Wang 
---
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c   | 3 ---
 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 1 -
 drivers/gpu/drm/amd/powerplay/navi10_ppt.c   | 2 --
 drivers/gpu/drm/amd/powerplay/renoir_ppt.c   | 1 -
 drivers/gpu/drm/amd/powerplay/smu_v11_0.c| 1 -
 drivers/gpu/drm/amd/powerplay/smu_v12_0.c| 1 -
 drivers/gpu/drm/amd/powerplay/vega20_ppt.c   | 1 -
 7 files changed, 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index f18e3fadbc26..8de8436f0839 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -23,15 +23,12 @@
 #include 
 #include 

-#include "pp_debug.h"
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
 #include "smu_internal.h"
-#include "soc15_common.h"
 #include "smu_v11_0.h"
 #include "smu_v12_0.h"
 #include "atom.h"
-#include "amd_pcie.h"
 #include "vega20_ppt.h"
 #include "arcturus_ppt.h"
 #include "navi10_ppt.h"
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c 
b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index cc4427ebf169..61596e8d522c 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -21,7 +21,6 @@
  *
  */

-#include "pp_debug.h"
 #include 
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c 
b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 6e41f3c9ff1b..d66dfa7410b6 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -21,7 +21,6 @@
  *
  */

-#include "pp_debug.h"
 #include 
 #include 
 #include "amdgpu.h"
@@ -31,7 +30,6 @@
 #include "amdgpu_atomfirmware.h"
 #include "smu_v11_0.h"
 #include "smu11_driver_if_navi10.h"
-#include "soc15_common.h"
 #include "atom.h"
 #include "navi10_ppt.h"
 #include "smu_v11_0_pptable.h"
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c 
b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 653faadaafb3..7bf52ecba01d 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -24,7 +24,6 @@
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
 #include "smu_internal.h"
-#include "soc15_common.h"
 #include "smu_v12_0_ppsmc.h"
 #include "smu12_driver_if.h"
 #include "smu_v12_0.h"
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c 
b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index 3a5d00573d2c..4fd77c7cfc80 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -26,7 +26,6 @@

 #define SMU_11_0_PARTIAL_PPTABLE

-#include "pp_debug.h"
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
 #include "smu_internal.h"
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c 
b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
index d52e624f16d3..169ebdad87b8 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
@@ -20,7 +20,6 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */

-#include "pp_debug.h"
 #include 
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c 
b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index d7fa8c02c166..49ff3756bd9f 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -21,7 +21,6 @@
  *
  */

-#include "pp_debug.h"
 #include 
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
--
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH] drm/amdgpu: fix and cleanup amdgpu_gem_object_close

2020-03-12 Thread Liu, Monk
>> Now what happens is that clearing the VM page tables runs asynchronously to 
>> the exclusive fence which moves the buffer around.

amdgpu_vm_clear_freed() is already kicked off before you wait for the 
exclusive fence to signal, so how does introducing a dma_resv_wait_timeout_rcu() 
*after* amdgpu_vm_clear_freed() prevent the PT clearing from overlapping with 
the "move" action?
_
Monk Liu|GPU Virtualization Team |AMD


-Original Message-
From: Christian König  
Sent: Thursday, March 12, 2020 8:50 PM
To: Liu, Monk ; Pan, Xinhui ; 
amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH] drm/amdgpu: fix and cleanup amdgpu_gem_object_close

Semantically, the dma_resv object contains a single exclusive fence and multiple 
shared fences, and it is mandatory that the shared fences complete after the 
exclusive one.

Now what happens is that clearing the VM page tables runs asynchronously to the 
exclusive fence which moves the buffer around.

And because of this Xinhui ran into a rare problem that TTM thought it could 
reuse the memory while in reality it was still in use.

Regards,
Christian.

Am 12.03.20 um 12:30 schrieb Liu, Monk:
> Can you give more details about "we can't guarantee that the clear fence will 
> complete after the exclusive one"?
>
> Thanks
>
> _
> Monk Liu|GPU Virtualization Team |AMD
>
>
> -Original Message-
> From: amd-gfx  On Behalf Of 
> Christian K?nig
> Sent: Thursday, March 12, 2020 7:12 PM
> To: Pan, Xinhui ; amd-gfx@lists.freedesktop.org
> Subject: [PATCH] drm/amdgpu: fix and cleanup amdgpu_gem_object_close
>
> The problem is that we can't add the clear fence to the BO when there is an 
> exclusive fence on it, since we can't guarantee that the clear fence will 
> complete after the exclusive one.
>
> To fix this, refactor the function and wait for any potential exclusive fence 
> with a small timeout before adding the shared clear fence.
>
> Signed-off-by: Christian König 
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 41 +++++++++++++++++++++++++----------------
>   1 file changed, 25 insertions(+), 16 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> index 4277125a79ee..0782162fb5f3 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> @@ -161,10 +161,11 @@ void amdgpu_gem_object_close(struct 
> drm_gem_object *obj,
>   
>   struct amdgpu_bo_list_entry vm_pd;
>   struct list_head list, duplicates;
> + struct dma_fence *fence = NULL;
>   struct ttm_validate_buffer tv;
>   struct ww_acquire_ctx ticket;
>   struct amdgpu_bo_va *bo_va;
> - int r;
> + long r;
>   
>   INIT_LIST_HEAD(&list);
>   INIT_LIST_HEAD(&duplicates);
> @@ -182,24 +183,32 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
>   return;
>   }
>   bo_va = amdgpu_vm_bo_find(vm, bo);
> - if (bo_va && --bo_va->ref_count == 0) {
> - amdgpu_vm_bo_rmv(adev, bo_va);
> + if (!bo_va || --bo_va->ref_count)
> + goto out_unlock;
>   
> - if (amdgpu_vm_ready(vm)) {
> - struct dma_fence *fence = NULL;
> + amdgpu_vm_bo_rmv(adev, bo_va);
> + if (!amdgpu_vm_ready(vm))
> + goto out_unlock;
>   
> -		r = amdgpu_vm_clear_freed(adev, vm, &fence);
> - if (unlikely(r)) {
> - dev_err(adev->dev, "failed to clear page "
> - "tables on GEM object close (%d)\n", r);
> - }
>   
> - if (fence) {
> - amdgpu_bo_fence(bo, fence, true);
> - dma_fence_put(fence);
> - }
> - }
> - }
> +	r = amdgpu_vm_clear_freed(adev, vm, &fence);
> + if (r || !fence)
> + goto out_unlock;
> +
> + r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
> +   msecs_to_jiffies(10));
> + if (r == 0)
> + r = -ETIMEDOUT;
> + if (r)
> + goto out_unlock;
> +
> + amdgpu_bo_fence(bo, fence, true);
> + dma_fence_put(fence);
> +
> +out_unlock:
> + if (unlikely(r))
> + dev_err(adev->dev, "failed to clear page "
> + "tables on GEM object close (%d)\n", r);
>   	ttm_eu_backoff_reservation(&ticket, &list);  }
>   
> --
> 2.17.1
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx

___
amd-gfx 

RE: [PATCH v5 2/4] drm/amdgpu/vcn: fix race condition issue for dpg unpause mode switch

2020-03-12 Thread Liu, Leo
This patch is
Reviewed-by: Leo Liu 

-Original Message-
From: amd-gfx  On Behalf Of James Zhu
Sent: March 11, 2020 4:50 PM
To: amd-gfx@lists.freedesktop.org
Cc: Zhu, James 
Subject: [PATCH v5 2/4] drm/amdgpu/vcn: fix race condition issue for dpg 
unpause mode switch

We can't rely only on the ENC fence to decide when to switch to DPG unpause
mode, since an ENC thread may not have scheduled a fence yet while multiple
threads are running.

v3: 1. Rename enc_submission_cnt to dpg_enc_submission_cnt
2. Add dpg_enc_submission_cnt check in idle_work_handler

v4: Remove the extra counter check, and decrease the counter before the
idle work is scheduled
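
Reduced to its core, the scheme in this patch is: bump a per-instance
atomic counter as soon as an ENC job enters begin_use, and only allow
unpause when both the emitted-fence count and that counter are zero. A
sketch using the names from the patch:

    /* begin_use, ENC ring: announce the job before any fence exists */
    atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
    new_state.fw_based = VCN_DPG_STATE__PAUSE;

    /* idle worker / non-ENC path: unpause only when truly idle */
    if (fences || atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt))
            new_state.fw_based = VCN_DPG_STATE__PAUSE;
    else
            new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

    /* end_use, ENC ring: drop the count before scheduling idle work */
    atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);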

Signed-off-by: James Zhu 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 32 +++++++++++++++++++++-----------
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h |  1 +
 2 files changed, 22 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 6dacf78..0110610 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -65,6 +65,8 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
	mutex_init(&adev->vcn.vcn_pg_lock);
	atomic_set(&adev->vcn.total_submission_cnt, 0);
+   for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+   	atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
 
switch (adev->asic_type) {
case CHIP_RAVEN:
@@ -298,7 +300,8 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct 
*work)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG){
struct dpg_pause_state new_state;
 
-   if (fence[j])
+   if (fence[j] ||
+       unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
new_state.fw_based = VCN_DPG_STATE__PAUSE;
else
new_state.fw_based = VCN_DPG_STATE__UNPAUSE; @@ 
-333,19 +336,22 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
 
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG){
struct dpg_pause_state new_state;
-   unsigned int fences = 0;
-   unsigned int i;
 
-   for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-   fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
-   }
-   if (fences)
+   if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
+   	atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
new_state.fw_based = VCN_DPG_STATE__PAUSE;
-   else
-   new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+   } else {
+   unsigned int fences = 0;
+   unsigned int i;
 
-   if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
-   new_state.fw_based = VCN_DPG_STATE__PAUSE;
+   for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+   fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
+
+   if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
+   new_state.fw_based = VCN_DPG_STATE__PAUSE;
+   else
+   new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+   }
 
	adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
}
@@ -354,6 +360,10 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
 
 void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)  {
+   if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
+   ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+   	atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
+
	atomic_dec(&ring->adev->vcn.total_submission_cnt);
 
	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT); 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 111c4cc..e913de8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -183,6 +183,7 @@ struct amdgpu_vcn_inst {
void*dpg_sram_cpu_addr;
uint64_tdpg_sram_gpu_addr;
uint32_t*dpg_sram_curr_addr;
+   atomic_tdpg_enc_submission_cnt;
 };
 
 struct amdgpu_vcn {
--
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org

RE: [PATCH] drm/amdgpu: fix and cleanup amdgpu_gem_object_close

2020-03-12 Thread Liu, Monk
Without your patch, the clear fence is also hooked into the shared list of the 
BO's reservation object, no matter whether the exclusive fence of that BO 
signals before the clear fence or not.

Since the clear fence is always kept in the BO's resv object, can you tell me 
what the problem is then? Are we going to lose this clear fence and still use 
it during the VM pt/pd clearing?

thanks 
_
Monk Liu|GPU Virtualization Team |AMD


-Original Message-
From: Christian König  
Sent: Thursday, March 12, 2020 8:50 PM
To: Liu, Monk ; Pan, Xinhui ; 
amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH] drm/amdgpu: fix and cleanup amdgpu_gem_object_close

Semantically, the dma_resv object contains a single exclusive fence and multiple 
shared fences, and it is mandatory that the shared fences complete after the 
exclusive one.

Now what happens is that clearing the VM page tables runs asynchronously to the 
exclusive fence which moves the buffer around.

And because of this Xinhui ran into a rare problem that TTM thought it could 
reuse the memory while in reality it was still in use.

Regards,
Christian.

Am 12.03.20 um 12:30 schrieb Liu, Monk:
> Can you give more details about "we can't guarantee that the clear fence will 
> complete after the exclusive one"?
>
> Thanks
>
> _
> Monk Liu|GPU Virtualization Team |AMD
>
>
> -Original Message-
> From: amd-gfx  On Behalf Of 
> Christian K?nig
> Sent: Thursday, March 12, 2020 7:12 PM
> To: Pan, Xinhui ; amd-gfx@lists.freedesktop.org
> Subject: [PATCH] drm/amdgpu: fix and cleanup amdgpu_gem_object_close
>
> The problem is that we can't add the clear fence to the BO when there is an 
> exclusive fence on it, since we can't guarantee that the clear fence will 
> complete after the exclusive one.
>
> To fix this, refactor the function and wait for any potential exclusive fence 
> with a small timeout before adding the shared clear fence.
>
> Signed-off-by: Christian König 
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 41 +++++++++++++++++++++++++----------------
>   1 file changed, 25 insertions(+), 16 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> index 4277125a79ee..0782162fb5f3 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> @@ -161,10 +161,11 @@ void amdgpu_gem_object_close(struct 
> drm_gem_object *obj,
>   
>   struct amdgpu_bo_list_entry vm_pd;
>   struct list_head list, duplicates;
> + struct dma_fence *fence = NULL;
>   struct ttm_validate_buffer tv;
>   struct ww_acquire_ctx ticket;
>   struct amdgpu_bo_va *bo_va;
> - int r;
> + long r;
>   
>   INIT_LIST_HEAD(&list);
>   INIT_LIST_HEAD(&duplicates);
> @@ -182,24 +183,32 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
>   return;
>   }
>   bo_va = amdgpu_vm_bo_find(vm, bo);
> - if (bo_va && --bo_va->ref_count == 0) {
> - amdgpu_vm_bo_rmv(adev, bo_va);
> + if (!bo_va || --bo_va->ref_count)
> + goto out_unlock;
>   
> - if (amdgpu_vm_ready(vm)) {
> - struct dma_fence *fence = NULL;
> + amdgpu_vm_bo_rmv(adev, bo_va);
> + if (!amdgpu_vm_ready(vm))
> + goto out_unlock;
>   
> -		r = amdgpu_vm_clear_freed(adev, vm, &fence);
> - if (unlikely(r)) {
> - dev_err(adev->dev, "failed to clear page "
> - "tables on GEM object close (%d)\n", r);
> - }
>   
> - if (fence) {
> - amdgpu_bo_fence(bo, fence, true);
> - dma_fence_put(fence);
> - }
> - }
> - }
> +	r = amdgpu_vm_clear_freed(adev, vm, &fence);
> + if (r || !fence)
> + goto out_unlock;
> +
> + r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
> +   msecs_to_jiffies(10));
> + if (r == 0)
> + r = -ETIMEDOUT;
> + if (r)
> + goto out_unlock;
> +
> + amdgpu_bo_fence(bo, fence, true);
> + dma_fence_put(fence);
> +
> +out_unlock:
> + if (unlikely(r))
> + dev_err(adev->dev, "failed to clear page "
> + "tables on GEM object close (%d)\n", r);
>   	ttm_eu_backoff_reservation(&ticket, &list);  }
>   
> --
> 2.17.1
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx

___
amd-gfx mailing list

Re: [PATCH] drm/amdgpu: fix and cleanup amdgpu_gem_object_close

2020-03-12 Thread Christian König
Semantically, the dma_resv object contains a single exclusive fence and 
multiple shared fences, and it is mandatory that the shared fences 
complete after the exclusive one.


Now what happens is that clearing the VM page tables runs asynchronously 
to the exclusive fence which moves the buffer around.


And because of this Xinhui ran into a rare problem that TTM thought it 
could reuse the memory while in reality it was still in use.


Regards,
Christian.

Am 12.03.20 um 12:30 schrieb Liu, Monk:

Can you give more details about "we can't guarantee that the clear fence will 
complete after the exclusive one"?

Thanks

_
Monk Liu|GPU Virtualization Team |AMD


-Original Message-
From: amd-gfx  On Behalf Of Christian 
K?nig
Sent: Thursday, March 12, 2020 7:12 PM
To: Pan, Xinhui ; amd-gfx@lists.freedesktop.org
Subject: [PATCH] drm/amdgpu: fix and cleanup amdgpu_gem_object_close

The problem is that we can't add the clear fence to the BO when there is an 
exclusive fence on it, since we can't guarantee that the clear fence will 
complete after the exclusive one.

To fix this, refactor the function and wait for any potential exclusive fence 
with a small timeout before adding the shared clear fence.

Signed-off-by: Christian König 
---
   drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 41 +++++++++++++++++++++++++----------------
  1 file changed, 25 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 4277125a79ee..0782162fb5f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -161,10 +161,11 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
  
  	struct amdgpu_bo_list_entry vm_pd;

struct list_head list, duplicates;
+   struct dma_fence *fence = NULL;
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct amdgpu_bo_va *bo_va;
-   int r;
+   long r;
  
	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);
@@ -182,24 +183,32 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
return;
}
bo_va = amdgpu_vm_bo_find(vm, bo);
-   if (bo_va && --bo_va->ref_count == 0) {
-   amdgpu_vm_bo_rmv(adev, bo_va);
+   if (!bo_va || --bo_va->ref_count)
+   goto out_unlock;
  
-		if (amdgpu_vm_ready(vm)) {

-   struct dma_fence *fence = NULL;
+   amdgpu_vm_bo_rmv(adev, bo_va);
+   if (!amdgpu_vm_ready(vm))
+   goto out_unlock;
  
-			r = amdgpu_vm_clear_freed(adev, vm, &fence);

-   if (unlikely(r)) {
-   dev_err(adev->dev, "failed to clear page "
-   "tables on GEM object close (%d)\n", r);
-   }
  
-			if (fence) {

-   amdgpu_bo_fence(bo, fence, true);
-   dma_fence_put(fence);
-   }
-   }
-   }
+	r = amdgpu_vm_clear_freed(adev, vm, &fence);
+   if (r || !fence)
+   goto out_unlock;
+
+   r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
+ msecs_to_jiffies(10));
+   if (r == 0)
+   r = -ETIMEDOUT;
+   if (r)
+   goto out_unlock;
+
+   amdgpu_bo_fence(bo, fence, true);
+   dma_fence_put(fence);
+
+out_unlock:
+   if (unlikely(r))
+   dev_err(adev->dev, "failed to clear page "
+   "tables on GEM object close (%d)\n", r);
	ttm_eu_backoff_reservation(&ticket, &list);  }
  
--

2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amd/display: clean up a condition in dmub_psr_copy_settings()

2020-03-12 Thread Dan Carpenter
We can remove the NULL check for "res_ctx" and
"res_ctx->pipe_ctx[i].stream->link".  Also it's nicer to align the
conditions using spaces so I re-indented a bit.

Longer explanation: The "res_ctx" pointer points to an address in the
middle of a struct so it can't be NULL.  For
"res_ctx->pipe_ctx[i].stream->link" we know that it is equal to "link"
and "link" is non-NULL.

Signed-off-by: Dan Carpenter 
---
 drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c 
b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 9c88a92bd96a..bc109d4fc6e6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -134,11 +134,9 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
int i = 0;
 
for (i = 0; i < MAX_PIPES; i++) {
-   if (res_ctx &&
-   res_ctx->pipe_ctx[i].stream &&
-   res_ctx->pipe_ctx[i].stream->link &&
-   res_ctx->pipe_ctx[i].stream->link == link &&
-   res_ctx->pipe_ctx[i].stream->link->connector_signal == 
SIGNAL_TYPE_EDP) {
+   if (res_ctx->pipe_ctx[i].stream &&
+   res_ctx->pipe_ctx[i].stream->link == link &&
+   res_ctx->pipe_ctx[i].stream->link->connector_signal == 
SIGNAL_TYPE_EDP) {
			pipe_ctx = &res_ctx->pipe_ctx[i];
break;
}
-- 
2.20.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH] drm/amdgpu: fix and cleanup amdgpu_gem_object_close

2020-03-12 Thread Liu, Monk
Can you give more details about "we can't guarantee that the clear fence will 
complete after the exclusive one"?

Thanks 

_
Monk Liu|GPU Virtualization Team |AMD


-Original Message-
From: amd-gfx  On Behalf Of Christian 
K?nig
Sent: Thursday, March 12, 2020 7:12 PM
To: Pan, Xinhui ; amd-gfx@lists.freedesktop.org
Subject: [PATCH] drm/amdgpu: fix and cleanup amdgpu_gem_object_close

The problem is that we can't add the clear fence to the BO when there is an 
exclusive fence on it, since we can't guarantee that the clear fence will 
complete after the exclusive one.

To fix this, refactor the function and wait for any potential exclusive fence 
with a small timeout before adding the shared clear fence.

Signed-off-by: Christian König 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 41 +++++++++++++++++++++++++----------------
 1 file changed, 25 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 4277125a79ee..0782162fb5f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -161,10 +161,11 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 
struct amdgpu_bo_list_entry vm_pd;
struct list_head list, duplicates;
+   struct dma_fence *fence = NULL;
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct amdgpu_bo_va *bo_va;
-   int r;
+   long r;
 
	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);
@@ -182,24 +183,32 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
return;
}
bo_va = amdgpu_vm_bo_find(vm, bo);
-   if (bo_va && --bo_va->ref_count == 0) {
-   amdgpu_vm_bo_rmv(adev, bo_va);
+   if (!bo_va || --bo_va->ref_count)
+   goto out_unlock;
 
-   if (amdgpu_vm_ready(vm)) {
-   struct dma_fence *fence = NULL;
+   amdgpu_vm_bo_rmv(adev, bo_va);
+   if (!amdgpu_vm_ready(vm))
+   goto out_unlock;
 
-		r = amdgpu_vm_clear_freed(adev, vm, &fence);
-   if (unlikely(r)) {
-   dev_err(adev->dev, "failed to clear page "
-   "tables on GEM object close (%d)\n", r);
-   }
 
-   if (fence) {
-   amdgpu_bo_fence(bo, fence, true);
-   dma_fence_put(fence);
-   }
-   }
-   }
+	r = amdgpu_vm_clear_freed(adev, vm, &fence);
+   if (r || !fence)
+   goto out_unlock;
+
+   r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
+ msecs_to_jiffies(10));
+   if (r == 0)
+   r = -ETIMEDOUT;
+   if (r)
+   goto out_unlock;
+
+   amdgpu_bo_fence(bo, fence, true);
+   dma_fence_put(fence);
+
+out_unlock:
+   if (unlikely(r))
+   dev_err(adev->dev, "failed to clear page "
+   "tables on GEM object close (%d)\n", r);
	ttm_eu_backoff_reservation(&ticket, &list);  }
 
--
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amdgpu: fix and cleanup amdgpu_gem_object_close

2020-03-12 Thread Christian König
The problem is that we can't add the clear fence to the BO
when there is an exclusive fence on it, since we can't
guarantee that the clear fence will complete after the
exclusive one.

To fix this, refactor the function and wait for any potential
exclusive fence with a small timeout before adding the
shared clear fence.

Signed-off-by: Christian König 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 41 +++++++++++++++++++++++++----------------
 1 file changed, 25 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 4277125a79ee..0782162fb5f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -161,10 +161,11 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 
struct amdgpu_bo_list_entry vm_pd;
struct list_head list, duplicates;
+   struct dma_fence *fence = NULL;
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct amdgpu_bo_va *bo_va;
-   int r;
+   long r;
 
	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);
@@ -182,24 +183,32 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
return;
}
bo_va = amdgpu_vm_bo_find(vm, bo);
-   if (bo_va && --bo_va->ref_count == 0) {
-   amdgpu_vm_bo_rmv(adev, bo_va);
+   if (!bo_va || --bo_va->ref_count)
+   goto out_unlock;
 
-   if (amdgpu_vm_ready(vm)) {
-   struct dma_fence *fence = NULL;
+   amdgpu_vm_bo_rmv(adev, bo_va);
+   if (!amdgpu_vm_ready(vm))
+   goto out_unlock;
 
-			r = amdgpu_vm_clear_freed(adev, vm, &fence);
-   if (unlikely(r)) {
-   dev_err(adev->dev, "failed to clear page "
-   "tables on GEM object close (%d)\n", r);
-   }
 
-   if (fence) {
-   amdgpu_bo_fence(bo, fence, true);
-   dma_fence_put(fence);
-   }
-   }
-   }
+	r = amdgpu_vm_clear_freed(adev, vm, &fence);
+   if (r || !fence)
+   goto out_unlock;
+
+   r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
+ msecs_to_jiffies(10));
+   if (r == 0)
+   r = -ETIMEDOUT;
+   if (r)
+   goto out_unlock;
+
+   amdgpu_bo_fence(bo, fence, true);
+   dma_fence_put(fence);
+
+out_unlock:
+   if (unlikely(r))
+   dev_err(adev->dev, "failed to clear page "
+   "tables on GEM object close (%d)\n", r);
	ttm_eu_backoff_reservation(&ticket, &list);
 }
 
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH] drm/amdgpu: fix warning in ras_debugfs_create_all()

2020-03-12 Thread Zhang, Hawking
[AMD Official Use Only - Internal Distribution Only]

Reviewed-by: Hawking Zhang 

Regards,
Hawking
-Original Message-
From: Stanley.Yang  
Sent: Thursday, March 12, 2020 18:43
To: amd-gfx@lists.freedesktop.org
Cc: Zhang, Hawking ; Chen, Guchun ; 
Li, Dennis ; Clements, John ; Zhou1, 
Tao ; Yang, Stanley 
Subject: [PATCH] drm/amdgpu: fix warning in ras_debugfs_create_all()

Fix the warning
"warn: variable dereferenced before check 'obj' (see line 1131)"
by removing the unnecessary check, as amdgpu_ras_debugfs_create_all() is only
called from amdgpu_debugfs_init(), where the obj members in the
con->head list are not NULL.
Use list_for_each_entry() instead of list_for_each_entry_safe(), as obj does
not need to be freed or removed from the list during this process.

Signed-off-by: Stanley.Yang 
Change-Id: I33d68d5c0b9db2744732f4db924600afd99f956c
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 7 ++-
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index ce8548d5fbf3..ae2d569ac678 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1116,7 +1116,7 @@ void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
 void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
 {
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
-   struct ras_manager *obj, *tmp;
+   struct ras_manager *obj;
struct ras_fs_if fs_info;
 
/*
@@ -1128,10 +1128,7 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device 
*adev)
 
amdgpu_ras_debugfs_create_ctrl_node(adev);
 
-   list_for_each_entry_safe(obj, tmp, &con->head, node) {
-   if (!obj)
-   continue;
-
+   list_for_each_entry(obj, &con->head, node) {
if (amdgpu_ras_is_supported(adev, obj->head.block) &&
(obj->attr_inuse == 1)) {
sprintf(fs_info.debugfs_name, "%s_err_inject",
--
2.17.1
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amdgpu: fix warning in ras_debugfs_create_all()

2020-03-12 Thread Stanley . Yang
Fix the warning
"warn: variable dereferenced before check 'obj' (see line 1131)"
by removing the unnecessary check, as amdgpu_ras_debugfs_create_all()
is only called from amdgpu_debugfs_init(), where the obj members in the
con->head list are not NULL.
Use list_for_each_entry() instead of list_for_each_entry_safe(), as obj
does not need to be freed or removed from the list during this process.

Signed-off-by: Stanley.Yang 
Change-Id: I33d68d5c0b9db2744732f4db924600afd99f956c
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 7 ++-
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index ce8548d5fbf3..ae2d569ac678 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1116,7 +1116,7 @@ void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
 void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
 {
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
-   struct ras_manager *obj, *tmp;
+   struct ras_manager *obj;
struct ras_fs_if fs_info;
 
/*
@@ -1128,10 +1128,7 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device 
*adev)
 
amdgpu_ras_debugfs_create_ctrl_node(adev);
 
-   list_for_each_entry_safe(obj, tmp, &con->head, node) {
-   if (!obj)
-   continue;
-
+   list_for_each_entry(obj, &con->head, node) {
if (amdgpu_ras_is_supported(adev, obj->head.block) &&
(obj->attr_inuse == 1)) {
sprintf(fs_info.debugfs_name, "%s_err_inject",
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amdgpu/swsmu: clean up unused header in swsmu

2020-03-12 Thread Kevin Wang
clean up unused headers in the swsmu driver stack:
1. pp_debug.h
2. amd_pcie.h
3. soc15_common.h

Signed-off-by: Kevin Wang 
---
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c   | 3 ---
 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 1 -
 drivers/gpu/drm/amd/powerplay/navi10_ppt.c   | 2 --
 drivers/gpu/drm/amd/powerplay/renoir_ppt.c   | 1 -
 drivers/gpu/drm/amd/powerplay/smu_v11_0.c| 1 -
 drivers/gpu/drm/amd/powerplay/smu_v12_0.c| 1 -
 drivers/gpu/drm/amd/powerplay/vega20_ppt.c   | 1 -
 7 files changed, 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index f18e3fadbc26..8de8436f0839 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -23,15 +23,12 @@
 #include 
 #include 
 
-#include "pp_debug.h"
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
 #include "smu_internal.h"
-#include "soc15_common.h"
 #include "smu_v11_0.h"
 #include "smu_v12_0.h"
 #include "atom.h"
-#include "amd_pcie.h"
 #include "vega20_ppt.h"
 #include "arcturus_ppt.h"
 #include "navi10_ppt.h"
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c 
b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index cc4427ebf169..61596e8d522c 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -21,7 +21,6 @@
  *
  */
 
-#include "pp_debug.h"
 #include 
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c 
b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 6e41f3c9ff1b..d66dfa7410b6 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -21,7 +21,6 @@
  *
  */
 
-#include "pp_debug.h"
 #include 
 #include 
 #include "amdgpu.h"
@@ -31,7 +30,6 @@
 #include "amdgpu_atomfirmware.h"
 #include "smu_v11_0.h"
 #include "smu11_driver_if_navi10.h"
-#include "soc15_common.h"
 #include "atom.h"
 #include "navi10_ppt.h"
 #include "smu_v11_0_pptable.h"
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c 
b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 653faadaafb3..7bf52ecba01d 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -24,7 +24,6 @@
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
 #include "smu_internal.h"
-#include "soc15_common.h"
 #include "smu_v12_0_ppsmc.h"
 #include "smu12_driver_if.h"
 #include "smu_v12_0.h"
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c 
b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index 3a5d00573d2c..4fd77c7cfc80 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -26,7 +26,6 @@
 
 #define SMU_11_0_PARTIAL_PPTABLE
 
-#include "pp_debug.h"
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
 #include "smu_internal.h"
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c 
b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
index d52e624f16d3..169ebdad87b8 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
@@ -20,7 +20,6 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#include "pp_debug.h"
 #include 
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c 
b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index d7fa8c02c166..49ff3756bd9f 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -21,7 +21,6 @@
  *
  */
 
-#include "pp_debug.h"
 #include 
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [bug report] drm/amdgpu: add function to creat all ras debugfs node

2020-03-12 Thread Yang, Stanley
[AMD Official Use Only - Internal Distribution Only]

Hi Carpenter,

Thanks for your report and advice, I will update the code.

Regards,
Stanley

-Original Message-
From: amd-gfx  On Behalf Of Dan Carpenter
Sent: Thursday, March 12, 2020 3:34 PM
To: Zhou1, Tao 
Cc: amd-gfx@lists.freedesktop.org
Subject: [bug report] drm/amdgpu: add function to creat all ras debugfs node

Hello Tao Zhou,

The patch f9317014ea51: "drm/amdgpu: add function to creat all ras debugfs 
node" from Mar 6, 2020, leads to the following static checker
warning:

drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c:1132 
amdgpu_ras_debugfs_create_all()
warn: variable dereferenced before check 'obj' (see line 1131)

drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
  1116  void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
  1117  {
  1118  struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
  1119  struct ras_manager *obj, *tmp;
  1120  struct ras_fs_if fs_info;
  1121  
  1122  /*
  1123   * it won't be called in resume path, no need to check
  1124   * suspend and gpu reset status
  1125   */
  1126  if (!con)
  1127  return;
  1128  
  1129  amdgpu_ras_debugfs_create_ctrl_node(adev);
  1130  
  1131  list_for_each_entry_safe(obj, tmp, &con->head, node) {
  1132  if (!obj)

There is no need to check for NULL here, so just remove the check.  The other 
question is why is this using list_for_each_entry_safe() instead of vanilla 
list_for_each_entry()?  It doesn't seem to be freeing "obj"
or removing "obj" from the list which are basically the only reasons why 
_safe() is used.  Some people think _safe() has something to do with locking 
but it doesn't.

Please remove the test and use vanilla list_for_each_entry().

  1133  continue;
  1134  
  1135  if (amdgpu_ras_is_supported(adev, obj->head.block) &&
  1136  (obj->attr_inuse == 1)) {
  1137  sprintf(fs_info.debugfs_name, "%s_err_inject",
  1138  ras_block_str(obj->head.block));
  1139  fs_info.head = obj->head;
  1140  amdgpu_ras_debugfs_create(adev, &fs_info);
  1141  }
  1142  }
  1143  }

regards,
dan carpenter
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH hmm 3/8] mm/hmm: do not call hmm_vma_walk_hole() while holding a spinlock

2020-03-12 Thread Steven Price

On 11/03/2020 18:35, Jason Gunthorpe wrote:

From: Jason Gunthorpe 

This eventually calls into handle_mm_fault() which is a sleeping function.
Release the lock first.

hmm_vma_walk_hole() does not touch the contents of the PUD, so it does not
need the lock.

Fixes: 3afc423632a1 ("mm: pagewalk: add p4d_entry() and pgd_entry()")
Cc: Steven Price 
Signed-off-by: Jason Gunthorpe 


Sorry about that, thanks for fixing.

Reviewed-by: Steven Price 


---
  mm/hmm.c | 14 +++---
  1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/mm/hmm.c b/mm/hmm.c
index 9e8f68eb83287a..32dcbfd3908315 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -473,8 +473,8 @@ static int hmm_vma_walk_pud(pud_t *pudp, unsigned long 
start, unsigned long end,
  
  	pud = READ_ONCE(*pudp);

if (pud_none(pud)) {
-   ret = hmm_vma_walk_hole(start, end, -1, walk);
-   goto out_unlock;
+   spin_unlock(ptl);
+   return hmm_vma_walk_hole(start, end, -1, walk);
}
  
  	if (pud_huge(pud) && pud_devmap(pud)) {

@@ -483,8 +483,8 @@ static int hmm_vma_walk_pud(pud_t *pudp, unsigned long 
start, unsigned long end,
bool fault, write_fault;
  
  		if (!pud_present(pud)) {

-   ret = hmm_vma_walk_hole(start, end, -1, walk);
-   goto out_unlock;
+   spin_unlock(ptl);
+   return hmm_vma_walk_hole(start, end, -1, walk);
}
  
  		i = (addr - range->start) >> PAGE_SHIFT;

@@ -495,9 +495,9 @@ static int hmm_vma_walk_pud(pud_t *pudp, unsigned long 
start, unsigned long end,
hmm_range_need_fault(hmm_vma_walk, pfns, npages,
 cpu_flags, &fault, &write_fault);
if (fault || write_fault) {
-   ret = hmm_vma_walk_hole_(addr, end, fault,
-write_fault, walk);
-   goto out_unlock;
+   spin_unlock(ptl);
+   return hmm_vma_walk_hole_(addr, end, fault, write_fault,
+ walk);
}
  
  		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);




___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 1/1] drm/amdgpu: disable gpu_sched load balancer for vcn jobs

2020-03-12 Thread Christian König

On 11.03.20 at 21:55, Nirmoy wrote:


On 3/11/20 9:35 PM, Andrey Grodzovsky wrote:


On 3/11/20 4:32 PM, Nirmoy wrote:


On 3/11/20 9:02 PM, Andrey Grodzovsky wrote:


On 3/11/20 4:00 PM, Andrey Grodzovsky wrote:


On 3/11/20 4:00 PM, Nirmoy Das wrote:

[SNIP]
@@ -1257,6 +1258,9 @@ static int amdgpu_cs_submit(struct 
amdgpu_cs_parser *p,

  priority = job->base.s_priority;
   drm_sched_entity_push_job(&job->base, entity);
  +    if (ring->funcs->no_gpu_sched_loadbalance)
+ amdgpu_ctx_disable_gpu_sched_load_balance(entity);
+



Why does this need to be done each time a job is submitted and not
once in drm_sched_entity_init (same for amdgpu_job_submit below)?


Andrey



My bad - not in drm_sched_entity_init but in the relevant amdgpu code.



Hi Andrey,

Do you mean drm_sched_job_init() or after creating VCN entities?


Nirmoy



I guess after creating the VCN entities (has to be amdgpu-specific 
code) - I just don't get why it needs to be done each time a job is 
submitted. I mean, since you set .no_gpu_sched_loadbalance = true 
anyway, this is always true, so shouldn't you just initialize the 
VCN entity with a scheduler list consisting of one scheduler and 
that's it?



Assumption: if I understand correctly, we shouldn't be doing load 
balancing among VCN jobs in the same context. Christian, James and Leo 
can clarify if I am wrong.


But we can still do load balancing of VCN jobs among multiple contexts. 
That load-balance decision happens in drm_sched_entity_init(). If we 
initialize the VCN entity with one scheduler, then all entities, 
irrespective of context, get that one scheduler, which means we are not 
utilizing the extra VCN instances.


Andrey has a very good point here. So far we have only looked at this 
from the hardware-requirement side: we can't change the ring after the 
first submission any more.


But it is certainly valuable to keep the extra overhead out of the hot 
path during command submission.


Ideally we should be calling 
amdgpu_ctx_disable_gpu_sched_load_balance() only once, after the first 
drm_sched_entity_init() call for a VCN job. I am not sure how to do 
that efficiently.


Another option might be to copy the logic of 
drm_sched_entity_get_free_sched() and choose a suitable VCN scheduler 
at/after VCN entity creation.


Yes, but we should not copy the logic but rather refactor it :)

Basically we need a drm_sched_pick_best() function which gets an array 
of drm_gpu_scheduler structures and returns the one with the least load 
on it.


This function can then be used by VCN to pick one instance before 
initializing the entity, as well as serve as a replacement for 
drm_sched_entity_get_free_sched() when changing the scheduler for load 
balancing.
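
Something along these lines, as a rough, untested sketch - 
drm_sched_pick_best() does not exist yet, and this assumes the 
per-scheduler num_jobs counter that drm_sched_entity_get_free_sched() 
already reads:

#include <drm/gpu_scheduler.h>
#include <linux/atomic.h>
#include <linux/limits.h>

/* Hypothetical helper: return the ready scheduler with the least load. */
static struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		    unsigned int num_sched_list)
{
	struct drm_gpu_scheduler *sched, *picked = NULL;
	unsigned int min_jobs = UINT_MAX, num_jobs;
	unsigned int i;

	for (i = 0; i < num_sched_list; i++) {
		sched = sched_list[i];
		if (!sched->ready)
			continue;

		/* num_jobs is what the current load balancer compares. */
		num_jobs = atomic_read(&sched->num_jobs);
		if (num_jobs < min_jobs) {
			min_jobs = num_jobs;
			picked = sched;
		}
	}

	return picked;
}

VCN could then call it once at entity-creation time and hand 
drm_sched_entity_init() a single-element scheduler list.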


Regards,
Christian.




Regards,

Nirmoy



___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm: amd/acp: fix broken menu structure

2020-03-12 Thread Randy Dunlap
From: Randy Dunlap 

Fix the Kconfig dependencies so that the menu is presented
correctly by adding a dependency on DRM_AMDGPU to the "menu"
Kconfig statement.  This makes a continuous dependency on
DRM_AMDGPU in the DRM AMD menus and eliminates a broken menu
structure.

Fixes: a8fe58cec351 ("drm/amd: add ACP driver support")
Signed-off-by: Randy Dunlap 
Cc: Alex Deucher 
Cc: Christian König 
Cc: David (ChunMing) Zhou 
Cc: Maruthi Bayyavarapu 
Cc: amd-gfx@lists.freedesktop.org
---
 drivers/gpu/drm/amd/acp/Kconfig |1 +
 1 file changed, 1 insertion(+)

--- linux-next.orig/drivers/gpu/drm/amd/acp/Kconfig
+++ linux-next/drivers/gpu/drm/amd/acp/Kconfig
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: MIT
 menu "ACP (Audio CoProcessor) Configuration"
+   depends on DRM_AMDGPU
 
 config DRM_AMD_ACP
bool "Enable AMD Audio CoProcessor IP support"

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[bug report] drm/amdgpu: add function to creat all ras debugfs node

2020-03-12 Thread Dan Carpenter
Hello Tao Zhou,

The patch f9317014ea51: "drm/amdgpu: add function to creat all ras
debugfs node" from Mar 6, 2020, leads to the following static checker
warning:

drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c:1132 
amdgpu_ras_debugfs_create_all()
warn: variable dereferenced before check 'obj' (see line 1131)

drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
  1116  void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
  1117  {
  1118  struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
  1119  struct ras_manager *obj, *tmp;
  1120  struct ras_fs_if fs_info;
  1121  
  1122  /*
  1123   * it won't be called in resume path, no need to check
  1124   * suspend and gpu reset status
  1125   */
  1126  if (!con)
  1127  return;
  1128  
  1129  amdgpu_ras_debugfs_create_ctrl_node(adev);
  1130  
  1131  list_for_each_entry_safe(obj, tmp, &con->head, node) {
  1132  if (!obj)

There is no need to check for NULL here, so just remove the check.  The
other question is why is this using list_for_each_entry_safe() instead
of vanilla list_for_each_entry()?  It doesn't seem to be freeing "obj"
or removing "obj" from the list which are basically the only reasons
why _safe() is used.  Some people think _safe() has something to do with
locking but it doesn't.

Please remove the test and use vanilla list_for_each_entry().

  1133  continue;
  1134  
  1135  if (amdgpu_ras_is_supported(adev, obj->head.block) &&
  1136  (obj->attr_inuse == 1)) {
  1137  sprintf(fs_info.debugfs_name, "%s_err_inject",
  1138  ras_block_str(obj->head.block));
  1139  fs_info.head = obj->head;
  1140  amdgpu_ras_debugfs_create(adev, &fs_info);
  1141  }
  1142  }
  1143  }
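
To make the distinction concrete, a minimal self-contained sketch (the
item type and helpers below are made up for illustration; they are not
amdgpu code):

#include <linux/list.h>
#include <linux/slab.h>

struct item {			/* stand-in for struct ras_manager */
	struct list_head node;
};

/* Read-only walk: plain list_for_each_entry() is enough, and obj can
 * never be NULL inside the loop, so checking it is dead code. */
static int count_items(struct list_head *head)
{
	struct item *obj;
	int n = 0;

	list_for_each_entry(obj, head, node)
		n++;
	return n;
}

/* Entries are unlinked and freed mid-walk, so the _safe() variant is
 * required: it caches the next pointer before the loop body runs. */
static void drain_items(struct list_head *head)
{
	struct item *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, head, node) {
		list_del(&obj->node);
		kfree(obj);
	}
}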

regards,
dan carpenter
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH] drm/amdgpu: update ras capability's query based on mem ecc configuration

2020-03-12 Thread Zhang, Hawking
[AMD Official Use Only - Internal Distribution Only]

+   if (!r) {
+   DRM_INFO("SRAM ECC is not present.\n");
+   } else {
+   DRM_INFO("SRAM ECC is active.\n");
}
The {} braces are not needed. With that fixed, the patch is

Reviewed-by: Hawking Zhang 

Regards,
Hawking

-Original Message-
From: Chen, Guchun  
Sent: Thursday, March 12, 2020 12:03
To: amd-gfx@lists.freedesktop.org; Zhang, Hawking ; Li, 
Dennis ; Zhou1, Tao ; Clements, John 

Cc: Chen, Guchun 
Subject: [PATCH] drm/amdgpu: update ras capability's query based on mem ecc 
configuration

RAS support capability needs to be updated on top of the different memory ECC 
enablement options, and the redundant memory ECC check in the gmc module can be 
removed for vega20 and arcturus.

v2: check HBM ECC enablement and set ras mask accordingly.
v3: avoid invoking the atomfirmware interface to query twice.
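
Condensed, the resulting mask logic looks like this (the block indices
and count below are stand-ins for the AMDGPU_RAS_BLOCK__* enums, and the
two flags stand in for the atomfirmware queries):

#include <stdint.h>

enum { BLOCK_UMC = 0, BLOCK_DF = 8, BLOCK_COUNT = 14 };	/* stand-ins */
#define BLOCK_MASK ((1u << BLOCK_COUNT) - 1)

static uint32_t ras_hw_supported(int hbm_ecc, int sram_ecc)
{
	const uint32_t hbm_bits = 1u << BLOCK_UMC | 1u << BLOCK_DF;
	uint32_t hw = 0;

	if (hbm_ecc)		/* HBM ECC covers the UMC and DF blocks */
		hw |= hbm_bits;
	if (sram_ecc)		/* SRAM ECC covers every other block */
		hw |= ~hbm_bits;

	return hw & BLOCK_MASK;	/* clamp to the defined RAS blocks */
}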

Suggested-by: Hawking Zhang 
Signed-off-by: Guchun Chen 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 24 -
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c   | 36 ++---
 2 files changed, 32 insertions(+), 28 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 69b02b9d4131..38782add479a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1738,18 +1738,30 @@ static void amdgpu_ras_check_supported(struct 
amdgpu_device *adev,
*hw_supported = 0;
*supported = 0;
 
-   if (amdgpu_sriov_vf(adev) ||
+   if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw ||
(adev->asic_type != CHIP_VEGA20 &&
 adev->asic_type != CHIP_ARCTURUS))
return;
 
-   if (adev->is_atom_fw &&
-   (amdgpu_atomfirmware_mem_ecc_supported(adev) ||
-amdgpu_atomfirmware_sram_ecc_supported(adev)))
-   *hw_supported = AMDGPU_RAS_BLOCK_MASK;
+   if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
+   DRM_INFO("HBM ECC is active.\n");
+   *hw_supported |= (1 << AMDGPU_RAS_BLOCK__UMC |
+   1 << AMDGPU_RAS_BLOCK__DF);
+   } else
+   DRM_INFO("HBM ECC is not presented.\n");
+
+   if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
+   DRM_INFO("SRAM ECC is active.\n");
+   *hw_supported |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
+   1 << AMDGPU_RAS_BLOCK__DF);
+   } else
+   DRM_INFO("SRAM ECC is not presented.\n");
+
+   /* hw_supported needs to be aligned with RAS block mask. */
+   *hw_supported &= AMDGPU_RAS_BLOCK_MASK;
 
*supported = amdgpu_ras_enable == 0 ?
-   0 : *hw_supported & amdgpu_ras_mask;
+   0 : *hw_supported & amdgpu_ras_mask;
 }
 
 int amdgpu_ras_init(struct amdgpu_device *adev)

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 90216abf14a4..3cc886e96420 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -886,29 +886,21 @@ static int gmc_v9_0_late_init(void *handle)
if (r)
return r;
/* Check if ecc is available */
-   if (!amdgpu_sriov_vf(adev)) {
-   switch (adev->asic_type) {
-   case CHIP_VEGA10:
-   case CHIP_VEGA20:
-   case CHIP_ARCTURUS:
-   r = amdgpu_atomfirmware_mem_ecc_supported(adev);
-   if (!r) {
-   DRM_INFO("ECC is not present.\n");
-   if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
-   
adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
-   } else {
-   DRM_INFO("ECC is active.\n");
-   }
+   if (!amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_VEGA10)) {
+   r = amdgpu_atomfirmware_mem_ecc_supported(adev);
+   if (!r) {
+   DRM_INFO("ECC is not present.\n");
+   if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
+   
adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
+   } else {
+   DRM_INFO("ECC is active.\n");
+   }
 
-   r = amdgpu_atomfirmware_sram_ecc_supported(adev);
-   if (!r) {
-   DRM_INFO("SRAM ECC is not present.\n");
-   } else {
-   DRM_INFO("SRAM ECC is active.\n");
-   }
-   break;
-   default:
-   break;
+   r = amdgpu_atomfirmware_sram_ecc_supported(adev);
+   if (!r) {
+   DRM_INFO("SRAM ECC is