Re: [PATCH] drm/amdgpu: add another ATPX quirk for TOPAZ

2018-07-17 Thread Zhang, Jerry (Junwei)

On 07/18/2018 03:38 AM, Alex Deucher wrote:

Needs ATPX rather than _PR3.

Bug: https://bugzilla.kernel.org/show_bug.cgi?id=200517
Signed-off-by: Alex Deucher 
Cc: sta...@vger.kernel.org

Reviewed-by: Junwei Zhang 


---
  drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 1 +
  1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index b33f1680c9a3..a028661d9e20 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -575,6 +575,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] 
= {
{ 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
+   { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0, 0, 0, 0, 0 },
  };





Re: [PATCH] drm/amdgpu/display: Replace CONFIG_DRM_AMD_DC_DCN1_0 with CONFIG_X86

2018-07-17 Thread Harry Wentland
On 2018-07-17 06:39 AM, Michel Dänzer wrote:
> From: Michel Dänzer 
> 
> Allowing CONFIG_DRM_AMD_DC_DCN1_0 to be disabled on X86 made it possible
> for display with Raven Ridge to accidentally end up not working.
> 
> Signed-off-by: Michel Dänzer 

Reviewed-by: Harry Wentland 

Harry

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c|  2 +-
>  drivers/gpu/drm/amd/display/Kconfig   |  8 
>  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |  8 
>  drivers/gpu/drm/amd/display/dc/Makefile   |  2 +-
>  .../display/dc/bios/command_table_helper2.c   |  2 +-
>  drivers/gpu/drm/amd/display/dc/calcs/Makefile |  2 +-
>  drivers/gpu/drm/amd/display/dc/core/dc.c  |  6 +++---
>  .../gpu/drm/amd/display/dc/core/dc_debug.c|  2 +-
>  .../gpu/drm/amd/display/dc/core/dc_resource.c | 12 +--
>  drivers/gpu/drm/amd/display/dc/dc.h   |  2 +-
>  .../drm/amd/display/dc/dce/dce_clock_source.c |  6 +++---
>  .../drm/amd/display/dc/dce/dce_clock_source.h |  2 +-
>  .../gpu/drm/amd/display/dc/dce/dce_clocks.c   |  8 
>  .../gpu/drm/amd/display/dc/dce/dce_clocks.h   |  2 +-
>  drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c |  6 +++---
>  .../amd/display/dc/dce/dce_stream_encoder.c   | 20 +--
>  .../display/dc/dce110/dce110_hw_sequencer.c   |  2 +-
>  drivers/gpu/drm/amd/display/dc/gpio/Makefile  |  2 +-
>  .../gpu/drm/amd/display/dc/gpio/hw_factory.c  |  4 ++--
>  .../drm/amd/display/dc/gpio/hw_translate.c|  4 ++--
>  .../gpu/drm/amd/display/dc/i2caux/Makefile|  2 +-
>  .../gpu/drm/amd/display/dc/i2caux/i2caux.c|  4 ++--
>  .../gpu/drm/amd/display/dc/inc/core_types.h   |  6 +++---
>  drivers/gpu/drm/amd/display/dc/irq/Makefile   |  2 +-
>  .../gpu/drm/amd/display/dc/irq/irq_service.c  |  2 +-
>  drivers/gpu/drm/amd/display/dc/os_types.h |  2 +-
>  26 files changed, 56 insertions(+), 64 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index 709e4a386a0e..fb8c72851dfb 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -2200,7 +2200,7 @@ bool amdgpu_device_asic_has_dc_support(enum 
> amd_asic_type asic_type)
>   case CHIP_VEGA10:
>   case CHIP_VEGA12:
>   case CHIP_VEGA20:
> -#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
> +#ifdef CONFIG_X86
>   case CHIP_RAVEN:
>  #endif
>   return amdgpu_dc != 0;
> diff --git a/drivers/gpu/drm/amd/display/Kconfig 
> b/drivers/gpu/drm/amd/display/Kconfig
> index 4c35625eb2c7..325083b0297e 100644
> --- a/drivers/gpu/drm/amd/display/Kconfig
> +++ b/drivers/gpu/drm/amd/display/Kconfig
> @@ -9,14 +9,6 @@ config DRM_AMD_DC
> support for AMDGPU. This adds required support for Vega and
> Raven ASICs.
>  
> -config DRM_AMD_DC_DCN1_0
> - bool "DCN 1.0 Raven family"
> - depends on DRM_AMD_DC && X86
> - default y
> - help
> -   Choose this option if you want to have
> -   RV family for display engine
> -
>  config DEBUG_KERNEL_DC
>   bool "Enable kgdb break in DC"
>   depends on DRM_AMD_DC
> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
> b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> index 952691c6f81e..8e3ebd988043 100644
> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> @@ -58,7 +58,7 @@
>  #include 
>  #include 
>  
> -#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
> +#ifdef CONFIG_X86
>  #include "ivsrcid/irqsrcs_dcn_1_0.h"
>  
>  #include "dcn/dcn_1_0_offset.h"
> @@ -1188,7 +1188,7 @@ static int dce110_register_irq_handlers(struct 
> amdgpu_device *adev)
>   return 0;
>  }
>  
> -#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
> +#ifdef CONFIG_X86
>  /* Register IRQ sources and initialize IRQ callbacks */
>  static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
>  {
> @@ -1522,7 +1522,7 @@ static int amdgpu_dm_initialize_drm_device(struct 
> amdgpu_device *adev)
>   goto fail;
>   }
>   break;
> -#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
> +#ifdef CONFIG_X86
>   case CHIP_RAVEN:
>   if (dcn10_register_irq_handlers(dm->adev)) {
>   DRM_ERROR("DM: Failed to initialize IRQ\n");
> @@ -1767,7 +1767,7 @@ static int dm_early_init(void *handle)
>   adev->mode_info.num_dig = 6;
>   adev->mode_info.plane_type = dm_plane_type_default;
>   break;
> -#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
> +#ifdef CONFIG_X86
>   case CHIP_RAVEN:
>   adev->mode_info.num_crtc = 4;
>   adev->mode_info.num_hpd = 4;
> diff --git a/drivers/gpu/drm/amd/display/dc/Makefile 
> b/drivers/gpu/drm/amd/display/dc/Makefile
> index aed538a4d1ba..532a515fda9a 100644
> --- a/drivers/gpu/drm/amd/display/dc/Makefile
> +++ b/drivers/gpu/drm/amd/display/dc/Makefile
> @@ -25,7 +25,7 @@
>  
>  DC_LIBS = basics bios calcs dce gpio i2caux irq 

Re: [PATCH v2] drm/amdgpu: remove internal/unused kernel module parameters

2018-07-17 Thread Alex Deucher
On Tue, Jul 17, 2018 at 4:12 PM, Sonny Jiang  wrote:
> Signed-off-by: Sonny Jiang 

Please add a basic commit message.  With that:
Reviewed-by: Alex Deucher 

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu.h |  3 ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 12 
>  2 files changed, 15 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index d852d11..a9f09da 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -106,11 +106,8 @@ extern int amdgpu_vm_fault_stop;
>  extern int amdgpu_vm_debug;
>  extern int amdgpu_vm_update_mode;
>  extern int amdgpu_dc;
> -extern int amdgpu_dc_log;
>  extern int amdgpu_sched_jobs;
>  extern int amdgpu_sched_hw_submission;
> -extern int amdgpu_no_evict;
> -extern int amdgpu_direct_gma_size;
>  extern uint amdgpu_pcie_gen_cap;
>  extern uint amdgpu_pcie_lane_cap;
>  extern uint amdgpu_cg_mask;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> index 529500c..8843a06 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> @@ -104,11 +104,8 @@ int amdgpu_vram_page_split = 512;
>  int amdgpu_vm_update_mode = -1;
>  int amdgpu_exp_hw_support = 0;
>  int amdgpu_dc = -1;
> -int amdgpu_dc_log = 0;
>  int amdgpu_sched_jobs = 32;
>  int amdgpu_sched_hw_submission = 2;
> -int amdgpu_no_evict = 0;
> -int amdgpu_direct_gma_size = 0;
>  uint amdgpu_pcie_gen_cap = 0;
>  uint amdgpu_pcie_lane_cap = 0;
>  uint amdgpu_cg_mask = 0x;
> @@ -341,9 +338,6 @@ module_param_named(exp_hw_support, amdgpu_exp_hw_support, 
> int, 0444);
>  MODULE_PARM_DESC(dc, "Display Core driver (1 = enable, 0 = disable, -1 = 
> auto (default))");
>  module_param_named(dc, amdgpu_dc, int, 0444);
>
> -MODULE_PARM_DESC(dc_log, "Display Core Log Level (0 = minimal (default), 1 = 
> chatty");
> -module_param_named(dc_log, amdgpu_dc_log, int, 0444);
> -
>  /**
>   * DOC: sched_jobs (int)
>   * Override the max number of jobs supported in the sw queue. The default is 
> 32.
> @@ -366,12 +360,6 @@ module_param_named(sched_hw_submission, 
> amdgpu_sched_hw_submission, int, 0444);
>  MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))");
>  module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, uint, 0444);
>
> -MODULE_PARM_DESC(no_evict, "Support pinning request from user space (1 = 
> enable, 0 = disable (default))");
> -module_param_named(no_evict, amdgpu_no_evict, int, 0444);
> -
> -MODULE_PARM_DESC(direct_gma_size, "Direct GMA size in megabytes (max 96MB)");
> -module_param_named(direct_gma_size, amdgpu_direct_gma_size, int, 0444);
> -
>  /**
>   * DOC: pcie_gen_cap (uint)
>   * Override PCIE gen speed capabilities. See the CAIL flags in 
> drivers/gpu/drm/amd/include/amd_pcie.h.
> --
> 2.7.4
>


[PATCH v2] drm/amdgpu: remove internal/unused kernel module parameters

2018-07-17 Thread Sonny Jiang
Signed-off-by: Sonny Jiang 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h |  3 ---
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 12 
 2 files changed, 15 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index d852d11..a9f09da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -106,11 +106,8 @@ extern int amdgpu_vm_fault_stop;
 extern int amdgpu_vm_debug;
 extern int amdgpu_vm_update_mode;
 extern int amdgpu_dc;
-extern int amdgpu_dc_log;
 extern int amdgpu_sched_jobs;
 extern int amdgpu_sched_hw_submission;
-extern int amdgpu_no_evict;
-extern int amdgpu_direct_gma_size;
 extern uint amdgpu_pcie_gen_cap;
 extern uint amdgpu_pcie_lane_cap;
 extern uint amdgpu_cg_mask;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 529500c..8843a06 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -104,11 +104,8 @@ int amdgpu_vram_page_split = 512;
 int amdgpu_vm_update_mode = -1;
 int amdgpu_exp_hw_support = 0;
 int amdgpu_dc = -1;
-int amdgpu_dc_log = 0;
 int amdgpu_sched_jobs = 32;
 int amdgpu_sched_hw_submission = 2;
-int amdgpu_no_evict = 0;
-int amdgpu_direct_gma_size = 0;
 uint amdgpu_pcie_gen_cap = 0;
 uint amdgpu_pcie_lane_cap = 0;
 uint amdgpu_cg_mask = 0x;
@@ -341,9 +338,6 @@ module_param_named(exp_hw_support, amdgpu_exp_hw_support, 
int, 0444);
 MODULE_PARM_DESC(dc, "Display Core driver (1 = enable, 0 = disable, -1 = auto 
(default))");
 module_param_named(dc, amdgpu_dc, int, 0444);
 
-MODULE_PARM_DESC(dc_log, "Display Core Log Level (0 = minimal (default), 1 = 
chatty");
-module_param_named(dc_log, amdgpu_dc_log, int, 0444);
-
 /**
  * DOC: sched_jobs (int)
  * Override the max number of jobs supported in the sw queue. The default is 
32.
@@ -366,12 +360,6 @@ module_param_named(sched_hw_submission, 
amdgpu_sched_hw_submission, int, 0444);
 MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))");
 module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, uint, 0444);
 
-MODULE_PARM_DESC(no_evict, "Support pinning request from user space (1 = 
enable, 0 = disable (default))");
-module_param_named(no_evict, amdgpu_no_evict, int, 0444);
-
-MODULE_PARM_DESC(direct_gma_size, "Direct GMA size in megabytes (max 96MB)");
-module_param_named(direct_gma_size, amdgpu_direct_gma_size, int, 0444);
-
 /**
  * DOC: pcie_gen_cap (uint)
  * Override PCIE gen speed capabilities. See the CAIL flags in 
drivers/gpu/drm/amd/include/amd_pcie.h.
-- 
2.7.4



Re: [PATCH] drm/amdgpu: remove internal/unused kernel module parameters

2018-07-17 Thread Alex Deucher
On Tue, Jul 17, 2018 at 3:35 PM, Sonny Jiang  wrote:
> Signed-off-by: Sonny Jiang 
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu.h |  4 
>  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 16 
>  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c |  6 --
>  3 files changed, 26 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index d852d11..e15fa64 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -106,11 +106,8 @@ extern int amdgpu_vm_fault_stop;
>  extern int amdgpu_vm_debug;
>  extern int amdgpu_vm_update_mode;
>  extern int amdgpu_dc;
> -extern int amdgpu_dc_log;
>  extern int amdgpu_sched_jobs;
>  extern int amdgpu_sched_hw_submission;
> -extern int amdgpu_no_evict;
> -extern int amdgpu_direct_gma_size;
>  extern uint amdgpu_pcie_gen_cap;
>  extern uint amdgpu_pcie_lane_cap;
>  extern uint amdgpu_cg_mask;
> @@ -127,7 +124,6 @@ extern int amdgpu_cntl_sb_buf_per_se;
>  extern int amdgpu_param_buf_per_se;
>  extern int amdgpu_job_hang_limit;
>  extern int amdgpu_lbpw;
> -extern int amdgpu_compute_multipipe;
>  extern int amdgpu_gpu_recovery;
>  extern int amdgpu_emu_mode;
>  extern uint amdgpu_smu_memory_pool_size;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> index 529500c..de8a2ee 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> @@ -104,11 +104,8 @@ int amdgpu_vram_page_split = 512;
>  int amdgpu_vm_update_mode = -1;
>  int amdgpu_exp_hw_support = 0;
>  int amdgpu_dc = -1;
> -int amdgpu_dc_log = 0;
>  int amdgpu_sched_jobs = 32;
>  int amdgpu_sched_hw_submission = 2;
> -int amdgpu_no_evict = 0;
> -int amdgpu_direct_gma_size = 0;
>  uint amdgpu_pcie_gen_cap = 0;
>  uint amdgpu_pcie_lane_cap = 0;
>  uint amdgpu_cg_mask = 0x;
> @@ -125,7 +122,6 @@ int amdgpu_cntl_sb_buf_per_se = 0;
>  int amdgpu_param_buf_per_se = 0;
>  int amdgpu_job_hang_limit = 0;
>  int amdgpu_lbpw = -1;
> -int amdgpu_compute_multipipe = -1;
>  int amdgpu_gpu_recovery = -1; /* auto */
>  int amdgpu_emu_mode = 0;
>  uint amdgpu_smu_memory_pool_size = 0;
> @@ -341,9 +337,6 @@ module_param_named(exp_hw_support, amdgpu_exp_hw_support, 
> int, 0444);
>  MODULE_PARM_DESC(dc, "Display Core driver (1 = enable, 0 = disable, -1 = 
> auto (default))");
>  module_param_named(dc, amdgpu_dc, int, 0444);
>
> -MODULE_PARM_DESC(dc_log, "Display Core Log Level (0 = minimal (default), 1 = 
> chatty");
> -module_param_named(dc_log, amdgpu_dc_log, int, 0444);
> -
>  /**
>   * DOC: sched_jobs (int)
>   * Override the max number of jobs supported in the sw queue. The default is 
> 32.
> @@ -366,12 +359,6 @@ module_param_named(sched_hw_submission, 
> amdgpu_sched_hw_submission, int, 0444);
>  MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))");
>  module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, uint, 0444);
>
> -MODULE_PARM_DESC(no_evict, "Support pinning request from user space (1 = 
> enable, 0 = disable (default))");
> -module_param_named(no_evict, amdgpu_no_evict, int, 0444);
> -
> -MODULE_PARM_DESC(direct_gma_size, "Direct GMA size in megabytes (max 96MB)");
> -module_param_named(direct_gma_size, amdgpu_direct_gma_size, int, 0444);
> -
>  /**
>   * DOC: pcie_gen_cap (uint)
>   * Override PCIE gen speed capabilities. See the CAIL flags in 
> drivers/gpu/drm/amd/include/amd_pcie.h.
> @@ -478,9 +465,6 @@ module_param_named(job_hang_limit, amdgpu_job_hang_limit, 
> int ,0444);
>  MODULE_PARM_DESC(lbpw, "Load Balancing Per Watt (LBPW) support (1 = enable, 
> 0 = disable, -1 = auto)");
>  module_param_named(lbpw, amdgpu_lbpw, int, 0444);
>
> -MODULE_PARM_DESC(compute_multipipe, "Force compute queues to be spread 
> across pipes (1 = enable, 0 = disable, -1 = auto)");
> -module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
> -
>  /**
>   * DOC: gpu_recovery (int)
>   * Set to enable GPU recovery mechanism (1 = enable, 0 = disable). The 
> default is -1 (auto, disabled except SRIOV).
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
> index 239bf2a..581959a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
> @@ -111,12 +111,6 @@ void amdgpu_gfx_parse_disable_cu(unsigned *mask, 
> unsigned max_se, unsigned max_s
>
>  static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev)
>  {
> -   if (amdgpu_compute_multipipe != -1) {
> -   DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
> -amdgpu_compute_multipipe);
> -   return amdgpu_compute_multipipe == 1;
> -   }
> -

Looks like this option is not actually unused.  We should keep this one for now.

Alex

> /* FIXME: spreading the queues across pipes causes perf regressions
>  * on POLARIS11 compute workloads */
> if (adev->asic_type == 

[PATCH] drm/amdgpu: add another ATPX quirk for TOPAZ

2018-07-17 Thread Alex Deucher
Needs ATPX rather than _PR3.

Bug: https://bugzilla.kernel.org/show_bug.cgi?id=200517
Signed-off-by: Alex Deucher 
Cc: sta...@vger.kernel.org
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index b33f1680c9a3..a028661d9e20 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -575,6 +575,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] 
= {
{ 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
+   { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0, 0, 0, 0, 0 },
 };
 
-- 
2.13.6
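For context, each quirk entry is matched against the dGPU's PCI vendor/device IDs and the subsystem vendor/device IDs, so the new line forces ATPX-based power control (instead of _PR3) on the 0x1025:0x125A (Acer) system carrying this Topaz part. A rough sketch of how such a table is consumed follows; the struct member and helper names are assumptions for illustration, and the real lookup in amdgpu_atpx_handler.c may differ in detail:

```
/* Sketch only: walk the quirk list and collect the flags for the device
 * being probed.  Names are illustrative, not quoted from the driver.
 */
static u32 amdgpu_px_lookup_quirks(struct pci_dev *pdev)
{
	const struct amdgpu_px_quirk *q = amdgpu_px_quirk_list;
	u32 flags = 0;

	for (; q->chip_vendor; q++) {
		if (pdev->vendor == q->chip_vendor &&
		    pdev->device == q->chip_device &&
		    pdev->subsystem_vendor == q->subsys_vendor &&
		    pdev->subsystem_device == q->subsys_device)
			flags |= q->px_quirk_flags; /* e.g. AMDGPU_PX_QUIRK_FORCE_ATPX */
	}

	return flags;
}
```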



[PATCH] drm/amdgpu: remove internal/unused kernel module parameters

2018-07-17 Thread Sonny Jiang
Signed-off-by: Sonny Jiang 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h |  4 
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 16 
 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c |  6 --
 3 files changed, 26 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index d852d11..e15fa64 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -106,11 +106,8 @@ extern int amdgpu_vm_fault_stop;
 extern int amdgpu_vm_debug;
 extern int amdgpu_vm_update_mode;
 extern int amdgpu_dc;
-extern int amdgpu_dc_log;
 extern int amdgpu_sched_jobs;
 extern int amdgpu_sched_hw_submission;
-extern int amdgpu_no_evict;
-extern int amdgpu_direct_gma_size;
 extern uint amdgpu_pcie_gen_cap;
 extern uint amdgpu_pcie_lane_cap;
 extern uint amdgpu_cg_mask;
@@ -127,7 +124,6 @@ extern int amdgpu_cntl_sb_buf_per_se;
 extern int amdgpu_param_buf_per_se;
 extern int amdgpu_job_hang_limit;
 extern int amdgpu_lbpw;
-extern int amdgpu_compute_multipipe;
 extern int amdgpu_gpu_recovery;
 extern int amdgpu_emu_mode;
 extern uint amdgpu_smu_memory_pool_size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 529500c..de8a2ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -104,11 +104,8 @@ int amdgpu_vram_page_split = 512;
 int amdgpu_vm_update_mode = -1;
 int amdgpu_exp_hw_support = 0;
 int amdgpu_dc = -1;
-int amdgpu_dc_log = 0;
 int amdgpu_sched_jobs = 32;
 int amdgpu_sched_hw_submission = 2;
-int amdgpu_no_evict = 0;
-int amdgpu_direct_gma_size = 0;
 uint amdgpu_pcie_gen_cap = 0;
 uint amdgpu_pcie_lane_cap = 0;
 uint amdgpu_cg_mask = 0x;
@@ -125,7 +122,6 @@ int amdgpu_cntl_sb_buf_per_se = 0;
 int amdgpu_param_buf_per_se = 0;
 int amdgpu_job_hang_limit = 0;
 int amdgpu_lbpw = -1;
-int amdgpu_compute_multipipe = -1;
 int amdgpu_gpu_recovery = -1; /* auto */
 int amdgpu_emu_mode = 0;
 uint amdgpu_smu_memory_pool_size = 0;
@@ -341,9 +337,6 @@ module_param_named(exp_hw_support, amdgpu_exp_hw_support, 
int, 0444);
 MODULE_PARM_DESC(dc, "Display Core driver (1 = enable, 0 = disable, -1 = auto 
(default))");
 module_param_named(dc, amdgpu_dc, int, 0444);
 
-MODULE_PARM_DESC(dc_log, "Display Core Log Level (0 = minimal (default), 1 = 
chatty");
-module_param_named(dc_log, amdgpu_dc_log, int, 0444);
-
 /**
  * DOC: sched_jobs (int)
  * Override the max number of jobs supported in the sw queue. The default is 
32.
@@ -366,12 +359,6 @@ module_param_named(sched_hw_submission, 
amdgpu_sched_hw_submission, int, 0444);
 MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))");
 module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, uint, 0444);
 
-MODULE_PARM_DESC(no_evict, "Support pinning request from user space (1 = 
enable, 0 = disable (default))");
-module_param_named(no_evict, amdgpu_no_evict, int, 0444);
-
-MODULE_PARM_DESC(direct_gma_size, "Direct GMA size in megabytes (max 96MB)");
-module_param_named(direct_gma_size, amdgpu_direct_gma_size, int, 0444);
-
 /**
  * DOC: pcie_gen_cap (uint)
  * Override PCIE gen speed capabilities. See the CAIL flags in 
drivers/gpu/drm/amd/include/amd_pcie.h.
@@ -478,9 +465,6 @@ module_param_named(job_hang_limit, amdgpu_job_hang_limit, 
int ,0444);
 MODULE_PARM_DESC(lbpw, "Load Balancing Per Watt (LBPW) support (1 = enable, 0 
= disable, -1 = auto)");
 module_param_named(lbpw, amdgpu_lbpw, int, 0444);
 
-MODULE_PARM_DESC(compute_multipipe, "Force compute queues to be spread across 
pipes (1 = enable, 0 = disable, -1 = auto)");
-module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
-
 /**
  * DOC: gpu_recovery (int)
  * Set to enable GPU recovery mechanism (1 = enable, 0 = disable). The default 
is -1 (auto, disabled except SRIOV).
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 239bf2a..581959a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -111,12 +111,6 @@ void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned 
max_se, unsigned max_s
 
 static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev)
 {
-   if (amdgpu_compute_multipipe != -1) {
-   DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
-amdgpu_compute_multipipe);
-   return amdgpu_compute_multipipe == 1;
-   }
-
/* FIXME: spreading the queues across pipes causes perf regressions
 * on POLARIS11 compute workloads */
if (adev->asic_type == CHIP_POLARIS11)
-- 
2.7.4



Re: [PATCH] RFC: drm/amd/display: enable ABGR and XBGR formats (v2)

2018-07-17 Thread Mauro Rossi
Hi Alex,

On Tue, Jul 17, 2018 at 15:43, Alex Deucher wrote:

> On Sun, Jul 15, 2018 at 10:03 PM, Mauro Rossi 
> wrote:
> > From: Mauro Rossi 
> >
> > (v1) {A,X}BGR code paths are added in amdgpu_dm, by using an
> fb_format
> >  already listed in dc/dc_hw_types.h
> (SURFACE_PIXEL_FORMAT_GRPH_ABGR),
> >  and in dce 8.0, 10.0 and 11.0, i.e. Bonaire and later.
> >  GRPH_FORMAT_ARGB is used due to lack of specific
> GRPH_FORMAT_ABGR
> >
> > (v2) support for {A,X}BGR in atombios_crtc (now in dce4 path, to be
> refined)
> >  to initialize frame buffer device and avoid following dmesg error:
> >  "[drm] Cannot find any crtc or sizes"
> >
> > Tested with oreo-x86 (hwcomposer.drm + gralloc.gbm + mesa-dev/radv)
> > SurfaceFlinger can now select RGBA_ format for HWC_FRAMEBUFFER_TARGET
> > No major regression or crash observed so far, but some android 2D overlay
> > may be affected by color artifacts. Kind feedback requested.
> >
> > Signed-off-by: Mauro Rossi 
>
> Please split the patch in three (one for radeon and one for amdgpu dc
> and one for amdgpu non-dc).  Also the GRPH_SWAP_CONTROL register has a
> crossbar where you can change the channel routing.  You may need that
> for the channel routing to work correctly.
>
> Alex
>

Thanks for your suggestion and guidance! :-)

I may need some time to assimilate the suggestions and some confirmations,
as I am an amateur in AMD GPU coding; to be honest, I should have mentioned
that before.

Regarding the radeon scope of changes,
do you recommend keeping the enablement of {A,X}BGR for dce4 and later,
or extending it to older families of radeon gpus/chipsets?

What is the oldest radeon family where {A,X}BGR can be natively supported
by the hardware, by means of swap control registers for channel routing
configuration?

Based on the scope of {A,X}BGR support in the final patches,
I may need to add handling in other dce code and maybe other modules;
could you please outline the necessary changes/high-level steps to follow?

Do you have a pointer to documentation on the swap control registers for
the families that may be considered 'safe to keep in scope' for {A,X}BGR
support?

Last but not least, I would like to ask how to test for regressions.
Even if this will come later, when the patches are in good shape for further
evaluation, do you have tools and samples for conformance/no-regression
testing? I am asking because I don't have samples for all families.

Kind regards

Mauro
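As a concrete illustration of the GRPH_SWAP_CONTROL crossbar approach suggested above, the dce_v10/11 do_set_base switch could program the red/blue crossbars for the BGR orderings instead of reusing the ARGB format code. This is only a sketch: the crossbar field names and the value 2 are assumptions to be verified against the DCE register headers, not a tested change.

```
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
		/* Route red and blue through the crossbar rather than relying
		 * on the ARGB format "hack"; field names/values are assumptions. */
		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL,
					GRPH_RED_CROSSBAR, 2);
		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL,
					GRPH_BLUE_CROSSBAR, 2);
#ifdef __BIG_ENDIAN
		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL,
					GRPH_ENDIAN_SWAP, ENDIAN_8IN32);
#endif
		break;
```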




>
>
> > ---
> >  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c| 9 +
> >  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c| 9 +
> >  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 8 
> >  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 6 ++
> >  drivers/gpu/drm/radeon/atombios_crtc.c| 8 
> >  5 files changed, 40 insertions(+)
> >
> > diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
> b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
> > index 022f303463fc..d4280d2e7737 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
> > @@ -2005,6 +2005,15 @@ static int dce_v10_0_crtc_do_set_base(struct
> drm_crtc *crtc,
> > /* Greater 8 bpc fb needs to bypass hw-lut to retain
> precision */
> > bypass_lut = true;
> > break;
> > +   case DRM_FORMAT_XBGR:
> > +   case DRM_FORMAT_ABGR:
> > +   fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH,
> 2);
> > +   fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL,
> GRPH_FORMAT, 0); /* Hack */
> > +#ifdef __BIG_ENDIAN
> > +   fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL,
> GRPH_ENDIAN_SWAP,
> > +   ENDIAN_8IN32);
> > +#endif
> > +   break;
> > default:
> > DRM_ERROR("Unsupported screen format %s\n",
> >   drm_get_format_name(target_fb->format->format,
> _name));
> > diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
> b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
> > index 800a9f36ab4f..d48ee8f2e192 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
> > @@ -2044,6 +2044,15 @@ static int dce_v11_0_crtc_do_set_base(struct
> drm_crtc *crtc,
> > /* Greater 8 bpc fb needs to bypass hw-lut to retain
> precision */
> > bypass_lut = true;
> > break;
> > +   case DRM_FORMAT_XBGR:
> > +   case DRM_FORMAT_ABGR:
> > +   fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH,
> 2);
> > +   fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL,
> GRPH_FORMAT, 0); /* Hack */
> > +#ifdef __BIG_ENDIAN
> > +   fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL,
> GRPH_ENDIAN_SWAP,
> > +

Re: [PATCH 2/2] drm/amd/display: Convert 10kHz clks from PPLib into kHz

2018-07-17 Thread Harry Wentland
On 2018-07-17 08:36 AM, Rex Zhu wrote:
> Except for fields explicitly named *_in_khz, the default clock unit in
> powerplay is 10 kHz, so multiply by 10 to get the clock frequencies in
> kHz that display expects.
> 
> Signed-off-by: Rex Zhu 

Reviewed-by: Harry Wentland 

Harry
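(As a worked example: a 300 MHz engine clock comes out of powerplay as 30000
in 10 kHz units, and multiplying by 10 gives the 300000 kHz value dc expects.)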

> ---
>  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 4 ++--
>  1 file changed, 2 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c 
> b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
> index c69ae78..fbe878a 100644
> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
> @@ -469,8 +469,8 @@ bool dm_pp_get_static_clocks(
>   return false;
>  
>   static_clk_info->max_clocks_state = 
> pp_to_dc_powerlevel_state(pp_clk_info.max_clocks_state);
> - static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock;
> - static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock;
> + static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock * 10;
> + static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock * 10;
>  
>   return true;
>  }
> 


Re: [PATCH 1/2] drm/amd/pp: Set Max clock level to display by default

2018-07-17 Thread Harry Wentland


On 2018-07-17 08:36 AM, Rex Zhu wrote:
> avoid the error in dmesg:
> [drm:dm_pp_get_static_clocks]
> *ERROR* DM_PPLIB: invalid powerlevel state: 0!
> 
> Signed-off-by: Rex Zhu 
> ---
>  drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 7 +--
>  1 file changed, 5 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c 
> b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
> index 75c2082..63adcd1 100644
> --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
> +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
> @@ -998,7 +998,7 @@ static int pp_get_display_power_level(void *handle,
>  static int pp_get_current_clocks(void *handle,
>   struct amd_pp_clock_info *clocks)
>  {
> - struct amd_pp_simple_clock_info simple_clocks;
> + struct amd_pp_simple_clock_info simple_clocks = { 0 };
>   struct pp_clock_info hw_clocks;
>   struct pp_hwmgr *hwmgr = handle;
>   int ret = 0;
> @@ -1034,7 +1034,8 @@ static int pp_get_current_clocks(void *handle,
>   clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
>   clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
>  
> - clocks->max_clocks_state = simple_clocks.level;
> + if (simple_clocks.level == 0)
> + clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;

shouldn't we still assign simple_clocks.level to clocks->max_clocks_state if 
it's non-zero?

Harry
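Something along these lines, as a sketch of the suggestion rather than an actual patch:

```
	/* Sketch: keep the level pplib reported and only fall back to the
	 * maximum power level when nothing was filled in. */
	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;
```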

>  
>   if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, 
> >current_ps->hardware, _clocks)) {
>   clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
> @@ -1137,6 +1138,8 @@ static int pp_get_display_mode_validation_clocks(void 
> *handle,
>   if (!hwmgr || !hwmgr->pm_en ||!clocks)
>   return -EINVAL;
>  
> + clocks->level = PP_DAL_POWERLEVEL_7;
> +
>   mutex_lock(>smu_lock);
>  
>   if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 
> PHM_PlatformCaps_DynamicPatchPowerState))
> 


Re: [PATCH libdrm] amdgpu: add amdgpu_bo_handle_type_kms_noimport

2018-07-17 Thread Marek Olšák
Michel, I think you are wasting your time. This change can be misused
as easily as any other API. It's not more dangerous than any other
amdgpu libdrm function. You won't achieve anything by optimizing the
hash table (= losing time), and you also won't achieve anything by
NAKing this (= losing performance on the lookup). Both are lose-lose
solutions, because you'll lose and others will lose too.

Marek
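For reference, the alternative discussed in the quoted thread below — recording the KMS handle when the buffer object is created, so that a later amdgpu_bo_export(bo, amdgpu_bo_handle_type_kms, ...) is a plain field read and only the (rare) import path needs a lookup — could look roughly like this. All names here are illustrative placeholders, not the actual libdrm internals:

```
#include <pthread.h>
#include <stdint.h>

struct sketch_bo {
	uint32_t kms_handle;	/* GEM handle returned by the kernel */
};

struct sketch_dev {
	pthread_mutex_t bo_table_mutex;
	struct sketch_bo *bo_handles[1024];	/* toy handle -> bo map */
};

/* Called from the alloc / import / userptr creation paths. */
static void sketch_record_handle(struct sketch_dev *dev,
				 struct sketch_bo *bo, uint32_t handle)
{
	bo->kms_handle = handle;
	pthread_mutex_lock(&dev->bo_table_mutex);
	dev->bo_handles[handle % 1024] = bo;	/* real code: a hash table */
	pthread_mutex_unlock(&dev->bo_table_mutex);
}

/* Exporting the KMS handle then needs no table lookup at all. */
static uint32_t sketch_export_kms(const struct sketch_bo *bo)
{
	return bo->kms_handle;
}
```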

On Tue, Jul 17, 2018 at 4:57 AM, Michel Dänzer  wrote:
> On 2018-07-16 08:51 PM, Marek Olšák wrote:
>> On Mon, Jul 16, 2018 at 12:05 PM, Michel Dänzer  wrote:
>>> On 2018-07-13 08:47 PM, Marek Olšák wrote:
 On Fri, Jul 13, 2018 at 4:28 AM, Michel Dänzer  wrote:
>>>
> I'd rather add the handle to the hash table in amdgpu_bo_alloc,
> amdgpu_create_bo_from_user_mem and amdgpu_bo_import instead of in
> amdgpu_bo_export, making amdgpu_bo_export(bo, amdgpu_bo_handle_type_kms,
> ...) essentially free. In the unlikely (since allocating a BO from the
> kernel is expensive) case that the hash table shows up on profiles, we
> can optimize it.

 The hash table isn't very good for high BO counts. The time complexity
 of a lookup is O(n).
>>>
>>> A lookup is only needed in amdgpu_bo_import. amdgpu_bo_alloc and
>>> amdgpu_create_bo_from_user_mem can just add the handle to the hash
>>> bucket directly.
>>>
>>> Do you know of, or can you imagine, any workload where amdgpu_bo_import
>>> is called often enough for this to be a concern?
>>
>> Fullscreen DRI2 or DRI3 re-imports buffers every frame.
>
> DRI3 doesn't. The X server only imports each DRI3 buffer once, after
> that it's referred to via the pixmap XID.
>
>
> With DRI2 page flipping (ignoring that basically nobody's using that
> anymore with radeonsi :), it's always the same set of buffers, so the
> lookup can be made fast as discussed in the sub-thread with Christian.
> (Also, DRI2 can only use page flipping with sync-to-vblank enabled, so
> this happens on the order of hundreds of times per second max)
>
>
> --
> Earthling Michel Dänzer   |   http://www.amd.com
> Libre software enthusiast | Mesa and X developer


Re: [PATCH xf86-video-amdgpu] Support gamma correction & colormaps at depth 30 as well

2018-07-17 Thread Alex Deucher
On Tue, Jul 17, 2018 at 12:22 PM, Michel Dänzer  wrote:
> From: Michel Dänzer 
>
> Only supported with the advanced colour management properties available
> with DC as of kernel 4.17.
>
> Signed-off-by: Michel Dänzer 

Reviewed-by: Alex Deucher 

> ---
>  src/drmmode_display.c | 47 +++
>  1 file changed, 34 insertions(+), 13 deletions(-)
>
> diff --git a/src/drmmode_display.c b/src/drmmode_display.c
> index f6cafccdc..f5ab7955e 100644
> --- a/src/drmmode_display.c
> +++ b/src/drmmode_display.c
> @@ -,14 +,14 @@ Bool drmmode_pre_init(ScrnInfoPtr pScrn, drmmode_ptr 
> drmmode, int cpp)
> info->drmmode_crtc_funcs.shadow_destroy = NULL;
> }
>
> +   drmmode_cm_init(pAMDGPUEnt->fd, drmmode, mode_res);
> +
> /* Hw gamma lut's are currently bypassed by the hw at color depth 30,
>  * so spare the server the effort to compute and update the cluts.
>  */
> -   if (pScrn->depth == 30)
> +   if (pScrn->depth == 30 && !drmmode_cm_enabled(drmmode))
> info->drmmode_crtc_funcs.gamma_set = NULL;
>
> -   drmmode_cm_init(pAMDGPUEnt->fd, drmmode, mode_res);
> -
> for (i = 0; i < mode_res->count_crtcs; i++)
> if (!xf86IsEntityShared(pScrn->entityList[0]) ||
> (crtcs_needed && !(pAMDGPUEnt->assigned_crtcs & (1 << 
> i
> @@ -3636,29 +3636,50 @@ Bool drmmode_set_desired_modes(ScrnInfoPtr pScrn, 
> drmmode_ptr drmmode,
>  Bool drmmode_setup_colormap(ScreenPtr pScreen, ScrnInfoPtr pScrn)
>  {
> xf86CrtcConfigPtr xf86_config = XF86_CRTC_CONFIG_PTR(pScrn);
> +   AMDGPUInfoPtr info = AMDGPUPTR(pScrn);
> int i;
>
> if (xf86_config->num_crtc) {
> xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 
> AMDGPU_LOGLEVEL_DEBUG,
>"Initializing kms color map\n");
> +
> if (!miCreateDefColormap(pScreen))
> return FALSE;
> -   /* All radeons support 10 bit CLUTs. They get bypassed at 
> depth 30. */
> -   if (pScrn->depth != 30) {
> -   if (!xf86HandleColormaps(pScreen, 256, 10, NULL, NULL,
> -CMAP_PALETTED_TRUECOLOR
> -| 
> CMAP_RELOAD_ON_MODE_SWITCH))
> -   return FALSE;
> +
> +   if (pScrn->depth == 30) {
> +   if (!drmmode_cm_enabled(>drmmode))
> +   return TRUE;
>
> for (i = 0; i < xf86_config->num_crtc; i++) {
> xf86CrtcPtr crtc = xf86_config->crtc[i];
> +   void *gamma = malloc(1024 * 3 * 
> sizeof(CARD16));
> +
> +   if (!gamma) {
> +   ErrorF("Failed to allocate gamma LUT 
> memory\n");
> +   return FALSE;
> +   }
>
> -   drmmode_crtc_gamma_do_set(crtc, 
> crtc->gamma_red,
> - crtc->gamma_green,
> - crtc->gamma_blue,
> - crtc->gamma_size);
> +   crtc->gamma_size = 1024;
> +   crtc->gamma_red = gamma;
> +   crtc->gamma_green = crtc->gamma_red + 
> crtc->gamma_size;
> +   crtc->gamma_blue = crtc->gamma_green + 
> crtc->gamma_size;
> }
> }
> +
> +   /* All Radeons support 10 bit CLUTs. */
> +   if (!xf86HandleColormaps(pScreen, 1 << pScrn->rgbBits, 10,
> +NULL, NULL, CMAP_PALETTED_TRUECOLOR
> +| CMAP_RELOAD_ON_MODE_SWITCH))
> +   return FALSE;
> +
> +   for (i = 0; i < xf86_config->num_crtc; i++) {
> +   xf86CrtcPtr crtc = xf86_config->crtc[i];
> +
> +   drmmode_crtc_gamma_do_set(crtc, crtc->gamma_red,
> + crtc->gamma_green,
> + crtc->gamma_blue,
> + crtc->gamma_size);
> +   }
> }
>
> return TRUE;
> --
> 2.18.0
>


[PATCH xf86-video-amdgpu] Support gamma correction & colormaps at depth 30 as well

2018-07-17 Thread Michel Dänzer
From: Michel Dänzer 

Only supported with the advanced colour management properties available
with DC as of kernel 4.17.

Signed-off-by: Michel Dänzer 
---
 src/drmmode_display.c | 47 +++
 1 file changed, 34 insertions(+), 13 deletions(-)

diff --git a/src/drmmode_display.c b/src/drmmode_display.c
index f6cafccdc..f5ab7955e 100644
--- a/src/drmmode_display.c
+++ b/src/drmmode_display.c
@@ -,14 +,14 @@ Bool drmmode_pre_init(ScrnInfoPtr pScrn, drmmode_ptr 
drmmode, int cpp)
info->drmmode_crtc_funcs.shadow_destroy = NULL;
}
 
+   drmmode_cm_init(pAMDGPUEnt->fd, drmmode, mode_res);
+
/* Hw gamma lut's are currently bypassed by the hw at color depth 30,
 * so spare the server the effort to compute and update the cluts.
 */
-   if (pScrn->depth == 30)
+   if (pScrn->depth == 30 && !drmmode_cm_enabled(drmmode))
info->drmmode_crtc_funcs.gamma_set = NULL;
 
-   drmmode_cm_init(pAMDGPUEnt->fd, drmmode, mode_res);
-
for (i = 0; i < mode_res->count_crtcs; i++)
if (!xf86IsEntityShared(pScrn->entityList[0]) ||
(crtcs_needed && !(pAMDGPUEnt->assigned_crtcs & (1 << i
@@ -3636,29 +3636,50 @@ Bool drmmode_set_desired_modes(ScrnInfoPtr pScrn, 
drmmode_ptr drmmode,
 Bool drmmode_setup_colormap(ScreenPtr pScreen, ScrnInfoPtr pScrn)
 {
xf86CrtcConfigPtr xf86_config = XF86_CRTC_CONFIG_PTR(pScrn);
+   AMDGPUInfoPtr info = AMDGPUPTR(pScrn);
int i;
 
if (xf86_config->num_crtc) {
xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, AMDGPU_LOGLEVEL_DEBUG,
   "Initializing kms color map\n");
+
if (!miCreateDefColormap(pScreen))
return FALSE;
-   /* All radeons support 10 bit CLUTs. They get bypassed at depth 
30. */
-   if (pScrn->depth != 30) {
-   if (!xf86HandleColormaps(pScreen, 256, 10, NULL, NULL,
-CMAP_PALETTED_TRUECOLOR
-| CMAP_RELOAD_ON_MODE_SWITCH))
-   return FALSE;
+
+   if (pScrn->depth == 30) {
+   if (!drmmode_cm_enabled(>drmmode))
+   return TRUE;
 
for (i = 0; i < xf86_config->num_crtc; i++) {
xf86CrtcPtr crtc = xf86_config->crtc[i];
+   void *gamma = malloc(1024 * 3 * sizeof(CARD16));
+
+   if (!gamma) {
+   ErrorF("Failed to allocate gamma LUT 
memory\n");
+   return FALSE;
+   }
 
-   drmmode_crtc_gamma_do_set(crtc, crtc->gamma_red,
- crtc->gamma_green,
- crtc->gamma_blue,
- crtc->gamma_size);
+   crtc->gamma_size = 1024;
+   crtc->gamma_red = gamma;
+   crtc->gamma_green = crtc->gamma_red + 
crtc->gamma_size;
+   crtc->gamma_blue = crtc->gamma_green + 
crtc->gamma_size;
}
}
+
+   /* All Radeons support 10 bit CLUTs. */
+   if (!xf86HandleColormaps(pScreen, 1 << pScrn->rgbBits, 10,
+NULL, NULL, CMAP_PALETTED_TRUECOLOR
+| CMAP_RELOAD_ON_MODE_SWITCH))
+   return FALSE;
+
+   for (i = 0; i < xf86_config->num_crtc; i++) {
+   xf86CrtcPtr crtc = xf86_config->crtc[i];
+
+   drmmode_crtc_gamma_do_set(crtc, crtc->gamma_red,
+ crtc->gamma_green,
+ crtc->gamma_blue,
+ crtc->gamma_size);
+   }
}
 
return TRUE;
-- 
2.18.0



Re: [PATCH] drm/amdgpu: fix spelling mistake "successed" -> "succeeded"

2018-07-17 Thread Alex Deucher
On Tue, Jul 17, 2018 at 5:29 AM, Colin King  wrote:
> From: Colin Ian King 
>
> Trivial fix to spelling mistake in dev_err error message.
>
> Signed-off-by: Colin Ian King 

Applied.  thanks!

Alex


> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 10 +-
>  1 file changed, 5 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index 9883fa9bb41b..e9feb3c58389 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -2004,7 +2004,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct 
> amdgpu_device *adev)
> continue;
>
> r = block->version->funcs->hw_init(adev);
> -   DRM_INFO("RE-INIT: %s %s\n", 
> block->version->funcs->name, r?"failed":"successed");
> +   DRM_INFO("RE-INIT: %s %s\n", 
> block->version->funcs->name, r?"failed":"succeeded");
> if (r)
> return r;
> }
> @@ -2039,7 +2039,7 @@ static int amdgpu_device_ip_reinit_late_sriov(struct 
> amdgpu_device *adev)
> continue;
>
> r = block->version->funcs->hw_init(adev);
> -   DRM_INFO("RE-INIT: %s %s\n", 
> block->version->funcs->name, r?"failed":"successed");
> +   DRM_INFO("RE-INIT: %s %s\n", 
> block->version->funcs->name, r?"failed":"succeeded");
> if (r)
> return r;
> }
> @@ -3092,7 +3092,7 @@ static int amdgpu_device_handle_vram_lost(struct 
> amdgpu_device *adev)
>   * @adev: amdgpu device pointer
>   *
>   * attempt to do soft-reset or full-reset and reinitialize Asic
> - * return 0 means successed otherwise failed
> + * return 0 means succeeded otherwise failed
>   */
>  static int amdgpu_device_reset(struct amdgpu_device *adev)
>  {
> @@ -3170,7 +3170,7 @@ static int amdgpu_device_reset(struct amdgpu_device 
> *adev)
>   * @from_hypervisor: request from hypervisor
>   *
>   * do VF FLR and reinitialize Asic
> - * return 0 means successed otherwise failed
> + * return 0 means succeeded otherwise failed
>   */
>  static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
>  bool from_hypervisor)
> @@ -3295,7 +3295,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device 
> *adev,
> dev_info(adev->dev, "GPU reset(%d) failed\n", 
> atomic_read(>gpu_reset_counter));
> amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, 
> r);
> } else {
> -   dev_info(adev->dev, "GPU reset(%d) 
> successed!\n",atomic_read(>gpu_reset_counter));
> +   dev_info(adev->dev, "GPU reset(%d) 
> succeeded!\n",atomic_read(>gpu_reset_counter));
> }
>
> amdgpu_vf_error_trans_all(adev);
> --
> 2.17.1
>


Re: [PATCH 2/2] drm/amdkfd: Call kfd2kgd.set_compute_idle

2018-07-17 Thread Alex Deucher
On Mon, Jul 16, 2018 at 7:10 PM, Felix Kuehling  wrote:
> User mode queue submissions don't go through KFD. Therefore we don't
> know exactly when compute is idle or not idle. We use the existence
> of user mode queues on a device as an approximation.
>
> register_process is called when the first queue of a process is
> created. Conversely unregister_process is called when the last queue
> is destroyed. The first process that is registered takes compute
> out of idle. The last process that unregisters sets compute back
> to idle.
>
> Signed-off-by: Felix Kuehling 
> Reviewed-by: Eric Huang 

Series is:
Reviewed-by: Alex Deucher 

> ---
>  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 7 +--
>  1 file changed, 5 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 
> b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> index ace94d6..ec0d62a 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> @@ -782,7 +782,8 @@ static int register_process(struct device_queue_manager 
> *dqm,
>
> retval = dqm->asic_ops.update_qpd(dqm, qpd);
>
> -   dqm->processes_count++;
> +   if (dqm->processes_count++ == 0)
> +   dqm->dev->kfd2kgd->set_compute_idle(dqm->dev->kgd, false);
>
> dqm_unlock(dqm);
>
> @@ -805,7 +806,9 @@ static int unregister_process(struct device_queue_manager 
> *dqm,
> if (qpd == cur->qpd) {
> list_del(>list);
> kfree(cur);
> -   dqm->processes_count--;
> +   if (--dqm->processes_count == 0)
> +   dqm->dev->kfd2kgd->set_compute_idle(
> +   dqm->dev->kgd, true);
> goto out;
> }
> }
> --
> 2.7.4
>


Re: [PATCH 2/2] drm/amd/display: Convert 10kHz clks from PPLib into kHz

2018-07-17 Thread Alex Deucher
On Tue, Jul 17, 2018 at 8:36 AM, Rex Zhu  wrote:
> Except for fields explicitly named *_in_khz, the default clock unit in
> powerplay is 10 kHz, so multiply by 10 to get the clock frequencies in
> kHz that display expects.
>
> Signed-off-by: Rex Zhu 

Series is:
Reviewed-by: Alex Deucher 

> ---
>  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 4 ++--
>  1 file changed, 2 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c 
> b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
> index c69ae78..fbe878a 100644
> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
> @@ -469,8 +469,8 @@ bool dm_pp_get_static_clocks(
> return false;
>
> static_clk_info->max_clocks_state = 
> pp_to_dc_powerlevel_state(pp_clk_info.max_clocks_state);
> -   static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock;
> -   static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock;
> +   static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock * 10;
> +   static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock * 10;
>
> return true;
>  }
> --
> 1.9.1
>


Re: [PATCH] drm/amd/display: Honor pplib stutter mask for all ASICs in DC

2018-07-17 Thread Alex Deucher
On Tue, Jul 17, 2018 at 10:54 AM, Harry Wentland  wrote:
> [Why]
> We were only setting this mask for DCN, but should really use it
> universally for all ASICs.
>
> [How]
> Move the assignment out of the Raven switch statement.
>
> Cc: rex@amd.com
> Cc: feifei...@amd.com
> Cc: kenneth.f...@amd.com
> Cc: evan.q...@amd.com
> Cc: bhawanpreet.la...@amd.com
> Cc: jordan.laz...@amd.com
> Signed-off-by: Harry Wentland 

I thought we had previously enabled stutter for CZ.  Does that use a
different path or was the code reworked in the interim?

Reviewed-by: Alex Deucher 

> ---
>  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 6 ++
>  1 file changed, 2 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
> b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> index 952691c6f81e..004636ead29b 100644
> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> @@ -1528,10 +1528,6 @@ static int amdgpu_dm_initialize_drm_device(struct 
> amdgpu_device *adev)
> DRM_ERROR("DM: Failed to initialize IRQ\n");
> goto fail;
> }
> -   /*
> -* Temporary disable until pplib/smu interaction is 
> implemented
> -*/
> -   dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & 
> PP_STUTTER_MODE ? false : true;
> break;
>  #endif
> default:
> @@ -1539,6 +1535,8 @@ static int amdgpu_dm_initialize_drm_device(struct 
> amdgpu_device *adev)
> goto fail;
> }
>
> +   dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & 
> PP_STUTTER_MODE ? false : true;
> +
> return 0;
>  fail:
> kfree(aencoder);
> --
> 2.17.1
>


Re: [PATCH 5/7] drm/amd/display: Decouple aux from i2c

2018-07-17 Thread Alex Deucher
On Tue, Jul 17, 2018 at 9:29 AM,   wrote:
> From: Bhawanpreet Lakha 
>
> [Why]
> Aux engine is created from i2caux layer. We want to remove this layer
> and use the engine directly.
>
> [How]
> Decouple the aux engine from i2caux. Move the aux engine related code to the
> dce folder, use the dc resource pool to manage the engine, and use the engine
> functions directly.
>

Don't i2c and aux share the same physical pins?  If so, do you have
appropriate locking to arbitrate access to the pins from either
engine?

Alex
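For illustration, the kind of arbitration being asked about could be as simple as one lock per physical DDC line, taken by whichever engine (I2C or AUX) wants to drive the pins. This is a sketch under the assumption that both engines resolve to the same pin set; the names are illustrative, not the DC implementation:

```
#include <linux/mutex.h>

/* One arbiter per physical connector line shared by the I2C and AUX
 * engines, so their transactions cannot interleave on the same pins. */
struct ddc_line_arbiter {
	struct mutex lock;
};

static void ddc_line_acquire(struct ddc_line_arbiter *arb)
{
	mutex_lock(&arb->lock);
}

static void ddc_line_release(struct ddc_line_arbiter *arb)
{
	mutex_unlock(&arb->lock);
}
```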


> Change-Id: Iecb609fe815dab31ed6b6100916c4b49ed6539a0
> Signed-off-by: Bhawanpreet Lakha 
> Reviewed-by: Harry Wentland 
> Acked-by: Leo Li 
> ---
>  drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c  |  22 +-
>  drivers/gpu/drm/amd/display/dc/dce/Makefile|   2 +-
>  drivers/gpu/drm/amd/display/dc/dce/dce_aux.c   | 942 
> +
>  drivers/gpu/drm/amd/display/dc/dce/dce_aux.h   | 111 +++
>  .../drm/amd/display/dc/dce100/dce100_resource.c|  42 +
>  .../drm/amd/display/dc/dce110/dce110_resource.c|  45 +
>  .../drm/amd/display/dc/dce112/dce112_resource.c|  47 +
>  .../drm/amd/display/dc/dce120/dce120_resource.c|  42 +
>  .../gpu/drm/amd/display/dc/dce80/dce80_resource.c  |  44 +
>  .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c  |  44 +
>  drivers/gpu/drm/amd/display/dc/i2caux/engine.h |   1 +
>  drivers/gpu/drm/amd/display/dc/inc/core_types.h|   2 +-
>  drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h | 113 +++
>  drivers/gpu/drm/amd/display/dc/inc/hw/engine.h | 106 +++
>  14 files changed, 1549 insertions(+), 14 deletions(-)
>  create mode 100644 drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
>  create mode 100644 drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
>  create mode 100644 drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
>  create mode 100644 drivers/gpu/drm/amd/display/dc/inc/hw/engine.h
>
> diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c 
> b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
> index 08c9d73..4019fe07 100644
> --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
> +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
> @@ -33,10 +33,8 @@
>  #include "include/vector.h"
>  #include "core_types.h"
>  #include "dc_link_ddc.h"
> -#include "i2caux/engine.h"
> -#include "i2caux/i2c_engine.h"
> -#include "i2caux/aux_engine.h"
> -#include "i2caux/i2caux.h"
> +#include "engine.h"
> +#include "aux_engine.h"
>
>  #define AUX_POWER_UP_WA_DELAY 500
>  #define I2C_OVER_AUX_DEFER_WA_DELAY 70
> @@ -641,9 +639,9 @@ int dc_link_aux_transfer(struct ddc_service *ddc,
>  enum aux_transaction_type type,
>  enum i2caux_transaction_action action)
>  {
> -   struct i2caux *i2caux = ddc->ctx->i2caux;
> struct ddc *ddc_pin = ddc->ddc_pin;
> -   struct aux_engine *engine;
> +   struct engine *engine;
> +   struct aux_engine *aux_engine;
> enum aux_channel_operation_result operation_result;
> struct aux_request_transaction_data aux_req;
> struct aux_reply_transaction_data aux_rep;
> @@ -654,7 +652,8 @@ int dc_link_aux_transfer(struct ddc_service *ddc,
> memset(_req, 0, sizeof(aux_req));
> memset(_rep, 0, sizeof(aux_rep));
>
> -   engine = i2caux->funcs->acquire_aux_engine(i2caux, ddc_pin);
> +   engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
> +   aux_engine = engine->funcs->acquire(engine, ddc_pin);
>
> aux_req.type = type;
> aux_req.action = action;
> @@ -664,15 +663,15 @@ int dc_link_aux_transfer(struct ddc_service *ddc,
> aux_req.length = size;
> aux_req.data = buffer;
>
> -   engine->funcs->submit_channel_request(engine, _req);
> -   operation_result = engine->funcs->get_channel_status(engine, 
> _bytes);
> +   aux_engine->funcs->submit_channel_request(aux_engine, _req);
> +   operation_result = aux_engine->funcs->get_channel_status(aux_engine, 
> _bytes);
>
> switch (operation_result) {
> case AUX_CHANNEL_OPERATION_SUCCEEDED:
> res = returned_bytes;
>
> if (res <= size && res >= 0)
> -   res = engine->funcs->read_channel_reply(engine, size,
> +   res = 
> aux_engine->funcs->read_channel_reply(aux_engine, size,
> buffer, reply,
> );
>
> @@ -686,8 +685,7 @@ int dc_link_aux_transfer(struct ddc_service *ddc,
> res = -1;
> break;
> }
> -
> -   i2caux->funcs->release_engine(i2caux, >base);
> +   aux_engine->base.funcs->release_engine(_engine->base);
> return res;
>  }
>
> diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile 
> b/drivers/gpu/drm/amd/display/dc/dce/Makefile
> index 11401fd..825537b 100644
> --- a/drivers/gpu/drm/amd/display/dc/dce/Makefile
> +++ 

Re: kfd kfd: DID 15dd is missing in supported_devices

2018-07-17 Thread Alex Deucher
On Tue, Jul 17, 2018 at 11:47 AM, Paul Menzel
 wrote:
> Dear AMD Linux folks,
>
>
> Using Linux 4.18.0-rc5+ I get the warning and error below in the logs.
> I am using a Ryzen 3 2200g.
>
> ```
> $ dmesg
> [0.00] Linux version 4.18.0-rc5+ (paul@tokeiihto) (gcc version 8.1.0 
> (Debian 8.1.0-10)) #1 SMP Tue Jul 17 11:43:33 CEST 2018
> [0.00] Command line: BOOT_IMAGE=/vmlinuz-4.18.0-rc5+ 
> root=UUID=313f78e2-468f-4660-b236-d1de214bf6b4 ro noisapnp cryptomgr.notests 
> apparmor=0 log_buf_len=4M initcall_debug quiet
> […]
> [   24.263395] calling  kfd_module_init+0x0/0x1000 [amdkfd] @ 411
> [   24.263441] Parsing CRAT table with 1 nodes
> [   24.263451] Creating topology SYSFS entries
> [   24.263480] Topology: Add APU node [0x0:0x0]
> [   24.263480] Finished initializing topology
> [   24.263503] kfd kfd: Initialized module
> [   24.263510] initcall kfd_module_init+0x0/0x1000 [amdkfd] returned 0 after 
> 104 usecs
> […]
> [   24.266813] kfd kfd: DID 15dd is missing in supported_devices
> [   24.266815] kfd kfd: kgd2kfd_probe failed
> […]
> ```
>
> The messages are from the code below.
>
> ```
> $ vim drivers/gpu/drm/amd/amdkfd/kfd_device.c
> […]
> 305 static const struct kfd_device_info *lookup_device_info(unsigned short 
> did)
> 306 {
> 307 size_t i;
> 308
> 309 for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
> 310 if (supported_devices[i].did == did) {
> 311 WARN_ON(!supported_devices[i].device_info);
> 312 return supported_devices[i].device_info;
> 313 }
> 314 }
> 315
> 316 dev_warn(kfd_device, "DID %04x is missing in supported_devices\n",
> 317  did);
> 318
> 319 return NULL;
> 320 }
> 321
> 322 struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
> 323 struct pci_dev *pdev, const struct kfd2kgd_calls *f2g)
> 324 {
> 325 struct kfd_dev *kfd;
> 326 int ret;
> 327 const struct kfd_device_info *device_info =
> 328 lookup_device_info(pdev->device);
> 329
> 330 if (!device_info) {
> 331 dev_err(kfd_device, "kgd2kfd_probe failed\n");
> 332 return NULL;
> 333 }
> […]
> ```
>
> Can my ID just be added? Otherwise, if it’s expected that the probe function
> can fail, the log level shouldn’t be an error?


Full KFD support for Raven devices is available in this patch set:
https://patchwork.freedesktop.org/series/46440/

Alex

>
> Maybe something like the below would make it clearer that this is expected.
>
>> DID 15dd is missing in supported_devices and not supported yet.
>
>
> Kind regards,
>
> Paul
>
>


kfd kfd: DID 15dd is missing in supported_devices

2018-07-17 Thread Paul Menzel
Dear AMD Linux folks,


Using Linux 4.18.0-rc5+ I get the warning and error below in the logs.
I am using a Ryzen 3 2200g.

```
$ dmesg
[0.00] Linux version 4.18.0-rc5+ (paul@tokeiihto) (gcc version 8.1.0 
(Debian 8.1.0-10)) #1 SMP Tue Jul 17 11:43:33 CEST 2018
[0.00] Command line: BOOT_IMAGE=/vmlinuz-4.18.0-rc5+ 
root=UUID=313f78e2-468f-4660-b236-d1de214bf6b4 ro noisapnp cryptomgr.notests 
apparmor=0 log_buf_len=4M initcall_debug quiet
[…]
[   24.263395] calling  kfd_module_init+0x0/0x1000 [amdkfd] @ 411
[   24.263441] Parsing CRAT table with 1 nodes
[   24.263451] Creating topology SYSFS entries
[   24.263480] Topology: Add APU node [0x0:0x0]
[   24.263480] Finished initializing topology
[   24.263503] kfd kfd: Initialized module
[   24.263510] initcall kfd_module_init+0x0/0x1000 [amdkfd] returned 0 after 
104 usecs
[…]
[   24.266813] kfd kfd: DID 15dd is missing in supported_devices
[   24.266815] kfd kfd: kgd2kfd_probe failed
[…]
```

The messages are from the code below.

```
$ vim drivers/gpu/drm/amd/amdkfd/kfd_device.c
[…]
305 static const struct kfd_device_info *lookup_device_info(unsigned short did)
306 {
307 size_t i;
308 
309 for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
310 if (supported_devices[i].did == did) {
311 WARN_ON(!supported_devices[i].device_info);
312 return supported_devices[i].device_info;
313 }
314 }
315 
316 dev_warn(kfd_device, "DID %04x is missing in supported_devices\n",
317  did);
318 
319 return NULL;
320 }
321 
322 struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
323 struct pci_dev *pdev, const struct kfd2kgd_calls *f2g)
324 {
325 struct kfd_dev *kfd;
326 int ret;
327 const struct kfd_device_info *device_info =
328 lookup_device_info(pdev->device);
329 
330 if (!device_info) {
331 dev_err(kfd_device, "kgd2kfd_probe failed\n");
332 return NULL;
333 }
[…]
```

Can my ID just be added? Otherwise, if it’s expected that the probe function
can fail, shouldn’t the log level be something lower than an error?

Maybe a message like the one below would make it clearer that this is expected.

> DID 15dd is missing in supported_devices and not supported yet.
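For illustration, a minimal sketch of what I have in mind (hypothetical and
untested; only the message severity and wording change, the lookup logic stays
the same):

```
static const struct kfd_device_info *lookup_device_info(unsigned short did)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
		if (supported_devices[i].did == did) {
			WARN_ON(!supported_devices[i].device_info);
			return supported_devices[i].device_info;
		}
	}

	/* An unknown DID is expected for unsupported ASICs, so report it as
	 * informational rather than as a warning. */
	dev_info(kfd_device,
		 "DID %04x is missing in supported_devices and not supported yet\n",
		 did);

	return NULL;
}
```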


Kind regards,

Paul





Re: [PATCH xf86-video-ati] glamor: Invalidate cached GEM handle in radeon_set_pixmap_bo

2018-07-17 Thread Alex Deucher
On Tue, Jul 17, 2018 at 5:43 AM, Michel Dänzer  wrote:
> From: Michel Dänzer 
>
> We continued using the stale cached handle, causing issues e.g. when
> resizing the screen via RandR.
>
> Reported-by: iive on IRC
> Signed-off-by: Michel Dänzer 

Acked-by: Alex Deucher 

> ---
>  src/radeon.h | 1 +
>  1 file changed, 1 insertion(+)
>
> diff --git a/src/radeon.h b/src/radeon.h
> index 450c69aa8..1a1edb1ba 100644
> --- a/src/radeon.h
> +++ b/src/radeon.h
> @@ -735,6 +735,7 @@ static inline Bool radeon_set_pixmap_bo(PixmapPtr pPix, 
> struct radeon_buffer *bo
> return TRUE;
>
> radeon_buffer_unref(&priv->bo);
> +   priv->handle_valid = FALSE;
> }
>
> drmmode_fb_reference(pRADEONEnt->fd, >fb, NULL);
> --
> 2.18.0
>


Re: [PATCH 1/7] drm/amd/display: Remove unnecessary warning

2018-07-17 Thread Harry Wentland
On 2018-07-17 11:15 AM, Michel Dänzer wrote:
> On 2018-07-17 05:10 PM, Harry Wentland wrote:
>> On 2018-07-17 10:17 AM, Michel Dänzer wrote:
>>> On 2018-07-17 03:29 PM, sunpeng...@amd.com wrote:
 From: Mikita Lipski 

 [why]
 The warning message floods the dmesg log on Tonga even
 though it is expected to have a pix_clk set to zero,
 when there is no display connected.
 [how]
 remove the assert

 Change-Id: I4ca1e42439369b2305694b403457b5de60fc4ab1
 Signed-off-by: Mikita Lipski 
 Reviewed-by: Harry Wentland 
 Acked-by: Leo Li 
 ---
  drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c | 4 
  1 file changed, 4 deletions(-)

 diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c 
 b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
 index ec32213..74c05e8 100644
 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
 +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
 @@ -149,10 +149,6 @@ static uint32_t get_max_pixel_clock_for_all_paths(
max_pix_clk =

 pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
}
 -
 -  if (max_pix_clk == 0)
 -  ASSERT(0);
 -
return max_pix_clk;
  }
  

>>>
>>> On my development system, max_pix_clk == 0 even though there's a display
>>> connected via HDMI. Is that expected as well?
>>>
>>
>> It's not really expected. Does it happen on mode set, hotplug, randomly?
> 
> Seems to happen when the display is turned off, e.g. via DPMS.
> 
> 
>> Do you have a stack trace?
> 
> Yep, attached.
> 

Thanks. It makes sense now. DPMS will turn off all displays and hit the same 
codepath.

Leo, Nick, please update the commit message to say "when no pipes are active" 
or something similar instead of "when there is no display connected".

Harry

> 


Re: [PATCH 1/7] drm/amd/display: Remove unnecessary warning

2018-07-17 Thread Michel Dänzer
On 2018-07-17 05:10 PM, Harry Wentland wrote:
> On 2018-07-17 10:17 AM, Michel Dänzer wrote:
>> On 2018-07-17 03:29 PM, sunpeng...@amd.com wrote:
>>> From: Mikita Lipski 
>>>
>>> [why]
>>> The warning message floods the dmesg log on Tonga even
>>> though it is expected to have a pix_clk set to zero,
>>> when there is no display connected.
>>> [how]
>>> remove the assert
>>>
>>> Change-Id: I4ca1e42439369b2305694b403457b5de60fc4ab1
>>> Signed-off-by: Mikita Lipski 
>>> Reviewed-by: Harry Wentland 
>>> Acked-by: Leo Li 
>>> ---
>>>  drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c | 4 
>>>  1 file changed, 4 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c 
>>> b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
>>> index ec32213..74c05e8 100644
>>> --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
>>> +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
>>> @@ -149,10 +149,6 @@ static uint32_t get_max_pixel_clock_for_all_paths(
>>> max_pix_clk =
>>> 
>>> pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
>>> }
>>> -
>>> -   if (max_pix_clk == 0)
>>> -   ASSERT(0);
>>> -
>>> return max_pix_clk;
>>>  }
>>>  
>>>
>>
>> On my development system, max_pix_clk == 0 even though there's a display
>> connected via HDMI. Is that expected as well?
>>
> 
> It's not really expected. Does it happen on mode set, hotplug, randomly?

Seems to happen when the display is turned off, e.g. via DPMS.


> Do you have a stack trace?

Yep, attached.


-- 
Earthling Michel Dänzer   |   http://www.amd.com
Libre software enthusiast | Mesa and X developer
[ 2367.329074] WARNING: CPU: 8 PID: 16019 at 
drivers/gpu/drm//amd/amdgpu/../display/dc/dce100/dce100_hw_sequencer.c:154 
dce100_set_bandwidth+0x154/0x300 [amdgpu]
[ 2367.329086] Modules linked in: lz4(E) lz4_compress(E) cpufreq_powersave(E) 
cpufreq_userspace(E) cpufreq_conservative(E) binfmt_misc(E) nls_ascii(E) 
nls_cp437(E) vfat(E) edac_mce_amd(E) fat(E) amdkfd(OE) kvm(E) irqbypass(E) 
crct10dif_pclmul(E) crc32_pclmul(E) ghash_clmulni_intel(E) pcbc(E) radeon(OE) 
wmi_bmof(E) amdgpu(OE) snd_hda_codec_realtek(E) snd_hda_codec_generic(E) 
chash(OE) gpu_sched(OE) snd_hda_codec_hdmi(E) ttm(OE) aesni_intel(E) 
aes_x86_64(E) crypto_simd(E) drm_kms_helper(OE) cryptd(E) snd_hda_intel(E) 
efi_pstore(E) glue_helper(E) r8169(E) pcspkr(E) snd_hda_codec(E) drm(OE) mii(E) 
snd_hda_core(E) efivars(E) sg(E) i2c_algo_bit(E) snd_hwdep(E) fb_sys_fops(E) 
syscopyarea(E) snd_pcm(E) sysfillrect(E) sp5100_tco(E) snd_timer(E) 
sysimgblt(E) i2c_piix4(E) k10temp(E) wmi(E) ccp(E) snd(E) soundcore(E)
[ 2367.329232]  rng_core(E) button(E) acpi_cpufreq(E) tcp_bbr(E) sch_fq(E) 
nct6775(E) hwmon_vid(E) sunrpc(E) efivarfs(E) ip_tables(E) x_tables(E) ext4(E) 
crc32c_generic(E) crc16(E) mbcache(E) jbd2(E) fscrypto(E) dm_mod(E) raid10(E) 
raid1(E) raid0(E) multipath(E) linear(E) md_mod(E) sd_mod(E) evdev(E) 
hid_generic(E) usbhid(E) hid(E) ahci(E) xhci_pci(E) libahci(E) xhci_hcd(E) 
libata(E) crc32c_intel(E) usbcore(E) scsi_mod(E) gpio_amdpt(E) gpio_generic(E)
[ 2367.329337] CPU: 8 PID: 16019 Comm: Xorg Tainted: GW  OE 
4.18.0-rc1+ #110
[ 2367.329344] Hardware name: Micro-Star International Co., Ltd. MS-7A34/B350 
TOMAHAWK (MS-7A34), BIOS 1.80 09/13/2017
[ 2367.329532] RIP: 0010:dce100_set_bandwidth+0x154/0x300 [amdgpu]
[ 2367.329538] Code: 84 c0 74 08 3c 03 0f 8e 7d 01 00 00 8b 83 10 01 00 00 41 
39 c6 44 0f 42 f0 48 81 c3 30 04 00 00 48 39 d3 75 90 45 85 f6 75 02 <0f> 0b 49 
8d 9d 48 01 00 00 48 b8 00 00 00 00 00 fc ff df 44 89 74 
[ 2367.329663] RSP: 0018:88028cc2f338 EFLAGS: 00010246
[ 2367.329672] RAX: 11007b1652d0 RBX: 8803d8b29ab0 RCX: 
[ 2367.329678] RDX: 8803d8b29ab0 RSI:  RDI: 8803d8b29c5c
[ 2367.329684] RBP: dc00 R08: ed007a55d632 R09: ed007a55d632
[ 2367.329690] R10: 88028cc2f140 R11: ed007a55d631 R12: 110051985e69
[ 2367.329695] R13: 8803c8bee600 R14:  R15: 8803d8b28000
[ 2367.329703] FS:  7f3ad605f940() GS:8803ee20() 
knlGS:
[ 2367.329709] CS:  0010 DS:  ES:  CR0: 80050033
[ 2367.329714] CR2: 55720e2bb560 CR3: 0003c1418000 CR4: 003406e0
[ 2367.329720] Call Trace:
[ 2367.329882]  ? dce100_pplib_apply_display_requirements+0x1b0/0x1b0 [amdgpu]
[ 2367.330041]  ? bios_is_accelerated_mode+0xdb/0x140 [amdgpu]
[ 2367.330201]  dc_commit_state+0xd1e/0x1520 [amdgpu]
[ 2367.330364]  ? dc_destroy+0x90/0x90 [amdgpu]
[ 2367.330403]  ? drm_dev_dbg+0x1a0/0x1a0 [drm]
[ 2367.330571]  amdgpu_dm_atomic_commit_tail+0x968/0x3fa0 [amdgpu]
[ 2367.330592]  ? do_raw_spin_unlock+0x54/0x220
[ 2367.330602]  ? _raw_spin_unlock_irq+0x29/0x40
[ 2367.330751]  ? amdgpu_dm_do_flip+0xab0/0xab0 [amdgpu]
[ 2367.330774]  ? 

[PATCH] drm/amd/display: Honor pplib stutter mask for all ASICs in DC

2018-07-17 Thread Harry Wentland
[Why]
We were only setting this mask for DCN, but should really use it
universally for all ASICs.

[How]
Move the assignment out of the Raven switch statement.

Cc: rex@amd.com
Cc: feifei...@amd.com
Cc: kenneth.f...@amd.com
Cc: evan.q...@amd.com
Cc: bhawanpreet.la...@amd.com
Cc: jordan.laz...@amd.com
Signed-off-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 6 ++
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 952691c6f81e..004636ead29b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1528,10 +1528,6 @@ static int amdgpu_dm_initialize_drm_device(struct 
amdgpu_device *adev)
DRM_ERROR("DM: Failed to initialize IRQ\n");
goto fail;
}
-   /*
-* Temporary disable until pplib/smu interaction is implemented
-*/
-   dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & 
PP_STUTTER_MODE ? false : true;
break;
 #endif
default:
@@ -1539,6 +1535,8 @@ static int amdgpu_dm_initialize_drm_device(struct 
amdgpu_device *adev)
goto fail;
}
 
+   dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & 
PP_STUTTER_MODE ? false : true;
+
return 0;
 fail:
kfree(aencoder);
-- 
2.17.1



Re: [PATCH 1/7] drm/amd/display: Remove unnecessary warning

2018-07-17 Thread Michel Dänzer
On 2018-07-17 03:29 PM, sunpeng...@amd.com wrote:
> From: Mikita Lipski 
> 
> [why]
> The warning message floods the dmesg log on Tonga even
> though it is expected to have a pix_clk set to zero,
> when there is no display connected.
> [how]
> remove the assert
> 
> Change-Id: I4ca1e42439369b2305694b403457b5de60fc4ab1
> Signed-off-by: Mikita Lipski 
> Reviewed-by: Harry Wentland 
> Acked-by: Leo Li 
> ---
>  drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c | 4 
>  1 file changed, 4 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c 
> b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
> index ec32213..74c05e8 100644
> --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
> +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
> @@ -149,10 +149,6 @@ static uint32_t get_max_pixel_clock_for_all_paths(
>   max_pix_clk =
>   
> pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
>   }
> -
> - if (max_pix_clk == 0)
> - ASSERT(0);
> -
>   return max_pix_clk;
>  }
>  
> 

On my development system, max_pix_clk == 0 even though there's a display
connected via HDMI. Is that expected as well?


-- 
Earthling Michel Dänzer   |   http://www.amd.com
Libre software enthusiast | Mesa and X developer


Re: [PATCH] RFC: drm/amd/display: enable ABGR and XBGR formats (v2)

2018-07-17 Thread Alex Deucher
On Sun, Jul 15, 2018 at 10:03 PM, Mauro Rossi  wrote:
> From: Mauro Rossi 
>
> (v1) {A,X}BGR code paths are added in amdgpu_dm, by using an fb_format
>  already listed in dc/dc_hw_types.h (SURFACE_PIXEL_FORMAT_GRPH_ABGR8888),
>  and in dce 8.0, 10.0 and 11.0, i.e. Bonaire and later.
>  GRPH_FORMAT_ARGB8888 is used due to lack of a specific GRPH_FORMAT_ABGR8888
>
> (v2) support for {A,X}BGR in atombios_crtc (now in dce4 path, to be 
> refined)
>  to initialize frame buffer device and avoid following dmesg error:
>  "[drm] Cannot find any crtc or sizes"
>
> Tested with oreo-x86 (hwcomposer.drm + gralloc.gbm + mesa-dev/radv)
> SurfaceFlinger can now select RGBA_8888 format for HWC_FRAMEBUFFER_TARGET
> No major regression or crash observed so far, but some android 2D overlay
> may be affected by color artifacts. Kind feedback requested.
>
> Signed-off-by: Mauro Rossi 

Please split the patch in three (one for radeon and one for amdgpu dc
and one for amdgpu non-dc).  Also the GRPH_SWAP_CONTROL register has a
crossbar where you can change the channel routing.  You may need that
for the channel routing to work correctly.
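
For reference, a rough sketch of what using the crossbar might look like in the
DCE 10 path (an untested assumption on my side; the GRPH_RED_CROSSBAR and
GRPH_BLUE_CROSSBAR field names are taken from the GRPH_SWAP_CNTL register
definition and would need to be double-checked):

```
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL,
					  GRPH_FORMAT, 0);
		/* Route red and blue through the crossbar instead of reusing
		 * the ARGB channel order as-is. */
		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL,
					GRPH_RED_CROSSBAR, 2);
		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL,
					GRPH_BLUE_CROSSBAR, 2);
#ifdef __BIG_ENDIAN
		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL,
					GRPH_ENDIAN_SWAP, ENDIAN_8IN32);
#endif
		break;
```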

Alex


> ---
>  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c| 9 +
>  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c| 9 +
>  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 8 
>  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 6 ++
>  drivers/gpu/drm/radeon/atombios_crtc.c| 8 
>  5 files changed, 40 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c 
> b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
> index 022f303463fc..d4280d2e7737 100644
> --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
> @@ -2005,6 +2005,15 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc 
> *crtc,
> /* Greater 8 bpc fb needs to bypass hw-lut to retain 
> precision */
> bypass_lut = true;
> break;
> +   case DRM_FORMAT_XBGR8888:
> +   case DRM_FORMAT_ABGR8888:
> +   fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
> +   fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, 
> GRPH_FORMAT, 0); /* Hack */
> +#ifdef __BIG_ENDIAN
> +   fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, 
> GRPH_ENDIAN_SWAP,
> +   ENDIAN_8IN32);
> +#endif
> +   break;
> default:
> DRM_ERROR("Unsupported screen format %s\n",
>   drm_get_format_name(target_fb->format->format, 
> _name));
> diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c 
> b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
> index 800a9f36ab4f..d48ee8f2e192 100644
> --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
> @@ -2044,6 +2044,15 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc 
> *crtc,
> /* Greater 8 bpc fb needs to bypass hw-lut to retain 
> precision */
> bypass_lut = true;
> break;
> +   case DRM_FORMAT_XBGR8888:
> +   case DRM_FORMAT_ABGR8888:
> +   fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
> +   fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, 
> GRPH_FORMAT, 0); /* Hack */
> +#ifdef __BIG_ENDIAN
> +   fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, 
> GRPH_ENDIAN_SWAP,
> +   ENDIAN_8IN32);
> +#endif
> +   break;
> default:
> DRM_ERROR("Unsupported screen format %s\n",
>   drm_get_format_name(target_fb->format->format, 
> _name));
> diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c 
> b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
> index 012e0a9ae0ff..0e2fc1ac475f 100644
> --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
> @@ -1929,6 +1929,14 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc 
> *crtc,
> /* Greater 8 bpc fb needs to bypass hw-lut to retain 
> precision */
> bypass_lut = true;
> break;
> +   case DRM_FORMAT_XBGR8888:
> +   case DRM_FORMAT_ABGR8888:
> +   fb_format = ((GRPH_DEPTH_32BPP << 
> GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
> +(GRPH_FORMAT_ARGB8888 << 
> GRPH_CONTROL__GRPH_FORMAT__SHIFT)); /* Hack */
> +#ifdef __BIG_ENDIAN
> +   fb_swap = (GRPH_ENDIAN_8IN32 << 
> GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
> +#endif
> +   break;
> default:
> DRM_ERROR("Unsupported screen format %s\n",
>   drm_get_format_name(target_fb->format->format, 
> _name));
> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
> b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> index 63c67346d316..6c10fa291150 100644
> --- 

[PATCH 7/7] drm/amd/display: dal 3.1.58

2018-07-17 Thread sunpeng.li
From: Harry Wentland 

Change-Id: I788210abbb33e0a38267c9bfd3656f51c844d5ac
Signed-off-by: Harry Wentland 
Reviewed-by: Aric Cyr 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index 9fe7bb8..a1c34e8 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -38,7 +38,7 @@
 #include "inc/compressor.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.1.56"
+#define DC_VER "3.1.58"
 
 #define MAX_SURFACES 3
 #define MAX_STREAMS 6
-- 
2.7.4



[PATCH 6/7] drm/amd/display: separate dc_debug into dc_debug_options and dc_debug data

2018-07-17 Thread sunpeng.li
From: Jun Lei 

[why]
confusing as to which part of debug is informational, and which part causes 
behavioral change

Change-Id: I3248c1576c405d3e4deb30e9514098d13390158d
Signed-off-by: Jun Lei 
Reviewed-by: Tony Cheng 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c  |  2 +-
 drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c  |  2 +-
 drivers/gpu/drm/amd/display/dc/dc.h   | 19 +++
 drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c   |  4 ++--
 .../gpu/drm/amd/display/dc/dce120/dce120_resource.c   |  2 +-
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c |  4 ++--
 6 files changed, 18 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c 
b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 080f777..bd03932 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -676,7 +676,7 @@ static void hack_force_pipe_split(struct 
dcn_bw_internal_vars *v,
 }
 
 static void hack_bounding_box(struct dcn_bw_internal_vars *v,
-   struct dc_debug *dbg,
+   struct dc_debug_options *dbg,
struct dc_state *context)
 {
if (dbg->pipe_split_policy == MPC_SPLIT_AVOID)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 88acc88..1254fba 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1029,7 +1029,7 @@ enum link_training_result 
dc_link_dp_perform_link_training(
lt_settings.lane_settings[0].PRE_EMPHASIS);
 
if (status != LINK_TRAINING_SUCCESS)
-   link->ctx->dc->debug.debug_data.ltFailCount++;
+   link->ctx->dc->debug_data.ltFailCount++;
 
return status;
 }
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index 46d9a1b..9fe7bb8 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -207,7 +207,7 @@ struct dc_clocks {
int phyclk_khz;
 };
 
-struct dc_debug {
+struct dc_debug_options {
enum visual_confirm visual_confirm;
bool sanity_checks;
bool max_disp_clk;
@@ -259,13 +259,15 @@ struct dc_debug {
bool scl_reset_length10;
bool hdmi20_disable;
bool skip_detection_link_training;
+};
 
-   struct {
-   uint32_t ltFailCount;
-   uint32_t i2cErrorCount;
-   uint32_t auxErrorCount;
-   } debug_data;
+struct dc_debug_data {
+   uint32_t ltFailCount;
+   uint32_t i2cErrorCount;
+   uint32_t auxErrorCount;
 };
+
+
 struct dc_state;
 struct resource_pool;
 struct dce_hwseq;
@@ -274,8 +276,7 @@ struct dc {
struct dc_caps caps;
struct dc_cap_funcs cap_funcs;
struct dc_config config;
-   struct dc_debug debug;
-
+   struct dc_debug_options debug;
struct dc_context *ctx;
 
uint8_t link_count;
@@ -311,6 +312,8 @@ struct dc {
 
/* FBC compressor */
struct compressor *fbc_compressor;
+
+   struct dc_debug_data debug_data;
 };
 
 enum frame_buffer_mode {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c 
b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
index 6882dc9..8900a04 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
@@ -337,7 +337,7 @@ static int dce112_set_clock(
 
 static void dce_clock_read_integrated_info(struct dce_dccg *clk_dce)
 {
-   struct dc_debug *debug = &clk_dce->base.ctx->dc->debug;
+   struct dc_debug_options *debug = &clk_dce->base.ctx->dc->debug;
struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
struct integrated_info info = { { { 0 } } };
struct dc_firmware_info fw_info = { { 0 } };
@@ -824,7 +824,7 @@ struct dccg *dce120_dccg_create(struct dc_context *ctx)
 #ifdef CONFIG_DRM_AMD_DC_DCN1_0
 struct dccg *dcn1_dccg_create(struct dc_context *ctx)
 {
-   struct dc_debug *debug = &ctx->dc->debug;
+   struct dc_debug_options *debug = &ctx->dc->debug;
struct dc_bios *bp = ctx->dc_bios;
struct dc_firmware_info fw_info = { { 0 } };
struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 
b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index e389832..f7d02f2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -404,7 +404,7 @@ static const struct resource_caps res_cap = {
.num_pll = 6,
 };
 
-static const struct dc_debug debug_defaults = {
+static const struct dc_debug_options debug_defaults = {
.disable_clock_gate = true,
 };
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 

[PATCH 3/7] drm/amd/display: On dce100, set clocks to 0 on suspend

2018-07-17 Thread sunpeng.li
From: David Francis 

[Why]
When a dce100 asic was suspended, the clocks were not set to 0.
Upon resume, the new clock was compared to the existing clock;
they were found to be the same, and so the clock was not set.
This resulted in a pernicious blackscreen.

[How]
In atomic commit, check to see if there are any active pipes.
If no, set clocks to 0

Change-Id: Ic175d5fa48cac5e306272071f113c33f8b7a18c8
Signed-off-by: David Francis 
Reviewed-by: Tony Cheng 
Acked-by: Leo Li 
---
 .../gpu/drm/amd/display/dc/dce100/dce100_resource.c   | 19 ---
 1 file changed, 16 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 
b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 8ed8eac..ad8ad4e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -678,9 +678,22 @@ bool dce100_validate_bandwidth(
struct dc  *dc,
struct dc_state *context)
 {
-   /* TODO implement when needed but for now hardcode max value*/
-   context->bw.dce.dispclk_khz = 681000;
-   context->bw.dce.yclk_khz = 25 * MEMORY_TYPE_MULTIPLIER;
+   int i;
+   bool at_least_one_pipe = false;
+
+   for (i = 0; i < dc->res_pool->pipe_count; i++) {
+   if (context->res_ctx.pipe_ctx[i].stream)
+   at_least_one_pipe = true;
+   }
+
+   if (at_least_one_pipe) {
+   /* TODO implement when needed but for now hardcode max value*/
+   context->bw.dce.dispclk_khz = 681000;
+   context->bw.dce.yclk_khz = 25 * MEMORY_TYPE_MULTIPLIER;
+   } else {
+   context->bw.dce.dispclk_khz = 0;
+   context->bw.dce.yclk_khz = 0;
+   }
 
return true;
 }
-- 
2.7.4



[PATCH 2/7] drm/amd/display: allow diags to skip initial link training

2018-07-17 Thread sunpeng.li
From: Tony Cheng 

[why]
diag specifies the full config and is only concerned about pass/fail at the end

having inter-op code like verifying we can actually train at the reported link
rate slows down diag tests and adds complexity we don't need

[how]
add a dc_debug option to skip the capability link training

also remove hbr from the function name, as verify is not specific to hbr

Change-Id: Icdfb60f57b5d564899b09d853b342ad5ff41c71b
Signed-off-by: Tony Cheng 
Reviewed-by: Ken Chalmers 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/core/dc_link.c| 2 +-
 drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 7 ++-
 drivers/gpu/drm/amd/display/dc/dc.h  | 1 +
 drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h  | 2 +-
 4 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 4de68fa..0adaabc 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -760,7 +760,7 @@ bool dc_link_detect(struct dc_link *link, enum 
dc_detect_reason reason)
 */
 
/* deal with non-mst cases */
-   dp_hbr_verify_link_cap(link, &link->reported_link_cap);
+   dp_verify_link_cap(link, &link->reported_link_cap);
}
 
/* HDMI-DVI Dongle */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 8fbb999..88acc88 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1086,7 +1086,7 @@ static struct dc_link_settings get_max_link_cap(struct 
dc_link *link)
return max_link_cap;
 }
 
-bool dp_hbr_verify_link_cap(
+bool dp_verify_link_cap(
struct dc_link *link,
struct dc_link_settings *known_limit_link_setting)
 {
@@ -1101,6 +1101,11 @@ bool dp_hbr_verify_link_cap(
enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
enum link_training_result status;
 
+   if (link->dc->debug.skip_detection_link_training) {
+   link->verified_link_cap = *known_limit_link_setting;
+   return true;
+   }
+
success = false;
skip_link_training = false;
 
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index b10dc17..46d9a1b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -258,6 +258,7 @@ struct dc_debug {
bool avoid_vbios_exec_table;
bool scl_reset_length10;
bool hdmi20_disable;
+   bool skip_detection_link_training;
 
struct {
uint32_t ltFailCount;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h 
b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index 2f783c6..697b5ee 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -33,7 +33,7 @@ struct dc_link;
 struct dc_stream_state;
 struct dc_link_settings;
 
-bool dp_hbr_verify_link_cap(
+bool dp_verify_link_cap(
struct dc_link *link,
struct dc_link_settings *known_limit_link_setting);
 
-- 
2.7.4



[PATCH 5/7] drm/amd/display: Decouple aux from i2c

2018-07-17 Thread sunpeng.li
From: Bhawanpreet Lakha 

[Why]
Aux engine is created from i2caux layer. We want to remove this layer
and use the engine directly.

[How]
Decouple aux engine from i2caux. Move aux engine related code to dce folder and 
use
dc resource pool to manage the engine. And use the engine functions directly

Change-Id: Iecb609fe815dab31ed6b6100916c4b49ed6539a0
Signed-off-by: Bhawanpreet Lakha 
Reviewed-by: Harry Wentland 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c  |  22 +-
 drivers/gpu/drm/amd/display/dc/dce/Makefile|   2 +-
 drivers/gpu/drm/amd/display/dc/dce/dce_aux.c   | 942 +
 drivers/gpu/drm/amd/display/dc/dce/dce_aux.h   | 111 +++
 .../drm/amd/display/dc/dce100/dce100_resource.c|  42 +
 .../drm/amd/display/dc/dce110/dce110_resource.c|  45 +
 .../drm/amd/display/dc/dce112/dce112_resource.c|  47 +
 .../drm/amd/display/dc/dce120/dce120_resource.c|  42 +
 .../gpu/drm/amd/display/dc/dce80/dce80_resource.c  |  44 +
 .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c  |  44 +
 drivers/gpu/drm/amd/display/dc/i2caux/engine.h |   1 +
 drivers/gpu/drm/amd/display/dc/inc/core_types.h|   2 +-
 drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h | 113 +++
 drivers/gpu/drm/amd/display/dc/inc/hw/engine.h | 106 +++
 14 files changed, 1549 insertions(+), 14 deletions(-)
 create mode 100644 drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
 create mode 100644 drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
 create mode 100644 drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
 create mode 100644 drivers/gpu/drm/amd/display/dc/inc/hw/engine.h

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index 08c9d73..4019fe07 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -33,10 +33,8 @@
 #include "include/vector.h"
 #include "core_types.h"
 #include "dc_link_ddc.h"
-#include "i2caux/engine.h"
-#include "i2caux/i2c_engine.h"
-#include "i2caux/aux_engine.h"
-#include "i2caux/i2caux.h"
+#include "engine.h"
+#include "aux_engine.h"
 
 #define AUX_POWER_UP_WA_DELAY 500
 #define I2C_OVER_AUX_DEFER_WA_DELAY 70
@@ -641,9 +639,9 @@ int dc_link_aux_transfer(struct ddc_service *ddc,
 enum aux_transaction_type type,
 enum i2caux_transaction_action action)
 {
-   struct i2caux *i2caux = ddc->ctx->i2caux;
struct ddc *ddc_pin = ddc->ddc_pin;
-   struct aux_engine *engine;
+   struct engine *engine;
+   struct aux_engine *aux_engine;
enum aux_channel_operation_result operation_result;
struct aux_request_transaction_data aux_req;
struct aux_reply_transaction_data aux_rep;
@@ -654,7 +652,8 @@ int dc_link_aux_transfer(struct ddc_service *ddc,
memset(&aux_req, 0, sizeof(aux_req));
memset(&aux_rep, 0, sizeof(aux_rep));
 
-   engine = i2caux->funcs->acquire_aux_engine(i2caux, ddc_pin);
+   engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
+   aux_engine = engine->funcs->acquire(engine, ddc_pin);
 
aux_req.type = type;
aux_req.action = action;
@@ -664,15 +663,15 @@ int dc_link_aux_transfer(struct ddc_service *ddc,
aux_req.length = size;
aux_req.data = buffer;
 
-   engine->funcs->submit_channel_request(engine, &aux_req);
-   operation_result = engine->funcs->get_channel_status(engine, &returned_bytes);
+   aux_engine->funcs->submit_channel_request(aux_engine, &aux_req);
+   operation_result = aux_engine->funcs->get_channel_status(aux_engine, &returned_bytes);
 
switch (operation_result) {
case AUX_CHANNEL_OPERATION_SUCCEEDED:
res = returned_bytes;
 
if (res <= size && res >= 0)
-   res = engine->funcs->read_channel_reply(engine, size,
+   res = aux_engine->funcs->read_channel_reply(aux_engine, 
size,
buffer, reply,
);
 
@@ -686,8 +685,7 @@ int dc_link_aux_transfer(struct ddc_service *ddc,
res = -1;
break;
}
-
-   i2caux->funcs->release_engine(i2caux, &engine->base);
+   aux_engine->base.funcs->release_engine(&aux_engine->base);
return res;
 }
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile 
b/drivers/gpu/drm/amd/display/dc/dce/Makefile
index 11401fd..825537b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
@@ -28,7 +28,7 @@
 
 DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
 dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
-dce_clocks.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o
+dce_clocks.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o
 
 
 AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
diff 

[PATCH 0/7] DC Patches Jul 17, 2018

2018-07-17 Thread sunpeng.li
From: "Leo (Sunpeng) Li" 

Summary of change:
* De-midlayering work on AUX engine
* Fix S3 resume blackscreen for DCE10
* De-spamming of log for DCE10

Bhawanpreet Lakha (1):
  drm/amd/display: Decouple aux from i2c

David Francis (1):
  drm/amd/display: On dce100, set clocks to 0 on suspend

Harry Wentland (1):
  drm/amd/display: dal 3.1.58

Jun Lei (1):
  drm/amd/display: separate dc_debug into dc_debug_options and dc_debug
data

Mikita Lipski (1):
  drm/amd/display: Remove unnecessary warning

Tony Cheng (1):
  drm/amd/display: allow diags to skip initial link training

vikrant mhaske (1):
  drm/amd/display: DPP CM ICSC AYCRCB format support

 drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c   |   2 +-
 drivers/gpu/drm/amd/display/dc/core/dc_link.c  |   2 +-
 drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c  |  22 +-
 drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c   |   9 +-
 drivers/gpu/drm/amd/display/dc/dc.h|  22 +-
 drivers/gpu/drm/amd/display/dc/dc_hw_types.h   |   2 +-
 drivers/gpu/drm/amd/display/dc/dce/Makefile|   2 +-
 drivers/gpu/drm/amd/display/dc/dce/dce_aux.c   | 942 +
 drivers/gpu/drm/amd/display/dc/dce/dce_aux.h   | 111 +++
 drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c|   4 +-
 .../amd/display/dc/dce100/dce100_hw_sequencer.c|   4 -
 .../drm/amd/display/dc/dce100/dce100_resource.c|  61 +-
 .../drm/amd/display/dc/dce110/dce110_resource.c|  45 +
 .../drm/amd/display/dc/dce112/dce112_resource.c|  47 +
 .../drm/amd/display/dc/dce120/dce120_resource.c|  44 +-
 .../gpu/drm/amd/display/dc/dce80/dce80_resource.c  |  44 +
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c  |   4 +
 .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c  |  48 +-
 drivers/gpu/drm/amd/display/dc/i2caux/engine.h |   1 +
 drivers/gpu/drm/amd/display/dc/inc/core_types.h|   2 +-
 drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h|   2 +-
 drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h | 113 +++
 drivers/gpu/drm/amd/display/dc/inc/hw/engine.h | 106 +++
 23 files changed, 1598 insertions(+), 41 deletions(-)
 create mode 100644 drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
 create mode 100644 drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
 create mode 100644 drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
 create mode 100644 drivers/gpu/drm/amd/display/dc/inc/hw/engine.h

-- 
2.7.4



[PATCH 1/7] drm/amd/display: Remove unnecessary warning

2018-07-17 Thread sunpeng.li
From: Mikita Lipski 

[why]
The warning message floods the dmesg log on Tonga even
though it is expected to have a pix_clk set to zero,
when there is no display connected.
[how]
remove the assert

Change-Id: I4ca1e42439369b2305694b403457b5de60fc4ab1
Signed-off-by: Mikita Lipski 
Reviewed-by: Harry Wentland 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c | 4 
 1 file changed, 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
index ec32213..74c05e8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
@@ -149,10 +149,6 @@ static uint32_t get_max_pixel_clock_for_all_paths(
max_pix_clk =

pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
}
-
-   if (max_pix_clk == 0)
-   ASSERT(0);
-
return max_pix_clk;
 }
 
-- 
2.7.4



[PATCH 4/7] drm/amd/display: DPP CM ICSC AYCRCB8888 format support

2018-07-17 Thread sunpeng.li
From: vikrant mhaske 

[why]
Diags has POR to run the video workload using AYCrCb8888 through DCN;
capture it through DWB and send it to VCN hardware to encode

[how]
added the code to support this format so that DPP ICSC will be able to
convert it from YUV444 to internal RGB and DWB OCSC will be able to
convert from internal RGB to YUV420

Change-Id: I6d6f6905e90881e8ac1622473bf0aa5a6cd8eacc
Signed-off-by: vikrant mhaske 
Reviewed-by: Tony Cheng 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dc_hw_types.h  | 2 +-
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 4 
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h 
b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index e9c1ec5..bbc384f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -192,7 +192,7 @@ enum surface_pixel_format {
/*swaped & float*/
SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F,
/*grow graphics here if necessary */
-
+   SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888,
SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr =
SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index deae210..b8fc62a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -294,6 +294,10 @@ void hubp1_program_pixel_format(
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 66);
break;
+   case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888:
+   REG_UPDATE(DCSURF_SURFACE_CONFIG,
+   SURFACE_PIXEL_FORMAT, 12);
+   break;
default:
BREAK_TO_DEBUGGER();
break;
-- 
2.7.4



[PATCH] drm/amdgpu: fix spelling mistake "successed" -> "succeeded"

2018-07-17 Thread Colin King
From: Colin Ian King 

Trivial fix to spelling mistake in dev_err error message.

Signed-off-by: Colin Ian King 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 9883fa9bb41b..e9feb3c58389 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2004,7 +2004,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct 
amdgpu_device *adev)
continue;
 
r = block->version->funcs->hw_init(adev);
-   DRM_INFO("RE-INIT: %s %s\n", 
block->version->funcs->name, r?"failed":"successed");
+   DRM_INFO("RE-INIT: %s %s\n", 
block->version->funcs->name, r?"failed":"succeeded");
if (r)
return r;
}
@@ -2039,7 +2039,7 @@ static int amdgpu_device_ip_reinit_late_sriov(struct 
amdgpu_device *adev)
continue;
 
r = block->version->funcs->hw_init(adev);
-   DRM_INFO("RE-INIT: %s %s\n", 
block->version->funcs->name, r?"failed":"successed");
+   DRM_INFO("RE-INIT: %s %s\n", 
block->version->funcs->name, r?"failed":"succeeded");
if (r)
return r;
}
@@ -3092,7 +3092,7 @@ static int amdgpu_device_handle_vram_lost(struct 
amdgpu_device *adev)
  * @adev: amdgpu device pointer
  *
  * attempt to do soft-reset or full-reset and reinitialize Asic
- * return 0 means successed otherwise failed
+ * return 0 means succeeded otherwise failed
  */
 static int amdgpu_device_reset(struct amdgpu_device *adev)
 {
@@ -3170,7 +3170,7 @@ static int amdgpu_device_reset(struct amdgpu_device *adev)
  * @from_hypervisor: request from hypervisor
  *
  * do VF FLR and reinitialize Asic
- * return 0 means successed otherwise failed
+ * return 0 means succeeded otherwise failed
  */
 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
 bool from_hypervisor)
@@ -3295,7 +3295,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
dev_info(adev->dev, "GPU reset(%d) failed\n", 
atomic_read(&adev->gpu_reset_counter));
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
} else {
-   dev_info(adev->dev, "GPU reset(%d) 
successed!\n",atomic_read(&adev->gpu_reset_counter));
+   dev_info(adev->dev, "GPU reset(%d) 
succeeded!\n",atomic_read(&adev->gpu_reset_counter));
}
 
amdgpu_vf_error_trans_all(adev);
-- 
2.17.1



[PATCH 1/2] drm/amd/pp: Set Max clock level to display by default

2018-07-17 Thread Rex Zhu
avoid the error in dmesg:
[drm:dm_pp_get_static_clocks]
*ERROR* DM_PPLIB: invalid powerlevel state: 0!

Signed-off-by: Rex Zhu 
---
 drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 7 +--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c 
b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 75c2082..63adcd1 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -998,7 +998,7 @@ static int pp_get_display_power_level(void *handle,
 static int pp_get_current_clocks(void *handle,
struct amd_pp_clock_info *clocks)
 {
-   struct amd_pp_simple_clock_info simple_clocks;
+   struct amd_pp_simple_clock_info simple_clocks = { 0 };
struct pp_clock_info hw_clocks;
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
@@ -1034,7 +1034,8 @@ static int pp_get_current_clocks(void *handle,
clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
 
-   clocks->max_clocks_state = simple_clocks.level;
+   if (simple_clocks.level == 0)
+   clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
 
if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, 
&hwmgr->current_ps->hardware, &hw_clocks)) {
clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
@@ -1137,6 +1138,8 @@ static int pp_get_display_mode_validation_clocks(void 
*handle,
if (!hwmgr || !hwmgr->pm_en ||!clocks)
return -EINVAL;
 
+   clocks->level = PP_DAL_POWERLEVEL_7;
+
mutex_lock(&hwmgr->smu_lock);
 
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 
PHM_PlatformCaps_DynamicPatchPowerState))
-- 
1.9.1



[PATCH 2/2] drm/amd/display: Convert 10kHz clks from PPLib into kHz

2018-07-17 Thread Rex Zhu
Except for fields explicitly named *_in_khz, the default clock unit in
powerplay is 10 kHz, so multiply by 10, as display expects clock
frequencies in kHz.
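
As a side note, the conversion boils down to something like this (a
hypothetical helper for illustration only, not part of the patch):

```
/* powerplay reports most clocks in units of 10 kHz; display expects kHz. */
static inline uint32_t pp_clk_10khz_to_khz(uint32_t clk_10khz)
{
	return clk_10khz * 10;
}
```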

Signed-off-by: Rex Zhu 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index c69ae78..fbe878a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -469,8 +469,8 @@ bool dm_pp_get_static_clocks(
return false;
 
static_clk_info->max_clocks_state = 
pp_to_dc_powerlevel_state(pp_clk_info.max_clocks_state);
-   static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock;
-   static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock;
+   static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock * 10;
+   static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock * 10;
 
return true;
 }
-- 
1.9.1



[PATCH] drm/amd/pp: Set Max clock level to display by default

2018-07-17 Thread Rex Zhu
avoid the error in dmesg:
[drm:dm_pp_get_static_clocks]
*ERROR* DM_PPLIB: invalid powerlevel state: 0!

Signed-off-by: Rex Zhu 
---
 drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c 
b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 75c2082..aec2ed0 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -998,7 +998,7 @@ static int pp_get_display_power_level(void *handle,
 static int pp_get_current_clocks(void *handle,
struct amd_pp_clock_info *clocks)
 {
-   struct amd_pp_simple_clock_info simple_clocks;
+   struct amd_pp_simple_clock_info simple_clocks = { 0 };
struct pp_clock_info hw_clocks;
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
@@ -1034,7 +1034,8 @@ static int pp_get_current_clocks(void *handle,
clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
 
-   clocks->max_clocks_state = simple_clocks.level;
+   if (simple_clocks.level == 0)
+   clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
 
if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, 
&hwmgr->current_ps->hardware, &hw_clocks)) {
clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
-- 
1.9.1



[PATCH] drm/amdgpu/display: Replace CONFIG_DRM_AMD_DC_DCN1_0 with CONFIG_X86

2018-07-17 Thread Michel Dänzer
From: Michel Dänzer 

Allowing CONFIG_DRM_AMD_DC_DCN1_0 to be disabled on X86 was an
opportunity for display with Raven Ridge accidentally not working.

Signed-off-by: Michel Dänzer 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c|  2 +-
 drivers/gpu/drm/amd/display/Kconfig   |  8 
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |  8 
 drivers/gpu/drm/amd/display/dc/Makefile   |  2 +-
 .../display/dc/bios/command_table_helper2.c   |  2 +-
 drivers/gpu/drm/amd/display/dc/calcs/Makefile |  2 +-
 drivers/gpu/drm/amd/display/dc/core/dc.c  |  6 +++---
 .../gpu/drm/amd/display/dc/core/dc_debug.c|  2 +-
 .../gpu/drm/amd/display/dc/core/dc_resource.c | 12 +--
 drivers/gpu/drm/amd/display/dc/dc.h   |  2 +-
 .../drm/amd/display/dc/dce/dce_clock_source.c |  6 +++---
 .../drm/amd/display/dc/dce/dce_clock_source.h |  2 +-
 .../gpu/drm/amd/display/dc/dce/dce_clocks.c   |  8 
 .../gpu/drm/amd/display/dc/dce/dce_clocks.h   |  2 +-
 drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c |  6 +++---
 .../amd/display/dc/dce/dce_stream_encoder.c   | 20 +--
 .../display/dc/dce110/dce110_hw_sequencer.c   |  2 +-
 drivers/gpu/drm/amd/display/dc/gpio/Makefile  |  2 +-
 .../gpu/drm/amd/display/dc/gpio/hw_factory.c  |  4 ++--
 .../drm/amd/display/dc/gpio/hw_translate.c|  4 ++--
 .../gpu/drm/amd/display/dc/i2caux/Makefile|  2 +-
 .../gpu/drm/amd/display/dc/i2caux/i2caux.c|  4 ++--
 .../gpu/drm/amd/display/dc/inc/core_types.h   |  6 +++---
 drivers/gpu/drm/amd/display/dc/irq/Makefile   |  2 +-
 .../gpu/drm/amd/display/dc/irq/irq_service.c  |  2 +-
 drivers/gpu/drm/amd/display/dc/os_types.h |  2 +-
 26 files changed, 56 insertions(+), 64 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 709e4a386a0e..fb8c72851dfb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2200,7 +2200,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type 
asic_type)
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
case CHIP_RAVEN:
 #endif
return amdgpu_dc != 0;
diff --git a/drivers/gpu/drm/amd/display/Kconfig 
b/drivers/gpu/drm/amd/display/Kconfig
index 4c35625eb2c7..325083b0297e 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -9,14 +9,6 @@ config DRM_AMD_DC
  support for AMDGPU. This adds required support for Vega and
  Raven ASICs.
 
-config DRM_AMD_DC_DCN1_0
-   bool "DCN 1.0 Raven family"
-   depends on DRM_AMD_DC && X86
-   default y
-   help
- Choose this option if you want to have
- RV family for display engine
-
 config DEBUG_KERNEL_DC
bool "Enable kgdb break in DC"
depends on DRM_AMD_DC
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 952691c6f81e..8e3ebd988043 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -58,7 +58,7 @@
 #include 
 #include 
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
 #include "ivsrcid/irqsrcs_dcn_1_0.h"
 
 #include "dcn/dcn_1_0_offset.h"
@@ -1188,7 +1188,7 @@ static int dce110_register_irq_handlers(struct 
amdgpu_device *adev)
return 0;
 }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
 /* Register IRQ sources and initialize IRQ callbacks */
 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
 {
@@ -1522,7 +1522,7 @@ static int amdgpu_dm_initialize_drm_device(struct 
amdgpu_device *adev)
goto fail;
}
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
case CHIP_RAVEN:
if (dcn10_register_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
@@ -1767,7 +1767,7 @@ static int dm_early_init(void *handle)
adev->mode_info.num_dig = 6;
adev->mode_info.plane_type = dm_plane_type_default;
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
case CHIP_RAVEN:
adev->mode_info.num_crtc = 4;
adev->mode_info.num_hpd = 4;
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile 
b/drivers/gpu/drm/amd/display/dc/Makefile
index aed538a4d1ba..532a515fda9a 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -25,7 +25,7 @@
 
 DC_LIBS = basics bios calcs dce gpio i2caux irq virtual
 
-ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ifdef CONFIG_X86
 DC_LIBS += dcn10 dml
 endif
 
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c 
b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index bbbcef566c55..770ff89ba7e1 100644
--- 

Re: KMS does not work on MSI B350M MORTAR with AMD Ryzen 3 2200g

2018-07-17 Thread Paul Menzel
Dear Alex,


On 07/17/18 00:00, Paul Menzel wrote:

> Am 16.07.2018 um 18:30 schrieb Alex Deucher:
>> On Mon, Jul 16, 2018 at 12:14 PM, Paul Menzel
>>  wrote:
>>> Dear Linux folks,
>>>
>>>
>>> Trying to boot Debian Buster/testing with Linux 4.16.16 on a MSI B350M 
>>> MORTAR [1]
>>> with a Ryzen 3 2200g and a monitor connected over HDMI, when kernel mode 
>>> setting
>>> (KMS) is enabled the monitor does not receive any updates, that means I do 
>>> not
>>> see the login prompt and only the Linux and systemd service messages from 
>>> before.
>>> The system seems to running though.
>>>
>>> Debian’s package *firmware-amd-graphics* [2] is from August 2017 and 
>>> therefore
>>> outdated. I manually installed the firmware files from the Linux firmware
>>> repository [3].
>>>
>>> Here is the log.
>>>
>>> ```
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.652026] [drm] BIOS signature 
>>> incorrect 0 0
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.652058] ATOM BIOS: 113-RAVEN-106
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.652096] [drm] vm size is 262144 
>>> GB, 4 levels, block size is 9-bit, fragment size is 9-bit
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.652107] amdgpu :38:00.0: VRAM: 
>>> 1024M 0x00F4 - 0x00F43FFF (1024M used)
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.652109] amdgpu :38:00.0: GTT: 
>>> 1024M 0x00F5 - 0x00F53FFF
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.652113] [drm] Detected VRAM 
>>> RAM=1024M, BAR=256M
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.652114] [drm] RAM width 128bits 
>>> UNKNOWN
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.652170] [TTM] Zone  kernel: 
>>> Available graphics memory: 7703370 kiB
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.652171] [TTM] Zone   dma32: 
>>> Available graphics memory: 2097152 kiB
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.652171] [TTM] Initializing pool 
>>> allocator
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.652174] [TTM] Initializing DMA 
>>> pool allocator
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.652202] [drm] amdgpu: 1024M of 
>>> VRAM memory ready
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.652203] [drm] amdgpu: 3072M of GTT 
>>> memory ready.
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.652220] [drm] GART: num cpu pages 
>>> 262144, num gpu pages 262144
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.652433] [drm] PCIE GART of 1024M 
>>> enabled (table at 0x00F40080).
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.652541] [drm] Supports vblank 
>>> timestamp caching Rev 2 (21.10.2013).
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.652541] [drm] Driver supports 
>>> precise vblank timestamp query.
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.652983] amdgpu :38:00.0: 
>>> firmware: direct-loading firmware amdgpu/raven_asd.bin
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.653276] amdgpu :38:00.0: 
>>> firmware: direct-loading firmware amdgpu/raven_pfp.bin
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.653498] amdgpu :38:00.0: 
>>> firmware: direct-loading firmware amdgpu/raven_me.bin
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.653668] amdgpu :38:00.0: 
>>> firmware: direct-loading firmware amdgpu/raven_ce.bin
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.653960] amdgpu :38:00.0: 
>>> firmware: direct-loading firmware amdgpu/raven_rlc.bin
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.654817] amdgpu :38:00.0: 
>>> firmware: direct-loading firmware amdgpu/raven_mec.bin
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.656137] amdgpu :38:00.0: 
>>> firmware: direct-loading firmware amdgpu/raven_mec2.bin
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.658217] amdgpu :38:00.0: 
>>> firmware: direct-loading firmware amdgpu/raven_sdma.bin
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.658220] [drm] use_doorbell being 
>>> set to: [true]
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.659663] amdgpu :38:00.0: 
>>> firmware: direct-loading firmware amdgpu/raven_vcn.bin
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.659667] [drm] Found VCN firmware 
>>> Version: 1.73 Family ID: 18
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.847424] [drm] VCN decode and 
>>> encode initialized successfully.
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.852189] amdgpu :38:00.0: ring 
>>> 0(gfx) uses VM inv eng 4 on hub 0
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.852191] amdgpu :38:00.0: ring 
>>> 1(comp_1.0.0) uses VM inv eng 5 on hub 0
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.852193] amdgpu :38:00.0: ring 
>>> 2(comp_1.1.0) uses VM inv eng 6 on hub 0
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.852194] amdgpu :38:00.0: ring 
>>> 3(comp_1.2.0) uses VM inv eng 7 on hub 0
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.852195] amdgpu :38:00.0: ring 
>>> 4(comp_1.3.0) uses VM inv eng 8 on hub 0
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.852197] amdgpu :38:00.0: ring 
>>> 5(comp_1.0.1) uses VM inv eng 9 on hub 0
>>> Jul 16 17:54:39 tokeiihto kernel: [   25.852198] 

[PATCH xf86-video-ati] glamor: Invalidate cached GEM handle in radeon_set_pixmap_bo

2018-07-17 Thread Michel Dänzer
From: Michel Dänzer 

We continued using the stale cached handle, causing issues e.g. when
resizing the screen via RandR.

Reported-by: iive on IRC
Signed-off-by: Michel Dänzer 
---
 src/radeon.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/radeon.h b/src/radeon.h
index 450c69aa8..1a1edb1ba 100644
--- a/src/radeon.h
+++ b/src/radeon.h
@@ -735,6 +735,7 @@ static inline Bool radeon_set_pixmap_bo(PixmapPtr pPix, 
struct radeon_buffer *bo
return TRUE;
 
radeon_buffer_unref(&priv->bo);
+   priv->handle_valid = FALSE;
}
 
drmmode_fb_reference(pRADEONEnt->fd, >fb, NULL);
-- 
2.18.0



Re: [V3] vga_switcheroo: set audio client id according to bound GPU id

2018-07-17 Thread Takashi Iwai
On Tue, 17 Jul 2018 10:56:37 +0200,
jimqu wrote:
> 
> 
> 
> On 2018年07月17日 16:52, Takashi Iwai wrote:
> > On Tue, 17 Jul 2018 10:38:58 +0200,
> > Lukas Wunner wrote:
> >> On Tue, Jul 17, 2018 at 04:20:50PM +0800, Jim Qu wrote:
> >>> On modern laptops, more and more platforms have two GPUs, and each
> >>> of them may have an audio codec for HDMI/DP output. For a dGPU with
> >>> no output, the audio codec is usually disabled.
> >>>
> >>> In the current HDA audio driver, all codecs are set as
> >>> VGA_SWITCHEROO_DIS, so the audio that is bound to the UMA GPU will
> >>> be suspended if the user uses debugfs to control power.
> >>>
> >>> On the HDA driver side, it is difficult to know which GPU the audio
> >>> is bound to. So pass the bound GPU pci dev to vga_switcheroo.
> >>>
> >>> If the audio client is not the third registration, the audio id will
> >>> be set in the vga_switcheroo enable function. If the audio client is
> >>> the last registration when vga_switcheroo_ready() returns true, we
> >>> should get the audio client id from the bound GPU directly.
> >>>
> >>> Signed-off-by: Jim Qu 
> >> Reviewed-by: Lukas Wunner 
> >>
> >> @Takashi, any preference which tree to merge this through?
> >> sound or drm-misc, either way would seem fine to me.  I think
> >> there's going to be one final drm-misc pull sent to Dave this
> >> week, after that it's 4.20.
> > Since it's basically an audio problem and I'd love to merge it for
> > 4.19, I'd prefer taking through sound git tree, unless anyone has
> > objection.
> 
> Thanks to Takashi and Lukas for the great help. Please kindly help merge the
> patch into a suitable branch.

I pushed the fix to topic/vga_switcheroo branch of sound git tree now
so that 0day bot can check it.  I'll wait for a while and merge it
later to for-next branch if nothing happens.

The branch is (and will be) based on fresh 4.18-rc5 so that other trees
may merge it cleanly.


thanks,

Takashi


Re: [PATCH libdrm] amdgpu: make sure to set CLOEXEC on duplicated FDs

2018-07-17 Thread Zhang, Jerry (Junwei)

On 07/17/2018 05:04 PM, Christian König wrote:

Otherwise we leak file descriptors into child processes.

Signed-off-by: Christian König 


Yeah, that's the key point: plain dup() does not carry the CLOEXEC flag over to the new descriptor.
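
A small illustration of the difference (sketch only, relying on the usual POSIX semantics):

```
#include <fcntl.h>

/* dup() always creates the new descriptor with FD_CLOEXEC cleared, so it
 * would leak into exec()'d children; F_DUPFD_CLOEXEC duplicates the fd and
 * sets the close-on-exec flag atomically. */
static int dup_cloexec(int fd)
{
	return fcntl(fd, F_DUPFD_CLOEXEC, 0);
}
```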

Reviewed-and-Tested-by: Junwei Zhang 


---
  amdgpu/amdgpu_device.c | 5 +++--
  1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/amdgpu/amdgpu_device.c b/amdgpu/amdgpu_device.c
index 34ac95b8..d7aec6a4 100644
--- a/amdgpu/amdgpu_device.c
+++ b/amdgpu/amdgpu_device.c
@@ -34,6 +34,7 @@
  #include 
  #include 
  #include 
+#include <fcntl.h>

  #include "xf86drm.h"
  #include "amdgpu_drm.h"
@@ -205,7 +206,7 @@ int amdgpu_device_initialize(int fd,
return r;
}
if ((flag_auth) && (!flag_authexist)) {
-   dev->flink_fd = dup(fd);
+   dev->flink_fd = fcntl(fd, F_DUPFD_CLOEXEC, 0);
}
*major_version = dev->major_version;
*minor_version = dev->minor_version;
@@ -239,7 +240,7 @@ int amdgpu_device_initialize(int fd,
goto cleanup;
}

-   dev->fd = dup(fd);
+   dev->fd = fcntl(fd, F_DUPFD_CLOEXEC, 0);
dev->flink_fd = dev->fd;
dev->major_version = version->version_major;
dev->minor_version = version->version_minor;




Re: [PATCH libdrm] amdgpu: make sure to set CLOEXEC on duplicated FDs

2018-07-17 Thread Michel Dänzer
On 2018-07-17 11:04 AM, Christian König wrote:
> Otherwise we leak file descriptors into child processes.
> 
> Signed-off-by: Christian König 
> ---
>  amdgpu/amdgpu_device.c | 5 +++--
>  1 file changed, 3 insertions(+), 2 deletions(-)
> 
> diff --git a/amdgpu/amdgpu_device.c b/amdgpu/amdgpu_device.c
> index 34ac95b8..d7aec6a4 100644
> --- a/amdgpu/amdgpu_device.c
> +++ b/amdgpu/amdgpu_device.c
> @@ -34,6 +34,7 @@
>  #include 
>  #include 
>  #include 
> +#include <fcntl.h>
>  
>  #include "xf86drm.h"
>  #include "amdgpu_drm.h"
> @@ -205,7 +206,7 @@ int amdgpu_device_initialize(int fd,
>   return r;
>   }
>   if ((flag_auth) && (!flag_authexist)) {
> - dev->flink_fd = dup(fd);
> + dev->flink_fd = fcntl(fd, F_DUPFD_CLOEXEC, 0);
>   }
>   *major_version = dev->major_version;
>   *minor_version = dev->minor_version;
> @@ -239,7 +240,7 @@ int amdgpu_device_initialize(int fd,
>   goto cleanup;
>   }
>  
> - dev->fd = dup(fd);
> + dev->fd = fcntl(fd, F_DUPFD_CLOEXEC, 0);
>   dev->flink_fd = dev->fd;
>   dev->major_version = version->version_major;
>   dev->minor_version = version->version_minor;
> 

Nice catch.

Reviewed-by: Michel Dänzer 


-- 
Earthling Michel Dänzer   |   http://www.amd.com
Libre software enthusiast | Mesa and X developer


[PATCH libdrm] amdgpu: make sure to set CLOEXEC on duplicated FDs

2018-07-17 Thread Christian König
Otherwise we leak file descriptors into child processes.

Signed-off-by: Christian König 
---
 amdgpu/amdgpu_device.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/amdgpu/amdgpu_device.c b/amdgpu/amdgpu_device.c
index 34ac95b8..d7aec6a4 100644
--- a/amdgpu/amdgpu_device.c
+++ b/amdgpu/amdgpu_device.c
@@ -34,6 +34,7 @@
 #include 
 #include 
 #include 
+#include <fcntl.h>
 
 #include "xf86drm.h"
 #include "amdgpu_drm.h"
@@ -205,7 +206,7 @@ int amdgpu_device_initialize(int fd,
return r;
}
if ((flag_auth) && (!flag_authexist)) {
-   dev->flink_fd = dup(fd);
+   dev->flink_fd = fcntl(fd, F_DUPFD_CLOEXEC, 0);
}
*major_version = dev->major_version;
*minor_version = dev->minor_version;
@@ -239,7 +240,7 @@ int amdgpu_device_initialize(int fd,
goto cleanup;
}
 
-   dev->fd = dup(fd);
+   dev->fd = fcntl(fd, F_DUPFD_CLOEXEC, 0);
dev->flink_fd = dev->fd;
dev->major_version = version->version_major;
dev->minor_version = version->version_minor;
-- 
2.14.1



Re: [V3] vga_switcheroo: set audio client id according to bound GPU id

2018-07-17 Thread jimqu



On 2018年07月17日 16:52, Takashi Iwai wrote:

On Tue, 17 Jul 2018 10:38:58 +0200,
Lukas Wunner wrote:

On Tue, Jul 17, 2018 at 04:20:50PM +0800, Jim Qu wrote:

On modern laptop, there are more and more platforms
have two GPUs, and each of them maybe have audio codec
for HDMP/DP output. For some dGPU which is no output,
audio codec usually is disabled.

In currect HDA audio driver, it will set all codec as
VGA_SWITCHEROO_DIS, the audio which is binded to UMA
will be suspended if user use debugfs to contorl power

In HDA driver side, it is difficult to know which GPU
the audio has binded to. So set the bound gpu pci dev
to vga_switcheroo.

if the audio client is not the third registration, audio
id will set in vga_switcheroo enable function. if the
audio client is the last registration when vga_switcheroo
_ready() get true, we should get audio client id from bound
GPU directly.

Signed-off-by: Jim Qu 

Reviewed-by: Lukas Wunner 

@Takashi, any preference which tree to merge this through?
sound or drm-misc, either way would seem fine to me.  I think
there's going to be one final drm-misc pull sent to Dave this
week, after that it's 4.20.

Since it's basically an audio problem and I'd love to merge it for
4.19, I'd prefer taking through sound git tree, unless anyone has
objection.


Thanks to Takashi and Lukas for the great help. Please kindly merge the
patch into the suitable branch.


Thanks
JimQu


thanks,

Takashi




Re: [PATCH libdrm] amdgpu: add amdgpu_bo_handle_type_kms_noimport

2018-07-17 Thread Michel Dänzer
On 2018-07-16 08:51 PM, Marek Olšák wrote:
> On Mon, Jul 16, 2018 at 12:05 PM, Michel Dänzer  wrote:
>> On 2018-07-13 08:47 PM, Marek Olšák wrote:
>>> On Fri, Jul 13, 2018 at 4:28 AM, Michel Dänzer  wrote:
>>
 I'd rather add the handle to the hash table in amdgpu_bo_alloc,
 amdgpu_create_bo_from_user_mem and amdgpu_bo_import instead of in
 amdgpu_bo_export, making amdgpu_bo_export(bo, amdgpu_bo_handle_type_kms,
 ...) essentially free. In the unlikely (since allocating a BO from the
 kernel is expensive) case that the hash table shows up on profiles, we
 can optimize it.
>>>
>>> The hash table isn't very good for high BO counts. The time complexity
>>> of a lookup is O(n).
>>
>> A lookup is only needed in amdgpu_bo_import. amdgpu_bo_alloc and
>> amdgpu_create_bo_from_user_mem can just add the handle to the hash
>> bucket directly.
>>
>> Do you know of, or can you imagine, any workload where amdgpu_bo_import
>> is called often enough for this to be a concern?
> 
> Fullscreen DRI2 or DRI3 re-imports buffers every frame.

DRI3 doesn't. The X server only imports each DRI3 buffer once, after
that it's referred to via the pixmap XID.


With DRI2 page flipping (ignoring that basically nobody's using that
anymore with radeonsi :), it's always the same set of buffers, so the
lookup can be made fast as discussed in the sub-thread with Christian.
(Also, DRI2 can only use page flipping with sync-to-vblank enabled, so
this happens on the order of hundreds of times per second max)


-- 
Earthling Michel Dänzer   |   http://www.amd.com
Libre software enthusiast | Mesa and X developer


Re: [V3] vga_switcheroo: set audio client id according to bound GPU id

2018-07-17 Thread Takashi Iwai
On Tue, 17 Jul 2018 10:38:58 +0200,
Lukas Wunner wrote:
> 
> On Tue, Jul 17, 2018 at 04:20:50PM +0800, Jim Qu wrote:
> > On modern laptop, there are more and more platforms
> > have two GPUs, and each of them maybe have audio codec
> > for HDMP/DP output. For some dGPU which is no output,
> > audio codec usually is disabled.
> > 
> > In currect HDA audio driver, it will set all codec as
> > VGA_SWITCHEROO_DIS, the audio which is binded to UMA
> > will be suspended if user use debugfs to contorl power
> > 
> > In HDA driver side, it is difficult to know which GPU
> > the audio has binded to. So set the bound gpu pci dev
> > to vga_switcheroo.
> > 
> > if the audio client is not the third registration, audio
> > id will set in vga_switcheroo enable function. if the
> > audio client is the last registration when vga_switcheroo
> > _ready() get true, we should get audio client id from bound
> > GPU directly.
> > 
> > Signed-off-by: Jim Qu 
> 
> Reviewed-by: Lukas Wunner 
> 
> @Takashi, any preference which tree to merge this through?
> sound or drm-misc, either way would seem fine to me.  I think
> there's going to be one final drm-misc pull sent to Dave this
> week, after that it's 4.20.

Since it's basically an audio problem and I'd love to merge it for
4.19, I'd prefer taking through sound git tree, unless anyone has
objection.


thanks,

Takashi


Re: [V3] vga_switcheroo: set audio client id according to bound GPU id

2018-07-17 Thread Lukas Wunner
On Tue, Jul 17, 2018 at 04:20:50PM +0800, Jim Qu wrote:
> On modern laptop, there are more and more platforms
> have two GPUs, and each of them maybe have audio codec
> for HDMP/DP output. For some dGPU which is no output,
> audio codec usually is disabled.
> 
> In currect HDA audio driver, it will set all codec as
> VGA_SWITCHEROO_DIS, the audio which is binded to UMA
> will be suspended if user use debugfs to contorl power
> 
> In HDA driver side, it is difficult to know which GPU
> the audio has binded to. So set the bound gpu pci dev
> to vga_switcheroo.
> 
> if the audio client is not the third registration, audio
> id will set in vga_switcheroo enable function. if the
> audio client is the last registration when vga_switcheroo
> _ready() get true, we should get audio client id from bound
> GPU directly.
> 
> Signed-off-by: Jim Qu 

Reviewed-by: Lukas Wunner 

@Takashi, any preference which tree to merge this through?
sound or drm-misc, either way would seem fine to me.  I think
there's going to be one final drm-misc pull sent to Dave this
week, after that it's 4.20.

Thanks,

Lukas


Re: [PATCH libdrm] amdgpu: add amdgpu_bo_handle_type_kms_noimport

2018-07-17 Thread Christian König

On 17.07.2018 at 10:30, Michel Dänzer wrote:

On 2018-07-17 10:19 AM, Christian König wrote:

On 17.07.2018 at 10:03, Michel Dänzer wrote:

On 2018-07-17 09:59 AM, Christian König wrote:

On 17.07.2018 at 09:46, Michel Dänzer wrote:

On 2018-07-17 09:33 AM, Christian König wrote:

On 17.07.2018 at 09:26, Michel Dänzer wrote:
[SNIP]

All that should be needed is one struct list_head per BO, 16 bytes on
64-bit.

+malloc overhead and that for *every* BO the application/driver
allocated.

The struct list_head can be stored in struct amdgpu_bo, no additional
malloc necessary.

Well, that sounds like we are not talking about the same code, do we?

IIRC the hashtable implementation in libdrm is using an ever growing
array for the BOs and *NOT* a linked list.

So let's use something more suitable, e.g.:

An array of 2^n struct list_head in struct amdgpu_device for the hash
buckets. The BO's handle is hashed to the bucket number

  handle & (2^n - 1)

and linked in there via struct list_head in struct amdgpu_bo.
amdgpu_bo_alloc and amdgpu_create_bo_from_user_mem add the handle at the
end of the list, amdgpu_bo_import adds it at or moves it to the beginning.


Yeah, that would certainly reduce the problem quite a bit and would 
allow us to get rid of the util_hash* implementation which to me always 
seemed to be a bit overkill.


I actually don't see a reason why amdgpu_create_bo_from_user_mem()
should add the handle at all; those BOs are not exportable.


Christian.


Re: [PATCH libdrm] amdgpu: add amdgpu_bo_handle_type_kms_noimport

2018-07-17 Thread Michel Dänzer
On 2018-07-17 10:19 AM, Christian König wrote:
> Am 17.07.2018 um 10:03 schrieb Michel Dänzer:
>> On 2018-07-17 09:59 AM, Christian König wrote:
>>> Am 17.07.2018 um 09:46 schrieb Michel Dänzer:
 On 2018-07-17 09:33 AM, Christian König wrote:
> Am 17.07.2018 um 09:26 schrieb Michel Dänzer:
> [SNIP]
 All that should be needed is one struct list_head per BO, 16 bytes on
 64-bit.
>>> +malloc overhead and that for *every* BO the application/driver
>>> allocated.
>> The struct list_head can be stored in struct amdgpu_bo, no additional
>> malloc necessary.
> 
> Well that sounds we are not talking about the same code, do we?
> 
> IIRC the hashtable implementation in libdrm is using an ever growing
> array for the BOs and *NOT* a linked list.

So let's use something more suitable, e.g.:

An array of 2^n struct list_head in struct amdgpu_device for the hash
buckets. The BO's handle is hashed to the bucket number

 handle & (2^n - 1)

and linked in there via struct list_head in struct amdgpu_bo.
amdgpu_bo_alloc and amdgpu_create_bo_from_user_mem add the handle at the
end of the list, amdgpu_bo_import adds it at or moves it to the beginning.
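
Roughly like this (names invented here, kernel-style list helpers used for
brevity -- not actual libdrm code):

#define BO_BUCKETS 256	/* 2^n, example value */

/* assumed extra fields:
 *   struct amdgpu_device { ...; struct list_head bo_buckets[BO_BUCKETS]; };
 *   struct amdgpu_bo     { ...; uint32_t handle; struct list_head bucket_entry; };
 */

static struct list_head *bo_bucket(struct amdgpu_device *dev, uint32_t handle)
{
	return &dev->bo_buckets[handle & (BO_BUCKETS - 1)];
}

/* amdgpu_bo_alloc() / amdgpu_create_bo_from_user_mem(): O(1), no lookup */
static void bo_table_add(struct amdgpu_device *dev, struct amdgpu_bo *bo)
{
	list_add_tail(&bo->bucket_entry, bo_bucket(dev, bo->handle));
}

/* amdgpu_bo_import(): only walks the one bucket the handle hashes to */
static struct amdgpu_bo *bo_table_lookup(struct amdgpu_device *dev, uint32_t handle)
{
	struct amdgpu_bo *bo;

	list_for_each_entry(bo, bo_bucket(dev, handle), bucket_entry)
		if (bo->handle == handle)
			return bo;

	return NULL;
}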


-- 
Earthling Michel Dänzer   |   http://www.amd.com
Libre software enthusiast | Mesa and X developer


Re: [PATCH 6/7] drm/amdgpu: remove job->adev

2018-07-17 Thread Christian König

On 16.07.2018 at 17:50, Michel Dänzer wrote:

On 2018-07-13 05:19 PM, Christian König wrote:

We can get that from the ring.

Signed-off-by: Christian König 

This change introduced the attached oops when running the piglit
max-texture-size test, after which the test process hangs.

Note that the test always triggers the out of memory condition in
amdgpu_cs_ioctl, but before this change that was handled gracefully.


Sorry for the noise. I hadn't thought about the possibility of
destroying the job before the scheduler was determined.


Patch to fix this was just pushed to amd-staging-drm-next.

Regards,
Christian.


[V3] vga_switcheroo: set audio client id according to bound GPU id

2018-07-17 Thread Jim Qu
On modern laptops more and more platforms have two GPUs, and each of
them may have an audio codec for HDMI/DP output. For a dGPU with no
output, the audio codec is usually disabled.

Currently the HDA audio driver sets every codec to VGA_SWITCHEROO_DIS,
so the audio device bound to the UMA GPU gets suspended when the user
controls power via debugfs.

On the HDA driver side it is difficult to know which GPU the audio
device is bound to, so pass the bound GPU's pci dev to vga_switcheroo.

If the audio client is not the third registration, its id is set in the
vga_switcheroo enable function. If the audio client is the last
registration, i.e. when vga_switcheroo_ready() already returns true, we
should get the audio client id from the bound GPU directly.

Signed-off-by: Jim Qu 
---
 drivers/gpu/vga/vga_switcheroo.c | 63 +---
 include/linux/vga_switcheroo.h   |  8 ++---
 sound/pci/hda/hda_intel.c| 11 +++
 3 files changed, 62 insertions(+), 20 deletions(-)
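
The sound/pci/hda side (cut off in this archive copy) boils down to looking
up the GPU the codec hangs off -- the PCI device at function 0 of the codec's
slot -- and handing it to vga_switcheroo at registration time. Rough sketch
only; the exact registration signature is assumed here rather than quoted,
see the actual hunk for the real code:

/* sketch: find the GPU the HDA controller is a function of */
static struct pci_dev *get_bound_vga(struct pci_dev *pdev)
{
	switch (pdev->vendor) {
	case PCI_VENDOR_ID_ATI:
	case PCI_VENDOR_ID_AMD:
	case PCI_VENDOR_ID_NVIDIA:
		return pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
						   pdev->bus->number,
						   PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
	}
	return NULL;
}

/* registration then passes the bound GPU instead of a client id: */
vga_switcheroo_register_audio_client(chip->pci, &ops, get_bound_vga(chip->pci));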

diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index fc4adf3..af04e3c 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -103,9 +103,11 @@
  * runtime pm. If true, writing ON and OFF to the vga_switcheroo debugfs
  * interface is a no-op so as not to interfere with runtime pm
  * @list: client list
+ * @vga_dev: pci device, indicate which GPU is bound to current audio client
  *
  * Registered client. A client can be either a GPU or an audio device on a GPU.
- * For audio clients, the @fb_info and @active members are bogus.
+ * For audio clients, the @fb_info and @active members are bogus. For GPU
+ * clients, the @vga_dev is bogus.
  */
 struct vga_switcheroo_client {
struct pci_dev *pdev;
@@ -116,6 +118,7 @@ struct vga_switcheroo_client {
bool active;
bool driver_power_control;
struct list_head list;
+   struct pci_dev *vga_dev;
 };
 
 /*
@@ -161,9 +164,8 @@ struct vgasr_priv {
 };
 
 #define ID_BIT_AUDIO   0x100
-#define client_is_audio(c) ((c)->id & ID_BIT_AUDIO)
-#define client_is_vga(c)   ((c)->id == VGA_SWITCHEROO_UNKNOWN_ID || \
-!client_is_audio(c))
+#define client_is_audio(c) ((c)->id & ID_BIT_AUDIO)
+#define client_is_vga(c)   (!client_is_audio(c))
 #define client_id(c)   ((c)->id & ~ID_BIT_AUDIO)
 
 static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv);
@@ -192,14 +194,29 @@ static void vga_switcheroo_enable(void)
vgasr_priv.handler->init();
 
	list_for_each_entry(client, &vgasr_priv.clients, list) {
-   if (client->id != VGA_SWITCHEROO_UNKNOWN_ID)
+   if (!client_is_vga(client) ||
+client_id(client) != VGA_SWITCHEROO_UNKNOWN_ID)
continue;
+
ret = vgasr_priv.handler->get_client_id(client->pdev);
if (ret < 0)
return;
 
client->id = ret;
}
+
+	list_for_each_entry(client, &vgasr_priv.clients, list) {
+   if (!client_is_audio(client) ||
+client_id(client) != VGA_SWITCHEROO_UNKNOWN_ID)
+   continue;
+
+   ret = vgasr_priv.handler->get_client_id(client->vga_dev);
+   if (ret < 0)
+   return;
+
+   client->id = ret | ID_BIT_AUDIO;
+   }
+
	vga_switcheroo_debugfs_init(&vgasr_priv);
vgasr_priv.active = true;
 }
@@ -272,7 +289,9 @@ EXPORT_SYMBOL(vga_switcheroo_handler_flags);
 
 static int register_client(struct pci_dev *pdev,
   const struct vga_switcheroo_client_ops *ops,
-  enum vga_switcheroo_client_id id, bool active,
+  enum vga_switcheroo_client_id id,
+  struct pci_dev *vga_dev,
+  bool active,
   bool driver_power_control)
 {
struct vga_switcheroo_client *client;
@@ -287,6 +306,7 @@ static int register_client(struct pci_dev *pdev,
client->id = id;
client->active = active;
client->driver_power_control = driver_power_control;
+   client->vga_dev = vga_dev;
 
	mutex_lock(&vgasr_mutex);
	list_add_tail(&client->list, &vgasr_priv.clients);
@@ -319,7 +339,7 @@ int vga_switcheroo_register_client(struct pci_dev *pdev,
   const struct vga_switcheroo_client_ops *ops,
   bool driver_power_control)
 {
-   return register_client(pdev, ops, VGA_SWITCHEROO_UNKNOWN_ID,
+   return register_client(pdev, ops, VGA_SWITCHEROO_UNKNOWN_ID, NULL,
   pdev == vga_default_device(),
   driver_power_control);
 }
@@ -329,19 +349,40 @@ EXPORT_SYMBOL(vga_switcheroo_register_client);
  * vga_switcheroo_register_audio_client - register audio client
  * @pdev: client pci device
  * @ops: 

Re: [PATCH libdrm] amdgpu: add amdgpu_bo_handle_type_kms_noimport

2018-07-17 Thread Christian König

On 17.07.2018 at 10:03, Michel Dänzer wrote:

On 2018-07-17 09:59 AM, Christian König wrote:

On 17.07.2018 at 09:46, Michel Dänzer wrote:

On 2018-07-17 09:33 AM, Christian König wrote:

On 17.07.2018 at 09:26, Michel Dänzer wrote:
[SNIP]

All that should be needed is one struct list_head per BO, 16 bytes on
64-bit.

+malloc overhead and that for *every* BO the application/driver
allocated.

The struct list_head can be stored in struct amdgpu_bo, no additional
malloc necessary.


Well that sounds we are not talking about the same code, do we?

IIRC the hashtable implementation in libdrm is using an ever growing 
array for the BOs and *NOT* a linked list.


So we have at least two mallocs involved here, the one for the key/value 
pair and the one for the node array.


Regards,
Christian.





The last time I looked we could easily have a few thousands of that
(but not in the same CS).

So I would guess that the wasted memory can easily be in the lower kb
range, compared to adding just a flag that we never going to import the
handle again.

I wouldn't call the memory "wasted", as it serves a clear purpose.






Re: [PATCH] mm, oom: distinguish blockable mode for mmu notifiers

2018-07-17 Thread Michal Hocko
On Mon 16-07-18 16:12:49, Andrew Morton wrote:
> On Mon, 16 Jul 2018 13:50:58 +0200 Michal Hocko  wrote:
> 
> > From: Michal Hocko 
> > 
> > There are several blockable mmu notifiers which might sleep in
> > mmu_notifier_invalidate_range_start and that is a problem for the
> > oom_reaper because it needs to guarantee a forward progress so it cannot
> > depend on any sleepable locks.
> > 
> > Currently we simply back off and mark an oom victim with blockable mmu
> > notifiers as done after a short sleep. That can result in selecting a
> > new oom victim prematurely because the previous one still hasn't torn
> > its memory down yet.
> > 
> > We can do much better though. Even if mmu notifiers use sleepable locks
> > there is no reason to automatically assume those locks are held.
> > Moreover majority of notifiers only care about a portion of the address
> > space and there is absolutely zero reason to fail when we are unmapping an
> > unrelated range. Many notifiers do really block and wait for HW which is
> > harder to handle and we have to bail out though.
> > 
> > This patch handles the low hanging fruid. 
> > __mmu_notifier_invalidate_range_start
> > gets a blockable flag and callbacks are not allowed to sleep if the
> > flag is set to false. This is achieved by using trylock instead of the
> > sleepable lock for most callbacks and continue as long as we do not
> > block down the call chain.
> 
> I assume device driver developers are wondering "what does this mean
> for me".  As I understand it, the only time they will see
> blockable==false is when their driver is being called in response to an
> out-of-memory condition, yes?  So it is a very rare thing.

Yes, this is the case right now. Maybe we will grow other users in
future. Those other potential users is the reason why I used blockable
rather than oom parameter name.
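
Concretely, a driver callback ends up following a pattern roughly like this
(sketch only; the device structure, lock and range check are placeholders for
whatever the driver really uses):

static int my_invalidate_range_start(struct mmu_notifier *mn,
				     struct mm_struct *mm,
				     unsigned long start, unsigned long end,
				     bool blockable)
{
	struct my_dev *dev = container_of(mn, struct my_dev, mn);

	/* Unrelated range: nothing to invalidate, never a reason to fail. */
	if (!my_range_overlaps(dev, start, end))
		return 0;

	if (blockable) {
		mutex_lock(&dev->lock);
	} else if (!mutex_trylock(&dev->lock)) {
		/* !blockable (oom_reaper): must not sleep, ask for a retry. */
		return -EAGAIN;
	}

	my_stop_access(dev, start, end);
	mutex_unlock(&dev->lock);

	return 0;
}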

> Any suggestions regarding how the driver developers can test this code
> path?  I don't think we presently have a way to fake an oom-killing
> event?  Perhaps we should add such a thing, given the problems we're
> having with that feature.

The simplest way is to wrap userspace code which uses these notifiers
in a memcg and set the hard limit so that it hits the OOM. This can be
done e.g. after the test has faulted in all the mmu-notifier-managed
memory: set the hard limit to something really small and then watch for
a proper process teardown.
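
A minimal harness along those lines (cgroup v1 paths assumed, run as root;
setting up the notifier-backed mapping itself is left out):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

static void write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0 || write(fd, val, strlen(val)) < 0)
		perror(path);
	if (fd >= 0)
		close(fd);
}

int main(void)
{
	char pid[32];

	mkdir("/sys/fs/cgroup/memory/notifier-oom-test", 0755);
	snprintf(pid, sizeof(pid), "%d", getpid());
	write_str("/sys/fs/cgroup/memory/notifier-oom-test/cgroup.procs", pid);

	/* ... set up and fault in the mmu-notifier-managed memory here
	 * (e.g. a userptr BO), then clamp the limit below what is in use ... */
	write_str("/sys/fs/cgroup/memory/notifier-oom-test/memory.limit_in_bytes",
		  "16M");

	/* Allocate until the memcg OOM killer selects us. */
	for (;;) {
		char *p = malloc(1 << 20);

		if (!p)
			break;
		memset(p, 0xab, 1 << 20);
	}

	return 0;
}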

> > I think we can improve that even further because there is a common
> > pattern to do a range lookup first and then do something about that.
> > The first part can be done without a sleeping lock in most cases AFAICS.
> > 
> > The oom_reaper end then simply retries if there is at least one notifier
> > which couldn't make any progress in !blockable mode. A retry loop is
> > already implemented to wait for the mmap_sem and this is basically the
> > same thing.
> > 
> > ...
> >
> > +static inline int mmu_notifier_invalidate_range_start_nonblock(struct 
> > mm_struct *mm,
> > + unsigned long start, unsigned long end)
> > +{
> > +   int ret = 0;
> > +   if (mm_has_notifiers(mm))
> > +   ret = __mmu_notifier_invalidate_range_start(mm, start, end, 
> > false);
> > +
> > +   return ret;
> >  }
> 
> nit,
> 
> {
>   if (mm_has_notifiers(mm))
>   return __mmu_notifier_invalidate_range_start(mm, start, end, 
> false);
>   return 0;
> }
> 
> would suffice.

Sure. Fixed
 
> > 
> > ...
> >
> > --- a/mm/mmap.c
> > +++ b/mm/mmap.c
> > @@ -3074,7 +3074,7 @@ void exit_mmap(struct mm_struct *mm)
> >  * reliably test it.
> >  */
> 	mutex_lock(&oom_lock);
> > -   __oom_reap_task_mm(mm);
> > +   (void)__oom_reap_task_mm(mm);
> 	mutex_unlock(&oom_lock);
> 
> What does this do?

There is no error to be returned here as the comment above explains
 * Nothing can be holding mm->mmap_sem here and the above call
 * to mmu_notifier_release(mm) ensures mmu notifier callbacks in
 * __oom_reap_task_mm() will not block.
-- 
Michal Hocko
SUSE Labs


Re: [PATCH libdrm] amdgpu: add amdgpu_bo_handle_type_kms_noimport

2018-07-17 Thread Michel Dänzer
On 2018-07-17 09:59 AM, Christian König wrote:
> Am 17.07.2018 um 09:46 schrieb Michel Dänzer:
>> On 2018-07-17 09:33 AM, Christian König wrote:
>>> Am 17.07.2018 um 09:26 schrieb Michel Dänzer:
 On 2018-07-17 08:50 AM, Christian König wrote:
> Am 16.07.2018 um 18:05 schrieb Michel Dänzer:
>> On 2018-07-13 08:47 PM, Marek Olšák wrote:
>> [SNIP]
>> Other opinions?
> I understand the reason why Marek wants to do this, but I agree that
> this is a little bit dangerous if used incorrectly.
>
> On the other hand I don't see any other way to sanely handle it
> either.
 Sanely handle what exactly? :) I still haven't seen any description of
 an actual problem, other than "the handle is stored in the hash table".
>>> Well the problem is that it's not "the handle" but rather "all handles"
>>> which are now stored in the hash table.
>>>
>>> To begin with that is quite a bunch of wasted memory, not talking about
>>> the extra CPU cycles.
>> All that should be needed is one struct list_head per BO, 16 bytes on
>> 64-bit.
> 
> +malloc overhead and that for *every* BO the application/driver
> allocated.

The struct list_head can be stored in struct amdgpu_bo, no additional
malloc necessary.


> The last time I looked we could easily have a few thousands of that
> (but not in the same CS).
> 
> So I would guess that the wasted memory can easily be in the lower kb
> range, compared to adding just a flag that we never going to import the
> handle again.

I wouldn't call the memory "wasted", as it serves a clear purpose.


-- 
Earthling Michel Dänzer   |   http://www.amd.com
Libre software enthusiast | Mesa and X developer


Re: [PATCH libdrm] amdgpu: add amdgpu_bo_handle_type_kms_noimport

2018-07-17 Thread Christian König

On 17.07.2018 at 09:46, Michel Dänzer wrote:

On 2018-07-17 09:33 AM, Christian König wrote:

On 17.07.2018 at 09:26, Michel Dänzer wrote:

On 2018-07-17 08:50 AM, Christian König wrote:

On 16.07.2018 at 18:05, Michel Dänzer wrote:

On 2018-07-13 08:47 PM, Marek Olšák wrote:
[SNIP]
Other opinions?

I understand the reason why Marek wants to do this, but I agree that
this is a little bit dangerous if used incorrectly.

On the other hand I don't see any other way to sanely handle it either.

Sanely handle what exactly? :) I still haven't seen any description of
an actual problem, other than "the handle is stored in the hash table".

Well the problem is that it's not "the handle" but rather "all handles"
which are now stored in the hash table.

To begin with that is quite a bunch of wasted memory, not talking about
the extra CPU cycles.

All that should be needed is one struct list_head per BO, 16 bytes on
64-bit.


+malloc overhead and that for *every* BO the application/driver 
allocated. The last time I looked we could easily have a few thousands 
of that (but not in the same CS).


So I would guess that the wasted memory can easily be in the lower kb 
range, compared to adding just a flag that we never going to import the 
handle again.


Christian.


Re: [PATCH libdrm] amdgpu: add amdgpu_bo_handle_type_kms_noimport

2018-07-17 Thread Michel Dänzer
On 2018-07-17 09:33 AM, Christian König wrote:
> Am 17.07.2018 um 09:26 schrieb Michel Dänzer:
>> On 2018-07-17 08:50 AM, Christian König wrote:
>>> Am 16.07.2018 um 18:05 schrieb Michel Dänzer:
 On 2018-07-13 08:47 PM, Marek Olšák wrote:
 [SNIP]
 Other opinions?
>>> I understand the reason why Marek wants to do this, but I agree that
>>> this is a little bit dangerous if used incorrectly.
>>>
>>> On the other hand I don't see any other way to sanely handle it either.
>> Sanely handle what exactly? :) I still haven't seen any description of
>> an actual problem, other than "the handle is stored in the hash table".
> 
> Well the problem is that it's not "the handle" but rather "all handles"
> which are now stored in the hash table.
> 
> To begin with that is quite a bunch of wasted memory, not talking about
> the extra CPU cycles.

All that should be needed is one struct list_head per BO, 16 bytes on
64-bit.


-- 
Earthling Michel Dänzer   |   http://www.amd.com
Libre software enthusiast | Mesa and X developer


Re: [PATCH] drm/amdgpu/pm: Remove VLA usage

2018-07-17 Thread Kees Cook
On Wed, Jun 20, 2018 at 11:26 AM, Kees Cook  wrote:
> In the quest to remove all stack VLA usage from the kernel[1], this
> uses the maximum sane buffer size and removes copy/paste code.
>
> [1] 
> https://lkml.kernel.org/r/CA+55aFzCG-zNmZwX4A2FQpadafLfEzK6CC=qpxydaacu1rq...@mail.gmail.com
>
> Signed-off-by: Kees Cook 

Friendly ping! Whose tree should this go through?

Thanks!

-Kees

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 100 +++--
>  1 file changed, 42 insertions(+), 58 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> index b455da487782..5eb98cde22ed 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> @@ -593,40 +593,59 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device 
> *dev,
> return snprintf(buf, PAGE_SIZE, "\n");
>  }
>
> -static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
> -   struct device_attribute *attr,
> -   const char *buf,
> -   size_t count)
> +/*
> + * Worst case: 32 bits individually specified, in octal at 12 characters
> + * per line (+1 for \n).
> + */
> +#define AMDGPU_MASK_BUF_MAX    (32 * 13)
> +
> +static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t 
> *mask)
>  {
> -   struct drm_device *ddev = dev_get_drvdata(dev);
> -   struct amdgpu_device *adev = ddev->dev_private;
> int ret;
> long level;
> -   uint32_t mask = 0;
> char *sub_str = NULL;
> char *tmp;
> -   char buf_cpy[count];
> +   char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
> const char delimiter[3] = {' ', '\n', '\0'};
> +   size_t bytes;
>
> -   memcpy(buf_cpy, buf, count+1);
> +   *mask = 0;
> +
> +   bytes = min(count, sizeof(buf_cpy) - 1);
> +   memcpy(buf_cpy, buf, bytes);
> +   buf_cpy[bytes] = '\0';
> tmp = buf_cpy;
> while (tmp[0]) {
> -   sub_str =  strsep(&tmp, delimiter);
> +   sub_str = strsep(&tmp, delimiter);
> if (strlen(sub_str)) {
> ret = kstrtol(sub_str, 0, &level);
> -
> -   if (ret) {
> -   count = -EINVAL;
> -   goto fail;
> -   }
> -   mask |= 1 << level;
> +   if (ret)
> +   return -EINVAL;
> +   *mask |= 1 << level;
> } else
> break;
> }
> +
> +   return 0;
> +}
> +
> +static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
> +   struct device_attribute *attr,
> +   const char *buf,
> +   size_t count)
> +{
> +   struct drm_device *ddev = dev_get_drvdata(dev);
> +   struct amdgpu_device *adev = ddev->dev_private;
> +   int ret;
> +   uint32_t mask = 0;
> +
> +   ret = amdgpu_read_mask(buf, count, &mask);
> +   if (ret)
> +   return ret;
> +
> if (adev->powerplay.pp_funcs->force_clock_level)
> amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
>
> -fail:
> return count;
>  }
>
> @@ -651,32 +670,15 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device 
> *dev,
> struct drm_device *ddev = dev_get_drvdata(dev);
> struct amdgpu_device *adev = ddev->dev_private;
> int ret;
> -   long level;
> uint32_t mask = 0;
> -   char *sub_str = NULL;
> -   char *tmp;
> -   char buf_cpy[count];
> -   const char delimiter[3] = {' ', '\n', '\0'};
>
> -   memcpy(buf_cpy, buf, count+1);
> -   tmp = buf_cpy;
> -   while (tmp[0]) {
> -   sub_str =  strsep(&tmp, delimiter);
> -   if (strlen(sub_str)) {
> -   ret = kstrtol(sub_str, 0, &level);
> +   ret = amdgpu_read_mask(buf, count, &mask);
> +   if (ret)
> +   return ret;
>
> -   if (ret) {
> -   count = -EINVAL;
> -   goto fail;
> -   }
> -   mask |= 1 << level;
> -   } else
> -   break;
> -   }
> if (adev->powerplay.pp_funcs->force_clock_level)
> amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
>
> -fail:
> return count;
>  }
>
> @@ -701,33 +703,15 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device 
> *dev,
> struct drm_device *ddev = dev_get_drvdata(dev);
> struct amdgpu_device *adev = ddev->dev_private;
> int ret;
> -   long level;
> uint32_t mask = 0;
> -   char *sub_str = NULL;
> -   char *tmp;
> -   char buf_cpy[count];
> -   const char delimiter[3] = {' ', '\n', '\0'};
> -
> -   memcpy(buf_cpy, buf, count+1);
> -   tmp = buf_cpy;
>
> -   while (tmp[0]) {
> -   sub_str =  strsep(&tmp, delimiter);

Re: [PATCH] mm, oom: distinguish blockable mode for mmu notifiers

2018-07-17 Thread Andrew Morton
On Mon, 16 Jul 2018 13:50:58 +0200 Michal Hocko  wrote:

> From: Michal Hocko 
> 
> There are several blockable mmu notifiers which might sleep in
> mmu_notifier_invalidate_range_start and that is a problem for the
> oom_reaper because it needs to guarantee a forward progress so it cannot
> depend on any sleepable locks.
> 
> Currently we simply back off and mark an oom victim with blockable mmu
> notifiers as done after a short sleep. That can result in selecting a
> new oom victim prematurely because the previous one still hasn't torn
> its memory down yet.
> 
> We can do much better though. Even if mmu notifiers use sleepable locks
> there is no reason to automatically assume those locks are held.
> Moreover majority of notifiers only care about a portion of the address
> space and there is absolutely zero reason to fail when we are unmapping an
> unrelated range. Many notifiers do really block and wait for HW which is
> harder to handle and we have to bail out though.
> 
> This patch handles the low hanging fruid. 
> __mmu_notifier_invalidate_range_start
> gets a blockable flag and callbacks are not allowed to sleep if the
> flag is set to false. This is achieved by using trylock instead of the
> sleepable lock for most callbacks and continue as long as we do not
> block down the call chain.

I assume device driver developers are wondering "what does this mean
for me".  As I understand it, the only time they will see
blockable==false is when their driver is being called in response to an
out-of-memory condition, yes?  So it is a very rare thing.

Any suggestions regarding how the driver developers can test this code
path?  I don't think we presently have a way to fake an oom-killing
event?  Perhaps we should add such a thing, given the problems we're
having with that feature.

> I think we can improve that even further because there is a common
> pattern to do a range lookup first and then do something about that.
> The first part can be done without a sleeping lock in most cases AFAICS.
> 
> The oom_reaper end then simply retries if there is at least one notifier
> which couldn't make any progress in !blockable mode. A retry loop is
> already implemented to wait for the mmap_sem and this is basically the
> same thing.
> 
> ...
>
> +static inline int mmu_notifier_invalidate_range_start_nonblock(struct 
> mm_struct *mm,
> +   unsigned long start, unsigned long end)
> +{
> + int ret = 0;
> + if (mm_has_notifiers(mm))
> + ret = __mmu_notifier_invalidate_range_start(mm, start, end, 
> false);
> +
> + return ret;
>  }

nit,

{
if (mm_has_notifiers(mm))
return __mmu_notifier_invalidate_range_start(mm, start, end, 
false);
return 0;
}

would suffice.


> 
> ...
>
> --- a/mm/mmap.c
> +++ b/mm/mmap.c
> @@ -3074,7 +3074,7 @@ void exit_mmap(struct mm_struct *mm)
>* reliably test it.
>*/
> 	mutex_lock(&oom_lock);
> - __oom_reap_task_mm(mm);
> + (void)__oom_reap_task_mm(mm);
> 	mutex_unlock(&oom_lock);

What does this do?

>   set_bit(MMF_OOM_SKIP, >flags);
> 
> ...
>



Re: [PATCH] mm, oom: distinguish blockable mode for mmu notifiers

2018-07-17 Thread Leon Romanovsky
On Mon, Jul 16, 2018 at 04:12:49PM -0700, Andrew Morton wrote:
> On Mon, 16 Jul 2018 13:50:58 +0200 Michal Hocko  wrote:
>
> > From: Michal Hocko 
> >
> > There are several blockable mmu notifiers which might sleep in
> > mmu_notifier_invalidate_range_start and that is a problem for the
> > oom_reaper because it needs to guarantee a forward progress so it cannot
> > depend on any sleepable locks.
> >
> > Currently we simply back off and mark an oom victim with blockable mmu
> > notifiers as done after a short sleep. That can result in selecting a
> > new oom victim prematurely because the previous one still hasn't torn
> > its memory down yet.
> >
> > We can do much better though. Even if mmu notifiers use sleepable locks
> > there is no reason to automatically assume those locks are held.
> > Moreover majority of notifiers only care about a portion of the address
> > space and there is absolutely zero reason to fail when we are unmapping an
> > unrelated range. Many notifiers do really block and wait for HW which is
> > harder to handle and we have to bail out though.
> >
> > This patch handles the low hanging fruid. 
> > __mmu_notifier_invalidate_range_start
> > gets a blockable flag and callbacks are not allowed to sleep if the
> > flag is set to false. This is achieved by using trylock instead of the
> > sleepable lock for most callbacks and continue as long as we do not
> > block down the call chain.
>
> I assume device driver developers are wondering "what does this mean
> for me".  As I understand it, the only time they will see
> blockable==false is when their driver is being called in response to an
> out-of-memory condition, yes?  So it is a very rare thing.

I can't say for everyone, but at least for me (mlx5), it is not a rare event.
I'm seeing OOM very often while running my tests in low-memory VMs.

Thanks

>
> Any suggestions regarding how the driver developers can test this code
> path?  I don't think we presently have a way to fake an oom-killing
> event?  Perhaps we should add such a thing, given the problems we're
> having with that feature.




Re: [PATCH libdrm] amdgpu: add amdgpu_bo_handle_type_kms_noimport

2018-07-17 Thread Christian König

On 17.07.2018 at 09:26, Michel Dänzer wrote:

On 2018-07-17 08:50 AM, Christian König wrote:

On 16.07.2018 at 18:05, Michel Dänzer wrote:

On 2018-07-13 08:47 PM, Marek Olšák wrote:
[SNIP]
Other opinions?

I understand the reason why Marek wants to do this, but I agree that
this is a little bit dangerous if used incorrectly.

On the other hand I don't see any other way to sanely handle it either.

Sanely handle what exactly? :) I still haven't seen any description of
an actual problem, other than "the handle is stored in the hash table".


Well the problem is that it's not "the handle" but rather "all handles" 
which are now stored in the hash table.


To begin with that is quite a bunch of wasted memory, not talking about 
the extra CPU cycles.



MM interop with GFX usually imports BOs on each frame, so that can hurt
here.

That's always the same set of BOs in the steady state, right? So it's
easy to make the repeated lookups fast, by moving them to the start of
their hash buckets.


Yeah, that can help with the CPU cycles but it is still not ideal.

I think that Marek's change is justified, but we should add a comment 
explaining the restrictions.


Christian.


Re: [PATCH] drm/amdgpu: always initialize job->base.sched

2018-07-17 Thread zhoucm1



On 2018年07月17日 15:26, Christian König wrote:

On 17.07.2018 at 09:16, Zhou, David(ChunMing) wrote:
Acked-by: Chunming Zhou , but I think it isn't a 
nice evaluation although there is comment in code.


Yeah, I hadn't thought about the possibility that we need to free the
job before it is submitted (in other words before the scheduler is
determined).


Alternatively we could provide the adev manually to amdgpu_job_free() 
and amdgpu_job_free_resources().

not a big deal, you can still go ahead with this patch.

David


Regards,
Christian.




-Original Message-
From: amd-gfx [mailto:amd-gfx-boun...@lists.freedesktop.org] On 
Behalf Of Christian König

Sent: Tuesday, July 17, 2018 3:05 PM
To: amd-gfx@lists.freedesktop.org
Subject: [PATCH] drm/amdgpu: always initialize job->base.sched

Otherwise we can't clean up the job if we run into an error before it 
is pushed to the scheduler.


Signed-off-by: Christian König 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 5 +
  1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c

index 024efb7ea6d6..42a4764d728e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -54,6 +54,11 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, 
unsigned num_ibs,

  if (!*job)
  return -ENOMEM;
  +    /*
+ * Initialize the scheduler to at least some ring so that we always
+ * have a pointer to adev.
+ */
+	(*job)->base.sched = &adev->rings[0]->sched;
  (*job)->vm = vm;
  (*job)->ibs = (void *)&(*job)[1];
  (*job)->num_ibs = num_ibs;
--
2.14.1



Re: [PATCH] drm/amdgpu: always initialize job->base.sched

2018-07-17 Thread Christian König

On 17.07.2018 at 09:16, Zhou, David(ChunMing) wrote:

Acked-by: Chunming Zhou , but I think it isn't a nice 
evaluation although there is comment in code.


Yeah, I hadn't thought about the possibility that we need to free the
job before it is submitted (in other words before the scheduler is
determined).


Alternatively we could provide the adev manually to amdgpu_job_free() 
and amdgpu_job_free_resources().
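
I.e. something along these lines (sketch only; today both functions take just
the job, the changed signatures are for illustration):

/* hypothetical alternative: callers on the pre-submission error path hand in
 * adev themselves, so the free path never has to look at job->base.sched
 */
void amdgpu_job_free_resources(struct amdgpu_device *adev, struct amdgpu_job *job);
void amdgpu_job_free(struct amdgpu_device *adev, struct amdgpu_job *job);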


Regards,
Christian.




-Original Message-
From: amd-gfx [mailto:amd-gfx-boun...@lists.freedesktop.org] On Behalf Of 
Christian König
Sent: Tuesday, July 17, 2018 3:05 PM
To: amd-gfx@lists.freedesktop.org
Subject: [PATCH] drm/amdgpu: always initialize job->base.sched

Otherwise we can't clean up the job if we run into an error before it is pushed 
to the scheduler.

Signed-off-by: Christian König 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 5 +
  1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 024efb7ea6d6..42a4764d728e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -54,6 +54,11 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned 
num_ibs,
if (!*job)
return -ENOMEM;
  
+	/*
+	 * Initialize the scheduler to at least some ring so that we always
+	 * have a pointer to adev.
+	 */
+	(*job)->base.sched = &adev->rings[0]->sched;
(*job)->vm = vm;
(*job)->ibs = (void *)&(*job)[1];
(*job)->num_ibs = num_ibs;
--
2.14.1



Re: [PATCH libdrm] amdgpu: add amdgpu_bo_handle_type_kms_noimport

2018-07-17 Thread Michel Dänzer
On 2018-07-17 08:50 AM, Christian König wrote:
> Am 16.07.2018 um 18:05 schrieb Michel Dänzer:
>> On 2018-07-13 08:47 PM, Marek Olšák wrote:
>>> On Fri, Jul 13, 2018 at 4:28 AM, Michel Dänzer 
>>> wrote:
 On 2018-07-12 07:03 PM, Marek Olšák wrote:
> On Thu, Jul 12, 2018, 3:31 AM Michel Dänzer 
> wrote:
>> What is the rationale for this? I.e. why do you want to not store
>> some
>> handles in the hash table?
>
> Because I have the option.
 Seems like you're expecting this patch to be accepted without providing
 any real justification for it (here or in the corresponding Mesa
 patch).
 NAK from me if so.
>>> The real justification is implied by the patch. See:
>>> amdgpu_add_handle_to_table
>>> Like I said: There is no risk of regression and it simplifies one
>>> simple case trivially. We shouldn't have to even talk about it.
>> IMO you haven't provided enough justification for adding API which is
>> prone to breakage if used incorrectly.
>>
>> Other opinions?
> 
> I understand the reason why Marek wants to do this, but I agree that
> this is a little bit dangerous if used incorrectly.
> 
> On the other hand I don't see any other way to sanely handle it either.

Sanely handle what exactly? :) I still haven't seen any description of
an actual problem, other than "the handle is stored in the hash table".


 I'd rather add the handle to the hash table in amdgpu_bo_alloc,
 amdgpu_create_bo_from_user_mem and amdgpu_bo_import instead of in
 amdgpu_bo_export, making amdgpu_bo_export(bo,
 amdgpu_bo_handle_type_kms,
 ...) essentially free. In the unlikely (since allocating a BO from the
 kernel is expensive) case that the hash table shows up on profiles, we
 can optimize it.
>>> The hash table isn't very good for high BO counts. The time complexity
>>> of a lookup is O(n).
>> A lookup is only needed in amdgpu_bo_import. amdgpu_bo_alloc and
>> amdgpu_create_bo_from_user_mem can just add the handle to the hash
>> bucket directly.
>>
>> Do you know of, or can you imagine, any workload where amdgpu_bo_import
>> is called often enough for this to be a concern?
> 
> MM interop with GFX usually imports BOs on each frame, so that can hurt
> here.

That's always the same set of BOs in the steady state, right? So it's
easy to make the repeated lookups fast, by moving them to the start of
their hash buckets.
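
With a per-BO list_head that is a one-liner in the import lookup (sketch,
field names invented):

	list_for_each_entry(bo, bucket, bucket_entry) {
		if (bo->handle == handle) {
			/* steady-state importers now hit the first entry */
			list_move(&bo->bucket_entry, bucket);
			return bo;
		}
	}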


-- 
Earthling Michel Dänzer   |   http://www.amd.com
Libre software enthusiast | Mesa and X developer


RE: [PATCH] drm/amdgpu: always initialize job->base.sched

2018-07-17 Thread Zhou, David(ChunMing)
Acked-by: Chunming Zhou , but I think it isn't a nice
approach, although there is a comment in the code.


-Original Message-
From: amd-gfx [mailto:amd-gfx-boun...@lists.freedesktop.org] On Behalf Of 
Christian König
Sent: Tuesday, July 17, 2018 3:05 PM
To: amd-gfx@lists.freedesktop.org
Subject: [PATCH] drm/amdgpu: always initialize job->base.sched

Otherwise we can't clean up the job if we run into an error before it is pushed 
to the scheduler.

Signed-off-by: Christian König 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 5 +
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 024efb7ea6d6..42a4764d728e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -54,6 +54,11 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned 
num_ibs,
if (!*job)
return -ENOMEM;
 
+	/*
+	 * Initialize the scheduler to at least some ring so that we always
+	 * have a pointer to adev.
+	 */
+	(*job)->base.sched = &adev->rings[0]->sched;
(*job)->vm = vm;
(*job)->ibs = (void *)&(*job)[1];
(*job)->num_ibs = num_ibs;
--
2.14.1



[PATCH] drm/amdgpu: always initialize job->base.sched

2018-07-17 Thread Christian König
Otherwise we can't clean up the job if we run into an error before it is
pushed to the scheduler.

Signed-off-by: Christian König 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 5 +
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 024efb7ea6d6..42a4764d728e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -54,6 +54,11 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned 
num_ibs,
if (!*job)
return -ENOMEM;
 
+	/*
+	 * Initialize the scheduler to at least some ring so that we always
+	 * have a pointer to adev.
+	 */
+	(*job)->base.sched = &adev->rings[0]->sched;
(*job)->vm = vm;
(*job)->ibs = (void *)&(*job)[1];
(*job)->num_ibs = num_ibs;
-- 
2.14.1



Re: [PATCH] drm/amdgpu/pm: Remove VLA usage

2018-07-17 Thread Christian König

Who's tree should this go through?
To answer the question: when Rex is OK with it, he pushes it to our
internal amd-staging-drm-next tree.


Alex then pushes that tree to a public server and at some point sends a 
pull request for inclusion in drm-next.


Regards,
Christian.

On 17.07.2018 at 08:23, Zhu, Rex wrote:

Patch is:
Reviewed-by: Rex Zhu <re...@amd.com>



Best Regards
Rex



*From:* keesc...@google.com  on behalf of Kees 
Cook 

*Sent:* Tuesday, July 17, 2018 11:59 AM
*To:* Deucher, Alexander
*Cc:* LKML; Koenig, Christian; Zhou, David(ChunMing); David Airlie; 
Zhu, Rex; Huang, Ray; Kuehling, Felix; amd-gfx list; Maling list - DRI 
developers

*Subject:* Re: [PATCH] drm/amdgpu/pm: Remove VLA usage
On Wed, Jun 20, 2018 at 11:26 AM, Kees Cook  wrote:
> In the quest to remove all stack VLA usage from the kernel[1], this
> uses the maximum sane buffer size and removes copy/paste code.
>
> [1] 
https://lkml.kernel.org/r/CA+55aFzCG-zNmZwX4A2FQpadafLfEzK6CC=qpxydaacu1rq...@mail.gmail.com

>
> Signed-off-by: Kees Cook 

Friendly ping! Who's tree should this go through?

Thanks!

-Kees

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 100 +++--
>  1 file changed, 42 insertions(+), 58 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c

> index b455da487782..5eb98cde22ed 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> @@ -593,40 +593,59 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct 
device *dev,

> return snprintf(buf, PAGE_SIZE, "\n");
>  }
>
> -static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
> -   struct device_attribute *attr,
> -   const char *buf,
> -   size_t count)
> +/*
> + * Worst case: 32 bits individually specified, in octal at 12 
characters

> + * per line (+1 for \n).
> + */
> +#define AMDGPU_MASK_BUF_MAX    (32 * 13)
> +
> +static ssize_t amdgpu_read_mask(const char *buf, size_t count, 
uint32_t *mask)

>  {
> -   struct drm_device *ddev = dev_get_drvdata(dev);
> -   struct amdgpu_device *adev = ddev->dev_private;
> int ret;
> long level;
> -   uint32_t mask = 0;
> char *sub_str = NULL;
> char *tmp;
> -   char buf_cpy[count];
> +   char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
> const char delimiter[3] = {' ', '\n', '\0'};
> +   size_t bytes;
>
> -   memcpy(buf_cpy, buf, count+1);
> +   *mask = 0;
> +
> +   bytes = min(count, sizeof(buf_cpy) - 1);
> +   memcpy(buf_cpy, buf, bytes);
> +   buf_cpy[bytes] = '\0';
> tmp = buf_cpy;
> while (tmp[0]) {
> -   sub_str =  strsep(, delimiter);
> +   sub_str = strsep(, delimiter);
> if (strlen(sub_str)) {
> ret = kstrtol(sub_str, 0, );
> -
> -   if (ret) {
> -   count = -EINVAL;
> -   goto fail;
> -   }
> -   mask |= 1 << level;
> +   if (ret)
> +   return -EINVAL;
> +   *mask |= 1 << level;
> } else
> break;
> }
> +
> +   return 0;
> +}
> +
> +static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
> +   struct device_attribute *attr,
> +   const char *buf,
> +   size_t count)
> +{
> +   struct drm_device *ddev = dev_get_drvdata(dev);
> +   struct amdgpu_device *adev = ddev->dev_private;
> +   int ret;
> +   uint32_t mask = 0;
> +
> +   ret = amdgpu_read_mask(buf, count, );
> +   if (ret)
> +   return ret;
> +
> if (adev->powerplay.pp_funcs->force_clock_level)
> amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
>
> -fail:
> return count;
>  }
>
> @@ -651,32 +670,15 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct 
device *dev,

> struct drm_device *ddev = dev_get_drvdata(dev);
> struct amdgpu_device *adev = ddev->dev_private;
> int ret;
> -   long level;
> uint32_t mask = 0;
> -   char *sub_str = NULL;
> -   char *tmp;
> -   char buf_cpy[count];
> -   const char delimiter[3] = {' ', '\n', '\0'};
>
> -   memcpy(buf_cpy, buf, count+1);
> -   tmp = buf_cpy;
> -   while (tmp[0]) {
> -   sub_str =  strsep(, delimiter);
> -   if (strlen(sub_str)) {
> -   ret = kstrtol(sub_str, 0, );
> +   ret = amdgpu_read_mask(buf, count, );
> +   if (ret)
> +   return ret;
>
> -   if (ret) {
> -   count = -EINVAL;
> -   goto fail;
> -   }
> -   mask |= 1 << level;
> -   } 

Re: [PATCH libdrm] amdgpu: add amdgpu_bo_handle_type_kms_noimport

2018-07-17 Thread Christian König

On 16.07.2018 at 18:05, Michel Dänzer wrote:

On 2018-07-13 08:47 PM, Marek Olšák wrote:

On Fri, Jul 13, 2018 at 4:28 AM, Michel Dänzer  wrote:

On 2018-07-12 07:03 PM, Marek Olšák wrote:

On Thu, Jul 12, 2018, 3:31 AM Michel Dänzer  wrote:

What is the rationale for this? I.e. why do you want to not store some
handles in the hash table?


Because I have the option.

Seems like you're expecting this patch to be accepted without providing
any real justification for it (here or in the corresponding Mesa patch).
NAK from me if so.

The real justification is implied by the patch. See: amdgpu_add_handle_to_table
Like I said: There is no risk of regression and it simplifies one
simple case trivially. We shouldn't have to even talk about it.

IMO you haven't provided enough justification for adding API which is
prone to breakage if used incorrectly.

Other opinions?


I understand the reason why Marek wants to do this, but I agree that 
this is a little bit dangerous if used incorrectly.


On the other hand I don't see any other way to sanely handle it either.





I'd rather add the handle to the hash table in amdgpu_bo_alloc,
amdgpu_create_bo_from_user_mem and amdgpu_bo_import instead of in
amdgpu_bo_export, making amdgpu_bo_export(bo, amdgpu_bo_handle_type_kms,
...) essentially free. In the unlikely (since allocating a BO from the
kernel is expensive) case that the hash table shows up on profiles, we
can optimize it.

The hash table isn't very good for high BO counts. The time complexity
of a lookup is O(n).

A lookup is only needed in amdgpu_bo_import. amdgpu_bo_alloc and
amdgpu_create_bo_from_user_mem can just add the handle to the hash
bucket directly.

Do you know of, or can you imagine, any workload where amdgpu_bo_import
is called often enough for this to be a concern?


MM interop with GFX usually imports BOs on each frame, so that can hurt 
here.


Christian.


Re: [PATCH] drm/amdgpu/pm: Remove VLA usage

2018-07-17 Thread Zhu, Rex
Patch is:
Reviewed-by: Rex Zhu <re...@amd.com>



Best Regards
Rex



From: keesc...@google.com  on behalf of Kees Cook 

Sent: Tuesday, July 17, 2018 11:59 AM
To: Deucher, Alexander
Cc: LKML; Koenig, Christian; Zhou, David(ChunMing); David Airlie; Zhu, Rex; 
Huang, Ray; Kuehling, Felix; amd-gfx list; Maling list - DRI developers
Subject: Re: [PATCH] drm/amdgpu/pm: Remove VLA usage

On Wed, Jun 20, 2018 at 11:26 AM, Kees Cook  wrote:
> In the quest to remove all stack VLA usage from the kernel[1], this
> uses the maximum sane buffer size and removes copy/paste code.
>
> [1] 
> https://lkml.kernel.org/r/CA+55aFzCG-zNmZwX4A2FQpadafLfEzK6CC=qpxydaacu1rq...@mail.gmail.com
>
> Signed-off-by: Kees Cook 

Friendly ping! Who's tree should this go through?

Thanks!

-Kees

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 100 +++--
>  1 file changed, 42 insertions(+), 58 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> index b455da487782..5eb98cde22ed 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> @@ -593,40 +593,59 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device 
> *dev,
> return snprintf(buf, PAGE_SIZE, "\n");
>  }
>
> -static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
> -   struct device_attribute *attr,
> -   const char *buf,
> -   size_t count)
> +/*
> + * Worst case: 32 bits individually specified, in octal at 12 characters
> + * per line (+1 for \n).
> + */
> +#define AMDGPU_MASK_BUF_MAX(32 * 13)
> +
> +static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t 
> *mask)
>  {
> -   struct drm_device *ddev = dev_get_drvdata(dev);
> -   struct amdgpu_device *adev = ddev->dev_private;
> int ret;
> long level;
> -   uint32_t mask = 0;
> char *sub_str = NULL;
> char *tmp;
> -   char buf_cpy[count];
> +   char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
> const char delimiter[3] = {' ', '\n', '\0'};
> +   size_t bytes;
>
> -   memcpy(buf_cpy, buf, count+1);
> +   *mask = 0;
> +
> +   bytes = min(count, sizeof(buf_cpy) - 1);
> +   memcpy(buf_cpy, buf, bytes);
> +   buf_cpy[bytes] = '\0';
> tmp = buf_cpy;
> while (tmp[0]) {
> -   sub_str =  strsep(, delimiter);
> +   sub_str = strsep(, delimiter);
> if (strlen(sub_str)) {
> ret = kstrtol(sub_str, 0, );
> -
> -   if (ret) {
> -   count = -EINVAL;
> -   goto fail;
> -   }
> -   mask |= 1 << level;
> +   if (ret)
> +   return -EINVAL;
> +   *mask |= 1 << level;
> } else
> break;
> }
> +
> +   return 0;
> +}
> +
> +static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
> +   struct device_attribute *attr,
> +   const char *buf,
> +   size_t count)
> +{
> +   struct drm_device *ddev = dev_get_drvdata(dev);
> +   struct amdgpu_device *adev = ddev->dev_private;
> +   int ret;
> +   uint32_t mask = 0;
> +
> +   ret = amdgpu_read_mask(buf, count, );
> +   if (ret)
> +   return ret;
> +
> if (adev->powerplay.pp_funcs->force_clock_level)
> amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
>
> -fail:
> return count;
>  }
>
> @@ -651,32 +670,15 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device 
> *dev,
> struct drm_device *ddev = dev_get_drvdata(dev);
> struct amdgpu_device *adev = ddev->dev_private;
> int ret;
> -   long level;
> uint32_t mask = 0;
> -   char *sub_str = NULL;
> -   char *tmp;
> -   char buf_cpy[count];
> -   const char delimiter[3] = {' ', '\n', '\0'};
>
> -   memcpy(buf_cpy, buf, count+1);
> -   tmp = buf_cpy;
> -   while (tmp[0]) {
> -   sub_str =  strsep(, delimiter);
> -   if (strlen(sub_str)) {
> -   ret = kstrtol(sub_str, 0, );
> +   ret = amdgpu_read_mask(buf, count, );
> +   if (ret)
> +   return ret;
>
> -   if (ret) {
> -   count = -EINVAL;
> -   goto fail;
> -   }
> -   mask |= 1 << level;
> -   } else
> -   break;
> -   }
> if (adev->powerplay.pp_funcs->force_clock_level)
> amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
>
> -fail:
> return count;
>  }
>
> @@ -701,33 +703,15 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device 
> *dev,
> struct drm_device *ddev =