RE: [PATCH xf86-video-amdgpu] Improve AMDGPUPreInitAccel_KMS log messages

2017-06-07 Thread Deucher, Alexander
> -----Original Message-----
> From: amd-gfx [mailto:amd-gfx-boun...@lists.freedesktop.org] On Behalf
> Of Michel Dänzer
> Sent: Wednesday, June 07, 2017 9:50 PM
> To: amd-gfx@lists.freedesktop.org
> Subject: [PATCH xf86-video-amdgpu] Improve AMDGPUPreInitAccel_KMS
> log messages
> 
> From: Michel Dänzer 
> 
> Now it should always be clear in the log file why acceleration isn't
> enabled.
> 
> Signed-off-by: Michel Dänzer 

Reviewed-by: Alex Deucher 

> ---
>  src/amdgpu_glamor.c |  3 ---
>  src/amdgpu_kms.c| 23 +--
>  2 files changed, 13 insertions(+), 13 deletions(-)
> 
> diff --git a/src/amdgpu_glamor.c b/src/amdgpu_glamor.c
> index 5583cd382..197592aa0 100644
> --- a/src/amdgpu_glamor.c
> +++ b/src/amdgpu_glamor.c
> @@ -81,9 +81,6 @@ Bool amdgpu_glamor_pre_init(ScrnInfoPtr scrn)
>   pointer glamor_module;
>   CARD32 version;
> 
> - if (!info->dri2.available)
> - return FALSE;
> -
>   if (scrn->depth < 24) {
>   xf86DrvMsg(scrn->scrnIndex, X_ERROR,
>  "glamor requires depth >= 24, disabling.\n");
> diff --git a/src/amdgpu_kms.c b/src/amdgpu_kms.c
> index 69d61943d..784f7388a 100644
> --- a/src/amdgpu_kms.c
> +++ b/src/amdgpu_kms.c
> @@ -1191,19 +1191,22 @@ static Bool AMDGPUPreInitAccel_KMS(ScrnInfoPtr pScrn)
> 
>   if (info->dri2.available)
> 		info->gbm = gbm_create_device(pAMDGPUEnt->fd);
> - if (info->gbm == NULL)
> - info->dri2.available = FALSE;
> 
> - if (use_glamor &&
> - amdgpu_glamor_pre_init(pScrn))
> - return TRUE;
> -
> - if (info->dri2.available)
> - return TRUE;
> + if (info->gbm) {
> + if (!use_glamor ||
> + amdgpu_glamor_pre_init(pScrn))
> + return TRUE;
> + } else {
> + xf86DrvMsg(pScrn->scrnIndex, X_WARNING,
> +"gbm_create_device returned NULL, using "
> +"ShadowFB\n");
> + }
> + } else {
> + xf86DrvMsg(pScrn->scrnIndex, X_CONFIG,
> +"GPU acceleration disabled, using ShadowFB\n");
>   }
> 
> - xf86DrvMsg(pScrn->scrnIndex, X_INFO,
> -	   "GPU accel disabled or not working, using shadowfb for KMS\n");
> + info->dri2.available = FALSE;
>   info->shadow_fb = TRUE;
>   if (!xf86LoadSubModule(pScrn, "shadow"))
>   info->shadow_fb = FALSE;
> --
> 2.11.0
> 


[PATCH xf86-video-amdgpu] Improve AMDGPUPreInitAccel_KMS log messages

2017-06-07 Thread Michel Dänzer
From: Michel Dänzer 

Now it should always be clear in the log file why acceleration isn't
enabled.

Signed-off-by: Michel Dänzer 
---
 src/amdgpu_glamor.c |  3 ---
 src/amdgpu_kms.c| 23 +--
 2 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/src/amdgpu_glamor.c b/src/amdgpu_glamor.c
index 5583cd382..197592aa0 100644
--- a/src/amdgpu_glamor.c
+++ b/src/amdgpu_glamor.c
@@ -81,9 +81,6 @@ Bool amdgpu_glamor_pre_init(ScrnInfoPtr scrn)
pointer glamor_module;
CARD32 version;
 
-   if (!info->dri2.available)
-   return FALSE;
-
if (scrn->depth < 24) {
xf86DrvMsg(scrn->scrnIndex, X_ERROR,
   "glamor requires depth >= 24, disabling.\n");
diff --git a/src/amdgpu_kms.c b/src/amdgpu_kms.c
index 69d61943d..784f7388a 100644
--- a/src/amdgpu_kms.c
+++ b/src/amdgpu_kms.c
@@ -1191,19 +1191,22 @@ static Bool AMDGPUPreInitAccel_KMS(ScrnInfoPtr pScrn)
 
if (info->dri2.available)
info->gbm = gbm_create_device(pAMDGPUEnt->fd);
-   if (info->gbm == NULL)
-   info->dri2.available = FALSE;
 
-   if (use_glamor &&
-   amdgpu_glamor_pre_init(pScrn))
-   return TRUE;
-
-   if (info->dri2.available)
-   return TRUE;
+   if (info->gbm) {
+   if (!use_glamor ||
+   amdgpu_glamor_pre_init(pScrn))
+   return TRUE;
+   } else {
+   xf86DrvMsg(pScrn->scrnIndex, X_WARNING,
+  "gbm_create_device returned NULL, using "
+  "ShadowFB\n");
+   }
+   } else {
+   xf86DrvMsg(pScrn->scrnIndex, X_CONFIG,
+  "GPU acceleration disabled, using ShadowFB\n");
}
 
-   xf86DrvMsg(pScrn->scrnIndex, X_INFO,
-	   "GPU accel disabled or not working, using shadowfb for KMS\n");
+   info->dri2.available = FALSE;
info->shadow_fb = TRUE;
if (!xf86LoadSubModule(pScrn, "shadow"))
info->shadow_fb = FALSE;
-- 
2.11.0
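
For readers skimming the diff: the decision flow this patch produces in
AMDGPUPreInitAccel_KMS() can be summarized as below. This is a paraphrase of
the patched code for orientation, not the literal driver source; all names
come from the diff above.

	if (info->dri2.available) {
		info->gbm = gbm_create_device(pAMDGPUEnt->fd);
		if (info->gbm) {
			/* glamor disabled, or glamor pre-init succeeded */
			if (!use_glamor || amdgpu_glamor_pre_init(pScrn))
				return TRUE;	/* acceleration is usable */
		} else {
			xf86DrvMsg(pScrn->scrnIndex, X_WARNING,
				   "gbm_create_device returned NULL, using ShadowFB\n");
		}
	} else {
		xf86DrvMsg(pScrn->scrnIndex, X_CONFIG,
			   "GPU acceleration disabled, using ShadowFB\n");
	}
	/* every path that falls through here runs without acceleration */
	info->dri2.available = FALSE;
	info->shadow_fb = TRUE;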



Re: [PATCH 4/6] drm/amdgpu/gfx: move more common KIQ code to amdgpu_gfx.c

2017-06-07 Thread axie

Reviewed-by: Alex Xie 


On 2017-06-07 03:34 PM, Alex Deucher wrote:

Lots more common stuff.

Signed-off-by: Alex Deucher 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 103 ++
  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h |  11 
  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c   | 110 ++--
  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c   | 109 ++-
  4 files changed, 122 insertions(+), 211 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 51a9708..c5aa465 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -145,3 +145,106 @@ void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
if (WARN_ON(adev->gfx.num_compute_rings > AMDGPU_MAX_COMPUTE_RINGS))
adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
  }
+
+static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+   int queue_bit;
+   int mec, pipe, queue;
+
+   queue_bit = adev->gfx.mec.num_mec
+   * adev->gfx.mec.num_pipe_per_mec
+   * adev->gfx.mec.num_queue_per_pipe;
+
+   while (queue_bit-- >= 0) {
+   if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
+   continue;
+
+   amdgpu_gfx_bit_to_queue(adev, queue_bit, &mec, &pipe, &queue);
+
+   /* Using pipes 2/3 from MEC 2 seems to cause problems */
+   if (mec == 1 && pipe > 1)
+   continue;
+
+   ring->me = mec + 1;
+   ring->pipe = pipe;
+   ring->queue = queue;
+
+   return 0;
+   }
+
+   dev_err(adev->dev, "Failed to find a queue for KIQ\n");
+   return -EINVAL;
+}
+
+int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
+struct amdgpu_ring *ring,
+struct amdgpu_irq_src *irq)
+{
+   struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+   int r = 0;
+
+   mutex_init(&kiq->ring_mutex);
+
+   r = amdgpu_wb_get(adev, &adev->virt.reg_val_offs);
+   if (r)
+   return r;
+
+   ring->adev = NULL;
+   ring->ring_obj = NULL;
+   ring->use_doorbell = true;
+   ring->doorbell_index = AMDGPU_DOORBELL_KIQ;
+
+   r = amdgpu_gfx_kiq_acquire(adev, ring);
+   if (r)
+   return r;
+
+   ring->eop_gpu_addr = kiq->eop_gpu_addr;
+   sprintf(ring->name, "kiq %d.%d.%d", ring->me, ring->pipe, ring->queue);
+   r = amdgpu_ring_init(adev, ring, 1024,
+irq, AMDGPU_CP_KIQ_IRQ_DRIVER0);
+   if (r)
+   dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);
+
+   return r;
+}
+
+void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring,
+ struct amdgpu_irq_src *irq)
+{
+   amdgpu_wb_free(ring->adev, ring->adev->virt.reg_val_offs);
+   amdgpu_ring_fini(ring);
+}
+
+void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev)
+{
+   struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+
+   amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
+}
+
+int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
+   unsigned hpd_size)
+{
+   int r;
+   u32 *hpd;
+   struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+
+   r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
+   AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
+   &kiq->eop_gpu_addr, (void **)&hpd);
+   if (r) {
+   dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
+   return r;
+   }
+
+   memset(hpd, 0, hpd_size);
+
+   r = amdgpu_bo_reserve(kiq->eop_obj, true);
+   if (unlikely(r != 0))
+   dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
+   amdgpu_bo_kunmap(kiq->eop_obj);
+   amdgpu_bo_unreserve(kiq->eop_obj);
+
+   return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index fa20438..b1766fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -32,6 +32,17 @@ void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se,
  
  void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev);
  
+int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,

+struct amdgpu_ring *ring,
+struct amdgpu_irq_src *irq);
+
+void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring,
+ struct amdgpu_irq_src *irq);
+
+void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev);
+int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
+   unsigned hpd_size);
+
  /**
   * amdgpu_gfx_create_bitmask - create a bitmask
   *
diff --git a/drivers/gpu/dr
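
As a usage note on the helpers moved above: after this patch, the
per-generation KIQ setup in sw_init reduces to a short call sequence, roughly
like the sketch below (inferred from the gfx_v8_0.c/gfx_v9_0.c call sites;
the MEC_HPD_SIZE constant is per-ASIC and the exact spelling may differ):

	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	int r;

	/* allocate and map the KIQ EOP buffer */
	r = amdgpu_gfx_kiq_init(adev, MEC_HPD_SIZE);
	if (r)
		return r;

	/* pick a free (mec, pipe, queue) and create the KIQ ring */
	r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
	if (r)
		return r;

	/* ...and the matching teardown in sw_fini: */
	amdgpu_gfx_kiq_free_ring(&kiq->ring, &kiq->irq);
	amdgpu_gfx_kiq_fini(adev);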

Re: [PATCH 6/6] drm/amdgpu/gfx: consolidate mqd buffer setup code

2017-06-07 Thread axie

Reviewed-by: Alex Xie 


On 2017-06-07 03:34 PM, Alex Deucher wrote:

It was duplicated across multiple generations.

Signed-off-by: Alex Deucher 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 66 ++
  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h |  4 ++
  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c   | 71 +
  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c   | 65 +-
  4 files changed, 74 insertions(+), 132 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index c5aa465..dfbf027 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -248,3 +248,69 @@ int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
  
  	return 0;

  }
+
+/* create MQD for each compute queue */
+int amdgpu_gfx_compute_mqd_sw_init(struct amdgpu_device *adev,
+  unsigned mqd_size)
+{
+   struct amdgpu_ring *ring = NULL;
+   int r, i;
+
+   /* create MQD for KIQ */
+   ring = &adev->gfx.kiq.ring;
+   if (!ring->mqd_obj) {
+   r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
+   AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
+   &ring->mqd_gpu_addr, &ring->mqd_ptr);
+   if (r) {
+   dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
+   return r;
+   }
+
+   /* prepare MQD backup */
+   adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS] = kmalloc(mqd_size, GFP_KERNEL);
+   if (!adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS])
+   dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
+   }
+
+   /* create MQD for each KCQ */
+   for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+   ring = &adev->gfx.compute_ring[i];
+   if (!ring->mqd_obj) {
+   r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
+   AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
+   &ring->mqd_gpu_addr, &ring->mqd_ptr);
+   if (r) {
+   dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
+   return r;
+   }
+
+   /* prepare MQD backup */
+   adev->gfx.mec.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
+   if (!adev->gfx.mec.mqd_backup[i])
+   dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
+   }
+   }
+
+   return 0;
+}
+
+void amdgpu_gfx_compute_mqd_sw_fini(struct amdgpu_device *adev)
+{
+   struct amdgpu_ring *ring = NULL;
+   int i;
+
+   for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+   ring = &adev->gfx.compute_ring[i];
+   kfree(adev->gfx.mec.mqd_backup[i]);
+   amdgpu_bo_free_kernel(&ring->mqd_obj,
+ &ring->mqd_gpu_addr,
+ &ring->mqd_ptr);
+   }
+
+   ring = &adev->gfx.kiq.ring;
+   kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
+   amdgpu_bo_free_kernel(&ring->mqd_obj,
+ &ring->mqd_gpu_addr,
+ &ring->mqd_ptr);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index b1766fa..1f27905 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -43,6 +43,10 @@ void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev);
  int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
unsigned hpd_size);
  
+int amdgpu_gfx_compute_mqd_sw_init(struct amdgpu_device *adev,

+  unsigned mqd_size);
+void amdgpu_gfx_compute_mqd_sw_fini(struct amdgpu_device *adev);
+
  /**
   * amdgpu_gfx_create_bitmask - create a bitmask
   *
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 97d39369..6e541af 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -659,8 +659,6 @@ static u32 gfx_v8_0_get_csb_size(struct amdgpu_device 
*adev);
  static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev);
  static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring);
  static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring);
-static int gfx_v8_0_compute_mqd_sw_init(struct amdgpu_device *adev);
-static void gfx_v8_0_compute_mqd_sw_fini(struct amdgpu_device *adev);
  
  static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)

  {
@@ -2102,7 +2100,7 @@ static int gfx_v8_0_s
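
With this consolidation, each generation's sw_init differs only in the size of
its MQD structure. A sketch of the resulting call sites (the vi_mqd/v9_mqd
struct names are assumptions based on the GFX8/GFX9 headers; check the tree
for the exact types):

	/* gfx_v8_0_sw_init() (sketch): */
	r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct vi_mqd));
	if (r)
		return r;

	/* gfx_v9_0_sw_init() (sketch): */
	r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd));
	if (r)
		return r;

	/* shared teardown in sw_fini: */
	amdgpu_gfx_compute_mqd_sw_fini(adev);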

Re: [PATCH 5/6] drm/amdgpu/gfx: move mec parameter setup into sw_init

2017-06-07 Thread axie

Reviewed-by: Alex Xie 

On 2017-06-07 03:34 PM, Alex Deucher wrote:

This will allow us to share more mec code.

Signed-off-by: Alex Deucher 
---
  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 30 +--
  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 38 +--
  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 26 
  3 files changed, 47 insertions(+), 47 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index d80cf72..e30c7d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2817,21 +2817,6 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
  
  	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
  
-	switch (adev->asic_type) {

-   case CHIP_KAVERI:
-   adev->gfx.mec.num_mec = 2;
-   break;
-   case CHIP_BONAIRE:
-   case CHIP_HAWAII:
-   case CHIP_KABINI:
-   case CHIP_MULLINS:
-   default:
-   adev->gfx.mec.num_mec = 1;
-   break;
-   }
-   adev->gfx.mec.num_pipe_per_mec = 4;
-   adev->gfx.mec.num_queue_per_pipe = 8;
-
/* take ownership of the relevant compute queues */
amdgpu_gfx_compute_queue_acquire(adev);
  
@@ -4723,6 +4708,21 @@ static int gfx_v7_0_sw_init(void *handle)

struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i, j, k, r, ring_id;
  
+	switch (adev->asic_type) {

+   case CHIP_KAVERI:
+   adev->gfx.mec.num_mec = 2;
+   break;
+   case CHIP_BONAIRE:
+   case CHIP_HAWAII:
+   case CHIP_KABINI:
+   case CHIP_MULLINS:
+   default:
+   adev->gfx.mec.num_mec = 1;
+   break;
+   }
+   adev->gfx.mec.num_pipe_per_mec = 4;
+   adev->gfx.mec.num_queue_per_pipe = 8;
+
/* EOP Event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 8a9d35a..97d39369 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1387,25 +1387,6 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
  
  	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
  
-	switch (adev->asic_type) {

-   case CHIP_FIJI:
-   case CHIP_TONGA:
-   case CHIP_POLARIS11:
-   case CHIP_POLARIS12:
-   case CHIP_POLARIS10:
-   case CHIP_CARRIZO:
-   adev->gfx.mec.num_mec = 2;
-   break;
-   case CHIP_TOPAZ:
-   case CHIP_STONEY:
-   default:
-   adev->gfx.mec.num_mec = 1;
-   break;
-   }
-
-   adev->gfx.mec.num_pipe_per_mec = 4;
-   adev->gfx.mec.num_queue_per_pipe = 8;
-
/* take ownership of the relevant compute queues */
amdgpu_gfx_compute_queue_acquire(adev);
  
@@ -2009,6 +1990,25 @@ static int gfx_v8_0_sw_init(void *handle)

struct amdgpu_kiq *kiq;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  
+	switch (adev->asic_type) {

+   case CHIP_FIJI:
+   case CHIP_TONGA:
+   case CHIP_POLARIS11:
+   case CHIP_POLARIS12:
+   case CHIP_POLARIS10:
+   case CHIP_CARRIZO:
+   adev->gfx.mec.num_mec = 2;
+   break;
+   case CHIP_TOPAZ:
+   case CHIP_STONEY:
+   default:
+   adev->gfx.mec.num_mec = 1;
+   break;
+   }
+
+   adev->gfx.mec.num_pipe_per_mec = 4;
+   adev->gfx.mec.num_queue_per_pipe = 8;
+
/* KIQ event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 178, &adev->gfx.kiq.irq);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index fbb9d20..b7094c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -870,19 +870,6 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
  
  	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
  
-	switch (adev->asic_type) {

-   case CHIP_VEGA10:
-   case CHIP_RAVEN:
-   adev->gfx.mec.num_mec = 2;
-   break;
-   default:
-   adev->gfx.mec.num_mec = 1;
-   break;
-   }
-
-   adev->gfx.mec.num_pipe_per_mec = 4;
-   adev->gfx.mec.num_queue_per_pipe = 8;
-
/* take ownership of the relevant compute queues */
amdgpu_gfx_compute_queue_acquire(adev);
mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
@@ -1393,6 +1380,19 @@ static int gfx_v9_0_sw_init(void *handle)
struct amdgpu_kiq *kiq;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  
+	switch (adev->asic_type) {

+   case CHIP_VEGA10:
+   case CHIP_RAVEN:
+   adev->gfx.mec.num_mec = 2;
+   break;
+  

Re: [PATCH 2/6] drm/amdgpu/gfx9: remove spurious line in kiq setup

2017-06-07 Thread axie

Reviewed-by: Alex Xie 


On 2017-06-07 03:34 PM, Alex Deucher wrote:

This overrode the queue that was actually assigned for the KIQ.

Signed-off-by: Alex Deucher 
---
  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 1 -
  1 file changed, 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index e0193e4..4c47754f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1054,7 +1054,6 @@ static int gfx_v9_0_kiq_init_ring(struct amdgpu_device 
*adev,
if (r)
return r;
  
-	ring->queue = 0;

ring->eop_gpu_addr = kiq->eop_gpu_addr;
sprintf(ring->name, "kiq %d.%d.%d", ring->me, ring->pipe, ring->queue);
r = amdgpu_ring_init(adev, ring, 1024,
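
To spell out the bug (a reading of the diff above): the KIQ init path first
picks a free (mec, pipe, queue) triple, via the helper that patch 4/6 later
consolidates as amdgpu_gfx_kiq_acquire(), and the removed line then threw the
chosen queue away:

	/* before this fix (sketch; acquire_kiq_queue is a hypothetical name) */
	r = acquire_kiq_queue(adev, ring);	/* sets ring->me/pipe/queue */
	...
	ring->queue = 0;	/* spurious: clobbers the acquired queue */
	sprintf(ring->name, "kiq %d.%d.%d", ring->me, ring->pipe, ring->queue);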




Re: [PATCH 3/6] drm/amdgpu: move mec queue helpers to amdgpu_gfx.h

2017-06-07 Thread axie

Reviewed-by: Alex Xie 


On 2017-06-07 03:34 PM, Alex Deucher wrote:

They are gfx related, not general helpers.

Signed-off-by: Alex Deucher 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu.h| 30 --
  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c |  9 +
  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h| 30 ++
  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c  |  2 +-
  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c  |  4 ++--
  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c  |  4 ++--
  6 files changed, 40 insertions(+), 39 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index ef34ff2..3308e62 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1831,36 +1831,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
return NULL;
  }
  
-static inline int amdgpu_queue_to_bit(struct amdgpu_device *adev,

- int mec, int pipe, int queue)
-{
-   int bit = 0;
-
-   bit += mec * adev->gfx.mec.num_pipe_per_mec
-   * adev->gfx.mec.num_queue_per_pipe;
-   bit += pipe * adev->gfx.mec.num_queue_per_pipe;
-   bit += queue;
-
-   return bit;
-}
-
-static inline void amdgpu_bit_to_queue(struct amdgpu_device *adev, int bit,
-  int *mec, int *pipe, int *queue)
-{
-   *queue = bit % adev->gfx.mec.num_queue_per_pipe;
-   *pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
-   % adev->gfx.mec.num_pipe_per_mec;
-   *mec = (bit / adev->gfx.mec.num_queue_per_pipe)
-  / adev->gfx.mec.num_pipe_per_mec;
-
-}
-static inline bool amdgpu_is_mec_queue_enabled(struct amdgpu_device *adev,
-  int mec, int pipe, int queue)
-{
-   return test_bit(amdgpu_queue_to_bit(adev, mec, pipe, queue),
-   adev->gfx.mec.queue_bitmap);
-}
-
  /*
   * ASICs macro.
   */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 339e8cd..5f8ada1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -24,6 +24,7 @@
  #include "amd_shared.h"
  #include 
  #include "amdgpu.h"
+#include "amdgpu_gfx.h"
  #include 
  
  const struct kfd2kgd_calls *kfd2kgd;

@@ -113,10 +114,10 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
  
  		/* remove the KIQ bit as well */

if (adev->gfx.kiq.ring.ready)
-   clear_bit(amdgpu_queue_to_bit(adev,
- adev->gfx.kiq.ring.me - 1,
- adev->gfx.kiq.ring.pipe,
- adev->gfx.kiq.ring.queue),
+   clear_bit(amdgpu_gfx_queue_to_bit(adev,
+ adev->gfx.kiq.ring.me - 1,
+ adev->gfx.kiq.ring.pipe,
+ adev->gfx.kiq.ring.queue),
  gpu_resources.queue_bitmap);
  
  		/* According to linux/bitmap.h we shouldn't use bitmap_clear if

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 9b9ea6e..fa20438 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -45,4 +45,34 @@ static inline u32 amdgpu_gfx_create_bitmask(u32 bit_width)
return (u32)((1ULL << bit_width) - 1);
  }
  
+static inline int amdgpu_gfx_queue_to_bit(struct amdgpu_device *adev,

+ int mec, int pipe, int queue)
+{
+   int bit = 0;
+
+   bit += mec * adev->gfx.mec.num_pipe_per_mec
+   * adev->gfx.mec.num_queue_per_pipe;
+   bit += pipe * adev->gfx.mec.num_queue_per_pipe;
+   bit += queue;
+
+   return bit;
+}
+
+static inline void amdgpu_gfx_bit_to_queue(struct amdgpu_device *adev, int bit,
+  int *mec, int *pipe, int *queue)
+{
+   *queue = bit % adev->gfx.mec.num_queue_per_pipe;
+   *pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
+   % adev->gfx.mec.num_pipe_per_mec;
+   *mec = (bit / adev->gfx.mec.num_queue_per_pipe)
+  / adev->gfx.mec.num_pipe_per_mec;
+
+}
+static inline bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
+  int mec, int pipe, int queue)
+{
+   return test_bit(amdgpu_gfx_queue_to_bit(adev, mec, pipe, queue),
+   adev->gfx.mec.queue_bitmap);
+}
+
  #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 6ffb2da..d80cf72 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@
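
As a quick sanity check of the mapping these renamed helpers implement (pure
arithmetic, using the num_pipe_per_mec = 4 and num_queue_per_pipe = 8 values
the gfx IP files set):

	/* (mec, pipe, queue) = (1, 2, 3):
	 *   bit = 1*4*8 + 2*8 + 3 = 32 + 16 + 3 = 51
	 * and amdgpu_gfx_bit_to_queue(adev, 51, ...) inverts it:
	 *   queue = 51 % 8       = 3
	 *   pipe  = (51 / 8) % 4 = 2
	 *   mec   = (51 / 8) / 4 = 1
	 */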

Re: [PATCH 1/6] drm/amdgpu/gfx8: whitespace change

2017-06-07 Thread axie

Reviewed-by: Alex Xie 


On 2017-06-07 03:34 PM, Alex Deucher wrote:

Make it consistent.

Signed-off-by: Alex Deucher 
---
  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 3 ++-
  1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index cfa37f1..8d39e7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -859,7 +859,8 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, 
long timeout)
  }
  
  
-static void gfx_v8_0_free_microcode(struct amdgpu_device *adev) {

+static void gfx_v8_0_free_microcode(struct amdgpu_device *adev)
+{
release_firmware(adev->gfx.pfp_fw);
adev->gfx.pfp_fw = NULL;
release_firmware(adev->gfx.me_fw);




[PATCH] drm/amdgpu: add virtual display support for raven

2017-06-07 Thread Alex Deucher
Same as the other ASICs.  If enabled, this exposes a user-selectable
number of virtual displays.

Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/soc15.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c 
b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 22b222b..e343844 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -503,6 +503,8 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &vega10_ih_ip_block);
amdgpu_ip_block_add(adev, &psp_v10_0_ip_block);
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+   if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
+   amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
amdgpu_ip_block_add(adev, &vcn_v1_0_ip_block);
-- 
2.5.5



[PATCH 2/2] drm/amdgpu: drop deprecated drm_get_pci_dev and drm_put_dev

2017-06-07 Thread Alex Deucher
Open code them so we can adjust the order in the
driver more easily.

Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 28 ++--
 1 file changed, 26 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 05c8343..855b0c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -507,6 +507,7 @@ static int amdgpu_kick_out_firmware_fb(struct pci_dev *pdev)
 static int amdgpu_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
 {
+   struct drm_device *dev;
unsigned long flags = ent->driver_data;
int ret;
 
@@ -529,7 +530,29 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
 
-   return drm_get_pci_dev(pdev, ent, &kms_driver);
+   dev = drm_dev_alloc(&kms_driver, &pdev->dev);
+   if (IS_ERR(dev))
+   return PTR_ERR(dev);
+
+   ret = pci_enable_device(pdev);
+   if (ret)
+   goto err_free;
+
+   dev->pdev = pdev;
+
+   pci_set_drvdata(pdev, dev);
+
+   ret = drm_dev_register(dev, ent->driver_data);
+   if (ret)
+   goto err_pci;
+
+   return 0;
+
+err_pci:
+   pci_disable_device(pdev);
+err_free:
+   drm_dev_unref(dev);
+   return ret;
 }
 
 static void
@@ -537,7 +560,8 @@ amdgpu_pci_remove(struct pci_dev *pdev)
 {
struct drm_device *dev = pci_get_drvdata(pdev);
 
-   drm_put_dev(dev);
+   drm_dev_unregister(dev);
+   drm_dev_unref(dev);
 }
 
 static void
-- 
2.5.5
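
The open-coded paths make the init/teardown pairing explicit (a summary of the
diff above, nothing beyond it):

	/* probe:  drm_dev_alloc -> pci_enable_device -> drm_dev_register
	 * remove: drm_dev_unregister -> drm_dev_unref
	 * probe error unwind: err_pci -> pci_disable_device,
	 *                     err_free -> drm_dev_unref
	 */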



[PATCH 1/2] drm/amdgpu: call pci_[un]register_driver() directly

2017-06-07 Thread Alex Deucher
Call pci_register_driver() and pci_unregister_driver() directly rather
than the deprecated drm_pci_init() and drm_pci_exit(), which just
wrapped the PCI functions anyway.

Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 8c0c1c1..05c8343 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -833,7 +833,7 @@ static int __init amdgpu_init(void)
driver->num_ioctls = amdgpu_max_kms_ioctl;
amdgpu_register_atpx_handler();
/* let modprobe override vga console setting */
-   return drm_pci_init(driver, pdriver);
+   return pci_register_driver(pdriver);
 
 error_sched:
amdgpu_fence_slab_fini();
@@ -848,7 +848,7 @@ static int __init amdgpu_init(void)
 static void __exit amdgpu_exit(void)
 {
amdgpu_amdkfd_fini();
-   drm_pci_exit(driver, pdriver);
+   pci_unregister_driver(pdriver);
amdgpu_unregister_atpx_handler();
amdgpu_sync_fini();
amd_sched_fence_slab_fini();
-- 
2.5.5



[PATCH 5/6] drm/amdgpu/gfx: move mec parameter setup into sw_init

2017-06-07 Thread Alex Deucher
This will allow us to share more mec code.

Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 30 +--
 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 38 +--
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 26 
 3 files changed, 47 insertions(+), 47 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index d80cf72..e30c7d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2817,21 +2817,6 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
 
bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
 
-   switch (adev->asic_type) {
-   case CHIP_KAVERI:
-   adev->gfx.mec.num_mec = 2;
-   break;
-   case CHIP_BONAIRE:
-   case CHIP_HAWAII:
-   case CHIP_KABINI:
-   case CHIP_MULLINS:
-   default:
-   adev->gfx.mec.num_mec = 1;
-   break;
-   }
-   adev->gfx.mec.num_pipe_per_mec = 4;
-   adev->gfx.mec.num_queue_per_pipe = 8;
-
/* take ownership of the relevant compute queues */
amdgpu_gfx_compute_queue_acquire(adev);
 
@@ -4723,6 +4708,21 @@ static int gfx_v7_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i, j, k, r, ring_id;
 
+   switch (adev->asic_type) {
+   case CHIP_KAVERI:
+   adev->gfx.mec.num_mec = 2;
+   break;
+   case CHIP_BONAIRE:
+   case CHIP_HAWAII:
+   case CHIP_KABINI:
+   case CHIP_MULLINS:
+   default:
+   adev->gfx.mec.num_mec = 1;
+   break;
+   }
+   adev->gfx.mec.num_pipe_per_mec = 4;
+   adev->gfx.mec.num_queue_per_pipe = 8;
+
/* EOP Event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 8a9d35a..97d39369 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1387,25 +1387,6 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
 
bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
 
-   switch (adev->asic_type) {
-   case CHIP_FIJI:
-   case CHIP_TONGA:
-   case CHIP_POLARIS11:
-   case CHIP_POLARIS12:
-   case CHIP_POLARIS10:
-   case CHIP_CARRIZO:
-   adev->gfx.mec.num_mec = 2;
-   break;
-   case CHIP_TOPAZ:
-   case CHIP_STONEY:
-   default:
-   adev->gfx.mec.num_mec = 1;
-   break;
-   }
-
-   adev->gfx.mec.num_pipe_per_mec = 4;
-   adev->gfx.mec.num_queue_per_pipe = 8;
-
/* take ownership of the relevant compute queues */
amdgpu_gfx_compute_queue_acquire(adev);
 
@@ -2009,6 +1990,25 @@ static int gfx_v8_0_sw_init(void *handle)
struct amdgpu_kiq *kiq;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+   switch (adev->asic_type) {
+   case CHIP_FIJI:
+   case CHIP_TONGA:
+   case CHIP_POLARIS11:
+   case CHIP_POLARIS12:
+   case CHIP_POLARIS10:
+   case CHIP_CARRIZO:
+   adev->gfx.mec.num_mec = 2;
+   break;
+   case CHIP_TOPAZ:
+   case CHIP_STONEY:
+   default:
+   adev->gfx.mec.num_mec = 1;
+   break;
+   }
+
+   adev->gfx.mec.num_pipe_per_mec = 4;
+   adev->gfx.mec.num_queue_per_pipe = 8;
+
/* KIQ event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 178, &adev->gfx.kiq.irq);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index fbb9d20..b7094c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -870,19 +870,6 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
 
bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
 
-   switch (adev->asic_type) {
-   case CHIP_VEGA10:
-   case CHIP_RAVEN:
-   adev->gfx.mec.num_mec = 2;
-   break;
-   default:
-   adev->gfx.mec.num_mec = 1;
-   break;
-   }
-
-   adev->gfx.mec.num_pipe_per_mec = 4;
-   adev->gfx.mec.num_queue_per_pipe = 8;
-
/* take ownership of the relevant compute queues */
amdgpu_gfx_compute_queue_acquire(adev);
mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
@@ -1393,6 +1380,19 @@ static int gfx_v9_0_sw_init(void *handle)
struct amdgpu_kiq *kiq;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+   switch (adev->asic_type) {
+   case CHIP_VEGA10:
+   case CHIP_RAVEN:
+   adev->gfx.mec.num_mec = 2;
+   break;
+   default:
+   adev->gfx.me
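
For orientation, the three values being moved into sw_init define the compute
queue topology that the shared helpers iterate over. Illustrative arithmetic
for the two-MEC ASICs:

	/* num_mec = 2, num_pipe_per_mec = 4, num_queue_per_pipe = 8
	 * => 2 * 4 * 8 = 64 hardware compute queues in total, one bit each
	 *    in mec.queue_bitmap (sized by AMDGPU_MAX_COMPUTE_QUEUES).
	 */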

[PATCH 6/6] drm/amdgpu/gfx: consolidate mqd buffer setup code

2017-06-07 Thread Alex Deucher
It was duplicated across multiple generations.

Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 66 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h |  4 ++
 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c   | 71 +
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c   | 65 +-
 4 files changed, 74 insertions(+), 132 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index c5aa465..dfbf027 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -248,3 +248,69 @@ int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
 
return 0;
 }
+
+/* create MQD for each compute queue */
+int amdgpu_gfx_compute_mqd_sw_init(struct amdgpu_device *adev,
+  unsigned mqd_size)
+{
+   struct amdgpu_ring *ring = NULL;
+   int r, i;
+
+   /* create MQD for KIQ */
+   ring = &adev->gfx.kiq.ring;
+   if (!ring->mqd_obj) {
+   r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
+   AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
+   &ring->mqd_gpu_addr, &ring->mqd_ptr);
+   if (r) {
+   dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
+   return r;
+   }
+
+   /* prepare MQD backup */
+   adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS] = kmalloc(mqd_size, GFP_KERNEL);
+   if (!adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS])
+   dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
+   }
+
+   /* create MQD for each KCQ */
+   for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+   ring = &adev->gfx.compute_ring[i];
+   if (!ring->mqd_obj) {
+   r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
+   AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
+   &ring->mqd_gpu_addr, &ring->mqd_ptr);
+   if (r) {
+   dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
+   return r;
+   }
+
+   /* prepare MQD backup */
+   adev->gfx.mec.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
+   if (!adev->gfx.mec.mqd_backup[i])
+   dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
+   }
+   }
+
+   return 0;
+}
+
+void amdgpu_gfx_compute_mqd_sw_fini(struct amdgpu_device *adev)
+{
+   struct amdgpu_ring *ring = NULL;
+   int i;
+
+   for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+   ring = &adev->gfx.compute_ring[i];
+   kfree(adev->gfx.mec.mqd_backup[i]);
+   amdgpu_bo_free_kernel(&ring->mqd_obj,
+ &ring->mqd_gpu_addr,
+ &ring->mqd_ptr);
+   }
+
+   ring = &adev->gfx.kiq.ring;
+   kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
+   amdgpu_bo_free_kernel(&ring->mqd_obj,
+ &ring->mqd_gpu_addr,
+ &ring->mqd_ptr);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index b1766fa..1f27905 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -43,6 +43,10 @@ void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev);
 int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
unsigned hpd_size);
 
+int amdgpu_gfx_compute_mqd_sw_init(struct amdgpu_device *adev,
+  unsigned mqd_size);
+void amdgpu_gfx_compute_mqd_sw_fini(struct amdgpu_device *adev);
+
 /**
  * amdgpu_gfx_create_bitmask - create a bitmask
  *
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 97d39369..6e541af 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -659,8 +659,6 @@ static u32 gfx_v8_0_get_csb_size(struct amdgpu_device 
*adev);
 static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev);
 static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring);
 static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring);
-static int gfx_v8_0_compute_mqd_sw_init(struct amdgpu_device *adev);
-static void gfx_v8_0_compute_mqd_sw_fini(struct amdgpu_device *adev);
 
 static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
 {
@@ -2102,7 +2100,7 @@ static int gfx_v8_0_sw_init(void *handle)
return r;
 
/* create MQD for all compute 

[PATCH 2/6] drm/amdgpu/gfx9: remove spurious line in kiq setup

2017-06-07 Thread Alex Deucher
This overrode the queue that was actually assigned for the KIQ.

Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index e0193e4..4c47754f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1054,7 +1054,6 @@ static int gfx_v9_0_kiq_init_ring(struct amdgpu_device 
*adev,
if (r)
return r;
 
-   ring->queue = 0;
ring->eop_gpu_addr = kiq->eop_gpu_addr;
sprintf(ring->name, "kiq %d.%d.%d", ring->me, ring->pipe, ring->queue);
r = amdgpu_ring_init(adev, ring, 1024,
-- 
2.5.5



[PATCH 1/6] drm/amdgpu/gfx8: whitespace change

2017-06-07 Thread Alex Deucher
Make it consistent.

Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index cfa37f1..8d39e7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -859,7 +859,8 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, 
long timeout)
 }
 
 
-static void gfx_v8_0_free_microcode(struct amdgpu_device *adev) {
+static void gfx_v8_0_free_microcode(struct amdgpu_device *adev)
+{
release_firmware(adev->gfx.pfp_fw);
adev->gfx.pfp_fw = NULL;
release_firmware(adev->gfx.me_fw);
-- 
2.5.5



[PATCH 3/6] drm/amdgpu: move mec queue helpers to amdgpu_gfx.h

2017-06-07 Thread Alex Deucher
They are gfx related, not general helpers.

Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h| 30 --
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c |  9 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h| 30 ++
 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c  |  2 +-
 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c  |  4 ++--
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c  |  4 ++--
 6 files changed, 40 insertions(+), 39 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index ef34ff2..3308e62 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1831,36 +1831,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
return NULL;
 }
 
-static inline int amdgpu_queue_to_bit(struct amdgpu_device *adev,
- int mec, int pipe, int queue)
-{
-   int bit = 0;
-
-   bit += mec * adev->gfx.mec.num_pipe_per_mec
-   * adev->gfx.mec.num_queue_per_pipe;
-   bit += pipe * adev->gfx.mec.num_queue_per_pipe;
-   bit += queue;
-
-   return bit;
-}
-
-static inline void amdgpu_bit_to_queue(struct amdgpu_device *adev, int bit,
-  int *mec, int *pipe, int *queue)
-{
-   *queue = bit % adev->gfx.mec.num_queue_per_pipe;
-   *pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
-   % adev->gfx.mec.num_pipe_per_mec;
-   *mec = (bit / adev->gfx.mec.num_queue_per_pipe)
-  / adev->gfx.mec.num_pipe_per_mec;
-
-}
-static inline bool amdgpu_is_mec_queue_enabled(struct amdgpu_device *adev,
-  int mec, int pipe, int queue)
-{
-   return test_bit(amdgpu_queue_to_bit(adev, mec, pipe, queue),
-   adev->gfx.mec.queue_bitmap);
-}
-
 /*
  * ASICs macro.
  */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 339e8cd..5f8ada1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -24,6 +24,7 @@
 #include "amd_shared.h"
 #include 
 #include "amdgpu.h"
+#include "amdgpu_gfx.h"
 #include 
 
 const struct kfd2kgd_calls *kfd2kgd;
@@ -113,10 +114,10 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 
/* remove the KIQ bit as well */
if (adev->gfx.kiq.ring.ready)
-   clear_bit(amdgpu_queue_to_bit(adev,
- adev->gfx.kiq.ring.me - 1,
- adev->gfx.kiq.ring.pipe,
- adev->gfx.kiq.ring.queue),
+   clear_bit(amdgpu_gfx_queue_to_bit(adev,
+ adev->gfx.kiq.ring.me - 1,
+ adev->gfx.kiq.ring.pipe,
+ adev->gfx.kiq.ring.queue),
  gpu_resources.queue_bitmap);
 
/* According to linux/bitmap.h we shouldn't use bitmap_clear if
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 9b9ea6e..fa20438 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -45,4 +45,34 @@ static inline u32 amdgpu_gfx_create_bitmask(u32 bit_width)
return (u32)((1ULL << bit_width) - 1);
 }
 
+static inline int amdgpu_gfx_queue_to_bit(struct amdgpu_device *adev,
+ int mec, int pipe, int queue)
+{
+   int bit = 0;
+
+   bit += mec * adev->gfx.mec.num_pipe_per_mec
+   * adev->gfx.mec.num_queue_per_pipe;
+   bit += pipe * adev->gfx.mec.num_queue_per_pipe;
+   bit += queue;
+
+   return bit;
+}
+
+static inline void amdgpu_gfx_bit_to_queue(struct amdgpu_device *adev, int bit,
+  int *mec, int *pipe, int *queue)
+{
+   *queue = bit % adev->gfx.mec.num_queue_per_pipe;
+   *pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
+   % adev->gfx.mec.num_pipe_per_mec;
+   *mec = (bit / adev->gfx.mec.num_queue_per_pipe)
+  / adev->gfx.mec.num_pipe_per_mec;
+
+}
+static inline bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
+  int mec, int pipe, int queue)
+{
+   return test_bit(amdgpu_gfx_queue_to_bit(adev, mec, pipe, queue),
+   adev->gfx.mec.queue_bitmap);
+}
+
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 6ffb2da..d80cf72 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4776,7 +4776,7 @@ static int gfx_v7_0_sw_init(void *handle)
for (

[PATCH 4/6] drm/amdgpu/gfx: move more common KIQ code to amdgpu_gfx.c

2017-06-07 Thread Alex Deucher
Lots more common stuff.

Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 103 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h |  11 
 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c   | 110 ++--
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c   | 109 ++-
 4 files changed, 122 insertions(+), 211 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 51a9708..c5aa465 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -145,3 +145,106 @@ void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
if (WARN_ON(adev->gfx.num_compute_rings > AMDGPU_MAX_COMPUTE_RINGS))
adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
 }
+
+static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+   int queue_bit;
+   int mec, pipe, queue;
+
+   queue_bit = adev->gfx.mec.num_mec
+   * adev->gfx.mec.num_pipe_per_mec
+   * adev->gfx.mec.num_queue_per_pipe;
+
+   while (queue_bit-- >= 0) {
+   if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
+   continue;
+
+   amdgpu_gfx_bit_to_queue(adev, queue_bit, &mec, &pipe, &queue);
+
+   /* Using pipes 2/3 from MEC 2 seems to cause problems */
+   if (mec == 1 && pipe > 1)
+   continue;
+
+   ring->me = mec + 1;
+   ring->pipe = pipe;
+   ring->queue = queue;
+
+   return 0;
+   }
+
+   dev_err(adev->dev, "Failed to find a queue for KIQ\n");
+   return -EINVAL;
+}
+
+int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
+struct amdgpu_ring *ring,
+struct amdgpu_irq_src *irq)
+{
+   struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+   int r = 0;
+
+   mutex_init(&kiq->ring_mutex);
+
+   r = amdgpu_wb_get(adev, &adev->virt.reg_val_offs);
+   if (r)
+   return r;
+
+   ring->adev = NULL;
+   ring->ring_obj = NULL;
+   ring->use_doorbell = true;
+   ring->doorbell_index = AMDGPU_DOORBELL_KIQ;
+
+   r = amdgpu_gfx_kiq_acquire(adev, ring);
+   if (r)
+   return r;
+
+   ring->eop_gpu_addr = kiq->eop_gpu_addr;
+   sprintf(ring->name, "kiq %d.%d.%d", ring->me, ring->pipe, ring->queue);
+   r = amdgpu_ring_init(adev, ring, 1024,
+irq, AMDGPU_CP_KIQ_IRQ_DRIVER0);
+   if (r)
+   dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);
+
+   return r;
+}
+
+void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring,
+ struct amdgpu_irq_src *irq)
+{
+   amdgpu_wb_free(ring->adev, ring->adev->virt.reg_val_offs);
+   amdgpu_ring_fini(ring);
+}
+
+void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev)
+{
+   struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+
+   amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
+}
+
+int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
+   unsigned hpd_size)
+{
+   int r;
+   u32 *hpd;
+   struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+
+   r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
+   AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
+   &kiq->eop_gpu_addr, (void **)&hpd);
+   if (r) {
+   dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
+   return r;
+   }
+
+   memset(hpd, 0, hpd_size);
+
+   r = amdgpu_bo_reserve(kiq->eop_obj, true);
+   if (unlikely(r != 0))
+   dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
+   amdgpu_bo_kunmap(kiq->eop_obj);
+   amdgpu_bo_unreserve(kiq->eop_obj);
+
+   return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index fa20438..b1766fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -32,6 +32,17 @@ void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se,
 
 void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev);
 
+int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
+struct amdgpu_ring *ring,
+struct amdgpu_irq_src *irq);
+
+void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring,
+ struct amdgpu_irq_src *irq);
+
+void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev);
+int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
+   unsigned hpd_size);
+
 /**
  * amdgpu_gfx_create_bitmask - create a bitmask
  *
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index fc8e03b..8a9d
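
One subtlety in amdgpu_gfx_kiq_acquire() above: it scans from the highest
queue bit downward and skips pipes 2/3 of the second MEC. A worked example
under the usual 2x4x8 topology (an illustration derived from the code above,
not a guarantee for every ASIC):

	/* bits 63..48 decode to mec 1, pipes 3 and 2 -> skipped (mec == 1 && pipe > 1)
	 * bit 47 decodes to mec 1, pipe 1, queue 7 -> first acceptable free bit,
	 * so the KIQ typically lands on ring->me = 2, ring->pipe = 1, ring->queue = 7
	 */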

Re: [PATCH 3/3] drm/amdgpu/gfx9: Raven has two MECs

2017-06-07 Thread axie

Reviewed-by: Alex Xie 


On 2017-06-07 11:10 AM, Alex Deucher wrote:

This was missed when Andres' queue patches were rebased.

Fixes: 42794b27 (drm/amdgpu: take ownership of per-pipe configuration v3)
Signed-off-by: Alex Deucher 
---
  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 1 +
  1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 3ea0e71..e0193e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -872,6 +872,7 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
  
  	switch (adev->asic_type) {

case CHIP_VEGA10:
+   case CHIP_RAVEN:
adev->gfx.mec.num_mec = 2;
break;
default:




Re: [PATCH 2/3] drm/amdgpu: move gfx_v*_0_compute_queue_acquire to common code

2017-06-07 Thread axie

Reviewed-by: Alex Xie 


On 2017-06-07 11:10 AM, Alex Deucher wrote:

Same function was duplicated in all gfx IP files.

Signed-off-by: Alex Deucher 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 37 +++
  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h |  2 ++
  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c   | 39 +
  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c   | 39 +
  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c   | 39 +
  5 files changed, 42 insertions(+), 114 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 1994335..51a9708 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -108,3 +108,40 @@ void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh)
p = next + 1;
}
  }
+
+void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
+{
+   int i, queue, pipe, mec;
+
+   /* policy for amdgpu compute queue ownership */
+   for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
+   queue = i % adev->gfx.mec.num_queue_per_pipe;
+   pipe = (i / adev->gfx.mec.num_queue_per_pipe)
+   % adev->gfx.mec.num_pipe_per_mec;
+   mec = (i / adev->gfx.mec.num_queue_per_pipe)
+   / adev->gfx.mec.num_pipe_per_mec;
+
+   /* we've run out of HW */
+   if (mec >= adev->gfx.mec.num_mec)
+   break;
+
+   if (adev->gfx.mec.num_mec > 1) {
+   /* policy: amdgpu owns the first two queues of the first MEC */
+   if (mec == 0 && queue < 2)
+   set_bit(i, adev->gfx.mec.queue_bitmap);
+   } else {
+   /* policy: amdgpu owns all queues in the first pipe */
+   if (mec == 0 && pipe == 0)
+   set_bit(i, adev->gfx.mec.queue_bitmap);
+   }
+   }
+
+   /* update the number of active compute rings */
+   adev->gfx.num_compute_rings =
+   bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
+
+   /* If you hit this case and edited the policy, you probably just
+* need to increase AMDGPU_MAX_COMPUTE_RINGS */
+   if (WARN_ON(adev->gfx.num_compute_rings > AMDGPU_MAX_COMPUTE_RINGS))
+   adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 2d846ef..9b9ea6e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -30,6 +30,8 @@ void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, 
uint32_t reg);
  void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se,
unsigned max_sh);
  
+void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev);

+
  /**
   * amdgpu_gfx_create_bitmask - create a bitmask
   *
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 862bc72..6ffb2da 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2809,43 +2809,6 @@ static void gfx_v7_0_mec_fini(struct amdgpu_device *adev)
}
  }
  
-static void gfx_v7_0_compute_queue_acquire(struct amdgpu_device *adev)

-{
-   int i, queue, pipe, mec;
-
-   /* policy for amdgpu compute queue ownership */
-   for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
-   queue = i % adev->gfx.mec.num_queue_per_pipe;
-   pipe = (i / adev->gfx.mec.num_queue_per_pipe)
-   % adev->gfx.mec.num_pipe_per_mec;
-   mec = (i / adev->gfx.mec.num_queue_per_pipe)
-   / adev->gfx.mec.num_pipe_per_mec;
-
-   /* we've run out of HW */
-   if (mec >= adev->gfx.mec.num_mec)
-   break;
-
-   if (adev->gfx.mec.num_mec > 1) {
-   /* policy: amdgpu owns the first two queues of the first MEC */
-   if (mec == 0 && queue < 2)
-   set_bit(i, adev->gfx.mec.queue_bitmap);
-   } else {
-   /* policy: amdgpu owns all queues in the first pipe */
-   if (mec == 0 && pipe == 0)
-   set_bit(i, adev->gfx.mec.queue_bitmap);
-   }
-   }
-
-   /* update the number of active compute rings */
-   adev->gfx.num_compute_rings =
-   bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
-
-   /* If you hit this case and edited the policy, you probably just
-* need to increase AMDGPU_MAX_COMPUTE_RINGS */
-   if (WARN_ON(adev->gfx.num_compute_rings > AMDGPU_MAX_COMPUTE_RINGS))
-  

Re: [PATCH 1/3] drm/amdgpu: fix mec queue policy on single MEC asics

2017-06-07 Thread axie

Hi Alex

I agree that we should revert the change for single-MEC ASICs for the time being.

Reviewed-by: Alex Xie 


On 2017-06-07 11:10 AM, Alex Deucher wrote:

Fixes hangs on single MEC asics.

Fixes: 2ed286fb434 (drm/amdgpu: new queue policy, take first 2 queues of each 
pipe v2)
Signed-off-by: Alex Deucher 
---
  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 12 +---
  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 12 +---
  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 12 +---
  3 files changed, 27 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 4c04e9d..862bc72 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2825,9 +2825,15 @@ static void gfx_v7_0_compute_queue_acquire(struct 
amdgpu_device *adev)
if (mec >= adev->gfx.mec.num_mec)
break;
  
-		/* policy: amdgpu owns the first two queues of the first MEC */

-   if (mec == 0 && queue < 2)
-   set_bit(i, adev->gfx.mec.queue_bitmap);
+   if (adev->gfx.mec.num_mec > 1) {
+   /* policy: amdgpu owns the first two queues of the first MEC */
+   if (mec == 0 && queue < 2)
+   set_bit(i, adev->gfx.mec.queue_bitmap);
+   } else {
+   /* policy: amdgpu owns all queues in the first pipe */
+   if (mec == 0 && pipe == 0)
+   set_bit(i, adev->gfx.mec.queue_bitmap);
+   }
}
  
  	/* update the number of active compute rings */

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index ad2e0bb..1370b39 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1464,9 +1464,15 @@ static void gfx_v8_0_compute_queue_acquire(struct 
amdgpu_device *adev)
if (mec >= adev->gfx.mec.num_mec)
break;
  
-		/* policy: amdgpu owns the first two queues of the first MEC */

-   if (mec == 0 && queue < 2)
-   set_bit(i, adev->gfx.mec.queue_bitmap);
+   if (adev->gfx.mec.num_mec > 1) {
+   /* policy: amdgpu owns the first two queues of the first MEC */
+   if (mec == 0 && queue < 2)
+   set_bit(i, adev->gfx.mec.queue_bitmap);
+   } else {
+   /* policy: amdgpu owns all queues in the first pipe */
+   if (mec == 0 && pipe == 0)
+   set_bit(i, adev->gfx.mec.queue_bitmap);
+   }
}
  
  	/* update the number of active compute rings */

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index cf15a350..9d675b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -873,9 +873,15 @@ static void gfx_v9_0_compute_queue_acquire(struct 
amdgpu_device *adev)
if (mec >= adev->gfx.mec.num_mec)
break;
  
-		/* policy: amdgpu owns the first two queues of the first MEC */

-   if (mec == 0 && queue < 2)
-   set_bit(i, adev->gfx.mec.queue_bitmap);
+   if (adev->gfx.mec.num_mec > 1) {
+   /* policy: amdgpu owns the first two queues of the first MEC */
+   if (mec == 0 && queue < 2)
+   set_bit(i, adev->gfx.mec.queue_bitmap);
+   } else {
+   /* policy: amdgpu owns all queues in the first pipe */
+   if (mec == 0 && pipe == 0)
+   set_bit(i, adev->gfx.mec.queue_bitmap);
+   }
}
  
  	/* update the number of active compute rings */




Re: [PATCH 1/3] drm/amdgpu: fix mec queue policy on single MEC asics

2017-06-07 Thread axie

Hi Alex,

In the Catalyst driver source code, Stoney is listed as supported; I don't
know how well it is supported, though. I think people have at least run
some graphics operations.


What kind of hang happens on Stoney? Is it a hang in a graphics operation,
in OpenCL, or in something else? How easily is it reproduced?


Stoney is similar to Carrizo. People have tried compute on everything from
Carrizo to Fiji in fglrx.


At the least, we should limit this change to Stoney only, unless it is
reported on other ASICs too.


This change can affect compute performance a lot. As far as I know, people
are even evaluating the AMDGPU PRO OpenCL stack on Fiji.


Thanks,

Alex Bin


On 2017-06-07 12:32 PM, Deucher, Alexander wrote:

> -----Original Message-----
> From: Xie, AlexBin
> Sent: Wednesday, June 07, 2017 12:31 PM
> To: Alex Deucher; amd-gfx@lists.freedesktop.org
> Cc: Deucher, Alexander
> Subject: Re: [PATCH 1/3] drm/amdgpu: fix mec queue policy on single MEC
> asics
>
> Hi Alex,
>
> In the closed source driver, we always assign compute queues to all 4 pipes.
> There are no hangs.
>
> May I know which ASIC?

It was reported on Stoney.

Alex

>
> Assigning all queues to the first pipe of the first MEC can slow things down.
>
> Thanks,
>
> Alex Bin Xie
>
>
>
> On 2017-06-07 11:10 AM, Alex Deucher wrote:
> > Fixes hangs on single MEC asics.
> >
> > Fixes: 2ed286fb434 (drm/amdgpu: new queue policy, take first 2 queues of each pipe v2)
> > Signed-off-by: Alex Deucher 
> > ---
> >   drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 12 +---
> >   drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 12 +---
> >   drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 12 +---
> >   3 files changed, 27 insertions(+), 9 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
> b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
> > index 4c04e9d..862bc72 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
> > @@ -2825,9 +2825,15 @@ static void gfx_v7_0_compute_queue_acquire(struct amdgpu_device *adev)
> >  if (mec >= adev->gfx.mec.num_mec)
> >  break;
> >
> > -   /* policy: amdgpu owns the first two queues of the first MEC */
> > -   if (mec == 0 && queue < 2)
> > -   set_bit(i, adev->gfx.mec.queue_bitmap);
> > +   if (adev->gfx.mec.num_mec > 1) {
> > +   /* policy: amdgpu owns the first two queues of the first MEC */
> > +   if (mec == 0 && queue < 2)
> > +   set_bit(i, adev->gfx.mec.queue_bitmap);
> > +   } else {
> > +   /* policy: amdgpu owns all queues in the first pipe */
> > +   if (mec == 0 && pipe == 0)
> > +   set_bit(i, adev->gfx.mec.queue_bitmap);
> > +   }
> >  }
> >
> >  /* update the number of active compute rings */
> > diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
> b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
> > index ad2e0bb..1370b39 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
> > @@ -1464,9 +1464,15 @@ static void
> gfx_v8_0_compute_queue_acquire(struct amdgpu_device *adev)
> >  if (mec >= adev->gfx.mec.num_mec)
> >  break;
> >
> > -   /* policy: amdgpu owns the first two queues of the 
first MEC

> */
> > -   if (mec == 0 && queue < 2)
> > -   set_bit(i, adev->gfx.mec.queue_bitmap);
> > +   if (adev->gfx.mec.num_mec > 1) {
> > +   /* policy: amdgpu owns the first two queues of the
> first MEC */
> > +   if (mec == 0 && queue < 2)
> > +   set_bit(i, adev->gfx.mec.queue_bitmap);
> > +   } else {
> > +   /* policy: amdgpu owns all queues in the first 
pipe */

> > +   if (mec == 0 && pipe == 0)
> > +   set_bit(i, adev->gfx.mec.queue_bitmap);
> > +   }
> >  }
> >
> >  /* update the number of active compute rings */
> > diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> > index cf15a350..9d675b3 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> > @@ -873,9 +873,15 @@ static void
> gfx_v9_0_compute_queue_acquire(struct amdgpu_device *adev)
> >  if (mec >= adev->gfx.mec.num_mec)
> >  break;
> >
> > -   /* policy: amdgpu owns the first two queues of the 
first MEC

> */
> > -   if (mec == 0 && queue < 2)
> > -   set_bit(i, adev->gfx.mec.queue_bitmap);
> > +   if (adev->gfx.mec.num_mec > 1) {
> > +   /* policy: amdgpu owns the first two queues of the
> first MEC */
> > +   if (mec == 0 && queue < 2)
> > +   set_bit(i, adev->gfx.mec.queue_bitmap);
> > +   } else {
> > +   /* policy: amd

RE: [PATCH 1/3] drm/amdgpu: fix mec queue policy on single MEC asics

2017-06-07 Thread Deucher, Alexander
> -Original Message-
> From: Xie, AlexBin
> Sent: Wednesday, June 07, 2017 12:31 PM
> To: Alex Deucher; amd-gfx@lists.freedesktop.org
> Cc: Deucher, Alexander
> Subject: Re: [PATCH 1/3] drm/amdgpu: fix mec queue policy on single MEC
> asics
> 
> Hi Alex,
> 
> In the closed source driver, we always assign compute queues to all 4 pipes.
> There are no hangs.
> 
> May I know which ASIC?

It was reported on Stoney.

Alex

> 
> Assigning all queues to the first pipe of the first MEC can slow things down.
> 
> Thanks,
> 
> Alex Bin Xie
> 
> 
> 
> On 2017-06-07 11:10 AM, Alex Deucher wrote:
> > Fixes hangs on single MEC asics.
> >
> > Fixes: 2ed286fb434 (drm/amdgpu: new queue policy, take first 2 queues of
> > each pipe v2)
> > Signed-off-by: Alex Deucher 
> > ---
> >   drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 12 +---
> >   drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 12 +---
> >   drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 12 +---
> >   3 files changed, 27 insertions(+), 9 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
> > index 4c04e9d..862bc72 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
> > @@ -2825,9 +2825,15 @@ static void gfx_v7_0_compute_queue_acquire(struct amdgpu_device *adev)
> >  		if (mec >= adev->gfx.mec.num_mec)
> >  			break;
> >
> > -		/* policy: amdgpu owns the first two queues of the first MEC */
> > -		if (mec == 0 && queue < 2)
> > -			set_bit(i, adev->gfx.mec.queue_bitmap);
> > +		if (adev->gfx.mec.num_mec > 1) {
> > +			/* policy: amdgpu owns the first two queues of the first MEC */
> > +			if (mec == 0 && queue < 2)
> > +				set_bit(i, adev->gfx.mec.queue_bitmap);
> > +		} else {
> > +			/* policy: amdgpu owns all queues in the first pipe */
> > +			if (mec == 0 && pipe == 0)
> > +				set_bit(i, adev->gfx.mec.queue_bitmap);
> > +		}
> >  	}
> >
> >  	/* update the number of active compute rings */
> > diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
> > index ad2e0bb..1370b39 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
> > @@ -1464,9 +1464,15 @@ static void gfx_v8_0_compute_queue_acquire(struct amdgpu_device *adev)
> >  		if (mec >= adev->gfx.mec.num_mec)
> >  			break;
> >
> > -		/* policy: amdgpu owns the first two queues of the first MEC */
> > -		if (mec == 0 && queue < 2)
> > -			set_bit(i, adev->gfx.mec.queue_bitmap);
> > +		if (adev->gfx.mec.num_mec > 1) {
> > +			/* policy: amdgpu owns the first two queues of the first MEC */
> > +			if (mec == 0 && queue < 2)
> > +				set_bit(i, adev->gfx.mec.queue_bitmap);
> > +		} else {
> > +			/* policy: amdgpu owns all queues in the first pipe */
> > +			if (mec == 0 && pipe == 0)
> > +				set_bit(i, adev->gfx.mec.queue_bitmap);
> > +		}
> >  	}
> >
> >  	/* update the number of active compute rings */
> > diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> > index cf15a350..9d675b3 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> > @@ -873,9 +873,15 @@ static void gfx_v9_0_compute_queue_acquire(struct amdgpu_device *adev)
> >  		if (mec >= adev->gfx.mec.num_mec)
> >  			break;
> >
> > -		/* policy: amdgpu owns the first two queues of the first MEC */
> > -		if (mec == 0 && queue < 2)
> > -			set_bit(i, adev->gfx.mec.queue_bitmap);
> > +		if (adev->gfx.mec.num_mec > 1) {
> > +			/* policy: amdgpu owns the first two queues of the first MEC */
> > +			if (mec == 0 && queue < 2)
> > +				set_bit(i, adev->gfx.mec.queue_bitmap);
> > +		} else {
> > +			/* policy: amdgpu owns all queues in the first pipe */
> > +			if (mec == 0 && pipe == 0)
> > +				set_bit(i, adev->gfx.mec.queue_bitmap);
> > +		}
> >  	}
> >
> >  	/* update the number of active compute rings */

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 1/3] drm/amdgpu: fix mec queue policy on single MEC asics

2017-06-07 Thread axie

Hi Alex,

In the closed source driver, we always assign compute queues to all 4 pipes.
There are no hangs.


May I know which ASIC?

Assigning all queues to the first pipe of the first MEC can slow things down.

Thanks,

Alex Bin Xie
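
To make the trade-off concrete, here is a small self-contained sketch of how
the two in-tree policies distribute amdgpu-owned queues for a hypothetical
single-MEC part with 4 pipes of 8 queues each. The counts are assumed for
illustration only; the index decomposition mirrors the
*_compute_queue_acquire() code in the patch quoted below.

#include <stdio.h>

/* Assumed single-MEC topology, e.g. a Stoney-like part. */
#define NUM_MEC            1
#define NUM_PIPE_PER_MEC   4
#define NUM_QUEUE_PER_PIPE 8
#define MAX_QUEUES (NUM_MEC * NUM_PIPE_PER_MEC * NUM_QUEUE_PER_PIPE)

int main(void)
{
	int old_policy[NUM_PIPE_PER_MEC] = {0};
	int new_policy[NUM_PIPE_PER_MEC] = {0};
	int i;

	for (i = 0; i < MAX_QUEUES; ++i) {
		/* same flat-index decomposition as the kernel code */
		int queue = i % NUM_QUEUE_PER_PIPE;
		int pipe = (i / NUM_QUEUE_PER_PIPE) % NUM_PIPE_PER_MEC;
		int mec = (i / NUM_QUEUE_PER_PIPE) / NUM_PIPE_PER_MEC;

		/* old policy: first two queues of each pipe of MEC 0 */
		if (mec == 0 && queue < 2)
			old_policy[pipe]++;

		/* new single-MEC policy: every queue of pipe 0 */
		if (mec == 0 && pipe == 0)
			new_policy[pipe]++;
	}

	/* prints 2/2/2/2 for the old policy vs 8/0/0/0 for the new one:
	 * all amdgpu queues now land on a single pipe, which is the
	 * performance concern raised above */
	for (i = 0; i < NUM_PIPE_PER_MEC; ++i)
		printf("pipe %d: old owns %d queue(s), new owns %d\n",
		       i, old_policy[i], new_policy[i]);
	return 0;
}

Whether concentrating everything on pipe 0 actually hurts depends on how the
hardware schedules across pipes; the sketch only shows the placement.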



On 2017-06-07 11:10 AM, Alex Deucher wrote:

Fixes hangs on single MEC asics.

Fixes: 2ed286fb434 (drm/amdgpu: new queue policy, take first 2 queues of each
pipe v2)
Signed-off-by: Alex Deucher 
---
  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 12 +---
  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 12 +---
  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 12 +---
  3 files changed, 27 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 4c04e9d..862bc72 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2825,9 +2825,15 @@ static void gfx_v7_0_compute_queue_acquire(struct amdgpu_device *adev)
 		if (mec >= adev->gfx.mec.num_mec)
 			break;
 
-		/* policy: amdgpu owns the first two queues of the first MEC */
-		if (mec == 0 && queue < 2)
-			set_bit(i, adev->gfx.mec.queue_bitmap);
+		if (adev->gfx.mec.num_mec > 1) {
+			/* policy: amdgpu owns the first two queues of the first MEC */
+			if (mec == 0 && queue < 2)
+				set_bit(i, adev->gfx.mec.queue_bitmap);
+		} else {
+			/* policy: amdgpu owns all queues in the first pipe */
+			if (mec == 0 && pipe == 0)
+				set_bit(i, adev->gfx.mec.queue_bitmap);
+		}
 	}
 
 	/* update the number of active compute rings */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index ad2e0bb..1370b39 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1464,9 +1464,15 @@ static void gfx_v8_0_compute_queue_acquire(struct amdgpu_device *adev)
 		if (mec >= adev->gfx.mec.num_mec)
 			break;
 
-		/* policy: amdgpu owns the first two queues of the first MEC */
-		if (mec == 0 && queue < 2)
-			set_bit(i, adev->gfx.mec.queue_bitmap);
+		if (adev->gfx.mec.num_mec > 1) {
+			/* policy: amdgpu owns the first two queues of the first MEC */
+			if (mec == 0 && queue < 2)
+				set_bit(i, adev->gfx.mec.queue_bitmap);
+		} else {
+			/* policy: amdgpu owns all queues in the first pipe */
+			if (mec == 0 && pipe == 0)
+				set_bit(i, adev->gfx.mec.queue_bitmap);
+		}
 	}
 
 	/* update the number of active compute rings */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index cf15a350..9d675b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -873,9 +873,15 @@ static void gfx_v9_0_compute_queue_acquire(struct amdgpu_device *adev)
 		if (mec >= adev->gfx.mec.num_mec)
 			break;
 
-		/* policy: amdgpu owns the first two queues of the first MEC */
-		if (mec == 0 && queue < 2)
-			set_bit(i, adev->gfx.mec.queue_bitmap);
+		if (adev->gfx.mec.num_mec > 1) {
+			/* policy: amdgpu owns the first two queues of the first MEC */
+			if (mec == 0 && queue < 2)
+				set_bit(i, adev->gfx.mec.queue_bitmap);
+		} else {
+			/* policy: amdgpu owns all queues in the first pipe */
+			if (mec == 0 && pipe == 0)
+				set_bit(i, adev->gfx.mec.queue_bitmap);
+		}
 	}
 
 	/* update the number of active compute rings */


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH 2/3] drm/amdgpu: move gfx_v*_0_compute_queue_acquire to common code

2017-06-07 Thread Deucher, Alexander
> -Original Message-
> From: amd-gfx [mailto:amd-gfx-boun...@lists.freedesktop.org] On Behalf
> Of Tom St Denis
> Sent: Wednesday, June 07, 2017 12:01 PM
> To: amd-gfx@lists.freedesktop.org
> Subject: Re: [PATCH 2/3] drm/amdgpu: move
> gfx_v*_0_compute_queue_acquire to common code
> 
> Hi Alex,
> 
> This failed to apply on top of amd-staging-4.11... Am I trying the wrong
> branch?

Sorry, I have this patch in my tree as well, which I just pushed to
amd-staging-4.11:
https://patchwork.freedesktop.org/patch/160199/

Alex

> 
> Cheers,
> Tom
> 
> On 07/06/17 11:10 AM, Alex Deucher wrote:
> > Same function was duplicated in all gfx IP files.
> >
> > Signed-off-by: Alex Deucher 
> > ---
> >   drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 37 +++
> >   drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h |  2 ++
> >   drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c   | 39 +-
> >   drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c   | 39 +-
> >   drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c   | 39 +-
> >   5 files changed, 42 insertions(+), 114 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
> > index 1994335..51a9708 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
> > @@ -108,3 +108,40 @@ void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_s
> >  		p = next + 1;
> >  	}
> >  }
> > +
> > +void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
> > +{
> > +	int i, queue, pipe, mec;
> > +
> > +	/* policy for amdgpu compute queue ownership */
> > +	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
> > +		queue = i % adev->gfx.mec.num_queue_per_pipe;
> > +		pipe = (i / adev->gfx.mec.num_queue_per_pipe)
> > +			% adev->gfx.mec.num_pipe_per_mec;
> > +		mec = (i / adev->gfx.mec.num_queue_per_pipe)
> > +			/ adev->gfx.mec.num_pipe_per_mec;
> > +
> > +		/* we've run out of HW */
> > +		if (mec >= adev->gfx.mec.num_mec)
> > +			break;
> > +
> > +		if (adev->gfx.mec.num_mec > 1) {
> > +			/* policy: amdgpu owns the first two queues of the first MEC */
> > +			if (mec == 0 && queue < 2)
> > +				set_bit(i, adev->gfx.mec.queue_bitmap);
> > +		} else {
> > +			/* policy: amdgpu owns all queues in the first pipe */
> > +			if (mec == 0 && pipe == 0)
> > +				set_bit(i, adev->gfx.mec.queue_bitmap);
> > +		}
> > +	}
> > +
> > +	/* update the number of active compute rings */
> > +	adev->gfx.num_compute_rings =
> > +		bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
> > +
> > +	/* If you hit this case and edited the policy, you probably just
> > +	 * need to increase AMDGPU_MAX_COMPUTE_RINGS */
> > +	if (WARN_ON(adev->gfx.num_compute_rings > AMDGPU_MAX_COMPUTE_RINGS))
> > +		adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
> > +}
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
> > index 2d846ef..9b9ea6e 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
> > @@ -30,6 +30,8 @@ void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg);
> >  void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se,
> >  				 unsigned max_sh);
> >
> > +void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev);
> > +
> >  /**
> >   * amdgpu_gfx_create_bitmask - create a bitmask
> >   *
> > diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
> > index 862bc72..6ffb2da 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
> > @@ -2809,43 +2809,6 @@ static void gfx_v7_0_mec_fini(struct amdgpu_device *adev)
> >  	}
> >  }
> >
> > -static void gfx_v7_0_compute_queue_acquire(struct amdgpu_device *adev)
> > -{
> > -	int i, queue, pipe, mec;
> > -
> > -	/* policy for amdgpu compute queue ownership */
> > -	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
> > -		queue = i % adev->gfx.mec.num_queue_per_pipe;
> > -		pipe = (i / adev->gfx.mec.num_queue_per_pipe)
> > -			% adev->gfx.mec.num_pipe_per_mec;
> > -		mec = (i / adev->gfx.mec.num_queue_per_pipe)
> > -			/ adev->gfx.mec.num_pipe_per_mec;
> > -
> > -		/* we've run out of HW */
> > -		if (mec >= adev->gfx.mec.num_mec)
> > -			break;
> > -
> > -		if (adev->gfx.mec.num_mec > 1) {
> > -			/* policy: amdgpu owns the first two queues of the first MEC */

Re: [PATCH 2/3] drm/amdgpu: move gfx_v*_0_compute_queue_acquire to common code

2017-06-07 Thread Tom St Denis

Hi Alex,

This failed to apply on top of amd-staging-4.11... Am I trying the wrong 
branch?


Cheers,
Tom

On 07/06/17 11:10 AM, Alex Deucher wrote:

Same function was duplicated in all gfx IP files.

Signed-off-by: Alex Deucher 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 37 +++
  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h |  2 ++
  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c   | 39 +-
  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c   | 39 +-
  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c   | 39 +-
  5 files changed, 42 insertions(+), 114 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 1994335..51a9708 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -108,3 +108,40 @@ void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_s
 		p = next + 1;
 	}
 }
+
+void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
+{
+	int i, queue, pipe, mec;
+
+	/* policy for amdgpu compute queue ownership */
+	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
+		queue = i % adev->gfx.mec.num_queue_per_pipe;
+		pipe = (i / adev->gfx.mec.num_queue_per_pipe)
+			% adev->gfx.mec.num_pipe_per_mec;
+		mec = (i / adev->gfx.mec.num_queue_per_pipe)
+			/ adev->gfx.mec.num_pipe_per_mec;
+
+		/* we've run out of HW */
+		if (mec >= adev->gfx.mec.num_mec)
+			break;
+
+		if (adev->gfx.mec.num_mec > 1) {
+			/* policy: amdgpu owns the first two queues of the first MEC */
+			if (mec == 0 && queue < 2)
+				set_bit(i, adev->gfx.mec.queue_bitmap);
+		} else {
+			/* policy: amdgpu owns all queues in the first pipe */
+			if (mec == 0 && pipe == 0)
+				set_bit(i, adev->gfx.mec.queue_bitmap);
+		}
+	}
+
+	/* update the number of active compute rings */
+	adev->gfx.num_compute_rings =
+		bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
+
+	/* If you hit this case and edited the policy, you probably just
+	 * need to increase AMDGPU_MAX_COMPUTE_RINGS */
+	if (WARN_ON(adev->gfx.num_compute_rings > AMDGPU_MAX_COMPUTE_RINGS))
+		adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 2d846ef..9b9ea6e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -30,6 +30,8 @@ void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg);
 void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se,
 				 unsigned max_sh);
 
+void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev);
+
 /**
  * amdgpu_gfx_create_bitmask - create a bitmask
  *
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 862bc72..6ffb2da 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2809,43 +2809,6 @@ static void gfx_v7_0_mec_fini(struct amdgpu_device *adev)
 	}
 }
 
-static void gfx_v7_0_compute_queue_acquire(struct amdgpu_device *adev)
-{
-	int i, queue, pipe, mec;
-
-	/* policy for amdgpu compute queue ownership */
-	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
-		queue = i % adev->gfx.mec.num_queue_per_pipe;
-		pipe = (i / adev->gfx.mec.num_queue_per_pipe)
-			% adev->gfx.mec.num_pipe_per_mec;
-		mec = (i / adev->gfx.mec.num_queue_per_pipe)
-			/ adev->gfx.mec.num_pipe_per_mec;
-
-		/* we've run out of HW */
-		if (mec >= adev->gfx.mec.num_mec)
-			break;
-
-		if (adev->gfx.mec.num_mec > 1) {
-			/* policy: amdgpu owns the first two queues of the first MEC */
-			if (mec == 0 && queue < 2)
-				set_bit(i, adev->gfx.mec.queue_bitmap);
-		} else {
-			/* policy: amdgpu owns all queues in the first pipe */
-			if (mec == 0 && pipe == 0)
-				set_bit(i, adev->gfx.mec.queue_bitmap);
-		}
-	}
-
-	/* update the number of active compute rings */
-	adev->gfx.num_compute_rings =
-		bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
-
-	/* If you hit this case and edited the policy, you probably just
-	 * need to increase AMDGPU_MAX_COMPUTE_RINGS */
-	if (WARN_ON(adev->gfx.num_compute_rings > AMDGPU_MAX_COMPUTE_RINGS))
-		adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
-}

Re: [PATCH libdrm v6 1/1] amdgpu: move asic id table to a separate file

2017-06-07 Thread Michel Dänzer
On 07/06/17 08:12 PM, Emil Velikov wrote:
> On 7 June 2017 at 09:40, Michel Dänzer  wrote:
>> On 06/06/17 10:43 PM, Emil Velikov wrote:
>>> On 31 May 2017 at 21:22, Samuel Li  wrote:
>>>
 --- /dev/null
 +++ b/amdgpu/amdgpu_asic_id.c
>>>
 +static int parse_one_line(const char *line, struct amdgpu_asic_id *id)
 +{
 +   char *buf, *saveptr;
 +   char *s_did;
 +   char *s_rid;
 +   char *s_name;
 +   char *endptr;
 +   int r = 0;
 +
 +   buf = strdup(line);
>>> You don't need the extra strdup here if you use strchr over strtok.
>>
>> Beware that without strdup here, amdgpu_parse_asic_ids must set line =
>> NULL after table_line++, so that getline allocates a new buffer for the
>> next line.
>>
> A simple "line = NULL" will lead to a memory leak, AFAICT.
>
> In either case, I'm a bit baffled as to how that is affected by the
> presence/lack of strdup: we don't alter or reuse the backing storage,
> we only strcmp/isblank/strtol/strdup it.

Oh, I missed that id->marketing_name is strdup'd again.

Anyway, it's probably better not to change the logic too much at this
point, other than anything needed to fix immediate bugs. It can always
be improved with follow-up patches.


-- 
Earthling Michel Dänzer   |   http://www.amd.com
Libre software enthusiast | Mesa and X developer
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 1/3] drm/amdgpu: fix mec queue policy on single MEC asics

2017-06-07 Thread Alex Deucher
Fixes hangs on single MEC asics.

Fixes: 2ed286fb434 (drm/amdgpu: new queue policy, take first 2 queues of each
pipe v2)
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 12 +---
 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 12 +---
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 12 +---
 3 files changed, 27 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 4c04e9d..862bc72 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2825,9 +2825,15 @@ static void gfx_v7_0_compute_queue_acquire(struct amdgpu_device *adev)
 		if (mec >= adev->gfx.mec.num_mec)
 			break;
 
-		/* policy: amdgpu owns the first two queues of the first MEC */
-		if (mec == 0 && queue < 2)
-			set_bit(i, adev->gfx.mec.queue_bitmap);
+		if (adev->gfx.mec.num_mec > 1) {
+			/* policy: amdgpu owns the first two queues of the first MEC */
+			if (mec == 0 && queue < 2)
+				set_bit(i, adev->gfx.mec.queue_bitmap);
+		} else {
+			/* policy: amdgpu owns all queues in the first pipe */
+			if (mec == 0 && pipe == 0)
+				set_bit(i, adev->gfx.mec.queue_bitmap);
+		}
 	}
 
 	/* update the number of active compute rings */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index ad2e0bb..1370b39 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1464,9 +1464,15 @@ static void gfx_v8_0_compute_queue_acquire(struct amdgpu_device *adev)
 		if (mec >= adev->gfx.mec.num_mec)
 			break;
 
-		/* policy: amdgpu owns the first two queues of the first MEC */
-		if (mec == 0 && queue < 2)
-			set_bit(i, adev->gfx.mec.queue_bitmap);
+		if (adev->gfx.mec.num_mec > 1) {
+			/* policy: amdgpu owns the first two queues of the first MEC */
+			if (mec == 0 && queue < 2)
+				set_bit(i, adev->gfx.mec.queue_bitmap);
+		} else {
+			/* policy: amdgpu owns all queues in the first pipe */
+			if (mec == 0 && pipe == 0)
+				set_bit(i, adev->gfx.mec.queue_bitmap);
+		}
 	}
 
 	/* update the number of active compute rings */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index cf15a350..9d675b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -873,9 +873,15 @@ static void gfx_v9_0_compute_queue_acquire(struct amdgpu_device *adev)
 		if (mec >= adev->gfx.mec.num_mec)
 			break;
 
-		/* policy: amdgpu owns the first two queues of the first MEC */
-		if (mec == 0 && queue < 2)
-			set_bit(i, adev->gfx.mec.queue_bitmap);
+		if (adev->gfx.mec.num_mec > 1) {
+			/* policy: amdgpu owns the first two queues of the first MEC */
+			if (mec == 0 && queue < 2)
+				set_bit(i, adev->gfx.mec.queue_bitmap);
+		} else {
+			/* policy: amdgpu owns all queues in the first pipe */
+			if (mec == 0 && pipe == 0)
+				set_bit(i, adev->gfx.mec.queue_bitmap);
+		}
 	}
 
 	/* update the number of active compute rings */
-- 
2.5.5

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 3/3] drm/amdgpu/gfx9: Raven has two MECs

2017-06-07 Thread Alex Deucher
This was missed when Andres' queue patches were rebased.

Fixes: 42794b27 (drm/amdgpu: take ownership of per-pipe configuration v3)
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 3ea0e71..e0193e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -872,6 +872,7 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
 
switch (adev->asic_type) {
case CHIP_VEGA10:
+   case CHIP_RAVEN:
adev->gfx.mec.num_mec = 2;
break;
default:
-- 
2.5.5

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 2/3] drm/amdgpu: move gfx_v*_0_compute_queue_acquire to common code

2017-06-07 Thread Alex Deucher
Same function was duplicated in all gfx IP files.

Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 37 +++
 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h |  2 ++
 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c   | 39 +
 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c   | 39 +
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c   | 39 +
 5 files changed, 42 insertions(+), 114 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 1994335..51a9708 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -108,3 +108,40 @@ void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_s
 		p = next + 1;
 	}
 }
+
+void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
+{
+	int i, queue, pipe, mec;
+
+	/* policy for amdgpu compute queue ownership */
+	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
+		queue = i % adev->gfx.mec.num_queue_per_pipe;
+		pipe = (i / adev->gfx.mec.num_queue_per_pipe)
+			% adev->gfx.mec.num_pipe_per_mec;
+		mec = (i / adev->gfx.mec.num_queue_per_pipe)
+			/ adev->gfx.mec.num_pipe_per_mec;
+
+		/* we've run out of HW */
+		if (mec >= adev->gfx.mec.num_mec)
+			break;
+
+		if (adev->gfx.mec.num_mec > 1) {
+			/* policy: amdgpu owns the first two queues of the first MEC */
+			if (mec == 0 && queue < 2)
+				set_bit(i, adev->gfx.mec.queue_bitmap);
+		} else {
+			/* policy: amdgpu owns all queues in the first pipe */
+			if (mec == 0 && pipe == 0)
+				set_bit(i, adev->gfx.mec.queue_bitmap);
+		}
+	}
+
+	/* update the number of active compute rings */
+	adev->gfx.num_compute_rings =
+		bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
+
+	/* If you hit this case and edited the policy, you probably just
+	 * need to increase AMDGPU_MAX_COMPUTE_RINGS */
+	if (WARN_ON(adev->gfx.num_compute_rings > AMDGPU_MAX_COMPUTE_RINGS))
+		adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 2d846ef..9b9ea6e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -30,6 +30,8 @@ void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg);
 void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se,
 				 unsigned max_sh);
 
+void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev);
+
 /**
  * amdgpu_gfx_create_bitmask - create a bitmask
  *
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 862bc72..6ffb2da 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2809,43 +2809,6 @@ static void gfx_v7_0_mec_fini(struct amdgpu_device *adev)
 	}
 }
 
-static void gfx_v7_0_compute_queue_acquire(struct amdgpu_device *adev)
-{
-	int i, queue, pipe, mec;
-
-	/* policy for amdgpu compute queue ownership */
-	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
-		queue = i % adev->gfx.mec.num_queue_per_pipe;
-		pipe = (i / adev->gfx.mec.num_queue_per_pipe)
-			% adev->gfx.mec.num_pipe_per_mec;
-		mec = (i / adev->gfx.mec.num_queue_per_pipe)
-			/ adev->gfx.mec.num_pipe_per_mec;
-
-		/* we've run out of HW */
-		if (mec >= adev->gfx.mec.num_mec)
-			break;
-
-		if (adev->gfx.mec.num_mec > 1) {
-			/* policy: amdgpu owns the first two queues of the first MEC */
-			if (mec == 0 && queue < 2)
-				set_bit(i, adev->gfx.mec.queue_bitmap);
-		} else {
-			/* policy: amdgpu owns all queues in the first pipe */
-			if (mec == 0 && pipe == 0)
-				set_bit(i, adev->gfx.mec.queue_bitmap);
-		}
-	}
-
-	/* update the number of active compute rings */
-	adev->gfx.num_compute_rings =
-		bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
-
-	/* If you hit this case and edited the policy, you probably just
-	 * need to increase AMDGPU_MAX_COMPUTE_RINGS */
-	if (WARN_ON(adev->gfx.num_compute_rings > AMDGPU_MAX_COMPUTE_RINGS))
-		adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
-}
-
 static int gfx_v
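
The archived message is cut off above; the call-site hunks are not preserved.
For orientation only, a hedged sketch of what a consolidated call site
presumably looks like once the per-IP copies are gone (the wrapper name and
surrounding context are illustrative assumptions, not part of the patch):

#include "amdgpu.h"
#include "amdgpu_gfx.h"

/* Illustrative only: each gfx IP's init path would now call the shared
 * helper instead of its private, now-deleted copy. */
static void example_acquire_compute_queues(struct amdgpu_device *adev)
{
	/* was: gfx_v7_0/gfx_v8_0/gfx_v9_0_compute_queue_acquire(adev); */
	amdgpu_gfx_compute_queue_acquire(adev);
}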

[PATCH] drm/amd/amdkcl: fix drm-get-put.cocci warnings

2017-06-07 Thread Julia Lawall
 Use drm_*_get() and drm_*_put() helpers instead of drm_*_reference() and
 drm_*_unreference() helpers.

Generated by: scripts/coccinelle/api/drm-get-put.cocci

CC: annwang 
Signed-off-by: Julia Lawall 
Signed-off-by: Fengguang Wu 
---
tree:   git://people.freedesktop.org/~agd5f/linux.git amd-mainline-hybrid-4.11
head:   7ccf5ab3da7a87288cc0fd11910b212e4ac154a6
commit: 67207f0941969278dd47e2549fae4fe5502183c1 [1119/1800] drm/amd/amdkcl: [4.7] fix dev->struct_mutex

Please take the patch only if it's a positive warning. Thanks!

 amdgpu_gem.c |   24 
 1 file changed, 12 insertions(+), 12 deletions(-)

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -108,9 +108,9 @@ void amdgpu_gem_force_release(struct amd
idr_for_each_entry(&file->object_idr, gobj, handle) {
WARN_ONCE(1, "And also active allocations!\n");
 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)
-   drm_gem_object_unreference(gobj);
+   drm_gem_object_put(gobj);
 #else
-   drm_gem_object_unreference_unlocked(gobj);
+   drm_gem_object_put_unlocked(gobj);
 #endif
}
idr_destroy(&file->object_idr);
@@ -287,7 +287,7 @@ int amdgpu_gem_create_ioctl(struct drm_d

r = drm_gem_handle_create(filp, gobj, &handle);
/* drop reference from allocate - handle holds it now */
-   drm_gem_object_unreference_unlocked(gobj);
+   drm_gem_object_put_unlocked(gobj);
if (r)
return r;

@@ -365,7 +365,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_

r = drm_gem_handle_create(filp, gobj, &handle);
/* drop reference from allocate - handle holds it now */
-   drm_gem_object_unreference_unlocked(gobj);
+   drm_gem_object_put_unlocked(gobj);
if (r)
return r;

@@ -379,7 +379,7 @@ unlock_mmap_sem:
up_read(&current->mm->mmap_sem);

 release_object:
-   drm_gem_object_unreference_unlocked(gobj);
+   drm_gem_object_put_unlocked(gobj);

return r;
 }
@@ -398,11 +398,11 @@ int amdgpu_mode_dumb_mmap(struct drm_fil
robj = gem_to_amdgpu_bo(gobj);
if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
(robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
-   drm_gem_object_unreference_unlocked(gobj);
+   drm_gem_object_put_unlocked(gobj);
return -EPERM;
}
*offset_p = amdgpu_bo_mmap_offset(robj);
-   drm_gem_object_unreference_unlocked(gobj);
+   drm_gem_object_put_unlocked(gobj);
return 0;
 }

@@ -472,7 +472,7 @@ int amdgpu_gem_wait_idle_ioctl(struct dr
} else
r = ret;

-   drm_gem_object_unreference_unlocked(gobj);
+   drm_gem_object_put_unlocked(gobj);
return r;
 }

@@ -515,7 +515,7 @@ int amdgpu_gem_metadata_ioctl(struct drm
 unreserve:
amdgpu_bo_unreserve(robj);
 out:
-   drm_gem_object_unreference_unlocked(gobj);
+   drm_gem_object_put_unlocked(gobj);
return r;
 }

@@ -686,7 +686,7 @@ error_backoff:
ttm_eu_backoff_reservation(&ticket, &list);

 error_unref:
-   drm_gem_object_unreference_unlocked(gobj);
+   drm_gem_object_put_unlocked(gobj);
return r;
 }

@@ -748,7 +748,7 @@ int amdgpu_gem_op_ioctl(struct drm_devic
}

 out:
-   drm_gem_object_unreference_unlocked(gobj);
+   drm_gem_object_put_unlocked(gobj);
return r;
 }

@@ -776,7 +776,7 @@ int amdgpu_mode_dumb_create(struct drm_f

r = drm_gem_handle_create(file_priv, gobj, &handle);
/* drop reference from allocate - handle holds it now */
-   drm_gem_object_unreference_unlocked(gobj);
+   drm_gem_object_put_unlocked(gobj);
if (r) {
return r;
}
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amdgpu: fix drm-get-put.cocci warnings

2017-06-07 Thread Julia Lawall
 Use drm_*_get() and drm_*_put() helpers instead of drm_*_reference() and
 drm_*_unreference() helpers.

Generated by: scripts/coccinelle/api/drm-get-put.cocci

CC: Christian König 
Signed-off-by: Julia Lawall 
Signed-off-by: Fengguang Wu 
---

Please take the patch only if it's a positive warning. Thanks!

 amdgpu_prime.c |2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -173,7 +173,7 @@ amdgpu_gem_prime_foreign_bo(struct amdgp
continue;

ww_mutex_unlock(&bo->tbo.resv->lock);
-   drm_gem_object_reference(&gobj->base);
+   drm_gem_object_get(&gobj->base);
return &gobj->base;
}
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
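
Both Coccinelle-generated patches above apply the same mechanical rename of
the GEM reference-counting helpers. As a minimal sketch of the pattern in
isolation (example_create_handle is a hypothetical wrapper; the old-to-new
mapping is taken from the diffs themselves):

#include <drm/drm_gem.h>

/* Old name                                -> new name, per the diffs above:
 *   drm_gem_object_reference()            -> drm_gem_object_get()
 *   drm_gem_object_unreference()          -> drm_gem_object_put()
 *   drm_gem_object_unreference_unlocked() -> drm_gem_object_put_unlocked()
 */
static int example_create_handle(struct drm_file *filp,
				 struct drm_gem_object *gobj,
				 u32 *handle)
{
	int r = drm_gem_handle_create(filp, gobj, handle);

	/* drop the allocation reference; the handle now holds one */
	drm_gem_object_put_unlocked(gobj);
	return r;
}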


Re: [PATCH libdrm v6 1/1] amdgpu: move asic id table to a separate file

2017-06-07 Thread Emil Velikov
On 7 June 2017 at 09:40, Michel Dänzer  wrote:
> On 06/06/17 10:43 PM, Emil Velikov wrote:
>> On 31 May 2017 at 21:22, Samuel Li  wrote:
>>
>>> --- /dev/null
>>> +++ b/amdgpu/amdgpu_asic_id.c
>>
>>> +static int parse_one_line(const char *line, struct amdgpu_asic_id *id)
>>> +{
>>> +   char *buf, *saveptr;
>>> +   char *s_did;
>>> +   char *s_rid;
>>> +   char *s_name;
>>> +   char *endptr;
>>> +   int r = 0;
>>> +
>>> +   buf = strdup(line);
>> You don't need the extra strdup here if you use strchr over strtok.
>
> Beware that without strdup here, amdgpu_parse_asic_ids must set line =
> NULL after table_line++, so that getline allocates a new buffer for the
> next line.
>
A simple "line = NULL" will lead to a memory leak, AFAICT.

In either case, I'm a bit baffled as to how that is affected by the
presence/lack of strdup: we don't alter or reuse the backing storage,
we only strcmp/isblank/strtol/strdup it.
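
For reference, a sketch of the kind of in-place parse being discussed, which
avoids strdup'ing the whole line; the struct layout and the comma-separated
"did, rid, name" record format are assumptions based on the code quoted in
this thread:

#include <ctype.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct asic_id_example {	/* stand-in for struct amdgpu_asic_id */
	unsigned int did;
	unsigned int rid;
	char *marketing_name;
};

/* Parse "<did>, <rid>, <name>" directly from the getline() buffer:
 * strtoul() walks the numeric fields in place, and only the marketing
 * name is duplicated. */
static int parse_one_line_sketch(const char *line, struct asic_id_example *id)
{
	char *endptr;

	id->did = strtoul(line, &endptr, 16);
	if (*endptr != ',')
		return -EINVAL;
	for (++endptr; isblank(*endptr); ++endptr)
		;

	id->rid = strtoul(endptr, &endptr, 16);
	if (*endptr != ',')
		return -EINVAL;
	for (++endptr; isblank(*endptr); ++endptr)
		;

	id->marketing_name = strdup(endptr);
	return id->marketing_name ? 0 : -ENOMEM;
}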

>
>>> +int amdgpu_parse_asic_ids(struct amdgpu_asic_id **p_asic_id_table)
>>> +{
>>
>>> +   /* 1st valid line is file version */
>>> +   while ((n = getline(&line, &len, fp)) != -1) {
>>> +   /* trim trailing newline */
>>> +   if (line[n - 1] == '\n')
>>> +   line[n - 1] = '\0';
>> Why do we need this - afaict none of the parsing code cares if we have
>> \n or not?
>
> The \n has to be removed somehow, otherwise it ends up as part of the
> marketing name returned to the application.
>
Wouldn't it be better to do that in parse_one_line(), around the
marketing_name = strdup(...) call?
It's a matter of taste, so feel free to ignore me.

>
>>> +   /* end of table */
>>> +   id = asic_id_table + table_size;
>>> +   memset(id, 0, sizeof(struct amdgpu_asic_id));
>> Here one clears the sentinel, which is needed as we hit realloc above, 
>> correct?
>>
>>> +   asic_id_table = realloc(asic_id_table, (table_size+1) *
>>> +   sizeof(struct amdgpu_asic_id));
>> But why do we realloc again?
>
> I asked for that, in order not to waste memory for unused table entries.
>
D'oh, indeed. Thank you. Worth adding a note?

>
>>> +free:
>>> +   free(line);
>>> +
>>> +   if (r && asic_id_table) {
>>> +   while (table_size--) {
>>> +   id = asic_id_table + table_size;
>>> +   free(id->marketing_name);
>>> +   }
>>> +   free(asic_id_table);
>>> +   asic_id_table = NULL;
>>> +   }
>>> +close:
>>> +   fclose(fp);
>>> +
>>> +   *p_asic_id_table = asic_id_table;
>>> +
>> Please don't entwine the error path with the normal one.
>>
>> Setting *p_asic_id_table (or any user provided pointer) when the
>> function fails is bad design.
>
> I don't really see the issue with that; it's fine for the only caller of
> this function.
>
It's not obvious and might come back to bite us. Since *p_asic_id_table is
already NULL (we're using calloc), I'd opt for dropping it.
Not trying to force my opinion, just stating my concerns.

Another idea that just came to mind:
since getline() can do multiple implicit reallocs, one could allocate a
buffer of some sane default size up front and feed that in instead of the
current NULL.
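
A quick sketch of that idea (the starting size is an arbitrary assumption):

#include <stdio.h>
#include <stdlib.h>

static void read_lines(FILE *fp)
{
	size_t len = 128;		/* assumed "sane" default */
	char *line = malloc(len);

	if (!line)
		return;
	/* getline() reuses the preallocated buffer and still grows it
	 * transparently if a longer line shows up */
	while (getline(&line, &len, fp) != -1) {
		/* ... parse line in place ... */
	}
	free(line);
}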

Regards,
Emil
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH libdrm v6 1/1] amdgpu: move asic id table to a separate file

2017-06-07 Thread Michel Dänzer
On 06/06/17 10:43 PM, Emil Velikov wrote:
> On 31 May 2017 at 21:22, Samuel Li  wrote:
> 
>> --- /dev/null
>> +++ b/amdgpu/amdgpu_asic_id.c
> 
>> +static int parse_one_line(const char *line, struct amdgpu_asic_id *id)
>> +{
>> +   char *buf, *saveptr;
>> +   char *s_did;
>> +   char *s_rid;
>> +   char *s_name;
>> +   char *endptr;
>> +   int r = 0;
>> +
>> +   buf = strdup(line);
> You don't need the extra strdup here if you use strchr over strtok.

Beware that without strdup here, amdgpu_parse_asic_ids must set line =
NULL after table_line++, so that getline allocates a new buffer for the
next line.


>> +int amdgpu_parse_asic_ids(struct amdgpu_asic_id **p_asic_id_table)
>> +{
> 
>> +   /* 1st valid line is file version */
>> +   while ((n = getline(&line, &len, fp)) != -1) {
>> +   /* trim trailing newline */
>> +   if (line[n - 1] == '\n')
>> +   line[n - 1] = '\0';
> Why do we need this - afaict none of the parsing code cares if we have
> \n or not?

The \n has to be removed somehow, otherwise it ends up as part of the
marketing name returned to the application.


>> +   /* end of table */
>> +   id = asic_id_table + table_size;
>> +   memset(id, 0, sizeof(struct amdgpu_asic_id));
> Here one clears the sentinel, which is needed as we hit realloc above, 
> correct?
> 
>> +   asic_id_table = realloc(asic_id_table, (table_size+1) *
>> +   sizeof(struct amdgpu_asic_id));
> But why do we realloc again?

I asked for that, in order not to waste memory for unused table entries.
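
Condensed, the grow-then-trim pattern being defended here looks roughly like
this (names are illustrative; the caller is assumed to have reserved at least
used + 1 entries during the chunked growth):

#include <stdlib.h>
#include <string.h>

struct asic_id_example { unsigned int did, rid; char *marketing_name; };

static struct asic_id_example *trim_table(struct asic_id_example *table,
					  size_t used)
{
	struct asic_id_example *trimmed;

	/* zeroed sentinel entry marks the end of the table */
	memset(&table[used], 0, sizeof(table[0]));

	/* final realloc keeps only the used entries plus the sentinel,
	 * returning the slack left over from chunked growth */
	trimmed = realloc(table, (used + 1) * sizeof(*table));
	return trimmed ? trimmed : table;
}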


>> +free:
>> +   free(line);
>> +
>> +   if (r && asic_id_table) {
>> +   while (table_size--) {
>> +   id = asic_id_table + table_size;
>> +   free(id->marketing_name);
>> +   }
>> +   free(asic_id_table);
>> +   asic_id_table = NULL;
>> +   }
>> +close:
>> +   fclose(fp);
>> +
>> +   *p_asic_id_table = asic_id_table;
>> +
> Please don't entwine the error path with the normal one.
> 
> Setting *p_asic_id_table (or any user provided pointer) when the
> function fails is bad design.

I don't really see the issue with that; it's fine for the only caller of
this function.


-- 
Earthling Michel Dänzer   |   http://www.amd.com
Libre software enthusiast | Mesa and X developer
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx