Re: [PATCH 2/2] drm/scheduler: Remove obsolete spinlock.

2018-05-15 Thread Grodzovsky, Andrey
Yeah, that's what I am not sure about... It's lockless in the sense of a single 
producer and a single consumer, but not for multiple concurrent producers... So now I 
think this spinlock should stay there... It just looked useless to me at first 
sight...
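
For illustration, a minimal sketch (not the actual drm spsc_queue code) of why a
plain single-producer push is unsafe with two concurrent producers; whether the real
spsc_queue_push() needs an external lock depends on whether its tail update is an
atomic exchange:

/* Hypothetical, simplified push: NOT safe for two producers. */
struct node { struct node *next; };

struct queue {
	struct node *head;
	struct node **tail;	/* points at the last next pointer */
};

static void naive_push(struct queue *q, struct node *n)
{
	n->next = NULL;
	*q->tail = n;		/* producers A and B can both write here ...   */
	q->tail = &n->next;	/* ... and one of the two nodes is then lost   */
}

/*
 * If the tail update is instead an atomic exchange, e.g.
 *     old_tail = xchg(&q->tail, &n->next); *old_tail = n;
 * concurrent pushes serialize correctly and no external spinlock
 * is required around the push itself.
 */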

Andrey


From: Zhou, David(ChunMing)
Sent: 15 May 2018 23:04:44
To: Grodzovsky, Andrey; amd-gfx@lists.freedesktop.org; 
dri-de...@lists.freedesktop.org
Cc: Koenig, Christian
Subject: Re: [PATCH 2/2] drm/scheduler: Remove obsolete spinlock.



On 2018-05-16 03:31, Andrey Grodzovsky wrote:
> Signed-off-by: Andrey Grodzovsky 
> ---
>   drivers/gpu/drm/scheduler/gpu_scheduler.c | 4 
>   include/drm/gpu_scheduler.h   | 1 -
>   2 files changed, 5 deletions(-)
>
> diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c 
> b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> index 1f1dd70..2569a63 100644
> --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
> +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> @@ -140,7 +140,6 @@ int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
>   entity->last_scheduled = NULL;
>
>   spin_lock_init(&entity->rq_lock);
> - spin_lock_init(&entity->queue_lock);
>   spsc_queue_init(&entity->job_queue);
>
>   atomic_set(&entity->fence_seq, 0);
> @@ -424,11 +423,8 @@ void drm_sched_entity_push_job(struct drm_sched_job 
> *sched_job,
>
>   trace_drm_sched_job(sched_job, entity);
>
> - spin_lock(&entity->queue_lock);
>   first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
>
> - spin_unlock(&entity->queue_lock);
Is your spsc queue safe to be pushed to simultaneously?

Regards,
David Zhou
> -
>   /* first job wakes up scheduler */
>   if (first) {
>   /* Add the entity to the run queue */
> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
> index 350a62c..683eb65 100644
> --- a/include/drm/gpu_scheduler.h
> +++ b/include/drm/gpu_scheduler.h
> @@ -56,7 +56,6 @@ struct drm_sched_entity {
>   spinlock_t  rq_lock;
>   struct drm_gpu_scheduler*sched;
>
> - spinlock_t  queue_lock;
>   struct spsc_queue   job_queue;
>
>   atomic_tfence_seq;



Re: [PATCH 2/2] drm/scheduler: Remove obsolete spinlock.

2018-05-15 Thread zhoucm1



On 2018-05-16 03:31, Andrey Grodzovsky wrote:

Signed-off-by: Andrey Grodzovsky 
---
  drivers/gpu/drm/scheduler/gpu_scheduler.c | 4 
  include/drm/gpu_scheduler.h   | 1 -
  2 files changed, 5 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c 
b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 1f1dd70..2569a63 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -140,7 +140,6 @@ int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
entity->last_scheduled = NULL;
  
	spin_lock_init(&entity->rq_lock);

-	spin_lock_init(&entity->queue_lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);

@@ -424,11 +423,8 @@ void drm_sched_entity_push_job(struct drm_sched_job 
*sched_job,
  
  	trace_drm_sched_job(sched_job, entity);
  
-	spin_lock(&entity->queue_lock);
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

-	spin_unlock(&entity->queue_lock);

Is your spsc queue safe to be pushed to simultaneously?

Regards,
David Zhou

-
/* first job wakes up scheduler */
if (first) {
/* Add the entity to the run queue */
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 350a62c..683eb65 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -56,7 +56,6 @@ struct drm_sched_entity {
spinlock_t  rq_lock;
struct drm_gpu_scheduler*sched;
  
-	spinlock_t			queue_lock;

struct spsc_queue   job_queue;
  
  	atomic_t			fence_seq;




Re: [PATCH 04/21] drm/amdgpu: Add GFXv9 kfd2kgd interface functions

2018-05-15 Thread Felix Kuehling
On 2018-05-15 05:41 AM, Dave Airlie wrote:
>> +static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
>> +   uint32_t queue_id, uint32_t __user *wptr,
>> +   uint32_t wptr_shift, uint32_t wptr_mask,
>> +   struct mm_struct *mm)
>> +{
>> +   struct amdgpu_device *adev = get_amdgpu_device(kgd);
>> +   struct v9_mqd *m;
>> +   uint32_t *mqd_hqd;
>> +   uint32_t reg, hqd_base, data;
>> +
>> +   m = get_mqd(mqd);
>> +
>> +   acquire_queue(kgd, pipe_id, queue_id);
>> +
>> +   /* HIQ is set during driver init period with vmid set to 0*/
>> +   if (m->cp_hqd_vmid == 0) {
>> +   uint32_t value, mec, pipe;
>> +
>> +   mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
>> +   pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
>> +
>> +   pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
>> +   mec, pipe, queue_id);
>> +   value = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS));
>> +   value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
>> +   ((mec << 5) | (pipe << 3) | queue_id | 0x80));
>> +   WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), value);
>> +   }
>> +
>> +   /* HQD registers extend from CP_MQD_BASE_ADDR to 
>> CP_HQD_EOP_WPTR_MEM. */
>> +   mqd_hqd = &m->cp_mqd_base_addr_lo;
>> +   hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
>> +
>> +   for (reg = hqd_base;
>> +reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
>> +   WREG32(reg, mqd_hqd[reg - hqd_base]);
>> +
>> +
>> +   /* Activate doorbell logic before triggering WPTR poll. */
>> +   data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
>> +CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
>> +   WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL), data);
>> +
>> +   if (wptr) {
>> +   /* Don't read wptr with get_user because the user
>> +* context may not be accessible (if this function
>> +* runs in a work queue). Instead trigger a one-shot
>> +* polling read from memory in the CP. This assumes
>> +* that wptr is GPU-accessible in the queue's VMID via
>> +* ATC or SVM. WPTR==RPTR before starting the poll so
>> +* the CP starts fetching new commands from the right
>> +* place.
>> +*
>> +* Guessing a 64-bit WPTR from a 32-bit RPTR is a bit
>> +* tricky. Assume that the queue didn't overflow. The
>> +* number of valid bits in the 32-bit RPTR depends on
>> +* the queue size. The remaining bits are taken from
>> +* the saved 64-bit WPTR. If the WPTR wrapped, add the
>> +* queue size.
>> +*/
>> +   uint32_t queue_size =
>> +   2 << REG_GET_FIELD(m->cp_hqd_pq_control,
>> +  CP_HQD_PQ_CONTROL, QUEUE_SIZE);
>> +   uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1);
>> +
>> +   if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr)
>> +   guessed_wptr += queue_size;
>> +   guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
>> +   guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;
>> +
>> +   WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO),
>> +  lower_32_bits(guessed_wptr));
>> +   WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI),
>> +  upper_32_bits(guessed_wptr));
>> +   WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
>> +  lower_32_bits((uint64_t)wptr));
>> +   WREG32(SOC15_REG_OFFSET(GC, 0, 
>> mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
>> +  upper_32_bits((uint64_t)wptr));
>  CC [M]  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.o
> In file included from
> /home/airlied/devel/kernel/dim/src/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c:30:0:
> /home/airlied/devel/kernel/dim/src/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c:
> In function ‘kgd_hqd_load’:
> /home/airlied/devel/kernel/dim/src/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c:473:24:
> warning: cast from pointer to integer of different size
> [-Wpointer-to-int-cast]
>   lower_32_bits((uint64_t)wptr));
> ^
> /home/airlied/devel/kernel/dim/src/drivers/gpu/drm/amd/amdgpu/amdgpu.h:1666:53:
> note: in definition of macro ‘WREG32’
>  #define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), 0)
>  ^
> /home/airlied/devel/kernel/dim/src/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c:473:10:
> note: 
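
For reference, one common way to silence this class of -Wpointer-to-int-cast warning
on 32-bit builds is to cast the pointer through uintptr_t before widening it. This is
only a hedged sketch, not necessarily the fix that was eventually applied:

	/* Sketch only: convert the user pointer through uintptr_t so the
	 * widening to 64 bits is well defined on both 32-bit and 64-bit
	 * kernels, then split it across the two WPTR_POLL_ADDR writes.
	 */
	uint64_t wptr_addr = (uintptr_t)wptr;

	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
	       lower_32_bits(wptr_addr));
	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
	       upper_32_bits(wptr_addr));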

Re: [PATCH 2/2] drm/scheduler: Remove obsolete spinlock.

2018-05-15 Thread Andrey Grodzovsky
Yeah, I might need to give another thought to whether this spinlock can
actually be removed.


Andrey

On 05/15/2018 03:38 PM, Alex Deucher wrote:

On Tue, May 15, 2018 at 3:31 PM, Andrey Grodzovsky
 wrote:

Signed-off-by: Andrey Grodzovsky 

Please provide a better patch description.

Alex


---
  drivers/gpu/drm/scheduler/gpu_scheduler.c | 4 
  include/drm/gpu_scheduler.h   | 1 -
  2 files changed, 5 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c 
b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 1f1dd70..2569a63 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -140,7 +140,6 @@ int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
 entity->last_scheduled = NULL;

 spin_lock_init(&entity->rq_lock);
-   spin_lock_init(&entity->queue_lock);
 spsc_queue_init(&entity->job_queue);

 atomic_set(&entity->fence_seq, 0);
@@ -424,11 +423,8 @@ void drm_sched_entity_push_job(struct drm_sched_job 
*sched_job,

 trace_drm_sched_job(sched_job, entity);

-   spin_lock(&entity->queue_lock);
 first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

-   spin_unlock(&entity->queue_lock);
-
 /* first job wakes up scheduler */
 if (first) {
 /* Add the entity to the run queue */
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 350a62c..683eb65 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -56,7 +56,6 @@ struct drm_sched_entity {
 spinlock_t  rq_lock;
 struct drm_gpu_scheduler*sched;

-   spinlock_t  queue_lock;
 struct spsc_queue   job_queue;

 atomic_tfence_seq;
--
2.7.4



Re: [PATCH 1/2] drm/amdgpu: Skip drm_sched_entity realted ops for KIQ ring.

2018-05-15 Thread Alex Deucher
On Tue, May 15, 2018 at 3:31 PM, Andrey Grodzovsky
 wrote:
> Follwoing change 75fbed2 we need to skip KIQ ring when iterating
> amdgpu_ctx's scheduler entites.
>
> Signed-off-by: Andrey Grodzovsky 

Typo in the title: realted -> related
Typo in the description: Follwoing -> Following
Also expand on the reasoning a bit in the patch description.  E.g., we
never initialize or use the GPU scheduler for KIQ.
With those things fixes:
Reviewed-by: Alex Deucher 

Alex

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 21 ++---
>  1 file changed, 18 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> index 6741a62..744519b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> @@ -173,9 +173,14 @@ static void amdgpu_ctx_do_release(struct kref *ref)
>
> ctx = container_of(ref, struct amdgpu_ctx, refcount);
>
> -   for (i = 0; i < ctx->adev->num_rings; i++)
> +   for (i = 0; i < ctx->adev->num_rings; i++) {
> +
> +   if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
> +   continue;
> +
> drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
> &ctx->rings[i].entity);
> +   }
>
> amdgpu_ctx_fini(ref);
>  }
> @@ -452,12 +457,17 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr 
> *mgr)
> if (!ctx->adev)
> return;
>
> -   for (i = 0; i < ctx->adev->num_rings; i++)
> +   for (i = 0; i < ctx->adev->num_rings; i++) {
> +
> +   if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
> +   continue;
> +
> if (kref_read(&ctx->refcount) == 1)
> 
> drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
>   &ctx->rings[i].entity);
> else
> DRM_ERROR("ctx %p is still alive\n", ctx);
> +   }
> }
>  }
>
> @@ -474,12 +484,17 @@ void amdgpu_ctx_mgr_entity_cleanup(struct 
> amdgpu_ctx_mgr *mgr)
> if (!ctx->adev)
> return;
>
> -   for (i = 0; i < ctx->adev->num_rings; i++)
> +   for (i = 0; i < ctx->adev->num_rings; i++) {
> +
> +   if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
> +   continue;
> +
> if (kref_read(&ctx->refcount) == 1)
> 
> drm_sched_entity_cleanup(&ctx->adev->rings[i]->sched,
> &ctx->rings[i].entity);
> else
> DRM_ERROR("ctx %p is still alive\n", ctx);
> +   }
> }
>  }
>
> --
> 2.7.4
>


Re: [PATCH 2/2] drm/scheduler: Remove obsolete spinlock.

2018-05-15 Thread Alex Deucher
On Tue, May 15, 2018 at 3:31 PM, Andrey Grodzovsky
 wrote:
> Signed-off-by: Andrey Grodzovsky 

Please provide a better patch description.

Alex

> ---
>  drivers/gpu/drm/scheduler/gpu_scheduler.c | 4 
>  include/drm/gpu_scheduler.h   | 1 -
>  2 files changed, 5 deletions(-)
>
> diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c 
> b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> index 1f1dd70..2569a63 100644
> --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
> +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> @@ -140,7 +140,6 @@ int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
> entity->last_scheduled = NULL;
>
> spin_lock_init(&entity->rq_lock);
> -   spin_lock_init(&entity->queue_lock);
> spsc_queue_init(&entity->job_queue);
>
> atomic_set(&entity->fence_seq, 0);
> @@ -424,11 +423,8 @@ void drm_sched_entity_push_job(struct drm_sched_job 
> *sched_job,
>
> trace_drm_sched_job(sched_job, entity);
>
> -   spin_lock(&entity->queue_lock);
> first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
>
> -   spin_unlock(&entity->queue_lock);
> -
> /* first job wakes up scheduler */
> if (first) {
> /* Add the entity to the run queue */
> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
> index 350a62c..683eb65 100644
> --- a/include/drm/gpu_scheduler.h
> +++ b/include/drm/gpu_scheduler.h
> @@ -56,7 +56,6 @@ struct drm_sched_entity {
> spinlock_t  rq_lock;
> struct drm_gpu_scheduler*sched;
>
> -   spinlock_t  queue_lock;
> struct spsc_queue   job_queue;
>
> atomic_tfence_seq;
> --
> 2.7.4
>


[PATCH 2/2] drm/scheduler: Remove obsolete spinlock.

2018-05-15 Thread Andrey Grodzovsky
Signed-off-by: Andrey Grodzovsky 
---
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 4 
 include/drm/gpu_scheduler.h   | 1 -
 2 files changed, 5 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c 
b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 1f1dd70..2569a63 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -140,7 +140,6 @@ int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
entity->last_scheduled = NULL;
 
	spin_lock_init(&entity->rq_lock);
-	spin_lock_init(&entity->queue_lock);
	spsc_queue_init(&entity->job_queue);
 
	atomic_set(&entity->fence_seq, 0);
@@ -424,11 +423,8 @@ void drm_sched_entity_push_job(struct drm_sched_job 
*sched_job,
 
trace_drm_sched_job(sched_job, entity);
 
-	spin_lock(&entity->queue_lock);
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
 
-	spin_unlock(&entity->queue_lock);
-
/* first job wakes up scheduler */
if (first) {
/* Add the entity to the run queue */
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 350a62c..683eb65 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -56,7 +56,6 @@ struct drm_sched_entity {
spinlock_t  rq_lock;
struct drm_gpu_scheduler*sched;
 
-   spinlock_t  queue_lock;
struct spsc_queue   job_queue;
 
atomic_tfence_seq;
-- 
2.7.4



[PATCH 1/2] drm/amdgpu: Skip drm_sched_entity realted ops for KIQ ring.

2018-05-15 Thread Andrey Grodzovsky
Follwoing change 75fbed2 we need to skip KIQ ring when iterating
amdgpu_ctx's scheduler entites.

Signed-off-by: Andrey Grodzovsky 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 21 ++---
 1 file changed, 18 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 6741a62..744519b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -173,9 +173,14 @@ static void amdgpu_ctx_do_release(struct kref *ref)
 
ctx = container_of(ref, struct amdgpu_ctx, refcount);
 
-   for (i = 0; i < ctx->adev->num_rings; i++)
+   for (i = 0; i < ctx->adev->num_rings; i++) {
+
+   if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
+   continue;
+
 drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
 &ctx->rings[i].entity);
+   }
 
amdgpu_ctx_fini(ref);
 }
@@ -452,12 +457,17 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr 
*mgr)
if (!ctx->adev)
return;
 
-   for (i = 0; i < ctx->adev->num_rings; i++)
+   for (i = 0; i < ctx->adev->num_rings; i++) {
+
+   if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
+   continue;
+
 if (kref_read(&ctx->refcount) == 1)
 
 drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
   &ctx->rings[i].entity);
else
DRM_ERROR("ctx %p is still alive\n", ctx);
+   }
}
 }
 
@@ -474,12 +484,17 @@ void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr 
*mgr)
if (!ctx->adev)
return;
 
-   for (i = 0; i < ctx->adev->num_rings; i++)
+   for (i = 0; i < ctx->adev->num_rings; i++) {
+
+   if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
+   continue;
+
 if (kref_read(&ctx->refcount) == 1)
 
 drm_sched_entity_cleanup(&ctx->adev->rings[i]->sched,
 &ctx->rings[i].entity);
else
DRM_ERROR("ctx %p is still alive\n", ctx);
+   }
}
 }
 
-- 
2.7.4



[pull] radeon, amdgpu, ttm, and gpu scheduler drm-next-4.18

2018-05-15 Thread Alex Deucher
Hi Dave,

Main changes for 4.18.  I'd like to do a separate pull for vega20 later
this week or next.  Highlights:
- Reserve pre-OS scanout buffer during init for seamless transition from
  console to driver
- VEGAM support
- Improved GPU scheduler documentation
- Initial gfxoff support for raven
- SR-IOV fixes
- Default to non-AGP on PowerPC for radeon
- Fine grained clock voltage control for vega10
- Power profiles for vega10
- Further clean up of powerplay/driver interface
- Underlay fixes
- Display link bw updates
- Gamma fixes
- Scatter/Gather display support on CZ/ST
- Misc bug fixes and clean ups

The following changes since commit 871e899db19da3dbd17a5d263b555dc5b7d8fed5:

  Merge branch 'drm-next-4.17' of git://people.freedesktop.org/~agd5f/linux 
into drm-next (2018-04-11 08:35:41 +1000)

are available in the git repository at:

  git://people.freedesktop.org/~agd5f/linux drm-next-4.18

for you to fetch changes up to 8344c53f57057b42a5da87e9557c40fcda18fb7a:

  drm/scheduler: remove unused parameter (2018-05-15 13:44:27 -0500)


Alex Deucher (29):
  drm/amdgpu/sdma4: use a helper for SDMA_OP_POLL_REGMEM
  drm/amdgpu: add documentation on hwmon interfaces exposed (v3)
  drm/amdgpu: add asic need_full_reset callback
  drm/amdgpu/si: implement asic need_full_reset callback
  drm/amdgpu/cik: implement asic need_full_reset callback
  drm/amdgpu/vi: implement asic need_full_reset callback
  drm/amdgpu/soc15: implement asic need_full_reset callback
  drm/amdgpu: use new asic need_full_reset callback
  drm/amdgpu/gfx9: cache DB_DEBUG2 and make it available to userspace
  drm/amdgpu/powerplay: fix smu7_get_memory_type for fiji
  drm/amdgpu/powerplay: rename smu7_upload_mc_firmware
  drm/amdgpu: add emit_reg_write_reg_wait ring callback
  drm/amdgpu/gfx9: add emit_reg_write_reg_wait ring callback (v2)
  drm/amdgpu/sdma4: add emit_reg_write_reg_wait ring callback (v2)
  drm/amdgpu/uvd7: add emit_reg_write_reg_wait ring callback
  drm/amdgpu/vce4: add emit_reg_write_reg_wait ring callback
  drm/amdgpu/vcn1: add emit_reg_write_reg_wait ring callback
  drm/amdgpu/gmc9: use amdgpu_ring_emit_reg_write_reg_wait in gpu tlb flush
  drm/amdgpu/gmc: steal the appropriate amount of vram for fw hand-over (v3)
  drm/amdgpu: print the vbios version in the debugfs firmware info
  drm/amdgpu/pm: document power_dpm_force_performance_level
  drm/amdgpu/pm: document power_dpm_state
  drm/amdgpu/pm: document pp_table
  drm/amdgpu/pm: document pp_dpm_sclk pp_dpm_mclk pp_dpm_pcie (v2)
  drm/amdgpu/pm: document pp_power_profile_mode
  drm/amdgpu/pm: document pp_od_clk_voltage
  drm/amdgpu/powerplay: actually return the power with the new query
  drm/amdgpu: Add VEGAM support to the legacy DCE 11 module
  drm/amdgpu/gmc9: remove unused register defs

Andrey Grodzovsky (2):
  drm/amdgpu: Free VGA stolen memory as soon as possible.
  drm/amdgpu: Switch to interruptable wait to recover from ring hang.

Anthony Koo (19):
  drm/amd/display: Couple bug fixes in stats module
  drm/amd/display: Rename encoder_info_packet to dc_info_packet
  drm/amd/display: Only program MSA_TIMING_PARAM if it changed
  drm/amd/display: Program v_total_min/max after v_total_cntl
  drm/amd/display: Add vmax/min_sel prints to dcn10_log_hw_state
  drm/amd/display: csc updates require FULL update
  drm/amd/display: Updated HDR Static Metadata to directly take info packet 
raw
  drm/amd/display: Get rid of unused input_tf
  drm/amd/display: Remove unused fields
  drm/amd/display: Do not use os types
  drm/amd/display: csc_transform to dc_csc_transform
  drm/amd/display: Refactor color module
  drm/amd/display: move color_transfer_func to color mod
  drm/amd/display: Fix structure initialization of hdmi_info_packet
  drm/amd/display: Have DC manage its own allocation of gamma
  drm/amd/display: Do not create memory allocation if stats not enabled
  drm/amd/display: fix issue related to infopacket was not transmitted
  drm/amd/display: add some DTN logs for input and output tf
  drm/amd/display: update dtn logging and goldens

Bhawanpreet Lakha (3):
  drm/amd/display: Add debug prints for bandwidth calculations
  drm/amd/display: Correct print types in DC_LOGS
  drm/amd/display: Add Dynamic debug prints

Charlene Liu (3):
  drm/amd/display: add delay between panel pwr off to on.
  drm/amd/display: HDMI has no sound after Panel power off/on
  drm/amd/display: add cursor TTU CRQ related

Christian König (11):
  drm/amdgpu: use ctx bytes_moved
  drm/amdgpu: fix and cleanup cpu visible VRAM handling
  drm/ttm: keep a reference to transfer pipelined BOs
  drm/amdgpu: revert "add new bo flag that indicates BOs don't need 
fallback (v2)"
  drm/amdgpu: revert "Don't 

Re: iommu/amd: flush IOTLB for specific domains only (v2)

2018-05-15 Thread Tom Lendacky
On 5/15/2018 9:47 AM, Joseph Salisbury wrote:
> On 05/15/2018 09:08 AM, Tom Lendacky wrote:
>> On 5/15/2018 7:34 AM, Nath, Arindam wrote:
>>>
 -Original Message-
 From: Joseph Salisbury [mailto:joseph.salisb...@canonical.com]
 Sent: Tuesday, May 15, 2018 5:40 PM
 To: Nath, Arindam 
 Cc: io...@lists.linux-foundation.org; Bridgman, John
 ; j...@8bytes.org; amd-
 g...@lists.freedesktop.org; dr...@endlessm.com; stein...@gmail.com;
 Suthikulpanit, Suravee ; Deucher,
 Alexander ; Kuehling, Felix
 ; li...@endlessm.com; mic...@daenzer.net;
 1747...@bugs.launchpad.net; Lendacky, Thomas
 
 Subject: Re: iommu/amd: flush IOTLB for specific domains only (v2)

 On 05/15/2018 04:03 AM, Nath, Arindam wrote:
> Adding Tom.
>
> Hi Joe,
>
> My original patch was never accepted. Tom and Joerg worked on another
 patch series which was supposed to fix the issue in question in addition 
 to do
 some code cleanups. I believe their patches are already in the mainline. 
 If I
 remember correctly, one of the patches disabled PCI ATS for the graphics
 card which was causing the issue.
> Do you still see the issue with latest mainline kernel?
>
> BR,
> Arindam
>
> -Original Message-
> From: Joseph Salisbury [mailto:joseph.salisb...@canonical.com]
> Sent: Tuesday, May 15, 2018 1:17 AM
> To: Nath, Arindam 
> Cc: io...@lists.linux-foundation.org; Bridgman, John
> ; j...@8bytes.org;
> amd-gfx@lists.freedesktop.org; dr...@endlessm.com;
 stein...@gmail.com;
> Suthikulpanit, Suravee ; Deucher,
> Alexander ; Kuehling, Felix
> ; li...@endlessm.com; mic...@daenzer.net;
> 1747...@bugs.launchpad.net
> Subject: iommu/amd: flush IOTLB for specific domains only (v2)
>
> Hello Arindam,
>
> There is a bug report[0] that you created a patch[1] for a while back.
 However, the patch never landed in mainline.  There is a bug reporter in
 Ubuntu[2] that is affected by this bug and is willing to test the patch.  I
 attempted to build a test kernel with the patch, but it does not apply to
 currently mainline cleanly.  Do you still think this patch may resolve this
 bug?  If so, is there a version of your patch available that will apply to 
 current
 mainline?
> Thanks,
>
> Joe
>
> [0] https://bugs.freedesktop.org/show_bug.cgi?id=101029
> [1] https://patchwork.freedesktop.org/patch/157327/
> [2] http://pad.lv/1747463
>
 Hi Arindam,

 Thanks for the feedback.  Yes, the latest mainline kernel was tested, and 
 it is
 reported the bug still happens in the Ubuntu kernel bug[0]. Is there any
 specific diagnostic info we can collect that might help?
>>> Joe, I believe all the information needed is already provided in [2]. Let 
>>> us wait for inputs from Tom and Joerg.
>>>
>>> I could take a look at the issue locally, but it will take me some really 
>>> long time since I am occupied with other assignments right now.
>> I don't see anything in the bug that indicates the latest mainline kernel
>> was tested.  The patches/fixes in question are part of the 4.13 kernel, I
>> only see references to 4.10 kernels so I wouldn't expect the issue to be
>> resolved unless the patches from 4.13 were backported to the Ubuntu 4.10
>> kernel.
>>
>> Thanks,
>> Tom
>>
>>> BR,
>>> Arindam
>>>
 Thanks,

 Joe

 [0] http://pad.lv/1747463
> Hi Tom,
> 
> The request to test mainline was in comment #30[0].  However, the bug
> reporter stated the bug still existed on IRC and not in the bug report. 
> I'll request he adds the test results to the bug.
> 

Ok, I was looking at the wrong bug.  For the original 4.13 kernel, I don't
see any attachments that have the AMD-Vi messages in question.  Were they
completion timeouts (like in the later mainline kernel test, which I'll
get to in a bit) or I/O page fault messages?  Without that information it
is hard to determine what the issue really is.

(Just as an FYI, if the IOMMU is disabled in BIOS, then iommu=soft is not
 necessary on the kernel command line).

For the upstream kernel test, since this is a Ryzen system, it's possible
that the BIOS does not have a requisite fix for SME and IOMMU (see [1]).
On the upstream kernel, if memory encryption is active by default without
this BIOS fix, then the result is AMD-Vi completion-wait timeout messages.
Try booting with mem_encrypt=off on the kernel command line or build a
kernel with CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT=n and see if that
allows the kernel to boot.

Thanks,
Tom

[1] 

[PATCH 56/57] drm/amdgpu: Add vega20 pci ids

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

Signed-off-by: Feifei Xu 
Reviewed-by: Alex Deucher 
Reviewed-by: Hawking Zhang 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 7 +++
 1 file changed, 7 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index a7a81ddab721..07319227d357 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -564,6 +564,13 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x69A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
{0x1002, 0x69A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
{0x1002, 0x69AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
+   /* Vega 20 */
+   {0x1002, 0x66A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+   {0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+   {0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+   {0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+   {0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+   {0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
/* Raven */
{0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
 
-- 
2.13.6



[PATCH 57/57] drm/amdgpu: flag Vega20 as experimental

2018-05-15 Thread Alex Deucher
Must set amdgpu.exp_hw_support=1 on the kernel command line in
grub to enable support.

Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 12 ++--
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 07319227d357..cdc5182806bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -565,12 +565,12 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x69A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
{0x1002, 0x69AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
/* Vega 20 */
-   {0x1002, 0x66A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
-   {0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
-   {0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
-   {0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
-   {0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
-   {0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+   {0x1002, 0x66A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
+   {0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
+   {0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
+   {0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
+   {0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
+   {0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
/* Raven */
{0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
 
-- 
2.13.6
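
For context, a rough sketch of how such a flag is typically consumed at probe time
(paraphrased from memory with a hypothetical function name, not the exact upstream
code), which is why the amdgpu.exp_hw_support=1 module parameter is needed before the
driver will bind to these IDs:

/* Paraphrased sketch: refuse to bind to devices whose pci_device_id entry
 * carries AMD_EXP_HW_SUPPORT unless the user opted in via the module param.
 */
static int example_probe(struct pci_dev *pdev,
			 const struct pci_device_id *ent)
{
	if ((ent->driver_data & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) {
		DRM_INFO("This hardware requires experimental hardware support.\n");
		return -ENODEV;
	}
	/* ... normal probe path continues here ... */
	return 0;
}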



[PATCH 55/57] drm/amdgpu: Switch to use df_v3_6_funcs for vega20 (v2)

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

v2: fix whitespace (Alex)

Signed-off-by: Feifei Xu 
Reviewed-by: Hawking Zhang 
Reviewed-by: Huang Rui 
---
 drivers/gpu/drm/amd/amdgpu/soc15.c | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c 
b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 79354847f4c0..8ccbcf9885d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -51,6 +51,7 @@
 #include "gfxhub_v1_0.h"
 #include "mmhub_v1_0.h"
 #include "df_v1_7.h"
+#include "df_v3_6.h"
 #include "vega10_ih.h"
 #include "sdma_v4_0.h"
 #include "uvd_v7_0.h"
@@ -501,7 +502,10 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
else
 adev->nbio_funcs = &nbio_v6_1_funcs;
 
-   adev->df_funcs = &df_v1_7_funcs;
+   if (adev->asic_type == CHIP_VEGA20)
+   adev->df_funcs = &df_v3_6_funcs;
+   else
+   adev->df_funcs = &df_v1_7_funcs;
adev->nbio_funcs->detect_hw_virt(adev);
 
if (amdgpu_sriov_vf(adev))
-- 
2.13.6



[PATCH 51/57] drm/amdgpu/vg20:Enable 2nd instance queue maping for uvd 7.2

2018-05-15 Thread Alex Deucher
From: James Zhu 

Enable 2nd instance uvd queue mapping for uvd 7.2. To the user, only one UVD
instance is presented; there are two rings for uvd decode and
4 rings for uvd encode.

Signed-off-by: James Zhu 
Reviewed-by: Alex Deucher 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c | 14 ++
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
index 2458d385e55a..8af16e81c7d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
@@ -66,6 +66,8 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
   u32 ring,
   struct amdgpu_ring **out_ring)
 {
+   u32 instance;
+
switch (mapper->hw_ip) {
case AMDGPU_HW_IP_GFX:
 *out_ring = &adev->gfx.gfx_ring[ring];
@@ -77,13 +79,16 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
 *out_ring = &adev->sdma.instance[ring].ring;
 break;
 case AMDGPU_HW_IP_UVD:
-   *out_ring = &adev->uvd.inst->ring;
+   instance = ring;
+   *out_ring = &adev->uvd.inst[instance].ring;
 break;
 case AMDGPU_HW_IP_VCE:
 *out_ring = &adev->vce.ring[ring];
 break;
 case AMDGPU_HW_IP_UVD_ENC:
-   *out_ring = &adev->uvd.inst->ring_enc[ring];
+   instance = ring / adev->uvd.num_enc_rings;
+   *out_ring =
+   &adev->uvd.inst[instance].ring_enc[ring % adev->uvd.num_enc_rings];
 break;
 case AMDGPU_HW_IP_VCN_DEC:
 *out_ring = &adev->vcn.ring_dec;
@@ -240,13 +245,14 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
ip_num_rings = adev->sdma.num_instances;
break;
case AMDGPU_HW_IP_UVD:
-   ip_num_rings = 1;
+   ip_num_rings = adev->uvd.num_uvd_inst;
break;
case AMDGPU_HW_IP_VCE:
ip_num_rings = adev->vce.num_rings;
break;
case AMDGPU_HW_IP_UVD_ENC:
-   ip_num_rings = adev->uvd.num_enc_rings;
+   ip_num_rings =
+   adev->uvd.num_enc_rings * adev->uvd.num_uvd_inst;
break;
case AMDGPU_HW_IP_VCN_DEC:
ip_num_rings = 1;
-- 
2.13.6
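
As a worked illustration of the identity mapping above (a standalone sketch only;
the ring counts are assumed, not taken from the patch), with num_uvd_inst == 2 and
num_enc_rings == 2 the flat user-visible UVD_ENC ring index splits into an
(instance, local ring) pair like this:

#include <stdio.h>

int main(void)
{
	const unsigned num_enc_rings = 2;	/* per UVD instance (assumed) */
	const unsigned num_uvd_inst  = 2;	/* Vega20 has two UVD blocks  */

	for (unsigned ring = 0; ring < num_enc_rings * num_uvd_inst; ring++) {
		unsigned instance = ring / num_enc_rings;	/* 0,0,1,1 */
		unsigned local    = ring % num_enc_rings;	/* 0,1,0,1 */
		printf("user ring %u -> uvd.inst[%u].ring_enc[%u]\n",
		       ring, instance, local);
	}
	return 0;
}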



[PATCH 54/57] drm/amdgpu/df: implement df v3_6 callback functions (v2)

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

New df helpers for 3.6.

v2: switch to using df 3.5 headers.

Signed-off-by: Feifei Xu 
Reviewed-by: Hawking Zhang 
Reviewed-by: Huang Rui 
---
 drivers/gpu/drm/amd/amdgpu/Makefile  |   3 +-
 drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 116 +++
 drivers/gpu/drm/amd/amdgpu/df_v3_6.h |  40 
 3 files changed, 158 insertions(+), 1 deletion(-)
 create mode 100644 drivers/gpu/drm/amd/amdgpu/df_v3_6.c
 create mode 100644 drivers/gpu/drm/amd/amdgpu/df_v3_6.h

diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile 
b/drivers/gpu/drm/amd/amdgpu/Makefile
index 1dd740b76d41..4ed943df2461 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -67,7 +67,8 @@ amdgpu-y += \
 
 # add DF block
 amdgpu-y += \
-   df_v1_7.o
+   df_v1_7.o \
+   df_v3_6.o
 
 # add GMC block
 amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c 
b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
new file mode 100644
index ..60608b3df881
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "df_v3_6.h"
+
+#include "df/df_3_6_default.h"
+#include "df/df_3_6_offset.h"
+#include "df/df_3_6_sh_mask.h"
+
+static u32 df_v3_6_channel_number[] = {1, 2, 0, 4, 0, 8, 0,
+  16, 32, 0, 0, 0, 2, 4, 8};
+
+static void df_v3_6_init(struct amdgpu_device *adev)
+{
+}
+
+static void df_v3_6_enable_broadcast_mode(struct amdgpu_device *adev,
+ bool enable)
+{
+   u32 tmp;
+
+   if (enable) {
+   tmp = RREG32_SOC15(DF, 0, mmFabricConfigAccessControl);
+   tmp &= ~FabricConfigAccessControl__CfgRegInstAccEn_MASK;
+   WREG32_SOC15(DF, 0, mmFabricConfigAccessControl, tmp);
+   } else
+   WREG32_SOC15(DF, 0, mmFabricConfigAccessControl,
+mmFabricConfigAccessControl_DEFAULT);
+}
+
+static u32 df_v3_6_get_fb_channel_number(struct amdgpu_device *adev)
+{
+   u32 tmp;
+
+   tmp = RREG32_SOC15(DF, 0, mmDF_CS_UMC_AON0_DramBaseAddress0);
+   tmp &= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK;
+   tmp >>= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
+
+   return tmp;
+}
+
+static u32 df_v3_6_get_hbm_channel_number(struct amdgpu_device *adev)
+{
+   int fb_channel_number;
+
+   fb_channel_number = adev->df_funcs->get_fb_channel_number(adev);
+   if (fb_channel_number > ARRAY_SIZE(df_v3_6_channel_number))
+   fb_channel_number = 0;
+
+   return df_v3_6_channel_number[fb_channel_number];
+}
+
+static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device 
*adev,
+bool enable)
+{
+   u32 tmp;
+
+   /* Put DF on broadcast mode */
+   adev->df_funcs->enable_broadcast_mode(adev, true);
+
+   if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) {
+   tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
+   tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
+   tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY;
+   WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
+   } else {
+   tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
+   tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
+   tmp |= DF_V3_6_MGCG_DISABLE;
+   WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
+   }
+
+   /* Exit broadcast mode */
+   adev->df_funcs->enable_broadcast_mode(adev, false);
+}
+
+static void df_v3_6_get_clockgating_state(struct amdgpu_device *adev,
+   

[PATCH 52/57] drm/amdgpu/vg20:Enable UVD/VCE for Vega20

2018-05-15 Thread Alex Deucher
From: James Zhu 

Vega20 ucode load type is set to AMDGPU_FW_LOAD_DIRECT by default, so UVD/VCE
does not need the PSP IP block to be up. UVD/VCE for Vega20 can therefore be
enabled at this point.

Signed-off-by: James Zhu 
Reviewed-by: Leo Liu 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/soc15.c | 6 ++
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c 
b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 8d0d0540ebfd..79354847f4c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -529,10 +529,8 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 #endif
 amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
 amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
-   if (adev->asic_type != CHIP_VEGA20) {
-   amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
-   amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
-   }
+   amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
+   amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
 break;
 case CHIP_RAVEN:
 amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
-- 
2.13.6



[PATCH 53/57] drm/amdgpu: add df 3.6 headers

2018-05-15 Thread Alex Deucher
Needed for vega20.

Signed-off-by: Alex Deucher 
---
 .../drm/amd/include/asic_reg/df/df_3_6_default.h   | 26 
 .../drm/amd/include/asic_reg/df/df_3_6_offset.h| 33 +++
 .../drm/amd/include/asic_reg/df/df_3_6_sh_mask.h   | 48 ++
 3 files changed, 107 insertions(+)
 create mode 100644 drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_default.h
 create mode 100644 drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
 create mode 100644 drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h

diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_default.h 
b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_default.h
new file mode 100644
index ..e58c207ac980
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_default.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2018  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 
LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _df_3_6_DEFAULT_HEADER
+#define _df_3_6_DEFAULT_HEADER
+
+#define mmFabricConfigAccessControl_DEFAULT
0x
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h 
b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
new file mode 100644
index ..a9575db8d7aa
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2018  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 
LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _df_3_6_OFFSET_HEADER
+#define _df_3_6_OFFSET_HEADER
+
+#define mmFabricConfigAccessControl
0x0410
+#define mmFabricConfigAccessControl_BASE_IDX   
0
+
+#define mmDF_PIE_AON0_DfGlobalClkGater 
0x00fc
+#define mmDF_PIE_AON0_DfGlobalClkGater_BASE_IDX
0
+
+#define mmDF_CS_UMC_AON0_DramBaseAddress0  
0x0044
+#define mmDF_CS_UMC_AON0_DramBaseAddress0_BASE_IDX 
0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h 
b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h
new file mode 100644
index ..88f7c69df6b9
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2018  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell 

[PATCH 50/57] drm/amdgpu/vg20:Enable the 2nd instance IRQ for uvd 7.2

2018-05-15 Thread Alex Deucher
From: James Zhu 

For Vega20, the 2nd instance uvd IRQ uses a different client id.
Enable the 2nd instance IRQ for uvd 7.2.

Signed-off-by: James Zhu 
Reviewed-by: Alex Deucher 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 12 ++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 
b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 6e371c0f2428..61650afd0570 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -49,6 +49,11 @@ static int uvd_v7_0_start(struct amdgpu_device *adev);
 static void uvd_v7_0_stop(struct amdgpu_device *adev);
 static int uvd_v7_0_sriov_start(struct amdgpu_device *adev);
 
+static int amdgpu_ih_clientid_uvds[] = {
+   SOC15_IH_CLIENTID_UVD,
+   SOC15_IH_CLIENTID_UVD1
+};
+
 /**
  * uvd_v7_0_ring_get_rptr - get read pointer
  *
@@ -397,13 +402,13 @@ static int uvd_v7_0_sw_init(void *handle)
 
for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
/* UVD TRAP */
-   r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, 124, 
&adev->uvd.inst[j].irq);
+   r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], 124, 
&adev->uvd.inst[j].irq);
if (r)
return r;
 
/* UVD ENC TRAP */
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
-   r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, i + 
119, &adev->uvd.inst[j].irq);
+   r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], 
i + 119, &adev->uvd.inst[j].irq);
if (r)
return r;
}
@@ -1480,6 +1485,9 @@ static int uvd_v7_0_process_interrupt(struct 
amdgpu_device *adev,
case SOC15_IH_CLIENTID_UVD:
ip_instance = 0;
break;
+   case SOC15_IH_CLIENTID_UVD1:
+   ip_instance = 1;
+   break;
default:
DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
return 0;
-- 
2.13.6



[PATCH 45/57] drm/amdgpu/vg20:Restruct uvd.inst to support multiple instances

2018-05-15 Thread Alex Deucher
From: James Zhu 

Vega20 has dual UVD. Need to add multiple-instance support for uvd.
Restructure uvd.inst, using uvd.inst[0] to replace uvd.inst->.
Repurpose amdgpu_ring::me as the instance index, initialized to 0.
There are no logical changes here.

Signed-off-by: James Zhu 
Reviewed-by: Alex Deucher 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c |6 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c   |   12 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c  |1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c   |  229 +++
 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 1002 +++--
 5 files changed, 660 insertions(+), 590 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 1070f4042cbb..39ec6b8890a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -376,14 +376,14 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring 
*ring,
struct amdgpu_device *adev = ring->adev;
uint64_t index;
 
-   if (ring != &adev->uvd.inst->ring) {
+   if (ring != &adev->uvd.inst[ring->me].ring) {
 ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
 ring->fence_drv.gpu_addr = adev->wb.gpu_addr + 
 (ring->fence_offs * 4);
 } else {
 /* put fence directly behind firmware */
 index = ALIGN(adev->uvd.fw->size, 8);
-   ring->fence_drv.cpu_addr = adev->uvd.inst->cpu_addr + index;
-   ring->fence_drv.gpu_addr = adev->uvd.inst->gpu_addr + index;
+   ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + 
index;
+   ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + 
index;
 }
 amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
amdgpu_irq_get(adev, irq_src, irq_type);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 1a2a0e93b290..ca215490a881 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -286,7 +286,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void 
*data, struct drm_file
struct drm_crtc *crtc;
uint32_t ui32 = 0;
uint64_t ui64 = 0;
-   int i, found;
+   int i, j, found;
int ui32_size = sizeof(ui32);
 
if (!info->return_size || !info->return_pointer)
@@ -348,7 +348,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void 
*data, struct drm_file
break;
case AMDGPU_HW_IP_UVD:
type = AMD_IP_BLOCK_TYPE_UVD;
-   ring_mask = adev->uvd.inst->ring.ready ? 1 : 0;
+   for (i = 0; i < adev->uvd.num_uvd_inst; i++)
+   ring_mask |= ((adev->uvd.inst[i].ring.ready ? 1 
: 0) << i);
ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
ib_size_alignment = 16;
break;
@@ -361,8 +362,11 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void 
*data, struct drm_file
break;
case AMDGPU_HW_IP_UVD_ENC:
type = AMD_IP_BLOCK_TYPE_UVD;
-   for (i = 0; i < adev->uvd.num_enc_rings; i++)
-   ring_mask |= 
((adev->uvd.inst->ring_enc[i].ready ? 1 : 0) << i);
+   for (i = 0; i < adev->uvd.num_uvd_inst; i++)
+   for (j = 0; j < adev->uvd.num_enc_rings; j++)
+   ring_mask |=
+   ((adev->uvd.inst[i].ring_enc[j].ready ? 
1 : 0) <<
+   (j + i * adev->uvd.num_enc_rings));
ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
ib_size_alignment = 1;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 49cad08b5c16..c6850b629d0e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -362,6 +362,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
 
dma_fence_put(ring->vmid_wait);
ring->vmid_wait = NULL;
+   ring->me = 0;
 
ring->adev->rings[ring->idx] = NULL;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 18c1096686f8..083e3bdb54e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -127,7 +127,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
const char *fw_name;
const struct common_firmware_header *hdr;
unsigned version_major, version_minor, family_id;
-   int i, r;
+   int i, j, r;

[PATCH 49/57] drm/amdgpu/vg20:Add IH client ID for the 2nd UVD

2018-05-15 Thread Alex Deucher
From: James Zhu 

For Vega20, there are two UVD hardware blocks. Need to add
the 2nd IH client ID for the 2nd UVD hardware.

Signed-off-by: James Zhu 
Reviewed-by: Alex Deucher 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/include/soc15_ih_clientid.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/include/soc15_ih_clientid.h 
b/drivers/gpu/drm/amd/include/soc15_ih_clientid.h
index a12d4f27cfa4..12e196c15bbe 100644
--- a/drivers/gpu/drm/amd/include/soc15_ih_clientid.h
+++ b/drivers/gpu/drm/amd/include/soc15_ih_clientid.h
@@ -43,6 +43,7 @@ enum soc15_ih_clientid {
SOC15_IH_CLIENTID_SE2SH = 0x0c,
SOC15_IH_CLIENTID_SE3SH = 0x0d,
SOC15_IH_CLIENTID_SYSHUB= 0x0e,
+   SOC15_IH_CLIENTID_UVD1  = 0x0e,
SOC15_IH_CLIENTID_THM   = 0x0f,
SOC15_IH_CLIENTID_UVD   = 0x10,
SOC15_IH_CLIENTID_VCE0  = 0x11,
-- 
2.13.6



[PATCH 48/57] drm/amdgpu/vg20:Enable the 2nd instance for uvd

2018-05-15 Thread Alex Deucher
From: James Zhu 

For Vega20, set the number of uvd instances to 2 to enable the 2nd instance.
The IB test built-in registers need an update for the vega20 2nd instance.

Signed-off-by: James Zhu 
Reviewed-by: Alex Deucher 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 30 --
 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c   |  7 ++-
 2 files changed, 22 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 575e8d111211..f8abbfb2df79 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -72,11 +72,12 @@
 #define FIRMWARE_VEGA12"amdgpu/vega12_uvd.bin"
 #define FIRMWARE_VEGA20"amdgpu/vega20_uvd.bin"
 
-#define mmUVD_GPCOM_VCPU_DATA0_VEGA10 (0x03c4 + 0x7e00)
-#define mmUVD_GPCOM_VCPU_DATA1_VEGA10 (0x03c5 + 0x7e00)
-#define mmUVD_GPCOM_VCPU_CMD_VEGA10 (0x03c3 + 0x7e00)
-#define mmUVD_NO_OP_VEGA10 (0x03ff + 0x7e00)
-#define mmUVD_ENGINE_CNTL_VEGA10 (0x03c6 + 0x7e00)
+/* These are common relative offsets for all asics, from uvd_7_0_offset.h,  */
+#define UVD_GPCOM_VCPU_CMD 0x03c3
+#define UVD_GPCOM_VCPU_DATA0   0x03c4
+#define UVD_GPCOM_VCPU_DATA1   0x03c5
+#define UVD_NO_OP  0x03ff
+#define UVD_BASE_SI0x3800
 
 /**
  * amdgpu_uvd_cs_ctx - Command submission parser context
@@ -990,6 +991,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, 
struct amdgpu_bo *bo,
uint64_t addr;
long r;
int i;
+   unsigned offset_idx = 0;
+   unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
 
amdgpu_bo_kunmap(bo);
amdgpu_bo_unpin(bo);
@@ -1009,17 +1012,16 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring 
*ring, struct amdgpu_bo *bo,
goto err;
 
if (adev->asic_type >= CHIP_VEGA10) {
-   data[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0_VEGA10, 0);
-   data[1] = PACKET0(mmUVD_GPCOM_VCPU_DATA1_VEGA10, 0);
-   data[2] = PACKET0(mmUVD_GPCOM_VCPU_CMD_VEGA10, 0);
-   data[3] = PACKET0(mmUVD_NO_OP_VEGA10, 0);
-   } else {
-   data[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
-   data[1] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
-   data[2] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
-   data[3] = PACKET0(mmUVD_NO_OP, 0);
+   offset_idx = 1 + ring->me;
+   offset[1] = adev->reg_offset[UVD_HWIP][0][1];
+   offset[2] = adev->reg_offset[UVD_HWIP][1][1];
}
 
+   data[0] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA0, 0);
+   data[1] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA1, 0);
+   data[2] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_CMD, 0);
+   data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);
+
 ib = &job->ibs[0];
addr = amdgpu_bo_gpu_offset(bo);
ib->ptr[0] = data[0];
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 
b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 8ec98b0bb42d..6e371c0f2428 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -40,6 +40,8 @@
 #include "mmhub/mmhub_1_0_offset.h"
 #include "mmhub/mmhub_1_0_sh_mask.h"
 
+#define UVD7_MAX_HW_INSTANCES_VEGA20   2
+
 static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
 static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
 static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -370,7 +372,10 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring 
*ring, long timeout)
 static int uvd_v7_0_early_init(void *handle)
 {
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-   adev->uvd.num_uvd_inst = 1;
+   if (adev->asic_type == CHIP_VEGA20)
+   adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
+   else
+   adev->uvd.num_uvd_inst = 1;
 
if (amdgpu_sriov_vf(adev))
adev->uvd.num_enc_rings = 1;
-- 
2.13.6



[PATCH 44/57] drm/amdgpu/vg20:Restruct uvd to support multiple uvds

2018-05-15 Thread Alex Deucher
From: James Zhu 

Vega20 has dual UVD. Need to restructure amdgpu_device::uvd to support
multiple uvds. There are no logical changes here.

Signed-off-by: James Zhu 
Reviewed-by: Alex Deucher 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c |   6 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c   |   4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c |   4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c   | 102 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h   |  19 ++--
 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c |  27 +++---
 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c |  25 ++---
 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c |  77 +++
 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 135 +-
 9 files changed, 205 insertions(+), 194 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index d09fcab2398f..1070f4042cbb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -376,14 +376,14 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring 
*ring,
struct amdgpu_device *adev = ring->adev;
uint64_t index;
 
-   if (ring != &adev->uvd.ring) {
+   if (ring != &adev->uvd.inst->ring) {
 ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
 ring->fence_drv.gpu_addr = adev->wb.gpu_addr + 
 (ring->fence_offs * 4);
 } else {
 /* put fence directly behind firmware */
 index = ALIGN(adev->uvd.fw->size, 8);
-   ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
-   ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
+   ring->fence_drv.cpu_addr = adev->uvd.inst->cpu_addr + index;
+   ring->fence_drv.gpu_addr = adev->uvd.inst->gpu_addr + index;
 }
 amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
amdgpu_irq_get(adev, irq_src, irq_type);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 6d55caed4b98..1a2a0e93b290 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -348,7 +348,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void 
*data, struct drm_file
break;
case AMDGPU_HW_IP_UVD:
type = AMD_IP_BLOCK_TYPE_UVD;
-   ring_mask = adev->uvd.ring.ready ? 1 : 0;
+   ring_mask = adev->uvd.inst->ring.ready ? 1 : 0;
ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
ib_size_alignment = 16;
break;
@@ -362,7 +362,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void 
*data, struct drm_file
case AMDGPU_HW_IP_UVD_ENC:
type = AMD_IP_BLOCK_TYPE_UVD;
for (i = 0; i < adev->uvd.num_enc_rings; i++)
-   ring_mask |= ((adev->uvd.ring_enc[i].ready ? 1 
: 0) << i);
+   ring_mask |= 
((adev->uvd.inst->ring_enc[i].ready ? 1 : 0) << i);
ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
ib_size_alignment = 1;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
index 262c1267249e..2458d385e55a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
@@ -77,13 +77,13 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
*out_ring = >sdma.instance[ring].ring;
break;
case AMDGPU_HW_IP_UVD:
-   *out_ring = >uvd.ring;
+   *out_ring = >uvd.inst->ring;
break;
case AMDGPU_HW_IP_VCE:
*out_ring = >vce.ring[ring];
break;
case AMDGPU_HW_IP_UVD_ENC:
-   *out_ring = >uvd.ring_enc[ring];
+   *out_ring = >uvd.inst->ring_enc[ring];
break;
case AMDGPU_HW_IP_VCN_DEC:
*out_ring = >vcn.ring_dec;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 869c909a96f9..18c1096686f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -129,7 +129,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
unsigned version_major, version_minor, family_id;
int i, r;
 
-   INIT_DELAYED_WORK(>uvd.idle_work, amdgpu_uvd_idle_work_handler);
+   INIT_DELAYED_WORK(>uvd.inst->idle_work, 
amdgpu_uvd_idle_work_handler);
 
switch (adev->asic_type) {
 #ifdef CONFIG_DRM_AMDGPU_CIK
@@ -237,16 +237,16 @@ int amdgpu_uvd_sw_init(struct 
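The overall shape of the restructure is easier to see outside the diff. Below is a minimal standalone sketch; the type and field names (uvd_sketch, inst[], ring_ready) are simplified stand-ins for the real struct amdgpu_uvd / struct amdgpu_uvd_inst split, not the driver's exact layout:

#include <stdio.h>

#define MAX_UVD_INSTANCES 2	/* Vega20 has two UVD blocks */

struct uvd_inst_sketch {
	void *vcpu_bo;		/* per-instance firmware BO */
	int ring_ready;		/* per-instance decode ring state */
};

struct uvd_sketch {
	struct uvd_inst_sketch inst[MAX_UVD_INSTANCES];
	unsigned num_uvd_inst;	/* 1 on older ASICs, 2 on Vega20 */
};

int main(void)
{
	struct uvd_sketch uvd = { .num_uvd_inst = 2 };
	unsigned j;

	/* code that used to touch a single adev->uvd.ring now loops: */
	for (j = 0; j < uvd.num_uvd_inst; j++) {
		uvd.inst[j].ring_ready = 1;
		printf("UVD instance %u ready\n", j);
	}
	return 0;
}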

[PATCH 46/57] drm/amdgpu/vg20:Restruct uvd.idle_work to support multiple instance (v2)

2018-05-15 Thread Alex Deucher
From: James Zhu 

Vega20's dual-UVD hardware needs two idle_works; restructure the idle
work handling to support multiple instances.

v2: squash in indentation fix

Signed-off-by: James Zhu 
Reviewed-by: Alex Deucher 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 17 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h |  7 ++-
 2 files changed, 15 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 083e3bdb54e1..575e8d111211 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -129,8 +129,6 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
unsigned version_major, version_minor, family_id;
int i, j, r;
 
-   INIT_DELAYED_WORK(>uvd.inst->idle_work, 
amdgpu_uvd_idle_work_handler);
-
switch (adev->asic_type) {
 #ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE:
@@ -237,6 +235,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
bo_size += 
AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 
for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+   adev->uvd.inst[j].delayed_work.ip_instance = j;
+   INIT_DELAYED_WORK(>uvd.inst[j].delayed_work.idle_work, 
amdgpu_uvd_idle_work_handler);
 
r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM, 
>uvd.inst[j].vcpu_bo,
@@ -317,7 +317,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
if (adev->uvd.inst[j].vcpu_bo == NULL)
continue;
 
-   cancel_delayed_work_sync(>uvd.inst[j].idle_work);
+   
cancel_delayed_work_sync(>uvd.inst[j].delayed_work.idle_work);
 
/* only valid for physical mode */
if (adev->asic_type < CHIP_POLARIS10) {
@@ -1142,9 +1142,10 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, 
uint32_t handle,
 
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 {
+   struct amdgpu_delayed_work *my_work = (struct amdgpu_delayed_work 
*)work;
struct amdgpu_device *adev =
-   container_of(work, struct amdgpu_device, 
uvd.inst->idle_work.work);
-   unsigned fences = amdgpu_fence_count_emitted(>uvd.inst->ring);
+   container_of(work, struct amdgpu_device, 
uvd.inst[my_work->ip_instance].delayed_work.idle_work.work);
+   unsigned fences = 
amdgpu_fence_count_emitted(>uvd.inst[my_work->ip_instance].ring);
 
if (fences == 0) {
if (adev->pm.dpm_enabled) {
@@ -1158,7 +1159,7 @@ static void amdgpu_uvd_idle_work_handler(struct 
work_struct *work)
   
AMD_CG_STATE_GATE);
}
} else {
-   schedule_delayed_work(>uvd.inst->idle_work, 
UVD_IDLE_TIMEOUT);
+   
schedule_delayed_work(>uvd.inst[my_work->ip_instance].delayed_work.idle_work,
 UVD_IDLE_TIMEOUT);
}
 }
 
@@ -1170,7 +1171,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
if (amdgpu_sriov_vf(adev))
return;
 
-   set_clocks = !cancel_delayed_work_sync(>uvd.inst->idle_work);
+   set_clocks = 
!cancel_delayed_work_sync(>uvd.inst[ring->me].delayed_work.idle_work);
if (set_clocks) {
if (adev->pm.dpm_enabled) {
amdgpu_dpm_enable_uvd(adev, true);
@@ -1187,7 +1188,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
 void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
 {
if (!amdgpu_sriov_vf(ring->adev))
-   schedule_delayed_work(>adev->uvd.inst->idle_work, 
UVD_IDLE_TIMEOUT);
+   
schedule_delayed_work(>adev->uvd.inst[ring->me].delayed_work.idle_work, 
UVD_IDLE_TIMEOUT);
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index b1579fba134c..7801eb8d4199 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -37,6 +37,11 @@
(AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(((const struct 
common_firmware_header *)(adev)->uvd.fw->data)->ucode_size_bytes) + \
   8) - AMDGPU_UVD_FIRMWARE_OFFSET)
 
+struct amdgpu_delayed_work{
+   struct delayed_work idle_work;
+   unsigned ip_instance;
+};
+
 struct amdgpu_uvd_inst {
struct amdgpu_bo*vcpu_bo;
void*cpu_addr;
@@ -44,12 +49,12 @@ struct amdgpu_uvd_inst {
void*saved_bo;
atomic_thandles[AMDGPU_MAX_UVD_HANDLES];
struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
-   struct delayed_work idle_work;
struct amdgpu_ring  ring;
struct amdgpu_ring  ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
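The handler in this patch recovers its UVD instance by casting the work pointer back to the wrapper struct, which works because idle_work is the wrapper's first member; container_of() is the general form of the same idiom. A small standalone sketch of the pattern, with simplified stand-in types rather than the kernel's workqueue API:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_item {		/* stand-in for struct delayed_work */
	int pending;
};

struct inst_delayed_work {	/* mirrors struct amdgpu_delayed_work above */
	struct work_item idle_work;
	unsigned ip_instance;	/* which UVD instance owns this work */
};

static void idle_work_handler(struct work_item *work)
{
	struct inst_delayed_work *dw =
		container_of(work, struct inst_delayed_work, idle_work);

	printf("idle work fired for UVD instance %u\n", dw->ip_instance);
}

int main(void)
{
	struct inst_delayed_work inst1 = { .idle_work = { 0 }, .ip_instance = 1 };

	idle_work_handler(&inst1.idle_work);	/* prints instance 1 */
	return 0;
}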
  

[PATCH 47/57] drm/amdgpu/vg20:increase 3 rings for AMDGPU_MAX_RINGS

2018-05-15 Thread Alex Deucher
From: James Zhu 

Vega20 has two UVD hardware instances. The extra UVD instance adds one
decode ring and two encode rings, so AMDGPU_MAX_RINGS needs to increase
by 3 (from 18 to 21).

Signed-off-by: James Zhu 
Reviewed-by: Alex Deucher 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 4f8dac2d36a5..1513124c5659 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -29,7 +29,7 @@
 #include 
 
 /* max number of rings */
-#define AMDGPU_MAX_RINGS   18
+#define AMDGPU_MAX_RINGS   21
 #define AMDGPU_MAX_GFX_RINGS   1
 #define AMDGPU_MAX_COMPUTE_RINGS   8
 #define AMDGPU_MAX_VCE_RINGS   3
-- 
2.13.6



[PATCH 43/57] drm/amdgpu: Disable ip modules that are not ready yet

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

Please re-enable the IPs disabled here in soc15.c when they're available.

Signed-off-by: Feifei Xu 
Reviewed-by: Hawking Zhang 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/soc15.c | 14 +-
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c 
b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 91d87f83a830..8d0d0540ebfd 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -514,9 +514,11 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, _common_ip_block);
amdgpu_device_ip_block_add(adev, _v9_0_ip_block);
amdgpu_device_ip_block_add(adev, _ih_ip_block);
-   amdgpu_device_ip_block_add(adev, _v3_1_ip_block);
-   if (!amdgpu_sriov_vf(adev))
-   amdgpu_device_ip_block_add(adev, _smu_ip_block);
+   if (adev->asic_type != CHIP_VEGA20) {
+   amdgpu_device_ip_block_add(adev, _v3_1_ip_block);
+   if (!amdgpu_sriov_vf(adev))
+   amdgpu_device_ip_block_add(adev, 
_smu_ip_block);
+   }
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, _virtual_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
@@ -527,8 +529,10 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 #endif
amdgpu_device_ip_block_add(adev, _v9_0_ip_block);
amdgpu_device_ip_block_add(adev, _v4_0_ip_block);
-   amdgpu_device_ip_block_add(adev, _v7_0_ip_block);
-   amdgpu_device_ip_block_add(adev, _v4_0_ip_block);
+   if (adev->asic_type != CHIP_VEGA20) {
+   amdgpu_device_ip_block_add(adev, _v7_0_ip_block);
+   amdgpu_device_ip_block_add(adev, _v4_0_ip_block);
+   }
break;
case CHIP_RAVEN:
amdgpu_device_ip_block_add(adev, _common_ip_block);
-- 
2.13.6



[PATCH 40/57] drm/amd/powerplay: update vega20 cg flags

2018-05-15 Thread Alex Deucher
From: Evan Quan 

Signed-off-by: Evan Quan 
Reviewed-by: Huang Rui 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/soc15.c | 19 ++-
 1 file changed, 18 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c 
b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 4e065c68b86c..91d87f83a830 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -665,7 +665,24 @@ static int soc15_common_early_init(void *handle)
adev->external_rev_id = adev->rev_id + 0x14;
break;
case CHIP_VEGA20:
-   adev->cg_flags = 0;
+   adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+   AMD_CG_SUPPORT_GFX_MGLS |
+   AMD_CG_SUPPORT_GFX_CGCG |
+   AMD_CG_SUPPORT_GFX_CGLS |
+   AMD_CG_SUPPORT_GFX_3D_CGCG |
+   AMD_CG_SUPPORT_GFX_3D_CGLS |
+   AMD_CG_SUPPORT_GFX_CP_LS |
+   AMD_CG_SUPPORT_MC_LS |
+   AMD_CG_SUPPORT_MC_MGCG |
+   AMD_CG_SUPPORT_SDMA_MGCG |
+   AMD_CG_SUPPORT_SDMA_LS |
+   AMD_CG_SUPPORT_BIF_MGCG |
+   AMD_CG_SUPPORT_BIF_LS |
+   AMD_CG_SUPPORT_HDP_MGCG |
+   AMD_CG_SUPPORT_BIF_LS |
+   AMD_CG_SUPPORT_ROM_MGCG |
+   AMD_CG_SUPPORT_VCE_MGCG |
+   AMD_CG_SUPPORT_UVD_MGCG;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x28;
break;
-- 
2.13.6



[PATCH 41/57] drm/include: Fix MP1_BASE address for vega20

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

Signed-off-by: Feifei Xu 
Reviewed-by: Evan Quan 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/include/vega20_ip_offset.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/include/vega20_ip_offset.h 
b/drivers/gpu/drm/amd/include/vega20_ip_offset.h
index 2da2d9790bac..97db93ceba4b 100644
--- a/drivers/gpu/drm/amd/include/vega20_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/vega20_ip_offset.h
@@ -90,7 +90,7 @@ static const struct IP_BASE MP0_BASE={ { { { 
0x00016000, 0, 0, 0, 0,
 { { 0, 0, 0, 0, 0, 0 } },
 { { 0, 0, 0, 0, 0, 0 } },
 { { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE MP1_BASE={ { { { 0x00016200, 0, 0, 0, 
0, 0 } },
+static const struct IP_BASE MP1_BASE={ { { { 0x00016000, 0, 0, 0, 
0, 0 } },
 { { 0, 0, 0, 0, 0, 0 } },
 { { 0, 0, 0, 0, 0, 0 } },
 { { 0, 0, 0, 0, 0, 0 } },
@@ -542,7 +542,7 @@ static const struct IP_BASE RSMU_BASE={ { { { 
0x00012000, 0, 0, 0, 0
 #define MP0_BASE__INST5_SEG4   0
 #define MP0_BASE__INST5_SEG5   0
 
-#define MP1_BASE__INST0_SEG0   0x00016200
+#define MP1_BASE__INST0_SEG0   0x00016000
 #define MP1_BASE__INST0_SEG1   0
 #define MP1_BASE__INST0_SEG2   0
 #define MP1_BASE__INST0_SEG3   0
-- 
2.13.6



[PATCH 37/57] drm/amdgpu/vg20: fallback to vbios table if gpu info fw is not available (v2)

2018-05-15 Thread Alex Deucher
First try to fetch the gpu info firmware, then fall back to
the vbios table if the gpu info firmware is not available.

v2: warning fix (Alex)

Reviewed-by: Hawking Zhang 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  3 +++
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c  | 16 ++--
 2 files changed, 17 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 5aa3b1d69cfe..a8a5d6e0eccf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1467,6 +1467,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct 
amdgpu_device *adev)
goto out;
}
 out:
+   /* fall back to vbios tables for vega20 */
+   if (adev->asic_type == CHIP_VEGA20)
+   return 0;
return err;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 9d55f6e24bc5..c92b95a7d9a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -27,6 +27,7 @@
 #include "amdgpu_gfx.h"
 #include "soc15.h"
 #include "soc15d.h"
+#include "amdgpu_atomfirmware.h"
 
 #include "gc/gc_9_0_offset.h"
 #include "gc/gc_9_0_sh_mask.h"
@@ -1113,9 +1114,10 @@ static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs 
= {
.select_me_pipe_q = _v9_0_select_me_pipe_q
 };
 
-static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
+static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
 {
u32 gb_addr_config;
+   int err;
 
adev->gfx.funcs = _v9_0_gfx_funcs;
 
@@ -1147,6 +1149,12 @@ static void gfx_v9_0_gpu_early_init(struct amdgpu_device 
*adev)
gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
gb_addr_config &= ~0xf3e777ff;
gb_addr_config |= 0x22014042;
+   /* check vbios table if gpu info is not available */
+   if (!adev->gfx.config.max_shader_engines) {
+   err = amdgpu_atomfirmware_get_gfx_info(adev);
+   if (err)
+   return err;
+   }
break;
case CHIP_RAVEN:
adev->gfx.config.max_hw_contexts = 8;
@@ -1197,6 +1205,8 @@ static void gfx_v9_0_gpu_early_init(struct amdgpu_device 
*adev)
adev->gfx.config.gb_addr_config,
GB_ADDR_CONFIG,
PIPE_INTERLEAVE_SIZE));
+
+   return 0;
 }
 
 static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev,
@@ -1558,7 +1568,9 @@ static int gfx_v9_0_sw_init(void *handle)
 
adev->gfx.ce_ram_size = 0x8000;
 
-   gfx_v9_0_gpu_early_init(adev);
+   r = gfx_v9_0_gpu_early_init(adev);
+   if (r)
+   return r;
 
r = gfx_v9_0_ngg_init(adev);
if (r)
-- 
2.13.6
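The fallback spans two files, so the control flow is summarized below as a standalone sketch; the helpers (load_gpu_info_firmware, get_gfx_info_from_vbios) and the *_SKETCH names are simplified stand-ins for request_firmware() and amdgpu_atomfirmware_get_gfx_info(), not the real API. Note that patch 38, later in this digest, drops the gpu info firmware for vega20 entirely and always uses the vbios path.

#include <stdio.h>

enum asic_sketch { CHIP_OTHER_SKETCH, CHIP_VEGA20_SKETCH };

struct adev_sketch {
	enum asic_sketch asic_type;
	unsigned max_shader_engines;	/* 0 until filled by firmware or vbios */
};

static int load_gpu_info_firmware(struct adev_sketch *adev)
{
	(void)adev;
	return -1;			/* pretend the gpu_info firmware is missing */
}

static int get_gfx_info_from_vbios(struct adev_sketch *adev)
{
	adev->max_shader_engines = 4;	/* pretend the vbios gfx_info table had it */
	return 0;
}

static int parse_gpu_info_fw(struct adev_sketch *adev)
{
	int err = load_gpu_info_firmware(adev);

	/* a missing gpu info firmware is not fatal on vega20 ... */
	if (adev->asic_type == CHIP_VEGA20_SKETCH)
		return 0;
	return err;
}

static int gpu_early_init(struct adev_sketch *adev)
{
	/* ... because early init falls back to the vbios gfx_info table when
	 * the firmware did not populate the gfx config. */
	if (!adev->max_shader_engines)
		return get_gfx_info_from_vbios(adev);
	return 0;
}

int main(void)
{
	struct adev_sketch adev = { CHIP_VEGA20_SKETCH, 0 };

	if (!parse_gpu_info_fw(&adev) && !gpu_early_init(&adev))
		printf("max_shader_engines = %u\n", adev.max_shader_engines);
	return 0;
}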



[PATCH 42/57] drm/amd/include/vg20: adjust VCE_BASE to reuse vce 4.0 header files

2018-05-15 Thread Alex Deucher
From: James Zhu 

Vega20 uses the vce 4.1 engine; all of its registers have the
same absolute offsets as vce 4.0. By adjusting the vega20
VCE_BASE, vce 4.1 can reuse the vce 4.0 header files.

Signed-off-by: James Zhu 
Reviewed-by: Alex Deucher 
Reviewed-by: Huang Rui 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/include/vega20_ip_offset.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/include/vega20_ip_offset.h 
b/drivers/gpu/drm/amd/include/vega20_ip_offset.h
index 97db93ceba4b..2a2a9cc8bedb 100644
--- a/drivers/gpu/drm/amd/include/vega20_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/vega20_ip_offset.h
@@ -144,7 +144,8 @@ static const struct IP_BASE UVD_BASE={ { { { 
0x7800, 0x7E00,
 { { 0, 0, 0, 0, 0, 0 } },
 { { 0, 0, 0, 0, 0, 0 } },
 { { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE VCE_BASE={ { { { 0x8800, 0, 0, 0, 
0, 0 } },
+/* Adjust VCE_BASE to make vce_4_1 use vce_4_0 offset header files*/
+static const struct IP_BASE VCE_BASE={ { { { 0x7E00/* 
0x8800 */, 0, 0, 0, 0, 0 } },
 { { 0, 0, 0, 0, 0, 0 } },
 { { 0, 0, 0, 0, 0, 0 } },
 { { 0, 0, 0, 0, 0, 0 } },
-- 
2.13.6
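The reuse works because the SOC15 register accessors compose an absolute address roughly as "instance base segment + relative offset from the generated header"; shifting VCE_BASE therefore re-aims every vce 4.0 header offset without touching the headers. A small worked sketch follows; only the two base values come from the patch, the register offset is hypothetical:

#include <stdint.h>
#include <stdio.h>

/* rough model of the SOC15 address composition; not the real macro */
static uint32_t soc15_addr(uint32_t seg_base, uint32_t reg_offset)
{
	return seg_base + reg_offset;
}

int main(void)
{
	uint32_t hypothetical_vce40_offset = 0x123;	/* made-up header offset */
	uint32_t old_base = 0x8800;			/* VCE_BASE before the patch */
	uint32_t new_base = 0x7E00;			/* VCE_BASE after the patch */

	printf("old absolute: 0x%x\n", soc15_addr(old_base, hypothetical_vce40_offset));
	printf("new absolute: 0x%x\n", soc15_addr(new_base, hypothetical_vce40_offset));
	/* every header offset shifts down by 0xA00; per the commit message
	 * above, that is where the vce 4.1 registers actually live */
	return 0;
}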



[PATCH 32/57] drm/amd/display: Remove COMBO_DISPLAY_PLL0 from Vega20

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

Signed-off-by: Jerry (Fangzhi) Zuo 
Reviewed-by: Hersen Wu 
Reviewed-by: Alex Deucher 
Signed-off-by: Alex Deucher 
Signed-off-by: Feifei Xu 
---
 drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c   | 11 ++-
 drivers/gpu/drm/amd/display/include/dal_asic_id.h |  6 ++
 2 files changed, 16 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c 
b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
index 78e6beb6cf26..aa4cf3095235 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
@@ -35,7 +35,7 @@
 #endif
 #include "core_types.h"
 #include "dc_types.h"
-
+#include "dal_asic_id.h"
 
 #define TO_DCE_CLOCKS(clocks)\
container_of(clocks, struct dce_disp_clk, base)
@@ -413,9 +413,18 @@ static int dce112_set_clock(
/*VBIOS will determine DPREFCLK frequency, so we don't set it*/
dce_clk_params.target_clock_frequency = 0;
dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
+#ifndef CONFIG_DRM_AMD_DC_VG20
dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
(dce_clk_params.pll_id ==
CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
+#else
+   if (!ASICREV_IS_VEGA20_P(clk->ctx->asic_id.hw_internal_rev))
+   dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
+   (dce_clk_params.pll_id ==
+   CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
+   else
+   dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false;
+#endif
 
bp->funcs->set_dce_clock(bp, _clk_params);
 
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h 
b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 1b987b6a347d..77d2856be9f6 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -117,6 +117,12 @@
((rev >= STONEY_A0) && (rev < CZ_UNKNOWN))
 
 /* DCE12 */
+#define AI_UNKNOWN 0xFF
+
+#ifdef CONFIG_DRM_AMD_DC_VG20
+#define AI_VEGA20_P_A0 40
+#define ASICREV_IS_VEGA20_P(eChipRev) ((eChipRev >= AI_VEGA20_P_A0) && 
(eChipRev < AI_UNKNOWN))
+#endif
 
 #define AI_GREENLAND_P_A0 1
 #define AI_GREENLAND_P_A1 2
-- 
2.13.6



[PATCH 33/57] drm/amd/display: Add BIOS smu_info v3_3 support for Vega20

2018-05-15 Thread Alex Deucher
From: "Jerry (Fangzhi) Zuo" 

Signed-off-by: Jerry (Fangzhi) Zuo 
Reviewed-by: Hersen Wu 
Reviewed-by: Alex Deucher 
Signed-off-by: Alex Deucher 
Signed-off-by: Feifei Xu 
---
 drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 5 +
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c 
b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 10a5807a7e8b..4561673a0fe6 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -1330,6 +1330,11 @@ static enum bp_result bios_parser_get_firmware_info(
case 2:
result = get_firmware_info_v3_2(bp, info);
break;
+   case 3:
+#ifdef CONFIG_DRM_AMD_DC_VG20
+   result = get_firmware_info_v3_2(bp, info);
+#endif
+   break;
default:
break;
}
-- 
2.13.6



[PATCH 38/57] drm/amdgpu: drop gpu_info firmware for vega20

2018-05-15 Thread Alex Deucher
No longer required.

Reviewed-by: Amber Lin 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 8 +---
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c  | 8 +++-
 2 files changed, 4 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index a8a5d6e0eccf..e730f480a876 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -60,7 +60,6 @@
 
 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
-MODULE_FIRMWARE("amdgpu/vega20_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
 
 #define AMDGPU_RESUME_MS   2000
@@ -1398,6 +1397,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct 
amdgpu_device *adev)
case CHIP_KABINI:
case CHIP_MULLINS:
 #endif
+case CHIP_VEGA20:
default:
return 0;
case CHIP_VEGA10:
@@ -1406,9 +1406,6 @@ static int amdgpu_device_parse_gpu_info_fw(struct 
amdgpu_device *adev)
case CHIP_VEGA12:
chip_name = "vega12";
break;
-   case CHIP_VEGA20:
-   chip_name = "vega20";
-   break;
case CHIP_RAVEN:
chip_name = "raven";
break;
@@ -1467,9 +1464,6 @@ static int amdgpu_device_parse_gpu_info_fw(struct 
amdgpu_device *adev)
goto out;
}
 out:
-   /* fall back to vbios tables for vega20 */
-   if (adev->asic_type == CHIP_VEGA20)
-   return 0;
return err;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index c92b95a7d9a0..7c5a85087d4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1150,11 +1150,9 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device 
*adev)
gb_addr_config &= ~0xf3e777ff;
gb_addr_config |= 0x22014042;
/* check vbios table if gpu info is not available */
-   if (!adev->gfx.config.max_shader_engines) {
-   err = amdgpu_atomfirmware_get_gfx_info(adev);
-   if (err)
-   return err;
-   }
+   err = amdgpu_atomfirmware_get_gfx_info(adev);
+   if (err)
+   return err;
break;
case CHIP_RAVEN:
adev->gfx.config.max_hw_contexts = 8;
-- 
2.13.6



[PATCH 39/57] drm/amdgpu: Set vega20 load_type to AMDGPU_FW_LOAD_DIRECT.

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

Please revert this patch once PSP firmware loading is enabled.

Signed-off-by: Feifei Xu 
Reviewed-by: Hawking Zhang 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 63e29969dbf6..abcc163300d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -303,11 +303,12 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, 
int load_type)
case CHIP_VEGA10:
case CHIP_RAVEN:
case CHIP_VEGA12:
-   case CHIP_VEGA20:
if (!load_type)
return AMDGPU_FW_LOAD_DIRECT;
else
return AMDGPU_FW_LOAD_PSP;
+   case CHIP_VEGA20:
+   return AMDGPU_FW_LOAD_DIRECT;
default:
DRM_ERROR("Unknow firmware load type\n");
}
-- 
2.13.6



[PATCH 35/57] drm/amdgpu/atomfirmware: add new gfx_info data table v2.4 (v2)

2018-05-15 Thread Alex Deucher
Adds additional gfx configuration data.

v2: fix typo

Reviewed-by: Hawking Zhang 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/include/atomfirmware.h | 34 ++
 1 file changed, 34 insertions(+)

diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h 
b/drivers/gpu/drm/amd/include/atomfirmware.h
index de177ce8ca80..fd5e80c92ed0 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -1219,6 +1219,40 @@ struct  atom_gfx_info_v2_3 {
   uint32_t rm21_sram_vmin_value;
 };
 
+struct  atom_gfx_info_v2_4 {
+  struct  atom_common_table_header  table_header;
+  uint8_t gfxip_min_ver;
+  uint8_t gfxip_max_ver;
+  uint8_t gc_num_se;
+  uint8_t max_tile_pipes;
+  uint8_t gc_num_cu_per_sh;
+  uint8_t gc_num_sh_per_se;
+  uint8_t gc_num_rb_per_se;
+  uint8_t gc_num_tccs;
+  uint32_t regaddr_cp_dma_src_addr;
+  uint32_t regaddr_cp_dma_src_addr_hi;
+  uint32_t regaddr_cp_dma_dst_addr;
+  uint32_t regaddr_cp_dma_dst_addr_hi;
+  uint32_t regaddr_cp_dma_command;
+  uint32_t regaddr_cp_status;
+  uint32_t regaddr_rlc_gpu_clock_32;
+  uint32_t rlc_gpu_timer_refclk;
+  uint8_t active_cu_per_sh;
+  uint8_t active_rb_per_se;
+  uint16_t gcgoldenoffset;
+  uint32_t rm21_sram_vmin_value;
+  uint16_t gc_num_gprs;
+  uint16_t gc_gsprim_buff_depth;
+  uint16_t gc_parameter_cache_depth;
+  uint16_t gc_wave_size;
+  uint16_t gc_max_waves_per_simd;
+  uint16_t gc_lds_size;
+  uint8_t gc_num_max_gs_thds;
+  uint8_t gc_gs_table_depth;
+  uint8_t gc_double_offchip_lds_buffer;
+  uint8_t gc_max_scratch_slots_per_cu;
+};
+
 /* 
   ***
 Data Table smu_info  structure
-- 
2.13.6



[PATCH 36/57] drm/amdgpu/atomfirmware: add parser for gfx_info table

2018-05-15 Thread Alex Deucher
Add support for the gfx_info table on boards that use atomfirmware.

Reviewed-by: Hawking Zhang 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 46 
 drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h |  1 +
 2 files changed, 47 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index a0f48cb9b8f0..7014d5875d5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -322,3 +322,49 @@ int amdgpu_atomfirmware_get_clock_info(struct 
amdgpu_device *adev)
 
return ret;
 }
+
+union gfx_info {
+   struct  atom_gfx_info_v2_4 v24;
+};
+
+int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
+{
+   struct amdgpu_mode_info *mode_info = >mode_info;
+   int index;
+   uint8_t frev, crev;
+   uint16_t data_offset;
+
+   index = 
get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+   gfx_info);
+   if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+  , , _offset)) {
+   union gfx_info *gfx_info = (union gfx_info *)
+   (mode_info->atom_context->bios + data_offset);
+   switch (crev) {
+   case 4:
+   adev->gfx.config.max_shader_engines = 
gfx_info->v24.gc_num_se;
+   adev->gfx.config.max_cu_per_sh = 
gfx_info->v24.gc_num_cu_per_sh;
+   adev->gfx.config.max_sh_per_se = 
gfx_info->v24.gc_num_sh_per_se;
+   adev->gfx.config.max_backends_per_se = 
gfx_info->v24.gc_num_rb_per_se;
+   adev->gfx.config.max_texture_channel_caches = 
gfx_info->v24.gc_num_tccs;
+   adev->gfx.config.max_gprs = 
le16_to_cpu(gfx_info->v24.gc_num_gprs);
+   adev->gfx.config.max_gs_threads = 
gfx_info->v24.gc_num_max_gs_thds;
+   adev->gfx.config.gs_vgt_table_depth = 
gfx_info->v24.gc_gs_table_depth;
+   adev->gfx.config.gs_prim_buffer_depth =
+   le16_to_cpu(gfx_info->v24.gc_gsprim_buff_depth);
+   adev->gfx.config.double_offchip_lds_buf =
+   gfx_info->v24.gc_double_offchip_lds_buffer;
+   adev->gfx.cu_info.wave_front_size = 
gfx_info->v24.gc_wave_size;
+   adev->gfx.cu_info.max_waves_per_simd =
+   
le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd);
+   adev->gfx.cu_info.max_scratch_slots_per_cu =
+   gfx_info->v24.gc_max_scratch_slots_per_cu;
+   adev->gfx.cu_info.lds_size = 
le16_to_cpu(gfx_info->v24.gc_lds_size);
+   return 0;
+   default:
+   return -EINVAL;
+   }
+
+   }
+   return -EINVAL;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
index 7689c961c4ef..20f158fd3b76 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
@@ -30,5 +30,6 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct 
amdgpu_device *adev);
 int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
+int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev);
 
 #endif
-- 
2.13.6



[PATCH 34/57] drm/amd/display: Add harvest IP support for Vega20

2018-05-15 Thread Alex Deucher
From: "Jerry (Fangzhi) Zuo" 

Retrieve fuses to determine the availability of pipes, and
eliminate pipes that cannot be used.

Signed-off-by: Jerry (Fangzhi) Zuo 
Reviewed-by: Hersen Wu 
Reviewed-by: Tony Cheng 
Reviewed-by: Alex Deucher 
Signed-off-by: Alex Deucher 
Signed-off-by: Feifei Xu 
---
 .../drm/amd/display/dc/dce120/dce120_resource.c| 208 +
 1 file changed, 208 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 
b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index fda01574d1ba..545f35f0821f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -814,6 +814,213 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
dm_pp_notify_wm_clock_changes(dc->ctx, _ranges);
 }
 
+#ifdef CONFIG_DRM_AMD_DC_VG20
+static uint32_t read_pipe_fuses(struct dc_context *ctx)
+{
+   uint32_t value = dm_read_reg_soc15(ctx, mmCC_DC_PIPE_DIS, 0);
+   /* VG20 support max 6 pipes */
+   value = value & 0x3f;
+   return value;
+}
+
+static bool construct(
+   uint8_t num_virtual_links,
+   struct dc *dc,
+   struct dce110_resource_pool *pool)
+{
+   unsigned int i;
+   int j;
+   struct dc_context *ctx = dc->ctx;
+   struct irq_service_init_data irq_init_data;
+   bool harvest_enabled = 
ASICREV_IS_VEGA20_P(ctx->asic_id.hw_internal_rev);
+   uint32_t pipe_fuses;
+
+   ctx->dc_bios->regs = _regs;
+
+   pool->base.res_cap = _cap;
+   pool->base.funcs = _res_pool_funcs;
+
+   /* TODO: Fill more data from GreenlandAsicCapability.cpp */
+   pool->base.pipe_count = res_cap.num_timing_generator;
+   pool->base.timing_generator_count = 
pool->base.res_cap->num_timing_generator;
+   pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+
+   dc->caps.max_downscale_ratio = 200;
+   dc->caps.i2c_speed_in_khz = 100;
+   dc->caps.max_cursor_size = 128;
+   dc->caps.dual_link_dvi = true;
+
+   dc->debug = debug_defaults;
+
+   /*
+*  Create resources *
+*/
+
+   pool->base.clock_sources[DCE120_CLK_SRC_PLL0] =
+   dce120_clock_source_create(ctx, ctx->dc_bios,
+   CLOCK_SOURCE_COMBO_PHY_PLL0,
+   _src_regs[0], false);
+   pool->base.clock_sources[DCE120_CLK_SRC_PLL1] =
+   dce120_clock_source_create(ctx, ctx->dc_bios,
+   CLOCK_SOURCE_COMBO_PHY_PLL1,
+   _src_regs[1], false);
+   pool->base.clock_sources[DCE120_CLK_SRC_PLL2] =
+   dce120_clock_source_create(ctx, ctx->dc_bios,
+   CLOCK_SOURCE_COMBO_PHY_PLL2,
+   _src_regs[2], false);
+   pool->base.clock_sources[DCE120_CLK_SRC_PLL3] =
+   dce120_clock_source_create(ctx, ctx->dc_bios,
+   CLOCK_SOURCE_COMBO_PHY_PLL3,
+   _src_regs[3], false);
+   pool->base.clock_sources[DCE120_CLK_SRC_PLL4] =
+   dce120_clock_source_create(ctx, ctx->dc_bios,
+   CLOCK_SOURCE_COMBO_PHY_PLL4,
+   _src_regs[4], false);
+   pool->base.clock_sources[DCE120_CLK_SRC_PLL5] =
+   dce120_clock_source_create(ctx, ctx->dc_bios,
+   CLOCK_SOURCE_COMBO_PHY_PLL5,
+   _src_regs[5], false);
+   pool->base.clk_src_count = DCE120_CLK_SRC_TOTAL;
+
+   pool->base.dp_clock_source =
+   dce120_clock_source_create(ctx, ctx->dc_bios,
+   CLOCK_SOURCE_ID_DP_DTO,
+   _src_regs[0], true);
+
+   for (i = 0; i < pool->base.clk_src_count; i++) {
+   if (pool->base.clock_sources[i] == NULL) {
+   dm_error("DC: failed to create clock sources!\n");
+   BREAK_TO_DEBUGGER();
+   goto clk_src_create_fail;
+   }
+   }
+
+   pool->base.display_clock = dce120_disp_clk_create(ctx);
+   if (pool->base.display_clock == NULL) {
+   dm_error("DC: failed to create display clock!\n");
+   BREAK_TO_DEBUGGER();
+   goto disp_clk_create_fail;
+   }
+
+   pool->base.dmcu = dce_dmcu_create(ctx,
+   _regs,
+   _shift,
+   _mask);
+   if (pool->base.dmcu == NULL) {
+   dm_error("DC: failed to create dmcu!\n");
+   
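construct() is truncated above, but the harvesting idea is simple: read_pipe_fuses() returns a per-pipe disable mask, and pipes whose bit is set are skipped when the resource pool is built. A standalone sketch of the bitmask handling follows; the assumption that a set bit means "pipe fused off" follows the register name (CC_DC_PIPE_DIS), and the counting helper is illustrative, not the driver's exact bookkeeping:

#include <stdint.h>
#include <stdio.h>

#define DCE120_MAX_PIPES 6

static unsigned usable_pipes(uint32_t pipe_fuses)
{
	unsigned i, count = 0;

	pipe_fuses &= 0x3f;			/* VG20 supports at most 6 pipes */
	for (i = 0; i < DCE120_MAX_PIPES; i++)
		if (!(pipe_fuses & (1u << i)))	/* bit clear -> pipe is present */
			count++;
	return count;
}

int main(void)
{
	/* hypothetical fuse value: pipes 4 and 5 fused off */
	printf("usable pipes: %u\n", usable_pipes(0x30));
	return 0;
}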

[PATCH 31/57] drm/amd/display: Add Vega20 config. support

2018-05-15 Thread Alex Deucher
From: "Jerry (Fangzhi) Zuo" 

Signed-off-by: Jerry (Fangzhi) Zuo 
Reviewed-by: Harry Wentland 
Reviewed-by: Alex Deucher 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/display/Kconfig | 9 +
 1 file changed, 9 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/Kconfig 
b/drivers/gpu/drm/amd/display/Kconfig
index e6ca72c0d347..6dcec9c9126b 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -40,4 +40,13 @@ config DRM_AMD_DC_VEGAM
 help
  Choose this option if you want to have
  VEGAM support for display engine
+
+config DRM_AMD_DC_VG20
+   bool "Vega20 support"
+   depends on DRM_AMD_DC
+   help
+   Choose this option if you want to have
+   Vega20 support for display engine
+
+
 endmenu
-- 
2.13.6



[PATCH 29/57] drm/amdgpu: Add vega20 to dc support check

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

Signed-off-by: Feifei Xu 
Reviewed-by: Alex Deucher 
Reviewed-by: Hawking Zhang 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 9c9be878aeb5..5aa3b1d69cfe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2170,6 +2170,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type 
asic_type)
case CHIP_FIJI:
case CHIP_VEGA10:
case CHIP_VEGA12:
+case CHIP_VEGA20:
 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case CHIP_RAVEN:
 #endif
-- 
2.13.6



[PATCH 28/57] drm/amd/display/dm: Add vega20 support

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

Signed-off-by: Feifei Xu 
Reviewed-by: Alex Deucher 
Reviewed-by: Hawking Zhang 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4 
 1 file changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index a48587c47aae..148ea9d4196f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -,6 +,7 @@ static int dce110_register_irq_handlers(struct 
amdgpu_device *adev)
 
if (adev->asic_type == CHIP_VEGA10 ||
adev->asic_type == CHIP_VEGA12 ||
+   adev->asic_type == CHIP_VEGA20 ||
adev->asic_type == CHIP_RAVEN)
client_id = SOC15_IH_CLIENTID_DCE;
 
@@ -1514,6 +1515,7 @@ static int amdgpu_dm_initialize_drm_device(struct 
amdgpu_device *adev)
 #endif
case CHIP_VEGA10:
case CHIP_VEGA12:
+   case CHIP_VEGA20:
if (dce110_register_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
goto fail;
@@ -1760,6 +1762,7 @@ static int dm_early_init(void *handle)
break;
case CHIP_VEGA10:
case CHIP_VEGA12:
+   case CHIP_VEGA20:
adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
@@ -2008,6 +2011,7 @@ static int fill_plane_attributes_from_fb(struct 
amdgpu_device *adev,
 
if (adev->asic_type == CHIP_VEGA10 ||
adev->asic_type == CHIP_VEGA12 ||
+   adev->asic_type == CHIP_VEGA20 ||
adev->asic_type == CHIP_RAVEN) {
/* Fill GFX9 params */
plane_state->tiling_info.gfx9.num_pipes =
-- 
2.13.6



[PATCH 30/57] drm/amd: Add dce-12.1 gpio aux registers

2018-05-15 Thread Alex Deucher
From: Roman Li 

Update the dce12 register headers by adding the dc registers
required for potential DP LTTPR support.

Signed-off-by: Roman Li 
Acked-by: Alex Deucher 
Signed-off-by: Alex Deucher 
---
 .../drm/amd/include/asic_reg/dce/dce_12_0_offset.h |  12 ++
 .../amd/include/asic_reg/dce/dce_12_0_sh_mask.h| 152 +
 2 files changed, 164 insertions(+)
 mode change 100644 => 100755 
drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
 mode change 100644 => 100755 
drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_sh_mask.h

diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h 
b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
old mode 100644
new mode 100755
index f730d0629020..b6f74bf4af02
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
@@ -2095,6 +2095,18 @@
 #define mmDC_GPIO_AUX_CTRL_2_BASE_IDX  
2
 #define mmDC_GPIO_RXEN 
0x212f
 #define mmDC_GPIO_RXEN_BASE_IDX
2
+#define mmDC_GPIO_AUX_CTRL_3   
0x2130
+#define mmDC_GPIO_AUX_CTRL_3_BASE_IDX  
2
+#define mmDC_GPIO_AUX_CTRL_4   
0x2131
+#define mmDC_GPIO_AUX_CTRL_4_BASE_IDX  
2
+#define mmDC_GPIO_AUX_CTRL_5   
0x2132
+#define mmDC_GPIO_AUX_CTRL_5_BASE_IDX  
2
+#define mmAUXI2C_PAD_ALL_PWR_OK
0x2133
+#define mmAUXI2C_PAD_ALL_PWR_OK_BASE_IDX   
2
+#define mmDC_GPIO_PULLUPEN 
0x2134
+#define mmDC_GPIO_PULLUPEN_BASE_IDX
2
+#define mmDC_GPIO_AUX_CTRL_6   
0x2135
+#define mmDC_GPIO_AUX_CTRL_6_BASE_IDX  
2
 #define mmBPHYC_DAC_MACRO_CNTL 
0x2136
 #define mmBPHYC_DAC_MACRO_CNTL_BASE_IDX
2
 #define mmDAC_MACRO_CNTL_RESERVED0 
0x2136
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_sh_mask.h 
b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_sh_mask.h
old mode 100644
new mode 100755
index 6d3162c42957..bcd190a3fcdd
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_sh_mask.h
@@ -10971,6 +10971,158 @@
 #define DC_GPIO_RXEN__DC_GPIO_BLON_RXEN_MASK   
   0x0010L
 #define DC_GPIO_RXEN__DC_GPIO_DIGON_RXEN_MASK  
   0x0020L
 #define DC_GPIO_RXEN__DC_GPIO_ENA_BL_RXEN_MASK 
   0x0040L
+//DC_GPIO_AUX_CTRL_3
+#define DC_GPIO_AUX_CTRL_3__AUX1_NEN_RTERM__SHIFT  
   0x0
+#define DC_GPIO_AUX_CTRL_3__AUX2_NEN_RTERM__SHIFT  
   0x1
+#define DC_GPIO_AUX_CTRL_3__AUX3_NEN_RTERM__SHIFT  
   0x2
+#define DC_GPIO_AUX_CTRL_3__AUX4_NEN_RTERM__SHIFT  
   0x3
+#define DC_GPIO_AUX_CTRL_3__AUX5_NEN_RTERM__SHIFT  
   0x4
+#define DC_GPIO_AUX_CTRL_3__AUX6_NEN_RTERM__SHIFT  
   0x5
+#define DC_GPIO_AUX_CTRL_3__AUX1_DP_DN_SWAP__SHIFT 
   0x8
+#define DC_GPIO_AUX_CTRL_3__AUX2_DP_DN_SWAP__SHIFT 
   0x9
+#define DC_GPIO_AUX_CTRL_3__AUX3_DP_DN_SWAP__SHIFT 
   0xa
+#define DC_GPIO_AUX_CTRL_3__AUX4_DP_DN_SWAP__SHIFT 
   0xb
+#define DC_GPIO_AUX_CTRL_3__AUX5_DP_DN_SWAP__SHIFT 
   0xc

[PATCH 26/57] drm/amdgpu: Add nbio support for vega20 (v2)

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

Some register offsets in nbio v7.4 differ from those in v7.0.

v2: Use nbio7.0 for now.

TODO: add a new nbio 7.4 module (Alex)

Signed-off-by: Feifei Xu 
Reviewed-by: Hawking Zhang 
Reviewed-by: Alex Deucher 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c | 18 +-
 drivers/gpu/drm/amd/amdgpu/soc15.c |  2 ++
 2 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c 
b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
index df34dc79d444..365517c0121e 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
@@ -34,10 +34,19 @@
 #define smnCPM_CONTROL 
 0x11180460
 #define smnPCIE_CNTL2  
 0x11180070
 
+/* vega20 */
+#define mmRCC_DEV0_EPF0_STRAP0_VG20
 0x0011
+#define mmRCC_DEV0_EPF0_STRAP0_VG20_BASE_IDX   
 2
+
 static u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev)
 {
 u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
 
+   if (adev->asic_type == CHIP_VEGA20)
+   tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0_VG20);
+   else
+   tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
+
tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;
 
@@ -75,10 +84,14 @@ static void nbio_v7_0_sdma_doorbell_range(struct 
amdgpu_device *adev, int instan
SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE);
 
u32 doorbell_range = RREG32(reg);
+   u32 range = 2;
+
+   if (adev->asic_type == CHIP_VEGA20)
+   range = 8;
 
if (use_doorbell) {
doorbell_range = REG_SET_FIELD(doorbell_range, 
BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
-   doorbell_range = REG_SET_FIELD(doorbell_range, 
BIF_SDMA0_DOORBELL_RANGE, SIZE, 2);
+   doorbell_range = REG_SET_FIELD(doorbell_range, 
BIF_SDMA0_DOORBELL_RANGE, SIZE, range);
} else
doorbell_range = REG_SET_FIELD(doorbell_range, 
BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);
 
@@ -133,6 +146,9 @@ static void 
nbio_v7_0_update_medium_grain_clock_gating(struct amdgpu_device *ade
 {
uint32_t def, data;
 
+   if (adev->asic_type == CHIP_VEGA20)
+   return;
+
/* NBIF_MGCG_CTRL_LCLK */
def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c 
b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 10337fb3fc1f..4e065c68b86c 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -496,6 +496,8 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 
if (adev->flags & AMD_IS_APU)
adev->nbio_funcs = _v7_0_funcs;
+   else if (adev->asic_type == CHIP_VEGA20)
+   adev->nbio_funcs = _v7_0_funcs;
else
adev->nbio_funcs = _v6_1_funcs;
 
-- 
2.13.6



[PATCH 25/57] drm/amdgpu/soc15: Add ip blocks for vega20 (v2)

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

Same as vega10 now.

v2: squash in typo fix

Signed-off-by: Feifei Xu 
Reviewed-by: Hawking Zhang 
Reviewed-by: Alex Deucher 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/soc15.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c 
b/drivers/gpu/drm/amd/amdgpu/soc15.c
index c3133d16de77..10337fb3fc1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -508,6 +508,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA12:
+   case CHIP_VEGA20:
amdgpu_device_ip_block_add(adev, _common_ip_block);
amdgpu_device_ip_block_add(adev, _v9_0_ip_block);
amdgpu_device_ip_block_add(adev, _ih_ip_block);
-- 
2.13.6



[PATCH 24/57] drm/amdgpu/soc15: dynamic initialize ip offset for vega20

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

Vega20 needs a separate vega20_reg_init.c due to ip base
offset differences.

Signed-off-by: Feifei Xu 
Reviewed-by: Hawking Zhang 
Reviewed-by: Alex Deucher 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/Makefile  |  3 +-
 drivers/gpu/drm/amd/amdgpu/soc15.c   |  3 ++
 drivers/gpu/drm/amd/amdgpu/soc15.h   |  1 +
 drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c | 53 
 4 files changed, 59 insertions(+), 1 deletion(-)
 create mode 100644 drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c

diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile 
b/drivers/gpu/drm/amd/amdgpu/Makefile
index 2fe4a0bf98c8..1dd740b76d41 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -62,7 +62,8 @@ amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o 
kv_dpm.o \
 amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o 
dce_v6_0.o si_dpm.o si_smc.o
 
 amdgpu-y += \
-   vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o 
vega10_reg_init.o
+   vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o 
vega10_reg_init.o \
+   vega20_reg_init.o
 
 # add DF block
 amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c 
b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 1fd75f5aa22b..c3133d16de77 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -487,6 +487,9 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
case CHIP_RAVEN:
vega10_reg_base_init(adev);
break;
+   case CHIP_VEGA20:
+   vega20_reg_base_init(adev);
+   break;
default:
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h 
b/drivers/gpu/drm/amd/amdgpu/soc15.h
index f70da8a29f86..1f714b7af520 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -55,5 +55,6 @@ void soc15_program_register_sequence(struct amdgpu_device 
*adev,
 const u32 array_size);
 
 int vega10_reg_base_init(struct amdgpu_device *adev);
+int vega20_reg_base_init(struct amdgpu_device *adev);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c 
b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
new file mode 100644
index ..52778de93ab0
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "soc15.h"
+
+#include "soc15_common.h"
+#include "soc15_hw_ip.h"
+#include "vega20_ip_offset.h"
+
+int vega20_reg_base_init(struct amdgpu_device *adev)
+{
+   /* HW has more IP blocks, only initialize the blocks needed by our driver */
+   uint32_t i;
+   for (i = 0 ; i < MAX_INSTANCE ; ++i) {
+   adev->reg_offset[GC_HWIP][i] = (uint32_t 
*)(&(GC_BASE.instance[i]));
+   adev->reg_offset[HDP_HWIP][i] = (uint32_t 
*)(&(HDP_BASE.instance[i]));
+   adev->reg_offset[MMHUB_HWIP][i] = (uint32_t 
*)(&(MMHUB_BASE.instance[i]));
+   adev->reg_offset[ATHUB_HWIP][i] = (uint32_t 
*)(&(ATHUB_BASE.instance[i]));
+   adev->reg_offset[NBIO_HWIP][i] = (uint32_t 
*)(&(NBIO_BASE.instance[i]));
+   adev->reg_offset[MP0_HWIP][i] = (uint32_t 
*)(&(MP0_BASE.instance[i]));
+   adev->reg_offset[UVD_HWIP][i] = (uint32_t 
*)(&(UVD_BASE.instance[i]));
+   adev->reg_offset[VCE_HWIP][i] = (uint32_t 
*)(&(VCE_BASE.instance[i]));
+   adev->reg_offset[DF_HWIP][i] = (uint32_t 
*)(&(DF_BASE.instance[i]));
+   adev->reg_offset[DCE_HWIP][i] = (uint32_t 
*)(&(DCE_BASE.instance[i]));
+  

[PATCH 23/57] drm/amdgpu/soc15: Set common clockgating for vega20.

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

Same as vega10 for now.

Signed-off-by: Feifei Xu 
Reviewed-by: Hawking Zhang 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/soc15.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c 
b/drivers/gpu/drm/amd/amdgpu/soc15.c
index f45bea84a73e..1fd75f5aa22b 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -875,6 +875,7 @@ static int soc15_common_set_clockgating_state(void *handle,
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA12:
+   case CHIP_VEGA20:
adev->nbio_funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
adev->nbio_funcs->update_medium_grain_light_sleep(adev,
-- 
2.13.6



[PATCH 22/57] drm/amdgpu/soc15:Add vega20 soc15_common_early_init support

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

Set external_rev_id and disable CG and PG for now.

Signed-off-by: Feifei Xu 
Reviewed-by: Hawking Zhang 
Reviewed-by: Huang Rui 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/soc15.c | 5 +
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c 
b/drivers/gpu/drm/amd/amdgpu/soc15.c
index f31df18fcb81..f45bea84a73e 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -658,6 +658,11 @@ static int soc15_common_early_init(void *handle)
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x14;
break;
+   case CHIP_VEGA20:
+   adev->cg_flags = 0;
+   adev->pg_flags = 0;
+   adev->external_rev_id = adev->rev_id + 0x28;
+   break;
case CHIP_RAVEN:
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
-- 
2.13.6



[PATCH 20/57] drm/amdgpu/gfx9: Add support for vega20

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

Signed-off-by: Feifei Xu 
Reviewed-by: Alex Deucher 
Reviewed-by: Hawking Zhang 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 681b3da2c6be..630727863794 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1430,6 +1430,7 @@ static int gfx_v9_0_sw_init(void *handle)
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA12:
+   case CHIP_VEGA20:
case CHIP_RAVEN:
adev->gfx.mec.num_mec = 2;
break;
@@ -4716,6 +4717,7 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device 
*adev)
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA12:
+   case CHIP_VEGA20:
case CHIP_RAVEN:
adev->gfx.rlc.funcs = _v9_0_rlc_funcs;
break;
-- 
2.13.6



[PATCH 19/57] drm/amdgpu/gfx9: Add gfx config for vega20. (v3)

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

v2: clean up (Alex)
v3: additional cleanups (Alex)

Signed-off-by: Feifei Xu 
Reviewed-by: Alex Deucher 
Reviewed-by: Hawking Zhang 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 11 +++
 1 file changed, 11 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 9fd5afa8c745..681b3da2c6be 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1137,6 +1137,17 @@ static void gfx_v9_0_gpu_early_init(struct amdgpu_device 
*adev)
gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
DRM_INFO("fix gfx.config for vega12\n");
break;
+   case CHIP_VEGA20:
+   adev->gfx.config.max_hw_contexts = 8;
+   adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+   adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+   adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+   adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
+   //TODO: Need to update this for vega20
+   gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
+   gb_addr_config &= ~0xf3e777ff;
+   gb_addr_config |= 0x22014042;
+   break;
case CHIP_RAVEN:
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
-- 
2.13.6



[PATCH 16/57] drm/amdgpu/sdma4: Add clockgating support for vega20

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

Signed-off-by: Feifei Xu 
Reviewed-by: Alex Deucher 
Reviewed-by: Hawking Zhang 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 
b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index dc12c365a886..ca53b3fba422 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1548,6 +1548,7 @@ static int sdma_v4_0_set_clockgating_state(void *handle,
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA12:
+   case CHIP_VEGA20:
case CHIP_RAVEN:
sdma_v4_0_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
-- 
2.13.6



[PATCH 17/57] drm/amdgpu/gfx9: Add support for vega20 firmware

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

Signed-off-by: Feifei Xu 
Reviewed-by: Alex Deucher 
Reviewed-by: Hawking Zhang 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 10 ++
 1 file changed, 10 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 2d9679076c47..bc3d2ed5cc81 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -63,6 +63,13 @@ MODULE_FIRMWARE("amdgpu/vega12_mec.bin");
 MODULE_FIRMWARE("amdgpu/vega12_mec2.bin");
 MODULE_FIRMWARE("amdgpu/vega12_rlc.bin");
 
+MODULE_FIRMWARE("amdgpu/vega20_ce.bin");
+MODULE_FIRMWARE("amdgpu/vega20_pfp.bin");
+MODULE_FIRMWARE("amdgpu/vega20_me.bin");
+MODULE_FIRMWARE("amdgpu/vega20_mec.bin");
+MODULE_FIRMWARE("amdgpu/vega20_mec2.bin");
+MODULE_FIRMWARE("amdgpu/vega20_rlc.bin");
+
 MODULE_FIRMWARE("amdgpu/raven_ce.bin");
 MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
 MODULE_FIRMWARE("amdgpu/raven_me.bin");
@@ -461,6 +468,9 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device 
*adev)
case CHIP_VEGA12:
chip_name = "vega12";
break;
+   case CHIP_VEGA20:
+   chip_name = "vega20";
+   break;
case CHIP_RAVEN:
chip_name = "raven";
break;
-- 
2.13.6



[PATCH 18/57] drm/amdgpu/gfx9: Add vega20 golden settings (v3)

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

v2: squash in updates (Alex)
v3: squash in more updates (Alex)

Signed-off-by: Feifei Xu 
Reviewed-by: Alex Deucher 
Reviewed-by: Hawking Zhang 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 22 ++
 1 file changed, 22 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index bc3d2ed5cc81..9fd5afa8c745 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -108,6 +108,20 @@ static const struct soc15_reg_golden 
golden_settings_gc_9_0_vg10[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x1800, 0x0800)
 };
 
+static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
+{
+   SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f00, 
0x0a00),
+   SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x3000, 
0x1000),
+   SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042),
+   SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 
0x22014042),
+   SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x3e00, 0x0400),
+   SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xff84, 
0x0404),
+   SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x0003, 
0x0003),
+   SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x010f, 
0x01000107),
+   SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x000b, 0x000b),
+   SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x0100, 0x0100)
+};
+
 static const struct soc15_reg_golden golden_settings_gc_9_1[] =
 {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
@@ -241,6 +255,14 @@ static void gfx_v9_0_init_golden_registers(struct 
amdgpu_device *adev)
golden_settings_gc_9_2_1_vg12,

ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
break;
+   case CHIP_VEGA20:
+   soc15_program_register_sequence(adev,
+   golden_settings_gc_9_0,
+   
ARRAY_SIZE(golden_settings_gc_9_0));
+   soc15_program_register_sequence(adev,
+   golden_settings_gc_9_0_vg20,
+   
ARRAY_SIZE(golden_settings_gc_9_0_vg20));
+   break;
case CHIP_RAVEN:
soc15_program_register_sequence(adev,
 golden_settings_gc_9_1,
-- 
2.13.6



[PATCH 15/57] drm/amdgpu/sdma4: Add vega20 golden settings (v3)

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

v2: squash in updates (Alex)
v3: squash in more updates (Alex)

Signed-off-by: Feifei Xu 
Reviewed-by: Alex Deucher 
Reviewed-by: Hawking Zhang 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 27 +++
 1 file changed, 27 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 
b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 79b3a45b5715..dc12c365a886 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -109,6 +109,28 @@ static const struct soc15_reg_golden 
golden_settings_sdma_4_1[] =
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x03ff, 
0x03c0)
 };
 
+static const struct soc15_reg_golden golden_settings_sdma_4_2[] =
+{
+   SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 
0x02831d07),
+   SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0x, 
0x3f000100),
+   SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x773f, 
0x4002),
+   SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 
0x773f, 0x4002),
+   SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 
0xfff7, 0x00403000),
+   SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 
0xfff7, 0x00403000),
+   SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 
0xfff0, 0x00403000),
+   SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 
0xfff7, 0x00403000),
+   SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x03ff, 
0x03c0),
+   SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 
0x02831d07),
+   SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0x, 
0x3f000100),
+   SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x773f, 
0x4002),
+   SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 
0x773f, 0x4002),
+   SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 
0xfff7, 0x00403000),
+   SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 
0xfff7, 0x00403000),
+   SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 
0xfff7, 0x00403000),
+   SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 
0xfff7, 0x00403000),
+   SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x03ff, 
0x03c0)
+};
+
 static const struct soc15_reg_golden golden_settings_sdma_rv1[] =
 {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 
0x0002),
@@ -141,6 +163,11 @@ static void sdma_v4_0_init_golden_registers(struct 
amdgpu_device *adev)
golden_settings_sdma_vg12,

ARRAY_SIZE(golden_settings_sdma_vg12));
break;
+   case CHIP_VEGA20:
+   soc15_program_register_sequence(adev,
+   golden_settings_sdma_4_2,
+   
ARRAY_SIZE(golden_settings_sdma_4_2));
+   break;
case CHIP_RAVEN:
soc15_program_register_sequence(adev,
 golden_settings_sdma_4_1,
-- 
2.13.6

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 13/57] drm/amdgpu/mmhub: Add clockgating support for vega20

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

Signed-off-by: Feifei Xu 
Reviewed-by: Alex Deucher 
Reviewed-by: Hawking Zhang 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c 
b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 2721403f1452..b7731d3438dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -752,6 +752,7 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA12:
+   case CHIP_VEGA20:
case CHIP_RAVEN:
mmhub_v1_0_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
-- 
2.13.6

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 12/57] drm/amdgpu/gmc9: Add vega20 support

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

Signed-off-by: Feifei Xu 
Reviewed-by: Alex Deucher 
Reviewed-by: Hawking Zhang 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 
b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 780a8fdb7369..e3ca62e409ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -773,6 +773,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_VEGA10:  /* all engines support GPUVM */
case CHIP_VEGA12:  /* all engines support GPUVM */
+   case CHIP_VEGA20:
default:
adev->gmc.gart_size = 512ULL << 20;
break;
@@ -878,6 +879,7 @@ static int gmc_v9_0_sw_init(void *handle)
break;
case CHIP_VEGA10:
case CHIP_VEGA12:
+   case CHIP_VEGA20:
/*
 * To fulfill 4-level page support,
 * vm size is 256TB (48bit), maximum size of Vega10,
@@ -995,6 +997,7 @@ static void gmc_v9_0_init_golden_registers(struct 
amdgpu_device *adev)
 
switch (adev->asic_type) {
case CHIP_VEGA10:
+   case CHIP_VEGA20:
soc15_program_register_sequence(adev,
golden_settings_mmhub_1_0_0,

ARRAY_SIZE(golden_settings_mmhub_1_0_0));
-- 
2.13.6

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 09/57] drm/amdgpu: Specify vega20 uvd firmware

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

Signed-off-by: Feifei Xu 
Reviewed-by: Alex Deucher 
Reviewed-by: Hawking Zhang 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 5 +
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index d8dd4028c2bb..869c909a96f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -70,6 +70,7 @@
 
 #define FIRMWARE_VEGA10"amdgpu/vega10_uvd.bin"
 #define FIRMWARE_VEGA12"amdgpu/vega12_uvd.bin"
+#define FIRMWARE_VEGA20"amdgpu/vega20_uvd.bin"
 
 #define mmUVD_GPCOM_VCPU_DATA0_VEGA10 (0x03c4 + 0x7e00)
 #define mmUVD_GPCOM_VCPU_DATA1_VEGA10 (0x03c5 + 0x7e00)
@@ -114,6 +115,7 @@ MODULE_FIRMWARE(FIRMWARE_VEGAM);
 
 MODULE_FIRMWARE(FIRMWARE_VEGA10);
 MODULE_FIRMWARE(FIRMWARE_VEGA12);
+MODULE_FIRMWARE(FIRMWARE_VEGA20);
 
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
 
@@ -177,6 +179,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
case CHIP_VEGAM:
fw_name = FIRMWARE_VEGAM;
break;
+   case CHIP_VEGA20:
+   fw_name = FIRMWARE_VEGA20;
+   break;
default:
return -EINVAL;
}
-- 
2.13.6

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 11/57] drm/amdgpu/virtual_dce: Add vega20 support

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

Signed-off-by: Feifei Xu 
Reviewed-by: Alex Deucher 
Reviewed-by: Hawking Zhang 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c 
b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index de7be3de0f41..dbf2ccd0c744 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -473,6 +473,7 @@ static int dce_virtual_hw_init(void *handle)
break;
case CHIP_VEGA10:
case CHIP_VEGA12:
+   case CHIP_VEGA20:
break;
default:
DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", 
adev->asic_type);
-- 
2.13.6

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 08/57] drm/amdgpu: Add vega20 ucode loading method

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

The same as vega10.

Signed-off-by: Feifei Xu 
Reviewed-by: Alex Deucher 
Reviewed-by: Hawking Zhang 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index ee71c40b3920..63e29969dbf6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -303,6 +303,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int 
load_type)
case CHIP_VEGA10:
case CHIP_RAVEN:
case CHIP_VEGA12:
+   case CHIP_VEGA20:
if (!load_type)
return AMDGPU_FW_LOAD_DIRECT;
else
-- 
2.13.6

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 05/57] drm/amdgpu: Add smu firmware support for vega20

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

Signed-off-by: Feifei Xu 
Reviewed-by: Alex Deucher 
Reviewed-by: Hawking Zhang 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c   | 3 +++
 drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 1 +
 2 files changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 5b3d3bf5b599..e950730f1933 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -400,6 +400,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device 
*cgs_device,
case CHIP_VEGA12:
strcpy(fw_name, "amdgpu/vega12_smc.bin");
break;
+   case CHIP_VEGA20:
+   strcpy(fw_name, "amdgpu/vega20_smc.bin");
+   break;
default:
DRM_ERROR("SMC firmware not supported\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c 
b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index ee236dfbf1d6..c9837935f0f5 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -45,6 +45,7 @@ MODULE_FIRMWARE("amdgpu/vegam_smc.bin");
 MODULE_FIRMWARE("amdgpu/vega10_smc.bin");
 MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin");
 MODULE_FIRMWARE("amdgpu/vega12_smc.bin");
+MODULE_FIRMWARE("amdgpu/vega20_smc.bin");
 
 int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
 {
-- 
2.13.6

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 06/57] drm/amdgpu/powerplay: Add initial vega20 support v2

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

Initial powerplay support the same as vega10 for now.

Signed-off-by: Feifei Xu 
Reviewed-by: Alex Deucher 
Reviewed-by: Hawking Zhang 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 71b42331f185..e63bc47dc715 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -151,6 +151,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
switch (hwmgr->chip_id) {
case CHIP_VEGA10:
+   case CHIP_VEGA20:
hwmgr->smumgr_funcs = _smu_funcs;
vega10_hwmgr_init(hwmgr);
break;
-- 
2.13.6

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 04/57] drm/amdgpu: set asic family for vega20.

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

Signed-off-by: Feifei Xu 
Reviewed-by: Alex Deucher 
Reviewed-by: Hawking Zhang 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index c0510a6b79c6..9c9be878aeb5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1535,6 +1535,7 @@ static int amdgpu_device_ip_early_init(struct 
amdgpu_device *adev)
 #endif
case CHIP_VEGA10:
case CHIP_VEGA12:
+   case CHIP_VEGA20:
case CHIP_RAVEN:
if (adev->asic_type == CHIP_RAVEN)
adev->family = AMDGPU_FAMILY_RV;
-- 
2.13.6

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 01/57] drm/amd: Add vega20_ip_offset.h headerfile for vega20.

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

This headerfile contains vega20's ip base addresses.

Signed-off-by: Feifei Xu 
Reviewed-by: Hawking Zhang 
Reviewed-by: Alex Deucher 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/include/vega20_ip_offset.h | 1050 
 1 file changed, 1050 insertions(+)
 create mode 100644 drivers/gpu/drm/amd/include/vega20_ip_offset.h

diff --git a/drivers/gpu/drm/amd/include/vega20_ip_offset.h 
b/drivers/gpu/drm/amd/include/vega20_ip_offset.h
new file mode 100644
index ..2da2d9790bac
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/vega20_ip_offset.h
@@ -0,0 +1,1050 @@
+/*
+ * Copyright (C) 2018  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 
LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _vega20_ip_offset_HEADER
+#define _vega20_ip_offset_HEADER
+
+#define MAX_INSTANCE   6
+#define MAX_SEGMENT6
+
+
+struct IP_BASE_INSTANCE
+{
+unsigned int segment[MAX_SEGMENT];
+};
+
+struct IP_BASE
+{
+struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
+};
+
+
+static const struct IP_BASE ATHUB_BASE={ { { { 0x0C20, 0, 0, 
0, 0, 0 } },
+{ { 0, 0, 0, 0, 0, 0 } },
+{ { 0, 0, 0, 0, 0, 0 } },
+{ { 0, 0, 0, 0, 0, 0 } },
+{ { 0, 0, 0, 0, 0, 0 } },
+{ { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE CLK_BASE={ { { { 0x00016C00, 
0x00016E00, 0x00017000, 0x00017200, 0x0001B000, 0x0001B200 } },
+{ { 0, 0, 0, 0, 0, 0 } },
+{ { 0, 0, 0, 0, 0, 0 } },
+{ { 0, 0, 0, 0, 0, 0 } },
+{ { 0, 0, 0, 0, 0, 0 } },
+{ { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE DCE_BASE={ { { { 0x0012, 
0x00C0, 0x34C0, 0, 0, 0 } },
+{ { 0, 0, 0, 0, 0, 0 } },
+{ { 0, 0, 0, 0, 0, 0 } },
+{ { 0, 0, 0, 0, 0, 0 } },
+{ { 0, 0, 0, 0, 0, 0 } },
+{ { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE DF_BASE={ { { { 0x7000, 0, 0, 0, 
0, 0 } },
+{ { 0, 0, 0, 0, 0, 0 } },
+{ { 0, 0, 0, 0, 0, 0 } },
+{ { 0, 0, 0, 0, 0, 0 } },
+{ { 0, 0, 0, 0, 0, 0 } },
+{ { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE FUSE_BASE={ { { { 0x00017400, 0, 0, 0, 
0, 0 } },
+{ { 0, 0, 0, 0, 0, 0 } },
+{ { 0, 0, 0, 0, 0, 0 } },
+{ { 0, 0, 0, 0, 0, 0 } },
+{ { 0, 0, 0, 0, 0, 0 } },
+{ { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE GC_BASE={ { { { 0x2000, 
0xA000, 0, 0, 0, 0 } },
+{ { 0, 0, 0, 0, 0, 0 } },
+{ { 0, 0, 0, 0, 0, 0 } },
+{ { 0, 0, 0, 0, 0, 0 } },
+{ { 0, 0, 0, 0, 0, 0 } },
+{ { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE HDP_BASE={ { { { 0x0F20, 0, 0, 0, 
0, 0 } },
+  

[PATCH 02/57] drm/amdgpu: Add vega20 to asic_type enum.

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

Add vega20 to amd_asic_type enum and amdgpu_asic_name[].

Signed-off-by: Feifei Xu 
Reviewed-by: Alex Deucher 
Reviewed-by: Hawking Zhang 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 +
 include/drm/amd_asic_type.h| 1 +
 2 files changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index b175178345aa..b78c1b9b182c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -86,6 +86,7 @@ static const char *amdgpu_asic_name[] = {
"VEGAM",
"VEGA10",
"VEGA12",
+   "VEGA20",
"RAVEN",
"LAST",
 };
diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h
index 695bde7eb055..dd63d08cc54e 100644
--- a/include/drm/amd_asic_type.h
+++ b/include/drm/amd_asic_type.h
@@ -47,6 +47,7 @@ enum amd_asic_type {
CHIP_VEGAM,
CHIP_VEGA10,
CHIP_VEGA12,
+   CHIP_VEGA20,
CHIP_RAVEN,
CHIP_LAST,
 };
-- 
2.13.6

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 03/57] drm/amdgpu: Add gpu_info firmware for vega20.

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

vega20_gpu_info firmware stores gpu configuration for vega20.

Signed-off-by: Feifei Xu 
Reviewed-by: Alex Deucher 
Reviewed-by: Hawking Zhang 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 
 1 file changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index b78c1b9b182c..c0510a6b79c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -60,6 +60,7 @@
 
 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/vega20_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
 
 #define AMDGPU_RESUME_MS   2000
@@ -1405,6 +1406,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct 
amdgpu_device *adev)
case CHIP_VEGA12:
chip_name = "vega12";
break;
+   case CHIP_VEGA20:
+   chip_name = "vega20";
+   break;
case CHIP_RAVEN:
chip_name = "raven";
break;
-- 
2.13.6

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 00/57] Vega20 support

2018-05-15 Thread Alex Deucher
This patch set adds initial support for vega20.

Alex Deucher (6):
  drm/amdgpu/atomfirmware: add new gfx_info data table v2.4 (v2)
  drm/amdgpu/atomfirmware: add parser for gfx_info table
  drm/amdgpu/vg20: fallback to vbios table if gpu info fw is not
available (v2)
  drm/amdgpu: drop gpu_info firmware for vega20
  drm/amdgpu: add df 3.6 headers
  drm/amdgpu: flag Vega20 as experimental

Evan Quan (1):
  drm/amd/powerplay: update vega20 cg flags

Feifei Xu (35):
  drm/amd: Add vega20_ip_offset.h headerfile for vega20.
  drm/amdgpu: Add vega20 to asic_type enum.
  drm/amdgpu: Add gpu_info firmware for vega20.
  drm/amdgpu: set asic family for vega20.
  drm/amdgpu: Add smu firmware support for vega20
  drm/amdgpu/powerplay: Add initial vega20 support v2
  drm/amdgpu/psp: Add initial psp support for vega20
  drm/amdgpu: Add vega20 ucode loading method
  drm/amdgpu: Specify vega20 uvd firmware
  drm/amdgpu: Specify vega20 vce firmware
  drm/amdgpu/virtual_dce: Add vega20 support
  drm/amdgpu/gmc9: Add vega20 support
  drm/amdgpu/mmhub: Add clockgating support for vega20
  drm/amdgpu/sdma4: Specify vega20 firmware
  drm/amdgpu/sdma4: Add vega20 golden settings (v3)
  drm/amdgpu/sdma4: Add clockgating support for vega20
  drm/amdgpu/gfx9: Add support for vega20 firmware
  drm/amdgpu/gfx9: Add vega20 golden settings (v3)
  drm/amdgpu/gfx9: Add gfx config for vega20. (v3)
  drm/amdgpu/gfx9: Add support for vega20
  drm/amdgpu/gfx9: Add clockgatting support for vega20
  drm/amdgpu/soc15:Add vega20 soc15_common_early_init support
  drm/amdgpu/soc15: Set common clockgating for vega20.
  drm/amdgpu/soc15: dynamic initialize ip offset for vega20
  drm/amdgpu/soc15: Add ip blocks for vega20 (v2)
  drm/amdgpu: Add nbio support for vega20 (v2)
  drm/amd/display/dm: Add vega20 support
  drm/amdgpu: Add vega20 to dc support check
  drm/amd/display: Remove COMBO_DISPLAY_PLL0 from Vega20
  drm/amdgpu: Set vega20 load_type to AMDGPU_FW_LOAD_DIRECT.
  drm/include: Fix MP1_BASE address for vega20
  drm/amdgpu: Disable ip modules that are not ready yet
  drm/amdgpu/df: implement df v3_6 callback functions (v2)
  drm/amdgpu: Switch to use df_v3_6_funcs for vega20 (v2)
  drm/amdgpu: Add vega20 pci ids

James Zhu (10):
  drm/amd/include/vg20: adjust VCE_BASE to reuse vce 4.0 header files
  drm/amdgpu/vg20:Restruct uvd to support multiple uvds
  drm/amdgpu/vg20:Restruct uvd.inst to support multiple instances
  drm/amdgpu/vg20:Restruct uvd.idle_work to support multiple instance
(v2)
  drm/amdgpu/vg20:increase 3 rings for AMDGPU_MAX_RINGS
  drm/amdgpu/vg20:Enable the 2nd instance for uvd
  drm/amdgpu/vg20:Add IH client ID for the 2nd UVD
  drm/amdgpu/vg20:Enable the 2nd instance IRQ for uvd 7.2
  drm/amdgpu/vg20:Enable 2nd instance queue maping for uvd 7.2
  drm/amdgpu/vg20:Enable UVD/VCE for Vega20

Jerry (Fangzhi) Zuo (3):
  drm/amd/display: Add Vega20 config. support
  drm/amd/display: Add BIOS smu_info v3_3 support for Vega20
  drm/amd/display: Add harvest IP support for Vega20

Roman Li (1):
  drm/amd: Add dce-12.1 gpio aux registers

Shaoyun Liu (1):
  drm/amdgpu: Add vega20 soc init sequence on emulator (v3)

 drivers/gpu/drm/amd/amdgpu/Makefile| 6 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c   |46 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h   | 1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c| 3 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c| 7 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c  | 6 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c|12 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c| 1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c  |14 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c   | 1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h   | 2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c  | 2 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c|   283 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h|26 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c| 5 +
 drivers/gpu/drm/amd/amdgpu/dce_virtual.c   | 1 +
 drivers/gpu/drm/amd/amdgpu/df_v3_6.c   |   116 +
 drivers/gpu/drm/amd/amdgpu/df_v3_6.h   |40 +
 drivers/gpu/drm/amd/amdgpu/emu_soc.c   | 10091 +++
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c  |60 +-
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c  | 3 +
 drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c| 1 +
 drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c |18 +-
 drivers/gpu/drm/amd/amdgpu/psp_v3_1.c  | 3 +
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c |33 +
 drivers/gpu/drm/amd/amdgpu/soc15.c |43 +-
 drivers/gpu/drm/amd/amdgpu/soc15.h | 1 +
 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c  |27 +-
 

Re: [PATCH 2/7] drm/amdgpu: add amdgpu module parameter for zfb

2018-05-15 Thread Christian König

On 15.05.2018 at 16:50, Alex Deucher wrote:

From: Feifei Xu 

Users can pass in an array to enable/disable the Zero Frame Buffer.
zfb[0] = zfb_size(MB), zfb[1] = zfb_phys_addr(MB).
If zfb_size > 0, zfb is enabled. Otherwise it is disabled.
Usage for example:
 modprobe amdgpu zfb=256,4096


I still vote for using the CMA instead of this hack for that.

Should be trivial to implement and is far less error prone than manually 
messing with memory configuration.
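
For illustration only, one possible shape of the CMA-backed alternative being
suggested here; this is not from any posted patch, the function name
amdgpu_zfb_alloc_cma() is made up, and dma_alloc_coherent() is assumed to be
backed by CMA for large contiguous requests:

	#include <linux/dma-mapping.h>

	static int amdgpu_zfb_alloc_cma(struct amdgpu_device *adev, size_t size)
	{
		dma_addr_t dma_addr;
		void *cpu_addr;

		/* contiguous "fake VRAM" handed out by the kernel instead of a
		 * raw, user-supplied physical address; a real implementation
		 * would keep cpu_addr so it can be freed with
		 * dma_free_coherent() on teardown */
		cpu_addr = dma_alloc_coherent(adev->dev, size, &dma_addr, GFP_KERNEL);
		if (!cpu_addr)
			return -ENOMEM;

		adev->gmc.zfb_phys_addr = dma_addr; /* bus address, assumed equal to physical here */
		adev->gmc.zfb_size = size;
		return 0;
	}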


Christian.



Signed-off-by: Feifei Xu 
Acked-by: John Bridgman 
Reviewed-by: Alex Deucher 
Signed-off-by: Alex Deucher 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 +
  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 4 
  2 files changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 2d7500921c0b..dc55b73cbeed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -130,6 +130,7 @@ extern int amdgpu_compute_multipipe;
  extern int amdgpu_gpu_recovery;
  extern int amdgpu_emu_mode;
  extern uint amdgpu_smu_memory_pool_size;
+extern ulong amdgpu_zfb[];
  
  #ifdef CONFIG_DRM_AMDGPU_SI

  extern int amdgpu_si_support;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 739e7e09c8b0..a7a81ddab721 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -134,6 +134,7 @@ int amdgpu_compute_multipipe = -1;
  int amdgpu_gpu_recovery = -1; /* auto */
  int amdgpu_emu_mode = 0;
  uint amdgpu_smu_memory_pool_size = 0;
+ulong amdgpu_zfb[2] = {0,4096UL}; /* {0,0x1} */
  
  MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");

  module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -292,6 +293,9 @@ module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 
0444);
  MODULE_PARM_DESC(emu_mode, "Emulation mode, (1 = enable, 0 = disable)");
  module_param_named(emu_mode, amdgpu_emu_mode, int, 0444);
  
+MODULE_PARM_DESC(zfb,

+"Enable Zero Frame Buffer feature (zfb will be set like ,(zfb_size 
MB,zfb_phys_addr MB),default disabled)");
+module_param_array_named(zfb, amdgpu_zfb, ulong, NULL, 0444);
  #ifdef CONFIG_DRM_AMDGPU_SI
  
  #if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 7/7] drm/amdgpu: program system bit for pte/pde when ZFB is enabled

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

Signed-off-by: Hawking Zhang 
Signed-off-by: Feifei Xu 
Acked-by: John Bridgman 
Reviewed-by: Alex Deucher 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 2 ++
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c| 6 ++
 drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c  | 2 ++
 3 files changed, 10 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 3689f1d43685..6b172caa88f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -44,6 +44,8 @@ static void gfxhub_v1_0_init_gart_pt_regs(struct 
amdgpu_device *adev)
+ adev->vm_manager.vram_base_offset;
value &= 0xF000ULL;
value |= 0x1; /*valid bit*/
+   if (adev->gmc.zfb_size > 0)
+   value |= 0x2; /*system bit*/
 
WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
 lower_32_bits(value));
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 
b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 65aa28f14153..780a8fdb7369 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -472,6 +472,9 @@ static uint64_t gmc_v9_0_get_vm_pte_flags(struct 
amdgpu_device *adev,
if (flags & AMDGPU_VM_PAGE_WRITEABLE)
pte_flag |= AMDGPU_PTE_WRITEABLE;
 
+   if (adev->gmc.zfb_size > 0)
+   pte_flag |= AMDGPU_PTE_SYSTEM;
+
switch (flags & AMDGPU_VM_MTYPE_MASK) {
case AMDGPU_VM_MTYPE_DEFAULT:
pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
@@ -507,6 +510,9 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, 
int level,
adev->gmc.vram_start;
BUG_ON(*addr & 0x003FULL);
 
+   if (adev->gmc.zfb_size > 0)
+   *flags |= AMDGPU_PTE_SYSTEM;
+
if (!adev->gmc.translate_further)
return;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c 
b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index dc0157093635..2721403f1452 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -54,6 +54,8 @@ static void mmhub_v1_0_init_gart_pt_regs(struct amdgpu_device 
*adev)
adev->vm_manager.vram_base_offset;
value &= 0xF000ULL;
value |= 0x1; /* valid bit */
+   if (adev->gmc.zfb_size > 0)
+   value |= 0x2; /* system bit*/
 
WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
 lower_32_bits(value));
-- 
2.13.6

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 5/7] drm/amdgpu: enable physical transaction for ptd/pde when ZFB is enabled

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

Signed-off-by: Hawking Zhang 
Signed-off-by: Feifei Xu 
Acked-by: John Bridgman 
Reviewed-by: Alex Deucher 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 9 +++--
 drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c  | 9 +++--
 2 files changed, 14 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index acfbd2d749cf..0d72f52a41b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -155,8 +155,13 @@ static void gfxhub_v1_0_init_cache_regs(struct 
amdgpu_device *adev)
WREG32_SOC15(GC, 0, mmVM_L2_CNTL3, tmp);
 
tmp = mmVM_L2_CNTL4_DEFAULT;
-   tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
-   tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
+   if (adev->gmc.zfb_size > 0) {
+   tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, 
VMC_TAP_PDE_REQUEST_PHYSICAL, 1);
+   tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, 
VMC_TAP_PTE_REQUEST_PHYSICAL, 1);
+   } else {
+   tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, 
VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
+   tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, 
VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
+   }
WREG32_SOC15(GC, 0, mmVM_L2_CNTL4, tmp);
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c 
b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 43f925773b57..e9289a6b61de 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -166,8 +166,13 @@ static void mmhub_v1_0_init_cache_regs(struct 
amdgpu_device *adev)
}
 
tmp = mmVM_L2_CNTL4_DEFAULT;
-   tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
-   tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
+   if (adev->gmc.zfb_size > 0) {
+   tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, 
VMC_TAP_PDE_REQUEST_PHYSICAL, 1);
+   tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, 
VMC_TAP_PTE_REQUEST_PHYSICAL, 1);
+   } else {
+   tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, 
VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
+   tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, 
VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
+   }
WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL4, tmp);
 }
 
-- 
2.13.6

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 6/7] drm/amdgpu: program AGP aperture as frame buffer when ZFB is enabled

2018-05-15 Thread Alex Deucher
From: Hawking Zhang 

Signed-off-by: Hawking Zhang 
Acked-by: John Bridgman 
Reviewed-by: Alex Deucher 
Signed-off-by: Feifei Xu 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 19 +++
 drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c  | 19 +++
 2 files changed, 30 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 0d72f52a41b6..3689f1d43685 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -71,10 +71,21 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct 
amdgpu_device *adev)
 {
uint64_t value;
 
-   /* Disable AGP. */
-   WREG32_SOC15(GC, 0, mmMC_VM_AGP_BASE, 0);
-   WREG32_SOC15(GC, 0, mmMC_VM_AGP_TOP, 0);
-   WREG32_SOC15(GC, 0, mmMC_VM_AGP_BOT, 0x);
+   if (adev->gmc.zfb_size > 0) {
+   /* Disable LFB */
+   WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_TOP, 0);
+   WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_BASE, 0x00FF);
+
+   /* Enable AGP */
+   WREG32_SOC15(GC, 0, mmMC_VM_AGP_BASE, adev->gmc.zfb_phys_addr 
>> 24);
+   WREG32_SOC15(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.vram_end >> 24);
+   WREG32_SOC15(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.vram_start >> 
24);
+   } else {
+   /* Disable AGP. */
+   WREG32_SOC15(GC, 0, mmMC_VM_AGP_BASE, 0);
+   WREG32_SOC15(GC, 0, mmMC_VM_AGP_TOP, 0);
+   WREG32_SOC15(GC, 0, mmMC_VM_AGP_BOT, 0x);
+   }
 
/* Program the system aperture low logical page number. */
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c 
b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index e9289a6b61de..dc0157093635 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -82,10 +82,21 @@ static void mmhub_v1_0_init_system_aperture_regs(struct 
amdgpu_device *adev)
uint64_t value;
uint32_t tmp;
 
-   /* Disable AGP. */
-   WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BASE, 0);
-   WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_TOP, 0);
-   WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BOT, 0x00FF);
+   if (adev->gmc.zfb_size > 0) {
+   /* Disable LFB */
+   WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP, 0);
+   WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE, 0x00FF);
+
+   /* Enable AGP */
+   WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BASE, 
adev->gmc.zfb_phys_addr >> 24);
+   WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_TOP, adev->gmc.vram_end >> 
24);
+   WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BOT, adev->gmc.vram_start >> 
24);
+   } else {
+   /* Disable AGP. */
+   WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BASE, 0);
+   WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_TOP, 0);
+   WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BOT, 0x00FF);
+   }
 
/* Program the system aperture low logical page number. */
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
-- 
2.13.6

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
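
A side note on the register programming above: the MC_VM_AGP_BASE/BOT/TOP
registers are written in 16 MB units, which is what the >> 24 expresses. A tiny
worked example, with a made-up helper name:

	/* 16 MB granularity: a 4096 MB (0x100000000) base address becomes 0x100 */
	static u32 amdgpu_addr_to_agp_units(u64 addr)
	{
		return (u32)(addr >> 24);
	}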


[PATCH 4/7] drm/amdgpu: user reserved zfb to init vram base offset and size

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

Signed-off-by: Feifei Xu 
Signed-off-by: Hawking Zhang 
Acked-by: John Bridgman 
Reviewed-by: Alex Deucher 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 27 +--
 1 file changed, 21 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 
b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 734306902e4e..65aa28f14153 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -693,7 +693,10 @@ static void gmc_v9_0_vram_gtt_location(struct 
amdgpu_device *adev,
amdgpu_device_vram_location(adev, &adev->gmc, base);
amdgpu_device_gart_location(adev, mc);
/* base offset of vram pages */
-   adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
+   if (adev->gmc.zfb_size > 0)
+   adev->vm_manager.vram_base_offset = adev->gmc.zfb_phys_addr;
+   else
+   adev->vm_manager.vram_base_offset = 
gfxhub_v1_0_get_mc_fb_offset(adev);
 }
 
 /**
@@ -724,8 +727,11 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
}
 
/* size in MB on si */
-   adev->gmc.mc_vram_size =
-   adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
+   if (adev->gmc.zfb_size > 0)
+   adev->gmc.mc_vram_size = adev->gmc.zfb_size;
+   else
+   adev->gmc.mc_vram_size =
+   adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
 
if (!(adev->flags & AMD_IS_APU)) {
@@ -733,12 +739,21 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
if (r)
return r;
}
-   adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
-   adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
+
+   if (adev->gmc.zfb_size > 0) {
+   adev->gmc.aper_base = adev->gmc.zfb_phys_addr;
+   adev->gmc.aper_size = adev->gmc.zfb_size;
+   } else {
+   adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
+   adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
+   }
 
 #ifdef CONFIG_X86_64
if (adev->flags & AMD_IS_APU) {
-   adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
+   if (adev->gmc.zfb_size > 0)
+   adev->gmc.aper_base = adev->gmc.zfb_phys_addr;
+   else
+   adev->gmc.aper_base = 
gfxhub_v1_0_get_mc_fb_offset(adev);
adev->gmc.aper_size = adev->gmc.real_vram_size;
}
 #endif
-- 
2.13.6

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 1/7] drm/amdgpu: add new member in amdgpu_mc for zfb support

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

Signed-off-by: Hawking Zhang 
Signed-off-by: Feifei Xu 
Acked-by: John Bridgman 
Reviewed-by: Alex Deucher 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 4 
 1 file changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 893c2490b783..c760b9ad69d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -107,6 +107,10 @@ struct amdgpu_gmc {
booltranslate_further;
 
const struct amdgpu_gmc_funcs   *gmc_funcs;
+
+   /* zero frame buffer */
+   u64 zfb_phys_addr;
+   u64 zfb_size;
 };
 
 #endif
-- 
2.13.6

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 0/7] ZFB support for amdgpu

2018-05-15 Thread Alex Deucher
This patch set adds Zero FrameBuffer (ZFB) support to amdgpu.  This
feature is used for emulators and early silicon before vram is
available.  System ram is used for "vram".

Feifei Xu (6):
  drm/amdgpu: add new member in amdgpu_mc for zfb support
  drm/amdgpu: add amdgpu module parameter for zfb
  drm/amdgpu: init zfb start address and size
  drm/amdgpu: user reserved zfb to init vram base offset and size
  drm/amdgpu: enable physical transaction for ptd/pde when ZFB is
enabled
  drm/amdgpu: program system bit for pte/pde when ZFB is enabled

Hawking Zhang (1):
  drm/amdgpu: program AGP aperture as frame buffer when ZFB is enabled

 drivers/gpu/drm/amd/amdgpu/amdgpu.h|  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  9 
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c|  4 
 drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h|  4 
 drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c   | 30 +--
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c  | 33 --
 drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c| 30 +--
 7 files changed, 93 insertions(+), 18 deletions(-)

-- 
2.13.6

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 2/7] drm/amdgpu: add amdgpu module parameter for zfb

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

Users can pass in an array to enable/disable the Zero Frame Buffer.
zfb[0] = zfb_size(MB), zfb[1] = zfb_phys_addr(MB).
If zfb_size > 0, zfb is enabled. Otherwise it is disabled.
Usage for example:
modprobe amdgpu zfb=256,4096
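
For reference, patch 3/7 in this series shifts both array entries left by 20
bits to convert from MB to bytes, so the example above resolves to the values
below (illustrative only, assuming a 64-bit build):

	adev->gmc.zfb_size      = 256UL << 20;  /* 0x10000000 bytes, 256 MB carve-out */
	adev->gmc.zfb_phys_addr = 4096UL << 20; /* 0x100000000, system RAM at the 4 GB mark */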

Signed-off-by: Feifei Xu 
Acked-by: John Bridgman 
Reviewed-by: Alex Deucher 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 4 
 2 files changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 2d7500921c0b..dc55b73cbeed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -130,6 +130,7 @@ extern int amdgpu_compute_multipipe;
 extern int amdgpu_gpu_recovery;
 extern int amdgpu_emu_mode;
 extern uint amdgpu_smu_memory_pool_size;
+extern ulong amdgpu_zfb[];
 
 #ifdef CONFIG_DRM_AMDGPU_SI
 extern int amdgpu_si_support;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 739e7e09c8b0..a7a81ddab721 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -134,6 +134,7 @@ int amdgpu_compute_multipipe = -1;
 int amdgpu_gpu_recovery = -1; /* auto */
 int amdgpu_emu_mode = 0;
 uint amdgpu_smu_memory_pool_size = 0;
+ulong amdgpu_zfb[2] = {0,4096UL}; /* {0,0x1} */
 
 MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
 module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -292,6 +293,9 @@ module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 
0444);
 MODULE_PARM_DESC(emu_mode, "Emulation mode, (1 = enable, 0 = disable)");
 module_param_named(emu_mode, amdgpu_emu_mode, int, 0444);
 
+MODULE_PARM_DESC(zfb,
+"Enable Zero Frame Buffer feature (zfb will be set like ,(zfb_size 
MB,zfb_phys_addr MB),default disabled)");
+module_param_array_named(zfb, amdgpu_zfb, ulong, NULL, 0444);
 #ifdef CONFIG_DRM_AMDGPU_SI
 
 #if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
-- 
2.13.6

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 3/7] drm/amdgpu: init zfb start address and size

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

Use module parameter passed from user to initialize zfb start address
and size.

Signed-off-by: Feifei Xu 
Signed-off-by: Hawking Zhang 
Acked-by: John Bridgman 
Reviewed-by: Alex Deucher 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 9 +
 1 file changed, 9 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 887f7c9e84e0..b175178345aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1010,6 +1010,15 @@ static void amdgpu_device_check_arguments(struct 
amdgpu_device *adev)
}
 
adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, 
amdgpu_fw_load_type);
+
+   if (amdgpu_zfb[0] > 0) {
+   dev_warn(adev->dev, "Zero Fram Buffer is enabled\n");
+   adev->gmc.zfb_phys_addr = amdgpu_zfb[1] << 20;
+   adev->gmc.zfb_size = amdgpu_zfb[0] << 20;
+   } else {
+   adev->gmc.zfb_phys_addr = 0;
+   adev->gmc.zfb_size = 0;
+   }
 }
 
 /**
-- 
2.13.6

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 4/4] drm/amdgpu: Drop the unused header files in soc15.c.

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

Signed-off-by: Feifei Xu 
Reviewed-by: Alex Deucher 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/soc15.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c 
b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 90065766fffb..f31df18fcb81 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -41,8 +41,6 @@
 #include "sdma1/sdma1_4_0_offset.h"
 #include "hdp/hdp_4_0_offset.h"
 #include "hdp/hdp_4_0_sh_mask.h"
-#include "mp/mp_9_0_offset.h"
-#include "mp/mp_9_0_sh_mask.h"
 #include "smuio/smuio_9_0_offset.h"
 #include "smuio/smuio_9_0_sh_mask.h"
 
-- 
2.13.6

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 2/4] drm/amd/powerplay: new framework to honour DAL clock limits

2018-05-15 Thread Alex Deucher
From: Evan Quan 

This is needed for vega12 and vega20, which do not support the legacy
power state interface. With this new framework, the DAL clock limits can
also be honored on these asics.
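
As an illustration of how an asic-specific backend would plug into this
framework (the callback name and body below are hypothetical and not part of
this patch):

	/* per-asic hook: clamp the requested DPM levels to the limits DAL passed in */
	static int vega12_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
	{
		/* would consult the stored DAL display configuration here */
		return 0;
	}

	static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
		/* ...existing callbacks... */
		.apply_clocks_adjust_rules = vega12_apply_clocks_adjust_rules,
	};

phm_apply_clock_adjust_rules() then reaches the hook through
hwmgr->hwmgr_func->apply_clocks_adjust_rules, as the hardwaremanager.c hunk
below shows.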

Signed-off-by: Evan Quan 
Reviewed-by: Alex Deucher 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 9 +
 drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c  | 7 +++
 drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h   | 2 ++
 drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 2 ++
 4 files changed, 20 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index e411012b3dcb..f5571e9fde26 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -132,6 +132,15 @@ int phm_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
return 0;
 }
 
+int phm_apply_clock_adjust_rules(struct pp_hwmgr *hwmgr)
+{
+   PHM_FUNC_CHECK(hwmgr);
+
+   if (hwmgr->hwmgr_func->apply_clocks_adjust_rules != NULL)
+   return hwmgr->hwmgr_func->apply_clocks_adjust_rules(hwmgr);
+   return 0;
+}
+
 int phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
 {
PHM_FUNC_CHECK(hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
index 308bff2b5d1d..2a2955c17d78 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
@@ -265,6 +265,13 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, 
bool skip,
if (skip)
return 0;
 
+   if (!hwmgr->ps)
+   /*
+* for vega12/vega20 which does not support power state manager
+* DAL clock limits should also be honoured
+*/
+   phm_apply_clock_adjust_rules(hwmgr);
+
phm_display_configuration_changed(hwmgr);
 
if (hwmgr->ps)
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h 
b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index 9bb87857a20f..e029555dfc2d 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -410,6 +410,8 @@ extern int phm_apply_state_adjust_rules(struct pp_hwmgr 
*hwmgr,
   struct pp_power_state *adjusted_ps,
 const struct pp_power_state *current_ps);
 
+extern int phm_apply_clock_adjust_rules(struct pp_hwmgr *hwmgr);
+
 extern int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum 
amd_dpm_forced_level level);
 extern int phm_display_configuration_changed(struct pp_hwmgr *hwmgr);
 extern int phm_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr 
*hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h 
b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 3c321c7d9626..9b6c6af869a6 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -229,6 +229,8 @@ struct pp_hwmgr_func {
struct pp_power_state  *prequest_ps,
const struct pp_power_state *pcurrent_ps);
 
+   int (*apply_clocks_adjust_rules)(struct pp_hwmgr *hwmgr);
+
int (*force_dpm_level)(struct pp_hwmgr *hw_mgr,
enum amd_dpm_forced_level level);
 
-- 
2.13.6

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 3/4] drm/amd/powerplay: add a framework for performing pre display configuration change settings

2018-05-15 Thread Alex Deucher
From: Evan Quan 

Signed-off-by: Evan Quan 
Reviewed-by: Alex Deucher 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 10 ++
 drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c  |  2 ++
 drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h   |  1 +
 drivers/gpu/drm/amd/powerplay/inc/hwmgr.h |  1 +
 4 files changed, 14 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index f5571e9fde26..a0bb921fac22 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -170,6 +170,16 @@ int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr)
return 0;
 }
 
+int phm_pre_display_configuration_changed(struct pp_hwmgr *hwmgr)
+{
+   PHM_FUNC_CHECK(hwmgr);
+
+   if (NULL != hwmgr->hwmgr_func->pre_display_config_changed)
+   hwmgr->hwmgr_func->pre_display_config_changed(hwmgr);
+
+   return 0;
+
+}
 
 int phm_display_configuration_changed(struct pp_hwmgr *hwmgr)
 {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
index 2a2955c17d78..0af13c154328 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
@@ -272,6 +272,8 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, 
bool skip,
 */
phm_apply_clock_adjust_rules(hwmgr);
 
+   phm_pre_display_configuration_changed(hwmgr);
+
phm_display_configuration_changed(hwmgr);
 
if (hwmgr->ps)
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h 
b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index e029555dfc2d..a202247c9894 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -413,6 +413,7 @@ extern int phm_apply_state_adjust_rules(struct pp_hwmgr 
*hwmgr,
 extern int phm_apply_clock_adjust_rules(struct pp_hwmgr *hwmgr);
 
 extern int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum 
amd_dpm_forced_level level);
+extern int phm_pre_display_configuration_changed(struct pp_hwmgr *hwmgr);
 extern int phm_display_configuration_changed(struct pp_hwmgr *hwmgr);
 extern int phm_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr 
*hwmgr);
 extern int phm_register_irq_handlers(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h 
b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 9b6c6af869a6..b99fb8ac822c 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -254,6 +254,7 @@ struct pp_hwmgr_func {
const void *state);
int (*enable_clock_power_gating)(struct pp_hwmgr *hwmgr);
int (*notify_smc_display_config_after_ps_adjustment)(struct pp_hwmgr 
*hwmgr);
+   int (*pre_display_config_changed)(struct pp_hwmgr *hwmgr);
int (*display_config_changed)(struct pp_hwmgr *hwmgr);
int (*disable_clock_power_gating)(struct pp_hwmgr *hwmgr);
int (*update_clock_gatings)(struct pp_hwmgr *hwmgr,
-- 
2.13.6

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 1/4] drm/amdgpu/gfx9: Update golden setting for gfx9_0.

2018-05-15 Thread Alex Deucher
From: Feifei Xu 

Update golden_settings_gc_9_0[].

Signed-off-by: Feifei Xu 
Reviewed-by: Alex Deucher 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 17 +
 1 file changed, 5 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 2c5e2a41632e..2d9679076c47 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -72,29 +72,22 @@ MODULE_FIRMWARE("amdgpu/raven_rlc.bin");
 
 static const struct soc15_reg_golden golden_settings_gc_9_0[] =
 {
-   SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x0800, 0x0880),
-   SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x0800, 0x0880),
-   SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x0800, 0x0880),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00f, 0x0420),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x000f, 0x),
-   SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x0800, 0x0880),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x0003, 
0x82400024),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fff, 0x0001),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0xff0f, 
0x),
-   SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x0800, 
0x0880),
-   SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x0800, 
0x0880),
-   SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x0800, 
0x0880),
-   SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x0800, 
0x0880),
-   SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x0800, 
0x0880),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x1000, 0x1000),
-   SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x000f, 
0x01000107),
+   SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007, 
0x0800),
+   SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007, 
0x0800),
+   SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 
0x01ff, 0xff87),
+   SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 
0x01ff, 0xff8f),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x0300, 0x020a2000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfeef, 0x010b),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0x, 
0x4a2c0e68),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0x, 
0xb5d3f197),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 
0x1920),
-   SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x0fff, 
0x03ff),
-   SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x0800, 0x0880)
+   SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x0fff, 
0x03ff)
 };
 
 static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
-- 
2.13.6

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: iommu/amd: flush IOTLB for specific domains only (v2)

2018-05-15 Thread Joseph Salisbury
On 05/15/2018 04:03 AM, Nath, Arindam wrote:
> Adding Tom.
>
> Hi Joe,
>
> My original patch was never accepted. Tom and Joerg worked on another patch 
> series which was supposed to fix the issue in question in addition to do some 
> code cleanups. I believe their patches are already in the mainline. If I 
> remember correctly, one of the patches disabled PCI ATS for the graphics card 
> which was causing the issue.
>
> Do you still see the issue with latest mainline kernel?
>
> BR,
> Arindam
>
> -Original Message-
> From: Joseph Salisbury [mailto:joseph.salisb...@canonical.com] 
> Sent: Tuesday, May 15, 2018 1:17 AM
> To: Nath, Arindam 
> Cc: io...@lists.linux-foundation.org; Bridgman, John ; 
> j...@8bytes.org; amd-gfx@lists.freedesktop.org; dr...@endlessm.com; 
> stein...@gmail.com; Suthikulpanit, Suravee ; 
> Deucher, Alexander ; Kuehling, Felix 
> ; li...@endlessm.com; mic...@daenzer.net; 
> 1747...@bugs.launchpad.net
> Subject: iommu/amd: flush IOTLB for specific domains only (v2)
>
> Hello Arindam,
>
> There is a bug report[0] that you created a patch[1] for a while back. 
> However, the patch never landed in mainline.  There is a bug reporter in 
> Ubuntu[2] that is affected by this bug and is willing to test the patch.  I 
> attempted to build a test kernel with the patch, but it does not apply to 
> currently mainline cleanly.  Do you still think this patch may resolve this 
> bug?  If so, is there a version of your patch available that will apply to 
> current mainline?
>
> Thanks,
>
> Joe
>
> [0] https://bugs.freedesktop.org/show_bug.cgi?id=101029
> [1] https://patchwork.freedesktop.org/patch/157327/
> [2] http://pad.lv/1747463
>
Hi Arindam,

Thanks for the feedback.  Yes, the latest mainline kernel was tested,
and it is reported in the Ubuntu kernel bug[0] that the issue still happens. 
Is there any specific diagnostic info we can collect that might help?

Thanks,

Joe

[0] http://pad.lv/1747463
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amd/amdgpu: Code comments for the amdgpu_ttm.c driver. (v2)

2018-05-15 Thread Tom St Denis
NFC just comments.

(v2):  Updated based on feedback from Alex Deucher.

Signed-off-by: Tom St Denis 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 347 +++-
 1 file changed, 340 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index dfd22db13fb1..2eaaa1fb7b59 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -63,16 +63,44 @@ static void amdgpu_ttm_debugfs_fini(struct amdgpu_device 
*adev);
 /*
  * Global memory.
  */
+
+/**
+ * amdgpu_ttm_mem_global_init - Initialize and acquire reference to
+ * memory object
+ *
+ * @ref: Object for initialization.
+ *
+ * This is called by drm_global_item_ref() when an object is being
+ * initialized.
+ */
 static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref)
 {
return ttm_mem_global_init(ref->object);
 }
 
+/**
+ * amdgpu_ttm_mem_global_release - Drop reference to a memory object
+ *
+ * @ref: Object being removed
+ *
+ * This is called by drm_global_item_unref() when an object is being
+ * released.
+ */
 static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
 {
ttm_mem_global_release(ref->object);
 }
 
+/**
+ * amdgpu_ttm_global_init - Initialize global TTM memory reference
+ * structures.
+ *
+ * @adev:  AMDGPU device for which the global structures need to be
+ * registered.
+ *
+ * This is called as part of the AMDGPU ttm init from amdgpu_ttm_init()
+ * during bring up.
+ */
 static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
 {
struct drm_global_reference *global_ref;
@@ -80,7 +108,9 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
struct drm_sched_rq *rq;
int r;
 
+   /* ensure reference is false in case init fails */
adev->mman.mem_global_referenced = false;
+
global_ref = &adev->mman.mem_global_ref;
global_ref->global_type = DRM_GLOBAL_TTM_MEM;
global_ref->size = sizeof(struct ttm_mem_global);
@@ -146,6 +176,18 @@ static int amdgpu_invalidate_caches(struct ttm_bo_device 
*bdev, uint32_t flags)
return 0;
 }
 
+/**
+ * amdgpu_init_mem_type -  Initialize a memory manager for a specific
+ * type of memory request.
+ *
+ * @bdev:  The TTM BO device object (contains a reference to
+ * amdgpu_device)
+ * @type:  The type of memory requested
+ * @man:   The memory type manager to initialize
+ *
+ * This is called by ttm_bo_init_mm() when a buffer object is being
+ * initialized.
+ */
 static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
 {
@@ -161,6 +203,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, 
uint32_t type,
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_TT:
+   /* GTT memory  */
man->func = &amdgpu_gtt_mgr_func;
man->gpu_offset = adev->gmc.gart_start;
man->available_caching = TTM_PL_MASK_CACHING;
@@ -193,6 +236,14 @@ static int amdgpu_init_mem_type(struct ttm_bo_device 
*bdev, uint32_t type,
return 0;
 }
 
+/**
+ * amdgpu_evict_flags - Compute placement flags
+ *
+ * @bo: The buffer object to evict
+ * @placement: Possible destination(s) for evicted BO
+ *
+ * Fill in placement data when ttm_bo_evict() is called
+ */
 static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
 {
@@ -204,12 +255,14 @@ static void amdgpu_evict_flags(struct ttm_buffer_object 
*bo,
.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
};
 
+   /* Don't handle scatter gather BOs */
if (bo->type == ttm_bo_type_sg) {
placement->num_placement = 0;
placement->num_busy_placement = 0;
return;
}
 
+   /* Object isn't an AMDGPU object so ignore */
if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
placement->placement = &placements;
placement->busy_placement = &placements;
@@ -217,10 +270,12 @@ static void amdgpu_evict_flags(struct ttm_buffer_object 
*bo,
placement->num_busy_placement = 1;
return;
}
+
abo = ttm_to_amdgpu_bo(bo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
if (!adev->mman.buffer_funcs_enabled) {
+   /* Move to system memory */
			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		} else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
@@ -238,6 +293,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
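
For readers who have not touched the drm_global machinery these new comments describe: drm_global_item_ref() runs the init() callback only when the first user takes a reference, and drm_global_item_unref() runs release() when the last user drops it, so drivers end up sharing a single global TTM memory object. A minimal userspace sketch of that reference-counting pattern (the type and function names below are illustrative stand-ins, not the real DRM/TTM API) could look like this:

```
#include <stdio.h>

/* Userspace sketch of the first-ref-initializes / last-unref-releases
 * pattern described above.  All names are illustrative, not the real
 * DRM/TTM types or functions. */
struct global_ref {
	int refcount;
	void *object;
	int  (*init)(struct global_ref *ref);
	void (*release)(struct global_ref *ref);
};

static int item_ref(struct global_ref *ref)
{
	if (ref->refcount++ == 0)
		return ref->init(ref);	/* first user initializes the object */
	return 0;
}

static void item_unref(struct global_ref *ref)
{
	if (--ref->refcount == 0)
		ref->release(ref);	/* last user tears it down */
}

static int mem_global_init(struct global_ref *ref)
{
	printf("init shared object %p\n", ref->object);
	return 0;
}

static void mem_global_release(struct global_ref *ref)
{
	printf("release shared object %p\n", ref->object);
}

int main(void)
{
	static int shared_stub;		/* stands in for the shared memory object */
	struct global_ref ref = {
		.object  = &shared_stub,
		.init    = mem_global_init,
		.release = mem_global_release,
	};

	item_ref(&ref);		/* first reference -> init runs   */
	item_ref(&ref);		/* second reference -> no init    */
	item_unref(&ref);
	item_unref(&ref);	/* last reference -> release runs */
	return 0;
}
```

The in-kernel helpers additionally serialize the refcount with a mutex, which is why the init/release callbacks registered by the patch above can stay this simple.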

Re: iommu/amd: flush IOTLB for specific domains only (v2)

2018-05-15 Thread Tom Lendacky
On 5/15/2018 7:34 AM, Nath, Arindam wrote:
> 
> 
>> -Original Message-
>> From: Joseph Salisbury [mailto:joseph.salisb...@canonical.com]
>> Sent: Tuesday, May 15, 2018 5:40 PM
>> To: Nath, Arindam 
>> Cc: io...@lists.linux-foundation.org; Bridgman, John
>> ; j...@8bytes.org; amd-
>> g...@lists.freedesktop.org; dr...@endlessm.com; stein...@gmail.com;
>> Suthikulpanit, Suravee ; Deucher,
>> Alexander ; Kuehling, Felix
>> ; li...@endlessm.com; mic...@daenzer.net;
>> 1747...@bugs.launchpad.net; Lendacky, Thomas
>> 
>> Subject: Re: iommu/amd: flush IOTLB for specific domains only (v2)
>>
>> On 05/15/2018 04:03 AM, Nath, Arindam wrote:
>>> Adding Tom.
>>>
>>> Hi Joe,
>>>
>>> My original patch was never accepted. Tom and Joerg worked on another
>> patch series which was supposed to fix the issue in question in addition to
>> doing some code cleanups. I believe their patches are already in the mainline. If I
>> remember correctly, one of the patches disabled PCI ATS for the graphics
>> card which was causing the issue.
>>>
>>> Do you still see the issue with latest mainline kernel?
>>>
>>> BR,
>>> Arindam
>>>
>>> -Original Message-
>>> From: Joseph Salisbury [mailto:joseph.salisb...@canonical.com]
>>> Sent: Tuesday, May 15, 2018 1:17 AM
>>> To: Nath, Arindam 
>>> Cc: io...@lists.linux-foundation.org; Bridgman, John
>>> ; j...@8bytes.org;
>>> amd-gfx@lists.freedesktop.org; dr...@endlessm.com;
>> stein...@gmail.com;
>>> Suthikulpanit, Suravee ; Deucher,
>>> Alexander ; Kuehling, Felix
>>> ; li...@endlessm.com; mic...@daenzer.net;
>>> 1747...@bugs.launchpad.net
>>> Subject: iommu/amd: flush IOTLB for specific domains only (v2)
>>>
>>> Hello Arindam,
>>>
>>> There is a bug report[0] that you created a patch[1] for a while back.
>> However, the patch never landed in mainline.  There is a bug reporter in
>> Ubuntu[2] that is affected by this bug and is willing to test the patch.  I
>> attempted to build a test kernel with the patch, but it does not apply
>> cleanly to current mainline.  Do you still think this patch may resolve this
>> bug?  If so, is there a version of your patch available that will apply to
>> current mainline?
>>>
>>> Thanks,
>>>
>>> Joe
>>>
>>> [0] https://bugs.freedesktop.org/show_bug.cgi?id=101029
>>> [1] https://patchwork.freedesktop.org/patch/157327/
>>> [2] http://pad.lv/1747463
>>>
>> Hi Arindam,
>>
>> Thanks for the feedback.  Yes, the latest mainline kernel was tested, and it is
>> reported the bug still happens in the Ubuntu kernel bug[0]. Is there any
>> specific diagnostic info we can collect that might help?
> 
> Joe, I believe all the information needed is already provided in [2]. Let us 
> wait for inputs from Tom and Joerg.
> 
> I could take a look at the issue locally, but it will take me a really 
> long time since I am occupied with other assignments right now.

I don't see anything in the bug that indicates the latest mainline kernel
was tested.  The patches/fixes in question are part of the 4.13 kernel, I
only see references to 4.10 kernels so I wouldn't expect the issue to be
resolved unless the patches from 4.13 were backported to the Ubuntu 4.10
kernel.

Thanks,
Tom

> 
> BR,
> Arindam
> 
>>
>> Thanks,
>>
>> Joe
>>
>> [0] http://pad.lv/1747463
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: iommu/amd: flush IOTLB for specific domains only (v2)

2018-05-15 Thread Nath, Arindam


> -Original Message-
> From: Joseph Salisbury [mailto:joseph.salisb...@canonical.com]
> Sent: Tuesday, May 15, 2018 5:40 PM
> To: Nath, Arindam 
> Cc: io...@lists.linux-foundation.org; Bridgman, John
> ; j...@8bytes.org; amd-
> g...@lists.freedesktop.org; dr...@endlessm.com; stein...@gmail.com;
> Suthikulpanit, Suravee ; Deucher,
> Alexander ; Kuehling, Felix
> ; li...@endlessm.com; mic...@daenzer.net;
> 1747...@bugs.launchpad.net; Lendacky, Thomas
> 
> Subject: Re: iommu/amd: flush IOTLB for specific domains only (v2)
> 
> On 05/15/2018 04:03 AM, Nath, Arindam wrote:
> > Adding Tom.
> >
> > Hi Joe,
> >
> > My original patch was never accepted. Tom and Joerg worked on another
> patch series which was supposed to fix the issue in question in addition to doing
> some code cleanups. I believe their patches are already in the mainline. If I
> remember correctly, one of the patches disabled PCI ATS for the graphics
> card which was causing the issue.
> >
> > Do you still see the issue with latest mainline kernel?
> >
> > BR,
> > Arindam
> >
> > -Original Message-
> > From: Joseph Salisbury [mailto:joseph.salisb...@canonical.com]
> > Sent: Tuesday, May 15, 2018 1:17 AM
> > To: Nath, Arindam 
> > Cc: io...@lists.linux-foundation.org; Bridgman, John
> > ; j...@8bytes.org;
> > amd-gfx@lists.freedesktop.org; dr...@endlessm.com;
> stein...@gmail.com;
> > Suthikulpanit, Suravee ; Deucher,
> > Alexander ; Kuehling, Felix
> > ; li...@endlessm.com; mic...@daenzer.net;
> > 1747...@bugs.launchpad.net
> > Subject: iommu/amd: flush IOTLB for specific domains only (v2)
> >
> > Hello Arindam,
> >
> > There is a bug report[0] that you created a patch[1] for a while back.
> However, the patch never landed in mainline.  There is a bug reporter in
> Ubuntu[2] that is affected by this bug and is willing to test the patch.  I
> attempted to build a test kernel with the patch, but it does not apply
> cleanly to current mainline.  Do you still think this patch may resolve this
> bug?  If so, is there a version of your patch available that will apply to
> current mainline?
> >
> > Thanks,
> >
> > Joe
> >
> > [0] https://bugs.freedesktop.org/show_bug.cgi?id=101029
> > [1] https://patchwork.freedesktop.org/patch/157327/
> > [2] http://pad.lv/1747463
> >
> Hi Arindam,
> 
> Thanks for the feedback.  Yes, the latest mainline kernel was tested, and it is
> reported the bug still happens in the Ubuntu kernel bug[0]. Is there any
> specific diagnostic info we can collect that might help?

Joe, I believe all the information needed is already provided in [2]. Let us 
wait for inputs from Tom and Joerg.

I could take a look at the issue locally, but it will take me a really long 
time since I am occupied with other assignments right now.

BR,
Arindam

> 
> Thanks,
> 
> Joe
> 
> [0] http://pad.lv/1747463
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 04/21] drm/amdgpu: Add GFXv9 kfd2kgd interface functions

2018-05-15 Thread Dave Airlie
> +static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
> +   uint32_t queue_id, uint32_t __user *wptr,
> +   uint32_t wptr_shift, uint32_t wptr_mask,
> +   struct mm_struct *mm)
> +{
> +   struct amdgpu_device *adev = get_amdgpu_device(kgd);
> +   struct v9_mqd *m;
> +   uint32_t *mqd_hqd;
> +   uint32_t reg, hqd_base, data;
> +
> +   m = get_mqd(mqd);
> +
> +   acquire_queue(kgd, pipe_id, queue_id);
> +
> +   /* HIQ is set during driver init period with vmid set to 0*/
> +   if (m->cp_hqd_vmid == 0) {
> +   uint32_t value, mec, pipe;
> +
> +   mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
> +   pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
> +
> +   pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
> +   mec, pipe, queue_id);
> +   value = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS));
> +   value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
> +   ((mec << 5) | (pipe << 3) | queue_id | 0x80));
> +   WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), value);
> +   }
> +
> +   /* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
> +   mqd_hqd = &m->cp_mqd_base_addr_lo;
> +   hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
> +
> +   for (reg = hqd_base;
> +reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
> +   WREG32(reg, mqd_hqd[reg - hqd_base]);
> +
> +
> +   /* Activate doorbell logic before triggering WPTR poll. */
> +   data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
> +CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
> +   WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL), data);
> +
> +   if (wptr) {
> +   /* Don't read wptr with get_user because the user
> +* context may not be accessible (if this function
> +* runs in a work queue). Instead trigger a one-shot
> +* polling read from memory in the CP. This assumes
> +* that wptr is GPU-accessible in the queue's VMID via
> +* ATC or SVM. WPTR==RPTR before starting the poll so
> +* the CP starts fetching new commands from the right
> +* place.
> +*
> +* Guessing a 64-bit WPTR from a 32-bit RPTR is a bit
> +* tricky. Assume that the queue didn't overflow. The
> +* number of valid bits in the 32-bit RPTR depends on
> +* the queue size. The remaining bits are taken from
> +* the saved 64-bit WPTR. If the WPTR wrapped, add the
> +* queue size.
> +*/
> +   uint32_t queue_size =
> +   2 << REG_GET_FIELD(m->cp_hqd_pq_control,
> +  CP_HQD_PQ_CONTROL, QUEUE_SIZE);
> +   uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1);
> +
> +   if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr)
> +   guessed_wptr += queue_size;
> +   guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
> +   guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;
> +
> +   WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO),
> +  lower_32_bits(guessed_wptr));
> +   WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI),
> +  upper_32_bits(guessed_wptr));
> +   WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
> +  lower_32_bits((uint64_t)wptr));
> +   WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
> +  upper_32_bits((uint64_t)wptr));

 CC [M]  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.o
In file included from
/home/airlied/devel/kernel/dim/src/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c:30:0:
/home/airlied/devel/kernel/dim/src/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c:
In function ‘kgd_hqd_load’:
/home/airlied/devel/kernel/dim/src/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c:473:24:
warning: cast from pointer to integer of different size
[-Wpointer-to-int-cast]
  lower_32_bits((uint64_t)wptr));
^
/home/airlied/devel/kernel/dim/src/drivers/gpu/drm/amd/amdgpu/amdgpu.h:1666:53:
note: in definition of macro ‘WREG32’
 #define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), 0)
 ^
/home/airlied/devel/kernel/dim/src/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c:473:10:
note: in expansion of macro ‘lower_32_bits’
  lower_32_bits((uint64_t)wptr));
  ^
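
For what it's worth, the warning comes from casting a 32-bit user-space pointer straight to uint64_t; going through uintptr_t first is the usual way to keep the conversion well-defined and silence -Wpointer-to-int-cast. A small standalone sketch of that pattern (the lower_32_bits()/upper_32_bits() macros below are simplified stand-ins for the kernel helpers, and the variable names are made up):

```
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's lower_32_bits()/upper_32_bits()
 * helpers, here only to make the example self-contained. */
#define lower_32_bits(n) ((uint32_t)((n) & 0xffffffffULL))
#define upper_32_bits(n) ((uint32_t)((uint64_t)(n) >> 32))

int main(void)
{
	uint32_t ring_wptr = 0;
	uint32_t *wptr = &ring_wptr;	/* made-up stand-in for the user wptr */

	/* Casting the pointer straight to uint64_t is what triggers
	 * -Wpointer-to-int-cast on a 32-bit build, because the sizes differ.
	 * Converting through uintptr_t first keeps the conversion
	 * well-defined and warning-free. */
	uint64_t wptr_addr = (uint64_t)(uintptr_t)wptr;

	printf("poll addr lo=0x%08x hi=0x%08x\n",
	       lower_32_bits(wptr_addr), upper_32_bits(wptr_addr));
	return 0;
}
```

On a 64-bit build the two casts are equivalent; on a 32-bit build the intermediate uintptr_t keeps the compiler quiet and the upper half of the address simply ends up zero.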

RE: iommu/amd: flush IOTLB for specific domains only (v2)

2018-05-15 Thread Nath, Arindam
Adding Tom.

Hi Joe,

My original patch was never accepted. Tom and Joerg worked on another patch 
series which was supposed to fix the issue in question in addition to doing some 
code cleanups. I believe their patches are already in the mainline. If I 
remember correctly, one of the patches disabled PCI ATS for the graphics card 
which was causing the issue.

Do you still see the issue with latest mainline kernel?

BR,
Arindam

-Original Message-
From: Joseph Salisbury [mailto:joseph.salisb...@canonical.com] 
Sent: Tuesday, May 15, 2018 1:17 AM
To: Nath, Arindam 
Cc: io...@lists.linux-foundation.org; Bridgman, John ; 
j...@8bytes.org; amd-gfx@lists.freedesktop.org; dr...@endlessm.com; 
stein...@gmail.com; Suthikulpanit, Suravee ; 
Deucher, Alexander ; Kuehling, Felix 
; li...@endlessm.com; mic...@daenzer.net; 
1747...@bugs.launchpad.net
Subject: iommu/amd: flush IOTLB for specific domains only (v2)

Hello Arindam,

There is a bug report[0] that you created a patch[1] for a while back. However, 
the patch never landed in mainline.  There is a bug reporter in Ubuntu[2] that 
is affected by this bug and is willing to test the patch.  I attempted to build 
a test kernel with the patch, but it does not apply to currently mainline 
cleanly.  Do you still think this patch may resolve this bug?  If so, is there 
a version of your patch available that will apply to current mainline?

Thanks,

Joe

[0] https://bugs.freedesktop.org/show_bug.cgi?id=101029
[1] https://patchwork.freedesktop.org/patch/157327/
[2] http://pad.lv/1747463

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [4.17-rc4+ regression] X server does not start anymore with segmentation fault in `r600_dri.so`

2018-05-15 Thread Paul Menzel

On 2018-05-14 10:44, Michel Dänzer wrote:

On 2018-05-13 11:01 AM, Paul Menzel wrote:

Dear Linux AMD folks,


There is a regression introduced by a commit after Linux 4.17-rc4
causing the X.Org X server start to fail with the Radeon module loaded
on Debian Sid/unstable. The same Linux kernel build works with the
modesetting driver on the same system (no module *radeon* loaded) and
with i915 and the modesetting driver on a different system with Debian
9.4 (Stretch/stable).


[    16.263] xf86EnableIOPorts: failed to set IOPL for I/O (Operation
not permitted)
[…]
[    16.765] (EE) 0: /usr/lib/xorg/Xorg (xorg_backtrace+0x50) 
[0x5b4e60]
[    16.766] (EE) 1: /usr/lib/xorg/Xorg (0x40d000+0x1abd92) 
[0x5b8d92]

[    16.766] (EE) 2: linux-gate.so.1 (__kernel_rt_sigreturn+0x0)
[0xb7f2ad5c]
[    16.766] (EE) 3: /lib/i386-linux-gnu/libc.so.6
(0xb78a+0x140099) [0xb79e0099]
[    16.766] (EE) 4: /usr/lib/i386-linux-gnu/dri/r600_dri.so
(0xb62f9000+0x6698fd) [0xb69628fd]


Crashes in r600_dri.so => most likely a Mesa bug. Can you get a gdb
backtrace of the crash?


```
#0  0xb7f1bd45 in __kernel_vsyscall ()
#1  0xb78bd5b2 in __libc_signal_restore_set (set=0xbf93883c) at 
../sysdeps/unix/sysv/linux/nptl-signals.h:80

#2  __GI_raise (sig=6) at ../sysdeps/unix/sysv/linux/raise.c:48
#3  0xb78be9d1 in __GI_abort () at abort.c:79
#4  0x0061bf45 in OsAbort () at ../../../../os/utils.c:1361
#5  0x004ec96c in ddxGiveUp (error=EXIT_ERR_ABORT) at 
../../../../../../hw/xfree86/common/xf86Init.c:1011
#6  0x004eca05 in AbortDDX (error=EXIT_ERR_ABORT) at 
../../../../../../hw/xfree86/common/xf86Init.c:1055

#7  0x00621c6f in AbortServer () at ../../../../os/log.c:874
#8  0x00622654 in FatalError (f=0x650110 "Caught signal %d (%s). Server 
aborting\n") at ../../../../os/log.c:1015
#9  0x00618def in OsSigHandler (signo=11, sip=0xbf938b4c, 
unused=0xbf938bcc) at ../../../../os/osinit.c:154

#10 <signal handler called>
#11 __memcpy_ssse3 () at 
../sysdeps/i386/i686/multiarch/memcpy-ssse3.S:144
#12 0xb69518fd in memcpy (__len=48, __src=<optimized out>, 
__dest=<optimized out>) at 
/usr/include/i386-linux-gnu/bits/string_fortified.h:34
#13 r600_create_vertex_fetch_shader (ctx=0xf08e40, count=2, 
elements=0xbf93933c) at 
../../../../../src/gallium/drivers/r600/r600_asm.c:2701
#14 0xb67227a8 in util_blitter_create (pipe=<optimized out>) at 
../../../../src/gallium/auxiliary/util/u_blitter.c:299
#15 0xb6956ab3 in r600_create_context (screen=0xf07060, priv=0x0, 
flags=0) at ../../../../../src/gallium/drivers/r600/r600_pipe.c:217
#16 0xb6956e41 in r600_screen_create (ws=0xf05800, config=0xbf939548) at 
../../../../../src/gallium/drivers/r600/r600_pipe.c:767
#17 0xb6aa6942 in radeon_drm_winsys_create (fd=<optimized out>, 
config=<optimized out>, screen_create=<optimized out>) at 
../../../../../../src/gallium/winsys/radeon/drm/radeon_drm_winsys.c:835
#18 0xb631ae73 in pipe_r600_create_screen (fd=12, config=0xbf939548) at 
../../../../../src/gallium/auxiliary/target-helpers/drm_helper.h:144
#19 0xb67e893e in pipe_loader_create_screen (dev=0xf06780) at 
../../../../../src/gallium/auxiliary/pipe-loader/pipe_loader.c:137
#20 0xb66a4ba3 in dri2_init_screen (sPriv=0xf04be0) at 
../../../../../src/gallium/state_trackers/dri/dri2.c:2066
#21 0xb669fef8 in driCreateNewScreen2 (scrn=0, fd=11, 
extensions=0xb7233220, driver_extensions=0xb6ed0100 
, driver_configs=0xed5d70, data=0xed5c70) 
at ../../../../../../src/mesa/drivers/dri/common/dri_util.c:151

#22 0xb7228ef5 in ?? () from /usr/lib/i386-linux-gnu/libgbm.so.1
#23 0xb7229230 in ?? () from /usr/lib/i386-linux-gnu/libgbm.so.1
#24 0xb7226cd8 in gbm_create_device () from 
/usr/lib/i386-linux-gnu/libgbm.so.1
#25 0xb724ab04 in glamor_egl_init (scrn=0xed21c0, fd=11) at 
../../../../../../glamor/glamor_egl.c:764

#26 0xb731efef in ?? () from /usr/lib/xorg/modules/drivers/radeon_drv.so
#27 0xb7310b14 in ?? () from /usr/lib/xorg/modules/drivers/radeon_drv.so
#28 0x004ee677 in InitOutput (pScreenInfo=<optimized out>, 
argc=<optimized out>, argv=<optimized out>) at 
../../../../../../hw/xfree86/common/xf86Init.c:583
#29 0x004aab81 in dix_main (argc=8, argv=0xbf93a084, envp=0xbf93a0a8) at 
../../../../dix/main.c:197
#30 0x0049423a in main (argc=8, argv=0xbf93a084, envp=0xbf93a0a8) at 
../../../../dix/stubmain.c:34

```

Please find the full backtrace attached.


Kind regards,

Paul

#0  0xb7f1bd45 in __kernel_vsyscall ()
#1  0xb78bd5b2 in __libc_signal_restore_set (set=0xbf93883c) at 
../sysdeps/unix/sysv/linux/nptl-signals.h:80
#2  __GI_raise (sig=6) at ../sysdeps/unix/sysv/linux/raise.c:48
#3  0xb78be9d1 in __GI_abort () at abort.c:79
#4  0x0061bf45 in OsAbort () at ../../../../os/utils.c:1361
#5  0x004ec96c in ddxGiveUp (error=EXIT_ERR_ABORT) at 
../../../../../../hw/xfree86/common/xf86Init.c:1011
#6  0x004eca05 in AbortDDX (error=EXIT_ERR_ABORT) at 
../../../../../../hw/xfree86/common/xf86Init.c:1055
#7  0x00621c6f in AbortServer () at ../../../../os/log.c:874
#8  0x00622654 in FatalError (f=0x650110 "Caught signal %d (%s). Server 
aborting\n") at ../../../../os/log.c:1015
#9  0x00618def in OsSigHandler (signo=11, sip=0xbf938b4c, unused=0xbf938bcc) at