Re: [PATCH 3/3] drm/amdgpu: drop need_vm_flush/need_pipe_sync

2018-11-03 Thread Christian König

On 03.11.18 at 06:33, Monk Liu wrote:

> Use a single flags field to hold need_flush and need_pipe_sync.


NAK, if you want to save space then use "bool variable:1" instead.

Flags in structures are not as self-explanatory as named member variables.

Christian.
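
For reference, the alternative Christian suggests keeps named members but lets the
compiler pack each into a single bit. A minimal sketch (the struct name here is a
stand-in, not code from the thread):

#include <stdbool.h>

/* same space saving as a flags word, but each bit keeps its own name */
struct amdgpu_job_example {
	bool	vm_needs_flush : 1;	/* require a vm flush for this job */
	bool	need_pipe_sync : 1;	/* require a pipeline sync for this job */
};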



[PATCH 3/3] drm/amdgpu: drop need_vm_flush/need_pipe_sync

2018-11-02 Thread Monk Liu

Use a single flags field to hold need_flush and need_pipe_sync.

Signed-off-by: Monk Liu 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c|  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c   | 14 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c   |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.h   |  6 --
 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c   |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c|  4 ++--
 7 files changed, 19 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index ac7d2da..3231a3b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -170,7 +170,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if ((need_ctx_switch && amdgpu_sriov_vf(adev)) ||
amdgpu_vm_need_pipeline_sync(ring, job))
need_pipe_sync = true;
-   else if (job->need_pipe_sync)
+   else if (job->flags & JOB_NEED_PIPE_SYNC)
need_pipe_sync = true;
}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index df9b173..ed9b6c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -311,7 +311,9 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
dma_fence_put((*id)->flushed_updates);
(*id)->flushed_updates = dma_fence_get(updates);
}
-   job->vm_needs_flush = needs_flush;
+
+   if (needs_flush)
+   job->flags |= JOB_NEED_VM_FLUSH;
return 0;
 }
 
@@ -341,7 +343,8 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
struct dma_fence *updates = sync->last_vm_update;
int r;
 
-   job->vm_needs_flush = vm->use_cpu_for_update;
+   if (vm->use_cpu_for_update)
+   job->flags |= JOB_NEED_VM_FLUSH;
 
/* Check if we can use a VMID already assigned to this VM */
	list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) {
@@ -380,7 +383,8 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
(*id)->flushed_updates = dma_fence_get(updates);
}
 
-   job->vm_needs_flush |= needs_flush;
+   if (needs_flush)
+   job->flags |= JOB_NEED_VM_FLUSH;
return 0;
}
 
@@ -438,7 +442,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 
dma_fence_put(id->flushed_updates);
id->flushed_updates = dma_fence_get(updates);
-   job->vm_needs_flush = true;
+   job->flags |= JOB_NEED_VM_FLUSH;
}
 
	list_move_tail(&id->list, &id_mgr->ids_lru);
@@ -447,7 +451,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
id->pd_gpu_addr = job->vm_pd_addr;
id->owner = vm->entity.fence_context;
 
-   if (job->vm_needs_flush) {
+   if (job->flags & JOB_NEED_VM_FLUSH) {
dma_fence_put(id->last_flush);
id->last_flush = NULL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index dae997d..82e190d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -181,7 +181,7 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
 
	if (fence && need_pipe_sync && drm_sched_dependency_optimized(fence, s_entity)) {
trace_amdgpu_ib_pipe_sync(job, fence);
-   job->need_pipe_sync = true;
+   job->flags |= JOB_NEED_PIPE_SYNC;
}
 
while (fence == NULL && vm && !job->vmid) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index c1d00f0..f9e8ecd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -37,6 +37,9 @@
 
 struct amdgpu_fence;
 
+#define JOB_NEED_VM_FLUSH   1   /* require a vm flush for this job */
+#define JOB_NEED_PIPE_SYNC  2   /* require a pipeline sync for this job */
+
 struct amdgpu_job {
	struct drm_sched_job	base;
	struct amdgpu_vm	*vm;
@@ -46,7 +49,6 @@ struct amdgpu_job {
	uint32_t		preamble_status;
	uint32_t		num_ibs;
	void			*owner;
-	bool			vm_needs_flush;
	uint64_t		vm_pd_addr;
	unsigned		vmid;
	unsigned		pasid;
@@ -58,7 +60,7 @@ struct amdgpu_job {
/* user fence handling */
	uint64_t		uf_addr;
	uint64_t		uf_sequence;
-	bool			need_pipe_sync; /* require a pipeline sync for this job */
+	uint64_t		flags;
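
For readers skimming the diff: the patch replaces two booleans with bit operations
on a single field. A standalone sketch of that set/test pattern (simplified struct
and names, not the driver code):

#include <stdint.h>
#include <stdio.h>

#define JOB_NEED_VM_FLUSH   1	/* bit 0: require a vm flush */
#define JOB_NEED_PIPE_SYNC  2	/* bit 1: require a pipeline sync */

struct job { uint64_t flags; };

int main(void)
{
	struct job job = { .flags = 0 };

	job.flags |= JOB_NEED_VM_FLUSH;		/* set: vm flush required */

	if (job.flags & JOB_NEED_VM_FLUSH)
		printf("vm flush needed\n");
	if (!(job.flags & JOB_NEED_PIPE_SYNC))
		printf("no pipe sync needed\n");
	return 0;
}

Christian's objection above is that "job->flags & JOB_NEED_PIPE_SYNC" is less
self-documenting at the use site than "job->need_pipe_sync", while a bit-field
gives the same space saving without that cost.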