Re: [Intel-gfx] [PATCH] drm/i915: Use helpers for drm_mm_node booleans

2019-10-04 Thread Tvrtko Ursulin


On 04/10/2019 15:22, Chris Wilson wrote:

A subset of 71724f708997 ("drm/mm: Use helpers for drm_mm_node booleans")
in order to prepare drm-intel-next-queued for subsequent patches before
we can backmerge 71724f708997 itself.

Signed-off-by: Chris Wilson 
Cc: Tvrtko Ursulin 
---
  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c |  4 ++--
  drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c   |  2 +-
  drivers/gpu/drm/i915/i915_gem.c| 12 ++--
  drivers/gpu/drm/i915/i915_gem_evict.c  |  2 +-
  drivers/gpu/drm/i915/i915_vma.c|  2 +-
  drivers/gpu/drm/i915/i915_vma.h|  4 ++--
  6 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 
b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 8fbb454cfd6b..228ce24ea280 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -968,7 +968,7 @@ static void reloc_cache_reset(struct reloc_cache *cache)
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
io_mapping_unmap_atomic((void __iomem *)vaddr);
  
-		if (cache->node.allocated) {
+		if (drm_mm_node_allocated(&cache->node)) {
			ggtt->vm.clear_range(&ggtt->vm,
					     cache->node.start,
					     cache->node.size);
@@ -1061,7 +1061,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
}
  
  	offset = cache->node.start;

-	if (cache->node.allocated) {
+	if (drm_mm_node_allocated(&cache->node)) {
		ggtt->vm.insert_page(&ggtt->vm,
				     i915_gem_object_get_dma_address(obj, page),
				     offset, I915_CACHE_NONE, 0);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c 
b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index bb878119f06c..bb4889d2346d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -387,7 +387,7 @@ static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw, 
struct i915_ggtt *ggtt)
  {
	struct drm_mm_node *node = &ggtt->uc_fw;
  
-	GEM_BUG_ON(!node->allocated);

+   GEM_BUG_ON(!drm_mm_node_allocated(node));
GEM_BUG_ON(upper_32_bits(node->start));
GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));
  
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c

index 1426e506700d..fa8e028ac0b5 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -356,7 +356,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_unlock;
-		GEM_BUG_ON(!node.allocated);
+		GEM_BUG_ON(!drm_mm_node_allocated(&node));
	}

	mutex_unlock(&i915->drm.struct_mutex);

@@ -393,7 +393,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
unsigned page_offset = offset_in_page(offset);
unsigned page_length = PAGE_SIZE - page_offset;
page_length = remain < page_length ? remain : page_length;
-		if (node.allocated) {
+		if (drm_mm_node_allocated(&node)) {
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj,
									     offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
@@ -415,7 +415,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
i915_gem_object_unlock_fence(obj, fence);
  out_unpin:
	mutex_lock(&i915->drm.struct_mutex);
-	if (node.allocated) {
+	if (drm_mm_node_allocated(&node)) {
		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
		remove_mappable_node(&node);
} else {
@@ -566,7 +566,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_rpm;
-		GEM_BUG_ON(!node.allocated);
+		GEM_BUG_ON(!drm_mm_node_allocated(&node));
	}

	mutex_unlock(&i915->drm.struct_mutex);

@@ -604,7 +604,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
unsigned int page_offset = offset_in_page(offset);
unsigned int page_length = PAGE_SIZE - page_offset;
page_length = remain < page_length ? remain : page_length;
-		if (node.allocated) {
+		if (drm_mm_node_allocated(&node)) {
			/* flush the write before we modify the GGTT */
			intel_gt_flush_ggtt_writes(ggtt->vm.gt);
			ggtt->vm.insert_page(&ggtt->vm,
@@ -636,7 +636,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
  out_unpin:
	mutex_lock(&i915->drm.struct_mutex);
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
-	if (node.allocated) {
+	if (drm_mm_node_allocated(&node)) {

[Intel-gfx] [PATCH] drm/i915: Use helpers for drm_mm_node booleans

2019-10-04 Thread Chris Wilson
A subset of 71724f708997 ("drm/mm: Use helpers for drm_mm_node booleans")
in order to prepare drm-intel-next-queued for subsequent patches before
we can backmerge 71724f708997 itself.

Signed-off-by: Chris Wilson 
Cc: Tvrtko Ursulin 
---
 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c |  4 ++--
 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c   |  2 +-
 drivers/gpu/drm/i915/i915_gem.c| 12 ++--
 drivers/gpu/drm/i915/i915_gem_evict.c  |  2 +-
 drivers/gpu/drm/i915/i915_vma.c|  2 +-
 drivers/gpu/drm/i915/i915_vma.h|  4 ++--
 6 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 
b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 8fbb454cfd6b..228ce24ea280 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -968,7 +968,7 @@ static void reloc_cache_reset(struct reloc_cache *cache)
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
io_mapping_unmap_atomic((void __iomem *)vaddr);
 
-	if (cache->node.allocated) {
+	if (drm_mm_node_allocated(&cache->node)) {
		ggtt->vm.clear_range(&ggtt->vm,
				     cache->node.start,
				     cache->node.size);
@@ -1061,7 +1061,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
}
 
offset = cache->node.start;
-	if (cache->node.allocated) {
+	if (drm_mm_node_allocated(&cache->node)) {
		ggtt->vm.insert_page(&ggtt->vm,
				     i915_gem_object_get_dma_address(obj, page),
				     offset, I915_CACHE_NONE, 0);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c 
b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index bb878119f06c..bb4889d2346d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -387,7 +387,7 @@ static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw, 
struct i915_ggtt *ggtt)
 {
	struct drm_mm_node *node = &ggtt->uc_fw;
 
-   GEM_BUG_ON(!node->allocated);
+   GEM_BUG_ON(!drm_mm_node_allocated(node));
GEM_BUG_ON(upper_32_bits(node->start));
GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1426e506700d..fa8e028ac0b5 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -356,7 +356,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_unlock;
-		GEM_BUG_ON(!node.allocated);
+		GEM_BUG_ON(!drm_mm_node_allocated(&node));
	}

	mutex_unlock(&i915->drm.struct_mutex);
@@ -393,7 +393,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
unsigned page_offset = offset_in_page(offset);
unsigned page_length = PAGE_SIZE - page_offset;
page_length = remain < page_length ? remain : page_length;
-		if (node.allocated) {
+		if (drm_mm_node_allocated(&node)) {
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
@@ -415,7 +415,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
i915_gem_object_unlock_fence(obj, fence);
 out_unpin:
	mutex_lock(&i915->drm.struct_mutex);
-	if (node.allocated) {
+	if (drm_mm_node_allocated(&node)) {
		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
		remove_mappable_node(&node);
} else {
@@ -566,7 +566,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_rpm;
-		GEM_BUG_ON(!node.allocated);
+		GEM_BUG_ON(!drm_mm_node_allocated(&node));
	}

	mutex_unlock(&i915->drm.struct_mutex);
@@ -604,7 +604,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
unsigned int page_offset = offset_in_page(offset);
unsigned int page_length = PAGE_SIZE - page_offset;
page_length = remain < page_length ? remain : page_length;
-		if (node.allocated) {
+		if (drm_mm_node_allocated(&node)) {
			/* flush the write before we modify the GGTT */
			intel_gt_flush_ggtt_writes(ggtt->vm.gt);
			ggtt->vm.insert_page(&ggtt->vm,
@@ -636,7 +636,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 out_unpin:
	mutex_lock(&i915->drm.struct_mutex);
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
-	if (node.allocated) {
+	if (drm_mm_node_allocated(&node)) {