Re: [PATCH v3 04/13] drm/sched: Add DRM_SCHED_POLICY_SINGLE_ENTITY scheduling policy

2023-09-13 Thread kernel test robot
Hi Matthew,

kernel test robot noticed the following build warnings:

[auto build test WARNING on drm/drm-next]
[also build test WARNING on drm-exynos/exynos-drm-next 
drm-intel/for-linux-next-fixes drm-tip/drm-tip linus/master v6.6-rc1 
next-20230913]
[cannot apply to drm-misc/drm-misc-next drm-intel/for-linux-next]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:
https://github.com/intel-lab-lkp/linux/commits/Matthew-Brost/drm-sched-Add-drm_sched_submit_-helpers/20230912-102001
base:   git://anongit.freedesktop.org/drm/drm drm-next
patch link:
https://lore.kernel.org/r/20230912021615.2086698-5-matthew.brost%40intel.com
patch subject: [PATCH v3 04/13] drm/sched: Add DRM_SCHED_POLICY_SINGLE_ENTITY 
scheduling policy
reproduce: 
(https://download.01.org/0day-ci/archive/20230913/202309132041.76l2ukon-...@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot 
| Closes: 
https://lore.kernel.org/oe-kbuild-all/202309132041.76l2ukon-...@intel.com/

All warnings (new ones prefixed by >>):

>> Documentation/gpu/drm-mm:552: ./drivers/gpu/drm/scheduler/sched_main.c:52: 
>> WARNING: Enumerated list ends without a blank line; unexpected unindent.

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki


[PATCH v3 04/13] drm/sched: Add DRM_SCHED_POLICY_SINGLE_ENTITY scheduling policy

2023-09-11 Thread Matthew Brost
DRM_SCHED_POLICY_SINGLE_ENTITY creates a 1 to 1 relationship between
scheduler and entity. No priorities or run queue used in this mode.
Intended for devices with firmware schedulers.

v2:
  - Drop sched / rq union (Luben)
v3:
  - Don't pick entity if stopped in drm_sched_select_entity (Danilo)

Signed-off-by: Matthew Brost 
---
 drivers/gpu/drm/scheduler/sched_entity.c | 69 ++--
 drivers/gpu/drm/scheduler/sched_fence.c  |  2 +-
 drivers/gpu/drm/scheduler/sched_main.c   | 64 +++---
 include/drm/gpu_scheduler.h  |  8 +++
 4 files changed, 120 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/sched_entity.c 
b/drivers/gpu/drm/scheduler/sched_entity.c
index 65a972b52eda..1dec97caaba3 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -83,6 +83,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
memset(entity, 0, sizeof(struct drm_sched_entity));
INIT_LIST_HEAD(&entity->list);
entity->rq = NULL;
+   entity->single_sched = NULL;
entity->guilty = guilty;
entity->num_sched_list = num_sched_list;
entity->priority = priority;
@@ -90,8 +91,17 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
RCU_INIT_POINTER(entity->last_scheduled, NULL);
RB_CLEAR_NODE(&entity->rb_tree_node);
 
-   if(num_sched_list)
-   entity->rq = &sched_list[0]->sched_rq[entity->priority];
+   if (num_sched_list) {
+   if (sched_list[0]->sched_policy !=
+   DRM_SCHED_POLICY_SINGLE_ENTITY) {
entity->rq = &sched_list[0]->sched_rq[entity->priority];
+   } else {
+   if (num_sched_list != 1 || sched_list[0]->single_entity)
+   return -EINVAL;
+   sched_list[0]->single_entity = entity;
+   entity->single_sched = sched_list[0];
+   }
+   }
 
init_completion(&entity->entity_idle);
 
@@ -124,7 +134,8 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity 
*entity,
struct drm_gpu_scheduler **sched_list,
unsigned int num_sched_list)
 {
-   WARN_ON(!num_sched_list || !sched_list);
+   WARN_ON(!num_sched_list || !sched_list ||
+   !!entity->single_sched);
 
entity->sched_list = sched_list;
entity->num_sched_list = num_sched_list;
@@ -231,13 +242,15 @@ static void drm_sched_entity_kill(struct drm_sched_entity 
*entity)
 {
struct drm_sched_job *job;
struct dma_fence *prev;
+   bool single_entity = !!entity->single_sched;
 
-   if (!entity->rq)
+   if (!entity->rq && !single_entity)
return;
 
spin_lock(&entity->rq_lock);
entity->stopped = true;
-   drm_sched_rq_remove_entity(entity->rq, entity);
+   if (!single_entity)
+   drm_sched_rq_remove_entity(entity->rq, entity);
spin_unlock(&entity->rq_lock);
 
/* Make sure this entity is not used by the scheduler at the moment */
@@ -259,6 +272,20 @@ static void drm_sched_entity_kill(struct drm_sched_entity 
*entity)
dma_fence_put(prev);
 }
 
+/**
+ * drm_sched_entity_to_scheduler - Schedule entity to GPU scheduler
+ * @entity: scheduler entity
+ *
+ * Returns GPU scheduler for the entity
+ */
+struct drm_gpu_scheduler *
+drm_sched_entity_to_scheduler(struct drm_sched_entity *entity)
+{
+   bool single_entity = !!entity->single_sched;
+
+   return single_entity ? entity->single_sched : entity->rq->sched;
+}
+
 /**
  * drm_sched_entity_flush - Flush a context entity
  *
@@ -276,11 +303,12 @@ long drm_sched_entity_flush(struct drm_sched_entity 
*entity, long timeout)
struct drm_gpu_scheduler *sched;
struct task_struct *last_user;
long ret = timeout;
+   bool single_entity = !!entity->single_sched;
 
-   if (!entity->rq)
+   if (!entity->rq && !single_entity)
return 0;
 
-   sched = entity->rq->sched;
+   sched = drm_sched_entity_to_scheduler(entity);
/**
 * The client will not queue more IBs during this fini, consume existing
 * queued IBs or discard them on SIGKILL
@@ -373,7 +401,7 @@ static void drm_sched_entity_wakeup(struct dma_fence *f,
container_of(cb, struct drm_sched_entity, cb);
 
drm_sched_entity_clear_dep(f, cb);
-   drm_sched_wakeup_if_can_queue(entity->rq->sched);
+   drm_sched_wakeup_if_can_queue(drm_sched_entity_to_scheduler(entity));
 }
 
 /**
@@ -387,6 +415,8 @@ static void drm_sched_entity_wakeup(struct dma_fence *f,
 void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
   enum drm_sched_priority priority)
 {
+   WARN_ON(!!entity->single_sched);
+
spin_lock(&entity->rq_lock);
entity->priority = priority;
spin_unlock(&entity->rq_lock);
@@ -399,7 +429,7 @@