From: Dominik Kaszewski <[email protected]>
[Why]
Currently all of the preparation and execution of plane update is done
under a DC lock, blocking other code from accessing DC for longer than
strictly necessary.
[How]
Break the v3 update flow into 3 parts:
* prepare - locked, calculate update flow and modify DC state
* execute - unlocked, program hardware
* cleanup - locked, finalize DC state and free temp resources
The legacy v2 flow is too complicated to break down for now, so link the
new API with the old one by executing everything in the (slightly
misnamed) prepare stage.
V2:
Keep the new code structure, but point all users back at the old code,
until fully tested.
Reviewed-by: Nicholas Kazlauskas <[email protected]>
Signed-off-by: Dominik Kaszewski <[email protected]>
Signed-off-by: Roman Li <[email protected]>
Signed-off-by: Chenyu Chen <[email protected]>
---
drivers/gpu/drm/amd/display/dc/core/dc.c | 351 +++++++++++++++++-
.../gpu/drm/amd/display/dc/core/dc_stream.c | 31 +-
drivers/gpu/drm/amd/display/dc/dc_stream.h | 31 +-
3 files changed, 403 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index e0db791953a5..1be5c1c15798 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -3849,7 +3849,7 @@ static bool dc_dmub_should_send_dirty_rect_cmd(struct dc
*dc, struct dc_stream_s
void dc_dmub_update_dirty_rect(struct dc *dc,
int surface_count,
struct dc_stream_state *stream,
- struct dc_surface_update *srf_updates,
+ const struct dc_surface_update *srf_updates,
struct dc_state *context)
{
union dmub_rb_cmd cmd;
@@ -4154,7 +4154,7 @@ static void commit_planes_for_stream_fast(struct dc *dc,
}
static void commit_planes_for_stream(struct dc *dc,
- struct dc_surface_update *srf_updates,
+ const struct dc_surface_update *srf_updates,
int surface_count,
struct dc_stream_state *stream,
struct dc_stream_update *stream_update,
@@ -7175,3 +7175,350 @@ bool dc_get_qos_info(struct dc *dc, struct dc_qos_info
*info)
return true;
}
+
+enum update_v3_flow {
+ UPDATE_V3_FLOW_INVALID,
+ UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FAST,
+ UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FULL,
+ UPDATE_V3_FLOW_NEW_CONTEXT_SEAMLESS,
+ UPDATE_V3_FLOW_NEW_CONTEXT_MINIMAL_NEW,
+ UPDATE_V3_FLOW_NEW_CONTEXT_MINIMAL_CURRENT,
+};
+
+struct dc_update_scratch_space {
+ struct dc *dc;
+ struct dc_surface_update *surface_updates;
+ int surface_count;
+ struct dc_stream_state *stream;
+ struct dc_stream_update *stream_update;
+ bool update_v3;
+ bool do_clear_update_flags;
+ enum surface_update_type update_type;
+ struct dc_state *new_context;
+ enum update_v3_flow flow;
+ struct dc_state *backup_context;
+ struct dc_state *intermediate_context;
+ struct pipe_split_policy_backup intermediate_policy;
+ struct dc_surface_update intermediate_updates[MAX_SURFACES];
+ int intermediate_count;
+};
+
+size_t dc_update_scratch_space_size(void)
+{
+ return sizeof(struct dc_update_scratch_space);
+}
+
+static bool update_planes_and_stream_prepare_v2(
+ struct dc_update_scratch_space *scratch
+)
+{
+ // v2 is too tangled to break into stages, so just execute everything
under lock
+ dc_exit_ips_for_hw_access(scratch->dc);
+ return update_planes_and_stream_v2(
+ scratch->dc,
+ scratch->surface_updates,
+ scratch->surface_count,
+ scratch->stream,
+ scratch->stream_update
+ );
+}
+
+static void update_planes_and_stream_execute_v2(
+ const struct dc_update_scratch_space *scratch
+)
+{
+ // Nothing to do, see `update_planes_and_stream_prepare_v2`
+ (void) scratch;
+}
+
+static bool update_planes_and_stream_cleanup_v2(
+ const struct dc_update_scratch_space *scratch
+)
+{
+ if (scratch->do_clear_update_flags)
+ clear_update_flags(scratch->surface_updates,
scratch->surface_count, scratch->stream);
+
+ return false;
+}
+
+static void update_planes_and_stream_cleanup_v3_intermediate(
+ struct dc_update_scratch_space *scratch,
+ bool backup
+);
+
+static bool update_planes_and_stream_prepare_v3_intermediate_seamless(
+ struct dc_update_scratch_space *scratch
+)
+{
+ return is_pipe_topology_transition_seamless_with_intermediate_step(
+ scratch->dc,
+ scratch->dc->current_state,
+ scratch->intermediate_context,
+ scratch->new_context
+ );
+}
+
+static bool update_planes_and_stream_prepare_v3(
+ struct dc_update_scratch_space *scratch
+)
+{
+ dc_exit_ips_for_hw_access(scratch->dc);
+
+ if (!update_planes_and_stream_state(
+ scratch->dc,
+ scratch->surface_updates,
+ scratch->surface_count,
+ scratch->stream,
+ scratch->stream_update,
+ &scratch->update_type,
+ &scratch->new_context
+ )) {
+ return false;
+ }
+
+ if (scratch->new_context == scratch->dc->current_state) {
+ ASSERT(scratch->update_type < UPDATE_TYPE_FULL);
+
+ // TODO: Do we need this to be alive in execute?
+ struct dc_fast_update fast_update[MAX_SURFACES] = { 0 };
+
+ populate_fast_updates(
+ fast_update,
+ scratch->surface_updates,
+ scratch->surface_count,
+ scratch->stream_update
+ );
+ const bool fast = fast_update_only(
+ scratch->dc,
+ fast_update,
+ scratch->surface_updates,
+ scratch->surface_count,
+ scratch->stream_update,
+ scratch->stream
+ )
+ // TODO: Can this be used to skip `populate_fast_updates`?
+ &&
!scratch->dc->check_config.enable_legacy_fast_update;
+ scratch->flow = fast
+ ? UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FAST
+ : UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FULL;
+ return true;
+ }
+
+ ASSERT(scratch->update_type >= UPDATE_TYPE_FULL);
+
+ const bool seamless =
scratch->dc->hwss.is_pipe_topology_transition_seamless(
+ scratch->dc,
+ scratch->dc->current_state,
+ scratch->new_context
+ );
+ if (seamless) {
+ scratch->flow = UPDATE_V3_FLOW_NEW_CONTEXT_SEAMLESS;
+ return true;
+ }
+
+ scratch->intermediate_context = create_minimal_transition_state(
+ scratch->dc,
+ scratch->new_context,
+ &scratch->intermediate_policy
+ );
+ if (scratch->intermediate_context) {
+ if
(update_planes_and_stream_prepare_v3_intermediate_seamless(scratch)) {
+ scratch->flow = UPDATE_V3_FLOW_NEW_CONTEXT_MINIMAL_NEW;
+ return true;
+ }
+
+ update_planes_and_stream_cleanup_v3_intermediate(scratch,
false);
+ }
+
+ restore_planes_and_stream_state(&scratch->dc->scratch.current_state,
scratch->stream);
+ scratch->backup_context = scratch->dc->current_state;
+ dc_state_retain(scratch->backup_context);
+ scratch->intermediate_context = create_minimal_transition_state(
+ scratch->dc,
+ scratch->backup_context,
+ &scratch->intermediate_policy
+ );
+ if (scratch->intermediate_context) {
+ if
(update_planes_and_stream_prepare_v3_intermediate_seamless(scratch)) {
+ scratch->flow =
UPDATE_V3_FLOW_NEW_CONTEXT_MINIMAL_CURRENT;
+ scratch->intermediate_count =
initialize_empty_surface_updates(
+ scratch->stream,
scratch->intermediate_updates
+ );
+ return true;
+ }
+
+ update_planes_and_stream_cleanup_v3_intermediate(scratch, true);
+ }
+
+ scratch->flow = UPDATE_V3_FLOW_INVALID;
+ dc_state_release(scratch->backup_context);
+ restore_planes_and_stream_state(&scratch->dc->scratch.new_state,
scratch->stream);
+ return false;
+}
+
+static void update_planes_and_stream_execute_v3_commit(
+ const struct dc_update_scratch_space *scratch,
+ bool intermediate_update,
+ bool intermediate_context
+)
+{
+ commit_planes_for_stream(
+ scratch->dc,
+ intermediate_update ? scratch->intermediate_updates :
scratch->surface_updates,
+ intermediate_update ? scratch->intermediate_count :
scratch->surface_count,
+ scratch->stream,
+ intermediate_context ? NULL : scratch->stream_update,
+ intermediate_context ? UPDATE_TYPE_FULL :
scratch->update_type,
+ // `dc->current_state` only used in `NO_NEW_CONTEXT`,
where it is equal to `new_context`
+ intermediate_context ? scratch->intermediate_context :
scratch->new_context
+ );
+}
+
+static void update_planes_and_stream_execute_v3(
+ const struct dc_update_scratch_space *scratch
+)
+{
+ switch (scratch->flow) {
+ case UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FAST:
+ commit_planes_for_stream_fast(
+ scratch->dc,
+ scratch->surface_updates,
+ scratch->surface_count,
+ scratch->stream,
+ scratch->stream_update,
+ scratch->update_type,
+ scratch->new_context
+ );
+ break;
+
+ case UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FULL:
+ case UPDATE_V3_FLOW_NEW_CONTEXT_SEAMLESS:
+ update_planes_and_stream_execute_v3_commit(scratch, false,
false);
+ break;
+
+ case UPDATE_V3_FLOW_NEW_CONTEXT_MINIMAL_NEW:
+ update_planes_and_stream_execute_v3_commit(scratch, false,
true);
+ update_planes_and_stream_execute_v3_commit(scratch, false,
false);
+ break;
+
+ case UPDATE_V3_FLOW_NEW_CONTEXT_MINIMAL_CURRENT:
+ update_planes_and_stream_execute_v3_commit(scratch, true, true);
+ update_planes_and_stream_execute_v3_commit(scratch, false,
false);
+ break;
+
+ case UPDATE_V3_FLOW_INVALID:
+ default:
+ ASSERT(false);
+ }
+}
+
+static void update_planes_and_stream_cleanup_v3_new_context(
+ struct dc_update_scratch_space *scratch
+)
+{
+ swap_and_release_current_context(scratch->dc, scratch->new_context,
scratch->stream);
+}
+
+static void update_planes_and_stream_cleanup_v3_intermediate(
+ struct dc_update_scratch_space *scratch,
+ bool backup
+)
+{
+ release_minimal_transition_state(
+ scratch->dc,
+ scratch->intermediate_context,
+ backup ? scratch->backup_context : scratch->new_context,
+ &scratch->intermediate_policy
+ );
+}
+
+static bool update_planes_and_stream_cleanup_v3(
+ struct dc_update_scratch_space *scratch
+)
+{
+ switch (scratch->flow) {
+ case UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FAST:
+ case UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FULL:
+ // No cleanup required
+ break;
+
+ case UPDATE_V3_FLOW_NEW_CONTEXT_SEAMLESS:
+ update_planes_and_stream_cleanup_v3_new_context(scratch);
+ break;
+
+ case UPDATE_V3_FLOW_NEW_CONTEXT_MINIMAL_NEW:
+ update_planes_and_stream_cleanup_v3_intermediate(scratch,
false);
+ update_planes_and_stream_cleanup_v3_new_context(scratch);
+ break;
+
+ case UPDATE_V3_FLOW_NEW_CONTEXT_MINIMAL_CURRENT:
+ swap_and_release_current_context(scratch->dc,
scratch->intermediate_context, scratch->stream);
+ dc_state_retain(scratch->dc->current_state);
+ update_planes_and_stream_cleanup_v3_intermediate(scratch, true);
+ dc_state_release(scratch->backup_context);
+
restore_planes_and_stream_state(&scratch->dc->scratch.new_state,
scratch->stream);
+ update_planes_and_stream_cleanup_v3_new_context(scratch);
+ break;
+
+ case UPDATE_V3_FLOW_INVALID:
+ default:
+ ASSERT(false);
+ }
+
+ if (scratch->do_clear_update_flags)
+ clear_update_flags(scratch->surface_updates,
scratch->surface_count, scratch->stream);
+
+ return false;
+}
+
+struct dc_update_scratch_space *dc_update_planes_and_stream_init(
+ struct dc *dc,
+ struct dc_surface_update *surface_updates,
+ int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update
+)
+{
+ const enum dce_version version = dc->ctx->dce_version;
+ struct dc_update_scratch_space *scratch = stream->update_scratch;
+
+ *scratch = (struct dc_update_scratch_space){
+ .dc = dc,
+ .surface_updates = surface_updates,
+ .surface_count = surface_count,
+ .stream = stream,
+ .stream_update = stream_update,
+ .update_v3 = version >= DCN_VERSION_4_01 || version ==
DCN_VERSION_3_2 || version == DCN_VERSION_3_21,
+ .do_clear_update_flags = version >= DCN_VERSION_3_2 || version
== DCN_VERSION_3_01,
+ };
+
+ return scratch;
+}
+
+bool dc_update_planes_and_stream_prepare(
+ struct dc_update_scratch_space *scratch
+)
+{
+ return scratch->update_v3
+ ? update_planes_and_stream_prepare_v3(scratch)
+ : update_planes_and_stream_prepare_v2(scratch);
+}
+
+void dc_update_planes_and_stream_execute(
+ const struct dc_update_scratch_space *scratch
+)
+{
+ scratch->update_v3
+ ? update_planes_and_stream_execute_v3(scratch)
+ : update_planes_and_stream_execute_v2(scratch);
+}
+
+bool dc_update_planes_and_stream_cleanup(
+ struct dc_update_scratch_space *scratch
+)
+{
+ return scratch->update_v3
+ ? update_planes_and_stream_cleanup_v3(scratch)
+ : update_planes_and_stream_cleanup_v2(scratch);
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 129cd5f84983..9349cccc8438 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -151,6 +151,7 @@ static void dc_stream_free(struct kref *kref)
struct dc_stream_state *stream = container_of(kref, struct
dc_stream_state, refcount);
dc_stream_destruct(stream);
+ kfree(stream->update_scratch);
kfree(stream);
}
@@ -164,26 +165,32 @@ void dc_stream_release(struct dc_stream_state *stream)
struct dc_stream_state *dc_create_stream_for_sink(
struct dc_sink *sink)
{
- struct dc_stream_state *stream;
+ struct dc_stream_state *stream = NULL;
if (sink == NULL)
- return NULL;
+ goto fail;
stream = kzalloc(sizeof(struct dc_stream_state), GFP_KERNEL);
if (stream == NULL)
- goto alloc_fail;
+ goto fail;
+
+ stream->update_scratch = kzalloc((int32_t)
dc_update_scratch_space_size(), GFP_KERNEL);
+ if (stream->update_scratch == NULL)
+ goto fail;
if (dc_stream_construct(stream, sink) == false)
- goto construct_fail;
+ goto fail;
kref_init(&stream->refcount);
return stream;
-construct_fail:
- kfree(stream);
+fail:
+ if (stream) {
+ kfree(stream->update_scratch);
+ kfree(stream);
+ }
-alloc_fail:
return NULL;
}
@@ -195,6 +202,16 @@ struct dc_stream_state *dc_copy_stream(const struct
dc_stream_state *stream)
if (!new_stream)
return NULL;
+ // Scratch is not meant to be reused across copies, as might have
self-referential pointers
+ new_stream->update_scratch = kzalloc(
+ (int32_t) dc_update_scratch_space_size(),
+ GFP_KERNEL
+ );
+ if (!new_stream->update_scratch) {
+ kfree(new_stream);
+ return NULL;
+ }
+
if (new_stream->sink)
dc_sink_retain(new_stream->sink);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h
b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 321cfe92d799..719b98d8e8ca 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -315,6 +315,8 @@ struct dc_stream_state {
struct luminance_data lumin_data;
bool scaler_sharpener_update;
bool sharpening_required;
+
+ struct dc_update_scratch_space *update_scratch;
};
#define ABM_LEVEL_IMMEDIATE_DISABLE 255
@@ -390,6 +392,33 @@ bool dc_update_planes_and_stream(struct dc *dc,
struct dc_stream_state *dc_stream,
struct dc_stream_update *stream_update);
+struct dc_update_scratch_space;
+
+size_t dc_update_scratch_space_size(void);
+
+struct dc_update_scratch_space *dc_update_planes_and_stream_init(
+ struct dc *dc,
+ struct dc_surface_update *surface_updates,
+ int surface_count,
+ struct dc_stream_state *dc_stream,
+ struct dc_stream_update *stream_update
+);
+
+// Locked, false is failed
+bool dc_update_planes_and_stream_prepare(
+ struct dc_update_scratch_space *scratch
+);
+
+// Unlocked
+void dc_update_planes_and_stream_execute(
+ const struct dc_update_scratch_space *scratch
+);
+
+// Locked, true if call again
+bool dc_update_planes_and_stream_cleanup(
+ struct dc_update_scratch_space *scratch
+);
+
/*
* Set up surface attributes and associate to a stream
* The surfaces parameter is an absolute set of all surface active for the
stream.
@@ -597,7 +626,7 @@ struct pipe_ctx *dc_stream_get_pipe_ctx(struct
dc_stream_state *stream);
void dc_dmub_update_dirty_rect(struct dc *dc,
int surface_count,
struct dc_stream_state *stream,
- struct dc_surface_update *srf_updates,
+ const struct dc_surface_update *srf_updates,
struct dc_state *context);
bool dc_stream_is_cursor_limit_pending(struct dc *dc, struct dc_stream_state
*stream);
--
2.43.0