Re: [git pull] drm for 5.13-rc1
Hi Linus, The patch to fix the warning is here (https://www.spinics.net/lists/amd-gfx/msg61614.html) I guess it just didn't propagate all the way to the release. Can it still be pulled into 5.13-rc1 release? From: Mikita Lipski [why] Previous statement would always evaluate to true making it meaningless [how] Just check if a connector is MST by checking if its port exists. Reported-by: kernel test robot Signed-off-by: Mikita Lipski Reviewed-by: Nicholas Kazlauskas Acked-by: Wayne Lin --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 656bc8f00a42..8bf0b566612b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -3030,7 +3030,7 @@ static int trigger_hpd_mst_set(void *data, u64 val) if (!aconnector->dc_link) continue; - if (!(aconnector->port && &aconnector->mst_port->mst_mgr)) + if (!aconnector->mst_port) continue; link = aconnector->dc_link; -- 2.17.1 Thanks, Mikita On 2021-04-28 6:21 p.m., Linus Torvalds wrote: On Tue, Apr 27, 2021 at 8:32 PM Dave Airlie wrote: This is the main drm pull request for 5.13. The usual lots of work all over the place. [...] Mikita Lipski: drm/amd/display: Add MST capability to trigger_hotplug interface Hmm. I've already merged this, but my clang build shows that this looks buggy: drivers/gpu/drm/amd/amdgpu/amdgpu_dm/amdgpu_dm_debugfs.c:3015:53: warning: address of 'aconnector->mst_port->mst_mgr' will always evaluate to 'true' [-Wpointer-bool-conversion] if (!(aconnector->port && &aconnector->mst_port->mst_mgr)) ~~ ~~^~~ and yeah, checking for the address of a member of a structure being NULL doesn't really work. I'm assuming the '&' is just a left-over cut-and-paste error or something. 
Please fix after reviewing (I'm not going to blindly just remove the '&' just to silence the warning, since I don't know the code). Linus ___ dri-devel mailing list dri-devel@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/dri-devel
Re: [PATCH v3 01/20] drm/amdgpu: Add error handling to amdgpu_dm_initialize_dp_connector()
Thanks for the change! Reviewed-by: Mikita Lipski On 2021-04-19 6:55 p.m., Lyude Paul wrote: While working on moving i2c device registration into drm_dp_aux_init() - I realized that in order to do so we need to make sure that drivers calling drm_dp_aux_init() handle any errors it could possibly return. In the process of doing that, I noticed that the majority of AMD's code for DP connector creation doesn't attempt to do any real error handling. So, let's fix this and also cleanup amdgpu_dm_initialize_dp_connector() while we're at it. This way we can handle the error codes from drm_dp_aux_init(). Signed-off-by: Lyude Paul --- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 29 +++- .../display/amdgpu_dm/amdgpu_dm_mst_types.c | 44 +++ .../display/amdgpu_dm/amdgpu_dm_mst_types.h | 6 +-- 3 files changed, 45 insertions(+), 34 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index a0c8c41e4e57..fc5d315bbb05 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -7608,10 +7608,9 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, aconnector->i2c = i2c; res = i2c_add_adapter(>base); - if (res) { DRM_ERROR("Failed to register hw i2c %d\n", link->link_index); - goto out_free; + goto fail_free; } connector_type = to_drm_connector_type(link->connector_signal); @@ -7625,8 +7624,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, if (res) { DRM_ERROR("connector_init failed\n"); - aconnector->connector_id = -1; - goto out_free; + goto fail_id; } drm_connector_helper_add( @@ -7643,15 +7641,22 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, drm_connector_attach_encoder( >base, >base); - if (connector_type == DRM_MODE_CONNECTOR_DisplayPort - || connector_type == DRM_MODE_CONNECTOR_eDP) - amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index); - -out_free: - 
if (res) { - kfree(i2c); - aconnector->i2c = NULL; + if (connector_type == DRM_MODE_CONNECTOR_DisplayPort || + connector_type == DRM_MODE_CONNECTOR_eDP) { + res = amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index); + if (res) + goto fail_cleanup; } + + return 0; +fail_cleanup: + drm_connector_cleanup(>base); +fail_id: + aconnector->connector_id = -1; +fail_free: + kfree(i2c); + aconnector->i2c = NULL; + return res; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 73cdb9fe981a..3dee9cce9c9e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -425,33 +425,39 @@ static const struct drm_dp_mst_topology_cbs dm_mst_cbs = { .add_connector = dm_dp_add_mst_connector, }; -void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, - struct amdgpu_dm_connector *aconnector, - int link_index) +int amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, + struct amdgpu_dm_connector *aconnector, + int link_index) { - aconnector->dm_dp_aux.aux.name = - kasprintf(GFP_KERNEL, "AMDGPU DM aux hw bus %d", - link_index); - aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer; - aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc; + struct amdgpu_dm_dp_aux *dm_aux = >dm_dp_aux; + int ret; - drm_dp_aux_init(>dm_dp_aux.aux); - drm_dp_cec_register_connector(>dm_dp_aux.aux, - >base); + dm_aux->aux.name = kasprintf(GFP_KERNEL, "AMDGPU DM aux hw bus %d", link_index); + if (!dm_aux->aux.name) + return -ENOMEM; + + dm_aux->aux.transfer = dm_dp_aux_transfer; + dm_aux->ddc_service = aconnector->dc_link->ddc; + + drm_dp_aux_init(_aux->aux); + drm_dp_cec_register_connector(_aux->aux, >base); if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP) - return; + return 0; aconnector->mst_mgr.cbs = _mst_cbs; - drm_dp_mst_topology_mgr_init( - >mst_mgr, - 
adev_to_drm(dm->adev), -
Re: [PATCH v3] drm/dp_mst: Rewrite and fix bandwidth limit checks
On 3/9/20 5:01 PM, Lyude Paul wrote: Sigh, this is mostly my fault for not giving commit cd82d82cbc04 ("drm/dp_mst: Add branch bandwidth validation to MST atomic check") enough scrutiny during review. The way we're checking bandwidth limitations here is mostly wrong: For starters, drm_dp_mst_atomic_check_bw_limit() determines the pbn_limit of a branch by simply scanning each port on the current branch device, then uses the last non-zero full_pbn value that it finds. It then counts the sum of the PBN used on each branch device for that level, and compares against the full_pbn value it found before. This is wrong because ports can and will have different PBN limitations on many hubs, especially since a number of DisplayPort hubs out there will be clever and only use the smallest link rate required for each downstream sink - potentially giving every port a different full_pbn value depending on what link rate it's trained at. This means with our current code, which max PBN value we end up with is not well defined. Additionally, we also need to remember when checking bandwidth limitations that the top-most device in any MST topology is a branch device, not a port. This means that the first level of a topology doesn't technically have a full_pbn value that needs to be checked. Instead, we should assume that so long as our VCPI allocations fit we're within the bandwidth limitations of the primary MSTB. We do however, want to check full_pbn on every port including those of the primary MSTB. However, it's important to keep in mind that this value represents the minimum link rate /between a port's sink or mstb, and the mstb itself/. A quick diagram to explain: MSTB #1 / \ / \ Port #1Port #2 full_pbn for Port #1 → | | ← full_pbn for Port #2 Sink #1MSTB #2 | etc... 
Note that in the above diagram, the combined PBN from all VCPI allocations on said hub should not exceed the full_pbn value of port #2, and the display configuration on sink #1 should not exceed the full_pbn value of port #1. However, port #1 and port #2 can otherwise consume as much bandwidth as they want so long as their VCPI allocations still fit. And finally - our current bandwidth checking code also makes the mistake of not checking whether something is an end device or not before trying to traverse down it. So, let's fix it by rewriting our bandwidth checking helpers. We split the function into one part for handling branches which simply adds up the total PBN on each branch and returns it, and one for checking each port to ensure we're not going over its PBN limit. Phew. This should fix regressions seen, where we erroneously reject display configurations due to thinking they're going over our bandwidth limits when they're not. Changes since v1: * Took an even closer look at how PBN limitations are supposed to be handled, and did some experimenting with Sean Paul. Ended up rewriting these helpers again, but this time they should actually be correct! Changes since v2: * Small indenting fix * Fix pbn_used check in drm_dp_mst_atomic_check_port_bw_limit() Thank you for rewriting the bandwidth check helper! My initial understanding of available_pbn was completely wrong and therefore the bandwidth validation was not doing what it intended. This version is much cleaner and easier to follow than the initial one, since you're separating branch and port validation into 2 different functions, and also go down the device topology rather than starting from the end nodes. Also the explanation and the diagram help a lot to understand how it should have been done initially. 
This change makes sense and looks correct to me, therefore: Reviewed-by: Mikita Lipski Thanks, Mikita Signed-off-by: Lyude Paul Fixes: cd82d82cbc04 ("drm/dp_mst: Add branch bandwidth validation to MST atomic check") Cc: Mikita Lipski Cc: Sean Paul Cc: Hans de Goede --- drivers/gpu/drm/drm_dp_mst_topology.c | 119 -- 1 file changed, 93 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index b81ad444c24f..d2f464bdcfff 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -4841,41 +4841,102 @@ static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port, return false; } -static inline -int drm_dp_mst_atomic_check_bw_limit(struct drm_dp_mst_branch *branch, -struct drm_dp_mst_topology_state *mst_state) +static int +drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port, + struct drm_dp_mst_topology_state *s
Re: [PATCH v2 2/4] drm/dp_mst: Use full_pbn instead of available_pbn for bandwidth checks
On 3/6/20 6:46 PM, Lyude Paul wrote: DisplayPort specifications are fun. For a while, it's been really unclear to us what available_pbn actually does. There's a somewhat vague explanation in the DisplayPort spec (starting from 1.2) that partially explains it: The minimum payload bandwidth number supported by the path. Each node updates this number with its available payload bandwidth number if its payload bandwidth number is less than that in the Message Transaction reply. So, it sounds like available_pbn represents the smallest link rate in use between the source and the branch device. Cool, so full_pbn is just the highest possible PBN that the branch device supports right? Well, we assumed that for quite a while until Sean Paul noticed that on some MST hubs, available_pbn will actually get set to 0 whenever there's any active payloads on the respective branch device. This caused quite a bit of confusion since clearing the payload ID table would end up fixing the available_pbn value. So, we just went with that until commit cd82d82cbc04 ("drm/dp_mst: Add branch bandwidth validation to MST atomic check") started breaking people's setups due to us getting erroneous available_pbn values. So, we did some more digging and got confused until we finally looked at the definition for full_pbn: The bandwidth of the link at the trained link rate and lane count between the DP Source device and the DP Sink device with no time slots allocated to VC Payloads, represented as a Payload Bandwidth Number. As with the Available_Payload_Bandwidth_Number, this number is determined by the link with the lowest lane count and link rate. That's what we get for not reading specs closely enough, hehe. So, since full_pbn is definitely what we want for doing bandwidth restriction checks - let's start using that instead and ignore available_pbn entirely. Thank you for the fix and a very detailed explanation. 
I was really confused by available_pbn and why it wouldn't get updated and was searching for the solution in wrong places. But I'm glad you were able to identify the solution. I still think the port should have an available_pbn value so it could be updated when driver parses RESOURCE_STATUS_NOTIFY and ENUM_PATH_RESOURCES messages. With that being said it is a great find. Thanks. Reviewed-by: Mikita Lipski Signed-off-by: Lyude Paul Fixes: cd82d82cbc04 ("drm/dp_mst: Add branch bandwidth validation to MST atomic check") Cc: Mikita Lipski Cc: Hans de Goede Cc: Sean Paul --- drivers/gpu/drm/drm_dp_mst_topology.c | 15 +++ include/drm/drm_dp_mst_helper.h | 4 ++-- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 6714d8a5c558..79ebb871230b 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -2306,7 +2306,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, port); } } else { - port->available_pbn = 0; + port->full_pbn = 0; } } @@ -2401,7 +2401,7 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, if (port->ddps) { dowork = true; } else { - port->available_pbn = 0; + port->full_pbn = 0; } } @@ -2553,7 +2553,7 @@ static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mg if (port->input || !port->ddps) continue; - if (!port->available_pbn) { + if (!port->full_pbn) { drm_modeset_lock(>base.lock, NULL); drm_dp_send_enum_path_resources(mgr, mstb, port); drm_modeset_unlock(>base.lock); @@ -2999,8 +2999,7 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, path_res->port_number, path_res->full_payload_bw_number, path_res->avail_payload_bw_number); - port->available_pbn = - path_res->avail_payload_bw_number; + port->full_pbn = path_res->full_payload_bw_number; port->fec_capable = path_res->fec_capable; } } @@ -3585,7 +3584,7 @@ 
drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb) list_for_each_entry(port, >ports, next) { /* The PBN for each port will also need to be re-probed */ - port->available_pbn = 0; + port->full_pbn = 0; if
Re: [PATCH v2 0/4] drm/dp_mst: Fix bandwidth checking regressions from DSC patches
On 3/6/20 6:46 PM, Lyude Paul wrote: AMD's patch series for adding DSC support to the MST helpers unfortunately introduced a few regressions into the kernel that I didn't get around to fixing until just now. I would have reverted the changes earlier, but seeing as that would have reverted all of amd's DSC support + everything that was done on top of that I really wanted to avoid doing that. Anyway, this should fix everything bandwidth-check related as far as I can tell (I found some other regressions unrelated to AMD's DSC patches which I'll be sending out patches for shortly). Note that I don't have any DSC displays locally yet, so if someone from AMD could sanity check this I would appreciate it ♥. The series is tested and verified with MST DSC Realtek board. Tested-by: Mikita Lipski Cc: Mikita Lipski Cc: Alex Deucher Cc: Sean Paul Cc: Hans de Goede Lyude Paul (4): drm/dp_mst: Rename drm_dp_mst_is_dp_mst_end_device() to be less redundant drm/dp_mst: Use full_pbn instead of available_pbn for bandwidth checks drm/dp_mst: Reprobe path resources in CSN handler drm/dp_mst: Rewrite and fix bandwidth limit checks drivers/gpu/drm/drm_dp_mst_topology.c | 185 ++ include/drm/drm_dp_mst_helper.h | 4 +- 2 files changed, 129 insertions(+), 60 deletions(-) -- Thanks, Mikita Lipski Software Engineer 2, AMD mikita.lip...@amd.com ___ dri-devel mailing list dri-devel@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/dri-devel
Re: 5.6 DP-MST regression: 1 of 2 monitors on TB3 (DP-MST) dock no longer light up
n%2Flistinfo%2Fdri-develdata=02%7C01%7Cmikita.lipski%40amd.com%7Ca48e7470afee41cb208508d7bb155ad0%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637183572706454329sdata=im2LrBE%2BgjCL%2FN4%2B%2BZOOu6Eci5SuaZrT8l3mOuDRQH0%3Dreserved=0 ___ dri-devel mailing list dri-devel@lists.freedesktop.org https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.freedesktop.org%2Fmailman%2Flistinfo%2Fdri-develdata=02%7C01%7Cmikita.lipski%40amd.com%7Ca48e7470afee41cb208508d7bb155ad0%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637183572706454329sdata=im2LrBE%2BgjCL%2FN4%2B%2BZOOu6Eci5SuaZrT8l3mOuDRQH0%3Dreserved=0 -- Thanks, Mikita Lipski Software Engineer 2, AMD mikita.lip...@amd.com ___ dri-devel mailing list dri-devel@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/dri-devel
Re: [PATCH v2] drm/amd/dm/mst: Ignore payload update failures
On 1/24/20 5:01 PM, Lyude Paul wrote: On Fri, 2020-01-24 at 16:46 -0500, Lyude Paul wrote: On Fri, 2020-01-24 at 14:20 -0500, Mikita Lipski wrote: On 1/24/20 2:10 PM, Lyude Paul wrote: Disabling a display on MST can potentially happen after the entire MST topology has been removed, which means that we can't communicate with the topology at all in this scenario. Likewise, this also means that we can't properly update payloads on the topology and as such, it's a good idea to ignore payload update failures when disabling displays. Currently, amdgpu makes the mistake of halting the payload update process when any payload update failures occur, resulting in leaving DC's local copies of the payload tables out of date. This ends up causing problems with hotplugging MST topologies, and causes modesets on the second hotplug to fail like so: [drm] Failed to updateMST allocation table forpipe idx:1 [ cut here ] WARNING: CPU: 5 PID: 1511 at drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc_link.c:2677 update_mst_stream_alloc_table+0x11e/0x130 [amdgpu] Modules linked in: cdc_ether usbnet fuse xt_conntrack nf_conntrack nf_defrag_ipv6 libcrc32c nf_defrag_ipv4 ipt_REJECT nf_reject_ipv4 nft_counter nft_compat nf_tables nfnetlink tun bridge stp llc sunrpc vfat fat wmi_bmof uvcvideo snd_hda_codec_realtek snd_hda_codec_generic snd_hda_codec_hdmi videobuf2_vmalloc snd_hda_intel videobuf2_memops videobuf2_v4l2 snd_intel_dspcfg videobuf2_common crct10dif_pclmul snd_hda_codec videodev crc32_pclmul snd_hwdep snd_hda_core ghash_clmulni_intel snd_seq mc joydev pcspkr snd_seq_device snd_pcm sp5100_tco k10temp i2c_piix4 snd_timer thinkpad_acpi ledtrig_audio snd wmi soundcore video i2c_scmi acpi_cpufreq ip_tables amdgpu(O) rtsx_pci_sdmmc amd_iommu_v2 gpu_sched mmc_core i2c_algo_bit ttm drm_kms_helper syscopyarea sysfillrect sysimgblt fb_sys_fops cec drm crc32c_intel serio_raw hid_multitouch r8152 mii nvme r8169 nvme_core rtsx_pci pinctrl_amd CPU: 5 PID: 1511 Comm: gnome-shell Tainted: G O 5.5.0- 
rc7Lyude-Test+ #4 Hardware name: LENOVO FA495SIT26/FA495SIT26, BIOS R12ET22W(0.22 ) 01/31/2019 RIP: 0010:update_mst_stream_alloc_table+0x11e/0x130 [amdgpu] Code: 28 00 00 00 75 2b 48 8d 65 e0 5b 41 5c 41 5d 41 5e 5d c3 0f b6 06 49 89 1c 24 41 88 44 24 08 0f b6 46 01 41 88 44 24 09 eb 93 <0f> 0b e9 2f ff ff ff e8 a6 82 a3 c2 66 0f 1f 44 00 00 0f 1f 44 00 RSP: 0018:ac428127f5b0 EFLAGS: 00010202 RAX: 0002 RBX: 8d1e166eee80 RCX: RDX: ac428127f668 RSI: 8d1e166eee80 RDI: ac428127f610 RBP: ac428127f640 R08: c03d94a8 R09: R10: 8d1e24b02000 R11: ac428127f5b0 R12: 8d1e1b83d000 R13: 8d1e1bea0b08 R14: 0002 R15: 0002 FS: 7fab23ffcd80() GS:8d1e28b4() knlGS: CS: 0010 DS: ES: CR0: 80050033 CR2: 7f151f1711e8 CR3: 0005997c CR4: 003406e0 Call Trace: ? mutex_lock+0xe/0x30 dc_link_allocate_mst_payload+0x9a/0x210 [amdgpu] ? dm_read_reg_func+0x39/0xb0 [amdgpu] ? core_link_enable_stream+0x656/0x730 [amdgpu] core_link_enable_stream+0x656/0x730 [amdgpu] dce110_apply_ctx_to_hw+0x58e/0x5d0 [amdgpu] ? dcn10_verify_allow_pstate_change_high+0x1d/0x280 [amdgpu] ? dcn10_wait_for_mpcc_disconnect+0x3c/0x130 [amdgpu] dc_commit_state+0x292/0x770 [amdgpu] ? add_timer+0x101/0x1f0 ? ttm_bo_put+0x1a1/0x2f0 [ttm] amdgpu_dm_atomic_commit_tail+0xb59/0x1ff0 [amdgpu] ? amdgpu_move_blit.constprop.0+0xb8/0x1f0 [amdgpu] ? amdgpu_bo_move+0x16d/0x2b0 [amdgpu] ? ttm_bo_handle_move_mem+0x118/0x570 [ttm] ? ttm_bo_validate+0x134/0x150 [ttm] ? dm_plane_helper_prepare_fb+0x1b9/0x2a0 [amdgpu] ? _cond_resched+0x15/0x30 ? wait_for_completion_timeout+0x38/0x160 ? _cond_resched+0x15/0x30 ? wait_for_completion_interruptible+0x33/0x190 commit_tail+0x94/0x130 [drm_kms_helper] drm_atomic_helper_commit+0x113/0x140 [drm_kms_helper] drm_atomic_helper_set_config+0x70/0xb0 [drm_kms_helper] drm_mode_setcrtc+0x194/0x6a0 [drm] ? _cond_resched+0x15/0x30 ? mutex_lock+0xe/0x30 ? drm_mode_getcrtc+0x180/0x180 [drm] drm_ioctl_kernel+0xaa/0xf0 [drm] drm_ioctl+0x208/0x390 [drm] ? 
drm_mode_getcrtc+0x180/0x180 [drm] amdgpu_drm_ioctl+0x49/0x80 [amdgpu] do_vfs_ioctl+0x458/0x6d0 ksys_ioctl+0x5e/0x90 __x64_sys_ioctl+0x16/0x20 do_syscall_64+0x55/0x1b0 entry_SYSCALL_64_after_hwframe+0x44/0xa9 RIP: 0033:0x7fab2121f87b Code: 0f 1e fa 48 8b 05 0d 96 2c 00 64 c7 00 26 00 00 00 48 c7 c0 ff ff ff ff c3 66 0f 1f 44 00 00 f3 0f 1e fa b8 10 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d dd 95 2c 00 f7 d8 64 89 01 48 RSP: 002b:7ffd045f9068 EFLAGS: 0246 ORIG_RAX: 0010 RAX: ffda RBX: 7ffd045f90a0 RCX: 7fab2121f87b RDX: 7ffd045f90a0 RSI: c06864a2 RDI: 000b RBP: 7ffd045f90a0 R08: R09: 55db
Re: [PATCH v2] drm/amd/dm/mst: Ignore payload update failures
I have only been able to reproduce this on setups with 2 MST displays. Changes since v1: * Don't return false when part 1 or part 2 of updating the payloads fails, we don't want to abort at any step of the process even if things fail Signed-off-by: Lyude Paul Acked-by: Harry Wentland Cc: sta...@vger.kernel.org --- .../drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 13 - 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 069b7a6f5597..318b474ff20e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -216,7 +216,8 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port); } - ret = drm_dp_update_payload_part1(mst_mgr); + /* It's OK for this to fail */ + drm_dp_update_payload_part1(mst_mgr); /* mst_mgr->->payloads are VC payload notify MST branch using DPCD or * AUX message. The sequence is slot 1-63 allocated sequence for each @@ -225,9 +226,6 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( get_payload_table(aconnector, proposed_table); - if (ret) - return false; - Sorry for being picky, but I think this might cause a compilation error on some systems for having an unused variable (int ret). It's better just to strip out both ret integer declarations. Otherwise the patch is good. Thanks again! 
Reviewed-by: Mikita Lipski Mikita return true; } @@ -285,7 +283,6 @@ bool dm_helpers_dp_mst_send_payload_allocation( struct amdgpu_dm_connector *aconnector; struct drm_dp_mst_topology_mgr *mst_mgr; struct drm_dp_mst_port *mst_port; - int ret; aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; @@ -299,10 +296,8 @@ bool dm_helpers_dp_mst_send_payload_allocation( if (!mst_mgr->mst_state) return false; - ret = drm_dp_update_payload_part2(mst_mgr); - - if (ret) - return false; + /* It's OK for this to fail */ + drm_dp_update_payload_part2(mst_mgr); if (!enable) drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port); ___ dri-devel mailing list dri-devel@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/dri-devel
Re: [PATCH] drm/amd/dm/mst: Ignore payload update failures on disable
On 1/24/20 9:55 AM, Harry Wentland wrote: On 2020-01-23 7:06 p.m., Lyude Paul wrote: Disabling a display on MST can potentially happen after the entire MST topology has been removed, which means that we can't communicate with the topology at all in this scenario. Likewise, this also means that we can't properly update payloads on the topology and as such, it's a good idea to ignore payload update failures when disabling displays. Currently, amdgpu makes the mistake of halting the payload update process when any payload update failures occur, resulting in leaving DC's local copies of the payload tables out of date. This ends up causing problems with hotplugging MST topologies, and causes modesets on the second hotplug to fail like so: [drm] Failed to updateMST allocation table forpipe idx:1 [ cut here ] WARNING: CPU: 5 PID: 1511 at drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc_link.c:2677 update_mst_stream_alloc_table+0x11e/0x130 [amdgpu] Modules linked in: cdc_ether usbnet fuse xt_conntrack nf_conntrack nf_defrag_ipv6 libcrc32c nf_defrag_ipv4 ipt_REJECT nf_reject_ipv4 nft_counter nft_compat nf_tables nfnetlink tun bridge stp llc sunrpc vfat fat wmi_bmof uvcvideo snd_hda_codec_realtek snd_hda_codec_generic snd_hda_codec_hdmi videobuf2_vmalloc snd_hda_intel videobuf2_memops videobuf2_v4l2 snd_intel_dspcfg videobuf2_common crct10dif_pclmul snd_hda_codec videodev crc32_pclmul snd_hwdep snd_hda_core ghash_clmulni_intel snd_seq mc joydev pcspkr snd_seq_device snd_pcm sp5100_tco k10temp i2c_piix4 snd_timer thinkpad_acpi ledtrig_audio snd wmi soundcore video i2c_scmi acpi_cpufreq ip_tables amdgpu(O) rtsx_pci_sdmmc amd_iommu_v2 gpu_sched mmc_core i2c_algo_bit ttm drm_kms_helper syscopyarea sysfillrect sysimgblt fb_sys_fops cec drm crc32c_intel serio_raw hid_multitouch r8152 mii nvme r8169 nvme_core rtsx_pci pinctrl_amd CPU: 5 PID: 1511 Comm: gnome-shell Tainted: G O 5.5.0-rc7Lyude-Test+ #4 Hardware name: LENOVO FA495SIT26/FA495SIT26, BIOS R12ET22W(0.22 ) 01/31/2019 RIP: 
0010:update_mst_stream_alloc_table+0x11e/0x130 [amdgpu] Code: 28 00 00 00 75 2b 48 8d 65 e0 5b 41 5c 41 5d 41 5e 5d c3 0f b6 06 49 89 1c 24 41 88 44 24 08 0f b6 46 01 41 88 44 24 09 eb 93 <0f> 0b e9 2f ff ff ff e8 a6 82 a3 c2 66 0f 1f 44 00 00 0f 1f 44 00 RSP: 0018:ac428127f5b0 EFLAGS: 00010202 RAX: 0002 RBX: 8d1e166eee80 RCX: RDX: ac428127f668 RSI: 8d1e166eee80 RDI: ac428127f610 RBP: ac428127f640 R08: c03d94a8 R09: R10: 8d1e24b02000 R11: ac428127f5b0 R12: 8d1e1b83d000 R13: 8d1e1bea0b08 R14: 0002 R15: 0002 FS: 7fab23ffcd80() GS:8d1e28b4() knlGS: CS: 0010 DS: ES: CR0: 80050033 CR2: 7f151f1711e8 CR3: 0005997c CR4: 003406e0 Call Trace: ? mutex_lock+0xe/0x30 dc_link_allocate_mst_payload+0x9a/0x210 [amdgpu] ? dm_read_reg_func+0x39/0xb0 [amdgpu] ? core_link_enable_stream+0x656/0x730 [amdgpu] core_link_enable_stream+0x656/0x730 [amdgpu] dce110_apply_ctx_to_hw+0x58e/0x5d0 [amdgpu] ? dcn10_verify_allow_pstate_change_high+0x1d/0x280 [amdgpu] ? dcn10_wait_for_mpcc_disconnect+0x3c/0x130 [amdgpu] dc_commit_state+0x292/0x770 [amdgpu] ? add_timer+0x101/0x1f0 ? ttm_bo_put+0x1a1/0x2f0 [ttm] amdgpu_dm_atomic_commit_tail+0xb59/0x1ff0 [amdgpu] ? amdgpu_move_blit.constprop.0+0xb8/0x1f0 [amdgpu] ? amdgpu_bo_move+0x16d/0x2b0 [amdgpu] ? ttm_bo_handle_move_mem+0x118/0x570 [ttm] ? ttm_bo_validate+0x134/0x150 [ttm] ? dm_plane_helper_prepare_fb+0x1b9/0x2a0 [amdgpu] ? _cond_resched+0x15/0x30 ? wait_for_completion_timeout+0x38/0x160 ? _cond_resched+0x15/0x30 ? wait_for_completion_interruptible+0x33/0x190 commit_tail+0x94/0x130 [drm_kms_helper] drm_atomic_helper_commit+0x113/0x140 [drm_kms_helper] drm_atomic_helper_set_config+0x70/0xb0 [drm_kms_helper] drm_mode_setcrtc+0x194/0x6a0 [drm] ? _cond_resched+0x15/0x30 ? mutex_lock+0xe/0x30 ? drm_mode_getcrtc+0x180/0x180 [drm] drm_ioctl_kernel+0xaa/0xf0 [drm] drm_ioctl+0x208/0x390 [drm] ? 
drm_mode_getcrtc+0x180/0x180 [drm] amdgpu_drm_ioctl+0x49/0x80 [amdgpu] do_vfs_ioctl+0x458/0x6d0 ksys_ioctl+0x5e/0x90 __x64_sys_ioctl+0x16/0x20 do_syscall_64+0x55/0x1b0 entry_SYSCALL_64_after_hwframe+0x44/0xa9 RIP: 0033:0x7fab2121f87b Code: 0f 1e fa 48 8b 05 0d 96 2c 00 64 c7 00 26 00 00 00 48 c7 c0 ff ff ff ff c3 66 0f 1f 44 00 00 f3 0f 1e fa b8 10 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d dd 95 2c 00 f7 d8 64 89 01 48 RSP: 002b:7ffd045f9068 EFLAGS: 0246 ORIG_RAX: 0010 RAX: ffda RBX: 7ffd045f90a0 RCX: 7fab2121f87b RDX: 7ffd045f90a0 RSI: c06864a2 RDI: 000b RBP: 7ffd045f90a0 R08: R09: 55dbd2985d10 R10: 55dbd2196280 R11: 0246 R12: c06864a2 R13: 000b R14: R15: 55dbd2196280 ---[ end trace
Re: [PATCH v9 12/18] drm/dp_mst: Add branch bandwidth validation to MST atomic check
On 1/17/20 10:09 AM, Sean Paul wrote: On Fri, Dec 13, 2019 at 3:09 PM wrote: From: Mikita Lipski Hi Mikita, Unfortunately this patch causes a crash on my i915 device when I unplug my MST hub. The panic is below. Hi Sean, I thought this issue was fixed by Wayne Lin in https://patchwork.freedesktop.org/patch/346736/?series=71388=1 but now I checked it seems it never got pushed. I will resend Wayne's patch once again. Thanks Mikita [ 38.514014] BUG: kernel NULL pointer dereference, address: 0030 [ 38.521801] #PF: supervisor read access in kernel mode [ 38.527556] #PF: error_code(0x) - not-present page [ 38.533299] PGD 0 P4D 0 [ 38.536127] Oops: [#1] PREEMPT SMP PTI [ 38.540798] CPU: 1 PID: 1324 Comm: DrmThread Not tainted 5.5.0-rc6-02273-g9bb4096398e7 #36 [ 38.550040] Hardware name: Google Fizz/Fizz, BIOS Google_Fizz.10139.39.0 01/04/2018 [ 38.558606] RIP: 0010:drm_dp_mst_atomic_check_bw_limit+0x11/0x102 [ 38.565418] Code: 05 ff cb bf 19 48 f7 f6 c3 0f 1f 44 00 00 55 b8 0b 80 ff 0f 48 89 e5 5d c3 55 48 89 e5 41 57 41 56 41 55 41 54 4c 8d 77 30 53 <48> 8b 47 30 49 89 fd 49 89 f7 45 31 e4 48 8d 58 e8 48 8d 53 18 4c [ 38.586422] RSP: 0018:c9000139f9d8 EFLAGS: 00010282 [ 38.592264] RAX: RBX: 888272aeac88 RCX: 888236f529e0 [ 38.600242] RDX: 888272aeac88 RSI: 888236f529e0 RDI: [ 38.608220] RBP: c9000139fa00 R08: 0031 R09: 000e [ 38.616198] R10: 888236f529e8 R11: 8882621f3440 R12: [ 38.624176] R13: 888236f529d0 R14: 0030 R15: 888236f529e0 [ 38.632153] FS: 7cd9229ce700() GS:888276c8() knlGS: [ 38.641193] CS: 0010 DS: ES: CR0: 80050033 [ 38.647616] CR2: 0030 CR3: 0002618e8004 CR4: 003606e0 [ 38.655593] Call Trace: [ 38.658329] drm_dp_mst_atomic_check+0x152/0x16d [ 38.663484] intel_atomic_check+0xcfe/0x1e6f [ 38.668259] ? trace_hardirqs_on+0x28/0x3d [ 38.672835] ? intel_pipe_config_compare+0x1b38/0x1b38 [ 38.678580] drm_atomic_check_only+0x5ab/0x70f [ 38.683547] ? drm_atomic_set_crtc_for_connector+0xf5/0x102 [ 38.689778] ? 
drm_atomic_helper_shutdown+0xb6/0xb6 [ 38.695221] drm_atomic_commit+0x18/0x53 [ 38.699604] drm_atomic_helper_set_config+0x5a/0x70 [ 38.705057] drm_mode_setcrtc+0x2ab/0x833 [ 38.709537] ? rcu_read_unlock+0x57/0x57 [ 38.713920] ? drm_mode_getcrtc+0x173/0x173 [ 38.718594] drm_ioctl+0x2e5/0x424 [ 38.722392] ? drm_mode_getcrtc+0x173/0x173 [ 38.727069] vfs_ioctl+0x21/0x2f [ 38.730675] do_vfs_ioctl+0x5fb/0x61e [ 38.734766] ksys_ioctl+0x55/0x75 [ 38.738469] __x64_sys_ioctl+0x1a/0x1e [ 38.742659] do_syscall_64+0x5c/0x6d [ 38.746653] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 38.752298] RIP: 0033:0x7cd92552d497 [ 38.756291] Code: 8a 66 90 48 8b 05 d1 d9 2b 00 64 c7 00 26 00 00 00 48 c7 c0 ff ff ff ff c3 66 2e 0f 1f 84 00 00 00 00 00 b8 10 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d a1 d9 2b 00 f7 d8 64 89 01 48 [ 38.777296] RSP: 002b:7cd9229cd698 EFLAGS: 0246 ORIG_RAX: 0010 [ 38.785762] RAX: ffda RBX: 20323373da80 RCX: 7cd92552d497 [ 38.793740] RDX: 7cd9229cd6d0 RSI: c06864a2 RDI: 001c [ 38.801717] RBP: 7cd9229cd6c0 R08: R09: [ 38.809693] R10: R11: 0246 R12: 001c [ 38.817670] R13: R14: 7cd9229cd6d0 R15: c06864a2 [ 38.825642] Modules linked in: xt_nat cdc_ether r8152 bridge stp llc usbhid btusb btrtl btbcm btintel bluetooth asix usbnet ecdh_generic ecc mii snd_soc_hdac_hdmi snd_soc_dmic xhci_pci xhci_hcd snd_soc_skl snd_soc_sst_ipc snd_soc_sst_dsp snd_hda_ext_core snd_intel_dspcfg snd_hda_core usbcore usb_common acpi_als kfifo_buf industrialio xt_MASQUERADE iptable_nat nf_nat xt_mark fuse ip6table_filter iwlmvm mac80211 r8169 realtek iwlwifi lzo_rle lzo_compress zram cfg80211 [ 38.871839] CR2: 0030 [ 38.875542] ---[ end trace 6bb39ec52e30c7cb ]--- [ 38.886142] RIP: 0010:drm_dp_mst_atomic_check_bw_limit+0x11/0x102 [ 38.892957] Code: 05 ff cb bf 19 48 f7 f6 c3 0f 1f 44 00 00 55 b8 0b 80 ff 0f 48 89 e5 5d c3 55 48 89 e5 41 57 41 56 41 55 41 54 4c 8d 77 30 53 <48> 8b 47 30 49 89 fd 49 89 f7 45 31 e4 48 8d 58 e8 48 8d 53 18 4c [ 38.913964] RSP: 0018:c9000139f9d8 EFLAGS: 
00010282 [ 38.919804] RAX: RBX: 888272aeac88 RCX: 888236f529e0 [ 38.927784] RDX: 888272aeac88 RSI: 888236f529e0 RDI: [ 38.935765] RBP: c9000139fa00 R08: 0031 R09: 000e [ 38.943733] R10: 888236f529e8 R11: 8882621f3440 R12: [ 38.951712] R13: 888236f529d0 R14: 0030 R15: 888236f529e0 [ 38.959692] FS: 7cd9229ce700() GS:888276
Re: [PATCH 1/4] drm/mst: Don't do atomic checks over disabled managers
Thanks for the catch! Makes sense to skip disabled managers there, but I wonder why it didn't cause a crash with amdgpu. Anyways looks good to me. Acked-by: Mikita Lipski On 1/16/20 8:58 PM, José Roberto de Souza wrote: When a main MST port is disconnected drivers should call drm_dp_mst_topology_mgr_set_mst() disabling the MST manager, this function will set manager mst_primary to NULL and it will cause the crash bellow on the next atomic check when trying to access mst_primary->port. As there is no use in running checks over managers that are not active this patch will skip it. [ 305.616450] [drm:drm_dp_mst_atomic_check] [MST PORT:cc2049e9] releases all VCPI slots [ 305.625085] [drm:drm_dp_mst_atomic_check] [MST PORT:020ab43e] releases all VCPI slots [ 305.633729] [drm:drm_dp_mst_atomic_check] [MST MGR:cdd467d4] mst state b67672eb VCPI avail=63 used=0 [ 305.644405] BUG: kernel NULL pointer dereference, address: 0030 [ 305.651448] #PF: supervisor read access in kernel mode [ 305.656640] #PF: error_code(0x) - not-present page [ 305.661807] PGD 0 P4D 0 [ 305.664396] Oops: [#1] PREEMPT SMP NOPTI [ 305.668789] CPU: 3 PID: 183 Comm: kworker/3:2 Not tainted 5.5.0-rc6+ #1404 [ 305.675703] Hardware name: Intel Corporation Ice Lake Client Platform/IceLake U DDR4 SODIMM PD RVP TLC, BIOS ICLSFWR1.R00.3201.A00.1905140358 05/14/2019 [ 305.689425] Workqueue: events drm_dp_delayed_destroy_work [ 305.694874] RIP: 0010:drm_dp_mst_atomic_check+0x138/0x2c0 [ 305.700306] Code: 00 00 00 41 29 d9 41 89 d8 4c 89 fa 4c 89 f1 48 c7 c6 b0 b1 34 82 bf 10 00 00 00 45 31 ed e8 3f 99 02 00 4d 8b bf 80 04 00 00 <49> 8b 47 30 49 8d 5f 30 4c 8d 60 e8 48 39 c3 74 35 49 8b 7c 24 28 [ 305.719169] RSP: 0018:c90001687b58 EFLAGS: 00010246 [ 305.724434] RAX: RBX: 003f RCX: [ 305.731611] RDX: RSI: 88849fba8cb8 RDI: [ 305.738785] RBP: R08: R09: 0001 [ 305.745962] R10: c900016879a0 R11: c900016879a5 R12: [ 305.753139] R13: R14: 8884905c9bc0 R15: [ 305.760315] FS: () GS:88849fb8() knlGS: [ 305.768452] CS: 
0010 DS: ES: CR0: 80050033 [ 305.774263] CR2: 0030 CR3: 05610006 CR4: 00760ee0 [ 305.781441] PKRU: 5554 [ 305.784228] Call Trace: [ 305.786739] intel_atomic_check+0xb2e/0x2560 [i915] [ 305.791678] ? printk+0x53/0x6a [ 305.794856] ? drm_atomic_check_only+0x3e/0x810 [ 305.799417] ? __drm_dbg+0x82/0x90 [ 305.802848] drm_atomic_check_only+0x56a/0x810 [ 305.807322] drm_atomic_commit+0xe/0x50 [ 305.811185] drm_client_modeset_commit_atomic+0x1e2/0x250 [ 305.816619] drm_client_modeset_commit_force+0x4d/0x180 [ 305.821921] drm_fb_helper_restore_fbdev_mode_unlocked+0x46/0xa0 [ 305.827963] drm_fb_helper_set_par+0x2b/0x40 [ 305.832265] drm_fb_helper_hotplug_event.part.0+0xb2/0xd0 [ 305.837755] drm_kms_helper_hotplug_event+0x21/0x30 [ 305.842694] process_one_work+0x25b/0x5b0 [ 305.846735] worker_thread+0x4b/0x3b0 [ 305.850439] kthread+0x100/0x140 [ 305.853690] ? process_one_work+0x5b0/0x5b0 [ 305.857901] ? kthread_park+0x80/0x80 [ 305.861588] ret_from_fork+0x24/0x50 [ 305.865202] Modules linked in: snd_hda_codec_hdmi snd_hda_codec_realtek snd_hda_codec_generic i915 btusb btrtl btbcm btintel bluetooth prime_numbers snd_hda_intel snd_intel_dspcfg snd_hda_codec e1000e snd_hwdep snd_hda_core thunderbolt mei_hdcp mei_me asix cdc_ether x86_pkg_temp_thermal r8152 mei coretemp usbnet snd_pcm mii crct10dif_pclmul ptp crc32_pclmul ecdh_generic ghash_clmulni_intel pps_core ecc i2c_i801 intel_lpss_pci [ 305.903096] CR2: 0030 [ 305.906431] ---[ end trace 70ee364eed801cb0 ]--- [ 305.940816] RIP: 0010:drm_dp_mst_atomic_check+0x138/0x2c0 [ 305.946261] Code: 00 00 00 41 29 d9 41 89 d8 4c 89 fa 4c 89 f1 48 c7 c6 b0 b1 34 82 bf 10 00 00 00 45 31 ed e8 3f 99 02 00 4d 8b bf 80 04 00 00 <49> 8b 47 30 49 8d 5f 30 4c 8d 60 e8 48 39 c3 74 35 49 8b 7c 24 28 [ 305.965125] RSP: 0018:c90001687b58 EFLAGS: 00010246 [ 305.970382] RAX: RBX: 003f RCX: [ 305.977571] RDX: RSI: 88849fba8cb8 RDI: [ 305.984747] RBP: R08: R09: 0001 [ 305.991921] R10: c900016879a0 R11: c900016879a5 R12: [ 305.999099] R13: R14: 
8884905c9bc0 R15: [ 306.006271] FS: () GS:88849fb8() knlGS: [ 306.014407] CS: 0010 DS: ES: CR0: 80050033 [ 306.020185] CR2: 0030 CR3: 00048b3aa003 CR4: 00760ee0 [ 306.027404] PKRU: 5554 [ 306.030127] BUG: sleeping function called from
Re: [PATCH] drm/dp_mst: fix documentation of drm_dp_mst_add_affected_dsc_crtcs
Thank you, Reviewed-by: Mikita Lipski On 1/8/20 10:24 PM, Alex Deucher wrote: the parameter is the mst manager, not the port. Signed-off-by: Alex Deucher --- drivers/gpu/drm/drm_dp_mst_topology.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 7e9b9b7e50cf..a4be2f825899 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -4781,7 +4781,7 @@ drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr, /** * drm_dp_mst_add_affected_dsc_crtcs * @state: Pointer to the new struct drm_dp_mst_topology_state - * @port: Port pointer of connector with new state + * @mgr: MST topology manager * * Whenever there is a change in mst topology * DSC configuration would have to be recalculated -- Thanks, Mikita Lipski Software Engineer, AMD mikita.lip...@amd.com ___ dri-devel mailing list dri-devel@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/dri-devel
Re: [PATCH v9 16/18] drm/amd/display: Recalculate VCPI slots for new DSC connectors
On 12/20/19 4:41 PM, Lyude Paul wrote: So I reviewed this already but realized I made a very silly mistake, comments down below: On Fri, 2019-12-13 at 15:08 -0500, mikita.lip...@amd.com wrote: From: Mikita Lipski [why] Since for DSC MST connector's PBN is claculated differently due to compression, we have to recalculate both PBN and VCPI slots for that connector. [how] The function iterates through all the active streams to find, which have DSC enabled, then recalculates PBN for it and calls drm_dp_helper_update_vcpi_slots_for_dsc to update connector's VCPI slots. v2: - use drm_dp_mst_atomic_enable_dsc per port to enable/disable DSC v3: - Iterate through connector states from the state passed - On each connector state get stream from dc_state, instead CRTC state Reviewed-by: Lyude Paul Signed-off-by: Mikita Lipski --- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 76 +-- 1 file changed, 71 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 93a230d956ee..2ac3a2f0b452 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -4986,6 +4986,69 @@ const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = { .atomic_check = dm_encoder_helper_atomic_check }; +static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, + struct dc_state *dc_state) +{ + struct dc_stream_state *stream = NULL; + struct drm_connector *connector; + struct drm_connector_state *new_con_state, *old_con_state; + struct amdgpu_dm_connector *aconnector; + struct dm_connector_state *dm_conn_state; + int i, j, clock, bpp; + int vcpi, pbn_div, pbn = 0; + + for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { + + aconnector = to_amdgpu_dm_connector(connector); + + if (!aconnector->port) + continue; + + if (!new_con_state || !new_con_state->crtc) + continue; + + dm_conn_state = 
to_dm_connector_state(new_con_state); + + for (j = 0; j < dc_state->stream_count; j++) { + stream = dc_state->streams[j]; + if (!stream) + continue; + + if ((struct amdgpu_dm_connector*)stream- dm_stream_context == aconnector) + break; + + stream = NULL; + } + + if (!stream) + continue; + + if (stream->timing.flags.DSC != 1) { + drm_dp_mst_atomic_enable_dsc(state, +aconnector->port, +dm_conn_state->pbn, +0, +false); + continue; + } + + pbn_div = dm_mst_get_pbn_divider(stream->link); + bpp = stream->timing.dsc_cfg.bits_per_pixel; + clock = stream->timing.pix_clk_100hz / 10; + pbn = drm_dp_calc_pbn_mode(clock, bpp, true); + vcpi = drm_dp_mst_atomic_enable_dsc(state, + aconnector->port, + pbn, pbn_div, + true); + if (vcpi < 0) + return vcpi; + + dm_conn_state->pbn = pbn; + dm_conn_state->vcpi_slots = vcpi; + } + return 0; +} + static void dm_drm_plane_reset(struct drm_plane *plane) { struct dm_plane_state *amdgpu_state = NULL; @@ -8022,11 +8085,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, if (ret) goto fail; - /* Perform validation of MST topology in the state*/ - ret = drm_dp_mst_atomic_check(state); - if (ret) - goto fail; - if (state->legacy_cursor_update) { /* * This is a fast cursor update coming from the plane update @@ -8098,6 +8156,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, if (!compute_mst_dsc_configs_for_state(state, dm_state- context)) goto fail; + ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state- context); + if (ret) + goto fail; + if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) { ret = -EINVAL; goto fail; @@ -8126,6 +8188,10 @@ stati
Re: [PATCH] drm/amd/display: replace BUG_ON with WARN_ON
On 12/18/19 11:15 AM, Aditya Pakki wrote: In skip_modeset label within dm_update_crtc_state(), the dc stream cannot be NULL. Using BUG_ON as an assertion is not required and can be removed. The patch replaces the check with a WARN_ON in case dm_new_crtc_state->stream is NULL. Signed-off-by: Aditya Pakki --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 7aac9568d3be..03cb30913c20 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -7012,7 +7012,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, * 3. Is currently active and enabled. * => The dc stream state currently exists. */ - BUG_ON(dm_new_crtc_state->stream == NULL); + WARN_ON(!dm_new_crtc_state->stream); Thanks for the patch, but this is NAK from me since it doesn't really do anything to prevent it or fix it. If the stream is NULL and it passed this far in the function then something really wrong has happened and the process should be stopped. I'm currently dealing with an issue where dm_new_crtc_state->stream is NULL. One of the scenarios could be that driver creates stream for a fake sink instead of failing, that is connected over MST, and calls dm_update_crtc_state to enable CRTC. /* Scaling or underscan settings */ if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state)) -- Thanks, Mikita Lipski Software Engineer, AMD mikita.lip...@amd.com ___ dri-devel mailing list dri-devel@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/dri-devel
Re: [PATCH v8 11/17] drm/dp_mst: Add DSC enablement helpers to DRM
On 12/6/19 7:24 PM, Lyude Paul wrote: Nice! All I've got is a couple of typos I noticed and one question, this looks great :) Thanks! I'll clean it up. The response to the question is below. On Tue, 2019-12-03 at 09:35 -0500, mikita.lip...@amd.com wrote: From: Mikita Lipski Adding a helper function to be called by drivers outside of DRM to enable DSC on the MST ports. Function is called to recalculate VCPI allocation if DSC is enabled and raise the DSC flag to enable. In case of disabling DSC the flag is set to false and recalculation of VCPI slots is expected to be done in encoder's atomic_check. v2: squash separate functions into one and call it per port Cc: Harry Wentland Cc: Lyude Paul Signed-off-by: Mikita Lipski --- drivers/gpu/drm/drm_dp_mst_topology.c | 61 +++ include/drm/drm_dp_mst_helper.h | 5 +++ 2 files changed, 66 insertions(+) diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index f1d883960831..5e549f48ffb8 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -4742,6 +4742,67 @@ drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr, return 0; } +/** + * drm_dp_mst_atomic_enable_dsc - Set DSC Enable Flag to On/Off + * @state: Pointer to the new drm_atomic_state + * @pointer: Pointer to the affected MST Port Typo here + * @pbn: Newly recalculated bw required for link with DSC enabled + * @pbn_div: Divider to calculate correct number of pbn per slot + * @enable: Boolean flag enabling or disabling DSC on the port + * + * This function enables DSC on the given Port + * by recalculating its vcpi from pbn provided + * and sets dsc_enable flag to keep track of which + * ports have DSC enabled + * + */ +int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state, +struct drm_dp_mst_port *port, +int pbn, int pbn_div, +bool enable) +{ + struct drm_dp_mst_topology_state *mst_state; + struct drm_dp_vcpi_allocation *pos; + bool found = false; + int vcpi = 
0; + + mst_state = drm_atomic_get_mst_topology_state(state, port->mgr); + + if (IS_ERR(mst_state)) + return PTR_ERR(mst_state); + + list_for_each_entry(pos, _state->vcpis, next) { + if (pos->port == port) { + found = true; + break; + } + } + + if (!found) { + DRM_DEBUG_ATOMIC("[MST PORT:%p] Couldn't find VCPI allocation in mst state %p\n", +port, mst_state); + return -EINVAL; + } Just double checking-does this handle the case where we're enabling DSC on a port that didn't previously have a VCPI allocation because it wasn't enabled previously? Or do we not need to handle that here Because we call encoder atomic check previously to allocate VCPI slots - the port should have VCPI allocation before enabling DSC even if it wasn't enabled previously. Therefore, I was thinking, that if encoder atomic check fails to allocate VCPI slots for the port - we shouldn't enable DSC on it and probably should fail atomic check if that is even requested. Assuming you did the right thing here, with the small typo fixes: Reviewed-by: Lyude Paul + + if (pos->dsc_enabled == enable) { + DRM_DEBUG_ATOMIC("[MST PORT:%p] DSC flag is already set to %d, returning %d VCPI slots\n", +port, enable, pos->vcpi); + vcpi = pos->vcpi; + } + + if (enable) { + vcpi = drm_dp_atomic_find_vcpi_slots(state, port->mgr, port, pbn, pbn_div); + DRM_DEBUG_ATOMIC("[MST PORT:%p] Enabling DSC flag, reallocating %d VCPI slots on the port\n", +port, vcpi); + if (vcpi < 0) + return -EINVAL; + } + + pos->dsc_enabled = enable; + + return vcpi; +} +EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc); /** * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an * atomic update is valid diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h index 0f813d6346aa..830c94b7f45d 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h @@ -502,6 +502,7 @@ struct drm_dp_payload { struct drm_dp_vcpi_allocation { struct drm_dp_mst_port *port; int vcpi; + bool 
dsc_enabled; struct list_head next; }; @@ -773,6 +774,10 @@ drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int pbn_div); +int dr
Re: [PATCH v9] drm/dp_mst: Add PBN calculation for DSC modes
On 12/4/19 11:45 AM, Jani Nikula wrote: On Tue, 03 Dec 2019, wrote: From: David Francis With DSC, bpp can be fractional in multiples of 1/16. Can be? I worry a bit that "bpp" can either be integer or fixed point depending on other variables. I admit I haven't followed up on this too much, but how widespread it is? It seems like something that is bound to be broken in subtle ways, when it won't even cross people's minds that bpp is fractional. Hi Jani, Target rate is expected to be a multiple of bits per pixel of incremental divider (which is 16), so incoming bpp variable is an integer and not a fixed point. It is expected, as it is described in the DP1.4a spec. It is also true that if driver is passing non DSC bit rate with dsc flag set to true it will cause issue on MST as there likely won't be enough bandwidth allocated. But you have pointed to an error, I shouldn't divide (64 * 1006) by 16 since it comes out to 18645,45 and after it is floored to an integer it will cause pbn to be lower than needed. Instead the numerator should rather be like this: mul_u32_u32(clock * bpp / 16, 64 * 1006) Mikita BR, Jani. Change drm_dp_calc_pbn_mode to reflect this, adding a new parameter bool dsc. 
When this parameter is true, treat the bpp parameter as having units not of bits per pixel, but 1/16 of a bit per pixel v2: Don't add separate function for this v3: Keep the calculation in a single equation v4: Update the tests in test-drm_dp_mst_helper.c Reviewed-by: Manasi Navare Reviewed-by: Lyude Paul Reviewed-by: Harry Wentland Signed-off-by: David Francis Signed-off-by: Mikita Lipski --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +- drivers/gpu/drm/drm_dp_mst_topology.c | 12 +++- drivers/gpu/drm/i915/display/intel_dp_mst.c| 3 ++- drivers/gpu/drm/nouveau/dispnv50/disp.c| 2 +- drivers/gpu/drm/radeon/radeon_dp_mst.c | 2 +- drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c | 10 ++ include/drm/drm_dp_mst_helper.h| 3 +-- 7 files changed, 23 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 455c51c38720 ..9fc03fc1017d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -4967,7 +4967,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, is_y420); bpp = convert_dc_color_depth_into_bpc(color_depth) * 3; clock = adjusted_mode->clock; - dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp); + dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false); } dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state, mst_mgr, diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index ae5809a1f19a..828b5eae529c 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -4342,10 +4342,11 @@ EXPORT_SYMBOL(drm_dp_check_act_status); * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode. * @clock: dot clock for the mode * @bpp: bpp for the mode. + * @dsc: DSC mode. 
If true, bpp has units of 1/16 of a bit per pixel * * This uses the formula in the spec to calculate the PBN value for a mode. */ -int drm_dp_calc_pbn_mode(int clock, int bpp) +int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc) { /* * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006 @@ -4356,7 +4357,16 @@ int drm_dp_calc_pbn_mode(int clock, int bpp) * peak_kbps *= (1006/1000) * peak_kbps *= (64/54) * peak_kbps *= 8convert to bytes +* +* If the bpp is in units of 1/16, further divide by 16. Put this +* factor in the numerator rather than the denominator to avoid +* integer overflow */ + + if(dsc) + return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006 / 16), + 8 * 54 * 1000 * 1000); + return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006), 8 * 54 * 1000 * 1000); } diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 03d1cba0b696..92be17711287 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -61,7 +61,8 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, crtc_state->pipe_bpp = bpp; crtc_state->pbn = drm_dp_calc_pbn_mo
Re: [PATCH] drm/dsc: Return unsigned long on compute offset
On 20/11/2019 05:17, Ville Syrjälä wrote: On Tue, Nov 19, 2019 at 04:11:43PM -0500, Mikita Lipski wrote: On 19/11/2019 16:09, Mikita Lipski wrote: On 19/11/2019 12:11, Ville Syrjälä wrote: On Tue, Nov 19, 2019 at 04:59:40PM +, Cornij, Nikola wrote: If you're going to make all of them the same, then u64, please. This is because I'm not sure if calculations require 64-bit at some stage. If it does then it's already broken. Someone should probably figure out what's actally needed instead of shooting ducks with an icbm. Sorry made a type below. Supposed to be "I don't think it is broken" I mean that it's broken if it actually needs u64 when it's currently using unsigned long. So u64 is either overkill or the code is currently broken. None of the calculations exceed u32, so u64 would be an overkill, since none of the variables in the structure exceed 16 bits. Therefore u32 is enough. I don't think it is not broken, cause I'm currently testing DSC. The patch I sent early simply fixes the error of comparing signed and unsigned variables. We can then submit a second patch addressing the issue of using unsigned long int instead of u32. Also, since the variables in drm_dsc_config structure are all of type u8 and u16, the calculation values shouldn't exceed the size of u32. Thanks -Original Message- From: Lipski, Mikita Sent: November 19, 2019 10:08 AM To: Ville Syrjälä ; Lipski, Mikita Cc: amd-...@lists.freedesktop.org; dri-devel@lists.freedesktop.org; Cornij, Nikola Subject: Re: [PATCH] drm/dsc: Return unsigned long on compute offset On 19/11/2019 09:56, Ville Syrjälä wrote: On Tue, Nov 19, 2019 at 09:45:26AM -0500, mikita.lip...@amd.com wrote: From: Mikita Lipski We shouldn't compare int with unsigned long to find the max value and since we are not expecting negative value returned from compute_offset we should make this function return unsigned long so we can compare the values when computing rc parameters. 
Why are there other unsigned longs in dsc parameter computation in the first place? I believe it was initially set to be unsigned long for variable consistency, when we ported intel_compute_rc_parameters into drm_dsc_compute_rc_parameters. But now that I look at it, we can actually just set them to u32 or u64, as nothing should exceed that. Cc: Nikola Cornij Cc: Harry Wentland Signed-off-by: Mikita Lipski --- drivers/gpu/drm/drm_dsc.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/drm_dsc.c b/drivers/gpu/drm/drm_dsc.c index 74f3527f567d..ec40604ab6a2 100644 --- a/drivers/gpu/drm/drm_dsc.c +++ b/drivers/gpu/drm/drm_dsc.c @@ -245,11 +245,11 @@ void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_payload, } EXPORT_SYMBOL(drm_dsc_pps_payload_pack); -static int compute_offset(struct drm_dsc_config *vdsc_cfg, int pixels_per_group, +static unsigned long compute_offset(struct drm_dsc_config *vdsc_cfg, int pixels_per_group, int groups_per_line, int grpcnt) { - int offset = 0; - int grpcnt_id = DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay, pixels_per_group); + unsigned long offset = 0; + unsigned long grpcnt_id = DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay, pixels_per_group); if (grpcnt <= grpcnt_id) offset = DIV_ROUND_UP(grpcnt * pixels_per_group * vdsc_cfg->bits_per_pixel, 16); -- 2.17.1 ___ dri-devel mailing list dri-devel@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/dri-devel -- Thanks, Mikita Lipski Software Engineer 2, AMD mikita.lip...@amd.com -- Thanks, Mikita Lipski Software Engineer 2, AMD mikita.lip...@amd.com -- Thanks, Mikita Lipski Software Engineer 2, AMD mikita.lip...@amd.com ___ dri-devel mailing list dri-devel@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/dri-devel
Re: [PATCH] drm/dsc: Return unsigned long on compute offset
On 19/11/2019 16:09, Mikita Lipski wrote: On 19/11/2019 12:11, Ville Syrjälä wrote: On Tue, Nov 19, 2019 at 04:59:40PM +, Cornij, Nikola wrote: If you're going to make all of them the same, then u64, please. This is because I'm not sure if calculations require 64-bit at some stage. If it does then it's already broken. Someone should probably figure out what's actally needed instead of shooting ducks with an icbm. Sorry made a type below. Supposed to be "I don't think it is broken" I don't think it is not broken, cause I'm currently testing DSC. The patch I sent early simply fixes the error of comparing signed and unsigned variables. We can then submit a second patch addressing the issue of using unsigned long int instead of u32. Also, since the variables in drm_dsc_config structure are all of type u8 and u16, the calculation values shouldn't exceed the size of u32. Thanks -Original Message- From: Lipski, Mikita Sent: November 19, 2019 10:08 AM To: Ville Syrjälä ; Lipski, Mikita Cc: amd-...@lists.freedesktop.org; dri-devel@lists.freedesktop.org; Cornij, Nikola Subject: Re: [PATCH] drm/dsc: Return unsigned long on compute offset On 19/11/2019 09:56, Ville Syrjälä wrote: On Tue, Nov 19, 2019 at 09:45:26AM -0500, mikita.lip...@amd.com wrote: From: Mikita Lipski We shouldn't compare int with unsigned long to find the max value and since we are not expecting negative value returned from compute_offset we should make this function return unsigned long so we can compare the values when computing rc parameters. Why are there other unsigned longs in dsc parameter computation in the first place? I believe it was initially set to be unsigned long for variable consistency, when we ported intel_compute_rc_parameters into drm_dsc_compute_rc_parameters. But now that I look at it, we can actually just set them to u32 or u64, as nothing should exceed that. 
Cc: Nikola Cornij Cc: Harry Wentland Signed-off-by: Mikita Lipski --- drivers/gpu/drm/drm_dsc.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/drm_dsc.c b/drivers/gpu/drm/drm_dsc.c index 74f3527f567d..ec40604ab6a2 100644 --- a/drivers/gpu/drm/drm_dsc.c +++ b/drivers/gpu/drm/drm_dsc.c @@ -245,11 +245,11 @@ void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_payload, } EXPORT_SYMBOL(drm_dsc_pps_payload_pack); -static int compute_offset(struct drm_dsc_config *vdsc_cfg, int pixels_per_group, +static unsigned long compute_offset(struct drm_dsc_config *vdsc_cfg, int pixels_per_group, int groups_per_line, int grpcnt) { - int offset = 0; - int grpcnt_id = DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay, pixels_per_group); + unsigned long offset = 0; + unsigned long grpcnt_id = DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay, pixels_per_group); if (grpcnt <= grpcnt_id) offset = DIV_ROUND_UP(grpcnt * pixels_per_group * vdsc_cfg->bits_per_pixel, 16); -- 2.17.1 ___ dri-devel mailing list dri-devel@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/dri-devel -- Thanks, Mikita Lipski Software Engineer 2, AMD mikita.lip...@amd.com -- Thanks, Mikita Lipski Software Engineer 2, AMD mikita.lip...@amd.com ___ dri-devel mailing list dri-devel@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/dri-devel
Re: [PATCH] drm/dsc: Return unsigned long on compute offset
On 19/11/2019 12:11, Ville Syrjälä wrote: On Tue, Nov 19, 2019 at 04:59:40PM +, Cornij, Nikola wrote: If you're going to make all of them the same, then u64, please. This is because I'm not sure if calculations require 64-bit at some stage. If it does then it's already broken. Someone should probably figure out what's actally needed instead of shooting ducks with an icbm. I don't think it is not broken, cause I'm currently testing DSC. The patch I sent early simply fixes the error of comparing signed and unsigned variables. We can then submit a second patch addressing the issue of using unsigned long int instead of u32. Also, since the variables in drm_dsc_config structure are all of type u8 and u16, the calculation values shouldn't exceed the size of u32. Thanks -Original Message- From: Lipski, Mikita Sent: November 19, 2019 10:08 AM To: Ville Syrjälä ; Lipski, Mikita Cc: amd-...@lists.freedesktop.org; dri-devel@lists.freedesktop.org; Cornij, Nikola Subject: Re: [PATCH] drm/dsc: Return unsigned long on compute offset On 19/11/2019 09:56, Ville Syrjälä wrote: On Tue, Nov 19, 2019 at 09:45:26AM -0500, mikita.lip...@amd.com wrote: From: Mikita Lipski We shouldn't compare int with unsigned long to find the max value and since we are not expecting negative value returned from compute_offset we should make this function return unsigned long so we can compare the values when computing rc parameters. Why are there other unsigned longs in dsc parameter computation in the first place? I believe it was initially set to be unsigned long for variable consistency, when we ported intel_compute_rc_parameters into drm_dsc_compute_rc_parameters. But now that I look at it, we can actually just set them to u32 or u64, as nothing should exceed that. 
Cc: Nikola Cornij Cc: Harry Wentland Signed-off-by: Mikita Lipski --- drivers/gpu/drm/drm_dsc.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/drm_dsc.c b/drivers/gpu/drm/drm_dsc.c index 74f3527f567d..ec40604ab6a2 100644 --- a/drivers/gpu/drm/drm_dsc.c +++ b/drivers/gpu/drm/drm_dsc.c @@ -245,11 +245,11 @@ void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_payload, } EXPORT_SYMBOL(drm_dsc_pps_payload_pack); -static int compute_offset(struct drm_dsc_config *vdsc_cfg, int pixels_per_group, +static unsigned long compute_offset(struct drm_dsc_config *vdsc_cfg, int pixels_per_group, int groups_per_line, int grpcnt) { - int offset = 0; - int grpcnt_id = DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay, pixels_per_group); + unsigned long offset = 0; + unsigned long grpcnt_id = DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay, pixels_per_group); if (grpcnt <= grpcnt_id) offset = DIV_ROUND_UP(grpcnt * pixels_per_group * vdsc_cfg->bits_per_pixel, 16); -- 2.17.1 ___ dri-devel mailing list dri-devel@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/dri-devel -- Thanks, Mikita Lipski Software Engineer 2, AMD mikita.lip...@amd.com -- Thanks, Mikita Lipski Software Engineer 2, AMD mikita.lip...@amd.com ___ dri-devel mailing list dri-devel@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/dri-devel
Re: [PATCH] drm/dsc: Return unsigned long on compute offset
On 19/11/2019 09:56, Ville Syrjälä wrote: On Tue, Nov 19, 2019 at 09:45:26AM -0500, mikita.lip...@amd.com wrote: From: Mikita Lipski We shouldn't compare int with unsigned long to find the max value and since we are not expecting negative value returned from compute_offset we should make this function return unsigned long so we can compare the values when computing rc parameters. Why are there other unsigned longs in dsc parameter computation in the first place? I believe it was initially set to be unsigned long for variable consistency, when we ported intel_compute_rc_parameters into drm_dsc_compute_rc_parameters. But now that I look at it, we can actually just set them to u32 or u64, as nothing should exceed that. Cc: Nikola Cornij Cc: Harry Wentland Signed-off-by: Mikita Lipski --- drivers/gpu/drm/drm_dsc.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/drm_dsc.c b/drivers/gpu/drm/drm_dsc.c index 74f3527f567d..ec40604ab6a2 100644 --- a/drivers/gpu/drm/drm_dsc.c +++ b/drivers/gpu/drm/drm_dsc.c @@ -245,11 +245,11 @@ void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_payload, } EXPORT_SYMBOL(drm_dsc_pps_payload_pack); -static int compute_offset(struct drm_dsc_config *vdsc_cfg, int pixels_per_group, +static unsigned long compute_offset(struct drm_dsc_config *vdsc_cfg, int pixels_per_group, int groups_per_line, int grpcnt) { - int offset = 0; - int grpcnt_id = DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay, pixels_per_group); + unsigned long offset = 0; + unsigned long grpcnt_id = DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay, pixels_per_group); if (grpcnt <= grpcnt_id) offset = DIV_ROUND_UP(grpcnt * pixels_per_group * vdsc_cfg->bits_per_pixel, 16); -- 2.17.1 ___ dri-devel mailing list dri-devel@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/dri-devel -- Thanks, Mikita Lipski Software Engineer 2, AMD mikita.lip...@amd.com ___ dri-devel mailing list dri-devel@lists.freedesktop.org 
https://lists.freedesktop.org/mailman/listinfo/dri-devel
Re: [PATCH] drm/amd/display: Fix unsigned variable compared to less than zero
Thanks for catching it! Reviewed-by: Mikita Lipski On 11.11.2019 12:25, Gustavo A. R. Silva wrote: Currently, the error check below on variable *vcpi_slots* is always false because it is a uint64_t type variable, hence, the values this variable can hold are never less than zero: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c: 4870 if (dm_new_connector_state->vcpi_slots < 0) { 4871 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots); 4872 return dm_new_connector_state->vcpi_slots; 4873 } Fix this by making *vcpi_slots* of int type Addresses-Coverity: 1487838 ("Unsigned compared against 0") Fixes: b4c578f08378 ("drm/amd/display: Add MST atomic routines") Signed-off-by: Gustavo A. R. Silva --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 6db07e9e33ab..a8fc90a927d6 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -403,7 +403,7 @@ struct dm_connector_state { bool underscan_enable; bool freesync_capable; uint8_t abm_level; - uint64_t vcpi_slots; + int vcpi_slots; uint64_t pbn; }; -- 2.23.0 -- Thanks, Mikita Lipski Software Engineer, AMD mikita.lip...@amd.com ___ dri-devel mailing list dri-devel@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/dri-devel
Re: [PATCH][next] drm/amd/display: fix dereference of pointer aconnector when it is null
Thanks! Reviewed-by: Mikita Lipski On 08.11.2019 9:38, Colin King wrote: > From: Colin Ian King > > Currently pointer aconnector is being dereferenced by the call to > to_dm_connector_state before it is being null checked, this could > lead to a null pointer dereference. Fix this by checking that > aconnector is not null before dereferencing it. > > Addresses-Coverity: ("Dereference before null check") > Fixes: 5133c6241d9c ("drm/amd/display: Add MST atomic routines") > Signed-off-by: Colin Ian King > --- > drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 5 ++--- > 1 file changed, 2 insertions(+), 3 deletions(-) > > diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c > b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c > index e3cda6984d28..72e677796a48 100644 > --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c > +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c > @@ -193,12 +193,11 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( >* that blocks before commit guaranteeing that the state >* is not gonna be swapped while still in use in commit tail */ > > - dm_conn_state = to_dm_connector_state(aconnector->base.state); > - > - > if (!aconnector || !aconnector->mst_port) > return false; > > + dm_conn_state = to_dm_connector_state(aconnector->base.state); > + > mst_mgr = &aconnector->mst_port->mst_mgr; > > if (!mst_mgr->mst_state) > -- Thanks, Mikita Lipski mikita.lip...@amd.com ___ dri-devel mailing list dri-devel@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/dri-devel
Re: [PATCH 01/13] drm/amd/display: Add MST atomic routines
On 31.10.2019 9:16, Kazlauskas, Nicholas wrote: > On 2019-10-30 3:24 p.m., mikita.lip...@amd.com wrote: >> From: Mikita Lipski >> >> - Adding encoder atomic check to find vcpi slots for a connector >> - Using DRM helper functions to calculate PBN >> - Adding connector atomic check to release vcpi slots if connector >> loses CRTC >> - Calculate PBN and VCPI slots only once during atomic >> check and store them on crtc_state to eliminate >> redundant calculation >> - Call drm_dp_mst_atomic_check to verify validity of MST topology >> during state atomic check >> >> v2: squashed previous 3 separate patches, removed DSC PBN calculation, >> and added PBN and VCPI slots properties to amdgpu connector >> >> v3: >> - moved vcpi_slots and pbn properties to dm_crtc_state and dc_stream_state >> - updates stream's vcpi_slots and pbn on commit >> - separated patch from the DSC MST series >> >> v4: >> - set vcpi_slots and pbn properties to dm_connector_state >> - copy porperties from connector state on to crtc state >> >> v5: >> - keep the pbn and vcpi values only on connnector state >> - added a void pointer to the stream state instead on two ints, >> because dc_stream_state is OS agnostic. Pointer points to the >> current dm_connector_state. >> >> v6: >> - Remove new param from stream >> >> v7: >> - Fix error with using max capable bpc >> >> Cc: Jun Lei >> Cc: Jerry Zuo >> Cc: Harry Wentland >> Cc: Nicholas Kazlauskas >> Cc: Lyude Paul >> Signed-off-by: Mikita Lipski > Reviewed-by: Nicholas Kazlauskas > > You might want to verify that this still works as you expect when > changing "max bpc" on an MST display. > > My understanding is that it'd still force a new modeset before encoder > atomic check is called so you *should* have the correct bpc value during > MST calculations. Thanks. It does still works with MST even if you change the mode to the lower resolution. The encoder atomic check is called during drm_atomic_helper_check_modeset so new modeset is already forced then. 
>> --- >>.../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 65 ++- >>.../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 2 + >>.../amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 51 --- >>.../display/amdgpu_dm/amdgpu_dm_mst_types.c | 32 + >>4 files changed, 109 insertions(+), 41 deletions(-) >> >> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c >> b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c >> index 48f5b43e2698..d75726013436 100644 >> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c >> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c >> @@ -4180,7 +4180,8 @@ void amdgpu_dm_connector_funcs_reset(struct >> drm_connector *connector) >> state->underscan_hborder = 0; >> state->underscan_vborder = 0; >> state->base.max_requested_bpc = 8; >> - >> +state->vcpi_slots = 0; >> +state->pbn = 0; >> if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) >> state->abm_level = amdgpu_dm_abm_level; >> >> @@ -4209,7 +4210,8 @@ amdgpu_dm_connector_atomic_duplicate_state(struct >> drm_connector *connector) >> new_state->underscan_enable = state->underscan_enable; >> new_state->underscan_hborder = state->underscan_hborder; >> new_state->underscan_vborder = state->underscan_vborder; >> - >> +new_state->vcpi_slots = state->vcpi_slots; >> +new_state->pbn = state->pbn; >> return _state->base; >>} >> >> @@ -4606,10 +4608,64 @@ static void dm_encoder_helper_disable(struct >> drm_encoder *encoder) >> >>} >> >> +static int convert_dc_color_depth_into_bpc (enum dc_color_depth >> display_color_depth) >> +{ >> +switch (display_color_depth) { >> +case COLOR_DEPTH_666: >> +return 6; >> +case COLOR_DEPTH_888: >> +return 8; >> +case COLOR_DEPTH_101010: >> +return 10; >> +case COLOR_DEPTH_121212: >> +return 12; >> +case COLOR_DEPTH_141414: >> +return 14; >> +case COLOR_DEPTH_161616: >> +return 16; >> +default: >> +break; &g
Re: [PATCH 13/14] drm/amd/display: Recalculate VCPI slots for new DSC connectors
like this in the future and continually building more tech debt for >> ourselves. >> >> Please note as well: if anything I've asked for is confusing, or you don't >> understand what I'm asking or looking for I am more then willing to help >> explain things and help out as best as I can. I understand that a lot of the >> developers working on DRM at AMD may have more experience in Windows and Mac >> land and as a result, trying to get used to the way that we do things in the >> Linux kernel can be a confusing endeavor. I'm more then happy to help out >> with >> this wherever I can, all you need to do is ask. Asking a few questions about >> something you aren't sure you understand can save both of us a lot of time, >> and avoid having to go through this many patch respins. >> >> In the mean time, I'd be willing to look at what patches from this series >> that >> have already been reviewed which could be pushed to drm-misc or friends in >> the >> mean time to speed things up a bit. >> >> On Tue, 2019-10-01 at 12:17 -0400, mikita.lip...@amd.com wrote: >>> From: Mikita Lipski >>> >>> Since for DSC MST connector's PBN is claculated differently >>> due to compression, we have to recalculate both PBN and >>> VCPI slots for that connector. >>> >>> This patch proposes to use similair logic as in >>> dm_encoder_helper_atomic_chek, but since we do not know which >>> connectors will have DSC enabled we have to recalculate PBN only >>> after that's determined, which is done in >>> compute_mst_dsc_configs_for_state. 
>>> >>> Cc: Jerry Zuo >>> Cc: Harry Wentland >>> Cc: Lyude Paul >>> Signed-off-by: Mikita Lipski >>> --- >>> .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 64 +-- >>> .../amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 6 -- >>> 2 files changed, 59 insertions(+), 11 deletions(-) >>> >>> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c >>> b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c >>> index 81e30918f9ec..7501ce2233ed 100644 >>> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c >>> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c >>> @@ -4569,6 +4569,27 @@ static void dm_encoder_helper_disable(struct >>> drm_encoder *encoder) >>> >>> } >>> >>> +static int convert_dc_color_depth_into_bpc (enum dc_color_depth >>> display_color_depth) >>> +{ >>> + switch (display_color_depth) { >>> + case COLOR_DEPTH_666: >>> + return 6; >>> + case COLOR_DEPTH_888: >>> + return 8; >>> + case COLOR_DEPTH_101010: >>> + return 10; >>> + case COLOR_DEPTH_121212: >>> + return 12; >>> + case COLOR_DEPTH_141414: >>> + return 14; >>> + case COLOR_DEPTH_161616: >>> + return 16; >>> + default: >>> + break; >>> + } >>> + return 0; >>> +} >>> + >>> static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, >>> struct drm_crtc_state *crtc_state, >>> struct drm_connector_state >>> *conn_state) >>> @@ -4616,6 +4637,36 @@ const struct drm_encoder_helper_funcs >>> amdgpu_dm_encoder_helper_funcs = { >>> .atomic_check = dm_encoder_helper_atomic_check >>> }; >>> >>> +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT >>> +static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state >>> *state, >>> + struct dc_state *dc_state) >>> +{ >>> + struct dc_stream_state *stream; >>> + struct amdgpu_dm_connector *aconnector; >>> + int i, clock = 0, bpp = 0; >>> + >>> + for (i = 0; i < dc_state->stream_count; i++) { >>> + stream = dc_state->streams[i]; >>> + aconnector = (struct amdgpu_dm_connector *)stream- >>>> dm_stream_context; >>> + >>> + if (stream && 
stream->timing.flags.DSC == 1) { >>> + bpp = convert_dc_color_depth_into_bpc(stream- >>>> timing.display_color_depth)* 3; >>> + clock = stream->timing.pix_clk_100hz / 10; >&g
Re: [PATCH 15/15] drm/amd/display: Trigger modesets on MST DSC connectors
{ 0 }; >> + >> +for_each_new_connector_in_state(state, connector, conn_state, i) { >> +if (conn_state->crtc != crtc) >> +continue; >> + >> +aconnector = to_amdgpu_dm_connector(connector); >> +if (!aconnector->port) >> +aconnector = NULL; >> +else >> +break; >> +} >> + >> +if (!aconnector) >> +return 0; >> + >> +i = 0; >> +drm_connector_list_iter_begin(state->dev, _iter); >> +drm_for_each_connector_iter(connector, _iter) { >> +if (!connector->state || !connector->state->crtc) >> +continue; >> + >> +aconnector_to_add = to_amdgpu_dm_connector(connector); >> +if (!aconnector_to_add->port) >> +continue; >> + >> +if (aconnector_to_add->port->mgr != aconnector->port->mgr) >> +continue; >> + >> +if (!aconnector_to_add->dc_sink) >> +continue; >> + >> +if (!aconnector_to_add->dc_sink- >>> sink_dsc_caps.dsc_dec_caps.is_dsc_supported) >> +continue; >> + >> +if (i >= AMDGPU_MAX_CRTCS) >> +continue; >> + >> +crtcs_affected[i] = connector->state->crtc; >> +i++; >> +} >> +drm_connector_list_iter_end(_iter); >> + >> +for (j = 0; j < i; j++) { >> +new_crtc_state = drm_atomic_get_crtc_state(state, >> crtcs_affected[j]); >> +if (IS_ERR(new_crtc_state)) >> +return PTR_ERR(new_crtc_state); >> + >> +new_crtc_state->mode_changed = true; >> +} >> + >> +return 0; >> + >> +} >> +#endif >> + >> static void get_freesync_config_for_crtc( >> struct dm_crtc_state *new_crtc_state, >> struct dm_connector_state *new_con_state) >> @@ -7388,6 +7456,17 @@ static int amdgpu_dm_atomic_check(struct drm_device >> *dev, >> if (ret) >> goto fail; >> >> +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT >> +if (adev->asic_type >= CHIP_NAVI10) { >> +for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, >> new_crtc_state, i) { >> +if (drm_atomic_crtc_needs_modeset(new_crtc_state)) { >> +ret = add_affected_mst_dsc_crtcs(state, crtc); >> +if (ret) >> +goto fail; >> +} >> +} >> +} >> +#endif >> for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, >> new_crtc_state, i) { >> if 
(!drm_atomic_crtc_needs_modeset(new_crtc_state) && >> !new_crtc_state->color_mgmt_changed && -- Thanks, Mikita Lipski Software Engineer 2, AMD mikita.lip...@amd.com ___ dri-devel mailing list dri-devel@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/dri-devel
Re: [PATCH 05/14] drm/amd/display: Enable SST DSC in DM
Tested-by: Mikita Lipski Mikita Lipski On 2019-08-19 11:50 a.m., David Francis wrote: > In create_stream_for_sink, check for SST DP connectors > > Parse DSC caps to DC format, then, if DSC is supported, > compute the config > > DSC hardware will be programmed by dc_commit_state > > Cc: Mikita Lipski > Signed-off-by: David Francis > --- > .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 32 --- > .../amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 4 ++- > 2 files changed, 24 insertions(+), 12 deletions(-) > > diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c > b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c > index 911fe78b47c1..84249057e181 100644 > --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c > +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c > @@ -3576,6 +3576,10 @@ create_stream_for_sink(struct amdgpu_dm_connector > *aconnector, > bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false; > int mode_refresh; > int preferred_refresh = 0; > +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT > + struct dsc_dec_dpcd_caps dsc_caps; > + uint32_t link_bandwidth_kbps; > +#endif > > struct dc_sink *sink = NULL; > if (aconnector == NULL) { > @@ -3648,17 +3652,23 @@ create_stream_for_sink(struct amdgpu_dm_connector > *aconnector, > , >base, con_state, old_stream); > > #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT > - /* stream->timing.flags.DSC = 0; */ > -/* */ > - /* if (aconnector->dc_link && */ > - /* aconnector->dc_link->connector_signal == > SIGNAL_TYPE_DISPLAY_PORT #<{(|&& */ > - /* > aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.is_dsc_supported|)}>#) > */ > - /* if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc, */ > - /* >dc_link->dpcd_caps.dsc_caps, */ > - /* dc_link_bandwidth_kbps(aconnector->dc_link, > dc_link_get_link_cap(aconnector->dc_link)), */ > - /* >timing, */ > - /* >timing.dsc_cfg)) */ > - /* stream->timing.flags.DSC = 1; */ > + stream->timing.flags.DSC = 0; > + > + if (aconnector->dc_link && sink->sink_signal == > 
SIGNAL_TYPE_DISPLAY_PORT) { > + > dc_dsc_parse_dsc_dpcd(aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, > + > aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw, > + _caps); > + link_bandwidth_kbps = > dc_link_bandwidth_kbps(aconnector->dc_link, > + > dc_link_get_link_cap(aconnector->dc_link)); > + > + if (dsc_caps.is_dsc_supported) > + if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc, > + _caps, > + link_bandwidth_kbps, > + >timing, > + >timing.dsc_cfg)) > + stream->timing.flags.DSC = 1; > + } > #endif > > update_stream_scaling_settings(, dm_state, stream); > diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c > b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c > index 7cf0573ab25f..5f2c315b18ba 100644 > --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c > +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c > @@ -549,7 +549,9 @@ bool dm_helpers_dp_write_dsc_enable( > bool enable > ) > { > - return false; > + uint8_t enable_dsc = enable ? 1 : 0; > + > + return dm_helpers_dp_write_dpcd(ctx, stream->sink->link, DP_DSC_ENABLE, > _dsc, 1); > } > #endif > > ___ dri-devel mailing list dri-devel@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/dri-devel