Re: [PATCH 02/19] drm/dp: Add support for DP tunneling

2024-02-08 Thread Ville Syrjälä
On Wed, Feb 07, 2024 at 11:02:27PM +0200, Imre Deak wrote:
> On Wed, Feb 07, 2024 at 10:48:53PM +0200, Imre Deak wrote:
> > On Wed, Feb 07, 2024 at 10:02:18PM +0200, Ville Syrjälä wrote:
> > > > [...]
> > > > +static int
> > > > +drm_dp_tunnel_atomic_check_group_bw(struct drm_dp_tunnel_group_state *new_group_state,
> > > > +   u32 *failed_stream_mask)
> > > > +{
> > > > +   struct drm_dp_tunnel_group *group = to_group(new_group_state->base.obj);
> > > > +   struct drm_dp_tunnel_state *new_tunnel_state;
> > > > +   u32 group_stream_mask = 0;
> > > > +   int group_bw = 0;
> > > > +
> > > > +   for_each_tunnel_state(new_group_state, new_tunnel_state) {
> > > > +   struct drm_dp_tunnel *tunnel = new_tunnel_state->tunnel_ref.tunnel;
> > > > +   int max_dprx_bw = get_max_dprx_bw(tunnel);
> > > > +   int tunnel_bw = drm_dp_tunnel_atomic_get_tunnel_bw(new_tunnel_state);
> > > > +
> > > > +   tun_dbg(tunnel,
> > > > +   "%sRequired %d/%d Mb/s total for tunnel.\n",
> > > > +   tunnel_bw > max_dprx_bw ? "Not enough BW: " : "",
> > > > +   DPTUN_BW_ARG(tunnel_bw),
> > > > +   DPTUN_BW_ARG(max_dprx_bw));
> > > > +
> > > > +   if (tunnel_bw > max_dprx_bw) {
> > > 
> > > I'm a bit confused why we're checking this here. Aren't we already
> > > checking this somewhere else?
> > 
> > Ah, yes this should be checked already by the encoder compute config +
> > the MST link BW check. It can be removed, thanks.
> 
> Though neither of those is guaranteed for drivers in general, so
> shouldn't it be here still?

I suppose there isn't any real harm in doing it here too.
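
For reference, a minimal sketch of the two levels of checking being
discussed (purely illustrative; the names are invented for this example
and are not i915 or helper code):

static bool stream_fits_dprx(int stream_bw, int max_dprx_bw)
{
	/* normally enforced by the encoder compute config / MST BW check */
	return stream_bw <= max_dprx_bw;
}

static bool tunnels_fit_group(int group_bw, int group_available_bw)
{
	/* the part only this helper can enforce: the shared group limit */
	return group_bw <= group_available_bw;
}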

> 
> > > > +   *failed_stream_mask = new_tunnel_state->stream_mask;
> > > > +   return -ENOSPC;
> > > > +   }
> > > > +
> > > > +   group_bw += min(roundup(tunnel_bw, tunnel->bw_granularity),
> > > > +   max_dprx_bw);
> > > > +   group_stream_mask |= new_tunnel_state->stream_mask;
> > > > +   }
> > > > +
> > > > +   tun_grp_dbg(group,
> > > > +   "%sRequired %d/%d Mb/s total for tunnel group.\n",
> > > > +   group_bw > group->available_bw ? "Not enough BW: " : "",
> > > > +   DPTUN_BW_ARG(group_bw),
> > > > +   DPTUN_BW_ARG(group->available_bw));
> > > > +
> > > > +   if (group_bw > group->available_bw) {
> > > > +   *failed_stream_mask = group_stream_mask;
> > > > +   return -ENOSPC;
> > > > +   }
> > > > +
> > > > +   return 0;
> > > > +}
> > > > +

-- 
Ville Syrjälä
Intel


Re: [PATCH 02/19] drm/dp: Add support for DP tunneling

2024-02-07 Thread Imre Deak
On Wed, Feb 07, 2024 at 10:48:43PM +0200, Imre Deak wrote:
> On Wed, Feb 07, 2024 at 10:02:18PM +0200, Ville Syrjälä wrote:
> > On Tue, Jan 23, 2024 at 12:28:33PM +0200, Imre Deak wrote:
> > > + [...]
> > > +static int group_allocated_bw(struct drm_dp_tunnel_group *group)
> > > +{
> > > + struct drm_dp_tunnel *tunnel;
> > > + int group_allocated_bw = 0;
> > > +
> > > + for_each_tunnel_in_group(group, tunnel) {
> > > + if (check_tunnel(tunnel) == 0 &&
> > > + tunnel->bw_alloc_enabled)
> > > + group_allocated_bw += tunnel->allocated_bw;
> > > + }
> > > +
> > > + return group_allocated_bw;
> > > +}
> > > +
> > > +static int calc_group_available_bw(const struct drm_dp_tunnel *tunnel)
> > > +{
> > > + return group_allocated_bw(tunnel->group) -
> > > +tunnel->allocated_bw +
> > > +tunnel->estimated_bw;
> > 
> > Hmm. So the estimated_bw=actually_free_bw + tunnel->allocated_bw?
> 
> Yes.
> 
> > Ie. how much bw might be available for this tunnel right now?
> 
> Correct.
> 
> > And here we're trying to deduce the total bandwidth available by
> > adding in the allocated_bw of all the other tunnels in the group?
> 
> Yes.
> 
> > Rather weird that we can't just get that number directly...
> 
> It is. Imo this could be simply communicated via a DPCD register
> dedicated for this. Perhaps adding this should be requested from TBT
> architects.

One reason for this design could be that a host/driver may not see all
the tunnels in the group. In that case the tunnel's currently usable BW
will be only its estimated_bw (that is, it can't use the BW already
allocated by other tunnels in the group until those are released by the
other host/driver).
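
For illustration, the relation being computed (a sketch mirroring the
quoted code, with made-up numbers in the same BW units):

/*
 * Example: tunnels A and B share a group; A has 8000 allocated, B has
 * 4000 allocated and 8000 is still free on the group. A then reads
 * DP_ESTIMATED_BW as free + own allocation = 8000 + 8000 = 16000, so
 *
 *   available = (8000 + 4000) - 8000 + 16000 = 20000
 *
 * i.e. the total BW usable by all the tunnels this driver can see.
 */
static int sketch_group_available_bw(const struct drm_dp_tunnel *tunnel)
{
	int other_tunnels_allocated_bw =
		group_allocated_bw(tunnel->group) - tunnel->allocated_bw;

	return other_tunnels_allocated_bw + tunnel->estimated_bw;
}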

> I assume this could also use a code comment.
> 
> > > +}
> > > +
> > > +static int update_group_available_bw(struct drm_dp_tunnel *tunnel,
> > > +  const struct drm_dp_tunnel_regs *regs)
> > > +{
> > > + struct drm_dp_tunnel *tunnel_iter;
> > > + int group_available_bw;
> > > + bool changed;
> > > +
> > > + tunnel->estimated_bw = tunnel_reg(regs, DP_ESTIMATED_BW) * tunnel->bw_granularity;
> > > +
> > > + if (calc_group_available_bw(tunnel) == tunnel->group->available_bw)
> > > + return 0;
> > > +
> > > + for_each_tunnel_in_group(tunnel->group, tunnel_iter) {
> > > + int err;
> > > +
> > > + if (tunnel_iter == tunnel)
> > > + continue;
> > > +
> > > + if (check_tunnel(tunnel_iter) != 0 ||
> > > + !tunnel_iter->bw_alloc_enabled)
> > > + continue;
> > > +
> > > + err = drm_dp_dpcd_probe(tunnel_iter->aux, DP_DPCD_REV);
> > > + if (err) {
> > > + tun_dbg(tunnel_iter,
> > > + "Probe failed, assume disconnected (err %pe)\n",
> > > + ERR_PTR(err));
> > > + drm_dp_tunnel_set_io_error(tunnel_iter);
> > > + }
> > > + }
> > > +
> > > + group_available_bw = calc_group_available_bw(tunnel);
> > > +
> > > + tun_dbg(tunnel, "Updated group available BW: %d->%d\n",
> > > + DPTUN_BW_ARG(tunnel->group->available_bw),
> > > + DPTUN_BW_ARG(group_available_bw));
> > > +
> > > + changed = tunnel->group->available_bw != group_available_bw;
> > > +
> > > + tunnel->group->available_bw = group_available_bw;
> > > +
> > > + return changed ? 1 : 0;
> > > +}
> > > +
> > > +static int set_bw_alloc_mode(struct drm_dp_tunnel *tunnel, bool enable)
> > > +{
> > > + u8 mask = DP_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE | 
> > > DP_UNMASK_BW_ALLOCATION_IRQ;
> > > + u8 val;
> > > +
> > > + if (drm_dp_dpcd_readb(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, &val) < 0)
> > > + goto out_err;
> > > +
> > > + if (enable)
> > > + val |= mask;
> > > + else
> > > + val &= ~mask;
> > > +
> > > + if (drm_dp_dpcd_writeb(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, val) < 0)
> > > + goto out_err;
> > > +
> > > + tunnel->bw_alloc_enabled = enable;
> > > +
> > > + return 0;
> > > +
> > > +out_err:
> > > + drm_dp_tunnel_set_io_error(tunnel);
> > > +
> > > + return -EIO;
> > > +}
> > > +
> > > +/**
> > > + * drm_dp_tunnel_enable_bw_alloc: Enable DP tunnel BW allocation mode
> > > + * @tunnel: Tunnel object
> > > + *
> > > + * Enable the DP tunnel BW allocation mode on @tunnel if it supports it.
> > > + *
> > > + * Returns 0 in case of success, negative error code otherwise.
> > > + */
> > > +int drm_dp_tunnel_enable_bw_alloc(struct drm_dp_tunnel *tunnel)
> > > +{
> > > + struct drm_dp_tunnel_regs regs;
> > > + int err = check_tunnel(tunnel);
> > > +
> > > + if (err)
> > > + return err;
> > > +
> > > + if (!tunnel->bw_alloc_supported)
> > > + return -EOPNOTSUPP;
> > > +
> > > + if (!tunnel_group_id(tunnel->group->drv_group_id))
> > > + return -EINVAL;
> > > +
> > > + err = set_bw_alloc_mode(tunnel, true);
> > > + if (err)
> > > + goto out;
> > > +
> > > + err = 

Re: [PATCH 02/19] drm/dp: Add support for DP tunneling

2024-02-07 Thread Imre Deak
On Wed, Feb 07, 2024 at 10:48:53PM +0200, Imre Deak wrote:
> On Wed, Feb 07, 2024 at 10:02:18PM +0200, Ville Syrjälä wrote:
> > > [...]
> > > +static int
> > > +drm_dp_tunnel_atomic_check_group_bw(struct drm_dp_tunnel_group_state *new_group_state,
> > > + u32 *failed_stream_mask)
> > > +{
> > > + struct drm_dp_tunnel_group *group = to_group(new_group_state->base.obj);
> > > + struct drm_dp_tunnel_state *new_tunnel_state;
> > > + u32 group_stream_mask = 0;
> > > + int group_bw = 0;
> > > +
> > > + for_each_tunnel_state(new_group_state, new_tunnel_state) {
> > > + struct drm_dp_tunnel *tunnel = new_tunnel_state->tunnel_ref.tunnel;
> > > + int max_dprx_bw = get_max_dprx_bw(tunnel);
> > > + int tunnel_bw = drm_dp_tunnel_atomic_get_tunnel_bw(new_tunnel_state);
> > > +
> > > + tun_dbg(tunnel,
> > > + "%sRequired %d/%d Mb/s total for tunnel.\n",
> > > + tunnel_bw > max_dprx_bw ? "Not enough BW: " : "",
> > > + DPTUN_BW_ARG(tunnel_bw),
> > > + DPTUN_BW_ARG(max_dprx_bw));
> > > +
> > > + if (tunnel_bw > max_dprx_bw) {
> > 
> > I'm a bit confused why we're checking this here. Aren't we already
> > checking this somewhere else?
> 
> Ah, yes this should be checked already by the encoder compute config +
> the MST link BW check. It can be removed, thanks.

Though neither of those is guaranteed for drivers in general, so
shouldn't it be here still?

> > > + *failed_stream_mask = new_tunnel_state->stream_mask;
> > > + return -ENOSPC;
> > > + }
> > > +
> > > + group_bw += min(roundup(tunnel_bw, tunnel->bw_granularity),
> > > + max_dprx_bw);
> > > + group_stream_mask |= new_tunnel_state->stream_mask;
> > > + }
> > > +
> > > + tun_grp_dbg(group,
> > > + "%sRequired %d/%d Mb/s total for tunnel group.\n",
> > > + group_bw > group->available_bw ? "Not enough BW: " : "",
> > > + DPTUN_BW_ARG(group_bw),
> > > + DPTUN_BW_ARG(group->available_bw));
> > > +
> > > + if (group_bw > group->available_bw) {
> > > + *failed_stream_mask = group_stream_mask;
> > > + return -ENOSPC;
> > > + }
> > > +
> > > + return 0;
> > > +}
> > > +


Re: [PATCH 02/19] drm/dp: Add support for DP tunneling

2024-02-07 Thread Imre Deak
On Wed, Feb 07, 2024 at 10:02:18PM +0200, Ville Syrjälä wrote:
> On Tue, Jan 23, 2024 at 12:28:33PM +0200, Imre Deak wrote:
> > +static char yes_no_chr(int val)
> > +{
> > +   return val ? 'Y' : 'N';
> > +}
> 
> We have str_yes_no() already.

Ok, will use this.
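
For reference, a sketch of one of the prints converted to str_yes_no()
from <linux/string_helpers.h> (illustration only; the format specifier
changes from %c to %s and yes_no_chr() goes away):

#include <linux/string_helpers.h>

	tun_dbg(tunnel,
		"BW alloc support has changed %s -> %s\n",
		str_yes_no(tunnel->bw_alloc_supported),
		str_yes_no(tunnel_reg_bw_alloc_supported(regs)));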

> > +
> > +#define SKIP_DPRX_CAPS_CHECK   BIT(0)
> > +#define ALLOW_ALLOCATED_BW_CHANGE  BIT(1)
> > +
> > +static bool tunnel_regs_are_valid(struct drm_dp_tunnel_mgr *mgr,
> > + const struct drm_dp_tunnel_regs *regs,
> > + unsigned int flags)
> > +{
> > +   int drv_group_id = tunnel_reg_drv_group_id(regs);
> > +   bool check_dprx = !(flags & SKIP_DPRX_CAPS_CHECK);
> > +   bool ret = true;
> > +
> > +   if (!tunnel_reg_bw_alloc_supported(regs)) {
> > +   if (tunnel_group_id(drv_group_id)) {
> > +   drm_dbg_kms(mgr->dev,
> > +   "DPTUN: A non-zero group ID is only allowed with BWA support\n");
> > +   ret = false;
> > +   }
> > +
> > +   if (tunnel_reg(regs, DP_ALLOCATED_BW)) {
> > +   drm_dbg_kms(mgr->dev,
> > +   "DPTUN: BW is allocated without BWA support\n");
> > +   ret = false;
> > +   }
> > +
> > +   return ret;
> > +   }
> > +
> > +   if (!tunnel_group_id(drv_group_id)) {
> > +   drm_dbg_kms(mgr->dev,
> > +   "DPTUN: BWA support requires a non-zero group ID\n");
> > +   ret = false;
> > +   }
> > +
> > +   if (check_dprx && hweight8(tunnel_reg_max_dprx_lane_count(regs)) != 1) {
> > +   drm_dbg_kms(mgr->dev,
> > +   "DPTUN: Invalid DPRX lane count: %d\n",
> > +   tunnel_reg_max_dprx_lane_count(regs));
> > +
> > +   ret = false;
> > +   }
> > +
> > +   if (check_dprx && !tunnel_reg_max_dprx_rate(regs)) {
> > +   drm_dbg_kms(mgr->dev,
> > +   "DPTUN: DPRX rate is 0\n");
> > +
> > +   ret = false;
> > +   }
> > +
> > +   if (tunnel_reg(regs, DP_ALLOCATED_BW) > tunnel_reg(regs, DP_ESTIMATED_BW)) {
> > +   drm_dbg_kms(mgr->dev,
> > +   "DPTUN: Allocated BW %d > estimated BW %d Mb/s\n",
> > +   DPTUN_BW_ARG(tunnel_reg(regs, DP_ALLOCATED_BW) *
> > +tunnel_reg_bw_granularity(regs)),
> > +   DPTUN_BW_ARG(tunnel_reg(regs, DP_ESTIMATED_BW) *
> > +tunnel_reg_bw_granularity(regs)));
> > +
> > +   ret = false;
> > +   }
> > +
> > +   return ret;
> > +}
> > +
> > +static bool tunnel_info_changes_are_valid(struct drm_dp_tunnel *tunnel,
> > + const struct drm_dp_tunnel_regs *regs,
> > + unsigned int flags)
> > +{
> > +   int new_drv_group_id = tunnel_reg_drv_group_id(regs);
> > +   bool ret = true;
> > +
> > +   if (tunnel->bw_alloc_supported != tunnel_reg_bw_alloc_supported(regs)) {
> > +   tun_dbg(tunnel,
> > +   "BW alloc support has changed %c -> %c\n",
> > +   yes_no_chr(tunnel->bw_alloc_supported),
> > +   yes_no_chr(tunnel_reg_bw_alloc_supported(regs)));
> > +
> > +   ret = false;
> > +   }
> > +
> > +   if (tunnel->group->drv_group_id != new_drv_group_id) {
> > +   tun_dbg(tunnel,
> > +   "Driver/group ID has changed %d:%d:* -> %d:%d:*\n",
> > +   tunnel_group_drv_id(tunnel->group->drv_group_id),
> > +   tunnel_group_id(tunnel->group->drv_group_id),
> > +   tunnel_group_drv_id(new_drv_group_id),
> > +   tunnel_group_id(new_drv_group_id));
> > +
> > +   ret = false;
> > +   }
> > +
> > +   if (!tunnel->bw_alloc_supported)
> > +   return ret;
> > +
> > +   if (tunnel->bw_granularity != tunnel_reg_bw_granularity(regs)) {
> > +   tun_dbg(tunnel,
> > +   "BW granularity has changed: %d -> %d Mb/s\n",
> > +   DPTUN_BW_ARG(tunnel->bw_granularity),
> > +   DPTUN_BW_ARG(tunnel_reg_bw_granularity(regs)));
> > +
> > +   ret = false;
> > +   }
> > +
> > +   /*
> > +* On some devices at least the BW alloc mode enabled status is always
> > +* reported as 0, so skip checking that here.
> > +*/
> 
> So it's reported as supported and we enable it, but it's never
> reported back as being enabled?

Yes, at least using an engineering TBT (DP adapter) FW. I'll check if
this is fixed already on released platforms/FWs.

> > +
> > +   if (!(flags & ALLOW_ALLOCATED_BW_CHANGE) &&
> > +   tunnel->allocated_bw !=
> > +   tunnel_reg(regs, DP_ALLOCATED_BW) * tunnel->bw_granularity) {
> > +   tun_dbg(tunnel,
> > +   "Allocated BW has changed: %d -> %d Mb/s\n",
> > +   DPTUN_BW_ARG(tunnel->allocated_bw),
> > +  

Re: [PATCH 02/19] drm/dp: Add support for DP tunneling

2024-02-07 Thread Ville Syrjälä
On Tue, Jan 23, 2024 at 12:28:33PM +0200, Imre Deak wrote:
> +static char yes_no_chr(int val)
> +{
> + return val ? 'Y' : 'N';
> +}

We have str_yes_no() already.

> +
> +#define SKIP_DPRX_CAPS_CHECK BIT(0)
> +#define ALLOW_ALLOCATED_BW_CHANGE  BIT(1)
> +
> +static bool tunnel_regs_are_valid(struct drm_dp_tunnel_mgr *mgr,
> +   const struct drm_dp_tunnel_regs *regs,
> +   unsigned int flags)
> +{
> + int drv_group_id = tunnel_reg_drv_group_id(regs);
> + bool check_dprx = !(flags & SKIP_DPRX_CAPS_CHECK);
> + bool ret = true;
> +
> + if (!tunnel_reg_bw_alloc_supported(regs)) {
> + if (tunnel_group_id(drv_group_id)) {
> + drm_dbg_kms(mgr->dev,
> + "DPTUN: A non-zero group ID is only allowed with BWA support\n");
> + ret = false;
> + }
> +
> + if (tunnel_reg(regs, DP_ALLOCATED_BW)) {
> + drm_dbg_kms(mgr->dev,
> + "DPTUN: BW is allocated without BWA support\n");
> + ret = false;
> + }
> +
> + return ret;
> + }
> +
> + if (!tunnel_group_id(drv_group_id)) {
> + drm_dbg_kms(mgr->dev,
> + "DPTUN: BWA support requires a non-zero group ID\n");
> + ret = false;
> + }
> +
> + if (check_dprx && hweight8(tunnel_reg_max_dprx_lane_count(regs)) != 1) {
> + drm_dbg_kms(mgr->dev,
> + "DPTUN: Invalid DPRX lane count: %d\n",
> + tunnel_reg_max_dprx_lane_count(regs));
> +
> + ret = false;
> + }
> +
> + if (check_dprx && !tunnel_reg_max_dprx_rate(regs)) {
> + drm_dbg_kms(mgr->dev,
> + "DPTUN: DPRX rate is 0\n");
> +
> + ret = false;
> + }
> +
> + if (tunnel_reg(regs, DP_ALLOCATED_BW) > tunnel_reg(regs, DP_ESTIMATED_BW)) {
> + drm_dbg_kms(mgr->dev,
> + "DPTUN: Allocated BW %d > estimated BW %d Mb/s\n",
> + DPTUN_BW_ARG(tunnel_reg(regs, DP_ALLOCATED_BW) *
> +  tunnel_reg_bw_granularity(regs)),
> + DPTUN_BW_ARG(tunnel_reg(regs, DP_ESTIMATED_BW) *
> +  tunnel_reg_bw_granularity(regs)));
> +
> + ret = false;
> + }
> +
> + return ret;
> +}
> +
> +static bool tunnel_info_changes_are_valid(struct drm_dp_tunnel *tunnel,
> +   const struct drm_dp_tunnel_regs *regs,
> +   unsigned int flags)
> +{
> + int new_drv_group_id = tunnel_reg_drv_group_id(regs);
> + bool ret = true;
> +
> + if (tunnel->bw_alloc_supported != tunnel_reg_bw_alloc_supported(regs)) {
> + tun_dbg(tunnel,
> + "BW alloc support has changed %c -> %c\n",
> + yes_no_chr(tunnel->bw_alloc_supported),
> + yes_no_chr(tunnel_reg_bw_alloc_supported(regs)));
> +
> + ret = false;
> + }
> +
> + if (tunnel->group->drv_group_id != new_drv_group_id) {
> + tun_dbg(tunnel,
> + "Driver/group ID has changed %d:%d:* -> %d:%d:*\n",
> + tunnel_group_drv_id(tunnel->group->drv_group_id),
> + tunnel_group_id(tunnel->group->drv_group_id),
> + tunnel_group_drv_id(new_drv_group_id),
> + tunnel_group_id(new_drv_group_id));
> +
> + ret = false;
> + }
> +
> + if (!tunnel->bw_alloc_supported)
> + return ret;
> +
> + if (tunnel->bw_granularity != tunnel_reg_bw_granularity(regs)) {
> + tun_dbg(tunnel,
> + "BW granularity has changed: %d -> %d Mb/s\n",
> + DPTUN_BW_ARG(tunnel->bw_granularity),
> + DPTUN_BW_ARG(tunnel_reg_bw_granularity(regs)));
> +
> + ret = false;
> + }
> +
> + /*
> +  * On some devices at least the BW alloc mode enabled status is always
> +  * reported as 0, so skip checking that here.
> +  */

So it's reported as supported and we enable it, but it's never
reported back as being enabled?

> +
> + if (!(flags & ALLOW_ALLOCATED_BW_CHANGE) &&
> + tunnel->allocated_bw !=
> + tunnel_reg(regs, DP_ALLOCATED_BW) * tunnel->bw_granularity) {
> + tun_dbg(tunnel,
> + "Allocated BW has changed: %d -> %d Mb/s\n",
> + DPTUN_BW_ARG(tunnel->allocated_bw),
> + DPTUN_BW_ARG(tunnel_reg(regs, DP_ALLOCATED_BW) * tunnel->bw_granularity));
> +
> + ret = false;
> + }
> +
> + return ret;
> +}
> +
> +static int
> +read_and_verify_tunnel_regs(struct drm_dp_tunnel *tunnel,
> + struct 

Re: [PATCH 02/19] drm/dp: Add support for DP tunneling

2024-02-05 Thread Ville Syrjälä
On Mon, Feb 05, 2024 at 07:15:17PM +0200, Imre Deak wrote:
> On Mon, Feb 05, 2024 at 06:13:30PM +0200, Ville Syrjälä wrote:
> > On Wed, Jan 31, 2024 at 08:49:16PM +0200, Imre Deak wrote:
> > > On Wed, Jan 31, 2024 at 06:09:04PM +0200, Ville Syrjälä wrote:
> > > > On Tue, Jan 23, 2024 at 12:28:33PM +0200, Imre Deak wrote:
> > > > > +static void untrack_tunnel_ref(struct drm_dp_tunnel *tunnel,
> > > > > +struct ref_tracker **tracker)
> > > > > +{
> > > > > + ref_tracker_free(&tunnel->group->mgr->ref_tracker,
> > > > > +  tracker);
> > > > > +}
> > > > > +
> > > > > +struct drm_dp_tunnel *
> > > > > +drm_dp_tunnel_get_untracked(struct drm_dp_tunnel *tunnel)
> > > > > +{
> > > > > + track_tunnel_ref(tunnel, NULL);
> > > > > +
> > > > > + return tunnel_get(tunnel);
> > > > > +}
> > > > > +EXPORT_SYMBOL(drm_dp_tunnel_get_untracked);
> > > > 
> > > > Why do these exist?
> > > 
> > > They implement drm_dp_tunnel_get()/put() if
> > > CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE=n.
> > 
> > Why does that kind of irrelevant detail need to be visible
> > in the exported api?
> 
> In non-debug builds the ref_tracker object isn't needed and so
> drm_dp_tunnel_ref won't contain a pointer to it either.

Since it's just a pointer I don't see much point in making
things more complicated by leaving it out.

> drm_dp_tunnel_get/put_untracked() provide a way to get/put a tunnel
> reference without having to pass a ref_tracker pointer.
> 
> > 
> > -- 
> > Ville Syrjälä
> > Intel

-- 
Ville Syrjälä
Intel


Re: [PATCH 02/19] drm/dp: Add support for DP tunneling

2024-02-05 Thread Imre Deak
On Mon, Feb 05, 2024 at 06:13:30PM +0200, Ville Syrjälä wrote:
> On Wed, Jan 31, 2024 at 08:49:16PM +0200, Imre Deak wrote:
> > On Wed, Jan 31, 2024 at 06:09:04PM +0200, Ville Syrjälä wrote:
> > > On Tue, Jan 23, 2024 at 12:28:33PM +0200, Imre Deak wrote:
> > > > +static void untrack_tunnel_ref(struct drm_dp_tunnel *tunnel,
> > > > +  struct ref_tracker **tracker)
> > > > +{
> > > > +   ref_tracker_free(&tunnel->group->mgr->ref_tracker,
> > > > +tracker);
> > > > +}
> > > > +
> > > > +struct drm_dp_tunnel *
> > > > +drm_dp_tunnel_get_untracked(struct drm_dp_tunnel *tunnel)
> > > > +{
> > > > +   track_tunnel_ref(tunnel, NULL);
> > > > +
> > > > +   return tunnel_get(tunnel);
> > > > +}
> > > > +EXPORT_SYMBOL(drm_dp_tunnel_get_untracked);
> > > 
> > > Why do these exist?
> > 
> > They implement drm_dp_tunnel_get()/put() if
> > CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE=n.
> 
> Why does that kind of irrelevant detail need to be visible
> in the exported api?

In non-debug builds the ref_tracker object isn't needed and so
drm_dp_tunnel_ref won't contain a pointer to it either.
drm_dp_tunnel_get/put_untracked() provide a way to get/put a tunnel
reference without having to pass a ref_tracker pointer.
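
Roughly the shape being discussed, as a sketch (not the exact header in
the patch):

struct drm_dp_tunnel_ref {
	struct drm_dp_tunnel *tunnel;
#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
	struct ref_tracker *tracker;	/* only present in debug builds */
#endif
};

#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
/* debug builds: callers pass a tracker so leaked refs can be reported */
struct drm_dp_tunnel *drm_dp_tunnel_get(struct drm_dp_tunnel *tunnel,
					struct ref_tracker **tracker);
#else
/* non-debug builds: no tracker to pass around, hence the _untracked variant */
struct drm_dp_tunnel *drm_dp_tunnel_get_untracked(struct drm_dp_tunnel *tunnel);
#endif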

> 
> -- 
> Ville Syrjälä
> Intel


Re: [PATCH 02/19] drm/dp: Add support for DP tunneling

2024-02-05 Thread Ville Syrjälä
On Wed, Jan 31, 2024 at 08:49:16PM +0200, Imre Deak wrote:
> On Wed, Jan 31, 2024 at 06:09:04PM +0200, Ville Syrjälä wrote:
> > On Tue, Jan 23, 2024 at 12:28:33PM +0200, Imre Deak wrote:
> > > +static void untrack_tunnel_ref(struct drm_dp_tunnel *tunnel,
> > > +struct ref_tracker **tracker)
> > > +{
> > > + ref_tracker_free(&tunnel->group->mgr->ref_tracker,
> > > +  tracker);
> > > +}
> > > +
> > > +struct drm_dp_tunnel *
> > > +drm_dp_tunnel_get_untracked(struct drm_dp_tunnel *tunnel)
> > > +{
> > > + track_tunnel_ref(tunnel, NULL);
> > > +
> > > + return tunnel_get(tunnel);
> > > +}
> > > +EXPORT_SYMBOL(drm_dp_tunnel_get_untracked);
> > 
> > Why do these exist?
> 
> They implement drm_dp_tunnel_get()/put() if
> CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE=n.

Why does that kind of irrelevant detail need to be visible
in the exported api?

-- 
Ville Syrjälä
Intel


Re: [PATCH 02/19] drm/dp: Add support for DP tunneling

2024-01-31 Thread Imre Deak
On Wed, Jan 31, 2024 at 06:09:04PM +0200, Ville Syrjälä wrote:
> On Tue, Jan 23, 2024 at 12:28:33PM +0200, Imre Deak wrote:
> > Add support for Display Port DP tunneling. For now this includes the
> > support for Bandwidth Allocation Mode, leaving adding Panel Replay
> > support for later.
> > 
> > BWA allows using displays that share the same (Thunderbolt) link with
> > their maximum resolution. Atm, this may not be possible due to the
> > coarse granularity of partitioning the link BW among the displays on the
> > link: the BW allocation policy is in a SW/FW/HW component on the link
> > (on Thunderbolt it's the SW or FW Connection Manager), independent of
> > the driver. This policy will set the DPRX maximum rate and lane count
> > DPCD registers the GFX driver will see (0x0, 0x1, 0x02200,
> > 0x02201) based on the available link BW.
> > 
> > The granularity of the current BW allocation policy is coarse, based on
> > the required link rate in the 1.62Gb/s..8.1Gb/s range and it may prevent
> > using higher resolutions altogether: the display connected first will
> > get a share of the link BW which corresponds to its full DPRX capability
> > (regardless of the actual mode it uses). A subsequent display connected
> > will only get the remaining BW, which could be well below its full
> > capability.
> > 
> > BWA solves the above coarse granularity (reducing it to a 250Mb/s..1Gb/s
> > range) and first-come/first-served issues by letting the driver request
> > the BW for each display on a link which reflects the actual modes the
> > displays use.
> > 
> > This patch adds the DRM core helper functions, while a follow-up change
> > in the patchset takes them into use in the i915 driver.
> > 
> > Signed-off-by: Imre Deak 
> > ---
> >  drivers/gpu/drm/display/Kconfig |   17 +
> >  drivers/gpu/drm/display/Makefile|2 +
> >  drivers/gpu/drm/display/drm_dp_tunnel.c | 1715 +++
> >  include/drm/display/drm_dp.h|   60 +
> >  include/drm/display/drm_dp_tunnel.h |  270 
> >  5 files changed, 2064 insertions(+)
> >  create mode 100644 drivers/gpu/drm/display/drm_dp_tunnel.c
> >  create mode 100644 include/drm/display/drm_dp_tunnel.h
> > 
> > diff --git a/drivers/gpu/drm/display/Kconfig 
> > b/drivers/gpu/drm/display/Kconfig
> > index 09712b88a5b83..b024a84b94c1c 100644
> > --- a/drivers/gpu/drm/display/Kconfig
> > +++ b/drivers/gpu/drm/display/Kconfig
> > @@ -17,6 +17,23 @@ config DRM_DISPLAY_DP_HELPER
> > help
> >   DRM display helpers for DisplayPort.
> >  
> > +config DRM_DISPLAY_DP_TUNNEL
> > +   bool
> > +   select DRM_DISPLAY_DP_HELPER
> > +   help
> > + Enable support for DisplayPort tunnels.
> > +
> > +config DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
> > +   bool "Enable debugging the DP tunnel state"
> > +   depends on REF_TRACKER
> > +   depends on DRM_DISPLAY_DP_TUNNEL
> > +   depends on DEBUG_KERNEL
> > +   depends on EXPERT
> > +   help
> > + Enables debugging the DP tunnel manager's status.
> > +
> > + If in doubt, say "N".
> 
> It's not exactly clear what a "DP tunnel" is.
> Shouldn't thunderbolt be mentioned here somewhere?

The only way I'm aware of that tunneling can work is through a TBT link,
yes; however I'm not sure it couldn't work on any DP link, since the
interface to request BW is simply the AUX bus after all, and AFAIR the
standard doesn't mention TBT either (but I have to reread that). The
above descriptions should be extended anyway and the usual TBT scenario
mentioned at least, so I will do that.
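
For context, the request itself is just a DPCD write over AUX, roughly
along these lines (a sketch only; DP_REQUEST_BW is assumed here as the
request register name and the status/completion handling is omitted):

static int sketch_request_bw(struct drm_dp_aux *aux, u8 bw_granularity_units)
{
	/*
	 * Ask the link's BW manager (e.g. the TBT Connection Manager) for
	 * "bw_granularity_units" worth of BW for this DP_IN adapter.
	 */
	if (drm_dp_dpcd_writeb(aux, DP_REQUEST_BW, bw_granularity_units) < 0)
		return -EIO;

	return 0;
}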

> > +
> >  config DRM_DISPLAY_HDCP_HELPER
> > bool
> > depends on DRM_DISPLAY_HELPER
> > diff --git a/drivers/gpu/drm/display/Makefile 
> > b/drivers/gpu/drm/display/Makefile
> > index 17ac4a1006a80..7ca61333c6696 100644
> > --- a/drivers/gpu/drm/display/Makefile
> > +++ b/drivers/gpu/drm/display/Makefile
> > @@ -8,6 +8,8 @@ drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_HELPER) += \
> > drm_dp_helper.o \
> > drm_dp_mst_topology.o \
> > drm_dsc_helper.o
> > +drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_TUNNEL) += \
> > +   drm_dp_tunnel.o
> >  drm_display_helper-$(CONFIG_DRM_DISPLAY_HDCP_HELPER) += drm_hdcp_helper.o
> >  drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_HELPER) += \
> > drm_hdmi_helper.o \
> > diff --git a/drivers/gpu/drm/display/drm_dp_tunnel.c 
> > b/drivers/gpu/drm/display/drm_dp_tunnel.c
> > new file mode 100644
> > index 0..58f6330db7d9d
> > --- /dev/null
> > +++ b/drivers/gpu/drm/display/drm_dp_tunnel.c
> > @@ -0,0 +1,1715 @@
> > +// SPDX-License-Identifier: MIT
> > +/*
> > + * Copyright © 2023 Intel Corporation
> > + */
> > +
> > +#include 
> > +#include 
> > +
> > +#include 
> > +
> > +#include 
> > +#include 
> > +#include 
> > +#include 
> > +#include 
> > +
> > +#define to_group(__private_obj) \
> > +   container_of(__private_obj, struct drm_dp_tunnel_group, base)
> > +
> > +#define to_group_state(__private_state) \
> > +   container_of(__private_state, struct 

Re: [PATCH 02/19] drm/dp: Add support for DP tunneling

2024-01-31 Thread Ville Syrjälä
On Tue, Jan 23, 2024 at 12:28:33PM +0200, Imre Deak wrote:
> Add support for Display Port DP tunneling. For now this includes the
> support for Bandwidth Allocation Mode, leaving adding Panel Replay
> support for later.
> 
> BWA allows using displays that share the same (Thunderbolt) link with
> their maximum resolution. Atm, this may not be possible due to the
> coarse granularity of partitioning the link BW among the displays on the
> link: the BW allocation policy is in a SW/FW/HW component on the link
> (on Thunderbolt it's the SW or FW Connection Manager), independent of
> the driver. This policy will set the DPRX maximum rate and lane count
> DPCD registers the GFX driver will see (0x0, 0x1, 0x02200,
> 0x02201) based on the available link BW.
> 
> The granularity of the current BW allocation policy is coarse, based on
> the required link rate in the 1.62Gb/s..8.1Gb/s range and it may prevent
> using higher resolutions altogether: the display connected first will
> get a share of the link BW which corresponds to its full DPRX capability
> (regardless of the actual mode it uses). A subsequent display connected
> will only get the remaining BW, which could be well below its full
> capability.
> 
> BWA solves the above coarse granularity (reducing it to a 250Mb/s..1Gb/s
> range) and first-come/first-served issues by letting the driver request
> the BW for each display on a link which reflects the actual modes the
> displays use.
> 
> This patch adds the DRM core helper functions, while a follow-up change
> in the patchset takes them into use in the i915 driver.
> 
> Signed-off-by: Imre Deak 
> ---
>  drivers/gpu/drm/display/Kconfig |   17 +
>  drivers/gpu/drm/display/Makefile|2 +
>  drivers/gpu/drm/display/drm_dp_tunnel.c | 1715 +++
>  include/drm/display/drm_dp.h|   60 +
>  include/drm/display/drm_dp_tunnel.h |  270 
>  5 files changed, 2064 insertions(+)
>  create mode 100644 drivers/gpu/drm/display/drm_dp_tunnel.c
>  create mode 100644 include/drm/display/drm_dp_tunnel.h
> 
> diff --git a/drivers/gpu/drm/display/Kconfig b/drivers/gpu/drm/display/Kconfig
> index 09712b88a5b83..b024a84b94c1c 100644
> --- a/drivers/gpu/drm/display/Kconfig
> +++ b/drivers/gpu/drm/display/Kconfig
> @@ -17,6 +17,23 @@ config DRM_DISPLAY_DP_HELPER
>   help
> DRM display helpers for DisplayPort.
>  
> +config DRM_DISPLAY_DP_TUNNEL
> + bool
> + select DRM_DISPLAY_DP_HELPER
> + help
> +   Enable support for DisplayPort tunnels.
> +
> +config DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
> + bool "Enable debugging the DP tunnel state"
> + depends on REF_TRACKER
> + depends on DRM_DISPLAY_DP_TUNNEL
> + depends on DEBUG_KERNEL
> + depends on EXPERT
> + help
> +   Enables debugging the DP tunnel manager's status.
> +
> +   If in doubt, say "N".

It's not exactly clear what a "DP tunnel" is.
Shouldn't thunderbolt be mentioned here somewhere?

> +
>  config DRM_DISPLAY_HDCP_HELPER
>   bool
>   depends on DRM_DISPLAY_HELPER
> diff --git a/drivers/gpu/drm/display/Makefile 
> b/drivers/gpu/drm/display/Makefile
> index 17ac4a1006a80..7ca61333c6696 100644
> --- a/drivers/gpu/drm/display/Makefile
> +++ b/drivers/gpu/drm/display/Makefile
> @@ -8,6 +8,8 @@ drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_HELPER) += \
>   drm_dp_helper.o \
>   drm_dp_mst_topology.o \
>   drm_dsc_helper.o
> +drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_TUNNEL) += \
> + drm_dp_tunnel.o
>  drm_display_helper-$(CONFIG_DRM_DISPLAY_HDCP_HELPER) += drm_hdcp_helper.o
>  drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_HELPER) += \
>   drm_hdmi_helper.o \
> diff --git a/drivers/gpu/drm/display/drm_dp_tunnel.c 
> b/drivers/gpu/drm/display/drm_dp_tunnel.c
> new file mode 100644
> index 0..58f6330db7d9d
> --- /dev/null
> +++ b/drivers/gpu/drm/display/drm_dp_tunnel.c
> @@ -0,0 +1,1715 @@
> +// SPDX-License-Identifier: MIT
> +/*
> + * Copyright © 2023 Intel Corporation
> + */
> +
> +#include 
> +#include 
> +
> +#include 
> +
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +
> +#define to_group(__private_obj) \
> + container_of(__private_obj, struct drm_dp_tunnel_group, base)
> +
> +#define to_group_state(__private_state) \
> + container_of(__private_state, struct drm_dp_tunnel_group_state, base)
> +
> +#define is_dp_tunnel_private_obj(__obj) \
> + ((__obj)->funcs == &tunnel_group_funcs)
> +
> +#define for_each_new_group_in_state(__state, __new_group_state, __i) \
> + for ((__i) = 0; \
> +  (__i) < (__state)->num_private_objs; \
> +  (__i)++) \
> + for_each_if ((__state)->private_objs[__i].ptr && \
> +  is_dp_tunnel_private_obj((__state)->private_objs[__i].ptr) && \
> +  ((__new_group_state) = \
> +   to_group_state((__state)->private_objs[__i].new_state), 1))
> +
> +#define 

Re: [PATCH 02/19] drm/dp: Add support for DP tunneling

2024-01-31 Thread Imre Deak
On Wed, Jan 31, 2024 at 02:50:16PM +0200, Hogander, Jouni wrote:
> [...]
> > +
> > +struct drm_dp_tunnel_group;
> > +
> > +struct drm_dp_tunnel {
> > +   struct drm_dp_tunnel_group *group;
> > +
> > +   struct list_head node;
> > +
> > +   struct kref kref;
> > +#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
> > +   struct ref_tracker *tracker;
> > +#endif
> > +   struct drm_dp_aux *aux;
> > +   char name[8];
> > +
> > +   int bw_granularity;
> > +   int estimated_bw;
> > +   int allocated_bw;
> > +
> > +   int max_dprx_rate;
> > +   u8 max_dprx_lane_count;
> > +
> > +   u8 adapter_id;
> > +
> > +   bool bw_alloc_supported:1;
> > +   bool bw_alloc_enabled:1;
> > +   bool has_io_error:1;
> > +   bool destroyed:1;
> > +};
> > +
> > +struct drm_dp_tunnel_group_state;
> > +
> > +struct drm_dp_tunnel_state {
> > +   struct drm_dp_tunnel_group_state *group_state;
> > +
> > +   struct drm_dp_tunnel_ref tunnel_ref;
> > +
> > +   struct list_head node;
> > +
> > +   u32 stream_mask;
> 
> I'm wondering if drm_dp_tunnel_state can really contain several streams
> and what kind of scenario this would be? From i915 point of view I
> would understand that several pipes are routed to DP tunnel.

Yes, multiple pipes through the same tunnel and the use case for that is
MST with multiple streams. The "stream" term is only an abstraction
where it could be a different physical thing in various drivers, but for
i915 it just means pipes. Not 100% sure if that's the best mapping,
since in case of bigjoiner there would be multiple pipes, but possibly
(in the SST case) only one stream from the tunneling POV.

> Is it bigjoiner case?

IIUC in that (SST) case the streams would be joined already before going
to the TBT DP_IN adapter, so that's only one stream in stream_mask above
(unless MST + bigjoiner, where you could have 2 MST/DP tunnel streams
each consisting of 2 pipes).
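
For illustration, how stream_mask could look in the cases above (a
sketch only; PIPE_A/PIPE_B are the usual i915 enum pipe values and the
mapping is an assumption of this example, not taken from the patchset):

/* MST: two streams through one tunnel, one per pipe -> two bits set */
u32 mst_stream_mask = BIT(PIPE_A) | BIT(PIPE_B);

/*
 * SST + bigjoiner: two pipes feed a single stream towards the DP_IN
 * adapter, so only one bit would be set from the tunnel's POV.
 */
u32 sst_bigjoiner_stream_mask = BIT(PIPE_A);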

> BR,
> 
> Jouni Högander
> 
> > +   int *stream_bw;
> > +};
> > +
> > +struct drm_dp_tunnel_group_state {
> > +   struct drm_private_state base;
> > +
> > +   struct list_head tunnel_states;
> > +};
> > +
> > +struct drm_dp_tunnel_group {
> > +   struct drm_private_obj base;
> > +   struct drm_dp_tunnel_mgr *mgr;
> > +
> > +   struct list_head tunnels;
> > +
> > +   int available_bw;   /* available BW including the allocated_bw of all tunnels */
> > +   int drv_group_id;
> > +
> > +   char name[8];
> > +
> > +   bool active:1;
> > +};
> > +
> > +struct drm_dp_tunnel_mgr {
> > +   struct drm_device *dev;
> > +
> > +   int group_count;
> > +   struct drm_dp_tunnel_group *groups;
> > +   wait_queue_head_t bw_req_queue;
> > +
> > +#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
> > +   struct ref_tracker_dir ref_tracker;
> > +#endif
> > +};
> > +
> > +static int next_reg_area(int *offset)
> > +{
> > +   *offset = find_next_bit(dptun_info_regs, 64, *offset);
> > +
> > +   return find_next_zero_bit(dptun_info_regs, 64, *offset + 1) - *offset;
> > +}
> > +
> > +#define tunnel_reg_ptr(__regs, __address) ({ \
> > +   WARN_ON(!test_bit((__address) - DP_TUNNELING_BASE, dptun_info_regs)); \
> > +   &(__regs)->buf[bitmap_weight(dptun_info_regs, (__address) - DP_TUNNELING_BASE)]; \
> > +})
> > +
> > +static int read_tunnel_regs(struct drm_dp_aux *aux, struct drm_dp_tunnel_regs *regs)
> > +{
> > +   int offset = 0;
> > +   int len;
> > +
> > +   while ((len = next_reg_area(&offset))) {
> > +   int address = DP_TUNNELING_BASE + offset;
> > +
> > +   if (drm_dp_dpcd_read(aux, address,
> > tunnel_reg_ptr(regs, address), len) < 0)
> > +   return -EIO;
> > +
> > +   offset += len;
> > +   }
> > +
> > +   return 0;
> > +}
> > +
> > +static u8 tunnel_reg(const struct drm_dp_tunnel_regs *regs, int address)
> > +{
> > +   return *tunnel_reg_ptr(regs, address);
> > +}
> > +
> > +static int tunnel_reg_drv_group_id(const struct drm_dp_tunnel_regs *regs)
> > +{
> > +   int drv_id = tunnel_reg(regs, DP_USB4_DRIVER_ID) & DP_USB4_DRIVER_ID_MASK;
> > +   int group_id = tunnel_reg(regs, DP_IN_ADAPTER_TUNNEL_INFORMATION) & DP_GROUP_ID_MASK;
> > +
> > +   if (!group_id)
> > +   return 0;
> > +
> > +   return (drv_id << DP_GROUP_ID_BITS) | group_id;
> > +}
> > +
> > +/* Return granularity in kB/s units */
> > +static int tunnel_reg_bw_granularity(const struct drm_dp_tunnel_regs *regs)
> > +{
> > +   int gr = tunnel_reg(regs, DP_BW_GRANULARITY) & DP_BW_GRANULARITY_MASK;
> > +
> > +   WARN_ON(gr > 2);
> > +
> > +   return (25 << gr) / 8;
> > +}
> > +
> > +static int tunnel_reg_max_dprx_rate(const struct drm_dp_tunnel_regs *regs)
> > +{
> > +   u8 bw_code = tunnel_reg(regs, DP_TUNNELING_MAX_LINK_RATE);
> > +
> > +   return drm_dp_bw_code_to_link_rate(bw_code);
> > +}
> 

Re: [PATCH 02/19] drm/dp: Add support for DP tunneling

2024-01-31 Thread Hogander, Jouni
On Tue, 2024-01-23 at 12:28 +0200, Imre Deak wrote:
> Add support for Display Port DP tunneling. For now this includes the
> support for Bandwidth Allocation Mode, leaving adding Panel Replay
> support for later.
> 
> BWA allows using displays that share the same (Thunderbolt) link with
> their maximum resolution. Atm, this may not be possible due to the
> coarse granularity of partitioning the link BW among the displays on
> the link: the BW allocation policy is in a SW/FW/HW component on the
> link (on Thunderbolt it's the SW or FW Connection Manager), independent
> of the driver. This policy will set the DPRX maximum rate and lane
> count DPCD registers the GFX driver will see (0x0, 0x1, 0x02200,
> 0x02201) based on the available link BW.
> 
> The granularity of the current BW allocation policy is coarse, based on
> the required link rate in the 1.62Gb/s..8.1Gb/s range and it may
> prevent using higher resolutions altogether: the display connected
> first will get a share of the link BW which corresponds to its full
> DPRX capability (regardless of the actual mode it uses). A subsequent
> display connected will only get the remaining BW, which could be well
> below its full capability.
> 
> BWA solves the above coarse granularity (reducing it to a
> 250Mb/s..1Gb/s range) and first-come/first-served issues by letting the
> driver request the BW for each display on a link which reflects the
> actual modes the displays use.
> 
> This patch adds the DRM core helper functions, while a follow-up change
> in the patchset takes them into use in the i915 driver.
> 
> Signed-off-by: Imre Deak 
> ---
>  drivers/gpu/drm/display/Kconfig |   17 +
>  drivers/gpu/drm/display/Makefile    |    2 +
>  drivers/gpu/drm/display/drm_dp_tunnel.c | 1715 +++
>  include/drm/display/drm_dp.h    |   60 +
>  include/drm/display/drm_dp_tunnel.h |  270 
>  5 files changed, 2064 insertions(+)
>  create mode 100644 drivers/gpu/drm/display/drm_dp_tunnel.c
>  create mode 100644 include/drm/display/drm_dp_tunnel.h
> 
> diff --git a/drivers/gpu/drm/display/Kconfig
> b/drivers/gpu/drm/display/Kconfig
> index 09712b88a5b83..b024a84b94c1c 100644
> --- a/drivers/gpu/drm/display/Kconfig
> +++ b/drivers/gpu/drm/display/Kconfig
> @@ -17,6 +17,23 @@ config DRM_DISPLAY_DP_HELPER
> help
>   DRM display helpers for DisplayPort.
>  
> +config DRM_DISPLAY_DP_TUNNEL
> +   bool
> +   select DRM_DISPLAY_DP_HELPER
> +   help
> + Enable support for DisplayPort tunnels.
> +
> +config DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
> +   bool "Enable debugging the DP tunnel state"
> +   depends on REF_TRACKER
> +   depends on DRM_DISPLAY_DP_TUNNEL
> +   depends on DEBUG_KERNEL
> +   depends on EXPERT
> +   help
> + Enables debugging the DP tunnel manager's status.
> +
> + If in doubt, say "N".
> +
>  config DRM_DISPLAY_HDCP_HELPER
> bool
> depends on DRM_DISPLAY_HELPER
> diff --git a/drivers/gpu/drm/display/Makefile
> b/drivers/gpu/drm/display/Makefile
> index 17ac4a1006a80..7ca61333c6696 100644
> --- a/drivers/gpu/drm/display/Makefile
> +++ b/drivers/gpu/drm/display/Makefile
> @@ -8,6 +8,8 @@ drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_HELPER) += \
> drm_dp_helper.o \
> drm_dp_mst_topology.o \
> drm_dsc_helper.o
> +drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_TUNNEL) += \
> +   drm_dp_tunnel.o
>  drm_display_helper-$(CONFIG_DRM_DISPLAY_HDCP_HELPER) += drm_hdcp_helper.o
>  drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_HELPER) += \
> drm_hdmi_helper.o \
> diff --git a/drivers/gpu/drm/display/drm_dp_tunnel.c
> b/drivers/gpu/drm/display/drm_dp_tunnel.c
> new file mode 100644
> index 0..58f6330db7d9d
> --- /dev/null
> +++ b/drivers/gpu/drm/display/drm_dp_tunnel.c
> @@ -0,0 +1,1715 @@
> +// SPDX-License-Identifier: MIT
> +/*
> + * Copyright © 2023 Intel Corporation
> + */
> +
> +#include 
> +#include 
> +
> +#include 
> +
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +
> +#define to_group(__private_obj) \
> +   container_of(__private_obj, struct drm_dp_tunnel_group, base)
> +
> +#define to_group_state(__private_state) \
> +   container_of(__private_state, struct drm_dp_tunnel_group_state, base)
> +
> +#define is_dp_tunnel_private_obj(__obj) \
> +   ((__obj)->funcs == &tunnel_group_funcs)
> +
> +#define for_each_new_group_in_state(__state, __new_group_state, __i) \
> +   for ((__i) = 0; \
> +    (__i) < (__state)->num_private_objs; \
> +    (__i)++) \
> +   for_each_if ((__state)->private_objs[__i].ptr && \
> +    is_dp_tunnel_private_obj((__state)->private_objs[__i].ptr) && \
> +    ((__new_group_state) = \
> +   to_group_state((__state)->private_objs[__i].new_state), 1))
> +
> +#define for_each_old_group_in_state(__state,