On Mon, Oct 13, 2025 at 01:56:48PM +0200, Michal Wajdeczko wrote:
> 
> 
> On 10/11/2025 9:38 PM, Michał Winiarski wrote:
> > Connect the helpers to allow save and restore of GuC migration data in
> > stop_copy / resume device state.
> > 
> > Signed-off-by: Michał Winiarski <[email protected]>
> > ---
> >  drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c   | 28 ++++++++++++++++++-
> >  .../gpu/drm/xe/xe_gt_sriov_pf_control_types.h |  1 +
> >  drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c |  8 ++++++
> >  3 files changed, 36 insertions(+), 1 deletion(-)
> > 
> > diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c 
> > b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
> > index 6ece775b2e80e..f73a3bf40037c 100644
> > --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
> > +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
> > @@ -187,6 +187,7 @@ static const char *control_bit_to_string(enum 
> > xe_gt_sriov_control_bits bit)
> >     CASE2STR(PAUSED);
> >     CASE2STR(MIGRATION_DATA_WIP);
> >     CASE2STR(SAVE_WIP);
> > +   CASE2STR(SAVE_DATA_GUC);
> >     CASE2STR(SAVE_FAILED);
> >     CASE2STR(SAVED);
> >     CASE2STR(RESTORE_WIP);
> > @@ -338,6 +339,7 @@ static void pf_exit_vf_mismatch(struct xe_gt *gt, 
> > unsigned int vfid)
> >     pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOP_FAILED);
> >     pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_FAILED);
> >     pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_FAILED);
> > +   pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_FAILED);
> >     pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_FAILED);
> >  }
> >  
> > @@ -801,6 +803,7 @@ void xe_gt_sriov_pf_control_vf_data_eof(struct xe_gt 
> > *gt, unsigned int vfid)
> >  
> >  static void pf_exit_vf_save_wip(struct xe_gt *gt, unsigned int vfid)
> >  {
> > +   pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_DATA_GUC);
> >     pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WIP);
> >  }
> >  
> > @@ -820,16 +823,35 @@ static void pf_exit_vf_saved(struct xe_gt *gt, 
> > unsigned int vfid)
> >     pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVED);
> >  }
> >  
> > +static void pf_enter_vf_save_failed(struct xe_gt *gt, unsigned int vfid)
> > +{
> > +   pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_FAILED);
> > +   pf_exit_vf_wip(gt, vfid);
> > +}
> > +
> >  static bool pf_handle_vf_save_wip(struct xe_gt *gt, unsigned int vfid)
> >  {
> > +   int ret;
> > +
> >     if (!pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WIP))
> >             return false;
> >  
> > +   if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_DATA_GUC)) {
> > +           ret = xe_gt_sriov_pf_migration_guc_save(gt, vfid);
> > +           if (ret)
> > +                   goto err;
> > +           return true;
> > +   }
> > +
> >     xe_gt_sriov_pf_control_vf_data_eof(gt, vfid);
> >     pf_exit_vf_save_wip(gt, vfid);
> >     pf_enter_vf_saved(gt, vfid);
> >  
> >     return true;
> > +
> > +err:
> > +   pf_enter_vf_save_failed(gt, vfid);
> > +   return false;
> 
return true - as this is an indication that the state was processed (not 
whether it was successful)

Ok.

> 
> >  }
> >  
> >  static bool pf_enter_vf_save_wip(struct xe_gt *gt, unsigned int vfid)
> > @@ -838,6 +860,8 @@ static bool pf_enter_vf_save_wip(struct xe_gt *gt, 
> > unsigned int vfid)
> >             pf_enter_vf_state(gt, vfid, 
> > XE_GT_SRIOV_STATE_MIGRATION_DATA_WIP);
> >             pf_exit_vf_restored(gt, vfid);
> >             pf_enter_vf_wip(gt, vfid);
> > +           if (xe_gt_sriov_pf_migration_guc_size(gt, vfid) > 0)
> > +                   pf_enter_vf_state(gt, vfid, 
> > XE_GT_SRIOV_STATE_SAVE_DATA_GUC);
> >             pf_queue_vf(gt, vfid);
> >             return true;
> >     }
> > @@ -946,6 +970,8 @@ static int pf_handle_vf_restore_data(struct xe_gt *gt, 
> > unsigned int vfid,
> >                                  struct xe_sriov_pf_migration_data *data)
> >  {
> >     switch (data->type) {
> > +   case XE_SRIOV_MIG_DATA_GUC:
> > +           return xe_gt_sriov_pf_migration_guc_restore(gt, vfid, data);
> >     default:
> >             xe_gt_sriov_notice(gt, "Skipping VF%u invalid data type: %d\n", 
> > vfid, data->type);
> >             pf_enter_vf_restore_failed(gt, vfid);
> > @@ -996,7 +1022,7 @@ static bool pf_enter_vf_restore_wip(struct xe_gt *gt, 
> > unsigned int vfid)
> >             pf_enter_vf_state(gt, vfid, 
> > XE_GT_SRIOV_STATE_MIGRATION_DATA_WIP);
> >             pf_exit_vf_saved(gt, vfid);
> >             pf_enter_vf_wip(gt, vfid);
> > -           pf_enter_vf_restored(gt, vfid);
> > +           pf_queue_vf(gt, vfid);
> >             return true;
> >     }
> >  
> > diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h 
> > b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h
> > index 68ec9d1fc3daf..b9787c425d9f6 100644
> > --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h
> > +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h
> > @@ -71,6 +71,7 @@ enum xe_gt_sriov_control_bits {
> >     XE_GT_SRIOV_STATE_MIGRATION_DATA_WIP,
> >  
> >     XE_GT_SRIOV_STATE_SAVE_WIP,
> > +   XE_GT_SRIOV_STATE_SAVE_DATA_GUC,
> >     XE_GT_SRIOV_STATE_SAVE_FAILED,
> >     XE_GT_SRIOV_STATE_SAVED,
> >  
> > diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c 
> > b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
> > index e1031465e65c4..0c10284f0b09a 100644
> > --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
> > +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
> > @@ -279,9 +279,17 @@ int xe_gt_sriov_pf_migration_guc_restore(struct xe_gt 
> > *gt, unsigned int vfid,
> >  ssize_t xe_gt_sriov_pf_migration_size(struct xe_gt *gt, unsigned int vfid)
> >  {
> >     ssize_t total = 0;
> > +   ssize_t size;
> >  
> >     xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
> >  
> > +   size = xe_gt_sriov_pf_migration_guc_size(gt, vfid);
> > +   if (size < 0)
> > +           return size;
> > +   else if (size > 0)
> 
> no need for "else"
> 
> and isn't a zero GuC state size an error anyway?

Replaced with an assert.

Thanks,
-Michał

> 
> > +           size += sizeof(struct xe_sriov_pf_migration_hdr);
> > +   total += size;
> > +
> >     return total;
> >  }
> >  
> 

Reply via email to