Now that it is possible to free the packets, connect the restore handling
logic with the migration data ring: consume packets from the ring, dispatch
them based on their type and free them once they are handled.
The data packet helpers will also be used in upcoming changes that start
producing migration data packets.

Signed-off-by: Michał Winiarski <[email protected]>
---
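For reviewers, a rough sketch of how the producer side is expected to use
these helpers in the follow-up changes. This is not part of the patch:
pf_save_guc_blob_example() and the ring-produce helper named below are
assumptions, only the consume path is wired up here.

        static int pf_save_guc_blob_example(struct xe_gt *gt, unsigned int vfid,
                                            const void *src, size_t size)
        {
                struct xe_sriov_pf_migration_data *data;
                int err;

                /* allocate the "outer" packet structure */
                data = xe_sriov_pf_migration_data_alloc(gt_to_xe(gt));
                if (!data)
                        return -ENOMEM;

                /* fill the header and allocate the backing storage */
                err = xe_sriov_pf_migration_data_init(data, gt->tile->id, gt->info.id,
                                                      XE_SRIOV_MIG_DATA_GUC, 0, size);
                if (err)
                        goto err_free;

                memcpy(data->vaddr, src, size);

                /* hypothetical producer-side counterpart of ring_consume() */
                err = xe_gt_sriov_pf_migration_ring_produce(gt, vfid, data);
                if (err)
                        goto err_free;

                return 0;

        err_free:
                xe_sriov_pf_migration_data_free(data);
                return err;
        }

On both the produce and the consume paths, xe_sriov_pf_migration_data_free()
releases either the pinned BO or the kvmalloc'ed buffer depending on the data
type, which is also what the ptr_ring destructor added in this patch does on
cleanup.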
 drivers/gpu/drm/xe/Makefile                   |   1 +
 drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c   |  48 ++++++-
 drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c |  10 +-
 drivers/gpu/drm/xe/xe_sriov_pf_migration.c    |   1 +
 .../gpu/drm/xe/xe_sriov_pf_migration_data.c   | 135 ++++++++++++++++++
 .../gpu/drm/xe/xe_sriov_pf_migration_data.h   |  32 +++++
 6 files changed, 224 insertions(+), 3 deletions(-)
 create mode 100644 drivers/gpu/drm/xe/xe_sriov_pf_migration_data.c
 create mode 100644 drivers/gpu/drm/xe/xe_sriov_pf_migration_data.h

diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index 71f685a315dca..e253d65366de4 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -177,6 +177,7 @@ xe-$(CONFIG_PCI_IOV) += \
        xe_sriov_pf_control.o \
        xe_sriov_pf_debugfs.o \
        xe_sriov_pf_migration.o \
+       xe_sriov_pf_migration_data.o \
        xe_sriov_pf_service.o \
        xe_tile_sriov_pf_debugfs.o
 
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
index 16a88e7599f6d..04a4e92133c2e 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
@@ -20,6 +20,7 @@
 #include "xe_sriov.h"
 #include "xe_sriov_pf_control.h"
 #include "xe_sriov_pf_migration.h"
+#include "xe_sriov_pf_migration_data.h"
 #include "xe_sriov_pf_service.h"
 #include "xe_tile.h"
 
@@ -949,14 +950,57 @@ static void pf_exit_vf_restored(struct xe_gt *gt, unsigned int vfid)
        pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORED);
 }
 
+static void pf_enter_vf_restore_failed(struct xe_gt *gt, unsigned int vfid)
+{
+       pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_FAILED);
+       pf_exit_vf_wip(gt, vfid);
+}
+
+static int pf_handle_vf_restore_data(struct xe_gt *gt, unsigned int vfid,
+                                    struct xe_sriov_pf_migration_data *data)
+{
+       switch (data->type) {
+       default:
+               xe_gt_sriov_notice(gt, "Skipping VF%u invalid data type: %d\n", vfid, data->type);
+               pf_enter_vf_restore_failed(gt, vfid);
+       }
+
+       return -EINVAL;
+}
+
 static bool pf_handle_vf_restore_wip(struct xe_gt *gt, unsigned int vfid)
 {
+       struct xe_sriov_pf_migration_data *data;
+       int ret;
+
        if (!pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WIP))
                return false;
 
-       pf_exit_vf_restore_wip(gt, vfid);
-       pf_enter_vf_restored(gt, vfid);
+       data = xe_gt_sriov_pf_migration_ring_consume(gt, vfid);
+       if (IS_ERR(data)) {
+               if (PTR_ERR(data) == -ENODATA &&
+                   !xe_gt_sriov_pf_control_check_vf_data_wip(gt, vfid)) {
+                       pf_exit_vf_restore_wip(gt, vfid);
+                       pf_enter_vf_restored(gt, vfid);
+               } else {
+                       pf_enter_vf_restore_failed(gt, vfid);
+               }
+               return false;
+       }
+
+       xe_gt_assert(gt, gt->info.id == data->gt);
+       xe_gt_assert(gt, gt->tile->id == data->tile);
+
+       ret = pf_handle_vf_restore_data(gt, vfid, data);
+       if (ret) {
+               xe_gt_sriov_err(gt, "VF%u failed to restore data type: %d (%d)\n",
+                               vfid, data->type, ret);
+               xe_sriov_pf_migration_data_free(data);
+               pf_enter_vf_restore_failed(gt, vfid);
+               return false;
+       }
 
+       xe_sriov_pf_migration_data_free(data);
        return true;
 }
 
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
index af5952f42fff1..582aaf062cbd4 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
@@ -15,6 +15,7 @@
 #include "xe_guc_ct.h"
 #include "xe_sriov.h"
 #include "xe_sriov_pf_migration.h"
+#include "xe_sriov_pf_migration_data.h"
 
 #define XE_GT_SRIOV_PF_MIGRATION_RING_TIMEOUT (HZ * 20)
 #define XE_GT_SRIOV_PF_MIGRATION_RING_SIZE 5
@@ -523,11 +524,18 @@ xe_gt_sriov_pf_migration_ring_consume_nowait(struct xe_gt *gt, unsigned int vfid
        return ERR_PTR(-EAGAIN);
 }
 
+static void pf_mig_data_destroy(void *ptr)
+{
+       struct xe_sriov_pf_migration_data *data = ptr;
+
+       xe_sriov_pf_migration_data_free(data);
+}
+
 static void pf_gt_migration_cleanup(struct drm_device *dev, void *arg)
 {
        struct xe_gt_sriov_pf_migration *migration = arg;
 
-       ptr_ring_cleanup(&migration->ring, NULL);
+       ptr_ring_cleanup(&migration->ring, pf_mig_data_destroy);
 }
 
 /**
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_sriov_pf_migration.c
index 347682f29a03c..d39cee66589b5 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf_migration.c
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_migration.c
@@ -12,6 +12,7 @@
 #include "xe_pm.h"
 #include "xe_sriov_pf_helpers.h"
 #include "xe_sriov_pf_migration.h"
+#include "xe_sriov_pf_migration_data.h"
 #include "xe_sriov_printk.h"
 
 static struct xe_sriov_pf_migration *pf_pick_migration(struct xe_device *xe, unsigned int vfid)
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration_data.c b/drivers/gpu/drm/xe/xe_sriov_pf_migration_data.c
new file mode 100644
index 0000000000000..cfc6b512c6674
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_migration_data.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_sriov_pf_migration_data.h"
+
+static bool data_needs_bo(struct xe_sriov_pf_migration_data *data)
+{
+       unsigned int type = data->type;
+
+       return type == XE_SRIOV_MIG_DATA_CCS ||
+              type == XE_SRIOV_MIG_DATA_VRAM;
+}
+
+/**
+ * xe_sriov_pf_migration_data_alloc() - Allocate migration data packet
+ * @xe: the &struct xe_device
+ *
+ * Only allocates the "outer" structure, without initializing the migration
+ * data backing storage.
+ *
+ * Return: Pointer to &struct xe_sriov_pf_migration_data on success,
+ *         NULL in case of error.
+ */
+struct xe_sriov_pf_migration_data *
+xe_sriov_pf_migration_data_alloc(struct xe_device *xe)
+{
+       struct xe_sriov_pf_migration_data *data;
+
+       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return NULL;
+
+       data->xe = xe;
+       data->hdr_remaining = sizeof(data->hdr);
+
+       return data;
+}
+
+/**
+ * xe_sriov_pf_migration_data_free() - Free migration data packet
+ * @data: the &struct xe_sriov_pf_migration_data packet
+ */
+void xe_sriov_pf_migration_data_free(struct xe_sriov_pf_migration_data *data)
+{
+       if (data_needs_bo(data)) {
+               if (data->bo)
+                       xe_bo_unpin_map_no_vm(data->bo);
+       } else {
+               if (data->buff)
+                       kvfree(data->buff);
+       }
+
+       kfree(data);
+}
+
+static int mig_data_init(struct xe_sriov_pf_migration_data *data)
+{
+       struct xe_gt *gt = xe_device_get_gt(data->xe, data->gt);
+
+       if (!gt || data->tile != gt->tile->id)
+               return -EINVAL;
+
+       if (data->size == 0)
+               return 0;
+
+       if (data_needs_bo(data)) {
+               struct xe_bo *bo = xe_bo_create_pin_map_novm(data->xe, gt->tile,
+                                                             PAGE_ALIGN(data->size),
+                                                             ttm_bo_type_kernel,
+                                                             XE_BO_FLAG_SYSTEM | XE_BO_FLAG_PINNED,
+                                                             false);
+               if (IS_ERR(bo))
+                       return PTR_ERR(bo);
+
+               data->bo = bo;
+               data->vaddr = bo->vmap.vaddr;
+       } else {
+               void *buff = kvzalloc(data->size, GFP_KERNEL);
+               if (!buff)
+                       return -ENOMEM;
+
+               data->buff = buff;
+               data->vaddr = buff;
+       }
+
+       return 0;
+}
+
+/**
+ * xe_sriov_pf_migration_data_init() - Initialize the migration data header and backing storage
+ * @data: the &struct xe_sriov_pf_migration_data packet
+ * @tile_id: tile identifier
+ * @gt_id: GT identifier
+ * @type: &enum xe_sriov_pf_migration_data_type
+ * @offset: offset of data packet payload (within wider resource)
+ * @size: size of data packet payload
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_migration_data_init(struct xe_sriov_pf_migration_data *data, u8 tile_id, u8 gt_id,
+                                   unsigned int type, loff_t offset, size_t size)
+{
+       xe_assert(data->xe, type < XE_SRIOV_MIG_DATA_MAX);
+       data->version = 1;
+       data->type = type;
+       data->tile = tile_id;
+       data->gt = gt_id;
+       data->offset = offset;
+       data->size = size;
+       data->remaining = size;
+
+       return mig_data_init(data);
+}
+
+/**
+ * xe_sriov_pf_migration_data_init_from_hdr() - Initialize the migration data backing storage based on header
+ * @data: the &struct xe_sriov_pf_migration_data packet
+ *
+ * Header data is expected to be filled prior to calling this function.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_migration_data_init_from_hdr(struct xe_sriov_pf_migration_data *data)
+{
+       if (WARN_ON(data->hdr_remaining))
+               return -EINVAL;
+
+       data->remaining = data->size;
+
+       return mig_data_init(data);
+}
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration_data.h b/drivers/gpu/drm/xe/xe_sriov_pf_migration_data.h
new file mode 100644
index 0000000000000..1dde4cfcdbc47
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_migration_data.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_PF_MIGRATION_DATA_H_
+#define _XE_SRIOV_PF_MIGRATION_DATA_H_
+
+#include <linux/types.h>
+
+struct xe_device;
+
+enum xe_sriov_pf_migration_data_type {
+       XE_SRIOV_MIG_DATA_DESCRIPTOR = 1,
+       XE_SRIOV_MIG_DATA_TRAILER,
+       XE_SRIOV_MIG_DATA_GGTT,
+       XE_SRIOV_MIG_DATA_MMIO,
+       XE_SRIOV_MIG_DATA_GUC,
+       XE_SRIOV_MIG_DATA_CCS,
+       XE_SRIOV_MIG_DATA_VRAM,
+       XE_SRIOV_MIG_DATA_MAX,
+};
+
+struct xe_sriov_pf_migration_data *
+xe_sriov_pf_migration_data_alloc(struct xe_device *xe);
+void xe_sriov_pf_migration_data_free(struct xe_sriov_pf_migration_data *data);
+
+int xe_sriov_pf_migration_data_init(struct xe_sriov_pf_migration_data *data, u8 tile_id, u8 gt_id,
+                                   unsigned int type, loff_t offset, size_t size);
+int xe_sriov_pf_migration_data_init_from_hdr(struct xe_sriov_pf_migration_data *data);
+
+#endif
-- 
2.50.1
