On Sun, Oct 12, 2025 at 11:32:47AM -0700, Matthew Brost wrote:
> On Sat, Oct 11, 2025 at 09:38:46PM +0200, Michał Winiarski wrote:
> > Vendor-specific VFIO driver for Xe will implement VF migration.
> > Export everything that's needed for migration ops.
> > 
> > Signed-off-by: Michał Winiarski <[email protected]>
> > ---
> >  drivers/gpu/drm/xe/Makefile        |   2 +
> >  drivers/gpu/drm/xe/xe_sriov_vfio.c | 252 +++++++++++++++++++++++++++++
> >  include/drm/intel/xe_sriov_vfio.h  |  28 ++++
> >  3 files changed, 282 insertions(+)
> >  create mode 100644 drivers/gpu/drm/xe/xe_sriov_vfio.c
> >  create mode 100644 include/drm/intel/xe_sriov_vfio.h
> > 
> > diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
> > index e253d65366de4..a5c5afff42aa6 100644
> > --- a/drivers/gpu/drm/xe/Makefile
> > +++ b/drivers/gpu/drm/xe/Makefile
> > @@ -181,6 +181,8 @@ xe-$(CONFIG_PCI_IOV) += \
> >     xe_sriov_pf_service.o \
> >     xe_tile_sriov_pf_debugfs.o
> >  
> > +xe-$(CONFIG_XE_VFIO_PCI) += xe_sriov_vfio.o
> > +
> >  # include helpers for tests even when XE is built-in
> >  ifdef CONFIG_DRM_XE_KUNIT_TEST
> >  xe-y += tests/xe_kunit_helpers.o
> > diff --git a/drivers/gpu/drm/xe/xe_sriov_vfio.c b/drivers/gpu/drm/xe/xe_sriov_vfio.c
> > new file mode 100644
> > index 0000000000000..a510d1bde93f0
> > --- /dev/null
> > +++ b/drivers/gpu/drm/xe/xe_sriov_vfio.c
> > @@ -0,0 +1,252 @@
> > +// SPDX-License-Identifier: MIT
> > +/*
> > + * Copyright © 2025 Intel Corporation
> > + */
> > +
> > +#include <drm/intel/xe_sriov_vfio.h>
> > +
> > +#include "xe_pm.h"
> > +#include "xe_sriov.h"
> > +#include "xe_sriov_pf_control.h"
> > +#include "xe_sriov_pf_migration.h"
> > +#include "xe_sriov_pf_migration_data.h"
> > +
> > +/**
> > + * xe_sriov_vfio_migration_supported() - Check if migration is supported.
> > + * @pdev: PF PCI device
> > + *
> > + * Return: true if migration is supported, false otherwise.
> > + */
> > +bool xe_sriov_vfio_migration_supported(struct pci_dev *pdev)
> > +{
> > +   struct xe_device *xe = pci_get_drvdata(pdev);
> > +
> > +   if (!IS_SRIOV_PF(xe))
> > +           return false;
> > +
> > +   return xe_sriov_pf_migration_supported(xe);
> > +}
> > +EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_migration_supported, "xe-vfio-pci");
> > +
> > +/**
> > + * xe_sriov_vfio_wait_flr_done - Wait for VF FLR completion.
> > + * @pdev: PF PCI device
> > + * @vfid: VF identifier
> > + *
> > + * This function will wait until VF FLR is processed by PF on all tiles (or
> > + * until timeout occurs).
> > + *
> > + * Return: 0 on success or a negative error code on failure.
> > + */
> > +int xe_sriov_vfio_wait_flr_done(struct pci_dev *pdev, unsigned int vfid)
> > +{
> > +   struct xe_device *xe = pci_get_drvdata(pdev);
> > +
> > +   if (!IS_SRIOV_PF(xe))
> > +           return -ENODEV;
> > +
> > +   return xe_sriov_pf_control_wait_flr(xe, vfid);
> 
> Ideally I think you'd want the exported suffix to match on all these
> functions.
> 
> i.e.,
> 
> xe_sriov_vfio_SUFFIX
> xe_sriov_pf_control_SUFFIX
> 
> Maybe this doesn't make sense in all cases, so take this as a suggestion,
> not a blocker.

The VFIO side uses different naming than the pf control.
Pause == Stop
Stop == Error
Restore == Resume

So the translation needs to happen somewhere, and I guess the current
choice is to do it at the exports.
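
To illustrate, roughly (just a sketch, not part of this series; the
vfio-side helper and its pdev/vfid plumbing are hypothetical, only the
xe_sriov_vfio_* calls are the exports added here):

static int xe_vfio_pci_set_state(struct pci_dev *pdev, unsigned int vfid,
				 enum vfio_device_mig_state new_state)
{
	switch (new_state) {
	case VFIO_DEVICE_STATE_STOP:
		/* vfio STOP maps to the PF control "pause" */
		return xe_sriov_vfio_stop(pdev, vfid);
	case VFIO_DEVICE_STATE_RUNNING:
		/* vfio "back to running" maps to the PF control "resume" */
		return xe_sriov_vfio_run(pdev, vfid);
	case VFIO_DEVICE_STATE_ERROR:
		/* vfio ERROR maps to the PF control "stop" */
		return xe_sriov_vfio_error(pdev, vfid);
	default:
		return -EOPNOTSUPP;
	}
}

(The real vfio driver will of course step through the migration arcs
rather than a flat switch; this is only to show where the name
translation ends up.)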

> 
> > +}
> > +EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_wait_flr_done, "xe-vfio-pci");
> > +
> > +/**
> > + * xe_sriov_vfio_stop - Stop VF.
> > + * @pdev: PF PCI device
> > + * @vfid: VF identifier
> > + *
> > + * This function will pause VF on all tiles/GTs.
> > + *
> > + * Return: 0 on success or a negative error code on failure.
> > + */
> > +int xe_sriov_vfio_stop(struct pci_dev *pdev, unsigned int vfid)
> > +{
> > +   struct xe_device *xe = pci_get_drvdata(pdev);
> > +   int ret;
> > +
> > +   if (!IS_SRIOV_PF(xe))
> > +           return -ENODEV;
> > +
> > +   xe_pm_runtime_get(xe);
> 
> The PF must hold a PM ref on behalf of the VF (right?), as VFs don't have
> access to runtime PM.
> 
> So either assert that a PM ref is already held here and drop the get/put,
> or use xe_pm_runtime_get_noresume here.
> 
> Exporting a function that wakes runtime PM is IMO risky, as waking runtime
> PM takes a bunch of locks, which could create a problem at the caller if
> it is holding locks. Best to avoid this if possible.

I'll replace it with an assert.
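
Something along these lines (a sketch of one variant only; the exact
check is TBD and pm_runtime_suspended() here is just one way to express
"the device is already awake"):

int xe_sriov_vfio_stop(struct pci_dev *pdev, unsigned int vfid)
{
	struct xe_device *xe = pci_get_drvdata(pdev);

	if (!IS_SRIOV_PF(xe))
		return -ENODEV;

	/*
	 * Assumption in this sketch: xe-vfio-pci holds a runtime PM ref on
	 * behalf of the VF for as long as migration ops can be called, so
	 * the device is awake and we never take the wake-up path (and its
	 * locks) from here.
	 */
	xe_assert(xe, !pm_runtime_suspended(xe->drm.dev));

	return xe_sriov_pf_control_pause_vf(xe, vfid);
}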

> 
> > +   ret = xe_sriov_pf_control_pause_vf(xe, vfid);
> > +   xe_pm_runtime_put(xe);
> > +
> > +   return ret;
> > +}
> > +EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_stop, "xe-vfio-pci");
> > +
> > +/**
> > + * xe_sriov_vfio_run - Run VF.
> > + * @pdev: PF PCI device
> > + * @vfid: VF identifier
> > + *
> > + * This function will resume VF on all tiles.
> > + *
> > + * Return: 0 on success or a negative error code on failure.
> > + */
> > +int xe_sriov_vfio_run(struct pci_dev *pdev, unsigned int vfid)
> > +{
> > +   struct xe_device *xe = pci_get_drvdata(pdev);
> > +   int ret;
> > +
> > +   if (!IS_SRIOV_PF(xe))
> > +           return -ENODEV;
> > +
> > +   xe_pm_runtime_get(xe);
> > +   ret = xe_sriov_pf_control_resume_vf(xe, vfid);
> > +   xe_pm_runtime_put(xe);
> > +
> > +   return ret;
> > +}
> > +EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_run, "xe-vfio-pci");
> > +
> > +/**
> > + * xe_sriov_vfio_stop_copy_enter - Copy VF migration data from device (while stopped).
> > + * @pdev: PF PCI device
> > + * @vfid: VF identifier
> > + *
> > + * This function will save VF migration data on all tiles.
> > + *
> > + * Return: 0 on success or a negative error code on failure.
> > + */
> > +int xe_sriov_vfio_stop_copy_enter(struct pci_dev *pdev, unsigned int vfid)
> > +{
> > +   struct xe_device *xe = pci_get_drvdata(pdev);
> > +   int ret;
> > +
> > +   if (!IS_SRIOV_PF(xe))
> > +           return -ENODEV;
> > +
> > +   xe_pm_runtime_get(xe);
> > +   ret = xe_sriov_pf_control_save_vf(xe, vfid);
> > +   xe_pm_runtime_put(xe);
> > +
> > +   return ret;
> > +}
> > +EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_stop_copy_enter, "xe-vfio-pci");
> > +
> > +/**
> > + * xe_sriov_vfio_stop_copy_exit - Wait until VF migration data save is done.
> > + * @pdev: PF PCI device
> > + * @vfid: VF identifier
> > + *
> > + * This function will wait until VF migration data is saved on all tiles.
> > + *
> > + * Return: 0 on success or a negative error code on failure.
> > + */
> > +int xe_sriov_vfio_stop_copy_exit(struct pci_dev *pdev, unsigned int vfid)
> > +{
> > +   struct xe_device *xe = pci_get_drvdata(pdev);
> > +   int ret;
> > +
> > +   if (!IS_SRIOV_PF(xe))
> > +           return -ENODEV;
> > +
> > +   xe_pm_runtime_get(xe);
> > +   ret = xe_sriov_pf_control_wait_save_vf(xe, vfid);
> > +   xe_pm_runtime_put(xe);
> > +
> > +   return ret;
> > +}
> > +EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_stop_copy_exit, "xe-vfio-pci");
> > +
> > +/**
> > + * xe_sriov_vfio_resume_enter - Copy VF migration data to device (while stopped).
> > + * @pdev: PF PCI device
> > + * @vfid: VF identifier
> > + *
> > + * This function will restore VF migration data on all tiles.
> > + *
> > + * Return: 0 on success or a negative error code on failure.
> > + */
> > +int xe_sriov_vfio_resume_enter(struct pci_dev *pdev, unsigned int vfid)
> > +{
> > +   struct xe_device *xe = pci_get_drvdata(pdev);
> > +   int ret;
> > +
> > +   if (!IS_SRIOV_PF(xe))
> > +           return -ENODEV;
> > +
> > +   xe_pm_runtime_get(xe);
> > +   ret = xe_sriov_pf_control_restore_vf(xe, vfid);
> > +   xe_pm_runtime_put(xe);
> > +
> > +   return ret;
> > +}
> > +EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_resume_enter, "xe-vfio-pci");
> > +
> > +/**
> > + * xe_sriov_vfio_resume_exit - Wait until VF migration data is copied to the device.
> > + * @pdev: PF PCI device
> > + * @vfid: VF identifier
> > + *
> > + * This function will wait until VF migration data is restored on all tiles.
> > + *
> > + * Return: 0 on success or a negative error code on failure.
> > + */
> > +int xe_sriov_vfio_resume_exit(struct pci_dev *pdev, unsigned int vfid)
> > +{
> > +   struct xe_device *xe = pci_get_drvdata(pdev);
> > +   int ret;
> > +
> > +   if (!IS_SRIOV_PF(xe))
> > +           return -ENODEV;
> > +
> > +   xe_pm_runtime_get(xe);
> > +   ret = xe_sriov_pf_control_wait_restore_vf(xe, vfid);
> > +   xe_pm_runtime_put(xe);
> > +
> > +   return ret;
> > +}
> > +EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_resume_exit, "xe-vfio-pci");
> > +
> > +/**
> > + * xe_sriov_vfio_error - Move VF to error state.
> > + * @pdev: PF PCI device
> > + * @vfid: VF identifier
> > + *
> > + * This function will stop VF on all tiles.
> > + * Reset is needed to move it out of error state.
> > + *
> > + * Return: 0 on success or a negative error code on failure.
> > + */
> > +int xe_sriov_vfio_error(struct pci_dev *pdev, unsigned int vfid)
> > +{
> > +   struct xe_device *xe = pci_get_drvdata(pdev);
> > +   int ret;
> > +
> > +   if (!IS_SRIOV_PF(xe))
> > +           return -ENODEV;
> > +
> > +   xe_pm_runtime_get(xe);
> > +   ret = xe_sriov_pf_control_stop_vf(xe, vfid);
> > +   xe_pm_runtime_put(xe);
> > +
> > +   return ret;
> > +}
> > +EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_error, "xe-vfio-pci");
> > +
> 
> Kernel doc for the below functions.

Ok.
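
Something along these lines, I assume (sketch, wording may change;
similarly for _data_write() and _stop_copy_size()):

/**
 * xe_sriov_vfio_data_read() - Read migration data from the VF.
 * @pdev: PF PCI device
 * @vfid: VF identifier
 * @buf: user buffer to copy the migration data to
 * @len: size of the user buffer
 *
 * Return: number of bytes copied on success, 0 if no more migration data
 * is currently available, or a negative error code on failure.
 */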

Thanks,
-Michał

> 
> Matt
> 
> > +ssize_t xe_sriov_vfio_data_read(struct pci_dev *pdev, unsigned int vfid,
> > +                           char __user *buf, size_t len)
> > +{
> > +   struct xe_device *xe = pci_get_drvdata(pdev);
> > +
> > +   return xe_sriov_pf_migration_data_read(xe, vfid, buf, len);
> > +}
> > +EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_data_read, "xe-vfio-pci");
> > +
> > +ssize_t xe_sriov_vfio_data_write(struct pci_dev *pdev, unsigned int vfid,
> > +                            const char __user *buf, size_t len)
> > +{
> > +   struct xe_device *xe = pci_get_drvdata(pdev);
> > +
> > +   return xe_sriov_pf_migration_data_write(xe, vfid, buf, len);
> > +}
> > +EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_data_write, "xe-vfio-pci");
> > +
> > +ssize_t xe_sriov_vfio_stop_copy_size(struct pci_dev *pdev, unsigned int vfid)
> > +{
> > +   struct xe_device *xe = pci_get_drvdata(pdev);
> > +
> > +   return xe_sriov_pf_migration_size(xe, vfid);
> > +}
> > +EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_stop_copy_size, "xe-vfio-pci");
> > diff --git a/include/drm/intel/xe_sriov_vfio.h b/include/drm/intel/xe_sriov_vfio.h
> > new file mode 100644
> > index 0000000000000..24e272f84c0e6
> > --- /dev/null
> > +++ b/include/drm/intel/xe_sriov_vfio.h
> > @@ -0,0 +1,28 @@
> > +/* SPDX-License-Identifier: MIT */
> > +/*
> > + * Copyright © 2025 Intel Corporation
> > + */
> > +
> > +#ifndef _XE_SRIOV_VFIO_H_
> > +#define _XE_SRIOV_VFIO_H_
> > +
> > +#include <linux/types.h>
> > +
> > +struct pci_dev;
> > +
> > +bool xe_sriov_vfio_migration_supported(struct pci_dev *pdev);
> > +int xe_sriov_vfio_wait_flr_done(struct pci_dev *pdev, unsigned int vfid);
> > +int xe_sriov_vfio_stop(struct pci_dev *pdev, unsigned int vfid);
> > +int xe_sriov_vfio_run(struct pci_dev *pdev, unsigned int vfid);
> > +int xe_sriov_vfio_stop_copy_enter(struct pci_dev *pdev, unsigned int vfid);
> > +int xe_sriov_vfio_stop_copy_exit(struct pci_dev *pdev, unsigned int vfid);
> > +int xe_sriov_vfio_resume_enter(struct pci_dev *pdev, unsigned int vfid);
> > +int xe_sriov_vfio_resume_exit(struct pci_dev *pdev, unsigned int vfid);
> > +int xe_sriov_vfio_error(struct pci_dev *pdev, unsigned int vfid);
> > +ssize_t xe_sriov_vfio_data_read(struct pci_dev *pdev, unsigned int vfid,
> > +                           char __user *buf, size_t len);
> > +ssize_t xe_sriov_vfio_data_write(struct pci_dev *pdev, unsigned int vfid,
> > +                            const char __user *buf, size_t len);
> > +ssize_t xe_sriov_vfio_stop_copy_size(struct pci_dev *pdev, unsigned int vfid);
> > +
> > +#endif /* _XE_SRIOV_VFIO_H_ */
> > -- 
> > 2.50.1
> > 
