On 07/12/2023 3:09, Jason Gunthorpe wrote:
On Wed, Dec 06, 2023 at 10:38:57AM +0200, Yishai Hadas wrote:
+static ssize_t
+virtiovf_pci_core_read(struct vfio_device *core_vdev, char __user *buf,
+                      size_t count, loff_t *ppos)
+{
+       struct virtiovf_pci_core_device *virtvdev = container_of(
+               core_vdev, struct virtiovf_pci_core_device, core_device.vdev);
+       struct pci_dev *pdev = virtvdev->core_device.pdev;
+       unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
+       loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+       int ret;
+
+       if (!count)
+               return 0;
+
+       if (index == VFIO_PCI_CONFIG_REGION_INDEX)
+               return virtiovf_pci_read_config(core_vdev, buf, count, ppos);
+
+       if (index != VFIO_PCI_BAR0_REGION_INDEX)
+               return vfio_pci_core_read(core_vdev, buf, count, ppos);
+
+       ret = pm_runtime_resume_and_get(&pdev->dev);
+       if (ret) {
+               pci_info_ratelimited(pdev, "runtime resume failed %d\n",
+                                    ret);
+               return -EIO;
+       }
+
+       ret = translate_io_bar_to_mem_bar(virtvdev, pos, buf, count, true);
+       pm_runtime_put(&pdev->dev);

There are two copies of this pm_runtime sequence; I'd put the
pm_runtime handling into translate_io_bar_to_mem_bar() and organize
these to be more success-oriented:

  if (index == VFIO_PCI_BAR0_REGION_INDEX)
     return translate_io_bar_to_mem_bar();
  return vfio_pci_core_read();

Yes, good idea.
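Something like the below, assuming translate_io_bar_to_mem_bar() keeps
its current argument list (just a sketch, the actual BAR access body is
elided):

  static ssize_t
  translate_io_bar_to_mem_bar(struct virtiovf_pci_core_device *virtvdev,
                              loff_t pos, char __user *buf, size_t count,
                              bool read)
  {
          struct pci_dev *pdev = virtvdev->core_device.pdev;
          ssize_t ret;

          ret = pm_runtime_resume_and_get(&pdev->dev);
          if (ret) {
                  pci_info_ratelimited(pdev, "runtime resume failed %d\n",
                                       ret);
                  return -EIO;
          }

          /* ... the existing IO BAR to memory BAR access, setting ret ... */

          pm_runtime_put(&pdev->dev);
          return ret;
  }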

+static ssize_t
+virtiovf_pci_core_write(struct vfio_device *core_vdev, const char __user *buf,
+                       size_t count, loff_t *ppos)
+{
+       struct virtiovf_pci_core_device *virtvdev = container_of(
+               core_vdev, struct virtiovf_pci_core_device, core_device.vdev);
+       struct pci_dev *pdev = virtvdev->core_device.pdev;
+       unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
+       loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+       int ret;
+
+       if (!count)
+               return 0;
+
+       if (index == VFIO_PCI_CONFIG_REGION_INDEX) {
+               size_t register_offset;
+               loff_t copy_offset;
+               size_t copy_count;
+
+               if (range_intersect_range(pos, count, PCI_COMMAND, sizeof(virtvdev->pci_cmd),
+                                         &copy_offset, &copy_count,
+                                         &register_offset)) {
+                       if (copy_from_user((void *)&virtvdev->pci_cmd + register_offset,
+                                          buf + copy_offset,
+                                          copy_count))
+                               return -EFAULT;
+               }
+
+               if (range_intersect_range(pos, count, PCI_BASE_ADDRESS_0,
+                                         sizeof(virtvdev->pci_base_addr_0),
+                                         &copy_offset, &copy_count,
+                                         &register_offset)) {
+                       if (copy_from_user((void *)&virtvdev->pci_base_addr_0 + register_offset,
+                                          buf + copy_offset,
+                                          copy_count))
+                               return -EFAULT;
+               }
+       }
+
+       if (index != VFIO_PCI_BAR0_REGION_INDEX)
+               return vfio_pci_core_write(core_vdev, buf, count, ppos);
+
+       ret = pm_runtime_resume_and_get(&pdev->dev);
+       if (ret) {
+               pci_info_ratelimited(pdev, "runtime resume failed %d\n", ret);
+               return -EIO;
+       }
+
+       ret = translate_io_bar_to_mem_bar(virtvdev, pos, (char __user *)buf, count, false);
+       pm_runtime_put(&pdev->dev);

Same
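
I.e. the write path would follow the same pattern (sketch):

  if (index == VFIO_PCI_BAR0_REGION_INDEX)
     return translate_io_bar_to_mem_bar(virtvdev, pos,
                                        (char __user *)buf, count, false);
  return vfio_pci_core_write(core_vdev, buf, count, ppos);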

+static const struct vfio_device_ops virtiovf_vfio_pci_tran_ops = {
+       .name = "virtio-vfio-pci-trans",
+       .init = virtiovf_pci_init_device,
+       .release = virtiovf_pci_core_release_dev,
+       .open_device = virtiovf_pci_open_device,
+       .close_device = vfio_pci_core_close_device,
+       .ioctl = virtiovf_vfio_pci_core_ioctl,
+       .read = virtiovf_pci_core_read,
+       .write = virtiovf_pci_core_write,
+       .mmap = vfio_pci_core_mmap,
+       .request = vfio_pci_core_request,
+       .match = vfio_pci_core_match,
+       .bind_iommufd = vfio_iommufd_physical_bind,
+       .unbind_iommufd = vfio_iommufd_physical_unbind,
+       .attach_ioas = vfio_iommufd_physical_attach_ioas,

Missing detach_ioas

Sure, and I'll also add 'device_feature' for completeness.
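I.e. something like the below, using the existing core helpers:

+       .detach_ioas = vfio_iommufd_physical_detach_ioas,
+       .device_feature = vfio_pci_core_ioctl_feature,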


+static bool virtiovf_bar0_exists(struct pci_dev *pdev)
+{
+       struct resource *res = pdev->resource;
+
+       return res->flags ? true : false;

The ?: isn't necessary; the cast to bool does this automatically.

Right, this can just be 'return res->flags;'.
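I.e. the helper will simply become:

  static bool virtiovf_bar0_exists(struct pci_dev *pdev)
  {
          struct resource *res = pdev->resource;

          return res->flags;
  }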


I didn't try to check the virtio parts of this, but the construction
of the variant driver looks OK, so

Reviewed-by: Jason Gunthorpe <j...@nvidia.com>

Thanks Jason
I'll add as part of V7.

Yishai


Jason

