This is a preparation for moving vendor-specific code from
vfio_pci_core to vendor-specific vfio_pci drivers. The next step will be
creating a dedicated module for NVIDIA NVLink2 devices with P9 extensions
and a dedicated module for Power9 NPU NVLink2 HBAs.

Signed-off-by: Max Gurtovoy <[email protected]>
---
 drivers/vfio/pci/Makefile                     |   2 +-
 drivers/vfio/pci/npu2_trace.h                 |  50 ++++
 .../vfio/pci/{trace.h => nvlink2gpu_trace.h}  |  27 +--
 drivers/vfio/pci/vfio_pci_core.c              |   2 +-
 drivers/vfio/pci/vfio_pci_core.h              |   4 +-
 drivers/vfio/pci/vfio_pci_npu2.c              | 222 ++++++++++++++++++
 ...io_pci_nvlink2.c => vfio_pci_nvlink2gpu.c} | 201 +---------------
 7 files changed, 280 insertions(+), 228 deletions(-)
 create mode 100644 drivers/vfio/pci/npu2_trace.h
 rename drivers/vfio/pci/{trace.h => nvlink2gpu_trace.h} (72%)
 create mode 100644 drivers/vfio/pci/vfio_pci_npu2.c
 rename drivers/vfio/pci/{vfio_pci_nvlink2.c => vfio_pci_nvlink2gpu.c} (59%)

diff --git a/drivers/vfio/pci/Makefile b/drivers/vfio/pci/Makefile
index 16e7d77d63ce..f539f32c9296 100644
--- a/drivers/vfio/pci/Makefile
+++ b/drivers/vfio/pci/Makefile
@@ -5,7 +5,7 @@ obj-$(CONFIG_VFIO_PCI) += vfio-pci.o
 
 vfio-pci-core-y := vfio_pci_core.o vfio_pci_intrs.o vfio_pci_rdwr.o vfio_pci_config.o
 vfio-pci-core-$(CONFIG_VFIO_PCI_IGD) += vfio_pci_igd.o
-vfio-pci-core-$(CONFIG_VFIO_PCI_NVLINK2) += vfio_pci_nvlink2.o
+vfio-pci-core-$(CONFIG_VFIO_PCI_NVLINK2) += vfio_pci_nvlink2gpu.o vfio_pci_npu2.o
 vfio-pci-core-$(CONFIG_S390) += vfio_pci_zdev.o
 
 vfio-pci-y := vfio_pci.o
diff --git a/drivers/vfio/pci/npu2_trace.h b/drivers/vfio/pci/npu2_trace.h
new file mode 100644
index 000000000000..c8a1110132dc
--- /dev/null
+++ b/drivers/vfio/pci/npu2_trace.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * VFIO PCI mmap/mmap_fault tracepoints
+ *
+ * Copyright (C) 2018 IBM Corp.  All rights reserved.
+ *     Author: Alexey Kardashevskiy <[email protected]>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM vfio_pci
+
+#if !defined(_TRACE_VFIO_PCI_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_VFIO_PCI_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(vfio_pci_npu2_mmap,
+       TP_PROTO(struct pci_dev *pdev, unsigned long hpa, unsigned long ua,
+                       unsigned long size, int ret),
+       TP_ARGS(pdev, hpa, ua, size, ret),
+
+       TP_STRUCT__entry(
+               __field(const char *, name)
+               __field(unsigned long, hpa)
+               __field(unsigned long, ua)
+               __field(unsigned long, size)
+               __field(int, ret)
+       ),
+
+       TP_fast_assign(
+               __entry->name = dev_name(&pdev->dev),
+               __entry->hpa = hpa;
+               __entry->ua = ua;
+               __entry->size = size;
+               __entry->ret = ret;
+       ),
+
+       TP_printk("%s: %lx -> %lx size=%lx ret=%d", __entry->name, __entry->hpa,
+                       __entry->ua, __entry->size, __entry->ret)
+);
+
+#endif /* _TRACE_VFIO_PCI_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/vfio/pci
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE npu2_trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/vfio/pci/trace.h b/drivers/vfio/pci/nvlink2gpu_trace.h
similarity index 72%
rename from drivers/vfio/pci/trace.h
rename to drivers/vfio/pci/nvlink2gpu_trace.h
index b2aa986ab9ed..2392b9d4c6c9 100644
--- a/drivers/vfio/pci/trace.h
+++ b/drivers/vfio/pci/nvlink2gpu_trace.h
@@ -62,37 +62,12 @@ TRACE_EVENT(vfio_pci_nvgpu_mmap,
                        __entry->ua, __entry->size, __entry->ret)
 );
 
-TRACE_EVENT(vfio_pci_npu2_mmap,
-       TP_PROTO(struct pci_dev *pdev, unsigned long hpa, unsigned long ua,
-                       unsigned long size, int ret),
-       TP_ARGS(pdev, hpa, ua, size, ret),
-
-       TP_STRUCT__entry(
-               __field(const char *, name)
-               __field(unsigned long, hpa)
-               __field(unsigned long, ua)
-               __field(unsigned long, size)
-               __field(int, ret)
-       ),
-
-       TP_fast_assign(
-               __entry->name = dev_name(&pdev->dev),
-               __entry->hpa = hpa;
-               __entry->ua = ua;
-               __entry->size = size;
-               __entry->ret = ret;
-       ),
-
-       TP_printk("%s: %lx -> %lx size=%lx ret=%d", __entry->name, __entry->hpa,
-                       __entry->ua, __entry->size, __entry->ret)
-);
-
 #endif /* _TRACE_VFIO_PCI_H */
 
 #undef TRACE_INCLUDE_PATH
 #define TRACE_INCLUDE_PATH ../../drivers/vfio/pci
 #undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE trace
+#define TRACE_INCLUDE_FILE nvlink2gpu_trace
 
 /* This part must be outside protection */
 #include <trace/define_trace.h>
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index ba5dd4321487..4de8e352df9c 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -356,7 +356,7 @@ int vfio_pci_core_enable(struct vfio_pci_core_device *vdev)
 
        if (pdev->vendor == PCI_VENDOR_ID_NVIDIA &&
            IS_ENABLED(CONFIG_VFIO_PCI_NVLINK2)) {
-               ret = vfio_pci_nvdia_v100_nvlink2_init(vdev);
+               ret = vfio_pci_nvidia_v100_nvlink2_init(vdev);
                if (ret && ret != -ENODEV) {
                        pci_warn(pdev, "Failed to setup NVIDIA NV2 RAM region\n");
                        goto disable_exit;
diff --git a/drivers/vfio/pci/vfio_pci_core.h b/drivers/vfio/pci/vfio_pci_core.h
index 60b42df6c519..8989443c3086 100644
--- a/drivers/vfio/pci/vfio_pci_core.h
+++ b/drivers/vfio/pci/vfio_pci_core.h
@@ -205,10 +205,10 @@ static inline int vfio_pci_igd_init(struct vfio_pci_core_device *vdev)
 }
 #endif
 #ifdef CONFIG_VFIO_PCI_NVLINK2
-extern int vfio_pci_nvdia_v100_nvlink2_init(struct vfio_pci_core_device *vdev);
+extern int vfio_pci_nvidia_v100_nvlink2_init(struct vfio_pci_core_device *vdev);
 extern int vfio_pci_ibm_npu2_init(struct vfio_pci_core_device *vdev);
 #else
-static inline int vfio_pci_nvdia_v100_nvlink2_init(struct vfio_pci_core_device *vdev)
+static inline int vfio_pci_nvidia_v100_nvlink2_init(struct vfio_pci_core_device *vdev)
 {
        return -ENODEV;
 }
diff --git a/drivers/vfio/pci/vfio_pci_npu2.c b/drivers/vfio/pci/vfio_pci_npu2.c
new file mode 100644
index 000000000000..717745256ab3
--- /dev/null
+++ b/drivers/vfio/pci/vfio_pci_npu2.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * VFIO PCI driver for POWER9 NPU support (NVLink2 host bus adapter).
+ *
+ * Copyright (c) 2020, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * Copyright (C) 2018 IBM Corp.  All rights reserved.
+ *     Author: Alexey Kardashevskiy <[email protected]>
+ *
+ * Register an on-GPU RAM region for cacheable access.
+ *
+ * Derived from original vfio_pci_igd.c:
+ * Copyright (C) 2016 Red Hat, Inc.  All rights reserved.
+ *     Author: Alex Williamson <[email protected]>
+ */
+
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/uaccess.h>
+#include <linux/vfio.h>
+#include <linux/sched/mm.h>
+#include <linux/mmu_context.h>
+#include <asm/kvm_ppc.h>
+
+#include "vfio_pci_core.h"
+
+#define CREATE_TRACE_POINTS
+#include "npu2_trace.h"
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(vfio_pci_npu2_mmap);
+
+struct vfio_pci_npu2_data {
+       void *base; /* ATSD register virtual address, for emulated access */
+       unsigned long mmio_atsd; /* ATSD physical address */
+       unsigned long gpu_tgt; /* TGT address of corresponding GPU RAM */
+       unsigned int link_speed; /* The link speed from DT's ibm,nvlink-speed */
+};
+
+static size_t vfio_pci_npu2_rw(struct vfio_pci_core_device *vdev,
+               char __user *buf, size_t count, loff_t *ppos, bool iswrite)
+{
+       unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
+       struct vfio_pci_npu2_data *data = vdev->region[i].data;
+       loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+
+       if (pos >= vdev->region[i].size)
+               return -EINVAL;
+
+       count = min(count, (size_t)(vdev->region[i].size - pos));
+
+       if (iswrite) {
+               if (copy_from_user(data->base + pos, buf, count))
+                       return -EFAULT;
+       } else {
+               if (copy_to_user(buf, data->base + pos, count))
+                       return -EFAULT;
+       }
+       *ppos += count;
+
+       return count;
+}
+
+static int vfio_pci_npu2_mmap(struct vfio_pci_core_device *vdev,
+               struct vfio_pci_region *region, struct vm_area_struct *vma)
+{
+       int ret;
+       struct vfio_pci_npu2_data *data = region->data;
+       unsigned long req_len = vma->vm_end - vma->vm_start;
+
+       if (req_len != PAGE_SIZE)
+               return -EINVAL;
+
+       vma->vm_flags |= VM_PFNMAP;
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+       ret = remap_pfn_range(vma, vma->vm_start, data->mmio_atsd >> PAGE_SHIFT,
+                       req_len, vma->vm_page_prot);
+       trace_vfio_pci_npu2_mmap(vdev->pdev, data->mmio_atsd, vma->vm_start,
+                       vma->vm_end - vma->vm_start, ret);
+
+       return ret;
+}
+
+static void vfio_pci_npu2_release(struct vfio_pci_core_device *vdev,
+               struct vfio_pci_region *region)
+{
+       struct vfio_pci_npu2_data *data = region->data;
+
+       memunmap(data->base);
+       kfree(data);
+}
+
+static int vfio_pci_npu2_add_capability(struct vfio_pci_core_device *vdev,
+               struct vfio_pci_region *region, struct vfio_info_cap *caps)
+{
+       struct vfio_pci_npu2_data *data = region->data;
+       struct vfio_region_info_cap_nvlink2_ssatgt captgt = {
+               .header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT,
+               .header.version = 1,
+               .tgt = data->gpu_tgt
+       };
+       struct vfio_region_info_cap_nvlink2_lnkspd capspd = {
+               .header.id = VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD,
+               .header.version = 1,
+               .link_speed = data->link_speed
+       };
+       int ret;
+
+       ret = vfio_info_add_capability(caps, &captgt.header, sizeof(captgt));
+       if (ret)
+               return ret;
+
+       return vfio_info_add_capability(caps, &capspd.header, sizeof(capspd));
+}
+
+static const struct vfio_pci_regops vfio_pci_npu2_regops = {
+       .rw = vfio_pci_npu2_rw,
+       .mmap = vfio_pci_npu2_mmap,
+       .release = vfio_pci_npu2_release,
+       .add_capability = vfio_pci_npu2_add_capability,
+};
+
+int vfio_pci_ibm_npu2_init(struct vfio_pci_core_device *vdev)
+{
+       int ret;
+       struct vfio_pci_npu2_data *data;
+       struct device_node *nvlink_dn;
+       u32 nvlink_index = 0, mem_phandle = 0;
+       struct pci_dev *npdev = vdev->pdev;
+       struct device_node *npu_node = pci_device_to_OF_node(npdev);
+       struct pci_controller *hose = pci_bus_to_host(npdev->bus);
+       u64 mmio_atsd = 0;
+       u64 tgt = 0;
+       u32 link_speed = 0xff;
+
+       /*
+        * PCI config space does not tell us about NVLink presence but
+        * platform does, use this.
+        */
+       if (!pnv_pci_get_gpu_dev(vdev->pdev))
+               return -ENODEV;
+
+       if (of_property_read_u32(npu_node, "memory-region", &mem_phandle))
+               return -ENODEV;
+
+       /*
+        * NPU2 normally has 8 ATSD registers (for concurrency) and 6 links
+        * so we can allocate one register per link, using nvlink index as
+        * a key.
+        * There is always at least one ATSD register so as long as at least
+        * NVLink bridge #0 is passed to the guest, ATSD will be available.
+        */
+       nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
+       if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
+                       &nvlink_index)))
+               return -ENODEV;
+
+       if (of_property_read_u64_index(hose->dn, "ibm,mmio-atsd", nvlink_index,
+                       &mmio_atsd)) {
+               if (of_property_read_u64_index(hose->dn, "ibm,mmio-atsd", 0,
+                               &mmio_atsd)) {
+                       dev_warn(&vdev->pdev->dev, "No available ATSD found\n");
+                       mmio_atsd = 0;
+               } else {
+                       dev_warn(&vdev->pdev->dev,
+                                "Using fallback ibm,mmio-atsd[0] for ATSD.\n");
+               }
+       }
+
+       if (of_property_read_u64(npu_node, "ibm,device-tgt-addr", &tgt)) {
+               dev_warn(&vdev->pdev->dev, "No ibm,device-tgt-addr found\n");
+               return -EFAULT;
+       }
+
+       if (of_property_read_u32(npu_node, "ibm,nvlink-speed", &link_speed)) {
+               dev_warn(&vdev->pdev->dev, "No ibm,nvlink-speed found\n");
+               return -EFAULT;
+       }
+
+       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       data->mmio_atsd = mmio_atsd;
+       data->gpu_tgt = tgt;
+       data->link_speed = link_speed;
+       if (data->mmio_atsd) {
+               data->base = memremap(data->mmio_atsd, SZ_64K, MEMREMAP_WT);
+               if (!data->base) {
+                       ret = -ENOMEM;
+                       goto free_exit;
+               }
+       }
+
+       /*
+        * We want to expose the capability even if this specific NVLink
+        * did not get its own ATSD register because capabilities
+        * belong to VFIO regions and normally there will be ATSD register
+        * assigned to the NVLink bridge.
+        */
+       ret = vfio_pci_register_dev_region(vdev,
+                       PCI_VENDOR_ID_IBM |
+                       VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
+                       VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD,
+                       &vfio_pci_npu2_regops,
+                       data->mmio_atsd ? PAGE_SIZE : 0,
+                       VFIO_REGION_INFO_FLAG_READ |
+                       VFIO_REGION_INFO_FLAG_WRITE |
+                       VFIO_REGION_INFO_FLAG_MMAP,
+                       data);
+       if (ret)
+               goto free_exit;
+
+       return 0;
+
+free_exit:
+       if (data->base)
+               memunmap(data->base);
+       kfree(data);
+
+       return ret;
+}
diff --git a/drivers/vfio/pci/vfio_pci_nvlink2.c b/drivers/vfio/pci/vfio_pci_nvlink2gpu.c
similarity index 59%
rename from drivers/vfio/pci/vfio_pci_nvlink2.c
rename to drivers/vfio/pci/vfio_pci_nvlink2gpu.c
index 8ef2c62a9d27..6dce1e78ee82 100644
--- a/drivers/vfio/pci/vfio_pci_nvlink2.c
+++ b/drivers/vfio/pci/vfio_pci_nvlink2gpu.c
@@ -19,14 +19,14 @@
 #include <linux/sched/mm.h>
 #include <linux/mmu_context.h>
 #include <asm/kvm_ppc.h>
+
 #include "vfio_pci_core.h"
 
 #define CREATE_TRACE_POINTS
-#include "trace.h"
+#include "nvlink2gpu_trace.h"
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(vfio_pci_nvgpu_mmap_fault);
 EXPORT_TRACEPOINT_SYMBOL_GPL(vfio_pci_nvgpu_mmap);
-EXPORT_TRACEPOINT_SYMBOL_GPL(vfio_pci_npu2_mmap);
 
 struct vfio_pci_nvgpu_data {
        unsigned long gpu_hpa; /* GPU RAM physical address */
@@ -207,7 +207,7 @@ static int vfio_pci_nvgpu_group_notifier(struct notifier_block *nb,
        return NOTIFY_OK;
 }
 
-int vfio_pci_nvdia_v100_nvlink2_init(struct vfio_pci_core_device *vdev)
+int vfio_pci_nvidia_v100_nvlink2_init(struct vfio_pci_core_device *vdev)
 {
        int ret;
        u64 reg[2];
@@ -293,198 +293,3 @@ int vfio_pci_nvdia_v100_nvlink2_init(struct vfio_pci_core_device *vdev)
 
        return ret;
 }
-
-/*
- * IBM NPU2 bridge
- */
-struct vfio_pci_npu2_data {
-       void *base; /* ATSD register virtual address, for emulated access */
-       unsigned long mmio_atsd; /* ATSD physical address */
-       unsigned long gpu_tgt; /* TGT address of corresponding GPU RAM */
-       unsigned int link_speed; /* The link speed from DT's ibm,nvlink-speed */
-};
-
-static size_t vfio_pci_npu2_rw(struct vfio_pci_core_device *vdev,
-               char __user *buf, size_t count, loff_t *ppos, bool iswrite)
-{
-       unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
-       struct vfio_pci_npu2_data *data = vdev->region[i].data;
-       loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
-
-       if (pos >= vdev->region[i].size)
-               return -EINVAL;
-
-       count = min(count, (size_t)(vdev->region[i].size - pos));
-
-       if (iswrite) {
-               if (copy_from_user(data->base + pos, buf, count))
-                       return -EFAULT;
-       } else {
-               if (copy_to_user(buf, data->base + pos, count))
-                       return -EFAULT;
-       }
-       *ppos += count;
-
-       return count;
-}
-
-static int vfio_pci_npu2_mmap(struct vfio_pci_core_device *vdev,
-               struct vfio_pci_region *region, struct vm_area_struct *vma)
-{
-       int ret;
-       struct vfio_pci_npu2_data *data = region->data;
-       unsigned long req_len = vma->vm_end - vma->vm_start;
-
-       if (req_len != PAGE_SIZE)
-               return -EINVAL;
-
-       vma->vm_flags |= VM_PFNMAP;
-       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-       ret = remap_pfn_range(vma, vma->vm_start, data->mmio_atsd >> PAGE_SHIFT,
-                       req_len, vma->vm_page_prot);
-       trace_vfio_pci_npu2_mmap(vdev->pdev, data->mmio_atsd, vma->vm_start,
-                       vma->vm_end - vma->vm_start, ret);
-
-       return ret;
-}
-
-static void vfio_pci_npu2_release(struct vfio_pci_core_device *vdev,
-               struct vfio_pci_region *region)
-{
-       struct vfio_pci_npu2_data *data = region->data;
-
-       memunmap(data->base);
-       kfree(data);
-}
-
-static int vfio_pci_npu2_add_capability(struct vfio_pci_core_device *vdev,
-               struct vfio_pci_region *region, struct vfio_info_cap *caps)
-{
-       struct vfio_pci_npu2_data *data = region->data;
-       struct vfio_region_info_cap_nvlink2_ssatgt captgt = {
-               .header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT,
-               .header.version = 1,
-               .tgt = data->gpu_tgt
-       };
-       struct vfio_region_info_cap_nvlink2_lnkspd capspd = {
-               .header.id = VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD,
-               .header.version = 1,
-               .link_speed = data->link_speed
-       };
-       int ret;
-
-       ret = vfio_info_add_capability(caps, &captgt.header, sizeof(captgt));
-       if (ret)
-               return ret;
-
-       return vfio_info_add_capability(caps, &capspd.header, sizeof(capspd));
-}
-
-static const struct vfio_pci_regops vfio_pci_npu2_regops = {
-       .rw = vfio_pci_npu2_rw,
-       .mmap = vfio_pci_npu2_mmap,
-       .release = vfio_pci_npu2_release,
-       .add_capability = vfio_pci_npu2_add_capability,
-};
-
-int vfio_pci_ibm_npu2_init(struct vfio_pci_core_device *vdev)
-{
-       int ret;
-       struct vfio_pci_npu2_data *data;
-       struct device_node *nvlink_dn;
-       u32 nvlink_index = 0, mem_phandle = 0;
-       struct pci_dev *npdev = vdev->pdev;
-       struct device_node *npu_node = pci_device_to_OF_node(npdev);
-       struct pci_controller *hose = pci_bus_to_host(npdev->bus);
-       u64 mmio_atsd = 0;
-       u64 tgt = 0;
-       u32 link_speed = 0xff;
-
-       /*
-        * PCI config space does not tell us about NVLink presense but
-        * platform does, use this.
-        */
-       if (!pnv_pci_get_gpu_dev(vdev->pdev))
-               return -ENODEV;
-
-       if (of_property_read_u32(npu_node, "memory-region", &mem_phandle))
-               return -ENODEV;
-
-       /*
-        * NPU2 normally has 8 ATSD registers (for concurrency) and 6 links
-        * so we can allocate one register per link, using nvlink index as
-        * a key.
-        * There is always at least one ATSD register so as long as at least
-        * NVLink bridge #0 is passed to the guest, ATSD will be available.
-        */
-       nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
-       if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
-                       &nvlink_index)))
-               return -ENODEV;
-
-       if (of_property_read_u64_index(hose->dn, "ibm,mmio-atsd", nvlink_index,
-                       &mmio_atsd)) {
-               if (of_property_read_u64_index(hose->dn, "ibm,mmio-atsd", 0,
-                               &mmio_atsd)) {
-                       dev_warn(&vdev->pdev->dev, "No available ATSD found\n");
-                       mmio_atsd = 0;
-               } else {
-                       dev_warn(&vdev->pdev->dev,
-                                "Using fallback ibm,mmio-atsd[0] for ATSD.\n");
-               }
-       }
-
-       if (of_property_read_u64(npu_node, "ibm,device-tgt-addr", &tgt)) {
-               dev_warn(&vdev->pdev->dev, "No ibm,device-tgt-addr found\n");
-               return -EFAULT;
-       }
-
-       if (of_property_read_u32(npu_node, "ibm,nvlink-speed", &link_speed)) {
-               dev_warn(&vdev->pdev->dev, "No ibm,nvlink-speed found\n");
-               return -EFAULT;
-       }
-
-       data = kzalloc(sizeof(*data), GFP_KERNEL);
-       if (!data)
-               return -ENOMEM;
-
-       data->mmio_atsd = mmio_atsd;
-       data->gpu_tgt = tgt;
-       data->link_speed = link_speed;
-       if (data->mmio_atsd) {
-               data->base = memremap(data->mmio_atsd, SZ_64K, MEMREMAP_WT);
-               if (!data->base) {
-                       ret = -ENOMEM;
-                       goto free_exit;
-               }
-       }
-
-       /*
-        * We want to expose the capability even if this specific NVLink
-        * did not get its own ATSD register because capabilities
-        * belong to VFIO regions and normally there will be ATSD register
-        * assigned to the NVLink bridge.
-        */
-       ret = vfio_pci_register_dev_region(vdev,
-                       PCI_VENDOR_ID_IBM |
-                       VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
-                       VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD,
-                       &vfio_pci_npu2_regops,
-                       data->mmio_atsd ? PAGE_SIZE : 0,
-                       VFIO_REGION_INFO_FLAG_READ |
-                       VFIO_REGION_INFO_FLAG_WRITE |
-                       VFIO_REGION_INFO_FLAG_MMAP,
-                       data);
-       if (ret)
-               goto free_exit;
-
-       return 0;
-
-free_exit:
-       if (data->base)
-               memunmap(data->base);
-       kfree(data);
-
-       return ret;
-}
-- 
2.25.4

Reply via email to