Re: [PATCH 2/3] drm/i915/gvt: make gvt oblivious of kvmgt data structures

2020-01-09 Thread Jani Nikula
On Wed, 08 Jan 2020, Julian Stecklina wrote:
> On Wed, 2020-01-08 at 12:24 +0200, Jani Nikula wrote:
>> On Mon, 06 Jan 2020, Julian Stecklina wrote:
> [...]
>> > +  /* Hypervisor-specific device state. */
>> > +  void *vdev;
>> 
>> I have no clue about the relative merits of the patch, but you can use
>> the actual type for the pointer with a forward declaration. You don't
>> need the definition for that.
>> 
>> i.e.
>> 
>> struct kvmgt_vdev;
>> ...
>>  struct kvmgt_vdev *vdev;
>
> The goal here is to make the GVT code independent of the hypervisor backend.
> Different hypervisor backends need to keep different per-device state,
> so using the KVM type here defeats the purpose.
>
> I assume this is not only useful for us, but also for other hypervisor
> backends, such as Xen or 3rd-party hypervisors.

Right, carry on, sorry for the noise. ;)

BR,
Jani.

-- 
Jani Nikula, Intel Open Source Graphics Center
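
For context, a minimal sketch of the two alternatives under discussion;
the wrapper struct names here are invented for illustration and appear
nowhere in the patch:

/* Alternative 1, the forward declaration suggested above: gvt.h only
 * needs the incomplete type, the definition stays in kvmgt.c, but the
 * member is still tied to the KVMGT backend by name. */
struct kvmgt_vdev;

struct intel_vgpu_a {
	struct kvmgt_vdev *vdev;	/* pointer to incomplete type is legal C */
};

/* Alternative 2, the opaque pointer the patch uses: gvt.h names no
 * backend at all, so any hypervisor backend can hang its state here. */
struct intel_vgpu_b {
	void *vdev;			/* backend-specific device state */
};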


Re: [PATCH 2/3] drm/i915/gvt: make gvt oblivious of kvmgt data structures

2020-01-08 Thread Julian Stecklina
On Wed, 2020-01-08 at 12:24 +0200, Jani Nikula wrote:
> On Mon, 06 Jan 2020, Julian Stecklina wrote:
[...]
> > +   /* Hypervisor-specific device state. */
> > +   void *vdev;
> 
> I have no clue about the relative merits of the patch, but you can use
> the actual type for the pointer with a forward declaration. You don't
> need the definition for that.
> 
> i.e.
> 
> struct kvmgt_vdev;
> ...
>   struct kvmgt_vdev *vdev;

The goal here is to make the GVT code independent of the hypervisor backend.
Different hypervisor backends need to keep different per-device state, so using
the KVM type here defeats the purpose.

I assume this is not only useful for us, but also for other hypervisor backends,
such as Xen or 3rd-party hypervisors.

Julian
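
To make that concrete, a hypothetical sketch (not part of this series)
of how a second backend could reuse the same opaque member; xengt_vdev
and its fields are invented for illustration:

/* A Xen backend would define its own state in its own file, invisible
 * to gvt.h, and recover it through the same accessor pattern. */
struct xengt_vdev {
	struct intel_vgpu *vgpu;	/* back-pointer, as kvmgt_vdev has */
	unsigned int domid;		/* illustrative Xen-specific field */
};

static inline struct xengt_vdev *xengt_vdev(struct intel_vgpu *vgpu)
{
	return intel_vgpu_vdev(vgpu);	/* void * converts implicitly in C */
}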




Re: [PATCH 2/3] drm/i915/gvt: make gvt oblivious of kvmgt data structures

2020-01-08 Thread Jani Nikula
On Mon, 06 Jan 2020, Julian Stecklina wrote:
> Instead of defining KVMGT per-device state in struct intel_vgpu
> directly, add an indirection. This makes the GVT code oblivious of
> what state KVMGT needs to keep.
>
> The intention here is to eventually make it possible to build
> hypervisor backends for the mediator, without having to touch the
> mediator itself. This is a first step.
>
> Cc: Zhenyu Wang 
> Cc: zhiyuan...@intel.com
> Cc: hang.y...@intel.com
>
> Signed-off-by: Julian Stecklina 
> ---
>  drivers/gpu/drm/i915/gvt/gvt.h   |  32 +---
>  drivers/gpu/drm/i915/gvt/kvmgt.c | 287 +++
>  2 files changed, 184 insertions(+), 135 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
> index 0081b051d3e0..2604739e5680 100644
> --- a/drivers/gpu/drm/i915/gvt/gvt.h
> +++ b/drivers/gpu/drm/i915/gvt/gvt.h
> @@ -196,31 +196,8 @@ struct intel_vgpu {
>  
>   struct dentry *debugfs;
>  
> -#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
> - struct {
> - struct mdev_device *mdev;
> - struct vfio_region *region;
> - int num_regions;
> - struct eventfd_ctx *intx_trigger;
> - struct eventfd_ctx *msi_trigger;
> -
> - /*
> -  * Two caches are used to avoid mapping duplicated pages (eg.
> -  * scratch pages). This help to reduce dma setup overhead.
> -  */
> - struct rb_root gfn_cache;
> - struct rb_root dma_addr_cache;
> - unsigned long nr_cache_entries;
> - struct mutex cache_lock;
> -
> - struct notifier_block iommu_notifier;
> - struct notifier_block group_notifier;
> - struct kvm *kvm;
> - struct work_struct release_work;
> - atomic_t released;
> - struct vfio_device *vfio_device;
> - } vdev;
> -#endif
> + /* Hypervisor-specific device state. */
> + void *vdev;

I have no clue about the relative merits of the patch, but you can use
the actual type for the pointer with a forward declaration. You don't
need the definition for that.

i.e.

struct kvmgt_vdev;
...
struct kvmgt_vdev *vdev;


BR,
Jani.


>  
>   struct list_head dmabuf_obj_list_head;
>   struct mutex dmabuf_lock;
> @@ -231,6 +208,11 @@ struct intel_vgpu {
>   u32 scan_nonprivbb;
>  };
>  
> +static inline void *intel_vgpu_vdev(struct intel_vgpu *vgpu)
> +{
> + return vgpu->vdev;
> +}
> +
>  /* validating GM healthy status*/
>  #define vgpu_is_vm_unhealthy(ret_val) \
>   (((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT))
> diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
> index bd79a9718cc7..d725a4fb94b9 100644
> --- a/drivers/gpu/drm/i915/gvt/kvmgt.c
> +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
> @@ -108,6 +108,36 @@ struct gvt_dma {
>   struct kref ref;
>  };
>  
> +struct kvmgt_vdev {
> + struct intel_vgpu *vgpu;
> + struct mdev_device *mdev;
> + struct vfio_region *region;
> + int num_regions;
> + struct eventfd_ctx *intx_trigger;
> + struct eventfd_ctx *msi_trigger;
> +
> + /*
> +  * Two caches are used to avoid mapping duplicated pages (e.g.
> +  * scratch pages). This helps to reduce DMA setup overhead.
> +  */
> + struct rb_root gfn_cache;
> + struct rb_root dma_addr_cache;
> + unsigned long nr_cache_entries;
> + struct mutex cache_lock;
> +
> + struct notifier_block iommu_notifier;
> + struct notifier_block group_notifier;
> + struct kvm *kvm;
> + struct work_struct release_work;
> + atomic_t released;
> + struct vfio_device *vfio_device;
> +};
> +
> +static inline struct kvmgt_vdev *kvmgt_vdev(struct intel_vgpu *vgpu)
> +{
> + return intel_vgpu_vdev(vgpu);
> +}
> +
>  static inline bool handle_valid(unsigned long handle)
>  {
>   return !!(handle & ~0xff);
> @@ -129,7 +159,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
>   for (npage = 0; npage < total_pages; npage++) {
>   unsigned long cur_gfn = gfn + npage;
>  
> - ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1);
> + ret = vfio_unpin_pages(mdev_dev(kvmgt_vdev(vgpu)->mdev), &cur_gfn, 1);
>   WARN_ON(ret != 1);
>   }
>  }
> @@ -152,7 +182,7 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
>   unsigned long cur_gfn = gfn + npage;
>   unsigned long pfn;
>  
> - ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1,
> + ret = vfio_pin_pages(mdev_dev(kvmgt_vdev(vgpu)->mdev), &cur_gfn, 1,
>IOMMU_READ | IOMMU_WRITE, &pfn);
>   if (ret != 1) {
> gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
> @@ -219,7 +249,7 @@ static void gvt_dma_unmap_page(struct intel_vgpu *v
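
Condensed from the hunks above, the accessor pair the patch introduces:
gvt.h hands out the opaque pointer, and kvmgt.c narrows it back to its
own type (no explicit cast needed, since void * converts implicitly in C):

/* gvt.h, generic side: knows nothing about the backend. */
static inline void *intel_vgpu_vdev(struct intel_vgpu *vgpu)
{
	return vgpu->vdev;
}

/* kvmgt.c, backend side: recovers the typed pointer. */
static inline struct kvmgt_vdev *kvmgt_vdev(struct intel_vgpu *vgpu)
{
	return intel_vgpu_vdev(vgpu);
}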

[PATCH 2/3] drm/i915/gvt: make gvt oblivious of kvmgt data structures

2020-01-06 Thread Julian Stecklina
Instead of defining KVMGT per-device state in struct intel_vgpu
directly, add an indirection. This makes the GVT code oblivious of
what state KVMGT needs to keep.

The intention here is to eventually make it possible to build
hypervisor backends for the mediator, without having to touch the
mediator itself. This is a first step.

Cc: Zhenyu Wang 
Cc: zhiyuan...@intel.com
Cc: hang.y...@intel.com

Signed-off-by: Julian Stecklina 
---
 drivers/gpu/drm/i915/gvt/gvt.h   |  32 +---
 drivers/gpu/drm/i915/gvt/kvmgt.c | 287 +++
 2 files changed, 184 insertions(+), 135 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 0081b051d3e0..2604739e5680 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -196,31 +196,8 @@ struct intel_vgpu {
 
struct dentry *debugfs;
 
-#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
-   struct {
-   struct mdev_device *mdev;
-   struct vfio_region *region;
-   int num_regions;
-   struct eventfd_ctx *intx_trigger;
-   struct eventfd_ctx *msi_trigger;
-
-   /*
-* Two caches are used to avoid mapping duplicated pages (eg.
-* scratch pages). This help to reduce dma setup overhead.
-*/
-   struct rb_root gfn_cache;
-   struct rb_root dma_addr_cache;
-   unsigned long nr_cache_entries;
-   struct mutex cache_lock;
-
-   struct notifier_block iommu_notifier;
-   struct notifier_block group_notifier;
-   struct kvm *kvm;
-   struct work_struct release_work;
-   atomic_t released;
-   struct vfio_device *vfio_device;
-   } vdev;
-#endif
+   /* Hypervisor-specific device state. */
+   void *vdev;
 
struct list_head dmabuf_obj_list_head;
struct mutex dmabuf_lock;
@@ -231,6 +208,11 @@ struct intel_vgpu {
u32 scan_nonprivbb;
 };
 
+static inline void *intel_vgpu_vdev(struct intel_vgpu *vgpu)
+{
+   return vgpu->vdev;
+}
+
 /* validating GM healthy status*/
 #define vgpu_is_vm_unhealthy(ret_val) \
(((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT))
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index bd79a9718cc7..d725a4fb94b9 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -108,6 +108,36 @@ struct gvt_dma {
struct kref ref;
 };
 
+struct kvmgt_vdev {
+   struct intel_vgpu *vgpu;
+   struct mdev_device *mdev;
+   struct vfio_region *region;
+   int num_regions;
+   struct eventfd_ctx *intx_trigger;
+   struct eventfd_ctx *msi_trigger;
+
+   /*
+* Two caches are used to avoid mapping duplicated pages (e.g.
+* scratch pages). This helps to reduce DMA setup overhead.
+*/
+   struct rb_root gfn_cache;
+   struct rb_root dma_addr_cache;
+   unsigned long nr_cache_entries;
+   struct mutex cache_lock;
+
+   struct notifier_block iommu_notifier;
+   struct notifier_block group_notifier;
+   struct kvm *kvm;
+   struct work_struct release_work;
+   atomic_t released;
+   struct vfio_device *vfio_device;
+};
+
+static inline struct kvmgt_vdev *kvmgt_vdev(struct intel_vgpu *vgpu)
+{
+   return intel_vgpu_vdev(vgpu);
+}
+
 static inline bool handle_valid(unsigned long handle)
 {
return !!(handle & ~0xff);
@@ -129,7 +159,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
for (npage = 0; npage < total_pages; npage++) {
unsigned long cur_gfn = gfn + npage;
 
-   ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1);
+   ret = vfio_unpin_pages(mdev_dev(kvmgt_vdev(vgpu)->mdev), &cur_gfn, 1);
WARN_ON(ret != 1);
}
 }
@@ -152,7 +182,7 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long cur_gfn = gfn + npage;
unsigned long pfn;
 
-   ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1,
+   ret = vfio_pin_pages(mdev_dev(kvmgt_vdev(vgpu)->mdev), &cur_gfn, 1,
 IOMMU_READ | IOMMU_WRITE, &pfn);
if (ret != 1) {
gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
@@ -219,7 +249,7 @@ static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
 static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
dma_addr_t dma_addr)
 {
-   struct rb_node *node = vgpu->vdev.dma_addr_cache.rb_node;
+   struct rb_node *node = kvmgt_vdev(vgpu)->dma_addr_cache.rb_node;
struct gvt_dma *itr;
 
while (node) {
@@ -237,7 +267,7 @@ static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
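
The quoted diff is truncated above, before the allocation side of the
change. A hedged sketch of its plausible shape; kvmgt_vdev_alloc() is an
invented name, not an actual hunk from this patch:

/* Sketch only: shows how the backend would populate the opaque member
 * it later recovers via kvmgt_vdev(). */
static int kvmgt_vdev_alloc(struct intel_vgpu *vgpu)
{
	struct kvmgt_vdev *vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);

	if (!vdev)
		return -ENOMEM;

	vdev->vgpu = vgpu;	/* back-pointer, per struct kvmgt_vdev above */
	vgpu->vdev = vdev;	/* publish behind the opaque pointer */
	return 0;
}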