Re: [PATCH v18 25/26] drm/virtio: Support shmem shrinking

2023-11-03 Thread Gurchetan Singh
On Sun, Oct 29, 2023 at 4:03 PM Dmitry Osipenko <
dmitry.osipe...@collabora.com> wrote:

> Support the generic drm-shmem memory shrinker and add a new madvise IOCTL
> to the VirtIO-GPU driver. The BO cache manager of the Mesa driver will mark
> BOs as "don't need" using the new IOCTL, letting the shrinker purge the
> marked BOs on OOM; the shrinker will also evict unpurgeable shmem BOs from
> memory if the guest supports a SWAP file or partition.
>
> Acked-by: Gerd Hoffmann 
> Signed-off-by: Daniel Almeida 
> Signed-off-by: Dmitry Osipenko 
> ---
>  drivers/gpu/drm/virtio/virtgpu_drv.h| 13 +-
>  drivers/gpu/drm/virtio/virtgpu_gem.c| 35 ++
>  drivers/gpu/drm/virtio/virtgpu_ioctl.c  | 25 ++
>  drivers/gpu/drm/virtio/virtgpu_kms.c|  8 
>  drivers/gpu/drm/virtio/virtgpu_object.c | 61 +
>  drivers/gpu/drm/virtio/virtgpu_vq.c | 40 
>  include/uapi/drm/virtgpu_drm.h  | 14 ++
>  7 files changed, 195 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h
> b/drivers/gpu/drm/virtio/virtgpu_drv.h
> index 421f524ae1de..33a78b24c272 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_drv.h
> +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
> @@ -278,7 +278,7 @@ struct virtio_gpu_fpriv {
>  };
>
>  /* virtgpu_ioctl.c */
> -#define DRM_VIRTIO_NUM_IOCTLS 12
> +#define DRM_VIRTIO_NUM_IOCTLS 13
>  extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
>  void virtio_gpu_create_context(struct drm_device *dev, struct drm_file
> *file);
>
> @@ -316,6 +316,8 @@ void virtio_gpu_array_put_free_delayed(struct
> virtio_gpu_device *vgdev,
>  void virtio_gpu_array_put_free_work(struct work_struct *work);
>  int virtio_gpu_array_prepare(struct virtio_gpu_device *vgdev,
>  struct virtio_gpu_object_array *objs);
> +int virtio_gpu_gem_host_mem_release(struct virtio_gpu_object *bo);
> +int virtio_gpu_gem_madvise(struct virtio_gpu_object *obj, int madv);
>  int virtio_gpu_gem_pin(struct virtio_gpu_object *bo);
>  void virtio_gpu_gem_unpin(struct virtio_gpu_object *bo);
>
> @@ -329,6 +331,8 @@ void virtio_gpu_cmd_create_resource(struct
> virtio_gpu_device *vgdev,
> struct virtio_gpu_fence *fence);
>  void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
>struct virtio_gpu_object *bo);
> +int virtio_gpu_cmd_release_resource(struct virtio_gpu_device *vgdev,
> +   struct virtio_gpu_object *bo);
>  void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
> uint64_t offset,
> uint32_t width, uint32_t height,
> @@ -349,6 +353,9 @@ void virtio_gpu_object_attach(struct virtio_gpu_device
> *vgdev,
>   struct virtio_gpu_object *obj,
>   struct virtio_gpu_mem_entry *ents,
>   unsigned int nents);
> +void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
> + struct virtio_gpu_object *obj,
> + struct virtio_gpu_fence *fence);
>  void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
> struct virtio_gpu_output *output);
>  int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev);
> @@ -492,4 +499,8 @@ void virtio_gpu_vram_unmap_dma_buf(struct device *dev,
>  int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
> struct drm_file *file);
>
> +/* virtgpu_gem_shrinker.c */
> +int virtio_gpu_gem_shrinker_init(struct virtio_gpu_device *vgdev);
> +void virtio_gpu_gem_shrinker_fini(struct virtio_gpu_device *vgdev);
> +
>  #endif
> diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c
> b/drivers/gpu/drm/virtio/virtgpu_gem.c
> index 97e67064c97e..748f7bbb0e6d 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_gem.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
> @@ -147,10 +147,20 @@ void virtio_gpu_gem_object_close(struct
> drm_gem_object *obj,
> struct virtio_gpu_device *vgdev = obj->dev->dev_private;
> struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
> struct virtio_gpu_object_array *objs;
> +   struct virtio_gpu_object *bo;
>
> if (!vgdev->has_virgl_3d)
> return;
>
> +   bo = gem_to_virtio_gpu_obj(obj);
> +
> +   /*
> +* Purged BO was already detached and released, the resource ID
> +* is invalid by now.
> +*/
> +   if (!virtio_gpu_gem_madvise(bo, VIRTGPU_MADV_WILLNEED))
> +   return;
> +
> objs = virtio_gpu_array_alloc(1);
> if (!objs)
> return;
> @@ -315,6 +325,31 @@ int virtio_gpu_array_prepare(struct virtio_gpu_device
> *vgdev,
> return ret;
>  }
>
> +int virtio_gpu_gem_madvise(struct virtio_gpu_object *bo, int madv)
> +{
> +   if 
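For illustration, a minimal userspace sketch of how a BO cache might drive the
madvise IOCTL described above. Everything virtgpu-specific here (the ioctl
name, struct drm_virtgpu_madvise and its fields) is assumed from this series'
uapi header, which is truncated above; treat the names as illustrative, not
authoritative. Only VIRTGPU_MADV_WILLNEED/DONTNEED are taken from the
fragments visible in this patch.

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/virtgpu_drm.h>            /* the uapi header this series extends */

/* Mark an idle cached BO purgeable, and re-mark it WILLNEED before reuse.
 * A zero 'retained' on the WILLNEED call means the shrinker already purged
 * the pages and the BO must be reallocated by the cache. */
static int bo_cache_madvise(int fd, uint32_t handle, uint32_t madv,
                            uint32_t *retained)
{
        struct drm_virtgpu_madvise args = {
                .bo_handle = handle,    /* assumed field names */
                .madv = madv,           /* VIRTGPU_MADV_WILLNEED / _DONTNEED */
        };

        if (ioctl(fd, DRM_IOCTL_VIRTGPU_MADVISE, &args))   /* assumed ioctl name */
                return -1;
        if (retained)
                *retained = args.retained;
        return 0;
}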

Re: [PATCH v3] drm/virtio: add new virtio gpu capset definitions

2023-10-18 Thread Gurchetan Singh
On Tue, Oct 10, 2023 at 9:41 PM Huang Rui  wrote:

> On Tue, Oct 10, 2023 at 11:52:14PM +0800, Dmitry Osipenko wrote:
> > On 10/10/23 18:40, Dmitry Osipenko wrote:
> > > On 10/10/23 16:57, Huang Rui wrote:
> > >> These definitions are used for qemu, and qemu imports this macro in
> > >> the headers to enable gfxstream, venus, cross domain, and drm (native
> > >> context) for virtio gpu. So we should add them even if the kernel
> > >> doesn't use them.
> > >>
> > >> Signed-off-by: Huang Rui 
> > >> Reviewed-by: Akihiko Odaki 
> > >> ---
> > >>
> > >> Changes V1 -> V2:
> > >> - Add all capsets including gfxstream and venus in kernel header
> (Dmitry Osipenko)
> > >>
> > >> Changes V2 -> V3:
> > >> - Add missed capsets including cross domain and drm (native context)
> > >>   (Dmitry Osipenko)
> > >>
> > >> v1: https://lore.kernel.org/lkml/20230915105918.3763061-1-ray.hu...@amd.com/
> > >> v2: https://lore.kernel.org/lkml/20231010032553.1138036-1-ray.hu...@amd.com/
> > >>
> > >>  include/uapi/linux/virtio_gpu.h | 4 
> > >>  1 file changed, 4 insertions(+)
> > >>
> > >> diff --git a/include/uapi/linux/virtio_gpu.h
> b/include/uapi/linux/virtio_gpu.h
> > >> index f556fde07b76..240911c8da31 100644
> > >> --- a/include/uapi/linux/virtio_gpu.h
> > >> +++ b/include/uapi/linux/virtio_gpu.h
> > >> @@ -309,6 +309,10 @@ struct virtio_gpu_cmd_submit {
> > >>
> > >>  #define VIRTIO_GPU_CAPSET_VIRGL 1
> > >>  #define VIRTIO_GPU_CAPSET_VIRGL2 2
> > >> +#define VIRTIO_GPU_CAPSET_GFXSTREAM 3
> > >
> > > The GFXSTREAM capset isn't correct, it should be GFXSTREAM_VULKAN in
> > > accordance with [1] and [2]. There are more capsets for GFXSTREAM.
> > >
> > > [1]
> > >
> https://github.com/google/crosvm/blob/main/rutabaga_gfx/src/rutabaga_utils.rs#L172
> > >
> > > [2]
> > >
> https://patchwork.kernel.org/project/qemu-devel/patch/20231006010835.444-7-gurchetansi...@chromium.org/
> >
> > Though, maybe those are "rutabaga" capsets that are not related to
> > virtio-gpu, because crosvm has other defs for virtio-gpu capsets [3].
> > The DRM capset is oddly missing in [3] and the code uses the "rutabaga"
> > capset for DRM and virtio-gpu.
> >
> > [3]
> >
> https://github.com/google/crosvm/blob/main/devices/src/virtio/gpu/protocol.rs#L416
>
> Yes, [3] is the file that I referred to when adding these capset definitions.
> And it's defined as gfxstream, not gfxstream_vulkan.
>
> >
> > Gurchetan, could you please clarify which capset definitions are
> > related to virtio-gpu and gfxstream? The
> > GFXSTREAM_VULKAN/GLES/MAGMA/COMPOSER set or just the single GFXSTREAM?


It should be GFXSTREAM_VULKAN.  The rest are more experimental and easy to
modify in terms of the enum value, should the need arise.

I imagine the virtio-spec update to reflect the GFXSTREAM to
GFXSTREAM_VULKAN change will happen eventually.
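For reference, the capset block might then read as below. GFXSTREAM_VULKAN's
value follows the crosvm/rutabaga definitions linked above, and the remaining
entries are shown only for completeness; treat them as illustrative until the
virtio spec update mentioned above lands.

 #define VIRTIO_GPU_CAPSET_VIRGL 1
 #define VIRTIO_GPU_CAPSET_VIRGL2 2
+#define VIRTIO_GPU_CAPSET_GFXSTREAM_VULKAN 3
+#define VIRTIO_GPU_CAPSET_VENUS 4
+#define VIRTIO_GPU_CAPSET_CROSS_DOMAIN 5
+#define VIRTIO_GPU_CAPSET_DRM 6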


> >
>
> Gurchetan, may we have your insight?
>
> Thanks,
> Ray
>
> > --
> > Best regards,
> > Dmitry
> >
>

Re: [PATCH v6 0/3] Add sync object UAPI support to VirtIO-GPU driver

2023-07-28 Thread Gurchetan Singh
On Wed, Jul 19, 2023 at 11:58 AM Dmitry Osipenko <
dmitry.osipe...@collabora.com> wrote:

> 27.06.2023 20:16, Rob Clark wrote:
> ...
> >> Now these are just suggestions, and while I think they are good, you
> can safely ignore them.
> >>
> >> But there are also the DRM requirements, which state "userspace side must
> >> be fully reviewed and tested to the standards of that user-space
> >> project.".  So to meet the minimum requirements, I think we should
> >> at least have one of the following (not all, just one) reviewed:
> >>
> >> 1) venus using the new uapi
> >> 2) gfxstream vk using the new uapi
> >> 3) amdgpu nctx out of "draft" mode and using the new uapi.
> >> 4) virtio-intel using new uapi
> >> 5) turnip using your new uapi
> >
> > forgot to mention this earlier, but
> > https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/23533
> >
> > Dmitry, you can also add, if you haven't already:
> >
> > Tested-by: Rob Clark 
>
> Gurchetan, Turnip Mesa virtio support is ready to be merged upstream;
> it's using this new syncobj UAPI. Could you please give your r-b if you
> don't have objections?
>

Given that the Turnip native context has been reviewed using this UAPI, your
change now meets the requirements and is ready to merge.

One thing I noticed is you might need explicit padding between
`num_out_syncobjs` and `in_syncobjs`.  Otherwise, feel free to add my
acked-by.
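
To make the padding remark concrete: in a uAPI struct, a __u64 that follows an
odd number of __u32 fields picks up invisible compiler padding (and the amount
can differ between 32-bit and 64-bit userspace), so the usual practice is to
name the pad explicitly. A sketch of the shape being suggested; field names
follow this series, and whether a pad is actually required depends on the
final field order:

struct drm_virtgpu_execbuffer {
        __u32 flags;
        __u32 size;
        __u64 command;
        __u64 bo_handles;
        __u32 num_bo_handles;
        __s32 fence_fd;
        __u32 ring_idx;
        __u32 syncobj_stride;
        __u32 num_in_syncobjs;
        __u32 num_out_syncobjs;
        /* if the __u32 count above ever becomes odd, add an explicit
         * "__u32 pad;" here instead of relying on implicit padding,
         * so the layout is identical for 32-bit and 64-bit userspace */
        __u64 in_syncobjs;
        __u64 out_syncobjs;
};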


>
> --
> Best regards,
> Dmitry
>
>

Re: [PATCH v6 0/3] Add sync object UAPI support to VirtIO-GPU driver

2023-05-12 Thread Gurchetan Singh
On Thu, May 11, 2023 at 7:33 PM Dmitry Osipenko <
dmitry.osipe...@collabora.com> wrote:

> On 5/12/23 03:17, Gurchetan Singh wrote:
> ...
> > Can we get one of the Mesa MRs reviewed first?  There's currently no
> > virtio-intel MR AFAICT, and the amdgpu one is marked as "Draft:".
> >
> > Even for the amdgpu, Pierre suggests the feature "will be marked as
> > experimental both in Mesa and virglrenderer" and we can revise as needed.
> > The DRM requirements seem to warn against adding a UAPI too hastily...
> >
> > You can get the deqp-vk 1.2 tests to pass with the current UAPI, if you
> > just change your mesa <--> virglrenderer protocol a little.  Perhaps that
> > way is even better, since you plumb the in sync-obj into host-side
> command
> > submission.
> >
> > Without inter-context sharing of the fence, this MR really only adds
> guest
> > kernel syntactic sugar.
> >
> > Note I'm not against syntactic sugar, but I just want to point out that
> you
> > can likely merge the native context work without any UAPI changes, in
> case
> > it's not clear.
> >
> > If this was for the core drm syncobj implementation, and not just
> >> driver ioctl parsing and wiring up the core helpers, I would agree
> >> with you.
> >>
> >
> > There are several possible and viable paths to get the features in
> question
> > (VK1.2 syncobjs, and inter-context fence sharing).  There are paths
> > entirely without the syncobj, paths that only use the syncobj for the
> > inter-context fence sharing case and create host syncobjs for VK1.2,
> paths
> > that also use guest syncobjs in every proxied command submission.
> >
> > It's really hard to tell which one is better.  Here's my suggestion:
> >
> > 1) Get the native contexts reviewed/merged in Mesa/virglrenderer using
> the
> > current UAPI.  Options for VK1.2 include: pushing down the syncobjs to
> the
> > host, and simulating the syncobj (as already done).  It's fine to mark
> > these contexts as "experimental" like msm-experimental.  That will allow
> > you to experiment with the protocols, come up with tests, and hopefully
> > determine an answer to the host versus guest syncobj question.
> >
> > 2) Once you've completed (1), try to add UAPI changes for features that
> > are missing or things that are suboptimal, with the knowledge gained
> > from doing (1).
> >
> > WDYT?
>
> Having syncobj support available in the DRM driver is a mandatory
> requirement for native contexts because userspace (Mesa) relies on the
> presence of sync object support. In particular, the Intel Mesa driver
> checks whether the DRM driver supports sync objects to decide which
> features are available; ANV depends on the syncobj support.


> I'm not familiar with the history of Venus and its limitations. Perhaps
> the reason it's using host-side syncobjs is to have a 1:1 Vulkan API
> mapping between guest and host. Not sure if Venus could use guest
> syncobjs instead or whether there are problems with that.
>

Why not submit a Venus MR?  It's already in-tree, and you can see how your
API works in scenarios with a host side timeline semaphore (aka syncobj).
I think they are also interested in fencing/sync improvements.


>
> When syncobj was initially added to the kernel, it was done to support
> the Vulkan wait API. For Venus the actual Vulkan driver is on the host
> side, while for native contexts it's on the guest side. Native contexts
> don't need a syncobj on the host side; it would be unnecessary overhead
> for every nctx to have one on the host. Hence, if there is no good
> reason for host-side syncobjs, then why do that?


Depends on your threading model.  You can have the following scenarios:

1) N guest contexts : 1 host thread
2) N guest contexts : N host threads for each context
3) 1:1 thread

I think the native context is single-threaded (1), IIRC?  If the goal is to
push command submission to the host (for inter-context sharing), I think
you'll at least want (2).  For a 1:1 model (a la gfxstream), one host
thread can pass another thread's out_sync_objs as its in_sync_objs (in the
same virtgpu context).  I think that's kind of the goal of timeline
semaphores, with the example given by Khronos being a compute thread + a
graphics thread.
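
As a rough sketch of that 1:1 case (struct and field names assumed from this
series, not verified): thread A's submission signals a syncobj, and thread B's
submission in the same virtgpu context lists the same handle as an in-syncobj,
giving timeline-semaphore-like ordering between a graphics and a compute thread:

/* Sketch only: drm_virtgpu_execbuffer{,_syncobj} field names are assumed
 * from this series.  Error handling omitted. */
#include <stdint.h>
#include <xf86drm.h>
#include <drm/virtgpu_drm.h>

static void submit_gfx_then_compute(int fd, uint32_t syncobj_handle,
                                    void *gfx_cmds, uint32_t gfx_size,
                                    void *comp_cmds, uint32_t comp_size)
{
        struct drm_virtgpu_execbuffer_syncobj sync = { .handle = syncobj_handle };

        /* "Thread A": the graphics submission signals the syncobj on completion. */
        struct drm_virtgpu_execbuffer gfx = {
                .command = (uintptr_t)gfx_cmds,
                .size = gfx_size,
                .syncobj_stride = sizeof(sync),
                .num_out_syncobjs = 1,
                .out_syncobjs = (uintptr_t)&sync,
        };
        drmIoctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &gfx);

        /* "Thread B": a compute submission in the same virtgpu context lists the
         * same handle as an in-syncobj, so it is ordered after the graphics job. */
        struct drm_virtgpu_execbuffer comp = {
                .command = (uintptr_t)comp_cmds,
                .size = comp_size,
                .syncobj_stride = sizeof(sync),
                .num_in_syncobjs = 1,
                .in_syncobjs = (uintptr_t)&sync,
        };
        drmIoctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &comp);
}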

I'm not saying one threading model is better than any other; perhaps the
native context using the host driver in the guest is so good that it doesn't
matter.  I'm just saying these are the types of discussions we can have if
we tried to get one of the Mesa MRs merged first ;-)


> Native contexts pass deqp synchronization tests, they use sync objects
> universally for both GL and VK. Games work, 

Re: [PATCH v6 0/3] Add sync object UAPI support to VirtIO-GPU driver

2023-05-11 Thread Gurchetan Singh
On Mon, May 8, 2023 at 6:59 AM Rob Clark  wrote:

> On Wed, May 3, 2023 at 10:07 AM Gurchetan Singh
>  wrote:
> >
> >
> >
> > On Mon, May 1, 2023 at 8:38 AM Dmitry Osipenko <
> dmitry.osipe...@collabora.com> wrote:
> >>
> >> On 4/16/23 14:52, Dmitry Osipenko wrote:
> >> > We have multiple Vulkan context types that are awaiting the addition
> >> > of the sync object DRM UAPI support to the VirtIO-GPU kernel driver:
> >> >
> >> >  1. Venus context
> >> >  2. Native contexts (virtio-freedreno, virtio-intel, virtio-amdgpu)
> >> >
> >> > Mesa core supports the DRM sync object UAPI, providing Vulkan drivers
> >> > with a generic fencing implementation that we want to utilize.
> >> >
> >> > This patch adds initial sync objects support. It creates a foundation
> >> > for further fencing improvements. Later on we will want to extend the
> >> > VirtIO-GPU fencing API with passing fence IDs to the host for waiting;
> >> > it will be a new additional VirtIO-GPU IOCTL and more. Today we have
> >> > several VirtIO-GPU context drivers in the works that require VirtIO-GPU
> >> > to support the sync objects UAPI.
> >> >
> >> > The patch is heavily inspired by the sync object UAPI implementation
> >> > of the MSM driver.
> >>
> >> Gerd, do you have any objections to merging this series?
> >>
> >> We have AMDGPU [1] and Intel [2] native context WIP drivers depending on
> >> the sync object support. It is the only part missing from the kernel today
> >> that is wanted by the native context drivers. Otherwise, there are a few
> >> other things in Qemu and virglrenderer left to sort out.
> >>
> >> [1] https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/21658
> >> [2]
> https://gitlab.freedesktop.org/digetx/mesa/-/commits/native-context-iris
> >
> >
> > I'm not saying this change isn't good, just that it's probably possible to
> > implement the native contexts (even up to VK1.2) without it.  But this
> > patch series may be the most ergonomic way to do it, given how Mesa is
> > designed.  But you probably want one of the Mesa MRs reviewed first before
> > merging (I added a comment on the amdgpu change) and that is a requirement
> > [a].
> >
> > [a] "The userspace side must be fully reviewed and tested to the
> standards of that user space project. For e.g. mesa this means piglit
> testcases and review on the mailing list. This is again to ensure that the
> new interface actually gets the job done." -- from the requirements
> >
>
> tbh, the syncobj support is all drm core, the only driver-specific part is
> the ioctl parsing.  IMHO the existing tests and the two existing consumers
> are sufficient.  (Also, considering the additional non-drm
> dependencies involved.)
>
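
Concretely, the "all drm core" split being described is roughly the pattern
below on the driver side; a sketch using the generic syncobj helpers, not this
series' exact code:

#include <drm/drm_syncobj.h>

/* In-syncobj: the driver only translates a handle into a dma_fence; lookup,
 * refcounting and waiting are DRM core's job. */
static int resolve_in_syncobj(struct drm_file *file, u32 handle,
                              struct dma_fence **fence)
{
        return drm_syncobj_find_fence(file, handle, 0, 0, fence);
}

/* Out-syncobj: after queuing the job, point the syncobj at the job's fence;
 * signalling then happens through the fence, again handled by core code. */
static void attach_out_syncobj(struct drm_file *file, u32 handle,
                               struct dma_fence *job_fence)
{
        struct drm_syncobj *syncobj = drm_syncobj_find(file, handle);

        if (!syncobj)
                return;
        drm_syncobj_replace_fence(syncobj, job_fence);
        drm_syncobj_put(syncobj);
}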

Can we get one of the Mesa MRs reviewed first?  There's currently no
virtio-intel MR AFAICT, and the amdgpu one is marked as "Draft:".

Even for the amdgpu, Pierre suggests the feature "will be marked as
experimental both in Mesa and virglrenderer" and we can revise as needed.
The DRM requirements seem to warn against adding a UAPI too hastily...

You can get the deqp-vk 1.2 tests to pass with the current UAPI, if you
just change your mesa <--> virglrenderer protocol a little.  Perhaps that
way is even better, since you plumb the in sync-obj into host-side command
submission.

Without inter-context sharing of the fence, this MR really only adds guest
kernel syntactic sugar.

Note I'm not against syntactic sugar, but I just want to point out that you
can likely merge the native context work without any UAPI changes, in case
it's not clear.

If this was for the core drm syncobj implementation, and not just
> driver ioctl parsing and wiring up the core helpers, I would agree
> with you.
>

There are several possible and viable paths to get the features in question
(VK1.2 syncobjs, and inter-context fence sharing).  There are paths
entirely without the syncobj, paths that only use the syncobj for the
inter-context fence sharing case and create host syncobjs for VK1.2, paths
that also use guest syncobjs in every proxied command submission.

It's really hard to tell which one is better.  Here's my suggestion:

1) Get the native contexts reviewed/merged in Mesa/virglrenderer using the
current UAPI.  Options for VK1.2 include: pushing down the syncobjs to the
host, and simulating the syncobj (as already done).  It's fine to mark
these contexts as "experimental" like msm-experimental.  That will allow
you to experiment with the protocols, come up with tests, and hopefully
determine an answer to the host versus guest syncobj question.

2) Once you've completed (1), try to add UAPI changes for features that are
missing or things that are suboptimal, with the knowledge gained from doing
(1).

WDYT?


>
> BR,
> -R
>

Re: [PATCH v6 0/3] Add sync object UAPI support to VirtIO-GPU driver

2023-05-03 Thread Gurchetan Singh
On Mon, May 1, 2023 at 8:38 AM Dmitry Osipenko <
dmitry.osipe...@collabora.com> wrote:

> On 4/16/23 14:52, Dmitry Osipenko wrote:
> > We have multiple Vulkan context types that are awaiting the addition
> > of the sync object DRM UAPI support to the VirtIO-GPU kernel driver:
> >
> >  1. Venus context
> >  2. Native contexts (virtio-freedreno, virtio-intel, virtio-amdgpu)
> >
> > Mesa core supports DRM sync object UAPI, providing Vulkan drivers with a
> > generic fencing implementation that we want to utilize.
> >
> > This patch adds initial sync objects support. It creates a foundation for
> > further fencing improvements. Later on we will want to extend the
> > VirtIO-GPU fencing API with passing fence IDs to the host for waiting;
> > it will be a new additional VirtIO-GPU IOCTL and more. Today we have
> > several VirtIO-GPU context drivers in the works that require VirtIO-GPU
> > to support the sync objects UAPI.
> >
> > The patch is heavily inspired by the sync object UAPI implementation of
> > the MSM driver.
>
> Gerd, do you have any objections to merging this series?
>
> We have AMDGPU [1] and Intel [2] native context WIP drivers depending on
> the sync object support. It is the only part missing from kernel today
> that is wanted by the native context drivers. Otherwise, there are a few
> other things in Qemu and virglrenderer left to sort out.
>
> [1] https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/21658
> [2]
> https://gitlab.freedesktop.org/digetx/mesa/-/commits/native-context-iris


I'm not saying this change isn't good, just that it's probably possible to
implement the native contexts (even up to VK1.2) without it.  But this
patch series may be the most ergonomic way to do it, given how Mesa is
designed.  But you probably want one of the Mesa MRs reviewed first before
merging (I added a comment on the amdgpu change) and that is a requirement
[a].

[a] "The userspace side must be fully reviewed and tested to the standards
of that user space project. For e.g. mesa this means piglit testcases and
review on the mailing list. This is again to ensure that the new interface
actually gets the job done." -- from the requirements


>
>
> --
> Best regards,
> Dmitry
>
>

Re: [PATCH v6 0/3] Add sync object UAPI support to VirtIO-GPU driver

2023-04-24 Thread Gurchetan Singh
On Wed, Apr 19, 2023 at 2:22 PM Dmitry Osipenko
 wrote:
>
> Hello Gurchetan,
>
> On 4/18/23 02:17, Gurchetan Singh wrote:
> > On Sun, Apr 16, 2023 at 4:53 AM Dmitry Osipenko <
> > dmitry.osipe...@collabora.com> wrote:
> >
> >> We have multiple Vulkan context types that are awaiting the addition
> >> of the sync object DRM UAPI support to the VirtIO-GPU kernel driver:
> >>
> >>  1. Venus context
> >>  2. Native contexts (virtio-freedreno, virtio-intel, virtio-amdgpu)
> >>
> >> Mesa core supports DRM sync object UAPI, providing Vulkan drivers with a
> >> generic fencing implementation that we want to utilize.
> >>
> >> This patch adds initial sync objects support. It creates a foundation for
> >> further fencing improvements. Later on we will want to extend the
> >> VirtIO-GPU fencing API with passing fence IDs to the host for waiting;
> >> it will be a new additional VirtIO-GPU IOCTL and more. Today we have
> >> several VirtIO-GPU context drivers in the works that require VirtIO-GPU
> >> to support the sync objects UAPI.
> >>
> >> The patch is heavily inspired by the sync object UAPI implementation of the
> >> MSM driver.
> >>
> >
> > The changes seem good, but I would recommend getting a full end-to-end
> > solution (i.e., you've proxied the host fence with these changes and shared
> > it with the host compositor) working first.  You never know what you'll
> > find after completing this exercise.  Or is that the plan already?
> >
> > Typically, you want to land the uAPI and virtio spec changes last.
> > Mesa/gfxstream/virglrenderer/crosvm all have the ability to test out
> > unstable uAPIs ...
>
> The proxied host fence isn't directly related to sync objects, though I
> prepared code such that it could be extended with a proxied fence later
> on, based on a prototype that was made some time ago.

Proxying the host fence is the novel bit.  If you have code that does
this, you should rebase/send that out (even as an RFC) so it's easier
to see how the pieces fit.

Right now, if you've only tested synchronization objects within the
same virtio-gpu context, which skips the guest-side wait, I think you
can already do that with the current uAPI (since ideally you'd wait on
the host side and can encode the sync resource in the command stream).

Also, try to come up with a simple test (so we can meet the requirements here
[a]) that showcases the new feature/capability.  An example would be
the virtio-intel native context sharing a fence with KMS or even
Wayland.

[a] 
https://dri.freedesktop.org/docs/drm/gpu/drm-uapi.html#open-source-userspace-requirements

>
> The proxied host fence shouldn't require UAPI changes, but only a
> virtio-gpu proto extension. Normally, all in-fences belong to a job's
> context, and thus, waits are skipped by the guest kernel. Hence, fence
> proxying is a separate feature from sync objects; it can be added
> without sync objects.
>
> Sync objects are primarily wanted by native context drivers because Mesa
> relies on the presence of the sync object UAPI. It's one of the direct
> blockers for the Intel and AMDGPU drivers, both of which have been using
> this sync object UAPI for a few months and now want it to land upstream.
>
> --
> Best regards,
> Dmitry
>

Re: [PATCH] drm/virtio: Fix capset-id query size

2022-02-15 Thread Gurchetan Singh
On Tue, Feb 15, 2022 at 5:15 PM Rob Clark  wrote:

> From: Rob Clark 
>
> The UABI was already defined for a pointer to a 64b value, and all the
> userspace users of this ioctl that I could find are already using a
> uint64_t (but zeroing it out to work around the kernel only copying 32b).
> Unfortunately this ioctl doesn't have a length field, so out of paranoia
> I restricted the change to copy 64b only for the single 64b param that
> can be queried.
>
> Fixes: 78aa20fa4381 ("drm/virtio: implement context init: advertise
> feature to userspace")
> Signed-off-by: Rob Clark 
>

Reviewed-by: Gurchetan Singh 
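
For context, the userspace pattern the commit message describes looks roughly
like this; a sketch, with the zero-initialisation being what papers over older
kernels that only copy 32 bits:

#include <stdint.h>
#include <xf86drm.h>
#include <drm/virtgpu_drm.h>

static uint64_t query_capset_mask(int fd)
{
        /* zeroed so a kernel that copies only 32 bits leaves the top half defined */
        uint64_t mask = 0;
        struct drm_virtgpu_getparam args = {
                .param = VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs,
                .value = (uintptr_t)&mask,
        };

        if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_GETPARAM, &args))
                return 0;
        return mask;
}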


> ---
>  drivers/gpu/drm/virtio/virtgpu_ioctl.c | 16 
>  1 file changed, 12 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
> b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
> index 0f2f3f54dbf9..0158d27d5645 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
> @@ -269,7 +269,8 @@ static int virtio_gpu_getparam_ioctl(struct drm_device
> *dev, void *data,
>  {
> struct virtio_gpu_device *vgdev = dev->dev_private;
> struct drm_virtgpu_getparam *param = data;
> -   int value;
> +   int value, ret, sz = sizeof(int);
> +   uint64_t value64;
>
> switch (param->param) {
> case VIRTGPU_PARAM_3D_FEATURES:
> @@ -291,13 +292,20 @@ static int virtio_gpu_getparam_ioctl(struct
> drm_device *dev, void *data,
> value = vgdev->has_context_init ? 1 : 0;
> break;
> case VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs:
> -   value = vgdev->capset_id_mask;
> +   value64 = vgdev->capset_id_mask;
> +   sz = sizeof(value64);
> break;
> default:
> return -EINVAL;
> }
> -   if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int)))
> -   return -EFAULT;
> +
> +   if (sz == sizeof(int)) {
> +   if (copy_to_user(u64_to_user_ptr(param->value), &value, sz))
> +   return -EFAULT;
> +   } else {
> +   if (copy_to_user(u64_to_user_ptr(param->value), &value64, sz))
> +   return -EFAULT;
> +   }
>
> return 0;
>  }
> --
> 2.34.1
>
>

Re: [PATCH] drm/virtio: Fix NULL vs IS_ERR checking in virtio_gpu_object_shmem_init

2022-01-07 Thread Gurchetan Singh
On Tue, Dec 21, 2021 at 11:26 PM Miaoqian Lin  wrote:

> Since the drm_prime_pages_to_sg() function returns error pointers, the
> drm_gem_shmem_get_sg_table() function returns error pointers too.
> Use IS_ERR() to check the return value to fix this.
>
> Fixes: f651c8b05542 ("drm/virtio: factor out the sg_table from
> virtio_gpu_object")
> Signed-off-by: Miaoqian Lin 
>

Reviewed-by: Gurchetan Singh 


> ---
>  drivers/gpu/drm/virtio/virtgpu_object.c | 4 ++--
>  1 file changed, 2 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c
> b/drivers/gpu/drm/virtio/virtgpu_object.c
> index f648b0e24447..8bb80289672c 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_object.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_object.c
> @@ -168,9 +168,9 @@ static int virtio_gpu_object_shmem_init(struct
> virtio_gpu_device *vgdev,
>  * since virtio_gpu doesn't support dma-buf import from other
> devices.
>  */
> shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
> -   if (!shmem->pages) {
> +   if (IS_ERR(shmem->pages)) {
> drm_gem_shmem_unpin(&bo->base.base);
> -   return -EINVAL;
> +   return PTR_ERR(shmem->pages);
> }
>
> if (use_dma_api) {
> --
> 2.17.1
>
>

Re: [virtio-dev] [PATCH v2 4/4] drm/virtio: Support virtgpu exported resources

2020-03-02 Thread Gurchetan Singh
On Mon, Mar 2, 2020 at 4:15 AM David Stevens  wrote:
>
> Add support for UUID-based resource sharing mechanism to virtgpu. This
> implements the new virtgpu commands and hooks them up to dma-buf's
> get_uuid callback.
>
> Signed-off-by: David Stevens 
> ---
>  drivers/gpu/drm/virtio/virtgpu_drv.c   |  3 ++
>  drivers/gpu/drm/virtio/virtgpu_drv.h   | 19 
>  drivers/gpu/drm/virtio/virtgpu_kms.c   |  4 ++
>  drivers/gpu/drm/virtio/virtgpu_prime.c | 48 ++--
>  drivers/gpu/drm/virtio/virtgpu_vq.c| 62 ++
>  5 files changed, 133 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c 
> b/drivers/gpu/drm/virtio/virtgpu_drv.c
> index ab4bed78e656..776e6667042e 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_drv.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
> @@ -165,6 +165,7 @@ static unsigned int features[] = {
> VIRTIO_GPU_F_VIRGL,
>  #endif
> VIRTIO_GPU_F_EDID,
> +   VIRTIO_GPU_F_RESOURCE_UUID,
>  };
>  static struct virtio_driver virtio_gpu_driver = {
> .feature_table = features,
> @@ -202,7 +203,9 @@ static struct drm_driver driver = {
> .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
> .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
> .gem_prime_mmap = drm_gem_prime_mmap,
> +   .gem_prime_export = virtgpu_gem_prime_export,
> .gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
> +   .gem_prime_get_uuid = virtgpu_gem_prime_get_uuid,
>
> .gem_create_object = virtio_gpu_create_object,
> .fops = &virtio_gpu_driver_fops,
> diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h 
> b/drivers/gpu/drm/virtio/virtgpu_drv.h
> index af9403e1cf78..4be84de73d86 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_drv.h
> +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
> @@ -49,6 +49,11 @@
>  #define DRIVER_MINOR 1
>  #define DRIVER_PATCHLEVEL 0
>
> +#define UUID_NOT_INITIALIZED 0
> +#define UUID_INITIALIZING 1
> +#define UUID_INITIALIZED 2
> +#define UUID_INITIALIZATION_FAILED 3
> +
>  struct virtio_gpu_object_params {
> uint32_t format;
> uint32_t width;
> @@ -75,6 +80,9 @@ struct virtio_gpu_object {
>
> bool dumb;
> bool created;
> +
> +   int uuid_state;
> +   uuid_t uuid;
>  };
>  #define gem_to_virtio_gpu_obj(gobj) \
> container_of((gobj), struct virtio_gpu_object, base.base)
> @@ -196,6 +204,7 @@ struct virtio_gpu_device {
> bool has_virgl_3d;
> bool has_edid;
> bool has_indirect;
> +   bool has_resource_assign_uuid;
>
> struct work_struct config_changed_work;
>
> @@ -206,6 +215,8 @@ struct virtio_gpu_device {
> struct virtio_gpu_drv_capset *capsets;
> uint32_t num_capsets;
> struct list_head cap_cache;
> +
> +   spinlock_t resource_export_lock;
>  };
>
>  struct virtio_gpu_fpriv {
> @@ -338,6 +349,10 @@ void virtio_gpu_dequeue_fence_func(struct work_struct 
> *work);
>  void virtio_gpu_disable_notify(struct virtio_gpu_device *vgdev);
>  void virtio_gpu_enable_notify(struct virtio_gpu_device *vgdev);
>
> +int
> +virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
> +   struct virtio_gpu_object *bo);
> +
>  /* virtio_gpu_display.c */
>  void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
>  void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
> @@ -366,6 +381,10 @@ int virtio_gpu_object_create(struct virtio_gpu_device 
> *vgdev,
>  struct virtio_gpu_object **bo_ptr,
>  struct virtio_gpu_fence *fence);
>  /* virtgpu_prime.c */
> +struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
> +int flags);
> +int virtgpu_gem_prime_get_uuid(struct drm_gem_object *obj,
> +  uuid_t *uuid);
>  struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
> struct drm_device *dev, struct dma_buf_attachment *attach,
> struct sg_table *sgt);
> diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c 
> b/drivers/gpu/drm/virtio/virtgpu_kms.c
> index 4009c2f97d08..5a2aeb6d2f35 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_kms.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
> @@ -134,6 +134,7 @@ int virtio_gpu_init(struct drm_device *dev)
> vgdev->dev = dev->dev;
>
> spin_lock_init(&vgdev->display_info_lock);
> +   spin_lock_init(&vgdev->resource_export_lock);
> ida_init(&vgdev->ctx_id_ida);
> ida_init(&vgdev->resource_ida);
> init_waitqueue_head(&vgdev->resp_wq);
> @@ -162,6 +163,9 @@ int virtio_gpu_init(struct drm_device *dev)
> if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC)) {
> vgdev->has_indirect = true;
> }
> +   if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID)) {
> +   vgdev->has_resource_assign_uuid = true;
> +   }
>
> DRM_INFO("features: %cvirgl %cedid\n",

Re: [PATCH] drm/virtio: rework batching

2020-02-11 Thread Gurchetan Singh
event_timeout(vgdev->resp_wq,
> diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c 
> b/drivers/gpu/drm/virtio/virtgpu_kms.c
> index c1086df49816..44e4c07d0162 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_kms.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
> @@ -44,6 +44,7 @@ static void virtio_gpu_config_changed_work_func(struct 
> work_struct *work)
> if (vgdev->has_edid)
> virtio_gpu_cmd_get_edids(vgdev);
> virtio_gpu_cmd_get_display_info(vgdev);
> +   virtio_gpu_notify(vgdev);
> drm_helper_hpd_irq_event(vgdev->ddev);
> events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
> }
> @@ -92,6 +93,7 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device 
> *vgdev,
> }
> for (i = 0; i < num_capsets; i++) {
> virtio_gpu_cmd_get_capset_info(vgdev, i);
> +   virtio_gpu_notify(vgdev);
> ret = wait_event_timeout(vgdev->resp_wq,
>  vgdev->capsets[i].id > 0, 5 * HZ);
> if (ret == 0) {
> @@ -206,6 +208,7 @@ int virtio_gpu_init(struct drm_device *dev)
> if (vgdev->has_edid)
> virtio_gpu_cmd_get_edids(vgdev);
> virtio_gpu_cmd_get_display_info(vgdev);
> +   virtio_gpu_notify(vgdev);
> wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
>5 * HZ);
> return 0;
> diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c 
> b/drivers/gpu/drm/virtio/virtgpu_object.c
> index 8870ee23ff2b..65d6834d3c74 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_object.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_object.c
> @@ -224,6 +224,7 @@ int virtio_gpu_object_create(struct virtio_gpu_device 
> *vgdev,
> return ret;
> }
>
> +   virtio_gpu_notify(vgdev);
> *bo_ptr = bo;
> return 0;
>
> diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c 
> b/drivers/gpu/drm/virtio/virtgpu_plane.c
> index ac42c84d2d7f..fd6487fb0855 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_plane.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
> @@ -154,8 +154,6 @@ static void virtio_gpu_primary_plane_update(struct 
> drm_plane *plane,
> if (!drm_atomic_helper_damage_merged(old_state, plane->state, &rect))
> return;
>
> -   virtio_gpu_disable_notify(vgdev);
> -
> bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
> if (bo->dumb)
> virtio_gpu_update_dumb_bo(vgdev, plane->state, &rect);
> @@ -187,7 +185,7 @@ static void virtio_gpu_primary_plane_update(struct 
> drm_plane *plane,
>   rect.x2 - rect.x1,
>   rect.y2 - rect.y1);
>
> -   virtio_gpu_enable_notify(vgdev);
> +   virtio_gpu_notify(vgdev);
>  }
>
>  static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane,
> @@ -265,6 +263,7 @@ static void virtio_gpu_cursor_plane_update(struct 
> drm_plane *plane,
>  plane->state->crtc_w,
>  plane->state->crtc_h,
>  0, 0, objs, vgfb->fence);
> +   virtio_gpu_notify(vgdev);
> dma_fence_wait(&vgfb->fence->f, true);
> dma_fence_put(&vgfb->fence->f);
> vgfb->fence = NULL;
> diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c 
> b/drivers/gpu/drm/virtio/virtgpu_vq.c
> index a682c2fcbe9a..ccc89b7578a0 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_vq.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
> @@ -329,7 +329,6 @@ static void virtio_gpu_queue_ctrl_sgs(struct 
> virtio_gpu_device *vgdev,
>   int incnt)
>  {
> struct virtqueue *vq = vgdev->ctrlq.vq;
> -   bool notify = false;
> int ret;
>
> if (vgdev->has_indirect)
> @@ -369,16 +368,9 @@ static void virtio_gpu_queue_ctrl_sgs(struct 
> virtio_gpu_device *vgdev,
>
> trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));
>
> -   notify = virtqueue_kick_prepare(vq);
> +   atomic_inc(&vgdev->pending_commands);
>
> spin_unlock(>ctrlq.qlock);
> -
> -   if (notify) {
> -   if (vgdev->disable_notify)
> -   vgdev->pending_notify = true;
> -   else
> -       virtqueue_notify(vq);
> -   }
>  }
>
>  static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device 
> *vgdev,
> @@ -434,19 +426,20 @@ static void virtio_gpu_queue_fenced_ctrl_buffer(struct 
> virtio_gpu_d

Re: [PATCH] drm/virtio: fix mmap page attributes

2019-12-10 Thread Gurchetan Singh
On Tue, Dec 10, 2019 at 12:58 AM Gerd Hoffmann  wrote:
>
> virtio-gpu uses cached mappings.  shmem helpers use writecombine though.
> So roll our own mmap function, wrapping drm_gem_shmem_mmap(), to tweak
> vm_page_prot accordingly.
>
> Reported-by: Gurchetan Singh 
> Signed-off-by: Gerd Hoffmann 
> ---
>  drivers/gpu/drm/virtio/virtgpu_object.c | 18 +-
>  1 file changed, 17 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c 
> b/drivers/gpu/drm/virtio/virtgpu_object.c
> index 017a9e0fc3bb..158610902054 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_object.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_object.c
> @@ -75,6 +75,22 @@ static void virtio_gpu_free_object(struct drm_gem_object 
> *obj)
> drm_gem_shmem_free_object(obj);
>  }
>
> +static int virtio_gpu_gem_mmap(struct drm_gem_object *obj, struct 
> vm_area_struct *vma)
> +{
> +   pgprot_t prot;
> +   int ret;
> +
> +   ret = drm_gem_shmem_mmap(obj, vma);
> +   if (ret < 0)
> +   return ret;
> +
> +   /* virtio-gpu needs normal caching, so clear writecombine */
> +   prot = vm_get_page_prot(vma->vm_flags);
> +   prot = pgprot_decrypted(prot);
> +   vma->vm_page_prot = prot;
> +   return 0;
> +}
> +
>  static const struct drm_gem_object_funcs virtio_gpu_gem_funcs = {
> .free = virtio_gpu_free_object,
> .open = virtio_gpu_gem_object_open,
> @@ -86,7 +102,7 @@ static const struct drm_gem_object_funcs 
> virtio_gpu_gem_funcs = {
> .get_sg_table = drm_gem_shmem_get_sg_table,
> .vmap = drm_gem_shmem_vmap,

Do we need vmap/vmunmap?  It seems optional and also uses non-cacheable memory?

> .vunmap = drm_gem_shmem_vunmap,
> -   .mmap = &drm_gem_shmem_mmap,
> +   .mmap = &virtio_gpu_gem_mmap,

Why the &virtio_gpu_gem_mmap?  Shouldn't just virtio_gpu_gem_mmap work?



>  };
>
>  struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
> --
> 2.18.1
>


Re: [PATCH v6 16/18] drm/virtio: rework virtio_gpu_cmd_context_{attach, detach}_resource

2019-07-02 Thread Gurchetan Singh
On Tue, Jul 2, 2019 at 7:19 AM Gerd Hoffmann  wrote:

> Switch to the virtio_gpu_array_* helper workflow.
>
> Signed-off-by: Gerd Hoffmann 
> ---
>  drivers/gpu/drm/virtio/virtgpu_drv.h |  4 ++--
>  drivers/gpu/drm/virtio/virtgpu_gem.c | 24 +++-
>  drivers/gpu/drm/virtio/virtgpu_vq.c  | 10 ++
>  3 files changed, 19 insertions(+), 19 deletions(-)
>
> diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h
> b/drivers/gpu/drm/virtio/virtgpu_drv.h
> index b1f63a21abb6..d99c54abd090 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_drv.h
> +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
> @@ -292,10 +292,10 @@ void virtio_gpu_cmd_context_destroy(struct
> virtio_gpu_device *vgdev,
> uint32_t id);
>  void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device
> *vgdev,
> uint32_t ctx_id,
> -   uint32_t resource_id);
> +   struct virtio_gpu_object_array
> *objs);
>  void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device
> *vgdev,
> uint32_t ctx_id,
> -   uint32_t resource_id);
> +   struct virtio_gpu_object_array
> *objs);
>  void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
>void *data, uint32_t data_size,
>uint32_t ctx_id,
> diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c
> b/drivers/gpu/drm/virtio/virtgpu_gem.c
> index 6baf64141645..e75819dbba80 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_gem.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
> @@ -111,19 +111,18 @@ int virtio_gpu_gem_object_open(struct drm_gem_object
> *obj,
>  {
> struct virtio_gpu_device *vgdev = obj->dev->dev_private;
> struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
> -   struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj);
> -   int r;
> +   struct virtio_gpu_object_array *objs;
>
> if (!vgdev->has_virgl_3d)
> return 0;
>
> -   r = virtio_gpu_object_reserve(qobj);
> -   if (r)
> -   return r;
> +   objs = virtio_gpu_array_alloc(1);
> +   if (!objs)
> +   return -ENOMEM;
> +   virtio_gpu_array_add_obj(objs, obj);
>
> virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
> -  qobj->hw_res_handle);
> -   virtio_gpu_object_unreserve(qobj);
> +  objs);
> return 0;
>  }
>
> @@ -132,19 +131,18 @@ void virtio_gpu_gem_object_close(struct
> drm_gem_object *obj,
>  {
> struct virtio_gpu_device *vgdev = obj->dev->dev_private;
> struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
> -   struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj);
> -   int r;
> +   struct virtio_gpu_object_array *objs;
>
> if (!vgdev->has_virgl_3d)
> return;
>
> -   r = virtio_gpu_object_reserve(qobj);
> -   if (r)
> +   objs = virtio_gpu_array_alloc(1);
> +   if (!objs)
> return;
> +   virtio_gpu_array_add_obj(objs, obj);
>

This seems to call drm_gem_object_get.  Without adding the objects to the
vbuf, aren't we missing the corresponding drm_gem_object_put_unlocked?
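
To spell out that concern, a hedged sketch; the helper name at the end is an
assumption, and the point is only that the references taken by
virtio_gpu_array_add_obj() need an owner if the array is not handed to a vbuf:

        objs = virtio_gpu_array_alloc(1);
        if (!objs)
                return;
        virtio_gpu_array_add_obj(objs, obj);    /* takes a drm_gem_object reference */

        virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id, objs);

        /* If the command above does not attach 'objs' to its vbuf (so that the
         * references are dropped when the host finishes the command), something
         * like this would be needed after queuing: */
        virtio_gpu_array_put_free(objs);        /* assumed helper name */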

Some miscellaneous comments:
1) Maybe virtio_gpu_array can have its own header and file?  virtgpu_drv.h
is getting rather big...
2) What data are you trying to protect with the additional references?  Is
it host-side resources (i.e., the host GL texture or buffer object) or is it
guest-side resources (fences)?  Guest-side resources seem properly counted
to me.  GL is supposed to reference count pending resources as well, but
that's not always the case in practice.  A little blurb somewhere like
"hold on to 3D GEM buffers until the host responds, as a safety measure" or
"we could potentially cause a kernel oops if [...]" would be useful.

The guest side looks sufficiently referenced to me, while the GL spec
indicates


>
> virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id,
> -   qobj->hw_res_handle);
> -   virtio_gpu_object_unreserve(qobj);
> +  objs);
>  }
>
>  struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
> diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c
> b/drivers/gpu/drm/virtio/virtgpu_vq.c
> index 1c0097de419a..799d1339ee0f 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_vq.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
> @@ -835,8 +835,9 @@ void virtio_gpu_cmd_context_destroy(struct
> virtio_gpu_device *vgdev,
>
>  void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device
> *vgdev,
> uint32_t ctx_id,
> -   

Re: [PATCH 3/3] virtio-gpu api: VIRTIO_GPU_F_RESSOURCE_V2

2019-04-22 Thread Gurchetan Singh
On Wed, Apr 17, 2019 at 2:57 AM Gerd Hoffmann  wrote:
>
> On Fri, Apr 12, 2019 at 04:34:20PM -0700, Chia-I Wu wrote:
> > Hi,
> >
> > I am still new to virgl, and missed the last round of discussion about
> > resource_create_v2.
> >
> > From the discussion below, semantically resource_create_v2 creates a host
> > resource object _without_ any storage; memory_create creates a host memory
> > object which provides the storage.  Is that correct?
>
> Right now all resource_create_* variants create a resource object with
> host storage.  memory_create creates guest storage, and
> resource_attach_memory binds things together.  Then you have to transfer
> the data.
>
> Hmm, maybe we need a flag indicating that host storage is not needed,
> for resources where we want establish some kind of shared mapping later
> on.
>
> > Do we expect these new commands to be supported by OpenGL, which does not
> > separate resources and memories?
>
> Well, for opengl you need a 1:1 relationship between memory region and
> resource.
>
> > > Yes, even though it is not clear yet how we are going to handle
> > > host-allocated buffers in the vhost-user case ...
> >
> > This might be another dumb question, but is this only an issue for
> > vhost-user(-gpu) case?  What mechanisms are used to map host dma-buf into
> > the guest address space?
>
> qemu can change the address space, that includes mmap()ing stuff there.
> An external vhost-user process can't do this, it can only read the
> address space layout, and read/write from/to guest memory.
>
> > But one needs to create the resource first to know which memory types can
> > be attached to it.  I think the metadata needs to be returned with
> > resource_create_v2.
>
> There is a resource_info reply for that.

The memory type should probably be in resource_create_v2, not in the
reply.  The metadata will be different based on the memory heap it's
allocated from.

Also, not all heaps need to be exposed to the guest kernel.  For
example, device local memory in Vulkan could be un-mappable.  In fact,
for resources that are not host visible we might be better off
sidestepping the kernel altogether and tracking allocation in guest
userspace.

Here is an example of memory types the guest kernel may be interested
in (based on i965):

Type 0 --> DEVICE_LOCAL_BIT | HOST_VISIBLE_BIT | HOST_COHERENT_BIT |
HOST_CACHED_BIT | RENDERER_ALLOCATED (Vulkan)
Type 1 --> HOST_VISIBLE_BIT | HOST_COHERENT_BIT | EXTERNAL_ALLOCATED
(gbm write combine)
Type 2 --> HOST_VISIBLE_BIT | HOST_COHERENT_BIT | GUEST_ALLOCATED
(guest allocated memory, which I assume is also write combine)
Type 3 --> HOST_VISIBLE_BIT | HOST_CACHED | EXTERNAL_ALLOCATED (gbm
cached memory)



>
> > That should be good enough.  But by returning alignments, we can minimize
> > the gaps when attaching multiple resources, especially when the resources
> > are only used by GPU.
>
> We can add alignments to the resource_info reply.
>
> cheers,
>   Gerd
>