Module: Mesa
Branch: main
Commit: 06e57e32310f51e752d7798e287400e8edc10312
URL:    http://cgit.freedesktop.org/mesa/mesa/commit/?id=06e57e32310f51e752d7798e287400e8edc10312

Author: Rob Clark <robdcl...@chromium.org>
Date:   Thu Jul 13 15:47:05 2023 -0700

virtio: Add vdrm native-context helper

Add a helper to de-duplicate some of the virtgpu native-context support
between turnip and freedreno.  This should make things easier for other
drivers adding virtgpu support.

Signed-off-by: Rob Clark <robdcl...@chromium.org>
Tested-by: Dmitry Osipenko <dmitry.osipe...@collabora.com> # virtio-intel
Reviewed-by: Dmitry Osipenko <dmitry.osipe...@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/24733>

---

 src/meson.build                |   7 +-
 src/virtio/meson.build         |   2 +
 src/virtio/vdrm/meson.build    |  26 +++
 src/virtio/vdrm/vdrm.c         | 190 ++++++++++++++++++++
 src/virtio/vdrm/vdrm.h         | 158 +++++++++++++++++
 src/virtio/vdrm/vdrm_virtgpu.c | 395 +++++++++++++++++++++++++++++++++++++++++
 6 files changed, 775 insertions(+), 3 deletions(-)
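
For readers new to the interface, here is a minimal usage sketch from a
guest driver's point of view.  It is illustrative only: MY_CCMD_NOP is a
hypothetical placeholder (each driver defines its own ccmds on top of the
common vdrm_ccmd_req header from the virglrenderer protocol headers), and
VIRTGPU_DRM_CONTEXT_MSM is used just as an example context type:

  #include "vdrm.h"

  /* Hypothetical request; real drivers embed struct vdrm_ccmd_req as the
   * first member of their protocol structs:
   */
  #define MY_CCMD_NOP 1
  struct my_ccmd_nop {
     struct vdrm_ccmd_req hdr;
  };

  static void
  example(int drm_fd)
  {
     /* Returns NULL if the host's context type doesn't match: */
     struct vdrm_device *vdev =
        vdrm_device_connect(drm_fd, VIRTGPU_DRM_CONTEXT_MSM);
     if (!vdev)
        return;

     /* Buffer a request, flush it to the host, and block until the
      * host has processed it:
      */
     struct my_ccmd_nop req = {
        .hdr = { .cmd = MY_CCMD_NOP, .len = sizeof(req) },
     };
     vdrm_send_req(vdev, &req.hdr, true /* sync */);

     vdrm_device_close(vdev);
  }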

diff --git a/src/meson.build b/src/meson.build
index 78da771c4af..bc200fe1435 100644
--- a/src/meson.build
+++ b/src/meson.build
@@ -91,6 +91,7 @@ endif
 if with_gallium_etnaviv
   subdir('etnaviv')
 endif
+subdir('virtio')
 if with_gallium_freedreno or with_freedreno_vk or with_tools.contains('freedreno')
   subdir('freedreno')
 endif
@@ -100,9 +101,9 @@ endif
 if with_gallium_panfrost or with_gallium_lima or with_panfrost_vk or with_tools.contains('panfrost')
   subdir('panfrost')
 endif
-if with_gallium_virgl or with_virtio_vk
-  subdir('virtio')
-endif
+#if with_gallium_virgl or with_virtio_vk
+#  subdir('virtio')
+#endif
 if with_microsoft_clc or with_gallium_d3d12 or with_spirv_to_dxil or with_microsoft_vk
   subdir('microsoft')
 endif
diff --git a/src/virtio/meson.build b/src/virtio/meson.build
index cf84c8311a6..ff2c23f7515 100644
--- a/src/virtio/meson.build
+++ b/src/virtio/meson.build
@@ -19,7 +19,9 @@
 # SOFTWARE.
 
 inc_virtio = include_directories('.')
+inc_virtio_vdrm = include_directories('./vdrm')
 
 if with_virtio_vk
   subdir('vulkan')
 endif
+subdir('vdrm')
diff --git a/src/virtio/vdrm/meson.build b/src/virtio/vdrm/meson.build
new file mode 100644
index 00000000000..939c3e8d530
--- /dev/null
+++ b/src/virtio/vdrm/meson.build
@@ -0,0 +1,26 @@
+# Copyright 2023 Google LLC
+# SPDX-License-Identifier: MIT
+
+libvdrm = static_library(
+  'libvdrm',
+  [
+    'vdrm.c',
+    'vdrm.h',
+    'vdrm_virtgpu.c',
+  ],
+  include_directories : [
+    inc_include,
+    inc_src,
+    inc_virtio_gpu,
+  ],
+  c_args : [ no_override_init_args ],
+  gnu_symbol_visibility: 'hidden',
+  dependencies: [
+    dep_libdrm,
+    dep_valgrind,
+  ],
+  link_with: [
+    _libmesa_util,
+  ],
+  build_by_default: false,
+)
diff --git a/src/virtio/vdrm/vdrm.c b/src/virtio/vdrm/vdrm.c
new file mode 100644
index 00000000000..ff69ad325dc
--- /dev/null
+++ b/src/virtio/vdrm/vdrm.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright © 2023 Google, Inc.
+ * SPDX-License-Identifier: MIT
+ */
+
+#include "util/u_math.h"
+#include "util/perf/cpu_trace.h"
+
+#include "vdrm.h"
+
+struct vdrm_device * vdrm_virtgpu_connect(int fd, uint32_t context_type);
+
+struct vdrm_device *
+vdrm_device_connect(int fd, uint32_t context_type)
+{
+   struct vdrm_device *vdev;
+
+   // TODO vtest vs virtio..
+   vdev = vdrm_virtgpu_connect(fd, context_type);
+   if (!vdev)
+      return NULL;
+
+   simple_mtx_init(&vdev->rsp_lock, mtx_plain);
+   simple_mtx_init(&vdev->eb_lock, mtx_plain);
+
+   return vdev;
+}
+
+void
+vdrm_device_close(struct vdrm_device *vdev)
+{
+   vdev->funcs->close(vdev);
+   free(vdev);
+}
+
+uint32_t
+vdrm_bo_create(struct vdrm_device *vdev, size_t size, uint32_t blob_flags,
+               uint64_t blob_id, struct vdrm_ccmd_req *req)
+{
+   uint32_t handle;
+
+   simple_mtx_lock(&vdev->eb_lock);
+
+   /* flush any buffered cmds so they are seen by the host *prior* to
+    * the cmds associated with bo creation.
+    */
+   vdev->funcs->flush_locked(vdev, NULL);
+
+   req->seqno = ++vdev->next_seqno;
+
+   handle = vdev->funcs->bo_create(vdev, size, blob_flags, blob_id, req);
+
+   simple_mtx_unlock(&vdev->eb_lock);
+
+   return handle;
+}
+
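+/* Allocate a response slot for a request out of the shared response
+ * memory, as a simple ring that wraps back to offset zero when the
+ * next allocation would run past the end:
+ */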
+void *
+vdrm_alloc_rsp(struct vdrm_device *vdev, struct vdrm_ccmd_req *req, uint32_t sz)
+{
+   unsigned off;
+
+   simple_mtx_lock(&vdev->rsp_lock);
+
+   sz = align(sz, 8);
+
+   if ((vdev->next_rsp_off + sz) >= vdev->rsp_mem_len)
+      vdev->next_rsp_off = 0;
+
+   off = vdev->next_rsp_off;
+   vdev->next_rsp_off += sz;
+
+   simple_mtx_unlock(&vdev->rsp_lock);
+
+   req->rsp_off = off;
+
+   struct vdrm_ccmd_rsp *rsp = (void *)&vdev->rsp_mem[off];
+   rsp->len = sz;
+
+   return rsp;
+}
+
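+/* Append a request to the request buffer, flushing first if the new
+ * request would not fit.  Caller must hold eb_lock.
+ */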
+static int
+enqueue_req(struct vdrm_device *vdev, struct vdrm_ccmd_req *req)
+{
+   simple_mtx_assert_locked(&vdev->eb_lock);
+
+   req->seqno = ++vdev->next_seqno;
+
+   if ((vdev->reqbuf_len + req->len) > sizeof(vdev->reqbuf)) {
+      int ret = vdev->funcs->flush_locked(vdev, NULL);
+      if (ret)
+         return ret;
+   }
+
+   memcpy(&vdev->reqbuf[vdev->reqbuf_len], req, req->len);
+   vdev->reqbuf_len += req->len;
+   vdev->reqbuf_cnt++;
+
+   return 0;
+}
+
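+/**
+ * Issue a host kernel cmd submission, flushing any buffered requests
+ * ahead of it so they are seen by the host first.
+ */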
+int
+vdrm_execbuf(struct vdrm_device *vdev, struct vdrm_execbuf_params *p)
+{
+   int ret = 0;
+
+   MESA_TRACE_FUNC();
+
+   simple_mtx_lock(&vdev->eb_lock);
+
+   ret = vdev->funcs->flush_locked(vdev, NULL);
+   if (ret)
+      goto out_unlock;
+
+   ret = vdev->funcs->execbuf_locked(vdev, p, p->req, p->req->len);
+
+out_unlock:
+   simple_mtx_unlock(&vdev->eb_lock);
+
+   return ret;
+}
+
+/**
+ * Buffer/send a request cmd to host
+ */
+int
+vdrm_send_req(struct vdrm_device *vdev, struct vdrm_ccmd_req *req, bool sync)
+{
+   MESA_TRACE_FUNC();
+
+   uintptr_t fence = 0;
+   int ret = 0;
+
+   simple_mtx_lock(&vdev->eb_lock);
+   ret = enqueue_req(vdev, req);
+
+   if (ret || !sync)
+      goto out_unlock;
+
+   ret = vdev->funcs->flush_locked(vdev, &fence);
+
+out_unlock:
+   simple_mtx_unlock(&vdev->eb_lock);
+
+   if (ret)
+      return ret;
+
+   if (sync) {
+      MESA_TRACE_SCOPE("vdrm_execbuf sync");
+      vdev->funcs->wait_fence(vdev, fence);
+      vdrm_host_sync(vdev, req);
+   }
+
+   return 0;
+}
+
+int
+vdrm_flush(struct vdrm_device *vdev)
+{
+   int ret = 0;
+
+   MESA_TRACE_FUNC();
+
+   simple_mtx_lock(&vdev->eb_lock);
+   ret = vdev->funcs->flush_locked(vdev, NULL);
+   simple_mtx_unlock(&vdev->eb_lock);
+
+   return ret;
+}
+
+/**
+ * Helper for fence/seqno comparisons which deals properly with rollover.
+ * Returns true if fence 'a' is before fence 'b'.
+ */
+static bool
+fence_before(uint32_t a, uint32_t b)
+{
+   return (int32_t)(a - b) < 0;
+}
+
+/**
+ * Wait until host has processed the specified request.
+ */
+void
+vdrm_host_sync(struct vdrm_device *vdev, const struct vdrm_ccmd_req *req)
+{
+   while (fence_before(vdev->shmem->seqno, req->seqno))
+      sched_yield();
+}
diff --git a/src/virtio/vdrm/vdrm.h b/src/virtio/vdrm/vdrm.h
new file mode 100644
index 00000000000..7bcdaad457f
--- /dev/null
+++ b/src/virtio/vdrm/vdrm.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright © 2023 Google, Inc.
+ * SPDX-License-Identifier: MIT
+ */
+
+/* A simple helper layer for virtgpu drm native context, which also
+ * abstracts the differences between vtest (communicating via socket
+ * with a vtest server) and virtgpu (communicating via the drm/virtio
+ * driver in the guest).
+ */
+
+#ifndef __VDRM_H__
+#define __VDRM_H__
+
+#include <stdint.h>
+
+#include "util/simple_mtx.h"
+
+#define VIRGL_RENDERER_UNSTABLE_APIS 1
+#include "virglrenderer_hw.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct vdrm_device;
+struct vdrm_execbuf_params;
+
+struct vdrm_device_funcs {
+   /* Note that flush_locked and execbuf_locked are similar, and on top of
+    * the virtgpu guest kernel driver they are basically the same.  But with
+    * vtest, only cmds that result in host kernel cmd submission can take
+    * and/or return fence and/or syncobj fd's.
+    */
+   int (*execbuf_locked)(struct vdrm_device *vdev, struct vdrm_execbuf_params *p,
+                         void *command, unsigned size);
+   int (*flush_locked)(struct vdrm_device *vdev, uintptr_t *fencep);
+
+   void (*wait_fence)(struct vdrm_device *vdev, uintptr_t fence);
+
+   uint32_t (*dmabuf_to_handle)(struct vdrm_device *vdev, int fd);
+   uint32_t (*handle_to_res_id)(struct vdrm_device *vdev, uint32_t handle);
+
+   uint32_t (*bo_create)(struct vdrm_device *vdev, size_t size, uint32_t blob_flags,
+                         uint64_t blob_id, struct vdrm_ccmd_req *req);
+   int (*bo_wait)(struct vdrm_device *vdev, uint32_t handle);
+   void *(*bo_map)(struct vdrm_device *vdev, uint32_t handle, size_t size);
+   int (*bo_export_dmabuf)(struct vdrm_device *vdev, uint32_t handle);
+   void (*bo_close)(struct vdrm_device *vdev, uint32_t handle);
+
+   void (*close)(struct vdrm_device *vdev);
+};
+
+struct vdrm_device {
+   const struct vdrm_device_funcs *funcs;
+
+   struct virgl_renderer_capset_drm caps;
+   struct vdrm_shmem *shmem;
+   uint8_t *rsp_mem;
+   uint32_t rsp_mem_len;
+   uint32_t next_rsp_off;
+   simple_mtx_t rsp_lock;
+   simple_mtx_t eb_lock;
+
+   uint32_t next_seqno;
+
+   /*
+    * Buffering for requests to host:
+    */
+   uint32_t reqbuf_len;
+   uint32_t reqbuf_cnt;
+   uint8_t reqbuf[0x4000];
+};
+
+struct vdrm_device *vdrm_device_connect(int fd, uint32_t context_type);
+void vdrm_device_close(struct vdrm_device *vdev);
+
+void * vdrm_alloc_rsp(struct vdrm_device *vdev, struct vdrm_ccmd_req *req, uint32_t sz);
+int vdrm_send_req(struct vdrm_device *vdev, struct vdrm_ccmd_req *req, bool sync);
+int vdrm_flush(struct vdrm_device *vdev);
+
+struct vdrm_execbuf_params {
+   int ring_idx;
+
+   struct vdrm_ccmd_req *req;     /* Note, must be host kernel cmd submit */
+
+   uint32_t *handles;
+   uint32_t num_handles;
+
+   struct drm_virtgpu_execbuffer_syncobj *in_syncobjs;
+   struct drm_virtgpu_execbuffer_syncobj *out_syncobjs;
+
+   bool has_in_fence_fd : 1;
+   bool needs_out_fence_fd : 1;
+
+   int fence_fd;                  /* in/out fence */
+
+   uint32_t num_in_syncobjs;
+   uint32_t num_out_syncobjs;
+};
+
+/**
+ * Note, must be a host kernel cmd submission, to which the specified in/out
+ * fences/syncobjs can be passed.  In the vtest case, we can't get
+ * fences/syncobjs for other host cmds.
+ */
+int vdrm_execbuf(struct vdrm_device *vdev, struct vdrm_execbuf_params *p);
+
+void vdrm_host_sync(struct vdrm_device *vdev, const struct vdrm_ccmd_req *req);
+
+/**
+ * Import dmabuf fd returning a GEM handle
+ */
+static inline uint32_t
+vdrm_dmabuf_to_handle(struct vdrm_device *vdev, int fd)
+{
+   return vdev->funcs->dmabuf_to_handle(vdev, fd);
+}
+
+static inline uint32_t
+vdrm_handle_to_res_id(struct vdrm_device *vdev, uint32_t handle)
+{
+   return vdev->funcs->handle_to_res_id(vdev, handle);
+}
+
+uint32_t vdrm_bo_create(struct vdrm_device *vdev, size_t size,
+                        uint32_t blob_flags, uint64_t blob_id,
+                        struct vdrm_ccmd_req *req);
+
+static inline int
+vdrm_bo_wait(struct vdrm_device *vdev, uint32_t handle)
+{
+   return vdev->funcs->bo_wait(vdev, handle);
+}
+
+static inline void *
+vdrm_bo_map(struct vdrm_device *vdev, uint32_t handle, size_t size)
+{
+   return vdev->funcs->bo_map(vdev, handle, size);
+}
+
+static inline int
+vdrm_bo_export_dmabuf(struct vdrm_device *vdev, uint32_t handle)
+{
+   return vdev->funcs->bo_export_dmabuf(vdev, handle);
+}
+
+static inline void
+vdrm_bo_close(struct vdrm_device *vdev, uint32_t handle)
+{
+   vdev->funcs->bo_close(vdev, handle);
+}
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* __VDRM_H__ */
diff --git a/src/virtio/vdrm/vdrm_virtgpu.c b/src/virtio/vdrm/vdrm_virtgpu.c
new file mode 100644
index 00000000000..8f12a2e9951
--- /dev/null
+++ b/src/virtio/vdrm/vdrm_virtgpu.c
@@ -0,0 +1,395 @@
+/*
+ * Copyright © 2023 Google, Inc.
+ * SPDX-License-Identifier: MIT
+ */
+
+#include <fcntl.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <xf86drm.h>
+
+#include "vdrm.h"
+
+#include "drm-uapi/virtgpu_drm.h"
+#include "util/libsync.h"
+#include "util/log.h"
+#include "util/perf/cpu_trace.h"
+
+
+#define SHMEM_SZ 0x4000
+
+#define virtgpu_ioctl(fd, name, args...) ({                          \
+      MESA_TRACE_SCOPE(#name);                                       \
+      int ret = drmIoctl((fd), DRM_IOCTL_ ## name, (args));          \
+      ret;                                                           \
+   })
+
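+/* virtgpu (guest drm/virtio kernel driver) backend of the vdrm interface: */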
+struct virtgpu_device {
+   struct vdrm_device base;
+   uint32_t shmem_handle;
+   int fd;
+};
+DEFINE_CAST(vdrm_device, virtgpu_device)
+
+static int
+virtgpu_execbuf_locked(struct vdrm_device *vdev, struct vdrm_execbuf_params *p,
+                       void *command, unsigned size)
+{
+   struct virtgpu_device *vgdev = to_virtgpu_device(vdev);
+
+   simple_mtx_assert_locked(&vdev->eb_lock);
+
+   assert(size);
+
+#define COND(bool, val) ((bool) ? (val) : 0)
+   struct drm_virtgpu_execbuffer eb = {
+         .flags = COND(p->needs_out_fence_fd, VIRTGPU_EXECBUF_FENCE_FD_OUT) |
+                  COND(p->has_in_fence_fd, VIRTGPU_EXECBUF_FENCE_FD_IN) |
+                  VIRTGPU_EXECBUF_RING_IDX,
+         .size  = size,
+         .command = (uintptr_t)command,
+         .bo_handles = (uintptr_t)p->handles,
+         .num_bo_handles = p->num_handles,
+         .fence_fd = p->fence_fd,
+         .ring_idx = p->ring_idx,
+         .syncobj_stride = sizeof(struct drm_virtgpu_execbuffer_syncobj),
+         .num_in_syncobjs = p->num_in_syncobjs,
+         .num_out_syncobjs = p->num_out_syncobjs,
+         .in_syncobjs = (uintptr_t)p->in_syncobjs,
+         .out_syncobjs = (uintptr_t)p->out_syncobjs,
+   };
+
+   int ret = virtgpu_ioctl(vgdev->fd, VIRTGPU_EXECBUFFER, &eb);
+   if (ret) {
+      mesa_loge("EXECBUFFER failed: %s", strerror(errno));
+      return ret;
+   }
+
+   if (p->needs_out_fence_fd)
+      p->fence_fd = eb.fence_fd;
+
+   return 0;
+}
+
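+/* Flush the buffered requests to the host in a single execbuffer; if
+ * fencep is non-NULL, an out-fence fd for the submission is returned
+ * through it:
+ */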
+static int
+virtgpu_flush_locked(struct vdrm_device *vdev, uintptr_t *fencep)
+{
+   int ret;
+
+   simple_mtx_assert_locked(&vdev->eb_lock);
+
+   if (!vdev->reqbuf_len)
+      return 0;
+
+   struct vdrm_execbuf_params p = {
+      .needs_out_fence_fd = !!fencep,
+   };
+   ret = virtgpu_execbuf_locked(vdev, &p, vdev->reqbuf, vdev->reqbuf_len);
+   if (ret)
+      return ret;
+
+   vdev->reqbuf_len = 0;
+   vdev->reqbuf_cnt = 0;
+
+   if (fencep)
+      *fencep = p.fence_fd;
+
+   return 0;
+}
+
+static void
+virtgpu_wait_fence(struct vdrm_device *vdev, uintptr_t fence)
+{
+   int fence_fd = fence;
+
+   sync_wait(fence_fd, -1);
+   close(fence_fd);
+}
+
+static void
+gem_close(struct virtgpu_device *vgdev, uint32_t handle)
+{
+   virtgpu_ioctl(vgdev->fd, GEM_CLOSE, &(struct drm_gem_close){
+      .handle = handle,
+   });
+}
+
+/**
+ * Note, does _not_ de-duplicate handles
+ */
+static uint32_t
+virtgpu_dmabuf_to_handle(struct vdrm_device *vdev, int fd)
+{
+   struct virtgpu_device *vgdev = to_virtgpu_device(vdev);
+   uint32_t handle;
+   int ret;
+
+   ret = drmPrimeFDToHandle(vgdev->fd, fd, &handle);
+   if (ret) {
+      mesa_loge("dmabuf import failed: %s", strerror(errno));
+      return 0;
+   }
+
+   return handle;
+}
+
+static uint32_t
+virtgpu_handle_to_res_id(struct vdrm_device *vdev, uint32_t handle)
+{
+   struct virtgpu_device *vgdev = to_virtgpu_device(vdev);
+   struct drm_virtgpu_resource_info args = {
+         .bo_handle = handle,
+   };
+   int ret;
+
+   ret = virtgpu_ioctl(vgdev->fd, VIRTGPU_RESOURCE_INFO, &args);
+   if (ret) {
+      mesa_loge("failed to get resource info: %s", strerror(errno));
+      return 0;
+   }
+
+   return args.res_handle;
+}
+
+static uint32_t
+virtgpu_bo_create(struct vdrm_device *vdev, size_t size, uint32_t blob_flags,
+                  uint64_t blob_id, struct vdrm_ccmd_req *req)
+{
+   struct virtgpu_device *vgdev = to_virtgpu_device(vdev);
+   struct drm_virtgpu_resource_create_blob args = {
+         .blob_mem   = VIRTGPU_BLOB_MEM_HOST3D,
+         .blob_flags = blob_flags,
+         .size       = size,
+         .cmd_size   = req->len,
+         .cmd        = (uintptr_t)req,
+         .blob_id    = blob_id,
+   };
+   int ret;
+
+   simple_mtx_assert_locked(&vdev->eb_lock);
+
+   ret = virtgpu_ioctl(vgdev->fd, VIRTGPU_RESOURCE_CREATE_BLOB, &args);
+   if (ret) {
+      mesa_loge("buffer allocation failed: %s", strerror(errno));
+      return 0;
+   }
+
+   return args.bo_handle;
+}
+
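+/* Look up the mmap offset for a handle via VIRTGPU_MAP, then mmap it: */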
+static int
+map_handle(int fd, uint32_t handle, size_t size, void **map)
+{
+   struct drm_virtgpu_map req = {
+      .handle = handle,
+   };
+   int ret;
+
+   ret = virtgpu_ioctl(fd, VIRTGPU_MAP, &req);
+   if (ret) {
+      mesa_loge("VIRTGPU_MAP failed: %s", strerror(errno));
+      return ret;
+   }
+
+   *map = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, req.offset);
+   if (*map == MAP_FAILED) {
+      mesa_loge("failed to map handle: %s", strerror(errno));
+      return -1;
+   }
+
+   return 0;
+}
+
+static int
+virtgpu_bo_wait(struct vdrm_device *vdev, uint32_t handle)
+{
+   struct virtgpu_device *vgdev = to_virtgpu_device(vdev);
+   struct drm_virtgpu_3d_wait args = {
+         .handle = handle,
+   };
+   int ret;
+
+   /* Side note, this ioctl is defined as IO_WR but should be IO_W: */
+   ret = virtgpu_ioctl(vgdev->fd, VIRTGPU_WAIT, &args);
+   if (ret && errno == EBUSY)
+      return -EBUSY;
+
+   return 0;
+}
+
+static void *
+virtgpu_bo_map(struct vdrm_device *vdev, uint32_t handle, size_t size)
+{
+   struct virtgpu_device *vgdev = to_virtgpu_device(vdev);
+   void *map;
+   int ret;
+
+   ret = map_handle(vgdev->fd, handle, size, &map);
+   if (ret)
+      return NULL;
+
+   return map;
+}
+
+static int
+virtgpu_bo_export_dmabuf(struct vdrm_device *vdev, uint32_t handle)
+{
+   struct virtgpu_device *vgdev = to_virtgpu_device(vdev);
+   int ret, fd;
+
+   ret = drmPrimeHandleToFD(vgdev->fd, handle, DRM_CLOEXEC | DRM_RDWR, &fd);
+   if (ret) {
+      mesa_loge("dmabuf export failed: %s", strerror(errno));
+      return ret;
+   }
+
+   return fd;
+}
+
+static void
+virtgpu_bo_close(struct vdrm_device *vdev, uint32_t handle)
+{
+   /* Flush any buffered commands first, so the detach_resource doesn't
+    * overtake any buffered ccmd which references the resource:
+    */
+   if (vdev->reqbuf_len) {
+      simple_mtx_lock(&vdev->eb_lock);
+      virtgpu_flush_locked(vdev, NULL);
+      simple_mtx_unlock(&vdev->eb_lock);
+   }
+
+   gem_close(to_virtgpu_device(vdev), handle);
+}
+
+static void
+virtgpu_close(struct vdrm_device *vdev)
+{
+   struct virtgpu_device *vgdev = to_virtgpu_device(vdev);
+
+   munmap(vdev->shmem, SHMEM_SZ);
+   gem_close(vgdev, vgdev->shmem_handle);
+}
+
+static const struct vdrm_device_funcs funcs = {
+   .flush_locked = virtgpu_flush_locked,
+   .wait_fence = virtgpu_wait_fence,
+   .execbuf_locked = virtgpu_execbuf_locked,
+   .dmabuf_to_handle = virtgpu_dmabuf_to_handle,
+   .handle_to_res_id = virtgpu_handle_to_res_id,
+   .bo_create = virtgpu_bo_create,
+   .bo_wait = virtgpu_bo_wait,
+   .bo_map = virtgpu_bo_map,
+   .bo_export_dmabuf = virtgpu_bo_export_dmabuf,
+   .bo_close = virtgpu_bo_close,
+   .close = virtgpu_close,
+};
+
+static int
+get_capset(int fd, struct virgl_renderer_capset_drm *caps)
+{
+   struct drm_virtgpu_get_caps args = {
+         .cap_set_id = VIRGL_RENDERER_CAPSET_DRM,
+         .cap_set_ver = 0,
+         .addr = (uintptr_t)caps,
+         .size = sizeof(*caps),
+   };
+
+   memset(caps, 0, sizeof(*caps));
+
+   return virtgpu_ioctl(fd, VIRTGPU_GET_CAPS, &args);
+}
+
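+/* Initialize the guest context as a native context, ie. with the DRM
+ * capset and a fixed number of timeline rings:
+ */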
+static int
+set_context(int fd)
+{
+   struct drm_virtgpu_context_set_param params[] = {
+         { VIRTGPU_CONTEXT_PARAM_CAPSET_ID, VIRGL_RENDERER_CAPSET_DRM },
+         { VIRTGPU_CONTEXT_PARAM_NUM_RINGS, 64 },
+   };
+   struct drm_virtgpu_context_init args = {
+      .num_params = ARRAY_SIZE(params),
+      .ctx_set_params = (uintptr_t)params,
+   };
+
+   return virtgpu_ioctl(fd, VIRTGPU_CONTEXT_INIT, &args);
+}
+
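+/* Allocate and map the shmem buffer shared with the host; the host
+ * places the response memory at shmem->rsp_mem_offset:
+ */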
+static int
+init_shmem(struct virtgpu_device *vgdev)
+{
+   struct vdrm_device *vdev = &vgdev->base;
+   struct drm_virtgpu_resource_create_blob args = {
+         .blob_mem   = VIRTGPU_BLOB_MEM_HOST3D,
+         .blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE,
+         .size       = SHMEM_SZ,
+         .blob_id    = 0,
+   };
+   int ret;
+
+   ret = virtgpu_ioctl(vgdev->fd, VIRTGPU_RESOURCE_CREATE_BLOB, &args);
+   if (ret) {
+      mesa_logi("failed to allocate shmem buffer: %s", strerror(errno));
+      return ret;
+   }
+
+   vgdev->shmem_handle = args.bo_handle;
+
+   ret = map_handle(vgdev->fd, vgdev->shmem_handle, args.size, (void **)&vdev->shmem);
+   if (ret) {
+      gem_close(vgdev, vgdev->shmem_handle);
+      vgdev->shmem_handle = 0;
+      return ret;
+   }
+
+   uint32_t offset = vdev->shmem->rsp_mem_offset;
+   vdev->rsp_mem_len = args.size - offset;
+   vdev->rsp_mem = &((uint8_t *)vdev->shmem)[offset];
+
+   return 0;
+}
+
+struct vdrm_device * vdrm_virtgpu_connect(int fd, uint32_t context_type);
+
+struct vdrm_device *
+vdrm_virtgpu_connect(int fd, uint32_t context_type)
+{
+   struct virgl_renderer_capset_drm caps;
+   struct virtgpu_device *vgdev;
+   struct vdrm_device *vdev;
+   int ret;
+
+   ret = get_capset(fd, &caps);
+   if (ret) {
+      mesa_logi("could not get caps: %s", strerror(errno));
+      return NULL;
+   }
+
+   if (caps.context_type != context_type) {
+      mesa_logi("wrong context_type: %u", caps.context_type);
+      return NULL;
+   }
+
+   ret = set_context(fd);
+   if (ret) {
+      mesa_logi("Could not set context type: %s", strerror(errno));
+      return NULL;
+   }
+
+   vgdev = calloc(1, sizeof(*vgdev));
+   if (!vgdev)
+      return NULL;
+
+   vgdev->fd = fd;
+
+   ret = init_shmem(vgdev);
+   if (ret) {
+      free(vgdev);
+      return NULL;
+   }
+
+   vdev = &vgdev->base;
+   vdev->caps = caps;
+   vdev->funcs = &funcs;
+
+   return vdev;
+}
