[PATCH v2 0/2] drm/virtio: fence handling in case of multi scanouts

2022-06-03 Thread Dongwon Kim
Current primary plane update flow when blob is enabled (for zero copy
display sharing) shows fence synchronization problems when multi planes
are referencing a same single large FB (i.e. multi displays in extended
mode). This is because there is only one fence bound to the FB and this
single fence is re-used asynchronously when flushing all associated
planes.

The way to prevent this is to assign the fence for each plane so that
flushing one plane won't affect, or be affected by, another plane's flush
operation.

The 1st patch "drm/virtio: .release ops for virtgpu fence release" which
adds device specific release ops is for making the virtio_gpu fence freed
upon the last dma_fence_put call.

The 2nd patch "drm/virtio: fence created per cursor/plane update" contains
the main implementation of per-plane fence.

Dongwon Kim (2):
  drm/virtio: .release ops for virtgpu fence release
  drm/virtio: fence created per cursor/plane update

 drivers/gpu/drm/virtio/virtgpu_drv.h   |   1 -
 drivers/gpu/drm/virtio/virtgpu_fence.c |   8 ++
 drivers/gpu/drm/virtio/virtgpu_plane.c | 103 ++---
 3 files changed, 47 insertions(+), 65 deletions(-)

-- 
2.20.1



[PATCH v2 1/2] drm/virtio: .release ops for virtgpu fence release

2022-06-03 Thread Dongwon Kim
virtio_gpu_fence_release is added to free virtio-gpu-fence
upon release of dma_fence.

Cc: Gurchetan Singh 
Cc: Gerd Hoffmann 
Cc: Vivek Kasireddy 
Signed-off-by: Dongwon Kim 
---
 drivers/gpu/drm/virtio/virtgpu_fence.c | 8 
 1 file changed, 8 insertions(+)

diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c 
b/drivers/gpu/drm/virtio/virtgpu_fence.c
index f28357dbde35..ba659ac2a51d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -63,12 +63,20 @@ static void virtio_gpu_timeline_value_str(struct dma_fence 
*f, char *str,
 (u64)atomic64_read(&fence->drv->last_fence_id));
 }
 
+static void virtio_gpu_fence_release(struct dma_fence *f)
+{
+   struct virtio_gpu_fence *fence = to_virtio_gpu_fence(f);
+
+   kfree(fence);
+}
+
 static const struct dma_fence_ops virtio_gpu_fence_ops = {
.get_driver_name = virtio_gpu_get_driver_name,
.get_timeline_name   = virtio_gpu_get_timeline_name,
.signaled= virtio_gpu_fence_signaled,
.fence_value_str = virtio_gpu_fence_value_str,
.timeline_value_str  = virtio_gpu_timeline_value_str,
+   .release = virtio_gpu_fence_release,
 };
 
 struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device 
*vgdev,
-- 
2.20.1



[PATCH v2 2/2] drm/virtio: fence created per cursor/plane update

2022-06-03 Thread Dongwon Kim
Having one fence for a vgfb would cause conflict in case there are
multiple planes referencing the same vgfb (e.g. Xorg screen covering
two displays in extended mode) being flushed simultaneously. So it makes
sense to use a separate fence for each plane update to prevent this.

vgfb->fence is not required anymore with the suggested code change so
both prepare_fb and cleanup_fb are removed since only fence creation/
freeing are done in there.

v2: - use the fence always as long as guest_blob is enabled on the
  scanout object
- obj and fence initialized as NULL ptrs to avoid uninitialized
  ptr problem (Reported by Dan Carpenter/kernel-test-robot)

Reported-by: kernel test robot 
Reported-by: Dan Carpenter 
Cc: Gurchetan Singh 
Cc: Gerd Hoffmann 
Cc: Vivek Kasireddy 
Signed-off-by: Dongwon Kim 
---
 drivers/gpu/drm/virtio/virtgpu_drv.h   |   1 -
 drivers/gpu/drm/virtio/virtgpu_plane.c | 103 ++---
 2 files changed, 39 insertions(+), 65 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h 
b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 0a194aaad419..4c59c1e67ca5 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -186,7 +186,6 @@ struct virtio_gpu_output {
 
 struct virtio_gpu_framebuffer {
struct drm_framebuffer base;
-   struct virtio_gpu_fence *fence;
 };
 #define to_virtio_gpu_framebuffer(x) \
container_of(x, struct virtio_gpu_framebuffer, base)
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c 
b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 6d3cc9e238a4..821023b7d57d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -137,29 +137,37 @@ static void virtio_gpu_resource_flush(struct drm_plane 
*plane,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_framebuffer *vgfb;
struct virtio_gpu_object *bo;
+   struct virtio_gpu_object_array *objs = NULL;
+   struct virtio_gpu_fence *fence = NULL;
 
vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
-   if (vgfb->fence) {
-   struct virtio_gpu_object_array *objs;
 
+   if (!bo)
+   return;
+
+   if (bo->dumb && bo->guest_blob)
+   fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
+  0);
+
+   if (fence) {
objs = virtio_gpu_array_alloc(1);
-   if (!objs)
+   if (!objs) {
+   kfree(fence);
return;
+   }
virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
virtio_gpu_array_lock_resv(objs);
-   virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
- width, height, objs, vgfb->fence);
-   virtio_gpu_notify(vgdev);
+   }
+
+   virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
+ width, height, objs, fence);
+   virtio_gpu_notify(vgdev);
 
-   dma_fence_wait_timeout(&vgfb->fence->f, true,
+   if (fence) {
+   dma_fence_wait_timeout(&fence->f, true,
   msecs_to_jiffies(50));
-   dma_fence_put(&vgfb->fence->f);
-   vgfb->fence = NULL;
-   } else {
-   virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
- width, height, NULL, NULL);
-   virtio_gpu_notify(vgdev);
+   dma_fence_put(&fence->f);
}
 }
 
@@ -239,47 +247,6 @@ static void virtio_gpu_primary_plane_update(struct 
drm_plane *plane,
  rect.y2 - rect.y1);
 }
 
-static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
-  struct drm_plane_state *new_state)
-{
-   struct drm_device *dev = plane->dev;
-   struct virtio_gpu_device *vgdev = dev->dev_private;
-   struct virtio_gpu_framebuffer *vgfb;
-   struct virtio_gpu_object *bo;
-
-   if (!new_state->fb)
-   return 0;
-
-   vgfb = to_virtio_gpu_framebuffer(new_state->fb);
-   bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
-   if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob))
-   return 0;
-
-   if (bo->dumb && (plane->state->fb != new_state->fb)) {
-   vgfb->fence = virtio_gpu_fence_alloc(vgdev, 
vgdev->fence_drv.context,
-0);
-   if (!vgfb->fence)
-   return -ENOMEM;
-   }
-
-   return 0;
-}
-
-static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane

Re: [PATCH v2 2/2] drm/virtio: fence created per cursor/plane update

2022-06-14 Thread Dongwon Kim
On Thu, Jun 09, 2022 at 06:24:43AM +0200, Gerd Hoffmann wrote:
> On Fri, Jun 03, 2022 at 02:18:49PM -0700, Dongwon Kim wrote:
> > Having one fence for a vgfb would cause conflict in case there are
> > multiple planes referencing the same vgfb (e.g. Xorg screen covering
> > two displays in extended mode) being flushed simultaneously. So it makes
> > sence to use a separated fence for each plane update to prevent this.
> > 
> > vgfb->fence is not required anymore with the suggested code change so
> > both prepare_fb and cleanup_fb are removed since only fence creation/
> > freeing are done in there.
> 
> The fences are allocated and released in prepare_fb + cleanup_fb for a
> reason: atomic_update must not fail.

In case fence allocation fails, it falls back to non-fence path so it
won't fail for primary-plane-update.

For cursor plane update, it returns if fence is NULL but we could change
it to just proceed and just make it skip waiting like,

if (fence) {
dma_fence_wait(&fence->f, true);
dma_fence_put(&fence->f);
}   

Or maybe I can limit my suggested changes to primary-plane-update only.

What do you think about these?

> 
> I guess virtio-gpu must be fixed to use drm_plane_state->fence
> correctly ...

I was thinking about this too but current functions (e.g.
virtio_gpu_cmd_transfer_to_host_2d) takes "struct virtio_gpu_fence".
Not sure what is the best way to connect drm_plane_state->fence to
virtio_gpu_fence without changing major function interfaces.

> 
> take care,
>   Gerd
> 


[PATCH 1/3] drm/virtio: .release ops for virtgpu fence release

2022-05-10 Thread Dongwon Kim
virtio_gpu_fence_release is added to free virtio-gpu-fence
upon release of dma_fence.

Cc: Gurchetan Singh 
Cc: Gerd Hoffmann 
Cc: Vivek Kasireddy 
Signed-off-by: Dongwon Kim 
---
 drivers/gpu/drm/virtio/virtgpu_fence.c | 8 
 1 file changed, 8 insertions(+)

diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c 
b/drivers/gpu/drm/virtio/virtgpu_fence.c
index f28357dbde35..ba659ac2a51d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -63,12 +63,20 @@ static void virtio_gpu_timeline_value_str(struct dma_fence 
*f, char *str,
 (u64)atomic64_read(&fence->drv->last_fence_id));
 }
 
+static void virtio_gpu_fence_release(struct dma_fence *f)
+{
+   struct virtio_gpu_fence *fence = to_virtio_gpu_fence(f);
+
+   kfree(fence);
+}
+
 static const struct dma_fence_ops virtio_gpu_fence_ops = {
.get_driver_name = virtio_gpu_get_driver_name,
.get_timeline_name   = virtio_gpu_get_timeline_name,
.signaled= virtio_gpu_fence_signaled,
.fence_value_str = virtio_gpu_fence_value_str,
.timeline_value_str  = virtio_gpu_timeline_value_str,
+   .release = virtio_gpu_fence_release,
 };
 
 struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device 
*vgdev,
-- 
2.20.1



[PATCH 2/3] drm/virtio: fence created per cursor/plane update

2022-05-10 Thread Dongwon Kim
Having one fence for a vgfb would cause conflict in case there are
multiple planes referencing the same vgfb (e.g. Xorg screen covering
two displays in extended mode) being flushed simultaneously. So it makes
sense to use a separate fence for each plane update to prevent this.

vgfb->fence is not required anymore with the suggested code change so
both prepare_fb and cleanup_fb are removed since only fence creation/
freeing are done in there.

Cc: Gurchetan Singh 
Cc: Gerd Hoffmann 
Cc: Vivek Kasireddy 
Signed-off-by: Dongwon Kim 
---
 drivers/gpu/drm/virtio/virtgpu_drv.h   |  1 -
 drivers/gpu/drm/virtio/virtgpu_plane.c | 98 +-
 2 files changed, 35 insertions(+), 64 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h 
b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 0a194aaad419..4c59c1e67ca5 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -186,7 +186,6 @@ struct virtio_gpu_output {
 
 struct virtio_gpu_framebuffer {
struct drm_framebuffer base;
-   struct virtio_gpu_fence *fence;
 };
 #define to_virtio_gpu_framebuffer(x) \
container_of(x, struct virtio_gpu_framebuffer, base)
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c 
b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 6d3cc9e238a4..9856e9941e37 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -137,29 +137,36 @@ static void virtio_gpu_resource_flush(struct drm_plane 
*plane,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_framebuffer *vgfb;
struct virtio_gpu_object *bo;
+   struct virtio_gpu_object_array *objs;
+   struct virtio_gpu_fence *fence;
 
vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
-   if (vgfb->fence) {
-   struct virtio_gpu_object_array *objs;
 
+   if (bo && bo->dumb && (plane->state->fb != new_state->fb) &&
+   ((plane->type == DRM_PLANE_TYPE_PRIMARY && bo->guest_blob) ||
+   plane->type != DRM_PLANE_TYPE_PRIMARY))
+   fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
+  0);
+
+   if (fence) {
objs = virtio_gpu_array_alloc(1);
-   if (!objs)
+   if (!objs) {
+   kfree(fence);
return;
+   }
virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
virtio_gpu_array_lock_resv(objs);
-   virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
- width, height, objs, vgfb->fence);
-   virtio_gpu_notify(vgdev);
+   }
+
+   virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
+ width, height, objs, fence);
+   virtio_gpu_notify(vgdev);
 
-   dma_fence_wait_timeout(&vgfb->fence->f, true,
+   if (fence) {
+   dma_fence_wait_timeout(&fence->f, true,
   msecs_to_jiffies(50));
-   dma_fence_put(&vgfb->fence->f);
-   vgfb->fence = NULL;
-   } else {
-   virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
- width, height, NULL, NULL);
-   virtio_gpu_notify(vgdev);
+   dma_fence_put(&fence->f);
}
 }
 
@@ -239,47 +246,6 @@ static void virtio_gpu_primary_plane_update(struct 
drm_plane *plane,
  rect.y2 - rect.y1);
 }
 
-static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
-  struct drm_plane_state *new_state)
-{
-   struct drm_device *dev = plane->dev;
-   struct virtio_gpu_device *vgdev = dev->dev_private;
-   struct virtio_gpu_framebuffer *vgfb;
-   struct virtio_gpu_object *bo;
-
-   if (!new_state->fb)
-   return 0;
-
-   vgfb = to_virtio_gpu_framebuffer(new_state->fb);
-   bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
-   if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob))
-   return 0;
-
-   if (bo->dumb && (plane->state->fb != new_state->fb)) {
-   vgfb->fence = virtio_gpu_fence_alloc(vgdev, 
vgdev->fence_drv.context,
-0);
-   if (!vgfb->fence)
-   return -ENOMEM;
-   }
-
-   return 0;
-}
-
-static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane,
-   struct drm_plane_state *old_state)
-{
-   struct virtio_gpu_framebuffer *vgfb;
-
-

[PATCH 3/3] drm/virtio: use the fence for every plane update

2022-05-10 Thread Dongwon Kim
Trying to use the fence to make plane update to wait for the host to
consume the buffer for better synchronization in all cases

Cc: Gurchetan Singh 
Cc: Gerd Hoffmann 
Cc: Vivek Kasireddy 
Signed-off-by: Dongwon Kim 
---
 drivers/gpu/drm/virtio/virtgpu_plane.c | 7 +--
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c 
b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 9856e9941e37..0333181e9dbf 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -142,12 +142,7 @@ static void virtio_gpu_resource_flush(struct drm_plane 
*plane,
 
vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
-
-   if (bo && bo->dumb && (plane->state->fb != new_state->fb) &&
-   ((plane->type == DRM_PLANE_TYPE_PRIMARY && bo->guest_blob) ||
-   plane->type != DRM_PLANE_TYPE_PRIMARY))
-   fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
-  0);
+   fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
 
if (fence) {
objs = virtio_gpu_array_alloc(1);
-- 
2.20.1



[RFC PATCH 0/3] drm/virtio: synchronous guest framebuffer update

2022-09-26 Thread Dongwon Kim
This series is for fixing some issues regarding scanout synchronization with
host (e.g. QEMU/KVM) that uses virtio-gpu. This series replaces the previously
submitted one, "[PATCH v2 0/2] drm/virtio: fence handling in case of multi
scanouts".

Dongwon Kim (3):
  drm/virtio: .release ops for virtgpu fence release
  drm/virtio: new fence for every plane update
  drm/virtio: drm_gem_plane_helper_prepare_fb for obj synchronization

 drivers/gpu/drm/virtio/virtgpu_drv.h   |  7 +++
 drivers/gpu/drm/virtio/virtgpu_fence.c |  8 +++
 drivers/gpu/drm/virtio/virtgpu_plane.c | 85 +++---
 3 files changed, 65 insertions(+), 35 deletions(-)

-- 
2.20.1



[RFC PATCH 1/3] drm/virtio: .release ops for virtgpu fence release

2022-09-26 Thread Dongwon Kim
virtio_gpu_fence_release is added to free virtio-gpu-fence
upon release of dma_fence.

Cc: Gurchetan Singh 
Cc: Gerd Hoffmann 
Cc: Vivek Kasireddy 
Signed-off-by: Dongwon Kim 
---
 drivers/gpu/drm/virtio/virtgpu_fence.c | 8 
 1 file changed, 8 insertions(+)

diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c 
b/drivers/gpu/drm/virtio/virtgpu_fence.c
index f28357dbde35..ba659ac2a51d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -63,12 +63,20 @@ static void virtio_gpu_timeline_value_str(struct dma_fence 
*f, char *str,
 (u64)atomic64_read(&fence->drv->last_fence_id));
 }
 
+static void virtio_gpu_fence_release(struct dma_fence *f)
+{
+   struct virtio_gpu_fence *fence = to_virtio_gpu_fence(f);
+
+   kfree(fence);
+}
+
 static const struct dma_fence_ops virtio_gpu_fence_ops = {
.get_driver_name = virtio_gpu_get_driver_name,
.get_timeline_name   = virtio_gpu_get_timeline_name,
.signaled= virtio_gpu_fence_signaled,
.fence_value_str = virtio_gpu_fence_value_str,
.timeline_value_str  = virtio_gpu_timeline_value_str,
+   .release = virtio_gpu_fence_release,
 };
 
 struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device 
*vgdev,
-- 
2.20.1



[RFC PATCH 2/3] drm/virtio: new fence for every plane update

2022-09-26 Thread Dongwon Kim
Having a fence linked to a virtio_gpu_framebuffer in the plane update sequence
would cause a conflict when several planes reference the same framebuffer,
especially when those planes are updated concurrently (e.g. an Xorg screen
covering multiple displays configured for an extended mode). So it is better
for the fence to be created for every plane update event and then linked to
the plane state, since each plane update comes with a new plane state obj.

The plane state for virtio-gpu, "struct virtio_gpu_plane_state" is added for
this. This structure represents drm_plane_state and it contains the reference
to virtio_gpu_fence, which was previously in
"struct virtio_gpu_framebuffer".

"virtio_gpu_plane_duplicate_state" and "virtio_gpu_plane_destroy_state" were
added as well to manage virtio_gpu_plane_state.

Several drm helpers were slightly modified accordingly to use the fence in the
new plane state structure. virtio_gpu_plane_cleanup_fb was completely removed
as none of the code in the function is required anymore.

Also, the condition for adding the fence, (plane->state->fb != new_state->fb),
was removed to allow a synchronous FB update even when the same FB is flushed
again consecutively.

Cc: Gurchetan Singh 
Cc: Gerd Hoffmann 
Cc: Vivek Kasireddy 
Signed-off-by: Dongwon Kim 
---
 drivers/gpu/drm/virtio/virtgpu_drv.h   |  7 +++
 drivers/gpu/drm/virtio/virtgpu_plane.c | 76 +++---
 2 files changed, 51 insertions(+), 32 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h 
b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 9b98470593b0..20a418f64533 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -190,6 +190,13 @@ struct virtio_gpu_framebuffer {
 #define to_virtio_gpu_framebuffer(x) \
container_of(x, struct virtio_gpu_framebuffer, base)
 
+struct virtio_gpu_plane_state {
+   struct drm_plane_state base;
+   struct virtio_gpu_fence *fence;
+};
+#define to_virtio_gpu_plane_state(x) \
+   container_of(x, struct virtio_gpu_plane_state, base)
+
 struct virtio_gpu_queue {
struct virtqueue *vq;
spinlock_t qlock;
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c 
b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 4c09e313bebc..fd5e170dcb22 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -66,12 +66,36 @@ uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
return format;
 }
 
+static struct
+drm_plane_state *virtio_gpu_plane_duplicate_state(struct drm_plane *plane)
+{
+   struct virtio_gpu_plane_state *new;
+
+   if (WARN_ON(!plane->state))
+   return NULL;
+
+   new = kzalloc(sizeof(*new), GFP_KERNEL);
+   if (!new)
+   return NULL;
+
+   __drm_atomic_helper_plane_duplicate_state(plane, &new->base);
+
+   return &new->base;
+}
+
+static void virtio_gpu_plane_destroy_state(struct drm_plane *plane,
+  struct drm_plane_state *state)
+{
+   __drm_atomic_helper_plane_destroy_state(state);
+   kfree(to_virtio_gpu_plane_state(state));
+}
+
 static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
.update_plane   = drm_atomic_helper_update_plane,
.disable_plane  = drm_atomic_helper_disable_plane,
.reset  = drm_atomic_helper_plane_reset,
-   .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
-   .atomic_destroy_state   = drm_atomic_helper_plane_destroy_state,
+   .atomic_duplicate_state = virtio_gpu_plane_duplicate_state,
+   .atomic_destroy_state   = virtio_gpu_plane_destroy_state,
 };
 
 static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
@@ -128,11 +152,13 @@ static void virtio_gpu_resource_flush(struct drm_plane 
*plane,
struct drm_device *dev = plane->dev;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_framebuffer *vgfb;
+   struct virtio_gpu_plane_state *vgplane_st;
struct virtio_gpu_object *bo;
 
vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+   vgplane_st = to_virtio_gpu_plane_state(plane->state);
bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
-   if (vgfb->fence) {
+   if (vgplane_st->fence) {
struct virtio_gpu_object_array *objs;
 
objs = virtio_gpu_array_alloc(1);
@@ -141,13 +167,12 @@ static void virtio_gpu_resource_flush(struct drm_plane 
*plane,
virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
virtio_gpu_array_lock_resv(objs);
virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
- width, height, objs, vgfb->fence);
+ width, height, objs,
+ vgplane_st->fence);
  

[RFC PATCH 3/3] drm/virtio: drm_gem_plane_helper_prepare_fb for obj synchronization

2022-09-26 Thread Dongwon Kim
This helper is needed for framebuffer synchronization. Old framebuffer data
is often displayed on the guest display without this helper.

Cc: Gurchetan Singh 
Cc: Gerd Hoffmann 
Cc: Vivek Kasireddy 
Signed-off-by: Dongwon Kim 
---
 drivers/gpu/drm/virtio/virtgpu_plane.c | 4 
 1 file changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c 
b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 51b14ee4ece9..968afd0029fa 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -26,6 +26,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "virtgpu_drv.h"
 
@@ -270,6 +271,9 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane 
*plane,
vgfb = to_virtio_gpu_framebuffer(new_state->fb);
vgplane_st = to_virtio_gpu_plane_state(new_state);
bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
+
+   drm_gem_plane_helper_prepare_fb(plane, new_state);
+
if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob))
return 0;
 
-- 
2.20.1



[RFC PATCH 0/2] drm/virtio:virtio-gpu driver freeze-and-restore implementation

2022-09-27 Thread Dongwon Kim
This series adds support for suspend and resume (or freeze and restore) to
the virtio-gpu driver.

The first patch adds virtio-dev .freeze and .restore hooks that basically
flush and remove the virtqueues before going into suspension, then
reinitialize them upon the wake-up event.

The second patch implements the virtio-gpu resource restoration mechanism
that is needed for the synchronization of resources between the host and the
guest when suspend (and hibernation) and resume happen.

Dongwon Kim (2):
  drm/virtio: freeze and restore hooks to support suspend and resume
  drm/virtio: restore virtio_gpu_objects upon suspend and resume

 drivers/gpu/drm/virtio/virtgpu_drv.c|  59 +++-
 drivers/gpu/drm/virtio/virtgpu_drv.h|  11 +++
 drivers/gpu/drm/virtio/virtgpu_kms.c|  23 +++--
 drivers/gpu/drm/virtio/virtgpu_object.c | 122 ++--
 4 files changed, 179 insertions(+), 36 deletions(-)

-- 
2.20.1



[RFC PATCH 1/2] drm/virtio: freeze and restore hooks to support suspend and resume

2022-09-27 Thread Dongwon Kim
virtqueue needs to be flushed and removed before VM goes into sleep or
hibernation then should be reinitialized again upon wake-up.

Cc: Gerd Hoffmann 
Cc: Vivek Kasireddy 
Signed-off-by: Dongwon Kim 
---
 drivers/gpu/drm/virtio/virtgpu_drv.c | 53 +++-
 drivers/gpu/drm/virtio/virtgpu_drv.h |  1 +
 drivers/gpu/drm/virtio/virtgpu_kms.c | 22 
 3 files changed, 69 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c 
b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 0035affc3e59..2738589a04e4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -148,6 +148,53 @@ static unsigned int features[] = {
VIRTIO_GPU_F_RESOURCE_BLOB,
VIRTIO_GPU_F_CONTEXT_INIT,
 };
+
+#ifdef CONFIG_PM_SLEEP
+static int virtgpu_freeze(struct virtio_device *vdev)
+{
+   struct drm_device *dev = vdev->priv;
+   struct virtio_gpu_device *vgdev = dev->dev_private;
+   int error;
+
+   error = drm_mode_config_helper_suspend(dev);
+   if (error) {
+   DRM_ERROR("suspend error %d\n", error);
+   return error;
+   }
+
+   flush_work(&vgdev->obj_free_work);
+   flush_work(&vgdev->ctrlq.dequeue_work);
+   flush_work(&vgdev->cursorq.dequeue_work);
+   flush_work(&vgdev->config_changed_work);
+   vdev->config->del_vqs(vdev);
+
+   return 0;
+}
+
+static int virtgpu_restore(struct virtio_device *vdev)
+{
+   struct drm_device *dev = vdev->priv;
+   struct virtio_gpu_device *vgdev = dev->dev_private;
+   int error;
+
+   error = virtio_gpu_find_vqs(vgdev);
+   if (error) {
+   DRM_ERROR("failed to find virt queues\n");
+   return error;
+   }
+
+   virtio_device_ready(vdev);
+
+   error = drm_mode_config_helper_resume(dev);
+   if (error) {
+   DRM_ERROR("resume error %d\n", error);
+   return error;
+   }
+
+   return 0;
+}
+#endif
+
 static struct virtio_driver virtio_gpu_driver = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
@@ -156,7 +203,11 @@ static struct virtio_driver virtio_gpu_driver = {
.id_table = id_table,
.probe = virtio_gpu_probe,
.remove = virtio_gpu_remove,
-   .config_changed = virtio_gpu_config_changed
+   .config_changed = virtio_gpu_config_changed,
+#ifdef CONFIG_PM_SLEEP
+   .freeze = virtgpu_freeze,
+   .restore = virtgpu_restore,
+#endif
 };
 
 module_virtio_driver(virtio_gpu_driver);
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h 
b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 20a418f64533..646f7674a496 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -292,6 +292,7 @@ void virtio_gpu_deinit(struct drm_device *dev);
 void virtio_gpu_release(struct drm_device *dev);
 int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file);
 void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file 
*file);
+int virtio_gpu_find_vqs(struct virtio_gpu_device *vgdev);
 
 /* virtgpu_gem.c */
 int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c 
b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 27b7f14dae89..3a1d164eaf10 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -112,16 +112,28 @@ static void virtio_gpu_get_capsets(struct 
virtio_gpu_device *vgdev,
vgdev->num_capsets = num_capsets;
 }
 
-int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
+int virtio_gpu_find_vqs(struct virtio_gpu_device *vgdev)
 {
static vq_callback_t *callbacks[] = {
virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack
};
static const char * const names[] = { "control", "cursor" };
+   struct virtqueue *vqs[2];
+   int ret;
+
+   ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
+   if (ret)
+   return ret;
+
+   vgdev->ctrlq.vq = vqs[0];
+   vgdev->cursorq.vq = vqs[1];
+
+   return 0;
+}
 
+int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
+{
struct virtio_gpu_device *vgdev;
-   /* this will expand later */
-   struct virtqueue *vqs[2];
u32 num_scanouts, num_capsets;
int ret = 0;
 
@@ -205,13 +217,11 @@ int virtio_gpu_init(struct virtio_device *vdev, struct 
drm_device *dev)
DRM_INFO("features: %ccontext_init\n",
 vgdev->has_context_init ? '+' : '-');
 
-   ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
+   ret = virtio_gpu_find_vqs(vgdev);
if (ret) {
DRM_ERROR("failed to find virt queues\n");
goto err_vqs;
}
-   vgdev

[RFC PATCH 2/2] drm/virtio: restore virtio_gpu_objects upon suspend and resume

2022-09-27 Thread Dongwon Kim
The virtio-gpu host (e.g. QEMU) deletes all virtio-gpu resources when the
guest is suspended and then resumed. This behavior is invisible to the guest.
As a result, the guest can send out virtio-gpu commands for those deleted
resources with the assumption that they are still there on the host's side
when the guest wakes up. All of those commands eventually end up with
invalid resource errors.

There should be some sort of host and guest resource synchronization to
address this problem, which is the motivation of this new restoration
mechanism.

It is designed in a way that the driver saves the virtio-gpu resource
objects somewhere and then resubmits them to the host. More details are as
follows.

- Whenever a new virtio-gpu object is created, a list element containing
the object and object params is also created in "virtio_gpu_object_create".
This list element is then added to the linked list "obj_list".

- All list elements in the list are restored by virtio_gpu_object_restore
in virtio-gpu's .restore hook (virtgpu_restore). virtio_gpu_object_restore
iterates "obj_list" and sends the host resource-creation command for each
virtio-gpu object, which lets the host recreate those active resources.

- List elements in the list are removed when virtio-gpu objects are
unreferenced.

- A part of code in "virtio_gpu_object_create" that sets up shmem for
the object and submit it to the host is reused during restoring process so
it was taken out and defined as a new function, "virtio_gpu_object_pin".

Cc: Gerd Hoffmann 
Cc: Vivek Kasireddy 
Signed-off-by: Dongwon Kim 
---
 drivers/gpu/drm/virtio/virtgpu_drv.c|   6 ++
 drivers/gpu/drm/virtio/virtgpu_drv.h|  10 ++
 drivers/gpu/drm/virtio/virtgpu_kms.c|   1 +
 drivers/gpu/drm/virtio/virtgpu_object.c | 122 ++--
 4 files changed, 110 insertions(+), 29 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c 
b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 2738589a04e4..0547b15772f8 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -185,6 +185,12 @@ static int virtgpu_restore(struct virtio_device *vdev)
 
virtio_device_ready(vdev);
 
+   error = virtio_gpu_object_restore(vgdev);
+   if (error) {
+   DRM_ERROR("Failed to recover objects\n");
+   return error;
+   }
+
error = drm_mode_config_helper_resume(dev);
if (error) {
DRM_ERROR("resume error %d\n", error);
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h 
b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 646f7674a496..cc417f5b127c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -123,6 +123,12 @@ struct virtio_gpu_object_array {
struct drm_gem_object *objs[];
 };
 
+struct virtio_gpu_object_list {
+   struct virtio_gpu_object *bo;
+   struct virtio_gpu_object_params params;
+   struct list_head list;
+};
+
 struct virtio_gpu_vbuffer;
 struct virtio_gpu_device;
 
@@ -259,6 +265,7 @@ struct virtio_gpu_device {
struct work_struct obj_free_work;
spinlock_t obj_free_lock;
struct list_head obj_free_list;
+   struct list_head obj_list;
 
struct virtio_gpu_drv_capset *capsets;
uint32_t num_capsets;
@@ -464,6 +471,9 @@ bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo);
 
 int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
   uint32_t *resid);
+
+int virtio_gpu_object_restore(struct virtio_gpu_device *vgdev);
+
 /* virtgpu_prime.c */
 int virtio_gpu_resource_assign_uuid(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo);
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c 
b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 3a1d164eaf10..4fb034fbdf0b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -160,6 +160,7 @@ int virtio_gpu_init(struct virtio_device *vdev, struct 
drm_device *dev)
vgdev->fence_drv.context = dma_fence_context_alloc(1);
spin_lock_init(&vgdev->fence_drv.lock);
INIT_LIST_HEAD(&vgdev->fence_drv.fences);
+   INIT_LIST_HEAD(&vgdev->obj_list);
INIT_LIST_HEAD(&vgdev->cap_cache);
INIT_WORK(&vgdev->config_changed_work,
  virtio_gpu_config_changed_work_func);
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c 
b/drivers/gpu/drm/virtio/virtgpu_object.c
index 8d7728181de0..49660932c822 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -61,6 +61,38 @@ static void virtio_gpu_resource_id_put(struct 
virtio_gpu_device *vgdev, uint32_t
}
 }
 
+static void virtio_gpu_object_list_add(struct virtio_gpu_device *vgdev,
+  struct virtio_gpu_object *bo,
+

Re: [PATCH] udmabuf: Add support for mapping hugepages (v3)

2021-06-08 Thread Dongwon Kim
I see that the number of entries in the list often exceeds list_limit,
currently hardcoded to 1024, for a full HD scanout resource (==
1920*1080*4 bytes). Can we include a change in this patch to increase it
to something like 4096 or higher?

On Fri, Jun 04, 2021 at 01:59:39PM -0700, Vivek Kasireddy wrote:
> If the VMM's (Qemu) memory backend is backed up by memfd + Hugepages
> (hugetlbfs and not THP), we have to first find the hugepage(s) where
> the Guest allocations are located and then extract the regular 4k
> sized subpages from them.
> 
> v2: Ensure that the subpage and hugepage offsets are calculated correctly
> when the range of subpage allocations cuts across multiple hugepages.
> 
> v3: Instead of repeatedly looking up the hugepage for each subpage,
> only do it when the subpage allocation crosses over into a different
> hugepage. (suggested by Gerd and DW)
> 
> Cc: Gerd Hoffmann 
> Signed-off-by: Vivek Kasireddy 
> Signed-off-by: Dongwon Kim 
> ---
>  drivers/dma-buf/udmabuf.c | 51 +--
>  1 file changed, 44 insertions(+), 7 deletions(-)
> 
> diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
> index db732f71e59a..2e02bbfe30fd 100644
> --- a/drivers/dma-buf/udmabuf.c
> +++ b/drivers/dma-buf/udmabuf.c
> @@ -11,6 +11,7 @@
>  #include 
>  #include 
>  #include 
> +#include 
>  
>  static const u32list_limit = 1024;  /* udmabuf_create_list->count limit 
> */
>  static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes  
> */
> @@ -163,7 +164,9 @@ static long udmabuf_create(struct miscdevice *device,
>   struct udmabuf *ubuf;
>   struct dma_buf *buf;
>   pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
> - struct page *page;
> + struct page *page, *hpage = NULL;
> + pgoff_t subpgoff, maxsubpgs;
> + struct hstate *hpstate;
>   int seals, ret = -EINVAL;
>   u32 i, flags;
>  
> @@ -194,7 +197,8 @@ static long udmabuf_create(struct miscdevice *device,
>   memfd = fget(list[i].memfd);
>   if (!memfd)
>   goto err;
> - if (!shmem_mapping(file_inode(memfd)->i_mapping))
> + if (!shmem_mapping(file_inode(memfd)->i_mapping) &&
> + !is_file_hugepages(memfd))
>   goto err;
>   seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
>   if (seals == -EINVAL)
> @@ -205,17 +209,50 @@ static long udmabuf_create(struct miscdevice *device,
>   goto err;
>   pgoff = list[i].offset >> PAGE_SHIFT;
>   pgcnt = list[i].size   >> PAGE_SHIFT;
> + if (is_file_hugepages(memfd)) {
> + hpstate = hstate_file(memfd);
> + pgoff = list[i].offset >> huge_page_shift(hpstate);
> + subpgoff = (list[i].offset &
> + ~huge_page_mask(hpstate)) >> PAGE_SHIFT;
> + maxsubpgs = huge_page_size(hpstate) >> PAGE_SHIFT;
> + }
>   for (pgidx = 0; pgidx < pgcnt; pgidx++) {
> - page = shmem_read_mapping_page(
> - file_inode(memfd)->i_mapping, pgoff + pgidx);
> - if (IS_ERR(page)) {
> - ret = PTR_ERR(page);
> - goto err;
> + if (is_file_hugepages(memfd)) {
> + if (!hpage) {
> + hpage = find_get_page_flags(
> + file_inode(memfd)->i_mapping,
> + pgoff, FGP_ACCESSED);
> + if (IS_ERR(hpage)) {
> + ret = PTR_ERR(hpage);
> + goto err;
> + }
> + }
> + page = hpage + subpgoff;
> + get_page(page);
> + subpgoff++;
> + if (subpgoff == maxsubpgs) {
> + put_page(hpage);
> + hpage = NULL;
> + subpgoff = 0;
> + pgoff++;
> + }
> + } else {
> + page = shmem_read_mapping_page(
> + file_inode(memfd)->i_mapping,
> + pgoff + pgidx);
> +   

[PATCH] drm: set DRM_RENDER_ALLOW flag on DRM_IOCTL_MODE_CREATE/DESTROY_DUMB ioctls

2021-06-10 Thread Dongwon Kim
Render clients should be able to create/destroy a dumb object to import
and use it as a render buffer in case the default DRM device is different
from the render device (i.e. kmsro).

Signed-off-by: Dongwon Kim 
---
 drivers/gpu/drm/drm_ioctl.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 98ae00661656..f2f72e132741 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -685,9 +685,9 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, 
DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, 
DRM_MASTER),
-   DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, 
0),
+   DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, 
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, 0),
-   DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, 
0),
+   DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, 
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, 
drm_mode_obj_get_properties_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, 
drm_mode_obj_set_property_ioctl, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, 
DRM_MASTER),
-- 
2.20.1



[PATCH] udmabuf: configurable list_limit and size_limit_mb

2021-06-11 Thread Dongwon Kim
The default list_limit and size_limit_mb are not big enough to cover all
possible use cases. For example, list_limit could be well over its default of
1024 if only one or a few pages are chained in each individual list entry
when creating a dmabuf backed by a >4MB buffer. list_limit and size_limit_mb
are now defined as module parameters so that they can be optionally configured
by root with proper values to remove these constraints.

Cc: Gerd Hoffmann 
Signed-off-by: Dongwon Kim 
---
 drivers/dma-buf/udmabuf.c | 9 +++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 1a79ce899b0f..8df761a10251 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -13,8 +13,13 @@
 #include 
 #include 
 
-static const u32list_limit = 1024;  /* udmabuf_create_list->count limit */
-static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes  */
+static int list_limit = 1024;
+module_param(list_limit, int, 0644);
+MODULE_PARM_DESC(list_limit, "udmabuf_create_list->count limit. Default is 
1024.");
+
+static int size_limit_mb = 64;
+module_param(size_limit_mb, int, 0644);
+MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. Default 
is 64.");
 
 struct udmabuf {
pgoff_t pagecount;
-- 
2.20.1



Re: [PATCH] drm: set DRM_RENDER_ALLOW flag on DRM_IOCTL_MODE_CREATE/DESTROY_DUMB ioctls

2021-06-11 Thread Dongwon Kim
Understood. I saw weston client apps failing to create render buffers
through the kmsro driver and found it was because they were not allowed to
create and destroy dumb objects, so I came up with this patch. I just thought
it was the simplest solution; I didn't know it violates the rule. I think I
should look into kmsro to make the client apps get the render buffer from
the ro-device instead. Thanks

On Fri, Jun 11, 2021 at 11:39:46AM +0100, Emil Velikov wrote:
> On Fri, 11 Jun 2021 at 10:47, Daniel Vetter  wrote:
> >
> > On Thu, Jun 10, 2021 at 02:36:59PM -0700, Dongwon Kim wrote:
> > > Render clients should be able to create/destroy dumb object to import
> > > and use it as render buffer in case the default DRM device is different
> > > from the render device (i.e. kmsro).
> > >
> > > Signed-off-by: Dongwon Kim 
> >
> > Uh no.
> >
> > Well I know everyone just hacks around this, but the idea behind dumb
> > buffer objects is that they're for kms scanout only. Furthermore on many
> > drivers they allocate a limited resource like CMA memory. Handing that out
> > like candy isn't a great idea.
> >
> > And it's exactly those drivers that kmsro currently is used for where the
> > display driver needs special memory.
> 
> Couldn't agree more. Perhaps we should add an inline comment and/or
> reference to a thread why?
> 
> -Emil


[PFC PATCH 0/3] drm/virtio: synchronous guest framebuffer update

2023-07-12 Thread Dongwon Kim
"Resubmission"

This series is for fixing issues regarding scanout synchronization with
host (e.g. QEMU/KVM) that uses virtio-gpu. This was submitted a while ago
but didn't get enough feedback/reviews so I am trying it again. This is a
rebased version. And the previous version is at
https://lists.freedesktop.org/archives/dri-devel/2022-September/373782.html

And very first version that has some feedbacks can be found here:
https://www.spinics.net/lists/dri-devel/msg349641.html

Dongwon Kim (3):
  drm/virtio: .release ops for virtgpu fence release
  drm/virtio: new fence for every plane update
  drm/virtio: drm_gem_plane_helper_prepare_fb for obj synchronization

 drivers/gpu/drm/virtio/virtgpu_drv.h   |  7 +++
 drivers/gpu/drm/virtio/virtgpu_fence.c |  8 +++
 drivers/gpu/drm/virtio/virtgpu_plane.c | 80 +++---
 3 files changed, 63 insertions(+), 32 deletions(-)

-- 
2.20.1



[RFC PATCH 1/3] drm/virtio: .release ops for virtgpu fence release

2023-07-12 Thread Dongwon Kim
virtio_gpu_fence_release is added to free virtio-gpu-fence
upon release of dma_fence.

Cc: Gerd Hoffmann 
Cc: Vivek Kasireddy 
Signed-off-by: Dongwon Kim 
---
 drivers/gpu/drm/virtio/virtgpu_fence.c | 8 
 1 file changed, 8 insertions(+)

diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c 
b/drivers/gpu/drm/virtio/virtgpu_fence.c
index f28357dbde35..ba659ac2a51d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -63,12 +63,20 @@ static void virtio_gpu_timeline_value_str(struct dma_fence 
*f, char *str,
 (u64)atomic64_read(&fence->drv->last_fence_id));
 }
 
+static void virtio_gpu_fence_release(struct dma_fence *f)
+{
+   struct virtio_gpu_fence *fence = to_virtio_gpu_fence(f);
+
+   kfree(fence);
+}
+
 static const struct dma_fence_ops virtio_gpu_fence_ops = {
.get_driver_name = virtio_gpu_get_driver_name,
.get_timeline_name   = virtio_gpu_get_timeline_name,
.signaled= virtio_gpu_fence_signaled,
.fence_value_str = virtio_gpu_fence_value_str,
.timeline_value_str  = virtio_gpu_timeline_value_str,
+   .release = virtio_gpu_fence_release,
 };
 
 struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device 
*vgdev,
-- 
2.20.1



[RFC PATCH 3/3] drm/virtio: drm_gem_plane_helper_prepare_fb for obj synchronization

2023-07-12 Thread Dongwon Kim
This helper is needed for framebuffer synchronization. Old framebuffer data
is often displayed on the guest display without this helper.

Cc: Gerd Hoffmann 
Cc: Vivek Kasireddy 
Signed-off-by: Dongwon Kim 
---
 drivers/gpu/drm/virtio/virtgpu_plane.c | 4 
 1 file changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c 
b/drivers/gpu/drm/virtio/virtgpu_plane.c
index a063f06ab6c5..e197299489ce 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -26,6 +26,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "virtgpu_drv.h"
 
@@ -271,6 +272,9 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane 
*plane,
vgfb = to_virtio_gpu_framebuffer(new_state->fb);
vgplane_st = to_virtio_gpu_plane_state(new_state);
bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
+
+   drm_gem_plane_helper_prepare_fb(plane, new_state);
+
if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob))
return 0;
 
-- 
2.20.1



[RFC PATCH 2/3] drm/virtio: new fence for every plane update

2023-07-12 Thread Dongwon Kim
Having a fence linked to a virtio_gpu_framebuffer in the plane update sequence
would cause a conflict when several planes reference the same framebuffer,
especially when those planes are updated concurrently (e.g. an Xorg screen
covering multiple displays configured for extended mode). So it is better
for the fence to be created for every plane update event and then linked to
the plane state, since each plane update comes with a new plane state object.

The plane state for virtio-gpu, "struct virtio_gpu_plane_state" is added for
this. This structure represents drm_plane_state and it contains the reference
to virtio_gpu_fence, which was previously in
"struct virtio_gpu_framebuffer".

"virtio_gpu_plane_duplicate_state" and "virtio_gpu_plane_destroy_state" were
added as well to manage virtio_gpu_plane_state.

Several drm helpers were slightly modified accordingly to use the fence in the
new plane state structure. virtio_gpu_plane_cleanup_fb was completely removed
as none of the code in the function is required anymore.

Also, the condition for adding a fence, (plane->state->fb != new_state->fb),
was removed so that the FB update stays synchronous even when the same FB is
flushed again consecutively.

Cc: Gerd Hoffmann 
Cc: Vivek Kasireddy 
Signed-off-by: Dongwon Kim 
---
 drivers/gpu/drm/virtio/virtgpu_drv.h   |  7 +++
 drivers/gpu/drm/virtio/virtgpu_plane.c | 76 +++---
 2 files changed, 51 insertions(+), 32 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h 
b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 4126c384286b..61fd37f95fbd 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -191,6 +191,13 @@ struct virtio_gpu_framebuffer {
 #define to_virtio_gpu_framebuffer(x) \
container_of(x, struct virtio_gpu_framebuffer, base)
 
+struct virtio_gpu_plane_state {
+   struct drm_plane_state base;
+   struct virtio_gpu_fence *fence;
+};
+#define to_virtio_gpu_plane_state(x) \
+   container_of(x, struct virtio_gpu_plane_state, base)
+
 struct virtio_gpu_queue {
struct virtqueue *vq;
spinlock_t qlock;
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c 
b/drivers/gpu/drm/virtio/virtgpu_plane.c
index a2e045f3a000..a063f06ab6c5 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -66,12 +66,36 @@ uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
return format;
 }
 
+static struct
+drm_plane_state *virtio_gpu_plane_duplicate_state(struct drm_plane *plane)
+{
+   struct virtio_gpu_plane_state *new;
+
+   if (WARN_ON(!plane->state))
+   return NULL;
+
+   new = kzalloc(sizeof(*new), GFP_KERNEL);
+   if (!new)
+   return NULL;
+
+   __drm_atomic_helper_plane_duplicate_state(plane, &new->base);
+
+   return &new->base;
+}
+
+static void virtio_gpu_plane_destroy_state(struct drm_plane *plane,
+  struct drm_plane_state *state)
+{
+   __drm_atomic_helper_plane_destroy_state(state);
+   kfree(to_virtio_gpu_plane_state(state));
+}
+
 static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
.update_plane   = drm_atomic_helper_update_plane,
.disable_plane  = drm_atomic_helper_disable_plane,
.reset  = drm_atomic_helper_plane_reset,
-   .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
-   .atomic_destroy_state   = drm_atomic_helper_plane_destroy_state,
+   .atomic_duplicate_state = virtio_gpu_plane_duplicate_state,
+   .atomic_destroy_state   = virtio_gpu_plane_destroy_state,
 };
 
 static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
@@ -128,11 +152,13 @@ static void virtio_gpu_resource_flush(struct drm_plane 
*plane,
struct drm_device *dev = plane->dev;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_framebuffer *vgfb;
+   struct virtio_gpu_plane_state *vgplane_st;
struct virtio_gpu_object *bo;
 
vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+   vgplane_st = to_virtio_gpu_plane_state(plane->state);
bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
-   if (vgfb->fence) {
+   if (vgplane_st->fence) {
struct virtio_gpu_object_array *objs;
 
objs = virtio_gpu_array_alloc(1);
@@ -141,13 +167,12 @@ static void virtio_gpu_resource_flush(struct drm_plane 
*plane,
virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
virtio_gpu_array_lock_resv(objs);
virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
- width, height, objs, vgfb->fence);
+ width, height, objs,
+ vgplane_st->fence);
  

[PATCH] intel: update global_name before HASH_ADD

2017-01-15 Thread Dongwon Kim
bo->global_name should be updated first before a hash value
for the entry is calculated with it by HASH_ADD macro.

Signed-off-by: Dongwon Kim 
---
 intel/intel_bufmgr_gem.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c
index 75949b9..c47cb9b 100644
--- a/intel/intel_bufmgr_gem.c
+++ b/intel/intel_bufmgr_gem.c
@@ -2736,11 +2736,12 @@ drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * 
name)
 
pthread_mutex_lock(&bufmgr_gem->lock);
if (!bo_gem->global_name) {
+   bo_gem->global_name = flink.name;
+   bo_gem->reusable = false;
+
HASH_ADD(name_hh, bufmgr_gem->name_table,
 global_name, sizeof(bo_gem->global_name),
 bo_gem);
-   bo_gem->global_name = flink.name;
-   bo_gem->reusable = false;
}
pthread_mutex_unlock(&bufmgr_gem->lock);
}
-- 
2.7.4

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [Xen-devel][RFC 3/3] xen/gntdev: Add support for Linux dma buffers

2018-05-21 Thread Dongwon Kim
Still need more time to review all of the code changes, but I noticed one thing.

We've been using the term "hyper_dmabuf" for our hypervisor-agnostic Linux
dmabuf solution, and we are planning to call any of our future solutions for
other hypervisors by the same name. So having the same name for this
xen-specific structure or the functions you implemented is confusing. Would
you change it to something else, like "xen_"?

On Thu, May 17, 2018 at 11:26:04AM +0300, Oleksandr Andrushchenko wrote:
> From: Oleksandr Andrushchenko 
> 
> Signed-off-by: Oleksandr Andrushchenko 
> ---
>  drivers/xen/gntdev.c  | 954 +-
>  include/uapi/xen/gntdev.h | 101 
>  include/xen/gntdev_exp.h  |  23 +
>  3 files changed, 1066 insertions(+), 12 deletions(-)
>  create mode 100644 include/xen/gntdev_exp.h
> 
> diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
> index 9510f228efe9..0ee88e193362 100644
> --- a/drivers/xen/gntdev.c
> +++ b/drivers/xen/gntdev.c
> @@ -4,6 +4,8 @@
>   * Device for accessing (in user-space) pages that have been granted by other
>   * domains.
>   *
> + * DMA buffer implementation is based on drivers/gpu/drm/drm_prime.c.
> + *
>   * Copyright (c) 2006-2007, D G Murray.
>   *   (c) 2009 Gerd Hoffmann 
>   *
> @@ -37,6 +39,9 @@
>  #include 
>  #include 
>  
> +#include 
> +#include 
> +
>  #include 
>  #include 
>  #include 
> @@ -61,16 +66,39 @@ static atomic_t pages_mapped = ATOMIC_INIT(0);
>  static int use_ptemod;
>  #define populate_freeable_maps use_ptemod
>  
> +#ifndef GRANT_INVALID_REF
> +/*
> + * Note on usage of grant reference 0 as invalid grant reference:
> + * grant reference 0 is valid, but never exposed to a driver,
> + * because of the fact it is already in use/reserved by the PV console.
> + */
> +#define GRANT_INVALID_REF0
> +#endif
> +
>  struct gntdev_priv {
>   /* maps with visible offsets in the file descriptor */
>   struct list_head maps;
>   /* maps that are not visible; will be freed on munmap.
>* Only populated if populate_freeable_maps == 1 */
>   struct list_head freeable_maps;
> + /* List of dma-bufs. */
> + struct list_head dma_bufs;
>   /* lock protects maps and freeable_maps */
>   struct mutex lock;
>   struct mm_struct *mm;
>   struct mmu_notifier mn;
> +
> + /* Private data of the hyper DMA buffers. */
> +
> + struct device *dev;
> + /* List of exported DMA buffers. */
> + struct list_head dmabuf_exp_list;
> + /* List of wait objects. */
> + struct list_head dmabuf_exp_wait_list;
> + /* List of imported DMA buffers. */
> + struct list_head dmabuf_imp_list;
> + /* This is the lock which protects dma_buf_xxx lists. */
> + struct mutex dmabuf_lock;
>  };
>  
>  struct unmap_notify {
> @@ -95,10 +123,65 @@ struct grant_map {
>   struct gnttab_unmap_grant_ref *kunmap_ops;
>   struct page **pages;
>   unsigned long pages_vm_start;
> +
> + /*
> +  * All the fields starting with dmabuf_ are only valid if this
> +  * mapping is used for exporting a DMA buffer.
> +  * If dmabuf_vaddr is not NULL then this mapping is backed by DMA
> +  * capable memory.
> +  */
> +
> + /* Flags used to create this DMA buffer: GNTDEV_DMABUF_FLAG_XXX. */
> + bool dmabuf_flags;
> + /* Virtual/CPU address of the DMA buffer. */
> + void *dmabuf_vaddr;
> + /* Bus address of the DMA buffer. */
> + dma_addr_t dmabuf_bus_addr;
> +};
> +
> +struct hyper_dmabuf {
> + struct gntdev_priv *priv;
> + struct dma_buf *dmabuf;
> + struct list_head next;
> + int fd;
> +
> + union {
> + struct {
> + /* Exported buffers are reference counted. */
> + struct kref refcount;
> + struct grant_map *map;
> + } exp;
> + struct {
> + /* Granted references of the imported buffer. */
> + grant_ref_t *refs;
> + /* Scatter-gather table of the imported buffer. */
> + struct sg_table *sgt;
> + /* dma-buf attachment of the imported buffer. */
> + struct dma_buf_attachment *attach;
> + } imp;
> + } u;
> +
> + /* Number of pages this buffer has. */
> + int nr_pages;
> + /* Pages of this buffer. */
> + struct page **pages;
> +};
> +
> +struct hyper_dmabuf_wait_obj {
> + struct list_head next;
> + struct hyper_dmabuf *hyper_dmabuf;
> + struct completion completion;
> +};
> +
> +struct hyper_dambuf_attachment {
minor typo: dam->dma (same thing in other places as well.)

> + struct sg_table *sgt;
> + enum dma_data_direction dir;
>  };
>  
>  static int unmap_grant_pages(struct grant_map *map, int offset, int pages);
>  
> +static struct miscdevice gntdev_miscdev;
> +
>  /* -- */
>  
>  static void g

Re: [PATCH 1/8] xen/grant-table: Make set/clear page private code shared

2018-05-30 Thread Dongwon Kim
On Fri, May 25, 2018 at 06:33:24PM +0300, Oleksandr Andrushchenko wrote:
> From: Oleksandr Andrushchenko 
> 
> Make set/clear page private code shared and accessible to
> other kernel modules which can re-use these instead of open-coding.
> 
> Signed-off-by: Oleksandr Andrushchenko 
> ---
>  drivers/xen/grant-table.c | 54 +--
>  include/xen/grant_table.h |  3 +++
>  2 files changed, 38 insertions(+), 19 deletions(-)
> 
> diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
> index 27be107d6480..d7488226e1f2 100644
> --- a/drivers/xen/grant-table.c
> +++ b/drivers/xen/grant-table.c
> @@ -769,29 +769,18 @@ void gnttab_free_auto_xlat_frames(void)
>  }
>  EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);
>  
> -/**
> - * gnttab_alloc_pages - alloc pages suitable for grant mapping into
> - * @nr_pages: number of pages to alloc
> - * @pages: returns the pages
> - */
> -int gnttab_alloc_pages(int nr_pages, struct page **pages)
> +int gnttab_pages_set_private(int nr_pages, struct page **pages)
>  {
>   int i;
> - int ret;
> -
> - ret = alloc_xenballooned_pages(nr_pages, pages);
> - if (ret < 0)
> - return ret;
>  
>   for (i = 0; i < nr_pages; i++) {
>  #if BITS_PER_LONG < 64
>   struct xen_page_foreign *foreign;
>  
>   foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
> - if (!foreign) {
> - gnttab_free_pages(nr_pages, pages);
> + if (!foreign)

Don't we have to free previously allocated "foreign"(s) if it fails in the 
middle
(e.g. 0 < i && i < nr_pages - 1) before returning?

>   return -ENOMEM;
> - }
> +
>   set_page_private(pages[i], (unsigned long)foreign);
>  #endif
>   SetPagePrivate(pages[i]);
> @@ -799,14 +788,30 @@ int gnttab_alloc_pages(int nr_pages, struct page 
> **pages)
>  
>   return 0;
>  }
> -EXPORT_SYMBOL(gnttab_alloc_pages);
> +EXPORT_SYMBOL(gnttab_pages_set_private);
>  
>  /**
> - * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
> - * @nr_pages; number of pages to free
> - * @pages: the pages
> + * gnttab_alloc_pages - alloc pages suitable for grant mapping into
> + * @nr_pages: number of pages to alloc
> + * @pages: returns the pages
>   */
> -void gnttab_free_pages(int nr_pages, struct page **pages)
> +int gnttab_alloc_pages(int nr_pages, struct page **pages)
> +{
> + int ret;
> +
> + ret = alloc_xenballooned_pages(nr_pages, pages);
> + if (ret < 0)
> + return ret;
> +
> + ret = gnttab_pages_set_private(nr_pages, pages);
> + if (ret < 0)
> + gnttab_free_pages(nr_pages, pages);
> +
> + return ret;
> +}
> +EXPORT_SYMBOL(gnttab_alloc_pages);
> +
> +void gnttab_pages_clear_private(int nr_pages, struct page **pages)
>  {
>   int i;
>  
> @@ -818,6 +823,17 @@ void gnttab_free_pages(int nr_pages, struct page **pages)
>   ClearPagePrivate(pages[i]);
>   }
>   }
> +}
> +EXPORT_SYMBOL(gnttab_pages_clear_private);
> +
> +/**
> + * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
> + * @nr_pages; number of pages to free
> + * @pages: the pages
> + */
> +void gnttab_free_pages(int nr_pages, struct page **pages)
> +{
> + gnttab_pages_clear_private(nr_pages, pages);
>   free_xenballooned_pages(nr_pages, pages);
>  }
>  EXPORT_SYMBOL(gnttab_free_pages);
> diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
> index 2e37741f6b8d..de03f2542bb7 100644
> --- a/include/xen/grant_table.h
> +++ b/include/xen/grant_table.h
> @@ -198,6 +198,9 @@ void gnttab_free_auto_xlat_frames(void);
>  int gnttab_alloc_pages(int nr_pages, struct page **pages);
>  void gnttab_free_pages(int nr_pages, struct page **pages);
>  
> +int gnttab_pages_set_private(int nr_pages, struct page **pages);
> +void gnttab_pages_clear_private(int nr_pages, struct page **pages);
> +
>  int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
>   struct gnttab_map_grant_ref *kmap_ops,
>   struct page **pages, unsigned int count);
> -- 
> 2.17.0
> 
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH 6/8] xen/gntdev: Implement dma-buf export functionality

2018-05-30 Thread Dongwon Kim
On Fri, May 25, 2018 at 06:33:29PM +0300, Oleksandr Andrushchenko wrote:
> From: Oleksandr Andrushchenko 
> 
> 1. Create a dma-buf from grant references provided by the foreign
>domain. By default dma-buf is backed by system memory pages, but
>by providing GNTDEV_DMA_FLAG_XXX flags it can also be created
>as a DMA write-combine/coherent buffer, e.g. allocated with
>corresponding dma_alloc_xxx API.
>Export the resulting buffer as a new dma-buf.
> 
> 2. Implement waiting for the dma-buf to be released: block until the
>dma-buf with the file descriptor provided is released.
>If within the time-out provided the buffer is not released then
>-ETIMEDOUT error is returned. If the buffer with the file descriptor
>does not exist or has already been released, then -ENOENT is returned.
>For valid file descriptors this must not be treated as error.
> 
> Signed-off-by: Oleksandr Andrushchenko 
> ---
>  drivers/xen/gntdev.c | 478 ++-
>  1 file changed, 476 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
> index 9e450622af1a..52abc6cd5846 100644
> --- a/drivers/xen/gntdev.c
> +++ b/drivers/xen/gntdev.c
> @@ -4,6 +4,8 @@
>   * Device for accessing (in user-space) pages that have been granted by other
>   * domains.
>   *
> + * DMA buffer implementation is based on drivers/gpu/drm/drm_prime.c.
> + *
>   * Copyright (c) 2006-2007, D G Murray.
>   *   (c) 2009 Gerd Hoffmann 
>   *   (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
> @@ -41,6 +43,9 @@
>  #ifdef CONFIG_XEN_GRANT_DMA_ALLOC
>  #include 
>  #endif
> +#ifdef CONFIG_XEN_GNTDEV_DMABUF
> +#include 
> +#endif
>  
>  #include 
>  #include 
> @@ -81,6 +86,17 @@ struct gntdev_priv {
>   /* Device for which DMA memory is allocated. */
>   struct device *dma_dev;
>  #endif
> +
> +#ifdef CONFIG_XEN_GNTDEV_DMABUF
> + /* Private data of the hyper DMA buffers. */
> +
> + /* List of exported DMA buffers. */
> + struct list_head dmabuf_exp_list;
> + /* List of wait objects. */
> + struct list_head dmabuf_exp_wait_list;
> + /* This is the lock which protects dma_buf_xxx lists. */
> + struct mutex dmabuf_lock;
> +#endif
>  };
>  
>  struct unmap_notify {
> @@ -125,12 +141,38 @@ struct grant_map {
>  
>  #ifdef CONFIG_XEN_GNTDEV_DMABUF
>  struct xen_dmabuf {
> + struct gntdev_priv *priv;
> + struct dma_buf *dmabuf;
> + struct list_head next;
> + int fd;
> +
>   union {
> + struct {
> + /* Exported buffers are reference counted. */
> + struct kref refcount;
> + struct grant_map *map;
> + } exp;
>   struct {
>   /* Granted references of the imported buffer. */
>   grant_ref_t *refs;
>   } imp;
>   } u;
> +
> + /* Number of pages this buffer has. */
> + int nr_pages;
> + /* Pages of this buffer. */
> + struct page **pages;
> +};
> +
> +struct xen_dmabuf_wait_obj {
> + struct list_head next;
> + struct xen_dmabuf *xen_dmabuf;
> + struct completion completion;
> +};
> +
> +struct xen_dmabuf_attachment {
> + struct sg_table *sgt;
> + enum dma_data_direction dir;
>  };
>  #endif
>  
> @@ -320,6 +362,16 @@ static void gntdev_put_map(struct gntdev_priv *priv, 
> struct grant_map *map)
>   gntdev_free_map(map);
>  }
>  
> +#ifdef CONFIG_XEN_GNTDEV_DMABUF
> +static void gntdev_remove_map(struct gntdev_priv *priv, struct grant_map 
> *map)
> +{
> + mutex_lock(&priv->lock);
> + list_del(&map->next);
> + gntdev_put_map(NULL /* already removed */, map);
> + mutex_unlock(&priv->lock);
> +}
> +#endif
> +
>  /* -- */
>  
>  static int find_grant_ptes(pte_t *pte, pgtable_t token,
> @@ -628,6 +680,12 @@ static int gntdev_open(struct inode *inode, struct file 
> *flip)
>   INIT_LIST_HEAD(&priv->freeable_maps);
>   mutex_init(&priv->lock);
>  
> +#ifdef CONFIG_XEN_GNTDEV_DMABUF
> + mutex_init(&priv->dmabuf_lock);
> + INIT_LIST_HEAD(&priv->dmabuf_exp_list);
> + INIT_LIST_HEAD(&priv->dmabuf_exp_wait_list);
> +#endif
> +
>   if (use_ptemod) {
>   priv->mm = get_task_mm(current);
>   if (!priv->mm) {
> @@ -1053,17 +,433 @@ static long gntdev_ioctl_grant_copy(struct 
> gntdev_priv *priv, void __user *u)
>  /* DMA buffer export support. */
>  /* -- */
>  
> +/* -- */
> +/* Implementation of wait for exported DMA buffer to be released. */
> +/* -- */
> +
> +static void dmabuf_exp_release(struct kref *kref);
> +
> +static struct xen_dmabuf_wait_obj *
> +dmabuf_exp_wai

Re: [Xen-devel] [PATCH v2 9/9] xen/gntdev: Expose gntdev's dma-buf API for in-kernel use

2018-06-06 Thread Dongwon Kim
On Wed, Jun 06, 2018 at 05:51:38PM -0400, Boris Ostrovsky wrote:
> On 06/06/2018 08:46 AM, Oleksandr Andrushchenko wrote:
> > On 06/05/2018 01:36 AM, Boris Ostrovsky wrote:
> >> On 06/01/2018 07:41 AM, Oleksandr Andrushchenko wrote:
> >>> From: Oleksandr Andrushchenko 
> >>>
> >>> Allow creating grant device context for use by kernel modules which
> >>> require functionality, provided by gntdev. Export symbols for dma-buf
> >>> API provided by the module.
> >> Can you give an example of who'd be using these interfaces?
> > There is no use-case at the moment I can think of, but hyper dma-buf
> > [1], [2]
> > I let Intel folks (CCed) to defend this patch as it was done primarily
> > for them
> > and I don't use it in any of my use-cases. So, from this POV it can be
> > dropped,
> > at least from this series.
> 
> 
> Yes, let's drop this until someone actually needs it.
> 
> -boris

I agree. We are working on re-architecting hyper_dmabuf. We would use the zcopy
APIs; however, we are not sure yet whether we are going to do it from the kernel
or from userspace. So please do not expose those for now.

> 
> 
> >>
> >> -boris
> >>
> > [1] https://patchwork.freedesktop.org/series/38207/
> > [2] https://patchwork.freedesktop.org/patch/204447/
> >
> > ___
> > Xen-devel mailing list
> > xen-de...@lists.xenproject.org
> > https://lists.xenproject.org/mailman/listinfo/xen-devel
> 
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [Xen-devel] [PATCH 0/1] drm/xen-zcopy: Add Xen zero-copy helper DRM driver

2018-04-24 Thread Dongwon Kim
Had a meeting with Daniel and talked about bringing out generic
part of hyper-dmabuf to the userspace, which means we most likely
reuse IOCTLs defined in xen-zcopy for our use-case if we follow
his suggestion.

So assuming we use these IOCTLs as they are,
Several things I would like you to double-check..

1. returning gref as is to the user space is still unsafe because
it is a constant, easy to guess and any process that hijacks it can easily
exploit the buffer. So I am wondering if it's possible to keep dmabuf-to
-gref or gref-to-dmabuf in kernel space and add other layers on top
of those in actual IOCTLs to add some safety.. We introduced flink like
hyper_dmabuf_id including random number but many says even that is still
not safe.

2. maybe we could take hypervisor-independent process (e.g. SGT<->page)
out of xen-zcopy and put those in a new helper library. 

3. please consider the case where original DMA-BUF's first offset
and last length are not 0 and PAGE_SIZE respectively. I assume current
xen-zcopy only supports page-aligned buffer with PAGE_SIZE x n big.

thanks,
DW

On Tue, Apr 24, 2018 at 02:59:39PM +0300, Oleksandr Andrushchenko wrote:
> On 04/24/2018 02:54 PM, Daniel Vetter wrote:
> >On Mon, Apr 23, 2018 at 03:10:35PM +0300, Oleksandr Andrushchenko wrote:
> >>On 04/23/2018 02:52 PM, Wei Liu wrote:
> >>>On Fri, Apr 20, 2018 at 02:25:20PM +0300, Oleksandr Andrushchenko wrote:
> >>  the gntdev.
> >>
> >>I think this is generic enough that it could be implemented by a
> >>device not tied to Xen. AFAICT the hyper_dma guys also wanted
> >>something similar to this.
> >You can't just wrap random userspace memory into a dma-buf. We've just 
> >had
> >this discussion with kvm/qemu folks, who proposed just that, and after a
> >bit of discussion they'll now try to have a driver which just wraps a
> >memfd into a dma-buf.
> So, we have to decide either we introduce a new driver
> (say, under drivers/xen/xen-dma-buf) or extend the existing
> gntdev/balloon to support dma-buf use-cases.
> 
> Can anybody from Xen community express their preference here?
> 
> >>>Oleksandr talked to me on IRC about this, he said a few IOCTLs need to
> >>>be added to either existing drivers or a new driver.
> >>>
> >>>I went through this thread twice and skimmed through the relevant
> >>>documents, but I couldn't see any obvious pros and cons for either
> >>>approach. So I don't really have an opinion on this.
> >>>
> >>>But, assuming if implemented in existing drivers, those IOCTLs need to
> >>>be added to different drivers, which means userspace program needs to
> >>>write more code and get more handles, it would be slightly better to
> >>>implement a new driver from that perspective.
> >>If gntdev/balloon extension is still considered:
> >>
> >>All the IOCTLs will be in gntdev driver (in current xen-zcopy terminology):
> I was lazy to change dumb to dma-buf, so put this notice ;)
> >>  - DRM_IOCTL_XEN_ZCOPY_DUMB_FROM_REFS
> >>  - DRM_IOCTL_XEN_ZCOPY_DUMB_TO_REFS
> >>  - DRM_IOCTL_XEN_ZCOPY_DUMB_WAIT_FREE
> >s/DUMB/DMA_BUF/ please. This is generic dma-buf, it has nothing to do with
> >the dumb scanout buffer support in the drm/gfx subsystem. This here can be
> >used for any zcopy sharing among guests (as long as your endpoints
> >understands dma-buf, which most relevant drivers do).
> Of course, please see above
> >-Daniel
> >
> >>Balloon driver extension, which is needed for contiguous/DMA
> >>buffers, will be to provide new *kernel API*, no UAPI is needed.
> >>
> >>>Wei.
> >>Thank you,
> >>Oleksandr
> >>___
> >>dri-devel mailing list
> >>dri-devel@lists.freedesktop.org
> >>https://lists.freedesktop.org/mailman/listinfo/dri-devel
> 
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [Xen-devel] [PATCH 0/1] drm/xen-zcopy: Add Xen zero-copy helper DRM driver

2018-04-25 Thread Dongwon Kim
On Wed, Apr 25, 2018 at 08:34:55AM +0200, Daniel Vetter wrote:
> On Wed, Apr 25, 2018 at 09:07:07AM +0300, Oleksandr Andrushchenko wrote:
> > On 04/24/2018 11:35 PM, Dongwon Kim wrote:
> > > Had a meeting with Daniel and talked about bringing out generic
> > > part of hyper-dmabuf to the userspace, which means we most likely
> > > reuse IOCTLs defined in xen-zcopy for our use-case if we follow
> > > his suggestion.
> > I will still have kernel side API, so backends/frontends implemented
> > in the kernel can access that functionality as well.
> > > 
> > > So assuming we use these IOCTLs as they are,
> > > Several things I would like you to double-check..
> > > 
> > > 1. returning gref as is to the user space is still unsafe because
> > > it is a constant, easy to guess and any process that hijacks it can easily
> > > exploit the buffer. So I am wondering if it's possible to keep dmabuf-to
> > > -gref or gref-to-dmabuf in kernel space and add other layers on top
> > > of those in actual IOCTLs to add some safety.. We introduced flink like
> > > hyper_dmabuf_id including random number but many says even that is still
> > > not safe.
> > Yes, it is generally unsafe. But even if we have implemented
> > the approach you have in hyper-dmabuf or similar, what stops
> > malicious software from doing the same with the existing gntdev UAPI?
> > No need to brute force new UAPI if there is a simpler one.
> > That being said, I'll put security aside at the first stage,
> > but of course we can start investigating ways to improve
> > (I assume you already have use-cases where security issues must
> > be considered, so, probably you can tell more on what was investigated
> > so far).

Yeah, although we think we lowered the chance of guessing the right id
by adding random number to it, the security hole is still there as far
as we use a constant id across VMs. We understood this from the beginning
but couldn't find a better way. So what we proposed is to make sure our
customer understand this and prepare very secure way to handle this id
in the userspace (mattrope however recently proposed a "hyper-pipe" which
FD-type id can be converted and exchanged safely through. So we are looking
into this now.)

And another approach we have proposed is to use event-polling, that lets
the privileged userapp in importing guest to know about a new exported
DMABUF so that it can retrieve it from the queue then redistribute to
other applications. This method is not very flexible however, is one way
to hide ID from userspace completely.

Anyway, yes, we can continue to investigate the possible way to make it
more secure. 

> 
> Maybe a bit more context here:
> 
> So in graphics we have this old flink approach for buffer sharing with
> processes, and it's unsafe because way too easy to guess the buffer
> handles. And anyone with access to the graphics driver can then import
> that buffer object. We switched to file descriptor passing to make sure
> only the intended recipient can import a buffer.
> 
> So at the vm->vm level it sounds like grefs are safe, because they're only
> for a specific other guest (or sets of guests, not sure about). That means
> security is only within the OS. For that you need to make sure that
> unpriviledge userspace simply can't ever access a gref. If that doesn't
> work out, then I guess we should improve the xen gref stuff to have a more
> secure cookie.
> 
> > > 2. maybe we could take hypervisor-independent process (e.g. SGT<->page)
> > > out of xen-zcopy and put those in a new helper library.
> > I believe this can be done, but at the first stage I would go without
> > that helper library, so it is clearly seen what can be moved to it later
> > (I know that you want to run ACRN as well, but can I run it on ARM? ;)
> 
> There's already helpers for walking sgtables and adding pages/enumerating
> pages. I don't think we need more.

ok, where would that helpers be located? If we consider we will use these
with other hypervisor drivers, maybe it's better to place those in some
common area?

> 
> > > 3. please consider the case where original DMA-BUF's first offset
> > > and last length are not 0 and PAGE_SIZE respectively. I assume current
> > > xen-zcopy only supports page-aligned buffer with PAGE_SIZE x n big.
> > Hm, what is the use-case for that?

Just in general use-case.. I was just considering the case (might be corner
case..) where sg->offset != 0 or sg->length != PAGE_SIZE. Hyper dmabuf sends
this information (first offset and last length) together with references for
pages. So I was wonderi

Re: [Xen-devel] [PATCH 0/1] drm/xen-zcopy: Add Xen zero-copy helper DRM driver

2018-04-30 Thread Dongwon Kim
On Wed, Apr 25, 2018 at 08:12:08AM +0200, Juergen Gross wrote:
> On 24/04/18 22:35, Dongwon Kim wrote:
> > Had a meeting with Daniel and talked about bringing out generic
> > part of hyper-dmabuf to the userspace, which means we most likely
> > reuse IOCTLs defined in xen-zcopy for our use-case if we follow
> > his suggestion.
> > 
> > So assuming we use these IOCTLs as they are,
> > Several things I would like you to double-check..
> > 
> > 1. returning gref as is to the user space is still unsafe because
> > it is a constant, easy to guess and any process that hijacks it can easily
> > exploit the buffer. So I am wondering if it's possible to keep dmabuf-to
> > -gref or gref-to-dmabuf in kernel space and add other layers on top
> > of those in actual IOCTLs to add some safety.. We introduced flink like
> > hyper_dmabuf_id including random number but many says even that is still
> > not safe.
> 
> grefs are usable by root only. When you have root access in dom0 you can
> do evil things to all VMs even without using grants. That is in no way
> different to root being able to control all other processes on the
> system.

I honestly didn't know about this. I believed kernel code simply can map those
pages. However, out of curiosity, how is non-root usage of gref prevented in
current design? Is there privilege check in grant table driver or hypercalls
needed by this page mapping is only enabled for root in hypervisor level?

And this is pretty critical information for any use-case using grant-table.
Is there any place(doc/website) this is specified/explained?

Thanks,
DW


> 
> 
> Juergen
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[RFC PATCH v2 0/1] fence per plane state

2023-10-02 Thread Dongwon Kim
The patch "drm/virtio: new fence for every plane update" is to prevent a fence
synchronization problem when multiple planes are referencing a single large FB
(i.e. Xorg with multi displays configured as one extended surface.).

One example of a possible problematic flow is

1.virtio_gpu_plane_prepare_fb(plane_A) -> A fence for the FB is created (fence 
A)
2.virtio_gpu_resource_flush(plane_A) -> Fence A is emitted. Then it waits for 
the fence A
  to be signaled.
3.virtio_gpu_plane_prepare_fb(plane_B) -> A new fence for the FB is created 
(fence B) and
  FB->fence is replaced with fence B.
4.virtio_gpu_resource_flush(plane_A) -> Fence A is signaled but dma_fence_put 
is done for
  fence B because FB->fence = fence B already.
5.fence A won't be signaled or released for a long time, which leads to guest 
display and
  dmesg shows fence timeout errors.

The root-cause for problems is that the fence for the FB can be replaced with 
the new one
anytime another plane with the same FB is updated. So the proposed fix here is 
to allocate
a new fence per the plane state instead of per FB as described in the patch.

Tested system:

Host: QEMU + KVM on Linux running on Intel 12th Gen.
Guest: Ubuntu VM running Xorg w/ 2~3 virtual displays using blob scanouts

Dongwon Kim (1):
  drm/virtio: new fence for every plane update

 drivers/gpu/drm/virtio/virtgpu_drv.h   |  7 +++
 drivers/gpu/drm/virtio/virtgpu_plane.c | 66 +-
 2 files changed, 51 insertions(+), 22 deletions(-)

-- 
2.20.1



[RFC PATCH v2 1/1] drm/virtio: new fence for every plane update

2023-10-02 Thread Dongwon Kim
Having a fence linked to a virtio_gpu_framebuffer in the plane update sequence
would cause conflict when several planes referencing the same framebuffer
(e.g. Xorg screen covering multi-displays configured for an extended mode)
and those planes are updated concurrently. So it is needed to allocate a
fence for every plane state instead of the framebuffer.

The plane state for virtio-gpu, "struct virtio_gpu_plane_state" is added for
this. This structure represents drm_plane_state and it contains the reference
to virtio_gpu_fence, which was previously in "struct virtio_gpu_framebuffer".

"virtio_gpu_plane_duplicate_state" is added as well to create a
virtio_gpu_plane_state on top of duplicated drm plane state.

Several drm helpers were slightly modified accordingly to use the fence in new
plane state structure. virtio_gpu_plane_cleanup_fb was completely removed as
dma_fence_put shouldn't be called here as it can mess up with the ref count
of the fence. The fence should be put after the fence is signaled in
virtio_gpu_resource_flush then released in virtio_gpu_array_add_fence while
the next virtio message is being queued.

Also, the condition for adding fence, (plane->state->fb != new_state->fb) was
removed since we now allocate a new fence for the new plane state even if both
old and new planes are pointing to the same framebuffer.

v2: removed virtio_gpu_plane_duplicate_state as the existing helper,
drm_atomic_helper_plane_destroy_state does the same.

Cc: Dmitry Osipenko 
Cc: Gerd Hoffmann 
Cc: Vivek Kasireddy 
Signed-off-by: Dongwon Kim 
---
 drivers/gpu/drm/virtio/virtgpu_drv.h   |  7 +++
 drivers/gpu/drm/virtio/virtgpu_plane.c | 66 +-
 2 files changed, 51 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h 
b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 8513b671f871..2568ad0c2d44 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -191,6 +191,13 @@ struct virtio_gpu_framebuffer {
 #define to_virtio_gpu_framebuffer(x) \
container_of(x, struct virtio_gpu_framebuffer, base)
 
+struct virtio_gpu_plane_state {
+   struct drm_plane_state base;
+   struct virtio_gpu_fence *fence;
+};
+#define to_virtio_gpu_plane_state(x) \
+   container_of(x, struct virtio_gpu_plane_state, base)
+
 struct virtio_gpu_queue {
struct virtqueue *vq;
spinlock_t qlock;
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c 
b/drivers/gpu/drm/virtio/virtgpu_plane.c
index a2e045f3a000..cd962898023e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -66,11 +66,28 @@ uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
return format;
 }
 
+static struct
+drm_plane_state *virtio_gpu_plane_duplicate_state(struct drm_plane *plane)
+{
+   struct virtio_gpu_plane_state *new;
+
+   if (WARN_ON(!plane->state))
+   return NULL;
+
+   new = kzalloc(sizeof(*new), GFP_KERNEL);
+   if (!new)
+   return NULL;
+
+   __drm_atomic_helper_plane_duplicate_state(plane, &new->base);
+
+   return &new->base;
+}
+
 static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
.update_plane   = drm_atomic_helper_update_plane,
.disable_plane  = drm_atomic_helper_disable_plane,
.reset  = drm_atomic_helper_plane_reset,
-   .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+   .atomic_duplicate_state = virtio_gpu_plane_duplicate_state,
.atomic_destroy_state   = drm_atomic_helper_plane_destroy_state,
 };
 
@@ -128,11 +145,13 @@ static void virtio_gpu_resource_flush(struct drm_plane 
*plane,
struct drm_device *dev = plane->dev;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_framebuffer *vgfb;
+   struct virtio_gpu_plane_state *vgplane_st;
struct virtio_gpu_object *bo;
 
vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+   vgplane_st = to_virtio_gpu_plane_state(plane->state);
bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
-   if (vgfb->fence) {
+   if (vgplane_st->fence) {
struct virtio_gpu_object_array *objs;
 
objs = virtio_gpu_array_alloc(1);
@@ -141,13 +160,12 @@ static void virtio_gpu_resource_flush(struct drm_plane 
*plane,
virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
virtio_gpu_array_lock_resv(objs);
virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
- width, height, objs, vgfb->fence);
+ width, height, objs,
+ vgplane_st->fence);
virtio_gpu_notify(vgdev);
-
-   dma_fence_wait_

Re: [RfC PATCH] Add udmabuf misc device

2018-04-06 Thread Dongwon Kim
On Fri, Apr 06, 2018 at 03:36:03PM +0300, Oleksandr Andrushchenko wrote:
> On 04/06/2018 02:57 PM, Gerd Hoffmann wrote:
> >   Hi,
> >
> >>>I fail to see any common ground for xen-zcopy and udmabuf ...
> >>Does the above mean you can assume that xen-zcopy and udmabuf
> >>can co-exist as two different solutions?
> >Well, udmabuf route isn't fully clear yet, but yes.
> >
> >See also gvt (intel vgpu), where the hypervisor interface is abstracted
> >away into a separate kernel modules even though most of the actual vgpu
> >emulation code is common.
> Thank you for your input, I'm just trying to figure out
> which of the three z-copy solutions intersect and how much
> >>And what about hyper-dmabuf?

xen z-copy solution is pretty similar fundamentally to hyper_dmabuf
in terms of these core sharing feature:

1. the sharing process - import prime/dmabuf from the producer -> extract
underlying pages and get those shared -> return references for shared pages

2. the page sharing mechanism - it uses Xen-grant-table.

And to give you a quick summary of differences as far as I understand
between two implementations (please correct me if I am wrong, Oleksandr.)

1. xen-zcopy is DRM specific - can import only DRM prime buffer
while hyper_dmabuf can export any dmabuf regardless of originator

2. xen-zcopy doesn't seem to have dma-buf synchronization between two VMs
while (as danvet called it as remote dmabuf api sharing) hyper_dmabuf sends
out synchronization message to the exporting VM for synchronization.

3. 1-level references - when using grant-table for sharing pages, there will
be same # of refs (each 8 byte) as # of shared pages, which is passed to
the userspace to be shared with importing VM in case of xen-zcopy. Compared
to this, hyper_dmabuf does multiple level addressing to generate only one
reference id that represents all shared pages.

4. inter VM messaging (hype_dmabuf only) - hyper_dmabuf has inter-vm msg
communication defined for dmabuf synchronization and private data (meta
info that Matt Roper mentioned) exchange.

5. driver-to-driver notification (hyper_dmabuf only) - importing VM gets
notified when a new dmabuf is exported from other VM - uevent can be optionally
generated when this happens.

6. structure - hyper_dmabuf is targeting to provide a generic solution for
inter-domain dmabuf sharing for most hypervisors, which is why it has two
layers as mattrope mentioned, front-end that contains standard API and backend
that is specific to hypervisor.

> >No idea, didn't look at it in detail.
> >
> >Looks pretty complex from a distant view.  Maybe because it tries to
> >build a communication framework using dma-bufs instead of a simple
> >dma-buf passing mechanism.

we started with simple dma-buf sharing but realized there are many
things we need to consider in real use-case, so we added communication
, notification and dma-buf synchronization then re-structured it to 
front-end and back-end (this made things more complicated..) since Xen
was not our only target. Also, we thought passing the reference for the
buffer (hyper_dmabuf_id) is not secure so added uevent mechanism later.

> Yes, I am looking at it now, trying to figure out the full story
> and its implementation. BTW, Intel guys were about to share some
> test application for hyper-dmabuf, maybe I have missed one.
> It could probably better explain the use-cases and the complexity
> they have in hyper-dmabuf.

One example is actually in github. If you want take a look at it, please
visit:

https://github.com/downor/linux_hyper_dmabuf_test/tree/xen/simple_export

> >
> >Like xen-zcopy it seems to depend on the idea that the hypervisor
> >manages all memory it is easy for guests to share pages with the help of
> >the hypervisor.
> So, for xen-zcopy we were not trying to make it generic,
> it just solves display (dumb) zero-copying use-cases for Xen.
> We implemented it as a DRM helper driver because we can't see any
> other use-cases as of now.
> For example, we also have Xen para-virtualized sound driver, but
> its buffer memory usage is not comparable to what display wants
> and it works somewhat differently (e.g. there is no "frame done"
> event, so one can't tell when the sound buffer can be "flipped").
> At the same time, we do not use virtio-gpu, so this could probably
> be one more candidate for shared dma-bufs some day.
> >   Which simply isn't the case on kvm.
> >
> >hyper-dmabuf and xen-zcopy could maybe share code, or hyper-dmabuf build
> >on top of xen-zcopy.
> Hm, I can imagine that: xen-zcopy could be a library code for hyper-dmabuf
> in terms of implementing all that page sharing fun in multiple directions,
> e.g. Host->Guest, Guest->Host, Guest<->Guest.
> But I'll let Matt and Dongwon to comment on that.

I think we can definitely collaborate. Especially, maybe we are using some
outdated sharing mechanism/grant-table mechanism in our Xen backend (thanks
for bringing that up Oleksandr). However, the question is once we collaborate
somehow

Re: [RfC PATCH] Add udmabuf misc device

2018-04-10 Thread Dongwon Kim
On Tue, Apr 10, 2018 at 09:37:53AM +0300, Oleksandr Andrushchenko wrote:
> On 04/06/2018 09:57 PM, Dongwon Kim wrote:
> >On Fri, Apr 06, 2018 at 03:36:03PM +0300, Oleksandr Andrushchenko wrote:
> >>On 04/06/2018 02:57 PM, Gerd Hoffmann wrote:
> >>>   Hi,
> >>>
> >>>>>I fail to see any common ground for xen-zcopy and udmabuf ...
> >>>>Does the above mean you can assume that xen-zcopy and udmabuf
> >>>>can co-exist as two different solutions?
> >>>Well, udmabuf route isn't fully clear yet, but yes.
> >>>
> >>>See also gvt (intel vgpu), where the hypervisor interface is abstracted
> >>>away into a separate kernel modules even though most of the actual vgpu
> >>>emulation code is common.
> >>Thank you for your input, I'm just trying to figure out
> >>which of the three z-copy solutions intersect and how much
> >>>>And what about hyper-dmabuf?
> >xen z-copy solution is pretty similar fundamentally to hyper_dmabuf
> >in terms of these core sharing feature:
> >
> >1. the sharing process - import prime/dmabuf from the producer -> extract
> >underlying pages and get those shared -> return references for shared pages

Another thing is danvet was kind of against to the idea of importing existing
dmabuf/prime buffer and forward it to the other domain due to synchronization
issues. He proposed to make hyper_dmabuf only work as an exporter so that it
can have a full control over the buffer. I think we need to talk about this
further as well.

danvet, can you comment on this topic?

> >
> >2. the page sharing mechanism - it uses Xen-grant-table.
> >
> >And to give you a quick summary of differences as far as I understand
> >between two implementations (please correct me if I am wrong, Oleksandr.)
> >
> >1. xen-zcopy is DRM specific - can import only DRM prime buffer
> >while hyper_dmabuf can export any dmabuf regardless of originator
> Well, this is true. And at the same time this is just a matter
> of extending the API: xen-zcopy is a helper driver designed for
> xen-front/back use-case, so this is why it only has DRM PRIME API
> >
> >2. xen-zcopy doesn't seem to have dma-buf synchronization between two VMs
> >while (as danvet called it as remote dmabuf api sharing) hyper_dmabuf sends
> >out synchronization message to the exporting VM for synchronization.
> This is true. Again, this is because of the use-cases it covers.
> But having synchronization for a generic solution seems to be a good idea.

Yeah, understood xen-zcopy works ok with your use case. But I am just curious
if it is ok not to have any inter-domain synchronization in this sharing model.
The buffer being shared is technically dma-buf and originator needs to be able
to keep track of it.

> >
> >3. 1-level references - when using grant-table for sharing pages, there will
> >be same # of refs (each 8 byte)
> To be precise, grant ref is 4 bytes
You are right. Thanks for correction.;)

> >as # of shared pages, which is passed to
> >the userspace to be shared with importing VM in case of xen-zcopy.
> The reason for that is that xen-zcopy is a helper driver, e.g.
> the grant references come from the display backend [1], which implements
> Xen display protocol [2]. So, effectively the backend extracts references
> from frontend's requests and passes those to xen-zcopy as an array
> of refs.
> >  Compared
> >to this, hyper_dmabuf does multiple level addressing to generate only one
> >reference id that represents all shared pages.
> In the protocol [2] only one reference to the gref directory is passed
> between VMs
> (and the gref directory is a single-linked list of shared pages containing
> all
> of the grefs of the buffer).

ok, good to know. I will look into its implementation in more details but is
this gref directory (chained grefs) something that can be used for any general
memory sharing use case or is it jsut for xen-display (in current code base)?

> 
> >
> >4. inter VM messaging (hype_dmabuf only) - hyper_dmabuf has inter-vm msg
> >communication defined for dmabuf synchronization and private data (meta
> >info that Matt Roper mentioned) exchange.
> This is true, xen-zcopy has no means for inter VM sync and meta-data,
> simply because it doesn't have any code for inter VM exchange in it,
> e.g. the inter VM protocol is handled by the backend [1].
> >
> >5. driver-to-driver notification (hyper_dmabuf only) - importing VM gets
> >notified when newdmabuf is exported from other VM - uevent can be optionally
> >generated when this happens.
> >
> >6. structure - hyper_dmabuf is target

buffer sharing across VMs - xen-zcopy and hyper_dmabuf discussion

2018-04-12 Thread Dongwon Kim
(changed subject and decoupling from udmabuf thread)

On Wed, Apr 11, 2018 at 08:59:32AM +0300, Oleksandr Andrushchenko wrote:
> On 04/10/2018 08:26 PM, Dongwon Kim wrote:
> >On Tue, Apr 10, 2018 at 09:37:53AM +0300, Oleksandr Andrushchenko wrote:
> >>On 04/06/2018 09:57 PM, Dongwon Kim wrote:
> >>>On Fri, Apr 06, 2018 at 03:36:03PM +0300, Oleksandr Andrushchenko wrote:
> >>>>On 04/06/2018 02:57 PM, Gerd Hoffmann wrote:
> >>>>>   Hi,
> >>>>>
> >>>>>>>I fail to see any common ground for xen-zcopy and udmabuf ...
> >>>>>>Does the above mean you can assume that xen-zcopy and udmabuf
> >>>>>>can co-exist as two different solutions?
> >>>>>Well, udmabuf route isn't fully clear yet, but yes.
> >>>>>
> >>>>>See also gvt (intel vgpu), where the hypervisor interface is abstracted
> >>>>>away into a separate kernel modules even though most of the actual vgpu
> >>>>>emulation code is common.
> >>>>Thank you for your input, I'm just trying to figure out
> >>>>which of the three z-copy solutions intersect and how much
> >>>>>>And what about hyper-dmabuf?
> >>>xen z-copy solution is pretty similar fundamentally to hyper_dmabuf
> >>>in terms of these core sharing feature:
> >>>
> >>>1. the sharing process - import prime/dmabuf from the producer -> extract
> >>>underlying pages and get those shared -> return references for shared pages
> >Another thing is danvet was kind of against to the idea of importing existing
> >dmabuf/prime buffer and forward it to the other domain due to synchronization
> >issues. He proposed to make hyper_dmabuf only work as an exporter so that it
> >can have a full control over the buffer. I think we need to talk about this
> >further as well.
> Yes, I saw this. But this limits the use-cases so much.

I agree. Our current approach is a lot more flexible. You can find very
similar feedback in my reply to those review messages. However, I also
understand Daniel's concern as well. I believe we need more dicussion
regarding this matter.

> For instance, running Android as a Guest (which uses ION to allocate
> buffers) means that finally HW composer will import dma-buf into
> the DRM driver. Then, in case of xen-front for example, it needs to be
> shared with the backend (Host side). Of course, we can change user-space
> to make xen-front allocate the buffers (make it exporter), but what we try
> to avoid is to change user-space which in normal world would have remain
> unchanged otherwise.
> So, I do think we have to support this use-case and just have to understand
> the complexity.
> 
> >
> >danvet, can you comment on this topic?
> >
> >>>2. the page sharing mechanism - it uses Xen-grant-table.
> >>>
> >>>And to give you a quick summary of differences as far as I understand
> >>>between two implementations (please correct me if I am wrong, Oleksandr.)
> >>>
> >>>1. xen-zcopy is DRM specific - can import only DRM prime buffer
> >>>while hyper_dmabuf can export any dmabuf regardless of originator
> >>Well, this is true. And at the same time this is just a matter
> >>of extending the API: xen-zcopy is a helper driver designed for
> >>xen-front/back use-case, so this is why it only has DRM PRIME API
> >>>2. xen-zcopy doesn't seem to have dma-buf synchronization between two VMs
> >>>while (as danvet called it as remote dmabuf api sharing) hyper_dmabuf sends
> >>>out synchronization message to the exporting VM for synchronization.
> >>This is true. Again, this is because of the use-cases it covers.
> >>But having synchronization for a generic solution seems to be a good idea.
> >Yeah, understood xen-zcopy works ok with your use case. But I am just curious
> >if it is ok not to have any inter-domain synchronization in this sharing 
> >model.
> The synchronization is done with displif protocol [1]
> >The buffer being shared is technically dma-buf and originator needs to be 
> >able
> >to keep track of it.
> As I am working in DRM terms the tracking is done by the DRM core
> for me for free. (This might be one of the reasons Daniel sees DRM
> based implementation fit very good from code-reuse POV).

yeah but once you have a DRM object (whether it's dmabuf or not) on a remote
domain, it is totally new object and out of sync (correct me if I am wrong)
with original DRM prime, isn't it? How could these two different but based on
same pages be 

Re: [PATCH 0/1] drm/xen-zcopy: Add Xen zero-copy helper DRM driver

2018-04-16 Thread Dongwon Kim
Yeah, I definitely agree on the idea of expanding the use case to the 
general domain where dmabuf sharing is used. However, what you are
targetting with proposed changes is identical to the core design of
hyper_dmabuf.

On top of this basic functionalities, hyper_dmabuf has driver level
inter-domain communication, that is needed for dma-buf remote tracking
(no fence forwarding though), event triggering and event handling, extra
meta data exchange and hyper_dmabuf_id that represents grefs
(grefs are shared implicitly on driver level)

Also it is designed with frontend (common core framework) + backend
(hypervisor specific comm and memory sharing) structure for portability.
We just can't limit this feature to Xen because we want to use the same
uapis not only for Xen but also other applicable hypervisor, like ACRN.

So I am wondering we can start with this hyper_dmabuf then modify it for
your use-case if needed and polish and fix any glitches if we want to 
to use this for all general dma-buf usecases.

Also, I still have one unresolved question regarding the export/import flow
in both of hyper_dmabuf and xen-zcopy.

@danvet: Would this flow (guest1->import existing dmabuf->share underlying
pages->guest2->map shared pages->create/export dmabuf) be acceptable now?

Regards,
DW
 
On Mon, Apr 16, 2018 at 05:33:46PM +0300, Oleksandr Andrushchenko wrote:
> Hello, all!
> 
> After discussing xen-zcopy and hyper-dmabuf [1] approaches
> 
> it seems that xen-zcopy can be made not depend on DRM core any more
> 
> and be dma-buf centric (which it in fact is).
> 
> The DRM code was mostly there for dma-buf's FD import/export
> 
> with DRM PRIME UAPI and with DRM use-cases in mind, but it comes out that if
> 
> the proposed 2 IOCTLs (DRM_XEN_ZCOPY_DUMB_FROM_REFS and
> DRM_XEN_ZCOPY_DUMB_TO_REFS)
> 
> are extended to also provide a file descriptor of the corresponding dma-buf,
> then
> 
> PRIME stuff in the driver is not needed anymore.
> 
> That being said, xen-zcopy can safely be detached from DRM and moved from
> 
> drivers/gpu/drm/xen into drivers/xen/dma-buf-backend(?).
> 
> This driver then becomes a universal way to turn any shared buffer between
> Dom0/DomD
> 
> and DomU(s) into a dma-buf, e.g. one can create a dma-buf from any grant
> references
> 
> or represent a dma-buf as grant-references for export.
> 
> This way the driver can be used not only for DRM use-cases, but also for
> other
> 
> use-cases which may require zero copying between domains.
> 
> For example, the use-cases we are about to work in the nearest future will
> use
> 
> V4L, e.g. we plan to support cameras, codecs etc. and all these will benefit
> 
> from zero copying much. Potentially, even block/net devices may benefit,
> 
> but this needs some evaluation.
> 
> 
> I would love to hear comments for authors of the hyper-dmabuf
> 
> and Xen community, as well as DRI-Devel and other interested parties.
> 
> 
> Thank you,
> 
> Oleksandr
> 
> 
> On 03/29/2018 04:19 PM, Oleksandr Andrushchenko wrote:
> >From: Oleksandr Andrushchenko 
> >
> >Hello!
> >
> >When using Xen PV DRM frontend driver then on backend side one will need
> >to do copying of display buffers' contents (filled by the
> >frontend's user-space) into buffers allocated at the backend side.
> >Taking into account the size of display buffers and frames per seconds
> >it may result in unneeded huge data bus occupation and performance loss.
> >
> >This helper driver allows implementing zero-copying use-cases
> >when using Xen para-virtualized frontend display driver by
> >implementing a DRM/KMS helper driver running on backend's side.
> >It utilizes PRIME buffers API to share frontend's buffers with
> >physical device drivers on backend's side:
> >
> >  - a dumb buffer created on backend's side can be shared
> >with the Xen PV frontend driver, so it directly writes
> >into backend's domain memory (into the buffer exported from
> >DRM/KMS driver of a physical display device)
> >  - a dumb buffer allocated by the frontend can be imported
> >into physical device DRM/KMS driver, thus allowing to
> >achieve no copying as well
> >
> >For that reason number of IOCTLs are introduced:
> >  -  DRM_XEN_ZCOPY_DUMB_FROM_REFS
> > This will create a DRM dumb buffer from grant references provided
> > by the frontend
> >  - DRM_XEN_ZCOPY_DUMB_TO_REFS
> >This will grant references to a dumb/display buffer's memory provided
> >by the backend
> >  - DRM_XEN_ZCOPY_DUMB_WAIT_FREE
> >This will block until the dumb buffer with the wait handle provided
> >be freed
> >
> >With this helper driver I was able to drop CPU usage from 17% to 3%
> >on Renesas R-Car M3 board.
> >
> >This was tested with Renesas' Wayland-KMS and backend running as DRM master.
> >
> >Thank you,
> >Oleksandr
> >
> >Oleksandr Andrushchenko (1):
> >   drm/xen-zcopy: Add Xen zero-copy helper DRM driver
> >
> >  Documentation/gpu/drivers.rst   |   1 +
> >  Documentation/gpu/xen-zcopy.rst

Re: [PATCH 0/1] drm/xen-zcopy: Add Xen zero-copy helper DRM driver

2018-04-17 Thread Dongwon Kim
On Tue, Apr 17, 2018 at 09:59:28AM +0200, Daniel Vetter wrote:
> On Mon, Apr 16, 2018 at 12:29:05PM -0700, Dongwon Kim wrote:
> > Yeah, I definitely agree on the idea of expanding the use case to the 
> > general domain where dmabuf sharing is used. However, what you are
> > targeting with proposed changes is identical to the core design of
> > hyper_dmabuf.
> > 
> > On top of this basic functionalities, hyper_dmabuf has driver level
> > inter-domain communication, that is needed for dma-buf remote tracking
> > (no fence forwarding though), event triggering and event handling, extra
> > meta data exchange and hyper_dmabuf_id that represents grefs
> > (grefs are shared implicitly on driver level)
> 
> This really isn't a positive design aspect of hyperdmabuf imo. The core
> code in xen-zcopy (ignoring the ioctl side, which will be cleaned up) is
> very simple & clean.
> 
> If there's a clear need later on we can extend that. But for now xen-zcopy
> seems to cover the basic use-case needs, so gets the job done.
> 
> > Also it is designed with frontend (common core framework) + backend
> > (hyper visor specific comm and memory sharing) structure for portability.
> > We just can't limit this feature to Xen because we want to use the same
> > uapis not only for Xen but also other applicable hypervisors, like ACRN.
> 
> See the discussion around udmabuf and the needs for kvm. I think trying to
> make an ioctl/uapi that works for multiple hypervisors is misguided - it
> likely won't work.
> 
> On top of that the 2nd hypervisor you're aiming to support is ACRN. That's
> not even upstream yet, nor have I seen any patches proposing to land linux
> support for ACRN. Since it's not upstream, it doesn't really matter for
> upstream consideration. I'm doubting that ACRN will use the same grant
> references as xen, so the same uapi won't work on ACRN as on Xen anyway.

Yeah, ACRN doesn't have grant-table. Only Xen supports it. But that is why
hyper_dmabuf has been architected with the concept of backend.
If you look at the structure of backend, you will find that
backend is just a set of standard function calls as shown here:

struct hyper_dmabuf_bknd_ops {
/* backend initialization routine (optional) */
int (*init)(void);

/* backend cleanup routine (optional) */
int (*cleanup)(void);

/* retrieving id of current virtual machine */
int (*get_vm_id)(void);

/* get pages shared via hypervisor-specific method */
int (*share_pages)(struct page **pages, int vm_id,
   int nents, void **refs_info);

/* make shared pages unshared via hypervisor specific method */
int (*unshare_pages)(void **refs_info, int nents);

/* map remotely shared pages on importer's side via
 * hypervisor-specific method
 */
struct page ** (*map_shared_pages)(unsigned long ref, int vm_id,
   int nents, void **refs_info);

/* unmap and free shared pages on importer's side via
 * hypervisor-specific method
 */
int (*unmap_shared_pages)(void **refs_info, int nents);

/* initialize communication environment */
int (*init_comm_env)(void);

void (*destroy_comm)(void);

/* upstream ch setup (receiving and responding) */
int (*init_rx_ch)(int vm_id);

/* downstream ch setup (transmitting and parsing responses) */
int (*init_tx_ch)(int vm_id);

int (*send_req)(int vm_id, struct hyper_dmabuf_req *req, int wait);
};

All of these can be mapped with any hypervisor specific implementation.
We designed backend implementation for Xen using grant-table, Xen event
and ring buffer communication. For ACRN, we have another backend using Virt-IO
for both memory sharing and communication.

We tried to define this structure of backend to make it general enough (or
it can be even modified or extended to support more cases.) so that it can
fit to other hypervisor cases. Only requirements/expectation on the hypervisor
are page-level memory sharing and inter-domain communication, which I think
are standard features of modern hypervisor.

And please review common UAPIs that hyper_dmabuf and xen-zcopy supports. They
are very general. One is getting FD (dmabuf) and get those shared. The other
is generating dmabuf from global handle (secure handle hiding gref behind it).
On top of this, hyper_dmabuf has "unshare" and "query" which are also useful
for any cases.

So I don't know why we wouldn't want to try to make these standard in most of
hypervisor cases instead of limiting it to certain hypervisor like Xen.
Frontend-backend structure is optimal for this, I think.

> 
>

Re: [Xen-devel] [PATCH 0/1] drm/xen-zcopy: Add Xen zero-copy helper DRM driver

2018-04-18 Thread Dongwon Kim
On Wed, Apr 18, 2018 at 03:42:29PM +0300, Oleksandr Andrushchenko wrote:
> On 04/18/2018 01:55 PM, Roger Pau Monné wrote:
> >On Wed, Apr 18, 2018 at 01:39:35PM +0300, Oleksandr Andrushchenko wrote:
> >>On 04/18/2018 01:18 PM, Paul Durrant wrote:
> >>>>-Original Message-
> >>>>From: Xen-devel [mailto:xen-devel-boun...@lists.xenproject.org] On Behalf
> >>>>Of Roger Pau Monné
> >>>>Sent: 18 April 2018 11:11
> >>>>To: Oleksandr Andrushchenko 
> >>>>Cc: jgr...@suse.com; Artem Mygaiev ;
> >>>>Dongwon Kim ; airl...@linux.ie;
> >>>>oleksandr_andrushche...@epam.com; linux-ker...@vger.kernel.org; dri-
> >>>>de...@lists.freedesktop.org; Potrola, MateuszX
> >>>>; xen-de...@lists.xenproject.org;
> >>>>daniel.vet...@intel.com; boris.ostrov...@oracle.com; Matt Roper
> >>>>
> >>>>Subject: Re: [Xen-devel] [PATCH 0/1] drm/xen-zcopy: Add Xen zero-copy
> >>>>helper DRM driver
> >>>>
> >>>>On Wed, Apr 18, 2018 at 11:01:12AM +0300, Oleksandr Andrushchenko
> >>>>wrote:
> >>>>>On 04/18/2018 10:35 AM, Roger Pau Monné wrote:
> >>>>After speaking with Oleksandr on IRC, I think the main usage of the
> >>>>gntdev extension is to:
> >>>>
> >>>>1. Create a dma-buf from a set of grant references.
> >>>>2. Share dma-buf and get a list of grant references.
> >>>>
> >>>>I think this set of operations could be broken into:
> >>>>
> >>>>1.1 Map grant references into user-space using the gntdev.
> >>>>1.2 Create a dma-buf out of a set of user-space virtual addresses.
> >>>>
> >>>>2.1 Map a dma-buf into user-space.
> >>>>2.2 Get grefs out of the user-space addresses where the dma-buf is
> >>>>  mapped.
> >>>>
> >>>>So it seems like what's actually missing is a way to:
> >>>>
> >>>>   - Create a dma-buf from a list of user-space virtual addresses.
> >>>>   - Allow to map a dma-buf into user-space, so it can then be used with
> >>>> the gntdev.
> >>>>
> >>>>I think this is generic enough that it could be implemented by a
> >>>>device not tied to Xen. AFAICT the hyper_dma guys also wanted
> >>>>something similar to this.
> >>Ok, so just to summarize, xen-zcopy/hyper-dmabuf as they are now,
> >>are no go from your POV?

FYI,

our use-case is "surface sharing" or "graphic obj sharing" where a client
application in one guest renders and exports this render target (e.g. EGL surface)
as dma-buf. This dma-buf is then exported to another guest/host via hyper_dmabuf
drv where a compositor is running. This importing domain creates a dmabuf with
shared reference then it is imported as EGL image that later can be used as
texture object via EGL api. Mapping dmabuf to the userspace or vice versa
might be possible with modifying user space drivers/applications but it is an
unnecessary extra step from our perspective. Also, we want to keep all objects
in the kernel level.

> >My opinion is that there seems to be a more generic way to implement
> >this, and thus I would prefer that one.
> >
> >>Instead, we have to make all that fancy stuff
> >>with VAs <-> device-X and have that device-X driver live out of drivers/xen
> >>as it is not a Xen specific driver?
> >That would be my preference if feasible, simply because it can be
> >reused by other use-cases that need to create dma-bufs in user-space.
> There is a use-case I have: a display unit on my target has a DMA
> controller which can't do scatter-gather, e.g. it only expects a
> single starting address of the buffer.
> In order to create a dma-buf from grefs in this case
> I allocate memory with dma_alloc_xxx and then balloon pages of the
> buffer and finally map grefs onto this DMA buffer.
> This way I can give this shared buffer to the display unit as its bus
> addresses are contiguous.
> 
> With the proposed solution (gntdev + device-X) I won't be able to achieve
> this,
> as I have no control over from where gntdev/balloon drivers get the pages
> (even more, those can easily be out of DMA address space of the display
> unit).
> 
> Thus, even if implemented, I can't use this approach.
> >
> >In any case I just knew about dma-bufs this morning, there might be
> >things that I'm missing.
> >
> >Roger.
> 
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH 0/1] drm/xen-zcopy: Add Xen zero-copy helper DRM driver

2018-04-18 Thread Dongwon Kim
On Wed, Apr 18, 2018 at 09:38:39AM +0300, Oleksandr Andrushchenko wrote:
> On 04/17/2018 11:57 PM, Dongwon Kim wrote:
> >On Tue, Apr 17, 2018 at 09:59:28AM +0200, Daniel Vetter wrote:
> >>On Mon, Apr 16, 2018 at 12:29:05PM -0700, Dongwon Kim wrote:
> >>>Yeah, I definitely agree on the idea of expanding the use case to the
> >>>general domain where dmabuf sharing is used. However, what you are
> >>>targetting with proposed changes is identical to the core design of
> >>>hyper_dmabuf.
> >>>
> >>>On top of this basic functionalities, hyper_dmabuf has driver level
> >>>inter-domain communication, that is needed for dma-buf remote tracking
> >>>(no fence forwarding though), event triggering and event handling, extra
> >>>meta data exchange and hyper_dmabuf_id that represents grefs
> >>>(grefs are shared implicitly on driver level)
> >>This really isn't a positive design aspect of hyperdmabuf imo. The core
> >>code in xen-zcopy (ignoring the ioctl side, which will be cleaned up) is
> >>very simple & clean.
> >>
> >>If there's a clear need later on we can extend that. But for now xen-zcopy
> >>seems to cover the basic use-case needs, so gets the job done.
> >>
> >>>Also it is designed with frontend (common core framework) + backend
> >>>(hyper visor specific comm and memory sharing) structure for portability.
> >>>We just can't limit this feature to Xen because we want to use the same
> >>>uapis not only for Xen but also other applicable hypervisor, like ACORN.
> >>See the discussion around udmabuf and the needs for kvm. I think trying to
> >>make an ioctl/uapi that works for multiple hypervisors is misguided - it
> >>likely won't work.
> >>
> >>On top of that the 2nd hypervisor you're aiming to support is ACRN. That's
> >>not even upstream yet, nor have I seen any patches proposing to land linux
> >>support for ACRN. Since it's not upstream, it doesn't really matter for
> >>upstream consideration. I'm doubting that ACRN will use the same grant
> >>references as xen, so the same uapi won't work on ACRN as on Xen anyway.
> >Yeah, ACRN doesn't have grant-table. Only Xen supports it. But that is why
> >hyper_dmabuf has been architectured with the concept of backend.
> >If you look at the structure of backend, you will find that
> >backend is just a set of standard function calls as shown here:
> >
> >struct hyper_dmabuf_bknd_ops {
> > /* backend initialization routine (optional) */
> > int (*init)(void);
> >
> > /* backend cleanup routine (optional) */
> > int (*cleanup)(void);
> >
> > /* retrieving id of current virtual machine */
> > int (*get_vm_id)(void);
> >
> > /* get pages shared via hypervisor-specific method */
> > int (*share_pages)(struct page **pages, int vm_id,
> >int nents, void **refs_info);
> >
> > /* make shared pages unshared via hypervisor specific method */
> > int (*unshare_pages)(void **refs_info, int nents);
> >
> > /* map remotely shared pages on importer's side via
> >  * hypervisor-specific method
> >  */
> > struct page ** (*map_shared_pages)(unsigned long ref, int vm_id,
> >int nents, void **refs_info);
> >
> > /* unmap and free shared pages on importer's side via
> >  * hypervisor-specific method
> >  */
> > int (*unmap_shared_pages)(void **refs_info, int nents);
> >
> > /* initialize communication environment */
> > int (*init_comm_env)(void);
> >
> > void (*destroy_comm)(void);
> >
> > /* upstream ch setup (receiving and responding) */
> > int (*init_rx_ch)(int vm_id);
> >
> > /* downstream ch setup (transmitting and parsing responses) */
> > int (*init_tx_ch)(int vm_id);
> >
> > int (*send_req)(int vm_id, struct hyper_dmabuf_req *req, int wait);
> >};
> >
> >All of these can be mapped with any hypervisor specific implementation.
> >We designed backend implementation for Xen using grant-table, Xen event
> >and ring buffer communication. For ACRN, we have another backend using 
> >Virt-IO
> >for both memory sharing and communication.
> >
> >We tried to define this structure of backend to make it

Re: [PATCH 0/1] drm/xen-zcopy: Add Xen zero-copy helper DRM driver

2018-04-19 Thread Dongwon Kim
On Thu, Apr 19, 2018 at 11:14:02AM +0300, Oleksandr Andrushchenko wrote:
> On 04/18/2018 08:01 PM, Dongwon Kim wrote:
> >On Wed, Apr 18, 2018 at 09:38:39AM +0300, Oleksandr Andrushchenko wrote:
> >>On 04/17/2018 11:57 PM, Dongwon Kim wrote:
> >>>On Tue, Apr 17, 2018 at 09:59:28AM +0200, Daniel Vetter wrote:
> >>>>On Mon, Apr 16, 2018 at 12:29:05PM -0700, Dongwon Kim wrote:
> >>>>>Yeah, I definitely agree on the idea of expanding the use case to the
> >>>>>general domain where dmabuf sharing is used. However, what you are
> >>>>>targetting with proposed changes is identical to the core design of
> >>>>>hyper_dmabuf.
> >>>>>
> >>>>>On top of this basic functionalities, hyper_dmabuf has driver level
> >>>>>inter-domain communication, that is needed for dma-buf remote tracking
> >>>>>(no fence forwarding though), event triggering and event handling, extra
> >>>>>meta data exchange and hyper_dmabuf_id that represents grefs
> >>>>>(grefs are shared implicitly on driver level)
> >>>>This really isn't a positive design aspect of hyperdmabuf imo. The core
> >>>>code in xen-zcopy (ignoring the ioctl side, which will be cleaned up) is
> >>>>very simple & clean.
> >>>>
> >>>>If there's a clear need later on we can extend that. But for now xen-zcopy
> >>>>seems to cover the basic use-case needs, so gets the job done.
> >>>>
> >>>>>Also it is designed with frontend (common core framework) + backend
> >>>>>(hyper visor specific comm and memory sharing) structure for portability.
> >>>>>We just can't limit this feature to Xen because we want to use the same
> >>>>>uapis not only for Xen but also other applicable hypervisor, like ACORN.
> >>>>See the discussion around udmabuf and the needs for kvm. I think trying to
> >>>>make an ioctl/uapi that works for multiple hypervisors is misguided - it
> >>>>likely won't work.
> >>>>
> >>>>On top of that the 2nd hypervisor you're aiming to support is ACRN. That's
> >>>>not even upstream yet, nor have I seen any patches proposing to land linux
> >>>>support for ACRN. Since it's not upstream, it doesn't really matter for
> >>>>upstream consideration. I'm doubting that ACRN will use the same grant
> >>>>references as xen, so the same uapi won't work on ACRN as on Xen anyway.
> >>>Yeah, ACRN doesn't have grant-table. Only Xen supports it. But that is why
> >>>hyper_dmabuf has been architectured with the concept of backend.
> >>>If you look at the structure of backend, you will find that
> >>>backend is just a set of standard function calls as shown here:
> >>>
> >>>struct hyper_dmabuf_bknd_ops {
> >>> /* backend initialization routine (optional) */
> >>> int (*init)(void);
> >>>
> >>> /* backend cleanup routine (optional) */
> >>> int (*cleanup)(void);
> >>>
> >>> /* retrieving id of current virtual machine */
> >>> int (*get_vm_id)(void);
> >>>
> >>> /* get pages shared via hypervisor-specific method */
> >>> int (*share_pages)(struct page **pages, int vm_id,
> >>>int nents, void **refs_info);
> >>>
> >>> /* make shared pages unshared via hypervisor specific method */
> >>> int (*unshare_pages)(void **refs_info, int nents);
> >>>
> >>> /* map remotely shared pages on importer's side via
> >>>  * hypervisor-specific method
> >>>  */
> >>> struct page ** (*map_shared_pages)(unsigned long ref, int vm_id,
> >>>int nents, void **refs_info);
> >>>
> >>> /* unmap and free shared pages on importer's side via
> >>>  * hypervisor-specific method
> >>>  */
> >>> int (*unmap_shared_pages)(void **refs_info, int nents);
> >>>
> >>> /* initialize communication environment */
> >>> int (*init_comm_env)(void);
> >>>
> >>> void (*destroy_comm)(void);
> >>>
> >>> /* upstream ch setup (receiving and respond

[RFC PATCH v2 8/9] hyper_dmabuf: event-polling mechanism for detecting a new hyper_DMABUF

2018-02-13 Thread Dongwon Kim
New method based on polling for an importing VM to know about a new
hyper_DMABUF exported to it.

For this, the userspace now can poll the device node to check if
there is a new event, which is created if there's a new hyper_DMABUF
available in the importing VM (just exported).

A poll function call was added to the device driver interface for this
new functionality. Event-generation functionality was also implemented in
all other relevant parts of the driver.

This "event-polling" mechanism is optional feature and can be enabled
by setting a Kernel config option, "HYPER_DMABUF_EVENT_GEN".

Signed-off-by: Dongwon Kim 
Signed-off-by: Mateusz Polrola 
---
 drivers/dma-buf/hyper_dmabuf/Kconfig  |  20 +++
 drivers/dma-buf/hyper_dmabuf/Makefile |   1 +
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.c   | 146 ++
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.h   |  11 ++
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.c | 122 ++
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.h |  38 ++
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.c  |   1 +
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.c   |  11 ++
 include/uapi/linux/hyper_dmabuf.h |  11 ++
 9 files changed, 361 insertions(+)
 create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.c
 create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.h

diff --git a/drivers/dma-buf/hyper_dmabuf/Kconfig 
b/drivers/dma-buf/hyper_dmabuf/Kconfig
index 68f3d6ce2c1f..92510731af25 100644
--- a/drivers/dma-buf/hyper_dmabuf/Kconfig
+++ b/drivers/dma-buf/hyper_dmabuf/Kconfig
@@ -20,6 +20,16 @@ config HYPER_DMABUF_SYSFS
 
  The location of sysfs is under ""
 
+config HYPER_DMABUF_EVENT_GEN
+bool "Enable event-generation and polling operation"
+default n
+depends on HYPER_DMABUF
+help
+  With this config enabled, hyper_dmabuf driver on the importer side
+  generates events and queue those up in the event list whenever a new
+  shared DMA-BUF is available. Events in the list can be retrieved by
+  read operation.
+
 config HYPER_DMABUF_XEN
 bool "Configure hyper_dmabuf for XEN hypervisor"
 default y
@@ -27,4 +37,14 @@ config HYPER_DMABUF_XEN
 help
   Enabling Hyper_DMABUF Backend for XEN hypervisor
 
+config HYPER_DMABUF_XEN_AUTO_RX_CH_ADD
+bool "Enable automatic rx-ch add with 10 secs interval"
+default y
+depends on HYPER_DMABUF && HYPER_DMABUF_XEN
+help
+  If enabled, driver reads a node in xenstore every 10 seconds
+  to check whether there is any tx comm ch configured by another
+  domain then initialize matched rx comm ch automatically for any
+  existing tx comm chs.
+
 endmenu
diff --git a/drivers/dma-buf/hyper_dmabuf/Makefile 
b/drivers/dma-buf/hyper_dmabuf/Makefile
index 578a669a0d3e..f573dd5c4054 100644
--- a/drivers/dma-buf/hyper_dmabuf/Makefile
+++ b/drivers/dma-buf/hyper_dmabuf/Makefile
@@ -11,6 +11,7 @@ ifneq ($(KERNELRELEASE),)
 hyper_dmabuf_id.o \
 hyper_dmabuf_remote_sync.o \
 hyper_dmabuf_query.o \
+hyper_dmabuf_event.o \
 
 ifeq ($(CONFIG_HYPER_DMABUF_XEN), y)
$(TARGET_MODULE)-objs += backends/xen/hyper_dmabuf_xen_comm.o \
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.c 
b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.c
index 3320f9dcc769..087f091ccae9 100644
--- a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.c
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.c
@@ -41,6 +41,7 @@
 #include "hyper_dmabuf_ioctl.h"
 #include "hyper_dmabuf_list.h"
 #include "hyper_dmabuf_id.h"
+#include "hyper_dmabuf_event.h"
 
 #ifdef CONFIG_HYPER_DMABUF_XEN
 #include "backends/xen/hyper_dmabuf_xen_drv.h"
@@ -91,10 +92,138 @@ static int hyper_dmabuf_release(struct inode *inode, 
struct file *filp)
return 0;
 }
 
+#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN
+
+static unsigned int hyper_dmabuf_event_poll(struct file *filp,
+struct poll_table_struct *wait)
+{
+   poll_wait(filp, &hy_drv_priv->event_wait, wait);
+
+   if (!list_empty(&hy_drv_priv->event_list))
+   return POLLIN | POLLRDNORM;
+
+   return 0;
+}
+
+static ssize_t hyper_dmabuf_event_read(struct file *filp, char __user *buffer,
+   size_t count, loff_t *offset)
+{
+   int ret;
+
+   /* only root can read events */
+   if (!capable(CAP_DAC_OVERRIDE)) {
+   dev_err(hy_drv_priv->dev,
+   "Only root can read events\n");
+   return -EPERM;
+   }
+
+   /* make sure user buffer can be written */
+   if (!access_ok(VERIFY_WRITE, 

[RFC PATCH v2 5/9] hyper_dmabuf: default backend for XEN hypervisor

2018-02-13 Thread Dongwon Kim
From: "Matuesz Polrola" 

The default backend for XEN hypervisor. This backend contains actual
implementation of individual methods defined in "struct hyper_dmabuf_bknd_ops"
defined as:

struct hyper_dmabuf_bknd_ops {
/* backend initialization routine (optional) */
int (*init)(void);

/* backend cleanup routine (optional) */
int (*cleanup)(void);

/* retreiving id of current virtual machine */
int (*get_vm_id)(void);

/* get pages shared via hypervisor-specific method */
int (*share_pages)(struct page **, int, int, void **);

/* make shared pages unshared via hypervisor specific method */
int (*unshare_pages)(void **, int);

/* map remotely shared pages on importer's side via
 * hypervisor-specific method
 */
struct page ** (*map_shared_pages)(unsigned long, int, int, void **);

/* unmap and free shared pages on importer's side via
 * hypervisor-specific method
 */
int (*unmap_shared_pages)(void **, int);

/* initialize communication environment */
int (*init_comm_env)(void);

void (*destroy_comm)(void);

/* upstream ch setup (receiving and responding) */
int (*init_rx_ch)(int);

/* downstream ch setup (transmitting and parsing responses) */
int (*init_tx_ch)(int);

int (*send_req)(int, struct hyper_dmabuf_req *, int);
};

First two methods are for extra initialization or cleaning up possibly
required for the current Hypervisor (optional). Third method
(.get_vm_id) provides a way to get current VM's id, which will be used
as an identification of the source VM of a shared hyper_DMABUF later.

All other methods are related to either memory sharing or inter-VM
communication, which are minimum requirement for hyper_DMABUF driver.
(Brief description of role of each method is embedded as a comment in the
definition of the structure above and header file.)

Actual implementation of each of these methods specific to XEN is under
backends/xen/. Their mappings are done as followed:

struct hyper_dmabuf_bknd_ops xen_bknd_ops = {
.init = NULL, /* not needed for xen */
.cleanup = NULL, /* not needed for xen */
.get_vm_id = xen_be_get_domid,
.share_pages = xen_be_share_pages,
.unshare_pages = xen_be_unshare_pages,
.map_shared_pages = (void *)xen_be_map_shared_pages,
.unmap_shared_pages = xen_be_unmap_shared_pages,
.init_comm_env = xen_be_init_comm_env,
.destroy_comm = xen_be_destroy_comm,
.init_rx_ch = xen_be_init_rx_rbuf,
.init_tx_ch = xen_be_init_tx_rbuf,
.send_req = xen_be_send_req,
};

A section for Hypervisor Backend has been added to

"Documentation/hyper-dmabuf-sharing.txt" accordingly

Signed-off-by: Dongwon Kim 
Signed-off-by: Mateusz Polrola 
---
 drivers/dma-buf/hyper_dmabuf/Kconfig   |   7 +
 drivers/dma-buf/hyper_dmabuf/Makefile  |   7 +
 .../backends/xen/hyper_dmabuf_xen_comm.c   | 941 +
 .../backends/xen/hyper_dmabuf_xen_comm.h   |  78 ++
 .../backends/xen/hyper_dmabuf_xen_comm_list.c  | 158 
 .../backends/xen/hyper_dmabuf_xen_comm_list.h  |  67 ++
 .../backends/xen/hyper_dmabuf_xen_drv.c|  46 +
 .../backends/xen/hyper_dmabuf_xen_drv.h|  53 ++
 .../backends/xen/hyper_dmabuf_xen_shm.c| 525 
 .../backends/xen/hyper_dmabuf_xen_shm.h|  46 +
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.c|  10 +
 11 files changed, 1938 insertions(+)
 create mode 100644 
drivers/dma-buf/hyper_dmabuf/backends/xen/hyper_dmabuf_xen_comm.c
 create mode 100644 
drivers/dma-buf/hyper_dmabuf/backends/xen/hyper_dmabuf_xen_comm.h
 create mode 100644 
drivers/dma-buf/hyper_dmabuf/backends/xen/hyper_dmabuf_xen_comm_list.c
 create mode 100644 
drivers/dma-buf/hyper_dmabuf/backends/xen/hyper_dmabuf_xen_comm_list.h
 create mode 100644 
drivers/dma-buf/hyper_dmabuf/backends/xen/hyper_dmabuf_xen_drv.c
 create mode 100644 
drivers/dma-buf/hyper_dmabuf/backends/xen/hyper_dmabuf_xen_drv.h
 create mode 100644 
drivers/dma-buf/hyper_dmabuf/backends/xen/hyper_dmabuf_xen_shm.c
 create mode 100644 
drivers/dma-buf/hyper_dmabuf/backends/xen/hyper_dmabuf_xen_shm.h

diff --git a/drivers/dma-buf/hyper_dmabuf/Kconfig 
b/drivers/dma-buf/hyper_dmabuf/Kconfig
index 5ebf516d65eb..68f3d6ce2c1f 100644
--- a/drivers/dma-buf/hyper_dmabuf/Kconfig
+++ b/drivers/dma-buf/hyper_dmabuf/Kconfig
@@ -20,4 +20,11 @@ config HYPER_DMABUF_SYSFS
 
  The location of sysfs is under ""
 
+config HYPER_DMABUF_XEN
+bool "Configure hyper_dmabuf for XEN hypervisor"
+default y
+depends on HYPER_DMABUF && XEN && XENFS
+help
+  Enabling Hyper_DMABUF Backend for XEN hypervisor
+
 endmenu
diff --git a/drivers/dma-buf/hyper_dmabuf/M

[RFC PATCH v2 1/9] hyper_dmabuf: initial upload of hyper_dmabuf drv core framework

2018-02-13 Thread Dongwon Kim
Upload of initial version of core framework in hyper_DMABUF driver
enabling DMA_BUF exchange between two different VMs in virtualized
platform based on Hypervisor such as XEN.

Hyper_DMABUF drv's primary role is to import a DMA_BUF from originator
then re-export it to another Linux VM so that it can be mapped and
accessed in there.

This driver has two layers, one is so called, "core framework", which
contains driver interface and core functions handling export/import of
new hyper_DMABUF and its maintenance. This part of the driver is
independent from Hypervisor so can work as is with any Hypervisor.

The other layer is called "Hypervisor Backend". This layer represents
the interface between "core framework" and actual Hypervisor, handling
memory sharing and communication. Not like "core framework", every
Hypervisor needs its own backend interface designed using its native
mechanism for memory sharing and inter-VM communication.

This patch contains the first part, "core framework", which consists of
7 source files and 11 header files. Some brief description of these
source code are attached below:

hyper_dmabuf_drv.c

- Linux driver interface and initialization/cleaning-up routines

hyper_dmabuf_ioctl.c

- IOCTLs calls for export/import of DMA-BUF comm channel's creation and
  destruction.

hyper_dmabuf_sgl_proc.c

- Provides methods to managing DMA-BUF for exporing and importing. For
  exporting, extraction of pages, sharing pages via procedures in
  "Backend" and notifying importing VM exist. For importing, all
  operations related to the reconstruction of DMA-BUF (with shared
  pages) on importer's side are defined.

hyper_dmabuf_ops.c

- Standard DMA-BUF operations for hyper_DMABUF reconstructed on
  importer's side.

hyper_dmabuf_list.c

- Lists for storing exported and imported hyper_DMABUF to keep track of
  remote usage of hyper_DMABUF currently being shared.

hyper_dmabuf_msg.c

- Defines messages exchanged between VMs (exporter and importer) and
  function calls for sending and parsing (when received) those.

hyper_dmabuf_id.c

- Contains methods to generate and manage "hyper_DMABUF id" for each
  hyper_DMABUF being exported. It is a global handle for a hyper_DMABUF,
  which another VM needs to know to import it.

hyper_dmabuf_struct.h

- Contains data structures of importer or exporter hyper_DMABUF

include/uapi/linux/hyper_dmabuf.h

- Contains definition of data types and structures referenced by user
  application to interact with driver

Signed-off-by: Dongwon Kim 
Signed-off-by: Mateusz Polrola 
---
 drivers/dma-buf/Kconfig|   2 +
 drivers/dma-buf/Makefile   |   1 +
 drivers/dma-buf/hyper_dmabuf/Kconfig   |  23 +
 drivers/dma-buf/hyper_dmabuf/Makefile  |  34 ++
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.c| 254 
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.h| 111 
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.c | 135 +
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.h |  53 ++
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.c  | 672 +
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.h  |  52 ++
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.c   | 294 +
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.h   |  73 +++
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.c| 320 ++
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.h|  87 +++
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.c| 264 
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.h|  34 ++
 .../dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.c   | 256 
 .../dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.h   |  43 ++
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_struct.h | 131 
 include/uapi/linux/hyper_dmabuf.h  |  87 +++
 20 files changed, 2926 insertions(+)
 create mode 100644 drivers/dma-buf/hyper_dmabuf/Kconfig
 create mode 100644 drivers/dma-buf/hyper_dmabuf/Makefile
 create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.c
 create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.h
 create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.c
 create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.h
 create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.c
 create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.h
 create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.c
 create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.h
 create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.c
 create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.h
 create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.c
 create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.h
 create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.c
 create mode 

[RFC PATCH v2 9/9] hyper_dmabuf: threaded interrupt in Xen-backend

2018-02-13 Thread Dongwon Kim
Use threaded interrupt instead of a regular one because most of the ISR
is time-critical and possibly sleeps

Signed-off-by: Dongwon Kim 
---
 .../hyper_dmabuf/backends/xen/hyper_dmabuf_xen_comm.c | 19 +++
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/drivers/dma-buf/hyper_dmabuf/backends/xen/hyper_dmabuf_xen_comm.c 
b/drivers/dma-buf/hyper_dmabuf/backends/xen/hyper_dmabuf_xen_comm.c
index 30bc4b6304ac..65af5ddfb2d7 100644
--- a/drivers/dma-buf/hyper_dmabuf/backends/xen/hyper_dmabuf_xen_comm.c
+++ b/drivers/dma-buf/hyper_dmabuf/backends/xen/hyper_dmabuf_xen_comm.c
@@ -332,11 +332,14 @@ int xen_be_init_tx_rbuf(int domid)
}
 
/* setting up interrupt */
-   ret = bind_evtchn_to_irqhandler(alloc_unbound.port,
-   front_ring_isr, 0,
-   NULL, (void *) ring_info);
+   ring_info->irq = bind_evtchn_to_irq(alloc_unbound.port);
 
-   if (ret < 0) {
+   ret = request_threaded_irq(ring_info->irq,
+  NULL,
+  front_ring_isr,
+  IRQF_ONESHOT, NULL, ring_info);
+
+   if (ret != 0) {
dev_err(hy_drv_priv->dev,
"Failed to setup event channel\n");
close.port = alloc_unbound.port;
@@ -348,7 +351,6 @@ int xen_be_init_tx_rbuf(int domid)
}
 
ring_info->rdomain = domid;
-   ring_info->irq = ret;
ring_info->port = alloc_unbound.port;
 
mutex_init(&ring_info->lock);
@@ -535,9 +537,10 @@ int xen_be_init_rx_rbuf(int domid)
if (!xen_comm_find_tx_ring(domid))
ret = xen_be_init_tx_rbuf(domid);
 
-   ret = request_irq(ring_info->irq,
- back_ring_isr, 0,
- NULL, (void *)ring_info);
+   ret = request_threaded_irq(ring_info->irq,
+  NULL,
+  back_ring_isr, IRQF_ONESHOT,
+  NULL, (void *)ring_info);
 
return ret;
 
-- 
2.16.1

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[RFC PATCH v2 4/9] hyper_dmabuf: user private data attached to hyper_DMABUF

2018-02-13 Thread Dongwon Kim
Define a private data (e.g. meta data for the buffer) attached to
each hyper_DMABUF structure. This data is provided by userapace via
export_remote IOCTL and its size can be up to 192 bytes.

Signed-off-by: Dongwon Kim 
Signed-off-by: Mateusz Polrola 
---
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.c  | 83 --
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.c| 36 +-
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.h|  2 +-
 .../dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.c   |  1 +
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_struct.h | 12 
 include/uapi/linux/hyper_dmabuf.h  |  4 ++
 6 files changed, 132 insertions(+), 6 deletions(-)

diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.c 
b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.c
index 020a5590a254..168ccf98f710 100644
--- a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.c
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.c
@@ -103,6 +103,11 @@ static int send_export_msg(struct exported_sgt_info 
*exported,
}
}
 
+   op[8] = exported->sz_priv;
+
+   /* driver/application specific private info */
+   memcpy(&op[9], exported->priv, op[8]);
+
req = kcalloc(1, sizeof(*req), GFP_KERNEL);
 
if (!req)
@@ -120,8 +125,9 @@ static int send_export_msg(struct exported_sgt_info 
*exported,
 
 /* Fast path exporting routine in case same buffer is already exported.
  *
- * If same buffer is still valid and exist in EXPORT LIST it returns 0 so
- * that remaining normal export process can be skipped.
+ * If same buffer is still valid and exist in EXPORT LIST, it only updates
+ * user-private data for the buffer and returns 0 so that that it can skip
+ * normal export process.
  *
  * If "unexport" is scheduled for the buffer, it cancels it since the buffer
  * is being re-exported.
@@ -129,7 +135,7 @@ static int send_export_msg(struct exported_sgt_info 
*exported,
  * return '1' if reexport is needed, return '0' if succeeds, return
  * Kernel error code if something goes wrong
  */
-static int fastpath_export(hyper_dmabuf_id_t hid)
+static int fastpath_export(hyper_dmabuf_id_t hid, int sz_priv, char *priv)
 {
int reexport = 1;
int ret = 0;
@@ -155,6 +161,46 @@ static int fastpath_export(hyper_dmabuf_id_t hid)
exported->unexport_sched = false;
}
 
+   /* if there's any change in size of private data.
+* we reallocate space for private data with new size
+*/
+   if (sz_priv != exported->sz_priv) {
+   kfree(exported->priv);
+
+   /* truncating size */
+   if (sz_priv > MAX_SIZE_PRIV_DATA)
+   exported->sz_priv = MAX_SIZE_PRIV_DATA;
+   else
+   exported->sz_priv = sz_priv;
+
+   exported->priv = kcalloc(1, exported->sz_priv,
+GFP_KERNEL);
+
+   if (!exported->priv) {
+   hyper_dmabuf_remove_exported(exported->hid);
+   hyper_dmabuf_cleanup_sgt_info(exported, true);
+   kfree(exported);
+   return -ENOMEM;
+   }
+   }
+
+   /* update private data in sgt_info with new ones */
+   ret = copy_from_user(exported->priv, priv, exported->sz_priv);
+   if (ret) {
+   dev_err(hy_drv_priv->dev,
+   "Failed to load a new private data\n");
+   ret = -EINVAL;
+   } else {
+   /* send an export msg for updating priv in importer */
+   ret = send_export_msg(exported, NULL);
+
+   if (ret < 0) {
+   dev_err(hy_drv_priv->dev,
+   "Failed to send a new private data\n");
+   ret = -EBUSY;
+   }
+   }
+
return ret;
 }
 
@@ -191,7 +237,8 @@ static int hyper_dmabuf_export_remote_ioctl(struct file 
*filp, void *data)
 export_remote_attr->remote_domain);
 
if (hid.id != -1) {
-   ret = fastpath_export(hid);
+   ret = fastpath_export(hid, export_remote_attr->sz_priv,
+ export_remote_attr->priv);
 
/* return if fastpath_export succeeds or
 * gets some fatal error
@@ -225,6 +272,24 @@ static int hyper_dmabuf_export_remote_ioctl(struct file 
*filp, void *data)
goto fail_sgt_info_creation;
}
 
+   /* possible truncation */
+   if (export_remote_attr->sz_priv > MAX_SIZE_PRIV_DATA)
+   exported->sz_priv = MAX_SIZE_PRIV_DATA;
+   else
+   exported->sz_priv = export_remote_attr->sz_priv;
+
+   /* creating buffer for private data of buffer */
+   if 

[RFC PATCH v2 6/9] hyper_dmabuf: hyper_DMABUF synchronization across VM

2018-02-13 Thread Dongwon Kim
All of hyper_DMABUF operations now (hyper_dmabuf_ops.c) send a message
to the exporting VM for synchronization between two VMs. For this, every
mapping done by importer will make exporter perform shadow mapping of
original DMA-BUF. Then all consecutive DMA-BUF operations (attach, detach,
map/unmap and so on) will be mimicked on this shadowed DMA-BUF for tracking
and synchronization purpose (e.g. +-reference count to check the status).

Signed-off-by: Dongwon Kim 
Signed-off-by: Mateusz Polrola 
---
 drivers/dma-buf/hyper_dmabuf/Makefile  |   1 +
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.c|  53 +++-
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.h|   2 +
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.c| 157 +-
 .../hyper_dmabuf/hyper_dmabuf_remote_sync.c| 324 +
 .../hyper_dmabuf/hyper_dmabuf_remote_sync.h|  32 ++
 6 files changed, 565 insertions(+), 4 deletions(-)
 create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_remote_sync.c
 create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_remote_sync.h

diff --git a/drivers/dma-buf/hyper_dmabuf/Makefile 
b/drivers/dma-buf/hyper_dmabuf/Makefile
index b9ab4eeca6f2..702696f29215 100644
--- a/drivers/dma-buf/hyper_dmabuf/Makefile
+++ b/drivers/dma-buf/hyper_dmabuf/Makefile
@@ -9,6 +9,7 @@ ifneq ($(KERNELRELEASE),)
 hyper_dmabuf_ops.o \
 hyper_dmabuf_msg.o \
 hyper_dmabuf_id.o \
+hyper_dmabuf_remote_sync.o \
 
 ifeq ($(CONFIG_HYPER_DMABUF_XEN), y)
$(TARGET_MODULE)-objs += backends/xen/hyper_dmabuf_xen_comm.o \
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.c 
b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.c
index 7176fa8fb139..1592d5cfaa52 100644
--- a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.c
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.c
@@ -34,6 +34,7 @@
 #include 
 #include "hyper_dmabuf_drv.h"
 #include "hyper_dmabuf_msg.h"
+#include "hyper_dmabuf_remote_sync.h"
 #include "hyper_dmabuf_list.h"
 
 struct cmd_process {
@@ -92,6 +93,25 @@ void hyper_dmabuf_create_req(struct hyper_dmabuf_req *req,
req->op[i] = op[i];
break;
 
+   case HYPER_DMABUF_OPS_TO_REMOTE:
+   /* notifying dmabuf map/unmap to importer (probably not needed)
+* for dmabuf synchronization
+*/
+   break;
+
+   case HYPER_DMABUF_OPS_TO_SOURCE:
+   /* notifying dmabuf map/unmap to exporter, map will make
+* the driver to do shadow mapping or unmapping for
+* synchronization with original exporter (e.g. i915)
+*
+* command : DMABUF_OPS_TO_SOURCE.
+* op0~3 : hyper_dmabuf_id
+* op4 : map(=1)/unmap(=2)/attach(=3)/detach(=4)
+*/
+   for (i = 0; i < 5; i++)
+   req->op[i] = op[i];
+   break;
+
default:
/* no command found */
return;
@@ -201,6 +221,12 @@ static void cmd_process_work(struct work_struct *work)
 
break;
 
+   case HYPER_DMABUF_OPS_TO_REMOTE:
+   /* notifying dmabuf map/unmap to importer
+* (probably not needed) for dmabuf synchronization
+*/
+   break;
+
default:
/* shouldn't get here */
break;
@@ -217,6 +243,7 @@ int hyper_dmabuf_msg_parse(int domid, struct 
hyper_dmabuf_req *req)
struct imported_sgt_info *imported;
struct exported_sgt_info *exported;
hyper_dmabuf_id_t hid;
+   int ret;
 
if (!req) {
dev_err(hy_drv_priv->dev, "request is NULL\n");
@@ -229,7 +256,7 @@ int hyper_dmabuf_msg_parse(int domid, struct 
hyper_dmabuf_req *req)
hid.rng_key[2] = req->op[3];
 
if ((req->cmd < HYPER_DMABUF_EXPORT) ||
-   (req->cmd > HYPER_DMABUF_NOTIFY_UNEXPORT)) {
+   (req->cmd > HYPER_DMABUF_OPS_TO_SOURCE)) {
dev_err(hy_drv_priv->dev, "invalid command\n");
return -EINVAL;
}
@@ -271,6 +298,30 @@ int hyper_dmabuf_msg_parse(int domid, struct 
hyper_dmabuf_req *req)
return req->cmd;
}
 
+   /* dma buf remote synchronization */
+   if (req->cmd == HYPER_DMABUF_OPS_TO_SOURCE) {
+   /* notifying dmabuf map/unmap to exporter, map will
+* make the driver to do shadow mapping
+* or unmapping for synchronization with original
+* exporter (e.g. i915)
+*
+* command : DMABUF_OPS_TO_SOURCE.
+* op0~3 : hyper_dmabuf_id
+* op1 : enum hyper_

[RFC PATCH v2 0/9] hyper_dmabuf: Hyper_DMABUF driver

2018-02-13 Thread Dongwon Kim
This patch series contains the implementation of a new device driver,
hyper_DMABUF driver, which provides a way to expand the boundary of
Linux DMA-BUF sharing to across different VM instances in Multi-OS platform
enabled by a Hypervisor (e.g. XEN)

This version 2 series is basically a refactored version of the old series
starting with "[RFC PATCH 01/60] hyper_dmabuf: initial working version of hyper_dmabuf
drv"

Implementation details of this driver are described in the reference guide
added by the second patch, "[RFC PATCH v2 2/9] hyper_dmabuf: architecture
specification and reference guide".

Attaching 'Overview' section here as a quick summary.

--
Section 1. Overview
--

Hyper_DMABUF driver is a Linux device driver running on multiple Virtual
Machines (VMs), which expands DMA-BUF sharing capability to the VM environment
where multiple different OS instances need to share same physical data without
data-copy across VMs.

To share a DMA_BUF across VMs, an instance of the Hyper_DMABUF drv on the
exporting VM (so called, “exporter”) imports a local DMA_BUF from the original
producer of the buffer, then re-exports it with a unique ID, hyper_dmabuf_id
for the buffer to the importing VM (so called, “importer”).

Another instance of the Hyper_DMABUF driver on importer registers
a hyper_dmabuf_id together with reference information for the shared physical
pages associated with the DMA_BUF to its database when the export happens.

The actual mapping of the DMA_BUF on the importer’s side is done by
the Hyper_DMABUF driver when user space issues the IOCTL command to access
the shared DMA_BUF. The Hyper_DMABUF driver works as both an importing and
exporting driver as is, that is, no special configuration is required.
Consequently, only a single module per VM is needed to enable cross-VM DMA_BUF
exchange.

--

There is a git repository at github.com where this series of patches are all
integrated in Linux kernel tree based on the commit:

commit ae64f9bd1d3621b5e60d7363bc20afb46aede215
Author: Linus Torvalds 
Date:   Sun Dec 3 11:01:47 2017 -0500

Linux 4.15-rc2

https://github.com/downor/linux_hyper_dmabuf.git hyper_dmabuf_integration_v4

Dongwon Kim, Mateusz Polrola (9):
  hyper_dmabuf: initial upload of hyper_dmabuf drv core framework
  hyper_dmabuf: architecture specification and reference guide
  MAINTAINERS: adding Hyper_DMABUF driver section in MAINTAINERS
  hyper_dmabuf: user private data attached to hyper_DMABUF
  hyper_dmabuf: hyper_DMABUF synchronization across VM
  hyper_dmabuf: query ioctl for retreiving various hyper_DMABUF info
  hyper_dmabuf: event-polling mechanism for detecting a new hyper_DMABUF
  hyper_dmabuf: threaded interrupt in Xen-backend
  hyper_dmabuf: default backend for XEN hypervisor

 Documentation/hyper-dmabuf-sharing.txt | 734 
 MAINTAINERS|  11 +
 drivers/dma-buf/Kconfig|   2 +
 drivers/dma-buf/Makefile   |   1 +
 drivers/dma-buf/hyper_dmabuf/Kconfig   |  50 ++
 drivers/dma-buf/hyper_dmabuf/Makefile  |  44 +
 .../backends/xen/hyper_dmabuf_xen_comm.c   | 944 +
 .../backends/xen/hyper_dmabuf_xen_comm.h   |  78 ++
 .../backends/xen/hyper_dmabuf_xen_comm_list.c  | 158 
 .../backends/xen/hyper_dmabuf_xen_comm_list.h  |  67 ++
 .../backends/xen/hyper_dmabuf_xen_drv.c|  46 +
 .../backends/xen/hyper_dmabuf_xen_drv.h|  53 ++
 .../backends/xen/hyper_dmabuf_xen_shm.c| 525 
 .../backends/xen/hyper_dmabuf_xen_shm.h|  46 +
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.c| 410 +
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.h| 122 +++
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.c  | 122 +++
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.h  |  38 +
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.c | 135 +++
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.h |  53 ++
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.c  | 794 +
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.h  |  52 ++
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.c   | 295 +++
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.h   |  73 ++
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.c| 416 +
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.h|  89 ++
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.c| 415 +
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.h|  34 +
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.c  | 174 
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.h  |  36 +
 .../hyper_dmabuf/hyper_dma

[RFC PATCH v2 3/9] MAINTAINERS: adding Hyper_DMABUF driver section in MAINTAINERS

2018-02-13 Thread Dongwon Kim
Hyper_DMABUF DRIVER
M:  Dongwon Kim 
M:  Mateusz Polrola 
L:  linux-ker...@vger.kernel.org
L:  xen-de...@lists.xenproject.org
S:  Maintained
F:  drivers/dma-buf/hyper_dmabuf*
F:  include/uapi/linux/hyper_dmabuf.h
F:  Documentation/hyper-dmabuf-sharing.txt
T:  https://github.com/downor/linux_hyper_dmabuf/

Signed-off-by: Dongwon Kim 
---
 MAINTAINERS | 11 +++
 1 file changed, 11 insertions(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index d4fdcb12616c..155f7f839201 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6468,6 +6468,17 @@ S:   Maintained
 F: mm/memory-failure.c
 F: mm/hwpoison-inject.c
 
+Hyper_DMABUF DRIVER
+M: Dongwon Kim 
+M: Mateusz Polrola 
+L: linux-ker...@vger.kernel.org
+L: xen-de...@lists.xenproject.org
+S: Maintained
+F: drivers/dma-buf/hyper_dmabuf*
+F: include/uapi/linux/hyper_dmabuf.h
+F: Documentation/hyper-dmabuf-sharing.txt
+T: https://github.com/downor/linux_hyper_dmabuf/
+
 Hyper-V CORE AND DRIVERS
 M: "K. Y. Srinivasan" 
 M: Haiyang Zhang 
-- 
2.16.1

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[RFC PATCH v2 7/9] hyper_dmabuf: query ioctl for retreiving various hyper_DMABUF info

2018-02-13 Thread Dongwon Kim
Add a new ioctl, "IOCTL_HYPER_DMABUF_QUERY" for the userspace to
retrieve various information about hyper_DMABUF, currently being shared
across VMs.

Supported query items are as followed:

enum hyper_dmabuf_query {
HYPER_DMABUF_QUERY_TYPE = 0x10,
HYPER_DMABUF_QUERY_EXPORTER,
HYPER_DMABUF_QUERY_IMPORTER,
HYPER_DMABUF_QUERY_SIZE,
HYPER_DMABUF_QUERY_BUSY,
HYPER_DMABUF_QUERY_UNEXPORTED,
HYPER_DMABUF_QUERY_DELAYED_UNEXPORTED,
HYPER_DMABUF_QUERY_PRIV_INFO_SIZE,
HYPER_DMABUF_QUERY_PRIV_INFO,
};

Query IOCTL call with each query item above returns,

HYPER_DMABUF_QUERY_TYPE - type - EXPORTED/IMPORTED of hyper_DMABUF from
current VM's perspective.

HYPER_DMABUF_QUERY_EXPORTER - ID of exporting VM

HYPER_DMABUF_QUERY_IMPORTER - ID of importing VM

HYPER_DMABUF_QUERY_SIZE - size of shared buffer in byte

HYPER_DMABUF_QUERY_BUSY - true if hyper_DMABUF is being actively used
(e.g. attached and mapped by end-consumer)

HYPER_DMABUF_QUERY_UNEXPORTED - true if hyper_DMABUF has been unexported
on exporting VM's side.

HYPER_DMABUF_QUERY_DELAYED_UNEXPORTED - true if hyper_DMABUF is scheduled
to be unexported (still valid but will be unexported soon)

HYPER_DMABUF_QUERY_PRIV_INFO_SIZE - size of private information (given by
user application on exporter's side) attached to hyper_DMABUF

HYPER_DMABUF_QUERY_PRIV_INFO - private information attached to hyper_DMABUF

Signed-off-by: Dongwon Kim 
Signed-off-by: Mateusz Polrola 
---
 drivers/dma-buf/hyper_dmabuf/Makefile |   1 +
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.c |  49 +-
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.c | 174 ++
 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.h |  36 +
 include/uapi/linux/hyper_dmabuf.h |  32 
 5 files changed, 291 insertions(+), 1 deletion(-)
 create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.c
 create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.h

diff --git a/drivers/dma-buf/hyper_dmabuf/Makefile 
b/drivers/dma-buf/hyper_dmabuf/Makefile
index 702696f29215..578a669a0d3e 100644
--- a/drivers/dma-buf/hyper_dmabuf/Makefile
+++ b/drivers/dma-buf/hyper_dmabuf/Makefile
@@ -10,6 +10,7 @@ ifneq ($(KERNELRELEASE),)
 hyper_dmabuf_msg.o \
 hyper_dmabuf_id.o \
 hyper_dmabuf_remote_sync.o \
+hyper_dmabuf_query.o \
 
 ifeq ($(CONFIG_HYPER_DMABUF_XEN), y)
$(TARGET_MODULE)-objs += backends/xen/hyper_dmabuf_xen_comm.o \
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.c 
b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.c
index 168ccf98f710..e90e59cd0568 100644
--- a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.c
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.c
@@ -41,6 +41,7 @@
 #include "hyper_dmabuf_msg.h"
 #include "hyper_dmabuf_sgl_proc.h"
 #include "hyper_dmabuf_ops.h"
+#include "hyper_dmabuf_query.h"
 
 static int hyper_dmabuf_tx_ch_setup_ioctl(struct file *filp, void *data)
 {
@@ -543,7 +544,6 @@ static int hyper_dmabuf_export_fd_ioctl(struct file *filp, 
void *data)
hyper_dmabuf_create_req(req,
HYPER_DMABUF_EXPORT_FD_FAILED,
&op[0]);
-
bknd_ops->send_req(HYPER_DMABUF_DOM_ID(imported->hid),
   req, false);
kfree(req);
@@ -682,6 +682,51 @@ int hyper_dmabuf_unexport_ioctl(struct file *filp, void 
*data)
return 0;
 }
 
+static int hyper_dmabuf_query_ioctl(struct file *filp, void *data)
+{
+   struct ioctl_hyper_dmabuf_query *query_attr =
+   (struct ioctl_hyper_dmabuf_query *)data;
+   struct exported_sgt_info *exported = NULL;
+   struct imported_sgt_info *imported = NULL;
+   int ret = 0;
+
+   if (HYPER_DMABUF_DOM_ID(query_attr->hid) == hy_drv_priv->domid) {
+   /* query for exported dmabuf */
+   exported = hyper_dmabuf_find_exported(query_attr->hid);
+   if (exported) {
+   ret = hyper_dmabuf_query_exported(exported,
+ query_attr->item,
+ &query_attr->info);
+   } else {
+   dev_err(hy_drv_priv->dev,
+   "hid {id:%d key:%d %d %d} not in exp list\n",
+   query_attr->hid.id,
+   query_attr->hid.rng_key[0],
+   query_attr->hid.rng_key[1],
+   query_attr->hid.rng_key[2]);
+   retu

[RFC PATCH v2 2/9] hyper_dmabuf: architecture specification and reference guide

2018-02-13 Thread Dongwon Kim
Reference document for hyper_DMABUF driver

Documentation/hyper-dmabuf-sharing.txt

Signed-off-by: Dongwon Kim 
---
 Documentation/hyper-dmabuf-sharing.txt | 734 +
 1 file changed, 734 insertions(+)
 create mode 100644 Documentation/hyper-dmabuf-sharing.txt

diff --git a/Documentation/hyper-dmabuf-sharing.txt 
b/Documentation/hyper-dmabuf-sharing.txt
new file mode 100644
index ..928e411931e3
--- /dev/null
+++ b/Documentation/hyper-dmabuf-sharing.txt
@@ -0,0 +1,734 @@
+Linux Hyper DMABUF Driver
+
+--
+Section 1. Overview
+--
+
+Hyper_DMABUF driver is a Linux device driver running on multiple Virtual
+achines (VMs), which expands DMA-BUF sharing capability to the VM environment
+where multiple different OS instances need to share same physical data without
+data-copy across VMs.
+
+To share a DMA_BUF across VMs, an instance of the Hyper_DMABUF drv on the
+exporting VM (so called, “exporter”) imports a local DMA_BUF from the original
+producer of the buffer, then re-exports it with an unique ID, hyper_dmabuf_id
+for the buffer to the importing VM (so called, “importer”).
+
+Another instance of the Hyper_DMABUF driver on importer registers
+a hyper_dmabuf_id together with reference information for the shared physical
+pages associated with the DMA_BUF to its database when the export happens.
+
+The actual mapping of the DMA_BUF on the importer’s side is done by
+the Hyper_DMABUF driver when user space issues the IOCTL command to access
+the shared DMA_BUF. The Hyper_DMABUF driver works as both an importing and
+exporting driver as is, that is, no special configuration is required.
+Consequently, only a single module per VM is needed to enable cross-VM DMA_BUF
+exchange.
+
+--
+Section 2. Architecture
+--
+
+1. Hyper_DMABUF ID
+
+hyper_dmabuf_id is a global handle for shared DMA BUFs, which is compatible
+across VMs. It is a key used by the importer to retrieve information about
+shared Kernel pages behind the DMA_BUF structure from the IMPORT list. When
+a DMA_BUF is exported to another domain, its hyper_dmabuf_id and META data
+are also kept in the EXPORT list by the exporter for further synchronization
+of control over the DMA_BUF.
+
+hyper_dmabuf_id is “targeted”, meaning it is valid only in exporting (owner of
+the buffer) and importing VMs, where the corresponding hyper_dmabuf_id is
+stored in their database (EXPORT and IMPORT lists).
+
+A user-space application specifies the targeted VM id in the user parameter
+when it calls the IOCTL command to export shared DMA_BUF to another VM.
+
+hyper_dmabuf_id_t is a data type for hyper_dmabuf_id. It is defined as 16-byte
+data structure, and it contains id and rng_key[3] as elements for
+the structure.
+
+typedef struct {
+int id;
+int rng_key[3]; /* 12bytes long random number */
+} hyper_dmabuf_id_t;
+
+The first element in the hyper_dmabuf_id structure, int id is combined data of
+a count number generated by the driver running on the exporter and
+the exporter’s ID. The VM’s ID is a one byte value and located at the field’s
+MSB in int id. The remaining three bytes in int id are reserved for a count
+number.
+
+However, there is a limit related to this count number, which is 1000.
+Therefore, only little more than a byte starting from the LSB is actually used
+for storing this count number.
+
+#define HYPER_DMABUF_ID_CREATE(domid, id) \
+((((domid) & 0xFF) << 24) | ((id) & 0xFFFFFF))
+
+This limit on the count number directly means the maximum number of DMA BUFs
+that  can be shared simultaneously by one VM. The second element of
+hyper_dmabuf_id, that is int rng_key[3], is an array of three integers. These
+numbers are generated by Linux’s native random number generation mechanism.
+This field is added to enhance the security of the Hyper DMABUF driver by
+maximizing the entropy of hyper_dmabuf_id (that is, preventing it from being
+guessed by a security attacker).
+
+Once DMA_BUF is no longer shared, the hyper_dmabuf_id associated with
+the DMA_BUF is released, but the count number in hyper_dmabuf_id is saved in
+the ID list for reuse. However, random keys stored in int rng_key[3] are not
+reused. Instead, those keys are always filled with freshly generated random
+keys for security.
+
+2. IOCTLs
+
+a. IOCTL_HYPER_DMABUF_TX_CH_SETUP
+
+This type of IOCTL is used for initialization of a one-directional transmit
+communication channel with a remote domain.
+
+The user space argument for this type of IOCTL is defined as:
+
+struct ioctl_hyper_dmabuf_tx_ch_setup {
+/* IN parameters */
+/* Remote domain id */
+int remote_domain;
+};
+
+b. IOCTL_HYPER_DMABUF_RX_CH_SETUP
+

Re: [RFC PATCH 01/60] hyper_dmabuf: initial working version of hyper_dmabuf drv

2018-02-14 Thread Dongwon Kim
Abandoning this series as a new version was submitted for the review

"[RFC PATCH v2 0/9] hyper_dmabuf: Hyper_DMABUF driver"

On Tue, Dec 19, 2017 at 11:29:17AM -0800, Kim, Dongwon wrote:
> Upload of intial version of hyper_DMABUF driver enabling
> DMA_BUF exchange between two different VMs in virtualized
> platform based on hypervisor such as KVM or XEN.
> 
> Hyper_DMABUF drv's primary role is to import a DMA_BUF
> from originator then re-export it to another Linux VM
> so that it can be mapped and accessed by it.
> 
> The functionality of this driver highly depends on
> Hypervisor's native page sharing mechanism and inter-VM
> communication support.
> 
> This driver has two layers, one is main hyper_DMABUF
> framework for scatter-gather list management that handles
> actual import and export of DMA_BUF. Lower layer is about
> actual memory sharing and communication between two VMs,
> which is hypervisor-specific interface.
> 
> This driver is initially designed to enable DMA_BUF
> sharing across VMs in Xen environment, so currently working
> with Xen only.
> 
> This also adds Kernel configuration for hyper_DMABUF drv
> under Device Drivers->Xen driver support->hyper_dmabuf
> options.
> 
> To give some brief information about each source file,
> 
> hyper_dmabuf/hyper_dmabuf_conf.h
> : configuration info
> 
> hyper_dmabuf/hyper_dmabuf_drv.c
> : driver interface and initialization
> 
> hyper_dmabuf/hyper_dmabuf_imp.c
> : scatter-gather list generation and management. DMA_BUF
> ops for DMA_BUF reconstructed from hyper_DMABUF
> 
> hyper_dmabuf/hyper_dmabuf_ioctl.c
> : IOCTLs calls for export/import and comm channel creation
> unexport.
> 
> hyper_dmabuf/hyper_dmabuf_list.c
> : Database (linked-list) for exported and imported
> hyper_DMABUF
> 
> hyper_dmabuf/hyper_dmabuf_msg.c
> : creation and management of messages between exporter and
> importer
> 
> hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
> : comm ch management and ISRs for incoming messages.
> 
> hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c
> : Database (linked-list) for keeping information about
> existing comm channels among VMs
> 
> Signed-off-by: Dongwon Kim 
> Signed-off-by: Mateusz Polrola 
> ---
>  drivers/xen/Kconfig|   2 +
>  drivers/xen/Makefile   |   1 +
>  drivers/xen/hyper_dmabuf/Kconfig   |  14 +
>  drivers/xen/hyper_dmabuf/Makefile  |  34 +
>  drivers/xen/hyper_dmabuf/hyper_dmabuf_conf.h   |   2 +
>  drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c|  54 ++
>  drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h| 101 +++
>  drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c| 852 
> +
>  drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.h|  31 +
>  drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c  | 462 +++
>  drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c   | 119 +++
>  drivers/xen/hyper_dmabuf/hyper_dmabuf_list.h   |  40 +
>  drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c| 212 +
>  drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h|  45 ++
>  drivers/xen/hyper_dmabuf/hyper_dmabuf_query.h  |  16 +
>  drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h |  70 ++
>  .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c   | 328 
>  .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h   |  62 ++
>  .../hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c  | 106 +++
>  .../hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h  |  35 +
>  20 files changed, 2586 insertions(+)
>  create mode 100644 drivers/xen/hyper_dmabuf/Kconfig
>  create mode 100644 drivers/xen/hyper_dmabuf/Makefile
>  create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_conf.h
>  create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
>  create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
>  create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
>  create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.h
>  create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
>  create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c
>  create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_list.h
>  create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
>  create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h
>  create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_query.h
>  create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h
>  create mode 100644 drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
>  create mode 100644 drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h
>  create mode 100644 drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c
>

Re: [RFC PATCH v2 0/9] hyper_dmabuf: Hyper_DMABUF driver

2018-02-21 Thread Dongwon Kim
On Mon, Feb 19, 2018 at 06:01:29PM +0100, Daniel Vetter wrote:
> On Tue, Feb 13, 2018 at 05:49:59PM -0800, Dongwon Kim wrote:
> > This patch series contains the implementation of a new device driver,
> > hyper_DMABUF driver, which provides a way to expand the boundary of
> > Linux DMA-BUF sharing to across different VM instances in Multi-OS platform
> > enabled by a Hypervisor (e.g. XEN)
> > 
> > This version 2 series is basically refactored version of old series starting
> > with "[RFC PATCH 01/60] hyper_dmabuf: initial working version of 
> > hyper_dmabuf
> > drv"
> > 
> > Implementation details of this driver are described in the reference guide
> > added by the second patch, "[RFC PATCH v2 2/5] hyper_dmabuf: architecture
> > specification and reference guide".
> > 
> > Attaching 'Overview' section here as a quick summary.
> > 
> > --
> > Section 1. Overview
> > --
> > 
> > Hyper_DMABUF driver is a Linux device driver running on multiple Virtual
> > achines (VMs), which expands DMA-BUF sharing capability to the VM 
> > environment
> > where multiple different OS instances need to share same physical data 
> > without
> > data-copy across VMs.
> > 
> > To share a DMA_BUF across VMs, an instance of the Hyper_DMABUF drv on the
> > exporting VM (so called, “exporter”) imports a local DMA_BUF from the 
> > original
> > producer of the buffer, then re-exports it with an unique ID, 
> > hyper_dmabuf_id
> > for the buffer to the importing VM (so called, “importer”).
> > 
> > Another instance of the Hyper_DMABUF driver on importer registers
> > a hyper_dmabuf_id together with reference information for the shared 
> > physical
> > pages associated with the DMA_BUF to its database when the export happens.
> > 
> > The actual mapping of the DMA_BUF on the importer’s side is done by
> > the Hyper_DMABUF driver when user space issues the IOCTL command to access
> > the shared DMA_BUF. The Hyper_DMABUF driver works as both an importing and
> > exporting driver as is, that is, no special configuration is required.
> > Consequently, only a single module per VM is needed to enable cross-VM 
> > DMA_BUF
> > exchange.
> > 
> > --
> > 
> > There is a git repository at github.com where this series of patches are all
> > integrated in Linux kernel tree based on the commit:
> > 
> > commit ae64f9bd1d3621b5e60d7363bc20afb46aede215
> > Author: Linus Torvalds 
> > Date:   Sun Dec 3 11:01:47 2018 -0500
> > 
> > Linux 4.15-rc2
> > 
> > https://github.com/downor/linux_hyper_dmabuf.git hyper_dmabuf_integration_v4
> 
> Since you place this under drivers/dma-buf I'm assuming you want to
> maintain this as part of the core dma-buf support, and not as some
> Xen-specific thing. Given that, usual graphics folks rules apply:

I moved it inside drivers/dma-buf because half of the design is not
hypervisor-specific, and it is possible that we would add more backends
for additional hypervisor support.

> 
> Where's the userspace for this (must be open source)? What exactly is the
> use-case you're trying to solve by sharing dma-bufs in this fashion?

Automotive use cases are actually using this feature now, where each VM has
its own display and wants to share the same rendering contents from one to
another. It is a platform based on Xen and Intel hardware and I don't think
all of SW stack is open-sourced. I do have a test application to verify this,
which I think I can make public.

> 
> Iirc my feedback on v1 was why exactly you really need to be able to
> import a normal dma-buf into a hyper-dmabuf, instead of allocating them
> directly in the hyper-dmabuf driver. Which would _massively_ simplify your
> design, since you don't need to marshall all the attach and map business
> around (since the hypervisor would be in control of the dma-buf, not a
> guest OS). 

I am sorry but I don't quite understand which side you are talking about
when you said "import a normal dma-buf". This hyper_dmabuf driver running
on the exporting VM actually imports the normal dma-buf (e.g. the one from
i915) then get underlying pages shared and pass all the references to those
pages to the importing VM. On importing VM, hyper_dmabuf driver is supposed
to create a dma-buf (Is this part what you are talking about?) with those
shared pages and ex

Re: [Xen-devel] [RFC PATCH v2 2/9] hyper_dmabuf: architecture specification and reference guide

2018-02-23 Thread Dongwon Kim
Thanks for your comment, Roger
I will try to polish this doc and resubmit.
(I put some comments below as well.)

On Fri, Feb 23, 2018 at 04:15:00PM +, Roger Pau Monné wrote:
> On Tue, Feb 13, 2018 at 05:50:01PM -0800, Dongwon Kim wrote:
> > Reference document for hyper_DMABUF driver
> > 
> > Documentation/hyper-dmabuf-sharing.txt
> 
> This should likely be patch 1 in order for reviewers to have the
> appropriate context.
> 
> > 
> > Signed-off-by: Dongwon Kim 
> > ---
> >  Documentation/hyper-dmabuf-sharing.txt | 734 
> > +
> >  1 file changed, 734 insertions(+)
> >  create mode 100644 Documentation/hyper-dmabuf-sharing.txt
> > 
> > diff --git a/Documentation/hyper-dmabuf-sharing.txt 
> > b/Documentation/hyper-dmabuf-sharing.txt
> > new file mode 100644
> > index ..928e411931e3
> > --- /dev/null
> > +++ b/Documentation/hyper-dmabuf-sharing.txt
> > @@ -0,0 +1,734 @@
> > +Linux Hyper DMABUF Driver
> > +
> > +--
> > +Section 1. Overview
> > +--
> > +
> > +Hyper_DMABUF driver is a Linux device driver running on multiple Virtual
> > +achines (VMs), which expands DMA-BUF sharing capability to the VM 
> > environment
> > +where multiple different OS instances need to share same physical data 
> > without
> > +data-copy across VMs.
> > +
> > +To share a DMA_BUF across VMs, an instance of the Hyper_DMABUF drv on the
> > +exporting VM (so called, “exporter”) imports a local DMA_BUF from the 
> > original
> > +producer of the buffer,
> 
> The usage of export and import in the above sentence makes it almost
> impossible to understand.

Ok, it looks confusing. I think the problem is that those words are used for 
both the
local and cross-VM cases. I will try to clarify those. 

> 
> > then re-exports it with an unique ID, hyper_dmabuf_id
> > +for the buffer to the importing VM (so called, “importer”).
> 
> And this is even worse.
> 
> Maybe it would help to have some kind of flow diagram of all this
> import/export operations, but please read below.

I will add a diagram here.

> 
> > +
> > +Another instance of the Hyper_DMABUF driver on importer registers
> > +a hyper_dmabuf_id together with reference information for the shared 
> > physical
> > +pages associated with the DMA_BUF to its database when the export happens.
> > +
> > +The actual mapping of the DMA_BUF on the importer’s side is done by
> > +the Hyper_DMABUF driver when user space issues the IOCTL command to access
> > +the shared DMA_BUF. The Hyper_DMABUF driver works as both an importing and
> > +exporting driver as is, that is, no special configuration is required.
> > +Consequently, only a single module per VM is needed to enable cross-VM 
> > DMA_BUF
> > +exchange.
> 
> IMHO I need a more generic view of the problem you are trying to solve
> in the overview section. I've read the full overview, and I still have
> no idea why you need all this.

I will add some more paragraphs here to give a more generic view (and possibly
diagrams) of this driver.

> 
> I think the overview should contain at least:
> 
> 1. A description of the problem you are trying to solve.
> 2. A high level description of the proposed solution.
> 3. How the proposed solution deals with the problem described in 1.
> 
> This overview is not useful for people that don't know which problem
> you are trying to solve, like myself.

Thanks again.

> 
> Thanks, Roger.
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[RFC PATCH 01/60] hyper_dmabuf: initial working version of hyper_dmabuf drv

2017-12-19 Thread Dongwon Kim
Upload of initial version of hyper_DMABUF driver enabling
DMA_BUF exchange between two different VMs in a virtualized
platform based on a hypervisor such as KVM or Xen.

Hyper_DMABUF drv's primary role is to import a DMA_BUF
from originator then re-export it to another Linux VM
so that it can be mapped and accessed by it.

The functionality of this driver highly depends on
Hypervisor's native page sharing mechanism and inter-VM
communication support.

This driver has two layers, one is main hyper_DMABUF
framework for scatter-gather list management that handles
actual import and export of DMA_BUF. Lower layer is about
actual memory sharing and communication between two VMs,
which is hypervisor-specific interface.

This driver is initially designed to enable DMA_BUF
sharing across VMs in Xen environment, so currently working
with Xen only.

This also adds Kernel configuration for hyper_DMABUF drv
under Device Drivers->Xen driver support->hyper_dmabuf
options.

To give some brief information about each source file,

hyper_dmabuf/hyper_dmabuf_conf.h
: configuration info

hyper_dmabuf/hyper_dmabuf_drv.c
: driver interface and initialization

hyper_dmabuf/hyper_dmabuf_imp.c
: scatter-gather list generation and management. DMA_BUF
ops for DMA_BUF reconstructed from hyper_DMABUF

hyper_dmabuf/hyper_dmabuf_ioctl.c
: IOCTLs calls for export/import and comm channel creation
unexport.

hyper_dmabuf/hyper_dmabuf_list.c
: Database (linked-list) for exported and imported
hyper_DMABUF

hyper_dmabuf/hyper_dmabuf_msg.c
: creation and management of messages between exporter and
importer

hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
: comm ch management and ISRs for incoming messages.

hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c
: Database (linked-list) for keeping information about
existing comm channels among VMs

Signed-off-by: Dongwon Kim 
Signed-off-by: Mateusz Polrola 
---
 drivers/xen/Kconfig|   2 +
 drivers/xen/Makefile   |   1 +
 drivers/xen/hyper_dmabuf/Kconfig   |  14 +
 drivers/xen/hyper_dmabuf/Makefile  |  34 +
 drivers/xen/hyper_dmabuf/hyper_dmabuf_conf.h   |   2 +
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c|  54 ++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h| 101 +++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c| 852 +
 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.h|  31 +
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c  | 462 +++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c   | 119 +++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_list.h   |  40 +
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c| 212 +
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h|  45 ++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_query.h  |  16 +
 drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h |  70 ++
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c   | 328 
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h   |  62 ++
 .../hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c  | 106 +++
 .../hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h  |  35 +
 20 files changed, 2586 insertions(+)
 create mode 100644 drivers/xen/hyper_dmabuf/Kconfig
 create mode 100644 drivers/xen/hyper_dmabuf/Makefile
 create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_conf.h
 create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
 create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
 create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
 create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.h
 create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
 create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c
 create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_list.h
 create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
 create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h
 create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_query.h
 create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h
 create mode 100644 drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
 create mode 100644 drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h
 create mode 100644 drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c
 create mode 100644 drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h

diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index d8dd546..b59b0e3 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -321,4 +321,6 @@ config XEN_SYMS
 config XEN_HAVE_VPMU
bool
 
+source "drivers/xen/hyper_dmabuf/Kconfig"
+
 endmenu
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 451e833..a6e253a 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_X86)   += fallback.o
 obj-y  += grant-table.o features.o balloon.o manage.o preempt.o time.o
 obj-y  += events/
 obj-y  += xenbus/
+obj-y  += hyper_dmabuf

[RFC PATCH 02/60] hyper_dmabuf: added a doc for hyper_dmabuf sharing

2017-12-19 Thread Dongwon Kim
High-level description of hyper_dmabuf driver has been added
to "Documentation" directory.

Signed-off-by: Dongwon Kim 
---
 Documentation/hyper-dmabuf-sharing.txt | 734 +
 1 file changed, 734 insertions(+)
 create mode 100644 Documentation/hyper-dmabuf-sharing.txt

diff --git a/Documentation/hyper-dmabuf-sharing.txt 
b/Documentation/hyper-dmabuf-sharing.txt
new file mode 100644
index 000..a6744f8
--- /dev/null
+++ b/Documentation/hyper-dmabuf-sharing.txt
@@ -0,0 +1,734 @@
+Linux Hyper DMABUF Driver
+
+--
+Section 1. Overview
+--
+
+Hyper_DMABUF driver is a Linux device driver running on multiple Virtual
+achines (VMs), which expands DMA-BUF sharing capability to the VM environment
+where multiple different OS instances need to share same physical data without
+data-copy across VMs.
+
+To share a DMA_BUF across VMs, an instance of the Hyper_DMABUF drv on the
+exporting VM (so called, “exporter”) imports a local DMA_BUF from the original
+producer of the buffer, then re-exports it with an unique ID, hyper_dmabuf_id
+for the buffer to the importing VM (so called, “importer”).
+
+Another instance of the Hyper_DMABUF driver on importer registers
+a hyper_dmabuf_id together with reference information for the shared physical
+pages associated with the DMA_BUF to its database when the export happens.
+
+The actual mapping of the DMA_BUF on the importer’s side is done by
+the Hyper_DMABUF driver when user space issues the IOCTL command to access
+the shared DMA_BUF. The Hyper_DMABUF driver works as both an importing and
+exporting driver as is, that is, no special configuration is required.
+Consequently, only a single module per VM is needed to enable cross-VM DMA_BUF
+exchange.
+
+--
+Section 2. Architecture
+--
+
+1. Hyper_DMABUF ID
+
+hyper_dmabuf_id is a global handle for shared DMA BUFs, which is compatible
+across VMs. It is a key used by the importer to retrieve information about
+shared Kernel pages behind the DMA_BUF structure from the IMPORT list. When
+a DMA_BUF is exported to another domain, its hyper_dmabuf_id and META data
+are also kept in the EXPORT list by the exporter for further synchronization
+of control over the DMA_BUF.
+
+hyper_dmabuf_id is “targeted”, meaning it is valid only in exporting (owner of
+the buffer) and importing VMs, where the corresponding hyper_dmabuf_id is
+stored in their database (EXPORT and IMPORT lists).
+
+A user-space application specifies the targeted VM id in the user parameter
+when it calls the IOCTL command to export shared DMA_BUF to another VM.
+
+hyper_dmabuf_id_t is a data type for hyper_dmabuf_id. It is defined as 16-byte
+data structure, and it contains id and rng_key[3] as elements for
+the structure.
+
+typedef struct {
+int id;
+int rng_key[3]; /* 12bytes long random number */
+} hyper_dmabuf_id_t;
+
+The first element in the hyper_dmabuf_id structure, int id is combined data of
+a count number generated by the driver running on the exporter and
+the exporter’s ID. The VM’s ID is a one byte value and located at the field’s
+SB in int id. The remaining three bytes in int id are reserved for a count
+number.
+
+However, there is a limit related to this count number, which is 1000.
+Therefore, only little more than a byte starting from the LSB is actually used
+for storing this count number.
+
+#define HYPER_DMABUF_ID_CREATE(domid, id) \
+domid) & 0xFF) << 24) | ((id) & 0xFF))
+
+This limit on the count number directly means the maximum number of DMA BUFs
+that  can be shared simultaneously by one VM. The second element of
+hyper_dmabuf_id, that is int rng_key[3], is an array of three integers. These
+numbers are generated by Linux’s native random number generation mechanism.
+This field is added to enhance the security of the Hyper DMABUF driver by
+maximizing the entropy of hyper_dmabuf_id (that is, preventing it from being
+guessed by a security attacker).
+
+Once DMA_BUF is no longer shared, the hyper_dmabuf_id associated with
+the DMA_BUF is released, but the count number in hyper_dmabuf_id is saved in
+the ID list for reuse. However, random keys stored in int rng_key[3] are not
+reused. Instead, those keys are always filled with freshly generated random
+keys for security.
+
+2. IOCTLs
+
+a. IOCTL_HYPER_DMABUF_TX_CH_SETUP
+
+This type of IOCTL is used for initialization of a one-directional transmit
+communication channel with a remote domain.
+
+The user space argument for this type of IOCTL is defined as:
+
+struct ioctl_hyper_dmabuf_tx_ch_setup {
+/* IN parameters */
+/* Remote domain id */
+int remote_domain;
+};
+
+b. IOCTL_HYPER_DMABUF_RX_

[RFC PATCH 04/60] hyper_dmabuf: new index, k for pointing a right n-th page

2017-12-19 Thread Dongwon Kim
Need a new index, k, in the hyper_dmabuf_extract_pgs function for
picking up the correct n-th page in contiguous memory space.

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c | 11 ++-
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
index 7cb5c35..3b40ec0 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
@@ -39,7 +39,7 @@ static int hyper_dmabuf_get_num_pgs(struct sg_table *sgt)
 struct hyper_dmabuf_pages_info *hyper_dmabuf_ext_pgs(struct sg_table *sgt)
 {
struct hyper_dmabuf_pages_info *pinfo;
-   int i, j;
+   int i, j, k;
int length;
struct scatterlist *sgl;
 
@@ -57,7 +57,7 @@ struct hyper_dmabuf_pages_info *hyper_dmabuf_ext_pgs(struct 
sg_table *sgt)
pinfo->frst_ofst = sgl->offset;
pinfo->pages[0] = sg_page(sgl);
length = sgl->length - PAGE_SIZE + sgl->offset;
-   i=1;
+   i = 1;
 
while (length > 0) {
pinfo->pages[i] = nth_page(sg_page(sgl), i);
@@ -71,12 +71,12 @@ struct hyper_dmabuf_pages_info *hyper_dmabuf_ext_pgs(struct 
sg_table *sgt)
pinfo->pages[i++] = sg_page(sgl);
length = sgl->length - PAGE_SIZE;
pinfo->nents++;
+   k = 1;
 
while (length > 0) {
-   pinfo->pages[i] = nth_page(sg_page(sgl), i);
+   pinfo->pages[i++] = nth_page(sg_page(sgl), k++);
length -= PAGE_SIZE;
pinfo->nents++;
-   i++;
}
}
 
@@ -535,7 +535,8 @@ static int hyper_dmabuf_ops_attach(struct dma_buf* dmabuf, 
struct device* dev,
printk("hyper_dmabuf::%s Error:send dmabuf sync request 
failed\n", __func__);
}
 
-   return ret;
+   /* Ignoring ret for now */
+   return 0;
 }
 
 static void hyper_dmabuf_ops_detach(struct dma_buf* dmabuf, struct 
dma_buf_attachment *attach)
-- 
2.7.4

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[RFC PATCH 03/60] hyper_dmabuf: re-use dma_buf previously exported if exist

2017-12-19 Thread Dongwon Kim
Now we re-use dma_buf instead of exporting it via normal process
(including new mappings). For this, hyper_dmabuf list entries can
be searched with "struct dma_buf*". Also, ioctl (export_remote) is
modified to just return hyper_dmabuf_id if the specific dmabuf
has already been exported to the target domain.

This patch also includes changes in printk calls for debugging.

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c   | 28 +--
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c | 17 
 drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c  |  4 ++--
 drivers/xen/hyper_dmabuf/hyper_dmabuf_list.h  |  2 +-
 4 files changed, 26 insertions(+), 25 deletions(-)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
index faa5c1b..7cb5c35 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
@@ -532,7 +532,7 @@ static int hyper_dmabuf_ops_attach(struct dma_buf* dmabuf, 
struct device* dev,
HYPER_DMABUF_OPS_ATTACH);
 
if (ret < 0) {
-   printk("send dmabuf sync request failed\n");
+   printk("hyper_dmabuf::%s Error:send dmabuf sync request 
failed\n", __func__);
}
 
return ret;
@@ -552,7 +552,7 @@ static void hyper_dmabuf_ops_detach(struct dma_buf* dmabuf, 
struct dma_buf_attac
HYPER_DMABUF_OPS_DETACH);
 
if (ret < 0) {
-   printk("send dmabuf sync request failed\n");
+   printk("hyper_dmabuf::%s Error:send dmabuf sync request 
failed\n", __func__);
}
 }
 
@@ -586,7 +586,7 @@ static struct sg_table* hyper_dmabuf_ops_map(struct 
dma_buf_attachment *attachme
HYPER_DMABUF_OPS_MAP);
 
if (ret < 0) {
-   printk("send dmabuf sync request failed\n");
+   printk("hyper_dmabuf::%s Error:send dmabuf sync request 
failed\n", __func__);
}
 
return st;
@@ -618,7 +618,7 @@ static void hyper_dmabuf_ops_unmap(struct 
dma_buf_attachment *attachment,
HYPER_DMABUF_OPS_UNMAP);
 
if (ret < 0) {
-   printk("send dmabuf sync request failed\n");
+   printk("hyper_dmabuf::%s Error:send dmabuf sync request 
failed\n", __func__);
}
 }
 
@@ -636,7 +636,7 @@ static void hyper_dmabuf_ops_release(struct dma_buf *dmabuf)
HYPER_DMABUF_OPS_RELEASE);
 
if (ret < 0) {
-   printk("send dmabuf sync request failed\n");
+   printk("hyper_dmabuf::%s Error:send dmabuf sync request 
failed\n", __func__);
}
 }
 
@@ -653,7 +653,7 @@ static int hyper_dmabuf_ops_begin_cpu_access(struct dma_buf 
*dmabuf, enum dma_da
ret = 
hyper_dmabuf_sync_request_and_wait(HYPER_DMABUF_ID_IMPORTER_GET_SDOMAIN_ID(sgt_info->hyper_dmabuf_id),

HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS);
if (ret < 0) {
-   printk("send dmabuf sync request failed\n");
+   printk("hyper_dmabuf::%s Error:send dmabuf sync request 
failed\n", __func__);
}
 
return ret;
@@ -672,7 +672,7 @@ static int hyper_dmabuf_ops_end_cpu_access(struct dma_buf 
*dmabuf, enum dma_data
ret = 
hyper_dmabuf_sync_request_and_wait(HYPER_DMABUF_ID_IMPORTER_GET_SDOMAIN_ID(sgt_info->hyper_dmabuf_id),

HYPER_DMABUF_OPS_END_CPU_ACCESS);
if (ret < 0) {
-   printk("send dmabuf sync request failed\n");
+   printk("hyper_dmabuf::%s Error:send dmabuf sync request 
failed\n", __func__);
}
 
return 0;
@@ -691,7 +691,7 @@ static void *hyper_dmabuf_ops_kmap_atomic(struct dma_buf 
*dmabuf, unsigned long
ret = 
hyper_dmabuf_sync_request_and_wait(HYPER_DMABUF_ID_IMPORTER_GET_SDOMAIN_ID(sgt_info->hyper_dmabuf_id),
HYPER_DMABUF_OPS_KMAP_ATOMIC);
if (ret < 0) {
-   printk("send dmabuf sync request failed\n");
+   printk("hyper_dmabuf::%s Error:send dmabuf sync request 
failed\n", __func__);
}
 
return NULL; /* for now NULL.. need to return the address of mapped 
region */
@@ -710,7 +710,7 @@ static void hyper_dmabuf_ops_kunmap_atomic(struct dma_buf 
*dmabuf, unsigned long
ret = 
hyper_dmabuf_sync_request_and_wait(HYPER_DMABUF_ID_IMPORTER_GET_SDOMAIN_ID(sgt_info->hyper_dmabuf_id),
HYPER_DMABUF_OPS_KUNMAP_ATOMIC);
if (ret < 0) {
-   printk("send dmabu

[RFC PATCH 11/60] hyper_dmabuf: check stack before unmapping/detaching shadow DMA_BUF

2017-12-19 Thread Dongwon Kim
Make sure the list of mapping/attaching activities on the importer VM is not
empty before unmapping/detaching the shadow DMA_BUF for indirect
synchronization.

Signed-off-by: Dongwon Kim 
---
 .../xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c| 68 +-
 1 file changed, 53 insertions(+), 15 deletions(-)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c
index 6ba932f..fa2fa11 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c
@@ -11,6 +11,21 @@
 
 extern struct hyper_dmabuf_private hyper_dmabuf_private;
 
+/* Whenever importer does dma operations from remote domain,
+ * a notification is sent to the exporter so that exporter
+ * issues equivalent dma operation on the original dma buf
+ * for indirect synchronization via shadow operations.
+ *
+ * All ptrs and references (e.g struct sg_table*,
+ * struct dma_buf_attachment) created via these operations on
+ * exporter's side are kept in stack (implemented as circular
+ * linked-lists) separately so that those can be re-referenced
+ * later when unmapping operations are invoked to free those.
+ *
+ * The very first element on the bottom of each stack holds
+ * are what is created when initial exporting is issued so it
+ * should not be modified or released by this fuction.
+ */
 int hyper_dmabuf_remote_sync(int id, int ops)
 {
struct hyper_dmabuf_sgt_info *sgt_info;
@@ -33,7 +48,7 @@ int hyper_dmabuf_remote_sync(int id, int ops)
attachl = kcalloc(1, sizeof(*attachl), GFP_KERNEL);
 
attachl->attach = dma_buf_attach(sgt_info->dma_buf,
-   hyper_dmabuf_private.device);
+hyper_dmabuf_private.device);
 
if (!attachl->attach) {
kfree(attachl);
@@ -45,22 +60,31 @@ int hyper_dmabuf_remote_sync(int id, int ops)
break;
 
case HYPER_DMABUF_OPS_DETACH:
-   attachl = list_first_entry(&sgt_info->active_attached->list,
-   struct attachment_list, list);
-
-   if (!attachl) {
+   if (list_empty(&sgt_info->active_attached->list)) {
printk("dmabuf remote sync::error while processing 
HYPER_DMABUF_OPS_DETACH\n");
+   printk("no more dmabuf attachment left to be 
detached\n");
return -EINVAL;
}
+
+   attachl = list_first_entry(&sgt_info->active_attached->list,
+  struct attachment_list, list);
+
dma_buf_detach(sgt_info->dma_buf, attachl->attach);
list_del(&attachl->list);
kfree(attachl);
break;
 
case HYPER_DMABUF_OPS_MAP:
-   sgtl = kcalloc(1, sizeof(*sgtl), GFP_KERNEL);
+   if (list_empty(&sgt_info->active_attached->list)) {
+   printk("dmabuf remote sync::error while processing 
HYPER_DMABUF_OPS_MAP\n");
+   printk("no more dmabuf attachment left to be 
detached\n");
+   return -EINVAL;
+   }
+
attachl = list_first_entry(&sgt_info->active_attached->list,
-   struct attachment_list, list);
+  struct attachment_list, list);
+
+   sgtl = kcalloc(1, sizeof(*sgtl), GFP_KERNEL);
sgtl->sgt = dma_buf_map_attachment(attachl->attach, 
DMA_BIDIRECTIONAL);
if (!sgtl->sgt) {
kfree(sgtl);
@@ -71,17 +95,20 @@ int hyper_dmabuf_remote_sync(int id, int ops)
break;
 
case HYPER_DMABUF_OPS_UNMAP:
-   attachl = list_first_entry(&sgt_info->active_attached->list,
-   struct attachment_list, list);
-   sgtl = list_first_entry(&sgt_info->active_sgts->list,
-   struct sgt_list, list);
-   if (!attachl || !sgtl) {
+   if (list_empty(&sgt_info->active_sgts->list) ||
+   list_empty(&sgt_info->active_attached->list)) {
printk("dmabuf remote sync::error while processing 
HYPER_DMABUF_OPS_UNMAP\n");
+   printk("no more SGT or attachment left to be freed\n");
return -EINVAL;
}
 
+   attachl = list_first_entry(&sgt_info->active_attached->list,
+  struct attachment_list, list);
+   sgtl = list_first_entry(&sgt_info->active_s

[RFC PATCH 05/60] hyper_dmabuf: skip creating a comm ch if exist for the VM

2017-12-19 Thread Dongwon Kim
hyper_dmabuf_importer_ring_setup creates new channel only if
there is no existing downstream communication channel previously
created for the exporter VM.

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c   | 13 +++--
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c | 20 
 2 files changed, 27 insertions(+), 6 deletions(-)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
index 3b40ec0..6b16e37 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
@@ -827,12 +827,11 @@ static const struct dma_buf_ops hyper_dmabuf_ops = {
 int hyper_dmabuf_export_fd(struct hyper_dmabuf_imported_sgt_info *dinfo, int 
flags)
 {
int fd;
-
struct dma_buf* dmabuf;
 
-/* call hyper_dmabuf_export_dmabuf and create and bind a handle for it
- * then release */
-
+   /* call hyper_dmabuf_export_dmabuf and create
+* and bind a handle for it then release
+*/
dmabuf = hyper_dmabuf_export_dma_buf(dinfo);
 
fd = dma_buf_fd(dmabuf, flags);
@@ -845,9 +844,11 @@ struct dma_buf* hyper_dmabuf_export_dma_buf(struct 
hyper_dmabuf_imported_sgt_inf
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 
exp_info.ops = &hyper_dmabuf_ops;
-   exp_info.size = dinfo->sgt->nents * PAGE_SIZE; /* multiple of 
PAGE_SIZE, not considering offset */
+
+   /* multiple of PAGE_SIZE, not considering offset */
+   exp_info.size = dinfo->sgt->nents * PAGE_SIZE;
exp_info.flags = /* not sure about flag */0;
exp_info.priv = dinfo;
 
return dma_buf_export(&exp_info);
-};
+}
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
index 665cada..90e0c65 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
@@ -12,6 +12,7 @@
 #include "hyper_dmabuf_drv.h"
 #include "hyper_dmabuf_query.h"
 #include "xen/hyper_dmabuf_xen_comm.h"
+#include "xen/hyper_dmabuf_xen_comm_list.h"
 #include "hyper_dmabuf_msg.h"
 
 struct hyper_dmabuf_private {
@@ -31,6 +32,7 @@ static uint32_t hyper_dmabuf_id_gen(void) {
 static int hyper_dmabuf_exporter_ring_setup(void *data)
 {
struct ioctl_hyper_dmabuf_exporter_ring_setup *ring_attr;
+   struct hyper_dmabuf_ring_info_export *ring_info;
int ret = 0;
 
if (!data) {
@@ -39,6 +41,15 @@ static int hyper_dmabuf_exporter_ring_setup(void *data)
}
ring_attr = (struct ioctl_hyper_dmabuf_exporter_ring_setup *)data;
 
+   /* check if the ring ch already exists */
+   ring_info = hyper_dmabuf_find_exporter_ring(ring_attr->remote_domain);
+
+   if (ring_info) {
+   printk("(exporter's) ring ch to domid = %d already exist\ngref 
= %d, port = %d\n",
+   ring_info->rdomain, ring_info->gref_ring, 
ring_info->port);
+   return 0;
+   }
+
ret = hyper_dmabuf_exporter_ringbuf_init(ring_attr->remote_domain,
&ring_attr->ring_refid,
&ring_attr->port);
@@ -49,6 +60,7 @@ static int hyper_dmabuf_exporter_ring_setup(void *data)
 static int hyper_dmabuf_importer_ring_setup(void *data)
 {
struct ioctl_hyper_dmabuf_importer_ring_setup *setup_imp_ring_attr;
+   struct hyper_dmabuf_ring_info_import *ring_info;
int ret = 0;
 
if (!data) {
@@ -58,6 +70,14 @@ static int hyper_dmabuf_importer_ring_setup(void *data)
 
setup_imp_ring_attr = (struct ioctl_hyper_dmabuf_importer_ring_setup 
*)data;
 
+   /* check if the ring ch already exist */
+   ring_info = 
hyper_dmabuf_find_importer_ring(setup_imp_ring_attr->source_domain);
+
+   if (ring_info) {
+   printk("(importer's) ring ch to domid = %d already exist\n", 
ring_info->sdomain);
+   return 0;
+   }
+
/* user need to provide a port number and ref # for the page used as 
ring buffer */
ret = 
hyper_dmabuf_importer_ringbuf_init(setup_imp_ring_attr->source_domain,
 
setup_imp_ring_attr->ring_refid,
-- 
2.7.4

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[RFC PATCH 07/60] hyper_dmabuf: message parsing done via workqueue

2017-12-19 Thread Dongwon Kim
Use workqueue mechanism to delay message parsing done
after exiting from ISR to reduce ISR execution time.

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c|  13 ++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h|   5 +
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c  |   4 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c| 155 ++---
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c   |  75 --
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h   |   7 -
 6 files changed, 152 insertions(+), 107 deletions(-)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
index 0698327..70b4878 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
@@ -1,5 +1,8 @@
 #include/* module_init, module_exit */
 #include  /* version info, MODULE_LICENSE, MODULE_AUTHOR, 
printk() */
+#include 
+#include 
+#include "hyper_dmabuf_drv.h"
 #include "hyper_dmabuf_conf.h"
 #include "hyper_dmabuf_list.h"
 #include "xen/hyper_dmabuf_xen_comm_list.h"
@@ -10,6 +13,8 @@ MODULE_AUTHOR("IOTG-PED, INTEL");
 int register_device(void);
 int unregister_device(void);
 
+struct hyper_dmabuf_private hyper_dmabuf_private;
+
 
/*===*/
 static int hyper_dmabuf_drv_init(void)
 {
@@ -24,6 +29,10 @@ static int hyper_dmabuf_drv_init(void)
 
printk( KERN_NOTICE "initializing database for imported/exported 
dmabufs\n");
 
+   /* device structure initialization */
+   /* currently only does work-queue initialization */
+   hyper_dmabuf_private.work_queue = 
create_workqueue("hyper_dmabuf_wqueue");
+
ret = hyper_dmabuf_table_init();
if (ret < 0) {
return -EINVAL;
@@ -45,6 +54,10 @@ static void hyper_dmabuf_drv_exit(void)
hyper_dmabuf_table_destroy();
hyper_dmabuf_ring_table_init();
 
+   /* destroy workqueue */
+   if (hyper_dmabuf_private.work_queue)
+   destroy_workqueue(hyper_dmabuf_private.work_queue);
+
printk( KERN_NOTICE "dma_buf-src_sink model: Exiting" );
unregister_device();
 }
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
index 2dad9a6..6145d29 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
@@ -1,6 +1,11 @@
 #ifndef __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__
 #define __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__
 
+struct hyper_dmabuf_private {
+struct device *device;
+   struct workqueue_struct *work_queue;
+};
+
 typedef int (*hyper_dmabuf_ioctl_t)(void *data);
 
 struct hyper_dmabuf_ioctl_desc {
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
index af94359..e4d8316 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
@@ -15,9 +15,7 @@
 #include "xen/hyper_dmabuf_xen_comm_list.h"
 #include "hyper_dmabuf_msg.h"
 
-struct hyper_dmabuf_private {
-   struct device *device;
-} hyper_dmabuf_private;
+extern struct hyper_dmabuf_private hyper_dmabuf_private;
 
 static uint32_t hyper_dmabuf_id_gen(void) {
/* TODO: add proper implementation */
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
index 3237e50..0166e61 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
@@ -3,12 +3,23 @@
 #include 
 #include 
 #include 
+#include 
+#include 
+#include "hyper_dmabuf_drv.h"
 #include "hyper_dmabuf_imp.h"
 //#include "hyper_dmabuf_remote_sync.h"
 #include "xen/hyper_dmabuf_xen_comm.h"
 #include "hyper_dmabuf_msg.h"
 #include "hyper_dmabuf_list.h"
 
+extern struct hyper_dmabuf_private hyper_dmabuf_private;
+
+struct cmd_process {
+   struct work_struct work;
+   struct hyper_dmabuf_ring_rq *rq;
+   int domid;
+};
+
 void hyper_dmabuf_create_request(struct hyper_dmabuf_ring_rq *request,
enum hyper_dmabuf_command command, int 
*operands)
 {
@@ -71,18 +82,17 @@ void hyper_dmabuf_create_request(struct 
hyper_dmabuf_ring_rq *request,
}
 }
 
-int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_ring_rq *req)
+void cmd_process_work(struct work_struct *work)
 {
-   uint32_t i, ret;
struct hyper_dmabuf_imported_sgt_info *imported_sgt_info;
-   struct hyper_dmabuf_sgt_info *sgt_info;
-
-   /* make sure req is not NULL (may not be needed) */
-   if (!req) {
-   return -EINVAL;
-   }
+struct hyper_dmabuf_sgt_info *sgt_info;
+   struct cmd_process *proc = container_of(work, str

[RFC PATCH 09/60] hyper_dmabuf: indirect DMA_BUF synchronization via shadowing

2017-12-19 Thread Dongwon Kim
Importer now sends a synchronization request to the
exporter when any DMA_BUF operation on an imported
Hyper_DMABUF is executed (e.g. dma_buf_map and dma_buf_unmap).
This results in a creation of shadow DMA_BUF and exactly same
DMA_BUF operation to be executed on it.

The main purpose of this is to get DMA_BUF synchronized
eventually between the original creator of DMA_BUF and the
end consumer of it running on the importer VM.

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/Makefile  |   1 +
 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c|  90 ++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c  |  52 --
 drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c   |   8 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c|  43 +++--
 .../xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c| 189 +
 .../xen/hyper_dmabuf/hyper_dmabuf_remote_sync.h|   6 +
 drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h |  32 +++-
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c   |  52 +-
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h   |   2 +-
 10 files changed, 397 insertions(+), 78 deletions(-)
 create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c
 create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.h

diff --git a/drivers/xen/hyper_dmabuf/Makefile 
b/drivers/xen/hyper_dmabuf/Makefile
index 0be7445..3459382 100644
--- a/drivers/xen/hyper_dmabuf/Makefile
+++ b/drivers/xen/hyper_dmabuf/Makefile
@@ -7,6 +7,7 @@ ifneq ($(KERNELRELEASE),)
  hyper_dmabuf_list.o \
 hyper_dmabuf_imp.o \
 hyper_dmabuf_msg.o \
+hyper_dmabuf_remote_sync.o \
 xen/hyper_dmabuf_xen_comm.o \
 xen/hyper_dmabuf_xen_comm_list.o
 
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
index 6b16e37..2c78bc1 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
@@ -169,7 +169,8 @@ grant_ref_t 
hyper_dmabuf_create_addressing_tables(grant_ref_t *data_refs, int ne
/*
 * Calculate number of pages needed for 2nd level addresing:
 */
-   int n_2nd_level_pages = (nents/REFS_PER_PAGE + ((nents % REFS_PER_PAGE) 
? 1: 0));/* rounding */
+   int n_2nd_level_pages = (nents/REFS_PER_PAGE +
+   ((nents % REFS_PER_PAGE) ? 1: 0));
int i;
unsigned long gref_page_start;
grant_ref_t *tmp_page;
@@ -187,7 +188,9 @@ grant_ref_t 
hyper_dmabuf_create_addressing_tables(grant_ref_t *data_refs, int ne
 
/* Share 2nd level addressing pages in readonly mode*/
for (i=0; i< n_2nd_level_pages; i++) {
-   addr_refs[i] = gnttab_grant_foreign_access(rdomain, 
virt_to_mfn((unsigned long)tmp_page+i*PAGE_SIZE ), 1);
+   addr_refs[i] = gnttab_grant_foreign_access(rdomain,
+  
virt_to_mfn((unsigned long)tmp_page+i*PAGE_SIZE ),
+  1);
}
 
/*
@@ -213,7 +216,9 @@ grant_ref_t 
hyper_dmabuf_create_addressing_tables(grant_ref_t *data_refs, int ne
}
 
/* Share top level addressing page in readonly mode*/
-   top_level_ref = gnttab_grant_foreign_access(rdomain, 
virt_to_mfn((unsigned long)tmp_page), 1);
+   top_level_ref = gnttab_grant_foreign_access(rdomain,
+   virt_to_mfn((unsigned 
long)tmp_page),
+   1);
 
kfree(addr_refs);
 
@@ -255,7 +260,9 @@ struct page** hyper_dmabuf_get_data_refs(grant_ref_t 
top_level_ref, int domid, i
}
 
addr = (unsigned long)pfn_to_kaddr(page_to_pfn(top_level_page));
-   gnttab_set_map_op(&top_level_map_ops, addr, GNTMAP_host_map | 
GNTMAP_readonly, top_level_ref, domid);
+   gnttab_set_map_op(&top_level_map_ops, addr, GNTMAP_host_map | 
GNTMAP_readonly,
+ top_level_ref, domid);
+
gnttab_set_unmap_op(&top_level_unmap_ops, addr, GNTMAP_host_map | 
GNTMAP_readonly, -1);
 
if (gnttab_map_refs(&top_level_map_ops, NULL, &top_level_page, 1)) {
@@ -282,7 +289,8 @@ struct page** hyper_dmabuf_get_data_refs(grant_ref_t 
top_level_ref, int domid, i
 
for (i = 0; i < n_level2_refs; i++) {
addr = (unsigned 
long)pfn_to_kaddr(page_to_pfn(level2_pages[i]));
-   gnttab_set_map_op(&map_ops[i], addr, GNTMAP_host_map | 
GNTMAP_readonly, top_level_refs[i], domid);
+   gnttab_set_map_op(&map_ops[i], addr, GNTMAP_host_map | 
GNTMAP_readonly,
+ top_level_refs[i], domid);
gnttab_set_unmap_op(&unmap_ops[i], addr, GNTMAP_host_map | 
GNTMA

[RFC PATCH 08/60] hyper_dmabuf: automatic comm channel initialization using xenstore

2017-12-19 Thread Dongwon Kim
From: Mateusz Polrola 

This introduces use of xenstore for creating and managing
communication channels between two VMs in the system.

When hyper_dmabuf driver is loaded in the service VM (host OS),
a new xenstore directory, "/local/domain//data/hyper_dmabuf"
is created in xenstore filesystem. Whenever a new guest OS
creates and initializes its own upstream channel with the service VM,
new directory is created under the main directory created above
as shown here:

/local/domain//data/hyper_dmabuf//port
/local/domain//data/hyper_dmabuf//gref

This patch also adds a "xenstore watch" callback is called
when a new upstream connection is made from another VM (VM-b).
Upon detection, this VM (VM-a) initializes a downstream channel
paired with the detected upstream connection, as shown below.

VM-a (downstream) <- (upstream) VM-b

And as soon as this downstream channel is created, a new upstream
channel from VM-a to VM-b is automatically created and initialized
via "xenstore watch" call back on VM-b.

VM-a (upstream) <- (downstream) VM-b

As a result, there will be bi-directional communication channel
available between two VMs.

When upstream channel is removed (e.g. unloading driver), VM on the
other side is notified and "xenstore watch" callback is invoked.
Via this callback, VM can remove corresponding downstream channel.

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c|  11 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h|  14 --
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c  |  30 +--
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c|  31 +--
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h|   2 -
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c   | 226 +++--
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h   |  18 +-
 .../hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c  |  22 ++
 .../hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h  |   6 +
 9 files changed, 270 insertions(+), 90 deletions(-)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
index 70b4878..5b5dae44 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
@@ -6,6 +6,7 @@
 #include "hyper_dmabuf_conf.h"
 #include "hyper_dmabuf_list.h"
 #include "xen/hyper_dmabuf_xen_comm_list.h"
+#include "xen/hyper_dmabuf_xen_comm.h"
 
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("IOTG-PED, INTEL");
@@ -43,6 +44,11 @@ static int hyper_dmabuf_drv_init(void)
return -EINVAL;
}
 
+   ret = hyper_dmabuf_setup_data_dir();
+   if (ret < 0) {
+   return -EINVAL;
+   }
+
/* interrupt for comm should be registered here: */
return ret;
 }
@@ -52,12 +58,15 @@ static void hyper_dmabuf_drv_exit(void)
 {
/* hash tables for export/import entries and ring_infos */
hyper_dmabuf_table_destroy();
-   hyper_dmabuf_ring_table_init();
+
+   hyper_dmabuf_cleanup_ringbufs();
+   hyper_dmabuf_ring_table_destroy();
 
/* destroy workqueue */
if (hyper_dmabuf_private.work_queue)
destroy_workqueue(hyper_dmabuf_private.work_queue);
 
+   hyper_dmabuf_destroy_data_dir();
printk( KERN_NOTICE "dma_buf-src_sink model: Exiting" );
unregister_device();
 }
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
index 6145d29..7511afb 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
@@ -29,8 +29,6 @@ struct ioctl_hyper_dmabuf_exporter_ring_setup {
/* IN parameters */
/* Remote domain id */
uint32_t remote_domain;
-   grant_ref_t ring_refid; /* assigned by driver, copied to userspace 
after initialization */
-   uint32_t port; /* assigned by driver, copied to userspace after 
initialization */
 };
 
 #define IOCTL_HYPER_DMABUF_IMPORTER_RING_SETUP \
@@ -39,10 +37,6 @@ struct ioctl_hyper_dmabuf_importer_ring_setup {
/* IN parameters */
/* Source domain id */
uint32_t source_domain;
-   /* Ring shared page refid */
-   grant_ref_t ring_refid;
-   /* Port number */
-   uint32_t port;
 };
 
 #define IOCTL_HYPER_DMABUF_EXPORT_REMOTE \
@@ -95,12 +89,4 @@ struct ioctl_hyper_dmabuf_query {
uint32_t info;
 };
 
-#define IOCTL_HYPER_DMABUF_REMOTE_EXPORTER_RING_SETUP \
-_IOC(_IOC_NONE, 'G', 6, sizeof(struct 
ioctl_hyper_dmabuf_remote_exporter_ring_setup))
-struct ioctl_hyper_dmabuf_remote_exporter_ring_setup {
-   /* in parameters */
-   uint32_t rdomain; /* id of remote domain where exporter's ring need to 
be setup */
-   uint32_t info;
-};
-
 #endif //__LINUX_PUBLIC_HYPER_DMABUF_DRV_H__
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c 
b/drivers/xen

[RFC PATCH 06/60] hyper_dmabuf: map shared pages only once when importing.

2017-12-19 Thread Dongwon Kim
From: Mateusz Polrola 

If shared pages of buffer were already mapped on importer side, do
not map them again on next request to export fd.

Signed-off-by: Mateusz Polrola 
Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c | 20 +++-
 1 file changed, 11 insertions(+), 9 deletions(-)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
index 90e0c65..af94359 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
@@ -203,7 +203,7 @@ static int hyper_dmabuf_export_fd_ioctl(void *data)
 
if (!data) {
printk("user data is NULL\n");
-   return -1;
+   return -EINVAL;
}
 
export_fd_attr = (struct ioctl_hyper_dmabuf_export_fd *)data;
@@ -218,15 +218,17 @@ static int hyper_dmabuf_export_fd_ioctl(void *data)
imported_sgt_info->last_len, imported_sgt_info->nents,

HYPER_DMABUF_ID_IMPORTER_GET_SDOMAIN_ID(imported_sgt_info->hyper_dmabuf_id));
 
-   imported_sgt_info->sgt = hyper_dmabuf_map_pages(imported_sgt_info->gref,
-   imported_sgt_info->frst_ofst,
-   imported_sgt_info->last_len,
-   imported_sgt_info->nents,
-   
HYPER_DMABUF_ID_IMPORTER_GET_SDOMAIN_ID(imported_sgt_info->hyper_dmabuf_id),
-   
&imported_sgt_info->shared_pages_info);
-
if (!imported_sgt_info->sgt) {
-   return -1;
+   imported_sgt_info->sgt = 
hyper_dmabuf_map_pages(imported_sgt_info->gref,
+   
imported_sgt_info->frst_ofst,
+   
imported_sgt_info->last_len,
+   
imported_sgt_info->nents,
+   
HYPER_DMABUF_ID_IMPORTER_GET_SDOMAIN_ID(imported_sgt_info->hyper_dmabuf_id),
+   
&imported_sgt_info->shared_pages_info);
+   if (!imported_sgt_info->sgt) {
+   printk("Failed to create sgt\n");
+   return -EINVAL;
+   }
}
 
export_fd_attr->fd = hyper_dmabuf_export_fd(imported_sgt_info, 
export_fd_attr->flags);
-- 
2.7.4

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[RFC PATCH 14/60] hyper_dmabuf: clean-up process based on file->f_count

2017-12-19 Thread Dongwon Kim
Now release funcs check f_count for the file instead of
our own refcount because it can't track dma_buf_get.

Also, importer now sends out HYPER_DMABUF_FIRST_EXPORT
to let the exporter know corresponding dma-buf has ever
exported on importer's side. This is to cover the case
where exporter exports a buffer and unexport it right
away before importer does first export_fd (there won't
be any dma_buf_release notification to exporter since SGT
was never created by importer.)

After importer creates its own SGT, only condition it is
completely released is that dma_buf is unexported
(so valid == 0) and user app closes all locally
assigned FDs (when dma_buf_release is called.)
Otherwise, it needs to stay there since previously exported
FD can be reused.

Also includes minor changes;

1. flag had been changed to "bool valid" for conciseness.
2. added bool importer_exported in sgt_info as an indicator
   for usage of buffer on the importer.
3. num of pages is added (nents) to hyper_dmabuf_sgt_info
   to keep the size info in EXPORT list.
4. more minor changes and clean-ups.

Signed-off-by: Dongwon Kim 
Signed-off-by: Mateusz Polrola 
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c|  1 +
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h|  1 +
 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c| 76 -
 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.h|  5 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c  | 78 --
 drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c   |  2 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_list.h   |  2 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c| 34 --
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h|  2 +
 .../xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c| 10 ++-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h | 19 +++---
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c   |  6 +-
 12 files changed, 143 insertions(+), 93 deletions(-)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
index 5b5dae44..5a7cfa5 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
@@ -33,6 +33,7 @@ static int hyper_dmabuf_drv_init(void)
/* device structure initialization */
/* currently only does work-queue initialization */
hyper_dmabuf_private.work_queue = 
create_workqueue("hyper_dmabuf_wqueue");
+   hyper_dmabuf_private.domid = hyper_dmabuf_get_domid();
 
ret = hyper_dmabuf_table_init();
if (ret < 0) {
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
index 8778a19..ff883e1 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
@@ -3,6 +3,7 @@
 
 struct hyper_dmabuf_private {
 struct device *device;
+   int domid;
struct workqueue_struct *work_queue;
 };
 
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
index f258981..fa445e5 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
@@ -13,6 +13,14 @@
 
 #define REFS_PER_PAGE (PAGE_SIZE/sizeof(grant_ref_t))
 
+int dmabuf_refcount(struct dma_buf *dma_buf)
+{
+   if ((dma_buf != NULL) && (dma_buf->file != NULL))
+   return file_count(dma_buf->file);
+
+   return -1;
+}
+
 /* return total number of pages referecned by a sgt
  * for pre-calculation of # of pages behind a given sgt
  */
@@ -368,8 +376,8 @@ int hyper_dmabuf_cleanup_gref_table(struct 
hyper_dmabuf_sgt_info *sgt_info) {
struct hyper_dmabuf_shared_pages_info *shared_pages_info = 
&sgt_info->shared_pages_info;
 
grant_ref_t *ref = shared_pages_info->top_level_page;
-   int n_2nd_level_pages = 
(sgt_info->active_sgts->sgt->nents/REFS_PER_PAGE +
-   ((sgt_info->active_sgts->sgt->nents % 
REFS_PER_PAGE) ? 1: 0));
+   int n_2nd_level_pages = (sgt_info->nents/REFS_PER_PAGE +
+   ((sgt_info->nents % REFS_PER_PAGE) ? 1: 0));
 
 
if (shared_pages_info->data_refs == NULL ||
@@ -388,26 +396,28 @@ int hyper_dmabuf_cleanup_gref_table(struct 
hyper_dmabuf_sgt_info *sgt_info) {
if (!gnttab_end_foreign_access_ref(ref[i], 1)) {
printk("refid still in use!!!\n");
}
+   gnttab_free_grant_reference(ref[i]);
i++;
}
free_pages((unsigned long)shared_pages_info->addr_pages, i);
 
+
/* End foreign access for top level addressing page */
if (gnttab_query_foreign_access(shared_pages_info->top_level_ref)) {
printk("refid not shared !!\n");
}
-   if (!gnttab_end_foreign_access_ref(shared_pages_info->to

[RFC PATCH 12/60] hyper_dmabuf: two different unexporting mechanisms

2017-12-19 Thread Dongwon Kim
Unexporting on the exporter's side now has two options: one is
to just remove and free everything to literally "disconnect"
from the importer; the other is to return failure if any apps
running on importer is still attached or DMAing. Currently whether
forcing or unforcing it is determined by how "FORCED_UNEXPORING"
is defined.

Also, the word "destroy" in IOCTL commands and several functions
have been modified to "unexport", which sounds more reasonable.

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h   |  8 +--
 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c   | 94 ++-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.h   |  4 ++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c | 20 +++---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c   | 62 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h   |  4 +-
 6 files changed, 142 insertions(+), 50 deletions(-)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
index 7511afb..8778a19 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
@@ -65,11 +65,11 @@ struct ioctl_hyper_dmabuf_export_fd {
uint32_t fd;
 };
 
-#define IOCTL_HYPER_DMABUF_DESTROY \
-_IOC(_IOC_NONE, 'G', 4, sizeof(struct ioctl_hyper_dmabuf_destroy))
-struct ioctl_hyper_dmabuf_destroy {
+#define IOCTL_HYPER_DMABUF_UNEXPORT \
+_IOC(_IOC_NONE, 'G', 4, sizeof(struct ioctl_hyper_dmabuf_unexport))
+struct ioctl_hyper_dmabuf_unexport {
/* IN parameters */
-   /* hyper dmabuf id to be destroyed */
+   /* hyper dmabuf id to be unexported */
uint32_t hyper_dmabuf_id;
/* OUT parameters */
/* Status of request */
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
index 2c78bc1..06bd8e5 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
@@ -104,7 +104,7 @@ struct sg_table* hyper_dmabuf_create_sgt(struct page 
**pages,
 
ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
if (ret) {
-   kfree(sgt);
+   sg_free_table(sgt);
return NULL;
}
 
@@ -125,6 +125,12 @@ struct sg_table* hyper_dmabuf_create_sgt(struct page 
**pages,
return sgt;
 }
 
+/* free sg_table */
+void hyper_dmabuf_free_sgt(struct sg_table* sgt)
+{
+   sg_free_table(sgt);
+}
+
 /*
  * Creates 2 level page directory structure for referencing shared pages.
  * Top level page is a single page that contains up to 1024 refids that
@@ -512,6 +518,92 @@ struct sg_table* hyper_dmabuf_map_pages(grant_ref_t 
top_level_gref, int frst_ofs
return st;
 }
 
+int hyper_dmabuf_cleanup_sgt_info(struct hyper_dmabuf_sgt_info *sgt_info, int 
force)
+{
+   struct sgt_list *sgtl;
+   struct attachment_list *attachl;
+   struct kmap_vaddr_list *va_kmapl;
+   struct vmap_vaddr_list *va_vmapl;
+
+   if (!sgt_info) {
+   printk("invalid hyper_dmabuf_id\n");
+   return -EINVAL;
+   }
+
+   /* if force != 1, sgt_info can be released only if
+* there's no activity on exported dma-buf on importer
+* side.
+*/
+   if (!force &&
+   (!list_empty(&sgt_info->va_kmapped->list) ||
+   !list_empty(&sgt_info->va_vmapped->list) ||
+   !list_empty(&sgt_info->active_sgts->list) ||
+   !list_empty(&sgt_info->active_attached->list))) {
+   printk("dma-buf is used by importer\n");
+   return -EPERM;
+   }
+
+   while (!list_empty(&sgt_info->va_kmapped->list)) {
+   va_kmapl = list_first_entry(&sgt_info->va_kmapped->list,
+   struct kmap_vaddr_list, list);
+
+   dma_buf_kunmap(sgt_info->dma_buf, 1, va_kmapl->vaddr);
+   list_del(&va_kmapl->list);
+   kfree(va_kmapl);
+   }
+
+   while (!list_empty(&sgt_info->va_vmapped->list)) {
+   va_vmapl = list_first_entry(&sgt_info->va_vmapped->list,
+   struct vmap_vaddr_list, list);
+
+   dma_buf_vunmap(sgt_info->dma_buf, va_vmapl->vaddr);
+   list_del(&va_vmapl->list);
+   kfree(va_vmapl);
+   }
+
+   while (!list_empty(&sgt_info->active_sgts->list)) {
+   attachl = list_first_entry(&sgt_info->active_attached->list,
+  struct attachment_list, list);
+
+   sgtl = list_first_entry(&sgt_info->active_sgts->list,
+   struct sgt_list, list);
+
+   dma_buf_unmap_attachment(attachl->attach

[RFC PATCH 17/60] hyper_dmabuf: use dynamic debug macros for logging

2017-12-19 Thread Dongwon Kim
From: Mateusz Polrola 

Replaces printk to debug macros

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c|  4 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h|  4 ++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c| 46 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c  | 50 +--
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c| 26 +---
 .../xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c| 60 --
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c   | 73 +++---
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c| 50 +--
 8 files changed, 206 insertions(+), 107 deletions(-)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
index ddcc955..9d99769 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
@@ -1,6 +1,7 @@
 #include 
 #include 
 #include 
+#include 
 #include "hyper_dmabuf_conf.h"
 #include "hyper_dmabuf_msg.h"
 #include "hyper_dmabuf_drv.h"
@@ -36,7 +37,8 @@ static int hyper_dmabuf_drv_init(void)
hyper_dmabuf_private.backend_ops = &xen_backend_ops;
 #endif
 
-   printk( KERN_NOTICE "initializing database for imported/exported 
dmabufs\n");
+   dev_info(hyper_dmabuf_private.device,
+"initializing database for imported/exported dmabufs\n");
 
/* device structure initialization */
/* currently only does work-queue initialization */
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
index 03d77d7..c16e8d4 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
@@ -1,6 +1,10 @@
 #ifndef __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__
 #define __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__
 
+#include 
+
+struct hyper_dmabuf_req;
+
 struct list_reusable_id {
int id;
struct list_head list;
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
index 0f104b9..b61d29a 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
@@ -155,7 +155,7 @@ int hyper_dmabuf_cleanup_sgt_info(struct 
hyper_dmabuf_sgt_info *sgt_info, int fo
struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
 
if (!sgt_info) {
-   printk("invalid hyper_dmabuf_id\n");
+   dev_err(hyper_dmabuf_private.device, "invalid 
hyper_dmabuf_id\n");
return -EINVAL;
}
 
@@ -168,7 +168,7 @@ int hyper_dmabuf_cleanup_sgt_info(struct 
hyper_dmabuf_sgt_info *sgt_info, int fo
!list_empty(&sgt_info->va_vmapped->list) ||
!list_empty(&sgt_info->active_sgts->list) ||
!list_empty(&sgt_info->active_attached->list))) {
-   printk("dma-buf is used by importer\n");
+   dev_warn(hyper_dmabuf_private.device, "dma-buf is used by 
importer\n");
return -EPERM;
}
 
@@ -273,7 +273,8 @@ static int hyper_dmabuf_ops_attach(struct dma_buf* dmabuf, 
struct device* dev,
 HYPER_DMABUF_OPS_ATTACH);
 
if (ret < 0) {
-   printk("hyper_dmabuf::%s Error:send dmabuf sync request 
failed\n", __func__);
+   dev_err(hyper_dmabuf_private.device,
+   "hyper_dmabuf::%s Error:send dmabuf sync request 
failed\n", __func__);
return ret;
}
 
@@ -294,7 +295,8 @@ static void hyper_dmabuf_ops_detach(struct dma_buf* dmabuf, 
struct dma_buf_attac
 HYPER_DMABUF_OPS_DETACH);
 
if (ret < 0) {
-   printk("hyper_dmabuf::%s Error:send dmabuf sync request 
failed\n", __func__);
+   dev_err(hyper_dmabuf_private.device,
+   "hyper_dmabuf::%s Error:send dmabuf sync request 
failed\n", __func__);
}
 }
 
@@ -331,7 +333,8 @@ static struct sg_table* hyper_dmabuf_ops_map(struct 
dma_buf_attachment *attachme
kfree(page_info);
 
if (ret < 0) {
-   printk("hyper_dmabuf::%s Error:send dmabuf sync request 
failed\n", __func__);
+   dev_err(hyper_dmabuf_private.device,
+   "hyper_dmabuf::%s Error:send dmabuf sync request 
failed\n", __func__);
}
 
return st;
@@ -363,7 +366,8 @@ static void hyper_dmabuf_ops_unmap(struct 
dma_buf_attachment *attachment,
HYPER_DMABUF_OPS_UNMAP);
 
if (ret < 0) {
-   printk("hyper_dmabuf::%s Error:send dmabuf sync request 
failed\n", __func__);
+   dev_err(hyper_dmabuf_pr

[RFC PATCH 10/60] hyper_dmabuf: make sure to free memory to prevent leak

2017-12-19 Thread Dongwon Kim
From: Mateusz Polrola 

In hyper_dmabuf_export_remote, page_info->pages needs to
be freed before freeing page_info.

Also, info_entry in hyper_dmabuf_remove_exported/imported
and hyper_dmabuf_remove_exporter/importer_ring needs to
be freed after removal of an entry.

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c | 1 +
 drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c  | 2 ++
 drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c  | 2 ++
 drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c | 2 ++
 4 files changed, 7 insertions(+)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
index bace8b2..6f100ef 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
@@ -191,6 +191,7 @@ static int hyper_dmabuf_export_remote(void *data)
/* free msg */
kfree(req);
/* free page_info */
+   kfree(page_info->pages);
kfree(page_info);
 
return ret;
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c
index 2b3ef6b..1420df9 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c
@@ -98,6 +98,7 @@ int hyper_dmabuf_remove_exported(int id)
hash_for_each(hyper_dmabuf_hash_exported, bkt, info_entry, node)
if(info_entry->info->hyper_dmabuf_id == id) {
hash_del(&info_entry->node);
+   kfree(info_entry);
return 0;
}
 
@@ -112,6 +113,7 @@ int hyper_dmabuf_remove_imported(int id)
hash_for_each(hyper_dmabuf_hash_imported, bkt, info_entry, node)
if(info_entry->info->hyper_dmabuf_id == id) {
hash_del(&info_entry->node);
+   kfree(info_entry);
return 0;
}
 
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c 
b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
index 576085f..116850e 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
@@ -320,6 +320,8 @@ int hyper_dmabuf_importer_ringbuf_init(int sdomain)
ring_info->unmap_op.handle = ops[0].handle;
}
 
+   kfree(ops);
+
sring = (struct hyper_dmabuf_sring*) 
pfn_to_kaddr(page_to_pfn(shared_ring));
 
BACK_RING_INIT(&ring_info->ring_back, sring, PAGE_SIZE);
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c 
b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c
index 5778468..a068276 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c
@@ -85,6 +85,7 @@ int hyper_dmabuf_remove_exporter_ring(int domid)
hash_for_each(hyper_dmabuf_hash_exporter_ring, bkt, info_entry, node)
if(info_entry->info->rdomain == domid) {
hash_del(&info_entry->node);
+   kfree(info_entry);
return 0;
}
 
@@ -99,6 +100,7 @@ int hyper_dmabuf_remove_importer_ring(int domid)
hash_for_each(hyper_dmabuf_hash_importer_ring, bkt, info_entry, node)
if(info_entry->info->sdomain == domid) {
hash_del(&info_entry->node);
+   kfree(info_entry);
return 0;
}
 
-- 
2.7.4

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[RFC PATCH 13/60] hyper_dmabuf: postponing cleanup of hyper_DMABUF

2017-12-19 Thread Dongwon Kim
From: Mateusz Polrola 

Immediate clean up of buffer is not possible if the buffer is
actively used by importer. In this case, we need to postpone
freeing hyper_DMABUF until the last consumer unmaps and releases
the buffer on impoter VM. New reference count is added for tracking
usage by importers.

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c| 37 ++--
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c  | 34 +++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c| 49 +++---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h|  1 -
 .../xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c| 14 +--
 drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h |  9 +++-
 6 files changed, 95 insertions(+), 49 deletions(-)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
index 06bd8e5..f258981 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
@@ -9,6 +9,7 @@
 #include "hyper_dmabuf_imp.h"
 #include "xen/hyper_dmabuf_xen_comm.h"
 #include "hyper_dmabuf_msg.h"
+#include "hyper_dmabuf_list.h"
 
 #define REFS_PER_PAGE (PAGE_SIZE/sizeof(grant_ref_t))
 
@@ -104,7 +105,7 @@ struct sg_table* hyper_dmabuf_create_sgt(struct page 
**pages,
 
ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
if (ret) {
-   sg_free_table(sgt);
+   hyper_dmabuf_free_sgt(sgt);
return NULL;
}
 
@@ -129,6 +130,7 @@ struct sg_table* hyper_dmabuf_create_sgt(struct page 
**pages,
 void hyper_dmabuf_free_sgt(struct sg_table* sgt)
 {
sg_free_table(sgt);
+   kfree(sgt);
 }
 
 /*
@@ -583,6 +585,9 @@ int hyper_dmabuf_cleanup_sgt_info(struct 
hyper_dmabuf_sgt_info *sgt_info, int fo
kfree(attachl);
}
 
+   /* Start cleanup of buffer in reverse order to exporting */
+   hyper_dmabuf_cleanup_gref_table(sgt_info);
+
/* unmap dma-buf */
dma_buf_unmap_attachment(sgt_info->active_attached->attach,
 sgt_info->active_sgts->sgt,
@@ -594,8 +599,6 @@ int hyper_dmabuf_cleanup_sgt_info(struct 
hyper_dmabuf_sgt_info *sgt_info, int fo
/* close connection to dma-buf completely */
dma_buf_put(sgt_info->dma_buf);
 
-   hyper_dmabuf_cleanup_gref_table(sgt_info);
-
kfree(sgt_info->active_sgts);
kfree(sgt_info->active_attached);
kfree(sgt_info->va_kmapped);
@@ -694,6 +697,9 @@ static struct sg_table* hyper_dmabuf_ops_map(struct 
dma_buf_attachment *attachme
ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
HYPER_DMABUF_OPS_MAP);
 
+   kfree(page_info->pages);
+   kfree(page_info);
+
if (ret < 0) {
printk("hyper_dmabuf::%s Error:send dmabuf sync request 
failed\n", __func__);
}
@@ -741,12 +747,34 @@ static void hyper_dmabuf_ops_release(struct dma_buf 
*dmabuf)
 
sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
 
+   if (sgt_info) {
+   /* dmabuf fd is being released - decrease refcount */
+   sgt_info->ref_count--;
+
+   /* if no one else in that domain is using that buffer, unmap it 
for now */
+   if (sgt_info->ref_count == 0) {
+   hyper_dmabuf_cleanup_imported_pages(sgt_info);
+   hyper_dmabuf_free_sgt(sgt_info->sgt);
+   sgt_info->sgt = NULL;
+   }
+   }
+
ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
HYPER_DMABUF_OPS_RELEASE);
 
if (ret < 0) {
printk("hyper_dmabuf::%s Error:send dmabuf sync request 
failed\n", __func__);
}
+
+   /*
+* Check if buffer is still valid and if not remove it from imported 
list.
+* That has to be done after sending sync request
+*/
+   if (sgt_info && sgt_info->ref_count == 0 &&
+   sgt_info->flags == HYPER_DMABUF_SGT_INVALID) {
+   hyper_dmabuf_remove_imported(sgt_info->hyper_dmabuf_id);
+   kfree(sgt_info);
+   }
 }
 
 static int hyper_dmabuf_ops_begin_cpu_access(struct dma_buf *dmabuf, enum 
dma_data_direction dir)
@@ -944,6 +972,9 @@ int hyper_dmabuf_export_fd(struct 
hyper_dmabuf_imported_sgt_info *dinfo, int fla
 
fd = dma_buf_fd(dmabuf, flags);
 
+   /* dmabuf fd is exported for given bufer - increase its ref count */
+   dinfo->ref_count++;
+
return fd;
 }
 
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
index a222c1b..c57acafe 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
+++ b/drivers

[RFC PATCH 15/60] hyper_dmabuf: reusing previously released hyper_dmabuf_id

2017-12-19 Thread Dongwon Kim
Now, released hyper_dmabuf_ids are stored in a stack -
(hyper_dmabuf_private.id_queue) for reuse. This is to prevent
overflow of ids for buffers. We also limit maximum number for
the id to 1000 for the stability and optimal performance.

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/Makefile  |  1 +
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c|  5 ++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h|  6 ++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c | 76 ++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_id.h | 24 +++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c|  1 +
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c  | 15 ++---
 .../xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c|  3 +
 drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h |  9 ---
 9 files changed, 120 insertions(+), 20 deletions(-)
 create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c
 create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_id.h

diff --git a/drivers/xen/hyper_dmabuf/Makefile 
b/drivers/xen/hyper_dmabuf/Makefile
index 3459382..c9b8b7f 100644
--- a/drivers/xen/hyper_dmabuf/Makefile
+++ b/drivers/xen/hyper_dmabuf/Makefile
@@ -7,6 +7,7 @@ ifneq ($(KERNELRELEASE),)
  hyper_dmabuf_list.o \
 hyper_dmabuf_imp.o \
 hyper_dmabuf_msg.o \
+hyper_dmabuf_id.o \
 hyper_dmabuf_remote_sync.o \
 xen/hyper_dmabuf_xen_comm.o \
 xen/hyper_dmabuf_xen_comm_list.o
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
index 5a7cfa5..66d6cb9 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
@@ -5,6 +5,7 @@
 #include "hyper_dmabuf_drv.h"
 #include "hyper_dmabuf_conf.h"
 #include "hyper_dmabuf_list.h"
+#include "hyper_dmabuf_id.h"
 #include "xen/hyper_dmabuf_xen_comm_list.h"
 #include "xen/hyper_dmabuf_xen_comm.h"
 
@@ -67,6 +68,10 @@ static void hyper_dmabuf_drv_exit(void)
if (hyper_dmabuf_private.work_queue)
destroy_workqueue(hyper_dmabuf_private.work_queue);
 
+   /* destroy id_queue */
+   if (hyper_dmabuf_private.id_queue)
+   destroy_reusable_list();
+
hyper_dmabuf_destroy_data_dir();
printk( KERN_NOTICE "dma_buf-src_sink model: Exiting" );
unregister_device();
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
index ff883e1..37b0cc1 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
@@ -1,10 +1,16 @@
 #ifndef __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__
 #define __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__
 
+struct list_reusable_id {
+   int id;
+   struct list_head list;
+};
+
 struct hyper_dmabuf_private {
 struct device *device;
int domid;
struct workqueue_struct *work_queue;
+   struct list_reusable_id *id_queue;
 };
 
 typedef int (*hyper_dmabuf_ioctl_t)(void *data);
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c
new file mode 100644
index 000..7bbb179
--- /dev/null
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c
@@ -0,0 +1,76 @@
+#include 
+#include 
+#include "hyper_dmabuf_drv.h"
+#include "hyper_dmabuf_id.h"
+
+extern struct hyper_dmabuf_private hyper_dmabuf_private;
+
+void store_reusable_id(int id)
+{
+   struct list_reusable_id *reusable_head = hyper_dmabuf_private.id_queue;
+   struct list_reusable_id *new_reusable;
+
+   new_reusable = kmalloc(sizeof(*new_reusable), GFP_KERNEL);
+   new_reusable->id = id;
+
+   list_add(&new_reusable->list, &reusable_head->list);
+}
+
+static int retrieve_reusable_id(void)
+{
+   struct list_reusable_id *reusable_head = hyper_dmabuf_private.id_queue;
+
+   /* check there is reusable id */
+   if (!list_empty(&reusable_head->list)) {
+   reusable_head = list_first_entry(&reusable_head->list,
+struct list_reusable_id,
+list);
+
+   list_del(&reusable_head->list);
+   return reusable_head->id;
+   }
+
+   return -1;
+}
+
+void destroy_reusable_list(void)
+{
+   struct list_reusable_id *reusable_head = hyper_dmabuf_private.id_queue;
+   struct list_reusable_id *temp_head;
+
+   if (reusable_head) {
+   /* freeing mem space all reusable ids in the stack */
+   while (!list_empty(&reusable_head->list)) {
+   temp_head = list_first_entry(&reusable_head-

[RFC PATCH 21/60] hyper_dmabuf: exposing drv information using sysfs

2017-12-19 Thread Dongwon Kim
From: Michał Janiszewski 

This adds two entries in SYSFS with information about imported
and exported entries. The information exposed contains details
about number of pages, whether a buffer is valid or not, and
importer/exporter count.

Sysfs for hyper_dmabuf can be enabled by setting a new config
option, "CONFIG_HYPER_DMABUF_SYSFS" to 'yes'.

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/Kconfig |  7 +++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c  | 12 -
 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c  |  2 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c | 74 
 drivers/xen/hyper_dmabuf/hyper_dmabuf_list.h |  3 ++
 5 files changed, 96 insertions(+), 2 deletions(-)

diff --git a/drivers/xen/hyper_dmabuf/Kconfig b/drivers/xen/hyper_dmabuf/Kconfig
index 75e1f96..56633a2 100644
--- a/drivers/xen/hyper_dmabuf/Kconfig
+++ b/drivers/xen/hyper_dmabuf/Kconfig
@@ -11,4 +11,11 @@ config HYPER_DMABUF_XEN
help
  Configuring hyper_dmabuf driver for XEN hypervisor
 
+config HYPER_DMABUF_SYSFS
+   bool "Enable sysfs information about hyper DMA buffers"
+   default y
+   help
+ Expose information about imported and exported buffers using
+ hyper_dmabuf driver
+
 endmenu
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
index 9d99769..3fc30e6 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
@@ -22,7 +22,7 @@ int unregister_device(void);
 struct hyper_dmabuf_private hyper_dmabuf_private;
 
 
/*===*/
-static int hyper_dmabuf_drv_init(void)
+static int __init hyper_dmabuf_drv_init(void)
 {
int ret = 0;
 
@@ -51,10 +51,16 @@ static int hyper_dmabuf_drv_init(void)
}
 
ret = hyper_dmabuf_private.backend_ops->init_comm_env();
+   if (ret < 0) {
+   return -EINVAL;
+   }
 
+#ifdef CONFIG_HYPER_DMABUF_SYSFS
+   ret = hyper_dmabuf_register_sysfs(hyper_dmabuf_private.device);
if (ret < 0) {
return -EINVAL;
}
+#endif
 
/* interrupt for comm should be registered here: */
return ret;
@@ -63,6 +69,10 @@ static int hyper_dmabuf_drv_init(void)
 
/*---*/
 static void hyper_dmabuf_drv_exit(void)
 {
+#ifdef CONFIG_HYPER_DMABUF_SYSFS
+   hyper_dmabuf_unregister_sysfs(hyper_dmabuf_private.device);
+#endif
+
/* hash tables for export/import entries and ring_infos */
hyper_dmabuf_table_destroy();
 
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
index 9b05063..924710f 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
@@ -24,7 +24,7 @@ int dmabuf_refcount(struct dma_buf *dma_buf)
return -1;
 }
 
-/* return total number of pages referecned by a sgt
+/* return total number of pages referenced by a sgt
  * for pre-calculation of # of pages behind a given sgt
  */
 static int hyper_dmabuf_get_num_pgs(struct sg_table *sgt)
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c
index 18731de..1d224c4 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c
@@ -11,6 +11,80 @@
 DECLARE_HASHTABLE(hyper_dmabuf_hash_imported, MAX_ENTRY_IMPORTED);
 DECLARE_HASHTABLE(hyper_dmabuf_hash_exported, MAX_ENTRY_EXPORTED);
 
+#ifdef CONFIG_HYPER_DMABUF_SYSFS
+static ssize_t hyper_dmabuf_imported_show(struct device *drv, struct 
device_attribute *attr, char *buf)
+{
+   struct hyper_dmabuf_info_entry_imported *info_entry;
+   int bkt;
+   ssize_t count = 0;
+   size_t total = 0;
+
+   hash_for_each(hyper_dmabuf_hash_imported, bkt, info_entry, node) {
+   int id = info_entry->info->hyper_dmabuf_id;
+   int nents = info_entry->info->nents;
+   bool valid = info_entry->info->valid;
+   int num_importers = info_entry->info->num_importers;
+   total += nents;
+   count += scnprintf(buf + count, PAGE_SIZE - count, "id:%d, 
nents:%d, v:%c, numi:%d\n",
+  id, nents, (valid ? 't' : 'f'), 
num_importers);
+   }
+   count += scnprintf(buf + count, PAGE_SIZE - count, "total nents: %lu\n",
+  total);
+
+   return count;
+}
+
+static ssize_t hyper_dmabuf_exported_show(struct device *drv, struct 
device_attribute *attr, char *buf)
+{
+   struct hyper_dmabuf_info_entry_exported *info_entry;
+   int bkt;
+   ssize_t count = 0;
+   size_t total = 0;
+
+   hash_for_each(hyper_dm

[RFC PATCH 16/60] hyper_dmabuf: define hypervisor specific backend API

2017-12-19 Thread Dongwon Kim
For adoption of hyper_dmabuf driver to various hypervisors
other than Xen, a "backend" layer is defined and separated out
from existing one-body structure.

"Backend" is basically a list of entry points of function calls
that provides method to do Kernel's page-level sharing and inter
VMs communication using hypervisor's native mechanism (hypercall).

All backend APIs are listed up in "struct hyper_dmabuf_backend_ops"
as shown below.

struct hyper_dmabuf_backend_ops {
/* retrieving id of current virtual machine */
int (*get_vm_id)(void);

/* get pages shared via hypervisor-specific method */
int (*share_pages)(struct page **, int, int, void **);

/* make shared pages unshared via hypervisor specific method */
int (*unshare_pages)(void **, int);

/* map remotely shared pages on importer's side via
 * hypervisor-specific method
 */
struct page ** (*map_shared_pages)(int, int, int, void **);

/* unmap and free shared pages on importer's side via
 * hypervisor-specific method
 */
int (*unmap_shared_pages)(void **, int);

/* initialize communication environment */
int (*init_comm_env)(void);

void (*destroy_comm)(void);

/* upstream ch setup (receiving and responding) */
int (*init_rx_ch)(int);

/* downstream ch setup (transmitting and parsing responses) */
int (*init_tx_ch)(int);

int (*send_req)(int, struct hyper_dmabuf_req *, int);
};

Within this new structure, only backend APIs need to be re-designed or
replaced with new ones when porting this sharing model to a different
hypervisor environment, which is a lot simpler than completely redesigning
whole driver for a new hypervisor.

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/Makefile  |  11 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_conf.h   |   1 -
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c|  33 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h| 112 ++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c |   6 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c| 426 ++---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.h|  14 -
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c  | 134 +++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.h  |  87 +
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c|  52 ++-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h|  23 +-
 .../xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c|   4 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h |  26 +-
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c   | 303 +--
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h   |  51 +--
 .../hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c  |  67 ++--
 .../hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h  |  32 +-
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c|  22 ++
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h|  20 +
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c| 356 +
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.h|  19 +
 21 files changed, 949 insertions(+), 850 deletions(-)
 create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.h
 create mode 100644 drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c
 create mode 100644 drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h
 create mode 100644 drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
 create mode 100644 drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.h

diff --git a/drivers/xen/hyper_dmabuf/Makefile 
b/drivers/xen/hyper_dmabuf/Makefile
index c9b8b7f..d90cfc3 100644
--- a/drivers/xen/hyper_dmabuf/Makefile
+++ b/drivers/xen/hyper_dmabuf/Makefile
@@ -1,5 +1,7 @@
 TARGET_MODULE:=hyper_dmabuf
 
+PLATFORM:=XEN
+
 # If we running by kernel building system
 ifneq ($(KERNELRELEASE),)
$(TARGET_MODULE)-objs := hyper_dmabuf_drv.o \
@@ -9,8 +11,13 @@ ifneq ($(KERNELRELEASE),)
 hyper_dmabuf_msg.o \
 hyper_dmabuf_id.o \
 hyper_dmabuf_remote_sync.o \
-xen/hyper_dmabuf_xen_comm.o \
-xen/hyper_dmabuf_xen_comm_list.o
+
+ifeq ($(CONFIG_XEN), y)
+   $(TARGET_MODULE)-objs += xen/hyper_dmabuf_xen_comm.o \
+xen/hyper_dmabuf_xen_comm_list.o \
+xen/hyper_dmabuf_xen_shm.o \
+xen/hyper_dmabuf_xen_drv.o
+endif
 
 obj-$(CONFIG_HYPER_DMABUF) := $(TARGET_MODULE).o
 
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_conf.h 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_conf.h
index 3d9b2d6..d012b05 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_conf.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_conf.h
@@ -1,2 +1 @@
 #define CURRENT_TARGET XEN
-#define INTER_DOMAIN_DMABUF_SYN

[RFC PATCH 18/60] hyper_dmabuf: reset comm channel when one end has disconnected.

2017-12-19 Thread Dongwon Kim
From: Mateusz Polrola 

When exporter or importer is disconnected, ring buffer should be
reinitialized, otherwise on next reconnection exporter/importer will
receive old requests/responses remaining in the ring buffer, which are
not valid anymore.

This patch also blocks back ring irq until communication channel is
initialized and fully active to prevent a race condition.

Signed-off-by: Dongwon Kim 
---
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c   | 24 +++---
 1 file changed, 21 insertions(+), 3 deletions(-)

diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c 
b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
index 5e7a250..b629032 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
@@ -282,6 +282,7 @@ int hyper_dmabuf_xen_init_tx_rbuf(int domid)
 void hyper_dmabuf_xen_cleanup_tx_rbuf(int domid)
 {
struct xen_comm_tx_ring_info *ring_info;
+   struct xen_comm_rx_ring_info *rx_ring_info;
 
/* check if we at all have exporter ring for given rdomain */
ring_info = xen_comm_find_tx_ring(domid);
@@ -307,6 +308,12 @@ void hyper_dmabuf_xen_cleanup_tx_rbuf(int domid)
  (unsigned long) ring_info->ring_front.sring);
 
kfree(ring_info);
+
+   rx_ring_info = xen_comm_find_rx_ring(domid);
+   if (!rx_ring_info)
+   return;
+
+   BACK_RING_INIT(&(rx_ring_info->ring_back), 
rx_ring_info->ring_back.sring, PAGE_SIZE);
 }
 
 /* importer needs to know about shared page and port numbers for
@@ -378,9 +385,8 @@ int hyper_dmabuf_xen_init_rx_rbuf(int domid)
 
BACK_RING_INIT(&ring_info->ring_back, sring, PAGE_SIZE);
 
-   ret = bind_interdomain_evtchn_to_irqhandler(domid, rx_port,
-   back_ring_isr, 0,
-   NULL, (void*)ring_info);
+   ret = bind_interdomain_evtchn_to_irq(domid, rx_port);
+
if (ret < 0) {
return -EINVAL;
}
@@ -399,6 +405,10 @@ int hyper_dmabuf_xen_init_rx_rbuf(int domid)
ret = hyper_dmabuf_xen_init_tx_rbuf(domid);
}
 
+   ret = request_irq(ring_info->irq,
+ back_ring_isr, 0,
+ NULL, (void*)ring_info);
+
return ret;
 }
 
@@ -406,6 +416,7 @@ int hyper_dmabuf_xen_init_rx_rbuf(int domid)
 void hyper_dmabuf_xen_cleanup_rx_rbuf(int domid)
 {
struct xen_comm_rx_ring_info *ring_info;
+   struct xen_comm_tx_ring_info *tx_ring_info;
struct page *shared_ring;
 
/* check if we have importer ring created for given sdomain */
@@ -425,6 +436,13 @@ void hyper_dmabuf_xen_cleanup_rx_rbuf(int domid)
gnttab_free_pages(1, &shared_ring);
 
kfree(ring_info);
+
+   tx_ring_info = xen_comm_find_tx_ring(domid);
+   if (!tx_ring_info)
+   return;
+
+   SHARED_RING_INIT(tx_ring_info->ring_front.sring);
+   FRONT_RING_INIT(&(tx_ring_info->ring_front), 
tx_ring_info->ring_front.sring, PAGE_SIZE);
 }
 
 int hyper_dmabuf_xen_init_comm_env(void)
-- 
2.7.4

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[RFC PATCH 20/60] hyper_dmabuf: optimized loop with less condition check

2017-12-19 Thread Dongwon Kim
Redefined nents_last, which means # of gref in the last page
of lvl2 table in any situation even if it is same as REFS_PER_PAGE.
With this, loop can be simplified with less condition check.

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c | 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c 
b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
index cc9860b..cb5b86f 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
@@ -184,8 +184,10 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int 
lvl3_gref, int domid, int n
struct gnttab_map_grant_ref *data_map_ops;
struct gnttab_unmap_grant_ref *data_unmap_ops;
 
-   int nents_last = nents % REFS_PER_PAGE;
-   int n_lvl2_grefs = (nents / REFS_PER_PAGE) + ((nents_last > 0) ? 1 : 0);
+   /* # of grefs in the last page of lvl2 table */
+   int nents_last = (nents - 1) % REFS_PER_PAGE + 1;
+   int n_lvl2_grefs = (nents / REFS_PER_PAGE) + ((nents_last > 0) ? 1 : 0) 
-
+  (nents_last == REFS_PER_PAGE);
int i, j, k;
 
dev_dbg(hyper_dmabuf_private.device, "%s entry\n", __func__);
@@ -270,7 +272,7 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int 
lvl3_gref, int domid, int n
 
k = 0;
 
-   for (i = 0; i < (nents_last ? n_lvl2_grefs - 1 : n_lvl2_grefs); i++) {
+   for (i = 0; i < n_lvl2_grefs - 1; i++) {
lvl2_table = pfn_to_kaddr(page_to_pfn(lvl2_table_pages[i]));
for (j = 0; j < REFS_PER_PAGE; j++) {
gnttab_set_map_op(&data_map_ops[k],
-- 
2.7.4

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[RFC PATCH 22/60] hyper_dmabuf: configure license

2017-12-19 Thread Dongwon Kim
Set the license of the driver to "GPL and MIT-X dual" and the owner
to "Intel". Also attached the license terms to all source and header
files.

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_conf.h   | 26 ++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c| 32 --
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h| 24 
 drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c | 28 +++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_id.h | 24 
 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c| 28 +++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.h| 24 
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c  | 28 +++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.h  | 24 
 drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c   | 28 +++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_list.h   | 24 
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c| 28 +++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h| 24 
 drivers/xen/hyper_dmabuf/hyper_dmabuf_query.h  | 24 
 .../xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c| 28 +++
 .../xen/hyper_dmabuf/hyper_dmabuf_remote_sync.h| 24 
 drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h | 24 
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c   | 28 +++
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h   | 24 
 .../hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c  | 28 +++
 .../hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h  | 24 
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c| 28 +++
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h| 24 
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c| 28 +++
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.h| 24 
 25 files changed, 648 insertions(+), 2 deletions(-)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_conf.h 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_conf.h
index d012b05..ee1886c 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_conf.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_conf.h
@@ -1 +1,27 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+/* configuration */
+
 #define CURRENT_TARGET XEN
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
index 3fc30e6..4e0ccdd 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
@@ -1,3 +1,31 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR 

[RFC PATCH 27/60] hyper_dmabuf: use proper error codes

2017-12-19 Thread Dongwon Kim
Cleaned up and corrected error codes and conditions in various
error-check routines. Also added proper error messages when a
function returns an error.

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c| 14 +++--
 drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c |  2 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c|  8 +--
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c  | 66 ++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c   |  6 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c|  5 +-
 .../xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c| 38 ++---
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c   | 20 +++
 .../hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c  |  4 +-
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c|  2 +-
 10 files changed, 94 insertions(+), 71 deletions(-)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
index 584d55d..44a9139 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
@@ -60,7 +60,7 @@ static int __init hyper_dmabuf_drv_init(void)
 
ret = register_device();
if (ret < 0) {
-   return -EINVAL;
+   return ret;
}
 
 #ifdef CONFIG_HYPER_DMABUF_XEN
@@ -77,18 +77,24 @@ static int __init hyper_dmabuf_drv_init(void)
 
ret = hyper_dmabuf_table_init();
if (ret < 0) {
-   return -EINVAL;
+   dev_err(hyper_dmabuf_private.device,
+   "failed to initialize table for exported/imported 
entries\n");
+   return ret;
}
 
ret = hyper_dmabuf_private.backend_ops->init_comm_env();
if (ret < 0) {
-   return -EINVAL;
+   dev_err(hyper_dmabuf_private.device,
+   "failed to initiailize hypervisor-specific comm env\n");
+   return ret;
}
 
 #ifdef CONFIG_HYPER_DMABUF_SYSFS
ret = hyper_dmabuf_register_sysfs(hyper_dmabuf_private.device);
if (ret < 0) {
-   return -EINVAL;
+   dev_err(hyper_dmabuf_private.device,
+   "failed to initialize sysfs\n");
+   return ret;
}
 #endif
 
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c
index 9b4ff45..35bfdfb 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c
@@ -62,7 +62,7 @@ static int retrieve_reusable_id(void)
return id;
}
 
-   return -1;
+   return -ENOENT;
 }
 
 void destroy_reusable_list(void)
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
index a9bc354..a0b3946 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
@@ -84,11 +84,11 @@ struct hyper_dmabuf_pages_info *hyper_dmabuf_ext_pgs(struct 
sg_table *sgt)
struct scatterlist *sgl;
 
pinfo = kmalloc(sizeof(*pinfo), GFP_KERNEL);
-   if (pinfo == NULL)
+   if (!pinfo)
return NULL;
 
pinfo->pages = kmalloc(sizeof(struct page 
*)*hyper_dmabuf_get_num_pgs(sgt), GFP_KERNEL);
-   if (pinfo->pages == NULL)
+   if (!pinfo->pages)
return NULL;
 
sgl = sgt->sgl;
@@ -138,7 +138,7 @@ struct sg_table* hyper_dmabuf_create_sgt(struct page 
**pages,
int i, ret;
 
sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
-   if (sgt == NULL) {
+   if (!sgt) {
return NULL;
}
 
@@ -348,7 +348,7 @@ static struct sg_table* hyper_dmabuf_ops_map(struct 
dma_buf_attachment *attachme
/* create a new sg_table with extracted pages */
st = hyper_dmabuf_create_sgt(page_info->pages, page_info->frst_ofst,
page_info->last_len, page_info->nents);
-   if (st == NULL)
+   if (!st)
goto err_free_sg;
 
 if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
index 8851a9c..19ca725 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
@@ -54,7 +54,7 @@ static int hyper_dmabuf_tx_ch_setup(void *data)
 
if (!data) {
dev_err(hyper_dmabuf_private.device, "user data is NULL\n");
-   return -1;
+   return -EINVAL;
}
tx_ch_attr = (struct ioctl_hyper_dmabuf_tx_ch_setup *)data;
 
@@ -71,7 +71,7 @@ static int hyper_dmabuf_rx_ch_setup(void *data)
 
if (!data) {
dev_err(hyper_dmabuf_private.device, "user data is NULL\n");
-   return -1;
+   return -EINVAL;
}
 

[RFC PATCH 19/60] hyper_dmabuf: fix the case with sharing a buffer with 2 pages

2017-12-19 Thread Dongwon Kim
From: Mateusz Polrola 

Checking whether buffer has more than two pages should be done
by evaluating nents > 1 instead of i > 1 to properly cover the
case when nents == 2.

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
index b61d29a..9b05063 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
@@ -129,7 +129,7 @@ struct sg_table* hyper_dmabuf_create_sgt(struct page 
**pages,
sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
}
 
-   if (i > 1) /* more than one page */ {
+   if (nents > 1) /* more than one page */ {
sgl = sg_next(sgl);
sg_set_page(sgl, pages[i], last_len, 0);
}
-- 
2.7.4

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[RFC PATCH 26/60] hyper_dmabuf: add mutexes to prevent several race conditions

2017-12-19 Thread Dongwon Kim
From: Mateusz Polrola 

Added mutex to export_fd ioctl to prevent double pages mapping of the
same buffer to prevent race condition when two consumers are trying to
map the same buffer on importer VM.

Also locked mutex before sending request via xen communication channel
to prevent req_pending override by another caller.

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c  |  2 ++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h  |  1 +
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c|  6 ++
 drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c | 10 ++
 drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h |  1 +
 5 files changed, 20 insertions(+)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
index 569b95e..584d55d 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
@@ -56,6 +56,8 @@ static int __init hyper_dmabuf_drv_init(void)
 
printk( KERN_NOTICE "hyper_dmabuf_starting: Initialization started" );
 
+   mutex_init(&hyper_dmabuf_private.lock);
+
ret = register_device();
if (ret < 0) {
return -EINVAL;
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
index 0b1441e..8445416 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
@@ -76,6 +76,7 @@ struct hyper_dmabuf_private {
 
/* backend ops - hypervisor specific */
struct hyper_dmabuf_backend_ops *backend_ops;
+   struct mutex lock;
 };
 
 #endif /* __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__ */
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
index 018de8c..8851a9c 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
@@ -260,6 +260,8 @@ static int hyper_dmabuf_export_fd_ioctl(void *data)
if (sgt_info == NULL || !sgt_info->valid) /* can't find sgt from the 
table */
return -1;
 
+   mutex_lock(&hyper_dmabuf_private.lock);
+
sgt_info->num_importers++;
 
/* send notification for export_fd to exporter */
@@ -274,6 +276,7 @@ static int hyper_dmabuf_export_fd_ioctl(void *data)
kfree(req);
dev_err(hyper_dmabuf_private.device, "Failed to create sgt or 
notify exporter\n");
sgt_info->num_importers--;
+   mutex_unlock(&hyper_dmabuf_private.lock);
return -EINVAL;
}
kfree(req);
@@ -282,6 +285,7 @@ static int hyper_dmabuf_export_fd_ioctl(void *data)
dev_err(hyper_dmabuf_private.device,
"Buffer invalid\n");
sgt_info->num_importers--;
+   mutex_unlock(&hyper_dmabuf_private.lock);
return -1;
} else {
dev_dbg(hyper_dmabuf_private.device, "Can import buffer\n");
@@ -303,6 +307,7 @@ static int hyper_dmabuf_export_fd_ioctl(void *data)
 
if (!data_pages) {
sgt_info->num_importers--;
+   mutex_unlock(&hyper_dmabuf_private.lock);
return -EINVAL;
}
 
@@ -318,6 +323,7 @@ static int hyper_dmabuf_export_fd_ioctl(void *data)
ret = export_fd_attr->fd;
}
 
+   mutex_unlock(&hyper_dmabuf_private.lock);
dev_dbg(hyper_dmabuf_private.device, "%s exit\n", __func__);
return 0;
 }
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c 
b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
index a8cce26..9d67b47 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
@@ -278,6 +278,8 @@ int hyper_dmabuf_xen_init_tx_rbuf(int domid)
ring_info->irq = ret;
ring_info->port = alloc_unbound.port;
 
+   mutex_init(&ring_info->lock);
+
dev_dbg(hyper_dmabuf_private.device,
"%s: allocated eventchannel gref %d  port: %d  irq: %d\n",
__func__,
@@ -512,6 +514,9 @@ int hyper_dmabuf_xen_send_req(int domid, struct 
hyper_dmabuf_req *req, int wait)
return -EINVAL;
}
 
+
+   mutex_lock(&ring_info->lock);
+
ring = &ring_info->ring_front;
 
if (RING_FULL(ring))
@@ -519,6 +524,7 @@ int hyper_dmabuf_xen_send_req(int domid, struct 
hyper_dmabuf_req *req, int wait)
 
new_req = RING_GET_REQUEST(ring, ring->req_prod_pvt);
if (!new_req) {
+   mutex_unlock(&ring_info->lock);
dev_err(hyper_dmabuf_private.device,
"NULL REQUEST\n");
return -EIO;
@@ -548,13 +554,17 @@ int hyper_dmabuf

[RFC PATCH 29/60] hyper_dmabuf: make sure to release allocated buffers when exiting

2017-12-19 Thread Dongwon Kim
From: Mateusz Polrola 

It is required to release all allocated buffers when the application
crashes. With the change, hyper_dmabuf_sgt_info includes file pointers
for the driver. If it's released unexpectedly, the driver is now
unexporting all already-exported buffers to prevent memory leak.

In case there are multiple applications exporting same buffer to
another VM, unexporting is not started when one of those crashes.
Actual unexporting is invoked only if the last application that
exported the buffer is crashed or finished via "emergency-unexport"
routine, that is executed automatically when all of file pointers
opened for accessing hyper_dmabuf driver are closed.

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c|  6 ++-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c  | 73 ++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.h  |  2 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c   | 14 +
 drivers/xen/hyper_dmabuf/hyper_dmabuf_list.h   |  4 ++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h |  7 +++
 6 files changed, 81 insertions(+), 25 deletions(-)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
index 44a9139..a12d4dc 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
@@ -54,7 +54,7 @@ static int __init hyper_dmabuf_drv_init(void)
 {
int ret = 0;
 
-   printk( KERN_NOTICE "hyper_dmabuf_starting: Initialization started" );
+   printk( KERN_NOTICE "hyper_dmabuf_starting: Initialization started\n");
 
mutex_init(&hyper_dmabuf_private.lock);
 
@@ -122,7 +122,9 @@ static void hyper_dmabuf_drv_exit(void)
if (hyper_dmabuf_private.id_queue)
destroy_reusable_list();
 
-   printk( KERN_NOTICE "dma_buf-src_sink model: Exiting" );
+   dev_info(hyper_dmabuf_private.device,
+"hyper_dmabuf driver: Exiting\n");
+
unregister_device();
 }
 
/*===*/
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
index 58b115a..fa700f2 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
@@ -47,7 +47,7 @@
 
 extern struct hyper_dmabuf_private hyper_dmabuf_private;
 
-static int hyper_dmabuf_tx_ch_setup(void *data)
+static int hyper_dmabuf_tx_ch_setup(struct file *filp, void *data)
 {
struct ioctl_hyper_dmabuf_tx_ch_setup *tx_ch_attr;
struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
@@ -64,7 +64,7 @@ static int hyper_dmabuf_tx_ch_setup(void *data)
return ret;
 }
 
-static int hyper_dmabuf_rx_ch_setup(void *data)
+static int hyper_dmabuf_rx_ch_setup(struct file *filp, void *data)
 {
struct ioctl_hyper_dmabuf_rx_ch_setup *rx_ch_attr;
struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
@@ -82,7 +82,7 @@ static int hyper_dmabuf_rx_ch_setup(void *data)
return ret;
 }
 
-static int hyper_dmabuf_export_remote(void *data)
+static int hyper_dmabuf_export_remote(struct file *filp, void *data)
 {
struct ioctl_hyper_dmabuf_export_remote *export_remote_attr;
struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
@@ -227,6 +227,8 @@ static int hyper_dmabuf_export_remote(void *data)
kfree(page_info->pages);
kfree(page_info);
 
+   sgt_info->filp = filp;
+
return ret;
 
 fail_send_request:
@@ -248,7 +250,7 @@ static int hyper_dmabuf_export_remote(void *data)
return ret;
 }
 
-static int hyper_dmabuf_export_fd_ioctl(void *data)
+static int hyper_dmabuf_export_fd_ioctl(struct file *filp, void *data)
 {
struct ioctl_hyper_dmabuf_export_fd *export_fd_attr;
struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
@@ -411,7 +413,7 @@ static void hyper_dmabuf_delayed_unexport(struct 
work_struct *work)
 
 /* Schedules unexport of dmabuf.
  */
-static int hyper_dmabuf_unexport(void *data)
+static int hyper_dmabuf_unexport(struct file *filp, void *data)
 {
struct ioctl_hyper_dmabuf_unexport *unexport_attr;
struct hyper_dmabuf_sgt_info *sgt_info;
@@ -448,7 +450,7 @@ static int hyper_dmabuf_unexport(void *data)
return 0;
 }
 
-static int hyper_dmabuf_query(void *data)
+static int hyper_dmabuf_query(struct file *filp, void *data)
 {
struct ioctl_hyper_dmabuf_query *query_attr;
struct hyper_dmabuf_sgt_info *sgt_info;
@@ -558,7 +560,7 @@ static long hyper_dmabuf_ioctl(struct file *filp,
return -EFAULT;
}
 
-   ret = func(kdata);
+   ret = func(filp, kdata);
 
if (copy_to_user((void __user *)param, kdata, _IOC_SIZE(cmd)) != 0) {
dev_err(hyper_dmabuf_private.d

[RFC PATCH 35/60] hyper_dmabuf: 128bit hyper_dmabuf_id with random keys

2017-12-19 Thread Dongwon Kim
The length of hyper_dmabuf_id is increased to 128bit by adding
random key (96bit) to the id. This is to prevent possible leak
of the id by guessing on importer VM (by unauthorized application).

hyper_dmabuf_id_t is now defined as,

typedef struct {
int id;
int rng_key[3];
} hyper_dmabuf_id_t;

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c|   2 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h|   3 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c |  57 --
 drivers/xen/hyper_dmabuf/hyper_dmabuf_id.h |  17 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c|  51 +++---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c  | 199 +
 drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c   |  87 ++---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_list.h   |  10 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c| 115 +++-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h|   2 +-
 .../xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c|  21 ++-
 .../xen/hyper_dmabuf/hyper_dmabuf_remote_sync.h|   2 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h |  20 ++-
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h   |   2 -
 include/uapi/xen/hyper_dmabuf.h|  13 +-
 15 files changed, 372 insertions(+), 229 deletions(-)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
index 92d710e..c802c3e 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
@@ -30,9 +30,9 @@
 #include 
 #include 
 #include 
+#include "hyper_dmabuf_drv.h"
 #include "hyper_dmabuf_conf.h"
 #include "hyper_dmabuf_msg.h"
-#include "hyper_dmabuf_drv.h"
 #include "hyper_dmabuf_list.h"
 #include "hyper_dmabuf_id.h"
 
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
index 91fda04..ffe4d53 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
@@ -26,11 +26,12 @@
 #define __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__
 
 #include 
+#include 
 
 struct hyper_dmabuf_req;
 
 struct list_reusable_id {
-   int id;
+   hyper_dmabuf_id_t hid;
struct list_head list;
 };
 
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c
index fe95091..f59dee3 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c
@@ -28,13 +28,14 @@
 
 #include 
 #include 
-#include "hyper_dmabuf_msg.h"
+#include 
 #include "hyper_dmabuf_drv.h"
 #include "hyper_dmabuf_id.h"
+#include "hyper_dmabuf_msg.h"
 
 extern struct hyper_dmabuf_private hyper_dmabuf_private;
 
-void store_reusable_id(int id)
+void store_reusable_hid(hyper_dmabuf_id_t hid)
 {
struct list_reusable_id *reusable_head = hyper_dmabuf_private.id_queue;
struct list_reusable_id *new_reusable;
@@ -47,15 +48,15 @@ void store_reusable_id(int id)
return;
}
 
-   new_reusable->id = id;
+   new_reusable->hid = hid;
 
list_add(&new_reusable->list, &reusable_head->list);
 }
 
-static int retrieve_reusable_id(void)
+static hyper_dmabuf_id_t retrieve_reusable_hid(void)
 {
struct list_reusable_id *reusable_head = hyper_dmabuf_private.id_queue;
-   int id;
+   hyper_dmabuf_id_t hid = {-1, {0,0,0}};
 
/* check there is reusable id */
if (!list_empty(&reusable_head->list)) {
@@ -64,12 +65,11 @@ static int retrieve_reusable_id(void)
 list);
 
list_del(&reusable_head->list);
-   id = reusable_head->id;
+   hid = reusable_head->hid;
kfree(reusable_head);
-   return id;
}
 
-   return -ENOENT;
+   return hid;
 }
 
 void destroy_reusable_list(void)
@@ -92,31 +92,50 @@ void destroy_reusable_list(void)
}
 }
 
-int hyper_dmabuf_get_id(void)
+hyper_dmabuf_id_t hyper_dmabuf_get_hid(void)
 {
-   static int id = 0;
+   static int count = 0;
+   hyper_dmabuf_id_t hid;
struct list_reusable_id *reusable_head;
-   int ret;
 
-   /* first cla to hyper_dmabuf_get_id */
-   if (id == 0) {
+   /* first call to hyper_dmabuf_get_id */
+   if (count == 0) {
reusable_head = kmalloc(sizeof(*reusable_head), GFP_KERNEL);
 
if (!reusable_head) {
dev_err(hyper_dmabuf_private.device,
"No memory left to be allocated\n");
-   return -ENOMEM;
+   return (hyper_dmabuf_id_t){-1, {0,0,0}};
}
 
-   reusable_head->id = -1; /* list hea

[RFC PATCH 33/60] hyper_dmabuf: error checking on the result of dma_buf_map_attachment

2017-12-19 Thread Dongwon Kim
From: Mateusz Polrola 

Added error checking on the result of function call,
dma_buf_map_attachment

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c | 5 +
 1 file changed, 5 insertions(+)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
index c0048d9..476c0d7 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
@@ -149,6 +149,11 @@ static int hyper_dmabuf_export_remote(struct file *filp, 
void *data)
 
sgt = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL);
 
+   if (IS_ERR(sgt)) {
+   dev_err(hyper_dmabuf_private.device, "Cannot map attachment\n");
+   return PTR_ERR(sgt);
+   }
+
sgt_info = kcalloc(1, sizeof(*sgt_info), GFP_KERNEL);
 
if(!sgt_info) {
-- 
2.7.4

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[RFC PATCH 24/60] hyper_dmabuf: waits for resp only if WAIT_AFTER_SYNC_REQ == 1

2017-12-19 Thread Dongwon Kim
hyper_dmabuf's sync_request (previously hyper_dmabuf_sync_request_
and_wait) now does not wait for the response from the exporter if
WAIT_AFTER_SYNC_REQ==0. This is to prevent performance degradation
due to the communication latency while doing indirect hyper DMABUF
synchronization.

This patch also includes some minor changes as followed:

1. hyper_dmabuf_free_sgt is removed. Now we call sg_free_table and
   kfree directly from all the places where this function was executed.
   This was done for conciseness.

2. changed hyper_dmabuf_get_domid to hyper_dmabuf_xen_get_domid for
   consistency in function names in the backend.

3. some minor clean-ups

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_conf.h   |  2 -
 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c| 91 +++---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c|  2 -
 .../xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c|  2 +-
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c   | 14 ++--
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h   |  2 +-
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c|  2 +-
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c| 21 +++--
 8 files changed, 69 insertions(+), 67 deletions(-)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_conf.h 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_conf.h
index ee1886c..d5125f2 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_conf.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_conf.h
@@ -23,5 +23,3 @@
  */
 
 /* configuration */
-
-#define CURRENT_TARGET XEN
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
index a017070..d7a35fc 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
@@ -131,7 +131,7 @@ struct hyper_dmabuf_pages_info *hyper_dmabuf_ext_pgs(struct 
sg_table *sgt)
 
 /* create sg_table with given pages and other parameters */
 struct sg_table* hyper_dmabuf_create_sgt(struct page **pages,
-   int frst_ofst, int last_len, int nents)
+int frst_ofst, int last_len, int nents)
 {
struct sg_table *sgt;
struct scatterlist *sgl;
@@ -144,7 +144,11 @@ struct sg_table* hyper_dmabuf_create_sgt(struct page 
**pages,
 
ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
if (ret) {
-   hyper_dmabuf_free_sgt(sgt);
+   if (sgt) {
+   sg_free_table(sgt);
+   kfree(sgt);
+   }
+
return NULL;
}
 
@@ -165,15 +169,6 @@ struct sg_table* hyper_dmabuf_create_sgt(struct page 
**pages,
return sgt;
 }
 
-/* free sg_table */
-void hyper_dmabuf_free_sgt(struct sg_table* sgt)
-{
-   if (sgt) {
-   sg_free_table(sgt);
-   kfree(sgt);
-   }
-}
-
 int hyper_dmabuf_cleanup_sgt_info(struct hyper_dmabuf_sgt_info *sgt_info, int 
force)
 {
struct sgt_list *sgtl;
@@ -264,7 +259,9 @@ int hyper_dmabuf_cleanup_sgt_info(struct 
hyper_dmabuf_sgt_info *sgt_info, int fo
return 0;
 }
 
-inline int hyper_dmabuf_sync_request_and_wait(int id, int dmabuf_ops)
+#define WAIT_AFTER_SYNC_REQ 1
+
+inline int hyper_dmabuf_sync_request(int id, int dmabuf_ops)
 {
struct hyper_dmabuf_req *req;
struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
@@ -279,7 +276,7 @@ inline int hyper_dmabuf_sync_request_and_wait(int id, int 
dmabuf_ops)
hyper_dmabuf_create_request(req, HYPER_DMABUF_OPS_TO_SOURCE, 
&operands[0]);
 
/* send request and wait for a response */
-   ret = ops->send_req(HYPER_DMABUF_DOM_ID(id), req, true);
+   ret = ops->send_req(HYPER_DMABUF_DOM_ID(id), req, WAIT_AFTER_SYNC_REQ);
 
kfree(req);
 
@@ -297,8 +294,8 @@ static int hyper_dmabuf_ops_attach(struct dma_buf* dmabuf, 
struct device* dev,
 
sgt_info = (struct hyper_dmabuf_imported_sgt_info 
*)attach->dmabuf->priv;
 
-   ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
-HYPER_DMABUF_OPS_ATTACH);
+   ret = hyper_dmabuf_sync_request(sgt_info->hyper_dmabuf_id,
+   HYPER_DMABUF_OPS_ATTACH);
 
if (ret < 0) {
dev_err(hyper_dmabuf_private.device,
@@ -319,8 +316,8 @@ static void hyper_dmabuf_ops_detach(struct dma_buf* dmabuf, 
struct dma_buf_attac
 
sgt_info = (struct hyper_dmabuf_imported_sgt_info 
*)attach->dmabuf->priv;
 
-   ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
-HYPER_DMABUF_OPS_DETACH);
+   ret = hyper_dmabuf_sync_request(sgt_info->hyper_dmabuf_id,
+   HYPER_DMABUF_OPS_DETACH);
 
if (ret < 0) {
dev_err(hyper_dmabuf_private.device,
@@ -354,8 +351,8 @@ static

[RFC PATCH 25/60] hyper_dmabuf: introduced delayed unexport

2017-12-19 Thread Dongwon Kim
From: Mateusz Polrola 

To prevent overhead when a DMA BUF needs to be exported right after
it is unexported, a marginal delay is introduced in the unexporting
process. This adds a probation period to the unexporting process. If the
same DMA_BUF is requested to be exported again, the unexporting process
is canceled right away and the buffer can be reused without any
extensive re-exporting process.

Additionally, a "FIRST EXPORT" message is synchronously transmitted to
the exporter VM (the importer VM waits for the response) to make sure
the buffer is still valid (not unexported) on the exporter VM's side
before the importer VM starts to use it.

"delayed_ms" attribute is added to unexport ioctl, used for hardcoding
delay from userspace.

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c|   4 +
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c  | 157 ++---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.h  |   2 +
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c|  41 +++---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h |   2 +
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c   |   2 +
 6 files changed, 139 insertions(+), 69 deletions(-)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
index d7a35fc..a9bc354 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
@@ -341,6 +341,10 @@ static struct sg_table* hyper_dmabuf_ops_map(struct 
dma_buf_attachment *attachme
/* extract pages from sgt */
page_info = hyper_dmabuf_ext_pgs(sgt_info->sgt);
 
+   if (!page_info) {
+   return NULL;
+   }
+
/* create a new sg_table with extracted pages */
st = hyper_dmabuf_create_sgt(page_info->pages, page_info->frst_ofst,
page_info->last_len, page_info->nents);
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
index b0f5b5b..018de8c 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
@@ -115,11 +115,24 @@ static int hyper_dmabuf_export_remote(void *data)
ret = hyper_dmabuf_find_id_exported(dma_buf, 
export_remote_attr->remote_domain);
sgt_info = hyper_dmabuf_find_exported(ret);
if (ret != -1 && sgt_info->valid) {
+   /*
+* Check if unexport is already scheduled for that buffer,
+* if so try to cancel it. If that will fail, buffer needs
+* to be reexport once again.
+*/
+   if (sgt_info->unexport_scheduled) {
+   if 
(!cancel_delayed_work_sync(&sgt_info->unexport_work)) {
+   dma_buf_put(dma_buf);
+   goto reexport;
+   }
+   sgt_info->unexport_scheduled = 0;
+   }
dma_buf_put(dma_buf);
export_remote_attr->hyper_dmabuf_id = ret;
return 0;
}
 
+reexport:
attachment = dma_buf_attach(dma_buf, hyper_dmabuf_private.device);
if (!attachment) {
dev_err(hyper_dmabuf_private.device, "Cannot get attachment\n");
@@ -133,7 +146,7 @@ static int hyper_dmabuf_export_remote(void *data)
 
sgt = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL);
 
-   sgt_info = kmalloc(sizeof(*sgt_info), GFP_KERNEL);
+   sgt_info = kcalloc(1, sizeof(*sgt_info), GFP_KERNEL);
 
sgt_info->hyper_dmabuf_id = hyper_dmabuf_get_id();
 
@@ -141,7 +154,6 @@ static int hyper_dmabuf_export_remote(void *data)
sgt_info->hyper_dmabuf_rdomain = export_remote_attr->remote_domain;
sgt_info->dma_buf = dma_buf;
sgt_info->valid = 1;
-   sgt_info->importer_exported = 0;
 
sgt_info->active_sgts = kmalloc(sizeof(struct sgt_list), GFP_KERNEL);
sgt_info->active_attached = kmalloc(sizeof(struct attachment_list), 
GFP_KERNEL);
@@ -245,8 +257,35 @@ static int hyper_dmabuf_export_fd_ioctl(void *data)
 
/* look for dmabuf for the id */
sgt_info = hyper_dmabuf_find_imported(export_fd_attr->hyper_dmabuf_id);
-   if (sgt_info == NULL) /* can't find sgt from the table */
+   if (sgt_info == NULL || !sgt_info->valid) /* can't find sgt from the 
table */
+   return -1;
+
+   sgt_info->num_importers++;
+
+   /* send notification for export_fd to exporter */
+   operand = sgt_info->hyper_dmabuf_id;
+
+   req = kcalloc(1, sizeof(*req), GFP_KERNEL);
+   hyper_dmabuf_create_request(req, HYPER_DMABUF_FIRST_EXPORT, &operand);
+
+   ret = ops->send_req(HYPER_DMABUF_DOM_ID(operand), req, true);
+
+   if (ret < 0) {
+   kfree(req);
+   dev_err(hyper

[RFC PATCH 39/60] hyper_dmabuf: correcting DMA-BUF clean-up order

2017-12-19 Thread Dongwon Kim
Reordering clean-up procedure

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c | 37 +--
 1 file changed, 24 insertions(+), 13 deletions(-)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
index b77b156..2ff2c145 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
@@ -148,21 +148,24 @@ static int hyper_dmabuf_export_remote_ioctl(struct file 
*filp, void *data)
attachment = dma_buf_attach(dma_buf, hyper_dmabuf_private.device);
if (IS_ERR(attachment)) {
dev_err(hyper_dmabuf_private.device, "Cannot get attachment\n");
-   return PTR_ERR(attachment);
+   ret = PTR_ERR(attachment);
+   goto fail_attach;
}
 
sgt = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL);
 
if (IS_ERR(sgt)) {
dev_err(hyper_dmabuf_private.device, "Cannot map attachment\n");
-   return PTR_ERR(sgt);
+   ret = PTR_ERR(sgt);
+   goto fail_map_attachment;
}
 
sgt_info = kcalloc(1, sizeof(*sgt_info), GFP_KERNEL);
 
if(!sgt_info) {
dev_err(hyper_dmabuf_private.device, "no more space left\n");
-   return -ENOMEM;
+   ret = -ENOMEM;
+   goto fail_sgt_info_creation;
}
 
sgt_info->hid = hyper_dmabuf_get_hid();
@@ -171,8 +174,8 @@ static int hyper_dmabuf_export_remote_ioctl(struct file 
*filp, void *data)
if(sgt_info->hid.id == -1) {
dev_err(hyper_dmabuf_private.device,
"exceeds allowed number of dmabuf to be exported\n");
-   /* TODO: Cleanup sgt */
-   return -ENOMEM;
+   ret = -ENOMEM;
+   goto fail_sgt_info_creation;
}
 
/* TODO: We might need to consider using port number on event channel? 
*/
@@ -286,6 +289,8 @@ static int hyper_dmabuf_export_remote_ioctl(struct file 
*filp, void *data)
 
return ret;
 
+/* Clean-up if error occurs */
+
 fail_send_request:
kfree(req);
 
@@ -293,20 +298,26 @@ static int hyper_dmabuf_export_remote_ioctl(struct file 
*filp, void *data)
hyper_dmabuf_remove_exported(sgt_info->hid);
 
 fail_export:
-   dma_buf_unmap_attachment(sgt_info->active_attached->attach,
-sgt_info->active_sgts->sgt,
-DMA_BIDIRECTIONAL);
-   dma_buf_detach(sgt_info->dma_buf, sgt_info->active_attached->attach);
-   dma_buf_put(sgt_info->dma_buf);
-
kfree(sgt_info->va_vmapped);
+
 fail_map_va_vmapped:
kfree(sgt_info->va_kmapped);
+
 fail_map_va_kmapped:
-   kfree(sgt_info->active_sgts);
-fail_map_active_sgts:
kfree(sgt_info->active_attached);
+
 fail_map_active_attached:
+   kfree(sgt_info->active_sgts);
+
+fail_map_active_sgts:
+fail_sgt_info_creation:
+   dma_buf_unmap_attachment(attachment, sgt, DMA_BIDIRECTIONAL);
+
+fail_map_attachment:
+   dma_buf_detach(dma_buf, attachment);
+
+fail_attach:
+   dma_buf_put(dma_buf);
 
return ret;
 }
-- 
2.7.4

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[RFC PATCH 31/60] hyper_dmabuf: built-in compilation option

2017-12-19 Thread Dongwon Kim
From: Mateusz Polrola 

Enabled built-in compilation option of hyper_dmabuf driver.
Also, moved backend initialization into open() to remove
its dependencies on Kernel booting sequence.

hyper_dmabuf.h is now installed as one of standard header
files of Kernel.

This patch also addresses possible memory leaks in various
places.

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/Kconfig   |   1 +
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c|  17 ++--
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h|   1 +
 drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c |  14 +++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c|  13 ++-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c  | 113 +
 drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c   |  15 +++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c|  20 
 .../xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c|  32 +-
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c   |   6 ++
 .../hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c  |  15 +++
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c|   6 ++
 include/uapi/xen/Kbuild|   6 ++
 13 files changed, 227 insertions(+), 32 deletions(-)
 create mode 100644 include/uapi/xen/Kbuild

diff --git a/drivers/xen/hyper_dmabuf/Kconfig b/drivers/xen/hyper_dmabuf/Kconfig
index 56633a2..185fdf8 100644
--- a/drivers/xen/hyper_dmabuf/Kconfig
+++ b/drivers/xen/hyper_dmabuf/Kconfig
@@ -14,6 +14,7 @@ config HYPER_DMABUF_XEN
 config HYPER_DMABUF_SYSFS
bool "Enable sysfs information about hyper DMA buffers"
default y
+   depends on HYPER_DMABUF
help
  Expose information about imported and exported buffers using
  hyper_dmabuf driver
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
index a12d4dc..92d710e 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
@@ -66,6 +66,15 @@ static int __init hyper_dmabuf_drv_init(void)
 #ifdef CONFIG_HYPER_DMABUF_XEN
hyper_dmabuf_private.backend_ops = &xen_backend_ops;
 #endif
+   /*
+* Defer backend setup to first open call.
+* Due to fact that some hypervisors eg. Xen, may have dependencies
+* to userspace daemons like xenstored, in that case all xenstore
+* calls done from kernel will block until that deamon will be
+* started, in case where module is built in that will block entire
+* kernel initialization.
+*/
+   hyper_dmabuf_private.backend_initialized = false;
 
dev_info(hyper_dmabuf_private.device,
 "initializing database for imported/exported dmabufs\n");
@@ -73,7 +82,6 @@ static int __init hyper_dmabuf_drv_init(void)
/* device structure initialization */
/* currently only does work-queue initialization */
hyper_dmabuf_private.work_queue = 
create_workqueue("hyper_dmabuf_wqueue");
-   hyper_dmabuf_private.domid = 
hyper_dmabuf_private.backend_ops->get_vm_id();
 
ret = hyper_dmabuf_table_init();
if (ret < 0) {
@@ -82,13 +90,6 @@ static int __init hyper_dmabuf_drv_init(void)
return ret;
}
 
-   ret = hyper_dmabuf_private.backend_ops->init_comm_env();
-   if (ret < 0) {
-   dev_err(hyper_dmabuf_private.device,
-   "failed to initiailize hypervisor-specific comm env\n");
-   return ret;
-   }
-
 #ifdef CONFIG_HYPER_DMABUF_SYSFS
ret = hyper_dmabuf_register_sysfs(hyper_dmabuf_private.device);
if (ret < 0) {
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
index 8445416..91fda04 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
@@ -77,6 +77,7 @@ struct hyper_dmabuf_private {
/* backend ops - hypervisor specific */
struct hyper_dmabuf_backend_ops *backend_ops;
struct mutex lock;
+   bool backend_initialized;
 };
 
 #endif /* __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__ */
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c
index 35bfdfb..fe95091 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c
@@ -40,6 +40,13 @@ void store_reusable_id(int id)
struct list_reusable_id *new_reusable;
 
new_reusable = kmalloc(sizeof(*new_reusable), GFP_KERNEL);
+
+   if (!new_reusable) {
+   dev_err(hyper_dmabuf_private.device,
+   "No memory left to be allocated\n");
+   return;
+   }
+
new_reusable->id = id;
 
list_add(&new_reusable->list, &reusable_head->list);
@@ -94,6 +101,13 @@ int hyper_dmabuf_get_id(void)
/* first cla to hyper_

[RFC PATCH 40/60] hyper_dmabuf: do not use 'private' as field name

2017-12-19 Thread Dongwon Kim
From: Mateusz Polrola 

Using a word, 'private' is not recommended because of conflict
with language keyword when compiling with C++.
So changing those to 'priv'.

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c | 8 
 include/uapi/xen/hyper_dmabuf.h   | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
index 2ff2c145..9d05d66 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
@@ -257,10 +257,10 @@ static int hyper_dmabuf_export_remote_ioctl(struct file 
*filp, void *data)
}
 
/* driver/application specific private info, max 4x4 bytes */
-   operands[8] = export_remote_attr->private[0];
-   operands[9] = export_remote_attr->private[1];
-   operands[10] = export_remote_attr->private[2];
-   operands[11] = export_remote_attr->private[3];
+   operands[8] = export_remote_attr->priv[0];
+   operands[9] = export_remote_attr->priv[1];
+   operands[10] = export_remote_attr->priv[2];
+   operands[11] = export_remote_attr->priv[3];
 
req = kcalloc(1, sizeof(*req), GFP_KERNEL);
 
diff --git a/include/uapi/xen/hyper_dmabuf.h b/include/uapi/xen/hyper_dmabuf.h
index bee0f86..a2d22d0 100644
--- a/include/uapi/xen/hyper_dmabuf.h
+++ b/include/uapi/xen/hyper_dmabuf.h
@@ -56,7 +56,7 @@ struct ioctl_hyper_dmabuf_export_remote {
int remote_domain;
/* exported dma buf id */
hyper_dmabuf_id_t hid;
-   int private[4];
+   int priv[4];
 };
 
 #define IOCTL_HYPER_DMABUF_EXPORT_FD \
-- 
2.7.4

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[RFC PATCH 37/60] hyper_dmabuf: implementation of query ioctl

2017-12-19 Thread Dongwon Kim
List of queries is re-defined. Now it supports following
items:

enum hyper_dmabuf_query {
DMABUF_QUERY_TYPE = 0x10,
DMABUF_QUERY_EXPORTER,
DMABUF_QUERY_IMPORTER,
DMABUF_QUERY_SIZE,
DMABUF_QUERY_BUSY,
DMABUF_QUERY_UNEXPORTED,
DMABUF_QUERY_DELAYED_UNEXPORTED,
};

Also, actual querying part of the function is moved to hyper_dmabuf_query.c

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/Makefile |   1 +
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c | 111 ++---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_query.c | 115 ++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_query.h |  38 +
 include/uapi/xen/hyper_dmabuf.h   |  17 
 5 files changed, 179 insertions(+), 103 deletions(-)
 create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_query.c

diff --git a/drivers/xen/hyper_dmabuf/Makefile 
b/drivers/xen/hyper_dmabuf/Makefile
index d90cfc3..8865f50 100644
--- a/drivers/xen/hyper_dmabuf/Makefile
+++ b/drivers/xen/hyper_dmabuf/Makefile
@@ -11,6 +11,7 @@ ifneq ($(KERNELRELEASE),)
 hyper_dmabuf_msg.o \
 hyper_dmabuf_id.o \
 hyper_dmabuf_remote_sync.o \
+hyper_dmabuf_query.o \
 
 ifeq ($(CONFIG_XEN), y)
$(TARGET_MODULE)-objs += xen/hyper_dmabuf_xen_comm.o \
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
index 375b664..12f7ce4 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
@@ -31,7 +31,7 @@
 #include 
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
@@ -46,7 +46,7 @@
 
 extern struct hyper_dmabuf_private hyper_dmabuf_private;
 
-static int hyper_dmabuf_tx_ch_setup(struct file *filp, void *data)
+static int hyper_dmabuf_tx_ch_setup_ioctl(struct file *filp, void *data)
 {
struct ioctl_hyper_dmabuf_tx_ch_setup *tx_ch_attr;
struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
@@ -63,7 +63,7 @@ static int hyper_dmabuf_tx_ch_setup(struct file *filp, void 
*data)
return ret;
 }
 
-static int hyper_dmabuf_rx_ch_setup(struct file *filp, void *data)
+static int hyper_dmabuf_rx_ch_setup_ioctl(struct file *filp, void *data)
 {
struct ioctl_hyper_dmabuf_rx_ch_setup *rx_ch_attr;
struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
@@ -81,7 +81,7 @@ static int hyper_dmabuf_rx_ch_setup(struct file *filp, void 
*data)
return ret;
 }
 
-static int hyper_dmabuf_export_remote(struct file *filp, void *data)
+static int hyper_dmabuf_export_remote_ioctl(struct file *filp, void *data)
 {
struct ioctl_hyper_dmabuf_export_remote *export_remote_attr;
struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
@@ -514,7 +514,7 @@ static void hyper_dmabuf_delayed_unexport(struct 
work_struct *work)
 
 /* Schedules unexport of dmabuf.
  */
-static int hyper_dmabuf_unexport(struct file *filp, void *data)
+static int hyper_dmabuf_unexport_ioctl(struct file *filp, void *data)
 {
struct ioctl_hyper_dmabuf_unexport *unexport_attr;
struct hyper_dmabuf_sgt_info *sgt_info;
@@ -554,11 +554,11 @@ static int hyper_dmabuf_unexport(struct file *filp, void 
*data)
return 0;
 }
 
-static int hyper_dmabuf_query(struct file *filp, void *data)
+static int hyper_dmabuf_query_ioctl(struct file *filp, void *data)
 {
struct ioctl_hyper_dmabuf_query *query_attr;
-   struct hyper_dmabuf_sgt_info *sgt_info;
-   struct hyper_dmabuf_imported_sgt_info *imported_sgt_info;
+   struct hyper_dmabuf_sgt_info *sgt_info = NULL;
+   struct hyper_dmabuf_imported_sgt_info *imported_sgt_info = NULL;
int ret = 0;
 
if (!data) {
@@ -568,71 +568,46 @@ static int hyper_dmabuf_query(struct file *filp, void 
*data)
 
query_attr = (struct ioctl_hyper_dmabuf_query *)data;
 
-   sgt_info = hyper_dmabuf_find_exported(query_attr->hid);
-   imported_sgt_info = hyper_dmabuf_find_imported(query_attr->hid);
-
-   /* if dmabuf can't be found in both lists, return */
-   if (!(sgt_info && imported_sgt_info)) {
-   dev_err(hyper_dmabuf_private.device, "can't find entry 
anywhere\n");
-   return -ENOENT;
-   }
-
-   /* not considering the case where a dmabuf is found on both queues
-* in one domain */
-   switch (query_attr->item)
-   {
-   case DMABUF_QUERY_TYPE_LIST:
-   if (sgt_info) {
-   query_attr->info = EXPORTED;
-   } else {
-   query_attr->info = IMPORTED;
-   }
-   brea

[RFC PATCH 36/60] hyper_dmabuf: error handling when share_pages fails

2017-12-19 Thread Dongwon Kim
From: Mateusz Polrola 

When an error occurs while sharing pages, all pages already shared
need to be un-shared and a proper error code has to be returned.

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c  |  6 ++-
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c| 50 ++
 2 files changed, 55 insertions(+), 1 deletion(-)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
index f1581d5..375b664 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
@@ -31,7 +31,7 @@
 #include 
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
@@ -242,6 +242,10 @@ static int hyper_dmabuf_export_remote(struct file *filp, 
void *data)
operands[6] = page_info->last_len;
operands[7] = ops->share_pages (page_info->pages, 
export_remote_attr->remote_domain,
page_info->nents, &sgt_info->refs_info);
+   if (operands[7] < 0) {
+   dev_err(hyper_dmabuf_private.device, "pages sharing failed\n");
+   goto fail_map_req;
+   }
 
/* driver/application specific private info, max 4x4 bytes */
operands[8] = export_remote_attr->private[0];
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c 
b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
index 1416a69..908eda8 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
@@ -109,6 +109,16 @@ int hyper_dmabuf_xen_share_pages(struct page **pages, int 
domid, int nents,
lvl2_table[i] = gnttab_grant_foreign_access(domid,

pfn_to_mfn(page_to_pfn(pages[i])),
true /* read-only 
from remote domain */);
+   if (lvl2_table[i] == -ENOSPC) {
+   dev_err(hyper_dmabuf_private.device, "No more space 
left in grant table\n");
+
+   /* Unshare all already shared pages for lvl2 */
+   while(i--) {
+   gnttab_end_foreign_access_ref(lvl2_table[i], 0);
+   gnttab_free_grant_reference(lvl2_table[i]);
+   }
+   goto err_cleanup;
+   }
}
 
/* Share 2nd level addressing pages in readonly mode*/
@@ -116,6 +126,23 @@ int hyper_dmabuf_xen_share_pages(struct page **pages, int 
domid, int nents,
lvl3_table[i] = gnttab_grant_foreign_access(domid,

virt_to_mfn((unsigned long)lvl2_table+i*PAGE_SIZE ),
true);
+   if (lvl3_table[i] == -ENOSPC) {
+   dev_err(hyper_dmabuf_private.device, "No more space 
left in grant table\n");
+
+   /* Unshare all already shared pages for lvl3 */
+   while(i--) {
+   gnttab_end_foreign_access_ref(lvl3_table[i], 1);
+   gnttab_free_grant_reference(lvl3_table[i]);
+   }
+
+   /* Unshare all pages for lvl2 */
+   while(nents--) {
+   
gnttab_end_foreign_access_ref(lvl2_table[nents], 0);
+   gnttab_free_grant_reference(lvl2_table[nents]);
+   }
+
+   goto err_cleanup;
+   }
}
 
/* Share lvl3_table in readonly mode*/
@@ -123,6 +150,23 @@ int hyper_dmabuf_xen_share_pages(struct page **pages, int 
domid, int nents,
virt_to_mfn((unsigned 
long)lvl3_table),
true);
 
+   if (lvl3_gref == -ENOSPC) {
+   dev_err(hyper_dmabuf_private.device, "No more space left in 
grant table\n");
+
+   /* Unshare all pages for lvl3 */
+   while(i--) {
+   gnttab_end_foreign_access_ref(lvl3_table[i], 1);
+   gnttab_free_grant_reference(lvl3_table[i]);
+   }
+
+   /* Unshare all pages for lvl2 */
+   while(nents--) {
+   gnttab_end_foreign_access_ref(lvl2_table[nents], 0);
+   gnttab_free_grant_reference(lvl2_table[nents]);
+   }
+
+   goto err_cleanup;
+   }
 
/* Store lvl3_table page to be freed later */
sh_pages_info->lvl3_table = lvl3_table;
@@ -136,6 +180,12 @@ int hyper_dmabuf_xen_share_pages(struct page **pages, int 
domid, int nents,
 
dev_dbg(hyper_dmabuf_private.device, "%s exit\n", __func__);
 

[RFC PATCH 23/60] hyper_dmabuf: use CONFIG_HYPER_DMABUF_XEN instead of CONFIG_XEN

2017-12-19 Thread Dongwon Kim
Now, use CONFIG_HYPER_DMABUF_XEN as a configuration option
for building hyper_dmabuf for Xen hypervisor.

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
index 4e0ccdd..569b95e 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
@@ -36,7 +36,7 @@
 #include "hyper_dmabuf_list.h"
 #include "hyper_dmabuf_id.h"
 
-#ifdef CONFIG_XEN
+#ifdef CONFIG_HYPER_DMABUF_XEN
 #include "xen/hyper_dmabuf_xen_drv.h"
 extern struct hyper_dmabuf_backend_ops xen_backend_ops;
 #endif
@@ -61,7 +61,7 @@ static int __init hyper_dmabuf_drv_init(void)
return -EINVAL;
}
 
-#ifdef CONFIG_XEN
+#ifdef CONFIG_HYPER_DMABUF_XEN
hyper_dmabuf_private.backend_ops = &xen_backend_ops;
 #endif
 
-- 
2.7.4

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[RFC PATCH 28/60] hyper_dmabuf: address several synchronization issues

2017-12-19 Thread Dongwon Kim
From: Mateusz Polrola 

This patch addresses several synchronization issues while sharing
DMA_BUF with another VM.

1. Set WAIT_AFTER_SYNC_REQ to false by default to prevent possible
   performance degradation when waiting for the response for every
   synchronization request to the exporter VM.

2. Removed HYPER_DMABUF_OPS_RELEASE_FINAL message - now exporter can
   automatically detect when there are no more consumers of DMA_BUF
   so importer VM doesn't have to send out this message.

3. Renamed HYPER_DMABUF_FIRST_EXPORT into HYPER_DMABUF_EXPORT_FD

4. Introduced HYPER_DMABUF_EXPORT_FD_FAILED message to undo
   HYPER_DMABUF_FIRST_EXPORT in case of any failure while executing
   hyper_dmabuf_export_fd_ioctl

5. Waiting until other VM processes all pending requests when ring
   buffers are all full.

6. Create hyper_dmabuf.h with definitions of driver interface under
   include/uapi/xen/

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c| 21 ++---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c  | 17 +++-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.h  | 74 +
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c| 30 +--
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h|  4 +-
 .../xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c| 24 --
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c   |  5 +-
 include/uapi/xen/hyper_dmabuf.h| 96 ++
 8 files changed, 163 insertions(+), 108 deletions(-)
 create mode 100644 include/uapi/xen/hyper_dmabuf.h

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
index a0b3946..5a034ffb 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
@@ -187,10 +187,7 @@ int hyper_dmabuf_cleanup_sgt_info(struct 
hyper_dmabuf_sgt_info *sgt_info, int fo
 * side.
 */
if (!force &&
-   (!list_empty(&sgt_info->va_kmapped->list) ||
-   !list_empty(&sgt_info->va_vmapped->list) ||
-   !list_empty(&sgt_info->active_sgts->list) ||
-   !list_empty(&sgt_info->active_attached->list))) {
+   sgt_info->importer_exported) {
dev_warn(hyper_dmabuf_private.device, "dma-buf is used by 
importer\n");
return -EPERM;
}
@@ -259,7 +256,7 @@ int hyper_dmabuf_cleanup_sgt_info(struct 
hyper_dmabuf_sgt_info *sgt_info, int fo
return 0;
 }
 
-#define WAIT_AFTER_SYNC_REQ 1
+#define WAIT_AFTER_SYNC_REQ 0
 
 inline int hyper_dmabuf_sync_request(int id, int dmabuf_ops)
 {
@@ -431,17 +428,11 @@ static void hyper_dmabuf_ops_release(struct dma_buf 
*dma_buf)
final_release = sgt_info && !sgt_info->valid &&
!sgt_info->num_importers;
 
-   if (final_release) {
-   ret = hyper_dmabuf_sync_request(sgt_info->hyper_dmabuf_id,
-   HYPER_DMABUF_OPS_RELEASE_FINAL);
-   } else {
-   ret = hyper_dmabuf_sync_request(sgt_info->hyper_dmabuf_id,
-   HYPER_DMABUF_OPS_RELEASE);
-   }
-
+   ret = hyper_dmabuf_sync_request(sgt_info->hyper_dmabuf_id,
+   HYPER_DMABUF_OPS_RELEASE);
if (ret < 0) {
-   dev_err(hyper_dmabuf_private.device,
-   "hyper_dmabuf::%s Error:send dmabuf sync request 
failed\n", __func__);
+   dev_warn(hyper_dmabuf_private.device,
+"hyper_dmabuf::%s Error:send dmabuf sync request 
failed\n", __func__);
}
 
/*
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
index 19ca725..58b115a 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
@@ -35,6 +35,7 @@
 #include 
 #include 
 #include 
+#include 
 #include "hyper_dmabuf_struct.h"
 #include "hyper_dmabuf_ioctl.h"
 #include "hyper_dmabuf_list.h"
@@ -282,12 +283,17 @@ static int hyper_dmabuf_export_fd_ioctl(void *data)
/* send notification for export_fd to exporter */
operand = sgt_info->hyper_dmabuf_id;
 
+   dev_dbg(hyper_dmabuf_private.device, "Exporting fd of buffer %d\n", 
operand);
+
req = kcalloc(1, sizeof(*req), GFP_KERNEL);
-   hyper_dmabuf_create_request(req, HYPER_DMABUF_FIRST_EXPORT, &operand);
+   hyper_dmabuf_create_request(req, HYPER_DMABUF_EXPORT_FD, &operand);
 
ret = ops->send_req(HYPER_DMABUF_DOM_ID(operand), req, true);
 
if (ret < 0) {
+   /* in case of timeout other end eventually will receive 
request, so we need to undo it */
+   hyper_dmabuf_create_request(req, HYPER_DMABUF_EXPORT_FD_FAILED, 
&

[RFC PATCH 32/60] hyper_dmabuf: make all shared pages read-only

2017-12-19 Thread Dongwon Kim
All shared pages need to be read-only from importer's
point of view to prevent the buffer from being corrupted.

This patch may need to be reverted if we find a better
way to protect the original content in this sharing
model.

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c | 16 
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c 
b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
index c6a2993..1416a69 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
@@ -104,24 +104,24 @@ int hyper_dmabuf_xen_share_pages(struct page **pages, int 
domid, int nents,
 
*refs_info = (void *)sh_pages_info;
 
-   /* share data pages in rw mode*/
+   /* share data pages in readonly mode for security */
for (i=0; ihttps://lists.freedesktop.org/mailman/listinfo/dri-devel


[RFC PATCH 34/60] hyper_dmabuf: extend DMA bitmask to 64-bits.

2017-12-19 Thread Dongwon Kim
From: Mateusz Polrola 

Extending the DMA bitmask of the hyper_dmabuf device to cover the whole
address space the driver may access.

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
index 476c0d7..f7d98c1 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
@@ -723,7 +723,7 @@ int register_device(void)
hyper_dmabuf_private.device = hyper_dmabuf_miscdev.this_device;
 
/* TODO: Check if there is a different way to initialize dma mask 
nicely */
-   dma_coerce_mask_and_coherent(hyper_dmabuf_private.device, 0x);
+   dma_coerce_mask_and_coherent(hyper_dmabuf_private.device, 
DMA_BIT_MASK(64));
 
return ret;
 }
-- 
2.7.4

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[RFC PATCH 30/60] hyper_dmabuf: free already mapped pages when error happens

2017-12-19 Thread Dongwon Kim
From: Mateusz Polrola 

Already-mapped pages need to be freed if an error occurs
before mapping of all pages has finished.

Signed-off-by: Dongwon Kim 
---
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c| 43 +++---
 1 file changed, 38 insertions(+), 5 deletions(-)

diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c 
b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
index c03e5a0..524f75c 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
@@ -255,7 +255,7 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int 
lvl3_gref, int domid, int n
if (lvl3_map_ops.status) {
dev_err(hyper_dmabuf_private.device, "HYPERVISOR map grant ref 
failed status = %d",
lvl3_map_ops.status);
-   return NULL;
+   goto error_cleanup_lvl3;
} else {
lvl3_unmap_ops.handle = lvl3_map_ops.handle;
}
@@ -263,7 +263,7 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int 
lvl3_gref, int domid, int n
/* Map all second level pages */
if (gnttab_alloc_pages(n_lvl2_grefs, lvl2_table_pages)) {
dev_err(hyper_dmabuf_private.device, "Cannot allocate pages\n");
-   return NULL;
+   goto error_cleanup_lvl3;
}
 
for (i = 0; i < n_lvl2_grefs; i++) {
@@ -277,6 +277,9 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int 
lvl3_gref, int domid, int n
if (gnttab_unmap_refs(&lvl3_unmap_ops, NULL, &lvl3_table_page, 1)) {
dev_err(hyper_dmabuf_private.device, "xen: cannot unmap top 
level page\n");
return NULL;
+   } else {
+   /* Mark that page was unmapped */
+   lvl3_unmap_ops.handle = -1;
}
 
if (gnttab_map_refs(lvl2_map_ops, NULL, lvl2_table_pages, 
n_lvl2_grefs)) {
@@ -290,7 +293,7 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int 
lvl3_gref, int domid, int n
dev_err(hyper_dmabuf_private.device,
"HYPERVISOR map grant ref failed status = %d",
lvl2_map_ops[i].status);
-   return NULL;
+   goto error_cleanup_lvl2;
} else {
lvl2_unmap_ops[i].handle = lvl2_map_ops[i].handle;
}
@@ -298,7 +301,7 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int 
lvl3_gref, int domid, int n
 
if (gnttab_alloc_pages(nents, data_pages)) {
dev_err(hyper_dmabuf_private.device, "Cannot allocate pages\n");
-   return NULL;
+   goto error_cleanup_lvl2;
}
 
k = 0;
@@ -343,6 +346,11 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int 
lvl3_gref, int domid, int n
  n_lvl2_grefs)) {
dev_err(hyper_dmabuf_private.device, "Cannot unmap 2nd level 
refs\n");
return NULL;
+   } else {
+   /* Mark that pages were unmapped */
+   for (i = 0; i < n_lvl2_grefs; i++) {
+   lvl2_unmap_ops[i].handle = -1;
+   }
}
 
for (i = 0; i < nents; i++) {
@@ -350,7 +358,7 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int 
lvl3_gref, int domid, int n
dev_err(hyper_dmabuf_private.device,
"HYPERVISOR map grant ref failed status = %d\n",
data_map_ops[i].status);
-   return NULL;
+   goto error_cleanup_data;
} else {
data_unmap_ops[i].handle = data_map_ops[i].handle;
}
@@ -369,6 +377,31 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int 
lvl3_gref, int domid, int n
 
dev_dbg(hyper_dmabuf_private.device, "%s exit\n", __func__);
return data_pages;
+
+error_cleanup_data:
+   gnttab_unmap_refs(data_unmap_ops, NULL, data_pages,
+ nents);
+
+   gnttab_free_pages(nents, data_pages);
+
+error_cleanup_lvl2:
+   if (lvl2_unmap_ops[0].handle != -1)
+   gnttab_unmap_refs(lvl2_unmap_ops, NULL, lvl2_table_pages,
+ n_lvl2_grefs);
+   gnttab_free_pages(n_lvl2_grefs, lvl2_table_pages);
+
+error_cleanup_lvl3:
+   if (lvl3_unmap_ops.handle != -1)
+   gnttab_unmap_refs(&lvl3_unmap_ops, NULL, &lvl3_table_page, 1);
+   gnttab_free_pages(1, &lvl3_table_page);
+
+   kfree(lvl2_table_pages);
+   kfree(lvl2_map_ops);
+   kfree(lvl2_unmap_ops);
+   kfree(data_map_ops);
+
+
+   return NULL;
 }
 
 int hyper_dmabuf_xen_unmap_shared_pages(void **refs_info, int nents) {
-- 
2.7.4

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[RFC PATCH 43/60] hyper_dmabuf: fixes on memory leaks in various places

2017-12-19 Thread Dongwon Kim
Make sure to free buffers before returning to prevent memory leaks

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c  | 19 +++-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c|  9 +++-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c|  6 ++-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.c   |  4 +-
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c   | 52 +++---
 5 files changed, 78 insertions(+), 12 deletions(-)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
index 283fe5a..3215003 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
@@ -282,6 +282,7 @@ static int hyper_dmabuf_export_remote_ioctl(struct file 
*filp, void *data)
 
/* free msg */
kfree(req);
+
/* free page_info */
kfree(page_info->pages);
kfree(page_info);
@@ -298,6 +299,10 @@ static int hyper_dmabuf_export_remote_ioctl(struct file 
*filp, void *data)
 fail_map_req:
hyper_dmabuf_remove_exported(sgt_info->hid);
 
+   /* free page_info */
+   kfree(page_info->pages);
+   kfree(page_info);
+
 fail_export:
kfree(sgt_info->va_vmapped);
 
@@ -433,6 +438,13 @@ static int hyper_dmabuf_export_fd_ioctl(struct file *filp, 
void *data)
 
sgt_info->num_importers--;
req = kcalloc(1, sizeof(*req), GFP_KERNEL);
+
+   if (!req) {
+   dev_err(hyper_dmabuf_private.device,
+   "No more space left\n");
+   return -ENOMEM;
+   }
+
hyper_dmabuf_create_request(req, 
HYPER_DMABUF_EXPORT_FD_FAILED, &operands[0]);
ops->send_req(HYPER_DMABUF_DOM_ID(sgt_info->hid), req, 
false);
kfree(req);
@@ -681,16 +693,19 @@ long hyper_dmabuf_ioctl(struct file *filp,
 
if (copy_from_user(kdata, (void __user *)param, _IOC_SIZE(cmd)) != 0) {
dev_err(hyper_dmabuf_private.device, "failed to copy from user 
arguments\n");
-   return -EFAULT;
+   ret = -EFAULT;
+   goto ioctl_error;
}
 
ret = func(filp, kdata);
 
if (copy_to_user((void __user *)param, kdata, _IOC_SIZE(cmd)) != 0) {
dev_err(hyper_dmabuf_private.device, "failed to copy to user 
arguments\n");
-   return -EFAULT;
+   ret = -EFAULT;
+   goto ioctl_error;
}
 
+ioctl_error:
kfree(kdata);
 
return ret;
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
index c516df8..46cf9a4 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
@@ -191,8 +191,7 @@ int hyper_dmabuf_msg_parse(int domid, struct 
hyper_dmabuf_req *req)
struct hyper_dmabuf_req *temp_req;
struct hyper_dmabuf_imported_sgt_info *sgt_info;
struct hyper_dmabuf_sgt_info *exp_sgt_info;
-   hyper_dmabuf_id_t hid = {req->operands[0], /* hid.id */
-  {req->operands[1], req->operands[2], 
req->operands[3]}}; /* hid.rng_key */
+   hyper_dmabuf_id_t hid;
int ret;
 
if (!req) {
@@ -200,6 +199,11 @@ int hyper_dmabuf_msg_parse(int domid, struct 
hyper_dmabuf_req *req)
return -EINVAL;
}
 
+   hid.id = req->operands[0];
+   hid.rng_key[0] = req->operands[1];
+   hid.rng_key[1] = req->operands[2];
+   hid.rng_key[2] = req->operands[3];
+
if ((req->command < HYPER_DMABUF_EXPORT) ||
(req->command > HYPER_DMABUF_OPS_TO_SOURCE)) {
dev_err(hyper_dmabuf_private.device, "invalid command\n");
@@ -332,6 +336,7 @@ int hyper_dmabuf_msg_parse(int domid, struct 
hyper_dmabuf_req *req)
if (!proc) {
dev_err(hyper_dmabuf_private.device,
"No memory left to be allocated\n");
+   kfree(temp_req);
return -ENOMEM;
}
 
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c
index 81cb09f..9313c42 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c
@@ -148,9 +148,8 @@ static struct sg_table* hyper_dmabuf_ops_map(struct 
dma_buf_attachment *attachme
if (!st)
goto err_free_sg;
 
-if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
+if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir))
 goto err_free_sg;
-}
 
ret = hyper_dmabuf_sync_request(sgt_info->hid,

[RFC PATCH 38/60] hyper_dmabuf: preventing self exporting of dma_buf

2017-12-19 Thread Dongwon Kim
Adding an ID check to make sure a dma-buf is exported externally,
since hyper_dmabuf only allows exporting a dmabuf to a different
VM.

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c | 6 ++
 1 file changed, 6 insertions(+)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
index 12f7ce4..b77b156 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
@@ -103,6 +103,12 @@ static int hyper_dmabuf_export_remote_ioctl(struct file 
*filp, void *data)
 
export_remote_attr = (struct ioctl_hyper_dmabuf_export_remote *)data;
 
+   if (hyper_dmabuf_private.domid == export_remote_attr->remote_domain) {
+   dev_err(hyper_dmabuf_private.device,
+   "exporting to the same VM is not permitted\n");
+   return -EINVAL;
+   }
+
dma_buf = dma_buf_get(export_remote_attr->dmabuf_fd);
 
if (IS_ERR(dma_buf)) {
-- 
2.7.4

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[RFC PATCH 50/60] hyper_dmabuf: fix styling err and warns caught by checkpatch.pl

2017-12-19 Thread Dongwon Kim
Fixing all styling problems caught by checkpatch.pl

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c|  53 ++--
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h|   6 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_event.c  |  12 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c |  24 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_id.h |   4 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c  | 308 +++--
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.h  |   5 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c| 132 -
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h|   4 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c|  58 ++--
 drivers/xen/hyper_dmabuf/hyper_dmabuf_query.c  | 236 
 .../xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c|  81 +++---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.c   |  15 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.h   |   2 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h |  78 --
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c   | 154 +--
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h   |  21 +-
 .../hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c  |  21 +-
 .../hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h  |  16 +-
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h|  19 +-
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c| 128 +
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.h|  15 +-
 include/uapi/xen/hyper_dmabuf.h|  26 +-
 23 files changed, 739 insertions(+), 679 deletions(-)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
index 525ee78..023d7f4 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
@@ -44,7 +44,6 @@
 
 #ifdef CONFIG_HYPER_DMABUF_XEN
 #include "xen/hyper_dmabuf_xen_drv.h"
-extern struct hyper_dmabuf_backend_ops xen_backend_ops;
 #endif
 
 MODULE_LICENSE("GPL and additional rights");
@@ -52,14 +51,11 @@ MODULE_AUTHOR("Intel Corporation");
 
 struct hyper_dmabuf_private *hy_drv_priv;
 
-long hyper_dmabuf_ioctl(struct file *filp,
-   unsigned int cmd, unsigned long param);
-
-static void hyper_dmabuf_force_free(struct exported_sgt_info* exported,
-   void *attr)
+static void hyper_dmabuf_force_free(struct exported_sgt_info *exported,
+   void *attr)
 {
struct ioctl_hyper_dmabuf_unexport unexport_attr;
-   struct file *filp = (struct file*) attr;
+   struct file *filp = (struct file *)attr;
 
if (!filp || !exported)
return;
@@ -97,7 +93,8 @@ int hyper_dmabuf_release(struct inode *inode, struct file 
*filp)
 
 #ifdef CONFIG_HYPER_DMABUF_EVENT_GEN
 
-unsigned int hyper_dmabuf_event_poll(struct file *filp, struct 
poll_table_struct *wait)
+unsigned int hyper_dmabuf_event_poll(struct file *filp,
+struct poll_table_struct *wait)
 {
unsigned int mask = 0;
 
@@ -153,15 +150,17 @@ ssize_t hyper_dmabuf_event_read(struct file *filp, char 
__user *buffer,
 
mutex_unlock(&hy_drv_priv->event_read_lock);
ret = wait_event_interruptible(hy_drv_priv->event_wait,
-  
!list_empty(&hy_drv_priv->event_list));
+ !list_empty(&hy_drv_priv->event_list));
 
if (ret == 0)
-   ret = 
mutex_lock_interruptible(&hy_drv_priv->event_read_lock);
+   ret = mutex_lock_interruptible(
+   &hy_drv_priv->event_read_lock);
 
if (ret)
return ret;
} else {
-   unsigned length = (sizeof(struct 
hyper_dmabuf_event_hdr) + e->event_data.hdr.size);
+   unsigned int length = (sizeof(e->event_data.hdr) +
+ e->event_data.hdr.size);
 
if (length > count - ret) {
 put_back_event:
@@ -172,20 +171,22 @@ ssize_t hyper_dmabuf_event_read(struct file *filp, char 
__user *buffer,
}
 
if (copy_to_user(buffer + ret, &e->event_data.hdr,
-sizeof(struct 
hyper_dmabuf_event_hdr))) {
+sizeof(e->event_data.hdr))) {
if (ret == 0)
ret = -EFAULT;
 
goto put_back_event;
}
 
-   ret += sizeof(struct hyper_dmabuf_event_hdr);
+   ret += sizeof(e->event_data.hdr);
 
-

[RFC PATCH 44/60] hyper_dmabuf: proper handling of sgt_info->priv

2017-12-19 Thread Dongwon Kim
sgt_info->priv will be used to store user private info passed in
ioctl. Data in sgt_info->priv is transferred via the comm channel to
the importer VM whenever a DMA_BUF is exported, to keep the private
data synchronized across VMs.

This patch also adds hyper_dmabuf_send_export_msg that replaces
some of export_remote_ioctl to make it more readable and
compact.

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c  | 110 ++---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h |   6 +-
 2 files changed, 65 insertions(+), 51 deletions(-)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
index 3215003..dfdb889 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
@@ -82,17 +82,64 @@ static int hyper_dmabuf_rx_ch_setup_ioctl(struct file 
*filp, void *data)
return ret;
 }
 
+static int hyper_dmabuf_send_export_msg(struct hyper_dmabuf_sgt_info *sgt_info,
+   struct hyper_dmabuf_pages_info 
*page_info)
+{
+   struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
+   struct hyper_dmabuf_req *req;
+   int operands[12] = {0};
+   int ret, i;
+
+   /* now create request for importer via ring */
+   operands[0] = sgt_info->hid.id;
+
+   for (i=0; i<3; i++)
+   operands[i+1] = sgt_info->hid.rng_key[i];
+
+   if (page_info) {
+   operands[4] = page_info->nents;
+   operands[5] = page_info->frst_ofst;
+   operands[6] = page_info->last_len;
+   operands[7] = ops->share_pages (page_info->pages, 
sgt_info->hyper_dmabuf_rdomain,
+   page_info->nents, 
&sgt_info->refs_info);
+   if (operands[7] < 0) {
+   dev_err(hyper_dmabuf_private.device, "pages sharing 
failed\n");
+   return -1;
+   }
+   }
+
+   /* driver/application specific private info, max 4x4 bytes */
+   memcpy(&operands[8], &sgt_info->priv[0], sizeof(unsigned int) * 4);
+
+   req = kcalloc(1, sizeof(*req), GFP_KERNEL);
+
+   if(!req) {
+   dev_err(hyper_dmabuf_private.device, "no more space left\n");
+   return -1;
+   }
+
+   /* composing a message to the importer */
+   hyper_dmabuf_create_request(req, HYPER_DMABUF_EXPORT, &operands[0]);
+
+   ret = ops->send_req(sgt_info->hyper_dmabuf_rdomain, req, false);
+
+   if(ret) {
+   dev_err(hyper_dmabuf_private.device, "error while 
communicating\n");
+   }
+
+   kfree(req);
+
+   return ret;
+}
+
 static int hyper_dmabuf_export_remote_ioctl(struct file *filp, void *data)
 {
struct ioctl_hyper_dmabuf_export_remote *export_remote_attr;
-   struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
struct dma_buf *dma_buf;
struct dma_buf_attachment *attachment;
struct sg_table *sgt;
struct hyper_dmabuf_pages_info *page_info;
struct hyper_dmabuf_sgt_info *sgt_info;
-   struct hyper_dmabuf_req *req;
-   int operands[MAX_NUMBER_OF_OPERANDS];
hyper_dmabuf_id_t hid;
int i;
int ret = 0;
@@ -138,6 +185,13 @@ static int hyper_dmabuf_export_remote_ioctl(struct file 
*filp, void *data)
}
sgt_info->unexport_scheduled = 0;
}
+
+   /* update private data in sgt_info with new 
ones */
+   memcpy(&sgt_info->priv[0], 
&export_remote_attr->priv[0], sizeof(unsigned int) * 4);
+
+   /* TODO: need to send this private info to the 
importer so that those
+* on importer's side are also updated */
+
dma_buf_put(dma_buf);
export_remote_attr->hid = hid;
return 0;
@@ -225,6 +279,9 @@ static int hyper_dmabuf_export_remote_ioctl(struct file 
*filp, void *data)
INIT_LIST_HEAD(&sgt_info->va_kmapped->list);
INIT_LIST_HEAD(&sgt_info->va_vmapped->list);
 
+   /* copy private data to sgt_info */
+   memcpy(&sgt_info->priv[0], &export_remote_attr->priv[0], 
sizeof(unsigned int) * 4);
+
page_info = hyper_dmabuf_ext_pgs(sgt);
if (!page_info) {
dev_err(hyper_dmabuf_private.device, "failed to construct 
page_info\n");
@@ -236,53 +293,15 @@ static int hyper_dmabuf_export_remote_ioctl(struct file 
*filp, void *data)
/* now register it to export list */
hyper_dmabuf_register_exported(sgt_info);
 
-   page_info->

[RFC PATCH 42/60] hyper_dmabuf: always generate a new random keys

2017-12-19 Thread Dongwon Kim
From: Mateusz Polrola 

Need to update random keys when reusing hyper_dmabuf_id
in the list to increase security

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c
index f59dee3..cccdc19 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c
@@ -120,10 +120,11 @@ hyper_dmabuf_id_t hyper_dmabuf_get_hid(void)
 */
if (hid.id == -1 && count < HYPER_DMABUF_ID_MAX) {
hid.id = HYPER_DMABUF_ID_CREATE(hyper_dmabuf_private.domid, 
count++);
-   /* random data embedded in the id for security */
-   get_random_bytes(&hid.rng_key[0], 12);
}
 
+   /* random data embedded in the id for security */
+   get_random_bytes(&hid.rng_key[0], 12);
+
return hid;
 }
 
-- 
2.7.4

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[RFC PATCH 52/60] hyper_dmabuf: remove prefix 'hyper_dmabuf' from static func and backend APIs

2017-12-19 Thread Dongwon Kim
Removed prefix "hyper_dmabuf" from backend functions and static func
(except for driver APIs) and add 'be' after 'xen' in backend function
calls to show those are backend APIs.

Also, modified some function names for clarity and addressed
some missing errors and warnings in hyper_dmabuf_list.c and
hyper_dmabuf_list.h.

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c|   9 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_event.c  |   6 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c |   9 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_id.h |   8 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c  |  23 ++---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c   | 113 +++--
 drivers/xen/hyper_dmabuf/hyper_dmabuf_list.h   |  20 ++--
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c|   4 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c|  54 +-
 .../xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c|   4 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.c   |  20 +---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.h   |   2 -
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c   |  45 
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h   |  20 ++--
 .../hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c  |   1 -
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c|  26 ++---
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c|  14 ++-
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.h|  14 +--
 18 files changed, 179 insertions(+), 213 deletions(-)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
index 76f57c2..387cc63 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
@@ -37,7 +37,6 @@
 #include 
 #include "hyper_dmabuf_drv.h"
 #include "hyper_dmabuf_ioctl.h"
-#include "hyper_dmabuf_msg.h"
 #include "hyper_dmabuf_list.h"
 #include "hyper_dmabuf_id.h"
 #include "hyper_dmabuf_event.h"
@@ -51,8 +50,8 @@ MODULE_AUTHOR("Intel Corporation");
 
 struct hyper_dmabuf_private *hy_drv_priv;
 
-static void hyper_dmabuf_force_free(struct exported_sgt_info *exported,
-   void *attr)
+static void force_free(struct exported_sgt_info *exported,
+  void *attr)
 {
struct ioctl_hyper_dmabuf_unexport unexport_attr;
struct file *filp = (struct file *)attr;
@@ -86,7 +85,7 @@ static int hyper_dmabuf_open(struct inode *inode, struct file 
*filp)
 
 static int hyper_dmabuf_release(struct inode *inode, struct file *filp)
 {
-   hyper_dmabuf_foreach_exported(hyper_dmabuf_force_free, filp);
+   hyper_dmabuf_foreach_exported(force_free, filp);
 
return 0;
 }
@@ -369,7 +368,7 @@ static void hyper_dmabuf_drv_exit(void)
 
/* destroy id_queue */
if (hy_drv_priv->id_queue)
-   destroy_reusable_list();
+   hyper_dmabuf_free_hid_list();
 
 #ifdef CONFIG_HYPER_DMABUF_EVENT_GEN
/* clean up event queue */
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_event.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_event.c
index ae8cb43..392ea99 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_event.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_event.c
@@ -28,16 +28,14 @@
 
 #include 
 #include 
-#include 
 #include 
 #include 
-#include 
 #include "hyper_dmabuf_drv.h"
 #include "hyper_dmabuf_struct.h"
 #include "hyper_dmabuf_list.h"
 #include "hyper_dmabuf_event.h"
 
-static void hyper_dmabuf_send_event(struct hyper_dmabuf_event *e)
+static void send_event(struct hyper_dmabuf_event *e)
 {
struct hyper_dmabuf_event *oldest;
unsigned long irqflags;
@@ -110,7 +108,7 @@ int hyper_dmabuf_import_event(hyper_dmabuf_id_t hid)
e->event_data.data = (void *)imported->priv;
e->event_data.hdr.size = imported->sz_priv;
 
-   hyper_dmabuf_send_event(e);
+   send_event(e);
 
dev_dbg(hy_drv_priv->dev,
"event number = %d :", hy_drv_priv->pending);
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c
index 312dea5..e67b84a 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c
@@ -31,9 +31,8 @@
 #include 
 #include "hyper_dmabuf_drv.h"
 #include "hyper_dmabuf_id.h"
-#include "hyper_dmabuf_msg.h"
 
-void store_reusable_hid(hyper_dmabuf_id_t hid)
+void hyper_dmabuf_store_hid(hyper_dmabuf_id_t hid)
 {
struct list_reusable_id *reusable_head = hy_drv_priv->id_queue;
struct list_reusable_id *new_reusable;
@@ -48,7 +47,7 @@ void store_reusable_hid(hyper_dmabuf_id_t hid)
list_add(&new_reusable->list, &reusable_head->list);
 }
 
-static hyper_dmabuf_id_

[RFC PATCH 41/60] hyper_dmabuf: re-organize driver source

2017-12-19 Thread Dongwon Kim
Re-organized source code for a more intuitive structure

For this,

1. driver's file operations other than ioctls have been moved to
hyper_dmabuf_drv.c.

2. Separated out dma-buf operations from hyper_dmabuf_imp.c
and put those in a new file, 'hyper_dmabuf_ops.c'. The remaining part
(SGT core management) is also put in a new file,
'hyper_dmabuf_sgl_proc.c'. hyper_dmabuf_imp.c and hyper_dmabuf_imp.h
are removed as a result.

3. Header files and Makefile are also updated accordingly.

Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/Makefile  |   3 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c|  95 ++-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c| 682 -
 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.h|  48 --
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c  | 136 +---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c|   1 -
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c| 471 ++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.h|  32 +
 .../xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c|   2 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.c   | 258 
 drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.h   |  41 ++
 11 files changed, 920 insertions(+), 849 deletions(-)
 delete mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
 delete mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.h
 create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c
 create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.h
 create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.c
 create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.h

diff --git a/drivers/xen/hyper_dmabuf/Makefile 
b/drivers/xen/hyper_dmabuf/Makefile
index 8865f50..5040b9f 100644
--- a/drivers/xen/hyper_dmabuf/Makefile
+++ b/drivers/xen/hyper_dmabuf/Makefile
@@ -7,7 +7,8 @@ ifneq ($(KERNELRELEASE),)
$(TARGET_MODULE)-objs := hyper_dmabuf_drv.o \
  hyper_dmabuf_ioctl.o \
  hyper_dmabuf_list.o \
-hyper_dmabuf_imp.o \
+hyper_dmabuf_sgl_proc.o \
+hyper_dmabuf_ops.o \
 hyper_dmabuf_msg.o \
 hyper_dmabuf_id.o \
 hyper_dmabuf_remote_sync.o \
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
index c802c3e..8c488d7 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
@@ -28,10 +28,13 @@
 
 #include 
 #include 
+#include 
 #include 
 #include 
+#include 
 #include "hyper_dmabuf_drv.h"
 #include "hyper_dmabuf_conf.h"
+#include "hyper_dmabuf_ioctl.h"
 #include "hyper_dmabuf_msg.h"
 #include "hyper_dmabuf_list.h"
 #include "hyper_dmabuf_id.h"
@@ -44,12 +47,94 @@ extern struct hyper_dmabuf_backend_ops xen_backend_ops;
 MODULE_LICENSE("GPL and additional rights");
 MODULE_AUTHOR("Intel Corporation");
 
-int register_device(void);
-int unregister_device(void);
-
 struct hyper_dmabuf_private hyper_dmabuf_private;
 
-/*===*/
+long hyper_dmabuf_ioctl(struct file *filp,
+   unsigned int cmd, unsigned long param);
+
+void hyper_dmabuf_emergency_release(struct hyper_dmabuf_sgt_info* sgt_info,
+   void *attr);
+
+int hyper_dmabuf_open(struct inode *inode, struct file *filp)
+{
+   int ret = 0;
+
+   /* Do not allow exclusive open */
+   if (filp->f_flags & O_EXCL)
+   return -EBUSY;
+
+   /*
+* Initialize backend if neededm,
+* use mutex to prevent race conditions when
+* two userspace apps will open device at the same time
+*/
+   mutex_lock(&hyper_dmabuf_private.lock);
+
+   if (!hyper_dmabuf_private.backend_initialized) {
+   hyper_dmabuf_private.domid = 
hyper_dmabuf_private.backend_ops->get_vm_id();
+
+   ret = hyper_dmabuf_private.backend_ops->init_comm_env();
+   if (ret < 0) {
+   dev_err(hyper_dmabuf_private.device,
+   "failed to initiailize hypervisor-specific comm 
env\n");
+   } else {
+   hyper_dmabuf_private.backend_initialized = true;
+   }
+   }
+
+   mutex_unlock(&hyper_dmabuf_private.lock);
+
+   return ret;
+}
+
+int hyper_dmabuf_release(struct inode *inode, struct file *filp)
+{
+   hyper_dmabuf_foreach_exported(hyper_dmabuf_emergency_release, filp);
+
+   return 0;
+}
+
+static struct file_operations hyper_dmabuf_driver_fops =
+{
+   .owner = THIS_MODULE,
+  

[RFC PATCH 56/60] hyper_dmabuf: add initialization and cleanup to bknd_ops

2017-12-19 Thread Dongwon Kim
From: Mateusz Polrola 

Introduced additional init and cleanup routines in the backend
API structure that might be useful for hypervisors other than Xen.

Signed-off-by: Mateusz Polrola 
Signed-off-by: Dongwon Kim 
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c | 14 ++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h |  6 ++
 drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c |  2 ++
 3 files changed, 22 insertions(+)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
index 161fee7..f2731bf 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
@@ -330,6 +330,16 @@ static int __init hyper_dmabuf_drv_init(void)
hy_drv_priv->pending = 0;
 #endif
 
+   if (hy_drv_priv->bknd_ops->init) {
+   ret = hy_drv_priv->bknd_ops->init();
+
+   if (ret < 0) {
+   dev_dbg(hy_drv_priv->dev,
+   "failed to initialize backend.\n");
+   return ret;
+   }
+   }
+
hy_drv_priv->domid = hy_drv_priv->bknd_ops->get_vm_id();
 
ret = hy_drv_priv->bknd_ops->init_comm_env();
@@ -362,6 +372,10 @@ static void hyper_dmabuf_drv_exit(void)
 
hy_drv_priv->bknd_ops->destroy_comm();
 
+   if (hy_drv_priv->bknd_ops->cleanup) {
+   hy_drv_priv->bknd_ops->cleanup();
+   };
+
/* destroy workqueue */
if (hy_drv_priv->work_queue)
destroy_workqueue(hy_drv_priv->work_queue);
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
index 4a51f9e..9337d53 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
@@ -73,6 +73,12 @@ struct list_reusable_id {
 };
 
 struct hyper_dmabuf_bknd_ops {
+   /* backend initialization routine (optional) */
+   int (*init)(void);
+
+   /* backend cleanup routine (optional) */
+   int (*cleanup)(void);
+
/* retreiving id of current virtual machine */
int (*get_vm_id)(void);
 
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c 
b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c
index 1d7249d..14ed3bc 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c
@@ -31,6 +31,8 @@
 #include "hyper_dmabuf_xen_shm.h"
 
 struct hyper_dmabuf_bknd_ops xen_bknd_ops = {
+   .init = NULL, /* not needed for xen */
+   .cleanup = NULL, /* not needed for xen */
.get_vm_id = xen_be_get_domid,
.share_pages = xen_be_share_pages,
.unshare_pages = xen_be_unshare_pages,
-- 
2.7.4

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


  1   2   >