Module Name:    src
Committed By:   riastradh
Date:           Sun Dec 19 12:01:40 UTC 2021

Modified Files:
        src/sys/external/bsd/drm2/dist/drm/amd/amdgpu: amdgpu_dma_buf.c
        src/sys/external/bsd/drm2/include/linux: dma-buf.h dma-fence-array.h
        src/sys/external/bsd/drm2/linux: linux_dma_buf.c

Log Message:
amdgpu: Adapt amdgpu_dma_buf.c to NetBSD.

- Provide a NetBSD signature for amdgpu_gem_prime_mmap and stub it out
  with -ENODEV for now (XXX amdgpu prime).
- Avoid the Linux attach->dev->driver checks in the dma-buf attach and
  detach paths (no equivalent on NetBSD; adev marked __USE for now).
- Use dev->dmat for dma_buf_dynamic_attach.
- dma-buf.h, linux_dma_buf.c: Add dma_buf_dynamic_attach and a
  dynamic_mapping flag on dma_buf_ops/dma_buf_attachment; take the
  reservation lock around map/unmap for dynamic mappings.
- dma-fence-array.h: Add base fence member and declare
  dma_fence_array_create.


To generate a diff of this commit:
cvs rdiff -u -r1.2 -r1.3 \
    src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/amdgpu_dma_buf.c
cvs rdiff -u -r1.11 -r1.12 src/sys/external/bsd/drm2/include/linux/dma-buf.h
cvs rdiff -u -r1.4 -r1.5 \
    src/sys/external/bsd/drm2/include/linux/dma-fence-array.h
cvs rdiff -u -r1.12 -r1.13 src/sys/external/bsd/drm2/linux/linux_dma_buf.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/amdgpu_dma_buf.c
diff -u src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/amdgpu_dma_buf.c:1.2 src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/amdgpu_dma_buf.c:1.3
--- src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/amdgpu_dma_buf.c:1.2	Sat Dec 18 23:44:58 2021
+++ src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/amdgpu_dma_buf.c	Sun Dec 19 12:01:40 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: amdgpu_dma_buf.c,v 1.2 2021/12/18 23:44:58 riastradh Exp $	*/
+/*	$NetBSD: amdgpu_dma_buf.c,v 1.3 2021/12/19 12:01:40 riastradh Exp $	*/
 
 /*
  * Copyright 2019 Advanced Micro Devices, Inc.
@@ -34,7 +34,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: amdgpu_dma_buf.c,v 1.2 2021/12/18 23:44:58 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: amdgpu_dma_buf.c,v 1.3 2021/12/19 12:01:40 riastradh Exp $");
 
 #include "amdgpu.h"
 #include "amdgpu_display.h"
@@ -91,9 +91,19 @@ void amdgpu_gem_prime_vunmap(struct drm_
  * Returns:
  * 0 on success or a negative error code on failure.
  */
+#ifdef __NetBSD__
+int
+amdgpu_gem_prime_mmap(struct drm_gem_object *obj, off_t *offp, size_t size,
+    int prot, int *flagsp, int *advicep, struct uvm_object **uobjp,
+    int *maxprotp)
+#else
 int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
 			  struct vm_area_struct *vma)
+#endif
 {
+#ifdef __NetBSD__		/* XXX amdgpu prime */
+	return -ENODEV;
+#else
 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	unsigned asize = amdgpu_bo_size(bo);
@@ -106,7 +116,11 @@ int amdgpu_gem_prime_mmap(struct drm_gem
 		return -ENODEV;
 
 	/* Check for valid size. */
+#ifdef __NetBSD__
+	if (asize < size)
+#else
 	if (asize < vma->vm_end - vma->vm_start)
+#endif
 		return -EINVAL;
 
 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
@@ -124,6 +138,7 @@ int amdgpu_gem_prime_mmap(struct drm_gem
 	drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);
 
 	return ret;
+#endif
 }
 
 static int
@@ -184,8 +199,12 @@ static int amdgpu_dma_buf_attach(struct 
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	int r;
 
+#ifdef __NetBSD__		/* XXX */
+	__USE(adev);
+#else
 	if (attach->dev->driver == adev->dev->driver)
 		return 0;
+#endif
 
 	r = amdgpu_bo_reserve(bo, false);
 	if (unlikely(r != 0))
@@ -223,7 +242,12 @@ static void amdgpu_dma_buf_detach(struct
 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 
+#ifdef __NetBSD__
+	__USE(adev);
+	if (bo->prime_shared_count)
+#else
 	if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
+#endif
 		bo->prime_shared_count--;
 }
 
@@ -306,7 +330,8 @@ static void amdgpu_dma_buf_unmap(struct 
 static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
 					   enum dma_data_direction direction)
 {
-	struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
+	struct drm_gem_object *gem = dma_buf->priv;
+	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gem);
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	struct ttm_operation_ctx ctx = { true, false };
 	u32 domain = amdgpu_display_supported_domains(adev, bo->flags);
@@ -449,7 +474,11 @@ struct drm_gem_object *amdgpu_gem_prime_
 	if (IS_ERR(obj))
 		return obj;
 
+#ifdef __NetBSD__
+	attach = dma_buf_dynamic_attach(dma_buf, dev->dmat, true);
+#else
 	attach = dma_buf_dynamic_attach(dma_buf, dev->dev, true);
+#endif
 	if (IS_ERR(attach)) {
 		drm_gem_object_put(obj);
 		return ERR_CAST(attach);

Index: src/sys/external/bsd/drm2/include/linux/dma-buf.h
diff -u src/sys/external/bsd/drm2/include/linux/dma-buf.h:1.11 src/sys/external/bsd/drm2/include/linux/dma-buf.h:1.12
--- src/sys/external/bsd/drm2/include/linux/dma-buf.h:1.11	Sun Dec 19 11:33:31 2021
+++ src/sys/external/bsd/drm2/include/linux/dma-buf.h	Sun Dec 19 12:01:40 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: dma-buf.h,v 1.11 2021/12/19 11:33:31 riastradh Exp $	*/
+/*	$NetBSD: dma-buf.h,v 1.12 2021/12/19 12:01:40 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 2018 The NetBSD Foundation, Inc.
@@ -54,6 +54,7 @@ struct uvm_object;
 
 struct dma_buf_ops {
 	bool	cache_sgt_mapping;
+	bool	dynamic_mapping;
 	int	(*attach)(struct dma_buf *, struct dma_buf_attachment *);
 	void	(*detach)(struct dma_buf *, struct dma_buf_attachment *);
 	struct sg_table *
@@ -86,6 +87,7 @@ struct dma_buf_attachment {
 	void				*priv;
 	struct dma_buf			*dmabuf;
 	bus_dma_tag_t			dev; /* XXX expedient misnomer */
+	bool				dynamic_mapping;
 };
 
 struct dma_buf_export_info {
@@ -105,6 +107,7 @@ struct dma_buf_export_info {
 
 #define	dma_buf_attach		linux_dma_buf_attach
 #define	dma_buf_detach		linux_dma_buf_detach
+#define	dma_buf_dynamic_attach	linux_dma_buf_dynamic_attach
 #define	dma_buf_export		linux_dma_buf_export
 #define	dma_buf_fd		linux_dma_buf_fd
 #define	dma_buf_get		linux_dma_buf_get
@@ -124,6 +127,8 @@ void	dma_buf_put(struct dma_buf *);
 
 struct dma_buf_attachment *
 	dma_buf_attach(struct dma_buf *, bus_dma_tag_t);
+struct dma_buf_attachment *
+	dma_buf_dynamic_attach(struct dma_buf *, bus_dma_tag_t, bool);
 void	dma_buf_detach(struct dma_buf *, struct dma_buf_attachment *);
 
 struct sg_table *

Index: src/sys/external/bsd/drm2/include/linux/dma-fence-array.h
diff -u src/sys/external/bsd/drm2/include/linux/dma-fence-array.h:1.4 src/sys/external/bsd/drm2/include/linux/dma-fence-array.h:1.5
--- src/sys/external/bsd/drm2/include/linux/dma-fence-array.h:1.4	Sun Dec 19 01:41:27 2021
+++ src/sys/external/bsd/drm2/include/linux/dma-fence-array.h	Sun Dec 19 12:01:40 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: dma-fence-array.h,v 1.4 2021/12/19 01:41:27 riastradh Exp $	*/
+/*	$NetBSD: dma-fence-array.h,v 1.5 2021/12/19 12:01:40 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 2018 The NetBSD Foundation, Inc.
@@ -34,14 +34,22 @@
 
 #include <sys/stdbool.h>
 
+#include <linux/dma-fence.h>
+
+#define	dma_fence_array_create		linux_dma_fence_array_create
 #define	dma_fence_is_array		linux_dma_fence_is_array
 #define	to_dma_fence_array		linux_to_dma_fence_array
 
 struct dma_fence_array {
+	struct dma_fence	base;
 	struct dma_fence	**fences;
 	unsigned		num_fences;
 };
 
+struct dma_fence_array *
+	dma_fence_array_create(int, struct dma_fence **, unsigned, unsigned,
+	    bool);
+
 bool	dma_fence_is_array(struct dma_fence *);
 struct dma_fence_array *
 	to_dma_fence_array(struct dma_fence *);

Index: src/sys/external/bsd/drm2/linux/linux_dma_buf.c
diff -u src/sys/external/bsd/drm2/linux/linux_dma_buf.c:1.12 src/sys/external/bsd/drm2/linux/linux_dma_buf.c:1.13
--- src/sys/external/bsd/drm2/linux/linux_dma_buf.c:1.12	Sun Dec 19 11:37:29 2021
+++ src/sys/external/bsd/drm2/linux/linux_dma_buf.c	Sun Dec 19 12:01:40 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: linux_dma_buf.c,v 1.12 2021/12/19 11:37:29 riastradh Exp $	*/
+/*	$NetBSD: linux_dma_buf.c,v 1.13 2021/12/19 12:01:40 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 2018 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: linux_dma_buf.c,v 1.12 2021/12/19 11:37:29 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: linux_dma_buf.c,v 1.13 2021/12/19 12:01:40 riastradh Exp $");
 
 #include <sys/types.h>
 #include <sys/atomic.h>
@@ -174,7 +174,8 @@ dma_buf_put(struct dma_buf *dmabuf)
 }
 
 struct dma_buf_attachment *
-dma_buf_attach(struct dma_buf *dmabuf, bus_dma_tag_t dmat)
+dma_buf_dynamic_attach(struct dma_buf *dmabuf, bus_dma_tag_t dmat,
+    bool dynamic_mapping)
 {
 	struct dma_buf_attachment *attach;
 	int ret = 0;
@@ -182,6 +183,7 @@ dma_buf_attach(struct dma_buf *dmabuf, b
 	attach = kmem_zalloc(sizeof(*attach), KM_SLEEP);
 	attach->dmabuf = dmabuf;
 	attach->dev = dmat;
+	attach->dynamic_mapping = dynamic_mapping;
 
 	mutex_enter(&dmabuf->db_lock);
 	if (dmabuf->ops->attach)
@@ -190,12 +192,22 @@ dma_buf_attach(struct dma_buf *dmabuf, b
 	if (ret)
 		goto fail0;
 
+	if (attach->dynamic_mapping != dmabuf->ops->dynamic_mapping)
+		panic("%s: NYI", __func__);
+
 	return attach;
 
 fail0:	kmem_free(attach, sizeof(*attach));
 	return ERR_PTR(ret);
 }
 
+struct dma_buf_attachment *
+dma_buf_attach(struct dma_buf *dmabuf, bus_dma_tag_t dmat)
+{
+
+	return dma_buf_dynamic_attach(dmabuf, dmat, /*dynamic_mapping*/false);
+}
+
 void
 dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
 {
@@ -212,8 +224,15 @@ struct sg_table *
 dma_buf_map_attachment(struct dma_buf_attachment *attach,
     enum dma_data_direction dir)
 {
+	struct sg_table *sg;
+
+	if (attach->dmabuf->ops->dynamic_mapping)
+		dma_resv_lock(attach->dmabuf->resv, NULL);
+	sg = attach->dmabuf->ops->map_dma_buf(attach, dir);
+	if (attach->dmabuf->ops->dynamic_mapping)
+		dma_resv_unlock(attach->dmabuf->resv);
 
-	return attach->dmabuf->ops->map_dma_buf(attach, dir);
+	return sg;
 }
 
 void
@@ -221,7 +240,11 @@ dma_buf_unmap_attachment(struct dma_buf_
     struct sg_table *sg, enum dma_data_direction dir)
 {
 
-	return attach->dmabuf->ops->unmap_dma_buf(attach, sg, dir);
+	if (attach->dmabuf->ops->dynamic_mapping)
+		dma_resv_lock(attach->dmabuf->resv, NULL);
+	attach->dmabuf->ops->unmap_dma_buf(attach, sg, dir);
+	if (attach->dmabuf->ops->dynamic_mapping)
+		dma_resv_unlock(attach->dmabuf->resv);
 }
 
 static int

Reply via email to