Module Name:    src
Committed By:   riastradh
Date:           Sun Dec 19 10:19:53 UTC 2021

Modified Files:
        src/sys/external/bsd/drm2/include/linux: dma-buf.h dma-resv.h
        src/sys/external/bsd/drm2/linux: files.drmkms_linux linux_dma_buf.c
Added Files:
        src/sys/external/bsd/drm2/linux: linux_dma_resv.c
Removed Files:
        src/sys/external/bsd/drm2/include/linux: reservation.h
        src/sys/external/bsd/drm2/linux: linux_reservation.c

Log Message:
Rename reservation_object -> dma_resv.
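
For illustration (a hypothetical caller, not part of this commit), code that previously spelled the API as:

	#include <linux/reservation.h>	/* old header */

	static struct reservation_object resv;

	reservation_object_init(&resv);
	/* ... add fences, poll, kqfilter ... */
	reservation_object_fini(&resv);

now reads, with the renamed spellings used throughout the diff below:

	#include <linux/dma-resv.h>	/* new header */

	static struct dma_resv resv;

	dma_resv_init(&resv);
	/* ... add fences, poll, kqfilter ... */
	dma_resv_fini(&resv);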


To generate a diff of this commit:
cvs rdiff -u -r1.7 -r1.8 src/sys/external/bsd/drm2/include/linux/dma-buf.h
cvs rdiff -u -r1.4 -r1.5 src/sys/external/bsd/drm2/include/linux/dma-resv.h
cvs rdiff -u -r1.17 -r0 src/sys/external/bsd/drm2/include/linux/reservation.h
cvs rdiff -u -r1.23 -r1.24 src/sys/external/bsd/drm2/linux/files.drmkms_linux
cvs rdiff -u -r1.8 -r1.9 src/sys/external/bsd/drm2/linux/linux_dma_buf.c
cvs rdiff -u -r0 -r1.1 src/sys/external/bsd/drm2/linux/linux_dma_resv.c
cvs rdiff -u -r1.24 -r0 src/sys/external/bsd/drm2/linux/linux_reservation.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/external/bsd/drm2/include/linux/dma-buf.h
diff -u src/sys/external/bsd/drm2/include/linux/dma-buf.h:1.7 src/sys/external/bsd/drm2/include/linux/dma-buf.h:1.8
--- src/sys/external/bsd/drm2/include/linux/dma-buf.h:1.7	Sun Dec 19 09:50:57 2021
+++ src/sys/external/bsd/drm2/include/linux/dma-buf.h	Sun Dec 19 10:19:53 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: dma-buf.h,v 1.7 2021/12/19 09:50:57 riastradh Exp $	*/
+/*	$NetBSD: dma-buf.h,v 1.8 2021/12/19 10:19:53 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 2018 The NetBSD Foundation, Inc.
@@ -37,7 +37,7 @@
 #include <sys/mutex.h>
 
 #include <linux/err.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 
 struct device;
 struct dma_buf;
@@ -46,7 +46,7 @@ struct dma_buf_export_info;
 struct dma_buf_ops;
 struct file;
 struct module;
-struct reservation_object;
+struct dma_resv;
 struct sg_table;
 struct uvm_object;
 
@@ -80,12 +80,12 @@ struct dma_buf {
 	void				*priv;
 	const struct dma_buf_ops	*ops;
 	size_t				size;
-	struct reservation_object	*resv;
+	struct dma_resv			*resv;
 
 	kmutex_t			db_lock;
 	volatile unsigned		db_refcnt;
-	struct reservation_poll		db_resv_poll;
-	struct reservation_object	db_resv_int[];
+	struct dma_resv_poll		db_resv_poll;
+	struct dma_resv			db_resv_int[];
 };
 
 struct dma_buf_attachment {
@@ -102,7 +102,7 @@ struct dma_buf_export_info {
 	const struct dma_buf_ops	*ops;
 	size_t				size;
 	int				flags;
-	struct reservation_object	*resv;
+	struct dma_resv			*resv;
 	void				*priv;
 };
 

Index: src/sys/external/bsd/drm2/include/linux/dma-resv.h
diff -u src/sys/external/bsd/drm2/include/linux/dma-resv.h:1.4 src/sys/external/bsd/drm2/include/linux/dma-resv.h:1.5
--- src/sys/external/bsd/drm2/include/linux/dma-resv.h:1.4	Sun Dec 19 09:47:27 2021
+++ src/sys/external/bsd/drm2/include/linux/dma-resv.h	Sun Dec 19 10:19:53 2021
@@ -1,295 +1,138 @@
-/*
- * Header file for reservations for dma-buf and ttm
- *
- * Copyright(C) 2011 Linaro Limited. All rights reserved.
- * Copyright (C) 2012-2013 Canonical Ltd
- * Copyright (C) 2012 Texas Instruments
- *
- * Authors:
- * Rob Clark <robdcl...@gmail.com>
- * Maarten Lankhorst <maarten.lankho...@canonical.com>
- * Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- *
- * Based on bo.c which bears the following copyright notice,
- * but is dual licensed:
- *
- * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+/*	$NetBSD: dma-resv.h,v 1.5 2021/12/19 10:19:53 riastradh Exp $	*/
+
+/*-
+ * Copyright (c) 2018 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Taylor R. Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
  */
-#ifndef _LINUX_RESERVATION_H
-#define _LINUX_RESERVATION_H
 
-#include <linux/ww_mutex.h>
+#ifndef	_LINUX_DMA_RESV_H_
+#define	_LINUX_DMA_RESV_H_
+
 #include <linux/dma-fence.h>
-#include <linux/slab.h>
-#include <linux/seqlock.h>
 #include <linux/rcupdate.h>
+#include <linux/seqlock.h>
+#include <linux/ww_mutex.h>
 
-extern struct ww_class reservation_ww_class;
-extern struct lock_class_key reservation_seqcount_class;
-extern const char reservation_seqcount_string[];
-
-/**
- * struct dma_resv_list - a list of shared fences
- * @rcu: for internal use
- * @shared_count: table of shared fences
- * @shared_max: for growing shared fence table
- * @shared: shared fence table
- */
-struct dma_resv_list {
-	struct rcu_head rcu;
-	u32 shared_count, shared_max;
-	struct dma_fence __rcu *shared[];
-};
-
-/**
- * struct dma_resv - a reservation object manages fences for a buffer
- * @lock: update side lock
- * @seq: sequence count for managing RCU read-side synchronization
- * @fence_excl: the exclusive fence, if there is one currently
- * @fence: list of current shared fences
- */
 struct dma_resv {
-	struct ww_mutex lock;
-	seqcount_t seq;
+	struct ww_mutex				lock;
+	struct seqcount				seq;
+	struct dma_fence __rcu			*fence_excl;
+	struct dma_resv_list __rcu		*fence;
 
-	struct dma_fence __rcu *fence_excl;
-	struct dma_resv_list __rcu *fence;
+	struct dma_resv_list __rcu		*robj_prealloc;
 };
 
-#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
-#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
-
-/**
- * dma_resv_get_list - get the reservation object's
- * shared fence list, with update-side lock held
- * @obj: the reservation object
- *
- * Returns the shared fence list.  Does NOT take references to
- * the fence.  The obj->lock must be held.
- */
-static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
-{
-	return rcu_dereference_protected(obj->fence,
-					 dma_resv_held(obj));
-}
-
-/**
- * dma_resv_lock - lock the reservation object
- * @obj: the reservation object
- * @ctx: the locking context
- *
- * Locks the reservation object for exclusive access and modification. Note,
- * that the lock is only against other writers, readers will run concurrently
- * with a writer under RCU. The seqlock is used to notify readers if they
- * overlap with a writer.
- *
- * As the reservation object may be locked by multiple parties in an
- * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
- * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
- * object may be locked by itself by passing NULL as @ctx.
- */
-static inline int dma_resv_lock(struct dma_resv *obj,
-				struct ww_acquire_ctx *ctx)
-{
-	return ww_mutex_lock(&obj->lock, ctx);
-}
-
-/**
- * dma_resv_lock_interruptible - lock the reservation object
- * @obj: the reservation object
- * @ctx: the locking context
- *
- * Locks the reservation object interruptible for exclusive access and
- * modification. Note, that the lock is only against other writers, readers
- * will run concurrently with a writer under RCU. The seqlock is used to
- * notify readers if they overlap with a writer.
- *
- * As the reservation object may be locked by multiple parties in an
- * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
- * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
- * object may be locked by itself by passing NULL as @ctx.
- */
-static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
-					      struct ww_acquire_ctx *ctx)
-{
-	return ww_mutex_lock_interruptible(&obj->lock, ctx);
-}
-
-/**
- * dma_resv_lock_slow - slowpath lock the reservation object
- * @obj: the reservation object
- * @ctx: the locking context
- *
- * Acquires the reservation object after a die case. This function
- * will sleep until the lock becomes available. See dma_resv_lock() as
- * well.
- */
-static inline void dma_resv_lock_slow(struct dma_resv *obj,
-				      struct ww_acquire_ctx *ctx)
-{
-	ww_mutex_lock_slow(&obj->lock, ctx);
-}
-
-/**
- * dma_resv_lock_slow_interruptible - slowpath lock the reservation
- * object, interruptible
- * @obj: the reservation object
- * @ctx: the locking context
- *
- * Acquires the reservation object interruptible after a die case. This function
- * will sleep until the lock becomes available. See
- * dma_resv_lock_interruptible() as well.
- */
-static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj,
-						   struct ww_acquire_ctx *ctx)
-{
-	return ww_mutex_lock_slow_interruptible(&obj->lock, ctx);
-}
-
-/**
- * dma_resv_trylock - trylock the reservation object
- * @obj: the reservation object
- *
- * Tries to lock the reservation object for exclusive access and modification.
- * Note, that the lock is only against other writers, readers will run
- * concurrently with a writer under RCU. The seqlock is used to notify readers
- * if they overlap with a writer.
- *
- * Also note that since no context is provided, no deadlock protection is
- * possible.
- *
- * Returns true if the lock was acquired, false otherwise.
- */
-static inline bool __must_check dma_resv_trylock(struct dma_resv *obj)
-{
-	return ww_mutex_trylock(&obj->lock);
-}
-
-/**
- * dma_resv_is_locked - is the reservation object locked
- * @obj: the reservation object
- *
- * Returns true if the mutex is locked, false if unlocked.
- */
-static inline bool dma_resv_is_locked(struct dma_resv *obj)
-{
-	return ww_mutex_is_locked(&obj->lock);
-}
+struct dma_resv_list {
+	struct rcu_head		rol_rcu;
 
-/**
- * dma_resv_locking_ctx - returns the context used to lock the object
- * @obj: the reservation object
- *
- * Returns the context used to lock a reservation object or NULL if no context
- * was used or the object is not locked at all.
- */
-static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
-{
-	return READ_ONCE(obj->lock.wwm_u.ctx);
-}
+	uint32_t		shared_count;
+	uint32_t		shared_max;
+	struct dma_fence __rcu	*shared[];
+};
 
-/**
- * dma_resv_unlock - unlock the reservation object
- * @obj: the reservation object
- *
- * Unlocks the reservation object following exclusive access.
- */
-static inline void dma_resv_unlock(struct dma_resv *obj)
-{
-#ifdef CONFIG_DEBUG_MUTEXES
-	/* Test shared fence slot reservation */
-	if (rcu_access_pointer(obj->fence)) {
-		struct dma_resv_list *fence = dma_resv_get_list(obj);
-
-		fence->shared_max = fence->shared_count;
-	}
-#endif
-	ww_mutex_unlock(&obj->lock);
-}
+/* NetBSD addition */
+struct dma_resv_poll {
+	kmutex_t		rp_lock;
+	struct selinfo		rp_selq;
+	struct dma_fence_cb		rp_fcb;
+	bool			rp_claimed;
+};
 
-/**
- * dma_resv_get_excl - get the reservation object's
- * exclusive fence, with update-side lock held
- * @obj: the reservation object
- *
- * Returns the exclusive fence (if any).  Does NOT take a
- * reference. Writers must hold obj->lock, readers may only
- * hold a RCU read side lock.
- *
- * RETURNS
- * The exclusive fence or NULL
- */
-static inline struct dma_fence *
-dma_resv_get_excl(struct dma_resv *obj)
-{
-	return rcu_dereference_protected(obj->fence_excl,
-					 dma_resv_held(obj));
-}
+#define	dma_resv_add_excl_fence		linux_dma_resv_add_excl_fence
+#define	dma_resv_add_shared_fence	linux_dma_resv_add_shared_fence
+#define	dma_resv_assert_held		linux_dma_resv_assert_held
+#define	dma_resv_copy_fences		linux_dma_resv_copy_fences
+#define	dma_resv_do_poll		linux_dma_resv_do_poll
+#define	dma_resv_fini			linux_dma_resv_fini
+#define	dma_resv_get_excl		linux_dma_resv_get_excl
+#define	dma_resv_get_excl_rcu		linux_dma_resv_get_excl_rcu
+#define	dma_resv_get_fences_rcu		linux_dma_resv_get_fences_rcu
+#define	dma_resv_get_list		linux_dma_resv_get_list
+#define	dma_resv_held			linux_dma_resv_held
+#define	dma_resv_init			linux_dma_resv_init
+#define	dma_resv_kqfilter		linux_dma_resv_kqfilter
+#define	dma_resv_lock			linux_dma_resv_lock
+#define	dma_resv_lock_interruptible	linux_dma_resv_lock_interruptible
+#define	dma_resv_reserve_shared		linux_dma_resv_reserve_shared
+#define	dma_resv_test_signaled_rcu	linux_dma_resv_test_signaled_rcu
+#define	dma_resv_trylock		linux_dma_resv_trylock
+#define	dma_resv_unlock			linux_dma_resv_unlock
+#define	dma_resv_wait_timeout_rcu	linux_dma_resv_wait_timeout_rcu
+#define	dma_resv_poll_fini		linux_dma_resv_poll_fini
+#define	dma_resv_poll_init		linux_dma_resv_poll_init
+#define	reservation_ww_class		linux_reservation_ww_class
+
+extern struct ww_class	reservation_ww_class;
+
+void	dma_resv_init(struct dma_resv *);
+void	dma_resv_fini(struct dma_resv *);
+int	dma_resv_lock(struct dma_resv *,
+	    struct ww_acquire_ctx *);
+int	dma_resv_lock_interruptible(struct dma_resv *,
+	    struct ww_acquire_ctx *);
+bool	dma_resv_trylock(struct dma_resv *) __must_check;
+void	dma_resv_unlock(struct dma_resv *);
+bool	dma_resv_held(struct dma_resv *);
+void	dma_resv_assert_held(struct dma_resv *);
+struct dma_fence *
+	dma_resv_get_excl(struct dma_resv *);
+struct dma_resv_list *
+	dma_resv_get_list(struct dma_resv *);
+int	dma_resv_reserve_shared(struct dma_resv *);
+void	dma_resv_add_excl_fence(struct dma_resv *,
+	    struct dma_fence *);
+void	dma_resv_add_shared_fence(struct dma_resv *,
+	    struct dma_fence *);
+
+struct dma_fence *
+	dma_resv_get_excl_rcu(const struct dma_resv *);
+int	dma_resv_get_fences_rcu(const struct dma_resv *,
+	    struct dma_fence **, unsigned *, struct dma_fence ***);
+
+int	dma_resv_copy_fences(struct dma_resv *,
+	    const struct dma_resv *);
+
+bool	dma_resv_test_signaled_rcu(const struct dma_resv *,
+	    bool);
+long	dma_resv_wait_timeout_rcu(const struct dma_resv *,
+	    bool, bool, unsigned long);
+
+/* NetBSD additions */
+void	dma_resv_poll_init(struct dma_resv_poll *);
+void	dma_resv_poll_fini(struct dma_resv_poll *);
+int	dma_resv_do_poll(const struct dma_resv *, int,
+	    struct dma_resv_poll *);
+int	dma_resv_kqfilter(const struct dma_resv *,
+	    struct knote *, struct dma_resv_poll *);
 
-/**
- * dma_resv_get_excl_rcu - get the reservation object's
- * exclusive fence, without lock held.
- * @obj: the reservation object
- *
- * If there is an exclusive fence, this atomically increments it's
- * reference count and returns it.
- *
- * RETURNS
- * The exclusive fence or NULL if none
- */
-static inline struct dma_fence *
-dma_resv_get_excl_rcu(struct dma_resv *obj)
+static inline bool
+dma_resv_has_excl_fence(const struct dma_resv *robj)
 {
-	struct dma_fence *fence;
-
-	if (!rcu_access_pointer(obj->fence_excl))
-		return NULL;
-
-	rcu_read_lock();
-	fence = dma_fence_get_rcu_safe(&obj->fence_excl);
-	rcu_read_unlock();
-
-	return fence;
+	return robj->fence_excl != NULL;
 }
 
-void dma_resv_init(struct dma_resv *obj);
-void dma_resv_fini(struct dma_resv *obj);
-int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
-void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
-
-void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
-
-int dma_resv_get_fences_rcu(struct dma_resv *obj,
-			    struct dma_fence **pfence_excl,
-			    unsigned *pshared_count,
-			    struct dma_fence ***pshared);
-
-int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
-
-long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr,
-			       unsigned long timeout);
-
-bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all);
-
-#endif /* _LINUX_RESERVATION_H */
+#endif	/* _LINUX_DMA_RESV_H_ */

Index: src/sys/external/bsd/drm2/linux/files.drmkms_linux
diff -u src/sys/external/bsd/drm2/linux/files.drmkms_linux:1.23 src/sys/external/bsd/drm2/linux/files.drmkms_linux:1.24
--- src/sys/external/bsd/drm2/linux/files.drmkms_linux:1.23	Sun Dec 19 01:37:28 2021
+++ src/sys/external/bsd/drm2/linux/files.drmkms_linux	Sun Dec 19 10:19:53 2021
@@ -1,4 +1,4 @@
-#       $NetBSD: files.drmkms_linux,v 1.23 2021/12/19 01:37:28 riastradh Exp $
+#       $NetBSD: files.drmkms_linux,v 1.24 2021/12/19 10:19:53 riastradh Exp $
 
 define	drmkms_linux: i2cexec, i2c_bitbang
 
@@ -8,6 +8,7 @@ makeoptions 	drmkms_linux	CPPFLAGS+="-I$
 file	external/bsd/drm2/linux/linux_atomic64.c	drmkms_linux
 file	external/bsd/drm2/linux/linux_dma_buf.c		drmkms_linux
 file	external/bsd/drm2/linux/linux_dma_fence.c	drmkms_linux
+file	external/bsd/drm2/linux/linux_dma_resv.c	drmkms_linux
 file	external/bsd/drm2/linux/linux_dmi.c		drmkms_linux
 file	external/bsd/drm2/linux/linux_i2c.c		drmkms_linux
 file	external/bsd/drm2/linux/linux_idr.c		drmkms_linux
@@ -15,7 +16,6 @@ file	external/bsd/drm2/linux/linux_kmap.
 file	external/bsd/drm2/linux/linux_list_sort.c	drmkms_linux
 file	external/bsd/drm2/linux/linux_module.c		drmkms_linux
 file	external/bsd/drm2/linux/linux_pci.c		drmkms_linux
-file	external/bsd/drm2/linux/linux_reservation.c	drmkms_linux
 file	external/bsd/drm2/linux/linux_stop_machine.c	drmkms_linux
 file	external/bsd/drm2/linux/linux_wait_bit.c	drmkms_linux
 file	external/bsd/drm2/linux/linux_writecomb.c	drmkms_linux

Index: src/sys/external/bsd/drm2/linux/linux_dma_buf.c
diff -u src/sys/external/bsd/drm2/linux/linux_dma_buf.c:1.8 src/sys/external/bsd/drm2/linux/linux_dma_buf.c:1.9
--- src/sys/external/bsd/drm2/linux/linux_dma_buf.c:1.8	Sun Dec 19 01:14:29 2021
+++ src/sys/external/bsd/drm2/linux/linux_dma_buf.c	Sun Dec 19 10:19:53 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: linux_dma_buf.c,v 1.8 2021/12/19 01:14:29 riastradh Exp $	*/
+/*	$NetBSD: linux_dma_buf.c,v 1.9 2021/12/19 10:19:53 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 2018 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: linux_dma_buf.c,v 1.8 2021/12/19 01:14:29 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: linux_dma_buf.c,v 1.9 2021/12/19 10:19:53 riastradh Exp $");
 
 #include <sys/types.h>
 #include <sys/atomic.h>
@@ -41,7 +41,7 @@ __KERNEL_RCSID(0, "$NetBSD: linux_dma_bu
 
 #include <linux/dma-buf.h>
 #include <linux/err.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 
 struct dma_buf_file {
 	struct dma_buf	*dbf_dmabuf;
@@ -86,11 +86,11 @@ dma_buf_export(struct dma_buf_export_inf
 
 	mutex_init(&dmabuf->db_lock, MUTEX_DEFAULT, IPL_NONE);
 	dmabuf->db_refcnt = 1;
-	reservation_poll_init(&dmabuf->db_resv_poll);
+	dma_resv_poll_init(&dmabuf->db_resv_poll);
 
 	if (dmabuf->resv == NULL) {
 		dmabuf->resv = &dmabuf->db_resv_int[0];
-		reservation_object_init(dmabuf->resv);
+		dma_resv_init(dmabuf->resv);
 	}
 
 	return dmabuf;
@@ -166,10 +166,10 @@ dma_buf_put(struct dma_buf *dmabuf)
 	if (atomic_dec_uint_nv(&dmabuf->db_refcnt) != 0)
 		return;
 
-	reservation_poll_fini(&dmabuf->db_resv_poll);
+	dma_resv_poll_fini(&dmabuf->db_resv_poll);
 	mutex_destroy(&dmabuf->db_lock);
 	if (dmabuf->resv == &dmabuf->db_resv_int[0]) {
-		reservation_object_fini(dmabuf->resv);
+		dma_resv_fini(dmabuf->resv);
 		kmem_free(dmabuf, offsetof(struct dma_buf, db_resv_int[1]));
 	} else {
 		kmem_free(dmabuf, sizeof(*dmabuf));
@@ -242,9 +242,9 @@ dmabuf_fop_poll(struct file *file, int e
 {
 	struct dma_buf_file *dbf = file->f_data;
 	struct dma_buf *dmabuf = dbf->dbf_dmabuf;
-	struct reservation_poll *rpoll = &dmabuf->db_resv_poll;
+	struct dma_resv_poll *rpoll = &dmabuf->db_resv_poll;
 
-	return reservation_object_poll(dmabuf->resv, events, rpoll);
+	return dma_resv_do_poll(dmabuf->resv, events, rpoll);
 }
 
 static int
@@ -252,9 +252,9 @@ dmabuf_fop_kqfilter(struct file *file, s
 {
 	struct dma_buf_file *dbf = file->f_data;
 	struct dma_buf *dmabuf = dbf->dbf_dmabuf;
-	struct reservation_poll *rpoll = &dmabuf->db_resv_poll;
+	struct dma_resv_poll *rpoll = &dmabuf->db_resv_poll;
 
-	return reservation_object_kqfilter(dmabuf->resv, kn, rpoll);
+	return dma_resv_kqfilter(dmabuf->resv, kn, rpoll);
 }
 
 static int

Added files:

Index: src/sys/external/bsd/drm2/linux/linux_dma_resv.c
diff -u /dev/null src/sys/external/bsd/drm2/linux/linux_dma_resv.c:1.1
--- /dev/null	Sun Dec 19 10:19:53 2021
+++ src/sys/external/bsd/drm2/linux/linux_dma_resv.c	Sun Dec 19 10:19:53 2021
@@ -0,0 +1,1335 @@
+/*	$NetBSD: linux_dma_resv.c,v 1.1 2021/12/19 10:19:53 riastradh Exp $	*/
+
+/*-
+ * Copyright (c) 2018 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Taylor R. Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__KERNEL_RCSID(0, "$NetBSD: linux_dma_resv.c,v 1.1 2021/12/19 10:19:53 riastradh Exp $");
+
+#include <sys/param.h>
+#include <sys/poll.h>
+#include <sys/select.h>
+
+#include <linux/dma-fence.h>
+#include <linux/dma-resv.h>
+#include <linux/seqlock.h>
+#include <linux/ww_mutex.h>
+
+DEFINE_WW_CLASS(reservation_ww_class __cacheline_aligned);
+
+static struct dma_resv_list *
+objlist_tryalloc(uint32_t n)
+{
+	struct dma_resv_list *list;
+
+	list = kmem_alloc(offsetof(typeof(*list), shared[n]), KM_NOSLEEP);
+	if (list == NULL)
+		return NULL;
+	list->shared_max = n;
+
+	return list;
+}
+
+static void
+objlist_free(struct dma_resv_list *list)
+{
+	uint32_t n = list->shared_max;
+
+	kmem_free(list, offsetof(typeof(*list), shared[n]));
+}
+
+static void
+objlist_free_cb(struct rcu_head *rcu)
+{
+	struct dma_resv_list *list = container_of(rcu,
+	    struct dma_resv_list, rol_rcu);
+
+	objlist_free(list);
+}
+
+static void
+objlist_defer_free(struct dma_resv_list *list)
+{
+
+	call_rcu(&list->rol_rcu, objlist_free_cb);
+}
+
+/*
+ * dma_resv_init(robj)
+ *
+ *	Initialize a reservation object.  Caller must later destroy it
+ *	with dma_resv_fini.
+ */
+void
+dma_resv_init(struct dma_resv *robj)
+{
+
+	ww_mutex_init(&robj->lock, &reservation_ww_class);
+	seqcount_init(&robj->seq);
+	robj->fence_excl = NULL;
+	robj->fence = NULL;
+	robj->robj_prealloc = NULL;
+}
+
+/*
+ * dma_resv_fini(robj)
+ *
+ *	Destroy a reservation object, freeing any memory that had been
+ *	allocated for it.  Caller must have exclusive access to it.
+ */
+void
+dma_resv_fini(struct dma_resv *robj)
+{
+	unsigned i;
+
+	if (robj->robj_prealloc)
+		objlist_free(robj->robj_prealloc);
+	if (robj->fence) {
+		for (i = 0; i < robj->fence->shared_count; i++)
+			dma_fence_put(robj->fence->shared[i]);
+		objlist_free(robj->fence);
+	}
+	if (robj->fence_excl)
+		dma_fence_put(robj->fence_excl);
+	ww_mutex_destroy(&robj->lock);
+}
+
+/*
+ * dma_resv_lock(robj, ctx)
+ *
+ *	Acquire a reservation object's lock.  Return 0 on success,
+ *	-EALREADY if caller already holds it, -EDEADLK if a
+ *	higher-priority owner holds it and the caller must back out and
+ *	retry.
+ */
+int
+dma_resv_lock(struct dma_resv *robj,
+    struct ww_acquire_ctx *ctx)
+{
+
+	return ww_mutex_lock(&robj->lock, ctx);
+}
+
+/*
+ * dma_resv_lock_interruptible(robj, ctx)
+ *
+ *	Acquire a reservation object's lock.  Return 0 on success,
+ *	-EALREADY if caller already holds it, -EDEADLK if a
+ *	higher-priority owner holds it and the caller must back out and
+ *	retry, -ERESTART/-EINTR if interrupted.
+ */
+int
+dma_resv_lock_interruptible(struct dma_resv *robj,
+    struct ww_acquire_ctx *ctx)
+{
+
+	return ww_mutex_lock_interruptible(&robj->lock, ctx);
+}
+
+/*
+ * dma_resv_trylock(robj)
+ *
+ *	Try to acquire a reservation object's lock without blocking.
+ *	Return true on success, false on failure.
+ */
+bool
+dma_resv_trylock(struct dma_resv *robj)
+{
+
+	return ww_mutex_trylock(&robj->lock);
+}
+
+/*
+ * dma_resv_unlock(robj)
+ *
+ *	Release a reservation object's lock.
+ */
+void
+dma_resv_unlock(struct dma_resv *robj)
+{
+
+	return ww_mutex_unlock(&robj->lock);
+}
+
+/*
+ * dma_resv_held(robj)
+ *
+ *	True if robj is locked.
+ */
+bool
+dma_resv_held(struct dma_resv *robj)
+{
+
+	return ww_mutex_is_locked(&robj->lock);
+}
+
+/*
+ * dma_resv_assert_held(robj)
+ *
+ *	Panic if robj is not held, in DIAGNOSTIC builds.
+ */
+void
+dma_resv_assert_held(struct dma_resv *robj)
+{
+
+	KASSERT(dma_resv_held(robj));
+}
+
+/*
+ * dma_resv_get_excl(robj)
+ *
+ *	Return a pointer to the exclusive fence of the reservation
+ *	object robj.
+ *
+ *	Caller must have robj locked.
+ */
+struct dma_fence *
+dma_resv_get_excl(struct dma_resv *robj)
+{
+
+	KASSERT(dma_resv_held(robj));
+	return robj->fence_excl;
+}
+
+/*
+ * dma_resv_get_list(robj)
+ *
+ *	Return a pointer to the shared fence list of the reservation
+ *	object robj.
+ *
+ *	Caller must have robj locked.
+ */
+struct dma_resv_list *
+dma_resv_get_list(struct dma_resv *robj)
+{
+
+	KASSERT(dma_resv_held(robj));
+	return robj->fence;
+}
+
+/*
+ * dma_resv_reserve_shared(robj)
+ *
+ *	Reserve space in robj to add a shared fence.  To be used only
+ *	once before calling dma_resv_add_shared_fence.
+ *
+ *	Caller must have robj locked.
+ *
+ *	Internally, we start with room for four entries and double if
+ *	we don't have enough.  This is not guaranteed.
+ */
+int
+dma_resv_reserve_shared(struct dma_resv *robj)
+{
+	struct dma_resv_list *list, *prealloc;
+	uint32_t n, nalloc;
+
+	KASSERT(dma_resv_held(robj));
+
+	list = robj->fence;
+	prealloc = robj->robj_prealloc;
+
+	/* If there's an existing list, check it for space.  */
+	if (list) {
+		/* If there's too many already, give up.  */
+		if (list->shared_count == UINT32_MAX)
+			return -ENOMEM;
+
+		/* Add one more. */
+		n = list->shared_count + 1;
+
+		/* If there's enough for one more, we're done.  */
+		if (n <= list->shared_max)
+			return 0;
+	} else {
+		/* No list already.  We need space for 1.  */
+		n = 1;
+	}
+
+	/* If not, maybe there's a preallocated list ready.  */
+	if (prealloc != NULL) {
+		/* If there's enough room in it, stop here.  */
+		if (n <= prealloc->shared_max)
+			return 0;
+
+		/* Try to double its capacity.  */
+		nalloc = n > UINT32_MAX/2 ? UINT32_MAX : 2*n;
+		prealloc = objlist_tryalloc(nalloc);
+		if (prealloc == NULL)
+			return -ENOMEM;
+
+		/* Swap the new preallocated list and free the old one.  */
+		objlist_free(robj->robj_prealloc);
+		robj->robj_prealloc = prealloc;
+	} else {
+		/* Start with some spare.  */
+		nalloc = n > UINT32_MAX/2 ? UINT32_MAX : MAX(2*n, 4);
+		prealloc = objlist_tryalloc(nalloc);
+		if (prealloc == NULL)
+			return -ENOMEM;
+		/* Save the new preallocated list.  */
+		robj->robj_prealloc = prealloc;
+	}
+
+	/* Success!  */
+	return 0;
+}
+
+struct dma_resv_write_ticket {
+};
+
+/*
+ * dma_resv_write_begin(robj, ticket)
+ *
+ *	Begin an atomic batch of writes to robj, and initialize opaque
+ *	ticket for it.  The ticket must be passed to
+ *	dma_resv_write_commit to commit the writes.
+ *
+ *	Caller must have robj locked.
+ *
+ *	Implies membar_producer, i.e. store-before-store barrier.  Does
+ *	NOT serve as an acquire operation, however.
+ */
+static void
+dma_resv_write_begin(struct dma_resv *robj,
+    struct dma_resv_write_ticket *ticket)
+{
+
+	KASSERT(dma_resv_held(robj));
+
+	write_seqcount_begin(&robj->seq);
+}
+
+/*
+ * dma_resv_write_commit(robj, ticket)
+ *
+ *	Commit an atomic batch of writes to robj begun with the call to
+ *	dma_resv_write_begin that returned ticket.
+ *
+ *	Caller must have robj locked.
+ *
+ *	Implies membar_producer, i.e. store-before-store barrier.  Does
+ *	NOT serve as a release operation, however.
+ */
+static void
+dma_resv_write_commit(struct dma_resv *robj,
+    struct dma_resv_write_ticket *ticket)
+{
+
+	KASSERT(dma_resv_held(robj));
+
+	write_seqcount_end(&robj->seq);
+}
+
+struct dma_resv_read_ticket {
+	unsigned version;
+};
+
+/*
+ * dma_resv_read_begin(robj, ticket)
+ *
+ *	Begin a read section, and initialize opaque ticket for it.  The
+ *	ticket must be passed to dma_resv_read_valid, and the
+ *	caller must be prepared to retry reading if it fails.
+ */
+static void
+dma_resv_read_begin(const struct dma_resv *robj,
+    struct dma_resv_read_ticket *ticket)
+{
+
+	ticket->version = read_seqcount_begin(&robj->seq);
+}
+
+/*
+ * dma_resv_read_valid(robj, ticket)
+ *
+ *	Test whether the read sections are valid.  Return true on
+ *	success, or false on failure if the read ticket has been
+ *	invalidated.
+ */
+static bool
+dma_resv_read_valid(const struct dma_resv *robj,
+    struct dma_resv_read_ticket *ticket)
+{
+
+	return !read_seqcount_retry(&robj->seq, ticket->version);
+}
+
+/*
+ * dma_resv_add_excl_fence(robj, fence)
+ *
+ *	Empty and release all of robj's shared fences, and clear and
+ *	release its exclusive fence.  If fence is nonnull, acquire a
+ *	reference to it and save it as robj's exclusive fence.
+ *
+ *	Caller must have robj locked.
+ */
+void
+dma_resv_add_excl_fence(struct dma_resv *robj,
+    struct dma_fence *fence)
+{
+	struct dma_fence *old_fence = robj->fence_excl;
+	struct dma_resv_list *old_list = robj->fence;
+	uint32_t old_shared_count;
+	struct dma_resv_write_ticket ticket;
+
+	KASSERT(dma_resv_held(robj));
+
+	/*
+	 * If we are setting rather than just removing a fence, acquire
+	 * a reference for ourselves.
+	 */
+	if (fence)
+		(void)dma_fence_get(fence);
+
+	/* If there are any shared fences, remember how many.  */
+	if (old_list)
+		old_shared_count = old_list->shared_count;
+
+	/* Begin an update.  */
+	dma_resv_write_begin(robj, &ticket);
+
+	/* Replace the fence and zero the shared count.  */
+	robj->fence_excl = fence;
+	if (old_list)
+		old_list->shared_count = 0;
+
+	/* Commit the update.  */
+	dma_resv_write_commit(robj, &ticket);
+
+	/* Release the old exclusive fence, if any.  */
+	if (old_fence)
+		dma_fence_put(old_fence);
+
+	/* Release any old shared fences.  */
+	if (old_list) {
+		while (old_shared_count--)
+			dma_fence_put(old_list->shared[old_shared_count]);
+	}
+}
+
+/*
+ * dma_resv_add_shared_fence(robj, fence)
+ *
+ *	Acquire a reference to fence and add it to robj's shared list.
+ *	If any fence was already added with the same context number,
+ *	release it and replace it by this one.
+ *
+ *	Caller must have robj locked, and must have preceded with a
+ *	call to dma_resv_reserve_shared for each shared fence
+ *	added.
+ */
+void
+dma_resv_add_shared_fence(struct dma_resv *robj,
+    struct dma_fence *fence)
+{
+	struct dma_resv_list *list = robj->fence;
+	struct dma_resv_list *prealloc = robj->robj_prealloc;
+	struct dma_resv_write_ticket ticket;
+	struct dma_fence *replace = NULL;
+	uint32_t i;
+
+	KASSERT(dma_resv_held(robj));
+
+	/* Acquire a reference to the fence.  */
+	KASSERT(fence != NULL);
+	(void)dma_fence_get(fence);
+
+	/* Check for a preallocated replacement list.  */
+	if (prealloc == NULL) {
+		/*
+		 * If there is no preallocated replacement list, then
+		 * there must be room in the current list.
+		 */
+		KASSERT(list != NULL);
+		KASSERT(list->shared_count < list->shared_max);
+
+		/* Begin an update.  Implies membar_producer for fence.  */
+		dma_resv_write_begin(robj, &ticket);
+
+		/* Find a fence with the same context number.  */
+		for (i = 0; i < list->shared_count; i++) {
+			if (list->shared[i]->context == fence->context) {
+				replace = list->shared[i];
+				list->shared[i] = fence;
+				break;
+			}
+		}
+
+		/* If we didn't find one, add it at the end.  */
+		if (i == list->shared_count)
+			list->shared[list->shared_count++] = fence;
+
+		/* Commit the update.  */
+		dma_resv_write_commit(robj, &ticket);
+	} else {
+		/*
+		 * There is a preallocated replacement list.  There may
+		 * not be a current list.  If not, treat it as a zero-
+		 * length list.
+		 */
+		uint32_t shared_count = (list == NULL? 0 : list->shared_count);
+
+		/* There had better be room in the preallocated list.  */
+		KASSERT(shared_count < prealloc->shared_max);
+
+		/*
+		 * Copy the fences over, but replace if we find one
+		 * with the same context number.
+		 */
+		for (i = 0; i < shared_count; i++) {
+			if (replace == NULL &&
+			    list->shared[i]->context == fence->context) {
+				replace = list->shared[i];
+				prealloc->shared[i] = fence;
+			} else {
+				prealloc->shared[i] = list->shared[i];
+			}
+		}
+		prealloc->shared_count = shared_count;
+
+		/* If we didn't find one, add it at the end.  */
+		if (replace == NULL)
+			prealloc->shared[prealloc->shared_count++] = fence;
+
+		/*
+		 * Now ready to replace the list.  Begin an update.
+		 * Implies membar_producer for fence and prealloc.
+		 */
+		dma_resv_write_begin(robj, &ticket);
+
+		/* Replace the list.  */
+		robj->fence = prealloc;
+		robj->robj_prealloc = NULL;
+
+		/* Commit the update.  */
+		dma_resv_write_commit(robj, &ticket);
+
+		/*
+		 * If there is an old list, free it when convenient.
+		 * (We are not in a position at this point to sleep
+		 * waiting for activity on all CPUs.)
+		 */
+		if (list)
+			objlist_defer_free(list);
+	}
+
+	/* Release a fence if we replaced it.  */
+	if (replace)
+		dma_fence_put(replace);
+}
+
+/*
+ * dma_resv_get_excl_rcu(robj)
+ *
+ *	Note: Caller need not call this from an RCU read section.
+ */
+struct dma_fence *
+dma_resv_get_excl_rcu(const struct dma_resv *robj)
+{
+	struct dma_fence *fence;
+
+	rcu_read_lock();
+	fence = dma_fence_get_rcu_safe(&robj->fence_excl);
+	rcu_read_unlock();
+
+	return fence;
+}
+
+/*
+ * dma_resv_get_fences_rcu(robj, fencep, nsharedp, sharedp)
+ */
+int
+dma_resv_get_fences_rcu(const struct dma_resv *robj,
+    struct dma_fence **fencep, unsigned *nsharedp, struct dma_fence ***sharedp)
+{
+	const struct dma_resv_list *list;
+	struct dma_fence *fence;
+	struct dma_fence **shared = NULL;
+	unsigned shared_alloc, shared_count, i;
+	struct dma_resv_read_ticket ticket;
+
+top:
+	/* Enter an RCU read section and get a read ticket.  */
+	rcu_read_lock();
+	dma_resv_read_begin(robj, &ticket);
+
+	/* If there is a shared list, grab it.  */
+	list = robj->fence;
+	__insn_barrier();
+	if (list) {
+		/* Make sure the content of the list has been published.  */
+		membar_datadep_consumer();
+
+		/* Check whether we have a buffer.  */
+		if (shared == NULL) {
+			/*
+			 * We don't have a buffer yet.  Try to allocate
+			 * one without waiting.
+			 */
+			shared_alloc = list->shared_max;
+			__insn_barrier();
+			shared = kcalloc(shared_alloc, sizeof(shared[0]),
+			    GFP_NOWAIT);
+			if (shared == NULL) {
+				/*
+				 * Couldn't do it immediately.  Back
+				 * out of RCU and allocate one with
+				 * waiting.
+				 */
+				rcu_read_unlock();
+				shared = kcalloc(shared_alloc,
+				    sizeof(shared[0]), GFP_KERNEL);
+				if (shared == NULL)
+					return -ENOMEM;
+				goto top;
+			}
+		} else if (shared_alloc < list->shared_max) {
+			/*
+			 * We have a buffer but it's too small.  We're
+			 * already racing in this case, so just back
+			 * out and wait to allocate a bigger one.
+			 */
+			shared_alloc = list->shared_max;
+			__insn_barrier();
+			rcu_read_unlock();
+			kfree(shared);
+			shared = kcalloc(shared_alloc, sizeof(shared[0]),
+			    GFP_KERNEL);
+			if (shared == NULL)
+				return -ENOMEM;
+		}
+
+		/*
+		 * We got a buffer large enough.  Copy into the buffer
+		 * and record the number of elements.
+		 */
+		memcpy(shared, list->shared, shared_alloc * sizeof(shared[0]));
+		shared_count = list->shared_count;
+	} else {
+		/* No shared list: shared count is zero.  */
+		shared_count = 0;
+	}
+
+	/* If there is an exclusive fence, grab it.  */
+	fence = robj->fence_excl;
+	__insn_barrier();
+	if (fence) {
+		/* Make sure the content of the fence has been published.  */
+		membar_datadep_consumer();
+	}
+
+	/*
+	 * We are done reading from robj and list.  Validate our
+	 * parking ticket.  If it's invalid, do not pass go and do not
+	 * collect $200.
+	 */
+	if (!dma_resv_read_valid(robj, &ticket))
+		goto restart;
+
+	/*
+	 * Try to get a reference to the exclusive fence, if there is
+	 * one.  If we can't, start over.
+	 */
+	if (fence) {
+		if (dma_fence_get_rcu(fence) == NULL)
+			goto restart;
+	}
+
+	/*
+	 * Try to get a reference to all of the shared fences.
+	 */
+	for (i = 0; i < shared_count; i++) {
+		if (dma_fence_get_rcu(shared[i]) == NULL)
+			goto put_restart;
+	}
+
+	/* Success!  */
+	rcu_read_unlock();
+	*fencep = fence;
+	*nsharedp = shared_count;
+	*sharedp = shared;
+	return 0;
+
+put_restart:
+	/* Back out.  */
+	while (i --> 0) {
+		dma_fence_put(shared[i]);
+		shared[i] = NULL; /* paranoia */
+	}
+	if (fence) {
+		dma_fence_put(fence);
+		fence = NULL;	/* paranoia */
+	}
+
+restart:
+	rcu_read_unlock();
+	goto top;
+}
+
+/*
+ * dma_resv_copy_fences(dst, src)
+ *
+ *	Copy the exclusive fence and all the shared fences from src to
+ *	dst.
+ *
+ *	Caller must have dst locked.
+ */
+int
+dma_resv_copy_fences(struct dma_resv *dst_robj,
+    const struct dma_resv *src_robj)
+{
+	const struct dma_resv_list *src_list;
+	struct dma_resv_list *dst_list = NULL;
+	struct dma_resv_list *old_list;
+	struct dma_fence *fence = NULL;
+	struct dma_fence *old_fence;
+	uint32_t shared_count, i;
+	struct dma_resv_read_ticket read_ticket;
+	struct dma_resv_write_ticket write_ticket;
+
+	KASSERT(dma_resv_held(dst_robj));
+
+top:
+	/* Enter an RCU read section and get a read ticket.  */
+	rcu_read_lock();
+	dma_resv_read_begin(src_robj, &read_ticket);
+
+	/* Get the shared list.  */
+	src_list = src_robj->fence;
+	__insn_barrier();
+	if (src_list) {
+		/* Make sure the content of the list has been published.  */
+		membar_datadep_consumer();
+
+		/* Find out how long it is.  */
+		shared_count = src_list->shared_count;
+
+		/*
+		 * Make sure we saw a consistent snapshot of the list
+		 * pointer and length.
+		 */
+		if (!dma_resv_read_valid(src_robj, &read_ticket))
+			goto restart;
+
+		/* Allocate a new list.  */
+		dst_list = objlist_tryalloc(shared_count);
+		if (dst_list == NULL)
+			return -ENOMEM;
+
+		/* Copy over all fences that are not yet signalled.  */
+		dst_list->shared_count = 0;
+		for (i = 0; i < shared_count; i++) {
+			if ((fence = dma_fence_get_rcu(src_list->shared[i]))
+			    == NULL)
+				goto restart;
+			if (dma_fence_is_signaled(fence)) {
+				dma_fence_put(fence);
+				fence = NULL;
+				continue;
+			}
+			dst_list->shared[dst_list->shared_count++] = fence;
+			fence = NULL;
+		}
+	}
+
+	/* Get the exclusive fence.  */
+	fence = src_robj->fence_excl;
+	__insn_barrier();
+	if (fence != NULL) {
+		/* Make sure the content of the fence has been published.  */
+		membar_datadep_consumer();
+
+		/*
+		 * Make sure we saw a consistent snapshot of the fence.
+		 *
+		 * XXX I'm not actually sure this is necessary since
+		 * pointer writes are supposed to be atomic.
+		 */
+		if (!dma_resv_read_valid(src_robj, &read_ticket)) {
+			fence = NULL;
+			goto restart;
+		}
+
+		/*
+		 * If it is going away, restart.  Otherwise, acquire a
+		 * reference to it.
+		 */
+		if (!dma_fence_get_rcu(fence)) {
+			fence = NULL;
+			goto restart;
+		}
+	}
+
+	/* All done with src; exit the RCU read section.  */
+	rcu_read_unlock();
+
+	/*
+	 * We now have a snapshot of the shared and exclusive fences of
+	 * src_robj and we have acquired references to them so they
+	 * won't go away.  Transfer them over to dst_robj, releasing
+	 * references to any that were there.
+	 */
+
+	/* Get the old shared and exclusive fences, if any.  */
+	old_list = dst_robj->fence;
+	old_fence = dst_robj->fence_excl;
+
+	/* Begin an update.  */
+	dma_resv_write_begin(dst_robj, &write_ticket);
+
+	/* Replace the fences.  */
+	dst_robj->fence = dst_list;
+	dst_robj->fence_excl = fence;
+
+	/* Commit the update.  */
+	dma_resv_write_commit(dst_robj, &write_ticket);
+
+	/* Release the old exclusive fence, if any.  */
+	if (old_fence)
+		dma_fence_put(old_fence);
+
+	/* Release any old shared fences.  */
+	if (old_list) {
+		for (i = old_list->shared_count; i --> 0;)
+			dma_fence_put(old_list->shared[i]);
+	}
+
+	/* Success!  */
+	return 0;
+
+restart:
+	rcu_read_unlock();
+	if (dst_list) {
+		for (i = dst_list->shared_count; i --> 0;) {
+			dma_fence_put(dst_list->shared[i]);
+			dst_list->shared[i] = NULL;
+		}
+		objlist_free(dst_list);
+		dst_list = NULL;
+	}
+	if (fence) {
+		dma_fence_put(fence);
+		fence = NULL;
+	}
+	goto top;
+}
+
+/*
+ * dma_resv_test_signaled_rcu(robj, shared)
+ *
+ *	If shared is true, test whether all of the shared fences are
+ *	signalled, or if there are none, test whether the exclusive
+ *	fence is signalled.  If shared is false, test only whether the
+ *	exclusive fence is signalled.
+ *
+ *	XXX Why does this _not_ test the exclusive fence if shared is
+ *	true only if there are no shared fences?  This makes no sense.
+ */
+bool
+dma_resv_test_signaled_rcu(const struct dma_resv *robj,
+    bool shared)
+{
+	struct dma_resv_read_ticket ticket;
+	struct dma_resv_list *list;
+	struct dma_fence *fence;
+	uint32_t i, shared_count;
+	bool signaled = true;
+
+top:
+	/* Enter an RCU read section and get a read ticket.  */
+	rcu_read_lock();
+	dma_resv_read_begin(robj, &ticket);
+
+	/* If shared is requested and there is a shared list, test it.  */
+	if (!shared)
+		goto excl;
+	list = robj->fence;
+	__insn_barrier();
+	if (list) {
+		/* Make sure the content of the list has been published.  */
+		membar_datadep_consumer();
+
+		/* Find out how long it is.  */
+		shared_count = list->shared_count;
+
+		/*
+		 * Make sure we saw a consistent snapshot of the list
+		 * pointer and length.
+		 */
+		if (!dma_resv_read_valid(robj, &ticket))
+			goto restart;
+
+		/*
+		 * For each fence, if it is going away, restart.
+		 * Otherwise, acquire a reference to it to test whether
+		 * it is signalled.  Stop if we find any that is not
+		 * signalled.
+		 */
+		for (i = 0; i < shared_count; i++) {
+			fence = dma_fence_get_rcu(list->shared[i]);
+			if (fence == NULL)
+				goto restart;
+			signaled &= dma_fence_is_signaled(fence);
+			dma_fence_put(fence);
+			if (!signaled)
+				goto out;
+		}
+	}
+
+excl:
+	/* If there is an exclusive fence, test it.  */
+	fence = robj->fence_excl;
+	__insn_barrier();
+	if (fence) {
+		/* Make sure the content of the fence has been published.  */
+		membar_datadep_consumer();
+
+		/*
+		 * Make sure we saw a consistent snapshot of the fence.
+		 *
+		 * XXX I'm not actually sure this is necessary since
+		 * pointer writes are supposed to be atomic.
+		 */
+		if (!dma_resv_read_valid(robj, &ticket))
+			goto restart;
+
+		/*
+		 * If it is going away, restart.  Otherwise, acquire a
+		 * reference to it to test whether it is signalled.
+		 */
+		if ((fence = dma_fence_get_rcu(fence)) == NULL)
+			goto restart;
+		signaled &= dma_fence_is_signaled(fence);
+		dma_fence_put(fence);
+		if (!signaled)
+			goto out;
+	}
+
+out:	rcu_read_unlock();
+	return signaled;
+
+restart:
+	rcu_read_unlock();
+	goto top;
+}
+
+/*
+ * dma_resv_wait_timeout_rcu(robj, shared, intr, timeout)
+ *
+ *	If shared is true, wait for all of the shared fences to be
+ *	signalled, or if there are none, wait for the exclusive fence
+ *	to be signalled.  If shared is false, wait only for the
+ *	exclusive fence to be signalled.  If timeout is zero, don't
+ *	wait, only test.
+ *
+ *	XXX Why does this _not_ wait for the exclusive fence if shared
+ *	is true only if there are no shared fences?  This makes no
+ *	sense.
+ */
+long
+dma_resv_wait_timeout_rcu(const struct dma_resv *robj,
+    bool shared, bool intr, unsigned long timeout)
+{
+	struct dma_resv_read_ticket ticket;
+	struct dma_resv_list *list;
+	struct dma_fence *fence;
+	uint32_t i, shared_count;
+	long ret;
+
+	if (timeout == 0)
+		return dma_resv_test_signaled_rcu(robj, shared);
+
+top:
+	/* Enter an RCU read section and get a read ticket.  */
+	rcu_read_lock();
+	dma_resv_read_begin(robj, &ticket);
+
+	/* If shared is requested and there is a shared list, wait on it.  */
+	if (!shared)
+		goto excl;
+	list = robj->fence;
+	__insn_barrier();
+	if (list) {
+		/* Make sure the content of the list has been published.  */
+		membar_datadep_consumer();
+
+		/* Find out how long it is.  */
+		shared_count = list->shared_count;
+
+		/*
+		 * Make sure we saw a consistent snapshot of the list
+		 * pointer and length.
+		 */
+		if (!dma_resv_read_valid(robj, &ticket))
+			goto restart;
+
+		/*
+		 * For each fence, if it is going away, restart.
+		 * Otherwise, acquire a reference to it to test whether
+		 * it is signalled.  Stop and wait if we find any that
+		 * is not signalled.
+		 */
+		for (i = 0; i < shared_count; i++) {
+			fence = dma_fence_get_rcu(list->shared[i]);
+			if (fence == NULL)
+				goto restart;
+			if (!dma_fence_is_signaled(fence))
+				goto wait;
+			dma_fence_put(fence);
+		}
+	}
+
+excl:
+	/* If there is an exclusive fence, test it.  */
+	fence = robj->fence_excl;
+	__insn_barrier();
+	if (fence) {
+		/* Make sure the content of the fence has been published.  */
+		membar_datadep_consumer();
+
+		/*
+		 * Make sure we saw a consistent snapshot of the fence.
+		 *
+		 * XXX I'm not actually sure this is necessary since
+		 * pointer writes are supposed to be atomic.
+		 */
+		if (!dma_resv_read_valid(robj, &ticket))
+			goto restart;
+
+		/*
+		 * If it is going away, restart.  Otherwise, acquire a
+		 * reference to it to test whether it is signalled.  If
+		 * not, wait for it.
+		 */
+		if ((fence = dma_fence_get_rcu(fence)) == NULL)
+			goto restart;
+		if (!dma_fence_is_signaled(fence))
+			goto wait;
+		dma_fence_put(fence);
+	}
+
+	/* Success!  Return the number of ticks left.  */
+	rcu_read_unlock();
+	return timeout;
+
+restart:
+	rcu_read_unlock();
+	goto top;
+
+wait:
+	/*
+	 * Exit the RCU read section and wait for it.  If we time out
+	 * or fail, bail.  Otherwise, go back to the top.
+	 */
+	KASSERT(fence != NULL);
+	rcu_read_unlock();
+	ret = dma_fence_wait_timeout(fence, intr, timeout);
+	dma_fence_put(fence);
+	if (ret <= 0)
+		return ret;
+	KASSERT(ret <= timeout);
+	timeout = ret;
+	goto top;
+}
+
+/*
+ * dma_resv_poll_init(rpoll)
+ *
+ *	Initialize reservation poll state.
+ */
+void
+dma_resv_poll_init(struct dma_resv_poll *rpoll)
+{
+
+	mutex_init(&rpoll->rp_lock, MUTEX_DEFAULT, IPL_VM);
+	selinit(&rpoll->rp_selq);
+	rpoll->rp_claimed = 0;
+}
+
+/*
+ * dma_resv_poll_fini(rpoll)
+ *
+ *	Release any resource associated with reservation poll state.
+ */
+void
+dma_resv_poll_fini(struct dma_resv_poll *rpoll)
+{
+
+	KASSERT(rpoll->rp_claimed == 0);
+	seldestroy(&rpoll->rp_selq);
+	mutex_destroy(&rpoll->rp_lock);
+}
+
+/*
+ * dma_resv_poll_cb(fence, fcb)
+ *
+ *	Callback to notify a reservation poll that a fence has
+ *	completed.  Notify any waiters and allow the next poller to
+ *	claim the callback.
+ *
+ *	If one thread is waiting for the exclusive fence only, and we
+ *	spuriously notify them about a shared fence, tough.
+ */
+static void
+dma_resv_poll_cb(struct dma_fence *fence, struct dma_fence_cb *fcb)
+{
+	struct dma_resv_poll *rpoll = container_of(fcb,
+	    struct dma_resv_poll, rp_fcb);
+
+	mutex_enter(&rpoll->rp_lock);
+	selnotify(&rpoll->rp_selq, 0, NOTE_SUBMIT);
+	rpoll->rp_claimed = 0;
+	mutex_exit(&rpoll->rp_lock);
+}
+
+/*
+ * dma_resv_do_poll(robj, events, rpoll)
+ *
+ *	Poll for reservation object events using the reservation poll
+ *	state in rpoll:
+ *
+ *	- POLLOUT	wait for all fences shared and exclusive
+ *	- POLLIN	wait for the exclusive fence
+ *
+ *	Return the subset of events in events that are ready.  If any
+ *	are requested but not ready, arrange to be notified with
+ *	selnotify when they are.
+ */
+int
+dma_resv_do_poll(const struct dma_resv *robj, int events,
+    struct dma_resv_poll *rpoll)
+{
+	struct dma_resv_read_ticket ticket;
+	struct dma_resv_list *list;
+	struct dma_fence *fence;
+	uint32_t i, shared_count;
+	int revents;
+	bool recorded = false;	/* curlwp is on the selq */
+	bool claimed = false;	/* we claimed the callback */
+	bool callback = false;	/* we requested a callback */
+
+	/*
+	 * Start with the maximal set of events that could be ready.
+	 * We will eliminate the events that are definitely not ready
+	 * as we go at the same time as we add callbacks to notify us
+	 * that they may be ready.
+	 */
+	revents = events & (POLLIN|POLLOUT);
+	if (revents == 0)
+		return 0;
+
+top:
+	/* Enter an RCU read section and get a read ticket.  */
+	rcu_read_lock();
+	dma_resv_read_begin(robj, &ticket);
+
+	/* If we want to wait for all fences, get the shared list.  */
+	if (!(events & POLLOUT))
+		goto excl;
+	list = robj->fence;
+	__insn_barrier();
+	if (list) do {
+		/* Make sure the content of the list has been published.  */
+		membar_datadep_consumer();
+
+		/* Find out how long it is.  */
+		shared_count = list->shared_count;
+
+		/*
+		 * Make sure we saw a consistent snapshot of the list
+		 * pointer and length.
+		 */
+		if (!dma_resv_read_valid(robj, &ticket))
+			goto restart;
+
+		/*
+		 * For each fence, if it is going away, restart.
+		 * Otherwise, acquire a reference to it to test whether
+		 * it is signalled.  Stop and request a callback if we
+		 * find any that is not signalled.
+		 */
+		for (i = 0; i < shared_count; i++) {
+			fence = dma_fence_get_rcu(list->shared[i]);
+			if (fence == NULL)
+				goto restart;
+			if (!dma_fence_is_signaled(fence)) {
+				dma_fence_put(fence);
+				break;
+			}
+			dma_fence_put(fence);
+		}
+
+		/* If all shared fences have been signalled, move on.  */
+		if (i == shared_count)
+			break;
+
+		/* Put ourselves on the selq if we haven't already.  */
+		if (!recorded)
+			goto record;
+
+		/*
+		 * If someone else claimed the callback, or we already
+		 * requested it, we're guaranteed to be notified, so
+		 * assume the event is not ready.
+		 */
+		if (!claimed || callback) {
+			revents &= ~POLLOUT;
+			break;
+		}
+
+		/*
+		 * Otherwise, find the first fence that is not
+		 * signalled, request the callback, and clear POLLOUT
+		 * from the possible ready events.  If they are all
+		 * signalled, leave POLLOUT set; we will simulate the
+		 * callback later.
+		 */
+		for (i = 0; i < shared_count; i++) {
+			fence = dma_fence_get_rcu(list->shared[i]);
+			if (fence == NULL)
+				goto restart;
+			if (!dma_fence_add_callback(fence, &rpoll->rp_fcb,
+				dma_resv_poll_cb)) {
+				dma_fence_put(fence);
+				revents &= ~POLLOUT;
+				callback = true;
+				break;
+			}
+			dma_fence_put(fence);
+		}
+	} while (0);
+
+excl:
+	/* We always wait for at least the exclusive fence, so get it.  */
+	fence = robj->fence_excl;
+	__insn_barrier();
+	if (fence) do {
+		/* Make sure the content of the fence has been published.  */
+		membar_datadep_consumer();
+
+		/*
+		 * Make sure we saw a consistent snapshot of the fence.
+		 *
+		 * XXX I'm not actually sure this is necessary since
+		 * pointer writes are supposed to be atomic.
+		 */
+		if (!dma_resv_read_valid(robj, &ticket))
+			goto restart;
+
+		/*
+		 * If it is going away, restart.  Otherwise, acquire a
+		 * reference to it to test whether it is signalled.  If
+		 * not, stop and request a callback.
+		 */
+		if ((fence = dma_fence_get_rcu(fence)) == NULL)
+			goto restart;
+		if (dma_fence_is_signaled(fence)) {
+			dma_fence_put(fence);
+			break;
+		}
+
+		/* Put ourselves on the selq if we haven't already.  */
+		if (!recorded) {
+			dma_fence_put(fence);
+			goto record;
+		}
+
+		/*
+		 * If someone else claimed the callback, or we already
+		 * requested it, we're guaranteed to be notified, so
+		 * assume the event is not ready.
+		 */
+		if (!claimed || callback) {
+			dma_fence_put(fence);
+			revents = 0;
+			break;
+		}
+
+		/*
+		 * Otherwise, try to request the callback, and clear
+		 * all possible ready events.  If the fence has been
+		 * signalled in the interim, leave the events set; we
+		 * will simulate the callback later.
+		 */
+		if (!dma_fence_add_callback(fence, &rpoll->rp_fcb,
+			dma_resv_poll_cb)) {
+			dma_fence_put(fence);
+			revents = 0;
+			callback = true;
+			break;
+		}
+		dma_fence_put(fence);
+	} while (0);
+
+	/* All done reading the fences.  */
+	rcu_read_unlock();
+
+	if (claimed && !callback) {
+		/*
+		 * We claimed the callback but we didn't actually
+		 * request it because a fence was signalled while we
+		 * were claiming it.  Call it ourselves now.  The
+		 * callback doesn't use the fence nor rely on holding
+		 * any of the fence locks, so this is safe.
+		 */
+		dma_resv_poll_cb(NULL, &rpoll->rp_fcb);
+	}
+	return revents;
+
+restart:
+	rcu_read_unlock();
+	goto top;
+
+record:
+	rcu_read_unlock();
+	mutex_enter(&rpoll->rp_lock);
+	selrecord(curlwp, &rpoll->rp_selq);
+	if (!rpoll->rp_claimed)
+		claimed = rpoll->rp_claimed = true;
+	mutex_exit(&rpoll->rp_lock);
+	recorded = true;
+	goto top;
+}
+
+/*
+ * dma_resv_kqfilter(robj, kn, rpoll)
+ *
+ *	Kqueue filter for reservation objects.  Currently not
+ *	implemented because the logic to implement it is nontrivial,
+ *	and userland will presumably never use it, so it would be
+ *	dangerous to add never-tested complex code paths to the kernel.
+ */
+int
+dma_resv_kqfilter(const struct dma_resv *robj,
+    struct knote *kn, struct dma_resv_poll *rpoll)
+{
+
+	return EINVAL;
+}
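
For reference, the intended calling sequence for the shared-fence API added above, per the comments on dma_resv_reserve_shared and dma_resv_add_shared_fence, is roughly as follows.  This is a hypothetical caller sketch, not part of this commit; the function and variable names are made up for illustration.

	/* Hypothetical caller, for illustration only.  */
	static int
	example_publish_fence(struct dma_resv *robj, struct dma_fence *fence,
	    struct ww_acquire_ctx *ctx)
	{
		int ret;

		/* Take the reservation lock; -EDEADLK means back off and retry.  */
		ret = dma_resv_lock(robj, ctx);
		if (ret)
			return ret;

		/* Reserve a slot, once before each dma_resv_add_shared_fence call.  */
		ret = dma_resv_reserve_shared(robj);
		if (ret == 0)
			/* Publish the fence; the object takes its own reference.  */
			dma_resv_add_shared_fence(robj, fence);

		dma_resv_unlock(robj);
		return ret;
	}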
