Module Name:    src
Committed By:   pooka
Date:           Mon Jan 25 22:18:17 UTC 2010

Modified Files:
        src/sys/net: bpf.c bpf.h bpf_stub.c

Log Message:
Make bpf dynamically loadable.


To generate a diff of this commit:
cvs rdiff -u -r1.153 -r1.154 src/sys/net/bpf.c
cvs rdiff -u -r1.52 -r1.53 src/sys/net/bpf.h
cvs rdiff -u -r1.3 -r1.4 src/sys/net/bpf_stub.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/net/bpf.c
diff -u src/sys/net/bpf.c:1.153 src/sys/net/bpf.c:1.154
--- src/sys/net/bpf.c:1.153	Tue Jan 19 22:08:00 2010
+++ src/sys/net/bpf.c	Mon Jan 25 22:18:17 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: bpf.c,v 1.153 2010/01/19 22:08:00 pooka Exp $	*/
+/*	$NetBSD: bpf.c,v 1.154 2010/01/25 22:18:17 pooka Exp $	*/
 
 /*
  * Copyright (c) 1990, 1991, 1993
@@ -39,7 +39,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.153 2010/01/19 22:08:00 pooka Exp $");
+__KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.154 2010/01/25 22:18:17 pooka Exp $");
 
 #if defined(_KERNEL_OPT)
 #include "opt_bpf.h"
@@ -58,6 +58,8 @@
 #include <sys/vnode.h>
 #include <sys/queue.h>
 #include <sys/stat.h>
+#include <sys/module.h>
+#include <sys/once.h>
 
 #include <sys/file.h>
 #include <sys/filedesc.h>
@@ -359,13 +361,8 @@
 	d->bd_bif = 0;
 }
 
-
-/*
- * bpfilterattach() is called at boot time.
- */
-/* ARGSUSED */
-void
-bpfilterattach(int n)
+static int
+doinit(void)
 {
 
 	mutex_init(&bpf_mtx, MUTEX_DEFAULT, IPL_NONE);
@@ -375,6 +372,20 @@
 	bpf_gstats.bs_recv = 0;
 	bpf_gstats.bs_drop = 0;
 	bpf_gstats.bs_capt = 0;
+
+	return 0;
+}
+
+/*
+ * bpfilterattach() is called at boot time.
+ */
+/* ARGSUSED */
+void
+bpfilterattach(int n)
+{
+	static ONCE_DECL(control);
+
+	RUN_ONCE(&control, doinit);
 }
 
 /*
@@ -1910,9 +1921,43 @@
 	.bpf_mtap_sl_out =	bpf_mtap_sl_out,
 };
 
-void
-bpf_setops()
+MODULE(MODULE_CLASS_DRIVER, bpf, NULL);
+
+static int
+bpf_modcmd(modcmd_t cmd, void *arg)
 {
+	devmajor_t bmajor, cmajor;
+	int error;
+
+	bmajor = cmajor = NODEVMAJOR;
+
+	switch (cmd) {
+	case MODULE_CMD_INIT:
+		bpfilterattach(0);
+		error = devsw_attach("bpf", NULL, &bmajor,
+		    &bpf_cdevsw, &cmajor);
+		if (error == EEXIST)
+			error = 0; /* maybe built-in ... improve eventually */
+		if (error)
+			break;
+
+		bpf_ops_handover_enter(&bpf_ops_kernel);
+		atomic_swap_ptr(&bpf_ops, &bpf_ops_kernel);
+		bpf_ops_handover_exit();
+		break;
+
+	case MODULE_CMD_FINI:
+		/*
+		 * bpf_ops is not (yet) referenced in the callers before
+		 * attach.  maybe other issues too.  "safety first".
+		 */
+		error = EOPNOTSUPP;
+		break;
 
-	bpf_ops = &bpf_ops_kernel;
+	default:
+		error = ENOTTY;
+		break;
+	}
+
+	return error;
 }

Index: src/sys/net/bpf.h
diff -u src/sys/net/bpf.h:1.52 src/sys/net/bpf.h:1.53
--- src/sys/net/bpf.h:1.52	Tue Jan 19 22:08:00 2010
+++ src/sys/net/bpf.h	Mon Jan 25 22:18:17 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: bpf.h,v 1.52 2010/01/19 22:08:00 pooka Exp $	*/
+/*	$NetBSD: bpf.h,v 1.53 2010/01/25 22:18:17 pooka Exp $	*/
 
 /*
  * Copyright (c) 1990, 1991, 1993
@@ -276,6 +276,9 @@
 extern struct bpf_ops *bpf_ops;
 void     bpf_setops(void);
 
+void     bpf_ops_handover_enter(struct bpf_ops *);
+void     bpf_ops_handover_exit(void);
+
 void	 bpfilterattach(int);
 
 int	 bpf_validate(struct bpf_insn *, int);

Index: src/sys/net/bpf_stub.c
diff -u src/sys/net/bpf_stub.c:1.3 src/sys/net/bpf_stub.c:1.4
--- src/sys/net/bpf_stub.c:1.3	Tue Jan 19 23:11:10 2010
+++ src/sys/net/bpf_stub.c	Mon Jan 25 22:18:17 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: bpf_stub.c,v 1.3 2010/01/19 23:11:10 pooka Exp $	*/
+/*	$NetBSD: bpf_stub.c,v 1.4 2010/01/25 22:18:17 pooka Exp $	*/
 
 /*
  * Copyright (c) 2010 The NetBSD Foundation, Inc.
@@ -27,18 +27,119 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: bpf_stub.c,v 1.3 2010/01/19 23:11:10 pooka Exp $");
+__KERNEL_RCSID(0, "$NetBSD: bpf_stub.c,v 1.4 2010/01/25 22:18:17 pooka Exp $");
 
 #include <sys/param.h>
+#include <sys/kmem.h>
 #include <sys/mbuf.h>
 
 #include <net/bpf.h>
 
+struct laglist {
+	struct ifnet *lag_ifp;
+	u_int lag_dlt;
+	u_int lag_hlen;
+	struct bpf_if **lag_drvp;
+
+	TAILQ_ENTRY(laglist) lag_entries;
+};
+
+static TAILQ_HEAD(, laglist) lagdrvs = TAILQ_HEAD_INITIALIZER(lagdrvs);
+
+static void bpf_stub_attach(struct ifnet *, u_int, u_int, struct bpf_if **);
+static void bpf_stub_detach(struct ifnet *);
+
+static void bpf_stub_null(void);
+static void bpf_stub_warn(void);
+
+static kmutex_t handovermtx;
+static kcondvar_t handovercv;
+static bool handover;
+
+struct bpf_ops bpf_ops_stub = {
+	.bpf_attach =		bpf_stub_attach,
+	.bpf_detach =		bpf_stub_detach,
+	.bpf_change_type =	(void *)bpf_stub_null,
+
+	.bpf_tap = 		(void *)bpf_stub_warn,
+	.bpf_mtap = 		(void *)bpf_stub_warn,
+	.bpf_mtap2 = 		(void *)bpf_stub_warn,
+	.bpf_mtap_af = 		(void *)bpf_stub_warn,
+	.bpf_mtap_et = 		(void *)bpf_stub_warn,
+	.bpf_mtap_sl_in = 	(void *)bpf_stub_warn,
+	.bpf_mtap_sl_out =	(void *)bpf_stub_warn,
+};
+struct bpf_ops *bpf_ops;
+
 static void
-bpf_stub_attach(struct ifnet *ipf, u_int dlt, u_int hlen, struct bpf_if **drvp)
+bpf_stub_attach(struct ifnet *ifp, u_int dlt, u_int hlen, struct bpf_if **drvp)
 {
+	struct laglist *lag;
+	bool storeattach = true;
+
+	lag = kmem_alloc(sizeof(*lag), KM_SLEEP);
+	lag->lag_ifp = ifp;
+	lag->lag_dlt = dlt;
+	lag->lag_hlen = hlen;
+	lag->lag_drvp = drvp;
+
+	mutex_enter(&handovermtx);
+	/*
+	 * If handover is in progress, wait for it to finish and complete
+	 * attach after that.  Otherwise record ourselves.
+	 */
+	while (handover) {
+		storeattach = false;
+		cv_wait(&handovercv, &handovermtx);
+	}
+
+	if (storeattach == false) {
+		mutex_exit(&handovermtx);
+		kmem_free(lag, sizeof(*lag));
+		KASSERT(bpf_ops != &bpf_ops_stub); /* revisit when unloadable */
+		bpf_ops->bpf_attach(ifp, dlt, hlen, drvp);
+	} else {
+		*drvp = NULL;
+		TAILQ_INSERT_TAIL(&lagdrvs, lag, lag_entries);
+		mutex_exit(&handovermtx);
+	}
+}
 
-	*drvp = NULL;
+static void
+bpf_stub_detach(struct ifnet *ifp)
+{
+	TAILQ_HEAD(, laglist) rmlist;
+	struct laglist *lag, *lag_next;
+	bool didhand;
+
+	TAILQ_INIT(&rmlist);
+
+	didhand = false;
+	mutex_enter(&handovermtx);
+	while (handover) {
+		didhand = true;
+		cv_wait(&handovercv, &handovermtx);
+	}
+
+	if (didhand == false) {
+		/* atomically remove all */
+		for (lag = TAILQ_FIRST(&lagdrvs); lag; lag = lag_next) {
+			lag_next = TAILQ_NEXT(lag, lag_entries);
+			if (lag->lag_ifp == ifp) {
+				TAILQ_REMOVE(&lagdrvs, lag, lag_entries);
+				TAILQ_INSERT_HEAD(&rmlist, lag, lag_entries);
+			}
+		}
+		mutex_exit(&handovermtx);
+		while ((lag = TAILQ_FIRST(&rmlist)) != NULL) {
+			TAILQ_REMOVE(&rmlist, lag, lag_entries);
+			kmem_free(lag, sizeof(*lag));
+		}
+	} else {
+		mutex_exit(&handovermtx);
+		KASSERT(bpf_ops != &bpf_ops_stub); /* revisit when unloadable */
+		bpf_ops->bpf_detach(ifp);
+	}
 }
 
 static void
@@ -59,27 +160,49 @@
 #endif
 }
 
-struct bpf_ops bpf_ops_stub = {
-	.bpf_attach =		bpf_stub_attach,
-	.bpf_detach =		(void *)bpf_stub_null,
-	.bpf_change_type =	(void *)bpf_stub_null,
+void
+bpf_setops()
+{
 
-	.bpf_tap = 		(void *)bpf_stub_warn,
-	.bpf_mtap = 		(void *)bpf_stub_warn,
-	.bpf_mtap2 = 		(void *)bpf_stub_warn,
-	.bpf_mtap_af = 		(void *)bpf_stub_warn,
-	.bpf_mtap_et = 		(void *)bpf_stub_warn,
-	.bpf_mtap_sl_in = 	(void *)bpf_stub_warn,
-	.bpf_mtap_sl_out =	(void *)bpf_stub_warn,
-};
+	mutex_init(&handovermtx, MUTEX_DEFAULT, IPL_NONE);
+	cv_init(&handovercv, "bpfops");
+	bpf_ops = &bpf_ops_stub;
+}
 
-struct bpf_ops *bpf_ops;
+/*
+ * Party's over, prepare for handover.
+ * It needs to happen *before* bpf_ops is set to make it atomic
+ * to callers (see also stub implementations, which wait if
+ * called during handover).  The likelihood of seeing a full
+ * attach-detach *during* handover comes close to astronomical,
+ * but handle it anyway since it's relatively easy.
+ */
+void
+bpf_ops_handover_enter(struct bpf_ops *newops)
+{
+	struct laglist *lag;
+
+	mutex_enter(&handovermtx);
+	handover = true;
 
-void bpf_setops_stub(void);
+	while ((lag = TAILQ_FIRST(&lagdrvs)) != NULL) {
+		TAILQ_REMOVE(&lagdrvs, lag, lag_entries);
+		mutex_exit(&handovermtx);
+		newops->bpf_attach(lag->lag_ifp, lag->lag_dlt,
+		    lag->lag_hlen, lag->lag_drvp);
+		kmem_free(lag, sizeof(*lag));
+		mutex_enter(&handovermtx);
+	}
+	mutex_exit(&handovermtx);
+}
+
+/* hangover done */
 void
-bpf_setops_stub()
+bpf_ops_handover_exit()
 {
 
-	bpf_ops = &bpf_ops_stub;
+	mutex_enter(&handovermtx);
+	handover = false;
+	cv_broadcast(&handovercv);
+	mutex_exit(&handovermtx);
 }
-__weak_alias(bpf_setops,bpf_setops_stub);

Reply via email to