Module Name: src
Committed By: martin
Date: Sun Aug 9 14:03:08 UTC 2020
Modified Files:
src/sys/dev/ic [netbsd-9]: bwfm.c
src/sys/dev/sdmmc [netbsd-9]: if_bwfm_sdio.c ld_sdmmc.c sdmmc.c
sdmmc_io.c sdmmc_ioreg.h sdmmc_mem.c sdmmcvar.h
Log Message:
Pull up following revision(s) (requested by mrg in ticket #1042):
sys/dev/sdmmc/if_bwfm_sdio.c: revision 1.19
sys/dev/sdmmc/ld_sdmmc.c: revision 1.38
sys/dev/sdmmc/sdmmcvar.h: revision 1.35
sys/dev/sdmmc/if_bwfm_sdio.c: revision 1.5
sys/dev/ic/bwfm.c: revision 1.26
sys/dev/ic/bwfm.c: revision 1.27
sys/dev/sdmmc/if_bwfm_sdio.c: revision 1.20
sys/dev/sdmmc/if_bwfm_sdio.c: revision 1.21
sys/dev/sdmmc/sdmmc_io.c: revision 1.20
sys/dev/sdmmc/sdmmc_mem.c: revision 1.72
sys/dev/sdmmc/sdmmc.c: revision 1.40
sys/dev/sdmmc/sdmmc_ioreg.h: revision 1.6
sys/dev/sdmmc/if_bwfm_sdio.c: revision 1.16
Don't pass empty mbufs to the network stack.
Avoid changing signedness bit with << in sdmmc_ioreg.h
Reported by <prlw1>
If the controller doesn't support switch func (opcode 6) then skip
setting this but continue with other settings. This allows us to use
a card, albeit at a lower speed.
Fix races in sdmmc tasks and teach ld@sdmmc to abort xfers on detach.
- Teach sdmmc_add_task to queue it only if not already queued.
- Remove now-redundant logic to avoid repeated queueing elsewhere.
- Teach sdmmc_del_task to wait until task has completed.
- Call sdmmc_del_task in various needful places.
- Replace abuse of pcq by a lock and a tailq.
(pcq is multi-producer, _single_-consumer, but there are potentially
multiple consumers here and really only one producer.)
- Teach ld_sdmmc to abort xfers on detach.
(Mechanism is kinda kludgey but it'll do for now; any effort one is
tempted to spend overhauling this should be spent overhauling sdmmc
to support proper asynchronous commands.)
- Make sure ld_sdmmc_discard either returns failure or eventually calls
ldenddiscard.
XXX Currently ld_sdmmc_detach aborts xfers _before_ ldbegindetach
has committed to detaching or not. This is currently necessary to
avoid a deadlock because ldbegindetach waits for xfers to drain --
which strikes me as wrong; ldbegindetach shouldn't wait for anything,
and should only make the decision to commit to detaching or not so
the caller can decide whether to abort xfers before we actually wait
for them in ldenddetach.
XXX pullup -- although this changes some kernel symbols (sdmmc_add_task
and sdmmc_del_task), it shouldn't affect any existing modules; the only
module that uses sdmmc is ld_sdmmc.kmod, which is `.if 0' in the build
so there shouldn't be any of them floating around.
Make this work on big endian machines
move some of the patching of callbacks and other data after
ieee80211_ifattach() but before if_deferred_start_init().
may fix a panic I saw after restarting wpa_supplicant.
from mlelstv.
only ask for SDPCMD_INTSTATUS_HMB_SW_MASK and SDPCMD_INTSTATUS_CHIPACTIVE
interrupts, not all of them. we only ack these ones.
mostly fixes pinebookpro wifi hard hangs. it is still problematic and can
trigger an interrupt storm that appears as a hard hang without NET_MPSAFE,
and a follow up, less clearly right, change will reduce that to a soft
hang of the interface that can be cleared with 'ifconfig bwfm0 down up',
and even often recovers itself now.
clear all interrupts, not just those we expect from the hostintmask.
this removes the final hard hang i have seen in pinebookpro wifi,
though one may still need to 'ifconfig bwfm0 down up' occasionally,
so we still have bugs to fix here (the hang is usually associated
with 'checksum error' from bwfm/sdio.)
Sort #includes. Nix trailing whitespace.
No functional change intended.
To generate a diff of this commit:
cvs rdiff -u -r1.14.6.1 -r1.14.6.2 src/sys/dev/ic/bwfm.c
cvs rdiff -u -r1.3.8.2 -r1.3.8.3 src/sys/dev/sdmmc/if_bwfm_sdio.c
cvs rdiff -u -r1.36 -r1.36.4.1 src/sys/dev/sdmmc/ld_sdmmc.c
cvs rdiff -u -r1.36.4.1 -r1.36.4.2 src/sys/dev/sdmmc/sdmmc.c
cvs rdiff -u -r1.14.4.1 -r1.14.4.2 src/sys/dev/sdmmc/sdmmc_io.c
cvs rdiff -u -r1.3.2.1 -r1.3.2.2 src/sys/dev/sdmmc/sdmmc_ioreg.h
cvs rdiff -u -r1.68.2.1 -r1.68.2.2 src/sys/dev/sdmmc/sdmmc_mem.c
cvs rdiff -u -r1.30.4.1 -r1.30.4.2 src/sys/dev/sdmmc/sdmmcvar.h
Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
Modified files:
Index: src/sys/dev/ic/bwfm.c
diff -u src/sys/dev/ic/bwfm.c:1.14.6.1 src/sys/dev/ic/bwfm.c:1.14.6.2
--- src/sys/dev/ic/bwfm.c:1.14.6.1 Tue Feb 25 18:40:43 2020
+++ src/sys/dev/ic/bwfm.c Sun Aug 9 14:03:08 2020
@@ -1,4 +1,4 @@
-/* $NetBSD: bwfm.c,v 1.14.6.1 2020/02/25 18:40:43 martin Exp $ */
+/* $NetBSD: bwfm.c,v 1.14.6.2 2020/08/09 14:03:08 martin Exp $ */
/* $OpenBSD: bwfm.c,v 1.5 2017/10/16 22:27:16 patrick Exp $ */
/*
* Copyright (c) 2010-2016 Broadcom Corporation
@@ -18,21 +18,23 @@
*/
#include <sys/param.h>
-#include <sys/systm.h>
+#include <sys/types.h>
+
#include <sys/buf.h>
-#include <sys/kernel.h>
#include <sys/device.h>
+#include <sys/kernel.h>
+#include <sys/kmem.h>
+#include <sys/pcq.h>
#include <sys/queue.h>
#include <sys/socket.h>
-#include <sys/kmem.h>
+#include <sys/systm.h>
#include <sys/workqueue.h>
-#include <sys/pcq.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
-#include <net/if_media.h>
#include <net/if_ether.h>
+#include <net/if_media.h>
#include <netinet/in.h>
@@ -203,7 +205,7 @@ bwfm_attach(struct bwfm_softc *sc)
sizeof(bandlist))) {
printf("%s: couldn't get supported band list\n", DEVNAME(sc));
return;
- }
+ }
const u_int nbands = le32toh(bandlist[0]);
for (i = 1; i <= MIN(nbands, __arraycount(bandlist) - 1); i++) {
switch (le32toh(bandlist[i])) {
@@ -252,12 +254,8 @@ bwfm_attach(struct bwfm_softc *sc)
return; /* Error */
}
-
- ieee80211_ifattach(ic);
- ifp->if_percpuq = if_percpuq_create(ifp);
- if_deferred_start_init(ifp, NULL);
- if_register(ifp);
+ ieee80211_ifattach(ic);
sc->sc_newstate = ic->ic_newstate;
ic->ic_newstate = bwfm_newstate;
ic->ic_newassoc = bwfm_newassoc;
@@ -265,6 +263,10 @@ bwfm_attach(struct bwfm_softc *sc)
ic->ic_recv_mgmt = bwfm_recv_mgmt;
ic->ic_crypto.cs_key_set = bwfm_key_set;
ic->ic_crypto.cs_key_delete = bwfm_key_delete;
+
+ ifp->if_percpuq = if_percpuq_create(ifp);
+ if_deferred_start_init(ifp, NULL);
+ if_register(ifp);
ieee80211_media_init(ic, bwfm_media_change, ieee80211_media_status);
ieee80211_announce(ic);
@@ -401,7 +403,7 @@ bwfm_init(struct ifnet *ifp)
#ifdef BWFM_DEBUG
memset(evmask, 0xff, sizeof(evmask));
#endif
-
+
if (bwfm_fwvar_var_set_data(sc, "event_msgs", evmask, sizeof(evmask))) {
printf("%s: could not set event mask\n", DEVNAME(sc));
return EIO;
Index: src/sys/dev/sdmmc/if_bwfm_sdio.c
diff -u src/sys/dev/sdmmc/if_bwfm_sdio.c:1.3.8.2 src/sys/dev/sdmmc/if_bwfm_sdio.c:1.3.8.3
--- src/sys/dev/sdmmc/if_bwfm_sdio.c:1.3.8.2 Thu May 7 17:06:44 2020
+++ src/sys/dev/sdmmc/if_bwfm_sdio.c Sun Aug 9 14:03:07 2020
@@ -1,4 +1,4 @@
-/* $NetBSD: if_bwfm_sdio.c,v 1.3.8.2 2020/05/07 17:06:44 martin Exp $ */
+/* $NetBSD: if_bwfm_sdio.c,v 1.3.8.3 2020/08/09 14:03:07 martin Exp $ */
/* $OpenBSD: if_bwfm_sdio.c,v 1.1 2017/10/11 17:19:50 patrick Exp $ */
/*
* Copyright (c) 2010-2016 Broadcom Corporation
@@ -71,7 +71,6 @@ enum bwfm_sdio_clkstate {
struct bwfm_sdio_softc {
struct bwfm_softc sc_sc;
kmutex_t sc_lock;
- kmutex_t sc_intr_lock;
bool sc_bwfm_attached;
@@ -303,10 +302,8 @@ bwfm_sdio_attach(device_t parent, device
mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
cv_init(&sc->sc_rxctl_cv, "bwfmctl");
- mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_NONE);
sdmmc_init_task(&sc->sc_task, bwfm_sdio_task, sc);
- sc->sc_task_queued = false;
sc->sc_bounce_size = 64 * 1024;
sc->sc_bounce_buf = kmem_alloc(sc->sc_bounce_size, KM_SLEEP);
@@ -548,9 +545,8 @@ bwfm_sdio_attachhook(device_t self)
goto err;
}
-// bwfm_sdio_dev_write(sc, SDPCMD_HOSTINTMASK,
-// SDPCMD_INTSTATUS_HMB_SW_MASK | SDPCMD_INTSTATUS_CHIPACTIVE);
- bwfm_sdio_dev_write(sc, SDPCMD_HOSTINTMASK, 0xffffffff);
+ bwfm_sdio_dev_write(sc, SDPCMD_HOSTINTMASK,
+ SDPCMD_INTSTATUS_HMB_SW_MASK | SDPCMD_INTSTATUS_CHIPACTIVE);
bwfm_sdio_write_1(sc, BWFM_SDIO_WATERMARK, 8);
if (bwfm_chip_sr_capable(bwfm)) {
@@ -668,10 +664,11 @@ bwfm_sdio_detach(device_t self, int flag
if (sc->sc_bwfm_attached)
bwfm_detach(&sc->sc_sc, flags);
+ sdmmc_del_task(sc->sc_sf[1]->sc, &sc->sc_task, NULL);
+
kmem_free(sc->sc_sf, sc->sc_sf_size);
kmem_free(sc->sc_bounce_buf, sc->sc_bounce_size);
- mutex_destroy(&sc->sc_intr_lock);
cv_destroy(&sc->sc_rxctl_cv);
mutex_destroy(&sc->sc_lock);
@@ -739,7 +736,7 @@ bwfm_sdio_read_4(struct bwfm_sdio_softc
sf = sc->sc_sf[1];
rv = sdmmc_io_read_4(sf, addr);
- return rv;
+ return htole32(rv);
}
static void
@@ -782,7 +779,7 @@ bwfm_sdio_write_4(struct bwfm_sdio_softc
else
sf = sc->sc_sf[1];
- sdmmc_io_write_4(sf, addr, data);
+ sdmmc_io_write_4(sf, addr, htole32(data));
}
static int
@@ -1474,11 +1471,7 @@ bwfm_sdio_intr1(void *v, const char *nam
DPRINTF(("%s: %s\n", DEVNAME(sc), name));
- mutex_enter(&sc->sc_intr_lock);
- if (!sdmmc_task_pending(&sc->sc_task))
- sdmmc_add_task(sc->sc_sf[1]->sc, &sc->sc_task);
- sc->sc_task_queued = true;
- mutex_exit(&sc->sc_intr_lock);
+ sdmmc_add_task(sc->sc_sf[1]->sc, &sc->sc_task);
return 1;
}
@@ -1492,33 +1485,13 @@ static void
bwfm_sdio_task(void *v)
{
struct bwfm_sdio_softc *sc = (void *)v;
-#ifdef BWFM_DEBUG
- unsigned count = 0;
-#endif
-
- mutex_enter(&sc->sc_intr_lock);
- while (sc->sc_task_queued) {
-#ifdef BWFM_DEBUG
- ++count;
-#endif
- sc->sc_task_queued = false;
- mutex_exit(&sc->sc_intr_lock);
-
- mutex_enter(&sc->sc_lock);
- bwfm_sdio_task1(sc);
-#ifdef BWFM_DEBUG
- bwfm_sdio_debug_console(sc);
-#endif
- mutex_exit(&sc->sc_lock);
-
- mutex_enter(&sc->sc_intr_lock);
- }
- mutex_exit(&sc->sc_intr_lock);
+ mutex_enter(&sc->sc_lock);
+ bwfm_sdio_task1(sc);
#ifdef BWFM_DEBUG
- if (count > 1)
- DPRINTF(("%s: finished %u tasks\n", DEVNAME(sc), count));
+ bwfm_sdio_debug_console(sc);
#endif
+ mutex_exit(&sc->sc_lock);
}
static void
@@ -1541,7 +1514,6 @@ bwfm_sdio_task1(struct bwfm_sdio_softc *
intstat = bwfm_sdio_dev_read(sc, BWFM_SDPCMD_INTSTATUS);
DPRINTF(("%s: intstat 0x%" PRIx32 "\n", DEVNAME(sc), intstat));
- intstat &= (SDPCMD_INTSTATUS_HMB_SW_MASK|SDPCMD_INTSTATUS_CHIPACTIVE);
if (intstat)
bwfm_sdio_dev_write(sc, BWFM_SDPCMD_INTSTATUS, intstat);
@@ -1868,6 +1840,11 @@ bwfm_sdio_rx_frames(struct bwfm_sdio_sof
break;
}
m_adj(m, hoff);
+ /* don't pass empty packet to stack */
+ if (m->m_len == 0) {
+ m_freem(m);
+ break;
+ }
bwfm_rx(&sc->sc_sc, m);
nextlen = swhdr->nextlen << 4;
break;
@@ -2014,6 +1991,11 @@ bwfm_sdio_rx_glom(struct bwfm_sdio_softc
break;
}
m_adj(m, hoff);
+ /* don't pass empty packet to stack */
+ if (m->m_len == 0) {
+ m_freem(m);
+ break;
+ }
bwfm_rx(&sc->sc_sc, m);
break;
case BWFM_SDIO_SWHDR_CHANNEL_GLOM:
Index: src/sys/dev/sdmmc/ld_sdmmc.c
diff -u src/sys/dev/sdmmc/ld_sdmmc.c:1.36 src/sys/dev/sdmmc/ld_sdmmc.c:1.36.4.1
--- src/sys/dev/sdmmc/ld_sdmmc.c:1.36 Tue Mar 19 07:08:43 2019
+++ src/sys/dev/sdmmc/ld_sdmmc.c Sun Aug 9 14:03:07 2020
@@ -1,4 +1,4 @@
-/* $NetBSD: ld_sdmmc.c,v 1.36 2019/03/19 07:08:43 mlelstv Exp $ */
+/* $NetBSD: ld_sdmmc.c,v 1.36.4.1 2020/08/09 14:03:07 martin Exp $ */
/*
* Copyright (c) 2008 KIYOHARA Takashi
@@ -28,7 +28,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: ld_sdmmc.c,v 1.36 2019/03/19 07:08:43 mlelstv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: ld_sdmmc.c,v 1.36.4.1 2020/08/09 14:03:07 martin Exp $");
#ifdef _KERNEL_OPT
#include "opt_sdmmc.h"
@@ -59,7 +59,7 @@ __KERNEL_RCSID(0, "$NetBSD: ld_sdmmc.c,v
#ifdef LD_SDMMC_DEBUG
#define DPRINTF(s) printf s
#else
-#define DPRINTF(s) /**/
+#define DPRINTF(s) __nothing
#endif
#define LD_SDMMC_IORETRIES 5 /* number of retries before giving up */
@@ -72,32 +72,37 @@ struct ld_sdmmc_softc;
struct ld_sdmmc_task {
struct sdmmc_task task;
-
struct ld_sdmmc_softc *task_sc;
struct buf *task_bp;
int task_retries; /* number of xfer retry */
struct callout task_restart_ch;
- kmutex_t task_lock;
- kcondvar_t task_cv;
+ bool task_poll;
+ int *task_errorp;
- uintptr_t task_data;
+ TAILQ_ENTRY(ld_sdmmc_task) task_entry;
};
struct ld_sdmmc_softc {
struct ld_softc sc_ld;
int sc_hwunit;
-
- struct sdmmc_function *sc_sf;
- struct ld_sdmmc_task sc_task[LD_SDMMC_MAXTASKCNT];
- pcq_t *sc_freeq;
char *sc_typename;
+ struct sdmmc_function *sc_sf;
+
+ kmutex_t sc_lock;
+ kcondvar_t sc_cv;
+ TAILQ_HEAD(, ld_sdmmc_task) sc_freeq;
+ TAILQ_HEAD(, ld_sdmmc_task) sc_xferq;
+ unsigned sc_busy;
+ bool sc_dying;
struct evcnt sc_ev_discard; /* discard counter */
struct evcnt sc_ev_discarderr; /* discard error counter */
struct evcnt sc_ev_discardbusy; /* discard busy counter */
struct evcnt sc_ev_cachesyncbusy; /* cache sync busy counter */
+
+ struct ld_sdmmc_task sc_task[LD_SDMMC_MAXTASKCNT];
};
static int ld_sdmmc_match(device_t, cfdata_t, void *);
@@ -117,6 +122,108 @@ static void ld_sdmmc_dodiscard(void *);
CFATTACH_DECL_NEW(ld_sdmmc, sizeof(struct ld_sdmmc_softc),
ld_sdmmc_match, ld_sdmmc_attach, ld_sdmmc_detach, NULL);
+static struct ld_sdmmc_task *
+ld_sdmmc_task_get(struct ld_sdmmc_softc *sc)
+{
+ struct ld_sdmmc_task *task;
+
+ KASSERT(mutex_owned(&sc->sc_lock));
+
+ if (sc->sc_dying || (task = TAILQ_FIRST(&sc->sc_freeq)) == NULL)
+ return NULL;
+ TAILQ_REMOVE(&sc->sc_freeq, task, task_entry);
+ TAILQ_INSERT_TAIL(&sc->sc_xferq, task, task_entry);
+ KASSERT(task->task_bp == NULL);
+ KASSERT(task->task_errorp == NULL);
+
+ return task;
+}
+
+static void
+ld_sdmmc_task_put(struct ld_sdmmc_softc *sc, struct ld_sdmmc_task *task)
+{
+
+ KASSERT(mutex_owned(&sc->sc_lock));
+
+ TAILQ_REMOVE(&sc->sc_xferq, task, task_entry);
+ TAILQ_INSERT_TAIL(&sc->sc_freeq, task, task_entry);
+ task->task_bp = NULL;
+ task->task_errorp = NULL;
+}
+
+static void
+ld_sdmmc_task_cancel(struct ld_sdmmc_softc *sc, struct ld_sdmmc_task *task)
+{
+ struct buf *bp;
+ int *errorp;
+
+ KASSERT(mutex_owned(&sc->sc_lock));
+ KASSERT(sc->sc_dying);
+
+ /*
+ * Either the callout or the task may be pending, but not both.
+ * First, determine whether the callout is pending.
+ */
+ if (callout_pending(&task->task_restart_ch) ||
+ callout_invoking(&task->task_restart_ch)) {
+ /*
+ * The callout either is pending, or just started but
+ * is waiting for us to release the lock. At this
+ * point, it will notice sc->sc_dying and give up, so
+ * just wait for it to complete and then we will
+ * release everything.
+ */
+ callout_halt(&task->task_restart_ch, &sc->sc_lock);
+ } else {
+ /*
+ * If the callout is running, it has just scheduled, so
+ * after we wait for the callout to finish running, the
+ * task is either pending or running. If the task is
+ * already running, it will notice sc->sc_dying and
+ * give up; otherwise we have to release everything.
+ */
+ callout_halt(&task->task_restart_ch, &sc->sc_lock);
+ if (!sdmmc_del_task(sc->sc_sf->sc, &task->task, &sc->sc_lock))
+ return; /* task already started, let it clean up */
+ }
+
+ /*
+ * It is our responsibility to clean up. Move it from xferq
+ * back to freeq and make sure to notify anyone waiting that
+ * it's finished.
+ */
+ bp = task->task_bp;
+ errorp = task->task_errorp;
+ ld_sdmmc_task_put(sc, task);
+
+ /*
+ * If the task was for an asynchronous I/O xfer, fail the I/O
+ * xfer, with the softc lock dropped since this is a callback
+ * into arbitrary other subsystems.
+ */
+ if (bp) {
+ mutex_exit(&sc->sc_lock);
+ /*
+ * XXX We assume that the same sequence works for bio
+ * and discard -- that lddiscardend is just the same as
+ * setting bp->b_resid = bp->b_bcount in the event of
+ * error and then calling lddone.
+ */
+ bp->b_error = ENXIO;
+ bp->b_resid = bp->b_bcount;
+ lddone(&sc->sc_ld, bp);
+ mutex_enter(&sc->sc_lock);
+ }
+
+ /*
+ * If the task was for a synchronous operation (cachesync),
+ * then just set the error indicator and wake up the waiter.
+ */
+ if (errorp) {
+ *errorp = ENXIO;
+ cv_broadcast(&sc->sc_cv);
+ }
+}
/* ARGSUSED */
static int
@@ -157,15 +264,18 @@ ld_sdmmc_attach(device_t parent, device_
evcnt_attach_dynamic(&sc->sc_ev_discardbusy, EVCNT_TYPE_MISC,
NULL, device_xname(self), "sdmmc discard busy");
+ mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SDMMC);
+ cv_init(&sc->sc_cv, "ldsdmmc");
+ TAILQ_INIT(&sc->sc_freeq);
+ TAILQ_INIT(&sc->sc_xferq);
+ sc->sc_dying = false;
+
const int ntask = __arraycount(sc->sc_task);
- sc->sc_freeq = pcq_create(ntask, KM_SLEEP);
for (i = 0; i < ntask; i++) {
task = &sc->sc_task[i];
task->task_sc = sc;
callout_init(&task->task_restart_ch, CALLOUT_MPSAFE);
- mutex_init(&task->task_lock, MUTEX_DEFAULT, IPL_NONE);
- cv_init(&task->task_cv, "ldsdmmctask");
- pcq_put(sc->sc_freeq, task);
+ TAILQ_INSERT_TAIL(&sc->sc_freeq, task, task_entry);
}
sc->sc_hwunit = 0; /* always 0? */
@@ -231,19 +341,39 @@ ld_sdmmc_detach(device_t dev, int flags)
{
struct ld_sdmmc_softc *sc = device_private(dev);
struct ld_softc *ld = &sc->sc_ld;
+ struct ld_sdmmc_task *task;
int rv, i;
- if ((rv = ldbegindetach(ld, flags)) != 0)
+ /*
+ * Block new xfers, abort all pending tasks, and wait for all
+ * pending waiters to notice that we're gone.
+ */
+ mutex_enter(&sc->sc_lock);
+ sc->sc_dying = true;
+ while ((task = TAILQ_FIRST(&sc->sc_xferq)) != NULL)
+ ld_sdmmc_task_cancel(sc, task);
+ while (sc->sc_busy)
+ cv_wait(&sc->sc_cv, &sc->sc_lock);
+ mutex_exit(&sc->sc_lock);
+
+ /* Do the ld detach dance. */
+ if ((rv = ldbegindetach(ld, flags)) != 0) {
+ /* Detach failed -- back out. */
+ mutex_enter(&sc->sc_lock);
+ sc->sc_dying = false;
+ mutex_exit(&sc->sc_lock);
return rv;
+ }
ldenddetach(ld);
- for (i = 0; i < __arraycount(sc->sc_task); i++) {
+ KASSERT(TAILQ_EMPTY(&sc->sc_xferq));
+
+ for (i = 0; i < __arraycount(sc->sc_task); i++)
callout_destroy(&sc->sc_task[i].task_restart_ch);
- mutex_destroy(&sc->sc_task[i].task_lock);
- cv_destroy(&sc->sc_task[i].task_cv);
- }
- pcq_destroy(sc->sc_freeq);
+ cv_destroy(&sc->sc_cv);
+ mutex_destroy(&sc->sc_lock);
+
evcnt_detach(&sc->sc_ev_discard);
evcnt_detach(&sc->sc_ev_discarderr);
evcnt_detach(&sc->sc_ev_discardbusy);
@@ -256,10 +386,14 @@ static int
ld_sdmmc_start(struct ld_softc *ld, struct buf *bp)
{
struct ld_sdmmc_softc *sc = device_private(ld->sc_dv);
- struct ld_sdmmc_task *task = pcq_get(sc->sc_freeq);
+ struct ld_sdmmc_task *task;
+ int error;
- if (task == NULL)
- return EAGAIN;
+ mutex_enter(&sc->sc_lock);
+ if ((task = ld_sdmmc_task_get(sc)) == NULL) {
+ error = EAGAIN;
+ goto out;
+ }
task->task_bp = bp;
task->task_retries = 0;
@@ -267,7 +401,11 @@ ld_sdmmc_start(struct ld_softc *ld, stru
sdmmc_add_task(sc->sc_sf->sc, &task->task);
- return 0;
+ /* Success! The xfer is now queued. */
+ error = 0;
+
+out: mutex_exit(&sc->sc_lock);
+ return error;
}
static void
@@ -279,7 +417,11 @@ ld_sdmmc_restart(void *arg)
bp->b_resid = bp->b_bcount;
- sdmmc_add_task(sc->sc_sf->sc, &task->task);
+ mutex_enter(&sc->sc_lock);
+ callout_ack(&task->task_restart_ch);
+ if (!sc->sc_dying)
+ sdmmc_add_task(sc->sc_sf->sc, &task->task);
+ mutex_exit(&sc->sc_lock);
}
static void
@@ -324,8 +466,16 @@ ld_sdmmc_dobio(void *arg)
dksc->sc_dkdev.dk_label);
printf(", retrying\n");
task->task_retries++;
- callout_reset(&task->task_restart_ch, RECOVERYTIME,
- ld_sdmmc_restart, task);
+ mutex_enter(&sc->sc_lock);
+ if (sc->sc_dying) {
+ bp->b_resid = bp->b_bcount;
+ bp->b_error = error;
+ goto done_locked;
+ } else {
+ callout_reset(&task->task_restart_ch,
+ RECOVERYTIME, ld_sdmmc_restart, task);
+ }
+ mutex_exit(&sc->sc_lock);
return;
}
bp->b_error = error;
@@ -335,7 +485,11 @@ ld_sdmmc_dobio(void *arg)
}
done:
- pcq_put(sc->sc_freeq, task);
+ /* Dissociate the task from the I/O xfer and release it. */
+ mutex_enter(&sc->sc_lock);
+done_locked:
+ ld_sdmmc_task_put(sc, task);
+ mutex_exit(&sc->sc_lock);
lddone(&sc->sc_ld, bp);
}
@@ -364,15 +518,19 @@ ld_sdmmc_dodiscard(void *arg)
/* An error from discard is non-fatal */
error = sdmmc_mem_discard(sc->sc_sf, sblkno, sblkno + nblks - 1);
- if (error != 0)
+
+ /* Count error or success and release the task. */
+ mutex_enter(&sc->sc_lock);
+ if (error)
sc->sc_ev_discarderr.ev_count++;
else
sc->sc_ev_discard.ev_count++;
- pcq_put(sc->sc_freeq, task);
+ ld_sdmmc_task_put(sc, task);
+ mutex_exit(&sc->sc_lock);
+ /* Record the error and notify the xfer of completion. */
if (error)
bp->b_error = error;
-
lddiscardend(&sc->sc_ld, bp);
}
@@ -380,20 +538,29 @@ static int
ld_sdmmc_discard(struct ld_softc *ld, struct buf *bp)
{
struct ld_sdmmc_softc *sc = device_private(ld->sc_dv);
- struct ld_sdmmc_task *task = pcq_get(sc->sc_freeq);
+ struct ld_sdmmc_task *task;
+ int error;
+
+ mutex_enter(&sc->sc_lock);
- if (task == NULL) {
+ /* Acquire a free task, or drop the request altogether. */
+ if ((task = ld_sdmmc_task_get(sc)) == NULL) {
sc->sc_ev_discardbusy.ev_count++;
- return 0;
+ error = EBUSY;
+ goto out;
}
+ /* Set up the task and schedule it. */
task->task_bp = bp;
-
sdmmc_init_task(&task->task, ld_sdmmc_dodiscard, task);
sdmmc_add_task(sc->sc_sf->sc, &task->task);
- return 0;
+ /* Success! The request is queued. */
+ error = 0;
+
+out: mutex_exit(&sc->sc_lock);
+ return error;
}
static void
@@ -401,40 +568,60 @@ ld_sdmmc_docachesync(void *arg)
{
struct ld_sdmmc_task *task = arg;
struct ld_sdmmc_softc *sc = task->task_sc;
- const bool poll = (bool)task->task_data;
+ int error;
- task->task_data = sdmmc_mem_flush_cache(sc->sc_sf, poll);
+ /* Flush the cache. */
+ error = sdmmc_mem_flush_cache(sc->sc_sf, task->task_poll);
- mutex_enter(&task->task_lock);
- cv_signal(&task->task_cv);
- mutex_exit(&task->task_lock);
+ mutex_enter(&sc->sc_lock);
+
+ /* Notify the other thread that we're done; pass on the error. */
+ *task->task_errorp = error;
+ cv_broadcast(&sc->sc_cv);
+
+ /* Release the task. */
+ ld_sdmmc_task_put(sc, task);
+
+ mutex_exit(&sc->sc_lock);
}
static int
ld_sdmmc_cachesync(struct ld_softc *ld, bool poll)
{
struct ld_sdmmc_softc *sc = device_private(ld->sc_dv);
- struct ld_sdmmc_task *task = pcq_get(sc->sc_freeq);
- int error = 0;
+ struct ld_sdmmc_task *task;
+ int error = -1;
- if (task == NULL) {
+ mutex_enter(&sc->sc_lock);
+
+ /* Acquire a free task, or fail with EBUSY. */
+ if ((task = ld_sdmmc_task_get(sc)) == NULL) {
sc->sc_ev_cachesyncbusy.ev_count++;
- return EBUSY;
+ error = EBUSY;
+ goto out;
}
+ /* Set up the task and schedule it. */
+ task->task_poll = poll;
+ task->task_errorp = &error;
sdmmc_init_task(&task->task, ld_sdmmc_docachesync, task);
- task->task_data = poll;
- mutex_enter(&task->task_lock);
sdmmc_add_task(sc->sc_sf->sc, &task->task);
- error = cv_wait_sig(&task->task_cv, &task->task_lock);
- mutex_exit(&task->task_lock);
-
- if (error == 0)
- error = (int)task->task_data;
- pcq_put(sc->sc_freeq, task);
+ /*
+ * Wait for the task to complete. If the device is yanked,
+ * detach will notify us. Keep the busy count up until we're
+ * done waiting so that the softc doesn't go away until we're
+ * done.
+ */
+ sc->sc_busy++;
+ KASSERT(sc->sc_busy <= LD_SDMMC_MAXTASKCNT);
+ while (error == -1)
+ cv_wait(&sc->sc_cv, &sc->sc_lock);
+ if (--sc->sc_busy == 0)
+ cv_broadcast(&sc->sc_cv);
+out: mutex_exit(&sc->sc_lock);
return error;
}
Index: src/sys/dev/sdmmc/sdmmc.c
diff -u src/sys/dev/sdmmc/sdmmc.c:1.36.4.1 src/sys/dev/sdmmc/sdmmc.c:1.36.4.2
--- src/sys/dev/sdmmc/sdmmc.c:1.36.4.1 Tue Feb 25 18:40:43 2020
+++ src/sys/dev/sdmmc/sdmmc.c Sun Aug 9 14:03:07 2020
@@ -1,4 +1,4 @@
-/* $NetBSD: sdmmc.c,v 1.36.4.1 2020/02/25 18:40:43 martin Exp $ */
+/* $NetBSD: sdmmc.c,v 1.36.4.2 2020/08/09 14:03:07 martin Exp $ */
/* $OpenBSD: sdmmc.c,v 1.18 2009/01/09 10:58:38 jsg Exp $ */
/*
@@ -49,7 +49,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sdmmc.c,v 1.36.4.1 2020/02/25 18:40:43 martin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sdmmc.c,v 1.36.4.2 2020/08/09 14:03:07 martin Exp $");
#ifdef _KERNEL_OPT
#include "opt_sdmmc.h"
@@ -151,7 +151,6 @@ sdmmc_attach(device_t parent, device_t s
mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NONE);
mutex_init(&sc->sc_tskq_mtx, MUTEX_DEFAULT, IPL_SDMMC);
mutex_init(&sc->sc_discover_task_mtx, MUTEX_DEFAULT, IPL_SDMMC);
- mutex_init(&sc->sc_intr_task_mtx, MUTEX_DEFAULT, IPL_SDMMC);
cv_init(&sc->sc_tskq_cv, "mmctaskq");
evcnt_attach_dynamic(&sc->sc_ev_xfer, EVCNT_TYPE_MISC, NULL,
@@ -226,8 +225,10 @@ sdmmc_detach(device_t self, int flags)
callout_destroy(&sc->sc_card_detect_ch);
}
+ sdmmc_del_task(sc, &sc->sc_intr_task, NULL);
+ sdmmc_del_task(sc, &sc->sc_discover_task, NULL);
+
cv_destroy(&sc->sc_tskq_cv);
- mutex_destroy(&sc->sc_intr_task_mtx);
mutex_destroy(&sc->sc_discover_task_mtx);
mutex_destroy(&sc->sc_tskq_mtx);
mutex_destroy(&sc->sc_mtx);
@@ -257,32 +258,64 @@ sdmmc_add_task(struct sdmmc_softc *sc, s
{
mutex_enter(&sc->sc_tskq_mtx);
+ if (task->sc == sc) {
+ KASSERT(task->onqueue);
+ goto out;
+ }
+ KASSERT(task->sc == NULL);
+ KASSERT(!task->onqueue);
task->onqueue = 1;
task->sc = sc;
TAILQ_INSERT_TAIL(&sc->sc_tskq, task, next);
cv_broadcast(&sc->sc_tskq_cv);
- mutex_exit(&sc->sc_tskq_mtx);
+out: mutex_exit(&sc->sc_tskq_mtx);
}
static inline void
sdmmc_del_task1(struct sdmmc_softc *sc, struct sdmmc_task *task)
{
+ KASSERT(mutex_owned(&sc->sc_tskq_mtx));
+
TAILQ_REMOVE(&sc->sc_tskq, task, next);
task->sc = NULL;
task->onqueue = 0;
}
-void
-sdmmc_del_task(struct sdmmc_task *task)
+bool
+sdmmc_del_task(struct sdmmc_softc *sc, struct sdmmc_task *task,
+ kmutex_t *interlock)
{
- struct sdmmc_softc *sc = (struct sdmmc_softc *)task->sc;
+ bool cancelled;
- if (sc != NULL) {
- mutex_enter(&sc->sc_tskq_mtx);
+ KASSERT(interlock == NULL || mutex_owned(interlock));
+
+ mutex_enter(&sc->sc_tskq_mtx);
+ if (task->sc == sc) {
+ KASSERT(task->onqueue);
+ KASSERT(sc->sc_curtask != task);
sdmmc_del_task1(sc, task);
- mutex_exit(&sc->sc_tskq_mtx);
+ cancelled = true;
+ } else {
+ KASSERT(task->sc == NULL);
+ KASSERT(!task->onqueue);
+ mutex_exit(interlock);
+ while (sc->sc_curtask == task) {
+ KASSERT(curlwp != sc->sc_tskq_lwp);
+ cv_wait(&sc->sc_tskq_cv, &sc->sc_tskq_mtx);
+ }
+ if (!mutex_tryenter(interlock)) {
+ mutex_exit(&sc->sc_tskq_mtx);
+ mutex_enter(interlock);
+ mutex_enter(&sc->sc_tskq_mtx);
+ }
+ cancelled = false;
}
+ mutex_exit(&sc->sc_tskq_mtx);
+
+ KASSERT(interlock == NULL || mutex_owned(interlock));
+
+ return cancelled;
}
static void
@@ -299,9 +332,12 @@ sdmmc_task_thread(void *arg)
task = TAILQ_FIRST(&sc->sc_tskq);
if (task != NULL) {
sdmmc_del_task1(sc, task);
+ sc->sc_curtask = task;
mutex_exit(&sc->sc_tskq_mtx);
(*task->func)(task->arg);
mutex_enter(&sc->sc_tskq_mtx);
+ sc->sc_curtask = NULL;
+ cv_broadcast(&sc->sc_tskq_cv);
} else {
/* Check for the exit condition. */
if (sc->sc_dying)
@@ -334,10 +370,7 @@ sdmmc_needs_discover(device_t dev)
if (!ISSET(sc->sc_flags, SMF_INITED))
return;
- mutex_enter(&sc->sc_discover_task_mtx);
- if (!sdmmc_task_pending(&sc->sc_discover_task))
- sdmmc_add_task(sc, &sc->sc_discover_task);
- mutex_exit(&sc->sc_discover_task_mtx);
+ sdmmc_add_task(sc, &sc->sc_discover_task);
}
static void
Index: src/sys/dev/sdmmc/sdmmc_io.c
diff -u src/sys/dev/sdmmc/sdmmc_io.c:1.14.4.1 src/sys/dev/sdmmc/sdmmc_io.c:1.14.4.2
--- src/sys/dev/sdmmc/sdmmc_io.c:1.14.4.1 Tue Feb 25 18:40:43 2020
+++ src/sys/dev/sdmmc/sdmmc_io.c Sun Aug 9 14:03:07 2020
@@ -1,4 +1,4 @@
-/* $NetBSD: sdmmc_io.c,v 1.14.4.1 2020/02/25 18:40:43 martin Exp $ */
+/* $NetBSD: sdmmc_io.c,v 1.14.4.2 2020/08/09 14:03:07 martin Exp $ */
/* $OpenBSD: sdmmc_io.c,v 1.10 2007/09/17 01:33:33 krw Exp $ */
/*
@@ -20,7 +20,7 @@
/* Routines for SD I/O cards. */
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sdmmc_io.c,v 1.14.4.1 2020/02/25 18:40:43 martin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sdmmc_io.c,v 1.14.4.2 2020/08/09 14:03:07 martin Exp $");
#ifdef _KERNEL_OPT
#include "opt_sdmmc.h"
@@ -804,10 +804,7 @@ sdmmc_card_intr(device_t dev)
if (sc->sc_sct->card_enable_intr == NULL)
return;
- mutex_enter(&sc->sc_intr_task_mtx);
- if (!sdmmc_task_pending(&sc->sc_intr_task))
- sdmmc_add_task(sc, &sc->sc_intr_task);
- mutex_exit(&sc->sc_intr_task_mtx);
+ sdmmc_add_task(sc, &sc->sc_intr_task);
}
void
Index: src/sys/dev/sdmmc/sdmmc_ioreg.h
diff -u src/sys/dev/sdmmc/sdmmc_ioreg.h:1.3.2.1 src/sys/dev/sdmmc/sdmmc_ioreg.h:1.3.2.2
--- src/sys/dev/sdmmc/sdmmc_ioreg.h:1.3.2.1 Tue Feb 25 18:40:43 2020
+++ src/sys/dev/sdmmc/sdmmc_ioreg.h Sun Aug 9 14:03:07 2020
@@ -1,4 +1,4 @@
-/* $NetBSD: sdmmc_ioreg.h,v 1.3.2.1 2020/02/25 18:40:43 martin Exp $ */
+/* $NetBSD: sdmmc_ioreg.h,v 1.3.2.2 2020/08/09 14:03:07 martin Exp $ */
/* $OpenBSD: sdmmc_ioreg.h,v 1.4 2007/06/02 01:48:37 uwe Exp $ */
/*
@@ -39,7 +39,7 @@
/* CMD53 arguments */
#define SD_ARG_CMD53_READ (0<<31)
-#define SD_ARG_CMD53_WRITE (1<<31)
+#define SD_ARG_CMD53_WRITE (1U<<31)
#define SD_ARG_CMD53_FUNC_SHIFT 28
#define SD_ARG_CMD53_FUNC_MASK 0x7
#define SD_ARG_CMD53_BLOCK_MODE (1<<27)
@@ -55,7 +55,7 @@
#define MMC_R5(resp) ((resp)[0])
/* SD R4 response (IO OCR) */
-#define SD_IO_OCR_MEM_READY (1<<31)
+#define SD_IO_OCR_MEM_READY (1U<<31)
#define SD_IO_OCR_NUM_FUNCTIONS(ocr) (((ocr) >> 28) & 0x7)
#define SD_IO_OCR_MEM_PRESENT (1<<27)
#define SD_IO_OCR_MASK 0x00fffff0
Index: src/sys/dev/sdmmc/sdmmc_mem.c
diff -u src/sys/dev/sdmmc/sdmmc_mem.c:1.68.2.1 src/sys/dev/sdmmc/sdmmc_mem.c:1.68.2.2
--- src/sys/dev/sdmmc/sdmmc_mem.c:1.68.2.1 Tue Feb 25 18:40:43 2020
+++ src/sys/dev/sdmmc/sdmmc_mem.c Sun Aug 9 14:03:07 2020
@@ -1,4 +1,4 @@
-/* $NetBSD: sdmmc_mem.c,v 1.68.2.1 2020/02/25 18:40:43 martin Exp $ */
+/* $NetBSD: sdmmc_mem.c,v 1.68.2.2 2020/08/09 14:03:07 martin Exp $ */
/* $OpenBSD: sdmmc_mem.c,v 1.10 2009/01/09 10:55:22 jsg Exp $ */
/*
@@ -45,7 +45,7 @@
/* Routines for SD/MMC memory cards. */
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sdmmc_mem.c,v 1.68.2.1 2020/02/25 18:40:43 martin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sdmmc_mem.c,v 1.68.2.2 2020/08/09 14:03:07 martin Exp $");
#ifdef _KERNEL_OPT
#include "opt_sdmmc.h"
@@ -833,9 +833,14 @@ sdmmc_mem_sd_init(struct sdmmc_softc *sc
DPRINTF(("%s: switch func mode 0\n", SDMMCDEVNAME(sc)));
error = sdmmc_mem_sd_switch(sf, 0, 1, 0, &status);
if (error) {
- aprint_error_dev(sc->sc_dev,
- "switch func mode 0 failed\n");
- return error;
+ if (error == ENOTSUP) {
+ /* Not supported by controller */
+ goto skipswitchfuncs;
+ } else {
+ aprint_error_dev(sc->sc_dev,
+ "switch func mode 0 failed\n");
+ return error;
+ }
}
support_func = SFUNC_STATUS_GROUP(&status, 1);
@@ -887,6 +892,7 @@ sdmmc_mem_sd_init(struct sdmmc_softc *sc
delay(25);
}
}
+skipswitchfuncs:
/* update bus clock */
if (sc->sc_busclk > sf->csd.tran_speed)
Index: src/sys/dev/sdmmc/sdmmcvar.h
diff -u src/sys/dev/sdmmc/sdmmcvar.h:1.30.4.1 src/sys/dev/sdmmc/sdmmcvar.h:1.30.4.2
--- src/sys/dev/sdmmc/sdmmcvar.h:1.30.4.1 Tue Feb 25 18:40:43 2020
+++ src/sys/dev/sdmmc/sdmmcvar.h Sun Aug 9 14:03:07 2020
@@ -1,4 +1,4 @@
-/* $NetBSD: sdmmcvar.h,v 1.30.4.1 2020/02/25 18:40:43 martin Exp $ */
+/* $NetBSD: sdmmcvar.h,v 1.30.4.2 2020/08/09 14:03:07 martin Exp $ */
/* $OpenBSD: sdmmcvar.h,v 1.13 2009/01/09 10:55:22 jsg Exp $ */
/*
@@ -89,8 +89,6 @@ do { \
(xtask)->sc = NULL; \
} while (/*CONSTCOND*/0)
-#define sdmmc_task_pending(xtask) ((xtask)->onqueue)
-
struct sdmmc_command {
struct sdmmc_task c_task; /* task queue entry */
uint16_t c_opcode; /* SD or MMC command index */
@@ -267,6 +265,7 @@ struct sdmmc_softc {
TAILQ_HEAD(, sdmmc_task) sc_tskq; /* task thread work queue */
struct kmutex sc_tskq_mtx;
struct kcondvar sc_tskq_cv;
+ struct sdmmc_task *sc_curtask;
/* discover task */
struct sdmmc_task sc_discover_task; /* card attach/detach task */
@@ -274,7 +273,6 @@ struct sdmmc_softc {
/* interrupt task */
struct sdmmc_task sc_intr_task; /* card interrupt task */
- struct kmutex sc_intr_task_mtx;
TAILQ_HEAD(, sdmmc_intr_handler) sc_intrq; /* interrupt handlers */
u_int sc_clkmin; /* host min bus clock */
@@ -325,7 +323,7 @@ extern int sdmmcdebug;
#endif
void sdmmc_add_task(struct sdmmc_softc *, struct sdmmc_task *);
-void sdmmc_del_task(struct sdmmc_task *);
+bool sdmmc_del_task(struct sdmmc_softc *, struct sdmmc_task *, kmutex_t *);
struct sdmmc_function *sdmmc_function_alloc(struct sdmmc_softc *);
void sdmmc_function_free(struct sdmmc_function *);