This patch reworks the steps involved in roaming to a new access
point on iwm(4) and iwx(4) interfaces.
The current implementation suffers from race conditions which can
leave the interface in a "stuck" state. I have seen this happen on
iwm(4) 9560 devices in particular, while testing the driver with new
firmware images recently published by Intel. This may well be related
to other hangs people have reported in multi-AP environments on both
iwm(4) and iwx(4).
With this patch in place, net80211 can now pass control to drivers
which provide an optional bgscan_done() handler. In this handler the
driver will do everything necessary to prepare the device for switching
APs, and then call back into net80211 to trigger the AP switch.
In the previous implementation, net80211 asked the driver to prepare
the device and immediately proceeded with switching APs. The driver
may need to wait for firmware commands to complete, which is done in
a task context. As a result, the driver-side task and net80211's
switching of APs (which involves sending frames) ran effectively in
parallel, and things could break.
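To illustrate the new contract, here is a minimal sketch of a
driver-side ic_bgscan_done handler and its task. Names such as
mydrv_softc, mydrv_add_task and mydrv_flush_tx are placeholders
modeled after the real drivers, and error handling is omitted; the
actual iwm(4) and iwx(4) implementations are in the diff below:

void
mydrv_bgscan_done(struct ieee80211com *ic,
    struct ieee80211_node_switch_bss_arg *arg, size_t arg_size)
{
	struct mydrv_softc *sc = ic->ic_softc;

	/*
	 * Store the argument handed to us by net80211 and defer the
	 * work to a task; flushing Tx queues and tearing down Tx agg
	 * sessions means waiting for firmware commands to complete.
	 */
	free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
	sc->bgscan_unref_arg = arg;
	sc->bgscan_unref_arg_size = arg_size;
	mydrv_add_task(sc, sc->sc_nswq, &sc->bgscan_done_task);
}

void
mydrv_bgscan_done_task(void *arg)
{
	struct mydrv_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	int s = splnet();

	/* Flush Tx queues and stop Tx aggregation (may sleep). */
	if (mydrv_flush_tx(sc) == 0) {
		/*
		 * Hand the argument back and let net80211 proceed
		 * with switching APs.
		 */
		ni->ni_unref_arg = sc->bgscan_unref_arg;
		ni->ni_unref_arg_size = sc->bgscan_unref_arg_size;
		sc->bgscan_unref_arg = NULL;
		sc->bgscan_unref_arg_size = 0;
		ieee80211_node_tx_stopped(ic, ni);
	}
	splx(s);
}

The handler is hooked up during attach via ic->ic_bgscan_done.
On failure the real drivers free the stored argument and schedule
their init task to reset the device; see iwm_bgscan_done_task() in
the diff below.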
To test roaming, you need a setup in which all APs involved use the
same SSID; otherwise roaming will not work as intended. If you do not
have such a setup, you cannot effectively test this patch (unless you
just want to run the patch anyway and look for regressions).
Run the interface with 'ifconfig iwm0 debug' (or iwx0) enabled so
that roaming attempts appear in /var/log/messages, and watch this
file with a command such as:
tail -f /var/log/messages
Move towards another access point and trigger a background scan by
running this command as root:
ifconfig iwm0 scan
It may take a few scan attempts before roaming triggers.
Successful roaming displays the following in /var/log/messages:
iwm0: roaming from 00:2b:a2:95:e3:e4 chan 1 to 00:2b:a2:95:e3:f4 chan 36
iwm0: RUN -> AUTH
iwm0: sending auth to 00:2b:a2:95:e3:f4 on channel 36 mode 11a
iwm0: AUTH -> ASSOC
iwm0: sending assoc_req to 00:2b:a2:95:e3:f4 on channel 36 mode 11a
iwm0: ASSOC -> RUN
If the interface instead ends up in INIT or SCAN state, roaming has
failed. This can happen for various reasons. One of them is that our
attempt to AUTH to the new AP times out, and I don't know how this
could be fixed.
In any case, the driver should recover from any roaming failure by
going through the regular INIT->SCAN->AUTH->ASSOC->RUN sequence with
one of the available APs, and the interface link should come back up.
If you want to force roaming from one particular AP to another, this
can be done by forcing the channel number corresponding to the first
AP, associating with this AP, and then clearing the forced channel
number:
# ifconfig iwm0 chan 1
Wait for association to succeed, then clear the forced channel:
# ifconfig iwm0 -chan
Now trigger a background scan as usual:
# ifconfig iwm0 scan
diff e5ddbb84043d48bc602408a6bf0e30fb062e3280 af878e690f6195efea7bb4639bf75fe439f3cddc
blob - f1908d2923d90c7be84c010fc68342afda860d0c
blob + dfccd0bd1327325b7c834d1e60f1b37d7a19dc18
--- sys/dev/pci/if_iwm.c
+++ sys/dev/pci/if_iwm.c
@@ -477,6 +477,9 @@ void iwm_add_task(struct iwm_softc *, struct taskq *,
void iwm_del_task(struct iwm_softc *, struct taskq *, struct task *);
int iwm_scan(struct iwm_softc *);
int iwm_bgscan(struct ieee80211com *);
+void iwm_bgscan_done(struct ieee80211com *,
+ struct ieee80211_node_switch_bss_arg *, size_t);
+void iwm_bgscan_done_task(void *);
int iwm_umac_scan_abort(struct iwm_softc *);
int iwm_lmac_scan_abort(struct iwm_softc *);
int iwm_scan_abort(struct iwm_softc *);
@@ -8287,6 +8290,81 @@ iwm_bgscan(struct ieee80211com *ic)
return 0;
}
+void
+iwm_bgscan_done(struct ieee80211com *ic,
+ struct ieee80211_node_switch_bss_arg *arg, size_t arg_size)
+{
+ struct iwm_softc *sc = ic->ic_softc;
+
+ free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
+ sc->bgscan_unref_arg = arg;
+ sc->bgscan_unref_arg_size = arg_size;
+ iwm_add_task(sc, sc->sc_nswq, &sc->bgscan_done_task);
+}
+
+void
+iwm_bgscan_done_task(void *arg)
+{
+ struct iwm_softc *sc = arg;
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct iwm_node *in = (void *)ic->ic_bss;
+ struct ieee80211_node *ni = &in->in_ni;
+ int tid, err = 0, s = splnet();
+
+ if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) ||
+ (ic->ic_flags & IEEE80211_F_BGSCAN) == 0 ||
+ ic->ic_state != IEEE80211_S_RUN) {
+ err = ENXIO;
+ goto done;
+ }
+
+ for (tid = 0; tid < IWM_MAX_TID_COUNT; tid++) {
+ int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
+
+ if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
+ continue;
+
+ err = iwm_sta_tx_agg(sc, ni, tid, 0, 0, 0);
+ if (err)
+ goto done;
+ err = iwm_disable_txq(sc, IWM_STATION_ID, qid, tid);
+ if (err)
+ goto done;
+ in->tfd_queue_msk &= ~(1 << qid);
+#if 0 /* disabled for now; we are going to DEAUTH soon anyway */
+ IEEE80211_SEND_ACTION(ic, ni, IEEE80211_CATEG_BA,
+ IEEE80211_ACTION_DELBA,
+ IEEE80211_REASON_AUTH_LEAVE << 16 |
+ IEEE80211_FC1_DIR_TODS << 8 | tid);
+#endif
+ ieee80211_node_tx_ba_clear(ni, tid);
+ }
+
+ err = iwm_flush_sta(sc, in);
+ if (err)
+ goto done;
+
+ /*
+ * Tx queues have been flushed and Tx agg has been stopped.
+ * Allow roaming to proceed.
+ */
+ ni->ni_unref_arg = sc->bgscan_unref_arg;
+ ni->ni_unref_arg_size = sc->bgscan_unref_arg_size;
+ sc->bgscan_unref_arg = NULL;
+ sc->bgscan_unref_arg_size = 0;
+ ieee80211_node_tx_stopped(ic, &in->in_ni);
+done:
+ if (err) {
+ free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
+ sc->bgscan_unref_arg = NULL;
+ sc->bgscan_unref_arg_size = 0;
+ if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
+ task_add(systq, &sc->init_task);
+ }
+ refcnt_rele_wake(&sc->task_refs);
+ splx(s);
+}
+
int
iwm_umac_scan_abort(struct iwm_softc *sc)
{
@@ -9141,6 +9219,7 @@ iwm_newstate(struct ieee80211com *ic, enum ieee80211_s
iwm_del_task(sc, systq, &sc->ba_task);
iwm_del_task(sc, systq, &sc->mac_ctxt_task);
iwm_del_task(sc, systq, &sc->phy_ctxt_task);
+ iwm_del_task(sc, systq, &sc->bgscan_done_task);
}
sc->ns_nstate = nstate;
@@ -10039,11 +10118,16 @@ iwm_stop(struct ifnet *ifp)
iwm_del_task(sc, systq, &sc->ba_task);
iwm_del_task(sc, systq, &sc->mac_ctxt_task);
iwm_del_task(sc, systq, &sc->phy_ctxt_task);
+ iwm_del_task(sc, systq, &sc->bgscan_done_task);
KASSERT(sc->task_refs.refs >= 1);
refcnt_finalize(&sc->task_refs, "iwmstop");
iwm_stop_device(sc);
+ free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
+ sc->bgscan_unref_arg = NULL;
+ sc->bgscan_unref_arg_size = 0;
+
/* Reset soft state. */
sc->sc_generation++;
@@ -11517,9 +11601,11 @@ iwm_attach(struct device *parent, struct device *self,
task_set(&sc->ba_task, iwm_ba_task, sc);
task_set(&sc->mac_ctxt_task, iwm_mac_ctxt_task, sc);
task_set(&sc->phy_ctxt_task, iwm_phy_ctxt_task, sc);
+ task_set(&sc->bgscan_done_task, iwm_bgscan_done_task, sc);
ic->ic_node_alloc = iwm_node_alloc;
ic->ic_bgscan_start = iwm_bgscan;
+ ic->ic_bgscan_done = iwm_bgscan_done;
ic->ic_set_key = iwm_set_key;
ic->ic_delete_key = iwm_delete_key;
blob - ad894226d8b8120c1e7419db593e09f34835ca5c
blob + 45aeae6c7b24b5a2192fd3c61a70bfedb995669a
--- sys/dev/pci/if_iwmvar.h
+++ sys/dev/pci/if_iwmvar.h
@@ -592,6 +592,10 @@ struct iwm_softc {
int sc_rx_ba_sessions;
int tx_ba_queue_mask;
+ struct task bgscan_done_task;
+ struct ieee80211_node_switch_bss_arg *bgscan_unref_arg;
+ size_t bgscan_unref_arg_size;
+
int sc_scan_last_antenna;
int sc_fixed_ridx;
blob - 95b80be50723602f18d93f5bbcefb0911dd0e7e9
blob + f6dedcbe6b56ceb82029c6f863c2a652e7f10524
--- sys/dev/pci/if_iwx.c
+++ sys/dev/pci/if_iwx.c
@@ -298,6 +298,7 @@ void iwx_nic_config(struct iwx_softc *);
int iwx_nic_rx_init(struct iwx_softc *);
int iwx_nic_init(struct iwx_softc *);
int iwx_enable_txq(struct iwx_softc *, int, int, int, int);
+int iwx_disable_txq(struct iwx_softc *sc, int, int, uint8_t);
void iwx_post_alive(struct iwx_softc *);
int iwx_schedule_session_protection(struct iwx_softc *, struct iwx_node *,
uint32_t);
@@ -420,6 +421,9 @@ void iwx_add_task(struct iwx_softc *, struct taskq *,
void iwx_del_task(struct iwx_softc *, struct taskq *, struct task *);
int iwx_scan(struct iwx_softc *);
int iwx_bgscan(struct ieee80211com *);
+void iwx_bgscan_done(struct ieee80211com *,
+ struct ieee80211_node_switch_bss_arg *, size_t);
+void iwx_bgscan_done_task(void *);
int iwx_umac_scan_abort(struct iwx_softc *);
int iwx_scan_abort(struct iwx_softc *);
int iwx_enable_mgmt_queue(struct iwx_softc *);
@@ -2596,6 +2600,49 @@ out:
return err;
}
+int
+iwx_disable_txq(struct iwx_softc *sc, int sta_id, int qid, uint8_t tid)
+{
+ struct iwx_tx_queue_cfg_cmd cmd;
+ struct iwx_rx_packet *pkt;
+ struct iwx_tx_queue_cfg_rsp *resp;
+ struct iwx_host_cmd hcmd = {
+ .id = IWX_SCD_QUEUE_CFG,
+ .flags = IWX_CMD_WANT_RESP,
+ .resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
+ };
+ struct iwx_tx_ring *ring = &sc->txq[qid];
+ int err;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.sta_id = sta_id;
+ cmd.tid = tid;
+ cmd.flags = htole16(0); /* clear "queue enabled" flag */
+ cmd.cb_size = htole32(0);
+ cmd.byte_cnt_addr = htole64(0);
+ cmd.tfdq_addr = htole64(0);
+
+ hcmd.data[0] = &cmd;
+ hcmd.len[0] = sizeof(cmd);
+
+ err = iwx_send_cmd(sc, &hcmd);
+ if (err)
+ return err;
+
+ pkt = hcmd.resp_pkt;
+ if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
+ DPRINTF(("SCD_QUEUE_CFG command failed\n"));
+ err = EIO;
+ goto out;
+ }
+
+ sc->qenablemsk &= ~(1 << qid);
+ iwx_reset_tx_ring(sc, ring);
+out:
+ iwx_free_resp(sc, &hcmd);
+ return err;
+}
+
void
iwx_post_alive(struct iwx_softc *sc)
{
@@ -6627,6 +6674,78 @@ iwx_bgscan(struct ieee80211com *ic)
return 0;
}
+void
+iwx_bgscan_done(struct ieee80211com *ic,
+ struct ieee80211_node_switch_bss_arg *arg, size_t arg_size)
+{
+ struct iwx_softc *sc = ic->ic_softc;
+
+ free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
+ sc->bgscan_unref_arg = arg;
+ sc->bgscan_unref_arg_size = arg_size;
+ iwx_add_task(sc, sc->sc_nswq, &sc->bgscan_done_task);
+}
+
+void
+iwx_bgscan_done_task(void *arg)
+{
+ struct iwx_softc *sc = arg;
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct iwx_node *in = (void *)ic->ic_bss;
+ struct ieee80211_node *ni = &in->in_ni;
+ int tid, err = 0, s = splnet();
+
+ if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) ||
+ (ic->ic_flags & IEEE80211_F_BGSCAN) == 0 ||
+ ic->ic_state != IEEE80211_S_RUN) {
+ err = ENXIO;
+ goto done;
+ }
+
+ err = iwx_flush_sta(sc, in);
+ if (err)
+ goto done;
+
+ for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
+ int qid = IWX_FIRST_AGG_TX_QUEUE + tid;
+
+ if (sc->aggqid[tid] == 0)
+ continue;
+
+ err = iwx_disable_txq(sc, IWX_STATION_ID, qid, tid);
+ if (err)
+ goto done;
+#if 0 /* disabled for now; we are going to DEAUTH soon anyway */
+ IEEE80211_SEND_ACTION(ic, ni, IEEE80211_CATEG_BA,
+ IEEE80211_ACTION_DELBA,
+ IEEE80211_REASON_AUTH_LEAVE << 16 |
+ IEEE80211_FC1_DIR_TODS << 8 | tid);
+#endif
+ ieee80211_node_tx_ba_clear(ni, tid);
+ sc->aggqid[tid] = 0;
+ }
+
+ /*
+ * Tx queues have been flushed and Tx agg has been stopped.
+ * Allow roaming to proceed.
+ */
+ ni->ni_unref_arg = sc->bgscan_unref_arg;
+ ni->ni_unref_arg_size = sc->bgscan_unref_arg_size;
+ sc->bgscan_unref_arg = NULL;
+ sc->bgscan_unref_arg_size = 0;
+ ieee80211_node_tx_stopped(ic, &in->in_ni);
+done:
+ if (err) {
+ free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
+ sc->bgscan_unref_arg = NULL;
+ sc->bgscan_unref_arg_size = 0;
+ if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
+ task_add(systq, &sc->init_task);
+ }
+ refcnt_rele_wake(&sc->task_refs);
+ splx(s);
+}
+
int
iwx_umac_scan_abort(struct iwx_softc *sc)
{
@@ -7442,6 +7561,7 @@ iwx_newstate(struct ieee80211com *ic, enum ieee80211_s
sc->setkey_cur = sc->setkey_tail = sc->setkey_nkeys = 0;
iwx_del_task(sc, systq, &sc->mac_ctxt_task);
iwx_del_task(sc, systq, &sc->phy_ctxt_task);
+ iwx_del_task(sc, systq, &sc->bgscan_done_task);
}
sc->ns_nstate = nstate;
@@ -8012,11 +8132,16 @@ iwx_stop(struct ifnet *ifp)
sc->setkey_cur = sc->setkey_tail = sc->setkey_nkeys = 0;
iwx_del_task(sc, systq, &sc->mac_ctxt_task);
iwx_del_task(sc, systq, &sc->phy_ctxt_task);
+ iwx_del_task(sc, systq, &sc->bgscan_done_task);
KASSERT(sc->task_refs.refs >= 1);
refcnt_finalize(&sc->task_refs, "iwxstop");
iwx_stop_device(sc);
+ free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
+ sc->bgscan_unref_arg = NULL;
+ sc->bgscan_unref_arg_size = 0;
+
/* Reset soft state. */
sc->sc_generation++;
@@ -9413,9 +9538,11 @@ iwx_attach(struct device *parent, struct device *self,
task_set(&sc->setkey_task, iwx_setkey_task, sc);
task_set(&sc->mac_ctxt_task, iwx_mac_ctxt_task, sc);
task_set(&sc->phy_ctxt_task, iwx_phy_ctxt_task, sc);
+ task_set(&sc->bgscan_done_task, iwx_bgscan_done_task, sc);
ic->ic_node_alloc = iwx_node_alloc;
ic->ic_bgscan_start = iwx_bgscan;
+ ic->ic_bgscan_done = iwx_bgscan_done;
ic->ic_set_key = iwx_set_key;
ic->ic_delete_key = iwx_delete_key;
blob - 93e4860a77b2e42e7cffde648d08d203b5fb51bc
blob + 9590cdb3d7f9c17ef2026d683ca2f0a67febadc5
--- sys/dev/pci/if_iwxvar.h
+++ sys/dev/pci/if_iwxvar.h
@@ -566,6 +566,10 @@ struct iwx_softc {
int sc_tx_timer[IWX_NUM_TX_QUEUES];
int sc_rx_ba_sessions;
+ struct task bgscan_done_task;
+ struct ieee80211_node_switch_bss_arg *bgscan_unref_arg;
+ size_t bgscan_unref_arg_size;
+
int sc_scan_last_antenna;
int sc_fixed_ridx;
blob - 888a39c43dd0bbe0629998a8bb1e24a66466c092
blob + b4b230b5a2361d12b685bab7154ee72c403fe1aa
--- sys/net80211/ieee80211_input.c
+++ sys/net80211/ieee80211_input.c
@@ -2822,6 +2822,10 @@ ieee80211_recv_addba_req(struct ieee80211com *ic, stru
/* The driver is still processing an ADDBA request for this tid. */
if (ba->ba_state == IEEE80211_BA_REQUESTED)
return;
+ /* If we are in the process of roaming between APs, ignore. */
+ if ((ic->ic_flags & IEEE80211_F_BGSCAN) &&
+ (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
+ return;
/* check if we already have a Block Ack agreement for this RA/TID */
if (ba->ba_state == IEEE80211_BA_AGREED) {
/* XXX should we update the timeout value? */
blob - 116d71f94d678686994b0de328dcf0735bac024d
blob + 6e233f810b97b77f8240908fc6dbc116c19fc094
--- sys/net80211/ieee80211_node.c
+++ sys/net80211/ieee80211_node.c
@@ -73,6 +73,8 @@ void ieee80211_node_set_timeouts(struct ieee80211_node
void ieee80211_setup_node(struct ieee80211com *, struct ieee80211_node *,
const u_int8_t *);
struct ieee80211_node *ieee80211_alloc_node_helper(struct ieee80211com *);
+void ieee80211_node_free_unref_cb(struct ieee80211_node *);
+void ieee80211_node_tx_flushed(struct ieee80211com *, struct ieee80211_node *);
void ieee80211_node_switch_bss(struct ieee80211com *, struct ieee80211_node *);
void ieee80211_node_addba_request(struct ieee80211_node *, int);
void ieee80211_node_addba_request_ac_be_to(void *);
@@ -1165,8 +1167,77 @@ struct ieee80211_node_switch_bss_arg {
u_int8_t sel_macaddr[IEEE80211_ADDR_LEN];
};
+void
+ieee80211_node_free_unref_cb(struct ieee80211_node *ni)
+{
+ free(ni->ni_unref_arg, M_DEVBUF, ni->ni_unref_arg_size);
+
+ /* Guard against accidental reuse. */
+ ni->ni_unref_cb = NULL;
+ ni->ni_unref_arg = NULL;
+ ni->ni_unref_arg_size = 0;
+}
+
/* Implements ni->ni_unref_cb(). */
void
+ieee80211_node_tx_stopped(struct ieee80211com *ic,
+ struct ieee80211_node *ni)
+{
+ splassert(IPL_NET);
+
+ if ((ic->ic_flags & IEEE80211_F_BGSCAN) == 0)
+ return;
+
+ /*
+ * Install a callback which will switch us to the new AP once
+ * the de-auth frame has been processed by hardware.
+ * Pass on the existing ni->ni_unref_arg argument.
+ */
+ ic->ic_bss->ni_unref_cb = ieee80211_node_switch_bss;
+
+ /*
+ * All data frames queued to hardware have been flushed and
+ * A-MPDU Tx has been stopped. We are now going to switch APs.
+ * Queue a de-auth frame addressed at our current AP.
+ */
+ if (IEEE80211_SEND_MGMT(ic, ic->ic_bss,
+ IEEE80211_FC0_SUBTYPE_DEAUTH,
+ IEEE80211_REASON_AUTH_LEAVE) != 0) {
+ ic->ic_flags &= ~IEEE80211_F_BGSCAN;
+ ieee80211_node_free_unref_cb(ni);
+ ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
+ return;
+ }
+
+ /* F_BGSCAN flag gets cleared in ieee80211_node_join_bss(). */
+}
+
+/* Implements ni->ni_unref_cb(). */
+void
+ieee80211_node_tx_flushed(struct ieee80211com *ic, struct ieee80211_node *ni)
+{
+ splassert(IPL_NET);
+
+ if ((ic->ic_flags & IEEE80211_F_BGSCAN) == 0)
+ return;
+
+ /* All data frames queued to hardware have been flushed. */
+ if (ic->ic_caps & IEEE80211_C_TX_AMPDU) {
+ /*
+ * Install a callback which will switch us to the
+ * new AP once Tx agg sessions have been stopped,
+	 * which involves sending a DELBA frame.
+ * Pass on the existing ni->ni_unref_arg argument.
+ */
+ ic->ic_bss->ni_unref_cb = ieee80211_node_tx_stopped;
+ ieee80211_stop_ampdu_tx(ic, ic->ic_bss,
+ IEEE80211_FC0_SUBTYPE_DEAUTH);
+ } else
+ ieee80211_node_tx_stopped(ic, ni);
+}
+
+/* Implements ni->ni_unref_cb(). */
+void
ieee80211_node_switch_bss(struct ieee80211com *ic, struct ieee80211_node *ni)
{
struct ifnet *ifp = &ic->ic_if;
@@ -1175,16 +1246,14 @@ ieee80211_node_switch_bss(struct ieee80211com *ic, str
splassert(IPL_NET);
- if ((ic->ic_flags & IEEE80211_F_BGSCAN) == 0) {
- free(sba, M_DEVBUF, sizeof(*sba));
+ if ((ic->ic_flags & IEEE80211_F_BGSCAN) == 0)
return;
- }
ic->ic_xflags &= ~IEEE80211_F_TX_MGMT_ONLY;
selbs = ieee80211_find_node(ic, sba->sel_macaddr);
if (selbs == NULL) {
- free(sba, M_DEVBUF, sizeof(*sba));
+ ieee80211_node_free_unref_cb(ni);
ic->ic_flags &= ~IEEE80211_F_BGSCAN;
ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
return;
@@ -1192,7 +1261,7 @@ ieee80211_node_switch_bss(struct ieee80211com *ic, str
curbs = ieee80211_find_node(ic, sba->cur_macaddr);
if (curbs == NULL) {
- free(sba, M_DEVBUF, sizeof(*sba));
+ ieee80211_node_free_unref_cb(ni);
ic->ic_flags &= ~IEEE80211_F_BGSCAN;
ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
return;
@@ -1206,7 +1275,11 @@ ieee80211_node_switch_bss(struct ieee80211com *ic, str
ieee80211_chan2ieee(ic, selbs->ni_chan));
}
ieee80211_node_newstate(curbs, IEEE80211_STA_CACHE);
- ieee80211_node_join_bss(ic, selbs); /* frees arg and ic->ic_bss */
+ /*
+ * ieee80211_node_join_bss() frees arg and ic->ic_bss via
+ * ic->ic_node_copy() in ieee80211_node_cleanup().
+ */
+ ieee80211_node_join_bss(ic, selbs);
}
void
@@ -1487,32 +1560,32 @@ ieee80211_end_scan(struct ifnet *ifp)
ic->ic_bgscan_fail = 0;
- /*
- * We are going to switch APs. Stop A-MPDU Tx and
- * queue a de-auth frame addressed to our current AP.
- */
- ieee80211_stop_ampdu_tx(ic, ic->ic_bss,
- IEEE80211_FC0_SUBTYPE_DEAUTH);
- if (IEEE80211_SEND_MGMT(ic, ic->ic_bss,
- IEEE80211_FC0_SUBTYPE_DEAUTH,
- IEEE80211_REASON_AUTH_LEAVE) != 0) {
- ic->ic_flags &= ~IEEE80211_F_BGSCAN;
- free(arg, M_DEVBUF, sizeof(*arg));
- return;
- }
-
/* Prevent dispatch of additional data frames to hardware. */
ic->ic_xflags |= IEEE80211_F_TX_MGMT_ONLY;
- /*
- * Install a callback which will switch us to the new AP once
- * all dispatched frames have been processed by hardware.
- */
IEEE80211_ADDR_COPY(arg->cur_macaddr, curbs->ni_macaddr);
IEEE80211_ADDR_COPY(arg->sel_macaddr, selbs->ni_macaddr);
+
+ if (ic->ic_bgscan_done) {
+ /*
+ * The driver will flush its queues and allow roaming
+ * to proceed once queues have been flushed.
+ * On failure the driver will move back to SCAN state.
+ */
+ ic->ic_bgscan_done(ic, arg, sizeof(*arg));
+ return;
+ }
+
+ /*
+ * Install a callback which will switch us to the new AP once
+ * all dispatched frames have been processed by hardware.
+ */
ic->ic_bss->ni_unref_arg = arg;
ic->ic_bss->ni_unref_arg_size = sizeof(*arg);
- ic->ic_bss->ni_unref_cb = ieee80211_node_switch_bss;
+ if (ic->ic_bss->ni_refcnt > 0)
+ ic->ic_bss->ni_unref_cb = ieee80211_node_tx_flushed;
+ else
+ ieee80211_node_tx_flushed(ic, ni);
/* F_BGSCAN flag gets cleared in ieee80211_node_join_bss(). */
return;
} else if (selbs == NULL)
@@ -1611,14 +1684,10 @@ ieee80211_node_cleanup(struct ieee80211com *ic, struct
ni->ni_rsnie = NULL;
}
ieee80211_ba_del(ni);
- ni->ni_unref_cb = NULL;
- free(ni->ni_unref_arg, M_DEVBUF, ni->ni_unref_arg_size);
- ni->ni_unref_arg = NULL;
- ni->ni_unref_arg_size = 0;
-
#ifndef IEEE80211_STA_ONLY
mq_purge(&ni->ni_savedq);
#endif
+ ieee80211_node_free_unref_cb(ni);
}
void
@@ -1979,6 +2048,18 @@ ieee80211_find_node_for_beacon(struct ieee80211com *ic
}
void
+ieee80211_node_tx_ba_clear(struct ieee80211_node *ni, int tid)
+{
+ struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
+
+ if (ba->ba_state != IEEE80211_BA_INIT) {
+ if (timeout_pending(&ba->ba_to))
+ timeout_del(&ba->ba_to);
+ ba->ba_state = IEEE80211_BA_INIT;
+ }
+}
+
+void
ieee80211_ba_del(struct ieee80211_node *ni)
{
int tid;
@@ -1994,14 +2075,8 @@ ieee80211_ba_del(struct ieee80211_node *ni)
}
}
- for (tid = 0; tid < nitems(ni->ni_tx_ba); tid++) {
- struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
- if (ba->ba_state != IEEE80211_BA_INIT) {
- if (timeout_pending(&ba->ba_to))
- timeout_del(&ba->ba_to);
- ba->ba_state = IEEE80211_BA_INIT;
- }
- }
+ for (tid = 0; tid < nitems(ni->ni_tx_ba); tid++)
+ ieee80211_node_tx_ba_clear(ni, tid);
timeout_del(&ni->ni_addba_req_to[EDCA_AC_BE]);
timeout_del(&ni->ni_addba_req_to[EDCA_AC_BK]);
@@ -2040,17 +2115,18 @@ void
ieee80211_release_node(struct ieee80211com *ic, struct ieee80211_node *ni)
{
int s;
+ void (*ni_unref_cb)(struct ieee80211com *, struct ieee80211_node *);
DPRINTF(("%s refcnt %u\n", ether_sprintf(ni->ni_macaddr),
ni->ni_refcnt));
s = splnet();
if (ieee80211_node_decref(ni) == 0) {
if (ni->ni_unref_cb) {
- (*ni->ni_unref_cb)(ic, ni);
+ /* The callback may set ni->ni_unref_cb again. */
+ ni_unref_cb = ni->ni_unref_cb;
ni->ni_unref_cb = NULL;
/* Freed by callback if necessary: */
- ni->ni_unref_arg = NULL;
- ni->ni_unref_arg_size = 0;
+ (*ni_unref_cb)(ic, ni);
}
if (ni->ni_state == IEEE80211_STA_COLLECT)
ieee80211_free_node(ic, ni);
blob - d8fce4ca1f545d4f5d85fbf0ecd32e6959ad3003
blob + 05ee74a401deae7718839e9cf8084692c1b06c8b
--- sys/net80211/ieee80211_node.h
+++ sys/net80211/ieee80211_node.h
@@ -518,6 +518,7 @@ struct ieee80211_node *ieee80211_dup_bss(struct ieee80
const u_int8_t *);
struct ieee80211_node *ieee80211_find_node(struct ieee80211com *,
const u_int8_t *);
+void ieee80211_node_tx_ba_clear(struct ieee80211_node *, int);
void ieee80211_ba_del(struct ieee80211_node *);
struct ieee80211_node *ieee80211_find_rxnode(struct ieee80211com *,
const struct ieee80211_frame *);
@@ -553,6 +554,7 @@ void ieee80211_node_join(struct ieee80211com *,
void ieee80211_node_leave(struct ieee80211com *,
struct ieee80211_node *);
int ieee80211_match_bss(struct ieee80211com *, struct ieee80211_node *, int);
+void ieee80211_node_tx_stopped(struct ieee80211com *, struct ieee80211_node *);
struct ieee80211_node *ieee80211_node_choose_bss(struct ieee80211com *, int,
struct ieee80211_node **);
void ieee80211_node_join_bss(struct ieee80211com *, struct ieee80211_node *);
blob - 41d846d591de93bd7a010f75322f4f223a132083
blob + 95a6d9a2ece2768ab0de929bc3c41940a07a6719
--- sys/net80211/ieee80211_proto.c
+++ sys/net80211/ieee80211_proto.c
@@ -733,14 +733,9 @@ ieee80211_delba_request(struct ieee80211com *ic, struc
}
if (dir) {
/* MLME-DELBA.confirm(Originator) */
- struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
-
if (ic->ic_ampdu_tx_stop != NULL)
ic->ic_ampdu_tx_stop(ic, ni, tid);
-
- ba->ba_state = IEEE80211_BA_INIT;
- /* stop Block Ack inactivity timer */
- timeout_del(&ba->ba_to);
+ ieee80211_node_tx_ba_clear(ni, tid);
} else {
/* MLME-DELBA.confirm(Recipient) */
struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
blob - 174da137a22361ca66fc4e3be2c5faace8ff2818
blob + c975badf30cc533995ee39f2f8e21ed24c4f23eb
--- sys/net80211/ieee80211_var.h
+++ sys/net80211/ieee80211_var.h
@@ -212,6 +212,8 @@ struct ieee80211_defrag {
#define IEEE80211_GROUP_NKID 6
+struct ieee80211_node_switch_bss_arg;
+
struct ieee80211com {
struct arpcom ic_ac;
LIST_ENTRY(ieee80211com) ic_list; /* chain of all ieee80211com */
@@ -248,6 +250,9 @@ struct ieee80211com {
void (*ic_updateprot)(struct ieee80211com *);
void (*ic_updatechan)(struct ieee80211com *);
int (*ic_bgscan_start)(struct ieee80211com *);
+ void (*ic_bgscan_done)(struct ieee80211com *,
+ struct ieee80211_node_switch_bss_arg *,
+ size_t);
struct timeout ic_bgscan_timeout;
uint32_t ic_bgscan_fail;
u_int8_t ic_myaddr[IEEE80211_ADDR_LEN];