Theo pointed out the timeout should be cancelled when the interface
goes down. This adds a timeout_del in msk_stop.
Some other drivers suffer this problem; I'll have a look at them
once this is in.
On Mon, Oct 23, 2017 at 11:35:09AM +1000, David Gwynne wrote:
> if msk runs out of mbufs, the rx ring remains empty and there's
> nothing except an ifconfig down and up to get it going again.
>
> this adds a timeout to refill the ring. it's largely copied from
> other drivers (vr in this case).
>
> tests? ok?
>
> Index: if_mskvar.h
> ===================================================================
> RCS file: /cvs/src/sys/dev/pci/if_mskvar.h,v
> retrieving revision 1.13
> diff -u -p -r1.13 if_mskvar.h
> --- if_mskvar.h 2 Jun 2017 01:47:36 -0000 1.13
> +++ if_mskvar.h 23 Oct 2017 01:32:04 -0000
> @@ -212,6 +212,7 @@ struct sk_if_softc {
> int sk_pktlen;
> int sk_link;
> struct timeout sk_tick_ch;
> + struct timeout sk_tick_rx;
> struct msk_chain_data sk_cdata;
> struct msk_ring_data *sk_rdata;
> bus_dmamap_t sk_ring_map;
> Index: if_msk.c
> ===================================================================
> RCS file: /cvs/src/sys/dev/pci/if_msk.c,v
> retrieving revision 1.130
> diff -u -p -r1.130 if_msk.c
> --- if_msk.c 3 Oct 2017 15:37:58 -0000 1.130
> +++ if_msk.c 23 Oct 2017 01:32:04 -0000
> @@ -148,7 +148,7 @@ void msk_ifmedia_sts(struct ifnet *, str
> int msk_newbuf(struct sk_if_softc *);
> int msk_init_rx_ring(struct sk_if_softc *);
> int msk_init_tx_ring(struct sk_if_softc *);
> -void msk_fill_rx_ring(struct sk_if_softc *);
> +void msk_fill_rx_ring(struct sk_if_softc *, int);
>
> int msk_miibus_readreg(struct device *, int, int);
> void msk_miibus_writereg(struct device *, int, int, int);
> @@ -156,6 +156,7 @@ void msk_miibus_statchg(struct device *)
>
> void msk_iff(struct sk_if_softc *);
> void msk_tick(void *);
> +void msk_fill_rx_tick(void *);
>
> #ifdef MSK_DEBUG
> #define DPRINTF(x) if (mskdebug) printf x
> @@ -428,7 +429,7 @@ msk_init_rx_ring(struct sk_if_softc *sc_
> /* two ring entries per packet, so the effective ring size is halved */
> if_rxr_init(&sc_if->sk_cdata.sk_rx_ring, 2, MSK_RX_RING_CNT/2);
>
> - msk_fill_rx_ring(sc_if);
> + msk_fill_rx_ring(sc_if, 0);
> return (0);
> }
>
> @@ -1027,6 +1028,7 @@ msk_attach(struct device *parent, struct
> ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_AUTO);
>
> timeout_set(&sc_if->sk_tick_ch, msk_tick, sc_if);
> + timeout_set(&sc_if->sk_tick_rx, msk_fill_rx_tick, sc_if);
>
> /*
> * Call MI attach routines.
> @@ -1779,7 +1781,7 @@ msk_txeof(struct sk_if_softc *sc_if)
> }
>
> void
> -msk_fill_rx_ring(struct sk_if_softc *sc_if)
> +msk_fill_rx_ring(struct sk_if_softc *sc_if, int ticks)
> {
> u_int slots, used;
>
> @@ -1792,6 +1794,20 @@ msk_fill_rx_ring(struct sk_if_softc *sc_
> slots -= used;
> }
> if_rxr_put(&sc_if->sk_cdata.sk_rx_ring, slots);
> + if (if_rxr_inuse(&sc_if->sk_cdata.sk_rx_ring) == 0)
> + timeout_add(&sc_if->sk_tick_rx, ticks);
> +}
> +
> +void
> +msk_fill_rx_tick(void *xsc_if)
> +{
> + struct sk_if_softc *sc_if = xsc_if;
> + int s;
> +
> + s = splnet();
> + if (if_rxr_inuse(&sc_if->sk_cdata.sk_rx_ring) == 0)
> + msk_fill_rx_ring(sc_if, 1);
> + splx(s);
> }
>
> void
> @@ -1902,12 +1918,12 @@ msk_intr(void *xsc)
> CSR_WRITE_4(sc, SK_Y2_ICR, 2);
>
> if (rx[0]) {
> - msk_fill_rx_ring(sc_if0);
> + msk_fill_rx_ring(sc_if0, 0);
> SK_IF_WRITE_2(sc_if0, 0, SK_RXQ1_Y2_PREF_PUTIDX,
> sc_if0->sk_cdata.sk_rx_prod);
> }
> if (rx[1]) {
> - msk_fill_rx_ring(sc_if1);
> + msk_fill_rx_ring(sc_if1, 0);
> SK_IF_WRITE_2(sc_if1, 0, SK_RXQ1_Y2_PREF_PUTIDX,
> sc_if1->sk_cdata.sk_rx_prod);
> }