On Mon, Jan 08, 2018 at 11:13:33AM +1000, David Gwynne wrote:
> this is tx mitigation again, i.e., defer calling an interface's start
> routine until at least 4 packets are queued, or a task fires.
> 
> the task firing is a problem for things like gif or vxlan that encap
> a packet in ip and send it through the ip stack again. the ip stack
> expects NET_RLOCK to be held. that is implicitly true when sending
> out of the network stack, but not when the bundle task fires.
> 
> this has the bundle tasks take the network read lock on behalf of
> the start routines, like the stack does. this avoids having to patch
> every driver to cope with this.
> 
> tests?
> 

Unfortunately, suspend/resume is still broken with this diff on my x230:
when I do 'zzz', the screen goes dark, the LED around the power button
starts blinking, and the machine seems to lock up. Keystrokes have no
effect and it doesn't respond to pings.
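
For what it's worth, this is how I read the deferral logic while testing;
a rough userspace model with made-up names (fake_ifq and friends), just to
sanity-check my understanding of when the start routine runs immediately
versus being left to the bundle task:

/*
 * Rough userspace model of the deferral in ifq_start() below.
 * Hypothetical names; only meant to show when the start routine
 * runs right away versus when the softnet bundle task is scheduled.
 */
#include <stdio.h>

#define IFQ_MIT_THRESHOLD	4	/* mirrors the min(4, ifq_maxlen) test */

struct fake_ifq {
	unsigned int	len;		/* packets currently queued */
	unsigned int	maxlen;
	int		task_pending;	/* stand-in for the bundle task */
};

static void
fake_run_start(struct fake_ifq *ifq)
{
	printf("start routine runs, draining %u packet(s)\n", ifq->len);
	ifq->len = 0;
}

static void
fake_ifq_start(struct fake_ifq *ifq)
{
	unsigned int thresh = ifq->maxlen < IFQ_MIT_THRESHOLD ?
	    ifq->maxlen : IFQ_MIT_THRESHOLD;

	if (ifq->len >= thresh) {
		ifq->task_pending = 0;		/* task_del() */
		fake_run_start(ifq);		/* ifq_run_start() */
	} else
		ifq->task_pending = 1;		/* task_add() */
}

int
main(void)
{
	struct fake_ifq ifq = { .len = 0, .maxlen = 256, .task_pending = 0 };
	int i;

	/* the first three packets only schedule the deferred task */
	for (i = 0; i < 3; i++) {
		ifq.len++;
		fake_ifq_start(&ifq);
	}
	printf("after 3 packets: pending task = %d\n", ifq.task_pending);

	/* the fourth packet crosses the threshold and starts tx directly */
	ifq.len++;
	fake_ifq_start(&ifq);

	return 0;
}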

> Index: ifq.c
> ===================================================================
> RCS file: /cvs/src/sys/net/ifq.c,v
> retrieving revision 1.21
> diff -u -p -r1.21 ifq.c
> --- ifq.c     4 Jan 2018 11:02:57 -0000       1.21
> +++ ifq.c     8 Jan 2018 01:09:05 -0000
> @@ -70,9 +70,16 @@ struct priq {
>  void ifq_start_task(void *);
>  void ifq_restart_task(void *);
>  void ifq_barrier_task(void *);
> +void ifq_bundle_task(void *);
>  
>  #define TASK_ONQUEUE 0x1
>  
> +static inline void
> +ifq_run_start(struct ifqueue *ifq)
> +{
> +     ifq_serialize(ifq, &ifq->ifq_start);
> +}
> +
>  void
>  ifq_serialize(struct ifqueue *ifq, struct task *t)
>  {
> @@ -114,6 +121,16 @@ ifq_is_serialized(struct ifqueue *ifq)
>  }
>  
>  void
> +ifq_start(struct ifqueue *ifq)
> +{
> +     if (ifq_len(ifq) >= min(4, ifq->ifq_maxlen)) {
> +             task_del(ifq->ifq_softnet, &ifq->ifq_bundle);
> +             ifq_run_start(ifq);
> +     } else
> +             task_add(ifq->ifq_softnet, &ifq->ifq_bundle);
> +}
> +
> +void
>  ifq_start_task(void *p)
>  {
>       struct ifqueue *ifq = p;
> @@ -137,11 +154,36 @@ ifq_restart_task(void *p)
>  }
>  
>  void
> +ifq_bundle_task(void *p)
> +{
> +     struct ifqueue *ifq = p;
> +     int s;
> +
> +     NET_RLOCK();
> +     s = splnet();
> +     ifq_run_start(ifq);
> +     splx(s);
> +     NET_RUNLOCK();
> +}
> +
> +void
>  ifq_barrier(struct ifqueue *ifq)
>  {
>       struct cond c = COND_INITIALIZER();
>       struct task t = TASK_INITIALIZER(ifq_barrier_task, &c);
>  
> +     if (!task_del(ifq->ifq_softnet, &ifq->ifq_bundle)) {
> +             int netlocked = (rw_status(&netlock) == RW_WRITE);
> +
> +             if (netlocked) /* XXXSMP breaks atomicity */
> +                     NET_UNLOCK();
> +
> +             taskq_barrier(ifq->ifq_softnet);
> +
> +             if (netlocked)
> +                     NET_LOCK();
> +     }
> +
>       if (ifq->ifq_serializer == NULL)
>               return;
>  
> @@ -166,6 +208,7 @@ void
>  ifq_init(struct ifqueue *ifq, struct ifnet *ifp, unsigned int idx)
>  {
>       ifq->ifq_if = ifp;
> +     ifq->ifq_softnet = net_tq(ifp->if_index);
>       ifq->ifq_softc = NULL;
>  
>       mtx_init(&ifq->ifq_mtx, IPL_NET);
> @@ -187,6 +230,7 @@ ifq_init(struct ifqueue *ifq, struct ifn
>       mtx_init(&ifq->ifq_task_mtx, IPL_NET);
>       TAILQ_INIT(&ifq->ifq_task_list);
>       ifq->ifq_serializer = NULL;
> +     task_set(&ifq->ifq_bundle, ifq_bundle_task, ifq);
>  
>       task_set(&ifq->ifq_start, ifq_start_task, ifq);
>       task_set(&ifq->ifq_restart, ifq_restart_task, ifq);
> @@ -237,6 +281,8 @@ void
>  ifq_destroy(struct ifqueue *ifq)
>  {
>       struct mbuf_list ml = MBUF_LIST_INITIALIZER();
> +
> +     ifq_barrier(ifq); /* ensure nothing is running with the ifq */
>  
>       /* don't need to lock because this is the last use of the ifq */
>  
> Index: ifq.h
> ===================================================================
> RCS file: /cvs/src/sys/net/ifq.h,v
> retrieving revision 1.20
> diff -u -p -r1.20 ifq.h
> --- ifq.h     4 Jan 2018 11:02:57 -0000       1.20
> +++ ifq.h     8 Jan 2018 01:09:05 -0000
> @@ -25,6 +25,7 @@ struct ifq_ops;
>  
>  struct ifqueue {
>       struct ifnet            *ifq_if;
> +     struct taskq            *ifq_softnet;
>       union {
>               void                    *_ifq_softc;
>               /*
> @@ -57,6 +58,7 @@ struct ifqueue {
>       struct mutex             ifq_task_mtx;
>       struct task_list         ifq_task_list;
>       void                    *ifq_serializer;
> +     struct task              ifq_bundle;
>  
>       /* work to be serialised */
>       struct task              ifq_start;
> @@ -405,6 +407,7 @@ void               ifq_attach(struct ifqueue *, cons
>  void          ifq_destroy(struct ifqueue *);
>  void          ifq_add_data(struct ifqueue *, struct if_data *);
>  int           ifq_enqueue(struct ifqueue *, struct mbuf *);
> +void          ifq_start(struct ifqueue *);
>  struct mbuf  *ifq_deq_begin(struct ifqueue *);
>  void          ifq_deq_commit(struct ifqueue *, struct mbuf *);
>  void          ifq_deq_rollback(struct ifqueue *, struct mbuf *);
> @@ -438,12 +441,6 @@ static inline unsigned int
>  ifq_is_oactive(struct ifqueue *ifq)
>  {
>       return (ifq->ifq_oactive);
> -}
> -
> -static inline void
> -ifq_start(struct ifqueue *ifq)
> -{
> -     ifq_serialize(ifq, &ifq->ifq_start);
>  }
>  
>  static inline void

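In case it helps other reviewers, this is roughly how I understand the
output path with the diff applied. example_if_enqueue() is my paraphrase
of the stack side, not the real if_enqueue() in if.c; it only uses the
ifq_enqueue()/ifq_start() interface declared above:

#include <sys/param.h>
#include <sys/mbuf.h>
#include <net/if.h>
#include <net/if_var.h>

/*
 * Paraphrase of the output path as I read it with this diff applied;
 * not the actual if.c code, just the shape of it.
 */
int
example_if_enqueue(struct ifnet *ifp, struct mbuf *m)
{
	struct ifqueue *ifq = &ifp->if_snd;
	int error;

	/*
	 * the caller holds NET_RLOCK here, so when the start routine
	 * runs directly it may safely re-enter the stack (gif, vxlan).
	 */
	error = ifq_enqueue(ifq, m);
	if (error)
		return (error);

	/*
	 * ifq_start() either runs the start routine right away
	 * (>= 4 packets queued) or leaves it to ifq_bundle_task(),
	 * which takes NET_RLOCK itself before running it.
	 */
	ifq_start(ifq);

	return (0);
}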