http://lxr.free-electrons.com/source/net/core/dev.c#L2688

2688 static void net_rx_action(struct softirq_action *h)
2689 {
2690         struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
2691         unsigned long time_limit = jiffies + 2;
2692         int budget = netdev_budget;
2693         void *have;
2694 
2695         local_irq_disable();
2696 
2697         while (!list_empty(list)) {
2698                 struct napi_struct *n;
2699                 int work, weight;
2700 
2701                 /* If the softirq window is exhausted then punt.
2702                  * Allow this to run for 2 jiffies, which gives
2703                  * an average latency of 1.5/HZ.
2704                  */
2705                 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
2706                         goto softnet_break;
2707 
2708                 local_irq_enable();
2709 
2710                 /* Even though interrupts have been re-enabled, this
2711                  * access is safe because interrupts can only add new
2712                  * entries to the tail of this list, and only ->poll()
2713                  * calls can remove this head entry from the list.
2714                  */
2715                 n = list_entry(list->next, struct napi_struct, poll_list);
2716 
2717                 have = netpoll_poll_lock(n);
2718 
2719                 weight = n->weight;
2720 
2721                 /* This NAPI_STATE_SCHED test is for avoiding a race
2722                  * with netpoll's poll_napi().  Only the entity which
2723                  * obtains the lock and sees NAPI_STATE_SCHED set will
2724                  * actually make the ->poll() call.  Therefore we avoid
2725                  * accidentally calling ->poll() when NAPI is not scheduled.
2726                  */
2727                 work = 0;
2728                 if (test_bit(NAPI_STATE_SCHED, &n->state))
2729                         work = n->poll(n, weight);
2730 
2731                 WARN_ON_ONCE(work > weight);
2732 
2733                 budget -= work;
2734 
2735                 local_irq_disable();
2736 
2737                 /* Drivers must not modify the NAPI state if they
2738                  * consume the entire weight.  In such cases this code
2739                  * still "owns" the NAPI instance and therefore can
2740                  * move the instance around on the list at-will.
2741                  */
2742                 if (unlikely(work == weight)) {
2743                         if (unlikely(napi_disable_pending(n)))
2744                                 __napi_complete(n);
2745                         else
2746                                 list_move_tail(&n->poll_list, list);
2747                 }
2748 
2749                 netpoll_poll_unlock(have);
2750         }
2751 out:
2752         local_irq_enable();
2753 
2754 #ifdef CONFIG_NET_DMA
2755         /*
2756          * There may not be any more sk_buffs coming right now, so push
2757          * any pending DMA copies to hardware
2758          */
2759         dma_issue_pending_all();
2760 #endif
2761 
2762         return;
2763 
2764 softnet_break:
2765         __get_cpu_var(netdev_rx_stat).time_squeeze++;
2766         __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2767         goto out;
2768 }
2769 
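For context, here is a minimal sketch of the other side of this contract: a driver ->poll() callback behaving the way net_rx_action() expects. Everything prefixed my_ is hypothetical; only napi_complete(), netif_receive_skb() and container_of() are real kernel interfaces.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_priv {                        /* hypothetical per-device state */
        struct napi_struct napi;
        /* ... RX ring bookkeeping ... */
};

/* Hypothetical helpers standing in for real RX-ring / IRQ handling. */
static struct sk_buff *my_rx_ring_next(struct my_priv *priv);
static void my_enable_rx_irq(struct my_priv *priv);

static int my_napi_poll(struct napi_struct *napi, int budget)
{
        struct my_priv *priv = container_of(napi, struct my_priv, napi);
        int work_done = 0;

        /* Pull at most 'budget' packets off the RX ring. */
        while (work_done < budget) {
                struct sk_buff *skb = my_rx_ring_next(priv);

                if (!skb)
                        break;
                netif_receive_skb(skb);
                work_done++;
        }

        /* Only when the ring was drained (work_done < budget) may the
         * driver clear NAPI_STATE_SCHED and re-enable its RX interrupt.
         * If the full budget was consumed, net_rx_action() above still
         * "owns" the instance and will poll it again.
         */
        if (work_done < budget) {
                napi_complete(napi);
                my_enable_rx_irq(priv);
        }

        return work_done;
}

The returned work_done is what gets charged against both the per-NAPI weight and the global netdev_budget in the loop above, and keeping napi_complete() behind the work_done < budget check is what keeps the unlikely(work == weight) branch in net_rx_action() safe.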


