int dev_queue_xmit(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        struct netdev_queue *txq;
        struct Qdisc *q;
        int rc = -ENOMEM;

        /* GSO will handle the following emulations directly. */
        if (netif_needs_gso(dev, skb))
                goto gso;

        if (skb_shinfo(skb)->frag_list &&
            !(dev->features & NETIF_F_FRAGLIST) &&
            __skb_linearize(skb))
                goto out_kfree_skb;

        /* Fragmented skb is linearized if device does not support SG,
         * or if at least one of fragments is in highmem and device
         * does not support DMA from it.
         */
        if (skb_shinfo(skb)->nr_frags &&
            (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
            __skb_linearize(skb))
                goto out_kfree_skb;

        /* If packet is not checksummed and device does not support
         * checksumming for this protocol, complete checksumming here.
         */
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                skb_set_transport_header(skb, skb->csum_start -
                                              skb_headroom(skb));
                if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
                        goto out_kfree_skb;
        }

gso:
        /* Disable soft irqs for various locks below. Also
         * stops preemption for RCU.
         */
        rcu_read_lock_bh();

        txq = dev_pick_tx(dev, skb);
        q = rcu_dereference(txq->qdisc);

#ifdef CONFIG_NET_CLS_ACT
        skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
#endif
        if (q->enqueue) {
                spinlock_t *root_lock = qdisc_lock(q);

                spin_lock(root_lock);

                if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
                        kfree_skb(skb);
                        rc = NET_XMIT_DROP;
                } else {
                        rc = qdisc_enqueue_root(skb, q);
                        qdisc_run(q);
                }
                spin_unlock(root_lock);

                goto out;
        }

        /* The device has no queue (common for software devices such as
         * loopback and tunnels): transmit directly under the per-queue
         * xmit lock.  If this CPU already owns the lock we have recursed
         * onto the same device, so the packet is dropped.
         */
        if (dev->flags & IFF_UP) {
                int cpu = smp_processor_id(); /* ok because BHs are off */

                if (txq->xmit_lock_owner != cpu) {

                        HARD_TX_LOCK(dev, txq, cpu);

                        if (!netif_tx_queue_stopped(txq)) {
                                rc = 0;
                                if (!dev_hard_start_xmit(skb, dev, txq)) {
                                        HARD_TX_UNLOCK(dev, txq);
                                        goto out;
                                }
                        }
                        HARD_TX_UNLOCK(dev, txq);
                        if (net_ratelimit())
                                printk(KERN_CRIT "Virtual device %s asks to "
                                       "queue packet!\n", dev->name);
                } else {
                        /* Recursion is detected! It is possible,
                         * unfortunately */
                        if (net_ratelimit())
                                printk(KERN_CRIT "Dead loop on virtual device "
                                       "%s, fix it urgently!\n", dev->name);
                }
        }

        rc = -ENETDOWN;
        rcu_read_unlock_bh();

out_kfree_skb:
        kfree_skb(skb);
        return rc;
out:
        rcu_read_unlock_bh();
        return rc;
}

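For readers following the transmit path above, here is a minimal sketch (not part of the posted file) of how in-kernel code typically hands a fully built frame to dev_queue_xmit(). The helper xmit_prebuilt_frame() and its assumption that the link-layer header is already in place are hypothetical; dev_queue_xmit(), skb->dev and the NET_XMIT_*/-ENETDOWN return values are the interfaces shown in the listing.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical helper: skb is assumed to already hold a complete
 * link-layer frame (e.g. built with dev_hard_header()).  All that is
 * left is to bind it to the egress device and push it into the qdisc
 * layer via dev_queue_xmit().
 */
static int xmit_prebuilt_frame(struct sk_buff *skb, struct net_device *dev)
{
        skb->dev = dev;         /* dev_queue_xmit() picks the tx queue from skb->dev */

        /* Returns NET_XMIT_SUCCESS/NET_XMIT_DROP from the qdisc path, or a
         * negative errno such as -ENETDOWN when the device has no queue
         * and is not up.
         */
        return dev_queue_xmit(skb);
}

In-kernel callers such as the packet socket code follow the same shape: build the frame, bind it to the device, and let the qdisc/driver layers shown above do the rest.
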
/*=======================================================================
                        Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;            /* old backlog weight */

DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };


/**
 *      netif_rx        -       post buffer to the network code
 *      @skb: buffer to post
 *
 *      This function receives a packet from a device driver and queues it for
 *      the upper (protocol) levels to process.  It always succeeds. The buffer
 *      may be dropped during processing for congestion control or by the
 *      protocol layers.
 *
 *      return values:
 *      NET_RX_SUCCESS  (no congestion)
 *      NET_RX_DROP     (packet was dropped)
 *
 */

int netif_rx(struct sk_buff *skb)
{
        struct softnet_data *queue;
        unsigned long flags;

        /* if netpoll wants it, pretend we never saw it */
        if (netpoll_rx(skb))
                return NET_RX_DROP;

        if (!skb->tstamp.tv64)
                net_timestamp(skb);

        /*
         * The code is arranged so that the path is shortest when the
         * CPU is congested but still operating.
         */
        local_irq_save(flags);
        queue = &__get_cpu_var(softnet_data);

        __get_cpu_var(netdev_rx_stat).total++;
        if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
                if (queue->input_pkt_queue.qlen) {
enqueue:
                        __skb_queue_tail(&queue->input_pkt_queue, skb);
                        local_irq_restore(flags);
                        return NET_RX_SUCCESS;
                }

                napi_schedule(&queue->backlog);
                goto enqueue;
        }

        __get_cpu_var(netdev_rx_stat).dropped++;
        local_irq_restore(flags);

        kfree_skb(skb);
        return NET_RX_DROP;
}



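On the receive side, a sketch (again not from the posted file) of how a simple non-NAPI driver would feed a frame to netif_rx() from its interrupt handler; hw_buf and len are hypothetical placeholders for whatever the hardware hands over, while dev_alloc_skb(), skb_reserve(), skb_put(), eth_type_trans() and netif_rx() are the standard APIs.

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

/* Sketch of a non-NAPI receive path ("hw_buf"/"len" are hypothetical
 * stand-ins for the buffer and length reported by the hardware).
 */
static void example_rx(struct net_device *dev, const void *hw_buf,
                       unsigned int len)
{
        struct sk_buff *skb;

        skb = dev_alloc_skb(len + NET_IP_ALIGN);        /* room for the frame  */
        if (!skb) {
                dev->stats.rx_dropped++;
                return;
        }

        skb_reserve(skb, NET_IP_ALIGN);                 /* align the IP header */
        memcpy(skb_put(skb, len), hw_buf, len);         /* copy frame into skb */

        skb->protocol = eth_type_trans(skb, dev);       /* also sets skb->dev  */
        netif_rx(skb);                                  /* queue for softirq   */
}

netif_rx() then lands the buffer in the per-CPU input_pkt_queue shown above, subject to the netdev_max_backlog limit.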