/*
 * Allocate a single object from @cachep, taken from a slab resident on
 * NUMA node @nodeid.  If the node's partial and free slab lists are both
 * empty, try to grow the cache on that node; if that also fails, fall
 * back to fallback_alloc() to satisfy the request from another node.
 *
 * Interrupts must be disabled on entry (asserted via check_irq_off()).
 * Returns a pointer to the allocated object, or whatever fallback_alloc()
 * returns on the fallback path.
 */
static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
                                int nodeid)
{
        struct list_head *entry;
        struct slab *slabp;
        struct kmem_list3 *l3;
        void *obj;
        int x;

        l3 = cachep->nodelists[nodeid];
        BUG_ON(!l3);

retry:
        check_irq_off();
        spin_lock(&l3->list_lock);
        /*
         * Prefer a partially-used slab; only dip into the free list when
         * no partial slab exists, marking free_touched so the reaper
         * knows the free list has been used recently.
         */
        entry = l3->slabs_partial.next;
        if (entry == &l3->slabs_partial) {
                l3->free_touched = 1;
                entry = l3->slabs_free.next;
                if (entry == &l3->slabs_free)
                        goto must_grow;
        }

        slabp = list_entry(entry, struct slab, list);
        check_spinlock_acquired_node(cachep, nodeid);
        check_slabp(cachep, slabp);

        STATS_INC_NODEALLOCS(cachep);
        STATS_INC_ACTIVE(cachep);
        STATS_SET_HIGH(cachep);

        /* A slab on the partial/free lists must have at least one free object. */
        BUG_ON(slabp->inuse == cachep->num);

        obj = slab_get_obj(cachep, slabp, nodeid);
        check_slabp(cachep, slabp);
        l3->free_objects--;
        /* move slabp to correct slabp list: */
        list_del(&slabp->list);

        /* BUFCTL_END in slabp->free means the slab is now fully allocated. */
        if (slabp->free == BUFCTL_END)
                list_add(&slabp->list, &l3->slabs_full);
        else
                list_add(&slabp->list, &l3->slabs_partial);

        spin_unlock(&l3->list_lock);
        goto done;

must_grow:
        /*
         * Both lists empty: drop the lock and try to add a new slab on
         * this node (GFP_THISNODE keeps the allocation node-local).
         * On success, retry the whole list walk from scratch — another
         * CPU may have consumed the new slab in the meantime.
         */
        spin_unlock(&l3->list_lock);
        x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
        if (x)
                goto retry;

        /* Could not grow on @nodeid; allocate from some other node. */
        return fallback_alloc(cachep, flags);

done:
        return obj;
}