On Thu, Mar 29, 2001, Tony Hoyle <[EMAIL PROTECTED]> wrote:
> Unfortunately I can't test it as it uses undefined routines 
> pci_pool_{alloc,free}.  Is there another patch that goes with this one?

This one.
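
For a quick picture of how the new calls fit together, here is a minimal,
untested sketch of a driver using a pool for its transfer descriptors.
Everything except the pci_pool_* calls (the struct, field, and function
names) is made up for illustration:

#include <linux/pci.h>
#include <linux/slab.h>

/* hypothetical transfer descriptor; the layout is invented */
struct my_td {
	u32		link;
	u32		status;
	dma_addr_t	self;		/* dma address of this block */
};

static struct pci_pool	*td_pool;

static int my_setup (struct pci_dev *pdev)
{
	dma_addr_t	dma;
	struct my_td	*td;

	/* blocks sized and 16-byte aligned, never crossing a 4K boundary */
	td_pool = pci_pool_create ("my_td", pdev,
			sizeof (struct my_td), 16, 4096, GFP_KERNEL);
	if (!td_pool)
		return -ENOMEM;

	/* grab one block; 'dma' is the address the controller is told */
	td = pci_pool_alloc (td_pool, GFP_KERNEL, &dma);
	if (!td) {
		pci_pool_destroy (td_pool);
		return -ENOMEM;
	}
	td->self = dma;

	/* ... hand td/dma to the controller, run the transfer ... */

	pci_pool_free (td_pool, td, td->self);
	pci_pool_destroy (td_pool);
	return 0;
}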

JE

--- include/linux/pci-orig.h    Wed Mar 14 12:40:44 2001
+++ include/linux/pci.h Thu Mar 22 14:06:46 2001
@@ -553,6 +553,14 @@
 struct pci_driver *pci_dev_driver(const struct pci_dev *);
 const struct pci_device_id *pci_match_device(const struct pci_device_id *ids, const struct pci_dev *dev);
 
+/* kmem_cache style wrapper around pci_alloc_consistent() */
+struct pci_pool *pci_pool_create (const char *name, struct pci_dev *dev,
+               size_t size, size_t align, size_t allocation, int flags);
+void pci_pool_destroy (struct pci_pool *pool);
+
+void *pci_pool_alloc (struct pci_pool *pool, int flags, dma_addr_t *handle);
+void pci_pool_free (struct pci_pool *pool, void *vaddr, dma_addr_t addr);
+
 #endif /* CONFIG_PCI */
 
 /* Include architecture-dependent settings and functions */
--- drivers/pci/pci-orig.c      Wed Mar 14 12:40:27 2001
+++ drivers/pci/pci.c   Fri Mar 23 10:12:46 2001
@@ -1339,6 +1339,347 @@
 }
 #endif
 
+/*
+ * Pool allocator ... useful for many drivers.  This wraps the core
+ * pci_alloc_consistent allocator, so small blocks can easily be used
+ * by drivers for bus mastering controllers.  This should probably be
+ * sharing the guts of the slab allocator.
+ */
+
+struct pci_pool {      /* the pool */
+       struct list_head        page_list;
+       spinlock_t              lock;
+       size_t                  blocks_per_page;
+       size_t                  size;
+       int                     flags;
+       struct pci_dev          *dev;
+       size_t                  allocation;
+       char                    name [32];
+       wait_queue_head_t       waitq;
+};
+
+struct pci_page {      /* cacheable header for 'allocation' bytes */
+       struct list_head        page_list;
+       void                    *vaddr;
+       dma_addr_t              dma;
+       unsigned long           bitmap [0];
+};
+
+#define        POOL_TIMEOUT_JIFFIES    ((100 /* msec */ * HZ) / 1000)
+#define        POOL_POISON_BYTE        0x97
+
+// #define CONFIG_PCIPOOL_DEBUG
+
+
+/**
+ * pci_pool_create - Creates a pool of pci consistent memory blocks, for dma.
+ * @name: name of pool, for diagnostics
+ * @pdev: pci device that will be doing the DMA
+ * @size: size of the blocks in this pool.
+ * @align: alignment requirement for blocks; must be a power of two
+ * @allocation: returned blocks won't cross this boundary (or zero)
+ * @flags: SLAB_* (or GFP_*) flags (not all are supported).
+ *
+ * Returns a pci allocation pool with the requested characteristics, or
+ * null if one can't be created.  Given one of these pools, pci_pool_alloc()
+ * may be used to allocate memory.  Such memory will all have "consistent"
+ * DMA mappings, accessible by the device and its driver without using
+ * cache flushing primitives.  The actual size of blocks allocated may be
+ * larger than requested because of alignment.
+ *
+ * If allocation is nonzero, objects returned from pci_pool_alloc() won't
+ * cross that size boundary.  This may be useful for devices which have
+ * addressing restrictions on individual DMA transfers, such as not crossing
+ * boundaries of 4KBytes.
+ */
+struct pci_pool *
+pci_pool_create (const char *name, struct pci_dev *pdev,
+       size_t size, size_t align, size_t allocation, int flags)
+{
+       struct pci_pool         *retval;
+
+       if (size == 0)
+               return 0;
+       else if (size < align)
+               size = align;
+       else if ((size % align) != 0) {
+               size += align - 1;
+               size &= ~(align - 1);
+       }
+
+       if (allocation == 0) {
+               if (PAGE_SIZE < size)
+                       allocation = size;
+               else
+                       allocation = PAGE_SIZE;
+               // FIXME: round up for less fragmentation
+       } else if (allocation < size)
+               return 0;
+
+       if (!(retval = kmalloc (sizeof *retval, flags)))
+               return retval;
+
+#ifdef CONFIG_PCIPOOL_DEBUG
+       flags |= SLAB_POISON;
+#endif
+
+       strncpy (retval->name, name, sizeof retval->name);
+       retval->name [sizeof retval->name - 1] = 0;
+
+       retval->dev = pdev;
+       INIT_LIST_HEAD (&retval->page_list);
+       spin_lock_init (&retval->lock);
+       retval->size = size;
+       retval->flags = flags;
+       retval->allocation = allocation;
+       retval->blocks_per_page = allocation / size;
+       init_waitqueue_head (&retval->waitq);
+
+#ifdef CONFIG_PCIPOOL_DEBUG
+       printk (KERN_DEBUG "pcipool create %s/%s size %d, %d/page (%d alloc)\n",
+               pdev->slot_name, retval->name, size,
+               retval->blocks_per_page, allocation);
+#endif
+
+       return retval;
+}
+
+
+static struct pci_page *
+pool_alloc_page (struct pci_pool *pool, int mem_flags)
+{
+       struct pci_page *page;
+       int             mapsize;
+
+       mapsize = pool->blocks_per_page;
+       mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
+       mapsize *= sizeof (long);
+
+       page = (struct pci_page *) kmalloc (mapsize + sizeof *page, mem_flags);
+       if (!page)
+               return 0;
+       page->vaddr = pci_alloc_consistent (pool->dev,
+                               pool->allocation, &page->dma);
+       if (page->vaddr) {
+               memset (page->bitmap, ~0, mapsize);     // bit set == free
+               if (pool->flags & SLAB_POISON)
+                       memset (page->vaddr, POOL_POISON_BYTE, pool->allocation);
+               list_add (&page->page_list, &pool->page_list);
+       } else {
+               kfree (page);
+               page = 0;
+       }
+       return page;
+}
+
+
+static inline int
+is_page_busy (int blocks, unsigned long *bitmap)
+{
+       while (blocks > 0) {
+               if (*bitmap++ != ~0)
+                       return 1;
+               blocks -= BITS_PER_LONG;
+       }
+       return 0;
+}
+
+static void
+pool_free_page (struct pci_pool *pool, struct pci_page *page)
+{
+       dma_addr_t      dma = page->dma;
+
+#ifdef CONFIG_PCIPOOL_DEBUG
+       if (is_page_busy (pool->blocks_per_page, page->bitmap)) {
+               printk (KERN_ERR "pcipool %s/%s, free_page %p busy; leaked\n",
+                       pool->dev->slot_name, pool->name, page->vaddr);
+               goto done;
+       }
+#endif
+
+       if (pool->flags & SLAB_POISON)
+               memset (page->vaddr, POOL_POISON_BYTE, pool->allocation);
+       pci_free_consistent (pool->dev, pool->allocation, page->vaddr, dma);
+done:
+       list_del (&page->page_list);
+       kfree (page);
+}
+
+
+/**
+ * pci_pool_destroy - destroys a pool of pci memory blocks.
+ * @pool: pci pool that will be destroyed
+ *
+ * Caller guarantees that no more memory from the pool is in use,
+ * and that nothing will try to use the pool after this call.
+ */
+void
+pci_pool_destroy (struct pci_pool *pool)
+{
+       unsigned long           flags;
+
+#ifdef CONFIG_PCIPOOL_DEBUG
+       printk (KERN_DEBUG "pcipool destroy %s/%s\n",
+               pool->dev->slot_name, pool->name);
+#endif
+
+       spin_lock_irqsave (&pool->lock, flags);
+       while (!list_empty (&pool->page_list)) {
+               struct pci_page         *page;
+               page = list_entry (pool->page_list.next,
+                               struct pci_page, page_list);
+               pool_free_page (pool, page);
+       }
+       spin_unlock_irqrestore (&pool->lock, flags);
+       kfree (pool);
+}
+
+
+/**
+ * pci_pool_alloc - get a block of consistent memory
+ * @pool: pci pool that will produce the block
+ * @mem_flags: GFP_KERNEL or GFP_ATOMIC
+ * @handle: pointer to dma address of block
+ *
+ * This returns the kernel virtual address of a currently unused block,
+ * and reports its dma address through the handle.
+ */
+void *
+pci_pool_alloc (struct pci_pool *pool, int mem_flags, dma_addr_t *handle)
+{
+       unsigned long           flags;
+       struct list_head        *entry;
+       struct pci_page         *page;
+       int                     map, block;
+       size_t                  offset;
+       void                    *retval;
+
+restart:
+       spin_lock_irqsave (&pool->lock, flags);
+       list_for_each (entry, &pool->page_list) {
+               int             i;
+               page = list_entry (entry, struct pci_page, page_list);
+               /* only cachable accesses here ... */
+               for (map = 0, i = 0;
+                               i < pool->blocks_per_page;
+                               i += BITS_PER_LONG, map++) {
+                       if (page->bitmap [map] == 0)
+                               continue;
+                       block = ffs (page->bitmap [map]);
+                       if ((i + block) <= pool->blocks_per_page) {
+                               block--;
+                               clear_bit (block, &page->bitmap [map]);
+                               offset = (BITS_PER_LONG * map) + block;
+                               offset *= pool->size;
+                               goto ready;
+                       }
+               }
+       }
+       if (!(page = pool_alloc_page (pool, mem_flags))) {
+               if (mem_flags == GFP_KERNEL) {
+                       DECLARE_WAITQUEUE (wait, current);
+
+                       current->state = TASK_INTERRUPTIBLE;
+                       add_wait_queue (&pool->waitq, &wait);
+                       spin_unlock_irqrestore (&pool->lock, flags);
+
+                       schedule_timeout (POOL_TIMEOUT_JIFFIES);
+
+                       current->state = TASK_RUNNING;
+                       remove_wait_queue (&pool->waitq, &wait);
+                       goto restart;
+               }
+               retval = 0;
+               goto done;
+       }
+
+       clear_bit (0, &page->bitmap [0]);
+       offset = 0;
+ready:
+       retval = offset + page->vaddr;
+       *handle = offset + page->dma;
+done:
+       spin_unlock_irqrestore (&pool->lock, flags);
+       return retval;
+}
+
+
+static struct pci_page *
+pool_find_page (struct pci_pool *pool, dma_addr_t dma)
+{
+       unsigned long           flags;
+       struct list_head        *entry;
+       struct pci_page         *page;
+
+       spin_lock_irqsave (&pool->lock, flags);
+       list_for_each (entry, &pool->page_list) {
+               page = list_entry (entry, struct pci_page, page_list);
+               if (dma < page->dma)
+                       continue;
+               if (dma < (page->dma + pool->allocation))
+                       goto done;
+       }
+       page = 0;
+done:
+       spin_unlock_irqrestore (&pool->lock, flags);
+       return page;
+}
+
+
+/**
+ * pci_pool_free - put block back into pci pool
+ * @pool: the pci pool holding the block
+ * @vaddr: virtual address of block
+ * @dma: dma address of block
+ *
+ * Caller promises neither device nor driver will again touch this block
+ * unless it is first re-allocated.
+ */
+void
+pci_pool_free (struct pci_pool *pool, void *vaddr, dma_addr_t dma)
+{
+       struct pci_page         *page;
+       unsigned long           flags;
+       int                     map, block;
+
+       if ((page = pool_find_page (pool, dma)) == 0) {
+               printk (KERN_ERR "pci_pool_free %s/%s, %p/%x (bad dma)\n",
+                       pool->dev->slot_name, pool->name, vaddr, dma);
+               return;
+       }
+#ifdef CONFIG_PCIPOOL_DEBUG
+       if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
+               printk (KERN_ERR "pci_pool_free %s/%s, %p (bad vaddr)/%x\n",
+                       pool->dev->slot_name, pool->name, vaddr, dma);
+               return;
+       }
+#endif
+
+       block = dma - page->dma;
+       block /= pool->size;
+       map = block / BITS_PER_LONG;
+       block %= BITS_PER_LONG;
+
+#ifdef CONFIG_PCIPOOL_DEBUG
+       if (page->bitmap [map] & (1 << block)) {
+               printk (KERN_ERR "pci_pool_free %s/%s, dma %x already free\n",
+                       pool->dev->slot_name, pool->name, dma);
+               return;
+       }
+#endif
+       if (pool->flags & SLAB_POISON)
+               memset (vaddr, POOL_POISON_BYTE, pool->size);
+
+       spin_lock_irqsave (&pool->lock, flags);
+       set_bit (block, &page->bitmap [map]);
+       if (waitqueue_active (&pool->waitq))
+               wake_up (&pool->waitq);
+       else if (!is_page_busy (pool->blocks_per_page, page->bitmap))
+               pool_free_page (pool, page);
+       spin_unlock_irqrestore (&pool->lock, flags);
+}
+
+
 void __init pci_init(void)
 {
        struct pci_dev *dev;
@@ -1420,4 +1758,11 @@
 
 EXPORT_SYMBOL(isa_dma_bridge_buggy);
 EXPORT_SYMBOL(pci_pci_problems);
+
+/* Pool allocator (layer over pci_alloc_consistent) */
+
+EXPORT_SYMBOL (pci_pool_create);
+EXPORT_SYMBOL (pci_pool_destroy);
+EXPORT_SYMBOL (pci_pool_alloc);
+EXPORT_SYMBOL (pci_pool_free);
 

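
One behavioural note: pci_pool_alloc() only waits and retries when it is
called with GFP_KERNEL; with GFP_ATOMIC it returns NULL as soon as a new
page can't be allocated, so interrupt-context callers have to handle the
failure themselves.  Continuing the hypothetical sketch from above:

/* hypothetical: called from the controller's interrupt handler, where
 * sleeping is not allowed, so GFP_ATOMIC is the only safe choice
 */
static struct my_td *my_td_alloc_atomic (void)
{
	dma_addr_t	dma;
	struct my_td	*td;

	td = pci_pool_alloc (td_pool, GFP_ATOMIC, &dma);
	if (!td)
		return 0;	/* out of pool pages; caller retries later */
	td->self = dma;
	return td;
}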