This driver is certainly not ready for prime time, but it
is sufficient to test the DMA engine for BLTs from
user space.

It really implements two separate features which are useful
together.

1. Contiguous memory allocation. Similar to that provided
by cmemk, but not pooled by allocation size.

Uses a first-fit algorithm on a pre-allocated hunk of RAM.
A boot argument like the following reserves 4 MBytes for
sub-allocations:
        dav-dma=pool=0x400000
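
When the driver is built as a module, the same setting can presumably be
passed through its "options" parameter instead, along the lines of:
        insmod dav-dma.ko options=pool=0x400000
(the parameter name matches the module_param() declaration in the patch;
the module file name is an assumption and the invocation is untested).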

The amount of memory served is limited by the largest allocation
available from __get_free_pages (4 MBytes).

Memory is allocated and deallocated using ioctls:
        DAV_ALLOCATE - takes an integer input (size to allocate) and
                returns an integer (offset from the physical base).
        DAV_FREE - takes an integer input (the offset returned by DAV_ALLOCATE)

Allocations by a process are kept in a doubly-linked list and
freed on process exit. The list head is maintained in the file handle
(f->private_data).
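
For illustration, a minimal user-space sketch of the allocate/free round
trip might look like the following (it assumes the dav-dma.h header and
its edma.h dependency are visible to user space, uses the default device
node, and omits most error handling):

        #include <stdio.h>
        #include <fcntl.h>
        #include <unistd.h>
        #include <sys/types.h>
        #include <sys/ioctl.h>
        #include <linux/dav-dma.h>      /* DAV_ALLOCATE, DAV_FREE, DAV_DMA_DEV */

        int main(void)
        {
                int fd = open("/dev/" DAV_DMA_DEV, O_RDWR);
                u_int32_t arg = 4096;   /* in: bytes wanted, out: pool offset */

                if (fd < 0)
                        return 1;
                if (0 == ioctl(fd, DAV_ALLOCATE, &arg)) {
                        printf("allocated 4096 bytes at pool offset 0x%x\n", arg);
                        ioctl(fd, DAV_FREE, &arg);      /* arg holds the offset */
                }
                close(fd);
                return 0;
        }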

A process is expected to mmap() the device (generally /dev/dav-dma-0).
The DAV_POOLINFO ioctl can be used to determine how much RAM to map.
The data structures and ioctls are declared in include/linux/dav-dma.h.
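
A hedged sketch of the mapping step (again using the default device node):

        #include <sys/mman.h>
        #include <sys/types.h>
        #include <sys/ioctl.h>
        #include <linux/dav-dma.h>      /* DAV_POOLINFO, struct dav_dma_pool_t */

        /* Map the whole pool.  An offset returned by DAV_ALLOCATE is then
         * addressable as base + offset on the CPU side, and as
         * pool->physaddr + offset when programming DMA transfers. */
        static void *map_pool(int fd, struct dav_dma_pool_t *pool)
        {
                if (ioctl(fd, DAV_POOLINFO, pool))
                        return MAP_FAILED;
                return mmap(0, pool->size, PROT_READ | PROT_WRITE,
                            MAP_SHARED, fd, 0);
        }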

2. User-mode access to EDMA.

This functionality is not for the faint of heart, and is totally unsafe.
It allows DMAs to and from arbitrary areas of physical memory using the
edmacc_paramentry_regs data structure defined in include/asm/arch/edma.h.
The DAV_DMA_DODMA ioctl executes a synchronous transfer; the call blocks
until the DMA completes. There is currently no support for chaining
DMA descriptors.
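
As a rough illustration only (the a_b_cnt packing - ACNT in the low
half-word, BCNT in the high half-word - and the need to set the
transfer-completion interrupt bits in opt are assumptions based on the
usual EDMA PaRAM layout; check edmacc_paramentry_regs in edma.h before
relying on any of this), a one-shot copy between two physical addresses
inside the pool might be set up like so:

        #include <string.h>
        #include <sys/ioctl.h>
        #include <asm/arch/edma.h>      /* edmacc_paramentry_regs */
        #include <linux/dav-dma.h>      /* DAV_DMA_DODMA */

        /* Blocks until the transfer completes (the driver waits on the
         * EDMA completion callback before returning). */
        static int dma_copy(int fd, unsigned src_phys, unsigned dst_phys,
                            unsigned len)
        {
                edmacc_paramentry_regs p;

                memset(&p, 0, sizeof(p));
                p.src          = src_phys;
                p.dst          = dst_phys;
                p.a_b_cnt      = (1 << 16) | len;  /* assumed: BCNT=1, ACNT=len */
                p.ccnt         = 1;
                p.link_bcntrld = 0xffff;           /* assumed: no linked entry */
                /* p.opt must enable the completion interrupt (TCINTEN and the
                 * TCC field from edma.h); the exact bits are not shown here. */
                return ioctl(fd, DAV_DMA_DODMA, &p);
        }

The physical addresses would typically be pool.physaddr plus offsets
obtained from DAV_ALLOCATE, as described above.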

This driver is written to kernel version 2.6.10 and should apply cleanly
using patch -p1.


Eric
commit 0e5ab37735db754190a4105250a429d49808cf24
Author: Eric Nelson <[EMAIL PROTECTED](none)>
Date:   Sat Oct 20 08:27:37 2007 -0700

    [dav-dma] Added Davinci EDMA driver for userspace access

diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 2c30cea..bd90ab4 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -29,6 +29,13 @@ config IBM_ASM
 
 	  If unsure, say N.
 
+config DAV_DMA
+	tristate "Device driver DaVinci DMA"
+	depends on ARCH_DAVINCI
+	default n
+	---help---
+	  This option enables the use of the DaVinci EDMA engine by applications.
+          It is typically used for bit-blts of images to and from the frame-buffer.
 endmenu
 
 menu "Multimedia Capabilities Port drivers"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 9a39a0e..6be091b 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -4,6 +4,7 @@
 obj- := misc.o	# Dummy rule to force built-in.o to be made
 
 obj-$(CONFIG_IBM_ASM)	+= ibmasm/
+obj-$(CONFIG_DAV_DMA)   += ffit.o dav-dma.o 
 
 obj-$(CONFIG_MCP)              += mcp-core.o
 obj-$(CONFIG_MCP_UCB1200)      += ucb1x00-core.o
diff --git a/drivers/misc/dav-dma.c b/drivers/misc/dav-dma.c
new file mode 100644
index 0000000..5ac2f84
--- /dev/null
+++ b/drivers/misc/dav-dma.c
@@ -0,0 +1,410 @@
+/*
+ *  linux/drivers/misc/dav-dma.c
+ *
+ *  Copyright (C) 2007 Boundary Devices, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <asm/arch/edma.h>
+#include <linux/dav-dma.h>
+#include <asm/uaccess.h>
+#include <linux/mm.h>
+#include "ffit.c"
+#include <linux/dma-mapping.h>
+
+// #define DEBUG
+#ifdef DEBUG
+#define DEBUGMSG( __fmt, ... ) printk( KERN_ERR __fmt, ## __VA_ARGS__ )
+#else
+#define DEBUGMSG( __fmt, ... ) 
+#endif
+
+struct dav_dma_dev_t {
+	struct cdev cdev; /* Char device structure */
+};
+
+static int dav_dma_major = 0 ;
+static int dav_dma_minor = 0 ;
+static int const dav_dma_nr_devs = 1 ;
+static char dav_dma_name[] = {
+        "dav_dma"
+};
+static struct dav_dma_dev_t dav_dma_dev ;
+static struct class_simple *dav_dma_class;
+static DECLARE_WAIT_QUEUE_HEAD(dma_wait);
+static unsigned pool_size = 0 ;
+static void *pool_data ;
+static dma_addr_t pool_phys ;
+static list_header_t *mem_pool ;
+static DECLARE_MUTEX(pool_lock);
+
+typedef struct {
+	struct list_head node_ ;
+	unsigned long    size_ ;
+	unsigned long    pad_ ;	// to 16-bytes
+} allocHeader_t ;
+
+static void *pool_alloc( unsigned size )
+{
+	void *rval = 0 ;
+
+	if( ( 0 == pool_data ) || ( 0 == mem_pool ) ){
+		printk( KERN_ERR "%s: no memory pool\n", __FUNCTION__ );
+		return 0 ;
+	}
+
+	if( down_interruptible(&pool_lock) ) {
+		printk( KERN_ERR "%s: interrupted\n", __FUNCTION__ );
+		return 0 ;
+	}
+
+	rval = rtl_malloc( mem_pool, size );
+
+	DEBUGMSG( "alloc: %u/%p/char 0x%02x\n", size, rval, ((u8 *)rval)[sizeof(allocHeader_t)] );
+
+	up(&pool_lock);
+
+	return rval ;
+}
+
+static void pool_free( void *ptr )
+{
+	if( down_interruptible(&pool_lock) )
+		return ;
+
+	DEBUGMSG( "free: %p/char 0x%02x\n", ptr, ((u8 *)ptr)[sizeof(allocHeader_t)] );
+
+	rtl_free(mem_pool,ptr);
+
+	up(&pool_lock);
+}
+
+static int dav_dma_open(struct inode *i, struct file *f)
+{
+	struct list_head *allocs = (struct list_head *)kmalloc( sizeof(struct list_head), GFP_KERNEL );
+	if( allocs ) {
+		INIT_LIST_HEAD(allocs);
+		f->private_data = allocs ;
+		DEBUGMSG( "%s:\n", __FUNCTION__ );
+		return 0 ;
+	}
+
+	return -ENOMEM ;
+}
+
+static int dav_dma_release(struct inode *i, struct file *f)
+{
+	struct list_head *allocs = (struct list_head *)f->private_data ;
+	DEBUGMSG( "%s:\n", __FUNCTION__ );
+	if( allocs ) {
+		while( !list_empty(allocs) ) {
+			allocHeader_t *node = (allocHeader_t *)allocs->next ;
+			if( ( 0 == node->node_.next )
+			    ||
+			    ( 0 == node->node_.prev )
+			    ||
+			    ( &node->node_ == node->node_.next )
+			    ||
+			    ( &node->node_ == node->node_.prev )
+			    ||
+			    ( 0 != ((unsigned long)node->node_.next & 15 ) )
+			    ||
+			    ( 0 != ((unsigned long)node->node_.prev & 15 ) ) ) {
+				printk( KERN_ERR "Invalid memory node %p/%p/%p/%lu/%lu\n", 
+					node, 
+					node ? node->node_.next : 0,
+					node ? node->node_.prev : 0,
+					node ? node->size_ : 0, 
+					node ? node->pad_ : 0 
+				      );
+				break ;
+			}
+			else {
+				list_del(&node->node_);
+//				memset( node, 0xbb, node->size_ + sizeof(*node) );
+				pool_free(node);
+			}
+		}
+
+		kfree( allocs );
+	}
+
+	f->private_data = 0 ;
+
+	return 0 ;
+}
+
+static void dav_dma_callback(int lch, unsigned short ch_status, void *data)
+{
+	DEBUGMSG( "%s: %u\n", __FUNCTION__, ch_status );
+	wake_up_interruptible(&dma_wait);
+}
+
+static int dav_dma_ioctl(struct inode *i, struct file *f, unsigned int cmd, unsigned long param)
+{
+	DEBUGMSG( "%s\n", __FUNCTION__);
+	switch (cmd) {
+		case DAV_POOLINFO: {
+                        struct dav_dma_pool_t pi ;
+			pi.physaddr = pool_phys ;
+			pi.size = pool_size ;
+			if( 0 == copy_to_user( (void *)param, &pi, sizeof(pi) ) )
+				return 0 ;
+			else
+                                return -EFAULT ;
+		}
+		case DAV_ALLOCATE: {
+			unsigned size ;
+			allocHeader_t *mem ;
+			unsigned long  offs ;
+
+			struct list_head *allocs = (struct list_head *)f->private_data ;
+			if( 0 == allocs )
+				return -EFAULT ;
+
+			if( copy_from_user( &size, (void __user *)param, sizeof(size) ) )
+				return -EFAULT;
+
+			if( 0 == size ) {
+				printk( KERN_ERR "alloc zero size\n" );
+				return -ENOMEM ;
+			}
+
+			mem = (allocHeader_t *)pool_alloc(size+sizeof( allocHeader_t ));
+
+			if( 0 == mem ) {
+				printk( KERN_DEBUG "DAV_ALLOC error: %u bytes\n", size );
+				return -ENOMEM ;
+			}
+
+			mem->size_ = size ;
+			mem->pad_  = 0 ;
+			list_add( &mem->node_, allocs );
+
+			offs = (char *)(mem+1) - (char *)pool_data ;
+
+			if( copy_to_user( (void __user *)param, &offs, sizeof(offs) ) )
+				return -EFAULT ;
+			else
+				return 0 ;
+		}
+		case DAV_FREE: {
+			allocHeader_t *mem ;
+			unsigned long  offs ;
+
+			if( copy_from_user( &offs, (void __user *)param, sizeof(offs) ) )
+				return -EFAULT;
+
+			if( (0 == offs) || (pool_size <= offs) ) {
+				printk( KERN_ERR "Invalid DAV free: %lx\n", offs );
+				return -EFAULT ;
+			}
+
+			mem = (allocHeader_t *)((char *)pool_data + offs - sizeof(allocHeader_t) );
+
+			if( ( 0 == mem->node_.next )
+			    ||
+			    ( 0 == mem->node_.prev )
+			    ||
+			    ( &mem->node_ == mem->node_.next )
+			    ||
+			    ( &mem->node_ == mem->node_.prev )
+			    ||
+			    ( 0 != ((unsigned long)mem->node_.next & 15 ) )
+			    ||
+			    ( 0 != ((unsigned long)mem->node_.prev & 15 ) ) ) {
+				printk( KERN_ERR "free invalid ptr: %p/%p/%p\n", 
+					&mem->node_,
+					mem->node_.next,
+					mem->node_.prev );
+				return -EFAULT ;
+			}
+
+			list_del(&mem->node_);
+//			memset( mem, 0xaa, mem->size_ + sizeof(allocHeader_t) );
+			pool_free(mem);
+			return 0 ;
+		}
+
+		case DAV_DMA_DODMA: {
+			edmacc_paramentry_regs regs ;
+			int rval ;
+			int dmach, tcc=0 ;
+			if( copy_from_user( &regs, (void *)param, sizeof(regs) ) ){
+				printk( KERN_ERR "%s: Invalid user ptr\n", __FUNCTION__ );
+				return -EFAULT ;
+			}
+			DEBUGMSG( "%s: DODMA %p\n", __FUNCTION__, (void *)param );
+			DEBUGMSG( "opt\t\t%8x\n", regs.opt );
+			DEBUGMSG( "src\t\t%8x\n", regs.src );
+			DEBUGMSG( "a_b_cnt\t\t%8x\n", regs.a_b_cnt );
+			DEBUGMSG( "dst\t\t%8x\n", regs.dst );
+			DEBUGMSG( "src_dst_bidx\t\t%8x\n", regs.src_dst_bidx );
+			DEBUGMSG( "link_bcntrld\t\t%8x\n", regs.link_bcntrld );
+			DEBUGMSG( "src_dst_cidx\t\t%8x\n", regs.src_dst_cidx );
+			DEBUGMSG( "ccnt\t\t%8x\n", regs.ccnt );
+			if( 0 != ( rval = davinci_request_dma(DAVINCI_DMA_CHANNEL_ANY, dav_dma_name, dav_dma_callback, f, &dmach, &tcc, EVENTQ_DEFAULT)) ){
+				printk( KERN_ERR "%s: error %d requesting DMA\n", __FUNCTION__, rval );
+				return -EFAULT ;
+			}
+
+			DEBUGMSG( "%s: allocated dma channel %d\n", __FUNCTION__, dmach );
+			davinci_set_dma_params(dmach, &regs);
+			{
+                                DEFINE_WAIT(wait);
+				prepare_to_wait(&dma_wait,&wait,TASK_INTERRUPTIBLE);
+				
+				rval = davinci_start_dma(dmach);
+				DEBUGMSG( "%s: dma %d\n", __FUNCTION__, rval );
+
+				schedule();
+
+				finish_wait(&dma_wait,&wait);
+			} // limit scope of wait
+
+			davinci_free_dma(dmach);
+
+			return rval ;
+		}
+		default:
+			break ;
+	}
+	return -EINVAL;
+}
+
+static int dav_dma_mmap(struct file * file, struct vm_area_struct * vma)
+{
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	vma->vm_flags |= VM_IO|VM_RESERVED;
+	vma->vm_pgoff = pool_phys >> PAGE_SHIFT;
+	return remap_pfn_range( vma, vma->vm_start, vma->vm_pgoff, pool_size, vma->vm_page_prot);
+}
+
+static ssize_t dav_dma_read (struct file *filp, char *buffer,size_t count, loff_t *ppos)
+{
+	if( pool_data && ppos && (*ppos < pool_size) ){
+		unsigned left = pool_size-*ppos ;
+		if( left < count )
+			count = left ;
+		if( copy_to_user( buffer, pool_data+*ppos, count ) ){
+			return -EFAULT ;
+		}
+		*ppos += count ;
+		return count ;
+	}
+	else
+                return -EIO ;
+}
+
+struct file_operations dav_dma_fops = {
+	.owner = THIS_MODULE,
+	.ioctl = dav_dma_ioctl,
+	.open = dav_dma_open,
+	.release = dav_dma_release,
+	.read = dav_dma_read,
+	.mmap = dav_dma_mmap
+};
+
+static void dav_dma_setup_cdev(struct dav_dma_dev_t *dev)
+{
+	int err, devno = MKDEV(dav_dma_major, dav_dma_minor );
+	cdev_init(&dev->cdev, &dav_dma_fops);
+	dev->cdev.owner = THIS_MODULE;
+	dev->cdev.ops = &dav_dma_fops;
+	err = cdev_add (&dev->cdev, devno, 1);
+	/* Fail gracefully if need be */
+	if (err)
+		printk(KERN_NOTICE "Error %d adding dav_dma", err);
+}
+
+static int dav_dma_setup(char *options)
+{
+	char *this_opt;
+	int rval = 0 ;
+	while ((this_opt = strsep(&options, ",")) != NULL) {
+		if( 0 == strncmp("pool=",this_opt,5) ){
+			pool_size = PAGE_ALIGN(simple_strtoul(this_opt+5,0,0));
+			printk( KERN_ERR "pool size == 0x%x, order 0x%x\n", pool_size, get_order(pool_size) );
+			pool_size = (1<<get_order(pool_size))<<PAGE_SHIFT ;
+		}
+		else if( *this_opt ){
+			printk( KERN_ERR "Unknown option %s\n", this_opt );
+			rval = -1 ;
+			break;
+		}
+	}
+
+	return 0 ;
+}
+
+static char *options = "";
+module_param(options, charp, S_IRUGO);
+
+static int dav_dma_init(void)
+{
+	int result ;
+	DEBUGMSG( "%s\n", __FUNCTION__);
+	if (dav_dma_major) {
+		int dev = MKDEV(dav_dma_major, dav_dma_minor);
+		result = register_chrdev_region(dev, dav_dma_nr_devs, dav_dma_name);
+	} else {
+		int dev ;
+		result = alloc_chrdev_region(&dev, dav_dma_minor, dav_dma_nr_devs, dav_dma_name);
+		dav_dma_major = MAJOR(dev);
+	}
+	if (result < 0) {
+		printk(KERN_WARNING "dav_dma: can't get major %d\n", dav_dma_major);
+		return result ;
+	}
+
+	printk( KERN_INFO "registered chrdrv %s: %u\n", dav_dma_name, dav_dma_major );
+	dav_dma_setup(options);
+	if( 0 < pool_size ){
+		pool_data = dma_alloc_coherent( 0, pool_size, &pool_phys, GFP_KERNEL | GFP_DMA );
+		printk( KERN_ERR "dma_alloc: %p (phys %p)\n", pool_data, (void *)pool_phys );
+//dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, int gfp)
+      // pool_data = (void *)__get_free_pages(GFP_KERNEL|GFP_DMA,get_order(pool_size));
+		if( pool_data ){
+			mem_pool = init_memory_pool(pool_size, pool_data);
+			printk( KERN_ERR "memory pool header at %p\n", mem_pool );
+		}
+		else
+			printk( KERN_ERR "Error allocating pool of 0x%x bytes\n", pool_size );
+	}
+	else
+		printk( KERN_INFO "%s: no memory pool allocated\n", __FUNCTION__ );
+
+	dav_dma_setup_cdev(&dav_dma_dev);
+	dav_dma_class = class_simple_create(THIS_MODULE, dav_dma_name);
+	class_simple_device_add(dav_dma_class, MKDEV(dav_dma_major, dav_dma_minor), NULL, "dav-dma-%d", 0);
+
+	return result ;
+}
+static void dav_dma_exit(void)
+{
+	DEBUGMSG( "%s\n", __FUNCTION__);
+	if( pool_data ){
+//		free_pages( (unsigned long)pool_data, get_order(pool_size) );
+                dma_free_coherent(NULL, pool_size, (void *)pool_data, pool_phys );
+	}
+        class_simple_device_remove(MKDEV(dav_dma_major, dav_dma_minor));
+        class_simple_destroy(dav_dma_class);
+	cdev_del(&dav_dma_dev.cdev);
+	unregister_chrdev_region(MKDEV(dav_dma_major, dav_dma_minor), dav_dma_nr_devs);
+}
+module_init(dav_dma_init);
+module_exit(dav_dma_exit);
+
+MODULE_AUTHOR("Boundary Devices <[EMAIL PROTECTED]>");
+MODULE_DESCRIPTION("DaVinci EDMA driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/ffit.c b/drivers/misc/ffit.c
new file mode 100644
index 0000000..93b133f
--- /dev/null
+++ b/drivers/misc/ffit.c
@@ -0,0 +1,263 @@
+/*
+ * First-Fit / Best-Fit
+ * Version 0.2
+ *
+ * Written by Miguel Masmano Tello <[EMAIL PROTECTED]>
+ * Best-Fit strategy implemented by Ismael Ripoll <[EMAIL PROTECTED]>
+ *
+ * Thanks to Ismael Ripoll for his suggestions and reviews
+ *
+ * Copyright (C) April 2004
+ *
+ * This code is released using a dual license strategy: GPL/LGPL
+ * You can choose the license that better fits your requirements.
+ *
+ * Released under the terms of the GNU General Public License Version 2.0
+ * Released under the terms of the GNU Lesser General Public License Version 2.1
+ *
+ */
+
+
+#include "ffit.h"
+#include <linux/slab.h>
+#include <linux/stddef.h>
+
+/////////////////////////////////////////////////////////////////////////////
+// Once SANITY_CHECK is enabled, following functions can be used: memory_dump,
+// show_structure, and so on.
+//#define SANITY_CHECK
+//////////////////////////////////////////////////////////////////////////////
+
+
+// Some trivial definitions like NULL and printf
+#ifndef NULL
+   #define NULL ((void *)0)
+#endif
+
+#include <linux/kernel.h>
+#define ERRMSG( fmt, ... ) printk( KERN_ERR fmt, ## __VA_ARGS__ )
+
+
+list_header_t *init_memory_pool(unsigned int size, char *ptr) {
+   list_header_t *list = kmalloc( sizeof(list_header_t), GFP_KERNEL );
+   list->head = (list_t *) ptr;
+#ifdef SANITY_CHECK
+   list->first_block = list->head;
+   list->head -> mn = MN;
+#endif
+
+   list->HEADER_SIZE = (unsigned int)list->head->mem.ptr - (unsigned int) list->head;
+   size = size & (~0x3);
+   list->MIN_SIZE    = 16 ;
+
+   if (list->HEADER_SIZE > list->MIN_SIZE)
+   {
+      list->MIN_SIZE = ( ( ( list->HEADER_SIZE + 15 ) / 16 ) * 16 );
+   }
+
+   list->head -> size = size - list->HEADER_SIZE;
+   SET_FREE_BLOCK (list->head);
+   SET_LAST_BLOCK (list->head);
+   list->head->mem.free_ptr.next = NULL;
+   list->head->mem.free_ptr.prev = NULL;
+   list->head->prev_phys = NULL;
+
+   return list ;
+}
+
+
+// No matter if it is used
+void destroy_memory_pool(list_header_t *list){
+   kfree( list );
+}
+
+void *rtl_malloc( list_header_t *list, unsigned int size ) {
+   list_t *aux, *pos = NULL, *new, *bh3;
+   unsigned int new_size;
+
+   if (!size) return (void *) NULL;
+   if (size < list->MIN_SIZE) size = list->MIN_SIZE;
+
+   // Rounding up the requested size
+   size = ( ( size + 15 ) / 16 ) * 16 ;
+
+   aux = list->head;
+
+#ifdef BESTFIT
+   for ( ; aux ;  aux = aux -> mem.free_ptr.next)
+   {
+      if (GET_BLOCK_SIZE (aux) >= size)
+      {
+         if (!pos || (GET_BLOCK_SIZE (pos) > GET_BLOCK_SIZE(aux)))
+         {
+            pos = aux;
+         }
+         if (GET_BLOCK_SIZE(pos) == size)
+            break;
+      }
+   }
+#else // FIRST_FIST
+   for ( ; aux ;  aux = aux -> mem.free_ptr.next)
+   {
+      if (GET_BLOCK_SIZE(aux) >= size)
+      {
+         pos = aux;
+         break;
+      }
+   }
+#endif  // BESTFIT
+
+   aux = pos;
+
+   if (!aux) return (void *) NULL;
+
+   if (aux -> mem.free_ptr.next)
+      aux -> mem.free_ptr.next -> mem.free_ptr.prev = aux -> mem.free_ptr.prev;
+
+   if (aux -> mem.free_ptr.prev)
+      aux -> mem.free_ptr.prev -> mem.free_ptr.next = aux -> mem.free_ptr.next;
+
+   if (list->head == aux)
+      list->head = aux -> mem.free_ptr.next;
+
+   SET_USED_BLOCK (aux);
+
+   aux -> mem.free_ptr.next = NULL;
+   aux -> mem.free_ptr.prev = NULL;
+
+   new_size = GET_BLOCK_SIZE(aux) - size - list->HEADER_SIZE;
+   if (((int) new_size) >= (int)list->MIN_SIZE)
+   {
+      new = (list_t *) (((char *) aux) + (unsigned long)list->HEADER_SIZE + (unsigned long) size);
+      new -> size = new_size;
+
+      new -> mem.free_ptr.prev = NULL;
+      new -> mem.free_ptr.next = NULL;
+
+      SET_FREE_BLOCK (new);
+
+      new -> prev_phys = aux;
+      if (IS_LAST_BLOCK(aux))
+      {
+         SET_LAST_BLOCK (new);
+      }
+      else
+      {
+         // updating prev_phys pointer
+         bh3 = (list_t *)((char *) new + (unsigned long)list->HEADER_SIZE + (unsigned long) GET_BLOCK_SIZE(new));
+         bh3 -> prev_phys = new;
+      }
+
+      aux -> size = size;
+      SET_USED_BLOCK (aux);
+
+      // the new block is indexed inside of the list of free blocks
+      new -> mem.free_ptr.next = list->head;
+      if (list->head)
+         list->head -> mem.free_ptr.prev = new;
+      list->head = new;
+
+   }
+   return (void *)aux->mem.ptr ;
+}
+
+void rtl_free(list_header_t *list, void *ptr) {
+   list_t *b = (list_t *) ((char *)ptr - list->HEADER_SIZE), 
+          *b2, 
+          *b3;
+   if (!ptr)
+   {
+      ERRMSG ("FREE ERROR: ptr cannot be null\n");
+      return;
+   }
+
+#ifdef SANITY_CHECK
+   check_list ("Entrando free");
+   if (b -> mn != MN)
+   {
+      PRINTF ("ERROR MN 1\n");
+      PRINTF ("size ->%d\n", b -> size);
+      return;
+   }
+#endif
+   if (!IS_USED_BLOCK(b))
+   {
+      ERRMSG ("You are releasing a previously released block\n");
+      return;
+   }
+   SET_FREE_BLOCK (b);
+   b -> mem.free_ptr.next = NULL;
+   b -> mem.free_ptr.prev = NULL;
+   if (b -> prev_phys)
+   {
+      b2 = b -> prev_phys;
+      if (!IS_USED_BLOCK (b2))
+      {
+#ifdef SANITY_CHECK
+         list->blocks --;
+#endif
+         b2 -> size = GET_BLOCK_SIZE(b2) + GET_BLOCK_SIZE (b) + list->HEADER_SIZE;
+         if (b2 -> mem.free_ptr.next)
+            b2 -> mem.free_ptr.next -> mem.free_ptr.prev = b2 -> mem.free_ptr.prev;
+
+         if (b2 -> mem.free_ptr.prev)
+            b2 -> mem.free_ptr.prev -> mem.free_ptr.next = b2 -> mem.free_ptr.next;
+
+         // remove b2 from free list (added below)
+
+         if( list->head == b2)
+            list->head = b2 -> mem.free_ptr.next;
+
+         SET_FREE_BLOCK (b2);
+         b2 -> mem.free_ptr.next = NULL;
+         b2 -> mem.free_ptr.prev = NULL;
+         if (IS_LAST_BLOCK (b))
+         {
+            SET_LAST_BLOCK (b2);
+         }
+         else
+         {
+            b3 = (list_t *) (((char *) b2) + (unsigned long) list->HEADER_SIZE + 
+                             (unsigned long) GET_BLOCK_SIZE (b2));
+            b3 -> prev_phys = b2;
+         }
+         b = b2;
+      } // collapse with predecessor
+   }
+   if (!IS_LAST_BLOCK (b))
+   {
+      b2 = (list_t *) (((char *) b) + (unsigned long) list->HEADER_SIZE + 
+                       (unsigned long) GET_BLOCK_SIZE (b));
+
+      if (!IS_USED_BLOCK (b2))
+      {
+         b -> size += GET_BLOCK_SIZE(b2) + list->HEADER_SIZE;
+
+         if (b2 -> mem.free_ptr.next)
+            b2 -> mem.free_ptr.next -> mem.free_ptr.prev = b2 -> mem.free_ptr.prev;
+
+         if (b2 -> mem.free_ptr.prev)
+            b2 -> mem.free_ptr.prev -> mem.free_ptr.next = b2 -> mem.free_ptr.next;
+
+         if (list->head == b2)
+            list->head = b2 -> mem.free_ptr.next;
+         b2 -> mem.free_ptr.next = NULL;
+         b2 -> mem.free_ptr.prev = NULL;
+
+         if (IS_LAST_BLOCK (b2))
+         {
+            SET_LAST_BLOCK (b);
+         }
+         else
+         {
+            b3 = (list_t *) (((char *) b) + (unsigned long) list->HEADER_SIZE + (unsigned long) GET_BLOCK_SIZE (b));
+            b3 -> prev_phys = b;
+         }
+      }
+   }
+   b -> mem.free_ptr.next = list->head;
+
+   if (list->head)
+      list->head -> mem.free_ptr.prev = b;
+   list->head = b;
+}
diff --git a/drivers/misc/ffit.h b/drivers/misc/ffit.h
new file mode 100644
index 0000000..a80f339
--- /dev/null
+++ b/drivers/misc/ffit.h
@@ -0,0 +1,67 @@
+#ifndef _FFIT_H_
+#define _FFIT_H_
+
+// By default this implementation uses a First-Fit strategy, nonetheless if 
+// the BESTFIT macro is set then a Best-Fit strategy is used to find a
+// free block
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define BESTFIT
+
+struct free_ptr {
+  struct head_struct *next, *prev;
+};
+
+typedef struct head_struct {
+  unsigned long size;
+  struct head_struct *prev_phys;
+  unsigned long pad[2]; // make header 16-bytes
+  union mem {
+    struct free_ptr free_ptr;
+    unsigned char ptr[sizeof (struct free_ptr)];
+  } mem;
+} list_t;
+
+typedef struct list_header_struct {
+  list_t      *head;
+  unsigned int HEADER_SIZE; 
+  unsigned int MIN_SIZE ;
+} list_header_t;
+
+
+list_header_t *init_memory_pool (unsigned int size, char *ptr);
+void *rtl_malloc(list_header_t *list, unsigned int size);
+void rtl_free(list_header_t *list, void *ptr);
+void destroy_memory_pool(list_header_t *list);
+
+// Next function just can be used if the SANITY_CHECK macro has been defined
+#ifdef SANITY_CHECK
+void print_list (void);
+void dump_memory_region (unsigned char *mem_ptr, unsigned int size);
+void print_phys_list (void);
+#endif
+
+#define USED_BLOCK 0x80000000
+#define FREE_BLOCK ~USED_BLOCK //0x7FFFFFFF
+
+#define LAST_BLOCK 0x40000000
+#define NOT_LAST_BLOCK ~LAST_BLOCK //0xBFFFFFFF
+
+#define IS_USED_BLOCK(x) ((x -> size & USED_BLOCK) == USED_BLOCK)
+#define IS_LAST_BLOCK(x) ((x -> size & LAST_BLOCK) == LAST_BLOCK)
+#define GET_BLOCK_SIZE(x) (x -> size & FREE_BLOCK & NOT_LAST_BLOCK)
+#define SET_USED_BLOCK(x) (x -> size |= USED_BLOCK)
+#define SET_FREE_BLOCK(x) (x -> size &= FREE_BLOCK)
+#define SET_LAST_BLOCK(x) (x -> size |= LAST_BLOCK)
+#define SET_NOT_LAST_BLOCK(x) (x -> size &= NOT_LAST_BLOCK)
+
+#define LISTHEADERSIZE offsetof(list_t,mem)
+
+#ifdef __cplusplus
+}; // extern "C" 
+#endif
+
+#endif
diff --git a/include/linux/dav-dma.h b/include/linux/dav-dma.h
new file mode 100644
index 0000000..75ca51e
--- /dev/null
+++ b/include/linux/dav-dma.h
@@ -0,0 +1,25 @@
+#ifndef DAV_DMA_H
+#define DAV_DMA_H
+/*
+ * dav-dma.h
+ *
+ * Declares the ioctls used to use the DaVinci EDMA driver.
+ *
+ */
+#include <asm/arch/edma.h>
+ 
+#define BASE_MAGIC '\xDd'
+
+struct dav_dma_pool_t {
+   u_int32_t   physaddr ;
+   u_int32_t   size ;
+};
+
+#define DAV_POOLINFO		_IOW(BASE_MAGIC, 0x01, struct dav_dma_pool_t)
+#define DAV_DMA_DODMA		_IOR(BASE_MAGIC, 0x02, edmacc_paramentry_regs)
+#define DAV_ALLOCATE		_IOWR(BASE_MAGIC, 0x03, u_int32_t)
+#define DAV_FREE		_IOW(BASE_MAGIC, 0x04, u_int32_t)
+
+#define DAV_DMA_DEV "dav-dma-0"
+
+#endif
commit 0c4a03c7d7c90652a82e25fb0f195e461fb981cb
Author: Eric Nelson <[EMAIL PROTECTED](none)>
Date:   Sat Oct 20 16:46:23 2007 -0700

    [dav-dma] fix non-module compilation

diff --git a/drivers/misc/dav-dma.c b/drivers/misc/dav-dma.c
index 5ac2f84..0b1c045 100644
--- a/drivers/misc/dav-dma.c
+++ b/drivers/misc/dav-dma.c
@@ -19,7 +19,12 @@
 #include <linux/dav-dma.h>
 #include <asm/uaccess.h>
 #include <linux/mm.h>
+
+#ifndef MODULE
+#include "ffit.h"
+#else
 #include "ffit.c"
+#endif
 #include <linux/dma-mapping.h>
 
 // #define DEBUG
@@ -331,6 +336,8 @@ static int dav_dma_setup(char *options)
 {
 	char *this_opt;
 	int rval = 0 ;
+   printk( KERN_ERR "%s options=%s\n", __FUNCTION__, options );
+
 	while ((this_opt = strsep(&options, ",")) != NULL) {
 		if( 0 == strncmp("pool=",this_opt,5) ){
 			pool_size = PAGE_ALIGN(simple_strtoul(this_opt+5,0,0));
@@ -350,6 +357,18 @@ static int dav_dma_setup(char *options)
 static char *options = "";
 module_param(options, charp, S_IRUGO);
 
+#ifndef MODULE
+int __init save_options(char *args)
+{
+	if (!args || !*args)
+		return 0;
+   options=args ;
+
+	return 0;
+}
+__setup("dav-dma=", save_options);
+#endif
+
 static int dav_dma_init(void)
 {
 	int result ;
@@ -390,6 +409,7 @@ static int dav_dma_init(void)
 
 	return result ;
 }
+
 static void dav_dma_exit(void)
 {
 	DEBUGMSG( "%s\n", __FUNCTION__);
commit 46c15b25f09b19c0755b277931e67a7651f31825
Author: Eric Nelson <[EMAIL PROTECTED](none)>
Date:   Mon Nov 5 11:40:46 2007 -0700

    [dav-dma] make local function static

diff --git a/drivers/misc/dav-dma.c b/drivers/misc/dav-dma.c
index 0b1c045..13b2445 100644
--- a/drivers/misc/dav-dma.c
+++ b/drivers/misc/dav-dma.c
@@ -358,7 +358,7 @@ static char *options = "";
 module_param(options, charp, S_IRUGO);
 
 #ifndef MODULE
-int __init save_options(char *args)
+static int __init save_options(char *args)
 {
 	if (!args || !*args)
 		return 0;