Module Name:    xsrc
Committed By:   mrg
Date:           Sat Dec 13 21:39:07 UTC 2014

Modified Files:
        xsrc/external/mit/libdrm/dist: xf86atomic.h xf86drm.c xf86drmMode.c
            xf86drmMode.h
        xsrc/external/mit/libdrm/dist/include/drm: drm.h
        xsrc/external/mit/libdrm/dist/intel: intel_bufmgr_gem.c
        xsrc/external/mit/libdrm/dist/radeon: radeon_bo_gem.c radeon_cs_gem.c
            radeon_surface.c
        xsrc/external/mit/libdrm/dist/tests/modetest: modetest.c
Removed Files:
        xsrc/external/mit/libdrm/dist: config.guess config.sub depcomp
            install-sh ltmain.sh missing
        xsrc/external/mit/libdrm/dist/include: Makefile.am Makefile.in
        xsrc/external/mit/libdrm/dist/include/drm: Makefile Makefile.am
            Makefile.in i810_drm.h i830_drm.h

Log Message:
merge libdrm 2.4.58.


To generate a diff of this commit:
cvs rdiff -u -r1.3 -r0 xsrc/external/mit/libdrm/dist/config.guess \
    xsrc/external/mit/libdrm/dist/config.sub \
    xsrc/external/mit/libdrm/dist/depcomp \
    xsrc/external/mit/libdrm/dist/install-sh \
    xsrc/external/mit/libdrm/dist/ltmain.sh \
    xsrc/external/mit/libdrm/dist/missing
cvs rdiff -u -r1.4 -r1.5 xsrc/external/mit/libdrm/dist/xf86atomic.h
cvs rdiff -u -r1.8 -r1.9 xsrc/external/mit/libdrm/dist/xf86drm.c
cvs rdiff -u -r1.6 -r1.7 xsrc/external/mit/libdrm/dist/xf86drmMode.c
cvs rdiff -u -r1.2 -r1.3 xsrc/external/mit/libdrm/dist/xf86drmMode.h
cvs rdiff -u -r1.1.1.1 -r0 xsrc/external/mit/libdrm/dist/include/Makefile.am
cvs rdiff -u -r1.1.1.5 -r0 xsrc/external/mit/libdrm/dist/include/Makefile.in
cvs rdiff -u -r1.1.1.6 -r0 xsrc/external/mit/libdrm/dist/include/drm/Makefile
cvs rdiff -u -r1.1.1.2 -r0 \
    xsrc/external/mit/libdrm/dist/include/drm/Makefile.am
cvs rdiff -u -r1.1.1.5 -r0 \
    xsrc/external/mit/libdrm/dist/include/drm/Makefile.in
cvs rdiff -u -r1.2 -r1.3 xsrc/external/mit/libdrm/dist/include/drm/drm.h
cvs rdiff -u -r1.1.1.1 -r0 \
    xsrc/external/mit/libdrm/dist/include/drm/i810_drm.h \
    xsrc/external/mit/libdrm/dist/include/drm/i830_drm.h
cvs rdiff -u -r1.8 -r1.9 \
    xsrc/external/mit/libdrm/dist/intel/intel_bufmgr_gem.c
cvs rdiff -u -r1.4 -r1.5 xsrc/external/mit/libdrm/dist/radeon/radeon_bo_gem.c \
    xsrc/external/mit/libdrm/dist/radeon/radeon_surface.c
cvs rdiff -u -r1.3 -r1.4 xsrc/external/mit/libdrm/dist/radeon/radeon_cs_gem.c
cvs rdiff -u -r1.4 -r1.5 \
    xsrc/external/mit/libdrm/dist/tests/modetest/modetest.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: xsrc/external/mit/libdrm/dist/xf86atomic.h
diff -u xsrc/external/mit/libdrm/dist/xf86atomic.h:1.4 xsrc/external/mit/libdrm/dist/xf86atomic.h:1.5
--- xsrc/external/mit/libdrm/dist/xf86atomic.h:1.4	Wed Jul  9 14:12:39 2014
+++ xsrc/external/mit/libdrm/dist/xf86atomic.h	Sat Dec 13 21:39:07 2014
@@ -113,4 +113,13 @@ typedef struct { int atomic; } atomic_t;
 #error libdrm requires atomic operations, please define them for your CPU/compiler.
 #endif
 
+static inline int atomic_add_unless(atomic_t *v, int add, int unless)
+{
+	int c, old;
+	c = atomic_read(v);
+	while (c != unless && (old = atomic_cmpxchg(v, c, c + add)) != c)
+		c = old;
+	return c == unless;
+}
+
 #endif
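
The new atomic_add_unless() helper adds "add" to the counter only when the
counter does not currently equal "unless", and it returns non-zero when the
counter already equals "unless" (i.e. the add was skipped).  The intel
buffer manager changes further down use it to drop references without
taking a lock except when the count might reach zero.  A minimal sketch of
that pattern, with a hypothetical object type and cache lock standing in
for the real bo/bufmgr code:

    #include <pthread.h>
    #include <stdlib.h>
    #include "xf86atomic.h"

    static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

    struct obj {                        /* hypothetical refcounted object */
        atomic_t refcount;
    };

    static void obj_free_locked(struct obj *o)  /* cache_lock held */
    {
        free(o);
    }

    static void obj_unref(struct obj *o)
    {
        /* A non-zero return means refcount was already 1, the decrement
         * was skipped, and we may hold the last reference: take the
         * lock and re-check there before freeing. */
        if (atomic_add_unless(&o->refcount, -1, 1)) {
            pthread_mutex_lock(&cache_lock);
            if (atomic_dec_and_test(&o->refcount))
                obj_free_locked(o);
            pthread_mutex_unlock(&cache_lock);
        }
    }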

Index: xsrc/external/mit/libdrm/dist/xf86drm.c
diff -u xsrc/external/mit/libdrm/dist/xf86drm.c:1.8 xsrc/external/mit/libdrm/dist/xf86drm.c:1.9
--- xsrc/external/mit/libdrm/dist/xf86drm.c:1.8	Fri Jul 11 19:57:33 2014
+++ xsrc/external/mit/libdrm/dist/xf86drm.c	Sat Dec 13 21:39:07 2014
@@ -48,7 +48,6 @@
 #include <sys/stat.h>
 #define stat_t struct stat
 #include <sys/ioctl.h>
-#include <sys/mman.h>
 #include <sys/time.h>
 #include <stdarg.h>
 
@@ -58,6 +57,7 @@
 #endif
 
 #include "xf86drm.h"
+#include "libdrm.h"
 
 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
 #define DRM_MAJOR 145
@@ -542,19 +542,6 @@ static int drmOpenByName(const char *nam
     int           fd;
     drmVersionPtr version;
     char *        id;
-    
-    if (!drmAvailable()) {
-	if (!drm_server_info) {
-	    return -1;
-	}
-	else {
-	    /* try to load the kernel module now */
-	    if (!drm_server_info->load_module(name)) {
-		drmMsg("[drm] failed to load kernel module \"%s\"\n", name);
-		return -1;
-	    }
-	}
-    }
 
     /*
      * Open the first minor number that matches the driver name and isn't
@@ -1165,7 +1152,7 @@ int drmMap(int fd, drm_handle_t handle, 
 	return 0;
     }
 #endif
-    *address = mmap(0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, handle);
+    *address = drm_mmap(0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, handle);
     if (*address == MAP_FAILED)
 	return -errno;
     return 0;
@@ -1185,7 +1172,7 @@ int drmMap(int fd, drm_handle_t handle, 
  */
 int drmUnmap(drmAddress address, drmSize size)
 {
-    return munmap(address, size);
+    return drm_munmap(address, size);
 }
 
 drmBufInfoPtr drmGetBufInfo(int fd)
@@ -1292,7 +1279,7 @@ int drmUnmapBufs(drmBufMapPtr bufs)
     int i;
 
     for (i = 0; i < bufs->count; i++) {
-	munmap(bufs->list[i].address, bufs->list[i].total);
+	drm_munmap(bufs->list[i].address, bufs->list[i].total);
     }
 
     drmFree(bufs->list);
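
xf86drm.c now maps and unmaps through drm_mmap()/drm_munmap() from the new
private libdrm.h header instead of calling mmap()/munmap() directly, so
every mapping goes through one place that can paper over platform
differences (e.g. 32-bit targets without a 64-bit mmap offset).  On a
platform where mmap() already handles large offsets, a wrapper pair along
these lines is all that is needed; this is a sketch, not the header's
actual contents:

    #include <sys/mman.h>

    /* Assumes mmap() takes a 64-bit offset on this platform, so the
     * wrappers can simply forward. */
    #define drm_mmap(addr, length, prot, flags, fd, offset) \
            mmap(addr, length, prot, flags, fd, offset)

    static inline int drm_munmap(void *addr, size_t length)
    {
        return munmap(addr, length);
    }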

Index: xsrc/external/mit/libdrm/dist/xf86drmMode.c
diff -u xsrc/external/mit/libdrm/dist/xf86drmMode.c:1.6 xsrc/external/mit/libdrm/dist/xf86drmMode.c:1.7
--- xsrc/external/mit/libdrm/dist/xf86drmMode.c:1.6	Fri Jul 11 20:45:15 2014
+++ xsrc/external/mit/libdrm/dist/xf86drmMode.c	Sat Dec 13 21:39:07 2014
@@ -819,6 +819,8 @@ int drmCheckModesettingSupported(const c
 	drmClose(fd);
 	if (ret == 0)
 		return 0;
+#elif defined(__DragonFly__)
+	return 0;
 #endif
 	return -ENOSYS;
 

Index: xsrc/external/mit/libdrm/dist/xf86drmMode.h
diff -u xsrc/external/mit/libdrm/dist/xf86drmMode.h:1.2 xsrc/external/mit/libdrm/dist/xf86drmMode.h:1.3
--- xsrc/external/mit/libdrm/dist/xf86drmMode.h:1.2	Fri Jul 11 20:03:34 2014
+++ xsrc/external/mit/libdrm/dist/xf86drmMode.h	Sat Dec 13 21:39:07 2014
@@ -296,6 +296,10 @@ typedef struct _drmModeConnector {
 	uint32_t *encoders; /**< List of encoder ids */
 } drmModeConnector, *drmModeConnectorPtr;
 
+#define DRM_PLANE_TYPE_OVERLAY 0
+#define DRM_PLANE_TYPE_PRIMARY 1
+#define DRM_PLANE_TYPE_CURSOR  2
+
 typedef struct _drmModeObjectProperties {
 	uint32_t count_props;
 	uint32_t *props;
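
The DRM_PLANE_TYPE_* values added to xf86drmMode.h name the possible
values of the "type" enum property the kernel attaches to each plane, so
callers can tell primary and cursor planes apart from overlays once the
universal plane list is exposed.  A sketch of classifying a plane that way
(error handling kept minimal; assumes fd is an open DRM device):

    #include <stdint.h>
    #include <string.h>
    #include "xf86drmMode.h"

    static int get_plane_type(int fd, uint32_t plane_id)
    {
        drmModeObjectPropertiesPtr props;
        int type = -1;
        uint32_t i;

        props = drmModeObjectGetProperties(fd, plane_id,
                                           DRM_MODE_OBJECT_PLANE);
        if (!props)
            return -1;

        for (i = 0; i < props->count_props; i++) {
            drmModePropertyPtr prop = drmModeGetProperty(fd, props->props[i]);

            if (prop && strcmp(prop->name, "type") == 0)
                type = (int)props->prop_values[i];  /* DRM_PLANE_TYPE_* */
            drmModeFreeProperty(prop);
        }
        drmModeFreeObjectProperties(props);
        return type;
    }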

Index: xsrc/external/mit/libdrm/dist/include/drm/drm.h
diff -u xsrc/external/mit/libdrm/dist/include/drm/drm.h:1.2 xsrc/external/mit/libdrm/dist/include/drm/drm.h:1.3
--- xsrc/external/mit/libdrm/dist/include/drm/drm.h:1.2	Fri Mar 21 18:09:49 2014
+++ xsrc/external/mit/libdrm/dist/include/drm/drm.h	Sat Dec 13 21:39:07 2014
@@ -627,6 +627,14 @@ struct drm_get_cap {
  */
 #define DRM_CLIENT_CAP_STEREO_3D	1
 
+/**
+ * DRM_CLIENT_CAP_UNIVERSAL_PLANES
+ *
+ * if set to 1, the DRM core will expose the full universal plane list
+ * (including primary and cursor planes).
+ */
+#define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2
+
 /** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
 struct drm_set_client_cap {
 	__u64 capability;
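
DRM_CLIENT_CAP_UNIVERSAL_PLANES is enabled per file descriptor through the
existing DRM_IOCTL_SET_CLIENT_CAP path; from userspace that is a single
drmSetClientCap() call made before enumerating planes.  A minimal sketch:

    #include "xf86drm.h"

    /* Opt in to the full plane list.  Without this cap the kernel hides
     * primary and cursor planes and drmModeGetPlaneResources() reports
     * only overlay planes. */
    static int enable_universal_planes(int fd)
    {
        return drmSetClientCap(fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1);
    }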

Index: xsrc/external/mit/libdrm/dist/intel/intel_bufmgr_gem.c
diff -u xsrc/external/mit/libdrm/dist/intel/intel_bufmgr_gem.c:1.8 xsrc/external/mit/libdrm/dist/intel/intel_bufmgr_gem.c:1.9
--- xsrc/external/mit/libdrm/dist/intel/intel_bufmgr_gem.c:1.8	Wed May  7 21:24:25 2014
+++ xsrc/external/mit/libdrm/dist/intel/intel_bufmgr_gem.c	Sat Dec 13 21:39:07 2014
@@ -49,7 +49,6 @@
 #include <pthread.h>
 #include <stddef.h>
 #include <sys/ioctl.h>
-#include <sys/mman.h>
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <stdbool.h>
@@ -58,6 +57,7 @@
 #ifndef ETIME
 #define ETIME ETIMEDOUT
 #endif
+#include "libdrm.h"
 #include "libdrm_lists.h"
 #include "intel_bufmgr.h"
 #include "intel_bufmgr_priv.h"
@@ -94,6 +94,8 @@ struct drm_intel_gem_bo_bucket {
 typedef struct _drm_intel_bufmgr_gem {
 	drm_intel_bufmgr bufmgr;
 
+	atomic_t refcount;
+
 	int fd;
 
 	int max_relocs;
@@ -111,6 +113,8 @@ typedef struct _drm_intel_bufmgr_gem {
 	int num_buckets;
 	time_t time;
 
+	drmMMListHead managers;
+
 	drmMMListHead named;
 	drmMMListHead vma_cache;
 	int vma_count, vma_open, vma_max;
@@ -183,6 +187,11 @@ struct _drm_intel_bo_gem {
 	void *mem_virtual;
 	/** GTT virtual address for the buffer, saved across map/unmap cycles */
 	void *gtt_virtual;
+	/**
+	 * Virtual address of the buffer allocated by user, used for userptr
+	 * objects only.
+	 */
+	void *user_virtual;
 	int map_count;
 	drmMMListHead vma_list;
 
@@ -222,6 +231,11 @@ struct _drm_intel_bo_gem {
 	bool idle;
 
 	/**
+	 * Boolean of whether this buffer was allocated with userptr
+	 */
+	bool is_userptr;
+
+	/**
 	 * Size in bytes of this buffer and its relocation descendents.
 	 *
 	 * Used to avoid costly tree walking in
@@ -848,13 +862,87 @@ drm_intel_gem_bo_alloc_tiled(drm_intel_b
 					       tiling, stride);
 }
 
+static drm_intel_bo *
+drm_intel_gem_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
+				const char *name,
+				void *addr,
+				uint32_t tiling_mode,
+				uint32_t stride,
+				unsigned long size,
+				unsigned long flags)
+{
+	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
+	drm_intel_bo_gem *bo_gem;
+	int ret;
+	struct drm_i915_gem_userptr userptr;
+
+	/* Tiling with userptr surfaces is not supported
+	 * on all hardware so refuse it for time being.
+	 */
+	if (tiling_mode != I915_TILING_NONE)
+		return NULL;
+
+	bo_gem = calloc(1, sizeof(*bo_gem));
+	if (!bo_gem)
+		return NULL;
+
+	bo_gem->bo.size = size;
+
+	VG_CLEAR(userptr);
+	userptr.user_ptr = (__u64)((unsigned long)addr);
+	userptr.user_size = size;
+	userptr.flags = flags;
+
+	ret = drmIoctl(bufmgr_gem->fd,
+			DRM_IOCTL_I915_GEM_USERPTR,
+			&userptr);
+	if (ret != 0) {
+		DBG("bo_create_userptr: "
+		    "ioctl failed with user ptr %p size 0x%lx, "
+		    "user flags 0x%lx\n", addr, size, flags);
+		free(bo_gem);
+		return NULL;
+	}
+
+	bo_gem->gem_handle = userptr.handle;
+	bo_gem->bo.handle = bo_gem->gem_handle;
+	bo_gem->bo.bufmgr    = bufmgr;
+	bo_gem->is_userptr   = true;
+	bo_gem->bo.virtual   = addr;
+	/* Save the address provided by user */
+	bo_gem->user_virtual = addr;
+	bo_gem->tiling_mode  = I915_TILING_NONE;
+	bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+	bo_gem->stride       = 0;
+
+	DRMINITLISTHEAD(&bo_gem->name_list);
+	DRMINITLISTHEAD(&bo_gem->vma_list);
+
+	bo_gem->name = name;
+	atomic_set(&bo_gem->refcount, 1);
+	bo_gem->validate_index = -1;
+	bo_gem->reloc_tree_fences = 0;
+	bo_gem->used_as_reloc_target = false;
+	bo_gem->has_error = false;
+	bo_gem->reusable = false;
+
+	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
+
+	DBG("bo_create_userptr: "
+	    "ptr %p buf %d (%s) size %ldb, stride 0x%x, tile mode %d\n",
+		addr, bo_gem->gem_handle, bo_gem->name,
+		size, stride, tiling_mode);
+
+	return &bo_gem->bo;
+}
+
 /**
  * Returns a drm_intel_bo wrapping the given buffer object handle.
  *
  * This can be used when one application needs to pass a buffer object
  * to another.
  */
-drm_intel_bo *
+drm_public drm_intel_bo *
 drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
 				  const char *name,
 				  unsigned int handle)
@@ -872,12 +960,14 @@ drm_intel_bo_gem_create_from_name(drm_in
 	 * alternating names for the front/back buffer a linear search
 	 * provides a sufficiently fast match.
 	 */
+	pthread_mutex_lock(&bufmgr_gem->lock);
 	for (list = bufmgr_gem->named.next;
 	     list != &bufmgr_gem->named;
 	     list = list->next) {
 		bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
 		if (bo_gem->global_name == handle) {
 			drm_intel_gem_bo_reference(&bo_gem->bo);
+			pthread_mutex_unlock(&bufmgr_gem->lock);
 			return &bo_gem->bo;
 		}
 	}
@@ -890,6 +980,7 @@ drm_intel_bo_gem_create_from_name(drm_in
 	if (ret != 0) {
 		DBG("Couldn't reference %s handle 0x%08x: %s\n",
 		    name, handle, strerror(errno));
+		pthread_mutex_unlock(&bufmgr_gem->lock);
 		return NULL;
 	}
         /* Now see if someone has used a prime handle to get this
@@ -902,13 +993,16 @@ drm_intel_bo_gem_create_from_name(drm_in
 		bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
 		if (bo_gem->gem_handle == open_arg.handle) {
 			drm_intel_gem_bo_reference(&bo_gem->bo);
+			pthread_mutex_unlock(&bufmgr_gem->lock);
 			return &bo_gem->bo;
 		}
 	}
 
 	bo_gem = calloc(1, sizeof(*bo_gem));
-	if (!bo_gem)
+	if (!bo_gem) {
+		pthread_mutex_unlock(&bufmgr_gem->lock);
 		return NULL;
+	}
 
 	bo_gem->bo.size = open_arg.size;
 	bo_gem->bo.offset = 0;
@@ -930,6 +1024,7 @@ drm_intel_bo_gem_create_from_name(drm_in
 		       &get_tiling);
 	if (ret != 0) {
 		drm_intel_gem_bo_unreference(&bo_gem->bo);
+		pthread_mutex_unlock(&bufmgr_gem->lock);
 		return NULL;
 	}
 	bo_gem->tiling_mode = get_tiling.tiling_mode;
@@ -939,6 +1034,7 @@ drm_intel_bo_gem_create_from_name(drm_in
 
 	DRMINITLISTHEAD(&bo_gem->vma_list);
 	DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
+	pthread_mutex_unlock(&bufmgr_gem->lock);
 	DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
 
 	return &bo_gem->bo;
@@ -955,11 +1051,11 @@ drm_intel_gem_bo_free(drm_intel_bo *bo)
 	DRMLISTDEL(&bo_gem->vma_list);
 	if (bo_gem->mem_virtual) {
 		VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
-		munmap(bo_gem->mem_virtual, bo_gem->bo.size);
+		drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
 		bufmgr_gem->vma_count--;
 	}
 	if (bo_gem->gtt_virtual) {
-		munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
+		drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
 		bufmgr_gem->vma_count--;
 	}
 
@@ -1044,12 +1140,12 @@ static void drm_intel_gem_bo_purge_vma_c
 		DRMLISTDELINIT(&bo_gem->vma_list);
 
 		if (bo_gem->mem_virtual) {
-			munmap(bo_gem->mem_virtual, bo_gem->bo.size);
+			drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
 			bo_gem->mem_virtual = NULL;
 			bufmgr_gem->vma_count--;
 		}
 		if (bo_gem->gtt_virtual) {
-			munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
+			drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
 			bo_gem->gtt_virtual = NULL;
 			bufmgr_gem->vma_count--;
 		}
@@ -1153,7 +1249,8 @@ static void drm_intel_gem_bo_unreference
 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
 
 	assert(atomic_read(&bo_gem->refcount) > 0);
-	if (atomic_dec_and_test(&bo_gem->refcount)) {
+
+	if (atomic_add_unless(&bo_gem->refcount, -1, 1)) {
 		drm_intel_bufmgr_gem *bufmgr_gem =
 		    (drm_intel_bufmgr_gem *) bo->bufmgr;
 		struct timespec time;
@@ -1161,8 +1258,12 @@ static void drm_intel_gem_bo_unreference
 		clock_gettime(CLOCK_MONOTONIC, &time);
 
 		pthread_mutex_lock(&bufmgr_gem->lock);
-		drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
-		drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
+
+		if (atomic_dec_and_test(&bo_gem->refcount)) {
+			drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
+			drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
+		}
+
 		pthread_mutex_unlock(&bufmgr_gem->lock);
 	}
 }
@@ -1174,6 +1275,12 @@ static int drm_intel_gem_bo_map(drm_inte
 	struct drm_i915_gem_set_domain set_domain;
 	int ret;
 
+	if (bo_gem->is_userptr) {
+		/* Return the same user ptr */
+		bo->virtual = bo_gem->user_virtual;
+		return 0;
+	}
+
 	pthread_mutex_lock(&bufmgr_gem->lock);
 
 	if (bo_gem->map_count++ == 0)
@@ -1242,6 +1349,9 @@ map_gtt(drm_intel_bo *bo)
 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
 	int ret;
 
+	if (bo_gem->is_userptr)
+		return -EINVAL;
+
 	if (bo_gem->map_count++ == 0)
 		drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
 
@@ -1293,7 +1403,8 @@ map_gtt(drm_intel_bo *bo)
 	return 0;
 }
 
-int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
+drm_public int
+drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
 {
 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
@@ -1351,7 +1462,8 @@ int drm_intel_gem_bo_map_gtt(drm_intel_b
  * undefined).
  */
 
-int drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
+drm_public int
+drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
 {
 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
 #ifdef HAVE_VALGRIND
@@ -1384,13 +1496,18 @@ int drm_intel_gem_bo_map_unsynchronized(
 
 static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
 {
-	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+	drm_intel_bufmgr_gem *bufmgr_gem;
 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
 	int ret = 0;
 
 	if (bo == NULL)
 		return 0;
 
+	if (bo_gem->is_userptr)
+		return 0;
+
+	bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+
 	pthread_mutex_lock(&bufmgr_gem->lock);
 
 	if (bo_gem->map_count <= 0) {
@@ -1434,7 +1551,8 @@ static int drm_intel_gem_bo_unmap(drm_in
 	return ret;
 }
 
-int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
+drm_public int
+drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
 {
 	return drm_intel_gem_bo_unmap(bo);
 }
@@ -1448,6 +1566,9 @@ drm_intel_gem_bo_subdata(drm_intel_bo *b
 	struct drm_i915_gem_pwrite pwrite;
 	int ret;
 
+	if (bo_gem->is_userptr)
+		return -EINVAL;
+
 	VG_CLEAR(pwrite);
 	pwrite.handle = bo_gem->gem_handle;
 	pwrite.offset = offset;
@@ -1500,6 +1621,9 @@ drm_intel_gem_bo_get_subdata(drm_intel_b
 	struct drm_i915_gem_pread pread;
 	int ret;
 
+	if (bo_gem->is_userptr)
+		return -EINVAL;
+
 	VG_CLEAR(pread);
 	pread.handle = bo_gem->gem_handle;
 	pread.offset = offset;
@@ -1549,7 +1673,8 @@ drm_intel_gem_bo_wait_rendering(drm_inte
  * handle. Userspace must make sure this race does not occur if such precision
  * is important.
  */
-int drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
+drm_public int
+drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
 {
 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
@@ -1584,7 +1709,7 @@ int drm_intel_gem_bo_wait(drm_intel_bo *
  * In combination with drm_intel_gem_bo_pin() and manual fence management, we
  * can do tiled pixmaps this way.
  */
-void
+drm_public void
 drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
 {
 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
@@ -1744,7 +1869,7 @@ drm_intel_gem_bo_emit_reloc_fence(drm_in
 				read_domains, write_domain, true);
 }
 
-int
+drm_public int
 drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
 {
 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
@@ -1765,9 +1890,10 @@ drm_intel_gem_bo_get_reloc_count(drm_int
  * Any further drm_intel_bufmgr_check_aperture_space() queries
  * involving this buffer in the tree are undefined after this call.
  */
-void
+drm_public void
 drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
 {
+	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
 	int i;
 	struct timespec time;
@@ -1775,7 +1901,10 @@ drm_intel_gem_bo_clear_relocs(drm_intel_
 	clock_gettime(CLOCK_MONOTONIC, &time);
 
 	assert(bo_gem->reloc_count >= start);
+
 	/* Unreference the cleared target buffers */
+	pthread_mutex_lock(&bufmgr_gem->lock);
+
 	for (i = start; i < bo_gem->reloc_count; i++) {
 		drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->reloc_target_info[i].bo;
 		if (&target_bo_gem->bo != bo) {
@@ -1785,6 +1914,9 @@ drm_intel_gem_bo_clear_relocs(drm_intel_
 		}
 	}
 	bo_gem->reloc_count = start;
+
+	pthread_mutex_unlock(&bufmgr_gem->lock);
+
 }
 
 /**
@@ -2096,7 +2228,7 @@ aub_build_dump_ringbuffer(drm_intel_bufm
 	bufmgr_gem->aub_offset += 4096;
 }
 
-void
+drm_public void
 drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
 			      int x1, int y1, int width, int height,
 			      enum aub_dump_bmp_format format,
@@ -2367,7 +2499,7 @@ drm_intel_gem_bo_mrb_exec2(drm_intel_bo 
 			flags);
 }
 
-int
+drm_public int
 drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
 			      int used, unsigned int flags)
 {
@@ -2461,6 +2593,12 @@ drm_intel_gem_bo_set_tiling(drm_intel_bo
 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
 	int ret;
 
+	/* Tiling with userptr surfaces is not supported
+	 * on all hardware so refuse it for time being.
+	 */
+	if (bo_gem->is_userptr)
+		return -EINVAL;
+
 	/* Linear buffers have no stride. By ensuring that we only ever use
 	 * stride 0 with linear buffers, we simplify our code.
 	 */
@@ -2486,7 +2624,7 @@ drm_intel_gem_bo_get_tiling(drm_intel_bo
 	return 0;
 }
 
-drm_intel_bo *
+drm_public drm_intel_bo *
 drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
 {
 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
@@ -2503,25 +2641,29 @@ drm_intel_bo_gem_create_from_prime(drm_i
 	 * for named buffers, we must not create two bo's pointing at the same
 	 * kernel object
 	 */
+	pthread_mutex_lock(&bufmgr_gem->lock);
 	for (list = bufmgr_gem->named.next;
 	     list != &bufmgr_gem->named;
 	     list = list->next) {
 		bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
 		if (bo_gem->gem_handle == handle) {
 			drm_intel_gem_bo_reference(&bo_gem->bo);
+			pthread_mutex_unlock(&bufmgr_gem->lock);
 			return &bo_gem->bo;
 		}
 	}
 
 	if (ret) {
 	  fprintf(stderr,"ret is %d %d\n", ret, errno);
+	  pthread_mutex_unlock(&bufmgr_gem->lock);
 		return NULL;
 	}
 
 	bo_gem = calloc(1, sizeof(*bo_gem));
-	if (!bo_gem)
+	if (!bo_gem) {
+		pthread_mutex_unlock(&bufmgr_gem->lock);
 		return NULL;
-
+	}
 	/* Determine size of bo.  The fd-to-handle ioctl really should
 	 * return the size, but it doesn't.  If we have kernel 3.12 or
 	 * later, we can lseek on the prime fd to get the size.  Older
@@ -2549,6 +2691,7 @@ drm_intel_bo_gem_create_from_prime(drm_i
 
 	DRMINITLISTHEAD(&bo_gem->vma_list);
 	DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
+	pthread_mutex_unlock(&bufmgr_gem->lock);
 
 	VG_CLEAR(get_tiling);
 	get_tiling.handle = bo_gem->gem_handle;
@@ -2567,14 +2710,16 @@ drm_intel_bo_gem_create_from_prime(drm_i
 	return &bo_gem->bo;
 }
 
-int
+drm_public int
 drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd)
 {
 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
 
+	pthread_mutex_lock(&bufmgr_gem->lock);
         if (DRMLISTEMPTY(&bo_gem->name_list))
                 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
+	pthread_mutex_unlock(&bufmgr_gem->lock);
 
 	if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle,
 			       DRM_CLOEXEC, prime_fd) != 0)
@@ -2598,15 +2743,20 @@ drm_intel_gem_bo_flink(drm_intel_bo *bo,
 		VG_CLEAR(flink);
 		flink.handle = bo_gem->gem_handle;
 
+		pthread_mutex_lock(&bufmgr_gem->lock);
+
 		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
-		if (ret != 0)
+		if (ret != 0) {
+			pthread_mutex_unlock(&bufmgr_gem->lock);
 			return -errno;
+		}
 
 		bo_gem->global_name = flink.name;
 		bo_gem->reusable = false;
 
                 if (DRMLISTEMPTY(&bo_gem->name_list))
                         DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
+		pthread_mutex_unlock(&bufmgr_gem->lock);
 	}
 
 	*name = bo_gem->global_name;
@@ -2620,7 +2770,7 @@ drm_intel_gem_bo_flink(drm_intel_bo *bo,
  * size is only bounded by how many buffers of that size we've managed to have
  * in flight at once.
  */
-void
+drm_public void
 drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
 {
 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
@@ -2635,7 +2785,7 @@ drm_intel_bufmgr_gem_enable_reuse(drm_in
  * allocation.  If this option is not enabled, all relocs will have fence
  * register allocated.
  */
-void
+drm_public void
 drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
 {
 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
@@ -2907,7 +3057,7 @@ init_cache_buckets(drm_intel_bufmgr_gem 
 	}
 }
 
-void
+drm_public void
 drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
 {
 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
@@ -2949,7 +3099,7 @@ get_pci_device_id(drm_intel_bufmgr_gem *
 	return devid;
 }
 
-int
+drm_public int
 drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
 {
 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
@@ -2963,7 +3113,7 @@ drm_intel_bufmgr_gem_get_devid(drm_intel
  * This function has to be called before drm_intel_bufmgr_gem_set_aub_dump()
  * for it to have any effect.
  */
-void
+drm_public void
 drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr,
 				      const char *filename)
 {
@@ -2982,7 +3132,7 @@ drm_intel_bufmgr_gem_set_aub_filename(dr
  * You can set up a GTT and upload your objects into the referenced
  * space, then send off batchbuffers and get BMPs out the other end.
  */
-void
+drm_public void
 drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
 {
 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
@@ -3038,7 +3188,7 @@ drm_intel_bufmgr_gem_set_aub_dump(drm_in
 	}
 }
 
-drm_intel_context *
+drm_public drm_intel_context *
 drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
 {
 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
@@ -3065,7 +3215,7 @@ drm_intel_gem_context_create(drm_intel_b
 	return context;
 }
 
-void
+drm_public void
 drm_intel_gem_context_destroy(drm_intel_context *ctx)
 {
 	drm_intel_bufmgr_gem *bufmgr_gem;
@@ -3088,7 +3238,7 @@ drm_intel_gem_context_destroy(drm_intel_
 	free(ctx);
 }
 
-int
+drm_public int
 drm_intel_get_reset_stats(drm_intel_context *ctx,
 			  uint32_t *reset_count,
 			  uint32_t *active,
@@ -3122,7 +3272,7 @@ drm_intel_get_reset_stats(drm_intel_cont
 	return ret;
 }
 
-int
+drm_public int
 drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
 		   uint32_t offset,
 		   uint64_t *result)
@@ -3162,7 +3312,7 @@ drm_intel_reg_read(drm_intel_bufmgr *buf
  * default state (no annotations), call this function with a \c count
  * of zero.
  */
-void
+drm_public void
 drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
 					 drm_intel_aub_annotation *annotations,
 					 unsigned count)
@@ -3182,13 +3332,94 @@ drm_intel_bufmgr_gem_set_aub_annotations
 	bo_gem->aub_annotation_count = count;
 }
 
+static pthread_mutex_t bufmgr_list_mutex = PTHREAD_MUTEX_INITIALIZER;
+static drmMMListHead bufmgr_list = { &bufmgr_list, &bufmgr_list };
+
+static drm_intel_bufmgr_gem *
+drm_intel_bufmgr_gem_find(int fd)
+{
+	drm_intel_bufmgr_gem *bufmgr_gem;
+
+	DRMLISTFOREACHENTRY(bufmgr_gem, &bufmgr_list, managers) {
+		if (bufmgr_gem->fd == fd) {
+			atomic_inc(&bufmgr_gem->refcount);
+			return bufmgr_gem;
+		}
+	}
+
+	return NULL;
+}
+
+static void
+drm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr)
+{
+	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+
+	if (atomic_add_unless(&bufmgr_gem->refcount, -1, 1)) {
+		pthread_mutex_lock(&bufmgr_list_mutex);
+
+		if (atomic_dec_and_test(&bufmgr_gem->refcount)) {
+			DRMLISTDEL(&bufmgr_gem->managers);
+			drm_intel_bufmgr_gem_destroy(bufmgr);
+		}
+
+		pthread_mutex_unlock(&bufmgr_list_mutex);
+	}
+}
+
+static bool
+has_userptr(drm_intel_bufmgr_gem *bufmgr_gem)
+{
+	int ret;
+	void *ptr;
+	long pgsz;
+	struct drm_i915_gem_userptr userptr;
+	struct drm_gem_close close_bo;
+
+	pgsz = sysconf(_SC_PAGESIZE);
+	assert(pgsz > 0);
+
+	ret = posix_memalign(&ptr, pgsz, pgsz);
+	if (ret) {
+		DBG("Failed to get a page (%ld) for userptr detection!\n",
+			pgsz);
+		return false;
+	}
+
+	memset(&userptr, 0, sizeof(userptr));
+	userptr.user_ptr = (__u64)(unsigned long)ptr;
+	userptr.user_size = pgsz;
+
+retry:
+	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
+	if (ret) {
+		if (errno == ENODEV && userptr.flags == 0) {
+			userptr.flags = I915_USERPTR_UNSYNCHRONIZED;
+			goto retry;
+		}
+		free(ptr);
+		return false;
+	}
+
+	close_bo.handle = userptr.handle;
+	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
+	free(ptr);
+	if (ret) {
+		fprintf(stderr, "Failed to release test userptr object! (%d) "
+				"i915 kernel driver may not be sane!\n", errno);
+		return false;
+	}
+
+	return true;
+}
+
 /**
  * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
  * and manage map buffer objections.
  *
  * \param fd File descriptor of the opened DRM device.
  */
-drm_intel_bufmgr *
+drm_public drm_intel_bufmgr *
 drm_intel_bufmgr_gem_init(int fd, int batch_size)
 {
 	drm_intel_bufmgr_gem *bufmgr_gem;
@@ -3197,15 +3428,23 @@ drm_intel_bufmgr_gem_init(int fd, int ba
 	int ret, tmp;
 	bool exec2 = false;
 
+	pthread_mutex_lock(&bufmgr_list_mutex);
+
+	bufmgr_gem = drm_intel_bufmgr_gem_find(fd);
+	if (bufmgr_gem)
+		goto exit;
+
 	bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
 	if (bufmgr_gem == NULL)
-		return NULL;
+		goto exit;
 
 	bufmgr_gem->fd = fd;
+	atomic_set(&bufmgr_gem->refcount, 1);
 
 	if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
 		free(bufmgr_gem);
-		return NULL;
+		bufmgr_gem = NULL;
+		goto exit;
 	}
 
 	ret = drmIoctl(bufmgr_gem->fd,
@@ -3242,7 +3481,8 @@ drm_intel_bufmgr_gem_init(int fd, int ba
 		bufmgr_gem->gen = 8;
 	else {
 		free(bufmgr_gem);
-		return NULL;
+		bufmgr_gem = NULL;
+		goto exit;
 	}
 
 	if (IS_GEN3(bufmgr_gem->pci_device) &&
@@ -3274,6 +3514,10 @@ drm_intel_bufmgr_gem_init(int fd, int ba
 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
 	bufmgr_gem->has_relaxed_fencing = ret == 0;
 
+	if (has_userptr(bufmgr_gem))
+		bufmgr_gem->bufmgr.bo_alloc_userptr =
+			drm_intel_gem_bo_alloc_userptr;
+
 	gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
 	bufmgr_gem->has_wait_timeout = ret == 0;
@@ -3353,7 +3597,7 @@ drm_intel_bufmgr_gem_init(int fd, int ba
 		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
 	bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
 	bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
-	bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
+	bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_unref;
 	bufmgr_gem->bufmgr.debug = 0;
 	bufmgr_gem->bufmgr.check_aperture_space =
 	    drm_intel_gem_check_aperture_space;
@@ -3369,5 +3613,10 @@ drm_intel_bufmgr_gem_init(int fd, int ba
 	DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
 	bufmgr_gem->vma_max = -1; /* unlimited by default */
 
-	return &bufmgr_gem->bufmgr;
+	DRMLISTADD(&bufmgr_gem->managers, &bufmgr_list);
+
+exit:
+	pthread_mutex_unlock(&bufmgr_list_mutex);
+
+	return bufmgr_gem != NULL ? &bufmgr_gem->bufmgr : NULL;
 }
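
Two larger intel_bufmgr_gem.c changes are worth calling out.  First,
userptr support: has_userptr() probes DRM_IOCTL_I915_GEM_USERPTR with a
throwaway page at init time and, when the kernel accepts it, installs
drm_intel_gem_bo_alloc_userptr as the bo_alloc_userptr hook, letting
existing page-aligned user memory be wrapped in an (untiled,
non-GTT-mappable) buffer object.  Second, buffer managers are now shared
and refcounted per fd: drm_intel_bufmgr_gem_init() returns the existing
manager for an fd it has seen before, and destroy is routed through
drm_intel_bufmgr_gem_unref() so the manager is only torn down when the
last user drops it.  A sketch of using the userptr path through the
public allocator; the drm_intel_bo_alloc_userptr() wrapper name and
argument order are assumed from intel_bufmgr.h, matching the hook above:

    #include <stdlib.h>
    #include <unistd.h>
    #include "intel_bufmgr.h"

    static drm_intel_bo *wrap_user_memory(drm_intel_bufmgr *bufmgr,
                                          size_t size)
    {
        size_t pgsz = (size_t)sysconf(_SC_PAGESIZE);
        void *ptr;
        drm_intel_bo *bo;

        /* The kernel wants a page-aligned pointer and size. */
        size = (size + pgsz - 1) & ~(pgsz - 1);
        if (posix_memalign(&ptr, pgsz, size) != 0)
            return NULL;

        bo = drm_intel_bo_alloc_userptr(bufmgr, "wrapped", ptr,
                                        0 /* I915_TILING_NONE */, 0,
                                        size, 0);
        if (bo == NULL)
            free(ptr);      /* no userptr support or the ioctl failed */
        return bo;
    }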

Index: xsrc/external/mit/libdrm/dist/radeon/radeon_bo_gem.c
diff -u xsrc/external/mit/libdrm/dist/radeon/radeon_bo_gem.c:1.4 xsrc/external/mit/libdrm/dist/radeon/radeon_bo_gem.c:1.5
--- xsrc/external/mit/libdrm/dist/radeon/radeon_bo_gem.c:1.4	Thu Aug 14 20:56:10 2014
+++ xsrc/external/mit/libdrm/dist/radeon/radeon_bo_gem.c	Sat Dec 13 21:39:07 2014
@@ -36,8 +36,8 @@
 #include <stdint.h>
 #include <stdlib.h>
 #include <string.h>
-#include <sys/mman.h>
 #include <errno.h>
+#include "libdrm.h"
 #include "xf86drm.h"
 #include "xf86atomic.h"
 #include "drm.h"
@@ -134,7 +134,7 @@ static struct radeon_bo *bo_unref(struct
         return (struct radeon_bo *)boi;
     }
     if (bo_gem->priv_ptr) {
-        munmap(bo_gem->priv_ptr, boi->size);
+        drm_munmap(bo_gem->priv_ptr, boi->size);
     }
 
     /* Zero out args to make valgrind happy */
@@ -197,7 +197,7 @@ static int bo_unmap(struct radeon_bo_int
     if (--bo_gem->map_count > 0) {
         return 0;
     }
-    //munmap(bo->ptr, bo->size);
+    //drm_munmap(bo->ptr, bo->size);
     boi->ptr = NULL;
     return 0;
 }
@@ -284,7 +284,7 @@ static struct radeon_bo_funcs bo_gem_fun
     bo_is_busy,
 };
 
-struct radeon_bo_manager *radeon_bo_manager_gem_ctor(int fd)
+drm_public struct radeon_bo_manager *radeon_bo_manager_gem_ctor(int fd)
 {
     struct bo_manager_gem *bomg;
 
@@ -297,7 +297,7 @@ struct radeon_bo_manager *radeon_bo_mana
     return (struct radeon_bo_manager*)bomg;
 }
 
-void radeon_bo_manager_gem_dtor(struct radeon_bo_manager *bom)
+drm_public void radeon_bo_manager_gem_dtor(struct radeon_bo_manager *bom)
 {
     struct bo_manager_gem *bomg = (struct bo_manager_gem*)bom;
 
@@ -307,19 +307,22 @@ void radeon_bo_manager_gem_dtor(struct r
     free(bomg);
 }
 
-uint32_t radeon_gem_name_bo(struct radeon_bo *bo)
+drm_public uint32_t
+radeon_gem_name_bo(struct radeon_bo *bo)
 {
     struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)bo;
     return bo_gem->name;
 }
 
-void *radeon_gem_get_reloc_in_cs(struct radeon_bo *bo)
+drm_public void *
+radeon_gem_get_reloc_in_cs(struct radeon_bo *bo)
 {
     struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)bo;
     return &bo_gem->reloc_in_cs;
 }
 
-int radeon_gem_get_kernel_name(struct radeon_bo *bo, uint32_t *name)
+drm_public int
+radeon_gem_get_kernel_name(struct radeon_bo *bo, uint32_t *name)
 {
     struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)bo;
     struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
@@ -340,7 +343,8 @@ int radeon_gem_get_kernel_name(struct ra
     return 0;
 }
 
-int radeon_gem_set_domain(struct radeon_bo *bo, uint32_t read_domains, uint32_t write_domain)
+drm_public int
+radeon_gem_set_domain(struct radeon_bo *bo, uint32_t read_domains, uint32_t write_domain)
 {
     struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
     struct drm_radeon_gem_set_domain args;
@@ -357,7 +361,7 @@ int radeon_gem_set_domain(struct radeon_
     return r;
 }
 
-int radeon_gem_prime_share_bo(struct radeon_bo *bo, int *handle)
+drm_public int radeon_gem_prime_share_bo(struct radeon_bo *bo, int *handle)
 {
     struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)bo;
     int ret;
@@ -366,9 +370,8 @@ int radeon_gem_prime_share_bo(struct rad
     return ret;
 }
 
-struct radeon_bo *radeon_gem_bo_open_prime(struct radeon_bo_manager *bom,
-					   int fd_handle,
-					   uint32_t size)
+drm_public struct radeon_bo *
+radeon_gem_bo_open_prime(struct radeon_bo_manager *bom, int fd_handle, uint32_t size)
 {
     struct radeon_bo_gem *bo;
     int r;
Index: xsrc/external/mit/libdrm/dist/radeon/radeon_surface.c
diff -u xsrc/external/mit/libdrm/dist/radeon/radeon_surface.c:1.4 xsrc/external/mit/libdrm/dist/radeon/radeon_surface.c:1.5
--- xsrc/external/mit/libdrm/dist/radeon/radeon_surface.c:1.4	Fri Jul 11 20:12:52 2014
+++ xsrc/external/mit/libdrm/dist/radeon/radeon_surface.c	Sat Dec 13 21:39:07 2014
@@ -26,15 +26,18 @@
  * Authors:
  *      Jérôme Glisse <[email protected]>
  */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
 #include <stdbool.h>
 #include <assert.h>
 #include <errno.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
-#include <sys/mman.h>
 #include <sys/ioctl.h>
 #include "drm.h"
+#include "libdrm.h"
 #include "xf86drm.h"
 #include "radeon_drm.h"
 #include "radeon_surface.h"
@@ -282,7 +285,7 @@ static int r6_surface_init_linear(struct
         surf_minify(surf, surf->level+i, surf->bpe, i, xalign, yalign, zalign, offset);
         /* level0 and first mipmap need to have alignment */
         offset = surf->bo_size;
-        if ((i == 0)) {
+        if (i == 0) {
             offset = ALIGN(offset, surf->bo_alignment);
         }
     }
@@ -310,7 +313,7 @@ static int r6_surface_init_linear_aligne
         surf_minify(surf, surf->level+i, surf->bpe, i, xalign, yalign, zalign, offset);
         /* level0 and first mipmap need to have alignment */
         offset = surf->bo_size;
-        if ((i == 0)) {
+        if (i == 0) {
             offset = ALIGN(offset, surf->bo_alignment);
         }
     }
@@ -343,7 +346,7 @@ static int r6_surface_init_1d(struct rad
         surf_minify(surf, surf->level+i, surf->bpe, i, xalign, yalign, zalign, offset);
         /* level0 and first mipmap need to have alignment */
         offset = surf->bo_size;
-        if ((i == 0)) {
+        if (i == 0) {
             offset = ALIGN(offset, surf->bo_alignment);
         }
     }
@@ -384,7 +387,7 @@ static int r6_surface_init_2d(struct rad
         }
         /* level0 and first mipmap need to have alignment */
         offset = surf->bo_size;
-        if ((i == 0)) {
+        if (i == 0) {
             offset = ALIGN(offset, surf->bo_alignment);
         }
     }
@@ -632,7 +635,7 @@ static int eg_surface_init_1d(struct rad
         surf_minify(surf, level+i, bpe, i, xalign, yalign, zalign, offset);
         /* level0 and first mipmap need to have alignment */
         offset = surf->bo_size;
-        if ((i == 0)) {
+        if (i == 0) {
             offset = ALIGN(offset, surf->bo_alignment);
         }
     }
@@ -685,7 +688,7 @@ static int eg_surface_init_2d(struct rad
         }
         /* level0 and first mipmap need to have alignment */
         offset = surf->bo_size;
-        if ((i == 0)) {
+        if (i == 0) {
             offset = ALIGN(offset, surf->bo_alignment);
         }
     }
@@ -1307,7 +1310,7 @@ static int si_surface_sanity(struct rade
         /* default value */
         surf->mtilea = 1;
         surf->bankw = 1;
-        surf->bankw = 1;
+        surf->bankh = 1;
         surf->tile_split = 64;
         surf->stencil_tile_split = 64;
     }
@@ -1524,7 +1527,7 @@ static int si_surface_init_linear_aligne
         si_surf_minify(surf, surf->level+i, surf->bpe, i, xalign, yalign, zalign, slice_align, offset);
         /* level0 and first mipmap need to have alignment */
         offset = surf->bo_size;
-        if ((i == 0)) {
+        if (i == 0) {
             offset = ALIGN(offset, surf->bo_alignment);
         }
         if (surf->flags & RADEON_SURF_HAS_TILE_MODE_INDEX) {
@@ -1567,7 +1570,7 @@ static int si_surface_init_1d(struct rad
         si_surf_minify(surf, level+i, bpe, i, xalign, yalign, zalign, slice_align, offset);
         /* level0 and first mipmap need to have alignment */
         offset = surf->bo_size;
-        if ((i == 0)) {
+        if (i == 0) {
             offset = ALIGN(offset, alignment);
         }
         if (surf->flags & RADEON_SURF_HAS_TILE_MODE_INDEX) {
@@ -1669,7 +1672,7 @@ static int si_surface_init_2d(struct rad
         }
         /* level0 and first mipmap need to have alignment */
         aligned_offset = offset = surf->bo_size;
-        if ((i == 0)) {
+        if (i == 0) {
             aligned_offset = ALIGN(aligned_offset, surf->bo_alignment);
         }
         if (surf->flags & RADEON_SURF_HAS_TILE_MODE_INDEX) {
@@ -1914,7 +1917,7 @@ static void cik_get_2d_params(struct rad
         sample_split = 1;
         break;
     case CIK__SAMPLE_SPLIT__2:
-        sample_split = 1;
+        sample_split = 2;
         break;
     case CIK__SAMPLE_SPLIT__4:
         sample_split = 4;
@@ -2134,7 +2137,7 @@ static int cik_surface_sanity(struct rad
         /* default value */
         surf->mtilea = 1;
         surf->bankw = 1;
-        surf->bankw = 1;
+        surf->bankh = 1;
         surf->tile_split = 64;
         surf->stencil_tile_split = 64;
     }
@@ -2395,7 +2398,8 @@ static int cik_surface_best(struct radeo
 /* ===========================================================================
  * public API
  */
-struct radeon_surface_manager *radeon_surface_manager_new(int fd)
+drm_public struct radeon_surface_manager *
+radeon_surface_manager_new(int fd)
 {
     struct radeon_surface_manager *surf_man;
 
@@ -2443,7 +2447,8 @@ out_err:
     return NULL;
 }
 
-void radeon_surface_manager_free(struct radeon_surface_manager *surf_man)
+drm_public void
+radeon_surface_manager_free(struct radeon_surface_manager *surf_man)
 {
     free(surf_man);
 }
@@ -2515,8 +2520,9 @@ static int radeon_surface_sanity(struct 
     return 0;
 }
 
-int radeon_surface_init(struct radeon_surface_manager *surf_man,
-                        struct radeon_surface *surf)
+drm_public int
+radeon_surface_init(struct radeon_surface_manager *surf_man,
+                    struct radeon_surface *surf)
 {
     unsigned mode, type;
     int r;
@@ -2531,8 +2537,9 @@ int radeon_surface_init(struct radeon_su
     return surf_man->surface_init(surf_man, surf);
 }
 
-int radeon_surface_best(struct radeon_surface_manager *surf_man,
-                        struct radeon_surface *surf)
+drm_public int
+radeon_surface_best(struct radeon_surface_manager *surf_man,
+                    struct radeon_surface *surf)
 {
     unsigned mode, type;
     int r;

Index: xsrc/external/mit/libdrm/dist/radeon/radeon_cs_gem.c
diff -u xsrc/external/mit/libdrm/dist/radeon/radeon_cs_gem.c:1.3 xsrc/external/mit/libdrm/dist/radeon/radeon_cs_gem.c:1.4
--- xsrc/external/mit/libdrm/dist/radeon/radeon_cs_gem.c:1.3	Mon Mar 17 07:52:19 2014
+++ xsrc/external/mit/libdrm/dist/radeon/radeon_cs_gem.c	Sat Dec 13 21:39:07 2014
@@ -29,12 +29,14 @@
  *      Nicolai Haehnle <[email protected]>
  *      Jérôme Glisse <[email protected]>
  */
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
 #include <assert.h>
 #include <errno.h>
 #include <stdlib.h>
 #include <string.h>
 #include <pthread.h>
-#include <sys/mman.h>
 #include <sys/ioctl.h>
 #include "radeon_cs.h"
 #include "radeon_cs_int.h"
@@ -42,6 +44,7 @@
 #include "radeon_cs_gem.h"
 #include "radeon_bo_gem.h"
 #include "drm.h"
+#include "libdrm.h"
 #include "xf86drm.h"
 #include "xf86atomic.h"
 #include "radeon_drm.h"
@@ -534,7 +537,7 @@ static int radeon_get_device_id(int fd, 
     return r;
 }
 
-struct radeon_cs_manager *radeon_cs_manager_gem_ctor(int fd)
+drm_public struct radeon_cs_manager *radeon_cs_manager_gem_ctor(int fd)
 {
     struct radeon_cs_manager_gem *csm;
 
@@ -548,7 +551,7 @@ struct radeon_cs_manager *radeon_cs_mana
     return &csm->base;
 }
 
-void radeon_cs_manager_gem_dtor(struct radeon_cs_manager *csm)
+drm_public void radeon_cs_manager_gem_dtor(struct radeon_cs_manager *csm)
 {
     free(csm);
 }
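
Throughout these files, exported entry points gain a drm_public
annotation.  libdrm 2.4.58 builds the library with hidden symbol
visibility by default and uses drm_public (from the new private libdrm.h
header) to mark the functions that remain part of the public ABI.  A
sketch of the kind of definition involved, assuming a GCC/clang-style
visibility attribute; the guard macro name is illustrative and the real
definition lives in libdrm.h:

    #if defined(HAVE_VISIBILITY)
    #  define drm_public  __attribute__((visibility("default")))
    #  define drm_private __attribute__((visibility("hidden")))
    #else
    #  define drm_public
    #  define drm_private
    #endif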

Index: xsrc/external/mit/libdrm/dist/tests/modetest/modetest.c
diff -u xsrc/external/mit/libdrm/dist/tests/modetest/modetest.c:1.4 xsrc/external/mit/libdrm/dist/tests/modetest/modetest.c:1.5
--- xsrc/external/mit/libdrm/dist/tests/modetest/modetest.c:1.4	Fri Jul 11 19:57:33 2014
+++ xsrc/external/mit/libdrm/dist/tests/modetest/modetest.c	Sat Dec 13 21:39:07 2014
@@ -37,7 +37,9 @@
  * TODO: use cairo to write the mode info on the selected output once
  *       the mode has been programmed, along with possible test patterns.
  */
+#ifdef HAVE_CONFIG_H
 #include "config.h"
+#endif
 
 #include <assert.h>
 #include <ctype.h>
@@ -1435,7 +1437,7 @@ int main(int argc, char **argv)
 	int drop_master = 0;
 	int test_vsync = 0;
 	int test_cursor = 0;
-	const char *modules[] = { "i915", "radeon", "nouveau", "vmwgfx", "omapdrm", "exynos", "tilcdc", "msm" };
+	const char *modules[] = { "i915", "radeon", "nouveau", "vmwgfx", "omapdrm", "exynos", "tilcdc", "msm", "sti" };
 	char *device = NULL;
 	char *module = NULL;
 	unsigned int i;
