Module Name: src
Committed By: riastradh
Date: Mon Aug 27 07:17:35 UTC 2018
Modified Files:
src/sys/external/bsd/drm2/dist/drm/i915: i915_cmd_parser.c i915_drv.h
i915_gem.c i915_gem_fence.c i915_gem_gtt.c i915_gem_render_state.c
i915_gem_stolen.c i915_guc_submission.c
Log Message:
Rework how we map i915 drm gem objects.
The old way made no sense at all. This way might make sense.
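
The substance of the change shows up in the i915_drv.h hunk below: the
NetBSD-specific page state in struct drm_i915_gem_object shrinks from
four fields (igo_pageq, a bus_dma_segment_t array confusingly named
`pages', igo_nsegs, and igo_dmamap) to two, a pglist plus a single
bus_dmamap_t that takes over the `pages' name, since a loaded DMA map
already carries the segment array and count in dm_segs/dm_nsegs.  A
condensed before-and-after sketch, not a compilable excerpt:

    /* Before: four parallel pieces of state. */
    struct pglist igo_pageq;      /* wired pages backing the object */
    bus_dma_segment_t *pages;     /* `pages' is an expedient misnomer */
    int igo_nsegs;
    bus_dmamap_t igo_dmamap;

    /* After: two; dm_segs[] and dm_nsegs live inside the map. */
    struct pglist pageq;          /* wired pages backing the object */
    bus_dmamap_t pages;           /* expedient misnomer */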
To generate a diff of this commit:
cvs rdiff -u -r1.6 -r1.7 \
src/sys/external/bsd/drm2/dist/drm/i915/i915_cmd_parser.c \
src/sys/external/bsd/drm2/dist/drm/i915/i915_guc_submission.c
cvs rdiff -u -r1.17 -r1.18 src/sys/external/bsd/drm2/dist/drm/i915/i915_drv.h
cvs rdiff -u -r1.40 -r1.41 src/sys/external/bsd/drm2/dist/drm/i915/i915_gem.c
cvs rdiff -u -r1.3 -r1.4 \
src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_fence.c \
src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_render_state.c
cvs rdiff -u -r1.10 -r1.11 \
src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_gtt.c
cvs rdiff -u -r1.8 -r1.9 \
src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_stolen.c
Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
Modified files:
Index: src/sys/external/bsd/drm2/dist/drm/i915/i915_cmd_parser.c
diff -u src/sys/external/bsd/drm2/dist/drm/i915/i915_cmd_parser.c:1.6 src/sys/external/bsd/drm2/dist/drm/i915/i915_cmd_parser.c:1.7
--- src/sys/external/bsd/drm2/dist/drm/i915/i915_cmd_parser.c:1.6 Mon Aug 27 07:04:02 2018
+++ src/sys/external/bsd/drm2/dist/drm/i915/i915_cmd_parser.c Mon Aug 27 07:17:35 2018
@@ -1,4 +1,4 @@
-/* $NetBSD: i915_cmd_parser.c,v 1.6 2018/08/27 07:04:02 riastradh Exp $ */
+/* $NetBSD: i915_cmd_parser.c,v 1.7 2018/08/27 07:17:35 riastradh Exp $ */
/*
* Copyright © 2013 Intel Corporation
@@ -28,7 +28,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: i915_cmd_parser.c,v 1.6 2018/08/27 07:04:02 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i915_cmd_parser.c,v 1.7 2018/08/27 07:17:35 riastradh Exp $");
#include "i915_drv.h"
@@ -883,7 +883,11 @@ static u32 *vmap_batch(struct drm_i915_g
i = 0;
#ifdef __NetBSD__
- TAILQ_FOREACH(page, &obj->igo_pageq, pageq.queue) {
+ /*
+ * XXX Why do we work through the page queue instead of just
+ * using uvm_map?
+ */
+ TAILQ_FOREACH(page, &obj->pageq, pageq.queue) {
if (first_page-- > 0)
continue;
if (i == npages)
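
The XXX question above has a concrete answer elsewhere in this commit:
i915_gem_render_state.c below switches to mapping the object's backing
uao with uvm_map.  A hypothetical sketch of the same approach applied
to vmap_batch -- the function name, error handling, and uao reference
management here are illustrative assumptions, not code from the tree:

    /* Map npages of the object's uao, starting at first_page. */
    static u32 *
    example_vmap_range(struct drm_i915_gem_object *obj,
        unsigned first_page, unsigned npages)
    {
            vaddr_t kva = 0;
            int ret;

            /* The mapping consumes a reference to the uao. */
            uao_reference(obj->base.gemo_shm_uao);

            /* XXX errno NetBSD->Linux */
            ret = -uvm_map(kernel_map, &kva,
                (vsize_t)npages << PAGE_SHIFT, obj->base.gemo_shm_uao,
                (voff_t)first_page << PAGE_SHIFT, 0,
                UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
                    UVM_ADV_NORMAL, 0));
            if (ret) {
                    uao_detach(obj->base.gemo_shm_uao);
                    return NULL;
            }

            return (u32 *)kva;  /* uvm_unmap(kernel_map, ...) to free */
    }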
Index: src/sys/external/bsd/drm2/dist/drm/i915/i915_guc_submission.c
diff -u src/sys/external/bsd/drm2/dist/drm/i915/i915_guc_submission.c:1.6 src/sys/external/bsd/drm2/dist/drm/i915/i915_guc_submission.c:1.7
--- src/sys/external/bsd/drm2/dist/drm/i915/i915_guc_submission.c:1.6 Mon Aug 27 07:16:31 2018
+++ src/sys/external/bsd/drm2/dist/drm/i915/i915_guc_submission.c Mon Aug 27 07:17:35 2018
@@ -1,4 +1,4 @@
-/* $NetBSD: i915_guc_submission.c,v 1.6 2018/08/27 07:16:31 riastradh Exp $ */
+/* $NetBSD: i915_guc_submission.c,v 1.7 2018/08/27 07:17:35 riastradh Exp $ */
/*
* Copyright © 2014 Intel Corporation
@@ -24,7 +24,7 @@
*
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: i915_guc_submission.c,v 1.6 2018/08/27 07:16:31 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i915_guc_submission.c,v 1.7 2018/08/27 07:17:35 riastradh Exp $");
#include <linux/bitmap.h>
#include <linux/firmware.h>
@@ -381,90 +381,6 @@ static void guc_init_proc_desc(struct in
kunmap_atomic(base);
}
-#ifdef __NetBSD__
-/*
- * bus_dmamem_move_atomic(dmat, seg, nseg, buf, nbytes, rw)
- *
- * Transfer nbytes of data between the bus_dma segments seg[0],
- * seg[1], ..., seg[nseg-1] and buf, in the direction specified by
- * rw: reading from or writing to the DMA segments.
- *
- * Cannot fail.
- */
-static void
-bus_dmamem_move(bus_dma_tag_t dmat, bus_dma_segment_t *seg, int nseg,
- bus_size_t skip, void *buf, size_t nbytes, enum uio_rw rw)
-{
- char *ptr = buf;
- void *kva;
- int i;
-
- /* Find the first segment that we need to copy from. */
- for (i = 0; skip < seg[i].ds_len; skip -= seg[i].ds_len, i++)
- KASSERT(i < nseg);
-
- /* Copy as much as we requested from the segments. */
- do {
- paddr_t pa;
- bus_size_t seglen;
-
- KASSERT(i < nseg);
- KASSERT(skip < seg[i].ds_len);
- pa = seg[i].ds_addr;
- seglen = MIN(seg[i].ds_len, nbytes);
- i++;
-
- while (seglen) {
- struct vm_page *vm_page = PHYS_TO_VM_PAGE(pa);
- struct page *page = container_of(vm_page, struct page,
- p_vmp);
- size_t copy = MIN(PAGE_SIZE - skip, seglen);
- const char *src;
- char *dst;
-
- kva = kmap_atomic(page);
- switch (rw) {
- case UIO_READ:
- src = kva;
- src += skip;
- dst = ptr;
- case UIO_WRITE:
- src = ptr;
- dst = kva;
- dst += skip;
- }
- memcpy(dst, src, copy);
- kunmap_atomic(kva);
-
- pa += PAGE_SIZE;
- seglen -= copy;
- ptr += copy;
- nbytes -= copy;
- skip = 0; /* after the first, we're page-aligned */
- }
- } while (nbytes);
-}
-
-static void
-bus_dmamem_write(bus_dma_tag_t dmat, bus_dma_segment_t *seg, int nseg,
- bus_size_t skip, const void *buf, size_t nbytes)
-{
-
- bus_dmamem_move(dmat, seg, nseg, skip, __UNCONST(buf), nbytes,
- UIO_WRITE);
-}
-
-#if 0
-static void
-bus_dmamem_read(bus_dma_tag_t dmat, bus_dma_segment_t *seg, int nseg,
- bus_size_t skip, void *buf, size_t nbytes)
-{
-
- bus_dmamem_move(dmat, seg, nseg, skip, buf, nbytes, UIO_READ);
-}
-#endif
-#endif
-
/*
* Initialise/clear the context descriptor shared with the GuC firmware.
*
@@ -540,7 +456,7 @@ static void guc_init_ctx_desc(struct int
i915_gem_obj_ggtt_offset(client->client_obj);
#ifdef __NetBSD__
desc.db_trigger_phy = client->doorbell_offset +
- client->client_obj->igo_dmamap->dm_segs[0].ds_addr;
+ client->client_obj->pages->dm_segs[0].ds_addr;
#else
desc.db_trigger_phy = client->doorbell_offset +
sg_dma_address(client->client_obj->pages->sgl);
@@ -562,13 +478,29 @@ static void guc_init_ctx_desc(struct int
/* Pool context is pinned already */
#ifdef __NetBSD__
- bus_dma_tag_t dmat = guc->ctx_pool_obj->base.dev->dmat;
- bus_dma_segment_t *seg = guc->ctx_pool_obj->pages;
- int nseg = guc->ctx_pool_obj->igo_nsegs;
- bus_size_t skip = sizeof(desc) * client->ctx_index;
- size_t nbytes = sizeof(desc);
-
- bus_dmamem_write(dmat, seg, nseg, skip, &desc, nbytes);
+ size_t skip = sizeof(desc) * client->ctx_index;
+ size_t resid = sizeof(desc);
+ struct vm_page *page;
+ const char *src = (void *)&desc;
+ char *dst;
+ size_t n;
+
+ /* XXX Can't use uiomove because it can fail. */
+ TAILQ_FOREACH(page, &guc->ctx_pool_obj->pageq, pageq.queue) {
+ if (skip >= PAGE_SIZE) {
+ skip -= PAGE_SIZE;
+ continue;
+ }
+ dst = kmap_atomic(container_of(page, struct page, p_vmp));
+ n = MIN(PAGE_SIZE - skip, resid);
+ memcpy(dst + skip, src, n);
+ kunmap_atomic(dst);
+ skip = 0;
+ src += n;
+ resid -= n;
+ if (resid == 0)
+ break;
+ }
#else
sg = guc->ctx_pool_obj->pages;
sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
@@ -581,15 +513,31 @@ static void guc_fini_ctx_desc(struct int
{
#ifdef __NetBSD__
struct guc_context_desc desc;
- bus_dma_tag_t dmat = guc->ctx_pool_obj->base.dev->dmat;
- bus_dma_segment_t *seg = guc->ctx_pool_obj->pages;
- int nseg = guc->ctx_pool_obj->igo_nsegs;
- bus_size_t skip = sizeof(desc) * client->ctx_index;
- size_t nbytes = sizeof(desc);
+ size_t skip = sizeof(desc) * client->ctx_index;
+ size_t resid = sizeof(desc);
+ struct vm_page *page;
+ const char *src = (void *)&desc;
+ char *dst;
+ size_t n;
memset(&desc, 0, sizeof(desc));
- bus_dmamem_write(dmat, seg, nseg, skip, &desc, nbytes);
+ /* XXX Can't use uiomove because it can fail. */
+ TAILQ_FOREACH(page, &guc->ctx_pool_obj->pageq, pageq.queue) {
+ if (skip >= PAGE_SIZE) {
+ skip -= PAGE_SIZE;
+ continue;
+ }
+ dst = kmap_atomic(container_of(page, struct page, p_vmp));
+ n = MIN(PAGE_SIZE - skip, resid);
+ memcpy(dst + skip, src, n);
+ kunmap_atomic(dst);
+ skip = 0;
+ src += n;
+ resid -= n;
+ if (resid == 0)
+ break;
+ }
#else
struct guc_context_desc desc;
struct sg_table *sg;
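
The two descriptor copies above open-code the same loop; it could be
factored into a helper along these lines.  A minimal sketch, assuming
the drm2 compat environment this file already has (kmap_atomic,
container_of, struct page with p_vmp); the helper name is hypothetical:

    /*
     * Copy nbytes from buf into the wired pages on pageq, starting
     * skip bytes in.  Unlike uiomove, this cannot fail: the pages
     * are already wired and kmap_atomic always succeeds.
     */
    static void
    pglist_copy_from_buffer(struct pglist *pageq, size_t skip,
        const void *buf, size_t nbytes)
    {
            const char *src = buf;
            struct vm_page *page;
            char *dst;
            size_t n;

            TAILQ_FOREACH(page, pageq, pageq.queue) {
                    if (skip >= PAGE_SIZE) {
                            skip -= PAGE_SIZE;
                            continue;
                    }
                    dst = kmap_atomic(container_of(page, struct page,
                            p_vmp));
                    n = MIN(PAGE_SIZE - skip, nbytes);
                    memcpy(dst + skip, src, n);
                    kunmap_atomic(dst);
                    skip = 0;
                    src += n;
                    nbytes -= n;
                    if (nbytes == 0)
                            break;
            }
    }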
Index: src/sys/external/bsd/drm2/dist/drm/i915/i915_drv.h
diff -u src/sys/external/bsd/drm2/dist/drm/i915/i915_drv.h:1.17 src/sys/external/bsd/drm2/dist/drm/i915/i915_drv.h:1.18
--- src/sys/external/bsd/drm2/dist/drm/i915/i915_drv.h:1.17 Mon Aug 27 07:15:50 2018
+++ src/sys/external/bsd/drm2/dist/drm/i915/i915_drv.h Mon Aug 27 07:17:35 2018
@@ -1,4 +1,4 @@
-/* $NetBSD: i915_drv.h,v 1.17 2018/08/27 07:15:50 riastradh Exp $ */
+/* $NetBSD: i915_drv.h,v 1.18 2018/08/27 07:17:35 riastradh Exp $ */
/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
*/
@@ -2177,10 +2177,8 @@ struct drm_i915_gem_object {
unsigned int pin_display;
#ifdef __NetBSD__
- struct pglist igo_pageq;
- bus_dma_segment_t *pages; /* `pages' is an expedient misnomer. */
- int igo_nsegs;
- bus_dmamap_t igo_dmamap;
+ struct pglist pageq;
+ bus_dmamap_t pages; /* expedient misnomer */
#else
struct sg_table *pages;
#endif
Index: src/sys/external/bsd/drm2/dist/drm/i915/i915_gem.c
diff -u src/sys/external/bsd/drm2/dist/drm/i915/i915_gem.c:1.40 src/sys/external/bsd/drm2/dist/drm/i915/i915_gem.c:1.41
--- src/sys/external/bsd/drm2/dist/drm/i915/i915_gem.c:1.40 Mon Aug 27 07:06:38 2018
+++ src/sys/external/bsd/drm2/dist/drm/i915/i915_gem.c Mon Aug 27 07:17:35 2018
@@ -1,4 +1,4 @@
-/* $NetBSD: i915_gem.c,v 1.40 2018/08/27 07:06:38 riastradh Exp $ */
+/* $NetBSD: i915_gem.c,v 1.41 2018/08/27 07:17:35 riastradh Exp $ */
/*
* Copyright © 2008-2015 Intel Corporation
@@ -28,7 +28,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: i915_gem.c,v 1.40 2018/08/27 07:06:38 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i915_gem.c,v 1.41 2018/08/27 07:17:35 riastradh Exp $");
#ifdef __NetBSD__
#if 0 /* XXX uvmhist option? */
@@ -241,12 +241,7 @@ i915_gem_object_get_pages_phys(struct dr
i915_gem_chipset_flush(obj->base.dev);
#ifdef __NetBSD__
- /*
- * We don't need to store anything here -- the phys handle is
- * enough. But obj->pages can't be null while the pages are
- * mapped.
- */
- obj->pages = (void *)1;
+ obj->pages = obj->phys_handle->dmah_map;
#else
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
@@ -1100,10 +1095,10 @@ i915_gem_shmem_pwrite(struct drm_device
atop(offset));
#else
struct page *page = sg_page_iter_page(&sg_iter);
-#endif
if (remain <= 0)
break;
+#endif
/* Operation in this page
*
@@ -2647,10 +2642,10 @@ i915_gem_object_invalidate(struct drm_i9
#endif
}
-#ifdef __NetBSD__
static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
+#ifdef __NetBSD__
struct drm_device *const dev = obj->base.dev;
struct vm_page *page;
int ret;
@@ -2666,6 +2661,8 @@ i915_gem_object_put_pages_gtt(struct drm
I915_GEM_DOMAIN_CPU;
}
+ i915_gem_gtt_finish_object(obj);
+
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_save_bit_17_swizzle(obj);
@@ -2673,22 +2670,16 @@ i915_gem_object_put_pages_gtt(struct drm
obj->dirty = 0;
if (obj->dirty) {
- TAILQ_FOREACH(page, &obj->igo_pageq, pageq.queue) {
+ TAILQ_FOREACH(page, &obj->pageq, pageq.queue) {
page->flags &= ~PG_CLEAN;
/* XXX mark page accessed */
}
}
+ obj->dirty = 0;
- bus_dmamap_destroy(dev->dmat, obj->igo_dmamap);
- bus_dmamem_unwire_uvm_object(dev->dmat, obj->base.gemo_shm_uao, 0,
- obj->base.size, obj->pages, obj->igo_nsegs);
-
- kfree(obj->pages);
-}
+ uvm_obj_unwirepages(obj->base.gemo_shm_uao, 0, obj->base.size);
+ bus_dmamap_destroy(dev->dmat, obj->pages);
#else
-static void
-i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
-{
struct sg_page_iter sg_iter;
int ret;
@@ -2727,8 +2718,8 @@ i915_gem_object_put_pages_gtt(struct drm
sg_free_table(obj->pages);
kfree(obj->pages);
-}
#endif
+}
int
i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
@@ -2756,44 +2747,43 @@ i915_gem_object_put_pages(struct drm_i91
return 0;
}
-#ifdef __NetBSD__
static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
+#ifdef __NetBSD__
struct drm_device *const dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct vm_page *page;
- int error;
+ int ret;
- /* XXX Cargo-culted from the Linux code. */
BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
KASSERT(obj->pages == NULL);
- TAILQ_INIT(&obj->igo_pageq);
- obj->pages = kcalloc((obj->base.size / PAGE_SIZE),
- sizeof(obj->pages[0]), GFP_KERNEL);
- if (obj->pages == NULL) {
- error = -ENOMEM;
+ TAILQ_INIT(&obj->pageq);
+
+ /* XXX errno NetBSD->Linux */
+ ret = -bus_dmamap_create(dev->dmat, obj->base.size,
+ obj->base.size/PAGE_SIZE, PAGE_SIZE, 0, BUS_DMA_NOWAIT,
+ &obj->pages);
+ if (ret)
goto fail0;
- }
/* XXX errno NetBSD->Linux */
- error = -bus_dmamem_wire_uvm_object(dev->dmat, obj->base.gemo_shm_uao,
- 0, obj->base.size, &obj->igo_pageq, PAGE_SIZE, 0, obj->pages,
- (obj->base.size / PAGE_SIZE), &obj->igo_nsegs, BUS_DMA_NOWAIT);
- if (error)
- /* XXX Try i915_gem_purge, i915_gem_shrink_all. */
+ ret = -uvm_obj_wirepages(obj->base.gemo_shm_uao, 0, obj->base.size,
+ &obj->pageq);
+ if (ret) /* XXX Try purge, shrink. */
goto fail1;
- KASSERT(0 < obj->igo_nsegs);
- KASSERT(obj->igo_nsegs <= (obj->base.size / PAGE_SIZE));
/*
* Check that the paddrs will fit in 40 bits, or 32 bits on i965.
*
- * XXX This is wrong; we ought to pass this constraint to
- * bus_dmamem_wire_uvm_object instead.
+ * XXX This should be unnecessary: the uao should guarantee
+ * this constraint after uao_set_pgfl.
+ *
+ * XXX This should also be expanded for newer devices.
*/
- TAILQ_FOREACH(page, &obj->igo_pageq, pageq.queue) {
+ TAILQ_FOREACH(page, &obj->pageq, pageq.queue) {
const uint64_t mask =
(IS_BROADWATER(dev) || IS_CRESTLINE(dev)?
0xffffffffULL : 0xffffffffffULL);
@@ -2802,37 +2792,33 @@ i915_gem_object_get_pages_gtt(struct drm
": %"PRIxMAX"\n",
popcount64(mask),
(uintmax_t)VM_PAGE_TO_PHYS(page));
- error = -EIO;
+ ret = -EIO;
goto fail2;
}
}
- /* XXX Should create the DMA map when creating the object. */
-
- /* XXX errno NetBSD->Linux */
- error = -bus_dmamap_create(dev->dmat, obj->base.size, obj->igo_nsegs,
- PAGE_SIZE, 0, BUS_DMA_NOWAIT, &obj->igo_dmamap);
- if (error)
+ ret = i915_gem_gtt_prepare_object(obj);
+ if (ret)
goto fail2;
- /* XXX Cargo-culted from the Linux code. */
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_do_bit_17_swizzle(obj);
+ if (obj->tiling_mode != I915_TILING_NONE &&
+ dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ i915_gem_object_pin_pages(obj);
+
/* Success! */
return 0;
-fail2: bus_dmamem_unwire_uvm_object(dev->dmat, obj->base.gemo_shm_uao, 0,
- obj->base.size, obj->pages, (obj->base.size / PAGE_SIZE));
-fail1: kfree(obj->pages);
+fail3: __unused
+ i915_gem_gtt_finish_object(obj);
+fail2: uvm_obj_unwirepages(obj->base.gemo_shm_uao, 0, obj->base.size);
+fail1: bus_dmamap_destroy(dev->dmat, obj->pages);
obj->pages = NULL;
-fail0: KASSERT(error);
- return error;
-}
+fail0: KASSERT(ret);
+ return ret;
#else
-static int
-i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
-{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
int page_count, i;
struct address_space *mapping;
@@ -2952,8 +2938,8 @@ err_pages:
ret = -ENOMEM;
return ret;
-}
#endif
+}
/* Ensure that the associated pages are gathered from the backing storage
* and pinned into our object. i915_gem_object_get_pages() may be called
@@ -4193,7 +4179,7 @@ i915_gem_clflush_object(struct drm_i915_
trace_i915_gem_object_clflush(obj);
#ifdef __NetBSD__
- drm_clflush_pglist(&obj->igo_pageq);
+ drm_clflush_pglist(&obj->pageq);
#else
drm_clflush_sg(obj->pages);
#endif
@@ -5886,11 +5872,18 @@ struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_device *dev,
const void *data, size_t size)
{
+ struct drm_i915_gem_object *obj;
#ifdef __NetBSD__
- panic("XXX");
+ struct iovec iov = { .iov_base = __UNCONST(data), .iov_len = size };
+ struct uio uio = {
+ .uio_iov = &iov,
+ .uio_iovcnt = 1,
+ .uio_resid = size,
+ .uio_rw = UIO_WRITE,
+ .uio_vmspace = vmspace_kernel(), /* kernel-space source buffer */
+ };
#else
- struct drm_i915_gem_object *obj;
struct sg_table *sg;
+#endif
size_t bytes;
int ret;
@@ -5907,8 +5900,16 @@ i915_gem_object_create_from_data(struct
goto fail;
i915_gem_object_pin_pages(obj);
+#ifdef __NetBSD__
+ /* XXX errno NetBSD->Linux */
+ ret = -ubc_uiomove(obj->base.gemo_shm_uao, &uio, size, UVM_ADV_NORMAL,
+ UBC_WRITE);
+ if (ret)
+ goto fail;
+ bytes = size - uio.uio_resid; /* for the size check below */
+#else
sg = obj->pages;
bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
+#endif
i915_gem_object_unpin_pages(obj);
if (WARN_ON(bytes != size)) {
@@ -5922,5 +5923,4 @@ i915_gem_object_create_from_data(struct
fail:
drm_gem_object_unreference(&obj->base);
return ERR_PTR(ret);
-#endif
}
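
Setup and teardown in the new get_pages_gtt/put_pages_gtt above pair
three operations in strict order.  A condensed sketch of that ordering,
assuming the same APIs the hunks use; in the real code the load/unload
steps live behind i915_gem_gtt_prepare_object and
i915_gem_gtt_finish_object, and the function name here is hypothetical:

    static int
    example_wire_and_load(struct drm_i915_gem_object *obj)
    {
            struct drm_device *dev = obj->base.dev;
            int ret;

            TAILQ_INIT(&obj->pageq);

            /* 1. Create an unloaded map with one page-sized segment
             *    slot per page; this is now the only allocation. */
            ret = -bus_dmamap_create(dev->dmat, obj->base.size,
                obj->base.size/PAGE_SIZE, PAGE_SIZE, 0, BUS_DMA_NOWAIT,
                &obj->pages);
            if (ret)
                    return ret;

            /* 2. Wire the shmem uao's pages onto obj->pageq. */
            ret = -uvm_obj_wirepages(obj->base.gemo_shm_uao, 0,
                obj->base.size, &obj->pageq);
            if (ret)
                    goto fail0;

            /* 3. Load the map straight from the pglist. */
            ret = -bus_dmamap_load_pglist(dev->dmat, obj->pages,
                &obj->pageq, obj->base.size, BUS_DMA_NOWAIT);
            if (ret)
                    goto fail1;

            return 0;

            /* Teardown runs the same steps in reverse. */
    fail1:  uvm_obj_unwirepages(obj->base.gemo_shm_uao, 0,
                obj->base.size);
    fail0:  bus_dmamap_destroy(dev->dmat, obj->pages);
            obj->pages = NULL;
            return ret;
    }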
Index: src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_fence.c
diff -u src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_fence.c:1.3 src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_fence.c:1.4
--- src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_fence.c:1.3 Mon Aug 27 07:07:33 2018
+++ src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_fence.c Mon Aug 27 07:17:35 2018
@@ -1,4 +1,4 @@
-/* $NetBSD: i915_gem_fence.c,v 1.3 2018/08/27 07:07:33 riastradh Exp $ */
+/* $NetBSD: i915_gem_fence.c,v 1.4 2018/08/27 07:17:35 riastradh Exp $ */
/*
* Copyright © 2008-2015 Intel Corporation
@@ -24,7 +24,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: i915_gem_fence.c,v 1.3 2018/08/27 07:07:33 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i915_gem_fence.c,v 1.4 2018/08/27 07:17:35 riastradh Exp $");
#include <drm/drmP.h>
#include <drm/i915_drm.h>
@@ -762,7 +762,7 @@ i915_gem_object_do_bit_17_swizzle(struct
#ifdef __NetBSD__
i = 0;
- TAILQ_FOREACH(page, &obj->igo_pageq, pageq.queue) {
+ TAILQ_FOREACH(page, &obj->pageq, pageq.queue) {
unsigned char new_bit_17 = VM_PAGE_TO_PHYS(page) >> 17;
if ((new_bit_17 & 0x1) !=
(test_bit(i, obj->bit_17) != 0)) {
@@ -818,7 +818,7 @@ i915_gem_object_save_bit_17_swizzle(stru
i = 0;
#ifdef __NetBSD__
- TAILQ_FOREACH(page, &obj->igo_pageq, pageq.queue) {
+ TAILQ_FOREACH(page, &obj->pageq, pageq.queue) {
if (ISSET(VM_PAGE_TO_PHYS(page), __BIT(17)))
__set_bit(i, obj->bit_17);
else
Index: src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_render_state.c
diff -u src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_render_state.c:1.3 src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_render_state.c:1.4
--- src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_render_state.c:1.3 Mon Aug 27 07:16:40 2018
+++ src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_render_state.c Mon Aug 27 07:17:35 2018
@@ -1,4 +1,4 @@
-/* $NetBSD: i915_gem_render_state.c,v 1.3 2018/08/27 07:16:40 riastradh Exp $ */
+/* $NetBSD: i915_gem_render_state.c,v 1.4 2018/08/27 07:17:35 riastradh Exp $ */
/*
* Copyright © 2014 Intel Corporation
@@ -28,7 +28,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: i915_gem_render_state.c,v 1.3 2018/08/27 07:16:40 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i915_gem_render_state.c,v 1.4 2018/08/27 07:17:35 riastradh Exp $");
#include "i915_drv.h"
#include "intel_renderstate.h"
@@ -101,7 +101,7 @@ static int render_state_setup(struct ren
const struct intel_renderstate_rodata *rodata = so->rodata;
unsigned int i = 0, reloc_index = 0;
#ifdef __NetBSD__
- void *kva;
+ vaddr_t kva;
#else
struct page *page;
#endif
@@ -114,11 +114,12 @@ static int render_state_setup(struct ren
#ifdef __NetBSD__
/* XXX errno NetBSD->Linux */
- ret = -bus_dmamem_map(so->obj->base.dev->dmat, so->obj->pages,
- so->obj->igo_nsegs, PAGE_SIZE, &kva, BUS_DMA_WAITOK);
+ ret = -uvm_map(kernel_map, &kva, PAGE_SIZE, so->obj->base.gemo_shm_uao,
+ 0, sizeof(*d), UVM_MAPFLAG(UVM_PROT_W, UVM_PROT_W, UVM_INH_NONE,
+ UVM_ADV_NORMAL, 0));
if (ret)
return ret;
- d = kva;
+ d = (void *)kva;
#else
page = sg_page(so->obj->pages->sgl);
d = kmap(page);
@@ -166,7 +167,7 @@ static int render_state_setup(struct ren
#endif
#ifdef __NetBSD__
- bus_dmamem_unmap(so->obj->base.dev->dmat, kva, PAGE_SIZE);
+ uvm_unmap(kernel_map, kva, kva + PAGE_SIZE);
#else
kunmap(page);
#endif
@@ -184,7 +185,7 @@ static int render_state_setup(struct ren
err_out:
#ifdef __NetBSD__
- bus_dmamem_unmap(so->obj->base.dev->dmat, kva, PAGE_SIZE);
+ uvm_unmap(kernel_map, kva, kva + PAGE_SIZE);
#else
kunmap(page);
#endif
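
For CPU access the render-state code now borrows a page-sized window in
kernel_map from the object's backing uao rather than bus_dmamem_map.  A
hypothetical minimal pair showing the pattern in isolation (the names
and the read/write protection are illustrative; the hunk above maps
write-only):

    /* Map one page of the uao into kernel_map for CPU access. */
    static int
    example_map_page(struct uvm_object *uao, vaddr_t *kvap)
    {
            vaddr_t kva = 0;
            int ret;

            /* The mapping consumes a reference to the uao. */
            uao_reference(uao);

            /* XXX errno NetBSD->Linux */
            ret = -uvm_map(kernel_map, &kva, PAGE_SIZE, uao, 0, 0,
                UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
                    UVM_ADV_NORMAL, 0));
            if (ret) {
                    uao_detach(uao);
                    return ret;
            }

            *kvap = kva;
            return 0;
    }

    /* Unmapping tears down the window and drops its uao reference. */
    static void
    example_unmap_page(vaddr_t kva)
    {
            uvm_unmap(kernel_map, kva, kva + PAGE_SIZE);
    }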
Index: src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_gtt.c
diff -u src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_gtt.c:1.10 src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_gtt.c:1.11
--- src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_gtt.c:1.10 Mon Aug 27 07:08:07 2018
+++ src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_gtt.c Mon Aug 27 07:17:35 2018
@@ -1,4 +1,4 @@
-/* $NetBSD: i915_gem_gtt.c,v 1.10 2018/08/27 07:08:07 riastradh Exp $ */
+/* $NetBSD: i915_gem_gtt.c,v 1.11 2018/08/27 07:17:35 riastradh Exp $ */
/*
* Copyright © 2010 Daniel Vetter
@@ -26,8 +26,9 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: i915_gem_gtt.c,v 1.10 2018/08/27 07:08:07 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i915_gem_gtt.c,v 1.11 2018/08/27 07:17:35 riastradh Exp $");
+#include <linux/bitmap.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include <drm/drmP.h>
@@ -38,6 +39,7 @@ __KERNEL_RCSID(0, "$NetBSD: i915_gem_gtt
#include "intel_drv.h"
#ifdef __NetBSD__
+#include <drm/bus_dma_hacks.h>
#include <x86/machdep.h>
#include <x86/pte.h>
#define _PAGE_PRESENT PG_V /* 0x01 PTE is present / valid */
@@ -835,6 +837,45 @@ static void gen8_ppgtt_clear_range(struc
}
}
+#ifdef __NetBSD__
+static void
+gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
+ struct i915_page_directory_pointer *pdp, bus_dmamap_t dmamap,
+ unsigned *segp, uint64_t start, enum i915_cache_level cache_level)
+{
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
+ gen8_pte_t *pt_vaddr;
+ unsigned pdpe = gen8_pdpe_index(start);
+ unsigned pde = gen8_pde_index(start);
+ unsigned pte = gen8_pte_index(start);
+
+ pt_vaddr = NULL;
+ for (; *segp < dmamap->dm_nsegs; (*segp)++) {
+ KASSERT(dmamap->dm_segs[*segp].ds_len == PAGE_SIZE);
+ if (pt_vaddr == NULL) {
+ struct i915_page_directory *pd =
+ pdp->page_directory[pdpe];
+ struct i915_page_table *pt = pd->page_table[pde];
+ pt_vaddr = kmap_px(pt);
+ }
+ pt_vaddr[pte] = gen8_pte_encode(dmamap->dm_segs[*segp].ds_addr,
+ cache_level, true);
+ if (++pte == GEN8_PTES) {
+ kunmap_px(ppgtt, pt_vaddr);
+ pt_vaddr = NULL;
+ if (++pde == I915_PDES) {
+ if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
+ break;
+ pde = 0;
+ }
+ pte = 0;
+ }
+ }
+ if (pt_vaddr)
+ kunmap_px(ppgtt, pt_vaddr);
+}
+#else
static void
gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
struct i915_page_directory_pointer *pdp,
@@ -876,7 +917,33 @@ gen8_ppgtt_insert_pte_entries(struct i91
if (pt_vaddr)
kunmap_px(ppgtt, pt_vaddr);
}
+#endif
+
+#ifdef __NetBSD__
+static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
+ bus_dmamap_t dmamap, uint64_t start, enum i915_cache_level cache_level,
+ u32 unused)
+{
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
+ unsigned seg = 0;
+ if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
+ gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, dmamap, &seg,
+ start, cache_level);
+ } else {
+ struct i915_page_directory_pointer *pdp;
+ uint64_t templ4, pml4e;
+ uint64_t length = dmamap->dm_mapsize;
+
+ gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4,
+ pml4e) {
+ gen8_ppgtt_insert_pte_entries(vm, pdp, dmamap, &seg,
+ start, cache_level);
+ }
+ }
+}
+#else
static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages,
uint64_t start,
@@ -903,6 +970,7 @@ static void gen8_ppgtt_insert_entries(st
}
}
}
+#endif
static void gen8_free_page_tables(struct drm_device *dev,
struct i915_page_directory *pd)
@@ -1444,6 +1512,7 @@ static int gen8_alloc_va_range(struct i9
return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
}
+#ifndef __NetBSD__ /* XXX debugfs */
static void gen8_dump_pdp(struct i915_page_directory_pointer *pdp,
uint64_t start, uint64_t length,
gen8_pte_t scratch_pte,
@@ -1526,6 +1595,7 @@ static void gen8_dump_ppgtt(struct i915_
}
}
}
+#endif
static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
{
@@ -1576,7 +1646,9 @@ static int gen8_ppgtt_init(struct i915_h
ppgtt->base.clear_range = gen8_ppgtt_clear_range;
ppgtt->base.unbind_vma = ppgtt_unbind_vma;
ppgtt->base.bind_vma = ppgtt_bind_vma;
+#ifndef __NetBSD__ /* XXX debugfs */
ppgtt->debug_dump = gen8_dump_ppgtt;
+#endif
if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
ret = setup_px(ppgtt->base.dev, &ppgtt->pml4);
@@ -1615,7 +1687,7 @@ free_scratch:
return ret;
}
-#ifndef __NetBSD__
+#ifndef __NetBSD__ /* XXX debugfs */
static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
struct i915_address_space *vm = &ppgtt->base;
@@ -1905,16 +1977,15 @@ static void gen6_ppgtt_clear_range(struc
#ifdef __NetBSD__
static void
gen6_ppgtt_insert_entries(struct i915_address_space *vm, bus_dmamap_t dmamap,
- uint64_t start, enum i915_cache_level cache_level)
+ uint64_t start, enum i915_cache_level cache_level, uint32_t flags)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
- gen6_gtt_pte_t *pt_vaddr;
+ gen6_pte_t *pt_vaddr;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned act_pt = first_entry / GEN6_PTES;
unsigned act_pte = first_entry % GEN6_PTES;
unsigned seg;
- int ret;
pt_vaddr = NULL;
KASSERT(0 < dmamap->dm_nsegs);
@@ -1922,11 +1993,9 @@ gen6_ppgtt_insert_entries(struct i915_ad
KASSERT(dmamap->dm_segs[seg].ds_len == PAGE_SIZE);
if (pt_vaddr == NULL)
pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
-
pt_vaddr[act_pte] =
- vm->pte_encode(dmamap->dm_segs[seg].ds_addr,
- cache_level, true, flags);
-
+ vm->pte_encode(dmamap->dm_segs[seg].ds_addr, cache_level,
+ true, flags);
if (++act_pte == GEN6_PTES) {
kunmap_px(ppgtt, pt_vaddr);
pt_vaddr = NULL;
@@ -2169,7 +2238,7 @@ static int gen6_ppgtt_alloc(struct i915_
static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
uint64_t start, uint64_t length)
{
- struct i915_page_table *unused;
+ struct i915_page_table *unused __unused;
uint32_t pde, temp;
gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde)
@@ -2214,14 +2283,16 @@ static int gen6_ppgtt_init(struct i915_h
ppgtt->pd.base.ggtt_offset =
ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
+#ifndef __NetBSD__
ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
+#endif
gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
gen6_write_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->base.total);
- DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
+ DRM_DEBUG_DRIVER("Allocated pde space (%"PRId64"M) at GTT entry: %"PRIx64"\n",
ppgtt->node.size >> 20,
ppgtt->node.start / PAGE_SIZE);
@@ -2445,8 +2516,8 @@ int i915_gem_gtt_prepare_object(struct d
#ifdef __NetBSD__
KASSERT(0 < obj->base.size);
/* XXX errno NetBSD->Linux */
- return -bus_dmamap_load_raw(obj->base.dev->dmat, obj->igo_dmamap,
- obj->pages, obj->igo_nsegs, obj->base.size, BUS_DMA_NOWAIT);
+ return -bus_dmamap_load_pglist(obj->base.dev->dmat, obj->pages,
+ &obj->pageq, obj->base.size, BUS_DMA_NOWAIT);
#else
if (!dma_map_sg(&obj->base.dev->pdev->dev,
obj->pages->sgl, obj->pages->nents,
@@ -2458,11 +2529,11 @@ int i915_gem_gtt_prepare_object(struct d
}
#ifdef __NetBSD__
-static uint64_t
+static gen8_pte_t
gen8_get_pte(bus_space_tag_t bst, bus_space_handle_t bsh, unsigned i)
{
CTASSERT(_BYTE_ORDER == _LITTLE_ENDIAN); /* x86 */
- CTASSERT(sizeof(gen8_gtt_pte_t) == 8);
+ CTASSERT(sizeof(gen8_pte_t) == 8);
#ifdef _LP64 /* XXX How to detect bus_space_read_8? */
return bus_space_read_8(bst, bsh, 8*i);
#else
@@ -2477,10 +2548,10 @@ gen8_get_pte(bus_space_tag_t bst, bus_sp
static inline void
gen8_set_pte(bus_space_tag_t bst, bus_space_handle_t bsh, unsigned i,
- gen8_gtt_pte_t pte)
+ gen8_pte_t pte)
{
CTASSERT(_BYTE_ORDER == _LITTLE_ENDIAN); /* x86 */
- CTASSERT(sizeof(gen8_gtt_pte_t) == 8);
+ CTASSERT(sizeof(gen8_pte_t) == 8);
#ifdef _LP64 /* XXX How to detect bus_space_write_8? */
bus_space_write_8(bst, bsh, 8*i, pte);
#else
@@ -2503,7 +2574,7 @@ static void gen8_set_pte(void __iomem *a
#ifdef __NetBSD__
static void
gen8_ggtt_insert_entries(struct i915_address_space *vm, bus_dmamap_t dmamap,
- uint64_t start, enum i915_cache_level level)
+ uint64_t start, enum i915_cache_level level, uint32_t unused_flags)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
@@ -2577,7 +2648,7 @@ static void gen8_ggtt_insert_entries(str
#ifdef __NetBSD__
static void
gen6_ggtt_insert_entries(struct i915_address_space *vm, bus_dmamap_t dmamap,
- uint64_t start, enum i915_cache_level level)
+ uint64_t start, enum i915_cache_level level, uint32_t flags)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
@@ -2588,15 +2659,16 @@ gen6_ggtt_insert_entries(struct i915_add
KASSERT(0 < dmamap->dm_nsegs);
for (i = 0; i < dmamap->dm_nsegs; i++) {
KASSERT(dmamap->dm_segs[i].ds_len == PAGE_SIZE);
- CTASSERT(sizeof(gen6_gtt_pte_t) == 4);
+ CTASSERT(sizeof(gen6_pte_t) == 4);
bus_space_write_4(bst, bsh, 4*(first_entry + i),
- vm->pte_encode(dmamap->dm_segs[i].ds_addr, level, true));
+ vm->pte_encode(dmamap->dm_segs[i].ds_addr, level, true,
+ flags));
}
if (0 < i) {
/* Posting read. */
WARN_ON(bus_space_read_4(bst, bsh, 4*(first_entry + i - 1))
!= vm->pte_encode(dmamap->dm_segs[i - 1].ds_addr, level,
- true));
+ true, flags));
}
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
@@ -2669,7 +2741,6 @@ static void gen8_ggtt_clear_range(struct
I915_CACHE_LLC,
use_scratch);
#ifdef __NetBSD__
- CTASSERT(sizeof(gen8_gtt_pte_t) == 8);
for (i = 0; i < num_entries; i++)
gen8_set_pte(bst, bsh, first_entry + i, scratch_pte);
(void)gen8_get_pte(bst, bsh, first_entry);
@@ -2708,7 +2779,7 @@ static void gen6_ggtt_clear_range(struct
I915_CACHE_LLC, use_scratch, 0);
#ifdef __NetBSD__
- CTASSERT(sizeof(gen6_gtt_pte_t) == 4);
+ CTASSERT(sizeof(gen6_pte_t) == 4);
for (i = 0; i < num_entries; i++)
bus_space_write_4(bst, bsh, 4*(first_entry + i), scratch_pte);
(void)bus_space_read_4(bst, bsh, 4*first_entry);
@@ -2850,7 +2921,7 @@ void i915_gem_gtt_finish_object(struct d
interruptible = do_idling(dev_priv);
#ifdef __NetBSD__
- bus_dmamap_unload(dev->dmat, obj->igo_dmamap);
+ bus_dmamap_unload(dev->dmat, obj->pages);
#else
dma_unmap_sg(&dev->pdev->dev, obj->pages->sgl, obj->pages->nents,
PCI_DMA_BIDIRECTIONAL);
@@ -2920,7 +2991,7 @@ static int i915_gem_setup_global_gtt(str
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
- DRM_DEBUG_KMS("reserving preallocated space: %llx + %zx\n",
+ DRM_DEBUG_KMS("reserving preallocated space: %"PRIx64" + %zx\n",
i915_gem_obj_ggtt_offset(obj), obj->base.size);
WARN_ON(i915_gem_obj_ggtt_bound(obj));
@@ -3105,10 +3176,11 @@ static int ggtt_probe_common(struct drm_
(pci_resource_len(dev->pdev, 0) / 2);
#ifdef __NetBSD__
+ int ret;
dev_priv->gtt.bst = dev->pdev->pd_pa.pa_memt;
/* XXX errno NetBSD->Linux */
ret = -bus_space_map(dev_priv->gtt.bst, gtt_phys_addr, gtt_size,
- IS_PROXTON(dev) ? 0 : BUS_SPACE_MAP_PREFETCHABLE,
+ IS_BROXTON(dev) ? 0 : BUS_SPACE_MAP_PREFETCHABLE,
&dev_priv->gtt.bsh);
if (ret) {
DRM_ERROR("Failed to map the graphics translation table: %d\n",
@@ -3296,7 +3368,7 @@ static int gen6_gmch_probe(struct drm_de
* a coarse sanity check.
*/
if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
- DRM_ERROR("Unknown GMADR size (%llx)\n",
+ DRM_ERROR("Unknown GMADR size (%"PRIx64")\n",
dev_priv->gtt.mappable_end);
return -ENXIO;
}
@@ -3425,9 +3497,9 @@ int i915_gem_gtt_init(struct drm_device
#endif
/* GMADR is the PCI mmio aperture into the global GTT. */
- DRM_INFO("Memory usable by graphics device = %lluM\n",
+ DRM_INFO("Memory usable by graphics device = %"PRIu64"M\n",
gtt->base.total >> 20);
- DRM_DEBUG_DRIVER("GMADR size = %lldM\n", gtt->mappable_end >> 20);
+ DRM_DEBUG_DRIVER("GMADR size = %"PRId64"M\n", gtt->mappable_end >> 20);
DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
#ifdef CONFIG_INTEL_IOMMU
if (intel_iommu_gfx_mapped)
Index: src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_stolen.c
diff -u src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_stolen.c:1.8 src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_stolen.c:1.9
--- src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_stolen.c:1.8 Mon Aug 27 07:16:00 2018
+++ src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_stolen.c Mon Aug 27 07:17:35 2018
@@ -1,4 +1,4 @@
-/* $NetBSD: i915_gem_stolen.c,v 1.8 2018/08/27 07:16:00 riastradh Exp $ */
+/* $NetBSD: i915_gem_stolen.c,v 1.9 2018/08/27 07:17:35 riastradh Exp $ */
/*
* Copyright © 2008-2012 Intel Corporation
@@ -29,7 +29,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: i915_gem_stolen.c,v 1.8 2018/08/27 07:16:00 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i915_gem_stolen.c,v 1.9 2018/08/27 07:17:35 riastradh Exp $");
#include <linux/printk.h>
#include <linux/err.h>
@@ -518,14 +518,60 @@ int i915_gem_init_stolen(struct drm_devi
return 0;
}
-#ifndef __NetBSD__
+#ifdef __NetBSD__
+static bus_dmamap_t
+i915_pages_create_for_stolen(struct drm_device *dev, u32 offset, u32 size)
+{
+ struct drm_i915_private *const dev_priv = dev->dev_private;
+ bus_dmamap_t dmamap = NULL;
+ bus_dma_segment_t *seg;
+ int nseg, i;
+ int ret;
+
+ KASSERT((size % PAGE_SIZE) == 0);
+ nseg = size / PAGE_SIZE;
+ seg = kmem_alloc(nseg * sizeof(seg[0]), KM_SLEEP);
+
+ /*
+ * x86 bus_dmamap_load_raw fails to respect the maxsegsz we
+ * pass to bus_dmamap_create, so we have to create page-sized
+ * segments to begin with.
+ */
+ for (i = 0; i < nseg; i++) {
+ seg[i].ds_addr = (bus_addr_t)dev_priv->mm.stolen_base +
+ offset + i*PAGE_SIZE;
+ seg[i].ds_len = PAGE_SIZE;
+ }
+
+ /* XXX errno NetBSD->Linux */
+ ret = -bus_dmamap_create(dev->dmat, size, nseg, PAGE_SIZE,
+ 0, BUS_DMA_WAITOK, &dmamap);
+ if (ret) {
+ DRM_ERROR("failed to create DMA map for stolen object: %d\n",
+ ret);
+fail0: dmamap = NULL; /* paranoia */
+ goto out;
+ }
+
+ /* XXX errno NetBSD->Linux */
+ ret = -bus_dmamap_load_raw(dev->dmat, dmamap, seg, nseg, size,
+ BUS_DMA_WAITOK);
+ if (ret) {
+ DRM_ERROR("failed to load DMA map for stolen object: %d\n",
+ ret);
+fail1: __unused
+ bus_dmamap_destroy(dev->dmat, dmamap);
+ goto fail0;
+ }
+
+out: kmem_free(seg, nseg*sizeof(seg[0]));
+ return dmamap;
+}
+#else
static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
u32 offset, u32 size)
{
-#ifdef __NetBSD__
- panic("XXX");
-#else
struct drm_i915_private *dev_priv = dev->dev_private;
struct sg_table *st;
struct scatterlist *sg;
@@ -555,7 +601,6 @@ i915_pages_create_for_stolen(struct drm_
sg_dma_len(sg) = size;
return st;
-#endif
}
#endif
@@ -569,9 +614,8 @@ static void i915_gem_object_put_pages_st
{
/* Should only be called during free */
#ifdef __NetBSD__
- bus_dmamap_unload(obj->base.dev->dmat, obj->igo_dmamap);
- bus_dmamap_destroy(obj->base.dev->dmat, obj->igo_dmamap);
- kmem_free(obj->pages, (obj->igo_nsegs * sizeof(obj->pages[0])));
+ bus_dmamap_unload(obj->base.dev->dmat, obj->pages);
+ bus_dmamap_destroy(obj->base.dev->dmat, obj->pages);
#else
sg_free_table(obj->pages);
kfree(obj->pages);
@@ -601,11 +645,6 @@ _i915_gem_object_create_stolen(struct dr
struct drm_mm_node *stolen)
{
struct drm_i915_gem_object *obj;
-#ifdef __NetBSD__
- struct drm_i915_private *const dev_priv = dev->dev_private;
- unsigned i;
- int error;
-#endif
obj = i915_gem_object_alloc(dev);
if (obj == NULL)
@@ -614,47 +653,10 @@ _i915_gem_object_create_stolen(struct dr
drm_gem_private_object_init(dev, &obj->base, stolen->size);
i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
-#ifdef __NetBSD__
- TAILQ_INIT(&obj->igo_pageq); /* XXX Need to fill this... */
- KASSERT((stolen->size % PAGE_SIZE) == 0);
- obj->igo_nsegs = (stolen->size / PAGE_SIZE);
- obj->pages = kmem_alloc((obj->igo_nsegs * sizeof(obj->pages[0])),
- KM_SLEEP);
- /*
- * x86 bus_dmamap_load_raw fails to respect the maxsegsz we
- * pass to bus_dmamap_create, so we have to create page-sized
- * segments to begin with.
- */
- for (i = 0; i < obj->igo_nsegs; i++) {
- obj->pages[i].ds_addr = (bus_addr_t)dev_priv->mm.stolen_base +
- stolen->start + (i*PAGE_SIZE);
- obj->pages[i].ds_len = PAGE_SIZE;
- }
- error = bus_dmamap_create(dev->dmat, obj->base.size, obj->igo_nsegs,
- PAGE_SIZE, 0, BUS_DMA_WAITOK, &obj->igo_dmamap);
- if (error) {
- DRM_ERROR("failed to create DMA map for stolen object: %d\n",
- error);
- kmem_free(obj->pages, sizeof(obj->pages[0]));
- obj->pages = NULL;
- goto cleanup;
- }
- error = bus_dmamap_load_raw(dev->dmat, obj->igo_dmamap, obj->pages,
- obj->igo_nsegs, stolen->size, BUS_DMA_WAITOK);
- if (error) {
- DRM_ERROR("failed to load DMA map for stolen object: %d\n",
- error);
- bus_dmamap_destroy(dev->dmat, obj->igo_dmamap);
- kmem_free(obj->pages, sizeof(obj->pages[0]));
- obj->pages = NULL;
- goto cleanup;
- }
-#else
obj->pages = i915_pages_create_for_stolen(dev,
stolen->start, stolen->size);
if (obj->pages == NULL)
goto cleanup;
-#endif
i915_gem_object_pin_pages(obj);
obj->stolen = stolen;
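
Stolen memory is carved out of RAM that the VM system never sees, so
there are no struct vm_pages to wire: obj->pageq goes unused and
obj->pages is instead loaded from hand-built page-sized physical
segments.  A short usage sketch, assuming the calling convention in the
hunks above (the error value is an illustrative choice):

    bus_dmamap_t map;

    map = i915_pages_create_for_stolen(dev, stolen->start,
        stolen->size);
    if (map == NULL)
            return -ENOMEM;

    /* DMA addresses come straight from the loaded map, as in the
     * i915_guc_submission.c hunk's use of dm_segs[0].ds_addr. */
    bus_addr_t base = map->dm_segs[0].ds_addr;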