Module Name:    src
Committed By:   christos
Date:           Fri Mar 6 22:24:05 UTC 2015
Modified Files:
        src/sys/external/bsd/drm2/dist/drm/i915: i915_gem.c

Log Message:
Dedup the NetBSD portion of the code (ok Riastradh), no functional change.

To generate a diff of this commit:
cvs rdiff -u -r1.27 -r1.28 src/sys/external/bsd/drm2/dist/drm/i915/i915_gem.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
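For readers skimming the diff below: the dedup works by leaving only the
loop head (and the per-page fetch) under the platform conditional, while
the loop body is written once and shared between the NetBSD and Linux
builds. Here is a minimal standalone sketch of that shape; it is not the
driver's code, and the USE_WHILE_HEAD switch and all names are made up
for illustration:

        /*
         * dedup_sketch.c -- only the loop head stays under the
         * conditional; the body is written once.  Compile with
         * -DUSE_WHILE_HEAD for the "NetBSD-style" head, without it
         * for the "iterator-style" head.
         */
        #include <stdio.h>

        #define NPAGES 4

        int
        main(void)
        {
                int pages[NPAGES] = { 1, 2, 3, 4 };
                int remain = NPAGES;
                int idx = 0;

        #ifdef USE_WHILE_HEAD
                while (0 < remain)                      /* one loop head */
        #else
                for (idx = 0; idx < NPAGES; idx++)      /* the other */
        #endif
                {       /* opening brace shared by both heads */
        #ifndef USE_WHILE_HEAD
                        if (remain <= 0)        /* the for head ignores remain */
                                break;
        #endif
                        /* Shared body: compiled identically under either head. */
                        printf("page %d contains %d\n", idx, pages[idx]);
                        remain--;
        #ifdef USE_WHILE_HEAD
                        idx++;          /* the while head advances by hand */
        #endif
                }
                return 0;
        }

Note how the opening brace of the loop body sits outside both branches of
the conditional, exactly as in the committed change below.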
Modified files:

Index: src/sys/external/bsd/drm2/dist/drm/i915/i915_gem.c
diff -u src/sys/external/bsd/drm2/dist/drm/i915/i915_gem.c:1.27 src/sys/external/bsd/drm2/dist/drm/i915/i915_gem.c:1.28
--- src/sys/external/bsd/drm2/dist/drm/i915/i915_gem.c:1.27	Wed Mar  4 20:04:51 2015
+++ src/sys/external/bsd/drm2/dist/drm/i915/i915_gem.c	Fri Mar  6 17:24:05 2015
@@ -705,51 +705,21 @@ i915_gem_shmem_pread(struct drm_device *
 	offset = args->offset;
 
 #ifdef __NetBSD__
-	/*
-	 * XXX This is a big #ifdef with a lot of duplicated code, but
-	 * factoring out the loop head -- which is all that
-	 * substantially differs -- is probably more trouble than it's
-	 * worth at the moment.
-	 */
-	while (0 < remain) {
-		/* Get the next page.  */
-		shmem_page_offset = offset_in_page(offset);
-		KASSERT(shmem_page_offset < PAGE_SIZE);
-		page_length = MIN(remain, (PAGE_SIZE - shmem_page_offset));
+	while (0 < remain)
+#else
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
+	    offset >> PAGE_SHIFT)
+#endif
+	{
+#ifdef __NetBSD__
 		struct page *const page = i915_gem_object_get_page(obj,
 		    atop(offset));
-
-		/* Decide whether to swizzle bit 17.  */
-		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
-		    (page_to_phys(page) & (1 << 17)) != 0;
-
-		/* Try the fast path.  */
-		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
-		    user_data, page_do_bit17_swizzling, needs_clflush);
-		if (ret == 0)
-			goto next_page;
-
-		/* Fast path failed.  Try the slow path.  */
-		mutex_unlock(&dev->struct_mutex);
-		/* XXX prefault */
-		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
-		    user_data, page_do_bit17_swizzling, needs_clflush);
-		mutex_lock(&dev->struct_mutex);
-		if (ret)
-			goto out;
-
-next_page:	KASSERT(page_length <= remain);
-		remain -= page_length;
-		user_data += page_length;
-		offset += page_length;
-	}
 #else
-	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
-		offset >> PAGE_SHIFT) {
 		struct page *page = sg_page_iter_page(&sg_iter);
 
 		if (remain <= 0)
 			break;
+#endif
 
 		/* Operation in this page
 		 *
@@ -771,7 +741,7 @@ next_page:	KASSERT(page_length <= remain
 			goto next_page;
 
 		mutex_unlock(&dev->struct_mutex);
-
+#ifndef __NetBSD__
 		if (likely(!i915.prefault_disable) && !prefaulted) {
 			ret = fault_in_multipages_writeable(user_data, remain);
 			/* Userspace is tricking us, but we've already clobbered
@@ -781,7 +751,7 @@ next_page:	KASSERT(page_length <= remain
 			(void)ret;
 			prefaulted = 1;
 		}
-
+#endif
 		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
 			user_data, page_do_bit17_swizzling,
 			needs_clflush);
@@ -796,7 +766,6 @@ next_page:
 		user_data += page_length;
 		offset += page_length;
 	}
-#endif
 
 out:
 	i915_gem_object_unpin_pages(obj);
@@ -1050,6 +1019,9 @@ i915_gem_shmem_pwrite(struct drm_device
 	int needs_clflush_before = 0;
 #ifndef __NetBSD__
 	struct sg_page_iter sg_iter;
+	int flush_mask = boot_cpu_data.x86_clflush_size - 1;
+#else
+	int flush_mask = cpu_info_primary.ci_cflush_lsize - 1;
 #endif
 
 	user_data = to_user_ptr(args->data_ptr);
@@ -1083,48 +1055,18 @@ i915_gem_shmem_pwrite(struct drm_device
 	obj->dirty = 1;
 
 #ifdef __NetBSD__
-	while (0 < remain) {
-		/* Get the next page.  */
-		shmem_page_offset = offset_in_page(offset);
-		KASSERT(shmem_page_offset < PAGE_SIZE);
-		page_length = MIN(remain, (PAGE_SIZE - shmem_page_offset));
+	while (0 < remain)
+#else
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
+	    offset >> PAGE_SHIFT)
+#endif
+	{
+#ifdef __NetBSD__
 		struct page *const page = i915_gem_object_get_page(obj,
 		    atop(offset));
-
-		/* Decide whether to flush the cache or swizzle bit 17.  */
-		const bool partial_cacheline_write = needs_clflush_before &&
-		    ((shmem_page_offset | page_length)
-			& (cpu_info_primary.ci_cflush_lsize - 1));
-		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
-		    (page_to_phys(page) & (1 << 17)) != 0;
-
-		/* Try the fast path.  */
-		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
-		    user_data, page_do_bit17_swizzling,
-		    partial_cacheline_write, needs_clflush_after);
-		if (ret == 0)
-			goto next_page;
-
-		/* Fast path failed.  Try the slow path.  */
-		hit_slowpath = 1;
-		mutex_unlock(&dev->struct_mutex);
-		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
-		    user_data, page_do_bit17_swizzling,
-		    partial_cacheline_write, needs_clflush_after);
-		mutex_lock(&dev->struct_mutex);
-		if (ret)
-			goto out;
-
-next_page:	KASSERT(page_length <= remain);
-		remain -= page_length;
-		user_data += page_length;
-		offset += page_length;
-	}
 #else
-	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
-		offset >> PAGE_SHIFT) {
 		struct page *page = sg_page_iter_page(&sg_iter);
-		int partial_cacheline_write;
+#endif
 
 		if (remain <= 0)
 			break;
@@ -1143,9 +1085,8 @@ next_page:	KASSERT(page_length <= remain
 		/* If we don't overwrite a cacheline completely we need to be
 		 * careful to have up-to-date data by first clflushing. Don't
 		 * overcomplicate things and flush the entire patch. */
-		partial_cacheline_write = needs_clflush_before &&
-			((shmem_page_offset | page_length)
-				& (boot_cpu_data.x86_clflush_size - 1));
+		const int partial_cacheline_write = needs_clflush_before &&
+		    ((shmem_page_offset | page_length) & flush_mask);
 
 		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
 			(page_to_phys(page) & (1 << 17)) != 0;
@@ -1174,7 +1115,6 @@ next_page:
 		user_data += page_length;
 		offset += page_length;
 	}
-#endif
 
 out:
 	i915_gem_object_unpin_pages(obj);
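The other half of the change hoists the platform-specific cache-line
size lookup into a flush_mask computed once before the loop, so the
partial-cacheline test in the shared body is platform-neutral. A small
standalone sketch of that test follows; CACHE_LINE_SIZE stands in for
boot_cpu_data.x86_clflush_size (Linux) or
cpu_info_primary.ci_cflush_lsize (NetBSD), and the sample values are
made up for the demo:

        /*
         * flush_mask_sketch.c -- the partial-cacheline test from the
         * pwrite hunk above, with the per-platform lookup hoisted out.
         */
        #include <stdbool.h>
        #include <stdio.h>

        #define CACHE_LINE_SIZE 64      /* stand-in for the platform lookup */

        int
        main(void)
        {
                /* Computed once, before the copy loop. */
                const int flush_mask = CACHE_LINE_SIZE - 1;

                bool needs_clflush_before = true;
                int shmem_page_offset = 16;     /* starts mid-cacheline... */
                int page_length = 32;           /* ...and ends mid-cacheline */

                /*
                 * If the start offset or the length has bits set below
                 * the cache-line size, the write only partially covers
                 * a line, so its stale contents must be flushed first.
                 */
                const bool partial_cacheline_write = needs_clflush_before &&
                    ((shmem_page_offset | page_length) & flush_mask);

                printf("partial cacheline write: %s\n",
                    partial_cacheline_write ? "yes" : "no");    /* "yes" */
                return 0;
        }

With the mask hoisted, the loop body no longer references either
platform's CPU-info structure, which is what lets the two copies of the
body collapse into one.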