*crtc,
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ bool gtt_scannable = (adev->asic_type >= CHIP_CARRIZO && adev->flags & AMD_IS_APU);
struct amdgpu_framebuffer *old_amdgpu_fb;
struct amdgpu_framebuffer *new_amdgpu_fb;
struct drm_gem_object *obj;
@@ -190,8 +191,13 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
r = amdgpu_bo_pin(new_abo, AMDGPU_GEM_DOMAIN_VRAM, &base);
if (unlikely(r != 0)) {
- DRM_ERROR("failed to pin new abo buffer before flip\n");
- goto unreserve;
+ /* latest APUs support gtt scan out */
+ if (gtt_scannable)
+ r = amdgpu_bo_pin(new_abo, AMDGPU_GEM_DOMAIN_GTT, &base);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("failed to pin new abo buffer before flip\n");
+ goto unreserve;
+ }
}
r = reservation_object_get_fences_rcu(new_abo->tbo.resv, &work->excl,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 31383e0..df30b08 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -868,7 +868,7 @@ static struct drm_driver kms_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = amdgpu_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_import = amdgpu_gem_prime_import,
.gem_prime_pin = amdgpu_gem_prime_pin,
.gem_prime_unpin = amdgpu_gem_prime_unpin,
.gem_prime_res_obj = amdgpu_gem_prime_res_obj,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index ae9c106..9e1424d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -164,6 +164,82 @@ struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
return bo->tbo.resv;
}
+static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
+ enum dma_data_direction direction) {
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ long i, ret = 0;
+ unsigned old_count;
+ bool reads = (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE);
+ bool gtt_scannable = (adev->asic_type >= CHIP_CARRIZO && adev->flags & AMD_IS_APU);
+ u32 domain;
+
+ if (!reads || !gtt_scannable)
+ return 0;
+
+ ret = amdgpu_bo_reserve(bo, false);
+ if (unlikely(ret != 0))
+ return ret;
+
+ /*
+ * Wait for all shared fences to complete before we switch to future
+ * use of exclusive fence on this prime shared bo.
+ */
+ ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
+ MAX_SCHEDULE_TIMEOUT);
+
+ if (unlikely(ret < 0)) {
+ DRM_DEBUG_PRIME("Fence wait failed: %li\n", ret);
+ amdgpu_bo_unreserve(bo);
+ return ret;
+ }
+
+ ret = 0;
+ /* Pin to gtt */
+ domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+ if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+ old_count = bo->pin_count;
+ for (i = 0; i < old_count; i++)
+ amdgpu_bo_unpin(bo);
+ for (i = 0; i < old_count; i++) {
+ ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
+ if (unlikely(ret != 0))
+ break;
+ }
+ }
+ if (ret == 0)
+ ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
+
+ amdgpu_bo_unreserve(bo);
+ return ret;
+}
+
+static int amdgpu_gem_end_cpu_access(struct dma_buf *dma_buf,
+ enum dma_data_direction direction) {
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
+ int ret = 0;
+ bool reads = (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ bool gtt_scannable = (adev->asic_type >= CHIP_CARRIZO && adev->flags & AMD_IS_APU);
+
+ if (!reads || !gtt_scannable)
+ return 0;
+
+ mb();
+ ret = amdgpu_bo_reserve(bo, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ amdgpu_bo_unpin(bo);
+
+ amdgpu_bo_unreserve(bo);
+
+ return 0;
+}
+
+static struct dma_buf_ops amdgpu_dmabuf_ops;
+static atomic_t aops_lock;
+
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gobj,
int flags)
@@ -178,5 +254,37 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
buf = drm_gem_prime_export(dev, gobj, flags);
if (!IS_ERR(buf))
buf->file->f_mapping = dev->anon_inode->i_mapping;
+
+ while (amdgpu_dmabuf_ops.begin_cpu_access != amdgpu_gem_begin_cpu_access ||
+ amdgpu_dmabuf_ops.end_cpu_access != amdgpu_gem_end_cpu_access)
+ {
+ if (!atomic_cmpxchg(&aops_lock, 0, 1)) {
+ amdgpu_dmabuf_ops = *(buf->ops);
+ amdgpu_dmabuf_ops.begin_cpu_access = amdgpu_gem_begin_cpu_access;
+ amdgpu_dmabuf_ops.end_cpu_access = amdgpu_gem_end_cpu_access;
+ }
+ }
+ buf->ops = &amdgpu_dmabuf_ops;
+
return buf;
}
+
+struct drm_gem_object *amdgpu_gem_prime_import(struct
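
The two CPU-access callbacks added above only run when the importer brackets its CPU reads with the dma-buf sync ioctl; that ioctl is what drives the VRAM-to-GTT migration in amdgpu_gem_begin_cpu_access and the unpin in amdgpu_gem_end_cpu_access. A minimal userspace sketch of such a read-side consumer follows; the helper name read_dmabuf and its fd/size arguments are illustrative and not part of this patch.

/* Hypothetical consumer: brackets a CPU read of an exported dmabuf fd
 * with DMA_BUF_IOCTL_SYNC, which is what invokes the driver's
 * begin_cpu_access/end_cpu_access hooks installed by the patch above.
 */
#include <linux/dma-buf.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <string.h>

static int read_dmabuf(int dmabuf_fd, void *dst, size_t size)
{
	struct dma_buf_sync sync = { 0 };
	void *ptr;
	int ret;

	ptr = mmap(NULL, size, PROT_READ, MAP_SHARED, dmabuf_fd, 0);
	if (ptr == MAP_FAILED)
		return -1;

	/* begin_cpu_access: kernel may migrate the BO (e.g. VRAM -> GTT). */
	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_READ;
	ret = ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
	if (ret)
		goto out;

	memcpy(dst, ptr, size);

	/* end_cpu_access: the BO is unpinned again. */
	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_READ;
	ret = ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
out:
	munmap(ptr, size);
	return ret;
}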