[PATCH v1 2/2] drm/i915/gem: Migrate to system at dma-buf attach time

2021-07-01 Thread Michael J. Ruhl
From: Thomas Hellström 

Until we support p2p dma, or as a complement to it, migrate data
to system memory at dma-buf attach time if possible.
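
For context, a minimal importer-side sketch (not part of this patch;
"importer_dev" and the error handling are illustrative only) of the flow this
change targets: by the time the importer maps the attachment, the exporter has
already migrated the object to system memory and pinned it in its attach
callback.

	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	/* the exporter's attach callback migrates to SMEM and pins pages here */
	attach = dma_buf_attach(dmabuf, importer_dev);
	if (IS_ERR(attach))
		return PTR_ERR(attach);

	/* the returned sg_table therefore describes system memory pages */
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		dma_buf_detach(dmabuf, attach);
		return PTR_ERR(sgt);
	}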

v2:
- Rebase on dynamic exporter. Update the igt_dmabuf_import_same_driver
  selftest to migrate if we are LMEM capable.
v3:
- Migrate also in the pin() callback.
v4:
- Migrate in attach

Signed-off-by: Thomas Hellström 
Signed-off-by: Michael J. Ruhl 
---
 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c   | 12 +++-
 drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c |  4 +++-
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c 
b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index ccae17d5f441..280291a4a9dc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -170,9 +170,19 @@ static int i915_gem_dmabuf_attach(struct dma_buf *dmabuf,
  struct dma_buf_attachment *attach)
 {
struct drm_i915_gem_object *obj = dma_buf_to_obj(dmabuf);
+   int ret;
 
assert_object_held(obj);
-   return i915_gem_object_pin_pages(obj);
+
+   if (!i915_gem_object_can_migrate(obj, INTEL_REGION_SMEM))
+   return -EOPNOTSUPP;
+   ret = i915_gem_object_migrate(obj, NULL, INTEL_REGION_SMEM);
+   if (!ret)
+   ret = i915_gem_object_wait_migration(obj, 0);
+   if (!ret)
+   ret = i915_gem_object_pin_pages(obj);
+
+   return ret;
 }
 
 static void i915_gem_dmabuf_detach(struct dma_buf *dmabuf,
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c 
b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index 868b3469ecbd..b1e87ec08741 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -106,7 +106,9 @@ static int igt_dmabuf_import_same_driver(void *arg)
int err;
 
force_different_devices = true;
-   obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
+   obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, 0);
+   if (IS_ERR(obj))
+   obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
if (IS_ERR(obj))
goto out_ret;
 
-- 
2.31.1



[PATCH v1 1/2] drm/i915/gem: Correct the locking and pin pattern for dma-buf

2021-07-01 Thread Michael J. Ruhl
From: Thomas Hellström 

If our exported dma-bufs are imported by another instance of our driver,
that instance will typically have the imported dma-bufs locked during
dma_buf_map_attachment(). But the exporter also locks the same reservation
object in the map_dma_buf() callback, which leads to recursive locking.

So taking the lock inside _pin_pages_unlocked() is incorrect.

Additionally, the current pinning code path is contrary to the defined
way that pinning should occur.

Remove the explicit pin/unpin from the map/unmap functions and move them
to attach/detach, allowing correct locking to occur and matching the
static dma-buf drm_prime pattern.

Add a live selftest to exercise both dynamic and non-dynamic
exports.

v2:
- Extend the selftest with a fake dynamic importer.
- Provide real pin and unpin callbacks to not abuse the interface.
v3: (ruhl)
- Remove the dynamic export support and move the pinning into the
  attach/detach path.
v4: (ruhl)
- Put pages does not need to assert on the dma-resv

Reported-by: Michael J. Ruhl 
Signed-off-by: Thomas Hellström 
Signed-off-by: Michael J. Ruhl 
---
 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c|  44 +--
 .../drm/i915/gem/selftests/i915_gem_dmabuf.c  | 116 +-
 2 files changed, 146 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c 
b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 616c3a2f1baf..ccae17d5f441 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -12,6 +12,8 @@
 #include "i915_gem_object.h"
 #include "i915_scatterlist.h"
 
+I915_SELFTEST_DECLARE(static bool force_different_devices;)
+
 static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
 {
return to_intel_bo(buf->priv);
@@ -25,15 +27,11 @@ static struct sg_table *i915_gem_map_dma_buf(struct 
dma_buf_attachment *attachme
struct scatterlist *src, *dst;
int ret, i;
 
-   ret = i915_gem_object_pin_pages_unlocked(obj);
-   if (ret)
-   goto err;
-
/* Copy sg so that we make an independent mapping */
st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
if (st == NULL) {
ret = -ENOMEM;
-   goto err_unpin_pages;
+   goto err;
}
 
ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
@@ -58,8 +56,6 @@ static struct sg_table *i915_gem_map_dma_buf(struct 
dma_buf_attachment *attachme
sg_free_table(st);
 err_free:
kfree(st);
-err_unpin_pages:
-   i915_gem_object_unpin_pages(obj);
 err:
return ERR_PTR(ret);
 }
@@ -68,13 +64,9 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment 
*attachment,
   struct sg_table *sg,
   enum dma_data_direction dir)
 {
-   struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
-
dma_unmap_sgtable(attachment->dev, sg, dir, DMA_ATTR_SKIP_CPU_SYNC);
sg_free_table(sg);
kfree(sg);
-
-   i915_gem_object_unpin_pages(obj);
 }
 
 static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map 
*map)
@@ -168,7 +160,32 @@ static int i915_gem_end_cpu_access(struct dma_buf 
*dma_buf, enum dma_data_direct
return err;
 }
 
+/**
+ * i915_gem_dmabuf_attach - Do any extra attach work necessary
+ * @dmabuf: imported dma-buf
+ * @attach: new attach to do work on
+ *
+ */
+static int i915_gem_dmabuf_attach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attach)
+{
+   struct drm_i915_gem_object *obj = dma_buf_to_obj(dmabuf);
+
+   assert_object_held(obj);
+   return i915_gem_object_pin_pages(obj);
+}
+
+static void i915_gem_dmabuf_detach(struct dma_buf *dmabuf,
+  struct dma_buf_attachment *attach)
+{
+   struct drm_i915_gem_object *obj = dma_buf_to_obj(dmabuf);
+
+   i915_gem_object_unpin_pages(obj);
+}
+
 static const struct dma_buf_ops i915_dmabuf_ops =  {
+   .attach = i915_gem_dmabuf_attach,
+   .detach = i915_gem_dmabuf_detach,
.map_dma_buf = i915_gem_map_dma_buf,
.unmap_dma_buf = i915_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
@@ -204,6 +221,8 @@ static int i915_gem_object_get_pages_dmabuf(struct 
drm_i915_gem_object *obj)
struct sg_table *pages;
unsigned int sg_page_sizes;
 
+   assert_object_held(obj);
+
pages = dma_buf_map_attachment(obj->base.import_attach,
   DMA_BIDIRECTIONAL);
if (IS_ERR(pages))
@@ -241,7 +260,8 @@ struct drm_gem_object *i915_gem_prime_import(struct 
drm_device *dev,
	if (dma_buf->ops == &i915_dmabuf_ops) {
obj = dma_buf_to_obj(dma_buf);
/* is it from our device? */
-   if (obj->base.dev == dev) {
+   if (obj->base.dev ==

[PATCH 1/2] drm/i915/gem: Correct the locking and pin pattern for dma-buf

2021-07-01 Thread Michael J. Ruhl
From: Thomas Hellström 

If our exported dma-bufs are imported by another instance of our driver,
that instance will typically have the imported dma-bufs locked during
dma_buf_map_attachment(). But the exporter also locks the same reservation
object in the map_dma_buf() callback, which leads to recursive locking.

So taking the lock inside _pin_pages_unlocked() is incorrect.

Additionally, the current pinning code path is contrary to the defined
way that pinning should occur.

Remove the explicit pin/unpin from the map/unmap functions and move them
to attach/detach, allowing correct locking to occur and matching the
static dma-buf drm_prime pattern.

Add a live selftest to exercise both dynamic and non-dynamic
exports.

v2:
- Extend the selftest with a fake dynamic importer.
- Provide real pin and unpin callbacks to not abuse the interface.
v3: (ruhl)
- Remove the dynamic export support and move the pinning into the
  attach/detach path.

Reported-by: Michael J. Ruhl 
Signed-off-by: Thomas Hellström 
Signed-off-by: Michael J. Ruhl 
---
 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c|  46 +--
 .../drm/i915/gem/selftests/i915_gem_dmabuf.c  | 116 +-
 2 files changed, 148 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c 
b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 616c3a2f1baf..8c528b693a30 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -12,6 +12,8 @@
 #include "i915_gem_object.h"
 #include "i915_scatterlist.h"
 
+I915_SELFTEST_DECLARE(static bool force_different_devices;)
+
 static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
 {
return to_intel_bo(buf->priv);
@@ -25,15 +27,11 @@ static struct sg_table *i915_gem_map_dma_buf(struct 
dma_buf_attachment *attachme
struct scatterlist *src, *dst;
int ret, i;
 
-   ret = i915_gem_object_pin_pages_unlocked(obj);
-   if (ret)
-   goto err;
-
/* Copy sg so that we make an independent mapping */
st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
if (st == NULL) {
ret = -ENOMEM;
-   goto err_unpin_pages;
+   goto err;
}
 
ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
@@ -58,8 +56,6 @@ static struct sg_table *i915_gem_map_dma_buf(struct 
dma_buf_attachment *attachme
sg_free_table(st);
 err_free:
kfree(st);
-err_unpin_pages:
-   i915_gem_object_unpin_pages(obj);
 err:
return ERR_PTR(ret);
 }
@@ -68,13 +64,9 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment 
*attachment,
   struct sg_table *sg,
   enum dma_data_direction dir)
 {
-   struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
-
dma_unmap_sgtable(attachment->dev, sg, dir, DMA_ATTR_SKIP_CPU_SYNC);
sg_free_table(sg);
kfree(sg);
-
-   i915_gem_object_unpin_pages(obj);
 }
 
 static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map 
*map)
@@ -168,7 +160,32 @@ static int i915_gem_end_cpu_access(struct dma_buf 
*dma_buf, enum dma_data_direct
return err;
 }
 
+/**
+ * i915_gem_dmabuf_attach - Do any extra attach work necessary
+ * @dmabuf: imported dma-buf
+ * @attach: new attach to do work on
+ *
+ */
+static int i915_gem_dmabuf_attach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attach)
+{
+   struct drm_i915_gem_object *obj = dma_buf_to_obj(dmabuf);
+
+   assert_object_held(obj);
+   return i915_gem_object_pin_pages(obj);
+}
+
+static void i915_gem_dmabuf_detach(struct dma_buf *dmabuf,
+  struct dma_buf_attachment *attach)
+{
+   struct drm_i915_gem_object *obj = dma_buf_to_obj(dmabuf);
+
+   i915_gem_object_unpin_pages(obj);
+}
+
 static const struct dma_buf_ops i915_dmabuf_ops =  {
+   .attach = i915_gem_dmabuf_attach,
+   .detach = i915_gem_dmabuf_detach,
.map_dma_buf = i915_gem_map_dma_buf,
.unmap_dma_buf = i915_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
@@ -204,6 +221,8 @@ static int i915_gem_object_get_pages_dmabuf(struct 
drm_i915_gem_object *obj)
struct sg_table *pages;
unsigned int sg_page_sizes;
 
+   assert_object_held(obj);
+
pages = dma_buf_map_attachment(obj->base.import_attach,
   DMA_BIDIRECTIONAL);
if (IS_ERR(pages))
@@ -219,6 +238,8 @@ static int i915_gem_object_get_pages_dmabuf(struct 
drm_i915_gem_object *obj)
 static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
 struct sg_table *pages)
 {
+   assert_object_held(obj);
+
dma_buf_unmap_attachment(obj->base.import_attach, pages,
 DMA_BIDI

[PATCH 2/2] drm/i915/gem: Migrate to system at dma-buf attach time

2021-07-01 Thread Michael J. Ruhl
From: Thomas Hellström 

Until we support p2p dma, or as a complement to it, migrate data
to system memory at dma-buf attach time if possible.

v2:
- Rebase on dynamic exporter. Update the igt_dmabuf_import_same_driver
  selftest to migrate if we are LMEM capable.
v3:
- Migrate also in the pin() callback.
v4:
- Migrate in attach

Signed-off-by: Thomas Hellström 
Signed-off-by: Michael J. Ruhl 
---
 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c   | 12 +++-
 drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c |  4 +++-
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c 
b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 8c528b693a30..a325f33f35b8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -170,9 +170,19 @@ static int i915_gem_dmabuf_attach(struct dma_buf *dmabuf,
  struct dma_buf_attachment *attach)
 {
struct drm_i915_gem_object *obj = dma_buf_to_obj(dmabuf);
+   int ret;
 
assert_object_held(obj);
-   return i915_gem_object_pin_pages(obj);
+
+   if (!i915_gem_object_can_migrate(obj, INTEL_REGION_SMEM))
+   return -EOPNOTSUPP;
+   ret = i915_gem_object_migrate(obj, NULL, INTEL_REGION_SMEM);
+   if (!ret)
+   ret = i915_gem_object_wait_migration(obj, 0);
+   if (!ret)
+   ret = i915_gem_object_pin_pages(obj);
+
+   return ret;
 }
 
 static void i915_gem_dmabuf_detach(struct dma_buf *dmabuf,
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c 
b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index 868b3469ecbd..b1e87ec08741 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -106,7 +106,9 @@ static int igt_dmabuf_import_same_driver(void *arg)
int err;
 
force_different_devices = true;
-   obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
+   obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, 0);
+   if (IS_ERR(obj))
+   obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
if (IS_ERR(obj))
goto out_ret;
 
-- 
2.31.1



[PATCH 1/2] drm/i915/gem: Correct the locking and pin pattern for dma-buf

2021-07-01 Thread Michael J. Ruhl
From: Thomas Hellström 

If our exported dma-bufs are imported by another instance of our driver,
that instance will typically have the imported dma-bufs locked during
dma_buf_map_attachment(). But the exporter also locks the same reservation
object in the map_dma_buf() callback, which leads to recursive locking.

So taking the lock inside _pin_pages_unlocked() is incorrect.

Additionally, the current pinning code path is contrary to the defined
way that pinning should occur.

Remove the explicit pin/unpin from the map/unmap functions and move them
to attach/detach, allowing correct locking to occur and matching the
static dma-buf drm_prime pattern.

Add a live selftest to exercise both dynamic and non-dynamic
exports.

v2:
- Extend the selftest with a fake dynamic importer.
- Provide real pin and unpin callbacks to not abuse the interface.
v3: (ruhl)
- Remove the dynamic export support and move the pinning into the
  attach/detach path.

Reported-by: Michael J. Ruhl 
Signed-off-by: Thomas Hellström 
Signed-off-by: Michael J. Ruhl 
---
 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c|  46 ++--
 .../drm/i915/gem/selftests/i915_gem_dmabuf.c  | 111 +-
 2 files changed, 143 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c 
b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 616c3a2f1baf..00338c8d3739 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -12,6 +12,8 @@
 #include "i915_gem_object.h"
 #include "i915_scatterlist.h"
 
+I915_SELFTEST_DECLARE(static bool force_different_devices;)
+
 static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
 {
return to_intel_bo(buf->priv);
@@ -25,15 +27,11 @@ static struct sg_table *i915_gem_map_dma_buf(struct 
dma_buf_attachment *attachme
struct scatterlist *src, *dst;
int ret, i;
 
-   ret = i915_gem_object_pin_pages_unlocked(obj);
-   if (ret)
-   goto err;
-
/* Copy sg so that we make an independent mapping */
st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
if (st == NULL) {
ret = -ENOMEM;
-   goto err_unpin_pages;
+   goto err;
}
 
ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
@@ -58,8 +56,6 @@ static struct sg_table *i915_gem_map_dma_buf(struct 
dma_buf_attachment *attachme
sg_free_table(st);
 err_free:
kfree(st);
-err_unpin_pages:
-   i915_gem_object_unpin_pages(obj);
 err:
return ERR_PTR(ret);
 }
@@ -68,13 +64,9 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment 
*attachment,
   struct sg_table *sg,
   enum dma_data_direction dir)
 {
-   struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
-
dma_unmap_sgtable(attachment->dev, sg, dir, DMA_ATTR_SKIP_CPU_SYNC);
sg_free_table(sg);
kfree(sg);
-
-   i915_gem_object_unpin_pages(obj);
 }
 
 static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map 
*map)
@@ -168,7 +160,32 @@ static int i915_gem_end_cpu_access(struct dma_buf 
*dma_buf, enum dma_data_direct
return err;
 }
 
+/**
+ * i915_gem_dmabuf_attach - Do any extra attach work necessary
+ * @dmabuf: imported dma-buf
+ * @attach: new attach to do work on
+ *
+ */
+static int i915_gem_dmabuf_attach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attach)
+{
+   struct drm_i915_gem_object *obj = dma_buf_to_obj(dmabuf);
+
+   assert_object_held(obj);
+   return i915_gem_object_pin_pages(obj);
+}
+
+static void i915_gem_dmabuf_detach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attach)
+{
+   struct drm_i915_gem_object *obj = dma_buf_to_obj(dmabuf);
+
+   i915_gem_object_unpin_pages(obj);
+}
+
 static const struct dma_buf_ops i915_dmabuf_ops =  {
+   .attach = i915_gem_dmabuf_attach,
+   .detach = i915_gem_dmabuf_detach,
.map_dma_buf = i915_gem_map_dma_buf,
.unmap_dma_buf = i915_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
@@ -204,6 +221,8 @@ static int i915_gem_object_get_pages_dmabuf(struct 
drm_i915_gem_object *obj)
struct sg_table *pages;
unsigned int sg_page_sizes;
 
+   assert_object_held(obj);
+
pages = dma_buf_map_attachment(obj->base.import_attach,
   DMA_BIDIRECTIONAL);
if (IS_ERR(pages))
@@ -219,6 +238,8 @@ static int i915_gem_object_get_pages_dmabuf(struct 
drm_i915_gem_object *obj)
 static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
 struct sg_table *pages)
 {
+   assert_object_held(obj);
+
dma_buf_unmap_attachment(obj->base.import_attach, pages,
 DMA_BIDI

[PATCH 2/2] drm/i915/gem: Migrate to system at dma-buf attach time

2021-07-01 Thread Michael J. Ruhl
From: Thomas Hellström 

Until we support p2p dma, or as a complement to it, migrate data
to system memory at dma-buf attach time if possible.

v2:
- Rebase on dynamic exporter. Update the igt_dmabuf_import_same_driver
  selftest to migrate if we are LMEM capable.
v3:
- Migrate also in the pin() callback.
v4:
- Migrate in attach

Signed-off-by: Thomas Hellström 
Signed-off-by: Michael J. Ruhl 
---
 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c   | 12 +++-
 drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c |  4 +++-
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c 
b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 00338c8d3739..406016aeb200 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -170,9 +170,19 @@ static int i915_gem_dmabuf_attach(struct dma_buf *dmabuf,
  struct dma_buf_attachment *attach)
 {
struct drm_i915_gem_object *obj = dma_buf_to_obj(dmabuf);
+   int ret;
 
assert_object_held(obj);
-   return i915_gem_object_pin_pages(obj);
+
+   if (!i915_gem_object_can_migrate(obj, INTEL_REGION_SMEM))
+   return -EOPNOTSUPP;
+   ret = i915_gem_object_migrate(obj, NULL, INTEL_REGION_SMEM);
+   if (!ret)
+   ret = i915_gem_object_wait_migration(obj, 0);
+   if (!ret)
+   ret = i915_gem_object_pin_pages(obj);
+
+   return ret;
 }
 
 static void i915_gem_dmabuf_detach(struct dma_buf *dmabuf,
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c 
b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index 10a113cc00a5..6feac1a14281 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -101,7 +101,9 @@ static int igt_dmabuf_import_same_driver(void *arg)
int err;
 
force_different_devices = true;
-   obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
+   obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, 0);
+   if (IS_ERR(obj))
+   obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
if (IS_ERR(obj))
goto out_ret;
 
-- 
2.31.1



[PATCH v2] io-mapping: Indicate mapping failure

2020-07-21 Thread Michael J. Ruhl
The !ATOMIC_IOMAP version of io_mapping_init_wc will always return
success, even when the ioremap fails.

Since the ATOMIC_IOMAP version returns NULL when the init fails, and
callers check for a NULL return on error, this is unexpected.

During a device probe, where the ioremap failed, a crash can look
like this:

BUG: unable to handle page fault for address: 0021
 #PF: supervisor write access in kernel mode
 #PF: error_code(0x0002) - not-present page
 Oops: 0002 [#1] PREEMPT SMP
 CPU: 0 PID: 177 Comm:
 RIP: 0010:fill_page_dma [i915]
  gen8_ppgtt_create [i915]
  i915_ppgtt_create [i915]
  intel_gt_init [i915]
  i915_gem_init [i915]
  i915_driver_probe [i915]
  pci_device_probe
  really_probe
  driver_probe_device

The remap failure occurred much earlier in the probe.  If it had
been propagated, the driver would have exited with an error.

Return NULL on ioremap failure.
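
A minimal caller-side sketch (illustrative only; the structure and resource
names are hypothetical) of how probe code is expected to use the return value,
so the failure is propagated instead of surfacing as a crash much later:

	/* with this fix the failed ioremap is visible to the caller */
	if (!io_mapping_init_wc(&priv->iomap, res->start, resource_size(res)))
		return -EIO;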

Fixes: cafaf14a5d8f ("io-mapping: Always create a struct to hold metadata about 
the io-mapping")
Cc: Andrew Morton 
Cc: Mike Rapoport 
Cc: Andy Shevchenko 
Cc: Chris Wilson 
Cc: sta...@vger.kernel.org
Signed-off-by: Michael J. Ruhl 
---
v2: reflect review comments
---
 include/linux/io-mapping.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 0beaa3eba155..5641e06cbcf7 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -118,7 +118,7 @@ io_mapping_init_wc(struct io_mapping *iomap,
iomap->prot = pgprot_noncached(PAGE_KERNEL);
 #endif
 
-   return iomap;
+   return iomap->iomem ? iomap : NULL;
 }
 
 static inline void
-- 
2.21.0



[PATCH v1] io-mapping: Indicate mapping failure

2020-07-21 Thread Michael J. Ruhl
The !ATOMIC_IOMAP version of io_mapping_init_wc will always return
success, even when the ioremap fails.

Since the ATOMIC_IOMAP version returns NULL when the init fails, and
callers check for a NULL return on error, this is unexpected.

Return NULL on ioremap failure.

Fixes: cafaf14a5d8f ("io-mapping: Always create a struct to hold metadata about 
the io-mapping")
Cc: Andrew Morton 
Cc: Mike Rapoport 
Cc: Andy Shevchenko 
Cc: Chris Wilson 
Cc: sta...@vger.kernel.org
Signed-off-by: Michael J. Ruhl 
---
 include/linux/io-mapping.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 0beaa3eba155..5641e06cbcf7 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -118,7 +118,7 @@ io_mapping_init_wc(struct io_mapping *iomap,
iomap->prot = pgprot_noncached(PAGE_KERNEL);
 #endif
 
-   return iomap;
+   return iomap->iomem ? iomap : NULL;
 }
 
 static inline void
-- 
2.21.0



io-mapping: Indicate mapping failure

2020-07-21 Thread Michael J. Ruhl
I found this when my system crashed long after the mapping failure.
The expected behavior should have been driver exit.

Since this is almost exclusively used for drm, I am posting to
the dri mailing list.  Should this go to another list as well?

Thanks,

Mike

Cc: Mike Rapoport 
Cc: Andy Shevchenko 




[PATCH] io-mapping: Indicate mapping failure

2020-07-21 Thread Michael J. Ruhl
Sometimes it is good to know when your mapping failed.

Fixes: cafaf14a5d8f ("io-mapping: Always create a struct to hold metadata about 
the io-mapping")
Cc: Andrew Morton 
Cc: Mike Rapoport 
Cc: Andy Shevchenko 
Cc: Chris Wilson 
Cc: sta...@vger.kernel.org
Signed-off-by: Michael J. Ruhl 
---
 include/linux/io-mapping.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 0beaa3eba155..5641e06cbcf7 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -118,7 +118,7 @@ io_mapping_init_wc(struct io_mapping *iomap,
iomap->prot = pgprot_noncached(PAGE_KERNEL);
 #endif
 
-   return iomap;
+   return iomap->iomem ? iomap : NULL;
 }
 
 static inline void
-- 
2.21.0



io-mapping: Indicate mapping failure

2020-07-21 Thread Michael J. Ruhl
I found this when my system crashed long after the mapping failure.
The expected behavior should have been driver exit.

Since this is almost exclusively used for drm, I am posting to
the dri mailing list.  Should this go to another list as well?

Thanks,

Mike





drm/i915: Teaching i915 dmabuf about LMEM

2020-04-22 Thread Michael J. Ruhl
With the addition of device memory (lmem) for the i915 driver, the
dma-buf interface needs a little polishing.





[PATCH 5/5] drm/i915/dmabuf Introduce dmabuf mmap to LMEM

2020-04-22 Thread Michael J. Ruhl
The i915 GEM dmabuf mmap interface assumes all BOs are SHMEM. When
the BO is backed by LMEM, this assumption doesn't work so well.

Introduce the dmabuf mmap interface to LMEM by adding the appropriate
VMA faulting mechanism.

Update dmabuf to allow for LMEM backed BOs by leveraging the gem_mman
path.

Signed-off-by: Michael J. Ruhl 
CC: Brian Welty 
---
 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c|  58 --
 drivers/gpu/drm/i915/gem/i915_gem_mman.c  | 103 ++
 drivers/gpu/drm/i915/gem/i915_gem_mman.h  |   8 ++
 .../drm/i915/gem/selftests/i915_gem_mman.c|  10 +-
 4 files changed, 118 insertions(+), 61 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c 
b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 988778cc8539..72d312d04421 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -11,6 +11,8 @@
 #include 
 
 #include "i915_drv.h"
+#include "i915_gem_lmem.h"
+#include "i915_gem_mman.h"
 #include "i915_gem_object.h"
 #include "i915_scatterlist.h"
 
@@ -169,7 +171,41 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf 
*dma_buf, void *vaddr)
i915_gem_object_unpin_map(obj);
 }
 
-static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct 
*vma)
+/**
+ * i915_gem_dmabuf_update_vma - Setup VMA information for exported LMEM
+ * objects
+ * @obj: valid LMEM object
+ * @vma: valid vma
+ *
+ * NOTE: on success, the final _object_put() will be done by the VMA
+ * vm_close() callback.
+ */
+static int i915_gem_dmabuf_update_vma(struct drm_i915_gem_object *obj,
+ struct vm_area_struct *vma)
+{
+   struct i915_mmap_offset *mmo;
+   int err;
+
+   i915_gem_object_get(obj);
+   mmo = i915_gem_mmap_offset_attach(obj, I915_MMAP_TYPE_WC, NULL);
+   if (IS_ERR(mmo)) {
+   err = PTR_ERR(mmo);
+   goto out;
+   }
+
+   err = i915_gem_update_vma_info(mmo, vma);
+   if (err)
+   goto out;
+
+   return 0;
+
+out:
+   i915_gem_object_put(obj);
+   return err;
+}
+
+static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf,
+   struct vm_area_struct *vma)
 {
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
int ret;
@@ -177,17 +213,21 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, 
struct vm_area_struct *
if (obj->base.size < vma->vm_end - vma->vm_start)
return -EINVAL;
 
-   if (!obj->base.filp)
-   return -ENODEV;
+   /* shmem */
+   if (obj->base.filp) {
+   ret = call_mmap(obj->base.filp, vma);
+   if (ret)
+   return ret;
+   fput(vma->vm_file);
+   vma->vm_file = get_file(obj->base.filp);
 
-   ret = call_mmap(obj->base.filp, vma);
-   if (ret)
-   return ret;
+   return 0;
+   }
 
-   fput(vma->vm_file);
-   vma->vm_file = get_file(obj->base.filp);
+   if (i915_gem_object_is_lmem(obj))
+   return i915_gem_dmabuf_update_vma(obj, vma);
 
-   return 0;
+   return -ENODEV;
 }
 
 static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum 
dma_data_direction direction)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c 
b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index b39c24dae64e..70b00c41f6d3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -549,10 +549,10 @@ insert_mmo(struct drm_i915_gem_object *obj, struct 
i915_mmap_offset *mmo)
return mmo;
 }
 
-static struct i915_mmap_offset *
-mmap_offset_attach(struct drm_i915_gem_object *obj,
-  enum i915_mmap_type mmap_type,
-  struct drm_file *file)
+struct i915_mmap_offset *
+i915_gem_mmap_offset_attach(struct drm_i915_gem_object *obj,
+   enum i915_mmap_type mmap_type,
+   struct drm_file *file)
 {
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_mmap_offset *mmo;
@@ -626,7 +626,7 @@ __assign_mmap_offset(struct drm_file *file,
goto out;
}
 
-   mmo = mmap_offset_attach(obj, mmap_type, file);
+   mmo = i915_gem_mmap_offset_attach(obj, mmap_type, file);
if (IS_ERR(mmo)) {
err = PTR_ERR(mmo);
goto out;
@@ -795,56 +795,21 @@ static struct file *mmap_singleton(struct 
drm_i915_private *i915)
return file;
 }
 
-/*
- * This overcomes the limitation in drm_gem_mmap's assignment of a
- * drm_gem_object as the vma->vm_private_data. Since we need to
- * be able to resolve multiple mmap offsets which could be tied
- * to a single gem object.
- */
-int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+int i915_gem_upd

[PATCH 4/5] drm/i915/dmabuf: Add LMEM support for cpu access and vmap interfaces

2020-04-22 Thread Michael J. Ruhl
LMEM backed buffer objects do not have struct page information, and
are not WB compatible.  Currently the cpu access and vmap interfaces
only support struct page backed objects.

Update the dma-buf interfaces begin/end_cpu_access and vmap/vunmap
to be LMEM aware.

Signed-off-by: Michael J. Ruhl 
---
 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 15 ---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c 
b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 402c989cc23d..988778cc8539 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -155,7 +155,10 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 {
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
 
-   return i915_gem_object_pin_map(obj, I915_MAP_WB);
+   if (i915_gem_object_has_struct_page(obj))
+   return i915_gem_object_pin_map(obj, I915_MAP_WB);
+   else
+   return i915_gem_object_pin_map(obj, I915_MAP_WC);
 }
 
 static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
@@ -201,7 +204,11 @@ static int i915_gem_begin_cpu_access(struct dma_buf 
*dma_buf, enum dma_data_dire
if (err)
goto out;
 
-   err = i915_gem_object_set_to_cpu_domain(obj, write);
+   if (i915_gem_object_has_struct_page(obj))
+   err = i915_gem_object_set_to_cpu_domain(obj, write);
+   else
+   err = i915_gem_object_set_to_wc_domain(obj, write);
+
i915_gem_object_unlock(obj);
 
 out:
@@ -222,7 +229,9 @@ static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, 
enum dma_data_direct
if (err)
goto out;
 
-   err = i915_gem_object_set_to_gtt_domain(obj, false);
+   if (i915_gem_object_has_struct_page(obj))
+   err = i915_gem_object_set_to_gtt_domain(obj, false);
+
i915_gem_object_unlock(obj);
 
 out:
-- 
2.21.0



[PATCH 2/5] drm/i915/dmabuf: Use scatterlist for_each_sg API

2020-04-22 Thread Michael J. Ruhl
Update open coded for loop to use the standard scatterlist
for_each_sg API.

Signed-off-by: Michael J. Ruhl 
---
 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c 
b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 0d9124ad549a..7ea4abb6a896 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -7,6 +7,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "i915_drv.h"
 #include "i915_gem_object.h"
@@ -40,12 +41,10 @@ static struct sg_table *i915_gem_map_dma_buf(struct 
dma_buf_attachment *attach,
if (ret)
goto err_free;
 
-   src = obj->mm.pages->sgl;
dst = sgt->sgl;
-   for (i = 0; i < obj->mm.pages->nents; i++) {
+   for_each_sg(obj->mm.pages->sgl, src, obj->mm.pages->nents, i) {
sg_set_page(dst, sg_page(src), src->length, 0);
dst = sg_next(dst);
-   src = sg_next(src);
}
 
if (!dma_map_sg_attrs(attach->dev,
-- 
2.21.0



[PATCH 3/5] drm/i915/dmabuf: Add LMEM knowledge to dmabuf map handler

2020-04-22 Thread Michael J. Ruhl
LMEM backed buffer objects do not have struct page information.
Instead the dma_address of the struct sg is used to store the
LMEM address information (relative to the device, this is not
the CPU physical address).

The dmabuf map handler requires pages to do a dma_map_xx.

Add new mapping/unmapping functions, based on the LMEM usage
of the dma_address to allow LMEM backed buffer objects to be
mapped.

Before mapping check the peer2peer distance to verify that P2P
dma can occur.

Signed-off-by: Michael J. Ruhl 
---
 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 82 --
 1 file changed, 76 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c 
b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 7ea4abb6a896..402c989cc23d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -7,6 +7,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 
 #include "i915_drv.h"
@@ -18,6 +19,67 @@ static struct drm_i915_gem_object *dma_buf_to_obj(struct 
dma_buf *buf)
return to_intel_bo(buf->priv);
 }
 
+static void dmabuf_unmap_addr(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir)
+{
+   struct scatterlist *sg;
+   int i;
+
+   for_each_sg(sgl, sg, nents, i)
+   dma_unmap_resource(dev, sg_dma_address(sg), sg_dma_len(sg),
+  dir, 0);
+}
+
+/**
+ * dmabuf_map_addr - Update LMEM address to a physical address and map the
+ * resource.
+ * @dev: valid device
+ * @obj: valid i915 GEM object
+ * @sgl: scatter list to apply mapping to
+ * @nents: number of entries in the scatter list
+ * @dir: DMA direction
+ *
+ * The dma_address of the scatter list is the LMEM "address".  From this the
+ * actual physical address can be determined.
+ *
+ * Return of 0 means error.
+ *
+ */
+static int dmabuf_map_addr(struct device *dev, struct drm_i915_gem_object *obj,
+  struct scatterlist *sgl, int nents,
+  enum dma_data_direction dir)
+{
+   struct scatterlist *sg;
+   phys_addr_t addr;
+   int distance;
+   int i;
+
+   distance = pci_p2pdma_distance_many(obj->base.dev->pdev, &dev, 1,
+   true);
+   if (distance < 0) {
+   pr_info("%s: from: %s  to: %s  distance: %d\n", __func__,
+   pci_name(obj->base.dev->pdev), dev_name(dev),
+   distance);
+   return 0;
+   }
+
+   for_each_sg(sgl, sg, nents, i) {
+   addr = sg_dma_address(sg) + obj->mm.region->io_start;
+
+   sg->dma_address = dma_map_resource(dev, addr, sg->length, dir,
+  0);
+   if (dma_mapping_error(dev, sg->dma_address))
+   goto unmap;
+   sg->dma_length = sg->length;
+   }
+
+   return nents;
+
+unmap:
+   dmabuf_unmap_addr(dev, sgl, i, dir);
+   return 0;
+}
+
 static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attach,
 enum dma_data_direction dir)
 {
@@ -44,12 +106,17 @@ static struct sg_table *i915_gem_map_dma_buf(struct 
dma_buf_attachment *attach,
dst = sgt->sgl;
for_each_sg(obj->mm.pages->sgl, src, obj->mm.pages->nents, i) {
sg_set_page(dst, sg_page(src), src->length, 0);
+   sg_dma_address(dst) = sg_dma_address(src);
dst = sg_next(dst);
}
 
-   if (!dma_map_sg_attrs(attach->dev,
- sgt->sgl, sgt->nents, dir,
- DMA_ATTR_SKIP_CPU_SYNC)) {
+   if (i915_gem_object_has_struct_page(obj))
+   ret = dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
+  DMA_ATTR_SKIP_CPU_SYNC);
+   else
+   ret = dmabuf_map_addr(attach->dev, obj, sgt->sgl, sgt->nents,
+ dir);
+   if (!ret) {
ret = -ENOMEM;
goto err_free_sg;
}
@@ -72,9 +139,12 @@ static void i915_gem_unmap_dma_buf(struct 
dma_buf_attachment *attach,
 {
struct drm_i915_gem_object *obj = dma_buf_to_obj(attach->dmabuf);
 
-   dma_unmap_sg_attrs(attach->dev,
-  sgt->sgl, sgt->nents, dir,
-  DMA_ATTR_SKIP_CPU_SYNC);
+   if (i915_gem_object_has_struct_page(obj))
+   dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
+  DMA_ATTR_SKIP_CPU_SYNC);
+   else
+   dmabuf_unmap_addr(attach->dev, sgt->sgl, sgt->nents, dir);
+
sg_free_table(sgt);
kfree(sgt);
 
-- 
2.21.0


[PATCH 1/5] drm/i915/dmabuf: dmabuf cleanup

2020-04-22 Thread Michael J. Ruhl
Some minor cleanup of some variables to make upcoming patches
a little easier.

Normalize struct sg_table to sgt.
Normalize struct dma_buf_attachment to attach.
checkpatch issues sizeof(), !NULL updates.

Signed-off-by: Michael J. Ruhl 
---
 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 58 +++---
 1 file changed, 29 insertions(+), 29 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c 
b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 7db5a793739d..0d9124ad549a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -17,11 +17,11 @@ static struct drm_i915_gem_object *dma_buf_to_obj(struct 
dma_buf *buf)
return to_intel_bo(buf->priv);
 }
 
-static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment 
*attachment,
+static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attach,
 enum dma_data_direction dir)
 {
-   struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
-   struct sg_table *st;
+   struct drm_i915_gem_object *obj = dma_buf_to_obj(attach->dmabuf);
+   struct sg_table *sgt;
struct scatterlist *src, *dst;
int ret, i;
 
@@ -30,54 +30,54 @@ static struct sg_table *i915_gem_map_dma_buf(struct 
dma_buf_attachment *attachme
goto err;
 
/* Copy sg so that we make an independent mapping */
-   st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
-   if (st == NULL) {
+   sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+   if (!sgt) {
ret = -ENOMEM;
goto err_unpin_pages;
}
 
-   ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
+   ret = sg_alloc_table(sgt, obj->mm.pages->nents, GFP_KERNEL);
if (ret)
goto err_free;
 
src = obj->mm.pages->sgl;
-   dst = st->sgl;
+   dst = sgt->sgl;
for (i = 0; i < obj->mm.pages->nents; i++) {
sg_set_page(dst, sg_page(src), src->length, 0);
dst = sg_next(dst);
src = sg_next(src);
}
 
-   if (!dma_map_sg_attrs(attachment->dev,
- st->sgl, st->nents, dir,
+   if (!dma_map_sg_attrs(attach->dev,
+ sgt->sgl, sgt->nents, dir,
  DMA_ATTR_SKIP_CPU_SYNC)) {
ret = -ENOMEM;
goto err_free_sg;
}
 
-   return st;
+   return sgt;
 
 err_free_sg:
-   sg_free_table(st);
+   sg_free_table(sgt);
 err_free:
-   kfree(st);
+   kfree(sgt);
 err_unpin_pages:
i915_gem_object_unpin_pages(obj);
 err:
return ERR_PTR(ret);
 }
 
-static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
-  struct sg_table *sg,
+static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
+  struct sg_table *sgt,
   enum dma_data_direction dir)
 {
-   struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
+   struct drm_i915_gem_object *obj = dma_buf_to_obj(attach->dmabuf);
 
-   dma_unmap_sg_attrs(attachment->dev,
-  sg->sgl, sg->nents, dir,
+   dma_unmap_sg_attrs(attach->dev,
+  sgt->sgl, sgt->nents, dir,
   DMA_ATTR_SKIP_CPU_SYNC);
-   sg_free_table(sg);
-   kfree(sg);
+   sg_free_table(sgt);
+   kfree(sgt);
 
i915_gem_object_unpin_pages(obj);
 }
@@ -194,25 +194,25 @@ struct dma_buf *i915_gem_prime_export(struct 
drm_gem_object *gem_obj, int flags)
 
 static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
 {
-   struct sg_table *pages;
+   struct sg_table *sgt;
unsigned int sg_page_sizes;
 
-   pages = dma_buf_map_attachment(obj->base.import_attach,
-  DMA_BIDIRECTIONAL);
-   if (IS_ERR(pages))
-   return PTR_ERR(pages);
+   sgt = dma_buf_map_attachment(obj->base.import_attach,
+DMA_BIDIRECTIONAL);
+   if (IS_ERR(sgt))
+   return PTR_ERR(sgt);
 
-   sg_page_sizes = i915_sg_page_sizes(pages->sgl);
+   sg_page_sizes = i915_sg_page_sizes(sgt->sgl);
 
-   __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
+   __i915_gem_object_set_pages(obj, sgt, sg_page_sizes);
 
return 0;
 }
 
 static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
-struct sg_table *pages)
+struct sg_table *sgt)
 {
-   dma_buf_unmap_attachment(obj->base.import_attach, pages,
+   dma_buf_unmap_attachment(obj->base.import_attach,