[PATCH v2 1/4] drm/virtio: simplify virtio_gpu_alloc_cmd

2020-02-06 Thread Gerd Hoffmann
Just call virtio_gpu_alloc_cmd_resp with some fixed args
instead of duplicating most of the function body.

Signed-off-by: Gerd Hoffmann 
---
 drivers/gpu/drm/virtio/virtgpu_vq.c | 26 +-
 1 file changed, 9 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c 
b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 41e475fbd67b..df499fb64ac7 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -120,23 +120,6 @@ virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
 }
 
-static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_vbuffer **vbuffer_p,
- int size)
-{
-   struct virtio_gpu_vbuffer *vbuf;
-
-   vbuf = virtio_gpu_get_vbuf(vgdev, size,
-  sizeof(struct virtio_gpu_ctrl_hdr),
-  NULL, NULL);
-   if (IS_ERR(vbuf)) {
-   *vbuffer_p = NULL;
-   return ERR_CAST(vbuf);
-   }
-   *vbuffer_p = vbuf;
-   return vbuf->buf;
-}
-
 static struct virtio_gpu_update_cursor*
 virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer **vbuffer_p)
@@ -172,6 +155,15 @@ static void *virtio_gpu_alloc_cmd_resp(struct 
virtio_gpu_device *vgdev,
return (struct virtio_gpu_command *)vbuf->buf;
 }
 
+static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer **vbuffer_p,
+ int size)
+{
+   return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
+sizeof(struct virtio_gpu_ctrl_hdr),
+NULL);
+}
+
 static void free_vbuf(struct virtio_gpu_device *vgdev,
  struct virtio_gpu_vbuffer *vbuf)
 {
-- 
2.18.1

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH v2 4/4] drm/virtio: move virtio_gpu_mem_entry initialization to new function

2020-02-06 Thread Gerd Hoffmann
Introduce new virtio_gpu_object_shmem_init() helper function which will
create the virtio_gpu_mem_entry array, containing the backing storage
information for the host.  For the most part this just moves code from
virtio_gpu_object_attach().

Signed-off-by: Gerd Hoffmann 
---
 drivers/gpu/drm/virtio/virtgpu_drv.h|  4 +-
 drivers/gpu/drm/virtio/virtgpu_object.c | 55 -
 drivers/gpu/drm/virtio/virtgpu_vq.c | 51 ++-
 3 files changed, 60 insertions(+), 50 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h 
b/drivers/gpu/drm/virtio/virtgpu_drv.h
index d37ddd7644f6..6c78c77a2afc 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -71,6 +71,7 @@ struct virtio_gpu_object {
 
struct sg_table *pages;
uint32_t mapped;
+
bool dumb;
bool created;
 };
@@ -280,7 +281,8 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device 
*vgdev,
uint32_t x, uint32_t y);
 int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 struct virtio_gpu_object *obj,
-struct virtio_gpu_fence *fence);
+struct virtio_gpu_mem_entry *ents,
+unsigned int nents);
 int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
 int virtio_gpu_detach_status_page(struct virtio_gpu_device *vgdev);
 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c 
b/drivers/gpu/drm/virtio/virtgpu_object.c
index bce2b3d843fe..8870ee23ff2b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -121,6 +121,51 @@ struct drm_gem_object *virtio_gpu_create_object(struct 
drm_device *dev,
return >base.base;
 }
 
+static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
+   struct virtio_gpu_object *bo,
+   struct virtio_gpu_mem_entry **ents,
+   unsigned int *nents)
+{
+   bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+   struct scatterlist *sg;
+   int si, ret;
+
+   ret = drm_gem_shmem_pin(>base.base);
+   if (ret < 0)
+   return -EINVAL;
+
+   bo->pages = drm_gem_shmem_get_sg_table(>base.base);
+   if (!bo->pages) {
+   drm_gem_shmem_unpin(>base.base);
+   return -EINVAL;
+   }
+
+   if (use_dma_api) {
+   bo->mapped = dma_map_sg(vgdev->vdev->dev.parent,
+   bo->pages->sgl, bo->pages->nents,
+   DMA_TO_DEVICE);
+   *nents = bo->mapped;
+   } else {
+   *nents = bo->pages->nents;
+   }
+
+   *ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry),
+ GFP_KERNEL);
+   if (!(*ents)) {
+   DRM_ERROR("failed to allocate ent list\n");
+   return -ENOMEM;
+   }
+
+   for_each_sg(bo->pages->sgl, sg, *nents, si) {
+   (*ents)[si].addr = cpu_to_le64(use_dma_api
+  ? sg_dma_address(sg)
+  : sg_phys(sg));
+   (*ents)[si].length = cpu_to_le32(sg->length);
+   (*ents)[si].padding = 0;
+   }
+   return 0;
+}
+
 int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 struct virtio_gpu_object_params *params,
 struct virtio_gpu_object **bo_ptr,
@@ -129,6 +174,8 @@ int virtio_gpu_object_create(struct virtio_gpu_device 
*vgdev,
struct virtio_gpu_object_array *objs = NULL;
struct drm_gem_shmem_object *shmem_obj;
struct virtio_gpu_object *bo;
+   struct virtio_gpu_mem_entry *ents;
+   unsigned int nents;
int ret;
 
*bo_ptr = NULL;
@@ -165,7 +212,13 @@ int virtio_gpu_object_create(struct virtio_gpu_device 
*vgdev,
   objs, fence);
}
 
-   ret = virtio_gpu_object_attach(vgdev, bo, NULL);
+   ret = virtio_gpu_object_shmem_init(vgdev, bo, , );
+   if (ret != 0) {
+   virtio_gpu_free_object(_obj->base);
+   return ret;
+   }
+
+   ret = virtio_gpu_object_attach(vgdev, bo, ents, nents);
if (ret != 0) {
virtio_gpu_free_object(_obj->base);
return ret;
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c 
b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 87c439156151..8360f7338209 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -1086,56 +1086,11 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device 
*vgdev,
 
 int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,

[PATCH v2 3/4] drm/virtio: move mapping teardown to virtio_gpu_cleanup_object()

2020-02-06 Thread Gerd Hoffmann
Stop sending DETACH_BACKING commands; that will happen anyway when
releasing resources via UNREF.  Handle guest-side cleanup in
virtio_gpu_cleanup_object(), called when the host finished processing
the UNREF command.

Signed-off-by: Gerd Hoffmann 
---
 drivers/gpu/drm/virtio/virtgpu_drv.h|  2 --
 drivers/gpu/drm/virtio/virtgpu_object.c | 14 ++--
 drivers/gpu/drm/virtio/virtgpu_vq.c | 46 -
 3 files changed, 12 insertions(+), 50 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h 
b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 1bc13f6b161b..d37ddd7644f6 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -281,8 +281,6 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device 
*vgdev,
 int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 struct virtio_gpu_object *obj,
 struct virtio_gpu_fence *fence);
-void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *obj);
 int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
 int virtio_gpu_detach_status_page(struct virtio_gpu_device *vgdev);
 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c 
b/drivers/gpu/drm/virtio/virtgpu_object.c
index 28a161af7503..bce2b3d843fe 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -23,6 +23,7 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include 
 #include 
 
 #include "virtgpu_drv.h"
@@ -65,6 +66,17 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
 {
struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
 
+   if (bo->pages) {
+   if (bo->mapped) {
+   dma_unmap_sg(vgdev->vdev->dev.parent,
+bo->pages->sgl, bo->mapped,
+DMA_TO_DEVICE);
+   bo->mapped = 0;
+   }
+   sg_free_table(bo->pages);
+   bo->pages = NULL;
+   drm_gem_shmem_unpin(>base.base);
+   }
virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
drm_gem_shmem_free_object(>base.base);
 }
@@ -74,8 +86,6 @@ static void virtio_gpu_free_object(struct drm_gem_object *obj)
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
 
-   if (bo->pages)
-   virtio_gpu_object_detach(vgdev, bo);
if (bo->created) {
virtio_gpu_cmd_unref_resource(vgdev, bo);
/* completion handler calls virtio_gpu_cleanup_object() */
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c 
b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 4e22c3914f94..87c439156151 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -545,22 +545,6 @@ void virtio_gpu_cmd_unref_resource(struct 
virtio_gpu_device *vgdev,
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 }
 
-static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device 
*vgdev,
- uint32_t resource_id,
- struct virtio_gpu_fence 
*fence)
-{
-   struct virtio_gpu_resource_detach_backing *cmd_p;
-   struct virtio_gpu_vbuffer *vbuf;
-
-   cmd_p = virtio_gpu_alloc_cmd(vgdev, , sizeof(*cmd_p));
-   memset(cmd_p, 0, sizeof(*cmd_p));
-
-   cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
-   cmd_p->resource_id = cpu_to_le32(resource_id);
-
-   virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
-}
-
 void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
uint32_t scanout_id, uint32_t resource_id,
uint32_t width, uint32_t height,
@@ -1155,36 +1139,6 @@ int virtio_gpu_object_attach(struct virtio_gpu_device 
*vgdev,
return 0;
 }
 
-void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *obj)
-{
-   bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
-
-   if (WARN_ON_ONCE(!obj->pages))
-   return;
-
-   if (use_dma_api && obj->mapped) {
-   struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
-   /* detach backing and wait for the host process it ... */
-   virtio_gpu_cmd_resource_inval_backing(vgdev, 
obj->hw_res_handle, fence);
-   dma_fence_wait(>f, true);
-   dma_fence_put(>f);
-
-   /* ... then tear down iommu mappings */
-   dma_unmap_sg(vgdev->vdev->dev.parent,
-obj->pages->sgl, obj->mapped,
-

[PATCH v2 0/4] drm/virtio: rework backing storage handling

2020-02-06 Thread Gerd Hoffmann
Signed-off-by: Gerd Hoffmann 

Gerd Hoffmann (4):
  drm/virtio: simplify virtio_gpu_alloc_cmd
  drm/virtio: resource teardown tweaks
  drm/virtio: move mapping teardown to virtio_gpu_cleanup_object()
  drm/virtio: move virtio_gpu_mem_entry initialization to new function

 drivers/gpu/drm/virtio/virtgpu_drv.h|  10 +-
 drivers/gpu/drm/virtio/virtgpu_object.c |  88 --
 drivers/gpu/drm/virtio/virtgpu_vq.c | 152 ++--
 3 files changed, 124 insertions(+), 126 deletions(-)

-- 
2.18.1

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH v2 2/4] drm/virtio: resource teardown tweaks

2020-02-06 Thread Gerd Hoffmann
Add new virtio_gpu_cleanup_object() helper function for object cleanup.
Wire up callback function for resource unref, do cleanup from callback
when we know the host stopped using the resource.

Signed-off-by: Gerd Hoffmann 
---
 drivers/gpu/drm/virtio/virtgpu_drv.h|  4 +++-
 drivers/gpu/drm/virtio/virtgpu_object.c | 19 +++-
 drivers/gpu/drm/virtio/virtgpu_vq.c | 29 ++---
 3 files changed, 43 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h 
b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 7e69c06e168e..1bc13f6b161b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -114,6 +114,7 @@ struct virtio_gpu_vbuffer {
char *resp_buf;
int resp_size;
virtio_gpu_resp_cb resp_cb;
+   void *resp_cb_data;
 
struct virtio_gpu_object_array *objs;
struct list_head list;
@@ -262,7 +263,7 @@ void virtio_gpu_cmd_create_resource(struct 
virtio_gpu_device *vgdev,
struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
-  uint32_t resource_id);
+  struct virtio_gpu_object *bo);
 void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
uint64_t offset,
uint32_t width, uint32_t height,
@@ -355,6 +356,7 @@ void virtio_gpu_fence_event_process(struct 
virtio_gpu_device *vdev,
u64 last_seq);
 
 /* virtio_gpu_object */
+void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo);
 struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
size_t size);
 int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c 
b/drivers/gpu/drm/virtio/virtgpu_object.c
index 017a9e0fc3bb..28a161af7503 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -61,6 +61,14 @@ static void virtio_gpu_resource_id_put(struct 
virtio_gpu_device *vgdev, uint32_t
}
 }
 
+void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
+{
+   struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
+
+   virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
+   drm_gem_shmem_free_object(>base.base);
+}
+
 static void virtio_gpu_free_object(struct drm_gem_object *obj)
 {
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
@@ -68,11 +76,12 @@ static void virtio_gpu_free_object(struct drm_gem_object 
*obj)
 
if (bo->pages)
virtio_gpu_object_detach(vgdev, bo);
-   if (bo->created)
-   virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);
-   virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
-
-   drm_gem_shmem_free_object(obj);
+   if (bo->created) {
+   virtio_gpu_cmd_unref_resource(vgdev, bo);
+   /* completion handler calls virtio_gpu_cleanup_object() */
+   return;
+   }
+   virtio_gpu_cleanup_object(bo);
 }
 
 static const struct drm_gem_object_funcs virtio_gpu_gem_funcs = {
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c 
b/drivers/gpu/drm/virtio/virtgpu_vq.c
index df499fb64ac7..4e22c3914f94 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -164,6 +164,16 @@ static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device 
*vgdev,
 NULL);
 }
 
+static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
+struct virtio_gpu_vbuffer **vbuffer_p,
+int size,
+virtio_gpu_resp_cb cb)
+{
+   return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
+sizeof(struct virtio_gpu_ctrl_hdr),
+NULL);
+}
+
 static void free_vbuf(struct virtio_gpu_device *vgdev,
  struct virtio_gpu_vbuffer *vbuf)
 {
@@ -507,18 +517,31 @@ void virtio_gpu_cmd_create_resource(struct 
virtio_gpu_device *vgdev,
bo->created = true;
 }
 
+static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
+   struct virtio_gpu_vbuffer *vbuf)
+{
+   struct virtio_gpu_object *bo;
+
+   bo = vbuf->resp_cb_data;
+   vbuf->resp_cb_data = NULL;
+
+   virtio_gpu_cleanup_object(bo);
+}
+
 void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
-  uint32_t resource_id)
+  struct virtio_gpu_object *bo)
 {
struct virtio_gpu_resource_unref *cmd_p;
struct 

Re: [PATCH v4 0/7] Add dts for mt8183 GPU (and misc panfrost patches)

2020-02-06 Thread Nicolas Boichat
On Fri, Feb 7, 2020 at 2:18 PM Tomeu Vizoso  wrote:
>
> On 2/7/20 6:26 AM, Nicolas Boichat wrote:
> > Hi!
> >
> > Follow-up on the v3: https://patchwork.kernel.org/cover/11331343/.
> >
> > The main purpose of this series is to upstream the dts change and the
> > binding document, but I wanted to see how far I could probe the GPU, to
> > check that the binding is indeed correct. The rest of the patches are
> > RFC/work-in-progress, but I think some of them could already be picked up.
> >
> > So this is tested on MT8183 with a chromeos-4.19 kernel, and a ton of
> > backports to get the latest panfrost driver (I should probably try on
> > linux-next at some point but this was the path of least resistance).
> >
> > I tested it as a module as it's more challenging (originally probing would
> > work built-in, on boot, but not as a module, as I didn't have the power
> > domain changes, and all power domains are on by default during boot).
> >
> > Probing logs looks like this, currently. They look sane.
> > [  501.319728] panfrost 1304.gpu: clock rate = 51170
> > [  501.320041] panfrost 1304.gpu: Linked as a consumer to regulator.14
> > [  501.320102] panfrost 1304.gpu: Linked as a consumer to regulator.31
> > [  501.320651] panfrost 1304.gpu: Linked as a consumer to 
> > genpd:0:1304.gpu
> > [  501.320954] panfrost 1304.gpu: Linked as a consumer to 
> > genpd:1:1304.gpu
> > [  501.321062] panfrost 1304.gpu: Linked as a consumer to 
> > genpd:2:1304.gpu
> > [  501.321734] panfrost 1304.gpu: mali-g72 id 0x6221 major 0x0 minor 
> > 0x3 status 0x0
> > [  501.321741] panfrost 1304.gpu: features: ,13de77ff, issues: 
> > ,0400
> > [  501.321747] panfrost 1304.gpu: Features: L2:0x07120206 
> > Shader:0x Tiler:0x0809 Mem:0x1 MMU:0x2830 AS:0xff JS:0x7
> > [  501.321752] panfrost 1304.gpu: shader_present=0x7 l2_present=0x1
> > [  501.324951] [drm] Initialized panfrost 1.1.0 20180908 for 1304.gpu 
> > on minor 2
> >
> > Some more changes are still required to get devfreq working, and of course
> > I do not have a userspace driver to test this with.
>
> Have you tried the Panfrost tests in IGT? They are atm quite basic, but
> could be interesting to check that the different HW units are correctly
> powered on.

I haven't, you mean this right?
https://gitlab.freedesktop.org/tomeu/igt-gpu-tools/tree/panfrost

Any specific test you have in mind?

Thanks,

> Regards,
>
> Tomeu
>
> > I believe at least patches 1, 2, and 3 can be merged. 4 and 5 are mostly
> > useful in conjunction with 6 and 7 (which are not ready yet), so I'll let
> > maintainers decide.
> >
> > Thanks!
> >
> > Nicolas Boichat (7):
> >dt-bindings: gpu: mali-bifrost: Add Mediatek MT8183
> >arm64: dts: mt8183: Add node for the Mali GPU
> >drm/panfrost: Improve error reporting in panfrost_gpu_power_on
> >drm/panfrost: Add support for multiple regulators
> >drm/panfrost: Add support for multiple power domains
> >RFC: drm/panfrost: Add mt8183-mali compatible string
> >RFC: drm/panfrost: devfreq: Add support for 2 regulators
> >
> >   .../bindings/gpu/arm,mali-bifrost.yaml|  25 
> >   arch/arm64/boot/dts/mediatek/mt8183-evb.dts   |   7 +
> >   arch/arm64/boot/dts/mediatek/mt8183.dtsi  | 105 +++
> >   drivers/gpu/drm/panfrost/panfrost_devfreq.c   |  17 +++
> >   drivers/gpu/drm/panfrost/panfrost_device.c| 123 +++---
> >   drivers/gpu/drm/panfrost/panfrost_device.h|  27 +++-
> >   drivers/gpu/drm/panfrost/panfrost_drv.c   |  41 --
> >   drivers/gpu/drm/panfrost/panfrost_gpu.c   |  11 +-
> >   8 files changed, 326 insertions(+), 30 deletions(-)
> >
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH v2] drm/virtio: fix ring free check

2020-02-06 Thread Chia-I Wu
On Thu, Feb 6, 2020 at 10:47 PM Gerd Hoffmann  wrote:
>
> If the virtio device supports indirect ring descriptors we need only one
> ring entry for the whole command.  Take that into account when checking
> whether the virtqueue has enough free entries for our command.
>
> Signed-off-by: Gerd Hoffmann 
> ---
>  drivers/gpu/drm/virtio/virtgpu_drv.h | 1 +
>  drivers/gpu/drm/virtio/virtgpu_debugfs.c | 1 +
>  drivers/gpu/drm/virtio/virtgpu_kms.c | 3 +++
>  drivers/gpu/drm/virtio/virtgpu_vq.c  | 3 +++
>  4 files changed, 8 insertions(+)
>
> diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h 
> b/drivers/gpu/drm/virtio/virtgpu_drv.h
> index 7e69c06e168e..d278c8c50f39 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_drv.h
> +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
> @@ -193,6 +193,7 @@ struct virtio_gpu_device {
>
> bool has_virgl_3d;
> bool has_edid;
> +   bool has_indirect;
has_indirect_desc?  Either way,

Reviewed-by: Chia-I Wu 
>
> struct work_struct config_changed_work;
>
> diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c 
> b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
> index 5156e6b279db..e27120d512b0 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
> @@ -47,6 +47,7 @@ static int virtio_gpu_features(struct seq_file *m, void 
> *data)
>
> virtio_add_bool(m, "virgl", vgdev->has_virgl_3d);
> virtio_add_bool(m, "edid", vgdev->has_edid);
> +   virtio_add_bool(m, "indirect", vgdev->has_indirect);
> virtio_add_int(m, "cap sets", vgdev->num_capsets);
> virtio_add_int(m, "scanouts", vgdev->num_scanouts);
> return 0;
> diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c 
> b/drivers/gpu/drm/virtio/virtgpu_kms.c
> index 2f5773e43557..c1086df49816 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_kms.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
> @@ -159,6 +159,9 @@ int virtio_gpu_init(struct drm_device *dev)
> if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) {
> vgdev->has_edid = true;
> }
> +   if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC)) {
> +   vgdev->has_indirect = true;
> +   }
>
> DRM_INFO("features: %cvirgl %cedid\n",
>  vgdev->has_virgl_3d ? '+' : '-',
> diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c 
> b/drivers/gpu/drm/virtio/virtgpu_vq.c
> index 41e475fbd67b..cc02fc4bab2a 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_vq.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
> @@ -330,6 +330,9 @@ static void virtio_gpu_queue_ctrl_sgs(struct 
> virtio_gpu_device *vgdev,
> bool notify = false;
> int ret;
>
> +   if (vgdev->has_indirect)
> +   elemcnt = 1;
> +
>  again:
> spin_lock(>ctrlq.qlock);
>
> --
> 2.18.1
>
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH v2] drm/virtio: fix ring free check

2020-02-06 Thread Gerd Hoffmann
If the virtio device supports indirect ring descriptors we need only one
ring entry for the whole command.  Take that into account when checking
whether the virtqueue has enough free entries for our command.

Signed-off-by: Gerd Hoffmann 
---
 drivers/gpu/drm/virtio/virtgpu_drv.h | 1 +
 drivers/gpu/drm/virtio/virtgpu_debugfs.c | 1 +
 drivers/gpu/drm/virtio/virtgpu_kms.c | 3 +++
 drivers/gpu/drm/virtio/virtgpu_vq.c  | 3 +++
 4 files changed, 8 insertions(+)

diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h 
b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 7e69c06e168e..d278c8c50f39 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -193,6 +193,7 @@ struct virtio_gpu_device {
 
bool has_virgl_3d;
bool has_edid;
+   bool has_indirect;
 
struct work_struct config_changed_work;
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c 
b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
index 5156e6b279db..e27120d512b0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -47,6 +47,7 @@ static int virtio_gpu_features(struct seq_file *m, void *data)
 
virtio_add_bool(m, "virgl", vgdev->has_virgl_3d);
virtio_add_bool(m, "edid", vgdev->has_edid);
+   virtio_add_bool(m, "indirect", vgdev->has_indirect);
virtio_add_int(m, "cap sets", vgdev->num_capsets);
virtio_add_int(m, "scanouts", vgdev->num_scanouts);
return 0;
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c 
b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 2f5773e43557..c1086df49816 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -159,6 +159,9 @@ int virtio_gpu_init(struct drm_device *dev)
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) {
vgdev->has_edid = true;
}
+   if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC)) {
+   vgdev->has_indirect = true;
+   }
 
DRM_INFO("features: %cvirgl %cedid\n",
 vgdev->has_virgl_3d ? '+' : '-',
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c 
b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 41e475fbd67b..cc02fc4bab2a 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -330,6 +330,9 @@ static void virtio_gpu_queue_ctrl_sgs(struct 
virtio_gpu_device *vgdev,
bool notify = false;
int ret;
 
+   if (vgdev->has_indirect)
+   elemcnt = 1;
+
 again:
spin_lock(>ctrlq.qlock);
 
-- 
2.18.1

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH] drm/virtio: fix ring free check

2020-02-06 Thread Gerd Hoffmann
  Hi,

> > +   indirect = virtio_has_feature(vgdev->vdev, 
> > VIRTIO_RING_F_INDIRECT_DESC);
> > +   vqcnt = indirect ? 1 : elemcnt;
> Is the feature dynamic and require the lock held?  If not, the result
> can be cached and the fixup can happen before grabbing the lock

Not dynamic, so yes, caching makes sense.

cheers,
  Gerd

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH v4 0/7] Add dts for mt8183 GPU (and misc panfrost patches)

2020-02-06 Thread Tomeu Vizoso

On 2/7/20 6:26 AM, Nicolas Boichat wrote:

Hi!

Follow-up on the v3: https://patchwork.kernel.org/cover/11331343/.

The main purpose of this series is to upstream the dts change and the
binding document, but I wanted to see how far I could probe the GPU, to
check that the binding is indeed correct. The rest of the patches are
RFC/work-in-progress, but I think some of them could already be picked up.

So this is tested on MT8183 with a chromeos-4.19 kernel, and a ton of
backports to get the latest panfrost driver (I should probably try on
linux-next at some point but this was the path of least resistance).

I tested it as a module as it's more challenging (originally probing would
work built-in, on boot, but not as a module, as I didn't have the power
domain changes, and all power domains are on by default during boot).

Probing logs looks like this, currently. They look sane.
[  501.319728] panfrost 1304.gpu: clock rate = 51170
[  501.320041] panfrost 1304.gpu: Linked as a consumer to regulator.14
[  501.320102] panfrost 1304.gpu: Linked as a consumer to regulator.31
[  501.320651] panfrost 1304.gpu: Linked as a consumer to 
genpd:0:1304.gpu
[  501.320954] panfrost 1304.gpu: Linked as a consumer to 
genpd:1:1304.gpu
[  501.321062] panfrost 1304.gpu: Linked as a consumer to 
genpd:2:1304.gpu
[  501.321734] panfrost 1304.gpu: mali-g72 id 0x6221 major 0x0 minor 0x3 
status 0x0
[  501.321741] panfrost 1304.gpu: features: ,13de77ff, issues: 
,0400
[  501.321747] panfrost 1304.gpu: Features: L2:0x07120206 Shader:0x 
Tiler:0x0809 Mem:0x1 MMU:0x2830 AS:0xff JS:0x7
[  501.321752] panfrost 1304.gpu: shader_present=0x7 l2_present=0x1
[  501.324951] [drm] Initialized panfrost 1.1.0 20180908 for 1304.gpu on 
minor 2

Some more changes are still required to get devfreq working, and of course
I do not have a userspace driver to test this with.


Have you tried the Panfrost tests in IGT? They are atm quite basic, but 
could be interesting to check that the different HW units are correctly 
powered on.


Regards,

Tomeu


I believe at least patches 1, 2, and 3 can be merged. 4 and 5 are mostly
useful in conjunction with 6 and 7 (which are not ready yet), so I'll let
maintainers decide.

Thanks!

Nicolas Boichat (7):
   dt-bindings: gpu: mali-bifrost: Add Mediatek MT8183
   arm64: dts: mt8183: Add node for the Mali GPU
   drm/panfrost: Improve error reporting in panfrost_gpu_power_on
   drm/panfrost: Add support for multiple regulators
   drm/panfrost: Add support for multiple power domains
   RFC: drm/panfrost: Add mt8183-mali compatible string
   RFC: drm/panfrost: devfreq: Add support for 2 regulators

  .../bindings/gpu/arm,mali-bifrost.yaml|  25 
  arch/arm64/boot/dts/mediatek/mt8183-evb.dts   |   7 +
  arch/arm64/boot/dts/mediatek/mt8183.dtsi  | 105 +++
  drivers/gpu/drm/panfrost/panfrost_devfreq.c   |  17 +++
  drivers/gpu/drm/panfrost/panfrost_device.c| 123 +++---
  drivers/gpu/drm/panfrost/panfrost_device.h|  27 +++-
  drivers/gpu/drm/panfrost/panfrost_drv.c   |  41 --
  drivers/gpu/drm/panfrost/panfrost_gpu.c   |  11 +-
  8 files changed, 326 insertions(+), 30 deletions(-)


___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH v4 1/7] dt-bindings: gpu: mali-bifrost: Add Mediatek MT8183

2020-02-06 Thread Nicolas Boichat
Define a compatible string for the Mali Bifrost GPU found in
Mediatek's MT8183 SoCs.

Signed-off-by: Nicolas Boichat 
Reviewed-by: Alyssa Rosenzweig 
---

v4:
 - Add power-domain-names description
   (kept Alyssa's reviewed-by as the change is minor)
v3:
 - No change

 .../bindings/gpu/arm,mali-bifrost.yaml| 25 +++
 1 file changed, 25 insertions(+)

diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml 
b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
index 4ea6a8789699709..0d93b3981445977 100644
--- a/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
@@ -17,6 +17,7 @@ properties:
 items:
   - enum:
   - amlogic,meson-g12a-mali
+  - mediatek,mt8183-mali
   - realtek,rtd1619-mali
   - rockchip,px30-mali
   - const: arm,mali-bifrost # Mali Bifrost GPU model/revision is fully 
discoverable
@@ -62,6 +63,30 @@ allOf:
   minItems: 2
   required:
 - resets
+  - if:
+  properties:
+compatible:
+  contains:
+const: mediatek,mt8183-mali
+then:
+  properties:
+sram-supply: true
+power-domains:
+  description:
+List of phandle and PM domain specifier as documented in
+Documentation/devicetree/bindings/power/power_domain.txt
+  minItems: 3
+  maxItems: 3
+power-domain-names:
+  items:
+- const: core0
+- const: core1
+- const: 2d
+
+  required:
+- sram-supply
+- power-domains
+- power-domains-names
 
 examples:
   - |
-- 
2.25.0.341.g760bfbb309-goog

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH v4 4/7] drm/panfrost: Add support for multiple regulators

2020-02-06 Thread Nicolas Boichat
Some GPUs, namely, the bifrost/g72 part on MT8183, have a second
regulator for their SRAM, let's add support for that.

We extend the framework in a generic manner so that we could
support more than 2 regulators, if required.

Signed-off-by: Nicolas Boichat 

---

v4:
 - nits: Run through latest version of checkpatch:
   - Use WARN instead of BUG_ON.
   - Drop braces in single expression for loop.
   - *comp not * comp
v3:
 - Make this more generic, by allowing any number of regulators
   (in practice we fix the maximum number of regulators to 2, but
   this could be increased easily).
 - We only probe the second regulator if the device tree matching
   data asks for it.
 - I couldn't find a way to detect the number of regulators in the
   device tree, if we wanted to refuse to probe the device if there
   are too many regulators, which might be required for safety, see
   the thread on v2 [1].
 - The discussion also included the idea of a separate device tree
   entry for a "soft PDC", or at least a separate driver. I'm not
   sure to understand the full picture, and how different vendors
   implement this, so I'm still integrating everything in the main
   driver. I'd be happy to try to make mt8183 fit into such a
   framework after it's created, but I don't think I'm best placed
   to implement (and again, the main purpose of this was to test
   if the binding is correct).

[1] https://patchwork.kernel.org/patch/11322839/

 drivers/gpu/drm/panfrost/panfrost_device.c | 26 +---
 drivers/gpu/drm/panfrost/panfrost_device.h | 15 +++-
 drivers/gpu/drm/panfrost/panfrost_drv.c| 28 +++---
 3 files changed, 51 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c 
b/drivers/gpu/drm/panfrost/panfrost_device.c
index 238fb6d54df4732..3720d50f6d9f965 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -87,18 +87,27 @@ static void panfrost_clk_fini(struct panfrost_device *pfdev)
 
 static int panfrost_regulator_init(struct panfrost_device *pfdev)
 {
-   int ret;
+   int ret, i;
 
-   pfdev->regulator = devm_regulator_get(pfdev->dev, "mali");
-   if (IS_ERR(pfdev->regulator)) {
-   ret = PTR_ERR(pfdev->regulator);
-   dev_err(pfdev->dev, "failed to get regulator: %d\n", ret);
+   if (WARN(pfdev->comp->num_supplies > ARRAY_SIZE(pfdev->regulators),
+   "Too many supplies in compatible structure.\n"))
+   return -EINVAL;
+
+   for (i = 0; i < pfdev->comp->num_supplies; i++)
+   pfdev->regulators[i].supply = pfdev->comp->supply_names[i];
+
+   ret = devm_regulator_bulk_get(pfdev->dev,
+ pfdev->comp->num_supplies,
+ pfdev->regulators);
+   if (ret < 0) {
+   dev_err(pfdev->dev, "failed to get regulators: %d\n", ret);
return ret;
}
 
-   ret = regulator_enable(pfdev->regulator);
+   ret = regulator_bulk_enable(pfdev->comp->num_supplies,
+   pfdev->regulators);
if (ret < 0) {
-   dev_err(pfdev->dev, "failed to enable regulator: %d\n", ret);
+   dev_err(pfdev->dev, "failed to enable regulators: %d\n", ret);
return ret;
}
 
@@ -107,7 +116,8 @@ static int panfrost_regulator_init(struct panfrost_device 
*pfdev)
 
 static void panfrost_regulator_fini(struct panfrost_device *pfdev)
 {
-   regulator_disable(pfdev->regulator);
+   regulator_bulk_disable(pfdev->comp->num_supplies,
+   pfdev->regulators);
 }
 
 int panfrost_device_init(struct panfrost_device *pfdev)
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h 
b/drivers/gpu/drm/panfrost/panfrost_device.h
index 06713811b92cdf7..c9468bc5573ac9d 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -7,6 +7,7 @@
 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -19,6 +20,7 @@ struct panfrost_job;
 struct panfrost_perfcnt;
 
 #define NUM_JOB_SLOTS 3
+#define MAX_REGULATORS 2
 
 struct panfrost_features {
u16 id;
@@ -51,6 +53,16 @@ struct panfrost_features {
unsigned long hw_issues[64 / BITS_PER_LONG];
 };
 
+/*
+ * Features that cannot be automatically detected and need matching using the
+ * compatible string, typically SoC-specific.
+ */
+struct panfrost_compatible {
+   /* Supplies count and names. */
+   int num_supplies;
+   const char * const *supply_names;
+};
+
 struct panfrost_device {
struct device *dev;
struct drm_device *ddev;
@@ -59,10 +71,11 @@ struct panfrost_device {
void __iomem *iomem;
struct clk *clock;
struct clk *bus_clock;
-   struct regulator *regulator;
+   struct regulator_bulk_data regulators[MAX_REGULATORS];
struct reset_control *rstc;
 
 

[PATCH v4 0/7] Add dts for mt8183 GPU (and misc panfrost patches)

2020-02-06 Thread Nicolas Boichat
Hi!

Follow-up on the v3: https://patchwork.kernel.org/cover/11331343/.

The main purpose of this series is to upstream the dts change and the
binding document, but I wanted to see how far I could probe the GPU, to
check that the binding is indeed correct. The rest of the patches are
RFC/work-in-progress, but I think some of them could already be picked up.

So this is tested on MT8183 with a chromeos-4.19 kernel, and a ton of
backports to get the latest panfrost driver (I should probably try on
linux-next at some point but this was the path of least resistance).

I tested it as a module as it's more challenging (originally probing would
work built-in, on boot, but not as a module, as I didn't have the power
domain changes, and all power domains are on by default during boot).

Probing logs looks like this, currently. They look sane.
[  501.319728] panfrost 1304.gpu: clock rate = 51170
[  501.320041] panfrost 1304.gpu: Linked as a consumer to regulator.14
[  501.320102] panfrost 1304.gpu: Linked as a consumer to regulator.31
[  501.320651] panfrost 1304.gpu: Linked as a consumer to 
genpd:0:1304.gpu
[  501.320954] panfrost 1304.gpu: Linked as a consumer to 
genpd:1:1304.gpu
[  501.321062] panfrost 1304.gpu: Linked as a consumer to 
genpd:2:1304.gpu
[  501.321734] panfrost 1304.gpu: mali-g72 id 0x6221 major 0x0 minor 0x3 
status 0x0
[  501.321741] panfrost 1304.gpu: features: ,13de77ff, issues: 
,0400
[  501.321747] panfrost 1304.gpu: Features: L2:0x07120206 Shader:0x 
Tiler:0x0809 Mem:0x1 MMU:0x2830 AS:0xff JS:0x7
[  501.321752] panfrost 1304.gpu: shader_present=0x7 l2_present=0x1
[  501.324951] [drm] Initialized panfrost 1.1.0 20180908 for 1304.gpu on 
minor 2

Some more changes are still required to get devfreq working, and of course
I do not have a userspace driver to test this with.

I believe at least patches 1, 2, and 3 can be merged. 4 and 5 are mostly
useful in conjunction with 6 and 7 (which are not ready yet), so I'll let
maintainers decide.

Thanks!

Nicolas Boichat (7):
  dt-bindings: gpu: mali-bifrost: Add Mediatek MT8183
  arm64: dts: mt8183: Add node for the Mali GPU
  drm/panfrost: Improve error reporting in panfrost_gpu_power_on
  drm/panfrost: Add support for multiple regulators
  drm/panfrost: Add support for multiple power domains
  RFC: drm/panfrost: Add mt8183-mali compatible string
  RFC: drm/panfrost: devfreq: Add support for 2 regulators

 .../bindings/gpu/arm,mali-bifrost.yaml|  25 
 arch/arm64/boot/dts/mediatek/mt8183-evb.dts   |   7 +
 arch/arm64/boot/dts/mediatek/mt8183.dtsi  | 105 +++
 drivers/gpu/drm/panfrost/panfrost_devfreq.c   |  17 +++
 drivers/gpu/drm/panfrost/panfrost_device.c| 123 +++---
 drivers/gpu/drm/panfrost/panfrost_device.h|  27 +++-
 drivers/gpu/drm/panfrost/panfrost_drv.c   |  41 --
 drivers/gpu/drm/panfrost/panfrost_gpu.c   |  11 +-
 8 files changed, 326 insertions(+), 30 deletions(-)

-- 
2.25.0.341.g760bfbb309-goog

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH v4 2/7] arm64: dts: mt8183: Add node for the Mali GPU

2020-02-06 Thread Nicolas Boichat
Add a basic GPU node for mt8183.

Signed-off-by: Nicolas Boichat 
Reviewed-by: Alyssa Rosenzweig 
---
Upstreaming what matches existing bindings from our Chromium OS tree:
https://chromium.googlesource.com/chromiumos/third_party/kernel/+/chromeos-4.19/arch/arm64/boot/dts/mediatek/mt8183.dtsi#1348

The evb part of this change depends on this patch to add PMIC dtsi:
https://patchwork.kernel.org/patch/10928161/

The binding we use with out-of-tree Mali drivers includes more
clocks, this is used for devfreq: the out-of-tree driver switches
clk_mux to clk_sub_parent (26Mhz), adjusts clk_main_parent, then
switches clk_mux back to clk_main_parent:
(see 
https://chromium.googlesource.com/chromiumos/third_party/kernel/+/chromeos-4.19/drivers/gpu/arm/midgard/platform/mediatek/mali_kbase_runtime_pm.c#423)
clocks =
< CLK_TOP_MFGPLL_CK>,
< CLK_TOP_MUX_MFG>,
<>,
< CLK_MFG_BG3D>;
clock-names =
"clk_main_parent",
"clk_mux",
"clk_sub_parent",
"subsys_mfg_cg";

v4:
 - Add power-domain-names to describe the 3 domains.
   (kept Alyssa's reviewed-by as the change is minor)

v3:
 - No changes

v2:
 - Use sram instead of mali_sram as SRAM supply name.
 - Rename mali@ to gpu@.

 arch/arm64/boot/dts/mediatek/mt8183-evb.dts |   7 ++
 arch/arm64/boot/dts/mediatek/mt8183.dtsi| 105 
 2 files changed, 112 insertions(+)

diff --git a/arch/arm64/boot/dts/mediatek/mt8183-evb.dts 
b/arch/arm64/boot/dts/mediatek/mt8183-evb.dts
index 1fb195c683c3d01..7d609e0cd9b4975 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8183-evb.dts
@@ -7,6 +7,7 @@
 
 /dts-v1/;
 #include "mt8183.dtsi"
+#include "mt6358.dtsi"
 
 / {
model = "MediaTek MT8183 evaluation board";
@@ -30,6 +31,12 @@  {
status = "okay";
 };
 
+ {
+   supply-names = "mali", "sram";
+   mali-supply = <_vgpu_reg>;
+   sram-supply = <_vsram_gpu_reg>;
+};
+
  {
pinctrl-names = "default";
pinctrl-0 = <_pins_0>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi 
b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
index 124f9d3e09f532c..74b5305f663f740 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
@@ -599,6 +599,111 @@ mfgcfg: syscon@1300 {
#clock-cells = <1>;
};
 
+   gpu: gpu@1304 {
+   compatible = "mediatek,mt8183-mali", "arm,mali-bifrost";
+   reg = <0 0x1304 0 0x4000>;
+   interrupts =
+   ,
+   ,
+   ;
+   interrupt-names = "job", "mmu", "gpu";
+
+   clocks = < CLK_TOP_MFGPLL_CK>;
+
+   power-domains =
+   < MT8183_POWER_DOMAIN_MFG_CORE0>,
+   < MT8183_POWER_DOMAIN_MFG_CORE1>,
+   < MT8183_POWER_DOMAIN_MFG_2D>;
+   power-domain-names = "core0", "core1", "2d";
+
+   operating-points-v2 = <_opp_table>;
+   };
+
+   gpu_opp_table: opp_table0 {
+   compatible = "operating-points-v2";
+   opp-shared;
+
+   opp-3 {
+   opp-hz = /bits/ 64 <3>;
+   opp-microvolt = <625000>, <85>;
+   };
+
+   opp-32000 {
+   opp-hz = /bits/ 64 <32000>;
+   opp-microvolt = <631250>, <85>;
+   };
+
+   opp-34000 {
+   opp-hz = /bits/ 64 <34000>;
+   opp-microvolt = <637500>, <85>;
+   };
+
+   opp-36000 {
+   opp-hz = /bits/ 64 <36000>;
+   opp-microvolt = <643750>, <85>;
+   };
+
+   opp-38000 {
+   opp-hz = /bits/ 64 <38000>;
+   opp-microvolt = <65>, <85>;
+   };
+
+   opp-4 {
+   opp-hz = /bits/ 64 <4>;
+   opp-microvolt = <656250>, <85>;
+   };
+
+   opp-42000 {
+   opp-hz = /bits/ 64 <42000>;
+   opp-microvolt = <662500>, <85>;
+   };
+
+   opp-46000 {
+   opp-hz = /bits/ 64 <46000>;
+   opp-microvolt = <675000>, <85>;
+   };
+
+   opp-5 {
+

[PATCH v4 3/7] drm/panfrost: Improve error reporting in panfrost_gpu_power_on

2020-02-06 Thread Nicolas Boichat
It is useful to know which component cannot be powered on.

Signed-off-by: Nicolas Boichat 
Reviewed-by: Steven Price 
Reviewed-by: Alyssa Rosenzweig 
---

Was useful when trying to probe Bifrost GPU, to understand what
issue we are facing.

v4:
 - No change
v3:
 - Rebased on https://patchwork.kernel.org/patch/11325689/

 drivers/gpu/drm/panfrost/panfrost_gpu.c | 11 +++
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c 
b/drivers/gpu/drm/panfrost/panfrost_gpu.c
index 460fc190de6e815..856f2fd1fa8ed27 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -308,17 +308,20 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev)
gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present);
ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO,
val, val == pfdev->features.l2_present, 100, 1000);
+   if (ret)
+   dev_err(pfdev->dev, "error powering up gpu L2");
 
gpu_write(pfdev, SHADER_PWRON_LO, pfdev->features.shader_present);
-   ret |= readl_relaxed_poll_timeout(pfdev->iomem + SHADER_READY_LO,
+   ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_READY_LO,
val, val == pfdev->features.shader_present, 100, 1000);
+   if (ret)
+   dev_err(pfdev->dev, "error powering up gpu shader");
 
gpu_write(pfdev, TILER_PWRON_LO, pfdev->features.tiler_present);
-   ret |= readl_relaxed_poll_timeout(pfdev->iomem + TILER_READY_LO,
+   ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_READY_LO,
val, val == pfdev->features.tiler_present, 100, 1000);
-
if (ret)
-   dev_err(pfdev->dev, "error powering up gpu");
+   dev_err(pfdev->dev, "error powering up gpu tiler");
 }
 
 void panfrost_gpu_power_off(struct panfrost_device *pfdev)
-- 
2.25.0.341.g760bfbb309-goog

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH v4 6/7] RFC: drm/panfrost: Add mt8183-mali compatible string

2020-02-06 Thread Nicolas Boichat
For testing only, the driver doesn't really work yet, AFAICT.

Signed-off-by: Nicolas Boichat 

---

v4:
 - Add power domain names.
v3:
 - Match mt8183-mali instead of bifrost, as we require special
   handling for the 2 regulators and 3 power domains.

 drivers/gpu/drm/panfrost/panfrost_drv.c | 11 +++
 1 file changed, 11 insertions(+)

diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c 
b/drivers/gpu/drm/panfrost/panfrost_drv.c
index a6e162236d67fdf..497c375932ad589 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -667,6 +667,15 @@ static const struct panfrost_compatible default_data = {
.pm_domain_names = NULL,
 };
 
+const char * const mediatek_mt8183_supplies[] = { "mali", "sram" };
+const char * const mediatek_mt8183_pm_domains[] = { "core0", "core1", "2d" };
+static const struct panfrost_compatible mediatek_mt8183_data = {
+   .num_supplies = ARRAY_SIZE(mediatek_mt8183_supplies),
+   .supply_names = mediatek_mt8183_supplies,
+   .num_pm_domains = 3,
+   .pm_domain_names = mediatek_mt8183_pm_domains,
+};
+
 static const struct of_device_id dt_match[] = {
{ .compatible = "arm,mali-t604", .data = _data, },
{ .compatible = "arm,mali-t624", .data = _data, },
@@ -677,6 +686,8 @@ static const struct of_device_id dt_match[] = {
{ .compatible = "arm,mali-t830", .data = _data, },
{ .compatible = "arm,mali-t860", .data = _data, },
{ .compatible = "arm,mali-t880", .data = _data, },
+   { .compatible = "mediatek,mt8183-mali",
+   .data = _mt8183_data },
{}
 };
 MODULE_DEVICE_TABLE(of, dt_match);
-- 
2.25.0.341.g760bfbb309-goog

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH v4 5/7] drm/panfrost: Add support for multiple power domains

2020-02-06 Thread Nicolas Boichat
When there is a single power domain per device, the core will
ensure the power domain is switched on (so it is technically
equivalent to having no power domain specified at all).

However, when there are multiple domains, as in MT8183 Bifrost
GPU, we need to handle them in driver code.

Signed-off-by: Nicolas Boichat 

---

The downstream driver we use on chromeos-4.19 currently uses 2
additional devices in device tree to accommodate for this [1], but
I believe this solution is cleaner.

[1] 
https://chromium.googlesource.com/chromiumos/third_party/kernel/+/refs/heads/chromeos-4.19/drivers/gpu/arm/midgard/platform/mediatek/mali_kbase_runtime_pm.c#31

v4:
 - Match the exact power domain names as specified in the compatible
   struct, instead of just matching the number of power domains.
   [Review: Ulf Hansson]
 - Dropped print and reordered function [Review: Steven Price]
 - nits: Run through latest version of checkpatch:
   - Use WARN instead of BUG_ON.
   - Drop braces for single expression if block.
v3:
 - Use the compatible matching data to specify the number of power
   domains. Note that setting 0 or 1 in num_pm_domains is equivalent
   as the core will handle these 2 cases in the exact same way
   (automatically, without driver intervention), and there should
   be no adverse consequence in this case (the concern is about
   switching on only some power domains and not others).

 drivers/gpu/drm/panfrost/panfrost_device.c | 97 --
 drivers/gpu/drm/panfrost/panfrost_device.h | 11 +++
 drivers/gpu/drm/panfrost/panfrost_drv.c|  2 +
 3 files changed, 102 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c 
b/drivers/gpu/drm/panfrost/panfrost_device.c
index 3720d50f6d9f965..8136babd3ba9935 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -5,6 +5,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 
 #include "panfrost_device.h"
@@ -120,6 +121,79 @@ static void panfrost_regulator_fini(struct panfrost_device 
*pfdev)
pfdev->regulators);
 }
 
+static void panfrost_pm_domain_fini(struct panfrost_device *pfdev)
+{
+   int i;
+
+   for (i = 0; i < ARRAY_SIZE(pfdev->pm_domain_devs); i++) {
+   if (!pfdev->pm_domain_devs[i])
+   break;
+
+   if (pfdev->pm_domain_links[i])
+   device_link_del(pfdev->pm_domain_links[i]);
+
+   dev_pm_domain_detach(pfdev->pm_domain_devs[i], true);
+   }
+}
+
+static int panfrost_pm_domain_init(struct panfrost_device *pfdev)
+{
+   int err;
+   int i, num_domains;
+
+   num_domains = of_count_phandle_with_args(pfdev->dev->of_node,
+"power-domains",
+"#power-domain-cells");
+
+   /*
+* Single domain is handled by the core, and, if only a single power
+* domain is requested, the property is optional.
+*/
+   if (num_domains < 2 && pfdev->comp->num_pm_domains < 2)
+   return 0;
+
+   if (num_domains != pfdev->comp->num_pm_domains) {
+   dev_err(pfdev->dev,
+   "Incorrect number of power domains: %d provided, %d 
needed\n",
+   num_domains, pfdev->comp->num_pm_domains);
+   return -EINVAL;
+   }
+
+   if (WARN(num_domains > ARRAY_SIZE(pfdev->pm_domain_devs),
+   "Too many supplies in compatible structure.\n"))
+   return -EINVAL;
+
+   for (i = 0; i < num_domains; i++) {
+   pfdev->pm_domain_devs[i] =
+   dev_pm_domain_attach_by_name(pfdev->dev,
+   pfdev->comp->pm_domain_names[i]);
+   if (IS_ERR_OR_NULL(pfdev->pm_domain_devs[i])) {
+   err = PTR_ERR(pfdev->pm_domain_devs[i]) ? : -ENODATA;
+   pfdev->pm_domain_devs[i] = NULL;
+   dev_err(pfdev->dev,
+   "failed to get pm-domain %s(%d): %d\n",
+   pfdev->comp->pm_domain_names[i], i, err);
+   goto err;
+   }
+
+   pfdev->pm_domain_links[i] = device_link_add(pfdev->dev,
+   pfdev->pm_domain_devs[i], DL_FLAG_PM_RUNTIME |
+   DL_FLAG_STATELESS | DL_FLAG_RPM_ACTIVE);
+   if (!pfdev->pm_domain_links[i]) {
+   dev_err(pfdev->pm_domain_devs[i],
+   "adding device link failed!\n");
+   err = -ENODEV;
+   goto err;
+   }
+   }
+
+   return 0;
+
+err:
+   panfrost_pm_domain_fini(pfdev);
+   return err;
+}
+
 int panfrost_device_init(struct panfrost_device *pfdev)
 {
int err;
@@ -150,37 +224,43 @@ int 

[PATCH v4 7/7] RFC: drm/panfrost: devfreq: Add support for 2 regulators

2020-02-06 Thread Nicolas Boichat
The Bifrost GPU on MT8183 uses 2 regulators (core and SRAM) for
devfreq, and provides OPP table with 2 sets of voltages.

TODO: This is incomplete as we'll need to add support for setting
a pair of voltages as well.

Signed-off-by: Nicolas Boichat 

---
 drivers/gpu/drm/panfrost/panfrost_devfreq.c | 17 +
 drivers/gpu/drm/panfrost/panfrost_device.h  |  1 +
 2 files changed, 18 insertions(+)

diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c 
b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index 413987038fbfccb..9c0987a3d71c597 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -79,6 +79,21 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
struct devfreq *devfreq;
struct thermal_cooling_device *cooling;
 
+   /* If we have 2 regulator, we need an OPP table with 2 voltages. */
+   if (pfdev->comp->num_supplies > 1) {
+   pfdev->devfreq.dev_opp_table =
+   dev_pm_opp_set_regulators(dev,
+   pfdev->comp->supply_names,
+   pfdev->comp->num_supplies);
+   if (IS_ERR(pfdev->devfreq.dev_opp_table)) {
+   ret = PTR_ERR(pfdev->devfreq.dev_opp_table);
+   pfdev->devfreq.dev_opp_table = NULL;
+   dev_err(dev,
+   "Failed to init devfreq opp table: %d\n", ret);
+   return ret;
+   }
+   }
+
ret = dev_pm_opp_of_add_table(dev);
if (ret == -ENODEV) /* Optional, continue without devfreq */
return 0;
@@ -119,6 +134,8 @@ void panfrost_devfreq_fini(struct panfrost_device *pfdev)
if (pfdev->devfreq.cooling)
devfreq_cooling_unregister(pfdev->devfreq.cooling);
dev_pm_opp_of_remove_table(>pdev->dev);
+   if (pfdev->devfreq.dev_opp_table)
+   dev_pm_opp_put_regulators(pfdev->devfreq.dev_opp_table);
 }
 
 void panfrost_devfreq_resume(struct panfrost_device *pfdev)
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h 
b/drivers/gpu/drm/panfrost/panfrost_device.h
index c30c719a805940a..5009a8b7c853ea1 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -110,6 +110,7 @@ struct panfrost_device {
struct {
struct devfreq *devfreq;
struct thermal_cooling_device *cooling;
+   struct opp_table *dev_opp_table;
ktime_t busy_time;
ktime_t idle_time;
ktime_t time_last_update;
-- 
2.25.0.341.g760bfbb309-goog

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[git pull] drm fixes for 5.6-rc1

2020-02-06 Thread Dave Airlie
Hi Linus,

Just some fixes on top of the merge windows pull, the tegra changes
fix some regressions in the merge, nouveau has a few modesetting
fixes. The amdgpu fixes are a bit bigger, but they contain a couple of
weeks of fixes, and doesn't seem to contain anything that isn't really
a fix.

Regards,
Dave.

drm-next-2020-02-04:
drm fixes for 5.6-rc1

The following changes since commit b45f1b3b585e195a7daead16d914e164310b1df6:

  Merge branch 'ttm-prot-fix' of
git://people.freedesktop.org/~thomash/linux into drm-next (2020-01-31
16:58:35 +1000)

are available in the Git repository at:

  git://anongit.freedesktop.org/drm/drm tags/drm-next-2020-02-07

for you to fetch changes up to 9f880327160feb695de03caa29604883b0d00087:

  Merge tag 'amd-drm-next-5.6-2020-02-05' of
git://people.freedesktop.org/~agd5f/linux into drm-next (2020-02-07
12:29:36 +1000)


drm fixes for 5.6-rc1

tegra:
- merge window regression fixes

nouveau:
- couple of volta/turing modesetting fixes

amdgpu:
- EDC fixes for Arcturus
- GDDR6 memory training fixes
- Fix for reading gfx clockgating registers while in GFXOFF state
- i2c freq fixes
- Misc display fixes
- TLB invalidation fix when using semaphores
- VCN 2.5 instancing fixes
- Switch raven1 gfxoff to a blacklist
- Coreboot workaround for KV/KB
- Root cause dongle fixes for display and revert workaround
- Enable GPU reset for renoir and navi
- Navi overclocking fixes
- Fix up confusing warnings in display clock validation on raven

amdkfd:
- SDMA fix

radeon:
- Misc LUT fixes


Alex Deucher (12):
  drm/amdgpu: attempt to enable gfxoff on more raven1 boards (v2)
  drm/amdgpu: original raven doesn't support full asic reset
  drm/amdgpu: enable GPU reset by default on Navi
  drm/amdgpu: enable GPU reset by default on renoir
  drm/amdgpu/navi10: add mclk to navi10_get_clock_by_type_with_latency
  drm/amdgpu/navi: fix index for OD MCLK
  drm/amdgpu/navi10: add OD_RANGE for navi overclocking
  drm/amdgpu: fetch default VDDC curve voltages (v2)
  drm/amdgpu/display: handle multiple numbers of fclks in dcn_calcs.c (v2)
  drm/amdgpu/smu10: fix smu10_get_clock_by_type_with_latency
  drm/amdgpu/smu10: fix smu10_get_clock_by_type_with_voltage
  drm/amdgpu: update default voltage for boot od table for navi1x

Alex Sierra (1):
  drm/amdgpu: modify packet size for pm4 flush tlbs

Anthony Koo (1):
  drm/amd/display: Refactor to remove diags specific rgam func

Aric Cyr (1):
  drm/amd/display: 3.2.69

Ben Skeggs (3):
  drm/nouveau/disp/gv100-: halt
NV_PDISP_FE_RM_INTR_STAT_CTRL_DISP_ERROR storms
  drm/nouveau/kms/gv100-: move window ownership setup into modesetting path
  drm/nouveau/kms/gv100-: avoid sending a core update until the
first modeset

Bhawanpreet Lakha (1):
  drm/amd/display: Fix HW/SW state mismatch

Brandon Syu (1):
  drm/amd/display: fix rotation_angle to use enum values

Christian König (1):
  drm/amdgpu: add coreboot workaround for KV/KB

Colin Ian King (4):
  drm/amd/amdgpu: fix spelling mistake "to" -> "too"
  drm/amd/display: fix for-loop with incorrectly sized loop counter (v2)
  drm/amd/powerplay: fix spelling mistake "Attemp" -> "Attempt"
  drm/amd/display: fix spelling mistake link_integiry_check ->
link_integrity_check

Daniel Vetter (2):
  radeon: insert 10ms sleep in dce5_crtc_load_lut
  radeon: completely remove lut leftovers

Dave Airlie (3):
  Merge tag 'drm/tegra/for-5.6-rc1-fixes' of
git://anongit.freedesktop.org/tegra/linux into drm-next
  Merge branch 'linux-5.6' of git://github.com/skeggsb/linux into drm-next
  Merge tag 'amd-drm-next-5.6-2020-02-05' of
git://people.freedesktop.org/~agd5f/linux into drm-next

Dennis Li (6):
  drm/amdgpu: update mmhub 9.4.1 header files for Acrturus
  drm/amdgpu: enable RAS feature for more mmhub sub-blocks of Acrturus
  drm/amdgpu: refine the security check for RAS functions
  drm/amdgpu: abstract EDC counter clear to a separated function
  drm/amdgpu: add EDC counter registers of gc for Arcturus
  drm/amdgpu: add RAS support for the gfx block of Arcturus

Dor Askayo (1):
  drm/amd/display: do not allocate display_mode_lib unnecessarily

Evan Quan (1):
  drm/amd/powerplay: fix navi10 system intermittent reboot issue V2

Felix Kuehling (2):
  drm/amdgpu: Fix TLB invalidation request when using semaphore
  drm/amdgpu: Use the correct flush_type in flush_gpu_tlb_pasid

Haiyi Zhou (1):
  drm/amd/display: Fixed comment styling

Harry Wentland (2):
  drm/amd/display: Retrain dongles when SINK_COUNT becomes non-zero
  Revert "drm/amd/display: Don't skip link training for empty dongle"

Isabel Zhang (1):
  drm/amd/display: changed max_downscale_src_width to 4096.

James Zhu (5):
  drm/amdgpu/vcn: Share vcn_v2_0_dec_ring_test_ring to 

Re: [PATCH v3 5/7] drm/panfrost: Add support for multiple power domains

2020-02-06 Thread Nicolas Boichat
On Mon, Jan 20, 2020 at 10:53 PM Steven Price  wrote:
>
> On 14/01/2020 07:16, Nicolas Boichat wrote:
> [snip]
> >
> > + err = panfrost_pm_domain_init(pfdev);
> > + if (err) {
> > + dev_err(pfdev->dev, "pm_domain init failed %d\n", err);
>
> No need for this print - panfrost_pm_domain_init() will output a (more
> appropriate) error message on failure.

Dropped.

> > + goto err_out2;
> > + }
> > +
> [snip]
> > @@ -196,6 +274,7 @@ void panfrost_device_fini(struct panfrost_device *pfdev)
> >   panfrost_mmu_fini(pfdev);
> >   panfrost_gpu_fini(pfdev);
> >   panfrost_reset_fini(pfdev);
> > + panfrost_pm_domain_fini(pfdev);
>
> NIT: The reverse of the construction order would be to do this before
> panfrost_reset_fini().

Oh right, fixed.

Thanks.

> [snip]
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH v2 5/7] drm/panfrost: Add support for multiple power domain support

2020-02-06 Thread Nicolas Boichat
On Fri, Feb 7, 2020 at 10:04 AM Nicolas Boichat  wrote:
>
> Hi Ulf,
>
> On Mon, Jan 27, 2020 at 3:55 PM Ulf Hansson  wrote:
> >
> > On Fri, 10 Jan 2020 at 02:53, Nicolas Boichat  wrote:
> > >
> > > +Ulf to keep me honest on the power domains
> > >
> > > On Thu, Jan 9, 2020 at 10:08 PM Steven Price  wrote:
> > > >
> > > > On 08/01/2020 05:23, Nicolas Boichat wrote:
> > > > > When there is a single power domain per device, the core will
> > > > > ensure the power domains are all switched on.
> > > > >
> > > > > However, when there are multiple ones, as in MT8183 Bifrost GPU,
> > > > > we need to handle them in driver code.
> > > > >
> > > > >
> > > > > Signed-off-by: Nicolas Boichat 
> > > > > ---
> > > > >
> > > > > The downstream driver we use on chromeos-4.19 currently uses 2
> > > > additional devices in device tree to accommodate for this [1], but
> > > > > I believe this solution is cleaner.
> > > >
> > > > I'm not sure what is best, but it seems odd to encode this into the 
> > > > Panfrost driver itself - it doesn't have any knowledge of what to do 
> > > > with these power domains. The naming of the domains looks suspiciously 
> > > > like someone thought that e.g. only half of the cores could be powered, 
> > > > but it doesn't look like that was implemented in the chromeos driver 
> > > > linked and anyway that is *meant* to be automatic in the hardware! 
> > > > (I.e. if you only power up one cores in one core stack then the PDC 
> > > > should only enable the power domain for that set of cores).
> > >
> > > This is actually implemented in the Chrome OS driver [1]. IMHO power
> > > domains are a bit confusing [2]:
> > >  i. If there's only 1 power domain in the device, then the core takes
> > > care of power on the domain (based on pm_runtime)
> > >  ii. If there's more than 1 power domain, then the device needs to
> > > link the domains manually.
> > >
> > > So the Chrome OS [1] driver takes approach (i), by creating 3 devices,
> > > each with 1 power domain that is switched on/off automatically using
> > > pm_runtime.
> > >
> > > This patch takes approach (ii) with device links to handle the extra 
> > > domains.
> > >
> > > I believe the latter is more upstream-friendly, but, as always,
> > > suggestions welcome.
> >
> > Apologies for the late reply. A few comments below.
>
> No worries, than for the helpful reply!

(s/than/thanks/... ,-P)

>
> > If the device is partitioned across multiple PM domains (it may need
> > several power rails), then that should be described with the "multi PM
> > domain" approach in the DTS. As in (ii).
> >
> > Using "device links" is however optional, as it may depend on the use
> > case. If all multiple PM domains needs to be powered on/off together,
> > then it's certainly recommended to use device links.
>
> That's the case here, there's no support for turning on/off the
> domains individually.
>
> > However, if the PM domains can be powered on/off independently (one
> > can be on while another is off), then it's probably easier to operate
> > directly with runtime PM, on the returned struct *device from
> > dev_pm_domain_attach_by_id().
> >
> > Also note, there is dev_pm_domain_attach_by_name(), which allows us to
> > specify a name for the PM domain in the DTS, rather than using an
> > index. This may be more future proof to use.
>
> Agree, probably better to have actual names than just "counting" the
> number of domains like I do, especially as we have a compatible struct
> anyway. I'll update the patch.
>
> > [...]
> >
> > Hope this helps.
> >
> > Kind regards
> > Uffe
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH v2 5/7] drm/panfrost: Add support for multiple power domain support

2020-02-06 Thread Nicolas Boichat
Hi Ulf,

On Mon, Jan 27, 2020 at 3:55 PM Ulf Hansson  wrote:
>
> On Fri, 10 Jan 2020 at 02:53, Nicolas Boichat  wrote:
> >
> > +Ulf to keep me honest on the power domains
> >
> > On Thu, Jan 9, 2020 at 10:08 PM Steven Price  wrote:
> > >
> > > On 08/01/2020 05:23, Nicolas Boichat wrote:
> > > > When there is a single power domain per device, the core will
> > > > ensure the power domains are all switched on.
> > > >
> > > > However, when there are multiple ones, as in MT8183 Bifrost GPU,
> > > > we need to handle them in driver code.
> > > >
> > > >
> > > > Signed-off-by: Nicolas Boichat 
> > > > ---
> > > >
> > > > The downstream driver we use on chromeos-4.19 currently uses 2
> > > > additional devices in device tree to accommodate for this [1], but
> > > > I believe this solution is cleaner.
> > >
> > > I'm not sure what is best, but it seems odd to encode this into the 
> > > Panfrost driver itself - it doesn't have any knowledge of what to do with 
> > > these power domains. The naming of the domains looks suspiciously like 
> > > someone thought that e.g. only half of the cores could be powered, but it 
> > > doesn't look like that was implemented in the chromeos driver linked and 
> > > anyway that is *meant* to be automatic in the hardware! (I.e. if you only 
> > > power up one cores in one core stack then the PDC should only enable the 
> > > power domain for that set of cores).
> >
> > This is actually implemented in the Chrome OS driver [1]. IMHO power
> > domains are a bit confusing [2]:
> >  i. If there's only 1 power domain in the device, then the core takes
> > care of power on the domain (based on pm_runtime)
> >  ii. If there's more than 1 power domain, then the device needs to
> > link the domains manually.
> >
> > So the Chrome OS [1] driver takes approach (i), by creating 3 devices,
> > each with 1 power domain that is switched on/off automatically using
> > pm_runtime.
> >
> > This patch takes approach (ii) with device links to handle the extra 
> > domains.
> >
> > I believe the latter is more upstream-friendly, but, as always,
> > suggestions welcome.
>
> Apologies for the late reply. A few comments below.

No worries, than for the helpful reply!

> If the device is partitioned across multiple PM domains (it may need
> several power rails), then that should be described with the "multi PM
> domain" approach in the DTS. As in (ii).
>
> Using "device links" is however optional, as it may depend on the use
> case. If all multiple PM domains needs to be powered on/off together,
> then it's certainly recommended to use device links.

That's the case here, there's no support for turning on/off the
domains individually.

> However, if the PM domains can be powered on/off independently (one
> can be on while another is off), then it's probably easier to operate
> directly with runtime PM, on the returned struct *device from
> dev_pm_domain_attach_by_id().
>
> Also note, there is dev_pm_domain_attach_by_name(), which allows us to
> specify a name for the PM domain in the DTS, rather than using an
> index. This may be more future proof to use.

Agree, probably better to have actual names than just "counting" the
number of domains like I do, especially as we have a compatible struct
anyway. I'll update the patch.

> [...]
>
> Hope this helps.
>
> Kind regards
> Uffe
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH v4 3/3] dt-bindings: panel: Convert orisetech, otm8009a to json-schema

2020-02-06 Thread Rob Herring
On Thu, 6 Feb 2020 14:33:44 +0100, Benjamin Gaignard wrote:
> Convert orisetech,otm8009a to json-schema.
> 
> Signed-off-by: Benjamin Gaignard 
> ---
>  .../bindings/display/panel/orisetech,otm8009a.txt  | 23 --
>  .../bindings/display/panel/orisetech,otm8009a.yaml | 53 
> ++
>  2 files changed, 53 insertions(+), 23 deletions(-)
>  delete mode 100644 
> Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.txt
>  create mode 100644 
> Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.yaml
> 

Reviewed-by: Rob Herring 
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH v4 2/3] dt-bindings: panel: Convert raydium,rm68200 to json-schema

2020-02-06 Thread Rob Herring
On Thu, 6 Feb 2020 14:33:43 +0100, Benjamin Gaignard wrote:
> Convert raydium,rm68200 to json-schema.
> 
> Signed-off-by: Benjamin Gaignard 
> ---
>  .../bindings/display/panel/raydium,rm68200.txt | 25 --
>  .../bindings/display/panel/raydium,rm68200.yaml| 56 
> ++
>  2 files changed, 56 insertions(+), 25 deletions(-)
>  delete mode 100644 
> Documentation/devicetree/bindings/display/panel/raydium,rm68200.txt
>  create mode 100644 
> Documentation/devicetree/bindings/display/panel/raydium,rm68200.yaml
> 

Reviewed-by: Rob Herring 
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH v4 1/3] dt-bindings: one file of all simple DSI panels

2020-02-06 Thread Rob Herring
On Thu, 6 Feb 2020 14:33:42 +0100, Benjamin Gaignard wrote:
> From: Sam Ravnborg 
> 
> To complement panel-simple.yaml, create panel-simple-dsi.yaml.
> panel-simple-dsi.yaml is for all simple DSI panels with a single
> power-supply and optional backlight / enable GPIO.
> 
> Migrate panasonic,vvx10f034n00 over to the new file.
> 
> The objectives with one file for all the simple DSI panels are:
> - Make it simpler to add bindings for simple DSI panels
> - Keep the number of bindings file lower
> - Keep the binding documentation for simple DSI panels more consistent
> 
> Signed-off-by: Sam Ravnborg 
> Signed-off-by: Benjamin Gaignard 
> Cc: Thierry Reding 
> Cc: Rob Herring 
> Cc: Maxime Ripard 
> Cc: Yannick Fertre 
> Cc: Mark Rutland 
> Cc: Daniel Vetter 
> Cc: dri-devel@lists.freedesktop.org
> Cc: devicet...@vger.kernel.org
> ---
> version 4:
> - remove orisetech,otm8009a and raydium,rm68200 compatibles
> - remove reset-gpios optional property
> 
> version 3:
> - add orisetech,otm8009a and raydium,rm68200 compatibles
> - add reset-gpios optional property
> - fix indentation on compatible enumeration
> 
>  .../display/panel/panasonic,vvx10f034n00.txt   | 20 ---
>  .../bindings/display/panel/panel-simple-dsi.yaml   | 67 
> ++
>  2 files changed, 67 insertions(+), 20 deletions(-)
>  delete mode 100644 
> Documentation/devicetree/bindings/display/panel/panasonic,vvx10f034n00.txt
>  create mode 100644 
> Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
> 

Reviewed-by: Rob Herring 
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [v1] dt-bindings: msm:disp: update dsi and dpu bindings

2020-02-06 Thread Rob Herring
On Tue, Feb 04, 2020 at 07:45:37PM +0530, Harigovindan P wrote:
> Updating bindings of dsi and dpu by adding and removing certain
> properties.

Yes, the diff tells me that. The commit message should say why.

This change breaks compatibility as well.

> 
> Signed-off-by: Harigovindan P 
> ---
> 
> Changes in v1:
> - Adding "ahb" clock as a required property.
> - Adding "bus", "rot", "lut" as optional properties for sc7180 device.
> - Removing properties from dsi bindings that are unused.
>   - Removing power-domain property since DSI is the child node of MDSS
> and it will inherit supply from its parent.
> 
>  Documentation/devicetree/bindings/display/msm/dpu.txt | 7 +++
>  Documentation/devicetree/bindings/display/msm/dsi.txt | 5 -
>  2 files changed, 7 insertions(+), 5 deletions(-)
> 
> diff --git a/Documentation/devicetree/bindings/display/msm/dpu.txt 
> b/Documentation/devicetree/bindings/display/msm/dpu.txt
> index 551ae26..dd58472a 100644
> --- a/Documentation/devicetree/bindings/display/msm/dpu.txt
> +++ b/Documentation/devicetree/bindings/display/msm/dpu.txt
> @@ -19,6 +19,7 @@ Required properties:
>The following clocks are required:
>* "iface"
>* "bus"
> +  * "ahb"

You can't just add new clocks...

>* "core"
>  - interrupts: interrupt signal from MDSS.
>  - interrupt-controller: identifies the node as an interrupt controller.
> @@ -50,6 +51,8 @@ Required properties:
>  - clock-names: device clock names, must be in same order as clocks property.
>The following clocks are required.
>* "bus"
> +  For the device "qcom,sc7180-dpu":
> +  * "bus" - is an optional property due to architecture change.
>* "iface"
>* "core"
>* "vsync"
> @@ -70,6 +73,10 @@ Optional properties:
>  - assigned-clocks: list of clock specifiers for clocks needing rate 
> assignment
>  - assigned-clock-rates: list of clock frequencies sorted in the same order as
>the assigned-clocks property.
> +- For the device "qcom,sc7180-dpu":
> +  clock-names: optional device clocks, needed for accessing LUT blocks.
> +  * "rot"
> +  * "lut"
>  
>  Example:
>  
> diff --git a/Documentation/devicetree/bindings/display/msm/dsi.txt 
> b/Documentation/devicetree/bindings/display/msm/dsi.txt
> index af95586..61d659a 100644
> --- a/Documentation/devicetree/bindings/display/msm/dsi.txt
> +++ b/Documentation/devicetree/bindings/display/msm/dsi.txt
> @@ -8,13 +8,10 @@ Required properties:
>  - reg-names: The names of register regions. The following regions are 
> required:
>* "dsi_ctrl"
>  - interrupts: The interrupt signal from the DSI block.
> -- power-domains: Should be < MDSS_GDSC>.
>  - clocks: Phandles to device clocks.
>  - clock-names: the following clocks are required:
> -  * "mdp_core"
>* "iface"
>* "bus"
> -  * "core_mmss"
>* "byte"
>* "pixel"
>* "core"
> @@ -156,7 +153,6 @@ Example:
>   "core",
>   "core_mmss",
>   "iface",
> - "mdp_core",
>   "pixel";
>   clocks =
>   < MDSS_AXI_CLK>,
> @@ -164,7 +160,6 @@ Example:
>   < MDSS_ESC0_CLK>,
>   < MMSS_MISC_AHB_CLK>,
>   < MDSS_AHB_CLK>,
> - < MDSS_MDP_CLK>,
>   < MDSS_PCLK0_CLK>;
>  
>   assigned-clocks =
> -- 
> 2.7.4
> 
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH v3] dt-bindings: display: Convert etnaviv to json-schema

2020-02-06 Thread Rob Herring
On Wed, 29 Jan 2020 09:56:13 +0100, Benjamin Gaignard wrote:
> Convert etnaviv bindings to yaml format.
> Move bindings file from display to gpu folder.
> 
> Signed-off-by: Benjamin Gaignard 
> ---
> version 3:
> - describe clock-names as enum to allow all possible mix
> 
> version 2:
> - move bindings file from display to gpu folder
>  .../bindings/display/etnaviv/etnaviv-drm.txt   | 36 ---
>  .../devicetree/bindings/gpu/vivante,gc.yaml| 69 
> ++
>  2 files changed, 69 insertions(+), 36 deletions(-)
>  delete mode 100644 
> Documentation/devicetree/bindings/display/etnaviv/etnaviv-drm.txt
>  create mode 100644 Documentation/devicetree/bindings/gpu/vivante,gc.yaml
> 

Applied, thanks.

Rob
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH v4 1/2] dt-bindings: display: bridge: Add documentation for Toshiba tc358768

2020-02-06 Thread Rob Herring
On Fri, 31 Jan 2020 13:15:52 +0200, Peter Ujfalusi wrote:
> TC358768/TC358778 is a Parallel RGB to MIPI DSI bridge.
> 
> Signed-off-by: Peter Ujfalusi 
> ---
>  .../display/bridge/toshiba,tc358768.yaml  | 159 ++
>  1 file changed, 159 insertions(+)
>  create mode 100644 
> Documentation/devicetree/bindings/display/bridge/toshiba,tc358768.yaml
> 

Reviewed-by: Rob Herring 
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH] drm/edid: fix building error

2020-02-06 Thread Ville Syrjälä
On Tue, Feb 04, 2020 at 04:41:16PM +0200, Ville Syrjälä wrote:
> On Mon, Feb 03, 2020 at 10:31:13PM +0100, Mauro Rossi wrote:
> > Fixes the following building error:
> > 
> > CC [M]  drivers/gpu/drm/drm_edid.o
> > ~/pie-x86_kernel/kernel/drivers/gpu/drm/drm_edid.c: In function 
> > 'cea_mode_alternate_timings':
> > ~/pie-x86_kernel/kernel/drivers/gpu/drm/drm_edid.c:3275:2: error: call to 
> > '__compiletime_assert_3282'
> > declared with attribute error: BUILD_BUG_ON failed: 
> > cea_mode_for_vic(8)->vtotal != 262 || cea_mode_for_vic(9)->vtotal != 262 || 
> > cea_mode_for_vic(12)->vtotal != 262 || cea_mode_for_vic(13)->vtotal != 262 
> > || cea_mode_for_vic(23)->vtotal != 312 || cea_mode_for_vic(24)->vtotal != 
> > 312 || cea_mode_for_vic(27)->vtotal != 312 || cea_mode_for_vic(28)->vtotal 
> > != 312
> > make[4]: *** [~/pie-x86_kernel/kernel/scripts/Makefile.build:265: 
> > drivers/gpu/drm/drm_edid.o] Error 1
> > 
> > Fixes: 7befe62 ("drm/edid: Abstract away cea_edid_modes[]")
> > Signed-off-by: Mauro Rossi 
> > ---
> >  drivers/gpu/drm/drm_edid.c | 2 +-
> >  1 file changed, 1 insertion(+), 1 deletion(-)
> > 
> > diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
> > index 99769d6c9f84..805fb004c8eb 100644
> > --- a/drivers/gpu/drm/drm_edid.c
> > +++ b/drivers/gpu/drm/drm_edid.c
> > @@ -3211,7 +3211,7 @@ static u8 *drm_find_cea_extension(const struct edid 
> > *edid)
> > return cea;
> >  }
> >  
> > -static const struct drm_display_mode *cea_mode_for_vic(u8 vic)
> > +static __always_inline const struct drm_display_mode *cea_mode_for_vic(u8 
> > vic)
> 
> Thanks for the fix. I've had another few reports of this fail on ia64
> at least. Hoping to get an answer whether this fixes that one as well.
> If not we need to do something else.

Got word that this fixes ia64 as well -> pushed to drm-misc-next-fixes.
Thanks.

PS. I bumped the Fixes sha1 up to the recommended 12 characters.

-- 
Ville Syrjälä
Intel
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH 2/2] drm/virtio: remove the global disable_notify state

2020-02-06 Thread Chia-I Wu
The global disable_notify state does not scale well when we start
using it in more places and when there are multiple threads.  Use
command-level bools to control whether to notify or not.

The naming conventions are

  virtio_gpu_cmd_foo -> add foo and commit is implied
  virtio_gpu_ctrl_bar -> add bar to ctrlq and commit is
 caller-controlled

virtio_gpu_{disable,enable}_notify is also replaced by
virtio_gpu_commit_ctrl.

Signed-off-by: Chia-I Wu 
---
 drivers/gpu/drm/virtio/virtgpu_display.c |  9 ++-
 drivers/gpu/drm/virtio/virtgpu_drv.h | 38 +-
 drivers/gpu/drm/virtio/virtgpu_ioctl.c   |  4 +-
 drivers/gpu/drm/virtio/virtgpu_plane.c   | 42 +--
 drivers/gpu/drm/virtio/virtgpu_vq.c  | 94 
 5 files changed, 92 insertions(+), 95 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c 
b/drivers/gpu/drm/virtio/virtgpu_display.c
index 7b0f0643bb2dd..34b1136b05120 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -87,9 +87,9 @@ static void virtio_gpu_crtc_mode_set_nofb(struct drm_crtc 
*crtc)
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
 
-   virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
-  crtc->mode.hdisplay,
-  crtc->mode.vdisplay, 0, 0);
+   virtio_gpu_ctrl_set_scanout(vgdev, output->index, 0,
+   crtc->mode.hdisplay,
+   crtc->mode.vdisplay, 0, 0, true);
 }
 
 static void virtio_gpu_crtc_atomic_enable(struct drm_crtc *crtc,
@@ -107,7 +107,8 @@ static void virtio_gpu_crtc_atomic_disable(struct drm_crtc 
*crtc,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
 
-   virtio_gpu_cmd_set_scanout(vgdev, output->index, 0, 0, 0, 0, 0);
+   virtio_gpu_ctrl_set_scanout(vgdev, output->index, 0, 0, 0, 0, 0,
+   true);
output->enabled = false;
 }
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h 
b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 0af1724bda390..b9a948a6d6cf7 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -177,8 +177,6 @@ struct virtio_gpu_device {
struct kmem_cache *vbufs;
bool vqs_ready;
 
-   bool disable_notify;
-
struct ida  resource_ida;
 
wait_queue_head_t resp_wq;
@@ -262,20 +260,22 @@ void virtio_gpu_cmd_create_resource(struct 
virtio_gpu_device *vgdev,
struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
   uint32_t resource_id);
-void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
-   uint64_t offset,
-   uint32_t width, uint32_t height,
-   uint32_t x, uint32_t y,
-   struct virtio_gpu_object_array *objs,
-   struct virtio_gpu_fence *fence);
-void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
-  uint32_t resource_id,
-  uint32_t x, uint32_t y,
-  uint32_t width, uint32_t height);
-void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
-   uint32_t scanout_id, uint32_t resource_id,
-   uint32_t width, uint32_t height,
-   uint32_t x, uint32_t y);
+void virtio_gpu_ctrl_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
+uint64_t offset,
+uint32_t width, uint32_t height,
+uint32_t x, uint32_t y,
+struct virtio_gpu_object_array *objs,
+struct virtio_gpu_fence *fence,
+bool commit);
+void virtio_gpu_ctrl_resource_flush(struct virtio_gpu_device *vgdev,
+   uint32_t resource_id,
+   uint32_t x, uint32_t y,
+   uint32_t width, uint32_t height,
+   bool commit);
+void virtio_gpu_ctrl_set_scanout(struct virtio_gpu_device *vgdev,
+uint32_t scanout_id, uint32_t resource_id,
+uint32_t width, uint32_t height,
+uint32_t x, uint32_t y, bool commit);
 int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 struct 

[PATCH 1/2] drm/virtio: remove the global pending_notify state

2020-02-06 Thread Chia-I Wu
Call virtqueue_kick_prepare once in virtio_gpu_enable_notify, not
whenever a command is added.  This should be more efficient since
the intention is to batch commands.

Signed-off-by: Chia-I Wu 
---
 drivers/gpu/drm/virtio/virtgpu_drv.h |  1 -
 drivers/gpu/drm/virtio/virtgpu_vq.c  | 28 +---
 2 files changed, 17 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h 
b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 7e69c06e168ea..0af1724bda390 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -178,7 +178,6 @@ struct virtio_gpu_device {
bool vqs_ready;
 
bool disable_notify;
-   bool pending_notify;
 
struct ida  resource_ida;
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c 
b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 41e475fbd67bf..204b1577a1873 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -364,16 +364,13 @@ static void virtio_gpu_queue_ctrl_sgs(struct 
virtio_gpu_device *vgdev,
 
trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));
 
-   notify = virtqueue_kick_prepare(vq);
+   if (!vgdev->disable_notify)
+   notify = virtqueue_kick_prepare(vq);
 
spin_unlock(>ctrlq.qlock);
 
-   if (notify) {
-   if (vgdev->disable_notify)
-   vgdev->pending_notify = true;
-   else
-   virtqueue_notify(vq);
-   }
+   if (notify)
+   virtqueue_notify(vq);
 }
 
 static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device 
*vgdev,
@@ -436,12 +433,21 @@ void virtio_gpu_disable_notify(struct virtio_gpu_device 
*vgdev)
 
 void virtio_gpu_enable_notify(struct virtio_gpu_device *vgdev)
 {
+   struct virtqueue *vq = vgdev->ctrlq.vq;
+   bool notify;
+
vgdev->disable_notify = false;
 
-   if (!vgdev->pending_notify)
-   return;
-   vgdev->pending_notify = false;
-   virtqueue_notify(vgdev->ctrlq.vq);
+   spin_lock(>ctrlq.qlock);
+   notify = virtqueue_kick_prepare(vq);
+   spin_unlock(>ctrlq.qlock);
+
+   /* Do not call virtqueue_notify with the lock held because
+* virtio_gpu_dequeue_ctrl_func may contend for the lock if an irq is
+* generated while we are in virtqueue_notify.
+*/
+   if (notify)
+   virtqueue_notify(vq);
 }
 
 static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
-- 
2.25.0.341.g760bfbb309-goog

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH 0/2] drm/virtio: rework command batching

2020-02-06 Thread Chia-I Wu
This series replaces the global disable_notify state by command-level bools to
control vq kicks.  When command batching is applied to more places, this
prevents one process from affecting another process.
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH v4 04/11] drm/bridge: synopsys: dw-hdmi: add bus format negotiation

2020-02-06 Thread Neil Armstrong
Add the atomic_get_output_bus_fmts, atomic_get_input_bus_fmts to negotiate
the possible output and input formats for the current mode and monitor,
and use the negotiated formats in a basic atomic_check callback.

Signed-off-by: Neil Armstrong 
---
 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 272 +-
 1 file changed, 268 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c 
b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index fec4a4bcd1fe..15048ad694bc 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -2095,11 +2095,10 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct 
drm_display_mode *mode)
hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 0;
hdmi->hdmi_data.video_mode.mpixelrepetitioninput = 0;
 
-   /* TOFIX: Get input format from plat data or fallback to RGB888 */
if (hdmi->plat_data->input_bus_format)
hdmi->hdmi_data.enc_in_bus_format =
hdmi->plat_data->input_bus_format;
-   else
+   else if (hdmi->hdmi_data.enc_in_bus_format == MEDIA_BUS_FMT_FIXED)
hdmi->hdmi_data.enc_in_bus_format = MEDIA_BUS_FMT_RGB888_1X24;
 
/* TOFIX: Get input encoding from plat data or fallback to none */
@@ -2109,8 +2108,8 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct 
drm_display_mode *mode)
else
hdmi->hdmi_data.enc_in_encoding = V4L2_YCBCR_ENC_DEFAULT;
 
-   /* TOFIX: Default to RGB888 output format */
-   hdmi->hdmi_data.enc_out_bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+   if (hdmi->hdmi_data.enc_out_bus_format == MEDIA_BUS_FMT_FIXED)
+   hdmi->hdmi_data.enc_out_bus_format = MEDIA_BUS_FMT_RGB888_1X24;
 
hdmi->hdmi_data.pix_repet_factor = 0;
hdmi->hdmi_data.hdcp_enable = 0;
@@ -2388,6 +2387,267 @@ static const struct drm_connector_helper_funcs 
dw_hdmi_connector_helper_funcs =
.atomic_check = dw_hdmi_connector_atomic_check,
 };
 
+/*
+ * Possible output formats :
+ * - MEDIA_BUS_FMT_UYYVYY16_0_5X48,
+ * - MEDIA_BUS_FMT_UYYVYY12_0_5X36,
+ * - MEDIA_BUS_FMT_UYYVYY10_0_5X30,
+ * - MEDIA_BUS_FMT_UYYVYY8_0_5X24,
+ * - MEDIA_BUS_FMT_YUV16_1X48,
+ * - MEDIA_BUS_FMT_RGB161616_1X48,
+ * - MEDIA_BUS_FMT_UYVY12_1X24,
+ * - MEDIA_BUS_FMT_YUV12_1X36,
+ * - MEDIA_BUS_FMT_RGB121212_1X36,
+ * - MEDIA_BUS_FMT_UYVY10_1X20,
+ * - MEDIA_BUS_FMT_YUV10_1X30,
+ * - MEDIA_BUS_FMT_RGB101010_1X30,
+ * - MEDIA_BUS_FMT_UYVY8_1X16,
+ * - MEDIA_BUS_FMT_YUV8_1X24,
+ * - MEDIA_BUS_FMT_RGB888_1X24,
+ */
+
+/* Can return a maximum of 12 possible output formats for a mode/connector */
+#define MAX_OUTPUT_SEL_FORMATS 12
+
+static u32 *dw_hdmi_bridge_atomic_get_output_bus_fmts(struct drm_bridge 
*bridge,
+   struct drm_bridge_state *bridge_state,
+   struct drm_crtc_state *crtc_state,
+   struct drm_connector_state *conn_state,
+   unsigned int *num_output_fmts)
+{
+   struct drm_connector *conn = conn_state->connector;
+   struct drm_display_info *info = >display_info;
+   struct drm_display_mode *mode = _state->mode;
+   u8 max_bpc = conn_state->max_requested_bpc;
+   bool is_hdmi2_sink = info->hdmi.scdc.supported ||
+(info->color_formats & DRM_COLOR_FORMAT_YCRCB420);
+   u32 *output_fmts;
+   int i = 0;
+
+   *num_output_fmts = 0;
+
+   output_fmts = kcalloc(MAX_OUTPUT_SEL_FORMATS, sizeof(*output_fmts),
+ GFP_KERNEL);
+   if (!output_fmts)
+   return NULL;
+
+   /*
+* If the current mode enforces 4:2:0, force the output but format
+* to 4:2:0 and do not add the YUV422/444/RGB formats
+*/
+   if (conn->ycbcr_420_allowed &&
+   (drm_mode_is_420_only(info, mode) ||
+(is_hdmi2_sink && drm_mode_is_420_also(info, mode {
+
+   /* Order bus formats from 16bit to 8bit if supported */
+   if (max_bpc >= 16 && info->bpc == 16 &&
+   (info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48))
+   output_fmts[i++] = MEDIA_BUS_FMT_UYYVYY16_0_5X48;
+
+   if (max_bpc >= 12 && info->bpc >= 12 &&
+   (info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36))
+   output_fmts[i++] = MEDIA_BUS_FMT_UYYVYY12_0_5X36;
+
+   if (max_bpc >= 10 && info->bpc >= 10 &&
+   (info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30))
+   output_fmts[i++] = MEDIA_BUS_FMT_UYYVYY10_0_5X30;
+
+   /* Default 8bit fallback */
+   output_fmts[i++] = MEDIA_BUS_FMT_UYYVYY8_0_5X24;
+
+   *num_output_fmts = i;
+
+   return output_fmts;
+   }
+
+   /*
+* Order bus formats from 16bit to 8bit and from YUV422 to 

[PATCH v4 06/11] drm/meson: venc: make drm_display_mode const

2020-02-06 Thread Neil Armstrong
Before switching to bridge funcs, make sure drm_display_mode is passed
as const to the venc functions.

Signed-off-by: Neil Armstrong 
---
 drivers/gpu/drm/meson/meson_venc.c | 2 +-
 drivers/gpu/drm/meson/meson_venc.h | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/meson/meson_venc.c 
b/drivers/gpu/drm/meson/meson_venc.c
index 4efd7864d5bf..a9ab78970bfe 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -946,7 +946,7 @@ bool meson_venc_hdmi_venc_repeat(int vic)
 EXPORT_SYMBOL_GPL(meson_venc_hdmi_venc_repeat);
 
 void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
 {
union meson_hdmi_venc_mode *vmode = NULL;
union meson_hdmi_venc_mode vmode_dmt;
diff --git a/drivers/gpu/drm/meson/meson_venc.h 
b/drivers/gpu/drm/meson/meson_venc.h
index 576768bdd08d..1abdcbdf51c0 100644
--- a/drivers/gpu/drm/meson/meson_venc.h
+++ b/drivers/gpu/drm/meson/meson_venc.h
@@ -60,7 +60,7 @@ extern struct meson_cvbs_enci_mode meson_cvbs_enci_ntsc;
 void meson_venci_cvbs_mode_set(struct meson_drm *priv,
   struct meson_cvbs_enci_mode *mode);
 void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
- struct drm_display_mode *mode);
+ const struct drm_display_mode *mode);
 unsigned int meson_venci_get_field(struct meson_drm *priv);
 
 void meson_venc_enable_vsync(struct meson_drm *priv);
-- 
2.22.0

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH v4 08/11] drm/meson: dw-hdmi: stop enforcing input_bus_format

2020-02-06 Thread Neil Armstrong
To allow using formats from negotiation, stop enforcing input_bus_format
in the private dw-plat-data struct.

Signed-off-by: Neil Armstrong 
Reviewed-by: Boris Brezillon 
---
 drivers/gpu/drm/meson/meson_dw_hdmi.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c 
b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 4b3809626f7e..686c47106a18 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -1035,7 +1035,6 @@ static int meson_dw_hdmi_bind(struct device *dev, struct 
device *master,
dw_plat_data->phy_ops = _dw_hdmi_phy_ops;
dw_plat_data->phy_name = "meson_dw_hdmi_phy";
dw_plat_data->phy_data = meson_dw_hdmi;
-   dw_plat_data->input_bus_format = MEDIA_BUS_FMT_YUV8_1X24;
dw_plat_data->input_bus_encoding = V4L2_YCBCR_ENC_709;
 
if (dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
-- 
2.22.0

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH v4 00/11] drm/bridge: dw-hdmi: implement bus-format negotiation and YUV420 support

2020-02-06 Thread Neil Armstrong
This patchset is based on Boris's v10 "drm: Add support for bus-format 
negotiation" at [1]
patchset to implement full bus-format negotiation for DW-HDMI, including YUV420 
support and
10/12/16bit YUV444, YUV422 and RGB. The Color Space Converter support is 
already implemented.

And the counterpart implementation in the Amlogic Meson VPU dw-hdmi glue :
- basic bus-format negotiation to select YUV444 bus-format as DW-HDMI input
- YUV420 support when HDMI2.0 YUV420 modeset

This is a follow-up from the previous attempts :
- "drm/meson: Add support for HDMI2.0 YUV420 4k60" at [2]
- "drm/meson: Add support for HDMI2.0 4k60" at [3]

Changes sincd v3 at [6]:
- Added "Plug atomic state hooks to the default implementation" on drm/bridge: 
dw-hdmi
- Also added these atomic state hooks in meson-dw-hdmi in patch 7
- Rebased on latest drm-misc-next including patches 1-7 of [1]

Changes since RFC v2 at [5]:
- Added fixes from Jonas, who tested and integrated it for Rockchip SoCs
- Added support for 10/12/16bit tmds clock calculation
- Added support for max_bcp connector property
- Adapted to Boris's v4 patchset
- Fixed typos reported by boris

Changes since RFC v1 at [4]:
- Rewrote negotiation using the v2 patchset, including full DW-HDMI fmt 
negotiation

[1] 
https://patchwork.freedesktop.org/patch/msgid/20200128135514.108171-1-boris.brezil...@collabora.com
[2] 
https://patchwork.freedesktop.org/patch/msgid/20190520133753.23871-1-narmstr...@baylibre.com
[3] 
https://patchwork.freedesktop.org/patch/msgid/1549022873-40549-1-git-send-email-narmstr...@baylibre.com
[4] 
https://patchwork.freedesktop.org/patch/msgid/20190820084109.24616-1-narmstr...@baylibre.com
[5] 
https://patchwork.freedesktop.org/patch/msgid/20190827081425.15011-1-narmstr...@baylibre.com
[6] 
https://patchwork.freedesktop.org/patch/msgid/20191218154637.17509-1-narmstr...@baylibre.com

Jonas Karlman (2):
  drm/bridge: dw-hdmi: set mtmdsclock for deep color
  drm/bridge: dw-hdmi: add max bpc connector property

Neil Armstrong (9):
  drm/bridge: dw-hdmi: Plug atomic state hooks to the default
implementation
  drm/bridge: synopsys: dw-hdmi: add bus format negotiation
  drm/bridge: synopsys: dw-hdmi: allow ycbcr420 modes for >= 0x200a
  drm/meson: venc: make drm_display_mode const
  drm/meson: meson_dw_hdmi: add bridge and switch to drm_bridge_funcs
  drm/meson: dw-hdmi: stop enforcing input_bus_format
  drm/meson: venc: add support for YUV420 setup
  drm/meson: vclk: add support for YUV420 setup
  drm/meson: Add YUV420 output support

 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 302 +-
 drivers/gpu/drm/meson/meson_dw_hdmi.c | 200 +++---
 drivers/gpu/drm/meson/meson_vclk.c|  93 +--
 drivers/gpu/drm/meson/meson_vclk.h|   7 +-
 drivers/gpu/drm/meson/meson_venc.c|  10 +-
 drivers/gpu/drm/meson/meson_venc.h|   4 +-
 drivers/gpu/drm/meson/meson_venc_cvbs.c   |   6 +-
 include/drm/bridge/dw_hdmi.h  |   1 +
 8 files changed, 548 insertions(+), 75 deletions(-)

-- 
2.22.0

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH v4 05/11] drm/bridge: synopsys: dw-hdmi: allow ycbcr420 modes for >= 0x200a

2020-02-06 Thread Neil Armstrong
Now the DW-HDMI Controller supports the HDMI2.0 modes, enable support
for these modes in the connector if the platform supports them.
We limit these modes to DW-HDMI IP version >= 0x200a which
are designed to support HDMI2.0 display modes.

Signed-off-by: Neil Armstrong 
---
 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 6 ++
 include/drm/bridge/dw_hdmi.h  | 1 +
 2 files changed, 7 insertions(+)

diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c 
b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 15048ad694bc..4b35ea1427df 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -3231,6 +3231,12 @@ __dw_hdmi_probe(struct platform_device *pdev,
hdmi->bridge.of_node = pdev->dev.of_node;
 #endif
 
+   if (hdmi->version >= 0x200a)
+   hdmi->connector.ycbcr_420_allowed =
+   hdmi->plat_data->ycbcr_420_allowed;
+   else
+   hdmi->connector.ycbcr_420_allowed = false;
+
memset(, 0, sizeof(pdevinfo));
pdevinfo.parent = dev;
pdevinfo.id = PLATFORM_DEVID_AUTO;
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index 9d4d5cc47969..0b34a12c4a1c 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -129,6 +129,7 @@ struct dw_hdmi_plat_data {
unsigned long input_bus_format;
unsigned long input_bus_encoding;
bool use_drm_infoframe;
+   bool ycbcr_420_allowed;
 
/* Vendor PHY support */
const struct dw_hdmi_phy_ops *phy_ops;
-- 
2.22.0

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH v4 03/11] drm/bridge: dw-hdmi: Plug atomic state hooks to the default implementation

2020-02-06 Thread Neil Armstrong
Signed-off-by: Neil Armstrong 
---
 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c 
b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 051001f77dd4..fec4a4bcd1fe 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -2494,6 +2494,9 @@ static void dw_hdmi_bridge_enable(struct drm_bridge 
*bridge)
 }
 
 static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
+   .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+   .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+   .atomic_reset = drm_atomic_helper_bridge_reset,
.attach = dw_hdmi_bridge_attach,
.detach = dw_hdmi_bridge_detach,
.enable = dw_hdmi_bridge_enable,
-- 
2.22.0

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH v4 02/11] drm/bridge: dw-hdmi: add max bpc connector property

2020-02-06 Thread Neil Armstrong
From: Jonas Karlman 

Add the max_bpc property to the dw-hdmi connector to prepare support
for 10, 12 & 16bit output support.

Signed-off-by: Jonas Karlman 
Signed-off-by: Neil Armstrong 
---
 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 4 
 1 file changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c 
b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 9e0927d22db6..051001f77dd4 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -2406,6 +2406,10 @@ static int dw_hdmi_bridge_attach(struct drm_bridge 
*bridge)
DRM_MODE_CONNECTOR_HDMIA,
hdmi->ddc);
 
+   drm_atomic_helper_connector_reset(connector);
+
+   drm_connector_attach_max_bpc_property(connector, 8, 16);
+
if (hdmi->version >= 0x200a && hdmi->plat_data->use_drm_infoframe)
drm_object_attach_property(>base,

connector->dev->mode_config.hdr_output_metadata_property, 0);
-- 
2.22.0

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH v4 01/11] drm/bridge: dw-hdmi: set mtmdsclock for deep color

2020-02-06 Thread Neil Armstrong
From: Jonas Karlman 

Configure the correct mtmdsclock for deep colors to prepare support
for 10, 12 & 16bit output.

Signed-off-by: Jonas Karlman 
Signed-off-by: Neil Armstrong 
---
 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 17 +
 1 file changed, 17 insertions(+)

diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c 
b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 67fca439bbfb..9e0927d22db6 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -1818,9 +1818,26 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
 
dev_dbg(hdmi->dev, "final pixclk = %d\n", vmode->mpixelclock);
 
+   if (!hdmi_bus_fmt_is_yuv422(hdmi->hdmi_data.enc_out_bus_format)) {
+   switch (hdmi_bus_fmt_color_depth(
+   hdmi->hdmi_data.enc_out_bus_format)) {
+   case 16:
+   vmode->mtmdsclock = (u64)vmode->mpixelclock * 2;
+   break;
+   case 12:
+   vmode->mtmdsclock = (u64)vmode->mpixelclock * 3 / 2;
+   break;
+   case 10:
+   vmode->mtmdsclock = (u64)vmode->mpixelclock * 5 / 4;
+   break;
+   }
+   }
+
if (hdmi_bus_fmt_is_yuv420(hdmi->hdmi_data.enc_out_bus_format))
vmode->mtmdsclock /= 2;
 
+   dev_dbg(hdmi->dev, "final tmdsclk = %d\n", vmode->mtmdsclock);
+
/* Set up HDMI_FC_INVIDCONF */
inv_val = (hdmi->hdmi_data.hdcp_enable ||
   (dw_hdmi_support_scdc(hdmi) &&
-- 
2.22.0

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH v4 09/11] drm/meson: venc: add support for YUV420 setup

2020-02-06 Thread Neil Armstrong
This patch adds encoding support for the YUV420 output from the
Amlogic Meson SoCs Video Processing Unit to the HDMI Controller.

The YUV420 is obtained by generating a YUV444 pixel stream like
the classic HDMI display modes, but then the Video Encoder output
can be configured to down-sample the YUV444 pixel stream to a YUV420
stream.

In addition to pixel stream down-sampling, the Y Cb Cr components must
also be mapped differently to align with the HDMI2.0 specifications.

Signed-off-by: Neil Armstrong 
---
 drivers/gpu/drm/meson/meson_dw_hdmi.c | 3 ++-
 drivers/gpu/drm/meson/meson_venc.c| 8 +---
 drivers/gpu/drm/meson/meson_venc.h| 2 ++
 3 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c 
b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 686c47106a18..f5d46d082534 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -772,7 +772,8 @@ static void meson_venc_hdmi_encoder_mode_set(struct 
drm_bridge *bridge,
DRM_DEBUG_DRIVER("\"%s\" vic %d\n", mode->name, vic);
 
/* VENC + VENC-DVI Mode setup */
-   meson_venc_hdmi_mode_set(priv, vic, mode);
+   meson_venc_hdmi_mode_set(priv, vic, ycrcb_map, false,
+VPU_HDMI_OUTPUT_CBYCR);
 
/* VCLK Set clock */
dw_hdmi_set_vclk(dw_hdmi, mode);
diff --git a/drivers/gpu/drm/meson/meson_venc.c 
b/drivers/gpu/drm/meson/meson_venc.c
index a9ab78970bfe..f93c725b6f02 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -946,6 +946,8 @@ bool meson_venc_hdmi_venc_repeat(int vic)
 EXPORT_SYMBOL_GPL(meson_venc_hdmi_venc_repeat);
 
 void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
+ unsigned int ycrcb_map,
+ bool yuv420_mode,
  const struct drm_display_mode *mode)
 {
union meson_hdmi_venc_mode *vmode = NULL;
@@ -1528,14 +1530,14 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, 
int vic,
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
reg |= VPU_HDMI_INV_VSYNC;
 
-   /* Output data format: CbYCr */
-   reg |= VPU_HDMI_OUTPUT_CBYCR;
+   /* Output data format */
+   reg |= ycrcb_map;
 
/*
 * Write rate to the async FIFO between VENC and HDMI.
 * One write every 2 wr_clk.
 */
-   if (venc_repeat)
+   if (venc_repeat || yuv420_mode)
reg |= VPU_HDMI_WR_RATE(2);
 
/*
diff --git a/drivers/gpu/drm/meson/meson_venc.h 
b/drivers/gpu/drm/meson/meson_venc.h
index 1abdcbdf51c0..9138255ffc9e 100644
--- a/drivers/gpu/drm/meson/meson_venc.h
+++ b/drivers/gpu/drm/meson/meson_venc.h
@@ -60,6 +60,8 @@ extern struct meson_cvbs_enci_mode meson_cvbs_enci_ntsc;
 void meson_venci_cvbs_mode_set(struct meson_drm *priv,
   struct meson_cvbs_enci_mode *mode);
 void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
+ unsigned int ycrcb_map,
+ bool yuv420_mode,
  const struct drm_display_mode *mode);
 unsigned int meson_venci_get_field(struct meson_drm *priv);
 
-- 
2.22.0

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH v4 07/11] drm/meson: meson_dw_hdmi: add bridge and switch to drm_bridge_funcs

2020-02-06 Thread Neil Armstrong
Switch the dw-hdmi driver to drm_bridge_funcs by implementing a new local
bridge, connecting it to the dw-hdmi bridge, then implement the
atomic_get_input_bus_fmts/atomic_get_output_bus_fmts.

Signed-off-by: Neil Armstrong 
---
 drivers/gpu/drm/meson/meson_dw_hdmi.c | 105 +-
 1 file changed, 85 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c 
b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 3bb7ffe5fc39..4b3809626f7e 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -16,6 +16,7 @@
 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -135,6 +136,7 @@ struct meson_dw_hdmi_data {
 
 struct meson_dw_hdmi {
struct drm_encoder encoder;
+   struct drm_bridge bridge;
struct dw_hdmi_plat_data dw_plat_data;
struct meson_drm *priv;
struct device *dev;
@@ -151,6 +153,8 @@ struct meson_dw_hdmi {
 };
 #define encoder_to_meson_dw_hdmi(x) \
container_of(x, struct meson_dw_hdmi, encoder)
+#define bridge_to_meson_dw_hdmi(x) \
+   container_of(x, struct meson_dw_hdmi, bridge)
 
 static inline int dw_hdmi_is_compatible(struct meson_dw_hdmi *dw_hdmi,
const char *compat)
@@ -368,7 +372,7 @@ static inline void meson_dw_hdmi_phy_reset(struct 
meson_dw_hdmi *dw_hdmi)
 }
 
 static void dw_hdmi_set_vclk(struct meson_dw_hdmi *dw_hdmi,
-struct drm_display_mode *mode)
+const struct drm_display_mode *mode)
 {
struct meson_drm *priv = dw_hdmi->priv;
int vic = drm_match_cea_mode(mode);
@@ -663,6 +667,10 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
 
 /* Encoder */
 
+static const u32 meson_dw_hdmi_out_bus_fmts[] = {
+   MEDIA_BUS_FMT_YUV8_1X24,
+};
+
 static void meson_venc_hdmi_encoder_destroy(struct drm_encoder *encoder)
 {
drm_encoder_cleanup(encoder);
@@ -672,16 +680,63 @@ static const struct drm_encoder_funcs 
meson_venc_hdmi_encoder_funcs = {
.destroy= meson_venc_hdmi_encoder_destroy,
 };
 
-static int meson_venc_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
+static u32 *
+meson_venc_hdmi_encoder_get_out_bus_fmts(struct drm_bridge *bridge,
+struct drm_bridge_state *bridge_state,
+struct drm_crtc_state *crtc_state,
+struct drm_connector_state *conn_state,
+unsigned int *num_output_fmts)
+{
+   u32 *output_fmts;
+
+   *num_output_fmts = ARRAY_SIZE(meson_dw_hdmi_out_bus_fmts);
+   output_fmts = kcalloc(*num_output_fmts, sizeof(*output_fmts),
+ GFP_KERNEL);
+   if (!output_fmts)
+   return NULL;
+
+   memcpy(output_fmts, meson_dw_hdmi_out_bus_fmts, *num_output_fmts);
+
+   return output_fmts;
+}
+
+static u32 *
+meson_venc_hdmi_encoder_get_inp_bus_fmts(struct drm_bridge *bridge,
+   struct drm_bridge_state *bridge_state,
+   struct drm_crtc_state *crtc_state,
+   struct drm_connector_state *conn_state,
+   u32 output_fmt,
+   unsigned int *num_input_fmts)
+{
+   u32 *input_fmts = NULL;
+
+   if (output_fmt == meson_dw_hdmi_out_bus_fmts[0]) {
+   *num_input_fmts = 1;
+   input_fmts = kcalloc(*num_input_fmts,
+sizeof(*input_fmts),
+GFP_KERNEL);
+   if (!input_fmts)
+   return NULL;
+
+   input_fmts[0] = output_fmt;
+   } else {
+   *num_input_fmts = 0;
+   }
+
+   return input_fmts;
+}
+
+static int meson_venc_hdmi_encoder_atomic_check(struct drm_bridge *bridge,
+   struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
 {
return 0;
 }
 
-static void meson_venc_hdmi_encoder_disable(struct drm_encoder *encoder)
+static void meson_venc_hdmi_encoder_disable(struct drm_bridge *bridge)
 {
-   struct meson_dw_hdmi *dw_hdmi = encoder_to_meson_dw_hdmi(encoder);
+   struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge);
struct meson_drm *priv = dw_hdmi->priv;
 
DRM_DEBUG_DRIVER("\n");
@@ -693,9 +748,9 @@ static void meson_venc_hdmi_encoder_disable(struct 
drm_encoder *encoder)
writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN));
 }
 
-static void meson_venc_hdmi_encoder_enable(struct drm_encoder *encoder)
+static void meson_venc_hdmi_encoder_enable(struct drm_bridge *bridge)
 {
-   struct meson_dw_hdmi *dw_hdmi = 

[PATCH v4 10/11] drm/meson: vclk: add support for YUV420 setup

2020-02-06 Thread Neil Armstrong
This patch adds clocking support for the YUV420 output from the
Amlogic Meson SoCs Video Processing Unit to the HDMI Controller.

The YUV420 is obtained by generating a YUV444 pixel stream like
the classic HDMI display modes, but then the Video Encoder output
can be configured to down-sample the YUV444 pixel stream to a YUV420
stream.

This mode needs a different clock generation scheme since the TMDS PHY
clock must match the 10x ratio with the YUV420 pixel clock, but
the video encoder must run at 2x the pixel clock.

This patch adds the TMDS PHY clock value in all the video clock setup
in order to better support these specific uses cases and switch
to the Common Clock framework for clocks handling in the future.

Signed-off-by: Neil Armstrong 
---
 drivers/gpu/drm/meson/meson_dw_hdmi.c   | 24 ---
 drivers/gpu/drm/meson/meson_vclk.c  | 93 +++--
 drivers/gpu/drm/meson/meson_vclk.h  |  7 +-
 drivers/gpu/drm/meson/meson_venc_cvbs.c |  6 +-
 4 files changed, 95 insertions(+), 35 deletions(-)

diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c 
b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index f5d46d082534..94f206bf795d 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -376,15 +376,19 @@ static void dw_hdmi_set_vclk(struct meson_dw_hdmi 
*dw_hdmi,
 {
struct meson_drm *priv = dw_hdmi->priv;
int vic = drm_match_cea_mode(mode);
+   unsigned int phy_freq;
unsigned int vclk_freq;
unsigned int venc_freq;
unsigned int hdmi_freq;
 
vclk_freq = mode->clock;
 
+   /* TMDS clock is pixel_clock * 10 */
+   phy_freq = vclk_freq * 10;
+
if (!vic) {
-   meson_vclk_setup(priv, MESON_VCLK_TARGET_DMT, vclk_freq,
-vclk_freq, vclk_freq, false);
+   meson_vclk_setup(priv, MESON_VCLK_TARGET_DMT, phy_freq,
+vclk_freq, vclk_freq, vclk_freq, false);
return;
}
 
@@ -402,11 +406,11 @@ static void dw_hdmi_set_vclk(struct meson_dw_hdmi 
*dw_hdmi,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
venc_freq /= 2;
 
-   DRM_DEBUG_DRIVER("vclk:%d venc=%d hdmi=%d enci=%d\n",
-   vclk_freq, venc_freq, hdmi_freq,
+   DRM_DEBUG_DRIVER("vclk:%d phy=%d venc=%d hdmi=%d enci=%d\n",
+   phy_freq, vclk_freq, venc_freq, hdmi_freq,
priv->venc.hdmi_use_enci);
 
-   meson_vclk_setup(priv, MESON_VCLK_TARGET_HDMI, vclk_freq,
+   meson_vclk_setup(priv, MESON_VCLK_TARGET_HDMI, phy_freq, vclk_freq,
 venc_freq, hdmi_freq, priv->venc.hdmi_use_enci);
 }
 
@@ -617,6 +621,7 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
   const struct drm_display_mode *mode)
 {
struct meson_drm *priv = connector->dev->dev_private;
+   unsigned int phy_freq;
unsigned int vclk_freq;
unsigned int venc_freq;
unsigned int hdmi_freq;
@@ -643,6 +648,9 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
 
vclk_freq = mode->clock;
 
+   /* TMDS clock is pixel_clock * 10 */
+   phy_freq = vclk_freq * 10;
+
/* 480i/576i needs global pixel doubling */
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
vclk_freq *= 2;
@@ -659,10 +667,10 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
venc_freq /= 2;
 
-   dev_dbg(connector->dev->dev, "%s: vclk:%d venc=%d hdmi=%d\n", __func__,
-   vclk_freq, venc_freq, hdmi_freq);
+   dev_dbg(connector->dev->dev, "%s: vclk:%d phy=%d venc=%d hdmi=%d\n",
+   __func__, phy_freq, vclk_freq, venc_freq, hdmi_freq);
 
-   return meson_vclk_vic_supported_freq(vclk_freq);
+   return meson_vclk_vic_supported_freq(phy_freq, vclk_freq);
 }
 
 /* Encoder */
diff --git a/drivers/gpu/drm/meson/meson_vclk.c 
b/drivers/gpu/drm/meson/meson_vclk.c
index f690793ae2d5..fdf26dac9fa8 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -354,12 +354,17 @@ enum {
 /* 2970 /1 /1 /1 /5 /2  => /1 /1 */
MESON_VCLK_HDMI_297000,
 /* 5940 /1 /1 /2 /5 /1  => /1 /1 */
-   MESON_VCLK_HDMI_594000
+   MESON_VCLK_HDMI_594000,
+/* 2970 /1 /1 /1 /5 /1  => /1 /2 */
+   MESON_VCLK_HDMI_594000_YUV420,
 };
 
 struct meson_vclk_params {
+   unsigned int pll_freq;
+   unsigned int phy_freq;
+   unsigned int vclk_freq;
+   unsigned int venc_freq;
unsigned int pixel_freq;
-   unsigned int pll_base_freq;
unsigned int pll_od1;
unsigned int pll_od2;
unsigned int pll_od3;
@@ -367,8 +372,11 @@ struct meson_vclk_params {
unsigned int vclk_div;
 } params[] = {
[MESON_VCLK_HDMI_ENCI_54000] = {
+   .pll_freq = 432,
+   .phy_freq = 27,
+   .vclk_freq = 54000,
+   .venc_freq = 54000,
   

[PATCH v4 11/11] drm/meson: Add YUV420 output support

2020-02-06 Thread Neil Armstrong
This patch adds support for the YUV420 output from the Amlogic Meson SoCs
Video Processing Unit to the HDMI Controller.

The YUV420 is obtained by generating a YUV444 pixel stream like
the classic HDMI display modes, but then the Video Encoder output
can be configured to down-sample the YUV444 pixel stream to a YUV420
stream.
In addition to pixel stream down-sampling, the Y Cb Cr components must
also be mapped differently to align with the HDMI2.0 specifications.

This mode needs a different clock generation scheme since the TMDS PHY
clock must match the 10x ratio with the YUV420 pixel clock, but
the video encoder must run at 2x the pixel clock.

This patch enables the bridge bus format negotiation, and handles
the YUV420 case if selected by the negotiation.

Signed-off-by: Neil Armstrong 
---
 drivers/gpu/drm/meson/meson_dw_hdmi.c | 91 ---
 1 file changed, 70 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c 
b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 94f206bf795d..5962afbfc8ab 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -150,6 +150,7 @@ struct meson_dw_hdmi {
struct regulator *hdmi_supply;
u32 irq_stat;
struct dw_hdmi *hdmi;
+   unsigned long output_bus_fmt;
 };
 #define encoder_to_meson_dw_hdmi(x) \
container_of(x, struct meson_dw_hdmi, encoder)
@@ -301,6 +302,10 @@ static void meson_hdmi_phy_setup_mode(struct meson_dw_hdmi 
*dw_hdmi,
struct meson_drm *priv = dw_hdmi->priv;
unsigned int pixel_clock = mode->clock;
 
+   /* For 420, pixel clock is half unlike venc clock */
+   if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
+   pixel_clock /= 2;
+
if (dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxm-dw-hdmi")) {
if (pixel_clock >= 371250) {
@@ -383,6 +388,10 @@ static void dw_hdmi_set_vclk(struct meson_dw_hdmi *dw_hdmi,
 
vclk_freq = mode->clock;
 
+   /* For 420, pixel clock is half unlike venc clock */
+   if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
+   vclk_freq /= 2;
+
/* TMDS clock is pixel_clock * 10 */
phy_freq = vclk_freq * 10;
 
@@ -392,13 +401,16 @@ static void dw_hdmi_set_vclk(struct meson_dw_hdmi 
*dw_hdmi,
return;
}
 
+   /* 480i/576i needs global pixel doubling */
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
vclk_freq *= 2;
 
venc_freq = vclk_freq;
hdmi_freq = vclk_freq;
 
-   if (meson_venc_hdmi_venc_repeat(vic))
+   /* VENC double pixels for 1080i, 720p and YUV420 modes */
+   if (meson_venc_hdmi_venc_repeat(vic) ||
+   dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
venc_freq *= 2;
 
vclk_freq = max(venc_freq, hdmi_freq);
@@ -445,8 +457,9 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void 
*data,
/* Enable normal output to PHY */
dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_BIST_CNTL, BIT(12));
 
-   /* TMDS pattern setup (TOFIX Handle the YUV420 case) */
-   if (mode->clock > 34) {
+   /* TMDS pattern setup */
+   if (mode->clock > 34 &&
+   dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_YUV8_1X24) {
dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01,
  0);
dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_23,
@@ -621,6 +634,7 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
   const struct drm_display_mode *mode)
 {
struct meson_drm *priv = connector->dev->dev_private;
+   bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported;
unsigned int phy_freq;
unsigned int vclk_freq;
unsigned int venc_freq;
@@ -630,9 +644,11 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
 
DRM_DEBUG_DRIVER("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
 
-   /* If sink max TMDS clock, we reject the mode */
+   /* If sink does not support 540MHz, reject the non-420 HDMI2 modes */
if (connector->display_info.max_tmds_clock &&
-   mode->clock > connector->display_info.max_tmds_clock)
+   mode->clock > connector->display_info.max_tmds_clock &&
+   !drm_mode_is_420_only(>display_info, mode) &&
+   !drm_mode_is_420_also(>display_info, mode))
return MODE_BAD;
 
/* Check against non-VIC supported modes */
@@ -648,6 +664,12 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
 
vclk_freq = mode->clock;
 
+   /* For 420, pixel clock is half unlike venc clock */
+   if (drm_mode_is_420_only(>display_info, mode) ||
+   (!is_hdmi2_sink &&
+drm_mode_is_420_also(>display_info, mode)))
+   vclk_freq /= 2;
+
/* TMDS 

Re: [PATCH v2] drm/msm: Fix a6xx GMU shutdown sequence

2020-02-06 Thread Doug Anderson
Hi,

On Wed, Feb 5, 2020 at 1:00 PM Rob Clark  wrote:
>
> On Wed, Feb 5, 2020 at 12:48 PM Jordan Crouse  wrote:
> >
> > Commit e812744c5f95 ("drm: msm: a6xx: Add support for A618") missed
> > updating the VBIF flush in a6xx_gmu_shutdown and instead
> > inserted the new sequence into a6xx_pm_suspend along with a redundant
> > GMU idle.
> >
> > Move a6xx_bus_clear_pending_transactions to a6xx_gmu.c and use it in
> > the appropriate place in the shutdown routine and remove the redundant
> > idle call.
> >
> > v2: Remove newly unused variable that was triggering a warning
> >
> > Signed-off-by: Jordan Crouse 
>
> Reviewed-by: Rob Clark 

Without this patch I'm seeing some really bad behavior where the whole
system will pause for a bit, especially if it has been idle.  After
this patch things are much better.  Thus:

Fixes: e812744c5f95 ("drm: msm: a6xx: Add support for A618")
Tested-by: Douglas Anderson 

-Doug
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH 4/4] drm/virtio: move virtio_gpu_mem_entry initialization to new function

2020-02-06 Thread Chia-I Wu
On Thu, Feb 6, 2020 at 12:55 AM Gerd Hoffmann  wrote:
>
>   Hi,
>
> > > virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
> > > -  ents, nents,
> > > +  obj->ents, obj->nents,
> > >fence);
> > > +   obj->ents = NULL;
> > > +   obj->nents = 0;
> > Hm, if the entries are temporary, can we allocate and initialize them
> > in this function?
>
> Well, the plan for CREATE_RESOURCE_BLOB is to use obj->ents too ...
Is obj->ents needed after CREATE_RESOURCE_BLOB?  If not, having yet
another helper

  ents = virtio_gpu_object_alloc_mem_entries(..., );

seems cleaner.  We would also be able to get rid of virtio_gpu_object_attach.

>
> cheers,
>   Gerd
>
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH 2/4] drm/virtio: resource teardown tweaks

2020-02-06 Thread Chia-I Wu
On Wed, Feb 5, 2020 at 10:43 PM Gerd Hoffmann  wrote:
>
> > > -
> > > -   drm_gem_shmem_free_object(obj);
> > > +   if (bo->created) {
> > > +   virtio_gpu_cmd_unref_resource(vgdev, bo);
> > > +   /* completion handler calls virtio_gpu_cleanup_object() */
> > nitpick: we don't need this comment when virtio_gpu_cmd_unref_cb is
> > defined by this file and passed to virtio_gpu_cmd_unref_resource.
>
> I want virtio_gpu_cmd_unref_cb + virtio_gpu_cmd_unref_resource being
> placed next to each other so it is easier to see how they work hand in
> hand.
>
> > I happen to be looking at our error handling paths.  I think we want
> > virtio_gpu_queue_fenced_ctrl_buffer to call vbuf->resp_cb on errors.
>
> /me was thinking about that too.  Yes, we will need either that,
> or a separate vbuf->error_cb callback.  That'll be another patch
> though.
Or the new virtio_gpu_queue_ctrl_sgs can return errors rather than
eating errors.

Yeah, that should be another patch.
>
> > > +   /*
> > > +* We are in the release callback and do NOT want refcount
> > > +* bo, so do NOT use virtio_gpu_array_add_obj().
> > > +*/
> > > +   vbuf->objs = virtio_gpu_array_alloc(1);
> > > +   vbuf->objs->objs[0] = >base.base
> > This is an abuse of obj array.  Add "void *private_data;" to
> > virtio_gpu_vbuffer and use that maybe?
>
> I'd name that *cb_data, but yes, that makes sense.
Sounds great.
>
> cheers,
>   Gerd
>
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH] drm/virtio: fix ring free check

2020-02-06 Thread Chia-I Wu
On Thu, Feb 6, 2020 at 3:14 AM Gerd Hoffmann  wrote:
>
> If the virtio device supports indirect ring descriptors we need only one
> ring entry for the whole command.  Take that into account when checking
> whenever the virtqueue has enough free entries for our command.
>
> Signed-off-by: Gerd Hoffmann 
> ---
>  drivers/gpu/drm/virtio/virtgpu_vq.c | 9 ++---
>  1 file changed, 6 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c 
> b/drivers/gpu/drm/virtio/virtgpu_vq.c
> index 41e475fbd67b..a2ec09dba530 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_vq.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
> @@ -328,7 +328,8 @@ static void virtio_gpu_queue_ctrl_sgs(struct 
> virtio_gpu_device *vgdev,
>  {
> struct virtqueue *vq = vgdev->ctrlq.vq;
> bool notify = false;
> -   int ret;
> +   bool indirect;
> +   int vqcnt, ret;
>
>  again:
> spin_lock(>ctrlq.qlock);
> @@ -341,9 +342,11 @@ static void virtio_gpu_queue_ctrl_sgs(struct 
> virtio_gpu_device *vgdev,
> return;
> }
>
> -   if (vq->num_free < elemcnt) {
> +   indirect = virtio_has_feature(vgdev->vdev, 
> VIRTIO_RING_F_INDIRECT_DESC);
> +   vqcnt = indirect ? 1 : elemcnt;
Is the feature dynamic and require the lock held?  If not, the result
can be cached and the fixup can happen before grabbing the lock

  if (vgdev->has_indirect_desc)
elemcnt = 1;

Either way, patch is

  Reviewed-by: Chia-I Wu 


> +   if (vq->num_free < vqcnt) {
> spin_unlock(>ctrlq.qlock);
> -   wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
> +   wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= vqcnt);
> goto again;
> }
>
> --
> 2.18.1
>
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH 09/11] drm/virtio: avoid an infinite loop

2020-02-06 Thread Chia-I Wu
On Thu, Feb 6, 2020 at 1:49 AM Gerd Hoffmann  wrote:
>
> On Wed, Feb 05, 2020 at 10:19:53AM -0800, Chia-I Wu wrote:
> > Make sure elemcnt does not exceed the maximum element count in
> > virtio_gpu_queue_ctrl_sgs.  We should improve our error handling or
> > impose a size limit on execbuffer, which are TODOs.
>
> Hmm, virtio supports indirect ring entries, so large execbuffers should
> not be a problem ...
>
> So I've waded through the virtio code.  Figured our logic is wrong.
> Luckily we err on the safe side (waiting for more free entries than we
> actually need).  The patch below should fix that (not tested yet).
That is good to know!  I was not sure if we have
VIRTIO_RING_F_INDIRECT_DESC so I kept our logic.  I will drop this
patch in v2.

>
> cheers,
>   Gerd
>
> diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c 
> b/drivers/gpu/drm/virtio/virtgpu_vq.c
> index aa25e8781404..535399b3a3ea 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_vq.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
> @@ -328,7 +328,7 @@ static bool virtio_gpu_queue_ctrl_sgs(struct 
> virtio_gpu_device *vgdev,
>  {
> struct virtqueue *vq = vgdev->ctrlq.vq;
> bool notify = false;
> -   int ret;
> +   int vqcnt, ret;
>
>  again:
> spin_lock(>ctrlq.qlock);
> @@ -341,9 +341,10 @@ static bool virtio_gpu_queue_ctrl_sgs(struct 
> virtio_gpu_device *vgdev,
> return notify;
> }
>
> -   if (vq->num_free < elemcnt) {
> +   vqcnt = virtqueue_use_indirect(vq, elemcnt) ? 1 : elemcnt;
> +   if (vq->num_free < vqcnt) {
> spin_unlock(>ctrlq.qlock);
> -   wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
> +   wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= vq);
> goto again;
> }
>
>
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH] drm/vgem: Close use-after-free race in vgem_gem_create

2020-02-06 Thread Daniel Vetter
On Sun, Feb 02, 2020 at 05:37:31PM +, Chris Wilson wrote:
> Quoting Daniel Vetter (2020-02-02 13:21:33)
> > There's two references floating around here (for the object reference,
> > not the handle_count reference, that's a different thing):
> > 
> > - The temporary reference held by vgem_gem_create, acquired by
> >   creating the object and released by calling
> >   drm_gem_object_put_unlocked.
> > 
> > - The reference held by the object handle, created by
> >   drm_gem_handle_create. This one generally outlives the function,
> >   except if a 2nd thread races with a GEM_CLOSE ioctl call.
> > 
> > So usually everything is correct, except in that race case, where the
> > access to gem_object->size could be looking at freed data already.
> > Which again isn't a real problem (userspace shot its feet off already
> > with the race, we could return garbage), but maybe someone can exploit
> > this as an information leak.
> > 
> > Cc: Dan Carpenter 
> > Cc: Hillf Danton 
> > Cc: Reported-by: syzbot+0dc774d419e91...@syzkaller.appspotmail.com
> > Cc: sta...@vger.kernel.org
> > Cc: Emil Velikov 
> > Cc: Daniel Vetter 
> > Cc: Sean Paul 
> > Cc: Chris Wilson 
> > Cc: Eric Anholt 
> > Cc: Sam Ravnborg 
> > Cc: Rob Clark 
> > Signed-off-by: Daniel Vetter 
> > ---
> >  drivers/gpu/drm/vgem/vgem_drv.c | 9 ++---
> >  1 file changed, 6 insertions(+), 3 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/vgem/vgem_drv.c 
> > b/drivers/gpu/drm/vgem/vgem_drv.c
> > index 5bd60ded3d81..909eba43664a 100644
> > --- a/drivers/gpu/drm/vgem/vgem_drv.c
> > +++ b/drivers/gpu/drm/vgem/vgem_drv.c
> > @@ -196,9 +196,10 @@ static struct drm_gem_object *vgem_gem_create(struct 
> > drm_device *dev,
> > return ERR_CAST(obj);
> >  
> > ret = drm_gem_handle_create(file, >base, handle);
> > -   drm_gem_object_put_unlocked(>base);
> > -   if (ret)
> > +   if (ret) {
> > +   drm_gem_object_put_unlocked(>base);
> > return ERR_PTR(ret);
> > +   }
> >  
> > return >base;
> >  }
> > @@ -221,7 +222,9 @@ static int vgem_gem_dumb_create(struct drm_file *file, 
> > struct drm_device *dev,
> > args->size = gem_object->size;
> > args->pitch = pitch;
> >  
> > -   DRM_DEBUG("Created object of size %lld\n", size);
> > +   drm_gem_object_put_unlocked(gem_object);
> > +
> > +   DRM_DEBUG("Created object of size %llu\n", args->size);
> 
> I was thinking we either should return size from vgem_gem_create (the
> strategy we took in i915) or simply remove the vgem_gem_create() as that
> doesn't improve readability.
> 
> -static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
> - struct drm_file *file,
> - unsigned int *handle,
> - unsigned long size)
> +static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device 
> *dev,
> +   struct drm_mode_create_dumb *args)
>  {
> struct drm_vgem_gem_object *obj;
> -   int ret;
> +   u64 pitch, size;
> +   u32 handle;
> +
> +   pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
> +   size = mul_u32_u32(args->height, pitch);
> +   if (size == 0 || pitch < args->width)
> +   return -EINVAL;
> 
> obj = __vgem_gem_create(dev, size);
> if (IS_ERR(obj))
> -   return ERR_CAST(obj);
> +   return PTR_ERR(obj);
> +
> +   size = obj->base.size;
> 
> -   ret = drm_gem_handle_create(file, >base, handle);
> +   ret = drm_gem_handle_create(file, >base, );
> drm_gem_object_put_unlocked(>base);
> if (ret)
> return ERR_PTR(ret);
> 
> -   return >base;
> -}
> -
> -static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device 
> *dev,
> -   struct drm_mode_create_dumb *args)
> -{
> -   struct drm_gem_object *gem_object;
> -   u64 pitch, size;
> -
> -   pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
> -   size = args->height * pitch;
> -   if (size == 0)
> -   return -EINVAL;
> -
> -   gem_object = vgem_gem_create(dev, file, >handle, size);
> -   if (IS_ERR(gem_object))
> -   return PTR_ERR(gem_object);
> -
> -   args->size = gem_object->size;
> +   args->size = size;
> args->pitch = pitch;
> +   args->handle = handle;
> 
> 
> At the end of the day, it makes no difference,

Yeah there's room for more polish, but didn't want to do that in the cc:
stable patch.

> Reviewed-by: Chris Wilson 

Thanks for your review, finally applied to drm-misc-next-fixes now that CI
has blessed me with its attention for a bit!
-Daniel
-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
___
dri-devel mailing list
dri-devel@lists.freedesktop.org

[Bug 206225] nouveau: Screen distortion and lockup on resume

2020-02-06 Thread bugzilla-daemon
https://bugzilla.kernel.org/show_bug.cgi?id=206225

--- Comment #12 from Christoph Marz (derchiller-fo...@online.de) ---
Follow-up:

After a dist-upgrade, the error returned. I deleted the video acceleration
firmware and it was ok again.

When I installed 5.4.14, there were warnings about possibly missing firmware
(the nvidia files from firmware-misc-nonfree), so I reinstalled that package
and updated the initramfs (I think I missed that step after purging the
package). Furthermore, I removed nouveau.config=cipher=0 since that doesn't
seem to be related to the error.

To conclude: When it works, I do a dist-upgrade one day and the error returns.
Doing a dist-upgrade a few days after makes it work again. The same holds for
kernel upgrades.

-- 
You are receiving this mail because:
You are watching the assignee of the bug.
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH v4 libdrm 2/2] Add drmModeGetFB2

2020-02-06 Thread Li, Juston
On Wed, 2020-02-05 at 23:27 +, Eric Engestrom wrote:
> On Wednesday, 2020-02-05 23:10:21 +, Li, Juston wrote:
> > On Wed, 2020-02-05 at 22:25 +, Eric Engestrom wrote:
> > > On Friday, 2020-01-31 13:41:09 -0800, Juston Li wrote:
> > > > From: Daniel Stone 
> > > > 
> > > > Add a wrapper around the getfb2 ioctl, which returns extended
> > > > framebuffer information mirroring addfb2, including multiple
> > > > planes
> > > > and
> > > > modifiers.
> > > > 
> > > > Changes since v3:
> > > >  - remove unnecessary null check in drmModeFreeFB2 (Daniel
> > > > Stone)
> > > > 
> > > > Changes since v2:
> > > >  - getfb2 ioctl has been merged upstream
> > > >  - sync include/drm/drm.h in a seperate patch
> > > > 
> > > > Changes since v1:
> > > >  - functions should be drm_public
> > > >  - modifier should be 64 bits
> > > >  - update ioctl number
> > > > 
> > > > Signed-off-by: Juston Li 
> > > > Signed-off-by: Daniel Stone 
> > > > ---
> > > >  xf86drmMode.c | 36 
> > > >  xf86drmMode.h | 15 +++
> > > >  2 files changed, 51 insertions(+)
> > > > 
> > > > diff --git a/xf86drmMode.c b/xf86drmMode.c
> > > > index 0cf7992c6e9a..94dc8ce38a5e 100644
> > > > --- a/xf86drmMode.c
> > > > +++ b/xf86drmMode.c
> > > > @@ -1594,3 +1594,39 @@ drmModeRevokeLease(int fd, uint32_t
> > > > lessee_id)
> > > >  return 0;
> > > >  return -errno;
> > > >  }
> > > > +
> > > > +drm_public drmModeFB2Ptr
> > > > +drmModeGetFB2(int fd, uint32_t fb_id)
> > > > +{
> > > > +struct drm_mode_fb_cmd2 get;
> > > > +drmModeFB2Ptr ret;
> > > > +int err;
> > > > +
> > > > +memclear(get);
> > > > +get.fb_id = fb_id;
> > > 
> > > As mentioned on IRC, could you write it like this instead?
> > > 
> > > struct drm_mode_fb_cmd2 get = {
> > > .fb_id = fb_id,
> > > };
> > > 
> > > With that, consider this patch
> > > Reviewed-by: Eric Engestrom 
> > 
> > Opps I sent v5 before seeing this but my code style differs and is
> > probably incorrect :) I'll send v6 with the style corrected.
> > 
> > Thanks for reviewing!
> 
> Ah, sorry about that, our emails crossed paths.
> 
> As for the other patch (I mean 1/2), did you follow the instructions
> in
> include/drm/README, specifically the section titled "When and how to
> update these files" ?
> Your commit message makes it look like you just applied that one
> change
> instead of syncing with `make headers_install`.
> 
> Cheers,
>   Eric

Yes, drm.h was copied from 'make headers_install' from drm-misc-next.
It had been updated fairly recently so GETFB2 is the only delta.

Sorry, I didn't see the README so the commit message isn't exactly as
requested.


Also, only drm.h was synced, is that preferred or would it be better to
sync the entire header directory?

Thanks
Juston
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[GIT PULL] drm/tegra: Fixes for v5.6-rc1

2020-02-06 Thread Thierry Reding
Hi Dave,

The following changes since commit 033ccdb7f6b11701623507339646013b4ce389d3:

  gpu: host1x: Remove dev_err() on platform_get_irq() failure (2020-01-10 
17:05:12 +0100)

are available in the Git repository at:

  git://anongit.freedesktop.org/tegra/linux tags/drm/tegra/for-5.6-rc1-fixes

for you to fetch changes up to 98ae41adb252866158dd4e998551dfa85e612bed:

  gpu: host1x: Set DMA direction only for DMA-mapped buffer objects (2020-02-06 
18:23:12 +0100)

Thanks,
Thierry


drm/tegra: Fixes for v5.6-rc1

These are a couple of quick fixes for regressions that were found during
the first two weeks of the merge window.


Thierry Reding (6):
  drm/tegra: sor: Suspend on clock registration failure
  drm/tegra: sor: Disable runtime PM on probe failure
  drm/tegra: sor: Initialize runtime PM before use
  drm/tegra: Relax IOMMU usage criteria on old Tegra
  drm/tegra: Reuse IOVA mapping where possible
  gpu: host1x: Set DMA direction only for DMA-mapped buffer objects

 drivers/gpu/drm/tegra/drm.c   | 49 ---
 drivers/gpu/drm/tegra/gem.c   | 10 -
 drivers/gpu/drm/tegra/plane.c | 44 +-
 drivers/gpu/drm/tegra/sor.c   | 49 ---
 drivers/gpu/host1x/job.c  | 34 ++
 5 files changed, 123 insertions(+), 63 deletions(-)
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [Nouveau] [PATCH 4/4] drm/nouveau: Remove struct nouveau_framebuffer

2020-02-06 Thread James Jones
Yes, that's certainly viable.  If that's the general preference in 
direction, I'll rework the patches to do so.


Thanks,
-James

On 2/6/20 7:49 AM, Thomas Zimmermann wrote:

Hi James

Am 06.02.20 um 16:17 schrieb James Jones:

Note I'm adding some fields to nouveau_framebuffer in the series
"drm/nouveau: Support NVIDIA format modifiers."  I sent out v3 of that
yesterday.  It would probably still be possible to avoid them by
re-extracting the relevant data from the format modifier on the fly when
needed, but it is simpler and likely less error-prone with the wrapper
struct.


Thanks for the note.

I just took a look at your patchset. I think struct nouveau_framebuffer
should not store tile_mode and kind. AFAICT there are only two trivial
places where these values are used and they can be extracted from the
framebuffer at any time.

I'd suggest to expand nouveau_decode_mod() to take a drm_framebuffer and
return the correct values. Kind of what you do in
nouveau_framebuffer_new() near line 330.

Thoughts?

Best regards
Thomas

[1] https://patchwork.freedesktop.org/series/70786/#rev3



Thanks,
-James

On 2/6/20 2:19 AM, Thomas Zimmermann wrote:

After its cleanup, struct nouveau_framebuffer is only a wrapper around
struct drm_framebuffer. Use the latter directly.

Signed-off-by: Thomas Zimmermann 
---
   drivers/gpu/drm/nouveau/dispnv50/wndw.c   | 26 +++
   drivers/gpu/drm/nouveau/nouveau_display.c | 14 ++--
   drivers/gpu/drm/nouveau/nouveau_display.h | 12 +--
   drivers/gpu/drm/nouveau/nouveau_fbcon.c   | 14 ++--
   4 files changed, 28 insertions(+), 38 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index ba1399965a1c..4a67a656e007 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -40,11 +40,11 @@ nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma)
   }
     static struct nv50_wndw_ctxdma *
-nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct
nouveau_framebuffer *fb)
+nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct drm_framebuffer *fb)
   {
-    struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
+    struct nouveau_drm *drm = nouveau_drm(fb->dev);
   struct nv50_wndw_ctxdma *ctxdma;
-    struct nouveau_bo *nvbo = nouveau_gem_object(fb->base.obj[0]);
+    struct nouveau_bo *nvbo = nouveau_gem_object(fb->obj[0]);
   const u8    kind = nvbo->kind;
   const u32 handle = 0xfb00 | kind;
   struct {
@@ -236,16 +236,16 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw
*wndw, bool modeset,
  struct nv50_wndw_atom *asyw,
  struct nv50_head_atom *asyh)
   {
-    struct nouveau_framebuffer *fb =
nouveau_framebuffer(asyw->state.fb);
+    struct drm_framebuffer *fb = asyw->state.fb;
   struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
-    struct nouveau_bo *nvbo = nouveau_gem_object(fb->base.obj[0]);
+    struct nouveau_bo *nvbo = nouveau_gem_object(fb->obj[0]);
   int ret;
     NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);
   -    if (asyw->state.fb != armw->state.fb || !armw->visible ||
modeset) {
-    asyw->image.w = fb->base.width;
-    asyw->image.h = fb->base.height;
+    if (fb != armw->state.fb || !armw->visible || modeset) {
+    asyw->image.w = fb->width;
+    asyw->image.h = fb->height;
   asyw->image.kind = nvbo->kind;
     ret = nv50_wndw_atomic_check_acquire_rgb(asyw);
@@ -261,13 +261,13 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw
*wndw, bool modeset,
   asyw->image.blockh = nvbo->mode >> 4;
   else
   asyw->image.blockh = nvbo->mode;
-    asyw->image.blocks[0] = fb->base.pitches[0] / 64;
+    asyw->image.blocks[0] = fb->pitches[0] / 64;
   asyw->image.pitch[0] = 0;
   } else {
   asyw->image.layout = 1;
   asyw->image.blockh = 0;
   asyw->image.blocks[0] = 0;
-    asyw->image.pitch[0] = fb->base.pitches[0];
+    asyw->image.pitch[0] = fb->pitches[0];
   }
     if (!asyh->state.async_flip)
@@ -486,16 +486,16 @@ nv50_wndw_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
   static int
   nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state
*state)
   {
-    struct nouveau_framebuffer *fb = nouveau_framebuffer(state->fb);
+    struct drm_framebuffer *fb = state->fb;
   struct nouveau_drm *drm = nouveau_drm(plane->dev);
   struct nv50_wndw *wndw = nv50_wndw(plane);
   struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
-    struct nouveau_bo *nvbo = nouveau_gem_object(state->fb->obj[0]);
+    struct nouveau_bo *nvbo = nouveau_gem_object(fb->obj[0]);
   struct nv50_head_atom *asyh;
   struct nv50_wndw_ctxdma *ctxdma;
   int ret;
   -    NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, state->fb);
+    NV_ATOMIC(drm, 

Re: [PATCH v3] drm/msm: Add syncobj support.

2020-02-06 Thread Bas Nieuwenhuizen
Hi,

I'd appreciate if you could take a look at this patch. I believe I
have accommodated the earlier review comments.

Thank you,
Bas

On Fri, Jan 24, 2020 at 12:58 AM Bas Nieuwenhuizen
 wrote:
>
> This
>
> 1) Enables core DRM syncobj support.
> 2) Adds options to the submission ioctl to wait/signal syncobjs.
>
> Just like the wait fence fd, this does inline waits. Using the
> scheduler would be nice but I believe it is out of scope for
> this work.
>
> Support for timeline syncobjs is implemented and the interface
> is ready for it, but I'm not enabling it yet until there is
> some code for turnip to use it.
>
> The reset is mostly in there because in the presence of waiting
> and signalling the same semaphores, resetting them after
> signalling can become very annoying.
>
> v2:
>   - Fixed style issues
>   - Removed a cleanup issue in a failure case
>   - Moved to a copy_from_user per syncobj
>
> v3:
>  - Fixed a missing declaration introduced in v2
>  - Reworked to use ERR_PTR/PTR_ERR
>  - Simplified failure gotos.
>
> Signed-off-by: Bas Nieuwenhuizen 
> ---
>  drivers/gpu/drm/msm/msm_drv.c|   6 +-
>  drivers/gpu/drm/msm/msm_gem_submit.c | 232 ++-
>  include/uapi/drm/msm_drm.h   |  24 ++-
>  3 files changed, 258 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
> index c84f0a8b3f2c..5246b41798df 100644
> --- a/drivers/gpu/drm/msm/msm_drv.c
> +++ b/drivers/gpu/drm/msm/msm_drv.c
> @@ -37,9 +37,10 @@
>   * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
>   *   GEM object's debug name
>   * - 1.5.0 - Add SUBMITQUERY_QUERY ioctl
> + * - 1.6.0 - Syncobj support
>   */
>  #define MSM_VERSION_MAJOR  1
> -#define MSM_VERSION_MINOR  5
> +#define MSM_VERSION_MINOR  6
>  #define MSM_VERSION_PATCHLEVEL 0
>
>  static const struct drm_mode_config_funcs mode_config_funcs = {
> @@ -988,7 +989,8 @@ static struct drm_driver msm_driver = {
> .driver_features= DRIVER_GEM |
> DRIVER_RENDER |
> DRIVER_ATOMIC |
> -   DRIVER_MODESET,
> +   DRIVER_MODESET |
> +   DRIVER_SYNCOBJ,
> .open   = msm_open,
> .postclose   = msm_postclose,
> .lastclose  = drm_fb_helper_lastclose,
> diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c 
> b/drivers/gpu/drm/msm/msm_gem_submit.c
> index be5327af16fa..11045f56b815 100644
> --- a/drivers/gpu/drm/msm/msm_gem_submit.c
> +++ b/drivers/gpu/drm/msm/msm_gem_submit.c
> @@ -8,7 +8,9 @@
>  #include 
>  #include 
>
> +#include 
>  #include 
> +#include 
>
>  #include "msm_drv.h"
>  #include "msm_gpu.h"
> @@ -394,6 +396,186 @@ static void submit_cleanup(struct msm_gem_submit 
> *submit)
> ww_acquire_fini(&submit->ticket);
>  }
>
> +
> +struct msm_submit_post_dep {
> +   struct drm_syncobj *syncobj;
> +   uint64_t point;
> +   struct dma_fence_chain *chain;
> +};
> +
> +static struct drm_syncobj **msm_wait_deps(struct drm_device *dev,
> +  struct drm_file *file,
> +  uint64_t in_syncobjs_addr,
> +  uint32_t nr_in_syncobjs,
> +  size_t syncobj_stride,
> +  struct msm_ringbuffer *ring)
> +{
> +   struct drm_syncobj **syncobjs = NULL;
> +   struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
> +   int ret = 0;
> +   uint32_t i, j;
> +
> +   syncobjs = kcalloc(nr_in_syncobjs, sizeof(*syncobjs),
> +  GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
> +   if (!syncobjs)
> +   return ERR_PTR(-ENOMEM);
> +
> +   for (i = 0; i < nr_in_syncobjs; ++i) {
> +   uint64_t address = in_syncobjs_addr + i * syncobj_stride;
> +   struct dma_fence *fence;
> +
> +   if (copy_from_user(&syncobj_desc,
> +  u64_to_user_ptr(address),
> +  min(syncobj_stride,
> +  sizeof(syncobj_desc)))) {
> +   ret = -EFAULT;
> +   break;
> +   }
> +
> +   if (syncobj_desc.point &&
> +   !drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE)) {
> +   ret = -EOPNOTSUPP;
> +   break;
> +   }
> +
> +   if (syncobj_desc.flags & ~MSM_SUBMIT_SYNCOBJ_FLAGS) {
> +   ret = -EINVAL;
> +   break;
> +   }
> +
> +   ret = drm_syncobj_find_fence(file, syncobj_desc.handle,
> +syncobj_desc.point, 0, &fence);
> +   if (ret)
> +   break;
> +
> +   if 

Re: [Nouveau] [PATCH 4/4] drm/nouveau: Remove struct nouveau_framebuffer

2020-02-06 Thread James Jones
Yes, that's certainly viable.  If that's the general preference in 
direction, I'll rework the patches to do so.


Thanks,
-James

On 2/6/20 7:49 AM, Thomas Zimmermann wrote:

Hi James

Am 06.02.20 um 16:17 schrieb James Jones:

Note I'm adding some fields to nouveau_framebuffer in the series
"drm/nouveau: Support NVIDIA format modifiers."  I sent out v3 of that
yesterday.  It would probably still be possible to avoid them by
re-extracting the relevant data from the format modifier on the fly when
needed, but it is simpler and likely less error-prone with the wrapper
struct.


Thanks for the note.

I just took a look at your patchset. I think struct nouveau_framebuffer
should not store tile_mode and kind. AFAICT there are only two trivial
places where these values are used and they can be extracted from the
framebuffer at any time.

I'd suggest to expand nouveau_decode_mod() to take a drm_framebuffer and
return the correct values. Kind of what you do in
nouveau_framebuffer_new() near line 330.

Thoughts?

Best regards
Thomas

[1] https://patchwork.freedesktop.org/series/70786/#rev3



Thanks,
-James

On 2/6/20 2:19 AM, Thomas Zimmermann wrote:

After its cleanup, struct nouveau_framebuffer is only a wrapper around
struct drm_framebuffer. Use the latter directly.

Signed-off-by: Thomas Zimmermann 
---
   drivers/gpu/drm/nouveau/dispnv50/wndw.c   | 26 +++
   drivers/gpu/drm/nouveau/nouveau_display.c | 14 ++--
   drivers/gpu/drm/nouveau/nouveau_display.h | 12 +--
   drivers/gpu/drm/nouveau/nouveau_fbcon.c   | 14 ++--
   4 files changed, 28 insertions(+), 38 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index ba1399965a1c..4a67a656e007 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -40,11 +40,11 @@ nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma)
   }
     static struct nv50_wndw_ctxdma *
-nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct
nouveau_framebuffer *fb)
+nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct drm_framebuffer *fb)
   {
-    struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
+    struct nouveau_drm *drm = nouveau_drm(fb->dev);
   struct nv50_wndw_ctxdma *ctxdma;
-    struct nouveau_bo *nvbo = nouveau_gem_object(fb->base.obj[0]);
+    struct nouveau_bo *nvbo = nouveau_gem_object(fb->obj[0]);
   const u8    kind = nvbo->kind;
   const u32 handle = 0xfb00 | kind;
   struct {
@@ -236,16 +236,16 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw
*wndw, bool modeset,
  struct nv50_wndw_atom *asyw,
  struct nv50_head_atom *asyh)
   {
-    struct nouveau_framebuffer *fb =
nouveau_framebuffer(asyw->state.fb);
+    struct drm_framebuffer *fb = asyw->state.fb;
   struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
-    struct nouveau_bo *nvbo = nouveau_gem_object(fb->base.obj[0]);
+    struct nouveau_bo *nvbo = nouveau_gem_object(fb->obj[0]);
   int ret;
     NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);
   -    if (asyw->state.fb != armw->state.fb || !armw->visible ||
modeset) {
-    asyw->image.w = fb->base.width;
-    asyw->image.h = fb->base.height;
+    if (fb != armw->state.fb || !armw->visible || modeset) {
+    asyw->image.w = fb->width;
+    asyw->image.h = fb->height;
   asyw->image.kind = nvbo->kind;
     ret = nv50_wndw_atomic_check_acquire_rgb(asyw);
@@ -261,13 +261,13 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw
*wndw, bool modeset,
   asyw->image.blockh = nvbo->mode >> 4;
   else
   asyw->image.blockh = nvbo->mode;
-    asyw->image.blocks[0] = fb->base.pitches[0] / 64;
+    asyw->image.blocks[0] = fb->pitches[0] / 64;
   asyw->image.pitch[0] = 0;
   } else {
   asyw->image.layout = 1;
   asyw->image.blockh = 0;
   asyw->image.blocks[0] = 0;
-    asyw->image.pitch[0] = fb->base.pitches[0];
+    asyw->image.pitch[0] = fb->pitches[0];
   }
     if (!asyh->state.async_flip)
@@ -486,16 +486,16 @@ nv50_wndw_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
   static int
   nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state
*state)
   {
-    struct nouveau_framebuffer *fb = nouveau_framebuffer(state->fb);
+    struct drm_framebuffer *fb = state->fb;
   struct nouveau_drm *drm = nouveau_drm(plane->dev);
   struct nv50_wndw *wndw = nv50_wndw(plane);
   struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
-    struct nouveau_bo *nvbo = nouveau_gem_object(state->fb->obj[0]);
+    struct nouveau_bo *nvbo = nouveau_gem_object(fb->obj[0]);
   struct nv50_head_atom *asyh;
   struct nv50_wndw_ctxdma *ctxdma;
   int ret;
   -    NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, state->fb);
+    NV_ATOMIC(drm, 

Re: [PATCH v4] drm/scheduler: Avoid accessing freed bad job.

2020-02-06 Thread Andrey Grodzovsky


On 2/6/20 9:51 AM, Christian König wrote:

Am 06.02.20 um 15:49 schrieb Alex Deucher:

On Thu, Feb 6, 2020 at 6:50 AM Christian König
 wrote:

Am 06.02.20 um 12:10 schrieb Lucas Stach:

Hi all,

On Mi, 2020-02-05 at 19:24 +0100, Lucas Stach wrote:

Hi Andrey,

This commit breaks all drivers, which may bail out of the timeout
processing as they wish to extend the timeout (etnaviv, v3d).

Those drivers currently just return from the timeout handler before
calling drm_sched_stop(), which means with this commit applied we are
removing the first job from the ring_mirror_list, but never put it
back. This leads to jobs getting lost from the ring mirror, which 
then

causes quite a bit of fallout like unsignaled fences.

Not sure yet what to do about it, we can either add a function to add
the job back to the ring_mirror if the driver wants to extend the
timeout, or we could look for another way to stop
drm_sched_cleanup_jobs from freeing jobs that are currently in 
timeout

processing.

So after thinking about this a bit more my opinion is that we need to
revert this change for now and go back to the drawing board for the
scheduler timeout handling.

Right now this starts to feel like a big midlayer mistake with all the
very intricate intertwining between the drivers and the scheduler. The
rules on when it's safe to manipulate the ring mirror and when
completed jobs are signaled and freed are not really well specified.
The fact that we need to mutate state in order to get rid of races
instead of having a single big "timeout processing is owner of the
scheduler state for now" is a big fat warning sign IMHO.

Yes, that strongly feels like a hack to me as well. But I didn't had
time and still haven't to take a closer look and suggest something 
better.



In that case, can someone send me a revert?


Well a revert would break our driver.

The real solution is that somebody needs to sit down, gather ALL the 
requirements and then come up with a solution which is clean and works 
for everyone.


Christian.



I can take this on, as indeed our general design on this becomes more 
and more entangled as GPU reset scenarios grow in complexity (at least 
in the AMD driver). Currently I am on a high-priority internal task which 
should take me around a week or 2 to finish, and after that I can get to it.


Regarding temporary solution  - I looked into v3d and etnaviv use cases 
and we in AMD actually face the same scenario where we decide to skip HW 
reset if the guilty job did finish by the time we are processing the 
timeout  (see amdgpu_device_gpu_recover and skip_hw_reset goto) - the 
difference is we always call drm_sched_stop/start irrespectively of 
whether we are going to actually HW reset or not (same as extend 
timeout). I wonder if something like this can be done also for v3d and 
etnaviv?


Andrey






Alex



Christian.


It took me far longer than I'd like to admit to understand the failure
mode with fences not getting signaled after a GPU hang. The back and
forth between scheduler and driver code makes things really hard to
follow.

Regards,
Lucas


Regards,
Lucas

On Mo, 2019-11-25 at 15:51 -0500, Andrey Grodzovsky wrote:

Problem:
Due to a race between drm_sched_cleanup_jobs in sched thread and
drm_sched_job_timedout in timeout work there is a possiblity that
bad job was already freed while still being accessed from the
timeout thread.

Fix:
Instead of just peeking at the bad job in the mirror list
remove it from the list under lock and then put it back later when
we are garanteed no race with main sched thread is possible which
is after the thread is parked.

v2: Lock around processing ring_mirror_list in 
drm_sched_cleanup_jobs.


v3: Rebase on top of drm-misc-next. v2 is not needed anymore as
drm_sched_get_cleanup_job already has a lock there.

v4: Fix comments to relfect latest code in drm-misc.

Signed-off-by: Andrey Grodzovsky 
Reviewed-by: Christian König 
Tested-by: Emily Deng 
---
   drivers/gpu/drm/scheduler/sched_main.c | 27 
+++

   1 file changed, 27 insertions(+)

diff --git a/drivers/gpu/drm/scheduler/sched_main.c 
b/drivers/gpu/drm/scheduler/sched_main.c

index 6774955..1bf9c40 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -284,10 +284,21 @@ static void drm_sched_job_timedout(struct 
work_struct *work)

 unsigned long flags;

 sched = container_of(work, struct drm_gpu_scheduler, 
work_tdr.work);

+
+   /* Protects against concurrent deletion in 
drm_sched_get_cleanup_job */

> +   spin_lock_irqsave(&sched->job_list_lock, flags);
>  job = list_first_entry_or_null(&sched->ring_mirror_list,
    struct drm_sched_job, node);

 if (job) {
+   /*
+    * Remove the bad job so it cannot be freed by 
concurrent
+    * drm_sched_cleanup_jobs. It will be reinserted back 
after sched->thread

+    * is parked at which point it's safe.
+    */
+   

Re: [Nouveau] [PATCH 4/4] drm/nouveau: Remove struct nouveau_framebuffer

2020-02-06 Thread Thomas Zimmermann
Hi James

Am 06.02.20 um 16:17 schrieb James Jones:
> Note I'm adding some fields to nouveau_framebuffer in the series
> "drm/nouveau: Support NVIDIA format modifiers."  I sent out v3 of that
> yesterday.  It would probably still be possible to avoid them by
> re-extracting the relevant data from the format modifier on the fly when
> needed, but it is simpler and likely less error-prone with the wrapper
> struct.

Thanks for the note.

I just took a look at your patchset. I think struct nouveau_framebuffer
should not store tile_mode and kind. AFAICT there are only two trivial
places where these values are used and they can be extracted from the
framebuffer at any time.

I'd suggest to expand nouveau_decode_mod() to take a drm_framebuffer and
return the correct values. Kind of what you do in
nouveau_framebuffer_new() near line 330.

Thoughts?

Best regards
Thomas

[1] https://patchwork.freedesktop.org/series/70786/#rev3

> 
> Thanks,
> -James
> 
> On 2/6/20 2:19 AM, Thomas Zimmermann wrote:
>> After its cleanup, struct nouveau_framebuffer is only a wrapper around
>> struct drm_framebuffer. Use the latter directly.
>>
>> Signed-off-by: Thomas Zimmermann 
>> ---
>>   drivers/gpu/drm/nouveau/dispnv50/wndw.c   | 26 +++
>>   drivers/gpu/drm/nouveau/nouveau_display.c | 14 ++--
>>   drivers/gpu/drm/nouveau/nouveau_display.h | 12 +--
>>   drivers/gpu/drm/nouveau/nouveau_fbcon.c   | 14 ++--
>>   4 files changed, 28 insertions(+), 38 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
>> b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
>> index ba1399965a1c..4a67a656e007 100644
>> --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
>> +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
>> @@ -40,11 +40,11 @@ nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma)
>>   }
>>     static struct nv50_wndw_ctxdma *
>> -nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct
>> nouveau_framebuffer *fb)
>> +nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct drm_framebuffer *fb)
>>   {
>> -    struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
>> +    struct nouveau_drm *drm = nouveau_drm(fb->dev);
>>   struct nv50_wndw_ctxdma *ctxdma;
>> -    struct nouveau_bo *nvbo = nouveau_gem_object(fb->base.obj[0]);
>> +    struct nouveau_bo *nvbo = nouveau_gem_object(fb->obj[0]);
>>   const u8    kind = nvbo->kind;
>>   const u32 handle = 0xfb00 | kind;
>>   struct {
>> @@ -236,16 +236,16 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw
>> *wndw, bool modeset,
>>  struct nv50_wndw_atom *asyw,
>>  struct nv50_head_atom *asyh)
>>   {
>> -    struct nouveau_framebuffer *fb =
>> nouveau_framebuffer(asyw->state.fb);
>> +    struct drm_framebuffer *fb = asyw->state.fb;
>>   struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
>> -    struct nouveau_bo *nvbo = nouveau_gem_object(fb->base.obj[0]);
>> +    struct nouveau_bo *nvbo = nouveau_gem_object(fb->obj[0]);
>>   int ret;
>>     NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);
>>   -    if (asyw->state.fb != armw->state.fb || !armw->visible ||
>> modeset) {
>> -    asyw->image.w = fb->base.width;
>> -    asyw->image.h = fb->base.height;
>> +    if (fb != armw->state.fb || !armw->visible || modeset) {
>> +    asyw->image.w = fb->width;
>> +    asyw->image.h = fb->height;
>>   asyw->image.kind = nvbo->kind;
>>     ret = nv50_wndw_atomic_check_acquire_rgb(asyw);
>> @@ -261,13 +261,13 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw
>> *wndw, bool modeset,
>>   asyw->image.blockh = nvbo->mode >> 4;
>>   else
>>   asyw->image.blockh = nvbo->mode;
>> -    asyw->image.blocks[0] = fb->base.pitches[0] / 64;
>> +    asyw->image.blocks[0] = fb->pitches[0] / 64;
>>   asyw->image.pitch[0] = 0;
>>   } else {
>>   asyw->image.layout = 1;
>>   asyw->image.blockh = 0;
>>   asyw->image.blocks[0] = 0;
>> -    asyw->image.pitch[0] = fb->base.pitches[0];
>> +    asyw->image.pitch[0] = fb->pitches[0];
>>   }
>>     if (!asyh->state.async_flip)
>> @@ -486,16 +486,16 @@ nv50_wndw_cleanup_fb(struct drm_plane *plane,
>> struct drm_plane_state *old_state)
>>   static int
>>   nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state
>> *state)
>>   {
>> -    struct nouveau_framebuffer *fb = nouveau_framebuffer(state->fb);
>> +    struct drm_framebuffer *fb = state->fb;
>>   struct nouveau_drm *drm = nouveau_drm(plane->dev);
>>   struct nv50_wndw *wndw = nv50_wndw(plane);
>>   struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
>> -    struct nouveau_bo *nvbo = nouveau_gem_object(state->fb->obj[0]);
>> +    struct nouveau_bo *nvbo = nouveau_gem_object(fb->obj[0]);
>>   struct nv50_head_atom *asyh;
>>   struct nv50_wndw_ctxdma *ctxdma;
>>   int ret;
>>   -    NV_ATOMIC(drm, "%s 

Re: [Nouveau] [PATCH 4/4] drm/nouveau: Remove struct nouveau_framebuffer

2020-02-06 Thread James Jones
Note I'm adding some fields to nouveau_framebuffer in the series 
"drm/nouveau: Support NVIDIA format modifiers."  I sent out v3 of that 
yesterday.  It would probably still be possible to avoid them by 
re-extracting the relevant data from the format modifier on the fly when 
needed, but it is simpler and likely less error-prone with the wrapper 
struct.


Thanks,
-James

On 2/6/20 2:19 AM, Thomas Zimmermann wrote:

After its cleanup, struct nouveau_framebuffer is only a wrapper around
struct drm_framebuffer. Use the latter directly.

Signed-off-by: Thomas Zimmermann 
---
  drivers/gpu/drm/nouveau/dispnv50/wndw.c   | 26 +++
  drivers/gpu/drm/nouveau/nouveau_display.c | 14 ++--
  drivers/gpu/drm/nouveau/nouveau_display.h | 12 +--
  drivers/gpu/drm/nouveau/nouveau_fbcon.c   | 14 ++--
  4 files changed, 28 insertions(+), 38 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c 
b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index ba1399965a1c..4a67a656e007 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -40,11 +40,11 @@ nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma)
  }
  
  static struct nv50_wndw_ctxdma *

-nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct nouveau_framebuffer *fb)
+nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct drm_framebuffer *fb)
  {
-   struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
+   struct nouveau_drm *drm = nouveau_drm(fb->dev);
struct nv50_wndw_ctxdma *ctxdma;
-   struct nouveau_bo *nvbo = nouveau_gem_object(fb->base.obj[0]);
+   struct nouveau_bo *nvbo = nouveau_gem_object(fb->obj[0]);
const u8kind = nvbo->kind;
const u32 handle = 0xfb00 | kind;
struct {
@@ -236,16 +236,16 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, 
bool modeset,
   struct nv50_wndw_atom *asyw,
   struct nv50_head_atom *asyh)
  {
-   struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb);
+   struct drm_framebuffer *fb = asyw->state.fb;
struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
-   struct nouveau_bo *nvbo = nouveau_gem_object(fb->base.obj[0]);
+   struct nouveau_bo *nvbo = nouveau_gem_object(fb->obj[0]);
int ret;
  
  	NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);
  
-	if (asyw->state.fb != armw->state.fb || !armw->visible || modeset) {

-   asyw->image.w = fb->base.width;
-   asyw->image.h = fb->base.height;
+   if (fb != armw->state.fb || !armw->visible || modeset) {
+   asyw->image.w = fb->width;
+   asyw->image.h = fb->height;
asyw->image.kind = nvbo->kind;
  
  		ret = nv50_wndw_atomic_check_acquire_rgb(asyw);

@@ -261,13 +261,13 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, 
bool modeset,
asyw->image.blockh = nvbo->mode >> 4;
else
asyw->image.blockh = nvbo->mode;
-   asyw->image.blocks[0] = fb->base.pitches[0] / 64;
+   asyw->image.blocks[0] = fb->pitches[0] / 64;
asyw->image.pitch[0] = 0;
} else {
asyw->image.layout = 1;
asyw->image.blockh = 0;
asyw->image.blocks[0] = 0;
-   asyw->image.pitch[0] = fb->base.pitches[0];
+   asyw->image.pitch[0] = fb->pitches[0];
}
  
  		if (!asyh->state.async_flip)

@@ -486,16 +486,16 @@ nv50_wndw_cleanup_fb(struct drm_plane *plane, struct 
drm_plane_state *old_state)
  static int
  nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
  {
-   struct nouveau_framebuffer *fb = nouveau_framebuffer(state->fb);
+   struct drm_framebuffer *fb = state->fb;
struct nouveau_drm *drm = nouveau_drm(plane->dev);
struct nv50_wndw *wndw = nv50_wndw(plane);
struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
-   struct nouveau_bo *nvbo = nouveau_gem_object(state->fb->obj[0]);
+   struct nouveau_bo *nvbo = nouveau_gem_object(fb->obj[0]);
struct nv50_head_atom *asyh;
struct nv50_wndw_ctxdma *ctxdma;
int ret;
  
-	NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, state->fb);

+   NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, fb);
if (!asyw->state.fb)
return 0;
  
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c

index bbbff55eb5d5..94f7fd48e1cf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -207,10 +207,10 @@ int
  nouveau_framebuffer_new(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *gem,
- 

Re: [PATCH] drm/panfrost: perfcnt: Reserve/use the AS attached to the perfcnt MMU context

2020-02-06 Thread Steven Price
On 06/02/2020 14:13, Boris Brezillon wrote:
> We need to use the AS attached to the opened FD when dumping counters.

Indeed we do!

Reviewed-by: Steven Price 

> 
> Reported-by: Antonio Caggiano 
> Fixes: 7282f7645d06 ("drm/panfrost: Implement per FD address spaces")
> Cc: 
> Signed-off-by: Boris Brezillon 
> ---
>  drivers/gpu/drm/panfrost/panfrost_mmu.c |  7 ++-
>  drivers/gpu/drm/panfrost/panfrost_perfcnt.c | 11 ---
>  2 files changed, 10 insertions(+), 8 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c 
> b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> index 763cfca886a7..3107b0738e40 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> @@ -151,7 +151,12 @@ u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, 
> struct panfrost_mmu *mmu)
>   as = mmu->as;
>   if (as >= 0) {
>   int en = atomic_inc_return(>as_count);
> - WARN_ON(en >= NUM_JOB_SLOTS);
> +
> + /*
> +  * AS can be retained by active jobs or a perfcnt context,
> +  * hence the '+ 1' here.
> +  */
> + WARN_ON(en >= (NUM_JOB_SLOTS + 1));
>  
>   list_move(>list, >as_lru_list);
>   goto out;
> diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c 
> b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
> index 684820448be3..6913578d5aa7 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
> @@ -73,7 +73,7 @@ static int panfrost_perfcnt_enable_locked(struct 
> panfrost_device *pfdev,
>   struct panfrost_file_priv *user = file_priv->driver_priv;
>   struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
>   struct drm_gem_shmem_object *bo;
> - u32 cfg;
> + u32 cfg, as;
>   int ret;
>  
>   if (user == perfcnt->user)
> @@ -126,12 +126,8 @@ static int panfrost_perfcnt_enable_locked(struct 
> panfrost_device *pfdev,
>  
>   perfcnt->user = user;
>  
> - /*
> -  * Always use address space 0 for now.
> -  * FIXME: this needs to be updated when we start using different
> -  * address space.
> -  */
> - cfg = GPU_PERFCNT_CFG_AS(0) |
> + as = panfrost_mmu_as_get(pfdev, perfcnt->mapping->mmu);
> + cfg = GPU_PERFCNT_CFG_AS(as) |
> GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_MANUAL);
>  
>   /*
> @@ -195,6 +191,7 @@ static int panfrost_perfcnt_disable_locked(struct 
> panfrost_device *pfdev,
>   drm_gem_shmem_vunmap(>mapping->obj->base.base, perfcnt->buf);
>   perfcnt->buf = NULL;
>   panfrost_gem_close(>mapping->obj->base.base, file_priv);
> + panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu);
>   panfrost_gem_mapping_put(perfcnt->mapping);
>   perfcnt->mapping = NULL;
>   pm_runtime_mark_last_busy(pfdev->dev);
> 

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH v4] drm/scheduler: Avoid accessing freed bad job.

2020-02-06 Thread Christian König

Am 06.02.20 um 15:49 schrieb Alex Deucher:

On Thu, Feb 6, 2020 at 6:50 AM Christian König
 wrote:

Am 06.02.20 um 12:10 schrieb Lucas Stach:

Hi all,

On Mi, 2020-02-05 at 19:24 +0100, Lucas Stach wrote:

Hi Andrey,

This commit breaks all drivers, which may bail out of the timeout
processing as they wish to extend the timeout (etnaviv, v3d).

Those drivers currently just return from the timeout handler before
calling drm_sched_stop(), which means with this commit applied we are
removing the first job from the ring_mirror_list, but never put it
back. This leads to jobs getting lost from the ring mirror, which then
causes quite a bit of fallout like unsignaled fences.

Not sure yet what to do about it, we can either add a function to add
the job back to the ring_mirror if the driver wants to extend the
timeout, or we could look for another way to stop
drm_sched_cleanup_jobs from freeing jobs that are currently in timeout
processing.

So after thinking about this a bit more my opinion is that we need to
revert this change for now and go back to the drawing board for the
scheduler timeout handling.

Right now this starts to feel like a big midlayer mistake with all the
very intricate intertwining between the drivers and the scheduler. The
rules on when it's safe to manipulate the ring mirror and when
completed jobs are signaled and freed are not really well specified.
The fact that we need to mutate state in order to get rid of races
instead of having a single big "timeout processing is owner of the
scheduler state for now" is a big fat warning sign IMHO.

Yes, that strongly feels like a hack to me as well. But I didn't had
time and still haven't to take a closer look and suggest something better.


In that case, can someone send me a revert?


Well a revert would break our driver.

The real solution is that somebody needs to sit down, gather ALL the 
requirements and then come up with a solution which is clean and works 
for everyone.


Christian.



Alex



Christian.


It took me far longer than I'd like to admit to understand the failure
mode with fences not getting signaled after a GPU hang. The back and
forth between scheduler and driver code makes things really hard to
follow.

Regards,
Lucas


Regards,
Lucas

On Mo, 2019-11-25 at 15:51 -0500, Andrey Grodzovsky wrote:

Problem:
Due to a race between drm_sched_cleanup_jobs in sched thread and
drm_sched_job_timedout in timeout work there is a possibility that
bad job was already freed while still being accessed from the
timeout thread.

Fix:
Instead of just peeking at the bad job in the mirror list
remove it from the list under lock and then put it back later when
we are guaranteed no race with main sched thread is possible which
is after the thread is parked.

v2: Lock around processing ring_mirror_list in drm_sched_cleanup_jobs.

v3: Rebase on top of drm-misc-next. v2 is not needed anymore as
drm_sched_get_cleanup_job already has a lock there.

v4: Fix comments to reflect latest code in drm-misc.

Signed-off-by: Andrey Grodzovsky 
Reviewed-by: Christian König 
Tested-by: Emily Deng 
---
   drivers/gpu/drm/scheduler/sched_main.c | 27 +++
   1 file changed, 27 insertions(+)

diff --git a/drivers/gpu/drm/scheduler/sched_main.c 
b/drivers/gpu/drm/scheduler/sched_main.c
index 6774955..1bf9c40 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -284,10 +284,21 @@ static void drm_sched_job_timedout(struct work_struct 
*work)
 unsigned long flags;

 sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
+
+   /* Protects against concurrent deletion in drm_sched_get_cleanup_job */
+   spin_lock_irqsave(>job_list_lock, flags);
 job = list_first_entry_or_null(>ring_mirror_list,
struct drm_sched_job, node);

 if (job) {
+   /*
+* Remove the bad job so it cannot be freed by concurrent
+* drm_sched_cleanup_jobs. It will be reinserted back after 
sched->thread
+* is parked at which point it's safe.
+*/
+   list_del_init(>node);
+   spin_unlock_irqrestore(>job_list_lock, flags);
+
 job->sched->ops->timedout_job(job);

 /*
@@ -298,6 +309,8 @@ static void drm_sched_job_timedout(struct work_struct *work)
 job->sched->ops->free_job(job);
 sched->free_guilty = false;
 }
+   } else {
+   spin_unlock_irqrestore(>job_list_lock, flags);
 }

 spin_lock_irqsave(>job_list_lock, flags);
@@ -370,6 +383,20 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, 
struct drm_sched_job *bad)
 kthread_park(sched->thread);

 /*
+* Reinsert back the bad job here - now it's safe as
+* drm_sched_get_cleanup_job cannot race against us and release the
+* bad job at this point - we parked (waited for) any in progress
+* (earlier) cleanups and drm_sched_get_cleanup_job will 

Re: [PATCH v4] drm/scheduler: Avoid accessing freed bad job.

2020-02-06 Thread Alex Deucher
On Thu, Feb 6, 2020 at 6:50 AM Christian König
 wrote:
>
> Am 06.02.20 um 12:10 schrieb Lucas Stach:
> > Hi all,
> >
> > On Mi, 2020-02-05 at 19:24 +0100, Lucas Stach wrote:
> >> Hi Andrey,
> >>
> >> This commit breaks all drivers, which may bail out of the timeout
> >> processing as they wish to extend the timeout (etnaviv, v3d).
> >>
> >> Those drivers currently just return from the timeout handler before
> >> calling drm_sched_stop(), which means with this commit applied we are
> >> removing the first job from the ring_mirror_list, but never put it
> >> back. This leads to jobs getting lost from the ring mirror, which then
> >> causes quite a bit of fallout like unsignaled fences.
> >>
> >> Not sure yet what to do about it, we can either add a function to add
> >> the job back to the ring_mirror if the driver wants to extend the
> >> timeout, or we could look for another way to stop
> >> drm_sched_cleanup_jobs from freeing jobs that are currently in timeout
> >> processing.
> > So after thinking about this a bit more my opinion is that we need to
> > revert this change for now and go back to the drawing board for the
> > scheduler timeout handling.
> >
> > Right now this starts to feel like a big midlayer mistake with all the
> > very intricate intertwining between the drivers and the scheduler. The
> > rules on when it's safe to manipulate the ring mirror and when
> > completed jobs are signaled and freed are not really well specified.
> > The fact that we need to mutate state in order to get rid of races
> > instead of having a single big "timeout processing is owner of the
> > scheduler state for now" is a big fat warning sign IMHO.
>
> Yes, that strongly feels like a hack to me as well. But I didn't had
> time and still haven't to take a closer look and suggest something better.
>

In that case, can someone send me a revert?

Alex


> Christian.
>
> >
> > It took me far longer than I'd like to admit to understand the failure
> > mode with fences not getting signaled after a GPU hang. The back and
> > forth between scheduler and driver code makes things really hard to
> > follow.
> >
> > Regards,
> > Lucas
> >
> >> Regards,
> >> Lucas
> >>
> >> On Mo, 2019-11-25 at 15:51 -0500, Andrey Grodzovsky wrote:
> >>> Problem:
> >>> Due to a race between drm_sched_cleanup_jobs in sched thread and
> >>> drm_sched_job_timedout in timeout work there is a possibility that
> >>> bad job was already freed while still being accessed from the
> >>> timeout thread.
> >>>
> >>> Fix:
> >>> Instead of just peeking at the bad job in the mirror list
> >>> remove it from the list under lock and then put it back later when
> >>> we are guaranteed no race with main sched thread is possible which
> >>> is after the thread is parked.
> >>>
> >>> v2: Lock around processing ring_mirror_list in drm_sched_cleanup_jobs.
> >>>
> >>> v3: Rebase on top of drm-misc-next. v2 is not needed anymore as
> >>> drm_sched_get_cleanup_job already has a lock there.
> >>>
> >>> v4: Fix comments to reflect latest code in drm-misc.
> >>>
> >>> Signed-off-by: Andrey Grodzovsky 
> >>> Reviewed-by: Christian König 
> >>> Tested-by: Emily Deng 
> >>> ---
> >>>   drivers/gpu/drm/scheduler/sched_main.c | 27 +++
> >>>   1 file changed, 27 insertions(+)
> >>>
> >>> diff --git a/drivers/gpu/drm/scheduler/sched_main.c 
> >>> b/drivers/gpu/drm/scheduler/sched_main.c
> >>> index 6774955..1bf9c40 100644
> >>> --- a/drivers/gpu/drm/scheduler/sched_main.c
> >>> +++ b/drivers/gpu/drm/scheduler/sched_main.c
> >>> @@ -284,10 +284,21 @@ static void drm_sched_job_timedout(struct 
> >>> work_struct *work)
> >>> unsigned long flags;
> >>>
> >>> sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
> >>> +
> >>> +   /* Protects against concurrent deletion in drm_sched_get_cleanup_job 
> >>> */
> >>> +   spin_lock_irqsave(>job_list_lock, flags);
> >>> job = list_first_entry_or_null(>ring_mirror_list,
> >>>struct drm_sched_job, node);
> >>>
> >>> if (job) {
> >>> +   /*
> >>> +* Remove the bad job so it cannot be freed by concurrent
> >>> +* drm_sched_cleanup_jobs. It will be reinserted back after 
> >>> sched->thread
> >>> +* is parked at which point it's safe.
> >>> +*/
> >>> +   list_del_init(>node);
> >>> +   spin_unlock_irqrestore(>job_list_lock, flags);
> >>> +
> >>> job->sched->ops->timedout_job(job);
> >>>
> >>> /*
> >>> @@ -298,6 +309,8 @@ static void drm_sched_job_timedout(struct work_struct 
> >>> *work)
> >>> job->sched->ops->free_job(job);
> >>> sched->free_guilty = false;
> >>> }
> >>> +   } else {
> >>> +   spin_unlock_irqrestore(>job_list_lock, flags);
> >>> }
> >>>
> >>> spin_lock_irqsave(>job_list_lock, flags);
> >>> @@ -370,6 +383,20 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, 
> >>> struct 

[Bug 206441] New: kernel crash when amdgpu reset while very high loading

2020-02-06 Thread bugzilla-daemon
https://bugzilla.kernel.org/show_bug.cgi?id=206441

Bug ID: 206441
   Summary: kernel crash when amdgpu reset while very high loading
   Product: Drivers
   Version: 2.5
Kernel Version: 5.3.15
  Hardware: All
OS: Linux
  Tree: Mainline
Status: NEW
  Severity: normal
  Priority: P1
 Component: Video(DRI - non Intel)
  Assignee: drivers_video-...@kernel-bugs.osdl.org
  Reporter: wormw...@yahoo.com
Regression: No

environment:
  hw : Huawei TaiShan 2280(aarch64) server
  ubuntu : v5.3.15
  amdgpu : Radeon RX 580
  amdgpu driver: from 5.3.15  kernel

crash:
kernel crash accidentally  when  the gpu reset while the system is very
high load. The load average is about 600.
The crash repeated about 8 times  on different machines recently.

kernel log:
,
| [ 2510.570612] [drm:amdgpu_job_timedout [amdgpu]] *ERROR* ring gfx timeout,
signaled seq=11815857, emitted seq=11815860
| [ 2510.570711] [drm:amdgpu_job_timedout [amdgpu]] *ERROR* Process
information: process Media35435 pid 70612 thread appstream:cs0 pid 70667
| [ 2510.570722] amdgpu 000d:31:00.0: GPU reset begin!
| [ 2511.066418] amdgpu 000d:31:00.0: [drm:amdgpu_ring_test_helper [amdgpu]]
*ERROR* ring kiq_2.1.0 test failed (-110)
| [ 2511.066525] [drm:gfx_v8_0_hw_fini [amdgpu]] *ERROR* KCQ disable failed
| [ 2511.322334] cp is busy, skip halt cp
| [ 2511.378072] [drm] schedsdma0 is not ready, skipping
| [ 2511.378075] [drm] schedsdma1 is not ready, skipping
| [ 2511.378931] Unable to handle kernel access to user memory outside uaccess
routines at virtual address 0008
| [ 2511.378934] Mem abort info:
| [ 2511.378936]   ESR = 0x9604
| [ 2511.378939]   Exception class = DABT (current EL), IL = 32 bits
| [ 2511.378940]   SET = 0, FnV = 0
| [ 2511.378941]   EA = 0, S1PTW = 0
| [ 2511.378942] Data abort info:
| [ 2511.378943]   ISV = 0, ISS = 0x0004
| [ 2511.378944]   CM = 0, WnR = 0
| [ 2511.378947] user pgtable: 4k pages, 48-bit VAs, pgdp=001f76271000
| [ 2511.378948] [0008] pgd=
| [ 2511.378954] Internal error: Oops: 9604 [#1] PREEMPT SMP
| [ 2511.381197] CPU: 59 PID: 16120 Comm: appstream:cs0 Kdump: loaded Tainted:
G   OE 5.3.15-050315.2019121601-generic #jeff
| [ 2511.381507] Hardware name: Huawei TaiShan 2280 /BC11SPCD, BIOS 1.63
09/19/2019
| [ 2511.381709] pstate: 2045 (nzCv daif +PAN -UAO)
| [ 2511.382017] pc : amdgpu_cs_ioctl+0x173c/0x1998 [amdgpu]
| [ 2511.382255] lr : amdgpu_cs_ioctl+0x16fc/0x1998 [amdgpu]
| [ 2511.382392] sp : 6b3e39e0
| [ 2511.382459] x29: 6b3e39e0 x28: 
| [ 2511.382603] x27: 6b3e3ca8 x26: 11b19b48
| [ 2511.382756] x25:  x24: 
| [ 2511.382907] x23: 8093ba510df8 x22: 0976da44
| [ 2511.383056] x21: 8096a48a8400 x20: 8097c546
| [ 2511.383202] x19:  x18: 
| [ 2511.383340] x17:  x16: 
| [ 2511.383491] x15:  x14: 
| [ 2511.383641] x13:  x12: 
| [ 2511.383791] x11:  x10: 0040
| [ 2511.383937] x9 :  x8 : 8093ba511000
| [ 2511.384087] x7 : a14eea3b x6 : 8096a48a8a00
| [ 2511.384243] x5 : 8093ba510c00 x4 : 6b3e3a88
| [ 2511.384395] x3 : 801fd5427320 x2 : 
| [ 2511.384529] x1 :  x0 : 
| [ 2511.384681] Call trace:
| [ 2511.384852]  amdgpu_cs_ioctl+0x173c/0x1998 [amdgpu]
| [ 2511.389484]  drm_ioctl_kernel+0xb4/0x100 [drm]
| [ 2511.394027]  drm_ioctl+0x218/0x400 [drm]
| [ 2511.398696]  amdgpu_drm_ioctl+0x58/0x90 [amdgpu]
| [ 2511.403156]  do_vfs_ioctl+0xc4/0xb70
| [ 2511.407477]  ksys_ioctl+0x84/0xb8
| [ 2511.411706]  __arm64_sys_ioctl+0x28/0x38
| [ 2511.415663]  el0_svc_common.constprop.3+0xa4/0x1c8
| [ 2511.419497]  el0_svc_handler+0x34/0x90
| [ 2511.423165]  el0_svc+0x10/0x14
| [ 2511.426733] Code: 540006e1 f90077e0 5282 f9400800 (f9400400)
| [ 2511.430388] SMP: stopping secondary CPUs
| [ 2511.436245] Starting crashdump kernel...
| [ 2511.439485] Bye!
| 
| 
|   (gdb) l *(amdgpu_vm_sdma_commit+0x54)
|   0x54 is in amdgpu_vm_sdma_commit
(/home/motech/src/v5.3.15/v5.3.15/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c:102).
|   97  struct amdgpu_ib *ib = p->job->ibs;
|   98  struct amdgpu_ring *ring;
|   99  struct dma_fence *f;
|   100 int r;
|   101
|   102 ring = container_of(p->vm->entity.rq->sched, struct
amdgpu_ring, sched);
|   103
|   104 WARN_ON(ib->length_dw == 0);
|   105 amdgpu_ring_pad_ib(ring, ib);
|   106 WARN_ON(ib->length_dw > p->num_dw_left);
|   (gdb)
`

There was a gpu reset before the crash (null point)and the crash position
was different on  different machines, 

[PATCH] drm/panfrost: perfcnt: Reserve/use the AS attached to the perfcnt MMU context

2020-02-06 Thread Boris Brezillon
We need to use the AS attached to the opened FD when dumping counters.

Reported-by: Antonio Caggiano 
Fixes: 7282f7645d06 ("drm/panfrost: Implement per FD address spaces")
Cc: 
Signed-off-by: Boris Brezillon 
---
 drivers/gpu/drm/panfrost/panfrost_mmu.c |  7 ++-
 drivers/gpu/drm/panfrost/panfrost_perfcnt.c | 11 ---
 2 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c 
b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 763cfca886a7..3107b0738e40 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -151,7 +151,12 @@ u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, 
struct panfrost_mmu *mmu)
as = mmu->as;
if (as >= 0) {
int en = atomic_inc_return(>as_count);
-   WARN_ON(en >= NUM_JOB_SLOTS);
+
+   /*
+* AS can be retained by active jobs or a perfcnt context,
+* hence the '+ 1' here.
+*/
+   WARN_ON(en >= (NUM_JOB_SLOTS + 1));
 
list_move(>list, >as_lru_list);
goto out;
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c 
b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
index 684820448be3..6913578d5aa7 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
@@ -73,7 +73,7 @@ static int panfrost_perfcnt_enable_locked(struct 
panfrost_device *pfdev,
struct panfrost_file_priv *user = file_priv->driver_priv;
struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
struct drm_gem_shmem_object *bo;
-   u32 cfg;
+   u32 cfg, as;
int ret;
 
if (user == perfcnt->user)
@@ -126,12 +126,8 @@ static int panfrost_perfcnt_enable_locked(struct 
panfrost_device *pfdev,
 
perfcnt->user = user;
 
-   /*
-* Always use address space 0 for now.
-* FIXME: this needs to be updated when we start using different
-* address space.
-*/
-   cfg = GPU_PERFCNT_CFG_AS(0) |
+   as = panfrost_mmu_as_get(pfdev, perfcnt->mapping->mmu);
+   cfg = GPU_PERFCNT_CFG_AS(as) |
  GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_MANUAL);
 
/*
@@ -195,6 +191,7 @@ static int panfrost_perfcnt_disable_locked(struct 
panfrost_device *pfdev,
drm_gem_shmem_vunmap(>mapping->obj->base.base, perfcnt->buf);
perfcnt->buf = NULL;
panfrost_gem_close(>mapping->obj->base.base, file_priv);
+   panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu);
panfrost_gem_mapping_put(perfcnt->mapping);
perfcnt->mapping = NULL;
pm_runtime_mark_last_busy(pfdev->dev);
-- 
2.24.1

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH] drm/mediatek: Ensure the cursor plane is on top of other overlays

2020-02-06 Thread Sean Paul
On Thu, Feb 06, 2020 at 05:59:51PM +1100, evanb...@google.com wrote:
> From: Sean Paul 
> 
> Currently the cursor is placed on the first overlay plane, which means
> it will be at the bottom of the stack when the hw does the compositing
> with anything other than primary plane. Since mtk doesn't support plane
> zpos, change the cursor location to the top-most plane.
> 
> Signed-off-by: Evan Benn 

Hi Evan,
Thanks for spotting the issue! I think this should probably be 2 patches, one to
fix crtc init and then the cursor patch on top of that. We generally try to only
do one thing per patch.

A few other nits below..

> Signed-off-by: Sean Paul 
> ---
>  drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 29 +
>  1 file changed, 20 insertions(+), 9 deletions(-)
> 
> diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c 
> b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
> index 7b392d6c71cc..d4078c2089e0 100644
> --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
> +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
> @@ -658,10 +658,21 @@ static const struct drm_crtc_helper_funcs 
> mtk_crtc_helper_funcs = {
>  
>  static int mtk_drm_crtc_init(struct drm_device *drm,
>struct mtk_drm_crtc *mtk_crtc,
> -  struct drm_plane *primary,
> -  struct drm_plane *cursor, unsigned int pipe)
> +  unsigned int pipe)
>  {
> - int ret;
> + int i, ret;
> +

extra line

> + struct drm_plane *primary = NULL;
> + struct drm_plane *cursor = NULL;

These should be on top of the int declaration

> +
> + for (i = 0; i < mtk_crtc->layer_nr; ++i) {

We don't really do pre-increment in kernel for loops

> + if (!primary && mtk_crtc->planes[i].type ==
> + DRM_PLANE_TYPE_PRIMARY)

Line breaks should be around '&&':

if (!primary &&
mtk_crtc->planes[i].type == DRM_PLANE_TYPE_PRIMARY)

> + primary = _crtc->planes[i];
> + if (!cursor && mtk_crtc->planes[i].type ==

else if?

> + DRM_PLANE_TYPE_CURSOR)
> + cursor = _crtc->planes[i];


Since we can only have one primary and one cursor, the NULL checks on primary
and cursor are unnecessary, you can just blindly assign them when you hit a
plane of the right type. If the driver creates multiples the behavior is
undefined anyways.

> + }
>  
>   ret = drm_crtc_init_with_planes(drm, _crtc->base, primary, cursor,
>   _crtc_funcs, NULL);
> @@ -711,11 +722,12 @@ static int mtk_drm_crtc_num_comp_planes(struct 
> mtk_drm_crtc *mtk_crtc,
>  }
>  
>  static inline
> -enum drm_plane_type mtk_drm_crtc_plane_type(unsigned int plane_idx)
> +enum drm_plane_type mtk_drm_crtc_plane_type(unsigned int plane_idx,
> + unsigned int num_planes)
>  {
>   if (plane_idx == 0)
>   return DRM_PLANE_TYPE_PRIMARY;
> - else if (plane_idx == 1)
> + else if (plane_idx == (num_planes - 1))
>   return DRM_PLANE_TYPE_CURSOR;
>   else
>   return DRM_PLANE_TYPE_OVERLAY;
> @@ -734,7 +746,8 @@ static int mtk_drm_crtc_init_comp_planes(struct 
> drm_device *drm_dev,
>   ret = mtk_plane_init(drm_dev,
>   _crtc->planes[mtk_crtc->layer_nr],
>   BIT(pipe),
> - mtk_drm_crtc_plane_type(mtk_crtc->layer_nr),
> + mtk_drm_crtc_plane_type(mtk_crtc->layer_nr,
> + num_planes),
>   mtk_ddp_comp_supported_rotations(comp));
>   if (ret)
>   return ret;
> @@ -830,9 +843,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
>   return ret;
>   }
>  
> - ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, _crtc->planes[0],
> - mtk_crtc->layer_nr > 1 ? _crtc->planes[1] :
> - NULL, pipe);
> + ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, pipe);
>   if (ret < 0)
>   return ret;
>  
> -- 
> 2.25.0.341.g760bfbb309-goog
> 

-- 
Sean Paul, Software Engineer, Google / Chromium OS
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH v4] drm/scheduler: Avoid accessing freed bad job.

2020-02-06 Thread Christian König

Am 06.02.20 um 12:10 schrieb Lucas Stach:

Hi all,

On Mi, 2020-02-05 at 19:24 +0100, Lucas Stach wrote:

Hi Andrey,

This commit breaks all drivers, which may bail out of the timeout
processing as they wish to extend the timeout (etnaviv, v3d).

Those drivers currently just return from the timeout handler before
calling drm_sched_stop(), which means with this commit applied we are
removing the first job from the ring_mirror_list, but never put it
back. This leads to jobs getting lost from the ring mirror, which then
causes quite a bit of fallout like unsignaled fences.

Not sure yet what to do about it, we can either add a function to add
the job back to the ring_mirror if the driver wants to extend the
timeout, or we could look for another way to stop
drm_sched_cleanup_jobs from freeing jobs that are currently in timeout
processing.

So after thinking about this a bit more my opinion is that we need to
revert this change for now and go back to the drawing board for the
scheduler timeout handling.

Right now this starts to feel like a big midlayer mistake with all the
very intricate intertwining between the drivers and the scheduler. The
rules on when it's safe to manipulate the ring mirror and when
completed jobs are signaled and freed are not really well specified.
The fact that we need to mutate state in order to get rid of races
instead of having a single big "timeout processing is owner of the
scheduler state for now" is a big fat warning sign IMHO.


Yes, that strongly feels like a hack to me as well. But I didn't had 
time and still haven't to take a closer look and suggest something better.


Christian.



It took me far longer than I'd like to admit to understand the failure
mode with fences not getting signaled after a GPU hang. The back and
forth between scheduler and driver code makes things really hard to
follow.

Regards,
Lucas


Regards,
Lucas

On Mo, 2019-11-25 at 15:51 -0500, Andrey Grodzovsky wrote:

Problem:
Due to a race between drm_sched_cleanup_jobs in sched thread and
drm_sched_job_timedout in timeout work there is a possibility that
bad job was already freed while still being accessed from the
timeout thread.

Fix:
Instead of just peeking at the bad job in the mirror list
remove it from the list under lock and then put it back later when
we are guaranteed no race with main sched thread is possible which
is after the thread is parked.

v2: Lock around processing ring_mirror_list in drm_sched_cleanup_jobs.

v3: Rebase on top of drm-misc-next. v2 is not needed anymore as
drm_sched_get_cleanup_job already has a lock there.

v4: Fix comments to reflect latest code in drm-misc.

Signed-off-by: Andrey Grodzovsky 
Reviewed-by: Christian König 
Tested-by: Emily Deng 
---
  drivers/gpu/drm/scheduler/sched_main.c | 27 +++
  1 file changed, 27 insertions(+)

diff --git a/drivers/gpu/drm/scheduler/sched_main.c 
b/drivers/gpu/drm/scheduler/sched_main.c
index 6774955..1bf9c40 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -284,10 +284,21 @@ static void drm_sched_job_timedout(struct work_struct 
*work)
unsigned long flags;
  
  	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);

+
+   /* Protects against concurrent deletion in drm_sched_get_cleanup_job */
+   spin_lock_irqsave(>job_list_lock, flags);
job = list_first_entry_or_null(>ring_mirror_list,
   struct drm_sched_job, node);
  
  	if (job) {

+   /*
+* Remove the bad job so it cannot be freed by concurrent
+* drm_sched_cleanup_jobs. It will be reinserted back after 
sched->thread
+* is parked at which point it's safe.
+*/
+   list_del_init(>node);
+   spin_unlock_irqrestore(>job_list_lock, flags);
+
job->sched->ops->timedout_job(job);
  
  		/*

@@ -298,6 +309,8 @@ static void drm_sched_job_timedout(struct work_struct *work)
job->sched->ops->free_job(job);
sched->free_guilty = false;
}
+   } else {
+   spin_unlock_irqrestore(>job_list_lock, flags);
}
  
  	spin_lock_irqsave(>job_list_lock, flags);

@@ -370,6 +383,20 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, 
struct drm_sched_job *bad)
kthread_park(sched->thread);
  
  	/*

+* Reinsert back the bad job here - now it's safe as
+* drm_sched_get_cleanup_job cannot race against us and release the
+* bad job at this point - we parked (waited for) any in progress
+* (earlier) cleanups and drm_sched_get_cleanup_job will not be called
+* now until the scheduler thread is unparked.
+*/
+   if (bad && bad->sched == sched)
+   /*
+* Add at the head of the queue to reflect it was the earliest
+* job extracted.
+*/
+ 

Re: [PATCH 00/11] drm/virtio: fixes and cleanups for vbuf queuing

2020-02-06 Thread Gerd Hoffmann
On Wed, Feb 05, 2020 at 10:19:44AM -0800, Chia-I Wu wrote:
> This series consists of fixes and cleanups for
> virtio_gpu_queue_fenced_ctrl_buffer, except for the last patch.  The fixes are
> for corner cases that were overlooked.  The cleanups make the last patch
> easier, but they should be good in themselves as well.

Pushed most patches.

> The last patch changes the disable_notify mechanism to call
> virtqueue_kick_prepare only once in virtio_gpu_enable_notify.  It should be
> more efficient than doing that after each command is queued.
> 
> There is a follow-up patch that replaces the global disable_notify state by
> command-level bools to disable notify for individual commands, so that one
> process cannot affect another process.  I can include it in v2 if you want to
> review it together with this series.

Can you rebase & resend so I can see all notify patches together?

thanks,
  Gerd

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH] drm/virtio: fix ring free check

2020-02-06 Thread Gerd Hoffmann
If the virtio device supports indirect ring descriptors we need only one
ring entry for the whole command.  Take that into account when checking
whenever the virtqueue has enough free entries for our command.

Signed-off-by: Gerd Hoffmann 
---
 drivers/gpu/drm/virtio/virtgpu_vq.c | 9 ++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c 
b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 41e475fbd67b..a2ec09dba530 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -328,7 +328,8 @@ static void virtio_gpu_queue_ctrl_sgs(struct 
virtio_gpu_device *vgdev,
 {
struct virtqueue *vq = vgdev->ctrlq.vq;
bool notify = false;
-   int ret;
+   bool indirect;
+   int vqcnt, ret;
 
 again:
spin_lock(>ctrlq.qlock);
@@ -341,9 +342,11 @@ static void virtio_gpu_queue_ctrl_sgs(struct 
virtio_gpu_device *vgdev,
return;
}
 
-   if (vq->num_free < elemcnt) {
+   indirect = virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC);
+   vqcnt = indirect ? 1 : elemcnt;
+   if (vq->num_free < vqcnt) {
spin_unlock(>ctrlq.qlock);
-   wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
+   wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= vqcnt);
goto again;
}
 
-- 
2.18.1

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH v4] drm/scheduler: Avoid accessing freed bad job.

2020-02-06 Thread Lucas Stach
Hi all,

On Mi, 2020-02-05 at 19:24 +0100, Lucas Stach wrote:
> Hi Andrey,
> 
> This commit breaks all drivers, which may bail out of the timeout
> processing as they wish to extend the timeout (etnaviv, v3d).
> 
> Those drivers currently just return from the timeout handler before
> calling drm_sched_stop(), which means with this commit applied we are
> removing the first job from the ring_mirror_list, but never put it
> back. This leads to jobs getting lost from the ring mirror, which then
> causes quite a bit of fallout like unsignaled fences.
> 
> Not sure yet what to do about it, we can either add a function to add
> the job back to the ring_mirror if the driver wants to extend the
> timeout, or we could look for another way to stop
> drm_sched_cleanup_jobs from freeing jobs that are currently in timeout
> processing.

So after thinking about this a bit more my opinion is that we need to
revert this change for now and go back to the drawing board for the
scheduler timeout handling.

Right now this starts to feel like a big midlayer mistake with all the
very intricate intertwining between the drivers and the scheduler. The
rules on when it's safe to manipulate the ring mirror and when
completed jobs are signaled and freed are not really well specified.
The fact that we need to mutate state in order to get rid of races
instead of having a single big "timeout processing is owner of the
scheduler state for now" is a big fat warning sign IMHO.

It took me far longer than I'd like to admit to understand the failure
mode with fences not getting signaled after a GPU hang. The back and
forth between scheduler and driver code makes things really hard to
follow.

Regards,
Lucas

> Regards,
> Lucas
> 
> On Mo, 2019-11-25 at 15:51 -0500, Andrey Grodzovsky wrote:
> > Problem:
> > Due to a race between drm_sched_cleanup_jobs in sched thread and
> > drm_sched_job_timedout in timeout work there is a possibility that
> > bad job was already freed while still being accessed from the
> > timeout thread.
> > 
> > Fix:
> > Instead of just peeking at the bad job in the mirror list
> > remove it from the list under lock and then put it back later when
> > we are garanteed no race with main sched thread is possible which
> > is after the thread is parked.
> > 
> > v2: Lock around processing ring_mirror_list in drm_sched_cleanup_jobs.
> > 
> > v3: Rebase on top of drm-misc-next. v2 is not needed anymore as
> > drm_sched_get_cleanup_job already has a lock there.
> > 
> > v4: Fix comments to reflect latest code in drm-misc.
> > 
> > Signed-off-by: Andrey Grodzovsky 
> > Reviewed-by: Christian König 
> > Tested-by: Emily Deng 
> > ---
> >  drivers/gpu/drm/scheduler/sched_main.c | 27 +++
> >  1 file changed, 27 insertions(+)
> > 
> > diff --git a/drivers/gpu/drm/scheduler/sched_main.c 
> > b/drivers/gpu/drm/scheduler/sched_main.c
> > index 6774955..1bf9c40 100644
> > --- a/drivers/gpu/drm/scheduler/sched_main.c
> > +++ b/drivers/gpu/drm/scheduler/sched_main.c
> > @@ -284,10 +284,21 @@ static void drm_sched_job_timedout(struct work_struct 
> > *work)
> > unsigned long flags;
> >  
> > sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
> > +
> > +   /* Protects against concurrent deletion in drm_sched_get_cleanup_job */
> > +   spin_lock_irqsave(>job_list_lock, flags);
> > job = list_first_entry_or_null(>ring_mirror_list,
> >struct drm_sched_job, node);
> >  
> > if (job) {
> > +   /*
> > +* Remove the bad job so it cannot be freed by concurrent
> > +* drm_sched_cleanup_jobs. It will be reinserted back after 
> > sched->thread
> > +* is parked at which point it's safe.
> > +*/
> > +   list_del_init(>node);
> > +   spin_unlock_irqrestore(>job_list_lock, flags);
> > +
> > job->sched->ops->timedout_job(job);
> >  
> > /*
> > @@ -298,6 +309,8 @@ static void drm_sched_job_timedout(struct work_struct 
> > *work)
> > job->sched->ops->free_job(job);
> > sched->free_guilty = false;
> > }
> > +   } else {
> > +   spin_unlock_irqrestore(>job_list_lock, flags);
> > }
> >  
> > spin_lock_irqsave(>job_list_lock, flags);
> > @@ -370,6 +383,20 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, 
> > struct drm_sched_job *bad)
> > kthread_park(sched->thread);
> >  
> > /*
> > +* Reinsert back the bad job here - now it's safe as
> > +* drm_sched_get_cleanup_job cannot race against us and release the
> > +* bad job at this point - we parked (waited for) any in progress
> > +* (earlier) cleanups and drm_sched_get_cleanup_job will not be called
> > +* now until the scheduler thread is unparked.
> > +*/
> > +   if (bad && bad->sched == sched)
> > +   /*
> > +* Add at the head of the queue to reflect it was the earliest
> > +* job 

[PATCH v3] drm/hdcp: optimizing the srm handling

2020-02-06 Thread Ramalingam C
As we are not using the sysfs infrastructure anymore, link to it is
removed. And global srm data and mutex to protect it are removed,
with required handling at revocation check function.

v2:
  srm_data is dropped and few more comments are addressed.
v3:
  ptr passing around is fixed with functional testing.

Signed-off-by: Ramalingam C 
Suggested-by: Sean Paul 
---
 drivers/gpu/drm/drm_hdcp.c | 158 -
 drivers/gpu/drm/drm_internal.h |   4 -
 drivers/gpu/drm/drm_sysfs.c|   2 -
 include/drm/drm_hdcp.h |   4 +-
 4 files changed, 61 insertions(+), 107 deletions(-)

diff --git a/drivers/gpu/drm/drm_hdcp.c b/drivers/gpu/drm/drm_hdcp.c
index 9191633a3c43..10b735aafa64 100644
--- a/drivers/gpu/drm/drm_hdcp.c
+++ b/drivers/gpu/drm/drm_hdcp.c
@@ -23,14 +23,6 @@
 
 #include "drm_internal.h"
 
-static struct hdcp_srm {
-   u32 revoked_ksv_cnt;
-   u8 *revoked_ksv_list;
-
-   /* Mutex to protect above struct member */
-   struct mutex mutex;
-} *srm_data;
-
 static inline void drm_hdcp_print_ksv(const u8 *ksv)
 {
DRM_DEBUG("\t%#02x, %#02x, %#02x, %#02x, %#02x\n",
@@ -60,11 +52,11 @@ static u32 drm_hdcp_get_revoked_ksv_count(const u8 *buf, 
u32 vrls_length)
return ksv_count;
 }
 
-static u32 drm_hdcp_get_revoked_ksvs(const u8 *buf, u8 *revoked_ksv_list,
+static u32 drm_hdcp_get_revoked_ksvs(const u8 *buf, u8 **revoked_ksv_list,
 u32 vrls_length)
 {
-   u32 parsed_bytes = 0, ksv_count = 0;
u32 vrl_ksv_cnt, vrl_ksv_sz, vrl_idx = 0;
+   u32 parsed_bytes = 0, ksv_count = 0;
 
do {
vrl_ksv_cnt = *buf;
@@ -74,10 +66,10 @@ static u32 drm_hdcp_get_revoked_ksvs(const u8 *buf, u8 
*revoked_ksv_list,
 
DRM_DEBUG("vrl: %d, Revoked KSVs: %d\n", vrl_idx++,
  vrl_ksv_cnt);
-   memcpy(revoked_ksv_list, buf, vrl_ksv_sz);
+   memcpy((*revoked_ksv_list) + (ksv_count * DRM_HDCP_KSV_LEN),
+  buf, vrl_ksv_sz);
 
ksv_count += vrl_ksv_cnt;
-   revoked_ksv_list += vrl_ksv_sz;
buf += vrl_ksv_sz;
 
parsed_bytes += (vrl_ksv_sz + 1);
@@ -91,7 +83,8 @@ static inline u32 get_vrl_length(const u8 *buf)
return drm_hdcp_be24_to_cpu(buf);
 }
 
-static int drm_hdcp_parse_hdcp1_srm(const u8 *buf, size_t count)
+static int drm_hdcp_parse_hdcp1_srm(const u8 *buf, size_t count,
+   u8 **revoked_ksv_list, u32 *revoked_ksv_cnt)
 {
struct hdcp_srm_header *header;
u32 vrl_length, ksv_count;
@@ -131,29 +124,28 @@ static int drm_hdcp_parse_hdcp1_srm(const u8 *buf, size_t 
count)
ksv_count = drm_hdcp_get_revoked_ksv_count(buf, vrl_length);
if (!ksv_count) {
DRM_DEBUG("Revoked KSV count is 0\n");
-   return count;
+   return 0;
}
 
-   kfree(srm_data->revoked_ksv_list);
-   srm_data->revoked_ksv_list = kcalloc(ksv_count, DRM_HDCP_KSV_LEN,
-GFP_KERNEL);
-   if (!srm_data->revoked_ksv_list) {
+   *revoked_ksv_list = kcalloc(ksv_count, DRM_HDCP_KSV_LEN, GFP_KERNEL);
+   if (!*revoked_ksv_list) {
DRM_ERROR("Out of Memory\n");
return -ENOMEM;
}
 
-   if (drm_hdcp_get_revoked_ksvs(buf, srm_data->revoked_ksv_list,
+   if (drm_hdcp_get_revoked_ksvs(buf, revoked_ksv_list,
  vrl_length) != ksv_count) {
-   srm_data->revoked_ksv_cnt = 0;
-   kfree(srm_data->revoked_ksv_list);
+   *revoked_ksv_cnt = 0;
+   kfree(*revoked_ksv_list);
return -EINVAL;
}
 
-   srm_data->revoked_ksv_cnt = ksv_count;
-   return count;
+   *revoked_ksv_cnt = ksv_count;
+   return 0;
 }
 
-static int drm_hdcp_parse_hdcp2_srm(const u8 *buf, size_t count)
+static int drm_hdcp_parse_hdcp2_srm(const u8 *buf, size_t count,
+   u8 **revoked_ksv_list, u32 *revoked_ksv_cnt)
 {
struct hdcp_srm_header *header;
u32 vrl_length, ksv_count, ksv_sz;
@@ -195,13 +187,11 @@ static int drm_hdcp_parse_hdcp2_srm(const u8 *buf, size_t 
count)
ksv_count = (*buf << 2) | DRM_HDCP_2_KSV_COUNT_2_LSBITS(*(buf + 1));
if (!ksv_count) {
DRM_DEBUG("Revoked KSV count is 0\n");
-   return count;
+   return 0;
}
 
-   kfree(srm_data->revoked_ksv_list);
-   srm_data->revoked_ksv_list = kcalloc(ksv_count, DRM_HDCP_KSV_LEN,
-GFP_KERNEL);
-   if (!srm_data->revoked_ksv_list) {
+   *revoked_ksv_list = kcalloc(ksv_count, DRM_HDCP_KSV_LEN, GFP_KERNEL);
+   if (!*revoked_ksv_list) {
DRM_ERROR("Out of Memory\n");
return -ENOMEM;
}
@@ -210,10 +200,10 @@ static int 

[PATCH 3/4] drm/nouveau: Remove field nvbo from struct nouveau_framebuffer

2020-02-06 Thread Thomas Zimmermann
The buffer object stored in nvbo is also available as the GEM object in obj[0]
of struct drm_framebuffer. Therefore remove nvbo in favor of obj[0] and
replace all references accordingly. This may require an additional cast.

With this change we can already replace nouveau_user_framebuffer_destroy()
and nouveau_user_framebuffer_create_handle() with generic GEM helpers.
Calls to nouveau_framebuffer_new() receive a GEM object.

Signed-off-by: Thomas Zimmermann 
---
 drivers/gpu/drm/nouveau/dispnv04/crtc.c| 19 ++--
 drivers/gpu/drm/nouveau/dispnv04/disp.c| 21 ++---
 drivers/gpu/drm/nouveau/dispnv04/overlay.c | 21 +++--
 drivers/gpu/drm/nouveau/dispnv50/wndw.c| 25 +---
 drivers/gpu/drm/nouveau/nouveau_display.c  | 35 --
 drivers/gpu/drm/nouveau/nouveau_display.h  |  9 +++---
 drivers/gpu/drm/nouveau/nouveau_fbcon.c| 28 +
 7 files changed, 74 insertions(+), 84 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c 
b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 37c50ea8f847..ece877c727cd 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -605,15 +605,16 @@ static int
 nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
 {
struct nv04_display *disp = nv04_display(crtc->dev);
-   struct nouveau_framebuffer *nvfb = 
nouveau_framebuffer(crtc->primary->fb);
+   struct drm_framebuffer *fb = crtc->primary->fb;
+   struct nouveau_bo *nvbo = nouveau_gem_object(fb->obj[0]);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
int ret;
 
-   ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM, false);
+   ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
if (ret == 0) {
if (disp->image[nv_crtc->index])
nouveau_bo_unpin(disp->image[nv_crtc->index]);
-   nouveau_bo_ref(nvfb->nvbo, >image[nv_crtc->index]);
+   nouveau_bo_ref(nvbo, >image[nv_crtc->index]);
}
 
return ret;
@@ -822,8 +823,8 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nv04_crtc_reg *regp = 
_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
+   struct nouveau_bo *nvbo;
struct drm_framebuffer *drm_fb;
-   struct nouveau_framebuffer *fb;
int arb_burst, arb_lwm;
 
NV_DEBUG(drm, "index %d\n", nv_crtc->index);
@@ -839,13 +840,12 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
 */
if (atomic) {
drm_fb = passed_fb;
-   fb = nouveau_framebuffer(passed_fb);
} else {
drm_fb = crtc->primary->fb;
-   fb = nouveau_framebuffer(crtc->primary->fb);
}
 
-   nv_crtc->fb.offset = fb->nvbo->bo.offset;
+   nvbo = nouveau_gem_object(drm_fb->obj[0]);
+   nv_crtc->fb.offset = nvbo->bo.offset;
 
if (nv_crtc->lut.depth != drm_fb->format->depth) {
nv_crtc->lut.depth = drm_fb->format->depth;
@@ -1143,8 +1143,9 @@ nv04_crtc_page_flip(struct drm_crtc *crtc, struct 
drm_framebuffer *fb,
const int swap_interval = (flags & DRM_MODE_PAGE_FLIP_ASYNC) ? 0 : 1;
struct drm_device *dev = crtc->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
-   struct nouveau_bo *old_bo = 
nouveau_framebuffer(crtc->primary->fb)->nvbo;
-   struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
+   struct drm_framebuffer *old_fb = crtc->primary->fb;
+   struct nouveau_bo *old_bo = nouveau_gem_object(old_fb->obj[0]);
+   struct nouveau_bo *new_bo = nouveau_gem_object(fb->obj[0]);
struct nv04_page_flip_state *s;
struct nouveau_channel *chan;
struct nouveau_cli *cli;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c 
b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 44ee82d0c9b6..0f4ebefed1fd 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -30,6 +30,7 @@
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
 #include "nouveau_bo.h"
+#include "nouveau_gem.h"
 
 #include 
 
@@ -52,13 +53,13 @@ nv04_display_fini(struct drm_device *dev, bool suspend)
 
/* Un-pin FB and cursors so they'll be evicted to system memory. */
list_for_each_entry(crtc, >mode_config.crtc_list, head) {
-   struct nouveau_framebuffer *nouveau_fb;
+   struct drm_framebuffer *fb = crtc->primary->fb;
+   struct nouveau_bo *nvbo;
 
-   nouveau_fb = nouveau_framebuffer(crtc->primary->fb);
-   if (!nouveau_fb || !nouveau_fb->nvbo)
+   if (!fb || !fb->obj[0])
continue;
-
-   nouveau_bo_unpin(nouveau_fb->nvbo);
+   nvbo = nouveau_gem_object(fb->obj[0]);
+   nouveau_bo_unpin(nvbo);
}
 

[PATCH 2/4] drm/nouveau: Move struct nouveau_framebuffer.vma to struct nouveau_fbdev

2020-02-06 Thread Thomas Zimmermann
The vma field of struct nouveau_framebuffer is a special field for the
accelerated fbdev console. Hence there's at most one single instance
for the active console. Moving it into struct nouveau_fbdev makes struct
nouveau_framebuffer slightly smaller and brings it closer to struct
drm_framebuffer.

Signed-off-by: Thomas Zimmermann 
---
 drivers/gpu/drm/nouveau/nouveau_display.h | 1 -
 drivers/gpu/drm/nouveau/nouveau_fbcon.c   | 6 +++---
 drivers/gpu/drm/nouveau/nouveau_fbcon.h   | 3 +++
 drivers/gpu/drm/nouveau/nv50_fbcon.c  | 9 -
 drivers/gpu/drm/nouveau/nvc0_fbcon.c  | 9 -
 5 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h 
b/drivers/gpu/drm/nouveau/nouveau_display.h
index e397b3d246e5..0b3eb04b95a7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -11,7 +11,6 @@
 struct nouveau_framebuffer {
struct drm_framebuffer base;
struct nouveau_bo *nvbo;
-   struct nouveau_vma *vma;
 };
 
 static inline struct nouveau_framebuffer *
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c 
b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 0c5cdda3c336..6b2f46b0c115 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -353,7 +353,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
 
chan = nouveau_nofbaccel ? NULL : drm->channel;
if (chan && device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
-   ret = nouveau_vma_new(nvbo, chan->vmm, >vma);
+   ret = nouveau_vma_new(nvbo, chan->vmm, >vma);
if (ret) {
NV_ERROR(drm, "failed to map fb into chan: %d\n", ret);
chan = NULL;
@@ -400,7 +400,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
 
 out_unlock:
if (chan)
-   nouveau_vma_del(>vma);
+   nouveau_vma_del(>vma);
nouveau_bo_unmap(fb->nvbo);
 out_unpin:
nouveau_bo_unpin(fb->nvbo);
@@ -419,7 +419,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct 
nouveau_fbdev *fbcon)
drm_fb_helper_fini(>helper);
 
if (nouveau_fb && nouveau_fb->nvbo) {
-   nouveau_vma_del(_fb->vma);
+   nouveau_vma_del(>vma);
nouveau_bo_unmap(nouveau_fb->nvbo);
nouveau_bo_unpin(nouveau_fb->nvbo);
drm_framebuffer_put(_fb->base);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h 
b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index 73a7eeba3973..1796d8824580 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -31,6 +31,8 @@
 
 #include "nouveau_display.h"
 
+struct nouveau_vma;
+
 struct nouveau_fbdev {
struct drm_fb_helper helper; /* must be first */
unsigned int saved_flags;
@@ -41,6 +43,7 @@ struct nouveau_fbdev {
struct nvif_object gdi;
struct nvif_object blit;
struct nvif_object twod;
+   struct nouveau_vma *vma;
 
struct mutex hotplug_lock;
bool hotplug_waiting;
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c 
b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index facd18564e0d..47428f79ede8 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -149,7 +149,6 @@ int
 nv50_fbcon_accel_init(struct fb_info *info)
 {
struct nouveau_fbdev *nfbdev = info->par;
-   struct nouveau_framebuffer *fb = nouveau_framebuffer(nfbdev->helper.fb);
struct drm_device *dev = nfbdev->helper.dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_channel *chan = drm->channel;
@@ -240,8 +239,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
-   OUT_RING(chan, upper_32_bits(fb->vma->addr));
-   OUT_RING(chan, lower_32_bits(fb->vma->addr));
+   OUT_RING(chan, upper_32_bits(nfbdev->vma->addr));
+   OUT_RING(chan, lower_32_bits(nfbdev->vma->addr));
BEGIN_NV04(chan, NvSub2D, 0x0230, 2);
OUT_RING(chan, format);
OUT_RING(chan, 1);
@@ -249,8 +248,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
-   OUT_RING(chan, upper_32_bits(fb->vma->addr));
-   OUT_RING(chan, lower_32_bits(fb->vma->addr));
+   OUT_RING(chan, upper_32_bits(nfbdev->vma->addr));
+   OUT_RING(chan, lower_32_bits(nfbdev->vma->addr));
FIRE_RING(chan);
 
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c 
b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index c0deef4fe727..cb56163ed608 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -150,7 +150,6 @@ nvc0_fbcon_accel_init(struct fb_info 

[PATCH 1/4] drm/nouveau: Remove unused fields from struct nouveau_framebuffer

2020-02-06 Thread Thomas Zimmermann
Signed-off-by: Thomas Zimmermann 
---
 drivers/gpu/drm/nouveau/nouveau_display.h | 5 -
 1 file changed, 5 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h 
b/drivers/gpu/drm/nouveau/nouveau_display.h
index 6e8e66882e45..e397b3d246e5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -12,11 +12,6 @@ struct nouveau_framebuffer {
struct drm_framebuffer base;
struct nouveau_bo *nvbo;
struct nouveau_vma *vma;
-   u32 r_handle;
-   u32 r_format;
-   u32 r_pitch;
-   struct nvif_object h_base[4];
-   struct nvif_object h_core;
 };
 
 static inline struct nouveau_framebuffer *
-- 
2.25.0

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH 0/4] drm/nouveau: Remove struct nouveau_framebuffer

2020-02-06 Thread Thomas Zimmermann
All fields in struct nouveau_framebuffer appear to be obsolete. The
data structure can be replaced by struct drm_framebuffer entirely.

Patch 1 removes several unused fields from struct nouveau_framebuffer.

Patch 2 moves the field vma to struct nouveau_fbdev. The information
in vma is only relevant for fbdev emulation, and as such the field is
only used there.

Patch 3 removes nvbo from struct nouveau_framebuffer. The nouveau
buffer object is based on gem, and as such should be stored in obj[0]
of struct drm_framebuffer. This also enables the use of several generic
GEM framebuffer functions.

Finally patch 4 removes struct nouveau_framebuffer. At this point it's
merely a wrapper around struct drm_framebuffer.

The patchset has been smoke-tested on NV34 HW by running fbcon and X11.

Future directions: There are still functions for creating framebuffers.
With further refinements of nouveau's fbcon code, GEM framebuffer helpers
could be used here.

Thomas Zimmermann (4):
  drm/nouveau: Remove unused fields from struct nouveau_framebuffer
  drm/nouveau: Move struct nouveau_framebuffer.vma to struct
nouveau_fbdev
  drm/nouveau: Remove field nvbo from struct nouveau_framebuffer
  drm/nouveau: Remove struct nouveau_framebuffer

 drivers/gpu/drm/nouveau/dispnv04/crtc.c| 19 -
 drivers/gpu/drm/nouveau/dispnv04/disp.c| 21 +-
 drivers/gpu/drm/nouveau/dispnv04/overlay.c | 21 +-
 drivers/gpu/drm/nouveau/dispnv50/wndw.c| 45 -
 drivers/gpu/drm/nouveau/nouveau_display.c  | 47 ++
 drivers/gpu/drm/nouveau/nouveau_display.h  | 25 +++-
 drivers/gpu/drm/nouveau/nouveau_fbcon.c| 42 ++-
 drivers/gpu/drm/nouveau/nouveau_fbcon.h|  3 ++
 drivers/gpu/drm/nouveau/nv50_fbcon.c   |  9 ++---
 drivers/gpu/drm/nouveau/nvc0_fbcon.c   |  9 ++---
 10 files changed, 108 insertions(+), 133 deletions(-)

--
2.25.0

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH 4/4] drm/nouveau: Remove struct nouveau_framebuffer

2020-02-06 Thread Thomas Zimmermann
After its cleanup, struct nouveau_framebuffer is only a wrapper around
struct drm_framebuffer. Use the latter directly.

Signed-off-by: Thomas Zimmermann 
---
 drivers/gpu/drm/nouveau/dispnv50/wndw.c   | 26 +++
 drivers/gpu/drm/nouveau/nouveau_display.c | 14 ++--
 drivers/gpu/drm/nouveau/nouveau_display.h | 12 +--
 drivers/gpu/drm/nouveau/nouveau_fbcon.c   | 14 ++--
 4 files changed, 28 insertions(+), 38 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c 
b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index ba1399965a1c..4a67a656e007 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -40,11 +40,11 @@ nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma)
 }
 
 static struct nv50_wndw_ctxdma *
-nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct nouveau_framebuffer *fb)
+nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct drm_framebuffer *fb)
 {
-   struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
+   struct nouveau_drm *drm = nouveau_drm(fb->dev);
struct nv50_wndw_ctxdma *ctxdma;
-   struct nouveau_bo *nvbo = nouveau_gem_object(fb->base.obj[0]);
+   struct nouveau_bo *nvbo = nouveau_gem_object(fb->obj[0]);
const u8kind = nvbo->kind;
const u32 handle = 0xfb00 | kind;
struct {
@@ -236,16 +236,16 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, 
bool modeset,
   struct nv50_wndw_atom *asyw,
   struct nv50_head_atom *asyh)
 {
-   struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb);
+   struct drm_framebuffer *fb = asyw->state.fb;
struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
-   struct nouveau_bo *nvbo = nouveau_gem_object(fb->base.obj[0]);
+   struct nouveau_bo *nvbo = nouveau_gem_object(fb->obj[0]);
int ret;
 
NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);
 
-   if (asyw->state.fb != armw->state.fb || !armw->visible || modeset) {
-   asyw->image.w = fb->base.width;
-   asyw->image.h = fb->base.height;
+   if (fb != armw->state.fb || !armw->visible || modeset) {
+   asyw->image.w = fb->width;
+   asyw->image.h = fb->height;
asyw->image.kind = nvbo->kind;
 
ret = nv50_wndw_atomic_check_acquire_rgb(asyw);
@@ -261,13 +261,13 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, 
bool modeset,
asyw->image.blockh = nvbo->mode >> 4;
else
asyw->image.blockh = nvbo->mode;
-   asyw->image.blocks[0] = fb->base.pitches[0] / 64;
+   asyw->image.blocks[0] = fb->pitches[0] / 64;
asyw->image.pitch[0] = 0;
} else {
asyw->image.layout = 1;
asyw->image.blockh = 0;
asyw->image.blocks[0] = 0;
-   asyw->image.pitch[0] = fb->base.pitches[0];
+   asyw->image.pitch[0] = fb->pitches[0];
}
 
if (!asyh->state.async_flip)
@@ -486,16 +486,16 @@ nv50_wndw_cleanup_fb(struct drm_plane *plane, struct 
drm_plane_state *old_state)
 static int
 nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
 {
-   struct nouveau_framebuffer *fb = nouveau_framebuffer(state->fb);
+   struct drm_framebuffer *fb = state->fb;
struct nouveau_drm *drm = nouveau_drm(plane->dev);
struct nv50_wndw *wndw = nv50_wndw(plane);
struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
-   struct nouveau_bo *nvbo = nouveau_gem_object(state->fb->obj[0]);
+   struct nouveau_bo *nvbo = nouveau_gem_object(fb->obj[0]);
struct nv50_head_atom *asyh;
struct nv50_wndw_ctxdma *ctxdma;
int ret;
 
-   NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, state->fb);
+   NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, fb);
if (!asyw->state.fb)
return 0;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c 
b/drivers/gpu/drm/nouveau/nouveau_display.c
index bbbff55eb5d5..94f7fd48e1cf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -207,10 +207,10 @@ int
 nouveau_framebuffer_new(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *gem,
-   struct nouveau_framebuffer **pfb)
+   struct drm_framebuffer **pfb)
 {
struct nouveau_drm *drm = nouveau_drm(dev);
-   struct nouveau_framebuffer *fb;
+   struct drm_framebuffer *fb;
int ret;
 
 /* YUV overlays have special requirements pre-NV50 */
@@ -236,10 +236,10 @@ nouveau_framebuffer_new(struct drm_device *dev,
if (!(fb = *pfb 

Re: [PATCH 09/11] drm/virtio: avoid an infinite loop

2020-02-06 Thread Gerd Hoffmann
On Wed, Feb 05, 2020 at 10:19:53AM -0800, Chia-I Wu wrote:
> Make sure elemcnt does not exceed the maximum element count in
> virtio_gpu_queue_ctrl_sgs.  We should improve our error handling or
> impose a size limit on execbuffer, which are TODOs.

Hmm, virtio supports indirect ring entries, so large execbuffers should
not be a problem ...

So I've waded through the virtio code.  Figured our logic is wrong.
Luckily we err on the safe side (waiting for more free entries than we
actually need).  The patch below should fix that (not tested yet).

cheers,
  Gerd

diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c 
b/drivers/gpu/drm/virtio/virtgpu_vq.c
index aa25e8781404..535399b3a3ea 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -328,7 +328,7 @@ static bool virtio_gpu_queue_ctrl_sgs(struct 
virtio_gpu_device *vgdev,
 {
struct virtqueue *vq = vgdev->ctrlq.vq;
bool notify = false;
-   int ret;
+   int vqcnt, ret;
 
 again:
spin_lock(>ctrlq.qlock);
@@ -341,9 +341,10 @@ static bool virtio_gpu_queue_ctrl_sgs(struct 
virtio_gpu_device *vgdev,
return notify;
}
 
-   if (vq->num_free < elemcnt) {
+   vqcnt = virtqueue_use_indirect(vq, elemcnt) ? 1 : elemcnt;
+   if (vq->num_free < vqcnt) {
spin_unlock(>ctrlq.qlock);
-   wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
+   wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= vq);
goto again;
}
 

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH 4/4] drm/virtio: move virtio_gpu_mem_entry initialization to new function

2020-02-06 Thread Gerd Hoffmann
  Hi,

> > virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
> > -  ents, nents,
> > +  obj->ents, obj->nents,
> >fence);
> > +   obj->ents = NULL;
> > +   obj->nents = 0;
> Hm, if the entries are temporary, can we allocate and initialize them
> in this function?

Well, the plan for CREATE_RESOURCE_BLOB is to use obj->ents too ...

cheers,
  Gerd

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH v2 04/12] drm/i915/color: conversion to drm_device based logging macros.

2020-02-06 Thread Wambui Karuga
Initial conversion of the straightforward printk based logging macros to
the struct drm_device based logging macros in
i915/display/intel_color.c.

Signed-off-by: Wambui Karuga 
---
 drivers/gpu/drm/i915/display/intel_color.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/display/intel_color.c 
b/drivers/gpu/drm/i915/display/intel_color.c
index 2087a1852486..d44bd8287801 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -1192,7 +1192,8 @@ static int check_luts(const struct intel_crtc_state 
*crtc_state)
 
/* C8 relies on its palette being stored in the legacy LUT */
if (crtc_state->c8_planes) {
-   DRM_DEBUG_KMS("C8 pixelformat requires the legacy LUT\n");
+   drm_dbg_kms(_priv->drm,
+   "C8 pixelformat requires the legacy LUT\n");
return -EINVAL;
}
 
-- 
2.25.0

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH 3/3] gpu: host1x: Set DMA direction only for DMA-mapped buffer objects

2020-02-06 Thread Dmitry Osipenko
04.02.2020 16:59, Thierry Reding пишет:
> From: Thierry Reding 
> 
> The DMA direction is only used by the DMA API, so there is no use in
> setting it when a buffer object isn't mapped with the DMA API.
> 
> Signed-off-by: Thierry Reding 
> ---
>  drivers/gpu/host1x/job.c | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
> index 8198a4d42c77..a10643aa89aa 100644
> --- a/drivers/gpu/host1x/job.c
> +++ b/drivers/gpu/host1x/job.c
> @@ -248,6 +248,7 @@ static unsigned int pin_job(struct host1x *host, struct 
> host1x_job *job)
>   goto unpin;
>   }
>  
> + job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
>   job->unpins[job->num_unpins].dev = host->dev;
>   phys_addr = sg_dma_address(sgt->sgl);
>   }
> @@ -255,7 +256,6 @@ static unsigned int pin_job(struct host1x *host, struct 
> host1x_job *job)
>   job->addr_phys[job->num_unpins] = phys_addr;
>   job->gather_addr_phys[i] = phys_addr;
>  
> - job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
>   job->unpins[job->num_unpins].bo = g->bo;
>   job->unpins[job->num_unpins].sgt = sgt;
>   job->num_unpins++;
> 


Reviewed-by: Dmitry Osipenko 
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH 2/3] drm/tegra: Reuse IOVA mapping where possible

2020-02-06 Thread Dmitry Osipenko
04.02.2020 16:59, Thierry Reding пишет:
> From: Thierry Reding 
> 
> This partially reverts the DMA API support that was recently merged
> because it was causing performance regressions on older Tegra devices.
> Unfortunately, the cache maintenance performed by dma_map_sg() and
> dma_unmap_sg() causes performance to drop by a factor of 10.
> 
> The right solution for this would be to cache mappings for buffers per
> consumer device, but that's a bit involved. Instead, we simply revert to
> the old behaviour of sharing IOVA mappings when we know that devices can
> do so (i.e. they share the same IOMMU domain).

Needs a stable tag:

Cc:  # v5.5

> Reported-by: Dmitry Osipenko 
> Signed-off-by: Thierry Reding 
> ---
>  drivers/gpu/drm/tegra/gem.c   | 10 +++-
>  drivers/gpu/drm/tegra/plane.c | 44 ---
>  drivers/gpu/host1x/job.c  | 32 ++---
>  3 files changed, 63 insertions(+), 23 deletions(-)

Otherwise LGTM,

Reviewed-by: Dmitry Osipenko 
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH v2 10/12] drm/i915/dsi_vbt: convert to drm_device based logging macros.

2020-02-06 Thread Wambui Karuga
Convert various instances of the printk based drm logging macros to the
struct drm_device based logging macros in i915/display/intel_dsi_vbt.c.
This also involves extracting the drm_i915_private device from the
intel_dsi type for use in the logging macros.

This converts DRM_DEBUG/DRM_DEBUG_DRIVER to drm_dbg().

References: 
https://lists.freedesktop.org/archives/dri-devel/2020-January/253381.html
Signed-off-by: Wambui Karuga 
---
 drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 162 +++
 1 file changed, 99 insertions(+), 63 deletions(-)

diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c 
b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index 3914cfdab511..694498f4b719 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -136,7 +136,7 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi 
*intel_dsi,
u16 len;
enum port port;
 
-   DRM_DEBUG_KMS("\n");
+   drm_dbg_kms(_priv->drm, "\n");
 
flags = *data++;
type = *data++;
@@ -158,7 +158,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi 
*intel_dsi,
 
dsi_device = intel_dsi->dsi_hosts[port]->device;
if (!dsi_device) {
-   DRM_DEBUG_KMS("no dsi device for port %c\n", port_name(port));
+   drm_dbg_kms(_priv->drm, "no dsi device for port %c\n",
+   port_name(port));
goto out;
}
 
@@ -182,7 +183,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi 
*intel_dsi,
case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
-   DRM_DEBUG_DRIVER("Generic Read not yet implemented or used\n");
+   drm_dbg(_priv->drm,
+   "Generic Read not yet implemented or used\n");
break;
case MIPI_DSI_GENERIC_LONG_WRITE:
mipi_dsi_generic_write(dsi_device, data, len);
@@ -194,7 +196,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi 
*intel_dsi,
mipi_dsi_dcs_write_buffer(dsi_device, data, 2);
break;
case MIPI_DSI_DCS_READ:
-   DRM_DEBUG_DRIVER("DCS Read not yet implemented or used\n");
+   drm_dbg(_priv->drm,
+   "DCS Read not yet implemented or used\n");
break;
case MIPI_DSI_DCS_LONG_WRITE:
mipi_dsi_dcs_write_buffer(dsi_device, data, len);
@@ -212,9 +215,10 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi 
*intel_dsi,
 
 static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
 {
+   struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
u32 delay = *((const u32 *) data);
 
-   DRM_DEBUG_KMS("\n");
+   drm_dbg_kms(>drm, "\n");
 
usleep_range(delay, delay + 10);
data += 4;
@@ -231,7 +235,8 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
u8 port;
 
if (gpio_index >= ARRAY_SIZE(vlv_gpio_table)) {
-   DRM_DEBUG_KMS("unknown gpio index %u\n", gpio_index);
+   drm_dbg_kms(_priv->drm, "unknown gpio index %u\n",
+   gpio_index);
return;
}
 
@@ -244,10 +249,11 @@ static void vlv_exec_gpio(struct drm_i915_private 
*dev_priv,
if (gpio_source == 0) {
port = IOSF_PORT_GPIO_NC;
} else if (gpio_source == 1) {
-   DRM_DEBUG_KMS("SC gpio not supported\n");
+   drm_dbg_kms(_priv->drm, "SC gpio not supported\n");
return;
} else {
-   DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source);
+   drm_dbg_kms(_priv->drm,
+   "unknown gpio source %u\n", gpio_source);
return;
}
}
@@ -291,13 +297,15 @@ static void chv_exec_gpio(struct drm_i915_private 
*dev_priv,
} else {
/* XXX: The spec is unclear about CHV GPIO on seq v2 */
if (gpio_source != 0) {
-   DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source);
+   drm_dbg_kms(_priv->drm,
+   "unknown gpio source %u\n", gpio_source);
return;
}
 
if (gpio_index >= CHV_GPIO_IDX_START_E) {
-   DRM_DEBUG_KMS("invalid gpio index %u for GPIO N\n",
- gpio_index);
+   drm_dbg_kms(_priv->drm,
+   "invalid gpio index %u for GPIO N\n",
+   gpio_index);
return;
}
 
@@ -332,8 +340,9 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv,

[PATCH v4 2/3] drm: bridge: Add support for Cadence MHDP DPI/DP bridge

2020-02-06 Thread Yuti Amonkar
This patch adds new DRM driver for Cadence MHDP DPTX IP used on J721e SoC.
MHDP DPTX IP is the component that complies with VESA DisplayPort (DP) and
embedded Display Port (eDP) standards. It integrates uCPU running the
embedded Firmware(FW) interfaced over APB interface.
Basically, it takes a DPI stream as input and outputs it encoded in DP
format. Currently, it supports only SST mode.

Signed-off-by: Yuti Amonkar 
---
 drivers/gpu/drm/bridge/Kconfig  |   11 +
 drivers/gpu/drm/bridge/Makefile |3 +
 drivers/gpu/drm/bridge/cdns-mhdp-core.c | 2206 +++
 drivers/gpu/drm/bridge/cdns-mhdp-core.h |  380 
 4 files changed, 2600 insertions(+)
 create mode 100644 drivers/gpu/drm/bridge/cdns-mhdp-core.c
 create mode 100644 drivers/gpu/drm/bridge/cdns-mhdp-core.h

diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 8397bf72d2f3..c66f2ef04f71 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -27,6 +27,17 @@ config DRM_CDNS_DSI
  Support Cadence DPI to DSI bridge. This is an internal
  bridge and is meant to be directly embedded in a SoC.
 
+config DRM_CDNS_MHDP
+   tristate "Cadence DPI/DP bridge"
+   select DRM_KMS_HELPER
+   select DRM_PANEL_BRIDGE
+   depends on OF
+   help
+ Support Cadence DPI to DP bridge. This is an internal
+ bridge and is meant to be directly embedded in a SoC.
+ It takes a DPI stream as input and output it encoded
+ in DP format.
+
 config DRM_DUMB_VGA_DAC
tristate "Dumb VGA DAC Bridge support"
depends on OF
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 1eb5376c5d68..71019088d257 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -15,6 +15,9 @@ obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
 obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
 obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o
 obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o
+obj-$(CONFIG_DRM_CDNS_MHDP) += cdns-mhdp.o
 
 obj-y += analogix/
 obj-y += synopsys/
+
+cdns-mhdp-objs := cdns-mhdp-core.o
diff --git a/drivers/gpu/drm/bridge/cdns-mhdp-core.c 
b/drivers/gpu/drm/bridge/cdns-mhdp-core.c
new file mode 100644
index ..51ed9cdee161
--- /dev/null
+++ b/drivers/gpu/drm/bridge/cdns-mhdp-core.c
@@ -0,0 +1,2206 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence MHDP DP bridge driver.
+ *
+ * Copyright: 2019 Cadence Design Systems, Inc.
+ *
+ * Author: Quentin Schulz 
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include 
+
+#include "cdns-mhdp-core.h"
+
+static const struct of_device_id mhdp_ids[] = {
+   { .compatible = "cdns,mhdp8546", },
+   { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mhdp_ids);
+
+static inline u32 get_unaligned_be24(const void *p)
+{
+   const u8 *_p = p;
+
+   return _p[0] << 16 | _p[1] << 8 | _p[2];
+}
+
+static inline void put_unaligned_be24(u32 val, void *p)
+{
+   u8 *_p = p;
+
+   _p[0] = val >> 16;
+   _p[1] = val >> 8;
+   _p[2] = val;
+}
+
+static int cdns_mhdp_mailbox_read(struct cdns_mhdp_device *mhdp)
+{
+   int val, ret;
+
+   WARN_ON(!mutex_is_locked(>mbox_mutex));
+
+   ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_EMPTY,
+val, !val, MAILBOX_RETRY_US,
+MAILBOX_TIMEOUT_US);
+   if (ret < 0)
+   return ret;
+
+   return readl(mhdp->regs + CDNS_MAILBOX_RX_DATA) & 0xff;
+}
+
+static int cdns_mhdp_mailbox_write(struct cdns_mhdp_device *mhdp, u8 val)
+{
+   int ret, full;
+
+   WARN_ON(!mutex_is_locked(>mbox_mutex));
+
+   ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_FULL,
+full, !full, MAILBOX_RETRY_US,
+MAILBOX_TIMEOUT_US);
+   if (ret < 0)
+   return ret;
+
+   writel(val, mhdp->regs + CDNS_MAILBOX_TX_DATA);
+
+   return 0;
+}
+
+static int cdns_mhdp_mailbox_validate_receive(struct cdns_mhdp_device *mhdp,
+ u8 module_id, u8 opcode,
+ u16 req_size)
+{
+   u32 mbox_size, i;
+   u8 header[4];
+   int ret;
+
+   /* read the header of the message */
+   for (i = 0; i < 4; i++) {
+   ret = cdns_mhdp_mailbox_read(mhdp);
+   if (ret < 0)
+   return ret;
+
+   header[i] = ret;
+   }
+
+   mbox_size = get_unaligned_be16(header + 2);
+
+   if (opcode != header[0] || module_id != header[1] ||
+   req_size != mbox_size) {
+   /*
+* If the message in mailbox is not what 

Re: [PATCH 1/3] fbdev/g364fb: Fix build failure

2020-02-06 Thread Philippe Mathieu-Daudé
On 2/5/20 7:02 PM, Philippe Mathieu-Daudé wrote:
> On Sun, Feb 2, 2020 at 3:41 AM Finn Thain  wrote:
>>
>> This patch resolves these compiler errors and warnings --
>>
>>   CC  drivers/video/fbdev/g364fb.o
>> drivers/video/fbdev/g364fb.c: In function 'g364fb_cursor':
>> drivers/video/fbdev/g364fb.c:137:9: error: 'x' undeclared (first use in this 
>> function)
>> drivers/video/fbdev/g364fb.c:137:9: note: each undeclared identifier is 
>> reported only once for each function it appears in
>> drivers/video/fbdev/g364fb.c:137:7: error: implicit declaration of function 
>> 'fontwidth' [-Werror=implicit-function-declaration]
>> drivers/video/fbdev/g364fb.c:137:23: error: 'p' undeclared (first use in 
>> this function)
>> drivers/video/fbdev/g364fb.c:137:38: error: 'y' undeclared (first use in 
>> this function)
>> drivers/video/fbdev/g364fb.c:137:7: error: implicit declaration of function 
>> 'fontheight' [-Werror=implicit-function-declaration]
>> drivers/video/fbdev/g364fb.c: In function 'g364fb_init':
>> drivers/video/fbdev/g364fb.c:233:24: error: 'fbvar' undeclared (first use in 
>> this function)
>> drivers/video/fbdev/g364fb.c:234:24: error: 'xres' undeclared (first use in 
>> this function)
> 
> 18 years unnoticed...
> 
>> drivers/video/fbdev/g364fb.c:201:14: warning: unused variable 'j' 
>> [-Wunused-variable]
>> drivers/video/fbdev/g364fb.c:197:25: warning: unused variable 'pal_ptr' 
>> [-Wunused-variable]
>>
>> The MIPS Magnum framebuffer console now works when tested in QEMU.
>>
>> Cc: Bartlomiej Zolnierkiewicz 
>> Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
> 
> This commit is the kernel 'git origin' import, not the proper reference.
> 
> The actual change is between v2.5.17/2.5.19:
> https://git.kernel.org/pub/scm/linux/kernel/git/tglx/history.git/diff/drivers/video/g364fb.c?id=b30e6e183a728923267
> Date: 2002-05-22 07:52:33...
> 
> The same commit introduced the changes in g364fb_cursor(), which was
> implemented previous to v2.4.0 so it is hard to follow from there.
> 
> Nobody has complained in 18 years, so I doubt anyone cares that
> g364fb_cursor() is removed.
> And by removing it, you improve the kernel quality, so:
> Reviewed-by: Philippe Mathieu-Daudé 
> (Maybe remove the unhelpful 'Fixes' tag).
> 
>> Signed-off-by: Finn Thain 
>> ---
>>  drivers/video/fbdev/g364fb.c | 29 +++--
>>  1 file changed, 3 insertions(+), 26 deletions(-)

Note, you need to rebase your series due to:

  commit 8a48ac339398f21282985bff16552447d41dcfb2
  Author: Jani Nikula 
  Date:   Tue Dec 3 18:38:50 2019 +0200

  video: constify fb ops across all drivers
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH v2 07/12] drm/i915/dpll_mgr: convert to drm_device based logging macros.

2020-02-06 Thread Wambui Karuga
Conversion of instances of printk based drm logging macros to the struct
drm_device based logging macros in i915/display/intel_dpll_mgr.c.
This also involves extracting the struct drm_i915_private device pointer
from various intel types to use in the drm_device based macros.

Note that this converts DRM_DEBUG_DRIVER to drm_dbg().

References: 
https://lists.freedesktop.org/archives/dri-devel/2020-January/253381.html
Signed-off-by: Wambui Karuga 
---
 drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 254 ++
 1 file changed, 142 insertions(+), 112 deletions(-)

diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c 
b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 2e596e88cf09..e5bfe5245276 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -147,7 +147,7 @@ void intel_prepare_shared_dpll(const struct 
intel_crtc_state *crtc_state)
mutex_lock(_priv->dpll_lock);
drm_WARN_ON(_priv->drm, !pll->state.crtc_mask);
if (!pll->active_mask) {
-   DRM_DEBUG_DRIVER("setting up %s\n", pll->info->name);
+   drm_dbg(_priv->drm, "setting up %s\n", pll->info->name);
drm_WARN_ON(_priv->drm, pll->on);
assert_shared_dpll_disabled(dev_priv, pll);
 
@@ -182,9 +182,10 @@ void intel_enable_shared_dpll(const struct 
intel_crtc_state *crtc_state)
 
pll->active_mask |= crtc_mask;
 
-   DRM_DEBUG_KMS("enable %s (active %x, on? %d) for crtc %d\n",
- pll->info->name, pll->active_mask, pll->on,
- crtc->base.base.id);
+   drm_dbg_kms(_priv->drm,
+   "enable %s (active %x, on? %d) for crtc %d\n",
+   pll->info->name, pll->active_mask, pll->on,
+   crtc->base.base.id);
 
if (old_mask) {
drm_WARN_ON(_priv->drm, !pll->on);
@@ -193,7 +194,7 @@ void intel_enable_shared_dpll(const struct intel_crtc_state 
*crtc_state)
}
drm_WARN_ON(_priv->drm, pll->on);
 
-   DRM_DEBUG_KMS("enabling %s\n", pll->info->name);
+   drm_dbg_kms(_priv->drm, "enabling %s\n", pll->info->name);
pll->info->funcs->enable(dev_priv, pll);
pll->on = true;
 
@@ -225,9 +226,10 @@ void intel_disable_shared_dpll(const struct 
intel_crtc_state *crtc_state)
if (drm_WARN_ON(_priv->drm, !(pll->active_mask & crtc_mask)))
goto out;
 
-   DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n",
- pll->info->name, pll->active_mask, pll->on,
- crtc->base.base.id);
+   drm_dbg_kms(_priv->drm,
+   "disable %s (active %x, on? %d) for crtc %d\n",
+   pll->info->name, pll->active_mask, pll->on,
+   crtc->base.base.id);
 
assert_shared_dpll_enabled(dev_priv, pll);
drm_WARN_ON(_priv->drm, !pll->on);
@@ -236,7 +238,7 @@ void intel_disable_shared_dpll(const struct 
intel_crtc_state *crtc_state)
if (pll->active_mask)
goto out;
 
-   DRM_DEBUG_KMS("disabling %s\n", pll->info->name);
+   drm_dbg_kms(_priv->drm, "disabling %s\n", pll->info->name);
pll->info->funcs->disable(dev_priv, pll);
pll->on = false;
 
@@ -272,20 +274,21 @@ intel_find_shared_dpll(struct intel_atomic_state *state,
if (memcmp(pll_state,
   _dpll[i].hw_state,
   sizeof(*pll_state)) == 0) {
-   DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc 
mask 0x%08x, active %x)\n",
- crtc->base.base.id, crtc->base.name,
- pll->info->name,
- shared_dpll[i].crtc_mask,
- pll->active_mask);
+   drm_dbg_kms(_priv->drm,
+   "[CRTC:%d:%s] sharing existing %s (crtc 
mask 0x%08x, active %x)\n",
+   crtc->base.base.id, crtc->base.name,
+   pll->info->name,
+   shared_dpll[i].crtc_mask,
+   pll->active_mask);
return pll;
}
}
 
/* Ok no matching timings, maybe there's a free one? */
if (unused_pll) {
-   DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
- crtc->base.base.id, crtc->base.name,
- unused_pll->info->name);
+   drm_dbg_kms(_priv->drm, "[CRTC:%d:%s] allocated %s\n",
+   crtc->base.base.id, crtc->base.name,
+   unused_pll->info->name);
return unused_pll;
}
 
@@ -298,6 +301,7 @@ intel_reference_shared_dpll(struct intel_atomic_state 
*state,
const struct intel_shared_dpll *pll,
 

Re: [PATCH 1/3] fbdev/g364fb: Fix build failure

2020-02-06 Thread Finn Thain
On Wed, 5 Feb 2020, Philippe Mathieu-Daudé wrote:

> Note, you need to rebase your series due to:
> 
>   commit 8a48ac339398f21282985bff16552447d41dcfb2
>   Author: Jani Nikula 
>   Date:   Tue Dec 3 18:38:50 2019 +0200
> 
>   video: constify fb ops across all drivers
> 

OK.

Thanks for your review.
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH v4 3/3] drm: bridge: cdns-mhdp: add j721e wrapper

2020-02-06 Thread Yuti Amonkar
Add j721e wrapper for mhdp, which sets up the clock and data muxes.

Signed-off-by: Yuti Amonkar 
---
 drivers/gpu/drm/bridge/Kconfig   | 12 
 drivers/gpu/drm/bridge/Makefile  |  3 +
 drivers/gpu/drm/bridge/cdns-mhdp-core.c  | 14 +
 drivers/gpu/drm/bridge/cdns-mhdp-core.h  |  1 +
 drivers/gpu/drm/bridge/cdns-mhdp-j721e.c | 79 
 drivers/gpu/drm/bridge/cdns-mhdp-j721e.h | 55 +
 6 files changed, 164 insertions(+)
 create mode 100644 drivers/gpu/drm/bridge/cdns-mhdp-j721e.c
 create mode 100644 drivers/gpu/drm/bridge/cdns-mhdp-j721e.h

diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index c66f2ef04f71..32e3bc5edae8 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -38,6 +38,18 @@ config DRM_CDNS_MHDP
  It takes a DPI stream as input and output it encoded
  in DP format.
 
+if DRM_CDNS_MHDP
+
+config DRM_CDNS_MHDP_J721E
+   bool "J721E Cadence DPI/DP wrapper support"
+   default y
+   help
+ Support J721E Cadence DPI/DP wrapper. This is a wrapper
+ which adds support for J721E related platform ops. It
+ initializes the J721e Display Port and sets up the
+ clock and data muxes.
+endif
+
 config DRM_DUMB_VGA_DAC
tristate "Dumb VGA DAC Bridge support"
depends on OF
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 71019088d257..7e6c64f9021f 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -21,3 +21,6 @@ obj-y += analogix/
 obj-y += synopsys/
 
 cdns-mhdp-objs := cdns-mhdp-core.o
+ifeq ($(CONFIG_DRM_CDNS_MHDP_J721E),y)
+   cdns-mhdp-objs += cdns-mhdp-j721e.o
+endif
diff --git a/drivers/gpu/drm/bridge/cdns-mhdp-core.c 
b/drivers/gpu/drm/bridge/cdns-mhdp-core.c
index 51ed9cdee161..8483b6b1023b 100644
--- a/drivers/gpu/drm/bridge/cdns-mhdp-core.c
+++ b/drivers/gpu/drm/bridge/cdns-mhdp-core.c
@@ -36,8 +36,22 @@
 
 #include "cdns-mhdp-core.h"
 
+#include "cdns-mhdp-j721e.h"
+
+#ifdef CONFIG_DRM_CDNS_MHDP_J721E
+static const struct mhdp_platform_ops mhdp_ti_j721e_ops = {
+   .init = cdns_mhdp_j721e_init,
+   .exit = cdns_mhdp_j721e_fini,
+   .enable = cdns_mhdp_j721e_enable,
+   .disable = cdns_mhdp_j721e_disable,
+};
+#endif
+
 static const struct of_device_id mhdp_ids[] = {
{ .compatible = "cdns,mhdp8546", },
+#ifdef CONFIG_DRM_CDNS_MHDP_J721E
+   { .compatible = "ti,j721e-mhdp8546", .data = _ti_j721e_ops },
+#endif
{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, mhdp_ids);
diff --git a/drivers/gpu/drm/bridge/cdns-mhdp-core.h 
b/drivers/gpu/drm/bridge/cdns-mhdp-core.h
index 2f3b67987832..67a99eab5db3 100644
--- a/drivers/gpu/drm/bridge/cdns-mhdp-core.h
+++ b/drivers/gpu/drm/bridge/cdns-mhdp-core.h
@@ -335,6 +335,7 @@ struct mhdp_platform_ops {
 
 struct cdns_mhdp_device {
void __iomem *regs;
+   void __iomem *j721e_regs;
 
struct device *dev;
struct clk *clk;
diff --git a/drivers/gpu/drm/bridge/cdns-mhdp-j721e.c 
b/drivers/gpu/drm/bridge/cdns-mhdp-j721e.c
new file mode 100644
index ..a87faf55c065
--- /dev/null
+++ b/drivers/gpu/drm/bridge/cdns-mhdp-j721e.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TI j721e Cadence MHDP DP wrapper
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Jyri Sarha 
+#include 
+
+#include "cdns-mhdp-j721e.h"
+
+#define REVISION                        0x00
+#define DPTX_IPCFG                      0x04
+#define ECC_MEM_CFG                     0x08
+#define DPTX_DSC_CFG                    0x0c
+#define DPTX_SRC_CFG                    0x10
+#define DPTX_VIF_SECURE_MODE_CFG        0x14
+#define DPTX_VIF_CONN_STATUS            0x18
+#define PHY_CLK_STATUS                  0x1c
+
+#define DPTX_SRC_AIF_EN                 BIT(16)
+#define DPTX_SRC_VIF_3_IN30B            BIT(11)
+#define DPTX_SRC_VIF_2_IN30B            BIT(10)
+#define DPTX_SRC_VIF_1_IN30B            BIT(9)
+#define DPTX_SRC_VIF_0_IN30B            BIT(8)
+#define DPTX_SRC_VIF_3_SEL_DPI5         BIT(7)
+#define DPTX_SRC_VIF_3_SEL_DPI3         0
+#define DPTX_SRC_VIF_2_SEL_DPI4         BIT(6)
+#define DPTX_SRC_VIF_2_SEL_DPI2         0
+#define DPTX_SRC_VIF_1_SEL_DPI3         BIT(5)
+#define DPTX_SRC_VIF_1_SEL_DPI1         0
+#define DPTX_SRC_VIF_0_SEL_DPI2         BIT(4)
+#define DPTX_SRC_VIF_0_SEL_DPI0         0
+#define DPTX_SRC_VIF_3_EN               BIT(3)
+#define DPTX_SRC_VIF_2_EN               BIT(2)
+#define DPTX_SRC_VIF_1_EN               BIT(1)
+#define DPTX_SRC_VIF_0_EN               BIT(0)
+
+/* TODO turn DPTX_IPCFG fw_mem_clk_en at pm_runtime_suspend. */
+
+int cdns_mhdp_j721e_init(struct cdns_mhdp_device *mhdp)
+{
+   struct platform_device *pdev = to_platform_device(mhdp->dev);
+   struct resource 

[PATCH v2 03/12] drm/i915/atomic: conversion to drm_device based logging macros.

2020-02-06 Thread Wambui Karuga
Conversion of the printk based drm logging macros to the struct
drm_device based logging macros in i915/display/intel_atomic.c
This change was achieved using the following coccinelle script that
matches based on the existence of a drm_i915_private device pointer:
@@
identifier fn, T;
@@

fn(...,struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(>drm,
...)
|
-DRM_ERROR(
+drm_err(>drm,
...)
|
-DRM_WARN(
+drm_warn(>drm,
...)
|
-DRM_DEBUG(
+drm_dbg(>drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(>drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(>drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(>drm,
...)
)
...+>
}

@@
identifier fn, T;
@@

fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(>drm,
...)
|
-DRM_ERROR(
+drm_err(>drm,
...)
|
-DRM_WARN(
+drm_warn(>drm,
...)
|
-DRM_DEBUG(
+drm_dbg(>drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(>drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(>drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(>drm,
...)
)
...+>
}

Checkpatch warnings were fixed manually.

Signed-off-by: Wambui Karuga 
---
 drivers/gpu/drm/i915/display/intel_atomic.c | 23 -
 1 file changed, 13 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c 
b/drivers/gpu/drm/i915/display/intel_atomic.c
index 379c12f3b1d4..d043057d2fa0 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -66,8 +66,9 @@ int intel_digital_connector_atomic_get_property(struct 
drm_connector *connector,
else if (property == dev_priv->broadcast_rgb_property)
*val = intel_conn_state->broadcast_rgb;
else {
-   DRM_DEBUG_ATOMIC("Unknown property [PROP:%d:%s]\n",
-property->base.id, property->name);
+   drm_dbg_atomic(_priv->drm,
+  "Unknown property [PROP:%d:%s]\n",
+  property->base.id, property->name);
return -EINVAL;
}
 
@@ -103,8 +104,8 @@ int intel_digital_connector_atomic_set_property(struct 
drm_connector *connector,
return 0;
}
 
-   DRM_DEBUG_ATOMIC("Unknown property [PROP:%d:%s]\n",
-property->base.id, property->name);
+   drm_dbg_atomic(_priv->drm, "Unknown property [PROP:%d:%s]\n",
+  property->base.id, property->name);
return -EINVAL;
 }
 
@@ -362,8 +363,8 @@ static void intel_atomic_setup_scaler(struct 
intel_crtc_scaler_state *scaler_sta
mode = SKL_PS_SCALER_MODE_DYN;
}
 
-   DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
- intel_crtc->pipe, *scaler_id, name, idx);
+   drm_dbg_kms(_priv->drm, "Attached scaler id %u.%u to %s:%d\n",
+   intel_crtc->pipe, *scaler_id, name, idx);
scaler_state->scalers[*scaler_id].mode = mode;
 }
 
@@ -414,8 +415,9 @@ int intel_atomic_setup_scalers(struct drm_i915_private 
*dev_priv,
 
/* fail if required scalers > available scalers */
if (num_scalers_need > intel_crtc->num_scalers){
-   DRM_DEBUG_KMS("Too many scaling requests %d > %d\n",
-   num_scalers_need, intel_crtc->num_scalers);
+   drm_dbg_kms(_priv->drm,
+   "Too many scaling requests %d > %d\n",
+   num_scalers_need, intel_crtc->num_scalers);
return -EINVAL;
}
 
@@ -460,8 +462,9 @@ int intel_atomic_setup_scalers(struct drm_i915_private 
*dev_priv,
plane = drm_plane_from_index(_priv->drm, i);
state = drm_atomic_get_plane_state(drm_state, 
plane);
if (IS_ERR(state)) {
-   DRM_DEBUG_KMS("Failed to add [PLANE:%d] 
to drm_state\n",
-   plane->base.id);
+   drm_dbg_kms(_priv->drm,
+   "Failed to add [PLANE:%d] 
to drm_state\n",
+   plane->base.id);
return PTR_ERR(state);
}
}
-- 
2.25.0

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH 2/3] drm/tegra: Reuse IOVA mapping where possible

2020-02-06 Thread Dmitry Osipenko
04.02.2020 16:59, Thierry Reding пишет:
> From: Thierry Reding 
> 
> This partially reverts the DMA API support that was recently merged
> because it was causing performance regressions on older Tegra devices.
> Unfortunately, the cache maintenance performed by dma_map_sg() and
> dma_unmap_sg() causes performance to drop by a factor of 10.
> 
> The right solution for this would be to cache mappings for buffers per
> consumer device, but that's a bit involved. Instead, we simply revert to
> the old behaviour of sharing IOVA mappings when we know that devices can
> do so (i.e. they share the same IOMMU domain).
> 
> Reported-by: Dmitry Osipenko 
> Signed-off-by: Thierry Reding 
> ---
>  drivers/gpu/drm/tegra/gem.c   | 10 +++-
>  drivers/gpu/drm/tegra/plane.c | 44 ---
>  drivers/gpu/host1x/job.c  | 32 ++---
>  3 files changed, 63 insertions(+), 23 deletions(-)

Tested-by: Dmitry Osipenko 
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCHv3 0/2] Add support for rm69299 Visionox panel driver and add devicetree bindings for visionox panel

2020-02-06 Thread Harigovindan P
Adding support for visionox rm69299 panel driver and adding bindings for the 
same panel.

Harigovindan P (2):
  dt-bindings: display: add visionox rm69299 panel variant
  drm/panel: add support for rm69299 visionox panel driver

 .../bindings/display/visionox,rm69299.yaml | 109 ++
 drivers/gpu/drm/panel/Kconfig  |   8 +
 drivers/gpu/drm/panel/Makefile |   1 +
 drivers/gpu/drm/panel/panel-visionox-rm69299.c | 371 +
 4 files changed, 489 insertions(+)
 create mode 100644 
Documentation/devicetree/bindings/display/visionox,rm69299.yaml
 create mode 100644 drivers/gpu/drm/panel/panel-visionox-rm69299.c

-- 
2.7.4

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH] drm/panfrost: Don't try to map on error faults

2020-02-06 Thread Alyssa Rosenzweig
Reviewed-by: Alyssa Rosenzweig  

Although it might be nice to

#define TRANSLATION_FAULT_LEVEL1 0xC1
...
#define TRANSLATION_FAULT_LEVEL4 0xC4

and then use semantic names instead of magic values. Minimally maybe add
a comment explaining that.

On Wed, Feb 05, 2020 at 11:07:16AM +0100, Tomeu Vizoso wrote:
> If the exception type isn't one of the normal faults, don't try to map
> and instead go straight to a terminal fault.
> 
> Otherwise, we can get flooded by kernel warnings and further faults.
> 
> Signed-off-by: Tomeu Vizoso 
> ---
>  drivers/gpu/drm/panfrost/panfrost_mmu.c | 5 +++--
>  1 file changed, 3 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c 
> b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> index 763cfca886a7..80abddb4544c 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> @@ -596,8 +596,9 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int 
> irq, void *data)
>   source_id = (fault_status >> 16);
>  
>   /* Page fault only */
> - if ((status & mask) == BIT(i)) {
> - WARN_ON(exception_type < 0xC1 || exception_type > 0xC4);
> + if ((status & mask) == BIT(i) &&
> +  exception_type >= 0xC1 &&
> +  exception_type <= 0xC4) {
>  
>   ret = panfrost_mmu_map_fault_addr(pfdev, i, addr);
>   if (!ret) {
> -- 
> 2.21.0
> 


signature.asc
Description: PGP signature
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH v2 05/12] drm/i915/crt: automatic conversion to drm_device based logging macros.

2020-02-06 Thread Wambui Karuga
Replaces various instances of the printk based logging macros with the
struct drm_device based logging macros in i915/display/intel_crt.c using
the following coccinelle script that matches based on the existence of a
drm_i915_private device pointer:
@@
identifier fn, T;
@@

fn(...,struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(>drm,
...)
|
-DRM_ERROR(
+drm_err(>drm,
...)
|
-DRM_WARN(
+drm_warn(>drm,
...)
|
-DRM_DEBUG(
+drm_dbg(>drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(>drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(>drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(>drm,
...)
)
...+>
}

@@
identifier fn, T;
@@

fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(>drm,
...)
|
-DRM_ERROR(
+drm_err(>drm,
...)
|
-DRM_WARN(
+drm_warn(>drm,
...)
|
-DRM_DEBUG(
+drm_dbg(>drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(>drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(>drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(>drm,
...)
)
...+>
}

Checkpatch warnings were addressed manually.

Signed-off-by: Wambui Karuga 
---
 drivers/gpu/drm/i915/display/intel_crt.c | 49 +++-
 1 file changed, 31 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/i915/display/intel_crt.c 
b/drivers/gpu/drm/i915/display/intel_crt.c
index 0e2f63b0d458..45ecc7d9c829 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -420,7 +420,8 @@ static int hsw_crt_compute_config(struct intel_encoder 
*encoder,
/* LPT FDI RX only supports 8bpc. */
if (HAS_PCH_LPT(dev_priv)) {
if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
-   DRM_DEBUG_KMS("LPT only supports 24bpp\n");
+   drm_dbg_kms(_priv->drm,
+   "LPT only supports 24bpp\n");
return -EINVAL;
}
 
@@ -449,7 +450,8 @@ static bool ilk_crt_detect_hotplug(struct drm_connector 
*connector)
crt->force_hotplug_required = false;
 
save_adpa = adpa = intel_de_read(dev_priv, crt->adpa_reg);
-   DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", 
adpa);
+   drm_dbg_kms(_priv->drm,
+   "trigger hotplug detect cycle: adpa=0x%x\n", adpa);
 
adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
if (turn_off_dac)
@@ -461,7 +463,8 @@ static bool ilk_crt_detect_hotplug(struct drm_connector 
*connector)
crt->adpa_reg,
ADPA_CRT_HOTPLUG_FORCE_TRIGGER,
1000))
-   DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
+   drm_dbg_kms(_priv->drm,
+   "timed out waiting for FORCE_TRIGGER");
 
if (turn_off_dac) {
intel_de_write(dev_priv, crt->adpa_reg, save_adpa);
@@ -475,7 +478,8 @@ static bool ilk_crt_detect_hotplug(struct drm_connector 
*connector)
ret = true;
else
ret = false;
-   DRM_DEBUG_KMS("ironlake hotplug adpa=0x%x, result %d\n", adpa, ret);
+   drm_dbg_kms(_priv->drm, "ironlake hotplug adpa=0x%x, result %d\n",
+   adpa, ret);
 
return ret;
 }
@@ -505,7 +509,8 @@ static bool valleyview_crt_detect_hotplug(struct 
drm_connector *connector)
reenable_hpd = intel_hpd_disable(dev_priv, crt->base.hpd_pin);
 
save_adpa = adpa = intel_de_read(dev_priv, crt->adpa_reg);
-   DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
+   drm_dbg_kms(_priv->drm,
+   "trigger hotplug detect cycle: adpa=0x%x\n", adpa);
 
adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
 
@@ -513,7 +518,8 @@ static bool valleyview_crt_detect_hotplug(struct 
drm_connector *connector)
 
if (intel_de_wait_for_clear(dev_priv, crt->adpa_reg,
ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 1000)) {
-   DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
+   drm_dbg_kms(_priv->drm,
+   "timed out waiting for FORCE_TRIGGER");
intel_de_write(dev_priv, crt->adpa_reg, save_adpa);
}
 
@@ -524,7 +530,8 @@ static bool valleyview_crt_detect_hotplug(struct 
drm_connector *connector)
else
ret = false;
 
-   DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
+   drm_dbg_kms(_priv->drm,
+   "valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
 
if (reenable_hpd)
intel_hpd_enable(dev_priv, crt->base.hpd_pin);
@@ -564,7 +571,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector 
*connector)
/* wait for FORCE_DETECT to go off */
if (intel_de_wait_for_clear(dev_priv, PORT_HOTPLUG_EN,

[PATCH v4 1/3] dt-bindings: drm/bridge: Document Cadence MHDP bridge bindings.

2020-02-06 Thread Yuti Amonkar
Document the bindings used for the Cadence MHDP DPI/DP bridge in
yaml format.

Signed-off-by: Yuti Amonkar 
Reviewed-by: Rob Herring 
---
 .../bindings/display/bridge/cdns,mhdp.yaml| 125 ++
 1 file changed, 125 insertions(+)
 create mode 100644 
Documentation/devicetree/bindings/display/bridge/cdns,mhdp.yaml

diff --git a/Documentation/devicetree/bindings/display/bridge/cdns,mhdp.yaml 
b/Documentation/devicetree/bindings/display/bridge/cdns,mhdp.yaml
new file mode 100644
index ..e7f84ed1d2da
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/cdns,mhdp.yaml
@@ -0,0 +1,125 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/display/bridge/cdns,mhdp.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Cadence MHDP bridge
+
+maintainers:
+  - Swapnil Jakhade 
+  - Yuti Amonkar 
+
+properties:
+  compatible:
+enum:
+  - cdns,mhdp8546
+  - ti,j721e-mhdp8546
+
+  reg:
+minItems: 1
+maxItems: 2
+items:
+  - description:
+  Register block of mhdptx apb registers upto PHY mapped 
area(AUX_CONFIG_P).
+  The AUX and PMA registers are mapped to associated phy driver.
+  - description:
+  Register block for DSS_EDP0_INTG_CFG_VP registers in case of TI J7 
SoCs.
+
+  reg-names:
+minItems: 1
+maxItems: 2
+items:
+  - const: mhdptx
+  - const: j721e-intg
+
+  clocks:
+maxItems: 1
+description:
+  DP bridge clock, it's used by the IP to know how to translate a number of
+  clock cycles into a time (which is used to comply with DP standard 
timings
+  and delays).
+
+  phys:
+description: Phandle to the DisplayPort phy.
+
+  ports:
+type: object
+description:
+  Ports as described in Documentation/devicetree/bindings/graph.txt
+
+properties:
+  '#address-cells':
+const: 1
+
+  '#size-cells':
+const: 0
+
+  port@0:
+type: object
+description:
+  input port representing the DP bridge input
+
+  port@1:
+type: object
+description:
+  output port representing the DP bridge output.
+
+required:
+  - port@0
+  - port@1
+  - '#address-cells'
+  - '#size-cells'
+
+allOf:
+  - if:
+  properties:
+compatible:
+  contains:
+const: ti,j721e-mhdp8546
+then:
+  properties:
+reg:
+  minItems: 2
+reg-names:
+  minItems: 2
+
+required:
+  - compatible
+  - clocks
+  - reg
+  - reg-names
+  - phys
+  - ports
+
+additionalProperties: false
+
+examples:
+  - |
+mhdp: dp-bridge@f0fb00 {
+compatible = "cdns,mhdp8546";
+reg = <0xf0 0xfb00 0x0 0x100>;
+reg-names = "mhdptx";
+clocks = <_clock>;
+phys = <_phy>;
+
+ports {
+  #address-cells = <1>;
+  #size-cells = <0>;
+
+  port@0 {
+ reg = <0>;
+ dp_bridge_input: endpoint {
+remote-endpoint = <_dpi_output>;
+ };
+  };
+
+  port@1 {
+ reg = <1>;
+ dp_bridge_output: endpoint {
+remote-endpoint = <_dp_connector_input>;
+ };
+  };
+};
+};
+...
-- 
2.20.1

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH v2 02/12] drm/i915/dp_link_training: convert to drm_device based logging macros.

2020-02-06 Thread Wambui Karuga
Converts various instances of the printk based drm logging macros to the
struct drm_device based logging macros in
i915/display/intel_dp_link_training.c.
This also involves extracting the drm_i915_private device pointer from
the intel_dp type to use in the various macros.

Signed-off-by: Wambui Karuga 
---
 .../drm/i915/display/intel_dp_link_training.c | 75 ---
 1 file changed, 46 insertions(+), 29 deletions(-)

diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c 
b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 2a1130dd1ad0..a7defb37ab00 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -130,6 +130,7 @@ static bool intel_dp_link_max_vswing_reached(struct 
intel_dp *intel_dp)
 static bool
 intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
 {
+   struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 voltage;
int voltage_tries, cr_tries, max_cr_tries;
bool max_vswing_reached = false;
@@ -143,9 +144,11 @@ intel_dp_link_training_clock_recovery(struct intel_dp 
*intel_dp)
  &link_bw, &rate_select);
 
if (link_bw)
-   DRM_DEBUG_KMS("Using LINK_BW_SET value %02x\n", link_bw);
+   drm_dbg_kms(&i915->drm,
+   "Using LINK_BW_SET value %02x\n", link_bw);
else
-   DRM_DEBUG_KMS("Using LINK_RATE_SET value %02x\n", rate_select);
+   drm_dbg_kms(&i915->drm,
+   "Using LINK_RATE_SET value %02x\n", rate_select);
 
/* Write the link configuration data */
link_config[0] = link_bw;
@@ -169,7 +172,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp 
*intel_dp)
if (!intel_dp_reset_link_train(intel_dp,
   DP_TRAINING_PATTERN_1 |
   DP_LINK_SCRAMBLING_DISABLE)) {
-   DRM_ERROR("failed to enable link training\n");
+   drm_err(&i915->drm, "failed to enable link training\n");
return false;
}
 
@@ -193,22 +196,23 @@ intel_dp_link_training_clock_recovery(struct intel_dp 
*intel_dp)
drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
 
if (!intel_dp_get_link_status(intel_dp, link_status)) {
-   DRM_ERROR("failed to get link status\n");
+   drm_err(&i915->drm, "failed to get link status\n");
return false;
}
 
if (drm_dp_clock_recovery_ok(link_status, 
intel_dp->lane_count)) {
-   DRM_DEBUG_KMS("clock recovery OK\n");
+   drm_dbg_kms(&i915->drm, "clock recovery OK\n");
return true;
}
 
if (voltage_tries == 5) {
-   DRM_DEBUG_KMS("Same voltage tried 5 times\n");
+   drm_dbg_kms(&i915->drm,
+   "Same voltage tried 5 times\n");
return false;
}
 
if (max_vswing_reached) {
-   DRM_DEBUG_KMS("Max Voltage Swing reached\n");
+   drm_dbg_kms(&i915->drm, "Max Voltage Swing reached\n");
return false;
}
 
@@ -217,7 +221,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp 
*intel_dp)
/* Update training set as requested by target */
intel_get_adjust_train(intel_dp, link_status);
if (!intel_dp_update_link_train(intel_dp)) {
-   DRM_ERROR("failed to update link training\n");
+   drm_err(&i915->drm,
+   "failed to update link training\n");
return false;
}
 
@@ -231,7 +236,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp 
*intel_dp)
max_vswing_reached = true;
 
}
-   DRM_ERROR("Failed clock recovery %d times, giving up!\n", max_cr_tries);
+   drm_err(&i915->drm,
+   "Failed clock recovery %d times, giving up!\n", max_cr_tries);
return false;
 }
 
@@ -256,9 +262,11 @@ static u32 intel_dp_training_pattern(struct intel_dp 
*intel_dp)
return DP_TRAINING_PATTERN_4;
} else if (intel_dp->link_rate == 81) {
if (!source_tps4)
-   DRM_DEBUG_KMS("8.1 Gbps link rate without source 
HBR3/TPS4 support\n");
+   drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+   "8.1 Gbps link rate without source 
HBR3/TPS4 support\n");
if (!sink_tps4)
-   DRM_DEBUG_KMS("8.1 Gbps link rate without sink TPS4 
support\n");
+   drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+   "8.1 Gbps link rate without sink TPS4 
support\n");
}
/*
 * 

[PATCH v4 0/3] drm: Add support for Cadence MHDP DPI/DP bridge and J721E wrapper.

2020-02-06 Thread Yuti Amonkar
This patch series adds new DRM driver for Cadence Display Port.
The Cadence Display Port is also referred as MHDP (Mobile High
Definition Link, High-Definition Multimedia Interface Display
Port) Cadence Display Port complies with VESA DisplayPort (DP)
and embedded Display Port (eDP) standards. This driver implements
Single Stream Transport (SST) support. Adds Texas Instruments SoC
J721e specific wrapper and adds the device tree bindings in YAML format.

The patch series has three patches which applies the changes in the below 
sequence
1. 001-dt-bindings-drm-bridge-Document-Cadence-MHDP-bridge-bindings
Documents the bindings in yaml format.
2. 002-drm-bridge-Add-support-for-Cadence-MHDP-bridge
This patch adds new DRM driver for Cadence MHDP Display Port. The patch 
implements supports
for single stream transport mode.
3. 003-drm-bridge-cdns-mhdp-add-j721e-wrapper
Add Texas Instruments (TI) j721e wrapper for mhdp. The wrapper configures mhdp 
clocks
and muxes as required by SoC.

Version History:

v4:
- Added SPDX dual license tag to YAML bindings.
- Corrected indentation of the child node properties.
- Removed the maxItems in the conditional statement.
- Removed phy-names property from the bindings.
- Add Reviewed-by: Rob Herring  tag to the
  "Document Cadence MHDP bridge bindings" patch.
- Renamed the DRM driver executable name from mhdp8546 to cdns-mhdp in Makefile.
- Renamed the DRM driver and header file from cdns-mhdp to cdns-mhdp-core.

v3:
- Added if / then clause to validate that the reg length is proper
  based on the value of the compatible property.
- Updated phy property description in YAML to a generic one.
- Renamed num_lanes and max_bit_rate property strings to cdns,num-lanes 
  and cdns,max-bit-rate based on update in PHY series [2].

v2:
- Use enum in compatible property of YAML file.
- Add reg-names property to YAML file
- Add minItems and maxItems to reg property in YAML.
- Remove cdns_mhdp_link_probe function to remove
  duplication of reading dpcd capabilities.

This patch series is dependent on PHY DisplayPort configuration patch [1]

[1]

https://lkml.org/lkml/2020/1/6/279

[2]

https://lkml.org/lkml/2020/2/6/15


Yuti Amonkar (3):
  dt-bindings: drm/bridge: Document Cadence MHDP bridge bindings.
  drm: bridge: Add support for Cadence MHDP DPI/DP bridge
  drm: bridge: cdns-mhdp: add j721e wrapper

 .../bindings/display/bridge/cdns,mhdp.yaml|  125 +
 drivers/gpu/drm/bridge/Kconfig|   23 +
 drivers/gpu/drm/bridge/Makefile   |6 +
 drivers/gpu/drm/bridge/cdns-mhdp-core.c   | 2220 +
 drivers/gpu/drm/bridge/cdns-mhdp-core.h   |  381 +++
 drivers/gpu/drm/bridge/cdns-mhdp-j721e.c  |   79 +
 drivers/gpu/drm/bridge/cdns-mhdp-j721e.h  |   55 +
 7 files changed, 2889 insertions(+)
 create mode 100644 
Documentation/devicetree/bindings/display/bridge/cdns,mhdp.yaml
 create mode 100644 drivers/gpu/drm/bridge/cdns-mhdp-core.c
 create mode 100644 drivers/gpu/drm/bridge/cdns-mhdp-core.h
 create mode 100644 drivers/gpu/drm/bridge/cdns-mhdp-j721e.c
 create mode 100644 drivers/gpu/drm/bridge/cdns-mhdp-j721e.h

-- 
2.20.1

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCHv3 1/2] dt-bindings: display: add visionox rm69299 panel variant

2020-02-06 Thread Harigovindan P
Add bindings for visionox rm69299 panel.

Signed-off-by: Harigovindan P 
---

Changes in v1:
- Added a compatible string to support sc7180 panel version.
Changes in v2:
- Removed unwanted properties from description.
- Creating source files without execute permissions(Rob Herring).
Changes in v3:
- Changing txt file into yaml

 .../bindings/display/visionox,rm69299.yaml | 109 +
 1 file changed, 109 insertions(+)
 create mode 100644 
Documentation/devicetree/bindings/display/visionox,rm69299.yaml

diff --git a/Documentation/devicetree/bindings/display/visionox,rm69299.yaml 
b/Documentation/devicetree/bindings/display/visionox,rm69299.yaml
new file mode 100644
index 000..bfcd46d
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/visionox,rm69299.yaml
@@ -0,0 +1,109 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/visionox,rm69299.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Visionox model RM69299 Panels Device Tree Bindings
+
+maintainers:
+  - Harigovindan P 
+  - Kalyan Thota 
+  - Vishnuvardhan Prodduturi 
+
+description:
  This binding is for display panels using a Visionox RM69299 panel.
+
+patternProperties:
+  "^(panel|panel-dsi)@[0-9]$":
+type: object
+description:
+  A node containing the panel or bridge description as documented in
+  Documentation/devicetree/bindings/display/mipi-dsi-bus.txt
+properties:
+  compatible:
+const: visionox,rm69299-1080p-display
+
+  reg:
+maxItems: 1
+
+  vdda-supply:
+description:
+  Phandle of the regulator that provides the vdda supply voltage.
+
+  vdd3p3-supply:
+description:
+  Phandle of the regulator that provides the vdd3p3 supply voltage.
+
+  pinctrl-names:
+items:
+  - const: default
+  - const: suspend
+
+  pinctrl-0:
+items:
+  - const: Display default pin
+  - const: Display default pin
+
+  ports:
+type: object
+description:
+  A node containing DSI input & output port nodes with endpoint
+  definitions as documented in
+  Documentation/devicetree/bindings/media/video-interfaces.txt
+  Documentation/devicetree/bindings/graph.txt
+properties:
+  port@0:
+type: object
+description:
+  DSI input port node.
+
+  "#address-cells":
+const: 1
+
+  "#size-cells":
+const: 0
+
+required:
+  - "#address-cells"
+  - "#size-cells"
+  - compatible
+  - reg
+  - vdda-supply
+  - vdd3p3-supply
+  - pinctrl-names
+  - pinctrl-0
+  - pinctrl-1
+  - reset-gpios
+
+additionalProperties: false
+
+examples:
+  -
+   dsi@ae94000 {
+   panel@0 {
+   compatible = "visionox,rm69299-1080p-display";
+   reg = <0>;
+
+   vdda-supply = <_pp1800_l8c>;
+   vdd3p3-supply = <_pp2800_l18a>;
+
+   pinctrl-names = "default", "suspend";
+   pinctrl-0 = <_pins_default>;
+   pinctrl-1 = <_pins_default>;
+
+   reset-gpios = <_gpios 3 0>;
+   ports {
+   #address-cells = <1>;
+   #size-cells = <0>;
+   port@0 {
+   reg = <0>;
+   panel0_in: endpoint {
+   remote-endpoint = <_out>;
+   };
+   };
+   };
+   };
+   };
+
+...
-- 
2.7.4

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH 1/3] drm/tegra: Relax IOMMU usage criteria on old Tegra

2020-02-06 Thread Dmitry Osipenko
04.02.2020 16:59, Thierry Reding пишет:
> From: Thierry Reding 
> 
> Older Tegra devices only allow addressing 32 bits of memory, so whether
> or not the host1x is attached to an IOMMU doesn't matter. host1x IOMMU
> attachment is only needed on devices that can address memory beyond the
> 32-bit boundary and where the host1x doesn't support the wide GATHER
> opcode that allows it to access buffers at higher addresses.
> 
> Signed-off-by: Thierry Reding 
> ---
>  drivers/gpu/drm/tegra/drm.c | 49 -
>  1 file changed, 32 insertions(+), 17 deletions(-)

Tested-by: Dmitry Osipenko 
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH v2 00/12] drm/i915/display: convert to drm_device based logging macros.

2020-02-06 Thread Wambui Karuga
This patchset continues the conversion of the printk based drm logging
macros in drm/i915 to use the struct drm_device based logging macros.
This series was done both using coccinelle and manually.

v2: rebase onto drm-tip to fix conflicts with new changes in drm/i915.

Wambui Karuga (12):
  drm/i915/dp: convert to struct drm_device based logging macros.
  drm/i915/dp_link_training: convert to drm_device based logging macros.
  drm/i915/atomic: conversion to drm_device based logging macros.
  drm/i915/color: conversion to drm_device based logging macros.
  drm/i915/crt: automatic conversion to drm_device based logging macros.
  drm/i915/dp_aux_backlight: convert to drm_device based logging macros.
  drm/i915/dpll_mgr: convert to drm_device based logging macros.
  drm/i915/combo_phy: convert to struct drm_device logging macros.
  drm/i915/dp_mst: convert to drm_device based logging macros.
  drm/i915/dsi_vbt: convert to drm_device based logging macros.
  drm/i915/hdmi: convert to struct drm_device based logging macros.
  drm/i915/dpio_phy: convert to drm_device based logging macros.

 drivers/gpu/drm/i915/display/intel_atomic.c   |  23 +-
 drivers/gpu/drm/i915/display/intel_color.c|   3 +-
 .../gpu/drm/i915/display/intel_combo_phy.c|  23 +-
 drivers/gpu/drm/i915/display/intel_crt.c  |  49 ++-
 drivers/gpu/drm/i915/display/intel_dp.c   | 320 +++---
 .../drm/i915/display/intel_dp_aux_backlight.c |  72 ++--
 .../drm/i915/display/intel_dp_link_training.c |  75 ++--
 drivers/gpu/drm/i915/display/intel_dp_mst.c   |  30 +-
 drivers/gpu/drm/i915/display/intel_dpio_phy.c |  28 +-
 drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 254 --
 drivers/gpu/drm/i915/display/intel_dsi_vbt.c  | 162 +
 drivers/gpu/drm/i915/display/intel_hdmi.c | 193 +++
 12 files changed, 754 insertions(+), 478 deletions(-)

-- 
2.25.0

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH v2] drm: shmobile: Reduce include dependencies

2020-02-06 Thread Andy Shevchenko
This file doesn't need everything provided by .
All it needs are some types, which are provided by .

Note,  already includes , but
not relying on implicit includes is indeed a good thing.

Signed-off-by: Andy Shevchenko 
---
v2: Update commit message (Geert, Laurent)
 include/linux/platform_data/shmob_drm.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/linux/platform_data/shmob_drm.h 
b/include/linux/platform_data/shmob_drm.h
index fe815d7d9f58..08605fdb2226 100644
--- a/include/linux/platform_data/shmob_drm.h
+++ b/include/linux/platform_data/shmob_drm.h
@@ -10,7 +10,7 @@
 #ifndef __SHMOB_DRM_H__
 #define __SHMOB_DRM_H__
 
-#include 
+#include 
 
 #include 
 
-- 
2.24.1

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH v2 06/12] drm/i915/dp_aux_backlight: convert to drm_device based logging macros.

2020-02-06 Thread Wambui Karuga
Conversion of the printk based drm logging macros to the struct
drm_device based logging macros in display/intel_dp_aux_backlight.c.
This also involves extracting the drm_i915_private device pointer from
various intel types to use in the macros.

Note that this converts DRM_DEBUG_DRIVER to drm_dbg().

References: 
https://lists.freedesktop.org/archives/dri-devel/2020-January/253381.html
Signed-off-by: Wambui Karuga 
---
 .../drm/i915/display/intel_dp_aux_backlight.c | 72 ---
 1 file changed, 45 insertions(+), 27 deletions(-)

diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c 
b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index e86feebef299..83d9c76e4da9 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -35,8 +35,9 @@ static void set_aux_backlight_enable(struct intel_dp 
*intel_dp, bool enable)
 
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
  &reg_val) < 0) {
-   DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
- DP_EDP_DISPLAY_CONTROL_REGISTER);
+   drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+   "Failed to read DPCD register 0x%x\n",
+   DP_EDP_DISPLAY_CONTROL_REGISTER);
return;
}
if (enable)
@@ -46,8 +47,9 @@ static void set_aux_backlight_enable(struct intel_dp 
*intel_dp, bool enable)
 
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
   reg_val) != 1) {
-   DRM_DEBUG_KMS("Failed to %s aux backlight\n",
- enable ? "enable" : "disable");
+   drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+   "Failed to %s aux backlight\n",
+   enable ? "enable" : "disable");
}
 }
 
@@ -65,8 +67,9 @@ static u32 intel_dp_aux_get_backlight(struct intel_connector 
*connector)
if (drm_dp_dpcd_readb(&intel_dp->aux,
  DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
  &mode_reg) != 1) {
-   DRM_DEBUG_KMS("Failed to read the DPCD register 0x%x\n",
- DP_EDP_BACKLIGHT_MODE_SET_REGISTER);
+   drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+   "Failed to read the DPCD register 0x%x\n",
+   DP_EDP_BACKLIGHT_MODE_SET_REGISTER);
return 0;
}
 
@@ -80,8 +83,9 @@ static u32 intel_dp_aux_get_backlight(struct intel_connector 
*connector)
 
if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
 &read_val, sizeof(read_val)) < 0) {
-   DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
- DP_EDP_BACKLIGHT_BRIGHTNESS_MSB);
+   drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+   "Failed to read DPCD register 0x%x\n",
+   DP_EDP_BACKLIGHT_BRIGHTNESS_MSB);
return 0;
}
level = read_val[0];
@@ -111,7 +115,8 @@ intel_dp_aux_set_backlight(const struct drm_connector_state 
*conn_state, u32 lev
}
if (drm_dp_dpcd_write(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
  vals, sizeof(vals)) < 0) {
-   DRM_DEBUG_KMS("Failed to write aux backlight level\n");
+   drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+   "Failed to write aux backlight level\n");
return;
}
 }
@@ -133,7 +138,8 @@ static bool intel_dp_aux_set_pwm_freq(struct 
intel_connector *connector)
 
freq = dev_priv->vbt.backlight.pwm_freq_hz;
if (!freq) {
-   DRM_DEBUG_KMS("Use panel default backlight frequency\n");
+   drm_dbg_kms(&dev_priv->drm,
+   "Use panel default backlight frequency\n");
return false;
}
 
@@ -146,13 +152,14 @@ static bool intel_dp_aux_set_pwm_freq(struct 
intel_connector *connector)
fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4);
 
if (fxp_min > fxp_actual || fxp_actual > fxp_max) {
-   DRM_DEBUG_KMS("Actual frequency out of range\n");
+   drm_dbg_kms(&dev_priv->drm, "Actual frequency out of range\n");
return false;
}
 
if (drm_dp_dpcd_writeb(&intel_dp->aux,
   DP_EDP_BACKLIGHT_FREQ_SET, (u8) f) < 0) {
-   DRM_DEBUG_KMS("Failed to write aux backlight freq\n");
+   drm_dbg_kms(&dev_priv->drm,
+   "Failed to write aux backlight freq\n");
return false;
}
return true;
@@ -162,14 +169,16 @@ static void intel_dp_aux_enable_backlight(const struct 
intel_crtc_state *crtc_st
  const struct drm_connector_state 
*conn_state)
 {
struct 

Re: [PATCH 3/3] gpu: host1x: Set DMA direction only for DMA-mapped buffer objects

2020-02-06 Thread Dmitry Osipenko
04.02.2020 16:59, Thierry Reding пишет:
> From: Thierry Reding 
> 
> The DMA direction is only used by the DMA API, so there is no use in
> setting it when a buffer object isn't mapped with the DMA API.
> 
> Signed-off-by: Thierry Reding 
> ---
>  drivers/gpu/host1x/job.c | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)

Tested-by: Dmitry Osipenko 
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


  1   2   >