Re: [PATCH] drm/amdgpu: add parameter to allocate high priority contexts v8

2017-04-25 Thread Andres Rodriguez



On 2017-04-25 06:21 PM, Alex Deucher wrote:

On Tue, Apr 25, 2017 at 4:28 PM, Andres Rodriguez  wrote:



On 2017-04-25 02:01 PM, Nicolai Hähnle wrote:


On 24.04.2017 18:20, Andres Rodriguez wrote:


Add a new context creation parameter to express a global context
priority.

The priority ranking in descending order is as follows:
 * AMDGPU_CTX_PRIORITY_HIGH
 * AMDGPU_CTX_PRIORITY_NORMAL
 * AMDGPU_CTX_PRIORITY_LOW

The driver will attempt to schedule work to the hardware according to
the priorities. No latency or throughput guarantees are provided by
this patch.

This interface intends to service the EGL_IMG_context_priority
extension, and vulkan equivalents.

v2: Instead of using flags, repurpose __pad
v3: Swap enum values of _NORMAL _HIGH for backwards compatibility
v4: Validate usermode priority and store it
v5: Move priority validation into amdgpu_ctx_ioctl(), headline reword
v6: add UAPI note regarding priorities requiring CAP_SYS_ADMIN
v7: remove ctx->priority
v8: added AMDGPU_CTX_PRIORITY_LOW, s/CAP_SYS_ADMIN/CAP_SYS_NICE

Reviewed-by: Emil Velikov 
Reviewed-by: Christian König 
Signed-off-by: Andres Rodriguez 



I didn't follow all the discussion, so feel free to shut me up if this
has already been discussed, but...


[snip]


+/* Context priority level */
+#define AMDGPU_CTX_PRIORITY_NORMAL0
+#define AMDGPU_CTX_PRIORITY_LOW1
+/* Selecting a priority above NORMAL requires CAP_SYS_ADMIN */
+#define AMDGPU_CTX_PRIORITY_HIGH2
+#define AMDGPU_CTX_PRIORITY_NUM3



I get that normal priority needs to be 0 for backwards compatibility,
but having LOW between NORMAL and HIGH is still odd. Have you considered
using signed integers as a way to fix that?



Thanks for the suggestion, that should make it a lot cleaner.


Maybe make the range -1023 to 1023 for consistency with the similar
proposed interface on Intel?
https://lists.freedesktop.org/archives/intel-gfx/2017-April/126155.html

Alex




Sure, that gives us a range to add new priority leves.

Andres



Regards,
Andres




(AMDGPU_CTX_PRIORITY_NUM doesn't seem to be used anywhere...)

Cheers,
Nicolai



+
 struct drm_amdgpu_ctx_in {
 /** AMDGPU_CTX_OP_* */
 __u32op;
 /** For future use, no flags defined so far */
 __u32flags;
 __u32ctx_id;
-__u32_pad;
+__u32priority;
 };

 union drm_amdgpu_ctx_out {
 struct {
 __u32ctx_id;
 __u32_pad;
 } alloc;

 struct {
 /** For future use, no flags defined so far */
 __u64flags;
 /** Number of resets caused by this context so far. */
 __u32hangs;
 /** Reset status since the last call of the ioctl. */
 __u32reset_status;
 } state;
 };

 union drm_amdgpu_ctx {
 struct drm_amdgpu_ctx_in in;





___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu: add parameter to allocate high priority contexts v8

2017-04-25 Thread Alex Deucher
On Tue, Apr 25, 2017 at 4:28 PM, Andres Rodriguez  wrote:
>
>
> On 2017-04-25 02:01 PM, Nicolai Hähnle wrote:
>>
>> On 24.04.2017 18:20, Andres Rodriguez wrote:
>>>
>>> Add a new context creation parameter to express a global context
>>> priority.
>>>
>>> The priority ranking in descending order is as follows:
>>>  * AMDGPU_CTX_PRIORITY_HIGH
>>>  * AMDGPU_CTX_PRIORITY_NORMAL
>>>  * AMDGPU_CTX_PRIORITY_LOW
>>>
>>> The driver will attempt to schedule work to the hardware according to
>>> the priorities. No latency or throughput guarantees are provided by
>>> this patch.
>>>
>>> This interface intends to service the EGL_IMG_context_priority
>>> extension, and vulkan equivalents.
>>>
>>> v2: Instead of using flags, repurpose __pad
>>> v3: Swap enum values of _NORMAL _HIGH for backwards compatibility
>>> v4: Validate usermode priority and store it
>>> v5: Move priority validation into amdgpu_ctx_ioctl(), headline reword
>>> v6: add UAPI note regarding priorities requiring CAP_SYS_ADMIN
>>> v7: remove ctx->priority
>>> v8: added AMDGPU_CTX_PRIORITY_LOW, s/CAP_SYS_ADMIN/CAP_SYS_NICE
>>>
>>> Reviewed-by: Emil Velikov 
>>> Reviewed-by: Christian König 
>>> Signed-off-by: Andres Rodriguez 
>>
>>
>> I didn't follow all the discussion, so feel free to shut me up if this
>> has already been discussed, but...
>>
>>
>> [snip]
>>>
>>> +/* Context priority level */
>>> +#define AMDGPU_CTX_PRIORITY_NORMAL0
>>> +#define AMDGPU_CTX_PRIORITY_LOW1
>>> +/* Selecting a priority above NORMAL requires CAP_SYS_ADMIN */
>>> +#define AMDGPU_CTX_PRIORITY_HIGH2
>>> +#define AMDGPU_CTX_PRIORITY_NUM3
>>
>>
>> I get that normal priority needs to be 0 for backwards compatibility,
>> but having LOW between NORMAL and HIGH is still odd. Have you considered
>> using signed integers as a way to fix that?
>
>
> Thanks for the suggestion, that should make it a lot cleaner.

Maybe make the range -1023 to 1023 for consistency with the similar
proposed interface on Intel?
https://lists.freedesktop.org/archives/intel-gfx/2017-April/126155.html

Alex


>
> Regards,
> Andres
>
>
>>
>> (AMDGPU_CTX_PRIORITY_NUM doesn't seem to be used anywhere...)
>>
>> Cheers,
>> Nicolai
>>
>>
>>> +
>>>  struct drm_amdgpu_ctx_in {
>>>  /** AMDGPU_CTX_OP_* */
>>>  __u32op;
>>>  /** For future use, no flags defined so far */
>>>  __u32flags;
>>>  __u32ctx_id;
>>> -__u32_pad;
>>> +__u32priority;
>>>  };
>>>
>>>  union drm_amdgpu_ctx_out {
>>>  struct {
>>>  __u32ctx_id;
>>>  __u32_pad;
>>>  } alloc;
>>>
>>>  struct {
>>>  /** For future use, no flags defined so far */
>>>  __u64flags;
>>>  /** Number of resets caused by this context so far. */
>>>  __u32hangs;
>>>  /** Reset status since the last call of the ioctl. */
>>>  __u32reset_status;
>>>  } state;
>>>  };
>>>
>>>  union drm_amdgpu_ctx {
>>>  struct drm_amdgpu_ctx_in in;
>>>
>>
>>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu: add parameter to allocate high priority contexts v8

2017-04-25 Thread Andres Rodriguez



On 2017-04-25 02:01 PM, Nicolai Hähnle wrote:

On 24.04.2017 18:20, Andres Rodriguez wrote:

Add a new context creation parameter to express a global context
priority.

The priority ranking in descending order is as follows:
 * AMDGPU_CTX_PRIORITY_HIGH
 * AMDGPU_CTX_PRIORITY_NORMAL
 * AMDGPU_CTX_PRIORITY_LOW

The driver will attempt to schedule work to the hardware according to
the priorities. No latency or throughput guarantees are provided by
this patch.

This interface intends to service the EGL_IMG_context_priority
extension, and vulkan equivalents.

v2: Instead of using flags, repurpose __pad
v3: Swap enum values of _NORMAL _HIGH for backwards compatibility
v4: Validate usermode priority and store it
v5: Move priority validation into amdgpu_ctx_ioctl(), headline reword
v6: add UAPI note regarding priorities requiring CAP_SYS_ADMIN
v7: remove ctx->priority
v8: added AMDGPU_CTX_PRIORITY_LOW, s/CAP_SYS_ADMIN/CAP_SYS_NICE

Reviewed-by: Emil Velikov 
Reviewed-by: Christian König 
Signed-off-by: Andres Rodriguez 


I didn't follow all the discussion, so feel free to shut me up if this
has already been discussed, but...


[snip]

+/* Context priority level */
+#define AMDGPU_CTX_PRIORITY_NORMAL0
+#define AMDGPU_CTX_PRIORITY_LOW1
+/* Selecting a priority above NORMAL requires CAP_SYS_ADMIN */
+#define AMDGPU_CTX_PRIORITY_HIGH2
+#define AMDGPU_CTX_PRIORITY_NUM3


I get that normal priority needs to be 0 for backwards compatibility,
but having LOW between NORMAL and HIGH is still odd. Have you considered
using signed integers as a way to fix that?


Thanks for the suggestion, that should make it a lot cleaner.

Regards,
Andres



(AMDGPU_CTX_PRIORITY_NUM doesn't seem to be used anywhere...)

Cheers,
Nicolai



+
 struct drm_amdgpu_ctx_in {
 /** AMDGPU_CTX_OP_* */
 __u32op;
 /** For future use, no flags defined so far */
 __u32flags;
 __u32ctx_id;
-__u32_pad;
+__u32priority;
 };

 union drm_amdgpu_ctx_out {
 struct {
 __u32ctx_id;
 __u32_pad;
 } alloc;

 struct {
 /** For future use, no flags defined so far */
 __u64flags;
 /** Number of resets caused by this context so far. */
 __u32hangs;
 /** Reset status since the last call of the ioctl. */
 __u32reset_status;
 } state;
 };

 union drm_amdgpu_ctx {
 struct drm_amdgpu_ctx_in in;





___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu: add parameter to allocate high priority contexts v8

2017-04-25 Thread Nicolai Hähnle

On 24.04.2017 18:20, Andres Rodriguez wrote:

Add a new context creation parameter to express a global context priority.

The priority ranking in descending order is as follows:
 * AMDGPU_CTX_PRIORITY_HIGH
 * AMDGPU_CTX_PRIORITY_NORMAL
 * AMDGPU_CTX_PRIORITY_LOW

The driver will attempt to schedule work to the hardware according to
the priorities. No latency or throughput guarantees are provided by
this patch.

This interface intends to service the EGL_IMG_context_priority
extension, and vulkan equivalents.

v2: Instead of using flags, repurpose __pad
v3: Swap enum values of _NORMAL _HIGH for backwards compatibility
v4: Validate usermode priority and store it
v5: Move priority validation into amdgpu_ctx_ioctl(), headline reword
v6: add UAPI note regarding priorities requiring CAP_SYS_ADMIN
v7: remove ctx->priority
v8: added AMDGPU_CTX_PRIORITY_LOW, s/CAP_SYS_ADMIN/CAP_SYS_NICE

>

Reviewed-by: Emil Velikov 
Reviewed-by: Christian König 
Signed-off-by: Andres Rodriguez 


I didn't follow all the discussion, so feel free to shut me up if this 
has already been discussed, but...



[snip]

+/* Context priority level */
+#define AMDGPU_CTX_PRIORITY_NORMAL 0
+#define AMDGPU_CTX_PRIORITY_LOW1
+/* Selecting a priority above NORMAL requires CAP_SYS_ADMIN */
+#define AMDGPU_CTX_PRIORITY_HIGH   2
+#define AMDGPU_CTX_PRIORITY_NUM3


I get that normal priority needs to be 0 for backwards compatibility, 
but having LOW between NORMAL and HIGH is still odd. Have you considered 
using signed integers as a way to fix that?


(AMDGPU_CTX_PRIORITY_NUM doesn't seem to be used anywhere...)

Cheers,
Nicolai



+
 struct drm_amdgpu_ctx_in {
/** AMDGPU_CTX_OP_* */
__u32   op;
/** For future use, no flags defined so far */
__u32   flags;
__u32   ctx_id;
-   __u32   _pad;
+   __u32   priority;
 };

 union drm_amdgpu_ctx_out {
struct {
__u32   ctx_id;
__u32   _pad;
} alloc;

struct {
/** For future use, no flags defined so far */
__u64   flags;
/** Number of resets caused by this context so far. */
__u32   hangs;
/** Reset status since the last call of the ioctl. */
__u32   reset_status;
} state;
 };

 union drm_amdgpu_ctx {
struct drm_amdgpu_ctx_in in;




--
Lerne, wie die Welt wirklich ist,
Aber vergiss niemals, wie sie sein sollte.
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amdgpu: add parameter to allocate high priority contexts v8

2017-04-24 Thread Andres Rodriguez
Add a new context creation parameter to express a global context priority.

The priority ranking in descending order is as follows:
 * AMDGPU_CTX_PRIORITY_HIGH
 * AMDGPU_CTX_PRIORITY_NORMAL
 * AMDGPU_CTX_PRIORITY_LOW

The driver will attempt to schedule work to the hardware according to
the priorities. No latency or throughput guarantees are provided by
this patch.

This interface intends to service the EGL_IMG_context_priority
extension, and vulkan equivalents.

v2: Instead of using flags, repurpose __pad
v3: Swap enum values of _NORMAL _HIGH for backwards compatibility
v4: Validate usermode priority and store it
v5: Move priority validation into amdgpu_ctx_ioctl(), headline reword
v6: add UAPI note regarding priorities requiring CAP_SYS_ADMIN
v7: remove ctx->priority
v8: added AMDGPU_CTX_PRIORITY_LOW, s/CAP_SYS_ADMIN/CAP_SYS_NICE

Reviewed-by: Emil Velikov 
Reviewed-by: Christian König 
Signed-off-by: Andres Rodriguez 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c   | 38 ---
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.h |  4 ++-
 include/uapi/drm/amdgpu_drm.h |  9 ++-
 3 files changed, 45 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index b43..af75571 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -8,124 +8,134 @@
  * and/or sell copies of the Software, and to permit persons to whom the
  * Software is furnished to do so, subject to the following conditions:
  *
  * The above copyright notice and this permission notice shall be included in
  * all copies or substantial portions of the Software.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  *
  * Authors: monk liu 
  */
 
 #include 
 #include "amdgpu.h"
 
-static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
+static int amdgpu_ctx_init(struct amdgpu_device *adev,
+  enum amd_sched_priority priority,
+  struct amdgpu_ctx *ctx)
 {
unsigned i, j;
int r;
 
+   if (priority < 0 || priority >= AMD_SCHED_PRIORITY_MAX)
+   return -EINVAL;
+
+   if (priority >= AMD_SCHED_PRIORITY_HIGH && !capable(CAP_SYS_NICE))
+   return -EACCES;
+
memset(ctx, 0, sizeof(*ctx));
ctx->adev = adev;
	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
  sizeof(struct fence*), GFP_KERNEL);
if (!ctx->fences)
return -ENOMEM;
 
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
ctx->rings[i].sequence = 1;
		ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
}
 
	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
 
/* create context entity for each ring */
for (i = 0; i < adev->num_rings; i++) {
struct amdgpu_ring *ring = adev->rings[i];
struct amd_sched_rq *rq;
 
-		rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
+		rq = &ring->sched.sched_rq[priority];
		r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
  rq, amdgpu_sched_jobs);
if (r)
goto failed;
}
 
return 0;
 
 failed:
for (j = 0; j < i; j++)
		amd_sched_entity_fini(&adev->rings[j]->sched,
				      &ctx->rings[j].entity);
kfree(ctx->fences);
ctx->fences = NULL;
return r;
 }
 
 static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
 {
struct amdgpu_device *adev = ctx->adev;
unsigned i, j;
 
if (!adev)
return;
 
for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
for (j = 0; j < amdgpu_sched_jobs; ++j)
fence_put(ctx->rings[i].fences[j]);
kfree(ctx->fences);
ctx->fences = NULL;
 
for (i = 0; i < adev->num_rings; i++)
		amd_sched_entity_fini(&adev->rings[i]->sched,
				      &ctx->rings[i].entity);
 }
 
 static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
struct amdgpu_fpriv *fpriv,
+   enum amd_sched_priority priority,
uint32_t *id)
 {
struct