Re: [PATCH] drm/amdgpu: fix amdgpu_amdkfd_remove_eviction_fence v2

2018-08-20 Thread Felix Kuehling
Hi Christian,

Are you going to submit this change to amd-staging-drm-next?
amd-kfd-staging would pick it up from there automatically.

Regards,
  Felix


On 2018-08-15 01:57 PM, Felix Kuehling wrote:
> I applied your change to my local KFD staging branch and ran it through a
> presubmission build/test (sorry, only accessible from inside AMD):
> http://git.amd.com:8080/#/c/167906/
>
> It passed, but checkpatch pointed out one issue:
> http://git.amd.com:8080/#/c/167906/1/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c@247
> (see inline)
>
> With that fixed, this change is Reviewed-by: Felix Kuehling
> 
>
> Regards,
>   Felix
>
>
> On 2018-08-15 03:46 AM, Christian König wrote:
>> Fix quite a number of bugs here. Unfortunately only compile tested.
>>
>> v2: fix copy error
>>
>> Signed-off-by: Christian König 
>> ---
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 103 
>> ++-
>>  1 file changed, 46 insertions(+), 57 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c 
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
>> index fa38a960ce00..887663ede781 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
>> @@ -206,11 +206,9 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct 
>> amdgpu_bo *bo,
>>  struct amdgpu_amdkfd_fence ***ef_list,
>>  unsigned int *ef_count)
>>  {
>> -struct reservation_object_list *fobj;
>> -struct reservation_object *resv;
>> -unsigned int i = 0, j = 0, k = 0, shared_count;
>> -unsigned int count = 0;
>> -struct amdgpu_amdkfd_fence **fence_list;
>> +struct reservation_object *resv = bo->tbo.resv;
>> +struct reservation_object_list *old, *new;
>> +unsigned int i, j, k;
>>  
>>  if (!ef && !ef_list)
>>  return -EINVAL;
>> @@ -220,76 +218,67 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct 
>> amdgpu_bo *bo,
>>  *ef_count = 0;
>>  }
>>  
>> -resv = bo->tbo.resv;
>> -fobj = reservation_object_get_list(resv);
>> -
>> -if (!fobj)
>> +old = reservation_object_get_list(resv);
>> +if (!old)
>>  return 0;
>>  
>> -preempt_disable();
>> -write_seqcount_begin(>seq);
>> +new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
>> +  GFP_KERNEL);
>> +if (!new)
>> +return -ENOMEM;
>>  
>> -/* Go through all the shared fences in the resevation object. If
>> - * ef is specified and it exists in the list, remove it and reduce the
>> - * count. If ef is not specified, then get the count of eviction fences
>> - * present.
>> +/* Go through all the shared fences in the resevation object and sort
>> + * the interesting ones to the end of the list.
>>   */
>> -shared_count = fobj->shared_count;
>> -for (i = 0; i < shared_count; ++i) {
>> +for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
>>  struct dma_fence *f;
>>  
>> -f = rcu_dereference_protected(fobj->shared[i],
>> +f = rcu_dereference_protected(old->shared[i],
>>reservation_object_held(resv));
>>  
>> -if (ef) {
>> -if (f->context == ef->base.context) {
>> -dma_fence_put(f);
>> -fobj->shared_count--;
>> -} else {
>> -RCU_INIT_POINTER(fobj->shared[j++], f);
>> -}
>> -} else if (to_amdgpu_amdkfd_fence(f))
>> -count++;
>> +if ((ef && f->context == ef->base.context) ||
>> +(!ef && to_amdgpu_amdkfd_fence(f)))
>> +RCU_INIT_POINTER(new->shared[--j], f);
>> +else
>> +RCU_INIT_POINTER(new->shared[k++], f);
>>  }
>> -write_seqcount_end(>seq);
>> -preempt_enable();
>> -
>> -if (ef || !count)
>> -return 0;
>> -
>> -/* Alloc memory for count number of eviction fence pointers. Fill the
>> - * ef_list array and ef_count
>> - */
>> -fence_list = kcalloc(count, sizeof(struct amdgpu_amdkfd_fence *),
>> - GFP_KERNEL);
>> -if (!fence_list)
>> -return -ENOMEM;
>> +new->shared_max = old->shared_max;
>> +new->shared_count = k;
>>  
>> -preempt_disable();
>> -write_seqcount_begin(>seq);
>> +if (!ef) {
>> +unsigned int count = old->shared_count - j;
>>  
>> -j = 0;
>> -for (i = 0; i < shared_count; ++i) {
>> -struct dma_fence *f;
>> -struct amdgpu_amdkfd_fence *efence;
>> -
>> -f = rcu_dereference_protected(fobj->shared[i],
>> -reservation_object_held(resv));
>> +/* Alloc memory for count number of eviction fence pointers. 
>> Fill 

Re: [PATCH] drm/amdgpu: fix amdgpu_amdkfd_remove_eviction_fence

2018-08-16 Thread Christian König

Am 16.08.2018 um 20:43 schrieb Felix Kuehling:


On 2018-08-16 02:26 PM, Christian König wrote:

Am 16.08.2018 um 20:23 schrieb Felix Kuehling:

On 2018-08-16 02:18 PM, Christian König wrote:

Am 16.08.2018 um 18:50 schrieb Felix Kuehling:

On 2018-08-16 02:43 AM, Christian König wrote:
[SNIP]

I mean it could be that in the worst case we race and stop a KFD
process for no good reason.

Right. For a more practical example, a KFD BO can get evicted just
before the application decides to unmap it. The preemption happens
asynchronously, handled by an SDMA job in the GPU scheduler. That job
will have an amdgpu_sync object with the eviction fence in it.

While that SDMA job is pending or in progress, the application decides
to unmap the BO. That removes the eviction fence from that BO's
reservation. But it can't remove the fence from all the sync objects
that were previously created and are still in flight. So the
preemption
will be triggered, and the fence will eventually signal when the KFD
preemption is complete.

I don't think that's something we can prevent. The worst case is
that a
preemption happens unnecessarily if an eviction gets triggered just
before removing the fence. But removing the fence will prevent future
evictions of the BO from triggering a KFD process preemption.
That's the
best we can do.

It's true that you can't drop the SDMA job which wants to evict the
BO, but at this time the fence signaling is already underway and not
stoppable anymore.

Replacing the fence with a new one would just be much cleaner and
fix quite a bunch of corner cases where the KFD process would be
preempted without good reason.

Replacing the fence cleanly probably also involves a preemption, so you
don't gain anything.

Mhm, why that?

My idea would be to create a new fence, replace the old one with the
new one and then manually signal the old one.

So why should there be a preemption triggered here?

Right maybe you can do this without triggering a preemption, but it
would require a bunch of new code. Currently the only thing that
replaces an old eviction fence with a new one is a preemption+restore.

You'll need to replace the fence in all BOs belonging to the process.
Since removing a fence is something you don't want to do, that really
means adding a new fence, and leaving the old fence in place until it
signals. In the mean time you have two eviction fences active for this
process, and if either one gets triggered, you'll get a preemption. Only
after all BOs have the new eviction fence, you can disarm the old
eviction fence and signal it (without triggering a preemption).

The way I see it, this adds a bunch of CPU overhead (depending on the
number of BOs in the process), and increases the likelihood of
unnecessary preemptions, because it takes that much longer for the
operation to complete.


Yeah, the CPU overhead is indeed a really good point.


Talking about overhead, imagine a process cleaning up at process
termination, which unmaps and frees all BOs. Each BO that gets freed
replaces the eviction fence on all remaining BOs. If you have N BOs,
you'll end up creating N new eviction fences in the process. The
overhead scales with O(N²) of the number of BOs in the process.


Well as I said that is the only case where I think replacing fences is 
unproblematic.


E.g. when a BO is freed we already have a mechanism to copy all fences 
to a local reservation object (to prevent that any new fences can be 
added to the BO if it shares it's reservation object).


At this time it should be really easy to filter out BOs you don't want 
to wait for when the BO is destructed.


But anyway, when you can live with the possibility that a stale fence 
triggers a process preemption than there isn't much point in cleaning 
this up.


Regards,
Christian.



Regards,
   Felix


Christian.


Regards,
    Felix


It's probably quite a bit more CPU overhead to do so, but I
think that this would still be the more fail-proof option.

Regards,
Christian.



Regards,
     Felix


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu: fix amdgpu_amdkfd_remove_eviction_fence

2018-08-16 Thread Felix Kuehling


On 2018-08-16 02:26 PM, Christian König wrote:
> Am 16.08.2018 um 20:23 schrieb Felix Kuehling:
>> On 2018-08-16 02:18 PM, Christian König wrote:
>>> Am 16.08.2018 um 18:50 schrieb Felix Kuehling:
 On 2018-08-16 02:43 AM, Christian König wrote:
 [SNIP]
> I mean it could be that in the worst case we race and stop a KFD
> process for no good reason.
 Right. For a more practical example, a KFD BO can get evicted just
 before the application decides to unmap it. The preemption happens
 asynchronously, handled by an SDMA job in the GPU scheduler. That job
 will have an amdgpu_sync object with the eviction fence in it.

 While that SDMA job is pending or in progress, the application decides
 to unmap the BO. That removes the eviction fence from that BO's
 reservation. But it can't remove the fence from all the sync objects
 that were previously created and are still in flight. So the
 preemption
 will be triggered, and the fence will eventually signal when the KFD
 preemption is complete.

 I don't think that's something we can prevent. The worst case is
 that a
 preemption happens unnecessarily if an eviction gets triggered just
 before removing the fence. But removing the fence will prevent future
 evictions of the BO from triggering a KFD process preemption.
 That's the
 best we can do.
>>> It's true that you can't drop the SDMA job which wants to evict the
>>> BO, but at this time the fence signaling is already underway and not
>>> stoppable anymore.
>>>
>>> Replacing the fence with a new one would just be much cleaner and
>>> fix quite a bunch of corner cases where the KFD process would be
>>> preempted without good reason.
>> Replacing the fence cleanly probably also involves a preemption, so you
>> don't gain anything.
>
> Mhm, why that?
>
> My idea would be to create a new fence, replace the old one with the
> new one and then manually signal the old one.
>
> So why should there be a preemption triggered here?

Right maybe you can do this without triggering a preemption, but it
would require a bunch of new code. Currently the only thing that
replaces an old eviction fence with a new one is a preemption+restore.

You'll need to replace the fence in all BOs belonging to the process.
Since removing a fence is something you don't want to do, that really
means adding a new fence, and leaving the old fence in place until it
signals. In the mean time you have two eviction fences active for this
process, and if either one gets triggered, you'll get a preemption. Only
after all BOs have the new eviction fence, you can disarm the old
eviction fence and signal it (without triggering a preemption).

The way I see it, this adds a bunch of CPU overhead (depending on the
number of BOs in the process), and increases the likelihood of
unnecessary preemptions, because it takes that much longer for the
operation to complete.

Talking about overhead, imagine a process cleaning up at process
termination, which unmaps and frees all BOs. Each BO that gets freed
replaces the eviction fence on all remaining BOs. If you have N BOs,
you'll end up creating N new eviction fences in the process. The
overhead scales with O(N²) of the number of BOs in the process.

Regards,
  Felix

>
> Christian.
>
>>
>> Regards,
>>    Felix
>>
>>> It's probably quite a bit more CPU overhead to do so, but I
>>> think that this would still be the more fail-proof option.
>>>
>>> Regards,
>>> Christian.
>>>
>>>
 Regards,
     Felix

>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu: fix amdgpu_amdkfd_remove_eviction_fence

2018-08-16 Thread Christian König

Am 16.08.2018 um 20:23 schrieb Felix Kuehling:

On 2018-08-16 02:18 PM, Christian König wrote:

Am 16.08.2018 um 18:50 schrieb Felix Kuehling:

On 2018-08-16 02:43 AM, Christian König wrote:
[SNIP]

I mean it could be that in the worst case we race and stop a KFD
process for no good reason.

Right. For a more practical example, a KFD BO can get evicted just
before the application decides to unmap it. The preemption happens
asynchronously, handled by an SDMA job in the GPU scheduler. That job
will have an amdgpu_sync object with the eviction fence in it.

While that SDMA job is pending or in progress, the application decides
to unmap the BO. That removes the eviction fence from that BO's
reservation. But it can't remove the fence from all the sync objects
that were previously created and are still in flight. So the preemption
will be triggered, and the fence will eventually signal when the KFD
preemption is complete.

I don't think that's something we can prevent. The worst case is that a
preemption happens unnecessarily if an eviction gets triggered just
before removing the fence. But removing the fence will prevent future
evictions of the BO from triggering a KFD process preemption. That's the
best we can do.

It's true that you can't drop the SDMA job which wants to evict the
BO, but at this time the fence signaling is already underway and not
stoppable anymore.

Replacing the fence with a new one would just be much cleaner and
fix quite a bunch of corner cases where the KFD process would be
preempted without good reason.

Replacing the fence cleanly probably also involves a preemption, so you
don't gain anything.


Mhm, why that?

My idea would be to create a new fence, replace the old one with the new 
one and then manually signal the old one.


So why should there be a preemption triggered here?

Christian.



Regards,
   Felix


It's probably quite a bit more CPU overhead to do so, but I
think that this would still be the more fail-proof option.

Regards,
Christian.



Regards,
    Felix



___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu: fix amdgpu_amdkfd_remove_eviction_fence

2018-08-16 Thread Felix Kuehling
On 2018-08-16 02:18 PM, Christian König wrote:
> Am 16.08.2018 um 18:50 schrieb Felix Kuehling:
>> On 2018-08-16 02:43 AM, Christian König wrote:
>> [SNIP]
>>> I mean it could be that in the worst case we race and stop a KFD
>>> process for no good reason.
>> Right. For a more practical example, a KFD BO can get evicted just
>> before the application decides to unmap it. The preemption happens
>> asynchronously, handled by an SDMA job in the GPU scheduler. That job
>> will have an amdgpu_sync object with the eviction fence in it.
>>
>> While that SDMA job is pending or in progress, the application decides
>> to unmap the BO. That removes the eviction fence from that BO's
>> reservation. But it can't remove the fence from all the sync objects
>> that were previously created and are still in flight. So the preemption
>> will be triggered, and the fence will eventually signal when the KFD
>> preemption is complete.
>>
>> I don't think that's something we can prevent. The worst case is that a
>> preemption happens unnecessarily if an eviction gets triggered just
>> before removing the fence. But removing the fence will prevent future
>> evictions of the BO from triggering a KFD process preemption. That's the
>> best we can do.
>
> It's true that you can't drop the SDMA job which wants to evict the
> BO, but at this time the fence signaling is already underway and not
> stoppable anymore.
>
> Replacing the fence with a new one would just be much cleaner and
> fix quite a bunch of corner cases where the KFD process would be
> preempted without good reason.

Replacing the fence cleanly probably also involves a preemption, so you
don't gain anything.

Regards,
  Felix

>
> It's probably quite a bit more CPU overhead to do so, but I
> think that this would still be the more fail-proof option.
>
> Regards,
> Christian.
>
>
>>
>> Regards,
>>    Felix
>>
>

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu: fix amdgpu_amdkfd_remove_eviction_fence

2018-08-16 Thread Christian König

Am 16.08.2018 um 18:50 schrieb Felix Kuehling:

On 2018-08-16 02:43 AM, Christian König wrote:
[SNIP]

I mean it could be that in the worst case we race and stop a KFD
process for no good reason.

Right. For a more practical example, a KFD BO can get evicted just
before the application decides to unmap it. The preemption happens
asynchronously, handled by an SDMA job in the GPU scheduler. That job
will have an amdgpu_sync object with the eviction fence in it.

While that SDMA job is pending or in progress, the application decides
to unmap the BO. That removes the eviction fence from that BO's
reservation. But it can't remove the fence from all the sync objects
that were previously created and are still in flight. So the preemption
will be triggered, and the fence will eventually signal when the KFD
preemption is complete.

I don't think that's something we can prevent. The worst case is that a
preemption happens unnecessarily if an eviction gets triggered just
before removing the fence. But removing the fence will prevent future
evictions of the BO from triggering a KFD process preemption. That's the
best we can do.


It's true that you can't drop the SDMA job which wants to evict the BO, 
but at this time the fence signaling is already underway and not 
stoppable anymore.


Replacing the fence with a new one would just be much cleaner and 
fix quite a bunch of corner cases where the KFD process would be 
preempted without good reason.


It's probably quite a bit more CPU overhead to do so, but I think 
that this would still be the more fail-proof option.


Regards,
Christian.




Regards,
   Felix



___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu: fix amdgpu_amdkfd_remove_eviction_fence

2018-08-16 Thread Felix Kuehling
On 2018-08-16 02:43 AM, Christian König wrote:
> Am 15.08.2018 um 20:49 schrieb Felix Kuehling:
>> On 2018-08-15 02:27 PM, Christian König wrote:
>>> Am 15.08.2018 um 20:17 schrieb Felix Kuehling:
 On 2018-08-15 03:02 AM, Christian König wrote:
> Hi Felix,
>
> yeah, you pretty much nailed it.
>
> The problem is that the array itself is RCU protected. This means
> that
> you can only copy the whole structure when you want to update it.
>
> The exception is reservation_object_add_shared() which only works
> because we replace an either signaled fence or a fence within the
> same
> context but a later sequence number.
>
> This also explains why this is only a band aid and the whole approach
> of removing fences doesn't work in general. At any time somebody
> could
> have taken an RCU reference to the old array, created a copy of it
> and
> is now still using this one.
>
> The only 100% correct solution would be to mark the existing fence as
> signaled and replace it everywhere else.
 Depends what you're trying to achieve. I think the problem you see is,
 that some reader may still be using the old reservation_object_list
 copy
 with the fence still in it. But, the remaining lifetime of the
 reservation_object_list copy is limited. If we wanted to be sure no
 more
 copies with the old fence exist, all we'd need to do is call
 synchronize_rcu. Maybe we need to do that before releasing the fence
 references, or release the fence reference in an RCU callback to be
 safe.
>>> The assumption that the fence would die with the array is what is
>>> incorrect here.
>> I'm making no such assumption. The point is not to destroy the fence.
>> Only to remove the fence reference from the reservation object. That is,
>> we want any BO with this reservation object to stop checking or waiting
>> for our eviction fence.
>
> Maybe I'm overthinking this, but let me explain the point once more.
>
> See reservation_object_wait_timeout_rcu() for an example of the
> problem I see here:
>>     seq = read_seqcount_begin(>seq);
>>     rcu_read_lock();
> 
>>     if (!dma_fence_get_rcu(lfence))
>>     goto unlock_retry;
> ...
>>     rcu_read_unlock();
> ...
>>     if (read_seqcount_retry(>seq, seq)) {
>>     dma_fence_put(fence);
>>     goto retry;
>>     }
> ...
>>     ret = dma_fence_wait_timeout(fence, intr, ret);
>
> In other words the RCU mechanism guarantees that we also wait for
> newly added fences, but it does not guarantee that we don't wait for a
> fence which is already removed.

I understand that.

>
>>> The lifetime of the RCUed array object is limited, but there is absolutely
>>> no guarantee that somebody didn't make a copy of the fences.
>>>
>>> E.g. somebody could have called reservation_object_get_fences_rcu(),
>>> reservation_object_copy_fences() or a concurrent
>>> reservation_object_wait_timeout_rcu() is underway.
>> That's fine. Then there will be additional fence references. When we
>> remove a fence from a reservation object, we don't know and don't care
>> who else is holding a reference to the fence anyway. This is no
>> different.
>
> So the KFD implementation doesn't care that the fence is removed from
> a BO but somebody could still start to wait on it because of the BO?
>
> I mean it could be that in the worst case we race and stop a KFD
> process for no good reason.

Right. For a more practical example, a KFD BO can get evicted just
before the application decides to unmap it. The preemption happens
asynchronously, handled by an SDMA job in the GPU scheduler. That job
will have an amdgpu_sync object with the eviction fence in it.

While that SDMA job is pending or in progress, the application decides
to unmap the BO. That removes the eviction fence from that BO's
reservation. But it can't remove the fence from all the sync objects
that were previously created and are still in flight. So the preemption
will be triggered, and the fence will eventually signal when the KFD
preemption is complete.

I don't think that's something we can prevent. The worst case is that a
preemption happens unnecessarily if an eviction gets triggered just
before removing the fence. But removing the fence will prevent future
evictions of the BO from triggering a KFD process preemption. That's the
best we can do.

Regards,
  Felix

>
> Regards,
> Christian.
>
>>
>> Regards,
>>    Felix
>>
>>> That's also the reason why fences live for much longer than their
>>> signaling, e.g. somebody can have a reference to the fence object even
>>> hours after it is signaled.
>>>
>>> Regards,
>>> Christian.
>>>
 Regards,
     Felix

> Going to fix the copy error I made with the sequence number and
> send it out again.
>
> Regards,
> Christian.
>
> 

Re: [PATCH] drm/amdgpu: fix amdgpu_amdkfd_remove_eviction_fence

2018-08-16 Thread Christian König

Am 15.08.2018 um 20:49 schrieb Felix Kuehling:

On 2018-08-15 02:27 PM, Christian König wrote:

Am 15.08.2018 um 20:17 schrieb Felix Kuehling:

On 2018-08-15 03:02 AM, Christian König wrote:

Hi Felix,

yeah, you pretty much nailed it.

The problem is that the array itself is RCU protected. This means that
you can only copy the whole structure when you want to update it.

The exception is reservation_object_add_shared() which only works
because we replace an either signaled fence or a fence within the same
context but a later sequence number.

This also explains why this is only a band aid and the whole approach
of removing fences doesn't work in general. At any time somebody could
have taken an RCU reference to the old array, created a copy of it and
is now still using this one.

The only 100% correct solution would be to mark the existing fence as
signaled and replace it everywhere else.

Depends what you're trying to achieve. I think the problem you see is,
that some reader may still be using the old reservation_object_list copy
with the fence still in it. But, the remaining lifetime of the
reservation_object_list copy is limited. If we wanted to be sure no more
copies with the old fence exist, all we'd need to do is call
synchronize_rcu. Maybe we need to do that before releasing the fence
references, or release the fence reference in an RCU callback to be
safe.

The assumption that the fence would die with the array is what is
incorrect here.

I'm making no such assumption. The point is not to destroy the fence.
Only to remove the fence reference from the reservation object. That is,
we want any BO with this reservation object to stop checking or waiting
for our eviction fence.


Maybe I'm overthinking this, but let me explain the point once more.

See reservation_object_wait_timeout_rcu() for an example of the problem 
I see here:

    seq = read_seqcount_begin(>seq);
    rcu_read_lock();



    if (!dma_fence_get_rcu(lfence))
    goto unlock_retry;

...

    rcu_read_unlock();

...

    if (read_seqcount_retry(>seq, seq)) {
    dma_fence_put(fence);
    goto retry;
    }

...

    ret = dma_fence_wait_timeout(fence, intr, ret);


In other words the RCU mechanism guarantees that we also wait for newly 
added fences, but it does not guarantee that we don't wait for a fence 
which is already removed.



The lifetime of the RCUed array object is limited, but there is absolutely
no guarantee that somebody didn't make a copy of the fences.

E.g. somebody could have called reservation_object_get_fences_rcu(),
reservation_object_copy_fences() or a concurrent
reservation_object_wait_timeout_rcu() is underway.

That's fine. Then there will be additional fence references. When we
remove a fence from a reservation object, we don't know and don't care
who else is holding a reference to the fence anyway. This is no different.


So the KFD implementation doesn't care that the fence is removed from a 
BO but somebody could still start to wait on it because of the BO?


I mean it could be that in the worst case we race and stop a KFD process 
for no good reason.


Regards,
Christian.



Regards,
   Felix


That's also the reason why fences live for much longer than their
signaling, e.g. somebody can have a reference to the fence object even
hours after it is signaled.

Regards,
Christian.


Regards,
    Felix


Going to fix the copy error I made with the sequence number and
send it out again.

Regards,
Christian.

Am 14.08.2018 um 22:17 schrieb Felix Kuehling:

[+Harish]

I think this looks good for the most part. See one comment inline
below.

But bear with me while I'm trying to understand what was wrong with
the
old code. Please correct me if I get this wrong or point out anything
I'm missing.

The reservation_object_list looks to be protected by a combination of
three mechanism:

     * Holding the reservation object
     * RCU
     * seqcount

Updating the fence list requires holding the reservation object. But
there are some readers that can be called without holding that lock
(reservation_object_copy_fences, reservation_object_get_fences_rcu,
reservation_object_wait_timeout_rcu,
reservation_object_test_signaled_rcu). They rely on RCU to work on a
copy and seqcount to make sure they had the most up-to-date
information.
So any function updating the fence lists needs to do RCU and seqcount
correctly to prevent breaking those readers.

As I understand it, RCU with seqcount retry just means that readers
will
spin retrying while there are writers, and writers are never
blocked by
readers. Writers are blocked by each other, because they need to hold
the reservation.

The code you added looks a lot like
reservation_object_add_shared_replace, which removes fences that have
signalled, and atomically replaces obj->fences with a new
reservation_fence_list. That atomicity is important 

Re: [PATCH] drm/amdgpu: fix amdgpu_amdkfd_remove_eviction_fence

2018-08-15 Thread Felix Kuehling

On 2018-08-15 02:27 PM, Christian König wrote:
> Am 15.08.2018 um 20:17 schrieb Felix Kuehling:
>> On 2018-08-15 03:02 AM, Christian König wrote:
>>> Hi Felix,
>>>
>>> yeah, you pretty much nailed it.
>>>
>>> The problem is that the array itself is RCU protected. This means that
>>> you can only copy the whole structure when you want to update it.
>>>
>>> The exception is reservation_object_add_shared() which only works
>>> because we replace an either signaled fence or a fence within the same
>>> context but a later sequence number.
>>>
>>> This also explains why this is only a band aid and the whole approach
>>> of removing fences doesn't work in general. At any time somebody could
>>> have taken an RCU reference to the old array, created a copy of it and
>>> is now still using this one.
>>>
>>> The only 100% correct solution would be to mark the existing fence as
>>> signaled and replace it everywhere else.
>> Depends what you're trying to achieve. I think the problem you see is,
>> that some reader may still be using the old reservation_object_list copy
>> with the fence still in it. But, the remaining lifetime of the
>> reservation_object_list copy is limited. If we wanted to be sure no more
>> copies with the old fence exist, all we'd need to do is call
>> synchronize_rcu. Maybe we need to do that before releasing the fence
>> references, or release the fence reference in an RCU callback to be
>> safe.
>
> The assumption that the fence would die with the array is what is
> incorrect here.

I'm making no such assumption. The point is not to destroy the fence.
Only to remove the fence reference from the reservation object. That is,
we want any BO with this reservation object to stop checking or waiting
for our eviction fence.

>
> The lifetime of the RCUed array object is limited, but there is absolutely
> no guarantee that somebody didn't make a copy of the fences.
>
> E.g. somebody could have called reservation_object_get_fences_rcu(),
> reservation_object_copy_fences() or a concurrent
> reservation_object_wait_timeout_rcu() is underway.

That's fine. Then there will be additional fence references. When we
remove a fence from a reservation object, we don't know and don't care
who else is holding a reference to the fence anyway. This is no different.

Regards,
  Felix

>
> That's also the reason why fences live for much longer than their
> signaling, e.g. somebody can have a reference to the fence object even
> hours after it is signaled.
>
> Regards,
> Christian.
>
>>
>> Regards,
>>    Felix
>>
>>> Going to fix the copy error I made with the sequence number and
>>> send it out again.
>>>
>>> Regards,
>>> Christian.
>>>
>>> Am 14.08.2018 um 22:17 schrieb Felix Kuehling:
 [+Harish]

 I think this looks good for the most part. See one comment inline
 below.

 But bear with me while I'm trying to understand what was wrong with
 the
 old code. Please correct me if I get this wrong or point out anything
 I'm missing.

 The reservation_object_list looks to be protected by a combination of
 three mechanism:

     * Holding the reservation object
     * RCU
     * seqcount

 Updating the fence list requires holding the reservation object. But
 there are some readers that can be called without holding that lock
 (reservation_object_copy_fences, reservation_object_get_fences_rcu,
 reservation_object_wait_timeout_rcu,
 reservation_object_test_signaled_rcu). They rely on RCU to work on a
 copy and seqcount to make sure they had the most up-to-date
 information.
 So any function updating the fence lists needs to do RCU and seqcount
 correctly to prevent breaking those readers.

 As I understand it, RCU with seqcount retry just means that readers
 will
 spin retrying while there are writers, and writers are never
 blocked by
 readers. Writers are blocked by each other, because they need to hold
 the reservation.

 The code you added looks a lot like
 reservation_object_add_shared_replace, which removes fences that have
 signalled, and atomically replaces obj->fences with a new
 reservation_fence_list. That atomicity is important because each
 pointer
 in the obj->fences->shared array is separately protected by RCU,
 but not
 the array as a whole or the shared_count.

 See one comment inline.

 Regards,
     Felix

 On 2018-08-14 03:39 AM, Christian König wrote:
> Fix quite a number of bugs here. Unfortunately only compile tested.
>
> Signed-off-by: Christian König 
> ---
>    drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 103
> ++-
>    1 file changed, 46 insertions(+), 57 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> index fa38a960ce00..dece31516dc4 100644
> --- 

Re: [PATCH] drm/amdgpu: fix amdgpu_amdkfd_remove_eviction_fence

2018-08-15 Thread Christian König

Am 15.08.2018 um 20:17 schrieb Felix Kuehling:

On 2018-08-15 03:02 AM, Christian König wrote:

Hi Felix,

yeah, you pretty much nailed it.

The problem is that the array itself is RCU protected. This means that
you can only copy the whole structure when you want to update it.

The exception is reservation_object_add_shared() which only works
because we replace an either signaled fence or a fence within the same
context but a later sequence number.

This also explains why this is only a band aid and the whole approach
of removing fences doesn't work in general. At any time somebody could
have taken an RCU reference to the old array, created a copy of it and
is now still using this one.

The only 100% correct solution would be to mark the existing fence as
signaled and replace it everywhere else.

Depends what you're trying to achieve. I think the problem you see is,
that some reader may still be using the old reservation_object_list copy
with the fence still in it. But, the remaining lifetime of the
reservation_object_list copy is limited. If we wanted to be sure no more
copies with the old fence exist, all we'd need to do is call
synchronize_rcu. Maybe we need to do that before releasing the fence
references, or release the fence reference in an RCU callback to be safe.


The assumption that the fence would die with the array is what is 
incorrect here.


The lifetime of the RCUed array object is limited, but there is absolutely no 
guarantee that somebody didn't make a copy of the fences.


E.g. somebody could have called reservation_object_get_fences_rcu(), 
reservation_object_copy_fences() or a concurrent 
reservation_object_wait_timeout_rcu() is underway.


That's also the reason why fences live for much longer than their 
signaling, e.g. somebody can have a reference to the fence object even 
hours after it is signaled.


Regards,
Christian.



Regards,
   Felix


Going to fix the copy error I made with the sequence number and
send it out again.

Regards,
Christian.

Am 14.08.2018 um 22:17 schrieb Felix Kuehling:

[+Harish]

I think this looks good for the most part. See one comment inline below.

But bear with me while I'm trying to understand what was wrong with the
old code. Please correct me if I get this wrong or point out anything
I'm missing.

The reservation_object_list looks to be protected by a combination of
three mechanisms:

    * Holding the reservation object
    * RCU
    * seqcount

Updating the fence list requires holding the reservation object. But
there are some readers that can be called without holding that lock
(reservation_object_copy_fences, reservation_object_get_fences_rcu,
reservation_object_wait_timeout_rcu,
reservation_object_test_signaled_rcu). They rely on RCU to work on a
copy and seqcount to make sure they had the most up-to-date information.
So any function updating the fence lists needs to do RCU and seqcount
correctly to prevent breaking those readers.

As I understand it, RCU with seqcount retry just means that readers will
spin retrying while there are writers, and writers are never blocked by
readers. Writers are blocked by each other, because they need to hold
the reservation.

The code you added looks a lot like
reservation_object_add_shared_replace, which removes fences that have
signalled, and atomically replaces obj->fences with a new
reservation_fence_list. That atomicity is important because each pointer
in the obj->fences->shared array is separately protected by RCU, but not
the array as a whole or the shared_count.

See one comment inline.

Regards,
    Felix

On 2018-08-14 03:39 AM, Christian König wrote:

Fix quite a number of bugs here. Unfortunately only compile tested.

Signed-off-by: Christian König 
---
   drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 103
++-
   1 file changed, 46 insertions(+), 57 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index fa38a960ce00..dece31516dc4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -206,11 +206,9 @@ static int
amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
   struct amdgpu_amdkfd_fence ***ef_list,
   unsigned int *ef_count)
   {
-    struct reservation_object_list *fobj;
-    struct reservation_object *resv;
-    unsigned int i = 0, j = 0, k = 0, shared_count;
-    unsigned int count = 0;
-    struct amdgpu_amdkfd_fence **fence_list;
+    struct reservation_object *resv = bo->tbo.resv;
+    struct reservation_object_list *old, *new;
+    unsigned int i, j, k;
     if (!ef && !ef_list)
   return -EINVAL;
@@ -220,76 +218,67 @@ static int
amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
   *ef_count = 0;
   }
   -    resv = bo->tbo.resv;
-    fobj = reservation_object_get_list(resv);
-
-    if (!fobj)
+    old = reservation_object_get_list(resv);
+    if 

Re: [PATCH] drm/amdgpu: fix amdgpu_amdkfd_remove_eviction_fence

2018-08-15 Thread Felix Kuehling
On 2018-08-15 03:02 AM, Christian König wrote:
> Hi Felix,
>
> yeah, you pretty much nailed it.
>
> The problem is that the array itself is RCU protected. This means that
> you can only copy the whole structure when you want to update it.
>
> The exception is reservation_object_add_shared() which only works
> because we replace an either signaled fence or a fence within the same
> context but a later sequence number.
>
> This also explains why this is only a band aid and the whole approach
> of removing fences doesn't work in general. At any time somebody could
> have taken an RCU reference to the old array, created a copy of it and
> is now still using this one.
>
> The only 100% correct solution would be to mark the existing fence as
> signaled and replace it everywhere else.

Depends what you're trying to achieve. I think the problem you see is,
that some reader may still be using the old reservation_object_list copy
with the fence still in it. But, the remaining lifetime of the
reservation_object_list copy is limited. If we wanted to be sure no more
copies with the old fence exist, all we'd need to do is call
synchronize_rcu. Maybe we need to do that before releasing the fence
references, or release the fence reference in an RCU callback to be safe.

Regards,
  Felix

>
> Going to fix the copy error I made with the sequence number and
> send it out again.
>
> Regards,
> Christian.
>
> Am 14.08.2018 um 22:17 schrieb Felix Kuehling:
>> [+Harish]
>>
>> I think this looks good for the most part. See one comment inline below.
>>
>> But bear with me while I'm trying to understand what was wrong with the
>> old code. Please correct me if I get this wrong or point out anything
>> I'm missing.
>>
>> The reservation_object_list looks to be protected by a combination of
>> three mechanisms:
>>
>>    * Holding the reservation object
>>    * RCU
>>    * seqcount
>>
>> Updating the fence list requires holding the reservation object. But
>> there are some readers that can be called without holding that lock
>> (reservation_object_copy_fences, reservation_object_get_fences_rcu,
>> reservation_object_wait_timeout_rcu,
>> reservation_object_test_signaled_rcu). They rely on RCU to work on a
>> copy and seqcount to make sure they had the most up-to-date information.
>> So any function updating the fence lists needs to do RCU and seqcount
>> correctly to prevent breaking those readers.
>>
>> As I understand it, RCU with seqcount retry just means that readers will
>> spin retrying while there are writers, and writers are never blocked by
>> readers. Writers are blocked by each other, because they need to hold
>> the reservation.
>>
>> The code you added looks a lot like
>> reservation_object_add_shared_replace, which removes fences that have
>> signalled, and atomically replaces obj->fences with a new
>> reservation_fence_list. That atomicity is important because each pointer
>> in the obj->fences->shared array is separately protected by RCU, but not
>> the array as a whole or the shared_count.
>>
>> See one comment inline.
>>
>> Regards,
>>    Felix
>>
>> On 2018-08-14 03:39 AM, Christian König wrote:
>>> Fix quite a number of bugs here. Unfortunately only compile tested.
>>>
>>> Signed-off-by: Christian König 
>>> ---
>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 103
>>> ++-
>>>   1 file changed, 46 insertions(+), 57 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
>>> index fa38a960ce00..dece31516dc4 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
>>> @@ -206,11 +206,9 @@ static int
>>> amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
>>>   struct amdgpu_amdkfd_fence ***ef_list,
>>>   unsigned int *ef_count)
>>>   {
>>> -    struct reservation_object_list *fobj;
>>> -    struct reservation_object *resv;
>>> -    unsigned int i = 0, j = 0, k = 0, shared_count;
>>> -    unsigned int count = 0;
>>> -    struct amdgpu_amdkfd_fence **fence_list;
>>> +    struct reservation_object *resv = bo->tbo.resv;
>>> +    struct reservation_object_list *old, *new;
>>> +    unsigned int i, j, k;
>>>     if (!ef && !ef_list)
>>>   return -EINVAL;
>>> @@ -220,76 +218,67 @@ static int
>>> amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
>>>   *ef_count = 0;
>>>   }
>>>   -    resv = bo->tbo.resv;
>>> -    fobj = reservation_object_get_list(resv);
>>> -
>>> -    if (!fobj)
>>> +    old = reservation_object_get_list(resv);
>>> +    if (!old)
>>>   return 0;
>>>   -    preempt_disable();
>>> -    write_seqcount_begin(>seq);
>>> +    new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
>>> +  GFP_KERNEL);
>>> +    if (!new)
>>> +    return -ENOMEM;
>>>   -    /* Go through all the shared fences in the resevation object. If
>>> - * ef 

Re: [PATCH] drm/amdgpu: fix amdgpu_amdkfd_remove_eviction_fence v2

2018-08-15 Thread Felix Kuehling
I applied your change to my local KFD staging branch and ran it through a
presubmission build/test (sorry, only accessible from inside AMD):
http://git.amd.com:8080/#/c/167906/

It passed, but checkpatch pointed out one issue:
http://git.amd.com:8080/#/c/167906/1/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c@247
(see inline)

With that fixed, this change is Reviewed-by: Felix Kuehling


Regards,
  Felix


On 2018-08-15 03:46 AM, Christian König wrote:
> Fix quite a number of bugs here. Unfortunately only compile tested.
>
> v2: fix copy error
>
> Signed-off-by: Christian König 
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 103 
> ++-
>  1 file changed, 46 insertions(+), 57 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> index fa38a960ce00..887663ede781 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> @@ -206,11 +206,9 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct 
> amdgpu_bo *bo,
>   struct amdgpu_amdkfd_fence ***ef_list,
>   unsigned int *ef_count)
>  {
> - struct reservation_object_list *fobj;
> - struct reservation_object *resv;
> - unsigned int i = 0, j = 0, k = 0, shared_count;
> - unsigned int count = 0;
> - struct amdgpu_amdkfd_fence **fence_list;
> + struct reservation_object *resv = bo->tbo.resv;
> + struct reservation_object_list *old, *new;
> + unsigned int i, j, k;
>  
>   if (!ef && !ef_list)
>   return -EINVAL;
> @@ -220,76 +218,67 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct 
> amdgpu_bo *bo,
>   *ef_count = 0;
>   }
>  
> - resv = bo->tbo.resv;
> - fobj = reservation_object_get_list(resv);
> -
> - if (!fobj)
> + old = reservation_object_get_list(resv);
> + if (!old)
>   return 0;
>  
> - preempt_disable();
> - write_seqcount_begin(>seq);
> + new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
> +   GFP_KERNEL);
> + if (!new)
> + return -ENOMEM;
>  
> - /* Go through all the shared fences in the resevation object. If
> -  * ef is specified and it exists in the list, remove it and reduce the
> -  * count. If ef is not specified, then get the count of eviction fences
> -  * present.
> + /* Go through all the shared fences in the resevation object and sort
> +  * the interesting ones to the end of the list.
>*/
> - shared_count = fobj->shared_count;
> - for (i = 0; i < shared_count; ++i) {
> + for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
>   struct dma_fence *f;
>  
> - f = rcu_dereference_protected(fobj->shared[i],
> + f = rcu_dereference_protected(old->shared[i],
> reservation_object_held(resv));
>  
> - if (ef) {
> - if (f->context == ef->base.context) {
> - dma_fence_put(f);
> - fobj->shared_count--;
> - } else {
> - RCU_INIT_POINTER(fobj->shared[j++], f);
> - }
> - } else if (to_amdgpu_amdkfd_fence(f))
> - count++;
> + if ((ef && f->context == ef->base.context) ||
> + (!ef && to_amdgpu_amdkfd_fence(f)))
> + RCU_INIT_POINTER(new->shared[--j], f);
> + else
> + RCU_INIT_POINTER(new->shared[k++], f);
>   }
> - write_seqcount_end(>seq);
> - preempt_enable();
> -
> - if (ef || !count)
> - return 0;
> -
> - /* Alloc memory for count number of eviction fence pointers. Fill the
> -  * ef_list array and ef_count
> -  */
> - fence_list = kcalloc(count, sizeof(struct amdgpu_amdkfd_fence *),
> -  GFP_KERNEL);
> - if (!fence_list)
> - return -ENOMEM;
> + new->shared_max = old->shared_max;
> + new->shared_count = k;
>  
> - preempt_disable();
> - write_seqcount_begin(>seq);
> + if (!ef) {
> + unsigned int count = old->shared_count - j;
>  
> - j = 0;
> - for (i = 0; i < shared_count; ++i) {
> - struct dma_fence *f;
> - struct amdgpu_amdkfd_fence *efence;
> -
> - f = rcu_dereference_protected(fobj->shared[i],
> - reservation_object_held(resv));
> + /* Alloc memory for count number of eviction fence pointers. 
> Fill the

checkpatch.pl reports: WARNING: line over 80 characters

> +  * ef_list array and ef_count
> +  */
> + *ef_list = kcalloc(count, sizeof(**ef_list), GFP_KERNEL);
> + *ef_count = count;
>  
> - 

[PATCH] drm/amdgpu: fix amdgpu_amdkfd_remove_eviction_fence v2

2018-08-15 Thread Christian König
Fix quite a number of bugs here. Unfortunately only compile tested.

v2: fix copy error

Signed-off-by: Christian König 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 103 ++-
 1 file changed, 46 insertions(+), 57 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index fa38a960ce00..887663ede781 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -206,11 +206,9 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct 
amdgpu_bo *bo,
struct amdgpu_amdkfd_fence ***ef_list,
unsigned int *ef_count)
 {
-   struct reservation_object_list *fobj;
-   struct reservation_object *resv;
-   unsigned int i = 0, j = 0, k = 0, shared_count;
-   unsigned int count = 0;
-   struct amdgpu_amdkfd_fence **fence_list;
+   struct reservation_object *resv = bo->tbo.resv;
+   struct reservation_object_list *old, *new;
+   unsigned int i, j, k;
 
if (!ef && !ef_list)
return -EINVAL;
@@ -220,76 +218,67 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct 
amdgpu_bo *bo,
*ef_count = 0;
}
 
-   resv = bo->tbo.resv;
-   fobj = reservation_object_get_list(resv);
-
-   if (!fobj)
+   old = reservation_object_get_list(resv);
+   if (!old)
return 0;
 
-   preempt_disable();
-   write_seqcount_begin(>seq);
+   new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
+ GFP_KERNEL);
+   if (!new)
+   return -ENOMEM;
 
-   /* Go through all the shared fences in the resevation object. If
-* ef is specified and it exists in the list, remove it and reduce the
-* count. If ef is not specified, then get the count of eviction fences
-* present.
+   /* Go through all the shared fences in the resevation object and sort
+* the interesting ones to the end of the list.
 */
-   shared_count = fobj->shared_count;
-   for (i = 0; i < shared_count; ++i) {
+   for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
struct dma_fence *f;
 
-   f = rcu_dereference_protected(fobj->shared[i],
+   f = rcu_dereference_protected(old->shared[i],
  reservation_object_held(resv));
 
-   if (ef) {
-   if (f->context == ef->base.context) {
-   dma_fence_put(f);
-   fobj->shared_count--;
-   } else {
-   RCU_INIT_POINTER(fobj->shared[j++], f);
-   }
-   } else if (to_amdgpu_amdkfd_fence(f))
-   count++;
+   if ((ef && f->context == ef->base.context) ||
+   (!ef && to_amdgpu_amdkfd_fence(f)))
+   RCU_INIT_POINTER(new->shared[--j], f);
+   else
+   RCU_INIT_POINTER(new->shared[k++], f);
}
-   write_seqcount_end(>seq);
-   preempt_enable();
-
-   if (ef || !count)
-   return 0;
-
-   /* Alloc memory for count number of eviction fence pointers. Fill the
-* ef_list array and ef_count
-*/
-   fence_list = kcalloc(count, sizeof(struct amdgpu_amdkfd_fence *),
-GFP_KERNEL);
-   if (!fence_list)
-   return -ENOMEM;
+   new->shared_max = old->shared_max;
+   new->shared_count = k;
 
-   preempt_disable();
-   write_seqcount_begin(>seq);
+   if (!ef) {
+   unsigned int count = old->shared_count - j;
 
-   j = 0;
-   for (i = 0; i < shared_count; ++i) {
-   struct dma_fence *f;
-   struct amdgpu_amdkfd_fence *efence;
-
-   f = rcu_dereference_protected(fobj->shared[i],
-   reservation_object_held(resv));
+   /* Alloc memory for count number of eviction fence pointers. 
Fill the
+* ef_list array and ef_count
+*/
+   *ef_list = kcalloc(count, sizeof(**ef_list), GFP_KERNEL);
+   *ef_count = count;
 
-   efence = to_amdgpu_amdkfd_fence(f);
-   if (efence) {
-   fence_list[k++] = efence;
-   fobj->shared_count--;
-   } else {
-   RCU_INIT_POINTER(fobj->shared[j++], f);
+   if (!*ef_list) {
+   kfree(new);
+   return -ENOMEM;
}
}
 
+   /* Install the new fence list, seqcount provides the barriers */
+   preempt_disable();
+   write_seqcount_begin(>seq);
+   RCU_INIT_POINTER(resv->fence, new);

Re: [PATCH] drm/amdgpu: fix amdgpu_amdkfd_remove_eviction_fence

2018-08-15 Thread Christian König

Hi Felix,

yeah, you pretty much nailed it.

The problem is that the array itself is RCU protected. This means that 
you can only copy the whole structure when you want to update it.


The exception is reservation_object_add_shared() which only works 
because we replace an either signaled fence or a fence within the same 
context but a later sequence number.


This also explains why this is only a band aid and the whole approach of 
removing fences doesn't work in general. At any time somebody could have 
taken an RCU reference to the old array, created a copy of it and is now 
still using this one.


The only 100% correct solution would be to mark the existing fence as 
signaled and replace it everywhere else.


Going to fix the copy error I made with the sequence number and 
send it out again.


Regards,
Christian.

Am 14.08.2018 um 22:17 schrieb Felix Kuehling:

[+Harish]

I think this looks good for the most part. See one comment inline below.

But bear with me while I'm trying to understand what was wrong with the
old code. Please correct me if I get this wrong or point out anything
I'm missing.

The reservation_object_list looks to be protected by a combination of
three mechanisms:

   * Holding the reservation object
   * RCU
   * seqcount

Updating the fence list requires holding the reservation object. But
there are some readers that can be called without holding that lock
(reservation_object_copy_fences, reservation_object_get_fences_rcu,
reservation_object_wait_timeout_rcu,
reservation_object_test_signaled_rcu). They rely on RCU to work on a
copy and seqcount to make sure they had the most up-to-date information.
So any function updating the fence lists needs to do RCU and seqcount
correctly to prevent breaking those readers.

As I understand it, RCU with seqcount retry just means that readers will
spin retrying while there are writers, and writers are never blocked by
readers. Writers are blocked by each other, because they need to hold
the reservation.

The code you added looks a lot like
reservation_object_add_shared_replace, which removes fences that have
signalled, and atomically replaces obj->fences with a new
reservation_fence_list. That atomicity is important because each pointer
in the obj->fences->shared array is separately protected by RCU, but not
the array as a whole or the shared_count.

See one comment inline.

Regards,
   Felix

On 2018-08-14 03:39 AM, Christian König wrote:

Fix quite a number of bugs here. Unfortunately only compile tested.

Signed-off-by: Christian König 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 103 ++-
  1 file changed, 46 insertions(+), 57 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index fa38a960ce00..dece31516dc4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -206,11 +206,9 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct 
amdgpu_bo *bo,
struct amdgpu_amdkfd_fence ***ef_list,
unsigned int *ef_count)
  {
-   struct reservation_object_list *fobj;
-   struct reservation_object *resv;
-   unsigned int i = 0, j = 0, k = 0, shared_count;
-   unsigned int count = 0;
-   struct amdgpu_amdkfd_fence **fence_list;
+   struct reservation_object *resv = bo->tbo.resv;
+   struct reservation_object_list *old, *new;
+   unsigned int i, j, k;
  
  	if (!ef && !ef_list)

return -EINVAL;
@@ -220,76 +218,67 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct 
amdgpu_bo *bo,
*ef_count = 0;
}
  
-	resv = bo->tbo.resv;

-   fobj = reservation_object_get_list(resv);
-
-   if (!fobj)
+   old = reservation_object_get_list(resv);
+   if (!old)
return 0;
  
-	preempt_disable();

-   write_seqcount_begin(>seq);
+   new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
+ GFP_KERNEL);
+   if (!new)
+   return -ENOMEM;
  
-	/* Go through all the shared fences in the resevation object. If

-* ef is specified and it exists in the list, remove it and reduce the
-* count. If ef is not specified, then get the count of eviction fences
-* present.
+   /* Go through all the shared fences in the resevation object and sort
+* the interesting ones to the end of the list.
 */
-   shared_count = fobj->shared_count;
-   for (i = 0; i < shared_count; ++i) {
+   for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
struct dma_fence *f;
  
-		f = rcu_dereference_protected(fobj->shared[i],

+   f = rcu_dereference_protected(old->shared[i],
  reservation_object_held(resv));
  
-		if (ef) {

-   if 

Re: [PATCH] drm/amdgpu: fix amdgpu_amdkfd_remove_eviction_fence

2018-08-14 Thread Felix Kuehling
[+Harish]

I think this looks good for the most part. See one comment inline below.

But bear with me while I'm trying to understand what was wrong with the
old code. Please correct me if I get this wrong or point out anything
I'm missing.

The reservation_object_list looks to be protected by a combination of
three mechanisms:

  * Holding the reservation object
  * RCU
  * seqcount

Updating the fence list requires holding the reservation object. But
there are some readers that can be called without holding that lock
(reservation_object_copy_fences, reservation_object_get_fences_rcu,
reservation_object_wait_timeout_rcu,
reservation_object_test_signaled_rcu). They rely on RCU to work on a
copy and seqcount to make sure they had the most up-to-date information.
So any function updating the fence lists needs to do RCU and seqcount
correctly to prevent breaking those readers.

As I understand it, RCU with seqcount retry just means that readers will
spin retrying while there are writers, and writers are never blocked by
readers. Writers are blocked by each other, because they need to hold
the reservation.

The code you added looks a lot like
reservation_object_add_shared_replace, which removes fences that have
signalled, and atomically replaces obj->fences with a new
reservation_fence_list. That atomicity is important because each pointer
in the obj->fences->shared array is separately protected by RCU, but not
the array as a whole or the shared_count.

See one comment inline.

Regards,
  Felix

On 2018-08-14 03:39 AM, Christian König wrote:
> Fix quite a number of bugs here. Unfortunately only compile tested.
>
> Signed-off-by: Christian König 
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 103 
> ++-
>  1 file changed, 46 insertions(+), 57 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> index fa38a960ce00..dece31516dc4 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> @@ -206,11 +206,9 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct 
> amdgpu_bo *bo,
>   struct amdgpu_amdkfd_fence ***ef_list,
>   unsigned int *ef_count)
>  {
> - struct reservation_object_list *fobj;
> - struct reservation_object *resv;
> - unsigned int i = 0, j = 0, k = 0, shared_count;
> - unsigned int count = 0;
> - struct amdgpu_amdkfd_fence **fence_list;
> + struct reservation_object *resv = bo->tbo.resv;
> + struct reservation_object_list *old, *new;
> + unsigned int i, j, k;
>  
>   if (!ef && !ef_list)
>   return -EINVAL;
> @@ -220,76 +218,67 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct 
> amdgpu_bo *bo,
>   *ef_count = 0;
>   }
>  
> - resv = bo->tbo.resv;
> - fobj = reservation_object_get_list(resv);
> -
> - if (!fobj)
> + old = reservation_object_get_list(resv);
> + if (!old)
>   return 0;
>  
> - preempt_disable();
> - write_seqcount_begin(>seq);
> + new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
> +   GFP_KERNEL);
> + if (!new)
> + return -ENOMEM;
>  
> - /* Go through all the shared fences in the resevation object. If
> -  * ef is specified and it exists in the list, remove it and reduce the
> -  * count. If ef is not specified, then get the count of eviction fences
> -  * present.
> + /* Go through all the shared fences in the resevation object and sort
> +  * the interesting ones to the end of the list.
>*/
> - shared_count = fobj->shared_count;
> - for (i = 0; i < shared_count; ++i) {
> + for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
>   struct dma_fence *f;
>  
> - f = rcu_dereference_protected(fobj->shared[i],
> + f = rcu_dereference_protected(old->shared[i],
> reservation_object_held(resv));
>  
> - if (ef) {
> - if (f->context == ef->base.context) {
> - dma_fence_put(f);
> - fobj->shared_count--;
> - } else {
> - RCU_INIT_POINTER(fobj->shared[j++], f);
> - }
> - } else if (to_amdgpu_amdkfd_fence(f))
> - count++;
> + if ((ef && f->context == ef->base.context) ||
> + (!ef && to_amdgpu_amdkfd_fence(f)))
> + RCU_INIT_POINTER(new->shared[--j], f);
> + else
> + RCU_INIT_POINTER(new->shared[k++], f);
>   }
> - write_seqcount_end(>seq);
> - preempt_enable();
> + new->shared_max = old->shared_max;
> + new->shared_count = k;
>  
> - if (ef || !count)
> - 

[PATCH] drm/amdgpu: fix amdgpu_amdkfd_remove_eviction_fence

2018-08-14 Thread Christian König
Fix quite a number of bugs here. Unfortunately only compile tested.

Signed-off-by: Christian König 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 103 ++-
 1 file changed, 46 insertions(+), 57 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index fa38a960ce00..dece31516dc4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -206,11 +206,9 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct 
amdgpu_bo *bo,
struct amdgpu_amdkfd_fence ***ef_list,
unsigned int *ef_count)
 {
-   struct reservation_object_list *fobj;
-   struct reservation_object *resv;
-   unsigned int i = 0, j = 0, k = 0, shared_count;
-   unsigned int count = 0;
-   struct amdgpu_amdkfd_fence **fence_list;
+   struct reservation_object *resv = bo->tbo.resv;
+   struct reservation_object_list *old, *new;
+   unsigned int i, j, k;
 
if (!ef && !ef_list)
return -EINVAL;
@@ -220,76 +218,67 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct 
amdgpu_bo *bo,
*ef_count = 0;
}
 
-   resv = bo->tbo.resv;
-   fobj = reservation_object_get_list(resv);
-
-   if (!fobj)
+   old = reservation_object_get_list(resv);
+   if (!old)
return 0;
 
-   preempt_disable();
-   write_seqcount_begin(&resv->seq);
+   new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
+ GFP_KERNEL);
+   if (!new)
+   return -ENOMEM;
 
-   /* Go through all the shared fences in the resevation object. If
-* ef is specified and it exists in the list, remove it and reduce the
-* count. If ef is not specified, then get the count of eviction fences
-* present.
+   /* Go through all the shared fences in the resevation object and sort
+* the interesting ones to the end of the list.
 */
-   shared_count = fobj->shared_count;
-   for (i = 0; i < shared_count; ++i) {
+   for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
struct dma_fence *f;
 
-   f = rcu_dereference_protected(fobj->shared[i],
+   f = rcu_dereference_protected(old->shared[i],
  reservation_object_held(resv));
 
-   if (ef) {
-   if (f->context == ef->base.context) {
-   dma_fence_put(f);
-   fobj->shared_count--;
-   } else {
-   RCU_INIT_POINTER(fobj->shared[j++], f);
-   }
-   } else if (to_amdgpu_amdkfd_fence(f))
-   count++;
+   if ((ef && f->context == ef->base.context) ||
+   (!ef && to_amdgpu_amdkfd_fence(f)))
+   RCU_INIT_POINTER(new->shared[--j], f);
+   else
+   RCU_INIT_POINTER(new->shared[k++], f);
}
-   write_seqcount_end(&resv->seq);
-   preempt_enable();
+   new->shared_max = old->shared_max;
+   new->shared_count = k;
 
-   if (ef || !count)
-   return 0;
+   if (!ef) {
+   unsigned int count = old->shared_count - j;
 
-   /* Alloc memory for count number of eviction fence pointers. Fill the
-* ef_list array and ef_count
-*/
-   fence_list = kcalloc(count, sizeof(struct amdgpu_amdkfd_fence *),
-GFP_KERNEL);
-   if (!fence_list)
-   return -ENOMEM;
+   /* Alloc memory for count number of eviction fence pointers. 
Fill the
+* ef_list array and ef_count
+*/
+   *ef_list = kcalloc(count, sizeof(**ef_list), GFP_KERNEL);
+   *ef_count = count;
 
+   if (!*ef_list) {
+   kfree(new);
+   return -ENOMEM;
+   }
+   }
+
+   /* Install the new fence list, seqcount provides the barriers */
+   preempt_disable();
+   write_seqcount_begin(&resv->seq);
+   RCU_INIT_POINTER(resv->fence, new);
preempt_disable();
write_seqcount_begin(>seq);
 
-   j = 0;
-   for (i = 0; i < shared_count; ++i) {
+   /* Drop the references to the removed fences or move them to ef_list */
+   for (i = j, k = 0; i < old->shared_count; ++i) {
struct dma_fence *f;
-   struct amdgpu_amdkfd_fence *efence;
 
-   f = rcu_dereference_protected(fobj->shared[i],
-   reservation_object_held(resv));
-
-   efence = to_amdgpu_amdkfd_fence(f);
-   if (efence) {
-   fence_list[k++] = efence;
-