Re: [Freedreno] [PATCH] fixup! drm/msm: Separate locking of buffer resources from struct_mutex

2017-06-16 Thread Sushmita Susheelendra

Sounds good.

On 2017-06-16 15:44, Rob Clark wrote:

On Fri, Jun 16, 2017 at 5:32 PM,   wrote:

Hi Rob,

This looks good to me!

Just one nit: msm_gem_vunmap becomes very shrinker specific as it holds the
msm_obj->lock with the shrinker class. Should we have the caller, i.e.
msm_gem_shrinker_vmap, hold it instead? Or change its name to
msm_gem_vunmap_shrinker or something like that...?


Hmm, I suppose I could pass the lock class in to msm_gem_vunmap() in
case we ever want to call it elsewhere (iirc, i915 did something like
that)

BR,
-R

It does make sense to do the madv/priv->inactive_list locking cleanup
separately.

Thanks,
Sushmita



On 2017-06-16 08:22, Rob Clark wrote:


---
Ok, 2nd fixup to handle vmap shrinking.

 drivers/gpu/drm/msm/msm_gem.c | 44 +++
 1 file changed, 36 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index f5d1f84..3190e05 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -42,6 +42,9 @@ enum {
OBJ_LOCK_SHRINKER,
 };

+static void msm_gem_vunmap_locked(struct drm_gem_object *obj);
+
+
 static dma_addr_t physaddr(struct drm_gem_object *obj)
 {
struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -484,6 +487,7 @@ int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 void *msm_gem_get_vaddr(struct drm_gem_object *obj)
 {
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+   int ret = 0;

mutex_lock(&msm_obj->lock);

@@ -492,22 +496,35 @@ void *msm_gem_get_vaddr(struct drm_gem_object *obj)
return ERR_PTR(-EBUSY);
}

+   /* increment vmap_count *before* vmap() call, so shrinker can
+* check vmap_count (is_vunmapable()) outside of msm_obj->lock.
+* This guarantees that we won't try to msm_gem_vunmap() this
+* same object from within the vmap() call (while we already
+* hold msm_obj->lock)
+*/
+   msm_obj->vmap_count++;
+
if (!msm_obj->vaddr) {
struct page **pages = get_pages(obj);
if (IS_ERR(pages)) {
-   mutex_unlock(&msm_obj->lock);
-   return ERR_CAST(pages);
+   ret = PTR_ERR(pages);
+   goto fail;
}
msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
VM_MAP, pgprot_writecombine(PAGE_KERNEL));
if (msm_obj->vaddr == NULL) {
-   mutex_unlock(&msm_obj->lock);
-   return ERR_PTR(-ENOMEM);
+   ret = -ENOMEM;
+   goto fail;
}
}
-   msm_obj->vmap_count++;
+
mutex_unlock(&msm_obj->lock);
return msm_obj->vaddr;
+
+fail:
+   msm_obj->vmap_count--;
+   mutex_unlock(&msm_obj->lock);
+   return ERR_PTR(ret);
 }

 void msm_gem_put_vaddr(struct drm_gem_object *obj)
@@ -554,7 +571,7 @@ void msm_gem_purge(struct drm_gem_object *obj)

put_iova(obj);

-   msm_gem_vunmap(obj);
+   msm_gem_vunmap_locked(obj);

put_pages(obj);

@@ -576,10 +593,12 @@ void msm_gem_purge(struct drm_gem_object *obj)
mutex_unlock(&msm_obj->lock);
 }

-void msm_gem_vunmap(struct drm_gem_object *obj)
+static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
 {
struct msm_gem_object *msm_obj = to_msm_bo(obj);

+   WARN_ON(!mutex_is_locked(&msm_obj->lock));
+
if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
return;

@@ -587,6 +606,15 @@ void msm_gem_vunmap(struct drm_gem_object *obj)
msm_obj->vaddr = NULL;
 }

+void msm_gem_vunmap(struct drm_gem_object *obj)
+{
+   struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+   mutex_lock_nested(&msm_obj->lock, OBJ_LOCK_SHRINKER);
+   msm_gem_vunmap_locked(obj);
+   mutex_unlock(&msm_obj->lock);
+}
+
 /* must be called before _move_to_active().. */
 int msm_gem_sync_object(struct drm_gem_object *obj,
struct msm_fence_context *fctx, bool exclusive)
@@ -799,7 +827,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)

drm_prime_gem_destroy(obj, msm_obj->sgt);
} else {
-   msm_gem_vunmap(obj);
+   msm_gem_vunmap_locked(obj);
put_pages(obj);
}

___
Freedreno mailing list
Freedreno@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/freedreno



Re: [Freedreno] [PATCH] fixup! drm/msm: Separate locking of buffer resources from struct_mutex

2017-06-16 Thread Rob Clark
On Fri, Jun 16, 2017 at 5:32 PM,   wrote:
> Hi Rob,
>
> This looks good to me!
>
> Just one nit: msm_gem_vunmap becomes very shrinker specific as it holds the
> msm_obj->lock with the shrinker class. Should we have the caller i.e.
> msm_gem_shrinker_vmap hold it instead? Or change its name to
> msm_gem_vunmap_shrinker or something like that...?

Hmm, I suppose I could pass the lock class in to msm_gem_vunmap() in
case we ever want to call it elsewhere (iirc, i915 did something like
that)
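
Something like the below, maybe (just a sketch on top of this fixup; the
extra subclass parameter isn't in the posted patch):

	void msm_gem_vunmap(struct drm_gem_object *obj, int subclass)
	{
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		/* caller picks the lockdep subclass: OBJ_LOCK_SHRINKER from
		 * the shrinker path, OBJ_LOCK_NORMAL everywhere else
		 */
		mutex_lock_nested(&msm_obj->lock, subclass);
		msm_gem_vunmap_locked(obj);
		mutex_unlock(&msm_obj->lock);
	}

	/* e.g. from msm_gem_shrinker_vmap(): */
	msm_gem_vunmap(obj, OBJ_LOCK_SHRINKER);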

BR,
-R

> It does make sense to do the madv/priv->inactive_list locking cleanup
> separately.
>
> Thanks,
> Sushmita
>
>
>
> On 2017-06-16 08:22, Rob Clark wrote:
>>
>> ---
>> Ok, 2nd fixup to handle vmap shrinking.
>>
>>  drivers/gpu/drm/msm/msm_gem.c | 44 +++
>>  1 file changed, 36 insertions(+), 8 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
>> index f5d1f84..3190e05 100644
>> --- a/drivers/gpu/drm/msm/msm_gem.c
>> +++ b/drivers/gpu/drm/msm/msm_gem.c
>> @@ -42,6 +42,9 @@ enum {
>> OBJ_LOCK_SHRINKER,
>>  };
>>
>> +static void msm_gem_vunmap_locked(struct drm_gem_object *obj);
>> +
>> +
>>  static dma_addr_t physaddr(struct drm_gem_object *obj)
>>  {
>> struct msm_gem_object *msm_obj = to_msm_bo(obj);
>> @@ -484,6 +487,7 @@ int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
>>  void *msm_gem_get_vaddr(struct drm_gem_object *obj)
>>  {
>> struct msm_gem_object *msm_obj = to_msm_bo(obj);
>> +   int ret = 0;
>>
>> mutex_lock(&msm_obj->lock);
>>
>> @@ -492,22 +496,35 @@ void *msm_gem_get_vaddr(struct drm_gem_object *obj)
>> return ERR_PTR(-EBUSY);
>> }
>>
>> +   /* increment vmap_count *before* vmap() call, so shrinker can
>> +* check vmap_count (is_vunmapable()) outside of msm_obj->lock.
>> +* This guarantees that we won't try to msm_gem_vunmap() this
>> +* same object from within the vmap() call (while we already
>> +* hold msm_obj->lock)
>> +*/
>> +   msm_obj->vmap_count++;
>> +
>> if (!msm_obj->vaddr) {
>> struct page **pages = get_pages(obj);
>> if (IS_ERR(pages)) {
>> -   mutex_unlock(&msm_obj->lock);
>> -   return ERR_CAST(pages);
>> +   ret = PTR_ERR(pages);
>> +   goto fail;
>> }
>> msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
>> VM_MAP, pgprot_writecombine(PAGE_KERNEL));
>> if (msm_obj->vaddr == NULL) {
>> -   mutex_unlock(&msm_obj->lock);
>> -   return ERR_PTR(-ENOMEM);
>> +   ret = -ENOMEM;
>> +   goto fail;
>> }
>> }
>> -   msm_obj->vmap_count++;
>> +
>> mutex_unlock(&msm_obj->lock);
>> return msm_obj->vaddr;
>> +
>> +fail:
>> +   msm_obj->vmap_count--;
>> +   mutex_unlock(&msm_obj->lock);
>> +   return ERR_PTR(ret);
>>  }
>>
>>  void msm_gem_put_vaddr(struct drm_gem_object *obj)
>> @@ -554,7 +571,7 @@ void msm_gem_purge(struct drm_gem_object *obj)
>>
>> put_iova(obj);
>>
>> -   msm_gem_vunmap(obj);
>> +   msm_gem_vunmap_locked(obj);
>>
>> put_pages(obj);
>>
>> @@ -576,10 +593,12 @@ void msm_gem_purge(struct drm_gem_object *obj)
>> mutex_unlock(&msm_obj->lock);
>>  }
>>
>> -void msm_gem_vunmap(struct drm_gem_object *obj)
>> +static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
>>  {
>> struct msm_gem_object *msm_obj = to_msm_bo(obj);
>>
>> +   WARN_ON(!mutex_is_locked(&msm_obj->lock));
>> +
>> if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
>> return;
>>
>> @@ -587,6 +606,15 @@ void msm_gem_vunmap(struct drm_gem_object *obj)
>> msm_obj->vaddr = NULL;
>>  }
>>
>> +void msm_gem_vunmap(struct drm_gem_object *obj)
>> +{
>> +   struct msm_gem_object *msm_obj = to_msm_bo(obj);
>> +
>> +   mutex_lock_nested(&msm_obj->lock, OBJ_LOCK_SHRINKER);
>> +   msm_gem_vunmap_locked(obj);
>> +   mutex_unlock(&msm_obj->lock);
>> +}
>> +
>>  /* must be called before _move_to_active().. */
>>  int msm_gem_sync_object(struct drm_gem_object *obj,
>> struct msm_fence_context *fctx, bool exclusive)
>> @@ -799,7 +827,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
>>
>> drm_prime_gem_destroy(obj, msm_obj->sgt);
>> } else {
>> -   msm_gem_vunmap(obj);
>> +   msm_gem_vunmap_locked(obj);
>> put_pages(obj);
>> }
___
Freedreno mailing list
Freedreno@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/freedreno


Re: [Freedreno] [PATCH] fixup! drm/msm: Separate locking of buffer resources from struct_mutex

2017-06-16 Thread ssusheel

Hi Rob,

This looks good to me!

Just one nit: msm_gem_vunmap becomes very shrinker specific as it holds 
the msm_obj->lock with the shrinker class. Should we have the caller 
i.e. msm_gem_shrinker_vmap hold it instead? Or change its name to 
msm_gem_vunmap_shrinker or something like that...?


It does make sense to do the madv/priv->inactive_list locking cleanup
separately.


Thanks,
Sushmita


On 2017-06-16 08:22, Rob Clark wrote:

---
Ok, 2nd fixup to handle vmap shrinking.

 drivers/gpu/drm/msm/msm_gem.c | 44 +++
 1 file changed, 36 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index f5d1f84..3190e05 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -42,6 +42,9 @@ enum {
OBJ_LOCK_SHRINKER,
 };

+static void msm_gem_vunmap_locked(struct drm_gem_object *obj);
+
+
 static dma_addr_t physaddr(struct drm_gem_object *obj)
 {
struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -484,6 +487,7 @@ int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 void *msm_gem_get_vaddr(struct drm_gem_object *obj)
 {
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+   int ret = 0;

mutex_lock(&msm_obj->lock);

@@ -492,22 +496,35 @@ void *msm_gem_get_vaddr(struct drm_gem_object *obj)
return ERR_PTR(-EBUSY);
}

+   /* increment vmap_count *before* vmap() call, so shrinker can
+* check vmap_count (is_vunmapable()) outside of msm_obj->lock.
+* This guarantees that we won't try to msm_gem_vunmap() this
+* same object from within the vmap() call (while we already
+* hold msm_obj->lock)
+*/
+   msm_obj->vmap_count++;
+
if (!msm_obj->vaddr) {
struct page **pages = get_pages(obj);
if (IS_ERR(pages)) {
-   mutex_unlock(&msm_obj->lock);
-   return ERR_CAST(pages);
+   ret = PTR_ERR(pages);
+   goto fail;
}
msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
VM_MAP, pgprot_writecombine(PAGE_KERNEL));
if (msm_obj->vaddr == NULL) {
-   mutex_unlock(&msm_obj->lock);
-   return ERR_PTR(-ENOMEM);
+   ret = -ENOMEM;
+   goto fail;
}
}
-   msm_obj->vmap_count++;
+
mutex_unlock(&msm_obj->lock);
return msm_obj->vaddr;
+
+fail:
+   msm_obj->vmap_count--;
+   mutex_unlock(&msm_obj->lock);
+   return ERR_PTR(ret);
 }

 void msm_gem_put_vaddr(struct drm_gem_object *obj)
@@ -554,7 +571,7 @@ void msm_gem_purge(struct drm_gem_object *obj)

put_iova(obj);

-   msm_gem_vunmap(obj);
+   msm_gem_vunmap_locked(obj);

put_pages(obj);

@@ -576,10 +593,12 @@ void msm_gem_purge(struct drm_gem_object *obj)
mutex_unlock(&msm_obj->lock);
 }

-void msm_gem_vunmap(struct drm_gem_object *obj)
+static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
 {
struct msm_gem_object *msm_obj = to_msm_bo(obj);

+   WARN_ON(!mutex_is_locked(&msm_obj->lock));
+
if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
return;

@@ -587,6 +606,15 @@ void msm_gem_vunmap(struct drm_gem_object *obj)
msm_obj->vaddr = NULL;
 }

+void msm_gem_vunmap(struct drm_gem_object *obj)
+{
+   struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+   mutex_lock_nested(&msm_obj->lock, OBJ_LOCK_SHRINKER);
+   msm_gem_vunmap_locked(obj);
+   mutex_unlock(&msm_obj->lock);
+}
+
 /* must be called before _move_to_active().. */
 int msm_gem_sync_object(struct drm_gem_object *obj,
struct msm_fence_context *fctx, bool exclusive)
@@ -799,7 +827,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)

drm_prime_gem_destroy(obj, msm_obj->sgt);
} else {
-   msm_gem_vunmap(obj);
+   msm_gem_vunmap_locked(obj);
put_pages(obj);
}

___
Freedreno mailing list
Freedreno@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/freedreno


[Freedreno] [PATCH] fixup! drm/msm: Separate locking of buffer resources from struct_mutex

2017-06-16 Thread Rob Clark
---
Ok, 2nd fixup to handle vmap shrinking.

 drivers/gpu/drm/msm/msm_gem.c | 44 +++
 1 file changed, 36 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index f5d1f84..3190e05 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -42,6 +42,9 @@ enum {
OBJ_LOCK_SHRINKER,
 };
 
+static void msm_gem_vunmap_locked(struct drm_gem_object *obj);
+
+
 static dma_addr_t physaddr(struct drm_gem_object *obj)
 {
struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -484,6 +487,7 @@ int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 void *msm_gem_get_vaddr(struct drm_gem_object *obj)
 {
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+   int ret = 0;
 
mutex_lock(&msm_obj->lock);
 
@@ -492,22 +496,35 @@ void *msm_gem_get_vaddr(struct drm_gem_object *obj)
return ERR_PTR(-EBUSY);
}
 
+   /* increment vmap_count *before* vmap() call, so shrinker can
+* check vmap_count (is_vunmapable()) outside of msm_obj->lock.
+* This guarantees that we won't try to msm_gem_vunmap() this
+* same object from within the vmap() call (while we already
+* hold msm_obj->lock)
+*/
+   msm_obj->vmap_count++;
+
if (!msm_obj->vaddr) {
struct page **pages = get_pages(obj);
if (IS_ERR(pages)) {
-   mutex_unlock(&msm_obj->lock);
-   return ERR_CAST(pages);
+   ret = PTR_ERR(pages);
+   goto fail;
}
msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
VM_MAP, pgprot_writecombine(PAGE_KERNEL));
if (msm_obj->vaddr == NULL) {
-   mutex_unlock(&msm_obj->lock);
-   return ERR_PTR(-ENOMEM);
+   ret = -ENOMEM;
+   goto fail;
}
}
-   msm_obj->vmap_count++;
+
mutex_unlock(&msm_obj->lock);
return msm_obj->vaddr;
+
+fail:
+   msm_obj->vmap_count--;
+   mutex_unlock(&msm_obj->lock);
+   return ERR_PTR(ret);
 }
 
 void msm_gem_put_vaddr(struct drm_gem_object *obj)
@@ -554,7 +571,7 @@ void msm_gem_purge(struct drm_gem_object *obj)
 
put_iova(obj);
 
-   msm_gem_vunmap(obj);
+   msm_gem_vunmap_locked(obj);
 
put_pages(obj);
 
@@ -576,10 +593,12 @@ void msm_gem_purge(struct drm_gem_object *obj)
mutex_unlock(&msm_obj->lock);
 }
 
-void msm_gem_vunmap(struct drm_gem_object *obj)
+static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
 {
struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
+   WARN_ON(!mutex_is_locked(&msm_obj->lock));
+
if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
return;
 
@@ -587,6 +606,15 @@ void msm_gem_vunmap(struct drm_gem_object *obj)
msm_obj->vaddr = NULL;
 }
 
+void msm_gem_vunmap(struct drm_gem_object *obj)
+{
+   struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+   mutex_lock_nested(&msm_obj->lock, OBJ_LOCK_SHRINKER);
+   msm_gem_vunmap_locked(obj);
+   mutex_unlock(&msm_obj->lock);
+}
+
 /* must be called before _move_to_active().. */
 int msm_gem_sync_object(struct drm_gem_object *obj,
struct msm_fence_context *fctx, bool exclusive)
@@ -799,7 +827,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 
drm_prime_gem_destroy(obj, msm_obj->sgt);
} else {
-   msm_gem_vunmap(obj);
+   msm_gem_vunmap_locked(obj);
put_pages(obj);
}
 
-- 
2.9.4
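
For reference, the is_vunmapable() check mentioned in the new comment is
roughly the helper below from msm_gem.h -- sketch from memory, the exact
definition may differ slightly:

	static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
	{
		/* only safe to vunmap once nothing holds a vaddr reference */
		return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
	}

which is why bumping vmap_count before the vmap() call keeps the shrinker
from trying to vunmap this same object while we still hold msm_obj->lock.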

___
Freedreno mailing list
Freedreno@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/freedreno


Re: [Freedreno] [PATCH] fixup! drm/msm: Separate locking of buffer resources from struct_mutex

2017-06-15 Thread Rob Clark
On Thu, Jun 15, 2017 at 5:59 PM, Susheelendra, Sushmita
 wrote:
> Hi Rob,
>
> I can see how we can trigger the shrinker on objB while holding objA->lock.
> So, the nested lock with class SHRINKER makes sense.
> However, I’m trying to figure how the get_pages/vmap/fault path on an objA
> can end up triggering the shrinker on objA itself. As objA itself would not
> be purgeable (msm_obj->sgt would not be set)/vunmappable (msm_obj->vaddr
> would not be set) yet at that point, we would never end up calling
> msm_gem_purge or msm_gem_vunmap on objA itself right? If that is the case,
> we may not need the (msm_obj->madv == MSM_MADV_WILLNEED) check? Or am I
> missing something here?

get_pages() would set msm_obj->sgt.. I guess that is protected by
msm_obj->lock, so maybe it would be safe to drop the WILLNEED check.
Otoh, I think including the check makes things clearer, and it is a bit
more future-proof.

We do check is_purgeable() outside of msm_obj->lock.. I'd have to think
about it for more than a few seconds, but it seems like keeping the
WILLNEED check is a good idea.
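
(For context, is_purgeable() is roughly the below -- simplified sketch; the
real helper in msm_gem.h may also filter out imported/dma-buf objects:

	static inline bool is_purgeable(struct msm_gem_object *msm_obj)
	{
		/* only objects userspace marked DONTNEED and that still have
		 * backing pages (sgt) are candidates for msm_gem_purge()
		 */
		return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt;
	}

so an object whose pages haven't been allocated yet can't be picked by the
shrinker, which is Sushmita's point above.)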

> I think shrinker_vmap would also need the nested SHRINKER lock before it
> calls msm_gem_vunmap because a vmap on objA could trigger msm_gem_vunmap on
> objB.

hmm, right.. I guess I still need to test this w/ 32b build (where I
guess vmap shrinker is more likely), but I think you are right about
needing the nested lock in _vunmap() as well.

> I really like the idea of protecting priv->inactive_list with a separate
> lock as that is pretty much why the shrinker needs to hold struct_mutex.

yeah, the rough idea is to split out (probably?) spinlocks to protect the
list and madv.  Then I think we could drop the struct_mutex lock for the
shrinker.
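
Very rough sketch of that direction (nothing here is implemented, the lock
name is made up):

	/* in struct msm_drm_private */
	spinlock_t mm_lock;		/* protects inactive_list (and madv?) */
	struct list_head inactive_list;

	/* then moving objects on/off the list doesn't need struct_mutex: */
	spin_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	spin_unlock(&priv->mm_lock);

with madv getting similar treatment, so the shrinker could walk the list
without holding struct_mutex.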

I think this first patch is pretty close to being ready in time to
queue up for 4.13 (which I probably need to do this weekend).  We
should try and tackle the list+madv locks for 4.14, I think, since
this is already a pretty big change.

BR,
-R

> Thanks,
> Sushmita
>
> On Jun 15, 2017, at 7:20 AM, Rob Clark  wrote:
>
> ---
> This is roughly based on Chris's suggestion, in particular the part
> about using mutex_lock_nested().  It's not *exactly* the same, in
> particular msm_obj->lock protects a bit more than just backing store
> and we don't currently track a pin_count.  (Instead we currently
> keep pages pinned until the object is purged or freed.)
>
> Instead of making msm_obj->lock only cover backing store, it is
> easier to split out madv, which is still protected by struct_mutex,
> which is still held by the shrinker, so the shrinker does not need
> to grab msm_obj->lock until it purges an object.  We avoid going
> down any path that could trigger shrinker by ensuring that
> msm_obj->madv == WILLNEED.  To synchronize access to msm_obj->madv
> it is protected by msm_obj->lock inside struct_mutex.
>
> This seems to keep lockdep happy in my testing so far.
>
> drivers/gpu/drm/msm/msm_gem.c  | 54 --
> drivers/gpu/drm/msm/msm_gem.h  |  1 +
> drivers/gpu/drm/msm/msm_gem_shrinker.c | 12 
> 3 files changed, 65 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
> index e132548..f5d1f84 100644
> --- a/drivers/gpu/drm/msm/msm_gem.c
> +++ b/drivers/gpu/drm/msm/msm_gem.c
> @@ -26,6 +26,22 @@
> #include "msm_gpu.h"
> #include "msm_mmu.h"
>
> +/* The shrinker can be triggered while we hold objA->lock, and need
> + * to grab objB->lock to purge it.  Lockdep just sees these as a single
> + * class of lock, so we use subclasses to teach it the difference.
> + *
> + * OBJ_LOCK_NORMAL is implicit (ie. normal mutex_lock() call), and
> + * OBJ_LOCK_SHRINKER is used in msm_gem_purge().
> + *
> + * It is *essential* that we never go down paths that could trigger the
> + * shrinker for a purgable object.  This is ensured by checking that
> + * msm_obj->madv == MSM_MADV_WILLNEED.
> + */
> +enum {
> + OBJ_LOCK_NORMAL,
> + OBJ_LOCK_SHRINKER,
> +};
> +
> static dma_addr_t physaddr(struct drm_gem_object *obj)
> {
> struct msm_gem_object *msm_obj = to_msm_bo(obj);
> @@ -150,6 +166,12 @@ struct page **msm_gem_get_pages(struct drm_gem_object *obj)
> struct page **p;
>
> mutex_lock(&msm_obj->lock);
> +
> + if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
> + mutex_unlock(&msm_obj->lock);
> + return ERR_PTR(-EBUSY);
> + }
> +
> p = get_pages(obj);
> mutex_unlock(&msm_obj->lock);
> return p;
> @@ -220,6 +242,11 @@ int msm_gem_fault(struct vm_fault *vmf)
> if (ret)
> goto out;
>
> + if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
> + mutex_unlock(&msm_obj->lock);
> + return VM_FAULT_SIGBUS;
> + }
> +
> /* make sure we have pages attached now */
> pages = get_pages(obj);
> if (IS_ERR(pages)) {
> @@ -358,6 +385,11 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
>
> mutex_lock(&msm_obj->lock);
>
> + if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
> + mutex_unlock(&msm_obj->lock);
> + return -EBUSY;
> + }
> +
> vm

Re: [Freedreno] [PATCH] fixup! drm/msm: Separate locking of buffer resources from struct_mutex

2017-06-15 Thread Susheelendra, Sushmita
Hi Rob,

I can see how we can trigger the shrinker on objB while holding objA->lock. So, 
the nested lock with class SHRINKER makes sense.
However, I’m trying to figure how the get_pages/vmap/fault path on an objA can 
end up triggering the shrinker on objA itself. As objA itself would not be 
purgeable (msm_obj->sgt would not be set)/vunmappable (msm_obj->vaddr would not 
be set) yet at that point, we would never end up calling msm_gem_purge or 
msm_gem_vunmap on objA itself right? If that is the case, we may not need the 
(msm_obj->madv == MSM_MADV_WILLNEED) check? Or am I missing something here?

I think shrinker_vmap would also need the nested SHRINKER lock before it calls 
msm_gem_vunmap because a vmap on objA could trigger msm_gem_vunmap on objB.

I really like the idea of protecting priv->inactive_list with a separate lock 
as that is pretty much why the shrinker needs to hold struct_mutex.

Thanks,
Sushmita

> On Jun 15, 2017, at 7:20 AM, Rob Clark  wrote:
> 
> ---
> This is roughly based on Chris's suggestion, in particular the part
> about using mutex_lock_nested().  It's not *exactly* the same, in
> particular msm_obj->lock protects a bit more than just backing store
> and we don't currently track a pin_count.  (Instead we currently
> keep pages pinned until the object is purged or freed.)
> 
> Instead of making msm_obj->lock only cover backing store, it is
> easier to split out madv, which is still protected by struct_mutex,
> which is still held by the shrinker, so the shrinker does not need
> to grab msm_obj->lock until it purges an object.  We avoid going
> down any path that could trigger shrinker by ensuring that
> msm_obj->madv == WILLNEED.  To synchronize access to msm_obj->madv
> it is protected by msm_obj->lock inside struct_mutex.
> 
> This seems to keep lockdep happy in my testing so far.
> 
> drivers/gpu/drm/msm/msm_gem.c  | 54 --
> drivers/gpu/drm/msm/msm_gem.h  |  1 +
> drivers/gpu/drm/msm/msm_gem_shrinker.c | 12 
> 3 files changed, 65 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
> index e132548..f5d1f84 100644
> --- a/drivers/gpu/drm/msm/msm_gem.c
> +++ b/drivers/gpu/drm/msm/msm_gem.c
> @@ -26,6 +26,22 @@
> #include "msm_gpu.h"
> #include "msm_mmu.h"
> 
> +/* The shrinker can be triggered while we hold objA->lock, and need
> + * to grab objB->lock to purge it.  Lockdep just sees these as a single
> + * class of lock, so we use subclasses to teach it the difference.
> + *
> + * OBJ_LOCK_NORMAL is implicit (ie. normal mutex_lock() call), and
> + * OBJ_LOCK_SHRINKER is used in msm_gem_purge().
> + *
> + * It is *essential* that we never go down paths that could trigger the
> + * shrinker for a purgable object.  This is ensured by checking that
> + * msm_obj->madv == MSM_MADV_WILLNEED.
> + */
> +enum {
> + OBJ_LOCK_NORMAL,
> + OBJ_LOCK_SHRINKER,
> +};
> +
> static dma_addr_t physaddr(struct drm_gem_object *obj)
> {
>   struct msm_gem_object *msm_obj = to_msm_bo(obj);
> @@ -150,6 +166,12 @@ struct page **msm_gem_get_pages(struct drm_gem_object *obj)
>   struct page **p;
> 
>   mutex_lock(&msm_obj->lock);
> +
> + if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
> + mutex_unlock(&msm_obj->lock);
> + return ERR_PTR(-EBUSY);
> + }
> +
>   p = get_pages(obj);
>   mutex_unlock(&msm_obj->lock);
>   return p;
> @@ -220,6 +242,11 @@ int msm_gem_fault(struct vm_fault *vmf)
>   if (ret)
>   goto out;
> 
> + if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
> + mutex_unlock(&msm_obj->lock);
> + return VM_FAULT_SIGBUS;
> + }
> +
>   /* make sure we have pages attached now */
>   pages = get_pages(obj);
>   if (IS_ERR(pages)) {
> @@ -358,6 +385,11 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
> 
>   mutex_lock(&msm_obj->lock);
> 
> + if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
> + mutex_unlock(&msm_obj->lock);
> + return -EBUSY;
> + }
> +
>   vma = lookup_vma(obj, aspace);
> 
>   if (!vma) {
> @@ -454,6 +486,12 @@ void *msm_gem_get_vaddr(struct drm_gem_object *obj)
>   struct msm_gem_object *msm_obj = to_msm_bo(obj);
> 
>   mutex_lock(&msm_obj->lock);
> +
> + if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
> + mutex_unlock(&msm_obj->lock);
> + return ERR_PTR(-EBUSY);
> + }
> +
>   if (!msm_obj->vaddr) {
>   struct page **pages = get_pages(obj);
>   if (IS_ERR(pages)) {
> @@ -489,12 +527,18 @@ int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
> {
>   struct msm_gem_object *msm_obj = to_msm_bo(obj);
> 
> + mutex_lock(&msm_obj->lock);
> +
>   WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
> 
>   if (msm_obj->madv != __MSM_MADV_PURGED)
>   msm_obj->madv = madv;
> 
> - return (ms

[Freedreno] [PATCH] fixup! drm/msm: Separate locking of buffer resources from struct_mutex

2017-06-15 Thread Rob Clark
---
This is roughly based on Chris's suggestion, in particular the part
about using mutex_lock_nested().  It's not *exactly* the same, in
particular msm_obj->lock protects a bit more than just backing store
and we don't currently track a pin_count.  (Instead we currently
keep pages pinned until the object is purged or freed.)

Instead of making msm_obj->lock only cover backing store, it is
easier to split out madv, which is still protected by struct_mutex,
which is still held by the shrinker, so the shrinker does not need
to grab msm_obj->lock until it purges an object.  We avoid going
down any path that could trigger shrinker by ensuring that
msm_obj->madv == WILLNEED.  To synchronize access to msm_obj->madv
it is protected by msm_obj->lock inside struct_mutex.

This seems to keep lockdep happy in my testing so far.
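
The nesting it teaches lockdep about is basically (sketch):

	/* e.g. get_pages() on objA enters direct reclaim ... */
	mutex_lock(&obj_a->lock);		/* implicit OBJ_LOCK_NORMAL */
	...
		/* ... and the shrinker purges some other object objB */
		mutex_lock_nested(&obj_b->lock, OBJ_LOCK_SHRINKER);
		...
		mutex_unlock(&obj_b->lock);
	mutex_unlock(&obj_a->lock);

Both locks are in the same lock class (msm_obj->lock), so without the
subclass annotation lockdep would report this as recursive locking.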

 drivers/gpu/drm/msm/msm_gem.c  | 54 --
 drivers/gpu/drm/msm/msm_gem.h  |  1 +
 drivers/gpu/drm/msm/msm_gem_shrinker.c | 12 
 3 files changed, 65 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index e132548..f5d1f84 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -26,6 +26,22 @@
 #include "msm_gpu.h"
 #include "msm_mmu.h"
 
+/* The shrinker can be triggered while we hold objA->lock, and need
+ * to grab objB->lock to purge it.  Lockdep just sees these as a single
+ * class of lock, so we use subclasses to teach it the difference.
+ *
+ * OBJ_LOCK_NORMAL is implicit (ie. normal mutex_lock() call), and
+ * OBJ_LOCK_SHRINKER is used in msm_gem_purge().
+ *
+ * It is *essential* that we never go down paths that could trigger the
+ * shrinker for a purgable object.  This is ensured by checking that
+ * msm_obj->madv == MSM_MADV_WILLNEED.
+ */
+enum {
+   OBJ_LOCK_NORMAL,
+   OBJ_LOCK_SHRINKER,
+};
+
 static dma_addr_t physaddr(struct drm_gem_object *obj)
 {
struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -150,6 +166,12 @@ struct page **msm_gem_get_pages(struct drm_gem_object *obj)
struct page **p;
 
mutex_lock(&msm_obj->lock);
+
+   if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
+   mutex_unlock(&msm_obj->lock);
+   return ERR_PTR(-EBUSY);
+   }
+
p = get_pages(obj);
mutex_unlock(&msm_obj->lock);
return p;
@@ -220,6 +242,11 @@ int msm_gem_fault(struct vm_fault *vmf)
if (ret)
goto out;
 
+   if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
+   mutex_unlock(&msm_obj->lock);
+   return VM_FAULT_SIGBUS;
+   }
+
/* make sure we have pages attached now */
pages = get_pages(obj);
if (IS_ERR(pages)) {
@@ -358,6 +385,11 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
 
mutex_lock(&msm_obj->lock);
 
+   if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
+   mutex_unlock(&msm_obj->lock);
+   return -EBUSY;
+   }
+
vma = lookup_vma(obj, aspace);
 
if (!vma) {
@@ -454,6 +486,12 @@ void *msm_gem_get_vaddr(struct drm_gem_object *obj)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
mutex_lock(&msm_obj->lock);
+
+   if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
+   mutex_unlock(&msm_obj->lock);
+   return ERR_PTR(-EBUSY);
+   }
+
if (!msm_obj->vaddr) {
struct page **pages = get_pages(obj);
if (IS_ERR(pages)) {
@@ -489,12 +527,18 @@ int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
 {
struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
+   mutex_lock(&msm_obj->lock);
+
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
 
if (msm_obj->madv != __MSM_MADV_PURGED)
msm_obj->madv = madv;
 
-   return (msm_obj->madv != __MSM_MADV_PURGED);
+   madv = msm_obj->madv;
+
+   mutex_unlock(&msm_obj->lock);
+
+   return (madv != __MSM_MADV_PURGED);
 }
 
 void msm_gem_purge(struct drm_gem_object *obj)
@@ -506,6 +550,8 @@ void msm_gem_purge(struct drm_gem_object *obj)
WARN_ON(!is_purgeable(msm_obj));
WARN_ON(obj->import_attach);
 
+   mutex_lock_nested(&msm_obj->lock, OBJ_LOCK_SHRINKER);
+
put_iova(obj);
 
msm_gem_vunmap(obj);
@@ -526,6 +572,8 @@ void msm_gem_purge(struct drm_gem_object *obj)
 
invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
0, (loff_t)-1);
+
+   mutex_unlock(&msm_obj->lock);
 }
 
 void msm_gem_vunmap(struct drm_gem_object *obj)
@@ -660,7 +708,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
uint64_t off = drm_vma_node_start(&obj->vma_node);
const char *madv;
 
-   WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+   mutex_lock(&msm_obj->lock);
 
switch (msm_obj->madv) {
case __MSM_MADV_PURGED:
@@ -701,6 +749,8 @@ void