If we fail to allocate a request, we can reap the outstanding requests
and push them back to the request slab's freelist before trying again.
This lets us rate-limit malicious clients that tie up all of the system
resources in requests, instead of letting them cause a system-wide OOM.

Signed-off-by: Chris Wilson <[email protected]>
Cc: Joonas Lahtinen <[email protected]>
---
 drivers/gpu/drm/i915/i915_gem_request.c | 19 +++++++++++++++----
 1 file changed, 15 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_request.c 
b/drivers/gpu/drm/i915/i915_gem_request.c
index 4d5e2b714382..59f023bb7015 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -677,10 +677,21 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
         *
         * Do not use kmem_cache_zalloc() here!
         */
-       req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
-       if (!req) {
-               ret = -ENOMEM;
-               goto err_unreserve;
+       req = kmem_cache_alloc(dev_priv->requests,
+                              GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+       if (unlikely(!req)) {
+               /* Ratelimit ourselves to prevent oom from malicious clients */
+               ret = i915_gem_wait_for_idle(dev_priv,
+                                            I915_WAIT_LOCKED |
+                                            I915_WAIT_INTERRUPTIBLE);
+               if (ret)
+                       goto err_unreserve;
+
+               req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
+               if (!req) {
+                       ret = -ENOMEM;
+                       goto err_unreserve;
+               }
        }
 
        req->timeline = i915_gem_context_lookup_timeline(ctx, engine);
-- 
2.15.1

_______________________________________________
Intel-gfx mailing list
[email protected]
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to