URL: https://github.com/SSSD/sssd/pull/934
Author: alexey-tikhonov
 Title: #934: SBUS: defer deallocation of sbus_watch_ctx
Action: opened

PR body:
"""
The following flow was causing a use-after-free error:
  tevent_common_invoke_fd_handler(RW) -> sbus_watch_handler(RW) ->
  dbus_watch_handle(R) -> ...libdbus detects connection is closed... ->
  sbus_watch_remove() -> talloc_free(watch_fd) ->
  ... get back to libdbus and back to sbus_watch_handler() ->
  "if (watch_fd->dbus_watch.write != NULL) dbus_watch_handle(W)"
  => use-after-free

To resolve the issue, schedule the deallocation of the watch as an immediate event.

Resolves: https://pagure.io/SSSD/sssd/issue/2660
"""

To pull the PR as Git branch:
git remote add ghsssd https://github.com/SSSD/sssd
git fetch ghsssd pull/934/head:pr934
git checkout pr934
From e325bdccd7e587906caeae07afb3edb220baf6dd Mon Sep 17 00:00:00 2001
From: Alexey Tikhonov <[email protected]>
Date: Mon, 11 Nov 2019 18:01:50 +0100
Subject: [PATCH] SBUS: defer deallocation of sbus_watch_ctx

The following flow was causing a use-after-free error:
  tevent_common_invoke_fd_handler(RW) -> sbus_watch_handler(RW) ->
  dbus_watch_handle(R) -> ...libdbus detects connection is closed... ->
  sbus_watch_remove() -> talloc_free(watch_fd) ->
  ... get back to libdbus and back to sbus_watch_handler() ->
  "if (watch_fd->dbus_watch.write != NULL) dbus_watch_handle(W)"
  => use-after-free

To resolve the issue, schedule the deallocation of the watch as an immediate event.

Resolves: https://pagure.io/SSSD/sssd/issue/2660
---
 src/sbus/connection/sbus_watch.c | 33 ++++++++++++++++++++++++++++++--
 1 file changed, 31 insertions(+), 2 deletions(-)

diff --git a/src/sbus/connection/sbus_watch.c b/src/sbus/connection/sbus_watch.c
index 0e4bd01d10..ba57c3f33a 100644
--- a/src/sbus/connection/sbus_watch.c
+++ b/src/sbus/connection/sbus_watch.c
@@ -253,10 +253,20 @@ sbus_watch_add(DBusWatch *dbus_watch, void *data)
     return TRUE;
 }
 
+static void
+free_sbus_watch(struct tevent_context *ev, struct tevent_immediate *im,
+                void *data)
+{
+    struct sbus_watch_fd *w = talloc_get_type(data, struct sbus_watch_fd);
+    talloc_free(w);
+    talloc_free(im);
+}
+
 static void
 sbus_watch_remove(DBusWatch *dbus_watch, void *data)
 {
     struct sbus_watch_fd *watch_fd;
+    struct tevent_immediate *im;
 
     watch_fd = talloc_get_type(dbus_watch_get_data(dbus_watch),
                                struct sbus_watch_fd);
@@ -280,8 +290,27 @@ sbus_watch_remove(DBusWatch *dbus_watch, void *data)
 
     if (watch_fd->dbus_watch.read == NULL
             && watch_fd->dbus_watch.write == NULL) {
-        talloc_free(watch_fd->fdevent);
-        talloc_free(watch_fd);
+        /* libdbus doesn't need this watch{fd} anymore, so the associated
+         * tevent_fd should be removed from monitoring on the spot.
+         *
+         * This first attempt to free will actually fail, because the
+         * tevent_fd is marked as busy in tevent_common_invoke_fd_handler(),
+         * but it will still stop fd monitoring in the tevent loop.
+         * The memory will eventually be freed alongside the deallocation
+         * of its parent context (watch_fd).
+         */
+        TALLOC_FREE(watch_fd->fdevent);
+        /* watch_fd itself can't be freed yet, as it may still be referenced
+         * in the current call chain (for example in sbus_watch_handler()),
+         * so instead schedule an immediate event to delete it.
+         */
+        im = tevent_create_immediate(watch_fd->sbus_watch);
+        if (im == NULL) {
+            DEBUG(SSSDBG_FATAL_FAILURE, "Out of Memory!\n");
+            return;
+        }
+        tevent_schedule_immediate(im, watch_fd->sbus_watch->ev,
+                                  free_sbus_watch, watch_fd);
     }
 }
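
For reference, below is a minimal self-contained sketch of the deferred-free pattern the patch relies on, assuming only libtalloc and libtevent. The names watch_like and free_watch_like are invented for the example; this is a sketch of the technique, not the SSSD implementation. The point is that an immediate handler runs from the top of the event loop, i.e. only after the handler that scheduled it has fully returned, so freeing the object there is safe.

/* deferred_free.c - sketch of deferring a free via a tevent immediate.
 * Hypothetical names; build roughly with:
 *   gcc deferred_free.c $(pkg-config --cflags --libs talloc tevent) */
#include <stdio.h>
#include <talloc.h>
#include <tevent.h>

struct watch_like {
    int id;
};

/* Runs from the top of the event loop, after the current call chain
 * has unwound, so no stale stack pointer to w can remain. */
static void free_watch_like(struct tevent_context *ev,
                            struct tevent_immediate *im,
                            void *data)
{
    struct watch_like *w = talloc_get_type(data, struct watch_like);
    printf("immediate handler: freeing watch %d\n", w->id);
    talloc_free(w);
    talloc_free(im);
}

int main(void)
{
    TALLOC_CTX *mem_ctx = talloc_new(NULL);
    struct tevent_context *ev = tevent_context_init(mem_ctx);
    struct watch_like *w = talloc_zero(mem_ctx, struct watch_like);
    struct tevent_immediate *im = tevent_create_immediate(mem_ctx);

    if (ev == NULL || w == NULL || im == NULL) {
        talloc_free(mem_ctx);
        return 1;
    }

    /* Instead of talloc_free(w) right here, where a caller up the
     * stack may still hold a pointer to w, defer the free until the
     * next pass of the event loop. */
    tevent_schedule_immediate(im, ev, free_watch_like, w);
    printf("scheduled deferred free; w is still valid here\n");

    tevent_loop_once(ev);   /* runs free_watch_like */

    talloc_free(mem_ctx);
    return 0;
}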
 