With cumulative patches, if patch A introduces a change to function 1,
and patch B reverts that change and also introduces changes to, say,
functions 2 and 3, the change that patch A made to function 1
nevertheless remains in effect.
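
For illustration (the function names here are hypothetical, but the
structures are the standard livepatch API), the two patches might
declare:

	/* patch A: replaces func1 */
	static struct klp_func patch_a_funcs[] = {
		{ .old_name = "func1", .new_func = livepatch_a_func1, },
		{ }
	};

	/* patch B: drops func1, replaces func2 and func3 */
	static struct klp_func patch_b_funcs[] = {
		{ .old_name = "func2", .new_func = livepatch_b_func2, },
		{ .old_name = "func3", .new_func = livepatch_b_func3, },
		{ }
	};

Without atomic replace, enabling patch B leaves patch A's registration
for func1 in place, so callers still run patch A's version of func1.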

Introduce atomic replace, which first creates a 'no_op' klp_func for
every function that patch B reverts. 'no_op' klp_funcs are created,
rather than simply unregistering the functions from the ftrace handler
directly, to ensure that the consistency model is properly preserved:
by introducing the 'no_op' functions, 'atomic revert' leverages the
existing consistency model code. Then, after the transition to the new
code, 'atomic revert' unregisters the ftrace handlers associated with
the 'no_op' klp_funcs, so that we run the original un-patched function
with no additional no_op function overhead.
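
Conceptually, the no_op entry allocated for a reverted function is a
copy of the previous patch's klp_func with no replacement body; a
sketch of what klp_init_patch_no_ops() below fills in:

	/* sketch: built from patch A's entry for func1 */
	new->orig_func = *prev_func;	/* inherit old_name, old_addr, ... */
	new->orig_func.new_func = NULL;	/* nothing to redirect to */
	new->orig_func.new_size = 0;
	new->orig_func.no_op = true;

klp_ftrace_handler() skips klp_arch_set_pc() for such entries, so the
original function runs while the consistency model still tracks the
transition.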

Since 'atomic replace' completely replaces any previous livepatch
modules, it explicitly disables the previous patch (patch A in the
example), so that the livepatch module associated with patch A can be
completely removed (rmmod). Patch A is then in a permanently disabled
state. However, once patch A has been removed from the kernel with
rmmod, it can be re-inserted (insmod) and will act as an atomic replace
on top of patch B.
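
The 'replaced' state is enforced in the enable path; as the hunk below
shows, __klp_enable_patch() rejects a replaced patch until its module
has been re-inserted:

	if (patch->replaced)
		return -EINVAL;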

Signed-off-by: Jason Baron <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Jessica Yu <[email protected]>
Cc: Jiri Kosina <[email protected]>
Cc: Miroslav Benes <[email protected]>
Cc: Petr Mladek <[email protected]>
---
 include/linux/livepatch.h     |   8 ++-
 kernel/livepatch/core.c       | 124 ++++++++++++++++++++++++++++++++++++++++++
 kernel/livepatch/core.h       |   4 ++
 kernel/livepatch/patch.c      |  14 +++--
 kernel/livepatch/patch.h      |   1 +
 kernel/livepatch/transition.c |  61 ++++++++++++++++++++-
 6 files changed, 202 insertions(+), 10 deletions(-)

diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 5038337..6fd7222 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -49,6 +49,7 @@
  * @new_size:  size of the new function
  * @patched:   the func has been added to the klp_ops list
  * @transition:        the func is currently being applied or reverted
+ * @no_op:     this is a no_op function used to completely revert a function
  *
  * The patched and transition variables define the func's patching state.  When
  * patching, a func is always in one of the following states:
@@ -86,6 +87,7 @@ struct klp_func {
        unsigned long old_size, new_size;
        bool patched;
        bool transition;
+       bool no_op;
 };
 
 /**
@@ -132,6 +134,7 @@ struct klp_object {
  * @kobj:      kobject for sysfs resources
  * @obj_list:  head of list for dynamically allocated struct klp_object
  * @enabled:   the patch is enabled (but operation may be incomplete)
+ * @replaced:  the patch has been replaced and cannot be re-enabled
  * @finish:    for waiting till it is safe to remove the patch module
  */
 struct klp_patch {
@@ -145,6 +148,7 @@ struct klp_patch {
        struct kobject kobj;
        struct list_head obj_list;
        bool enabled;
+       bool replaced;
        struct completion finish;
 };
 
@@ -201,8 +205,8 @@ static inline struct klp_func *func_iter_next(struct func_iter *iter)
        struct klp_func *func;
        struct klp_func_no_op *func_no_op;
 
-       if (iter->func->old_name || iter->func->new_func ||
-                                       iter->func->old_sympos) {
+       if (iter->func && (iter->func->old_name || iter->func->new_func ||
+                          iter->func->old_sympos)) {
                func = iter->func;
                iter->func++;
        } else {
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index e63f478..bf353da 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -352,6 +352,9 @@ static int __klp_enable_patch(struct klp_patch *patch)
        if (klp_transition_patch)
                return -EBUSY;
 
+       if (patch->replaced)
+               return -EINVAL;
+
        if (WARN_ON(patch->enabled))
                return -EINVAL;
 
@@ -602,6 +605,118 @@ static void klp_free_patch(struct klp_patch *patch)
                list_del(&patch->list);
 }
 
+void klp_patch_free_no_ops(struct klp_patch *patch)
+{
+       struct obj_iter o_iter;
+       struct func_iter f_iter;
+       struct klp_object *obj, *tmp_obj;
+       struct klp_func *func;
+       struct klp_func_no_op *func_no_op;
+
+       klp_for_each_object(patch, obj, &o_iter) {
+               klp_for_each_func(obj, func, &f_iter) {
+                       if (func->no_op) {
+                               func_no_op = container_of(func,
+                                                         struct klp_func_no_op,
+                                                         orig_func);
+                               list_del_init(&func_no_op->func_entry);
+                               kfree(func_no_op);
+                       }
+               }
+       }
+       list_for_each_entry_safe(obj, tmp_obj, &patch->obj_list, obj_entry) {
+               list_del_init(&obj->obj_entry);
+               kfree(obj);
+       }
+}
+
+static int klp_init_patch_no_ops(struct klp_patch *patch)
+{
+       struct klp_object *obj, *prev_obj, *new_obj;
+       struct klp_func *prev_func, *func;
+       struct klp_func_no_op *new;
+       struct klp_patch *prev_patch;
+       struct obj_iter o_iter, prev_o_iter;
+       struct func_iter prev_f_iter, f_iter;
+       bool found, mod;
+
+       if (patch->list.prev == &klp_patches)
+               return 0;
+
+       prev_patch = list_prev_entry(patch, list);
+       klp_for_each_object(prev_patch, prev_obj, &prev_o_iter) {
+               if (!klp_is_object_loaded(prev_obj))
+                       continue;
+
+               klp_for_each_func(prev_obj, prev_func, &prev_f_iter) {
+                       found = false;
+                       klp_for_each_object(patch, obj, &o_iter) {
+                               klp_for_each_func(obj, func, &f_iter) {
+                                       if ((strcmp(prev_func->old_name,
+                                                   func->old_name) == 0) &&
+                                               (prev_func->old_sympos ==
+                                                       func->old_sympos)) {
+                                               found = true;
+                                               break;
+                                       }
+                               }
+                               if (found)
+                                       break;
+                       }
+                       if (found)
+                               continue;
+
+                       new = kmalloc(sizeof(*new), GFP_KERNEL);
+                       if (!new)
+                               return -ENOMEM;
+                       new->orig_func = *prev_func;
+                       new->orig_func.old_name = prev_func->old_name;
+                       new->orig_func.new_func = NULL;
+                       new->orig_func.old_sympos = prev_func->old_sympos;
+                       new->orig_func.immediate = prev_func->immediate;
+                       new->orig_func.old_addr = prev_func->old_addr;
+                       INIT_LIST_HEAD(&new->orig_func.stack_node);
+                       new->orig_func.old_size = prev_func->old_size;
+                       new->orig_func.new_size = 0;
+                       new->orig_func.no_op = true;
+                       new->orig_func.patched = false;
+                       new->orig_func.transition = false;
+                       found = false;
+                       mod = klp_is_module(prev_obj);
+                       klp_for_each_object(patch, obj, &o_iter) {
+                               if (mod) {
+                                       if (klp_is_module(obj) &&
+                                           strcmp(prev_obj->name,
+                                                  obj->name) == 0) {
+                                               found = true;
+                                               break;
+                                       }
+                               } else if (!klp_is_module(obj)) {
+                                       found = true;
+                                       break;
+                               }
+                       }
+                       if (found) {
+                               list_add(&new->func_entry, &obj->func_list);
+                       } else {
+                               new_obj = kmalloc(sizeof(*new_obj), GFP_KERNEL);
+                               if (!new_obj)
+                                       return -ENOMEM;
+                               new_obj->name = prev_obj->name;
+                               new_obj->funcs = NULL;
+                               new_obj->mod = prev_obj->mod;
+                               new_obj->patched = false;
+                               INIT_LIST_HEAD(&new_obj->func_list);
+                               INIT_LIST_HEAD(&new_obj->obj_entry);
+                               list_add(&new->func_entry, &new_obj->func_list);
+                               list_add(&new_obj->obj_entry, &patch->obj_list);
+                       }
+               }
+       }
+
+       return 0;
+}
+
 static int klp_init_func(struct klp_object *obj, struct klp_func *func)
 {
        if (!func->old_name || !func->new_func)
@@ -725,6 +840,7 @@ static int klp_init_patch(struct klp_patch *patch)
        mutex_lock(&klp_mutex);
 
        patch->enabled = false;
+       patch->replaced = false;
        init_completion(&patch->finish);
 
        ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
@@ -746,12 +862,19 @@ static int klp_init_patch(struct klp_patch *patch)
 
        list_add_tail(&patch->list, &klp_patches);
 
+       ret = klp_init_patch_no_ops(patch);
+       if (ret) {
+               list_del(&patch->list);
+               goto free;
+       }
+
        mutex_unlock(&klp_mutex);
 
        return 0;
 
 free:
        klp_free_objects_limited(patch, obj);
+       klp_patch_free_no_ops(patch);
 
        mutex_unlock(&klp_mutex);
 
@@ -786,6 +909,7 @@ int klp_unregister_patch(struct klp_patch *patch)
        }
 
        klp_free_patch(patch);
+       klp_patch_free_no_ops(patch);
 
        mutex_unlock(&klp_mutex);
 
diff --git a/kernel/livepatch/core.h b/kernel/livepatch/core.h
index c74f24c..fa20e4d 100644
--- a/kernel/livepatch/core.h
+++ b/kernel/livepatch/core.h
@@ -1,6 +1,10 @@
 #ifndef _LIVEPATCH_CORE_H
 #define _LIVEPATCH_CORE_H
 
+#include <linux/livepatch.h>
+
 extern struct mutex klp_mutex;
 
+void klp_patch_free_no_ops(struct klp_patch *patch);
+
 #endif /* _LIVEPATCH_CORE_H */
diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
index 1cfdabc..cbb8b9d 100644
--- a/kernel/livepatch/patch.c
+++ b/kernel/livepatch/patch.c
@@ -117,6 +117,8 @@ static void notrace klp_ftrace_handler(unsigned long ip,
                }
        }
 
+       if (func->no_op)
+               goto unlock;
        klp_arch_set_pc(regs, (unsigned long)func->new_func);
 unlock:
        preempt_enable_notrace();
@@ -135,7 +137,7 @@ static unsigned long klp_get_ftrace_location(unsigned long faddr)
 }
 #endif
 
-static void klp_unpatch_func(struct klp_func *func)
+void klp_unpatch_func(struct klp_func *func, bool unregistered)
 {
        struct klp_ops *ops;
 
@@ -155,9 +157,11 @@ static void klp_unpatch_func(struct klp_func *func)
                if (WARN_ON(!ftrace_loc))
                        return;
 
-               WARN_ON(unregister_ftrace_function(&ops->fops));
-               WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));
-
+               if (!unregistered) {
+                       WARN_ON(unregister_ftrace_function(&ops->fops));
+                       WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1,
+                               0));
+               }
                list_del_rcu(&func->stack_node);
                list_del(&ops->node);
                kfree(ops);
@@ -242,7 +246,7 @@ void klp_unpatch_object(struct klp_object *obj)
 
        klp_for_each_func(obj, func, &f_iter)
                if (func->patched)
-                       klp_unpatch_func(func);
+                       klp_unpatch_func(func, false);
 
        obj->patched = false;
 }
diff --git a/kernel/livepatch/patch.h b/kernel/livepatch/patch.h
index 0db2271..59c1430 100644
--- a/kernel/livepatch/patch.h
+++ b/kernel/livepatch/patch.h
@@ -29,5 +29,6 @@ struct klp_ops *klp_find_ops(unsigned long old_addr);
 int klp_patch_object(struct klp_object *obj);
 void klp_unpatch_object(struct klp_object *obj);
 void klp_unpatch_objects(struct klp_patch *patch);
+void klp_unpatch_func(struct klp_func *func, bool unregistered);
 
 #endif /* _LIVEPATCH_PATCH_H */
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index e112826..43e1609 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -21,6 +21,8 @@
 
 #include <linux/cpu.h>
 #include <linux/stacktrace.h>
+#include <linux/ftrace.h>
+#include <linux/delay.h>
 #include "core.h"
 #include "patch.h"
 #include "transition.h"
@@ -70,6 +72,7 @@ static void klp_synchronize_transition(void)
        schedule_on_each_cpu(klp_sync);
 }
 
+
 /*
  * The transition to the target patch state is complete.  Clean up the data
  * structures.
@@ -81,8 +84,32 @@ static void klp_complete_transition(void)
        struct task_struct *g, *task;
        unsigned int cpu;
        bool immediate_func = false;
+       bool no_op = false;
        struct obj_iter o_iter;
        struct func_iter f_iter;
+       unsigned long ftrace_loc;
+       struct klp_ops *ops;
+       struct klp_patch *prev_patch;
+
+       /* remove ftrace hook for all no_op functions. */
+       if (klp_target_state == KLP_PATCHED) {
+               klp_for_each_object(klp_transition_patch, obj, &o_iter) {
+                       klp_for_each_func(obj, func, &f_iter) {
+                               if (!func->no_op)
+                                       continue;
+
+                               ops = klp_find_ops(func->old_addr);
+                               if (WARN_ON(!ops))
+                                       continue;
+                               ftrace_loc = func->old_addr;
+                               WARN_ON(unregister_ftrace_function(&ops->fops));
+                               WARN_ON(ftrace_set_filter_ip(&ops->fops,
+                                                            ftrace_loc,
+                                                            1, 0));
+                               no_op = true;
+                       }
+               }
+       }
 
        if (klp_target_state == KLP_UNPATCHED) {
                /*
@@ -90,7 +117,9 @@ static void klp_complete_transition(void)
                 * remove the new functions from the func_stack.
                 */
                klp_unpatch_objects(klp_transition_patch);
+       }
 
+       if (klp_target_state == KLP_UNPATCHED || no_op) {
                /*
                 * Make sure klp_ftrace_handler() can no longer see functions
                 * from this patch on the ops->func_stack.  Otherwise, after
@@ -132,6 +161,24 @@ static void klp_complete_transition(void)
        }
 
 done:
+       /* remove and free any no_op functions */
+       if (no_op && klp_target_state == KLP_PATCHED) {
+               prev_patch = list_prev_entry(klp_transition_patch, list);
+               if (prev_patch->enabled) {
+                       klp_unpatch_objects(prev_patch);
+                       prev_patch->enabled = false;
+                       prev_patch->replaced = true;
+                       module_put(prev_patch->mod);
+               }
+               klp_for_each_object(klp_transition_patch, obj, &o_iter) {
+                       klp_for_each_func(obj, func, &f_iter) {
+                               if (func->no_op)
+                                       klp_unpatch_func(func, true);
+                       }
+               }
+               klp_patch_free_no_ops(klp_transition_patch);
+       }
+
        klp_target_state = KLP_UNDEFINED;
        klp_transition_patch = NULL;
 }
@@ -204,10 +251,18 @@ static int klp_check_stack_func(struct klp_func *func,
                if (klp_target_state == KLP_UNPATCHED) {
                         /*
                          * Check for the to-be-unpatched function
-                         * (the func itself).
+                         * (the func itself). If we're unpatching
+                         * a no-op, then we're running the original
+                         * function. We never 'patch' a no-op function,
+                         * since we just remove the ftrace hook.
                          */
-                       func_addr = (unsigned long)func->new_func;
-                       func_size = func->new_size;
+                       if (func->no_op) {
+                               func_addr = (unsigned long)func->old_addr;
+                               func_size = func->old_size;
+                       } else {
+                               func_addr = (unsigned long)func->new_func;
+                               func_size = func->new_size;
+                       }
                } else {
                        /*
                         * Check for the to-be-patched function
-- 
2.6.1
