atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once the counter reaches zero, further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

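As an illustration only, the schema above boils down to the pattern
below (the "foo" object and helper names are hypothetical, not taken
from this patch):

	#include <linux/atomic.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	struct foo {
		atomic_t refcnt;
		/* payload... */
	};

	static void foo_init(struct foo *f)
	{
		atomic_set(&f->refcnt, 1);	/* one ref for the creator */
	}

	static bool foo_get(struct foo *f)
	{
		/* refuse to take a ref on an object already at zero */
		return atomic_inc_not_zero(&f->refcnt);
	}

	static void foo_put(struct foo *f)
	{
		if (atomic_dec_and_test(&f->refcnt))
			kfree(f);	/* last ref gone: free the resource */
	}
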
Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.
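
A minimal sketch of the same pattern after conversion, again with the
hypothetical "foo" names; unlike atomic_t, refcount_t saturates instead
of wrapping, so an over- or underflow becomes a WARN and a leaked
object rather than a potential use-after-free:

	#include <linux/refcount.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	struct foo {
		refcount_t refcnt;	/* was: atomic_t refcnt; */
	};

	static void foo_init(struct foo *f)
	{
		refcount_set(&f->refcnt, 1);
	}

	static bool foo_get(struct foo *f)
	{
		return refcount_inc_not_zero(&f->refcnt);
	}

	static void foo_put(struct foo *f)
	{
		if (refcount_dec_and_test(&f->refcnt))
			kfree(f);
	}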

The variable fsnotify_mark.refcnt is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook <[email protected]>
Reviewed-by: David Windsor <[email protected]>
Reviewed-by: Hans Liljestrand <[email protected]>
Signed-off-by: Elena Reshetova <[email protected]>
---
 fs/notify/inotify/inotify_user.c |  4 ++--
 fs/notify/mark.c                 | 14 +++++++-------
 include/linux/fsnotify_backend.h |  2 +-
 kernel/audit_tree.c              |  2 +-
 4 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 7cc7d3f..d3c20e0 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -376,7 +376,7 @@ static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group
 
                fsnotify_get_mark(fsn_mark);
                /* One ref for being in the idr, one ref we just took */
-               BUG_ON(atomic_read(&fsn_mark->refcnt) < 2);
+               BUG_ON(refcount_read(&fsn_mark->refcnt) < 2);
        }
 
        return i_mark;
@@ -446,7 +446,7 @@ static void inotify_remove_from_idr(struct fsnotify_group *group,
         * One ref for being in the idr
         * one ref grabbed by inotify_idr_find
         */
-       if (unlikely(atomic_read(&i_mark->fsn_mark.refcnt) < 2)) {
+       if (unlikely(refcount_read(&i_mark->fsn_mark.refcnt) < 2)) {
                printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
                         __func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
                /* we can't really recover with bad ref cnting.. */
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 9991f88..45f1141 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -105,8 +105,8 @@ static DECLARE_WORK(connector_reaper_work, fsnotify_connector_destroy_workfn);
 
 void fsnotify_get_mark(struct fsnotify_mark *mark)
 {
-       WARN_ON_ONCE(!atomic_read(&mark->refcnt));
-       atomic_inc(&mark->refcnt);
+       WARN_ON_ONCE(!refcount_read(&mark->refcnt));
+       refcount_inc(&mark->refcnt);
 }
 
 /*
@@ -116,7 +116,7 @@ void fsnotify_get_mark(struct fsnotify_mark *mark)
  */
 static bool fsnotify_get_mark_safe(struct fsnotify_mark *mark)
 {
-       return atomic_inc_not_zero(&mark->refcnt);
+       return refcount_inc_not_zero(&mark->refcnt);
 }
 
 static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
@@ -211,7 +211,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
 
        /* Catch marks that were actually never attached to object */
        if (!mark->connector) {
-               if (atomic_dec_and_test(&mark->refcnt))
+               if (refcount_dec_and_test(&mark->refcnt))
                        fsnotify_final_mark_destroy(mark);
                return;
        }
@@ -220,7 +220,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
         * We have to be careful so that traversals of obj_list under lock can
         * safely grab mark reference.
         */
-       if (!atomic_dec_and_lock(&mark->refcnt, &mark->connector->lock))
+       if (!refcount_dec_and_lock(&mark->refcnt, &mark->connector->lock))
                return;
 
        conn = mark->connector;
@@ -338,7 +338,7 @@ void fsnotify_detach_mark(struct fsnotify_mark *mark)
 
        WARN_ON_ONCE(!mutex_is_locked(&group->mark_mutex));
        WARN_ON_ONCE(!srcu_read_lock_held(&fsnotify_mark_srcu) &&
-                    atomic_read(&mark->refcnt) < 1 +
+                    refcount_read(&mark->refcnt) < 1 +
                        !!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED));
 
        spin_lock(&mark->lock);
@@ -738,7 +738,7 @@ void fsnotify_init_mark(struct fsnotify_mark *mark,
 {
        memset(mark, 0, sizeof(*mark));
        spin_lock_init(&mark->lock);
-       atomic_set(&mark->refcnt, 1);
+       refcount_set(&mark->refcnt, 1);
        fsnotify_get_group(group);
        mark->group = group;
 }
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 20a57ba..26485b1 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -244,7 +244,7 @@ struct fsnotify_mark {
        __u32 mask;
        /* We hold one for presence in g_list. Also one ref for each 'thing'
         * in kernel that found and may be using this mark. */
-       atomic_t refcnt;
+       refcount_t refcnt;
        /* Group this mark is for. Set on mark creation, stable until last ref
         * is dropped */
        struct fsnotify_group *group;
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 011d46e..45ec960 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -1007,7 +1007,7 @@ static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify
         * We are guaranteed to have at least one reference to the mark from
         * either the inode or the caller of fsnotify_destroy_mark().
         */
-       BUG_ON(atomic_read(&entry->refcnt) < 1);
+       BUG_ON(refcount_read(&entry->refcnt) < 1);
 }
 
 static const struct fsnotify_ops audit_tree_ops = {
-- 
2.7.4
