refcount_t type and corresponding API should be
used instead of atomic_t when the variable is used as
a reference counter. This allows to avoid accidental
refcounter overflows that might lead to use-after-free
situations.

Signed-off-by: Elena Reshetova <[email protected]>
Signed-off-by: Hans Liljestrand <[email protected]>
Signed-off-by: Kees Cook <[email protected]>
Signed-off-by: David Windsor <[email protected]>
---
 fs/mbcache.c            | 16 ++++++++--------
 include/linux/mbcache.h |  6 +++---
 2 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/fs/mbcache.c b/fs/mbcache.c
index b19be429d..57a82de 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -89,7 +89,7 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
 
        INIT_LIST_HEAD(&entry->e_list);
        /* One ref for hash, one ref returned */
-       atomic_set(&entry->e_refcnt, 1);
+       refcount_set(&entry->e_refcnt, 1);
        entry->e_key = key;
        entry->e_block = block;
        entry->e_reusable = reusable;
@@ -108,7 +108,7 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
        spin_lock(&cache->c_list_lock);
        list_add_tail(&entry->e_list, &cache->c_list);
        /* Grab ref for LRU list */
-       atomic_inc(&entry->e_refcnt);
+       refcount_inc(&entry->e_refcnt);
        cache->c_entry_count++;
        spin_unlock(&cache->c_list_lock);
 
@@ -140,7 +140,7 @@ static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
                entry = hlist_bl_entry(node, struct mb_cache_entry,
                                       e_hash_list);
                if (entry->e_key == key && entry->e_reusable) {
-                       atomic_inc(&entry->e_refcnt);
+                       refcount_inc(&entry->e_refcnt);
                        goto out;
                }
                node = node->next;
@@ -203,7 +203,7 @@ struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
        hlist_bl_lock(head);
        hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
                if (entry->e_key == key && entry->e_block == block) {
-                       atomic_inc(&entry->e_refcnt);
+                       refcount_inc(&entry->e_refcnt);
                        goto out;
                }
        }
@@ -239,7 +239,7 @@ void mb_cache_entry_delete_block(struct mb_cache *cache, u32 key,
                        if (!list_empty(&entry->e_list)) {
                                list_del_init(&entry->e_list);
                                cache->c_entry_count--;
-                               atomic_dec(&entry->e_refcnt);
+                               refcount_dec(&entry->e_refcnt);
                        }
                        spin_unlock(&cache->c_list_lock);
                        mb_cache_entry_put(cache, entry);
@@ -300,7 +300,7 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
                hlist_bl_lock(head);
                if (!hlist_bl_unhashed(&entry->e_hash_list)) {
                        hlist_bl_del_init(&entry->e_hash_list);
-                       atomic_dec(&entry->e_refcnt);
+                       refcount_dec(&entry->e_refcnt);
                }
                hlist_bl_unlock(head);
                if (mb_cache_entry_put(cache, entry))
@@ -397,11 +397,11 @@ void mb_cache_destroy(struct mb_cache *cache)
        list_for_each_entry_safe(entry, next, &cache->c_list, e_list) {
                if (!hlist_bl_unhashed(&entry->e_hash_list)) {
                        hlist_bl_del_init(&entry->e_hash_list);
-                       atomic_dec(&entry->e_refcnt);
+                       refcount_dec(&entry->e_refcnt);
                } else
                        WARN_ON(1);
                list_del(&entry->e_list);
-               WARN_ON(atomic_read(&entry->e_refcnt) != 1);
+               WARN_ON(refcount_read(&entry->e_refcnt) != 1);
                mb_cache_entry_put(cache, entry);
        }
        kfree(cache->c_hash);
diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
index 86c9a8b..251e268 100644
--- a/include/linux/mbcache.h
+++ b/include/linux/mbcache.h
@@ -4,7 +4,7 @@
 #include <linux/hash.h>
 #include <linux/list_bl.h>
 #include <linux/list.h>
-#include <linux/atomic.h>
+#include <linux/refcount.h>
 #include <linux/fs.h>
 
 struct mb_cache;
@@ -14,7 +14,7 @@ struct mb_cache_entry {
        struct list_head        e_list;
        /* Hash table list - protected by hash chain bitlock */
        struct hlist_bl_node    e_hash_list;
-       atomic_t                e_refcnt;
+       refcount_t              e_refcnt;
        /* Key in hash - stable during lifetime of the entry */
        u32                     e_key;
        u32                     e_referenced:1;
@@ -32,7 +32,7 @@ void __mb_cache_entry_free(struct mb_cache_entry *entry);
 static inline int mb_cache_entry_put(struct mb_cache *cache,
                                     struct mb_cache_entry *entry)
 {
-       if (!atomic_dec_and_test(&entry->e_refcnt))
+       if (!refcount_dec_and_test(&entry->e_refcnt))
                return 0;
        __mb_cache_entry_free(entry);
        return 1;
-- 
2.7.4

Reply via email to