The refcount_t type and corresponding API should be
used instead of atomic_t when the variable is used as
a reference counter. This allows one to avoid accidental
refcounter overflows that might lead to use-after-free
situations.

Signed-off-by: Elena Reshetova <[email protected]>
Signed-off-by: Hans Liljestrand <[email protected]>
Signed-off-by: Kees Cook <[email protected]>
Signed-off-by: David Windsor <[email protected]>
---
 ipc/sem.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/ipc/sem.c b/ipc/sem.c
index e468cd1..9063ffa 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -139,7 +139,7 @@ struct sem_undo {
  * that may be shared among all a CLONE_SYSVSEM task group.
  */
 struct sem_undo_list {
-       atomic_t                refcnt;
+       refcount_t              refcnt;
        spinlock_t              lock;
        struct list_head        list_proc;
 };
@@ -1646,7 +1646,7 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp)
                if (undo_list == NULL)
                        return -ENOMEM;
                spin_lock_init(&undo_list->lock);
-               atomic_set(&undo_list->refcnt, 1);
+               refcount_set(&undo_list->refcnt, 1);
                INIT_LIST_HEAD(&undo_list->list_proc);
 
                current->sysvsem.undo_list = undo_list;
@@ -2045,7 +2045,7 @@ int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
                error = get_undo_list(&undo_list);
                if (error)
                        return error;
-               atomic_inc(&undo_list->refcnt);
+               refcount_inc(&undo_list->refcnt);
                tsk->sysvsem.undo_list = undo_list;
        } else
                tsk->sysvsem.undo_list = NULL;
@@ -2074,7 +2074,7 @@ void exit_sem(struct task_struct *tsk)
                return;
        tsk->sysvsem.undo_list = NULL;
 
-       if (!atomic_dec_and_test(&ulp->refcnt))
+       if (!refcount_dec_and_test(&ulp->refcnt))
                return;
 
        for (;;) {
-- 
2.7.4

Reply via email to