Refactor struct proc_maps_private by moving the fields used by the
PROCMAP_QUERY ioctl into a separate structure, struct
proc_maps_locking_ctx. This allows the next patch to let the ioctl
reuse some of the functions used for reading /proc/pid/maps without
going through file->private_data, which prevents the ioctl and
/proc/pid/maps readers from concurrently modifying file->private_data
members.

This is pure code refactoring; there are no functional changes.
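
For reference, the mechanical shape of the change (a summary distilled
from the hunks below, not new code): the lock-related state moves
behind the new lock_ctx member, and the lock helpers take the new
structure directly, e.g.:

	mm = priv->mm;            becomes   mm = priv->lock_ctx.mm;
	unlock_vma_range(priv);   becomes   unlock_vma_range(&priv->lock_ctx);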

Signed-off-by: Suren Baghdasaryan <[email protected]>
Reviewed-by: Vlastimil Babka <[email protected]>
Acked-by: SeongJae Park <[email protected]>
---
 fs/proc/internal.h   | 15 +++++---
 fs/proc/task_mmu.c   | 87 +++++++++++++++++++++++---------------------
 fs/proc/task_nommu.c | 14 +++----
 3 files changed, 63 insertions(+), 53 deletions(-)

diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index e737401d7383..d1598576506c 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -378,16 +378,21 @@ extern void proc_self_init(void);
  * task_[no]mmu.c
  */
 struct mem_size_stats;
-struct proc_maps_private {
-       struct inode *inode;
-       struct task_struct *task;
+
+struct proc_maps_locking_ctx {
        struct mm_struct *mm;
-       struct vma_iterator iter;
-       loff_t last_pos;
 #ifdef CONFIG_PER_VMA_LOCK
        bool mmap_locked;
        struct vm_area_struct *locked_vma;
 #endif
+};
+
+struct proc_maps_private {
+       struct inode *inode;
+       struct task_struct *task;
+       struct vma_iterator iter;
+       loff_t last_pos;
+       struct proc_maps_locking_ctx lock_ctx;
 #ifdef CONFIG_NUMA
        struct mempolicy *task_mempolicy;
 #endif
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 29cca0e6d0ff..c0968d293b61 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -132,18 +132,18 @@ static void release_task_mempolicy(struct proc_maps_private *priv)
 
 #ifdef CONFIG_PER_VMA_LOCK
 
-static void unlock_vma(struct proc_maps_private *priv)
+static void unlock_ctx_vma(struct proc_maps_locking_ctx *lock_ctx)
 {
-       if (priv->locked_vma) {
-               vma_end_read(priv->locked_vma);
-               priv->locked_vma = NULL;
+       if (lock_ctx->locked_vma) {
+               vma_end_read(lock_ctx->locked_vma);
+               lock_ctx->locked_vma = NULL;
        }
 }
 
 static const struct seq_operations proc_pid_maps_op;
 
 static inline bool lock_vma_range(struct seq_file *m,
-                                 struct proc_maps_private *priv)
+                                 struct proc_maps_locking_ctx *lock_ctx)
 {
        /*
         * smaps and numa_maps perform page table walk, therefore require
@@ -151,25 +151,25 @@ static inline bool lock_vma_range(struct seq_file *m,
         * walking the vma tree under rcu read protection.
         */
        if (m->op != &proc_pid_maps_op) {
-               if (mmap_read_lock_killable(priv->mm))
+               if (mmap_read_lock_killable(lock_ctx->mm))
                        return false;
 
-               priv->mmap_locked = true;
+               lock_ctx->mmap_locked = true;
        } else {
                rcu_read_lock();
-               priv->locked_vma = NULL;
-               priv->mmap_locked = false;
+               lock_ctx->locked_vma = NULL;
+               lock_ctx->mmap_locked = false;
        }
 
        return true;
 }
 
-static inline void unlock_vma_range(struct proc_maps_private *priv)
+static inline void unlock_vma_range(struct proc_maps_locking_ctx *lock_ctx)
 {
-       if (priv->mmap_locked) {
-               mmap_read_unlock(priv->mm);
+       if (lock_ctx->mmap_locked) {
+               mmap_read_unlock(lock_ctx->mm);
        } else {
-               unlock_vma(priv);
+               unlock_ctx_vma(lock_ctx);
                rcu_read_unlock();
        }
 }
@@ -177,15 +177,16 @@ static inline void unlock_vma_range(struct proc_maps_private *priv)
 static struct vm_area_struct *get_next_vma(struct proc_maps_private *priv,
                                           loff_t last_pos)
 {
+       struct proc_maps_locking_ctx *lock_ctx = &priv->lock_ctx;
        struct vm_area_struct *vma;
 
-       if (priv->mmap_locked)
+       if (lock_ctx->mmap_locked)
                return vma_next(&priv->iter);
 
-       unlock_vma(priv);
-       vma = lock_next_vma(priv->mm, &priv->iter, last_pos);
+       unlock_ctx_vma(lock_ctx);
+       vma = lock_next_vma(lock_ctx->mm, &priv->iter, last_pos);
        if (!IS_ERR_OR_NULL(vma))
-               priv->locked_vma = vma;
+               lock_ctx->locked_vma = vma;
 
        return vma;
 }
@@ -193,14 +194,16 @@ static struct vm_area_struct *get_next_vma(struct proc_maps_private *priv,
 static inline bool fallback_to_mmap_lock(struct proc_maps_private *priv,
                                         loff_t pos)
 {
-       if (priv->mmap_locked)
+       struct proc_maps_locking_ctx *lock_ctx = &priv->lock_ctx;
+
+       if (lock_ctx->mmap_locked)
                return false;
 
        rcu_read_unlock();
-       mmap_read_lock(priv->mm);
+       mmap_read_lock(lock_ctx->mm);
        /* Reinitialize the iterator after taking mmap_lock */
        vma_iter_set(&priv->iter, pos);
-       priv->mmap_locked = true;
+       lock_ctx->mmap_locked = true;
 
        return true;
 }
@@ -208,14 +211,14 @@ static inline bool fallback_to_mmap_lock(struct proc_maps_private *priv,
 #else /* CONFIG_PER_VMA_LOCK */
 
 static inline bool lock_vma_range(struct seq_file *m,
-                                 struct proc_maps_private *priv)
+                                 struct proc_maps_locking_ctx *lock_ctx)
 {
-       return mmap_read_lock_killable(priv->mm) == 0;
+       return mmap_read_lock_killable(lock_ctx->mm) == 0;
 }
 
-static inline void unlock_vma_range(struct proc_maps_private *priv)
+static inline void unlock_vma_range(struct proc_maps_locking_ctx *lock_ctx)
 {
-       mmap_read_unlock(priv->mm);
+       mmap_read_unlock(lock_ctx->mm);
 }
 
 static struct vm_area_struct *get_next_vma(struct proc_maps_private *priv,
@@ -258,7 +261,7 @@ static struct vm_area_struct *proc_get_vma(struct seq_file *m, loff_t *ppos)
                *ppos = vma->vm_end;
        } else {
                *ppos = SENTINEL_VMA_GATE;
-               vma = get_gate_vma(priv->mm);
+               vma = get_gate_vma(priv->lock_ctx.mm);
        }
 
        return vma;
@@ -267,6 +270,7 @@ static struct vm_area_struct *proc_get_vma(struct seq_file *m, loff_t *ppos)
 static void *m_start(struct seq_file *m, loff_t *ppos)
 {
        struct proc_maps_private *priv = m->private;
+       struct proc_maps_locking_ctx *lock_ctx;
        loff_t last_addr = *ppos;
        struct mm_struct *mm;
 
@@ -278,14 +282,15 @@ static void *m_start(struct seq_file *m, loff_t *ppos)
        if (!priv->task)
                return ERR_PTR(-ESRCH);
 
-       mm = priv->mm;
+       lock_ctx = &priv->lock_ctx;
+       mm = lock_ctx->mm;
        if (!mm || !mmget_not_zero(mm)) {
                put_task_struct(priv->task);
                priv->task = NULL;
                return NULL;
        }
 
-       if (!lock_vma_range(m, priv)) {
+       if (!lock_vma_range(m, lock_ctx)) {
                mmput(mm);
                put_task_struct(priv->task);
                priv->task = NULL;
@@ -318,13 +323,13 @@ static void *m_next(struct seq_file *m, void *v, loff_t *ppos)
 static void m_stop(struct seq_file *m, void *v)
 {
        struct proc_maps_private *priv = m->private;
-       struct mm_struct *mm = priv->mm;
+       struct mm_struct *mm = priv->lock_ctx.mm;
 
        if (!priv->task)
                return;
 
        release_task_mempolicy(priv);
-       unlock_vma_range(priv);
+       unlock_vma_range(&priv->lock_ctx);
        mmput(mm);
        put_task_struct(priv->task);
        priv->task = NULL;
@@ -339,9 +344,9 @@ static int proc_maps_open(struct inode *inode, struct file *file,
                return -ENOMEM;
 
        priv->inode = inode;
-       priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
-       if (IS_ERR(priv->mm)) {
-               int err = PTR_ERR(priv->mm);
+       priv->lock_ctx.mm = proc_mem_open(inode, PTRACE_MODE_READ);
+       if (IS_ERR(priv->lock_ctx.mm)) {
+               int err = PTR_ERR(priv->lock_ctx.mm);
 
                seq_release_private(inode, file);
                return err;
@@ -355,8 +360,8 @@ static int proc_map_release(struct inode *inode, struct file *file)
        struct seq_file *seq = file->private_data;
        struct proc_maps_private *priv = seq->private;
 
-       if (priv->mm)
-               mmdrop(priv->mm);
+       if (priv->lock_ctx.mm)
+               mmdrop(priv->lock_ctx.mm);
 
        return seq_release_private(inode, file);
 }
@@ -610,7 +615,7 @@ static int do_procmap_query(struct proc_maps_private *priv, void __user *uarg)
        if (!!karg.build_id_size != !!karg.build_id_addr)
                return -EINVAL;
 
-       mm = priv->mm;
+       mm = priv->lock_ctx.mm;
        if (!mm || !mmget_not_zero(mm))
                return -ESRCH;
 
@@ -1311,7 +1316,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
 {
        struct proc_maps_private *priv = m->private;
        struct mem_size_stats mss = {};
-       struct mm_struct *mm = priv->mm;
+       struct mm_struct *mm = priv->lock_ctx.mm;
        struct vm_area_struct *vma;
        unsigned long vma_start = 0, last_vma_end = 0;
        int ret = 0;
@@ -1456,9 +1461,9 @@ static int smaps_rollup_open(struct inode *inode, struct file *file)
                goto out_free;
 
        priv->inode = inode;
-       priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
-       if (IS_ERR_OR_NULL(priv->mm)) {
-               ret = priv->mm ? PTR_ERR(priv->mm) : -ESRCH;
+       priv->lock_ctx.mm = proc_mem_open(inode, PTRACE_MODE_READ);
+       if (IS_ERR_OR_NULL(priv->lock_ctx.mm)) {
+               ret = priv->lock_ctx.mm ? PTR_ERR(priv->lock_ctx.mm) : -ESRCH;
 
                single_release(inode, file);
                goto out_free;
@@ -1476,8 +1481,8 @@ static int smaps_rollup_release(struct inode *inode, struct file *file)
        struct seq_file *seq = file->private_data;
        struct proc_maps_private *priv = seq->private;
 
-       if (priv->mm)
-               mmdrop(priv->mm);
+       if (priv->lock_ctx.mm)
+               mmdrop(priv->lock_ctx.mm);
 
        kfree(priv);
        return single_release(inode, file);
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 59bfd61d653a..d362919f4f68 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -204,7 +204,7 @@ static void *m_start(struct seq_file *m, loff_t *ppos)
        if (!priv->task)
                return ERR_PTR(-ESRCH);
 
-       mm = priv->mm;
+       mm = priv->lock_ctx.mm;
        if (!mm || !mmget_not_zero(mm)) {
                put_task_struct(priv->task);
                priv->task = NULL;
@@ -226,7 +226,7 @@ static void *m_start(struct seq_file *m, loff_t *ppos)
 static void m_stop(struct seq_file *m, void *v)
 {
        struct proc_maps_private *priv = m->private;
-       struct mm_struct *mm = priv->mm;
+       struct mm_struct *mm = priv->lock_ctx.mm;
 
        if (!priv->task)
                return;
@@ -259,9 +259,9 @@ static int maps_open(struct inode *inode, struct file *file,
                return -ENOMEM;
 
        priv->inode = inode;
-       priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
-       if (IS_ERR_OR_NULL(priv->mm)) {
-               int err = priv->mm ? PTR_ERR(priv->mm) : -ESRCH;
+       priv->lock_ctx.mm = proc_mem_open(inode, PTRACE_MODE_READ);
+       if (IS_ERR_OR_NULL(priv->lock_ctx.mm)) {
+               int err = priv->lock_ctx.mm ? PTR_ERR(priv->lock_ctx.mm) : -ESRCH;
 
                seq_release_private(inode, file);
                return err;
@@ -276,8 +276,8 @@ static int map_release(struct inode *inode, struct file *file)
        struct seq_file *seq = file->private_data;
        struct proc_maps_private *priv = seq->private;
 
-       if (priv->mm)
-               mmdrop(priv->mm);
+       if (priv->lock_ctx.mm)
+               mmdrop(priv->lock_ctx.mm);
 
        return seq_release_private(inode, file);
 }
-- 
2.50.1.703.g449372360f-goog

