We need to take the mmap lock around find_vma() and the subsequent use of
the VMA. Otherwise, we can race with concurrent operations such as
munmap(), which can free the VMA under us and lead to use-after-free
accesses.

Fixes: 1000197d8013 ("nios2: System calls handling")
Signed-off-by: Jann Horn <ja...@google.com>
---
To the maintainers:
I can't easily test this patch - I don't even have a nios2 compiler.
If you have tested this patch, you may want to add a Cc: stable tag to it.
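
For reviewers reading only this mail, here is a minimal sketch of the
lock/lookup/unlock pattern the patch establishes in sys_cacheflush(). It
restates the patched logic out of context and omits the op/len checks, so
it is illustrative only and not a substitute for the diff below:

	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (mmap_read_lock_killable(mm))
		return -EINTR;

	/* The find_vma() result is only stable while the mmap lock is held. */
	vma = find_vma(mm, addr);
	if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) {
		mmap_read_unlock(mm);
		return -EFAULT;
	}

	flush_cache_range(vma, addr, addr + len);

	mmap_read_unlock(mm);
	return 0;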

 arch/nios2/kernel/sys_nios2.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/arch/nios2/kernel/sys_nios2.c b/arch/nios2/kernel/sys_nios2.c
index cd390ec4f88b..2c8f8bd850c9 100644
--- a/arch/nios2/kernel/sys_nios2.c
+++ b/arch/nios2/kernel/sys_nios2.c
@@ -22,6 +22,7 @@ asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len,
                                unsigned int op)
 {
        struct vm_area_struct *vma;
+       struct mm_struct *mm = current->mm;

        if (len == 0)
                return 0;
@@ -34,16 +35,21 @@ asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len,
        if (addr + len < addr)
                return -EFAULT;

+       if (mmap_read_lock_killable(mm))
+               return -EINTR;
        /*
         * Verify that the specified address region actually belongs
         * to this process.
         */
-       vma = find_vma(current->mm, addr);
-       if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
+       vma = find_vma(mm, addr);
+       if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) {
+               mmap_read_unlock(mm);
                return -EFAULT;
+       }

        flush_cache_range(vma, addr, addr + len);

+       mmap_read_unlock(mm);
        return 0;
 }


base-commit: 6d28cf7dfede6cfca5119a0d415a6a447c68f3a0
-- 
2.28.0.681.g6f77f65b4e-goog
