We will shortly be removing the vm_flags_t field from vm_area_desc, so we
need to update all mmap_prepare users to use only the desc->vma_flags
field.
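
For instance, open-coded tests and updates of desc->vm_flags become calls
to the typed vma_flags_t accessors. The sketch below simply restates the
pattern used in the hunks that follow (example_mmap_prepare() is a made-up
illustration, not a real call site):

	/* Before: raw bit operations on the vm_flags_t field. */
	static int example_mmap_prepare(struct vm_area_desc *desc)
	{
		if (desc->vm_flags & VM_SHARED)
			return -EINVAL;
		desc->vm_flags |= VM_DONTEXPAND;
		return 0;
	}

	/* After: typed accessors operating on desc->vma_flags. */
	static int example_mmap_prepare(struct vm_area_desc *desc)
	{
		if (vma_desc_test_flags(desc, VMA_SHARED_BIT))
			return -EINVAL;
		vma_desc_set_flags(desc, VMA_DONTEXPAND_BIT);
		return 0;
	}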

This patch does so, along with all the ancillary changes required to make
this possible.

This lays the groundwork for future work to eliminate the use of vm_flags_t
in vm_area_desc altogether and more broadly throughout the kernel.

While we're here, we take the opportunity to replace VM_REMAP_FLAGS with
VMA_REMAP_FLAGS, the vma_flags_t equivalent.
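
The new mask covers the same four bits as before, now expressed via
mk_vma_flags() (this simply quotes the include/linux/mm.h hunk below):

	#define VMA_REMAP_FLAGS mk_vma_flags(VMA_IO_BIT, VMA_PFNMAP_BIT,       \
					     VMA_DONTEXPAND_BIT, VMA_DONTDUMP_BIT)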

No functional changes intended.

Signed-off-by: Lorenzo Stoakes <[email protected]>
---
 drivers/char/mem.c       |  6 +++---
 drivers/dax/device.c     | 10 +++++-----
 fs/aio.c                 |  2 +-
 fs/erofs/data.c          |  5 +++--
 fs/ext4/file.c           |  4 ++--
 fs/ntfs3/file.c          |  2 +-
 fs/orangefs/file.c       |  4 ++--
 fs/ramfs/file-nommu.c    |  2 +-
 fs/resctrl/pseudo_lock.c |  2 +-
 fs/romfs/mmap-nommu.c    |  2 +-
 fs/xfs/xfs_file.c        |  4 ++--
 fs/zonefs/file.c         |  3 ++-
 include/linux/dax.h      |  4 ++--
 include/linux/mm.h       | 24 +++++++++++++++++++-----
 kernel/relay.c           |  2 +-
 mm/memory.c              | 17 ++++++++---------
 16 files changed, 54 insertions(+), 39 deletions(-)

diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 52039fae1594..702d9595a563 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -306,7 +306,7 @@ static unsigned zero_mmap_capabilities(struct file *file)
 /* can't do an in-place private mapping if there's no MMU */
 static inline int private_mapping_ok(struct vm_area_desc *desc)
 {
-       return is_nommu_shared_mapping(desc->vm_flags);
+       return is_nommu_shared_vma_flags(desc->vma_flags);
 }
 #else
 
@@ -360,7 +360,7 @@ static int mmap_mem_prepare(struct vm_area_desc *desc)
 
        desc->vm_ops = &mmap_mem_ops;
 
-       /* Remap-pfn-range will mark the range VM_IO. */
+       /* Remap-pfn-range will mark the range with the I/O flag. */
        mmap_action_remap_full(desc, desc->pgoff);
        /* We filter remap errors to -EAGAIN. */
        desc->action.error_hook = mmap_filter_error;
@@ -520,7 +520,7 @@ static int mmap_zero_prepare(struct vm_area_desc *desc)
 #ifndef CONFIG_MMU
        return -ENOSYS;
 #endif
-       if (desc->vm_flags & VM_SHARED)
+       if (vma_desc_test_flags(desc, VMA_SHARED_BIT))
                return shmem_zero_setup_desc(desc);
 
        desc->action.success_hook = mmap_zero_private_success;
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 22999a402e02..4b2970d6bbee 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -13,7 +13,7 @@
 #include "dax-private.h"
 #include "bus.h"
 
-static int __check_vma(struct dev_dax *dev_dax, vm_flags_t vm_flags,
+static int __check_vma(struct dev_dax *dev_dax, vma_flags_t flags,
                       unsigned long start, unsigned long end, struct file *file,
                       const char *func)
 {
@@ -24,7 +24,7 @@ static int __check_vma(struct dev_dax *dev_dax, vm_flags_t vm_flags,
                return -ENXIO;
 
        /* prevent private mappings from being established */
-       if ((vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
+       if (!vma_flags_test(flags, VMA_MAYSHARE_BIT)) {
                dev_info_ratelimited(dev,
                                "%s: %s: fail, attempted private mapping\n",
                                current->comm, func);
@@ -53,7 +53,7 @@ static int __check_vma(struct dev_dax *dev_dax, vm_flags_t vm_flags,
 static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
                     const char *func)
 {
-       return __check_vma(dev_dax, vma->vm_flags, vma->vm_start, vma->vm_end,
+       return __check_vma(dev_dax, vma->flags, vma->vm_start, vma->vm_end,
                           vma->vm_file, func);
 }
 
@@ -306,14 +306,14 @@ static int dax_mmap_prepare(struct vm_area_desc *desc)
         * fault time.
         */
        id = dax_read_lock();
-       rc = __check_vma(dev_dax, desc->vm_flags, desc->start, desc->end, filp,
+       rc = __check_vma(dev_dax, desc->vma_flags, desc->start, desc->end, filp,
                         __func__);
        dax_read_unlock(id);
        if (rc)
                return rc;
 
        desc->vm_ops = &dax_vm_ops;
-       desc->vm_flags |= VM_HUGEPAGE;
+       vma_desc_set_flags(desc, VMA_HUGEPAGE_BIT);
        return 0;
 }
 
diff --git a/fs/aio.c b/fs/aio.c
index 0a23a8c0717f..59b67b8da1b2 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -394,7 +394,7 @@ static const struct vm_operations_struct aio_ring_vm_ops = {
 
 static int aio_ring_mmap_prepare(struct vm_area_desc *desc)
 {
-       desc->vm_flags |= VM_DONTEXPAND;
+       vma_desc_set_flags(desc, VMA_DONTEXPAND_BIT);
        desc->vm_ops = &aio_ring_vm_ops;
        return 0;
 }
diff --git a/fs/erofs/data.c b/fs/erofs/data.c
index bb13c4cb8455..e7bc29e764c6 100644
--- a/fs/erofs/data.c
+++ b/fs/erofs/data.c
@@ -438,11 +438,12 @@ static int erofs_file_mmap_prepare(struct vm_area_desc *desc)
        if (!IS_DAX(file_inode(desc->file)))
                return generic_file_readonly_mmap_prepare(desc);
 
-       if ((desc->vm_flags & VM_SHARED) && (desc->vm_flags & VM_MAYWRITE))
+       if (vma_desc_test_flags(desc, VMA_SHARED_BIT) &&
+           vma_desc_test_flags(desc, VMA_MAYWRITE_BIT))
                return -EINVAL;
 
        desc->vm_ops = &erofs_dax_vm_ops;
-       desc->vm_flags |= VM_HUGEPAGE;
+       vma_desc_set_flags(desc, VMA_HUGEPAGE_BIT);
        return 0;
 }
 #else
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 7a8b30932189..da3c208e72d1 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -822,13 +822,13 @@ static int ext4_file_mmap_prepare(struct vm_area_desc *desc)
         * We don't support synchronous mappings for non-DAX files and
         * for DAX files if underneath dax_device is not synchronous.
         */
-       if (!daxdev_mapping_supported(desc->vm_flags, file_inode(file), dax_dev))
+       if (!daxdev_mapping_supported(desc->vma_flags, file_inode(file), dax_dev))
                return -EOPNOTSUPP;
 
        file_accessed(file);
        if (IS_DAX(file_inode(file))) {
                desc->vm_ops = &ext4_dax_vm_ops;
-               desc->vm_flags |= VM_HUGEPAGE;
+               vma_desc_set_flags(desc, VMA_HUGEPAGE_BIT);
        } else {
                desc->vm_ops = &ext4_file_vm_ops;
        }
diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
index 2e7b2e566ebe..2902fc6d9a85 100644
--- a/fs/ntfs3/file.c
+++ b/fs/ntfs3/file.c
@@ -347,7 +347,7 @@ static int ntfs_file_mmap_prepare(struct vm_area_desc *desc)
        struct inode *inode = file_inode(file);
        struct ntfs_inode *ni = ntfs_i(inode);
        u64 from = ((u64)desc->pgoff << PAGE_SHIFT);
-       bool rw = desc->vm_flags & VM_WRITE;
+       const bool rw = vma_desc_test_flags(desc, VMA_WRITE_BIT);
        int err;
 
        /* Avoid any operation if inode is bad. */
diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c
index 919f99b16834..c75aa3f419b1 100644
--- a/fs/orangefs/file.c
+++ b/fs/orangefs/file.c
@@ -411,8 +411,8 @@ static int orangefs_file_mmap_prepare(struct vm_area_desc *desc)
                     "orangefs_file_mmap: called on %pD\n", file);
 
        /* set the sequential readahead hint */
-       desc->vm_flags |= VM_SEQ_READ;
-       desc->vm_flags &= ~VM_RAND_READ;
+       vma_desc_set_flags(desc, VMA_SEQ_READ_BIT);
+       vma_desc_clear_flags(desc, VMA_RAND_READ_BIT);
 
        file_accessed(file);
        desc->vm_ops = &orangefs_file_vm_ops;
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index 77b8ca2757e0..9b955787456e 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -264,7 +264,7 @@ static unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
  */
 static int ramfs_nommu_mmap_prepare(struct vm_area_desc *desc)
 {
-       if (!is_nommu_shared_mapping(desc->vm_flags))
+       if (!is_nommu_shared_vma_flags(desc->vma_flags))
                return -ENOSYS;
 
        file_accessed(desc->file);
diff --git a/fs/resctrl/pseudo_lock.c b/fs/resctrl/pseudo_lock.c
index 0bfc13c5b96d..e81d71abfe54 100644
--- a/fs/resctrl/pseudo_lock.c
+++ b/fs/resctrl/pseudo_lock.c
@@ -1044,7 +1044,7 @@ static int pseudo_lock_dev_mmap_prepare(struct vm_area_desc *desc)
         * Ensure changes are carried directly to the memory being mapped,
         * do not allow copy-on-write mapping.
         */
-       if (!(desc->vm_flags & VM_SHARED)) {
+       if (!vma_desc_test_flags(desc, VMA_SHARED_BIT)) {
                mutex_unlock(&rdtgroup_mutex);
                return -EINVAL;
        }
diff --git a/fs/romfs/mmap-nommu.c b/fs/romfs/mmap-nommu.c
index 4b77c6dc4418..0271bd8bf676 100644
--- a/fs/romfs/mmap-nommu.c
+++ b/fs/romfs/mmap-nommu.c
@@ -63,7 +63,7 @@ static unsigned long romfs_get_unmapped_area(struct file *file,
  */
 static int romfs_mmap_prepare(struct vm_area_desc *desc)
 {
-       return is_nommu_shared_mapping(desc->vm_flags) ? 0 : -ENOSYS;
+       return is_nommu_shared_vma_flags(desc->vma_flags) ? 0 : -ENOSYS;
 }
 
 static unsigned romfs_mmap_capabilities(struct file *file)
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 7874cf745af3..fabea264324a 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -1974,14 +1974,14 @@ xfs_file_mmap_prepare(
         * We don't support synchronous mappings for non-DAX files and
         * for DAX files if underneath dax_device is not synchronous.
         */
-       if (!daxdev_mapping_supported(desc->vm_flags, file_inode(file),
+       if (!daxdev_mapping_supported(desc->vma_flags, file_inode(file),
                                      target->bt_daxdev))
                return -EOPNOTSUPP;
 
        file_accessed(file);
        desc->vm_ops = &xfs_file_vm_ops;
        if (IS_DAX(inode))
-               desc->vm_flags |= VM_HUGEPAGE;
+               vma_desc_set_flags(desc, VMA_HUGEPAGE_BIT);
        return 0;
 }
 
diff --git a/fs/zonefs/file.c b/fs/zonefs/file.c
index c1e5e30e90a0..8a7161fc49e5 100644
--- a/fs/zonefs/file.c
+++ b/fs/zonefs/file.c
@@ -333,7 +333,8 @@ static int zonefs_file_mmap_prepare(struct vm_area_desc *desc)
         * ordering between msync() and page cache writeback.
         */
        if (zonefs_inode_is_seq(file_inode(file)) &&
-           (desc->vm_flags & VM_SHARED) && (desc->vm_flags & VM_MAYWRITE))
+           vma_desc_test_flags(desc, VMA_SHARED_BIT) &&
+           vma_desc_test_flags(desc, VMA_MAYWRITE_BIT))
                return -EINVAL;
 
        file_accessed(file);
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 9d624f4d9df6..162c19fe478c 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -65,11 +65,11 @@ size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
 /*
  * Check if given mapping is supported by the file / underlying device.
  */
-static inline bool daxdev_mapping_supported(vm_flags_t vm_flags,
+static inline bool daxdev_mapping_supported(vma_flags_t flags,
                                            const struct inode *inode,
                                            struct dax_device *dax_dev)
 {
-       if (!(vm_flags & VM_SYNC))
+       if (!vma_flags_test(flags, VMA_SYNC_BIT))
                return true;
        if (!IS_DAX(inode))
                return false;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ea7c210dc684..09e8e3be9a17 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -550,17 +550,18 @@ enum {
 /*
  * Physically remapped pages are special. Tell the
  * rest of the world about it:
- *   VM_IO tells people not to look at these pages
+ *   IO tells people not to look at these pages
  *     (accesses can have side effects).
- *   VM_PFNMAP tells the core MM that the base pages are just
+ *   PFNMAP tells the core MM that the base pages are just
  *     raw PFN mappings, and do not have a "struct page" associated
  *     with them.
- *   VM_DONTEXPAND
+ *   DONTEXPAND
  *      Disable vma merging and expanding with mremap().
- *   VM_DONTDUMP
+ *   DONTDUMP
  *      Omit vma from core dump, even when VM_IO turned off.
  */
-#define VM_REMAP_FLAGS (VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
+#define VMA_REMAP_FLAGS mk_vma_flags(VMA_IO_BIT, VMA_PFNMAP_BIT,       \
+                                    VMA_DONTEXPAND_BIT, VMA_DONTDUMP_BIT)
 
 /* This mask prevents VMA from being scanned with khugepaged */
 #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
@@ -1928,6 +1929,14 @@ static inline bool is_cow_mapping(vm_flags_t flags)
        return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
 }
 
+static inline bool vma_desc_is_cow_mapping(struct vm_area_desc *desc)
+{
+       const vma_flags_t flags = desc->vma_flags;
+
+       return vma_flags_test(flags, VMA_MAYWRITE_BIT) &&
+               !vma_flags_test(flags, VMA_SHARED_BIT);
+}
+
 #ifndef CONFIG_MMU
 static inline bool is_nommu_shared_mapping(vm_flags_t flags)
 {
@@ -1941,6 +1950,11 @@ static inline bool is_nommu_shared_mapping(vm_flags_t flags)
         */
        return flags & (VM_MAYSHARE | VM_MAYOVERLAY);
 }
+
+static inline bool is_nommu_shared_vma_flags(vma_flags_t flags)
+{
+       return vma_flags_test(flags, VMA_MAYSHARE_BIT, VMA_MAYOVERLAY_BIT);
+}
 #endif
 
 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
diff --git a/kernel/relay.c b/kernel/relay.c
index e36f6b926f7f..1c8e88259df0 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -92,7 +92,7 @@ static int relay_mmap_prepare_buf(struct rchan_buf *buf,
                return -EINVAL;
 
        desc->vm_ops = &relay_file_mmap_ops;
-       desc->vm_flags |= VM_DONTEXPAND;
+       vma_desc_set_flags(desc, VMA_DONTEXPAND_BIT);
        desc->private_data = buf;
 
        return 0;
diff --git a/mm/memory.c b/mm/memory.c
index 76e7ee96ddad..d803e0fcefe3 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2886,7 +2886,7 @@ static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
        return 0;
 }
 
-static int get_remap_pgoff(vm_flags_t vm_flags, unsigned long addr,
+static int get_remap_pgoff(bool is_cow, unsigned long addr,
                unsigned long end, unsigned long vm_start, unsigned long vm_end,
                unsigned long pfn, pgoff_t *vm_pgoff_p)
 {
@@ -2896,7 +2896,7 @@ static int get_remap_pgoff(vm_flags_t vm_flags, unsigned long addr,
         * un-COW'ed pages by matching them up with "vma->vm_pgoff".
         * See vm_normal_page() for details.
         */
-       if (is_cow_mapping(vm_flags)) {
+       if (is_cow) {
                if (addr != vm_start || end != vm_end)
                        return -EINVAL;
                *vm_pgoff_p = pfn;
@@ -2917,7 +2917,7 @@ static int remap_pfn_range_internal(struct vm_area_struct *vma, unsigned long ad
        if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
                return -EINVAL;
 
-       VM_WARN_ON_ONCE((vma->vm_flags & VM_REMAP_FLAGS) != VM_REMAP_FLAGS);
+       VM_WARN_ON_ONCE(!vma_test_all_flags_mask(vma, VMA_REMAP_FLAGS));
 
        BUG_ON(addr >= end);
        pfn -= addr >> PAGE_SHIFT;
@@ -3041,9 +3041,9 @@ void remap_pfn_range_prepare(struct vm_area_desc *desc, unsigned long pfn)
         * check it again on complete and will fail there if specified addr is
         * invalid.
         */
-       get_remap_pgoff(desc->vm_flags, desc->start, desc->end,
+       get_remap_pgoff(vma_desc_is_cow_mapping(desc), desc->start, desc->end,
                        desc->start, desc->end, pfn, &desc->pgoff);
-       desc->vm_flags |= VM_REMAP_FLAGS;
+       vma_desc_set_flags_mask(desc, VMA_REMAP_FLAGS);
 }
 
 static int remap_pfn_range_prepare_vma(struct vm_area_struct *vma, unsigned long addr,
@@ -3052,13 +3052,12 @@ static int remap_pfn_range_prepare_vma(struct vm_area_struct *vma, unsigned long
        unsigned long end = addr + PAGE_ALIGN(size);
        int err;
 
-       err = get_remap_pgoff(vma->vm_flags, addr, end,
-                             vma->vm_start, vma->vm_end,
-                             pfn, &vma->vm_pgoff);
+       err = get_remap_pgoff(is_cow_mapping(vma->vm_flags), addr, end,
+                             vma->vm_start, vma->vm_end, pfn, &vma->vm_pgoff);
        if (err)
                return err;
 
-       vm_flags_set(vma, VM_REMAP_FLAGS);
+       vma_set_flags_mask(vma, VMA_REMAP_FLAGS);
        return 0;
 }
 
-- 
2.52.0
