On Fri, 20 Mar 2026 19:38:17 +0000 "Lorenzo Stoakes (Oracle)" <[email protected]> 
wrote:

> This series converts a lot of the existing use of the legacy vm_flags_t
> data type to the new vma_flags_t type which replaces it.

Thanks, I updated mm.git's mm-unstable branch to this version.

> v4:
> * Propagated tags, thanks Vlasta!
> * Removed superfluous parens around vma_test_any_mask() as per Vlasta.
> * Converted masked functions into more understandable equivalent form as
>   per Vlasta in 24/25.
> * Redefined VM_SPECIAL using vma_flags_to_legacy() as per Vlasta.
> * Fixed whitespace as per Vlasta.
> * Added vma_flags_reset_once() as per Vlasta.
> * Expanded 22/23 commit message to describe why I'm replacing things as
>   per Vlasta.
> * Added bitmap_copy() to test headers in order to implement
>   vma_flags_reset_once().

Here's how v4 altered mm.git:


 include/linux/mm.h              |   26 ++++++++++++--------------
 mm/mlock.c                      |    4 ++--
 mm/mprotect.c                   |   14 +++++---------
 mm/vma.c                        |   11 +++++------
 mm/vma.h                        |    6 ++----
 tools/include/linux/bitmap.h    |   11 +++++++++++
 tools/testing/vma/include/dup.h |   22 +++++++++++++---------
 7 files changed, 50 insertions(+), 44 deletions(-)

--- a/include/linux/mm.h~b
+++ a/include/linux/mm.h
@@ -554,10 +554,10 @@ enum {
 /*
  * Special vmas that are non-mergable, non-mlock()able.
  */
-#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
 
 #define VMA_SPECIAL_FLAGS mk_vma_flags(VMA_IO_BIT, VMA_DONTEXPAND_BIT, \
                                       VMA_PFNMAP_BIT, VMA_MIXEDMAP_BIT)
+#define VM_SPECIAL vma_flags_to_legacy(VMA_SPECIAL_FLAGS)
 
 /*
  * Physically remapped pages are special. Tell the
@@ -959,22 +959,20 @@ static inline void vm_flags_reset(struct
        vm_flags_init(vma, flags);
 }
 
-static inline void vm_flags_reset_once(struct vm_area_struct *vma,
-                                      vm_flags_t flags)
+static inline void vma_flags_reset_once(struct vm_area_struct *vma,
+                                       vma_flags_t *flags)
 {
-       vma_assert_write_locked(vma);
-       /*
-        * If VMA flags exist beyond the first system word, also clear these. It
-        * is assumed the write once behaviour is required only for the first
-        * system word.
-        */
+       const unsigned long word = flags->__vma_flags[0];
+
+       /* It is assumed only the first system word must be written once. */
+       vma_flags_overwrite_word_once(&vma->flags, word);
+       /* The remainder can be copied normally. */
        if (NUM_VMA_FLAG_BITS > BITS_PER_LONG) {
-               unsigned long *bitmap = vma->flags.__vma_flags;
+               unsigned long *dst = &vma->flags.__vma_flags[1];
+               const unsigned long *src = &flags->__vma_flags[1];
 
-               bitmap_zero(&bitmap[1], NUM_VMA_FLAG_BITS - BITS_PER_LONG);
+               bitmap_copy(dst, src, NUM_VMA_FLAG_BITS - BITS_PER_LONG);
        }
-
-       vma_flags_overwrite_word_once(&vma->flags, flags);
 }
 
 static inline void vm_flags_set(struct vm_area_struct *vma,
@@ -1442,7 +1440,7 @@ static __always_inline void vma_desc_set
  * vm_area_desc object describing a proposed VMA, e.g.:
  *
  * vma_desc_set_flags(desc, VMA_IO_BIT, VMA_PFNMAP_BIT, VMA_DONTEXPAND_BIT,
- *              VMA_DONTDUMP_BIT);
+ *             VMA_DONTDUMP_BIT);
  */
 #define vma_desc_set_flags(desc, ...) \
        vma_desc_set_flags_mask(desc, mk_vma_flags(__VA_ARGS__))
--- a/mm/mlock.c~b
+++ a/mm/mlock.c
@@ -443,7 +443,7 @@ static void mlock_vma_pages_range(struct
        if (vma_flags_test(new_vma_flags, VMA_LOCKED_BIT))
                vma_flags_set(new_vma_flags, VMA_IO_BIT);
        vma_start_write(vma);
-       WRITE_ONCE(vma->flags, *new_vma_flags);
+       vma_flags_reset_once(vma, new_vma_flags);
 
        lru_add_drain();
        walk_page_range(vma->vm_mm, start, end, &mlock_walk_ops, NULL);
@@ -451,7 +451,7 @@ static void mlock_vma_pages_range(struct
 
        if (vma_flags_test(new_vma_flags, VMA_IO_BIT)) {
                vma_flags_clear(new_vma_flags, VMA_IO_BIT);
-               WRITE_ONCE(vma->flags, *new_vma_flags);
+               vma_flags_reset_once(vma, new_vma_flags);
        }
 }
 
--- a/mm/mprotect.c~b
+++ a/mm/mprotect.c
@@ -769,7 +769,7 @@ mprotect_fixup(struct vma_iterator *vmi,
         * held in write mode.
         */
        vma_start_write(vma);
-       WRITE_ONCE(vma->flags, new_vma_flags);
+       vma_flags_reset_once(vma, &new_vma_flags);
        if (vma_wants_manual_pte_write_upgrade(vma))
                mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;
        vma_set_page_prot(vma);
@@ -784,14 +784,10 @@ mprotect_fixup(struct vma_iterator *vmi,
         * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
         * fault on access.
         */
-       if (vma_flags_test(&new_vma_flags, VMA_WRITE_BIT)) {
-               const vma_flags_t mask =
-                       vma_flags_and(&old_vma_flags, VMA_WRITE_BIT,
-                                     VMA_SHARED_BIT, VMA_LOCKED_BIT);
-
-               if (vma_flags_same(&mask, VMA_LOCKED_BIT))
-                       populate_vma_page_range(vma, start, end, NULL);
-       }
+       if (vma_flags_test(&new_vma_flags, VMA_WRITE_BIT) &&
+           vma_flags_test(&old_vma_flags, VMA_LOCKED_BIT) &&
+           !vma_flags_test_any(&old_vma_flags, VMA_WRITE_BIT, VMA_SHARED_BIT))
+               populate_vma_page_range(vma, start, end, NULL);
 
        vm_stat_account(mm, vma_flags_to_legacy(old_vma_flags), -nrpages);
        newflags = vma_flags_to_legacy(new_vma_flags);
--- a/mm/vma.c~b
+++ a/mm/vma.c
@@ -2343,7 +2343,6 @@ void mm_drop_all_locks(struct mm_struct
 static bool accountable_mapping(struct mmap_state *map)
 {
        const struct file *file = map->file;
-       vma_flags_t mask;
 
        /*
         * hugetlb has its own accounting separate from the core VM
@@ -2352,9 +2351,9 @@ static bool accountable_mapping(struct m
        if (file && is_file_hugepages(file))
                return false;
 
-       mask = vma_flags_and(&map->vma_flags, VMA_NORESERVE_BIT, VMA_SHARED_BIT,
-                            VMA_WRITE_BIT);
-       return vma_flags_same(&mask, VMA_WRITE_BIT);
+       return vma_flags_test(&map->vma_flags, VMA_WRITE_BIT) &&
+               !vma_flags_test_any(&map->vma_flags, VMA_NORESERVE_BIT,
+                                   VMA_SHARED_BIT);
 }
 
 /*
@@ -3001,7 +3000,7 @@ retry:
        gap += (info->align_offset - gap) & info->align_mask;
        tmp = vma_next(&vmi);
        /* Avoid prev check if possible */
-       if (tmp && (vma_test_any_mask(tmp, VMA_STARTGAP_FLAGS))) {
+       if (tmp && vma_test_any_mask(tmp, VMA_STARTGAP_FLAGS)) {
                if (vm_start_gap(tmp) < gap + length - 1) {
                        low_limit = tmp->vm_end;
                        vma_iter_reset(&vmi);
@@ -3054,7 +3053,7 @@ retry:
        gap_end = vma_iter_end(&vmi);
        tmp = vma_next(&vmi);
         /* Avoid prev check if possible */
-       if (tmp && (vma_test_any_mask(tmp, VMA_STARTGAP_FLAGS))) {
+       if (tmp && vma_test_any_mask(tmp, VMA_STARTGAP_FLAGS)) {
                if (vm_start_gap(tmp) < gap_end) {
                        high_limit = vm_start_gap(tmp);
                        vma_iter_reset(&vmi);
--- a/mm/vma.h~b
+++ a/mm/vma.h
@@ -529,10 +529,8 @@ static inline bool is_data_mapping(vm_fl
 
 static inline bool is_data_mapping_vma_flags(const vma_flags_t *vma_flags)
 {
-       const vma_flags_t mask = vma_flags_and(vma_flags,
-                       VMA_WRITE_BIT, VMA_SHARED_BIT, VMA_STACK_BIT);
-
-       return vma_flags_same(&mask, VMA_WRITE_BIT);
+       return vma_flags_test(vma_flags, VMA_WRITE_BIT) &&
+               !vma_flags_test_any(vma_flags, VMA_SHARED_BIT, VMA_STACK_BIT);
 }
 
 static inline void vma_iter_config(struct vma_iterator *vmi,
--- a/tools/include/linux/bitmap.h~b
+++ a/tools/include/linux/bitmap.h
@@ -55,6 +55,17 @@ static inline void bitmap_fill(unsigned
        dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
 }
 
+static __always_inline
+void bitmap_copy(unsigned long *dst, const unsigned long *src, unsigned int nbits)
+{
+       unsigned int len = bitmap_size(nbits);
+
+       if (small_const_nbits(nbits))
+               *dst = *src;
+       else
+               memcpy(dst, src, len);
+}
+
 static inline bool bitmap_empty(const unsigned long *src, unsigned int nbits)
 {
        if (small_const_nbits(nbits))
--- a/tools/testing/vma/include/dup.h~b
+++ a/tools/testing/vma/include/dup.h
@@ -871,16 +871,20 @@ static inline void vm_flags_reset(struct
        vm_flags_init(vma, flags);
 }
 
-static inline void vm_flags_reset_once(struct vm_area_struct *vma,
-                                      vm_flags_t flags)
+static inline void vma_flags_reset_once(struct vm_area_struct *vma,
+                                       vma_flags_t *flags)
 {
-       vma_assert_write_locked(vma);
-       /*
-        * The user should only be interested in avoiding reordering of
-        * assignment to the first word.
-        */
-       vma_flags_clear_all(&vma->flags);
-       vma_flags_overwrite_word_once(&vma->flags, flags);
+       const unsigned long word = flags->__vma_flags[0];
+
+       /* It is assumed only the first system word must be written once. */
+       vma_flags_overwrite_word_once(&vma->flags, word);
+       /* The remainder can be copied normally. */
+       if (NUM_VMA_FLAG_BITS > BITS_PER_LONG) {
+               unsigned long *dst = &vma->flags.__vma_flags[1];
+               const unsigned long *src = &flags->__vma_flags[1];
+
+               bitmap_copy(dst, src, NUM_VMA_FLAG_BITS - BITS_PER_LONG);
+       }
 }
 
 static inline void vm_flags_set(struct vm_area_struct *vma,
_


Reply via email to