I need a new alignment field in struct vm_struct in order to reallocate
a previously allocated area with the same alignment.

A patch for a new vrealloc() call will follow, and I want to keep that
call as simple as possible, i.e. not to provide dozens of variants, such
as a vrealloc_user(), each caring about alignment.

The current changes are just preparation.
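
As a rough sketch of how the follow-up vrealloc() might use the stored
alignment (illustrative only; the use of find_vm_area(),
__vmalloc_node_range() and the copy logic below are my assumptions, not
the actual follow-up patch, and the code assumes it lives in mm/vmalloc.c
where those declarations are already visible):

        /*
         * Sketch only: reallocate 'p' with the alignment recorded in
         * vm_struct at the time of the original allocation.
         */
        void *vrealloc(const void *p, size_t size)
        {
                struct vm_struct *area = find_vm_area(p);
                void *new;

                if (!area)
                        return NULL;

                new = __vmalloc_node_range(size, area->alignment,
                                           VMALLOC_START, VMALLOC_END,
                                           GFP_KERNEL, PAGE_KERNEL, 0,
                                           NUMA_NO_NODE,
                                           __builtin_return_address(0));
                if (new) {
                        /* copy old contents, capped at the old usable size */
                        memcpy(new, p, min(size, get_vm_area_size(area)));
                        vfree(p);
                }
                return new;
        }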

It is worth mentioning that on archs where unsigned long is 64 bit this
new field does not bloat vm_struct, because there was originally padding
between nr_pages and phys_addr.
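
Roughly, assuming phys_addr_t is a 64-bit type with 8-byte alignment
(e.g. with CONFIG_PHYS_ADDR_T_64BIT), the tail of the struct looks like
this; the size annotations in the comments are mine:

        unsigned int    nr_pages;    /* 4 bytes */
        /* (pre-patch: 4-byte hole here) */
        unsigned int    alignment;   /* 4 bytes, fills the former hole */
        phys_addr_t     phys_addr;   /* 8 bytes, 8-byte aligned */
        const void      *caller;     /* 8 bytes */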

Signed-off-by: Roman Penyaev <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Andrey Ryabinin <[email protected]>
Cc: Joe Perches <[email protected]>
Cc: "Luis R. Rodriguez" <[email protected]>
Cc: [email protected]
Cc: [email protected]
---
 include/linux/vmalloc.h |  1 +
 mm/vmalloc.c            | 10 ++++++----
 2 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 398e9c95cd61..78210aa0bb43 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -38,6 +38,7 @@ struct vm_struct {
        unsigned long           flags;
        struct page             **pages;
        unsigned int            nr_pages;
+       unsigned int            alignment;
        phys_addr_t             phys_addr;
        const void              *caller;
 };
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index e83961767dc1..4851b4a67f55 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1347,12 +1347,14 @@ int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
 EXPORT_SYMBOL_GPL(map_vm_area);
 
 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
-                             unsigned long flags, const void *caller)
+                            unsigned int align, unsigned long flags,
+                            const void *caller)
 {
        spin_lock(&vmap_area_lock);
        vm->flags = flags;
        vm->addr = (void *)va->va_start;
        vm->size = va->va_end - va->va_start;
+       vm->alignment = align;
        vm->caller = caller;
        va->vm = vm;
        va->flags |= VM_VM_AREA;
@@ -1399,7 +1401,7 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
                return NULL;
        }
 
-       setup_vmalloc_vm(area, va, flags, caller);
+       setup_vmalloc_vm(area, va, align, flags, caller);
 
        return area;
 }
@@ -2601,8 +2603,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 
        /* insert all vm's */
        for (area = 0; area < nr_vms; area++)
-               setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
-                                pcpu_get_vm_areas);
+               setup_vmalloc_vm(vms[area], vas[area], align,
+                                VM_ALLOC, pcpu_get_vm_areas);
 
        kfree(vas);
        return vms;
-- 
2.19.1
