This is a note to let you know that I've just added the patch titled

    mm: fix crashes from mbind() merging vmas

to the 3.4-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     mm-fix-crashes-from-mbind-merging-vmas.patch
and it can be found in the queue-3.4 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <[email protected]> know about it.

From d05f0cdcbe6388723f1900c549b4850360545201 Mon Sep 17 00:00:00 2001
From: Hugh Dickins <[email protected]>
Date: Mon, 23 Jun 2014 13:22:07 -0700
Subject: mm: fix crashes from mbind() merging vmas

From: Hugh Dickins <[email protected]>

commit d05f0cdcbe6388723f1900c549b4850360545201 upstream.

In v2.6.34 commit 9d8cebd4bcd7 ("mm: fix mbind vma merge problem")
introduced vma merging to mbind(), but it should have also changed the
convention of passing start vma from queue_pages_range() (formerly
check_range()) to new_vma_page(): vma merging may have already freed
that structure, resulting in BUG at mm/mempolicy.c:1738 and probably
worse crashes.

Fixes: 9d8cebd4bcd7 ("mm: fix mbind vma merge problem")
Reported-by: Naoya Horiguchi <[email protected]>
Tested-by: Naoya Horiguchi <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Christoph Lameter <[email protected]>
Cc: KOSAKI Motohiro <[email protected]>
Cc: Minchan Kim <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>

---
 mm/mempolicy.c |   53 ++++++++++++++++++++++++-----------------------------
 1 file changed, 24 insertions(+), 29 deletions(-)
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -566,24 +566,24 @@ static inline int check_pgd_range(struct
  * If pagelist != NULL then isolate pages from the LRU and
  * put them on the pagelist.
  */
-static struct vm_area_struct *
+static int
 check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 		const nodemask_t *nodes, unsigned long flags, void *private)
 {
-	int err;
-	struct vm_area_struct *first, *vma, *prev;
+	int err = 0;
+	struct vm_area_struct *vma, *prev;
 
 
-	first = find_vma(mm, start);
-	if (!first)
-		return ERR_PTR(-EFAULT);
+	vma = find_vma(mm, start);
+	if (!vma)
+		return -EFAULT;
 	prev = NULL;
-	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
+	for (; vma && vma->vm_start < end; vma = vma->vm_next) {
 		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
 			if (!vma->vm_next && vma->vm_end < end)
-				return ERR_PTR(-EFAULT);
+				return -EFAULT;
 			if (prev && prev->vm_end < vma->vm_start)
-				return ERR_PTR(-EFAULT);
+				return -EFAULT;
 		}
 		if (!is_vm_hugetlb_page(vma) &&
 		    ((flags & MPOL_MF_STRICT) ||
@@ -597,14 +597,12 @@ check_range(struct mm_struct *mm, unsign
 			start = vma->vm_start;
 			err = check_pgd_range(vma, start, endvma, nodes,
 						flags, private);
-			if (err) {
-				first = ERR_PTR(err);
+			if (err)
 				break;
-			}
 		}
 		prev = vma;
 	}
-	return first;
+	return err;
 }
 
 /*
@@ -945,16 +943,15 @@ static int migrate_to_node(struct mm_str
 {
 	nodemask_t nmask;
 	LIST_HEAD(pagelist);
-	int err = 0;
-	struct vm_area_struct *vma;
+	int err;
 
 	nodes_clear(nmask);
 	node_set(source, nmask);
 
-	vma = check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
+	err = check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
-	if (IS_ERR(vma))
-		return PTR_ERR(vma);
+	if (err)
+		return err;
 
 	if (!list_empty(&pagelist)) {
 		err = migrate_pages(&pagelist, new_node_page, dest,
@@ -1058,16 +1055,17 @@ out:
 
 /*
  * Allocate a new page for page migration based on vma policy.
- * Start assuming that page is mapped by vma pointed to by @private.
+ * Start by assuming the page is mapped by the same vma as contains @start.
  * Search forward from there, if not.  N.B., this assumes that the
  * list of pages handed to migrate_pages()--which is how we get here--
  * is in virtual address order.
  */
-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
+static struct page *new_page(struct page *page, unsigned long start, int **x)
 {
-	struct vm_area_struct *vma = (struct vm_area_struct *)private;
+	struct vm_area_struct *vma;
 	unsigned long uninitialized_var(address);
 
+	vma = find_vma(current->mm, start);
 	while (vma) {
 		address = page_address_in_vma(page, vma);
 		if (address != -EFAULT)
@@ -1093,7 +1091,7 @@ int do_migrate_pages(struct mm_struct *m
 	return -ENOSYS;
 }
 
-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
+static struct page *new_page(struct page *page, unsigned long start, int **x)
 {
 	return NULL;
 }
@@ -1103,7 +1101,6 @@ static long do_mbind(unsigned long start
 		     unsigned short mode, unsigned short mode_flags,
 		     nodemask_t *nmask, unsigned long flags)
 {
-	struct vm_area_struct *vma;
 	struct mm_struct *mm = current->mm;
 	struct mempolicy *new;
 	unsigned long end;
@@ -1167,19 +1164,17 @@ static long do_mbind(unsigned long start
 	if (err)
 		goto mpol_out;
 
-	vma = check_range(mm, start, end, nmask,
+	err = check_range(mm, start, end, nmask,
 			  flags | MPOL_MF_INVERT, &pagelist);
 
-	err = PTR_ERR(vma);
-	if (!IS_ERR(vma)) {
+	if (!err) {
 		int nr_failed = 0;
 
 		err = mbind_range(mm, start, end, new);
 
 		if (!list_empty(&pagelist)) {
-			nr_failed = migrate_pages(&pagelist, new_vma_page,
-					(unsigned long)vma,
-					false, true);
+			nr_failed = migrate_pages(&pagelist, new_page,
					start, false, true);
 			if (nr_failed)
 				putback_lru_pages(&pagelist);
 		}

Patches currently in stable-queue which might be from [email protected] are

queue-3.4/hugetlb-fix-copy_hugetlb_page_range-to-handle-migration-hwpoisoned-entry.patch
queue-3.4/mm-fix-crashes-from-mbind-merging-vmas.patch
--
To unsubscribe from this list: send the line "unsubscribe stable" in
the body of a message to [email protected]
More majordomo info at http://vger.kernel.org/majordomo-info.html