Module Name:    src
Committed By:   chs
Date:           Fri May 19 15:30:19 UTC 2017

Modified Files:
        src/sys/uvm: uvm_map.c uvm_mmap.c

Log Message:
Make MAP_FIXED mapping operations atomic; fixes PR 52239.
Previously, unmapping any entries being replaced was done separately
from entering the new mapping, which allowed another thread performing
a non-MAP_FIXED mapping to allocate the range out from under the
MAP_FIXED thread.


To generate a diff of this commit:
cvs rdiff -u -r1.346 -r1.347 src/sys/uvm/uvm_map.c
cvs rdiff -u -r1.164 -r1.165 src/sys/uvm/uvm_mmap.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/uvm/uvm_map.c
diff -u src/sys/uvm/uvm_map.c:1.346 src/sys/uvm/uvm_map.c:1.347
--- src/sys/uvm/uvm_map.c:1.346	Fri May 19 14:42:00 2017
+++ src/sys/uvm/uvm_map.c	Fri May 19 15:30:19 2017
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_map.c,v 1.346 2017/05/19 14:42:00 christos Exp $	*/
+/*	$NetBSD: uvm_map.c,v 1.347 2017/05/19 15:30:19 chs Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.346 2017/05/19 14:42:00 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.347 2017/05/19 15:30:19 chs Exp $");
 
 #include "opt_ddb.h"
 #include "opt_pax.h"
@@ -1163,8 +1163,25 @@ retry:
 		}
 		vm_map_lock(map); /* could sleep here */
 	}
-	prev_entry = uvm_map_findspace(map, start, size, &start,
-	    uobj, uoffset, align, flags);
+	if (flags & UVM_FLAG_FIXED) {
+		KASSERT((flags & UVM_FLAG_NOWAIT) == 0);
+
+		/*
+		 * Set prev_entry to what it will need to be after any existing
+		 * entries are removed later in uvm_map_enter().
+		 */
+
+		if (uvm_map_lookup_entry(map, start, &prev_entry)) {
+			if (start == prev_entry->start)
+				prev_entry = prev_entry->prev;
+			else
+				UVM_MAP_CLIP_END(map, prev_entry, start);
+			SAVE_HINT(map, map->hint, prev_entry);
+		}
+	} else {
+		prev_entry = uvm_map_findspace(map, start, size, &start,
+		    uobj, uoffset, align, flags);
+	}
 	if (prev_entry == NULL) {
 		unsigned int timestamp;
 
@@ -1255,7 +1272,7 @@ uvm_map_enter(struct vm_map *map, const 
     struct vm_map_entry *new_entry)
 {
 	struct vm_map_entry *prev_entry = args->uma_prev;
-	struct vm_map_entry *dead = NULL;
+	struct vm_map_entry *dead = NULL, *dead_entries = NULL;
 
 	const uvm_flag_t flags = args->uma_flags;
 	const vm_prot_t prot = UVM_PROTECTION(flags);
@@ -1284,6 +1301,8 @@ uvm_map_enter(struct vm_map *map, const 
 
 	KASSERT(map->hint == prev_entry); /* bimerge case assumes this */
 	KASSERT(vm_map_locked_p(map));
+	KASSERT((flags & (UVM_FLAG_NOWAIT | UVM_FLAG_FIXED)) !=
+		(UVM_FLAG_NOWAIT | UVM_FLAG_FIXED));
 
 	if (uobj)
 		newetype = UVM_ET_OBJ;
@@ -1297,6 +1316,27 @@ uvm_map_enter(struct vm_map *map, const 
 	}
 
 	/*
+	 * For fixed mappings, remove any old entries now.  Adding the new
+	 * entry cannot fail because that can only happen if UVM_FLAG_NOWAIT
+	 * is set, and we do not support nowait and fixed together.
+	 */
+
+	if (flags & UVM_FLAG_FIXED) {
+		uvm_unmap_remove(map, start, start + size, &dead_entries, 0);
+#ifdef DEBUG
+		struct vm_map_entry *tmp_entry;
+		bool rv;
+
+		rv = uvm_map_lookup_entry(map, start, &tmp_entry);
+		KASSERT(!rv);
+		KASSERTMSG(prev_entry == tmp_entry,
+			   "args %p prev_entry %p tmp_entry %p",
+			   args, prev_entry, tmp_entry);
+#endif
+		SAVE_HINT(map, map->hint, prev_entry);
+	}
+
+	/*
 	 * try and insert in map by extending previous entry, if possible.
 	 * XXX: we don't try and pull back the next entry.   might be useful
 	 * for a stack, but we are currently allocating our stack in advance.
@@ -1569,17 +1609,19 @@ nomerge:
 	UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
 
 	error = 0;
+
 done:
 	vm_map_unlock(map);
 
 	if (new_entry) {
 		uvm_mapent_free(new_entry);
 	}
-
 	if (dead) {
 		KDASSERT(merged);
 		uvm_mapent_free(dead);
 	}
+	if (dead_entries)
+		uvm_unmap_detach(dead_entries, 0);
 
 	return error;
 }

Index: src/sys/uvm/uvm_mmap.c
diff -u src/sys/uvm/uvm_mmap.c:1.164 src/sys/uvm/uvm_mmap.c:1.165
--- src/sys/uvm/uvm_mmap.c:1.164	Sat May  6 21:34:52 2017
+++ src/sys/uvm/uvm_mmap.c	Fri May 19 15:30:19 2017
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_mmap.c,v 1.164 2017/05/06 21:34:52 joerg Exp $	*/
+/*	$NetBSD: uvm_mmap.c,v 1.165 2017/05/19 15:30:19 chs Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -46,7 +46,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_mmap.c,v 1.164 2017/05/06 21:34:52 joerg Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_mmap.c,v 1.165 2017/05/19 15:30:19 chs Exp $");
 
 #include "opt_compat_netbsd.h"
 #include "opt_pax.h"
@@ -924,7 +924,7 @@ uvm_mmap(struct vm_map *map, vaddr_t *ad
 
 	/*
 	 * for non-fixed mappings, round off the suggested address.
-	 * for fixed mappings, check alignment and zap old mappings.
+	 * for fixed mappings, check alignment.
 	 */
 
 	if ((flags & MAP_FIXED) == 0) {
@@ -933,7 +933,6 @@ uvm_mmap(struct vm_map *map, vaddr_t *ad
 		if (*addr & PAGE_MASK)
 			return EINVAL;
 		uvmflag |= UVM_FLAG_FIXED;
-		(void) uvm_unmap(map, *addr, *addr + size);
 	}
 
 	/*

Reply via email to the sender.