On 14:44 Fri 10 Oct, Beavis wrote:
> thanks for the reply vladimir.
> 
> is it needed to upgrade my 4.3 stable to -current? isn't there a patch
> available for this?

The 4.3 uvm_map.c is 5 revisions behind this patch:
http://www.openbsd.org/cgi-bin/cvsweb/src/sys/uvm/uvm_map.c?r1=1.104#rev1.104
You can generate the diff yourself with:
 cvs diff -r1.99 -r1.104 uvm_map.c

Or use the diff included below:

Index: uvm_map.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_map.c,v
retrieving revision 1.99
retrieving revision 1.104
diff -u -p -r1.99 -r1.104
--- uvm_map.c   15 Sep 2007 10:10:37 -0000      1.99
+++ uvm_map.c   23 Sep 2008 13:25:46 -0000      1.104
@@ -1,4 +1,4 @@
-/*     $OpenBSD: uvm_map.c,v 1.99 2007/09/15 10:10:37 martin Exp $     */
+/*     $OpenBSD: uvm_map.c,v 1.104 2008/09/23 13:25:46 art Exp $       */
 /*     $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */
 
 /* 
@@ -98,6 +98,7 @@ static struct timeval uvm_kmapent_last_w
 static struct timeval uvm_kmapent_warn_rate = { 10, 0 };
 
 struct uvm_cnt uvm_map_call, map_backmerge, map_forwmerge;
+struct uvm_cnt map_nousermerge;
 struct uvm_cnt uvm_mlk_call, uvm_mlk_hint;
 const char vmmapbsy[] = "vmmapbsy";
 
@@ -538,6 +539,7 @@ uvm_map_init(void)
        UVMCNT_INIT(map_backmerge, UVMCNT_CNT, 0, "# uvm_map() back merges", 0);
        UVMCNT_INIT(map_forwmerge, UVMCNT_CNT, 0, "# uvm_map() missed forward",
            0);
+       UVMCNT_INIT(map_nousermerge, UVMCNT_CNT, 0, "# back merges skipped", 0);
        UVMCNT_INIT(uvm_mlk_call,  UVMCNT_CNT, 0, "# map lookup calls", 0);
        UVMCNT_INIT(uvm_mlk_hint,  UVMCNT_CNT, 0, "# map lookup hint hits", 0);
 
@@ -726,6 +728,8 @@ uvm_map_p(struct vm_map *map, vaddr_t *s
 
        if ((map->flags & VM_MAP_INTRSAFE) == 0)
                splassert(IPL_NONE);
+       else
+               splassert(IPL_VM);
 
        /*
         * step 0: sanity check of protection code
@@ -832,6 +836,15 @@ uvm_map_p(struct vm_map *map, vaddr_t *s
                        goto step3;
                }
 
+               /*
+                * Only merge kernel mappings, but keep track
+                * of how much we skipped.
+                */
+               if (map != kernel_map && map != kmem_map) {
+                       UVMCNT_INCR(map_nousermerge);
+                       goto step3;
+               }
+
                if (prev_entry->aref.ar_amap) {
                        error = amap_extend(prev_entry, size);
                        if (error) {
@@ -897,6 +910,8 @@ step3:
                if ((flags & UVM_FLAG_OVERLAY) == 0)
                        new_entry->etype |= UVM_ET_NEEDSCOPY;
        }
+       if (flags & UVM_FLAG_HOLE)
+               new_entry->etype |= UVM_ET_HOLE;
 
        new_entry->protection = prot;
        new_entry->max_protection = maxprot;
@@ -1098,6 +1113,45 @@ uvm_map_spacefits(struct vm_map *map, va
 }
 
 /*
+ * uvm_map_pie: return a random load address for a PIE executable
+ * properly aligned.
+ */
+
+#ifndef VM_PIE_MAX_ADDR
+#define VM_PIE_MAX_ADDR (VM_MAXUSER_ADDRESS / 4)
+#endif
+
+#ifndef VM_PIE_MIN_ADDR
+#define VM_PIE_MIN_ADDR VM_MIN_ADDRESS
+#endif
+
+#ifndef VM_PIE_MIN_ALIGN
+#define VM_PIE_MIN_ALIGN PAGE_SIZE
+#endif
+
+vaddr_t
+uvm_map_pie(vaddr_t align)
+{
+       vaddr_t addr, space, min;
+
+       align = MAX(align, VM_PIE_MIN_ALIGN);
+
+       /* round up to next alignment */
+       min = (VM_PIE_MIN_ADDR + align - 1) & ~(align - 1);
+
+       if (align >= VM_PIE_MAX_ADDR || min >= VM_PIE_MAX_ADDR)
+               return (align);
+
+       space = (VM_PIE_MAX_ADDR - min) / align;
+       space = MIN(space, (u_int32_t)-1);
+
+       addr = (vaddr_t)arc4random_uniform((u_int32_t)space) * align;
+       addr += min;
+
+       return (addr);
+}
+
+/*
  * uvm_map_hint: return the beginning of the best area suitable for
  * creating a new mapping with "prot" protection.
  */
@@ -1385,6 +1439,8 @@ uvm_unmap_remove(struct vm_map *map, vad
 
        if ((map->flags & VM_MAP_INTRSAFE) == 0)
                splassert(IPL_NONE);
+       else
+               splassert(IPL_VM);
 
        /*
         * find first entry
@@ -1451,7 +1507,9 @@ uvm_unmap_remove(struct vm_map *map, vad
                 * special case: handle mappings to anonymous kernel objects.
                 * we want to free these pages right away...
                 */
-               if (map->flags & VM_MAP_INTRSAFE) {
+               if (UVM_ET_ISHOLE(entry)) {
+                       /* nothing to do! */
+               } else if (map->flags & VM_MAP_INTRSAFE) {
                        uvm_km_pgremove_intrsafe(entry->start, entry->end);
                        pmap_kremove(entry->start, len);
                } else if (UVM_ET_ISOBJ(entry) &&
@@ -3697,9 +3755,8 @@ uvm_object_printit(uobj, full, pr)
 
 static const char page_flagbits[] =
        "\20\1BUSY\2WANTED\3TABLED\4CLEAN\5CLEANCHK\6RELEASED\7FAKE\10RDONLY"
-       "\11ZERO\15PAGER1";
-static const char page_pqflagbits[] =
-       "\20\1FREE\2INACTIVE\3ACTIVE\4LAUNDRY\5ANON\6AOBJ";
+       "\11ZERO\15PAGER1\20FREE\21INACTIVE\22ACTIVE\24ENCRYPT\30PMAP0"
+       "\31PMAP1\32PMAP2\33PMAP3";
 
 void
 uvm_page_printit(pg, full, pr)
@@ -3710,14 +3767,10 @@ uvm_page_printit(pg, full, pr)
        struct vm_page *tpg;
        struct uvm_object *uobj;
        struct pglist *pgl;
-       char pgbuf[128];
-       char pqbuf[128];
 
        (*pr)("PAGE %p:\n", pg);
-       snprintf(pgbuf, sizeof(pgbuf), "%b", pg->pg_flags, page_flagbits);
-       snprintf(pqbuf, sizeof(pqbuf), "%b", pg->pg_flags, page_pqflagbits);
-       (*pr)("  flags=%s, pg_flags=%s, vers=%d, wire_count=%d, pa=0x%llx\n",
-           pgbuf, pqbuf, pg->pg_version, pg->wire_count,
+       (*pr)("  flags=%b, vers=%d, wire_count=%d, pa=0x%llx\n",
+           pg->pg_flags, page_flagbits, pg->pg_version, pg->wire_count,
            (long long)pg->phys_addr);
        (*pr)("  uobject=%p, uanon=%p, offset=0x%llx loan_count=%d\n",
            pg->uobject, pg->uanon, (long long)pg->offset, pg->loan_count);


-- 
Vladimir Kirillov
http://darkproger.net

Reply via email to