Author: jhb
Date: Fri Sep  4 19:59:32 2009
New Revision: 196838
URL: http://svn.freebsd.org/changeset/base/196838

Log:
  MFC 193396, 193521, 194331, 194337, 194376, 194454, 194562, 194642, 195033,
  195385, 195649, 195660, 195749, and 195774:
  Add support to the virtual memory system for configuring machine-
  dependent memory attributes:
  - Refactor contigmalloc() into two functions: a simple front-end that deals
    with the malloc tag and calls a new back-end, kmem_alloc_contig(), that
    allocates the pages and maps them (see the first sketch after this list).
  - Use kmem_alloc_contig() to implement the UMA back-end allocator for
    jumbo frame zones.
  - Use kmem_alloc_contig() to allocate the top-level page tables for PAE.
  - Introduce vm_memattr_t as a type to hold memory attributes.
  - Introduce vm_object_set_memattr() for setting the default memory
    attributes that will be given to an object's pages.
  - Introduce and use pmap_page_{get,set}_memattr() for getting and
    setting a page's machine-dependent memory attributes (see the second
    sketch after this list).  Add full support for these functions on
    amd64 and i386 and stubs for them on
    the other architectures.  The function pmap_page_set_memattr() is also
    responsible for any other machine-dependent aspects of changing a
    page's memory attributes, such as flushing the cache or updating the
    direct map.  The uses include kmem_alloc_contig(), vm_page_alloc(),
    and the device pager:
  
      kmem_alloc_contig() can now be used to allocate kernel memory with
      non-default memory attributes on amd64 and i386.
  
      vm_page_alloc() and the device pager will set the memory attributes
      for the real or fictitious page according to the object's default
      memory attributes.
  
  - Update the various pmap functions on amd64 and i386 that map pages to
    incorporate each page's memory attributes in the mapping.
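  
  As a purely illustrative sketch (the helper below is hypothetical;
  only the kmem_alloc_contig() signature and the VM_MEMATTR_* names
  come from this change), a driver on amd64 or i386 could now allocate
  a physically contiguous buffer below 4GB with the write-combining
  attribute:
  
        #include <sys/param.h>
        #include <sys/malloc.h>
  
        #include <vm/vm.h>
        #include <vm/vm_extern.h>
        #include <vm/vm_kern.h>
  
        /* Hypothetical helper: 64KB contiguous WC buffer below 4GB. */
        static vm_offset_t
        alloc_wc_buf(void)
        {
  
                /* kmem_alloc_contig() returns 0 on failure. */
                return (kmem_alloc_contig(kernel_map, 64 * 1024,
                    M_WAITOK | M_ZERO, (vm_paddr_t)0,
                    (vm_paddr_t)0xffffffff, PAGE_SIZE, 0,
                    VM_MEMATTR_WRITE_COMBINING));
        }
  
  Such a buffer would later be released with kmem_free(kernel_map, va,
  64 * 1024), exactly as the refactored contigfree() does internally.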
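  
  Likewise, a hypothetical amd64/i386 caller of the new per-page
  accessors (real implementations there, no-op stubs elsewhere; this
  assumes <vm/vm.h>, <vm/vm_page.h>, and <vm/pmap.h> are included):
  
        /* Hypothetical: make one page's future mappings uncacheable. */
        static void
        make_page_uncacheable(vm_page_t m)
        {
  
                if (pmap_page_get_memattr(m) != VM_MEMATTR_UNCACHEABLE)
                        pmap_page_set_memattr(m, VM_MEMATTR_UNCACHEABLE);
        }
  
  On amd64, pmap_page_set_memattr() additionally updates the page's
  direct mapping; where only the stubs exist, it has no effect.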
  
  Reviewed by:  alc

Added:
  stable/7/sys/amd64/include/vm.h
     - copied, changed from r195033, head/sys/amd64/include/vm.h
  stable/7/sys/arm/include/vm.h
     - copied, changed from r195033, head/sys/arm/include/vm.h
  stable/7/sys/i386/include/vm.h
     - copied, changed from r195033, head/sys/i386/include/vm.h
  stable/7/sys/ia64/include/vm.h
     - copied, changed from r195033, head/sys/ia64/include/vm.h
  stable/7/sys/powerpc/include/vm.h
     - copied, changed from r195033, head/sys/powerpc/include/vm.h
  stable/7/sys/sparc64/include/vm.h
     - copied, changed from r195033, head/sys/sparc64/include/vm.h
  stable/7/sys/sun4v/include/vm.h
     - copied, changed from r195033, head/sys/sun4v/include/vm.h
Modified:
  stable/7/sys/   (props changed)
  stable/7/sys/amd64/amd64/pmap.c
  stable/7/sys/amd64/include/pmap.h
  stable/7/sys/arm/include/pmap.h
  stable/7/sys/contrib/pf/   (props changed)
  stable/7/sys/dev/iir/iir.c
  stable/7/sys/dev/iir/iir_ctrl.c
  stable/7/sys/i386/i386/pmap.c
  stable/7/sys/i386/include/pmap.h
  stable/7/sys/ia64/include/pmap.h
  stable/7/sys/kern/kern_mbuf.c
  stable/7/sys/powerpc/include/pmap.h
  stable/7/sys/sparc64/include/pmap.h
  stable/7/sys/sun4v/include/pmap.h
  stable/7/sys/sun4v/sun4v/pmap.c
  stable/7/sys/vm/device_pager.c
  stable/7/sys/vm/pmap.h
  stable/7/sys/vm/vm.h
  stable/7/sys/vm/vm_contig.c
  stable/7/sys/vm/vm_extern.h
  stable/7/sys/vm/vm_object.c
  stable/7/sys/vm/vm_object.h
  stable/7/sys/vm/vm_page.c
  stable/7/sys/vm/vm_phys.c

Modified: stable/7/sys/amd64/amd64/pmap.c
==============================================================================
--- stable/7/sys/amd64/amd64/pmap.c     Fri Sep  4 19:20:46 2009        (r196837)
+++ stable/7/sys/amd64/amd64/pmap.c     Fri Sep  4 19:59:32 2009        (r196838)
@@ -618,6 +618,7 @@ pmap_page_init(vm_page_t m)
 {
 
        TAILQ_INIT(&m->md.pv_list);
+       m->md.pat_mode = PAT_WRITE_BACK;
 }
 
 /*
@@ -748,21 +749,6 @@ pmap_cache_bits(int mode, boolean_t is_p
        /* The PAT bit is different for PTE's and PDE's. */
        pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT;
 
-       /* If we don't support PAT, map extended modes to older ones. */
-       if (!(cpu_feature & CPUID_PAT)) {
-               switch (mode) {
-               case PAT_UNCACHEABLE:
-               case PAT_WRITE_THROUGH:
-               case PAT_WRITE_BACK:
-                       break;
-               case PAT_UNCACHED:
-               case PAT_WRITE_COMBINING:
-               case PAT_WRITE_PROTECTED:
-                       mode = PAT_UNCACHEABLE;
-                       break;
-               }
-       }
-       
        /* Map the caching mode to a PAT index. */
        switch (mode) {
 #ifdef PAT_WORKS
@@ -1134,7 +1120,8 @@ pmap_qenter(vm_offset_t sva, vm_page_t *
        endpte = pte + count;
        while (pte < endpte) {
                oldpte |= *pte;
-               pte_store(pte, VM_PAGE_TO_PHYS(*ma) | PG_G | PG_RW | PG_V);
+               pte_store(pte, VM_PAGE_TO_PHYS(*ma) | PG_G |
+                   pmap_cache_bits((*ma)->md.pat_mode, 0) | PG_RW | PG_V);
                pte++;
                ma++;
        }
@@ -3046,7 +3033,7 @@ validate:
        /*
         * Now validate mapping with desired protection/wiring.
         */
-       newpte = (pt_entry_t)(pa | PG_V);
+       newpte = (pt_entry_t)(pa | pmap_cache_bits(m->md.pat_mode, 0) | PG_V);
        if ((prot & VM_PROT_WRITE) != 0) {
                newpte |= PG_RW;
                vm_page_flag_set(m, PG_WRITEABLE);
@@ -3131,7 +3118,8 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t 
                    " in pmap %p", va, pmap);
                return (FALSE);
        }
-       newpde = VM_PAGE_TO_PHYS(m) | PG_PS | PG_V;
+       newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 1) |
+           PG_PS | PG_V;
        if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
                newpde |= PG_MANAGED;
 
@@ -3318,7 +3306,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
         */
        pmap->pm_stats.resident_count++;
 
-       pa = VM_PAGE_TO_PHYS(m);
+       pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
        if ((prot & VM_PROT_EXECUTE) == 0)
                pa |= pg_nx;
 
@@ -3359,6 +3347,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offs
        pd_entry_t *pde;
        vm_paddr_t pa, ptepa;
        vm_page_t p, pdpg;
+       int pat_mode;
 
        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        KASSERT(object->type == OBJT_DEVICE,
@@ -3369,6 +3358,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offs
                p = vm_page_lookup(object, pindex);
                KASSERT(p->valid == VM_PAGE_BITS_ALL,
                    ("pmap_object_init_pt: invalid page %p", p));
+               pat_mode = p->md.pat_mode;
 
                /*
                 * Abort the mapping if the first page is not physically
@@ -3380,21 +3370,28 @@ pmap_object_init_pt(pmap_t pmap, vm_offs
 
                /*
                 * Skip the first page.  Abort the mapping if the rest of
-                * the pages are not physically contiguous.
+                * the pages are not physically contiguous or have differing
+                * memory attributes.
                 */
                p = TAILQ_NEXT(p, listq);
                for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
                    pa += PAGE_SIZE) {
                        KASSERT(p->valid == VM_PAGE_BITS_ALL,
                            ("pmap_object_init_pt: invalid page %p", p));
-                       if (pa != VM_PAGE_TO_PHYS(p))
+                       if (pa != VM_PAGE_TO_PHYS(p) ||
+                           pat_mode != p->md.pat_mode)
                                return;
                        p = TAILQ_NEXT(p, listq);
                }
 
-               /* Map using 2MB pages. */
+               /*
+                * Map using 2MB pages.  Since "ptepa" is 2M aligned and
+                * "size" is a multiple of 2M, adding the PAT setting to "pa"
+                * will not affect the termination of this loop.
+                */ 
                PMAP_LOCK(pmap);
-               for (pa = ptepa; pa < ptepa + size; pa += NBPDR) {
+               for (pa = ptepa | pmap_cache_bits(pat_mode, 1); pa < ptepa +
+                   size; pa += NBPDR) {
                        pdpg = pmap_allocpde(pmap, addr, M_NOWAIT);
                        if (pdpg == NULL) {
                                /*
@@ -4305,6 +4302,26 @@ pmap_unmapdev(vm_offset_t va, vm_size_t 
 }
 
 /*
+ * Sets the memory attribute for the specified page.
+ */
+void
+pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
+{
+
+       m->md.pat_mode = ma;
+
+       /*
+        * If "m" is a normal page, update its direct mapping.  This update
+        * can be relied upon to perform any cache operations that are
+        * required for data coherence.
+        */
+       if ((m->flags & PG_FICTITIOUS) == 0 &&
+           pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
+           m->md.pat_mode))
+               panic("memory attribute change on the direct map failed");
+}
+
+/*
  * Changes the specified virtual address range's memory type to that given by
  * the parameter "mode".  The specified virtual address range must be
  * completely contained within either the direct map or the kernel map.  If

Modified: stable/7/sys/amd64/include/pmap.h
==============================================================================
--- stable/7/sys/amd64/include/pmap.h   Fri Sep  4 19:20:46 2009        (r196837)
+++ stable/7/sys/amd64/include/pmap.h   Fri Sep  4 19:59:32 2009        (r196838)
@@ -234,7 +234,7 @@ struct      pv_entry;
 struct pv_chunk;
 
 struct md_page {
-       int pv_unused;
+       int                     pat_mode;
        TAILQ_HEAD(,pv_entry)   pv_list;
 };
 
@@ -306,6 +306,7 @@ extern vm_paddr_t dump_avail[];
 extern vm_offset_t virtual_avail;
 extern vm_offset_t virtual_end;
 
+#define        pmap_page_get_memattr(m)        ((vm_memattr_t)(m)->md.pat_mode)
 #define        pmap_unmapbios(va, sz)  pmap_unmapdev((va), (sz))
 
 void   pmap_bootstrap(vm_paddr_t *);
@@ -319,6 +320,7 @@ void        *pmap_mapbios(vm_paddr_t, vm_size_t
 void   *pmap_mapdev(vm_paddr_t, vm_size_t);
 void   *pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
 boolean_t pmap_page_is_mapped(vm_page_t m);
+void   pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
 void   pmap_unmapdev(vm_offset_t, vm_size_t);
 void   pmap_invalidate_page(pmap_t, vm_offset_t);
 void   pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);

Copied and modified: stable/7/sys/amd64/include/vm.h (from r195033, head/sys/amd64/include/vm.h)
==============================================================================
--- head/sys/amd64/include/vm.h Fri Jun 26 04:47:43 2009        (r195033, copy source)
+++ stable/7/sys/amd64/include/vm.h     Fri Sep  4 19:59:32 2009        (r196838)
@@ -32,14 +32,14 @@
 
 #include <machine/specialreg.h>
 
-/* Cache control options. */
-#define        VM_CACHE_UNCACHEABLE            ((vm_cache_mode_t)PAT_UNCACHEABLE)
-#define        VM_CACHE_WRITE_COMBINING        ((vm_cache_mode_t)PAT_WRITE_COMBINING)
-#define        VM_CACHE_WRITE_THROUGH          ((vm_cache_mode_t)PAT_WRITE_THROUGH)
-#define        VM_CACHE_WRITE_PROTECTED        ((vm_cache_mode_t)PAT_WRITE_PROTECTED)
-#define        VM_CACHE_WRITE_BACK             ((vm_cache_mode_t)PAT_WRITE_BACK)
-#define        VM_CACHE_UNCACHED               ((vm_cache_mode_t)PAT_UNCACHED)
+/* Memory attributes. */
+#define        VM_MEMATTR_UNCACHEABLE          ((vm_memattr_t)PAT_UNCACHEABLE)
+#define        VM_MEMATTR_WRITE_COMBINING      ((vm_memattr_t)PAT_WRITE_COMBINING)
+#define        VM_MEMATTR_WRITE_THROUGH        ((vm_memattr_t)PAT_WRITE_THROUGH)
+#define        VM_MEMATTR_WRITE_PROTECTED      ((vm_memattr_t)PAT_WRITE_PROTECTED)
+#define        VM_MEMATTR_WRITE_BACK           ((vm_memattr_t)PAT_WRITE_BACK)
+#define        VM_MEMATTR_UNCACHED             ((vm_memattr_t)PAT_UNCACHED)
 
-#define        VM_CACHE_DEFAULT                VM_CACHE_WRITE_BACK
+#define        VM_MEMATTR_DEFAULT              VM_MEMATTR_WRITE_BACK
 
 #endif /* !_MACHINE_PMAP_H_ */

Modified: stable/7/sys/arm/include/pmap.h
==============================================================================
--- stable/7/sys/arm/include/pmap.h     Fri Sep  4 19:20:46 2009        (r196837)
+++ stable/7/sys/arm/include/pmap.h     Fri Sep  4 19:59:32 2009        (r196838)
@@ -75,7 +75,10 @@
 
 #endif
 
+#define        pmap_page_get_memattr(m)        VM_MEMATTR_DEFAULT
 #define        pmap_page_is_mapped(m)  (!TAILQ_EMPTY(&(m)->md.pv_list))
+#define        pmap_page_set_memattr(m, ma)    (void)0
+
 /*
  * Pmap stuff
  */

Copied and modified: stable/7/sys/arm/include/vm.h (from r195033, head/sys/arm/include/vm.h)
==============================================================================
--- head/sys/arm/include/vm.h   Fri Jun 26 04:47:43 2009        (r195033, copy source)
+++ stable/7/sys/arm/include/vm.h       Fri Sep  4 19:59:32 2009        (r196838)
@@ -29,7 +29,7 @@
 #ifndef _MACHINE_VM_H_
 #define        _MACHINE_VM_H_
 
-/* Cache control is not (yet) implemented. */
-#define        VM_CACHE_DEFAULT        0
+/* Memory attribute configuration is not (yet) implemented. */
+#define        VM_MEMATTR_DEFAULT      0
 
 #endif /* !_MACHINE_PMAP_H_ */

Modified: stable/7/sys/dev/iir/iir.c
==============================================================================
--- stable/7/sys/dev/iir/iir.c  Fri Sep  4 19:20:46 2009        (r196837)
+++ stable/7/sys/dev/iir/iir.c  Fri Sep  4 19:59:32 2009        (r196838)
@@ -67,9 +67,6 @@ __FBSDID("$FreeBSD$");
 #include <cam/scsi/scsi_all.h>
 #include <cam/scsi/scsi_message.h>
 
-#include <vm/vm.h>
-#include <vm/pmap.h>
-
 #include <dev/iir/iir.h>
 
 MALLOC_DEFINE(M_GDTBUF, "iirbuf", "iir driver buffer");

Modified: stable/7/sys/dev/iir/iir_ctrl.c
==============================================================================
--- stable/7/sys/dev/iir/iir_ctrl.c     Fri Sep  4 19:20:46 2009        (r196837)
+++ stable/7/sys/dev/iir/iir_ctrl.c     Fri Sep  4 19:59:32 2009        (r196838)
@@ -52,10 +52,6 @@ __FBSDID("$FreeBSD$");
 #include <sys/stat.h>
 #include <sys/disklabel.h>
 #include <machine/bus.h>
-#include <vm/vm.h>
-#include <vm/vm_kern.h>
-#include <vm/vm_extern.h>
-#include <vm/pmap.h>
 
 #include <dev/iir/iir.h>
 

Modified: stable/7/sys/i386/i386/pmap.c
==============================================================================
--- stable/7/sys/i386/i386/pmap.c       Fri Sep  4 19:20:46 2009        (r196837)
+++ stable/7/sys/i386/i386/pmap.c       Fri Sep  4 19:59:32 2009        (r196838)
@@ -556,20 +556,18 @@ pmap_page_init(vm_page_t m)
 {
 
        TAILQ_INIT(&m->md.pv_list);
+       m->md.pat_mode = PAT_WRITE_BACK;
 }
 
 #ifdef PAE
-
-static MALLOC_DEFINE(M_PMAPPDPT, "pmap", "pmap pdpt");
-
 static void *
 pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
 {
 
        /* Inform UMA that this allocator uses kernel_map/object. */
        *flags = UMA_SLAB_KERNEL;
-       return (contigmalloc(PAGE_SIZE, M_PMAPPDPT, 0, 0x0ULL, 0xffffffffULL,
-           1, 0));
+       return ((void *)kmem_alloc_contig(kernel_map, bytes, wait, 0x0ULL,
+           0xffffffffULL, 1, 0, VM_MEMATTR_DEFAULT));
 }
 #endif
 
@@ -1206,7 +1204,8 @@ pmap_qenter(vm_offset_t sva, vm_page_t *
        endpte = pte + count;
        while (pte < endpte) {
                oldpte |= *pte;
-               pte_store(pte, VM_PAGE_TO_PHYS(*ma) | pgeflag | PG_RW | PG_V);
+               pte_store(pte, VM_PAGE_TO_PHYS(*ma) | pgeflag |
+                   pmap_cache_bits((*ma)->md.pat_mode, 0) | PG_RW | PG_V);
                pte++;
                ma++;
        }
@@ -3161,7 +3160,7 @@ validate:
        /*
         * Now validate mapping with desired protection/wiring.
         */
-       newpte = (pt_entry_t)(pa | PG_V);
+       newpte = (pt_entry_t)(pa | pmap_cache_bits(m->md.pat_mode, 0) | PG_V);
        if ((prot & VM_PROT_WRITE) != 0) {
                newpte |= PG_RW;
                vm_page_flag_set(m, PG_WRITEABLE);
@@ -3243,7 +3242,8 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t 
                    " in pmap %p", va, pmap);
                return (FALSE);
        }
-       newpde = VM_PAGE_TO_PHYS(m) | PG_PS | PG_V;
+       newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 1) |
+           PG_PS | PG_V;
        if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
                newpde |= PG_MANAGED;
 
@@ -3428,7 +3428,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
         */
        pmap->pm_stats.resident_count++;
 
-       pa = VM_PAGE_TO_PHYS(m);
+       pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
 #ifdef PAE
        if ((prot & VM_PROT_EXECUTE) == 0)
                pa |= pg_nx;
@@ -3471,6 +3471,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offs
        pd_entry_t *pde;
        vm_paddr_t pa, ptepa;
        vm_page_t p;
+       int pat_mode;
 
        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        KASSERT(object->type == OBJT_DEVICE,
@@ -3482,6 +3483,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offs
                p = vm_page_lookup(object, pindex);
                KASSERT(p->valid == VM_PAGE_BITS_ALL,
                    ("pmap_object_init_pt: invalid page %p", p));
+               pat_mode = p->md.pat_mode;
 
                /*
                 * Abort the mapping if the first page is not physically
@@ -3493,21 +3495,28 @@ pmap_object_init_pt(pmap_t pmap, vm_offs
 
                /*
                 * Skip the first page.  Abort the mapping if the rest of
-                * the pages are not physically contiguous.
+                * the pages are not physically contiguous or have differing
+                * memory attributes.
                 */
                p = TAILQ_NEXT(p, listq);
                for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
                    pa += PAGE_SIZE) {
                        KASSERT(p->valid == VM_PAGE_BITS_ALL,
                            ("pmap_object_init_pt: invalid page %p", p));
-                       if (pa != VM_PAGE_TO_PHYS(p))
+                       if (pa != VM_PAGE_TO_PHYS(p) ||
+                           pat_mode != p->md.pat_mode)
                                return;
                        p = TAILQ_NEXT(p, listq);
                }
 
-               /* Map using 2/4MB pages. */
+               /*
+                * Map using 2/4MB pages.  Since "ptepa" is 2/4M aligned and
+                * "size" is a multiple of 2/4M, adding the PAT setting to
+                * "pa" will not affect the termination of this loop.
+                */
                PMAP_LOCK(pmap);
-               for (pa = ptepa; pa < ptepa + size; pa += NBPDR) {
+               for (pa = ptepa | pmap_cache_bits(pat_mode, 1); pa < ptepa +
+                   size; pa += NBPDR) {
                        pde = pmap_pde(pmap, addr);
                        if (*pde == 0) {
                                pde_store(pde, pa | PG_PS | PG_M | PG_A |
@@ -3723,7 +3732,8 @@ pmap_zero_page(vm_page_t m)
        if (*sysmaps->CMAP2)
                panic("pmap_zero_page: CMAP2 busy");
        sched_pin();
-       *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M;
+       *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
+           pmap_cache_bits(m->md.pat_mode, 0);
        invlcaddr(sysmaps->CADDR2);
        pagezero(sysmaps->CADDR2);
        *sysmaps->CMAP2 = 0;
@@ -3745,9 +3755,10 @@ pmap_zero_page_area(vm_page_t m, int off
        sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
        mtx_lock(&sysmaps->lock);
        if (*sysmaps->CMAP2)
-               panic("pmap_zero_page: CMAP2 busy");
+               panic("pmap_zero_page_area: CMAP2 busy");
        sched_pin();
-       *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M;
+       *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
+           pmap_cache_bits(m->md.pat_mode, 0);
        invlcaddr(sysmaps->CADDR2);
        if (off == 0 && size == PAGE_SIZE) 
                pagezero(sysmaps->CADDR2);
@@ -3769,9 +3780,10 @@ pmap_zero_page_idle(vm_page_t m)
 {
 
        if (*CMAP3)
-               panic("pmap_zero_page: CMAP3 busy");
+               panic("pmap_zero_page_idle: CMAP3 busy");
        sched_pin();
-       *CMAP3 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M;
+       *CMAP3 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
+           pmap_cache_bits(m->md.pat_mode, 0);
        invlcaddr(CADDR3);
        pagezero(CADDR3);
        *CMAP3 = 0;
@@ -3798,8 +3810,10 @@ pmap_copy_page(vm_page_t src, vm_page_t 
        sched_pin();
        invlpg((u_int)sysmaps->CADDR1);
        invlpg((u_int)sysmaps->CADDR2);
-       *sysmaps->CMAP1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A;
-       *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M;
+       *sysmaps->CMAP1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A |
+           pmap_cache_bits(src->md.pat_mode, 0);
+       *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M |
+           pmap_cache_bits(dst->md.pat_mode, 0);
        bcopy(sysmaps->CADDR1, sysmaps->CADDR2, PAGE_SIZE);
        *sysmaps->CMAP1 = 0;
        *sysmaps->CMAP2 = 0;
@@ -4420,7 +4434,9 @@ pmap_mapdev_attr(vm_paddr_t pa, vm_size_
                pa += PAGE_SIZE;
        }
        pmap_invalidate_range(kernel_pmap, va, tmpva);
-       pmap_invalidate_cache();
+       /* If "Self Snoop" is supported, do nothing. */
+       if (!(cpu_feature & CPUID_SS))
+               pmap_invalidate_cache();
        return ((void *)(va + offset));
 }
 
@@ -4455,6 +4471,25 @@ pmap_unmapdev(vm_offset_t va, vm_size_t 
 }
 
 /*
+ * Sets the memory attribute for the specified page.
+ */
+void
+pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
+{
+
+       m->md.pat_mode = ma;
+
+       /*
+        * If "m" is a normal page, flush it from the cache.
+        */    
+       if ((m->flags & PG_FICTITIOUS) == 0) {
+               /* If "Self Snoop" is supported, do nothing. */
+               if (!(cpu_feature & CPUID_SS))
+                       pmap_invalidate_cache();
+       }
+}
+
+/*
  * Changes the specified virtual address range's memory type to that given by
  * the parameter "mode".  The specified virtual address range must be
 * completely contained within the kernel map.

Modified: stable/7/sys/i386/include/pmap.h
==============================================================================
--- stable/7/sys/i386/include/pmap.h    Fri Sep  4 19:20:46 2009        (r196837)
+++ stable/7/sys/i386/include/pmap.h    Fri Sep  4 19:59:32 2009        (r196838)
@@ -332,7 +332,7 @@ struct      pv_entry;
 struct pv_chunk;
 
 struct md_page {
-       int pv_unused;
+       int                     pat_mode;
        TAILQ_HEAD(,pv_entry)   pv_list;
 };
 
@@ -411,6 +411,7 @@ extern char *ptvmmap;               /* poor name! */
 extern vm_offset_t virtual_avail;
 extern vm_offset_t virtual_end;
 
+#define        pmap_page_get_memattr(m)        ((vm_memattr_t)(m)->md.pat_mode)
 #define        pmap_unmapbios(va, sz)  pmap_unmapdev((va), (sz))
 
 void   pmap_bootstrap(vm_paddr_t);
@@ -423,6 +424,7 @@ void        *pmap_mapbios(vm_paddr_t, vm_size_t
 void   *pmap_mapdev(vm_paddr_t, vm_size_t);
 void   *pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
 boolean_t pmap_page_is_mapped(vm_page_t m);
+void   pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
 void   pmap_unmapdev(vm_offset_t, vm_size_t);
 pt_entry_t *pmap_pte(pmap_t, vm_offset_t) __pure2;
 void   pmap_set_pg(void);

Copied and modified: stable/7/sys/i386/include/vm.h (from r195033, head/sys/i386/include/vm.h)
==============================================================================
--- head/sys/i386/include/vm.h  Fri Jun 26 04:47:43 2009        (r195033, copy source)
+++ stable/7/sys/i386/include/vm.h      Fri Sep  4 19:59:32 2009        (r196838)
@@ -32,14 +32,14 @@
 
 #include <machine/specialreg.h>
 
-/* Cache control options. */
-#define        VM_CACHE_UNCACHEABLE            ((vm_cache_mode_t)PAT_UNCACHEABLE)
-#define        VM_CACHE_WRITE_COMBINING        ((vm_cache_mode_t)PAT_WRITE_COMBINING)
-#define        VM_CACHE_WRITE_THROUGH          ((vm_cache_mode_t)PAT_WRITE_THROUGH)
-#define        VM_CACHE_WRITE_PROTECTED        ((vm_cache_mode_t)PAT_WRITE_PROTECTED)
-#define        VM_CACHE_WRITE_BACK             ((vm_cache_mode_t)PAT_WRITE_BACK)
-#define        VM_CACHE_UNCACHED               ((vm_cache_mode_t)PAT_UNCACHED)
+/* Memory attributes. */
+#define        VM_MEMATTR_UNCACHEABLE          ((vm_memattr_t)PAT_UNCACHEABLE)
+#define        VM_MEMATTR_WRITE_COMBINING      ((vm_memattr_t)PAT_WRITE_COMBINING)
+#define        VM_MEMATTR_WRITE_THROUGH        ((vm_memattr_t)PAT_WRITE_THROUGH)
+#define        VM_MEMATTR_WRITE_PROTECTED      ((vm_memattr_t)PAT_WRITE_PROTECTED)
+#define        VM_MEMATTR_WRITE_BACK           ((vm_memattr_t)PAT_WRITE_BACK)
+#define        VM_MEMATTR_UNCACHED             ((vm_memattr_t)PAT_UNCACHED)
 
-#define        VM_CACHE_DEFAULT                VM_CACHE_WRITE_BACK
+#define        VM_MEMATTR_DEFAULT              VM_MEMATTR_WRITE_BACK
 
 #endif /* !_MACHINE_PMAP_H_ */

Modified: stable/7/sys/ia64/include/pmap.h
==============================================================================
--- stable/7/sys/ia64/include/pmap.h    Fri Sep  4 19:20:46 2009        (r196837)
+++ stable/7/sys/ia64/include/pmap.h    Fri Sep  4 19:59:32 2009        (r196838)
@@ -118,7 +118,9 @@ extern vm_offset_t virtual_end;
 extern uint64_t pmap_vhpt_base[];
 extern int pmap_vhpt_log2size;
 
+#define        pmap_page_get_memattr(m)        VM_MEMATTR_DEFAULT
 #define        pmap_page_is_mapped(m)  (!TAILQ_EMPTY(&(m)->md.pv_list))
+#define        pmap_page_set_memattr(m, ma)    (void)0
 #define        pmap_mapbios(pa, sz)    pmap_mapdev(pa, sz)
 #define        pmap_unmapbios(va, sz)  pmap_unmapdev(va, sz)
 

Copied and modified: stable/7/sys/ia64/include/vm.h (from r195033, head/sys/ia64/include/vm.h)
==============================================================================
--- head/sys/ia64/include/vm.h  Fri Jun 26 04:47:43 2009        (r195033, copy source)
+++ stable/7/sys/ia64/include/vm.h      Fri Sep  4 19:59:32 2009        (r196838)
@@ -32,13 +32,13 @@
 #include <machine/atomic.h>
 #include <machine/pte.h>
 
-/* Cache control options. */
-#define        VM_CACHE_WRITE_BACK             ((vm_cache_mode_t)PTE_MA_WB)
-#define        VM_CACHE_UNCACHEABLE            ((vm_cache_mode_t)PTE_MA_UC)
-#define        VM_CACHE_UNCACHEABLE_EXPORTED   ((vm_cache_mode_t)PTE_MA_UCE)
-#define        VM_CACHE_WRITE_COMBINING        ((vm_cache_mode_t)PTE_MA_WC)
-#define        VM_CACHE_NATPAGE                ((vm_cache_mode_t)PTE_MA_NATPAGE)
+/* Memory attributes. */
+#define        VM_MEMATTR_WRITE_BACK           ((vm_memattr_t)PTE_MA_WB)
+#define        VM_MEMATTR_UNCACHEABLE          ((vm_memattr_t)PTE_MA_UC)
+#define        VM_MEMATTR_UNCACHEABLE_EXPORTED ((vm_memattr_t)PTE_MA_UCE)
+#define        VM_MEMATTR_WRITE_COMBINING      ((vm_memattr_t)PTE_MA_WC)
+#define        VM_MEMATTR_NATPAGE              ((vm_memattr_t)PTE_MA_NATPAGE)
 
-#define        VM_CACHE_DEFAULT                VM_CACHE_WRITE_BACK
+#define        VM_MEMATTR_DEFAULT              VM_MEMATTR_WRITE_BACK
 
 #endif /* !_MACHINE_PMAP_H_ */

Modified: stable/7/sys/kern/kern_mbuf.c
==============================================================================
--- stable/7/sys/kern/kern_mbuf.c       Fri Sep  4 19:20:46 2009        (r196837)
+++ stable/7/sys/kern/kern_mbuf.c       Fri Sep  4 19:59:32 2009        (r196838)
@@ -45,6 +45,8 @@ __FBSDID("$FreeBSD$");
 #include <security/mac/mac_framework.h>
 
 #include <vm/vm.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_kern.h>
 #include <vm/vm_page.h>
 #include <vm/uma.h>
 #include <vm/uma_int.h>
@@ -233,9 +235,6 @@ static void mb_zfini_pack(void *, int);
 static void    mb_reclaim(void *);
 static void    mbuf_init(void *);
 static void    *mbuf_jumbo_alloc(uma_zone_t, int, u_int8_t *, int);
-static void    mbuf_jumbo_free(void *, int, u_int8_t);
-
-static MALLOC_DEFINE(M_JUMBOFRAME, "jumboframes", "mbuf jumbo frame buffers");
 
 /* Ensure that MSIZE doesn't break dtom() - it must be a power of 2 */
 CTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE);
@@ -297,7 +296,6 @@ mbuf_init(void *dummy)
        if (nmbjumbo9 > 0)
                uma_zone_set_max(zone_jumbo9, nmbjumbo9);
        uma_zone_set_allocf(zone_jumbo9, mbuf_jumbo_alloc);
-       uma_zone_set_freef(zone_jumbo9, mbuf_jumbo_free);
 
        zone_jumbo16 = uma_zcreate(MBUF_JUMBO16_MEM_NAME, MJUM16BYTES,
            mb_ctor_clust, mb_dtor_clust,
@@ -310,7 +308,6 @@ mbuf_init(void *dummy)
        if (nmbjumbo16 > 0)
                uma_zone_set_max(zone_jumbo16, nmbjumbo16);
        uma_zone_set_allocf(zone_jumbo16, mbuf_jumbo_alloc);
-       uma_zone_set_freef(zone_jumbo16, mbuf_jumbo_free);
 
        zone_ext_refcnt = uma_zcreate(MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int),
            NULL, NULL,
@@ -359,18 +356,8 @@ mbuf_jumbo_alloc(uma_zone_t zone, int by
 
        /* Inform UMA that this allocator uses kernel_map/object. */
        *flags = UMA_SLAB_KERNEL;
-       return (contigmalloc(bytes, M_JUMBOFRAME, wait, (vm_paddr_t)0,
-           ~(vm_paddr_t)0, 1, 0));
-}
-
-/*
- * UMA backend page deallocator for the jumbo frame zones.
- */
-static void
-mbuf_jumbo_free(void *mem, int size, u_int8_t flags)
-{
-
-       contigfree(mem, size, M_JUMBOFRAME);
+       return ((void *)kmem_alloc_contig(kernel_map, bytes, wait,
+           (vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT));
 }
 
 /*

Modified: stable/7/sys/powerpc/include/pmap.h
==============================================================================
--- stable/7/sys/powerpc/include/pmap.h Fri Sep  4 19:20:46 2009        (r196837)
+++ stable/7/sys/powerpc/include/pmap.h Fri Sep  4 19:59:32 2009        (r196838)
@@ -72,7 +72,9 @@ struct        md_page {
 extern struct pmap kernel_pmap_store;
 #define        kernel_pmap     (&kernel_pmap_store)
 
+#define        pmap_page_get_memattr(m)        VM_MEMATTR_DEFAULT
 #define        pmap_page_is_mapped(m)  (!LIST_EMPTY(&(m)->md.mdpg_pvoh))
+#define        pmap_page_set_memattr(m, ma)    (void)0
 
 #ifdef _KERNEL
 

Copied and modified: stable/7/sys/powerpc/include/vm.h (from r195033, head/sys/powerpc/include/vm.h)
==============================================================================
--- head/sys/powerpc/include/vm.h       Fri Jun 26 04:47:43 2009        (r195033, copy source)
+++ stable/7/sys/powerpc/include/vm.h   Fri Sep  4 19:59:32 2009        (r196838)
@@ -31,10 +31,12 @@
 
 #include <machine/pte.h>
 
-/* Cache control options. */
-#define        VM_CACHE_INHIBIT                ((vm_cache_mode_t)PTE_I)
-#define        VM_CACHE_WRITE_THROUGH          ((vm_cache_mode_t)PTE_W)
+/* Memory attributes. */
+#define        VM_MEMATTR_CACHING_INHIBIT      ((vm_memattr_t)PTE_I)
+#define        VM_MEMATTR_GUARD                ((vm_memattr_t)PTE_G)
+#define        VM_MEMATTR_MEMORY_COHERENCE     ((vm_memattr_t)PTE_M)
+#define        VM_MEMATTR_WRITE_THROUGH        ((vm_memattr_t)PTE_W)
 
-#define        VM_CACHE_DEFAULT                0
+#define        VM_MEMATTR_DEFAULT              0
 
 #endif /* !_MACHINE_PMAP_H_ */

Modified: stable/7/sys/sparc64/include/pmap.h
==============================================================================
--- stable/7/sys/sparc64/include/pmap.h Fri Sep  4 19:20:46 2009        (r196837)
+++ stable/7/sys/sparc64/include/pmap.h Fri Sep  4 19:59:32 2009        (r196838)
@@ -77,6 +77,9 @@ struct pmap {
 #define        PMAP_TRYLOCK(pmap)      mtx_trylock(&(pmap)->pm_mtx)
 #define        PMAP_UNLOCK(pmap)       mtx_unlock(&(pmap)->pm_mtx)
 
+#define        pmap_page_get_memattr(m)        VM_MEMATTR_DEFAULT
+#define        pmap_page_set_memattr(m, ma)    (void)0
+
 void   pmap_bootstrap(vm_offset_t ekva);
 vm_paddr_t pmap_kextract(vm_offset_t va);
 void   pmap_kenter(vm_offset_t va, vm_page_t m);

Copied and modified: stable/7/sys/sparc64/include/vm.h (from r195033, head/sys/sparc64/include/vm.h)
==============================================================================
--- head/sys/sparc64/include/vm.h       Fri Jun 26 04:47:43 2009        (r195033, copy source)
+++ stable/7/sys/sparc64/include/vm.h   Fri Sep  4 19:59:32 2009        (r196838)
@@ -29,7 +29,7 @@
 #ifndef _MACHINE_VM_H_
 #define        _MACHINE_VM_H_
 
-/* Cache control is not (yet) implemented. */
-#define        VM_CACHE_DEFAULT        0
+/* Memory attribute configuration is not (yet) implemented. */
+#define        VM_MEMATTR_DEFAULT      0
 
 #endif /* !_MACHINE_PMAP_H_ */

Modified: stable/7/sys/sun4v/include/pmap.h
==============================================================================
--- stable/7/sys/sun4v/include/pmap.h   Fri Sep  4 19:20:46 2009        (r196837)
+++ stable/7/sys/sun4v/include/pmap.h   Fri Sep  4 19:59:32 2009        (r196838)
@@ -106,7 +106,9 @@ typedef struct pv_entry {
        TAILQ_ENTRY(pv_entry) pv_plist;
 } *pv_entry_t;
 
+#define        pmap_page_get_memattr(m)        VM_MEMATTR_DEFAULT
 #define pmap_page_is_mapped(m)  (!TAILQ_EMPTY(&(m)->md.pv_list))
+#define        pmap_page_set_memattr(m, ma)    (void)0
 
 void   pmap_bootstrap(vm_offset_t ekva);
 vm_paddr_t pmap_kextract(vm_offset_t va);

Copied and modified: stable/7/sys/sun4v/include/vm.h (from r195033, head/sys/sun4v/include/vm.h)
==============================================================================
--- head/sys/sun4v/include/vm.h Fri Jun 26 04:47:43 2009        (r195033, copy source)
+++ stable/7/sys/sun4v/include/vm.h     Fri Sep  4 19:59:32 2009        (r196838)
@@ -29,7 +29,7 @@
 #ifndef _MACHINE_VM_H_
 #define        _MACHINE_VM_H_
 
-/* Cache control is not (yet) implemented. */
-#define        VM_CACHE_DEFAULT        0
+/* Memory attribute configuration is not (yet) implemented. */
+#define        VM_MEMATTR_DEFAULT      0
 
 #endif /* !_MACHINE_PMAP_H_ */

Modified: stable/7/sys/sun4v/sun4v/pmap.c
==============================================================================
--- stable/7/sys/sun4v/sun4v/pmap.c     Fri Sep  4 19:20:46 2009        (r196837)
+++ stable/7/sys/sun4v/sun4v/pmap.c     Fri Sep  4 19:59:32 2009        (r196838)
@@ -1298,7 +1298,7 @@ pmap_alloc_zeroed_contig_pages(int npage
        while (m == NULL) {     
                for (i = 0; phys_avail[i + 1] != 0; i += 2) {
                        m = vm_phys_alloc_contig(npages, phys_avail[i], 
-                                                phys_avail[i + 1], alignment, (1UL<<34));
+                           phys_avail[i + 1], alignment, (1UL<<34));
                        if (m)
                                goto found;
                }

Modified: stable/7/sys/vm/device_pager.c
==============================================================================
--- stable/7/sys/vm/device_pager.c      Fri Sep  4 19:20:46 2009        (r196837)
+++ stable/7/sys/vm/device_pager.c      Fri Sep  4 19:59:32 2009        (r196838)
@@ -70,9 +70,9 @@ static struct mtx dev_pager_mtx;
 
 static uma_zone_t fakepg_zone;
 
-static vm_page_t dev_pager_getfake(vm_paddr_t);
+static vm_page_t dev_pager_getfake(vm_paddr_t, vm_memattr_t);
 static void dev_pager_putfake(vm_page_t);
-static void dev_pager_updatefake(vm_page_t, vm_paddr_t);
+static void dev_pager_updatefake(vm_page_t, vm_paddr_t, vm_memattr_t);
 
 struct pagerops devicepagerops = {
        .pgo_init =     dev_pager_init,
@@ -194,7 +194,7 @@ dev_pager_dealloc(object)
        /*
         * Free up our fake pages.
         */
-       while ((m = TAILQ_FIRST(&object->un_pager.devp.devp_pglist)) != 0) {
+       while ((m = TAILQ_FIRST(&object->un_pager.devp.devp_pglist)) != NULL) {
                TAILQ_REMOVE(&object->un_pager.devp.devp_pglist, m, pageq);
                dev_pager_putfake(m);
        }
@@ -209,7 +209,8 @@ dev_pager_getpages(object, m, count, req
 {
        vm_pindex_t offset;
        vm_paddr_t paddr;
-       vm_page_t page;
+       vm_page_t m_paddr, page;
+       vm_memattr_t memattr;
        struct cdev *dev;
        int i, ret;
        int prot;
@@ -219,7 +220,9 @@ dev_pager_getpages(object, m, count, req
 
        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        dev = object->handle;
-       offset = m[reqpage]->pindex;
+       page = m[reqpage];
+       offset = page->pindex;
+       memattr = object->memattr;
        VM_OBJECT_UNLOCK(object);
        csw = dev_refthread(dev);
        if (csw == NULL)
@@ -233,14 +236,20 @@ dev_pager_getpages(object, m, count, req
        KASSERT(ret == 0, ("dev_pager_getpage: map function returns error"));
        td->td_fpop = fpop;
        dev_relthread(dev);
-
-       if ((m[reqpage]->flags & PG_FICTITIOUS) != 0) {
+       /* If "paddr" is a real page, perform a sanity check on "memattr". */
+       if ((m_paddr = vm_phys_paddr_to_vm_page(paddr)) != NULL &&
+           pmap_page_get_memattr(m_paddr) != memattr) {
+               memattr = pmap_page_get_memattr(m_paddr);
+               printf(
+           "WARNING: A device driver has set \"memattr\" inconsistently.\n");
+       }
+       if ((page->flags & PG_FICTITIOUS) != 0) {
                /*
                 * If the passed in reqpage page is a fake page, update it with
                 * the new physical address.
                 */
                VM_OBJECT_LOCK(object);
-               dev_pager_updatefake(m[reqpage], paddr);
+               dev_pager_updatefake(page, paddr, memattr);
                if (count > 1) {
                        vm_page_lock_queues();
                        for (i = 0; i < count; i++) {
@@ -254,7 +263,7 @@ dev_pager_getpages(object, m, count, req
                 * Replace the passed in reqpage page with our own fake page and
                 * free up the all of the original pages.
                 */
-               page = dev_pager_getfake(paddr);
+               page = dev_pager_getfake(paddr, memattr);
                VM_OBJECT_LOCK(object);
                TAILQ_INSERT_TAIL(&object->un_pager.devp.devp_pglist, page, pageq);
                vm_page_lock_queues();
@@ -264,7 +273,7 @@ dev_pager_getpages(object, m, count, req
                vm_page_insert(page, object, offset);
                m[reqpage] = page;
        }
-
+       page->valid = VM_PAGE_BITS_ALL;
        return (VM_PAGER_OK);
 }
 
@@ -294,48 +303,48 @@ dev_pager_haspage(object, pindex, before
 }
 
 /*
- * Instantiate a fictitious page.  Unlike physical memory pages, only
- * the machine-independent fields must be initialized.
+ * Create a fictitious page with the specified physical address and memory
+ * attribute.  The memory attribute is the only machine-dependent aspect
+ * of a fictitious page that must be initialized.
  */
 static vm_page_t
-dev_pager_getfake(paddr)
-       vm_paddr_t paddr;
+dev_pager_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
 {
        vm_page_t m;
 
-       m = uma_zalloc(fakepg_zone, M_WAITOK);
-
+       m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
+       m->phys_addr = paddr;
+       /* Fictitious pages don't use "segind". */
        m->flags = PG_FICTITIOUS;
+       /* Fictitious pages don't use "order" or "pool". */
        m->oflags = VPO_BUSY;
-       m->valid = VM_PAGE_BITS_ALL;
-       m->dirty = 0;
-       m->busy = 0;
-       m->queue = PQ_NONE;
-       m->object = NULL;
-
        m->wire_count = 1;
-       m->hold_count = 0;
-       m->phys_addr = paddr;
-
+       pmap_page_set_memattr(m, memattr);
        return (m);
 }
 
+/*
+ * Release a fictitious page.
+ */
 static void
-dev_pager_putfake(m)
-       vm_page_t m;
+dev_pager_putfake(vm_page_t m)
 {
+
        if (!(m->flags & PG_FICTITIOUS))
                panic("dev_pager_putfake: bad page");
        uma_zfree(fakepg_zone, m);
 }
 
+/*
+ * Update the given fictitious page to the specified physical address and
+ * memory attribute.
+ */
 static void
-dev_pager_updatefake(m, paddr)
-       vm_page_t m;
-       vm_paddr_t paddr;
+dev_pager_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
 {
+
        if (!(m->flags & PG_FICTITIOUS))
                panic("dev_pager_updatefake: bad page");
        m->phys_addr = paddr;
-       m->valid = VM_PAGE_BITS_ALL;
+       pmap_page_set_memattr(m, memattr);
 }

Modified: stable/7/sys/vm/pmap.h
==============================================================================
--- stable/7/sys/vm/pmap.h      Fri Sep  4 19:20:46 2009        (r196837)
+++ stable/7/sys/vm/pmap.h      Fri Sep  4 19:59:32 2009        (r196838)
@@ -79,10 +79,16 @@ struct pmap_statistics {
 };
 typedef struct pmap_statistics *pmap_statistics_t;
 
+/*
+ * Each machine dependent implementation is expected to provide:
+ *
+ * vm_memattr_t        pmap_page_get_memattr(vm_page_t);
+ * boolean_t   pmap_page_is_mapped(vm_page_t);
+ * void                pmap_page_set_memattr(vm_page_t, vm_memattr_t);
+ */
 #include <machine/pmap.h>
 
 #ifdef _KERNEL
-struct proc;
 struct thread;
 
 /*

Modified: stable/7/sys/vm/vm.h
==============================================================================
--- stable/7/sys/vm/vm.h        Fri Sep  4 19:20:46 2009        (r196837)
+++ stable/7/sys/vm/vm.h        Fri Sep  4 19:59:32 2009        (r196838)
@@ -61,6 +61,14 @@
 #ifndef VM_H
 #define VM_H
 
+#include <machine/vm.h>
+
+/*
+ * The exact set of memory attributes is machine dependent.  However, every
+ * machine is required to define VM_MEMATTR_DEFAULT.
+ */
+typedef        char vm_memattr_t;      /* memory attribute codes */
+
 typedef char vm_inherit_t;     /* inheritance codes */
 
 #define        VM_INHERIT_SHARE        ((vm_inherit_t) 0)

Modified: stable/7/sys/vm/vm_contig.c
==============================================================================
--- stable/7/sys/vm/vm_contig.c Fri Sep  4 19:20:46 2009        (r196837)
+++ stable/7/sys/vm/vm_contig.c Fri Sep  4 19:59:32 2009        (r196838)
@@ -193,37 +193,37 @@ vm_page_release_contig(vm_page_t m, vm_p
  *     specified through the given flags, then the pages are zeroed
  *     before they are mapped.
  */
-static void *
-contigmapping(vm_page_t m, vm_pindex_t npages, int flags)
+static vm_offset_t
+contigmapping(vm_map_t map, vm_size_t size, vm_page_t m, vm_memattr_t memattr,
+    int flags)
 {
        vm_object_t object = kernel_object;
-       vm_map_t map = kernel_map;
        vm_offset_t addr, tmp_addr;
-       vm_pindex_t i;
  
        vm_map_lock(map);
-       if (vm_map_findspace(map, vm_map_min(map), npages << PAGE_SHIFT, &addr)
-           != KERN_SUCCESS) {
+       if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
                vm_map_unlock(map);
-               return (NULL);
+               return (0);
        }
        vm_object_reference(object);
        vm_map_insert(map, object, addr - VM_MIN_KERNEL_ADDRESS,
-           addr, addr + (npages << PAGE_SHIFT), VM_PROT_ALL, VM_PROT_ALL, 0);
+           addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, 0);
        vm_map_unlock(map);
-       tmp_addr = addr;
        VM_OBJECT_LOCK(object);
-       for (i = 0; i < npages; i++) {
-               vm_page_insert(&m[i], object,
+       for (tmp_addr = addr; tmp_addr < addr + size; tmp_addr += PAGE_SIZE) {
+               if (memattr != VM_MEMATTR_DEFAULT)
+                       pmap_page_set_memattr(m, memattr);
+               vm_page_insert(m, object,
                    OFF_TO_IDX(tmp_addr - VM_MIN_KERNEL_ADDRESS));
-               if ((flags & M_ZERO) && !(m[i].flags & PG_ZERO))
-                       pmap_zero_page(&m[i]);
-               tmp_addr += PAGE_SIZE;
+               if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
+                       pmap_zero_page(m);
+               m->valid = VM_PAGE_BITS_ALL;
+               m++;
        }
        VM_OBJECT_UNLOCK(object);
-       vm_map_wire(map, addr, addr + (npages << PAGE_SHIFT),
+       vm_map_wire(map, addr, addr + size,
            VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
-       return ((void *)addr);
+       return (addr);
 }
 
 void *
@@ -237,11 +237,26 @@ contigmalloc(
        unsigned long boundary)
 {
        void *ret;
+
+       ret = (void *)kmem_alloc_contig(kernel_map, size, flags, low, high,
+           alignment, boundary, VM_MEMATTR_DEFAULT);
+       if (ret != NULL)
+               malloc_type_allocated(type, round_page(size));
+       return (ret);
+}
+
+vm_offset_t
+kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
+    vm_paddr_t high, unsigned long alignment, unsigned long boundary,
+    vm_memattr_t memattr)
+{
+       vm_offset_t ret;
        vm_page_t pages;
        unsigned long npgs;
        int actl, actmax, inactl, inactmax, tries;
 
-       npgs = round_page(size) >> PAGE_SHIFT;
+       size = round_page(size);
+       npgs = size >> PAGE_SHIFT;
        tries = 0;
 retry:
        pages = vm_phys_alloc_contig(npgs, low, high, alignment, boundary);
@@ -267,13 +282,11 @@ again:
                        tries++;
                        goto retry;
                }
-               ret = NULL;
+               ret = 0;
        } else {
-               ret = contigmapping(pages, npgs, flags);
-               if (ret == NULL)
+               ret = contigmapping(map, size, pages, memattr, flags);
+               if (ret == 0)
                        vm_page_release_contig(pages, npgs);
-               else
-                       malloc_type_allocated(type, npgs << PAGE_SHIFT);
        }
        return (ret);
 }
@@ -281,9 +294,7 @@ again:
 void
 contigfree(void *addr, unsigned long size, struct malloc_type *type)
 {
-       vm_pindex_t npgs;
 
-       npgs = round_page(size) >> PAGE_SHIFT;
        kmem_free(kernel_map, (vm_offset_t)addr, size);
-       malloc_type_freed(type, npgs << PAGE_SHIFT);
+       malloc_type_freed(type, round_page(size));
 }

Modified: stable/7/sys/vm/vm_extern.h
==============================================================================
--- stable/7/sys/vm/vm_extern.h Fri Sep  4 19:20:46 2009        (r196837)
+++ stable/7/sys/vm/vm_extern.h Fri Sep  4 19:59:32 2009        (r196838)
@@ -57,6 +57,9 @@ int swapon(struct thread *, void *, int 
 
 int kernacc(void *, int, int);
 vm_offset_t kmem_alloc(vm_map_t, vm_size_t);
+vm_offset_t kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags,
+    vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
+    unsigned long boundary, vm_memattr_t memattr);
 vm_offset_t kmem_alloc_nofault(vm_map_t, vm_size_t);
 vm_offset_t kmem_alloc_wait(vm_map_t, vm_size_t);
 void kmem_free(vm_map_t, vm_offset_t, vm_size_t);

Modified: stable/7/sys/vm/vm_object.c
==============================================================================
--- stable/7/sys/vm/vm_object.c Fri Sep  4 19:20:46 2009        (r196837)

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***