tree f63fdb5db967253e46472ff776d1e22c38cee16e
parent 7bbd827750e630003896c96d0212962276ee5d91
author Benjamin Herrenschmidt <[EMAIL PROTECTED]> Sun, 17 Apr 2005 05:24:33 -0700
committer Linus Torvalds <[EMAIL PROTECTED]> Sun, 17 Apr 2005 05:24:33 -0700

[PATCH] ppc64: Fix semantics of __ioremap

This patch fixes ppc64 __ioremap() so that it stops implicitly adding
_PAGE_GUARDED when the cache is not writeback, and instead lets the callers
provide the flags they want.  This allows things like framebuffers to
explicitly request a non-cacheable and non-guarded mapping, which is more
efficient for that type of memory and has no side effects.  The patch also
fixes all current callers to add _PAGE_GUARDED, except btext, which is fine
without it.
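
For illustration only (not part of this patch), a caller under the new
semantics might look roughly like the hypothetical sketch below; the
function and parameter names are made up, but the ioremap()/__ioremap()
flag handling is the one introduced here:

	#include <asm/io.h>
	#include <asm/pgtable.h>

	/*
	 * Hypothetical example: map a device's registers and its
	 * framebuffer aperture under the new __ioremap() semantics.
	 */
	static void __iomem *example_map(unsigned long reg_phys,
					 unsigned long fb_phys,
					 unsigned long fb_size,
					 void __iomem **fb)
	{
		/*
		 * MMIO registers: ioremap() now maps non-cacheable and
		 * guarded (_PAGE_NO_CACHE | _PAGE_GUARDED), so ordinary
		 * register mappings keep their previous behaviour.
		 */
		void __iomem *regs = ioremap(reg_phys, 0x1000);

		/*
		 * Framebuffer: request only _PAGE_NO_CACHE.  Since
		 * __ioremap() no longer forces _PAGE_GUARDED on
		 * non-writeback mappings, the aperture ends up
		 * non-cacheable but not guarded, which is more efficient
		 * for this kind of memory.
		 */
		*fb = __ioremap(fb_phys, fb_size, _PAGE_NO_CACHE);

		return regs;
	}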

Signed-off-by: Benjamin Herrenschmidt <[EMAIL PROTECTED]>
Signed-off-by: Andrew Morton <[EMAIL PROTECTED]>
Signed-off-by: Linus Torvalds <[EMAIL PROTECTED]>

 ppc64/kernel/maple_setup.c   |    2 +-
 ppc64/kernel/pSeries_setup.c |    2 +-
 ppc64/kernel/pci.c           |   12 +++++++-----
 ppc64/mm/init.c              |   18 +++++++++---------
 4 files changed, 18 insertions(+), 16 deletions(-)

Index: arch/ppc64/kernel/maple_setup.c
===================================================================
--- 71bb72cddbb08f9de68b2c7c05b4f5c03e8ed0bd/arch/ppc64/kernel/maple_setup.c  (mode:100644 sha1:1db6ea0f336f65902d776ba09d16a0423ea9af1a)
+++ f63fdb5db967253e46472ff776d1e22c38cee16e/arch/ppc64/kernel/maple_setup.c  (mode:100644 sha1:8cf95a27178e8d7cecd4e98b426b7803ded39684)
@@ -142,7 +142,7 @@
        if (physport) {
                void *comport;
                /* Map the uart for udbg. */
-               comport = (void *)__ioremap(physport, 16, _PAGE_NO_CACHE);
+               comport = (void *)ioremap(physport, 16);
                udbg_init_uart(comport, default_speed);
 
                ppc_md.udbg_putc = udbg_putc;
Index: arch/ppc64/kernel/pSeries_setup.c
===================================================================
--- 71bb72cddbb08f9de68b2c7c05b4f5c03e8ed0bd/arch/ppc64/kernel/pSeries_setup.c  (mode:100644 sha1:06536de51257749ac77bd10429b27110290de8b9)
+++ f63fdb5db967253e46472ff776d1e22c38cee16e/arch/ppc64/kernel/pSeries_setup.c  (mode:100644 sha1:6c0d1d58a552e67382f3186c8dec4294c2d3b0b0)
@@ -363,7 +363,7 @@
                find_udbg_vterm();
        else if (physport) {
                /* Map the uart for udbg. */
-               comport = (void *)__ioremap(physport, 16, _PAGE_NO_CACHE);
+               comport = (void *)ioremap(physport, 16);
                udbg_init_uart(comport, default_speed);
 
                ppc_md.udbg_putc = udbg_putc;
Index: arch/ppc64/kernel/pci.c
===================================================================
--- 71bb72cddbb08f9de68b2c7c05b4f5c03e8ed0bd/arch/ppc64/kernel/pci.c  (mode:100644 sha1:fdd8f7869a68d44a6a99742cc6512fc70c67cf84)
+++ f63fdb5db967253e46472ff776d1e22c38cee16e/arch/ppc64/kernel/pci.c  (mode:100644 sha1:be3cc387c1ec4bcb18dcac51dd0befef0c588558)
@@ -547,8 +547,9 @@
        if (range == NULL || (rlen < sizeof(struct isa_range))) {
                printk(KERN_ERR "no ISA ranges or unexpected isa range size,"
                       "mapping 64k\n");
-               __ioremap_explicit(phb_io_base_phys, (unsigned long)phb_io_base_virt,
-                                  0x10000, _PAGE_NO_CACHE);
+               __ioremap_explicit(phb_io_base_phys,
+                                  (unsigned long)phb_io_base_virt,
+                                  0x10000, _PAGE_NO_CACHE | _PAGE_GUARDED);
                return; 
        }
        
@@ -576,7 +577,7 @@
 
                __ioremap_explicit(phb_io_base_phys, 
                                   (unsigned long) phb_io_base_virt, 
-                                  size, _PAGE_NO_CACHE);
+                                  size, _PAGE_NO_CACHE | _PAGE_GUARDED);
        }
 }
 
@@ -692,7 +693,7 @@
        struct resource *res;
 
        hose->io_base_virt = __ioremap(hose->io_base_phys, size,
-                                       _PAGE_NO_CACHE);
+                                       _PAGE_NO_CACHE | _PAGE_GUARDED);
        DBG("phb%d io_base_phys 0x%lx io_base_virt 0x%lx\n",
                hose->global_number, hose->io_base_phys,
                (unsigned long) hose->io_base_virt);
@@ -780,7 +781,8 @@
        if (get_bus_io_range(bus, &start_phys, &start_virt, &size))
                return 1;
        printk("mapping IO %lx -> %lx, size: %lx\n", start_phys, start_virt, size);
-       if (__ioremap_explicit(start_phys, start_virt, size, _PAGE_NO_CACHE))
+       if (__ioremap_explicit(start_phys, start_virt, size,
+                              _PAGE_NO_CACHE | _PAGE_GUARDED))
                return 1;
 
        return 0;
Index: arch/ppc64/mm/init.c
===================================================================
--- 71bb72cddbb08f9de68b2c7c05b4f5c03e8ed0bd/arch/ppc64/mm/init.c  (mode:100644 sha1:23813d03e1c423a239deb5f7779cadd0434914b6)
+++ f63fdb5db967253e46472ff776d1e22c38cee16e/arch/ppc64/mm/init.c  (mode:100644 sha1:a7149b9fc35c38cd8bed1da9edbdb94fc390b4f7)
@@ -155,7 +155,8 @@
                ptep = pte_alloc_kernel(&ioremap_mm, pmdp, ea);
 
                pa = abs_to_phys(pa);
-               set_pte_at(&ioremap_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags)));
+               set_pte_at(&ioremap_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
+                                                         __pgprot(flags)));
                spin_unlock(&ioremap_mm.page_table_lock);
        } else {
                unsigned long va, vpn, hash, hpteg;
@@ -191,12 +192,9 @@
 
        if ((flags & _PAGE_PRESENT) == 0)
                flags |= pgprot_val(PAGE_KERNEL);
-       if (flags & (_PAGE_NO_CACHE | _PAGE_WRITETHRU))
-               flags |= _PAGE_GUARDED;
 
-       for (i = 0; i < size; i += PAGE_SIZE) {
+       for (i = 0; i < size; i += PAGE_SIZE)
                map_io_page(ea+i, pa+i, flags);
-       }
 
        return (void __iomem *) (ea + (addr & ~PAGE_MASK));
 }
@@ -205,7 +203,7 @@
 void __iomem *
 ioremap(unsigned long addr, unsigned long size)
 {
-       return __ioremap(addr, size, _PAGE_NO_CACHE);
+       return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
 }
 
 void __iomem *
@@ -272,7 +270,8 @@
                        return 1;
                }
                if (ea != (unsigned long) area->addr) {
-                       printk(KERN_ERR "unexpected addr return from im_get_area\n");
+                       printk(KERN_ERR "unexpected addr return from "
+                              "im_get_area\n");
                        return 1;
                }
        }
@@ -315,7 +314,8 @@
                        continue;
                if (pte_present(page))
                        continue;
-               printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
+               printk(KERN_CRIT "Whee.. Swapped out page in kernel page"
+                      " table\n");
        } while (address < end);
 }
 
@@ -352,7 +352,7 @@
  * Access to IO memory should be serialized by driver.
  * This code is modeled after vmalloc code - unmap_vm_area()
  *
- * XXX what about calls before mem_init_done (ie python_countermeasures())     
+ * XXX what about calls before mem_init_done (ie python_countermeasures())
  */
 void iounmap(volatile void __iomem *token)
 {