ChangeSet 1.2231.1.67, 2005/03/28 19:35:15-08:00, [EMAIL PROTECTED]

        [PATCH] x86_64: Work around Tyan BIOS MTRR initialization bug.
        
        Work around Tyan BIOS MTRR initialization bug.
        
        Some Tyan AMD BIOS don't initialize the first fixed range MTRR, which
        causes it to contain random bogus values.  When the MTRR tries to
        duplicate the MTRR state to other CPUs at startup it oopses because of
        this.
        
        This patch works around this by catching exception while setting MTRRs. 
        
        It would be better to validate all fixed range MTRRs and fix them, but
        that would be very complicated code.  This simple hack seems to work
        too (except that the first 64k of physical memory are likely uncached).
        A BIOS update fixes that.
        
        Signed-off-by: Andi Kleen <[EMAIL PROTECTED]>
        Cc: <[EMAIL PROTECTED]>
        Signed-off-by: Andrew Morton <[EMAIL PROTECTED]>
        Signed-off-by: Linus Torvalds <[EMAIL PROTECTED]>



 arch/i386/kernel/cpu/mtrr/generic.c |   30 ++++++++++++++++++++----------
 arch/i386/kernel/cpu/mtrr/mtrr.h    |    1 +
 arch/i386/kernel/cpu/mtrr/state.c   |    4 ++--
 include/asm-i386/msr.h              |   15 +++++++++++++++
 include/asm-x86_64/msr.h            |    8 +++++---
 5 files changed, 43 insertions(+), 15 deletions(-)


diff -Nru a/arch/i386/kernel/cpu/mtrr/generic.c 
b/arch/i386/kernel/cpu/mtrr/generic.c
--- a/arch/i386/kernel/cpu/mtrr/generic.c       2005-03-28 21:21:16 -08:00
+++ b/arch/i386/kernel/cpu/mtrr/generic.c       2005-03-28 21:21:16 -08:00
@@ -92,6 +92,16 @@
        printk(KERN_INFO "mtrr: corrected configuration.\n");
 }
 
+/* Doesn't attempt to pass an error out to MTRR users
+   because it's quite complicated in some cases and probably not
+   worth it because the best error handling is to ignore it. */
+void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
+{
+       if (wrmsr_safe(msr, a, b) < 0)
+               printk(KERN_ERR
+                       "MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
+                       smp_processor_id(), msr, a, b);
+}
 
 int generic_get_free_region(unsigned long base, unsigned long size)
 /*  [SUMMARY] Get a free MTRR.
@@ -150,14 +160,14 @@
 
        rdmsr(MTRRfix64K_00000_MSR, lo, hi);
        if (p[0] != lo || p[1] != hi) {
-               wrmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
+               mtrr_wrmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
                changed = TRUE;
        }
 
        for (i = 0; i < 2; i++) {
                rdmsr(MTRRfix16K_80000_MSR + i, lo, hi);
                if (p[2 + i * 2] != lo || p[3 + i * 2] != hi) {
-                       wrmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2],
+                       mtrr_wrmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2],
                              p[3 + i * 2]);
                        changed = TRUE;
                }
@@ -166,7 +176,7 @@
        for (i = 0; i < 8; i++) {
                rdmsr(MTRRfix4K_C0000_MSR + i, lo, hi);
                if (p[6 + i * 2] != lo || p[7 + i * 2] != hi) {
-                       wrmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2],
+                       mtrr_wrmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2],
                              p[7 + i * 2]);
                        changed = TRUE;
                }
@@ -184,7 +194,7 @@
        rdmsr(MTRRphysBase_MSR(index), lo, hi);
        if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
            || (vr->base_hi & 0xfUL) != (hi & 0xfUL)) {
-               wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
+               mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
                changed = TRUE;
        }
 
@@ -192,7 +202,7 @@
 
        if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
            || (vr->mask_hi & 0xfUL) != (hi & 0xfUL)) {
-               wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
+               mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
                changed = TRUE;
        }
        return changed;
@@ -267,7 +277,7 @@
        rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
 
        /*  Disable MTRRs, and set the default type to uncached  */
-       wrmsr(MTRRdefType_MSR, deftype_lo & 0xf300UL, deftype_hi);
+       mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & 0xf300UL, deftype_hi);
 }
 
 static void post_set(void)
@@ -276,7 +286,7 @@
        __flush_tlb();
 
        /* Intel (P6) standard MTRRs */
-       wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
+       mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
                
        /*  Enable caches  */
        write_cr0(read_cr0() & 0xbfffffff);
@@ -330,11 +340,11 @@
        if (size == 0) {
                /* The invalid bit is kept in the mask, so we simply clear the
                   relevant mask register to disable a range. */
-               wrmsr(MTRRphysMask_MSR(reg), 0, 0);
+               mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
        } else {
-               wrmsr(MTRRphysBase_MSR(reg), base << PAGE_SHIFT | type,
+               mtrr_wrmsr(MTRRphysBase_MSR(reg), base << PAGE_SHIFT | type,
                      (base & size_and_mask) >> (32 - PAGE_SHIFT));
-               wrmsr(MTRRphysMask_MSR(reg), -size << PAGE_SHIFT | 0x800,
+               mtrr_wrmsr(MTRRphysMask_MSR(reg), -size << PAGE_SHIFT | 0x800,
                      (-size & size_and_mask) >> (32 - PAGE_SHIFT));
        }
 
diff -Nru a/arch/i386/kernel/cpu/mtrr/mtrr.h b/arch/i386/kernel/cpu/mtrr/mtrr.h
--- a/arch/i386/kernel/cpu/mtrr/mtrr.h  2005-03-28 21:21:16 -08:00
+++ b/arch/i386/kernel/cpu/mtrr/mtrr.h  2005-03-28 21:21:16 -08:00
@@ -94,4 +94,5 @@
 void finalize_mtrr_state(void);
 void mtrr_state_warn(void);
 char *mtrr_attrib_to_str(int x);
+void mtrr_wrmsr(unsigned, unsigned, unsigned);
 
diff -Nru a/arch/i386/kernel/cpu/mtrr/state.c 
b/arch/i386/kernel/cpu/mtrr/state.c
--- a/arch/i386/kernel/cpu/mtrr/state.c 2005-03-28 21:21:16 -08:00
+++ b/arch/i386/kernel/cpu/mtrr/state.c 2005-03-28 21:21:16 -08:00
@@ -42,7 +42,7 @@
 {
        if (use_intel()) 
                /*  Disable MTRRs, and set the default type to uncached  */
-               wrmsr(MTRRdefType_MSR, ctxt->deftype_lo & 0xf300UL,
+               mtrr_wrmsr(MTRRdefType_MSR, ctxt->deftype_lo & 0xf300UL,
                      ctxt->deftype_hi);
        else if (is_cpu(CYRIX))
                /* Cyrix ARRs - everything else were excluded at the top */
@@ -60,7 +60,7 @@
                /*  Restore MTRRdefType  */
                if (use_intel())
                        /* Intel (P6) standard MTRRs */
-                       wrmsr(MTRRdefType_MSR, ctxt->deftype_lo, 
ctxt->deftype_hi);
+                       mtrr_wrmsr(MTRRdefType_MSR, ctxt->deftype_lo, 
ctxt->deftype_hi);
                else
                        /* Cyrix ARRs - everything else was excluded at the top 
*/
                        setCx86(CX86_CCR3, ctxt->ccr3);
diff -Nru a/include/asm-i386/msr.h b/include/asm-i386/msr.h
--- a/include/asm-i386/msr.h    2005-03-28 21:21:16 -08:00
+++ b/include/asm-i386/msr.h    2005-03-28 21:21:16 -08:00
@@ -32,6 +32,21 @@
        wrmsr (msr, lo, hi);
 }
 
+/* wrmsr with exception handling */
+#define wrmsr_safe(msr,a,b) ({ int ret__;                                      
        \
+       asm volatile("2: wrmsr ; xorl %0,%0\n"                                  
        \
+                    "1:\n\t"                                                   
        \
+                    ".section .fixup,\"ax\"\n\t"                               
        \
+                    "3:  movl %4,%0 ; jmp 1b\n\t"                              
        \
+                    ".previous\n\t"                                            
        \
+                    ".section __ex_table,\"a\"\n"                              
        \
+                    "   .align 4\n\t"                                          
        \
+                    "   .long  2b,3b\n\t"                                      
        \
+                    ".previous"                                                
        \
+                    : "=a" (ret__)                                             
        \
+                    : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT));\
+       ret__; })
+
 #define rdtsc(low,high) \
      __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
 
diff -Nru a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h
--- a/include/asm-x86_64/msr.h  2005-03-28 21:21:16 -08:00
+++ b/include/asm-x86_64/msr.h  2005-03-28 21:21:16 -08:00
@@ -28,8 +28,8 @@
 
 #define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32) 
 
-/* wrmsrl with exception handling */
-#define checking_wrmsrl(msr,val) ({ int ret__;                                 
        \
+/* wrmsr with exception handling */
+#define wrmsr_safe(msr,a,b) ({ int ret__;                                      
        \
        asm volatile("2: wrmsr ; xorl %0,%0\n"                                  
        \
                     "1:\n\t"                                                   
        \
                     ".section .fixup,\"ax\"\n\t"                               
        \
@@ -40,8 +40,10 @@
                     "   .quad  2b,3b\n\t"                                      
        \
                     ".previous"                                                
        \
                     : "=a" (ret__)                                             
        \
-                    : "c" (msr), "0" ((__u32)val), "d" ((val)>>32), "i" 
(-EFAULT));\
+                    : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT));\
        ret__; })
+
+#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
 
 #define rdtsc(low,high) \
      __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
-
To unsubscribe from this list: send the line "unsubscribe bk-commits-head" in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to