Avoid hang when 4GB or more DRAM is installed on AMD RS780 UMA systems.

-- When building for UMA, reduce the limit for DRAM below 4GB
   from E0000000 to C0000000. This is needed to accommodate the
   UMA frame buffer.
-- Correct problem where msr C0010010 bits 21 and 22 (MtrrTom2En
   and Tom2ForceMemTypeWB) are not set consistently across cores.
-- Enable TOM2 only if DRAM is present above 4GB.
-- Use AMD Tom2ForceMemTypeWB feature to avoid the need for 
   variable MTRR ranges above 4GB.
-- Split function x86_setup_var_mtrrs into standard and AMD versions.
   The AMD version of the function relies on Tom2ForceMemTypeWB for
   making DRAM above 4GB type WB. The AMD version also incorporates
   a change to avoid an unexpected variable MTRR range when using
   UMA graphics.
-- Improve white space consistency for mtrr.c by using tabs in place
   of spaces.
Tested on kino-780am2-fam10 with 2GB and 4GB.
 
Signed-off-by: Scott Duplichan <[email protected]>

Index: src/cpu/amd/mtrr/amd_mtrr.c
===================================================================
--- src/cpu/amd/mtrr/amd_mtrr.c (revision 6056)
+++ src/cpu/amd/mtrr/amd_mtrr.c (working copy)
@@ -107,14 +107,14 @@
        unsigned long address_bits;
        struct mem_state state;
        unsigned long i;
-       msr_t msr;
+       msr_t msr, sys_cfg;
 
 
        /* Enable the access to AMD RdDram and WrDram extension bits */
        disable_cache();
-       msr = rdmsr(SYSCFG_MSR);
-       msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
-       wrmsr(SYSCFG_MSR, msr);
+       sys_cfg = rdmsr(SYSCFG_MSR);
+       sys_cfg.lo |= SYSCFG_MSR_MtrrFixDramModEn;
+       wrmsr(SYSCFG_MSR, sys_cfg);
        enable_cache();
 
        printk(BIOS_DEBUG, "\n");
@@ -148,11 +148,18 @@
        msr.lo = state.mmio_basek << 10;
        wrmsr(TOP_MEM, msr);
 
+       // assume no DRAM above 4GB
+       sys_cfg.lo &= ~SYSCFG_MSR_TOM2En;
+       sys_cfg.lo &= ~SYSCFG_MSR_TOM2WB;
        if(state.tomk > (4*1024*1024)) {
                /* Setup TOP_MEM2 */
                msr.hi = state.tomk >> 22;
                msr.lo = state.tomk << 10;
                wrmsr(TOP_MEM2, msr);
+               // enable tom2
+               sys_cfg.lo |= SYSCFG_MSR_TOM2En;
+               // Make DRAM above 4GB WB without using any variable MTRR ranges
+               sys_cfg.lo |= SYSCFG_MSR_TOM2WB;
        }
 
        /* zero the IORR's before we enable to prevent
@@ -167,10 +174,9 @@
         * Enable the RdMem and WrMem bits in the fixed mtrrs.
         * Disable access to the RdMem and WrMem in the fixed mtrr.
         */
-       msr = rdmsr(SYSCFG_MSR);
-       msr.lo |= SYSCFG_MSR_MtrrVarDramEn | SYSCFG_MSR_MtrrFixDramEn | 
SYSCFG_MSR_TOM2En;
-       msr.lo &= ~SYSCFG_MSR_MtrrFixDramModEn;
-       wrmsr(SYSCFG_MSR, msr);
+       sys_cfg.lo |= SYSCFG_MSR_MtrrVarDramEn | SYSCFG_MSR_MtrrFixDramEn;
+       sys_cfg.lo &= ~SYSCFG_MSR_MtrrFixDramModEn;
+       wrmsr(SYSCFG_MSR, sys_cfg);
 
        enable_fixed_mtrr();
 
@@ -186,5 +192,5 @@
        /* Now that I have mapped what is memory and what is not
         * Setup the mtrrs so we can cache the memory.
         */
-       x86_setup_var_mtrrs(address_bits);
+       amd_setup_var_mtrrs(address_bits);
 }
Index: src/cpu/x86/mtrr/mtrr.c
===================================================================
--- src/cpu/x86/mtrr/mtrr.c     (revision 6056)
+++ src/cpu/x86/mtrr/mtrr.c     (working copy)
@@ -364,31 +364,30 @@
 
 void x86_setup_fixed_mtrrs(void)
 {
-        /* Try this the simple way of incrementally adding together
-         * mtrrs.  If this doesn't work out we can get smart again
-         * and clear out the mtrrs.
-         */
+       /* Try this the simple way of incrementally adding together
+        * mtrrs.  If this doesn't work out we can get smart again
+        * and clear out the mtrrs.
+        */
 
-        printk(BIOS_DEBUG, "\n");
-        /* Initialized the fixed_mtrrs to uncached */
-        printk(BIOS_DEBUG, "Setting fixed MTRRs(%d-%d) Type: UC\n",
-               0, NUM_FIXED_RANGES);
-        set_fixed_mtrrs(0, NUM_FIXED_RANGES, MTRR_TYPE_UNCACHEABLE);
+       printk(BIOS_DEBUG, "\n");
+       /* Initialized the fixed_mtrrs to uncached */
+       printk(BIOS_DEBUG, "Setting fixed MTRRs(%d-%d) Type: UC\n",
+               0, NUM_FIXED_RANGES);
+       set_fixed_mtrrs(0, NUM_FIXED_RANGES, MTRR_TYPE_UNCACHEABLE);
 
-        /* Now see which of the fixed mtrrs cover ram.
-                 */
-        search_global_resources(
+       /* Now see which of the fixed mtrrs cover ram. */
+       search_global_resources(
                IORESOURCE_MEM | IORESOURCE_CACHEABLE, IORESOURCE_MEM | 
IORESOURCE_CACHEABLE,
                set_fixed_mtrr_resource, NULL);
-        printk(BIOS_DEBUG, "DONE fixed MTRRs\n");
+       printk(BIOS_DEBUG, "DONE fixed MTRRs\n");
 
-        /* enable fixed MTRR */
-        printk(BIOS_SPEW, "call enable_fixed_mtrr()\n");
-        enable_fixed_mtrr();
+       /* enable fixed MTRR */
+       printk(BIOS_SPEW, "call enable_fixed_mtrr()\n");
+       enable_fixed_mtrr();
 
 }
 
-void x86_setup_var_mtrrs(unsigned address_bits)
+static void x86_setup_var_mtrrs(unsigned address_bits)
 /* this routine needs to know how many address bits a given processor
  * supports.  CPUs get grumpy when you set too many bits in
  * their mtrr registers :(  I would generically call cpuid here
@@ -449,6 +448,64 @@
        post_code(0x6A);
 }
 
+
+void amd_setup_var_mtrrs(unsigned address_bits)
+/* this routine needs to know how many address bits a given processor
+ * supports.  CPUs get grumpy when you set too many bits in
+ * their mtrr registers :(  I would generically call cpuid here
+ * and find out how many physically supported but some cpus are
+ * buggy, and report more bits then they actually support.
+ */
+{
+       /* Try this the simple way of incrementally adding together
+        * mtrrs.  If this doesn't work out we can get smart again
+        * and clear out the mtrrs.
+        */
+       struct var_mtrr_state var_state;
+
+       /* Cache as many memory areas as possible */
+       /* FIXME is there an algorithm for computing the optimal set of mtrrs?
+        * In some cases it is definitely possible to do better.
+        */
+
+       var_state.range_startk = 0;
+       var_state.range_sizek = 0;
+       var_state.hole_startk = 0;
+       var_state.hole_sizek = 0;
+       var_state.reg = 0;
+       var_state.address_bits = address_bits;
+
+       search_global_resources(
+               IORESOURCE_MEM | IORESOURCE_CACHEABLE, IORESOURCE_MEM | 
IORESOURCE_CACHEABLE,
+               set_var_mtrr_resource, &var_state);
+
+       if (var_state.range_sizek >= 0x100000000ull / 1024 && 
var_state.range_startk == 0)
+               {
+               // no variable MTRR range is needed for WB from 4GB-TOM2
+               // because Tom2ForceMemTypeWB will be used for this purpose.
+               var_state.range_sizek = var_state.hole_startk;
+               }
+
+#if (CONFIG_GFXUMA == 1) /* UMA or SP. */
+       // For now we assume the UMA space is at the end of memory below 4GB
+       // Reduce the dram wb mtrr range so that it does not cover the uma at 
the end
+       var_state.range_sizek -= (uma_memory_size >> 10);
+#endif
+       /* Write the last range */
+       var_state.reg = range_to_mtrr(var_state.reg, var_state.range_startk,
+               var_state.range_sizek, 0, MTRR_TYPE_WRBACK, 
var_state.address_bits);
+       printk(BIOS_DEBUG, "DONE variable MTRRs\n");
+       printk(BIOS_DEBUG, "Clear out the extra MTRR's\n");
+       /* Clear out the extra MTRR's */
+       while(var_state.reg < MTRRS) {
+               set_var_mtrr(var_state.reg++, 0, 0, 0, var_state.address_bits);
+       }
+       printk(BIOS_SPEW, "call enable_var_mtrr()\n");
+       enable_var_mtrr();
+       printk(BIOS_SPEW, "Leave %s\n", __func__);
+       post_code(0x6A);
+}
+
 void x86_setup_mtrrs(unsigned address_bits)
 {
        x86_setup_fixed_mtrrs();
Index: src/include/cpu/amd/mtrr.h
===================================================================
--- src/include/cpu/amd/mtrr.h  (revision 6056)
+++ src/include/cpu/amd/mtrr.h  (working copy)
@@ -8,6 +8,7 @@
 #define MTRR_WRITE_MEM                 (1 << 3)
 
 #define SYSCFG_MSR                     0xC0010010
+#define SYSCFG_MSR_TOM2WB              (1 << 22)
 #define SYSCFG_MSR_TOM2En              (1 << 21)
 #define SYSCFG_MSR_MtrrVarDramEn       (1 << 20)
 #define SYSCFG_MSR_MtrrFixDramModEn    (1 << 19)
Index: src/include/cpu/x86/mtrr.h
===================================================================
--- src/include/cpu/x86/mtrr.h  (revision 6056)
+++ src/include/cpu/x86/mtrr.h  (working copy)
@@ -37,7 +37,7 @@
 #if !defined (ASSEMBLY) && !defined(__PRE_RAM__)
 #include <device/device.h>
 void enable_fixed_mtrr(void);
-void x86_setup_var_mtrrs(unsigned address_bits);
+void amd_setup_var_mtrrs(unsigned address_bits);
 void x86_setup_mtrrs(unsigned address_bits);
 int x86_mtrr_check(void);
 void set_var_mtrr_resource(void *gp, struct device *dev, struct resource *res);
Index: src/northbridge/amd/amdmct/wrappers/mcti.h
===================================================================
--- src/northbridge/amd/amdmct/wrappers/mcti.h  (revision 6056)
+++ src/northbridge/amd/amdmct/wrappers/mcti.h  (working copy)
@@ -42,11 +42,12 @@
 //#define    SYSTEM_TYPE     MOBILE
 #endif
 
-/*----------------------------------------------------------------------------
-COMMENT OUT ALL BUT 1
-----------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+#if (CONFIG_GFXUMA)
+#define UMA_SUPPORT    1       /*Supported */
+#else
 #define UMA_SUPPORT    0       /*Not supported */
-//#define UMA_SUPPORT  1       /*Supported */
+#endif
 
 /*----------------------------------------------------------------------------
 UPDATE AS NEEDED
Index: src/northbridge/amd/amdmct/wrappers/mcti_d.c
===================================================================
--- src/northbridge/amd/amdmct/wrappers/mcti_d.c        (revision 6056)
+++ src/northbridge/amd/amdmct/wrappers/mcti_d.c        (working copy)
@@ -132,13 +132,17 @@
                //val = 1;      /* enable */
                break;
        case NV_BottomIO:
+#if (UMA_SUPPORT == 0)
                val = 0xE0;     /* address bits [31:24] */
+#elif (UMA_SUPPORT == 1)
+               val = 0xC0;     /* address bits [31:24] */
+#endif
                break;
        case NV_BottomUMA:
 #if (UMA_SUPPORT == 0)
                val = 0xE0;     /* address bits [31:24] */
 #elif (UMA_SUPPORT == 1)
-               val = 0xB0;     /* address bits [31:24] */
+               val = 0xC0;     /* address bits [31:24] */
 #endif
                break;
        case NV_ECC:
Avoid hang when 4GB or more DRAM is installed on AMD RS780 UMA systems.

-- When building for UMA, reduce the limit for DRAM below 4GB
   from E0000000 to C0000000. This is needed to accommodate the
   UMA frame buffer.
-- Correct problem where msr C0010010 bits 21 and 22 (MtrrTom2En
   and Tom2ForceMemTypeWB) are not set consistently across cores.
-- Enable TOM2 only if DRAM is present above 4GB.
-- Use AMD Tom2ForceMemTypeWB feature to avoid the need for 
   variable MTRR ranges above 4GB.
-- Split function x86_setup_var_mtrrs into standard and AMD versions.
   The AMD version of the function relies on Tom2ForceMemTypeWB for
   making DRAM above 4GB type WB. The AMD version also incorporates
   a change to avoid an unexpected variable MTRR range when using
   UMA graphics.
-- Improve white space consistency for mtrr.c by using tabs in place
   of spaces.
Tested on kino-780am2-fam10 with 2GB and 4GB.

Signed-off-by: Scott Duplichan <[email protected]>

Index: src/cpu/amd/mtrr/amd_mtrr.c
===================================================================
--- src/cpu/amd/mtrr/amd_mtrr.c (revision 6056)
+++ src/cpu/amd/mtrr/amd_mtrr.c (working copy)
@@ -107,14 +107,14 @@
        unsigned long address_bits;
        struct mem_state state;
        unsigned long i;
-       msr_t msr;
+       msr_t msr, sys_cfg;
 
 
        /* Enable the access to AMD RdDram and WrDram extension bits */
        disable_cache();
-       msr = rdmsr(SYSCFG_MSR);
-       msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
-       wrmsr(SYSCFG_MSR, msr);
+       sys_cfg = rdmsr(SYSCFG_MSR);
+       sys_cfg.lo |= SYSCFG_MSR_MtrrFixDramModEn;
+       wrmsr(SYSCFG_MSR, sys_cfg);
        enable_cache();
 
        printk(BIOS_DEBUG, "\n");
@@ -148,11 +148,18 @@
        msr.lo = state.mmio_basek << 10;
        wrmsr(TOP_MEM, msr);
 
+       // assume no DRAM above 4GB
+       sys_cfg.lo &= ~SYSCFG_MSR_TOM2En;
+       sys_cfg.lo &= ~SYSCFG_MSR_TOM2WB;
        if(state.tomk > (4*1024*1024)) {
                /* Setup TOP_MEM2 */
                msr.hi = state.tomk >> 22;
                msr.lo = state.tomk << 10;
                wrmsr(TOP_MEM2, msr);
+               // enable tom2
+               sys_cfg.lo |= SYSCFG_MSR_TOM2En;
+               // Make DRAM above 4GB WB without using any variable MTRR ranges
+               sys_cfg.lo |= SYSCFG_MSR_TOM2WB;
        }
 
        /* zero the IORR's before we enable to prevent
@@ -167,10 +174,9 @@
         * Enable the RdMem and WrMem bits in the fixed mtrrs.
         * Disable access to the RdMem and WrMem in the fixed mtrr.
         */
-       msr = rdmsr(SYSCFG_MSR);
-       msr.lo |= SYSCFG_MSR_MtrrVarDramEn | SYSCFG_MSR_MtrrFixDramEn | 
SYSCFG_MSR_TOM2En;
-       msr.lo &= ~SYSCFG_MSR_MtrrFixDramModEn;
-       wrmsr(SYSCFG_MSR, msr);
+       sys_cfg.lo |= SYSCFG_MSR_MtrrVarDramEn | SYSCFG_MSR_MtrrFixDramEn;
+       sys_cfg.lo &= ~SYSCFG_MSR_MtrrFixDramModEn;
+       wrmsr(SYSCFG_MSR, sys_cfg);
 
        enable_fixed_mtrr();
 
@@ -186,5 +192,5 @@
        /* Now that I have mapped what is memory and what is not
         * Setup the mtrrs so we can cache the memory.
         */
-       x86_setup_var_mtrrs(address_bits);
+       amd_setup_var_mtrrs(address_bits);
 }
Index: src/cpu/x86/mtrr/mtrr.c
===================================================================
--- src/cpu/x86/mtrr/mtrr.c     (revision 6056)
+++ src/cpu/x86/mtrr/mtrr.c     (working copy)
@@ -364,31 +364,30 @@
 
 void x86_setup_fixed_mtrrs(void)
 {
-        /* Try this the simple way of incrementally adding together
-         * mtrrs.  If this doesn't work out we can get smart again
-         * and clear out the mtrrs.
-         */
+       /* Try this the simple way of incrementally adding together
+        * mtrrs.  If this doesn't work out we can get smart again
+        * and clear out the mtrrs.
+        */
 
-        printk(BIOS_DEBUG, "\n");
-        /* Initialized the fixed_mtrrs to uncached */
-        printk(BIOS_DEBUG, "Setting fixed MTRRs(%d-%d) Type: UC\n",
-               0, NUM_FIXED_RANGES);
-        set_fixed_mtrrs(0, NUM_FIXED_RANGES, MTRR_TYPE_UNCACHEABLE);
+       printk(BIOS_DEBUG, "\n");
+       /* Initialized the fixed_mtrrs to uncached */
+       printk(BIOS_DEBUG, "Setting fixed MTRRs(%d-%d) Type: UC\n",
+               0, NUM_FIXED_RANGES);
+       set_fixed_mtrrs(0, NUM_FIXED_RANGES, MTRR_TYPE_UNCACHEABLE);
 
-        /* Now see which of the fixed mtrrs cover ram.
-                 */
-        search_global_resources(
+       /* Now see which of the fixed mtrrs cover ram. */
+       search_global_resources(
                IORESOURCE_MEM | IORESOURCE_CACHEABLE, IORESOURCE_MEM | 
IORESOURCE_CACHEABLE,
                set_fixed_mtrr_resource, NULL);
-        printk(BIOS_DEBUG, "DONE fixed MTRRs\n");
+       printk(BIOS_DEBUG, "DONE fixed MTRRs\n");
 
-        /* enable fixed MTRR */
-        printk(BIOS_SPEW, "call enable_fixed_mtrr()\n");
-        enable_fixed_mtrr();
+       /* enable fixed MTRR */
+       printk(BIOS_SPEW, "call enable_fixed_mtrr()\n");
+       enable_fixed_mtrr();
 
 }
 
-void x86_setup_var_mtrrs(unsigned address_bits)
+static void x86_setup_var_mtrrs(unsigned address_bits)
 /* this routine needs to know how many address bits a given processor
  * supports.  CPUs get grumpy when you set too many bits in
  * their mtrr registers :(  I would generically call cpuid here
@@ -449,6 +448,64 @@
        post_code(0x6A);
 }
 
+
+void amd_setup_var_mtrrs(unsigned address_bits)
+/* this routine needs to know how many address bits a given processor
+ * supports.  CPUs get grumpy when you set too many bits in
+ * their mtrr registers :(  I would generically call cpuid here
+ * and find out how many physically supported but some cpus are
+ * buggy, and report more bits then they actually support.
+ */
+{
+       /* Try this the simple way of incrementally adding together
+        * mtrrs.  If this doesn't work out we can get smart again
+        * and clear out the mtrrs.
+        */
+       struct var_mtrr_state var_state;
+
+       /* Cache as many memory areas as possible */
+       /* FIXME is there an algorithm for computing the optimal set of mtrrs?
+        * In some cases it is definitely possible to do better.
+        */
+
+       var_state.range_startk = 0;
+       var_state.range_sizek = 0;
+       var_state.hole_startk = 0;
+       var_state.hole_sizek = 0;
+       var_state.reg = 0;
+       var_state.address_bits = address_bits;
+
+       search_global_resources(
+               IORESOURCE_MEM | IORESOURCE_CACHEABLE, IORESOURCE_MEM | 
IORESOURCE_CACHEABLE,
+               set_var_mtrr_resource, &var_state);
+
+       if (var_state.range_sizek >= 0x100000000ull / 1024 && 
var_state.range_startk == 0)
+               {
+               // no variable MTRR range is needed for WB from 4GB-TOM2
+               // because Tom2ForceMemTypeWB will be used for this purpose.
+               var_state.range_sizek = var_state.hole_startk;
+               }
+
+#if (CONFIG_GFXUMA == 1) /* UMA or SP. */
+       // For now we assume the UMA space is at the end of memory below 4GB
+       // Reduce the dram wb mtrr range so that it does not cover the uma at 
the end
+       var_state.range_sizek -= (uma_memory_size >> 10);
+#endif
+       /* Write the last range */
+       var_state.reg = range_to_mtrr(var_state.reg, var_state.range_startk,
+               var_state.range_sizek, 0, MTRR_TYPE_WRBACK, 
var_state.address_bits);
+       printk(BIOS_DEBUG, "DONE variable MTRRs\n");
+       printk(BIOS_DEBUG, "Clear out the extra MTRR's\n");
+       /* Clear out the extra MTRR's */
+       while(var_state.reg < MTRRS) {
+               set_var_mtrr(var_state.reg++, 0, 0, 0, var_state.address_bits);
+       }
+       printk(BIOS_SPEW, "call enable_var_mtrr()\n");
+       enable_var_mtrr();
+       printk(BIOS_SPEW, "Leave %s\n", __func__);
+       post_code(0x6A);
+}
+
 void x86_setup_mtrrs(unsigned address_bits)
 {
        x86_setup_fixed_mtrrs();
Index: src/include/cpu/amd/mtrr.h
===================================================================
--- src/include/cpu/amd/mtrr.h  (revision 6056)
+++ src/include/cpu/amd/mtrr.h  (working copy)
@@ -8,6 +8,7 @@
 #define MTRR_WRITE_MEM                 (1 << 3)
 
 #define SYSCFG_MSR                     0xC0010010
+#define SYSCFG_MSR_TOM2WB              (1 << 22)
 #define SYSCFG_MSR_TOM2En              (1 << 21)
 #define SYSCFG_MSR_MtrrVarDramEn       (1 << 20)
 #define SYSCFG_MSR_MtrrFixDramModEn    (1 << 19)
Index: src/include/cpu/x86/mtrr.h
===================================================================
--- src/include/cpu/x86/mtrr.h  (revision 6056)
+++ src/include/cpu/x86/mtrr.h  (working copy)
@@ -37,7 +37,7 @@
 #if !defined (ASSEMBLY) && !defined(__PRE_RAM__)
 #include <device/device.h>
 void enable_fixed_mtrr(void);
-void x86_setup_var_mtrrs(unsigned address_bits);
+void amd_setup_var_mtrrs(unsigned address_bits);
 void x86_setup_mtrrs(unsigned address_bits);
 int x86_mtrr_check(void);
 void set_var_mtrr_resource(void *gp, struct device *dev, struct resource *res);
Index: src/northbridge/amd/amdmct/wrappers/mcti.h
===================================================================
--- src/northbridge/amd/amdmct/wrappers/mcti.h  (revision 6056)
+++ src/northbridge/amd/amdmct/wrappers/mcti.h  (working copy)
@@ -42,11 +42,12 @@
 //#define    SYSTEM_TYPE     MOBILE
 #endif
 
-/*----------------------------------------------------------------------------
-COMMENT OUT ALL BUT 1
-----------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+#if (CONFIG_GFXUMA)
+#define UMA_SUPPORT    1       /*Supported */
+#else
 #define UMA_SUPPORT    0       /*Not supported */
-//#define UMA_SUPPORT  1       /*Supported */
+#endif
 
 /*----------------------------------------------------------------------------
 UPDATE AS NEEDED
Index: src/northbridge/amd/amdmct/wrappers/mcti_d.c
===================================================================
--- src/northbridge/amd/amdmct/wrappers/mcti_d.c        (revision 6056)
+++ src/northbridge/amd/amdmct/wrappers/mcti_d.c        (working copy)
@@ -132,13 +132,17 @@
                //val = 1;      /* enable */
                break;
        case NV_BottomIO:
+#if (UMA_SUPPORT == 0)
                val = 0xE0;     /* address bits [31:24] */
+#elif (UMA_SUPPORT == 1)
+               val = 0xC0;     /* address bits [31:24] */
+#endif
                break;
        case NV_BottomUMA:
 #if (UMA_SUPPORT == 0)
                val = 0xE0;     /* address bits [31:24] */
 #elif (UMA_SUPPORT == 1)
-               val = 0xB0;     /* address bits [31:24] */
+               val = 0xC0;     /* address bits [31:24] */
 #endif
                break;
        case NV_ECC:
-- 
coreboot mailing list: [email protected]
http://www.coreboot.org/mailman/listinfo/coreboot

Reply via email to