-----BEGIN PGP SIGNED MESSAGE----- Hash: SHA1 > Thanks for the analysis. I can see it being useful for other things too.
Ok. > My understanding was that CAR refers to L2. As long as nothing gets > replaced from the L2, everything is as it should be. ROM contents can > always be fetched again, so that's not critical for correctness. This is OK, but L2 CAR is described in more detail for fam11h; otherwise AMD always just speaks about L1 CAR. The fam11h needs some extra tweaks to various MSRs to disable speculative fills, if I remember correctly. This is the reason why I see this as a bit dangerous; perhaps older CPUs need this too. I think we should mark the XIP region as WP instead of WB (check the fam11h BKDG). Anyway - I tried with a UC copy; it looks like it is not so slow... I have the patch for the register clobber cleanup in the works, plus I will do some patch for saving the coreboot mem to the resume area... but perhaps on Sunday. Tomorrow a bit of skiing, but if you are curious, here is the patch. It just fixes the clobber stuff for the assembly routines; it has bitten me already while dumping the MSRs... the ECX value contained some garbage, and rdmsr caused an exception. The memcpy code is from the Linux kernel. Rudolf -----BEGIN PGP SIGNATURE----- Version: GnuPG v1.4.10 (GNU/Linux) Comment: Using GnuPG with Mozilla - http://enigmail.mozdev.org iEYEARECAAYFAktQ/pkACgkQ3J9wPJqZRNV5OACghwX5yZFScyo24Pzkt66pgHta 2VIAoNryS9EX/JZpGcu4NJ3IpEVnbK/8 =IRfx -----END PGP SIGNATURE-----
Index: disable_cache_as_ram.c
===================================================================
--- disable_cache_as_ram.c (revision 5009)
+++ disable_cache_as_ram.c (working copy)
@@ -2,62 +2,49 @@
/* be warned, this file will be used other cores and core 0 / node 0 */
static inline __attribute__((always_inline)) void disable_cache_as_ram(void)
{
-
- __asm__ volatile (
-
+ __asm__ __volatile__ (
/* We don't need cache as ram for now on */
/* disable cache */
- "movl %cr0, %eax\n\t"
- "orl $(0x1<<30),%eax\n\t"
- "movl %eax, %cr0\n\t"
+ "movl %%cr0, %%eax\n\t"
+ "orl $(0x1<<30),%%eax\n\t"
+ "movl %%eax, %%cr0\n\t"
/* clear sth */
- "movl $0x269, %ecx\n\t" /* fix4k_c8000*/
- "xorl %edx, %edx\n\t"
- "xorl %eax, %eax\n\t"
+ "movl $0x269, %%ecx\n\t" /* fix4k_c8000*/
+ "xorl %%edx, %%edx\n\t"
+ "xorl %%eax, %%eax\n\t"
"wrmsr\n\t"
#if CONFIG_DCACHE_RAM_SIZE > 0x8000
- "movl $0x268, %ecx\n\t" /* fix4k_c0000*/
+ "movl $0x268, %%ecx\n\t" /* fix4k_c0000*/
"wrmsr\n\t"
#endif
/* disable fixed mtrr from now on, it will be enabled by coreboot_ram again*/
- "movl $0xC0010010, %ecx\n\t"
+ "movl $0xC0010010, %%ecx\n\t"
// "movl $SYSCFG_MSR, %ecx\n\t"
"rdmsr\n\t"
- "andl $(~(3<<18)), %eax\n\t"
+ "andl $(~(3<<18)), %%eax\n\t"
// "andl $(~(SYSCFG_MSR_MtrrFixDramModEn | SYSCFG_MSR_MtrrFixDramEn)), %eax\n\t"
"wrmsr\n\t"
/* Set the default memory type and disable fixed and enable variable MTRRs */
- "movl $0x2ff, %ecx\n\t"
+ "movl $0x2ff, %%ecx\n\t"
// "movl $MTRRdefType_MSR, %ecx\n\t"
- "xorl %edx, %edx\n\t"
+ "xorl %%edx, %%edx\n\t"
/* Enable Variable and Disable Fixed MTRRs */
- "movl $0x00000800, %eax\n\t"
+ "movl $0x00000800, %%eax\n\t"
"wrmsr\n\t"
/* enable cache */
- "movl %cr0, %eax\n\t"
- "andl $0x9fffffff,%eax\n\t"
- "movl %eax, %cr0\n\t"
-
+ "movl %%cr0, %%eax\n\t"
+ "andl $0x9fffffff,%%eax\n\t"
+ "movl %%eax, %%cr0\n\t"
+ ::: "memory", "eax", "ecx", "edx"
);
}
static void disable_cache_as_ram_bsp(void)
{
- __asm__ volatile (
-// "pushl %eax\n\t"
- "pushl %edx\n\t"
- "pushl %ecx\n\t"
- );
-
disable_cache_as_ram();
- __asm__ volatile (
- "popl %ecx\n\t"
- "popl %edx\n\t"
-// "popl %eax\n\t"
- );
}
Index: post_cache_as_ram.c
===================================================================
--- post_cache_as_ram.c (revision 5009)
+++ post_cache_as_ram.c (working copy)
@@ -12,12 +12,16 @@
static void inline __attribute__((always_inline)) memcopy(void *dest, const void *src, unsigned long bytes)
{
- __asm__ volatile(
- "cld\n\t"
- "rep; movsl\n\t"
- : /* No outputs */
- : "S" (src), "D" (dest), "c" ((bytes)>>2)
- );
+ int d0, d1, d2;
+ asm volatile("cld ; rep ; movsl\n\t"
+ "movl %4,%%ecx\n\t"
+ "andl $3,%%ecx\n\t"
+ "jz 1f\n\t"
+ "rep ; movsb\n\t"
+ "1:"
+ : "=&c" (d0), "=&D" (d1), "=&S" (d2)
+ : "0" (bytes / 4), "g" (bytes), "1" ((long)dest), "2" ((long)src)
+ : "memory", "cc");
}
/* Disable Erratum 343 Workaround, see RevGuide for Fam10h, Pub#41322 Rev 3.33 */
@@ -66,28 +70,16 @@
/* from here don't store more data in CAR */
vErrata343();
-#if 0
- __asm__ volatile (
- "pushl %eax\n\t"
- );
-#endif
-
memcopy((void *)((CONFIG_RAMTOP)-CONFIG_DCACHE_RAM_SIZE), (void *)CONFIG_DCACHE_RAM_BASE, CONFIG_DCACHE_RAM_SIZE); //inline
// dump_mem((CONFIG_RAMTOP) - 0x8000, (CONFIG_RAMTOP) - 0x7c00);
__asm__ volatile (
/* set new esp */ /* before CONFIG_RAMBASE */
- "subl %0, %%ebp\n\t"
"subl %0, %%esp\n\t"
::"a"( (CONFIG_DCACHE_RAM_BASE + CONFIG_DCACHE_RAM_SIZE)- (CONFIG_RAMTOP) )
- ); // We need to push %eax to the stack (CAR) before copy stack and pop it later after copy stack and change esp
-#if 0
- __asm__ volatile (
- "popl %eax\n\t"
+ : "cc", "memory", "%ebx", "%ecx", "%edx", "%esi", "%edi", "%ebp"
);
-#endif
-
/* We can put data to stack again */
/* only global variable sysinfo in cache need to be offset */
moje_zmeny.patch.sig
Description: Binary data
-- coreboot mailing list: [email protected] http://www.coreboot.org/mailman/listinfo/coreboot

