[PATCH 0/6] powerpc/book3e: support kexec and kdump

2012-11-15 Thread Tiejun Chen
This patchset is used to support kexec and kdump on book3e.

Tested on fsl-p5040 DS.

Tiejun Chen (6):
  powerpc/book3e: support CONFIG_RELOCATABLE
  book3e/kexec/kdump: enable kexec for kernel
  book3e/kexec/kdump: create a 1:1 TLB mapping
  book3e/kexec/kdump: introduce a kexec kernel flag
  book3e/kexec/kdump: skip ppc32 kexec specific
  book3e/kexec/kdump: redefine VIRT_PHYS_OFFSET

 arch/powerpc/Kconfig |2 +-
 arch/powerpc/include/asm/exception-64e.h |8 
 arch/powerpc/include/asm/page.h  |2 +
 arch/powerpc/include/asm/smp.h   |3 ++
 arch/powerpc/kernel/exceptions-64e.S |   15 ++-
 arch/powerpc/kernel/head_64.S|   43 +--
 arch/powerpc/kernel/machine_kexec_64.c   |6 +++
 arch/powerpc/kernel/misc_64.S|   67 +-
 arch/powerpc/lib/feature-fixups.c|7 
 arch/powerpc/platforms/85xx/smp.c|   26 
 10 files changed, 173 insertions(+), 6 deletions(-)

Tiejun
___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev


[PATCH 1/6] powerpc/book3e: support CONFIG_RELOCATABLE

2012-11-15 Thread Tiejun Chen
book3e is different from book3s since book3s includes the exception
vectors code in head_64.S, as it relies on absolute addressing
which is only possible within this compilation unit. So we have
to get that label address via the GOT.

And when booting a relocated kernel, we should reset IVPR properly again
after .relocate.

Signed-off-by: Tiejun Chen tiejun.c...@windriver.com
---
 arch/powerpc/include/asm/exception-64e.h |8 
 arch/powerpc/kernel/exceptions-64e.S |   15 ++-
 arch/powerpc/kernel/head_64.S|   22 ++
 arch/powerpc/lib/feature-fixups.c|7 +++
 4 files changed, 51 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/exception-64e.h 
b/arch/powerpc/include/asm/exception-64e.h
index 51fa43e..89e940d 100644
--- a/arch/powerpc/include/asm/exception-64e.h
+++ b/arch/powerpc/include/asm/exception-64e.h
@@ -214,10 +214,18 @@ exc_##label##_book3e:
 #define TLB_MISS_STATS_SAVE_INFO_BOLTED
 #endif
 
+#ifndef CONFIG_RELOCATABLE
 #define SET_IVOR(vector_number, vector_offset) \
li  r3,vector_offset@l; \
ori r3,r3,interrupt_base_book3e@l;  \
mtspr   SPRN_IVOR##vector_number,r3;
+#else
+#define SET_IVOR(vector_number, vector_offset) \
+   LOAD_REG_ADDR(r3,interrupt_base_book3e);\
+   rlwinm  r3,r3,0,15,0;   \
+   ori r3,r3,vector_offset@l;  \
+   mtspr   SPRN_IVOR##vector_number,r3;
+#endif
 
 #endif /* _ASM_POWERPC_EXCEPTION_64E_H */
 
diff --git a/arch/powerpc/kernel/exceptions-64e.S 
b/arch/powerpc/kernel/exceptions-64e.S
index 4e7083e..82be30b 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -1104,7 +1104,15 @@ skpinv:  addir6,r6,1 /* 
Increment */
  * r4 = MAS0 w/TLBSEL  ESEL for the temp mapping
  */
/* Now we branch the new virtual address mapped by this entry */
+#ifdef CONFIG_RELOCATABLE
+   /* We have to find out address from lr. */
+   bl  1f  /* Find our address */
+1: mflrr6
+   addir6,r6,(2f - 1b)
+   tovirt(r6,r6)
+#else
LOAD_REG_IMMEDIATE(r6,2f)
+#endif
lis r7,MSR_KERNEL@h
ori r7,r7,MSR_KERNEL@l
mtspr   SPRN_SRR0,r6
@@ -1355,9 +1363,14 @@ _GLOBAL(book3e_secondary_thread_init)
mflrr28
b   3b
 
-_STATIC(init_core_book3e)
+_GLOBAL(init_core_book3e)
/* Establish the interrupt vector base */
+#ifdef CONFIG_RELOCATABLE
+   tovirt(r2,r2)
+   LOAD_REG_ADDR(r3, interrupt_base_book3e)
+#else
LOAD_REG_IMMEDIATE(r3, interrupt_base_book3e)
+#endif
mtspr   SPRN_IVPR,r3
sync
blr
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 9e07bd0..aa7df52 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -395,12 +395,22 @@ _STATIC(__after_prom_start)
/* process relocations for the final address of the kernel */
lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */
sldir25,r25,32
+#if defined(CONFIG_PPC_BOOK3E)
+   tovirt(r26,r26) /* on booke, we already run at 
PAGE_OFFSET */
+#endif
lwz r7,__run_at_load-_stext(r26)
+#if defined(CONFIG_PPC_BOOK3E)
+   tophys(r26,r26) /* Restore for the remains. */
+#endif
cmplwi  cr0,r7,1/* flagged to stay where we are ? */
bne 1f
add r25,r25,r26
 1: mr  r3,r25
bl  .relocate
+#if defined(CONFIG_PPC_BOOK3E)
+   /* We should set ivpr again after .relocate. */
+   bl  .init_core_book3e
+#endif
 #endif
 
 /*
@@ -428,11 +438,23 @@ _STATIC(__after_prom_start)
  * variable __run_at_load, if it is set the kernel is treated as relocatable
  * kernel, otherwise it will be moved to PHYSICAL_START
  */
+#if defined(CONFIG_PPC_BOOK3E)
+   tovirt(r26,r26) /* on booke, we already run at 
PAGE_OFFSET */
+#endif
lwz r7,__run_at_load-_stext(r26)
+#if defined(CONFIG_PPC_BOOK3E)
+   tophys(r26,r26) /* Restore for the remains. */
+#endif
cmplwi  cr0,r7,1
bne 3f
 
+#ifdef CONFIG_PPC_BOOK3E
+   LOAD_REG_ADDR(r5, interrupt_end_book3e)
+   LOAD_REG_ADDR(r11, _stext)
+   sub r5,r5,r11
+#else
li  r5,__end_interrupts - _stext/* just copy interrupts */
+#endif
b   5f
 3:
 #endif
diff --git a/arch/powerpc/lib/feature-fixups.c 
b/arch/powerpc/lib/feature-fixups.c
index 7a8a748..13f20ed 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -135,13 +135,20 @@ void do_final_fixups(void)
 #if defined(CONFIG_PPC64)  defined(CONFIG_RELOCATABLE)
int *src, *dest;
unsigned long length;
+#ifdef CONFIG_PPC_BOOK3E
+   extern char interrupt_end_book3e[];
+#endif
 
if (PHYSICAL_START == 0)
return;
 
src = 

[PATCH 3/6] book3e/kexec/kdump: create a 1:1 TLB mapping

2012-11-15 Thread Tiejun Chen
book3e has no real MMU mode so we have to create a 1:1 TLB
mapping to make sure we can access the real physical address.
Also correct some things to support this pseudo real mode on book3e.

Signed-off-by: Tiejun Chen tiejun.c...@windriver.com
---
 arch/powerpc/kernel/head_64.S |9 ---
 arch/powerpc/kernel/misc_64.S |   55 -
 2 files changed, 60 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index aa7df52..d51ffc0 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -425,12 +425,12 @@ _STATIC(__after_prom_start)
tovirt(r3,r3)   /* on booke, we already run at 
PAGE_OFFSET */
 #endif
mr. r4,r26  /* In some cases the loader may  */
+#if defined(CONFIG_PPC_BOOK3E)
+   tovirt(r4,r4)
+#endif
beq 9f  /* have already put us at zero */
li  r6,0x100/* Start offset, the first 0x100 */
/* bytes were copied earlier.*/
-#ifdef CONFIG_PPC_BOOK3E
-   tovirt(r6,r6)   /* on booke, we already run at 
PAGE_OFFSET */
-#endif
 
 #ifdef CONFIG_RELOCATABLE
 /*
@@ -472,6 +472,9 @@ _STATIC(__after_prom_start)
 p_end: .llong  _end - _stext
 
 4: /* Now copy the rest of the kernel up to _end */
+#if defined(CONFIG_PPC_BOOK3E)
+   tovirt(r26,r26)
+#endif
addis   r5,r26,(p_end - _stext)@ha
ld  r5,(p_end - _stext)@l(r5)   /* get _end */
 5: bl  .copy_and_flush /* copy the rest */
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index c2acf8c..ffe6043 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -449,6 +449,49 @@ kexec_flag:
 
 
 #ifdef CONFIG_KEXEC
+#ifdef CONFIG_PPC_BOOK3E
+/* BOOK3E have no a real MMU mode so we have to setup the initial TLB
+ * for a core to map v:0 to p:0 as 1:1. This current implementation
+ * assume that 1G is enough for kexec.
+ */
+#include asm/mmu.h
+kexec_create_tlb:
+   /* Invalidate all TLBs to avoid any TLB conflict. */
+   PPC_TLBILX_ALL(0,R0)
+   sync
+   isync
+
+   mfspr   r10,SPRN_TLB1CFG
+   andi.   r10,r10,TLBnCFG_N_ENTRY /* Extract # entries */
+   subir10,r10,1   /* Often its always safe to use last */
+   lis r9,MAS0_TLBSEL(1)@h
+   rlwimi  r9,r10,16,4,15  /* Setup MAS0 = TLBSEL | ESEL(r9) */
+
+/* Setup a temp mapping v:0 to p:0 as 1:1 and return to it.
+ */
+#ifdef CONFIG_SMP
+#define M_IF_SMP   MAS2_M
+#else
+#define M_IF_SMP   0
+#endif
+   mtspr   SPRN_MAS0,r9
+
+   lis r9,(MAS1_VALID|MAS1_IPROT)@h
+   ori r9,r9,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
+   mtspr   SPRN_MAS1,r9
+
+   LOAD_REG_IMMEDIATE(r9, 0x0 | M_IF_SMP)
+   mtspr   SPRN_MAS2,r9
+
+   LOAD_REG_IMMEDIATE(r9, 0x0 | MAS3_SR | MAS3_SW | MAS3_SX)
+   mtspr   SPRN_MAS3,r9
+   li  r9,0
+   mtspr   SPRN_MAS7,r9
+
+   tlbwe
+   isync
+   blr
+#endif
 
 /* kexec_smp_wait(void)
  *
@@ -462,6 +505,10 @@ kexec_flag:
  */
 _GLOBAL(kexec_smp_wait)
lhz r3,PACAHWCPUID(r13)
+#ifdef CONFIG_PPC_BOOK3E
+   /* Create a 1:1 mapping. */
+   bl  kexec_create_tlb
+#endif
bl  real_mode
 
li  r4,KEXEC_STATE_REAL_MODE
@@ -478,6 +525,7 @@ _GLOBAL(kexec_smp_wait)
  * don't overwrite r3 here, it is live for kexec_wait above.
  */
 real_mode: /* assume normal blr return */
+#ifndef CONFIG_PPC_BOOK3E
 1: li  r9,MSR_RI
li  r10,MSR_DR|MSR_IR
mflrr11 /* return address to SRR0 */
@@ -489,7 +537,10 @@ real_mode: /* assume normal blr return */
mtspr   SPRN_SRR1,r10
mtspr   SPRN_SRR0,r11
rfid
-
+#else
+   /* the real mode is nothing for book3e. */
+   blr
+#endif
 
 /*
  * kexec_sequence(newstack, start, image, control, clear_all())
@@ -538,6 +589,8 @@ _GLOBAL(kexec_sequence)
mtmsrd  r3,1
 #else
wrteei  0
+   /* Create a 1:1 mapping. */
+   bl  kexec_create_tlb
 #endif
 
/* copy dest pages, flush whole dest image */
-- 
1.7.9.5

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev


[PATCH 4/6] book3e/kexec/kdump: introduce a kexec kernel flag

2012-11-15 Thread Tiejun Chen
We need to introduce a flag to indicate we're already running
a kexec kernel so we can take the proper path. For example, we
shouldn't access spin_table from the bootloader to bring up any
secondary cpu for the kexec kernel, since the kexec kernel already
knows how to jump to generic_secondary_smp_init.

Signed-off-by: Tiejun Chen tiejun.c...@windriver.com
---
 arch/powerpc/include/asm/smp.h|3 +++
 arch/powerpc/kernel/head_64.S |   12 
 arch/powerpc/kernel/misc_64.S |6 ++
 arch/powerpc/platforms/85xx/smp.c |   14 ++
 4 files changed, 35 insertions(+)

diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index e807e9d..aadbe9b 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -190,6 +190,9 @@ extern void generic_secondary_thread_init(void);
 extern unsigned long __secondary_hold_spinloop;
 extern unsigned long __secondary_hold_acknowledge;
 extern char __secondary_hold;
+#ifdef CONFIG_KEXEC
+extern unsigned long __run_at_kexec;
+#endif
 
 extern void __early_start(void);
 #endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index d51ffc0..9c30d9f 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -89,6 +89,12 @@ __secondary_hold_spinloop:
 __secondary_hold_acknowledge:
.llong  0x0
 
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+   .globl  __run_at_kexec
+__run_at_kexec:
+   .llong  0x0 /* Flag for the secondary kernel from kexec. */
+#endif
+
 #ifdef CONFIG_RELOCATABLE
/* This flag is set to 1 by a loader if the kernel should run
 * at the loaded address instead of the linked address.  This
@@ -441,6 +447,12 @@ _STATIC(__after_prom_start)
 #if defined(CONFIG_PPC_BOOK3E)
tovirt(r26,r26) /* on booke, we already run at 
PAGE_OFFSET */
 #endif
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+   /* If relocated we need to restore this flag on that relocated address. 
*/
+   ld  r7,__run_at_kexec-_stext(r3)
+   std r7,__run_at_kexec-_stext(r26)
+#endif
+
lwz r7,__run_at_load-_stext(r26)
 #if defined(CONFIG_PPC_BOOK3E)
tophys(r26,r26) /* Restore for the remains. */
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index ffe6043..b81f8ac 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -608,6 +608,12 @@ _GLOBAL(kexec_sequence)
bl  .copy_and_flush /* (dest, src, copy limit, start offset) */
 1: /* assume normal blr return */
 
+   /* notify we're going into kexec kernel for SMP. */
+   LOAD_REG_ADDR(r3,__run_at_kexec)
+   li  r4,1
+   std r4,0(r3)
+   sync
+
/* release other cpus to the new kernel secondary start at 0x60 */
mflrr5
li  r6,1
diff --git a/arch/powerpc/platforms/85xx/smp.c 
b/arch/powerpc/platforms/85xx/smp.c
index 6fcfa12..c7febd5 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -137,6 +137,9 @@ static int __cpuinit smp_85xx_kick_cpu(int nr)
int hw_cpu = get_hard_smp_processor_id(nr);
int ioremappable;
int ret = 0;
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+   unsigned long *ptr;
+#endif
 
	WARN_ON(nr < 0 || nr >= NR_CPUS);
	WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS);
@@ -213,6 +216,14 @@ out:
 #else
smp_generic_kick_cpu(nr);
 
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+   ptr  = (unsigned long *)((unsigned long)__run_at_kexec);
+   /* We shouldn't access spin_table from the bootloader to up any
+* secondary cpu for kexec kernel, and kexec kernel already
+* know how to jump to generic_secondary_smp_init.
+*/
+   if (!*ptr) {
+#endif
	out_be32(&spin_table->pir, hw_cpu);
	out_be64((u64 *)(&spin_table->addr_h),
  __pa((u64)*((unsigned long long *)generic_secondary_smp_init)));
@@ -220,6 +231,9 @@ out:
if (!ioremappable)
flush_dcache_range((ulong)spin_table,
(ulong)spin_table + sizeof(struct epapr_spin_table));
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+   }
+#endif
 #endif
 
local_irq_restore(flags);
-- 
1.7.9.5

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev


[PATCH 2/6] book3e/kexec/kdump: enable kexec for kernel

2012-11-15 Thread Tiejun Chen
We need to activate KEXEC for book3e and bypass or convert non-book3e stuff
in kexec coverage.

Signed-off-by: Tiejun Chen tiejun.c...@windriver.com
---
 arch/powerpc/Kconfig   |2 +-
 arch/powerpc/kernel/machine_kexec_64.c |6 ++
 arch/powerpc/kernel/misc_64.S  |6 ++
 3 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index a902a5c..3000cab8 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -357,7 +357,7 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
 
 config KEXEC
bool kexec system call (EXPERIMENTAL)
-	depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) && EXPERIMENTAL
+	depends on (PPC_BOOK3S || FSL_BOOKE || PPC_BOOK3E || (44x && !SMP)) && EXPERIMENTAL
help
  kexec is a system call that implements the ability to shutdown your
  current kernel, and to start another kernel.  It is like a reboot
diff --git a/arch/powerpc/kernel/machine_kexec_64.c 
b/arch/powerpc/kernel/machine_kexec_64.c
index d7f6090..2c0cbf0 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -32,6 +32,7 @@
 int default_machine_kexec_prepare(struct kimage *image)
 {
int i;
+#ifndef CONFIG_PPC_BOOK3E
unsigned long begin, end;   /* limits of segment */
unsigned long low, high;/* limits of blocked memory range */
struct device_node *node;
@@ -40,6 +41,7 @@ int default_machine_kexec_prepare(struct kimage *image)
 
if (!ppc_md.hpte_clear_all)
return -ENOENT;
+#endif
 
/*
 * Since we use the kernel fault handlers and paging code to
@@ -50,6 +52,7 @@ int default_machine_kexec_prepare(struct kimage *image)
if (image-segment[i].mem  __pa(_end))
return -ETXTBSY;
 
+#ifndef CONFIG_PPC_BOOK3E
/*
 * For non-LPAR, we absolutely can not overwrite the mmu hash
 * table, since we are still using the bolted entries in it to
@@ -91,6 +94,7 @@ int default_machine_kexec_prepare(struct kimage *image)
return -ETXTBSY;
}
}
+#endif
 
return 0;
 }
@@ -358,6 +362,7 @@ void default_machine_kexec(struct kimage *image)
/* NOTREACHED */
 }
 
+#ifndef CONFIG_PPC_BOOK3E
 /* Values we need to export to the second kernel via the device tree. */
 static unsigned long htab_base;
 
@@ -402,3 +407,4 @@ static int __init export_htab_values(void)
return 0;
 }
 late_initcall(export_htab_values);
+#endif
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 5cfa800..c2acf8c 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -532,9 +532,13 @@ _GLOBAL(kexec_sequence)
lhz r25,PACAHWCPUID(r13)/* get our phys cpu from paca */
 
/* disable interrupts, we are overwriting kernel data next */
+#ifndef CONFIG_PPC_BOOK3E
mfmsr   r3
rlwinm  r3,r3,0,17,15
mtmsrd  r3,1
+#else
+   wrteei  0
+#endif
 
/* copy dest pages, flush whole dest image */
mr  r3,r29
@@ -556,10 +560,12 @@ _GLOBAL(kexec_sequence)
li  r6,1
stw r6,kexec_flag-1b(5)
 
+#ifndef CONFIG_PPC_BOOK3E
/* clear out hardware hash page table and tlb */
ld  r5,0(r27)   /* deref function descriptor */
mtctr   r5
bctrl   /* ppc_md.hpte_clear_all(void); */
+#endif
 
 /*
  *   kexec image calling is:
-- 
1.7.9.5

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev


[PATCH 5/6] book3e/kexec/kdump: skip ppc32 kexec specific

2012-11-15 Thread Tiejun Chen
The ppc64 kexec mechanism has a different implementation from ppc32,
so skip those ppc32-specific parts.

Signed-off-by: Tiejun Chen tiejun.c...@windriver.com
---
 arch/powerpc/platforms/85xx/smp.c |   12 
 1 file changed, 12 insertions(+)

diff --git a/arch/powerpc/platforms/85xx/smp.c 
b/arch/powerpc/platforms/85xx/smp.c
index c7febd5..d3ec57c 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -257,6 +257,7 @@ struct smp_ops_t smp_85xx_ops = {
 };
 
 #ifdef CONFIG_KEXEC
+#ifdef CONFIG_PPC32
 atomic_t kexec_down_cpus = ATOMIC_INIT(0);
 
 void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
@@ -275,6 +276,13 @@ static void mpc85xx_smp_kexec_down(void *arg)
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(0,1);
 }
+#else
+void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
+{
+   local_irq_disable();
+   mpic_teardown_this_cpu(secondary);
+}
+#endif
 
 static void map_and_flush(unsigned long paddr)
 {
@@ -326,11 +334,14 @@ static void mpc85xx_smp_flush_dcache_kexec(struct kimage 
*image)
 
 static void mpc85xx_smp_machine_kexec(struct kimage *image)
 {
+#ifdef CONFIG_PPC32
int timeout = INT_MAX;
int i, num_cpus = num_present_cpus();
+#endif
 
mpc85xx_smp_flush_dcache_kexec(image);
 
+#ifdef CONFIG_PPC32
if (image-type == KEXEC_TYPE_DEFAULT)
smp_call_function(mpc85xx_smp_kexec_down, NULL, 0);
 
@@ -348,6 +359,7 @@ static void mpc85xx_smp_machine_kexec(struct kimage *image)
if ( i == smp_processor_id() ) continue;
mpic_reset_core(i);
}
+#endif
 
default_machine_kexec(image);
 }
-- 
1.7.9.5

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev


[PATCH 6/6] book3e/kexec/kdump: redefine VIRT_PHYS_OFFSET

2012-11-15 Thread Tiejun Chen
Book3e TLB mappings are always 1GB-aligned, so we should
use (KERNELBASE - MEMORY_START) as VIRT_PHYS_OFFSET to
get __pa/__va right while booting kdump.

Signed-off-by: Tiejun Chen tiejun.c...@windriver.com
---
 arch/powerpc/include/asm/page.h |2 ++
 1 file changed, 2 insertions(+)

diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index f072e97..2cba08a 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -112,6 +112,8 @@ extern long long virt_phys_offset;
 /* See Description below for VIRT_PHYS_OFFSET */
 #ifdef CONFIG_RELOCATABLE_PPC32
 #define VIRT_PHYS_OFFSET virt_phys_offset
+#elif defined(CONFIG_PPC_BOOK3E_64)
+#define VIRT_PHYS_OFFSET (KERNELBASE - MEMORY_START)
 #else
 #define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START)
 #endif
-- 
1.7.9.5

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev


Re: [patch 4/4] mm, oom: remove statically defined arch functions of same name

2012-11-15 Thread Kamezawa Hiroyuki

(2012/11/14 18:15), David Rientjes wrote:

out_of_memory() is a globally defined function to call the oom killer.
x86, sh, and powerpc all use a function of the same name within file
scope in their respective fault.c unnecessarily.  Inline the functions
into the pagefault handlers to clean the code up.

Cc: Ingo Molnar mi...@redhat.com
Cc: H. Peter Anvin h...@zytor.com
Cc: Thomas Gleixner t...@linutronix.de
Cc: Benjamin Herrenschmidt b...@kernel.crashing.org
Cc: Paul Mackerras pau...@samba.org
Cc: Paul Mundt let...@linux-sh.org
Signed-off-by: David Rientjes rient...@google.com


I think this is good.

Reviewed-by: KAMEZAWA Hiroyuki kamezawa.hir...@jp.fujitsu.com


___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev


Re: [PATCH] cpuidle: Measure idle state durations with monotonic clock

2012-11-15 Thread Preeti Murthy
Hi all,

The code looks correct and inviting to me as it has led to good cleanups.
I dont think passing 0 as the argument to the function
sched_clock_idle_wakeup_event()
should lead to problems,as it does not do anything useful with the
passed arguments.

My only curiosity is what the purpose was of passing idle residency time to
sched_clock_idle_wakeup_event() when this data could always be retrieved from
dev->last_residency for each cpu, which gets almost immediately updated.

But this does not seem to get in the way of this patch for now. Anyway I
have added Peter to
the list so that he can opine about this issue if possible and needed.

Reviewed-by: Preeti U Murthy pre...@linux.vnet.ibm.com


Regards
Preeti U Murthy
___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev


Re: [PATCH] cpuidle: Measure idle state durations with monotonic clock

2012-11-15 Thread Daniel Lezcano
On 11/15/2012 02:56 AM, Julius Werner wrote:
 Many cpuidle drivers measure their time spent in an idle state by
 reading the wallclock time before and after idling and calculating the
 difference. This leads to erroneous results when the wallclock time gets
 updated by another processor in the meantime, adding that clock
 adjustment to the idle state's time counter.
 
 If the clock adjustment was negative, the result is even worse due to an
 erroneous cast from int to unsigned long long of the last_residency
 variable. The negative 32 bit integer will zero-extend and result in a
 forward time jump of roughly four billion milliseconds or 1.3 hours on
 the idle state residency counter.
 
 This patch changes all affected cpuidle drivers to either use the
 monotonic clock for their measurements or make use of the generic time
 measurement wrapper in cpuidle.c, which was already working correctly.
 Some superfluous CLIs/STIs in the ACPI code are removed (interrupts
 should always already be disabled before entering the idle function, and
 not get reenabled until the generic wrapper has performed its second
 measurement). It also removes the erroneous cast, making sure that
 negative residency values are applied correctly even though they should
 not appear anymore.
 
 Signed-off-by: Julius Werner jwer...@chromium.org

Tested on a Core 2 Duo (processor_idle driver).

Tested-by: Daniel Lezcano daniel.lezc...@linaro.org
Acked-by: Daniel Lezcano daniel.lezc...@linaro.org



-- 
 http://www.linaro.org/ Linaro.org │ Open source software for ARM SoCs

Follow Linaro:  http://www.facebook.com/pages/Linaro Facebook |
http://twitter.com/#!/linaroorg Twitter |
http://www.linaro.org/linaro-blog/ Blog

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

Re: Deprecating reserve-map in favor of properties

2012-11-15 Thread Grant Likely
On Fri, 02 Nov 2012 06:41:36 +1100, Benjamin Herrenschmidt 
b...@kernel.crashing.org wrote:
 On Thu, 2012-11-01 at 15:21 +0100, Grant Likely wrote:
 
  I think this makes sense. Cyril and I are just talking about what he
  needs. He wants to set aside per-device reserved regions and would
  like to have the ability to reference a particular reserved region
  from a device node, probably with a phandle. I like the look of the
  reserved-{ranges,names} properties in the root, but I see the argument
  that it isn't very flexible. What about something like this:
  
  reserved-memory {
  reserved@0x1000 { reg = 0x1000 0x0800; };
  reserved@0x0100 { reg = 0x0100 0x0020; };
  }
  
  The node name of the child nodes could be different of course.
 
 I'm not that fan of different nodes, especially nodes with nodes in them
 for that purpose. Seems overkill.
 
 Can't he reference reserved entries as phandle,index pairs ?

That would work too.

 I still think a single property would do fine. We could mandate those be
 in the respective memory nodes, but then you potentially have to break
 up reserved regions if you have multiple memory nodes (NUMA) etc...

It makes sense to me for the reserved ranges to be kept with the memory
nodes themselves, even if it does mean that sometimes they need to be
split up for multiple memory nodes.

  Right, that would work also even though I prefer phandle references in
  general. Is it conceivable that additional data would want to be
  attached to a particular reserved region?
 
 phandle references and names aren't exclusive from each other. The name
 remains a useful diagnostic tool.
 
 If you want additional data, make a node somewhere to represent that
 region along with its additional data, that node can have a reference to
 the reserved map entry.
 
 I'd keep the reserve map itself simple. It's a synthetic property a
 bit like the memory nodes. IE. The memory nodes don't have to
 represent physical memory controllers  DIMMs. They represent the
 overall view of memory by the CPUs. You can represent the MCs and the
 DIMMs elsewhere in the SoC node.

okay.

g.

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev


Re: [PATCH] bindings: i2c: use consistent naming for i2c binding descriptions

2012-11-15 Thread Grant Likely
On Tue, 13 Nov 2012 18:16:43 +0100, Wolfram Sang wolf...@the-dreams.de wrote:
 Filenames of devictree binding documentation seems to be arbitrary and
 for me it is unneeded hazzle to find the corresponding documentation for
 a specific driver.
 
 Naming the description the same as the driver is a lot easier and makes
 sense to me since the driver defines the binding it understands.
 
 Also, remove a reference in one source to the binding documentation, since 
 path
 information easily gets stale.
 
 Signed-off-by: Wolfram Sang wolf...@the-dreams.de
 Cc: Rob Herring robherri...@gmail.com
 Cc: Grant Likely grant.lik...@secretlab.ca

Applied, thanks.

g.

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev


Re: PROBLEM: Linux 3.6.2 fails to boot on IBM Cell

2012-11-15 Thread Dennis Schridde
Hello again!

Am Donnerstag, 25. Oktober 2012, 21:33:41 schrieb Dennis Schridde:
 Am Freitag, 19. Oktober 2012, 09:04:08 schrieb Dennis Schridde:
  Am Freitag, 19. Oktober 2012, 00:17:55 schrieb Grant Likely:
   What does the boot log look like with the attached patch? (compiled
   only, I haven't booted with it)
 
  Please find the log attached.

 Have you found the cause or a fix for the problem?
It's been a while — are you still working on this? Is there a patch I could
try?

Best regards,
Dennis

signature.asc
Description: This is a digitally signed message part.
___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

Re: PROBLEM: Linux 3.6.2 fails to boot on IBM Cell

2012-11-15 Thread Grant Likely
On Thu, 25 Oct 2012 21:33:41 +0200, Dennis Schridde devuran...@gmx.net wrote:
 Hello everyone!
 
 Am Freitag, 19. Oktober 2012, 09:04:08 schrieb Dennis Schridde:
  Am Freitag, 19. Oktober 2012, 00:17:55 schrieb Grant Likely:
   What does the boot log look like with the attached patch? (compiled
   only, I haven't booted with it)
  
  Please find the log attached.
 Have you found the cause or a fix for the problem?

Umm, no. Some suggestions were made so I assumed you'd try those.

Anyway, here is a real patch. Try this:

g.

diff --git a/arch/powerpc/platforms/cell/pmu.c 
b/arch/powerpc/platforms/cell/pmu.c
index 59c1a16..348a27b 100644
--- a/arch/powerpc/platforms/cell/pmu.c
+++ b/arch/powerpc/platforms/cell/pmu.c
@@ -382,7 +382,7 @@ static int __init cbe_init_pm_irq(void)
unsigned int irq;
int rc, node;
 
-   for_each_node(node) {
+   for_each_online_node(node) {
irq = irq_create_mapping(NULL, IIC_IRQ_IOEX_PMI |
   (node  IIC_IRQ_NODE_SHIFT));
if (irq == NO_IRQ) {

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev


Re: [PATCH RFT RESEND linux-next] powerpc: dma-mapping: support debug_dma_mapping_error

2012-11-15 Thread Shuah Khan
On Fri, 2012-10-26 at 10:08 -0600, Shuah Khan wrote:
 Add support for debug_dma_mapping_error() call to avoid warning from
 debug_dma_unmap() interface when it checks for mapping error checked
 status. Without this patch, device driver failed to check map error
 warning is generated.
 
 Signed-off-by: Shuah Khan shuah.k...@hp.com
 ---
  arch/powerpc/include/asm/dma-mapping.h |1 +
  1 file changed, 1 insertion(+)
 
 diff --git a/arch/powerpc/include/asm/dma-mapping.h 
 b/arch/powerpc/include/asm/dma-mapping.h
 index 7816087..e27e9ad 100644
 --- a/arch/powerpc/include/asm/dma-mapping.h
 +++ b/arch/powerpc/include/asm/dma-mapping.h
 @@ -172,6 +172,7 @@ static inline int dma_mapping_error(struct device *dev, 
 dma_addr_t dma_addr)
  {
   struct dma_map_ops *dma_ops = get_dma_ops(dev);
  
 + debug_dma_mapping_error(dev, dma_addr);
   if (dma_ops-mapping_error)
   return dma_ops-mapping_error(dev, dma_addr);
  

Marek,

This one is for powerpc to go through your tree with the other arch
debug_dma_mapping_error() patches.

Thanks,
-- Shuah

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev