Re: [PATCH] Added support for PRTLVT based boards (MPC5121)

2008-06-23 Thread David Jander
On Friday 20 June 2008 16:36:20 you wrote:
 I have a set of patches that I will be submitting later today that
 adds the generic board support without removing ADS.  So I would
 prefer for you to just submit a device tree file for your board.

Ok, thanks. I'll check your patches, fix our DT and resubmit that one.

Greetings,

-- 
David Jander
Protonic Holland.


[PATCH 1/9] powerpc: Fix msr setting in 32 bit signal code

2008-06-23 Thread Michael Neuling
If we set the SPE MSR bit in save_user_regs we can blow away the VEC
bit.  This will never happen in reality (VMX and SPE will never be in
the same processor as their opcodes overlap), but it looks bad.  Also
when we add VSX here in a later patch, we can hit two of these at the
same time.  

Signed-off-by: Michael Neuling [EMAIL PROTECTED]
---

 arch/powerpc/kernel/signal_32.c |   10 ++
 1 file changed, 6 insertions(+), 4 deletions(-)

Index: linux-2.6-ozlabs/arch/powerpc/kernel/signal_32.c
===
--- linux-2.6-ozlabs.orig/arch/powerpc/kernel/signal_32.c
+++ linux-2.6-ozlabs/arch/powerpc/kernel/signal_32.c
@@ -336,6 +336,8 @@ struct rt_sigframe {
 static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
int sigret)
 {
+   unsigned long msr = regs->msr;
+
/* Make sure floating point registers are stored in regs */
flush_fp_to_thread(current);
 
@@ -354,8 +356,7 @@ static int save_user_regs(struct pt_regs
return 1;
/* set MSR_VEC in the saved MSR value to indicate that
   frame->mc_vregs contains valid data */
-   if (__put_user(regs->msr | MSR_VEC, &frame->mc_gregs[PT_MSR]))
-   return 1;
+   msr |= MSR_VEC;
}
/* else assert((regs->msr & MSR_VEC) == 0) */
 
@@ -377,8 +378,7 @@ static int save_user_regs(struct pt_regs
return 1;
/* set MSR_SPE in the saved MSR value to indicate that
   frame->mc_vregs contains valid data */
-   if (__put_user(regs->msr | MSR_SPE, &frame->mc_gregs[PT_MSR]))
-   return 1;
+   msr |= MSR_SPE;
}
/* else assert((regs->msr & MSR_SPE) == 0) */
 
@@ -387,6 +387,8 @@ static int save_user_regs(struct pt_regs
return 1;
 #endif /* CONFIG_SPE */
 
+   if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
+   return 1;
if (sigret) {
/* Set up the sigreturn trampoline: li r0,sigret; sc */
if (__put_user(0x38000000UL + sigret, &frame->tramp[0])


[PATCH 0/9] powerpc: Add kernel support for POWER7 VSX.

2008-06-23 Thread Michael Neuling
The following set of patches adds Vector Scalar Extensions (VSX)
support for POWER7.  It includes context switch, ptrace and signal support.

Signed-off-by: Michael Neuling [EMAIL PROTECTED]
--- 
Paulus: please consider for your 2.6.27 tree.

Updates in this post:
- Fixed ptrace32 error noticed by paulus.
- Fixed calling of load_up_altivec in head_64.S, also noticed by paulus.



[PATCH 2/9] powerpc: Add macros to access floating point registers in thread_struct.

2008-06-23 Thread Michael Neuling
We are going to change where the floating point registers are stored
in the thread_struct, so in preparation add some macros to access the
floating point registers.  Update all code to use these new macros.

Signed-off-by: Michael Neuling [EMAIL PROTECTED]
---
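For readers following along, a minimal sketch of what such an accessor can
look like before the layout changes (illustrative only; the real definition
is in the processor.h hunk of this series, the macro name below just mirrors
the diff):

/*
 * Illustrative sketch, not the literal patch: with a plain FPR array the
 * accessor is a simple index, so callers can already be converted from
 * current->thread.fpr[i] to current->thread.TS_FPR(i).  A later patch can
 * then redefine TS_FPR() when the storage grows to hold VSX state, without
 * touching the callers again.
 */
#define TS_FPR(i)	fpr[i]

/* example use, identical in behaviour to the old open-coded access: */
/*	double d = current->thread.TS_FPR(reg);	*/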

 arch/powerpc/kernel/align.c  |6 ++--
 arch/powerpc/kernel/process.c|5 ++-
 arch/powerpc/kernel/ptrace.c |   14 +
 arch/powerpc/kernel/ptrace32.c   |   14 +++--
 arch/powerpc/kernel/softemu8xx.c |4 +-
 arch/powerpc/math-emu/math.c |   56 +++
 include/asm-powerpc/ppc_asm.h|5 ++-
 include/asm-powerpc/processor.h  |3 ++
 8 files changed, 61 insertions(+), 46 deletions(-)

Index: linux-2.6-ozlabs/arch/powerpc/kernel/align.c
===
--- linux-2.6-ozlabs.orig/arch/powerpc/kernel/align.c
+++ linux-2.6-ozlabs/arch/powerpc/kernel/align.c
@@ -366,7 +366,7 @@ static int emulate_multiple(struct pt_re
 static int emulate_fp_pair(struct pt_regs *regs, unsigned char __user *addr,
   unsigned int reg, unsigned int flags)
 {
-   char *ptr = (char *) &current->thread.fpr[reg];
+   char *ptr = (char *) &current->thread.TS_FPR(reg);
	int i, ret;
 
	if (!(flags & F))
@@ -784,7 +784,7 @@ int fix_alignment(struct pt_regs *regs)
return -EFAULT;
}
	} else if (flags & F) {
-   data.dd = current->thread.fpr[reg];
+   data.dd = current->thread.TS_FPR(reg);
	if (flags & S) {
/* Single-precision FP store requires conversion... */
 #ifdef CONFIG_PPC_FPU
@@ -862,7 +862,7 @@ int fix_alignment(struct pt_regs *regs)
if (unlikely(ret))
return -EFAULT;
	} else if (flags & F)
-   current->thread.fpr[reg] = data.dd;
+   current->thread.TS_FPR(reg) = data.dd;
	else
	regs->gpr[reg] = data.ll;
 
Index: linux-2.6-ozlabs/arch/powerpc/kernel/process.c
===
--- linux-2.6-ozlabs.orig/arch/powerpc/kernel/process.c
+++ linux-2.6-ozlabs/arch/powerpc/kernel/process.c
@@ -110,7 +110,7 @@ int dump_task_fpu(struct task_struct *ts
return 0;
flush_fp_to_thread(current);
 
-   memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
+   memcpy(fpregs, &tsk->thread.TS_FPR(0), sizeof(*fpregs));
 
return 1;
 }
@@ -689,7 +689,8 @@ void start_thread(struct pt_regs *regs, 
 #endif
 
discard_lazy_cpu_state();
-   memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
+   memset(current->thread.fpr, 0,
+  sizeof(current->thread.fpr));
	current->thread.fpscr.val = 0;
 #ifdef CONFIG_ALTIVEC
	memset(current->thread.vr, 0, sizeof(current->thread.vr));
Index: linux-2.6-ozlabs/arch/powerpc/kernel/ptrace.c
===
--- linux-2.6-ozlabs.orig/arch/powerpc/kernel/ptrace.c
+++ linux-2.6-ozlabs/arch/powerpc/kernel/ptrace.c
@@ -218,10 +218,10 @@ static int fpr_get(struct task_struct *t
flush_fp_to_thread(target);
 
BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
-offsetof(struct thread_struct, fpr[32]));
+offsetof(struct thread_struct, TS_FPR(32)));
 
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-  target->thread.fpr, 0, -1);
+  target->thread.fpr, 0, -1);
 }
 
 static int fpr_set(struct task_struct *target, const struct user_regset 
*regset,
@@ -231,10 +231,10 @@ static int fpr_set(struct task_struct *t
flush_fp_to_thread(target);
 
BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
-offsetof(struct thread_struct, fpr[32]));
+offsetof(struct thread_struct, TS_FPR(32)));
 
	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- target->thread.fpr, 0, -1);
+ target->thread.fpr, 0, -1);
 }
 
 
@@ -728,7 +728,8 @@ long arch_ptrace(struct task_struct *chi
tmp = ptrace_get_reg(child, (int) index);
} else {
flush_fp_to_thread(child);
-   tmp = ((unsigned long *)child->thread.fpr)[index - PT_FPR0];
+   tmp = ((unsigned long *)child->thread.fpr)
+   [TS_FPRSPACING * (index - PT_FPR0)];
}
ret = put_user(tmp,(unsigned long __user *) data);
break;
@@ -755,7 +756,8 @@ long arch_ptrace(struct task_struct *chi
ret = ptrace_put_reg(child, index, data);
} else {
flush_fp_to_thread(child);
-   ((unsigned long *)child->thread.fpr)[index - PT_FPR0] = 

[PATCH 5/9] powerpc: Introduce VSX thread_struct and CONFIG_VSX

2008-06-23 Thread Michael Neuling
The layout of the new VSR registers and how they overlap on top of the
legacy FPR and VR registers is:

           VSR doubleword 0               VSR doubleword 1
          ----------------------------------------------------------------
  VSR[0]  |             FPR[0]            |                              |
          ----------------------------------------------------------------
  VSR[1]  |             FPR[1]            |                              |
          ----------------------------------------------------------------
          |              ...              |                              |
          |              ...              |                              |
          ----------------------------------------------------------------
  VSR[30] |             FPR[30]           |                              |
          ----------------------------------------------------------------
  VSR[31] |             FPR[31]           |                              |
          ----------------------------------------------------------------
  VSR[32] |                             VR[0]                            |
          ----------------------------------------------------------------
  VSR[33] |                             VR[1]                            |
          ----------------------------------------------------------------
          |                              ...                             |
          |                              ...                             |
          ----------------------------------------------------------------
  VSR[62] |                             VR[30]                           |
          ----------------------------------------------------------------
  VSR[63] |                             VR[31]                           |
          ----------------------------------------------------------------

VSX has 64 128-bit registers.  The first 32 registers overlap with the FP
registers and hence extend them with an additional 64 bits.  The
second 32 registers overlap with the VMX registers.

This patch introduces the thread_struct changes required to reflect
this register layout.  Ptrace and signals code is updated so that the
floating point registers are correctly accessed from the thread_struct
when CONFIG_VSX is enabled.

Signed-off-by: Michael Neuling [EMAIL PROTECTED]
---
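As an illustration of the layout above, a sketch under the assumption that
the FPR slots are simply widened (the field and macro names here are
placeholders, not necessarily the ones the patch uses):

#ifdef CONFIG_VSX
#define TS_FPRWIDTH	2	/* each FP slot carries 2 doublewords */
#else
#define TS_FPRWIDTH	1
#endif
#define TS_FPROFFSET	0	/* doubleword 0 is the classic FPR */

struct thread_fp_state_sketch {
	/*
	 * fpr[i][0] is FPR[i] (VSR[i] doubleword 0); with CONFIG_VSX,
	 * fpr[i][1] is VSR[i] doubleword 1.  VR[0-31] stay in the existing
	 * Altivec save area and provide VSR[32-63].
	 */
	double fpr[32][TS_FPRWIDTH];
};

#define TS_FPR(i)	fpr[i][TS_FPROFFSET]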

 arch/powerpc/kernel/asm-offsets.c |4 ++
 arch/powerpc/kernel/ptrace.c  |   28 ++
 arch/powerpc/kernel/signal_32.c   |   59 --
 arch/powerpc/kernel/signal_64.c   |   32 ++--
 include/asm-powerpc/processor.h   |   21 -
 5 files changed, 126 insertions(+), 18 deletions(-)

Index: linux-2.6-ozlabs/arch/powerpc/kernel/asm-offsets.c
===
--- linux-2.6-ozlabs.orig/arch/powerpc/kernel/asm-offsets.c
+++ linux-2.6-ozlabs/arch/powerpc/kernel/asm-offsets.c
@@ -74,6 +74,10 @@ int main(void)
DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr));
DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr));
 #endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_VSX
+   DEFINE(THREAD_VSR0, offsetof(struct thread_struct, fpr));
+   DEFINE(THREAD_USED_VSR, offsetof(struct thread_struct, used_vsr));
+#endif /* CONFIG_VSX */
 #ifdef CONFIG_PPC64
DEFINE(KSP_VSID, offsetof(struct thread_struct, ksp_vsid));
 #else /* CONFIG_PPC64 */
Index: linux-2.6-ozlabs/arch/powerpc/kernel/ptrace.c
===
--- linux-2.6-ozlabs.orig/arch/powerpc/kernel/ptrace.c
+++ linux-2.6-ozlabs/arch/powerpc/kernel/ptrace.c
@@ -215,26 +215,54 @@ static int fpr_get(struct task_struct *t
   unsigned int pos, unsigned int count,
   void *kbuf, void __user *ubuf)
 {
+#ifdef CONFIG_VSX
+   double buf[33];
+   int i;
+#endif
flush_fp_to_thread(target);
 
+#ifdef CONFIG_VSX
+   /* copy to local buffer then write that out */
+   for (i = 0; i < 32 ; i++)
+   buf[i] = target->thread.TS_FPR(i);
+   memcpy(&buf[32], &target->thread.fpscr, sizeof(double));
+   return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
+
+#else
BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
 offsetof(struct thread_struct, TS_FPR(32)));
 
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
   target->thread.fpr, 0, -1);
+#endif
 }
 
 static int fpr_set(struct task_struct *target, const struct user_regset 
*regset,
   unsigned int pos, unsigned int count,
   const void *kbuf, const void __user *ubuf)
 {
+#ifdef CONFIG_VSX
+   double buf[33];
+   int i;
+#endif
flush_fp_to_thread(target);
 
+#ifdef CONFIG_VSX
+   /* copy to local buffer then write that out */
+   i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
+ 

[PATCH 6/9] powerpc: Add VSX CPU feature

2008-06-23 Thread Michael Neuling
Add a VSX CPU feature.  Also add code to detect if VSX is available
from the device tree.

Signed-off-by: Michael Neuling [EMAIL PROTECTED]
Signed-off-by: Joel Schopp [EMAIL PROTECTED]

---

 arch/powerpc/kernel/prom.c |4 
 include/asm-powerpc/cputable.h |   15 ++-
 2 files changed, 18 insertions(+), 1 deletion(-)

Index: linux-2.6-ozlabs/arch/powerpc/kernel/prom.c
===
--- linux-2.6-ozlabs.orig/arch/powerpc/kernel/prom.c
+++ linux-2.6-ozlabs/arch/powerpc/kernel/prom.c
@@ -609,6 +609,10 @@ static struct feature_property {
	{"altivec", 0, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
	{"ibm,vmx", 1, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
 #endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_VSX
+   /* Yes, this _really_ is ibm,vmx == 2 to enable VSX */
+   {"ibm,vmx", 2, CPU_FTR_VSX, PPC_FEATURE_HAS_VSX},
+#endif /* CONFIG_VSX */
 #ifdef CONFIG_PPC64
	{"ibm,dfp", 1, 0, PPC_FEATURE_HAS_DFP},
	{"ibm,purr", 1, CPU_FTR_PURR, 0},
Index: linux-2.6-ozlabs/include/asm-powerpc/cputable.h
===
--- linux-2.6-ozlabs.orig/include/asm-powerpc/cputable.h
+++ linux-2.6-ozlabs/include/asm-powerpc/cputable.h
@@ -27,6 +27,7 @@
 #define PPC_FEATURE_HAS_DFP		0x00000400
 #define PPC_FEATURE_POWER6_EXT		0x00000200
 #define PPC_FEATURE_ARCH_2_06		0x00000100
+#define PPC_FEATURE_HAS_VSX		0x00000080
 
 #define PPC_FEATURE_TRUE_LE		0x00000002
 #define PPC_FEATURE_PPC_LE		0x00000001
@@ -181,6 +182,7 @@ extern void do_feature_fixups(unsigned l
 #define CPU_FTR_DSCR			LONG_ASM_CONST(0x0002000000000000)
 #define CPU_FTR_1T_SEGMENT		LONG_ASM_CONST(0x0004000000000000)
 #define CPU_FTR_NO_SLBIE_B		LONG_ASM_CONST(0x0008000000000000)
+#define CPU_FTR_VSX			LONG_ASM_CONST(0x0010000000000000)
 
 #ifndef __ASSEMBLY__
 
@@ -199,6 +201,17 @@ extern void do_feature_fixups(unsigned l
 #define PPC_FEATURE_HAS_ALTIVEC_COMP0
 #endif
 
+/* We only set the VSX features if the kernel was compiled with VSX
+ * support
+ */
+#ifdef CONFIG_VSX
+#define CPU_FTR_VSX_COMP   CPU_FTR_VSX
+#define PPC_FEATURE_HAS_VSX_COMP PPC_FEATURE_HAS_VSX
+#else
+#define CPU_FTR_VSX_COMP   0
+#define PPC_FEATURE_HAS_VSX_COMP0
+#endif
+
 /* We only set the spe features if the kernel was compiled with spe
  * support
  */
@@ -399,7 +412,7 @@ extern void do_feature_fixups(unsigned l
(CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 |\
CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 |   \
CPU_FTRS_POWER7 | CPU_FTRS_CELL | CPU_FTRS_PA6T |   \
-   CPU_FTR_1T_SEGMENT)
+   CPU_FTR_1T_SEGMENT | CPU_FTR_VSX)
 #else
 enum {
CPU_FTRS_POSSIBLE =


[PATCH 8/9] powerpc: Add VSX context save/restore, ptrace and signal support

2008-06-23 Thread Michael Neuling
This patch extends the floating point save and restore code to use the
VSX load/stores when VSX is available.  This will make FP context
save/restore marginally slower on FP-only code, when VSX is available,
as it has to load/store 128 bits rather than just 64 bits.

Mixing FP, VMX and VSX code will see consistent architected state.

The signals interface is extended to enable access to VSR 0-31
doubleword 1 after discussions with toolchain maintainers.  Backward
compatibility is maintained.

The ptrace interface is also extended to allow access to the full
VSR 0-31 registers.

Signed-off-by: Michael Neuling [EMAIL PROTECTED]
---

 arch/powerpc/kernel/entry_64.S   |5 +
 arch/powerpc/kernel/fpu.S|   16 -
 arch/powerpc/kernel/head_64.S|   65 +++
 arch/powerpc/kernel/misc_64.S|   33 
 arch/powerpc/kernel/ppc32.h  |1 
 arch/powerpc/kernel/ppc_ksyms.c  |3 +
 arch/powerpc/kernel/process.c|  106 ++-
 arch/powerpc/kernel/ptrace.c |   70 +
 arch/powerpc/kernel/signal_32.c  |   33 
 arch/powerpc/kernel/signal_64.c  |   31 +++
 arch/powerpc/kernel/traps.c  |   29 ++
 include/asm-powerpc/elf.h|6 +-
 include/asm-powerpc/ptrace.h |   12 
 include/asm-powerpc/reg.h|2 
 include/asm-powerpc/sigcontext.h |   37 +
 include/asm-powerpc/system.h |9 +++
 include/linux/elf.h  |1 
 17 files changed, 451 insertions(+), 8 deletions(-)

Index: linux-2.6-ozlabs/arch/powerpc/kernel/entry_64.S
===
--- linux-2.6-ozlabs.orig/arch/powerpc/kernel/entry_64.S
+++ linux-2.6-ozlabs/arch/powerpc/kernel/entry_64.S
@@ -353,6 +353,11 @@ _GLOBAL(_switch)
	mflr	r20		/* Return to switch caller */
mfmsr   r22
li  r0, MSR_FP
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+	oris	r0,r0,MSR_VSX@h		/* Disable VSX */
+END_FTR_SECTION_IFSET(CPU_FTR_VSX)
+#endif /* CONFIG_VSX */
 #ifdef CONFIG_ALTIVEC
 BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
Index: linux-2.6-ozlabs/arch/powerpc/kernel/fpu.S
===
--- linux-2.6-ozlabs.orig/arch/powerpc/kernel/fpu.S
+++ linux-2.6-ozlabs/arch/powerpc/kernel/fpu.S
@@ -34,6 +34,11 @@
 _GLOBAL(load_up_fpu)
mfmsr   r5
ori r5,r5,MSR_FP
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+	oris	r5,r5,MSR_VSX@h
+END_FTR_SECTION_IFSET(CPU_FTR_VSX)
+#endif
SYNC
MTMSRD(r5)  /* enable use of fpu now */
isync
@@ -50,7 +55,7 @@ _GLOBAL(load_up_fpu)
beq 1f
toreal(r4)
	addi	r4,r4,THREAD		/* want last_task_used_math->thread */
-   SAVE_32FPRS(0, r4)
+   SAVE_32FPVSRS(0, r5, r4)
mffsfr0
stfdfr0,THREAD_FPSCR(r4)
PPC_LL  r5,PT_REGS(r4)
@@ -77,7 +82,7 @@ _GLOBAL(load_up_fpu)
 #endif
lfd fr0,THREAD_FPSCR(r5)
MTFSF_L(fr0)
-   REST_32FPRS(0, r5)
+   REST_32FPVSRS(0, r4, r5)
 #ifndef CONFIG_SMP
subir4,r5,THREAD
fromreal(r4)
@@ -96,6 +101,11 @@ _GLOBAL(load_up_fpu)
 _GLOBAL(giveup_fpu)
mfmsr   r5
ori r5,r5,MSR_FP
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+	oris	r5,r5,MSR_VSX@h
+END_FTR_SECTION_IFSET(CPU_FTR_VSX)
+#endif
SYNC_601
ISYNC_601
MTMSRD(r5)  /* enable use of fpu now */
@@ -106,7 +116,7 @@ _GLOBAL(giveup_fpu)
addir3,r3,THREAD/* want THREAD of task */
PPC_LL  r5,PT_REGS(r3)
PPC_LCMPI   0,r5,0
-   SAVE_32FPRS(0, r3)
+   SAVE_32FPVSRS(0, r4 ,r3)
mffsfr0
stfdfr0,THREAD_FPSCR(r3)
beq 1f
Index: linux-2.6-ozlabs/arch/powerpc/kernel/head_64.S
===
--- linux-2.6-ozlabs.orig/arch/powerpc/kernel/head_64.S
+++ linux-2.6-ozlabs/arch/powerpc/kernel/head_64.S
@@ -278,6 +278,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
. = 0xf20
b   altivec_unavailable_pSeries
 
+   . = 0xf40
+   b   vsx_unavailable_pSeries
+
 #ifdef CONFIG_CBE_RAS
HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
 #endif /* CONFIG_CBE_RAS */
@@ -297,6 +300,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
/* moved from 0xf00 */
STD_EXCEPTION_PSERIES(., performance_monitor)
STD_EXCEPTION_PSERIES(., altivec_unavailable)
+   STD_EXCEPTION_PSERIES(., vsx_unavailable)
 
 /*
  * An interrupt came in while soft-disabled; clear EE in SRR1,
@@ -836,6 +840,67 @@ _STATIC(load_up_altivec)
blr
 #endif /* CONFIG_ALTIVEC */
 
+   .align  7
+   .globl vsx_unavailable_common
+vsx_unavailable_common:
+   EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
+#ifdef CONFIG_VSX

[PATCH 9/9] powerpc: Add CONFIG_VSX config option

2008-06-23 Thread Michael Neuling
Add the CONFIG_VSX build option.  It depends on POWER4, FPU and ALTIVEC support.

Signed-off-by: Michael Neuling [EMAIL PROTECTED]
---

 arch/powerpc/platforms/Kconfig.cputype |   16 
 1 file changed, 16 insertions(+)

Index: linux-2.6-ozlabs/arch/powerpc/platforms/Kconfig.cputype
===
--- linux-2.6-ozlabs.orig/arch/powerpc/platforms/Kconfig.cputype
+++ linux-2.6-ozlabs/arch/powerpc/platforms/Kconfig.cputype
@@ -155,6 +155,22 @@ config ALTIVEC
 
  If in doubt, say Y here.
 
+config VSX
+	bool "VSX Support"
+	depends on POWER4 && ALTIVEC && PPC_FPU
+	---help---
+
+	  This option enables kernel support for the Vector Scalar extensions
+	  to the PowerPC processor.  The kernel currently supports saving and
+	  restoring VSX registers, and turning on the 'VSX enable' bit so user
+	  processes can execute VSX instructions.
+
+	  This option is only useful if you have a processor that supports
+	  VSX (POWER7 and above), but it has no effect on non-VSX CPUs
+	  (it does, however, add code to the kernel).
+
+	  If in doubt, say Y here.
+
 config SPE
	bool "SPE Support"
depends on E200 || E500


[PATCH 4/9] powerpc: Make load_up_fpu and load_up_altivec callable

2008-06-23 Thread Michael Neuling
Make load_up_fpu and load_up_altivec callable so they can be reused by
the VSX code.  

Signed-off-by: Michael Neuling [EMAIL PROTECTED]
---

 arch/powerpc/kernel/fpu.S|2 +-
 arch/powerpc/kernel/head_32.S|6 --
 arch/powerpc/kernel/head_64.S|   10 +++---
 arch/powerpc/kernel/head_booke.h |6 --
 4 files changed, 16 insertions(+), 8 deletions(-)

Index: linux-2.6-ozlabs/arch/powerpc/kernel/fpu.S
===
--- linux-2.6-ozlabs.orig/arch/powerpc/kernel/fpu.S
+++ linux-2.6-ozlabs/arch/powerpc/kernel/fpu.S
@@ -85,7 +85,7 @@ _GLOBAL(load_up_fpu)
 #endif /* CONFIG_SMP */
/* restore registers and return */
/* we haven't used ctr or xer or lr */
-   b   fast_exception_return
+   blr
 
 /*
  * giveup_fpu(tsk)
Index: linux-2.6-ozlabs/arch/powerpc/kernel/head_32.S
===
--- linux-2.6-ozlabs.orig/arch/powerpc/kernel/head_32.S
+++ linux-2.6-ozlabs/arch/powerpc/kernel/head_32.S
@@ -421,8 +421,10 @@ BEGIN_FTR_SECTION
b   ProgramCheck
 END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
EXCEPTION_PROLOG
-   bne load_up_fpu /* if from user, just load it up */
-   addir3,r1,STACK_FRAME_OVERHEAD
+   beq 1f
+   bl  load_up_fpu /* if from user, just load it up */
+   b   fast_exception_return
+1: addir3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_EE_LITE(0x800, kernel_fp_unavailable_exception)
 
 /* Decrementer */
Index: linux-2.6-ozlabs/arch/powerpc/kernel/head_64.S
===
--- linux-2.6-ozlabs.orig/arch/powerpc/kernel/head_64.S
+++ linux-2.6-ozlabs/arch/powerpc/kernel/head_64.S
@@ -741,7 +741,8 @@ fp_unavailable_common:
ENABLE_INTS
bl  .kernel_fp_unavailable_exception
BUG_OPCODE
-1: b   .load_up_fpu
+1: bl  .load_up_fpu
+   b   fast_exception_return
 
.align  7
.globl altivec_unavailable_common
@@ -749,7 +750,10 @@ altivec_unavailable_common:
EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
 #ifdef CONFIG_ALTIVEC
 BEGIN_FTR_SECTION
-   bne .load_up_altivec/* if from user, just load it up */
+   beq 1f
+   bl  .load_up_altivec
+   b   fast_exception_return
+1:
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #endif
bl  .save_nvgprs
@@ -829,7 +833,7 @@ _STATIC(load_up_altivec)
std r4,0(r3)
 #endif /* CONFIG_SMP */
/* restore registers and return */
-   b   fast_exception_return
+   blr
 #endif /* CONFIG_ALTIVEC */
 
 /*
Index: linux-2.6-ozlabs/arch/powerpc/kernel/head_booke.h
===
--- linux-2.6-ozlabs.orig/arch/powerpc/kernel/head_booke.h
+++ linux-2.6-ozlabs/arch/powerpc/kernel/head_booke.h
@@ -363,8 +363,10 @@ label:
 #define FP_UNAVAILABLE_EXCEPTION \
START_EXCEPTION(FloatingPointUnavailable) \
NORMAL_EXCEPTION_PROLOG;  \
-   bne load_up_fpu;/* if from user, just load it up */   \
-   addir3,r1,STACK_FRAME_OVERHEAD;   \
+   beq 1f;   \
+   bl  load_up_fpu;/* if from user, just load it up */   \
+   b   fast_exception_return;\
+1: addir3,r1,STACK_FRAME_OVERHEAD;   \
EXC_XFER_EE_LITE(0x800, kernel_fp_unavailable_exception)
 
 #endif /* __HEAD_BOOKE_H__ */


[PATCH 7/9] powerpc: Add VSX assembler code macros

2008-06-23 Thread Michael Neuling
This adds the macros for the VSX load/store instructions, as most
binutils versions are not going to support them for a while.

Also add VSX register save/restore macros and vsr[0-63] register definitions.

Signed-off-by: Michael Neuling [EMAIL PROTECTED]
---
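Since the instructions are emitted as raw .long words, a quick way to
sanity-check the XX1 encoding is to compute it in userspace first.  This is a
hedged sketch only: it simply mirrors the VSX_XX1/STXVD2X/LXVD2X macros from
the diff below (the 6-bit XS field is split, bits 0-4 in the usual RT
position, bit 5 in the low opcode bit), and the example register numbers are
arbitrary.

#include <stdint.h>
#include <stdio.h>

/* Mirror of the patch's VSX_XX1 form, for checking the encoding only. */
static uint32_t vsx_xx1(unsigned int xs, unsigned int ra, unsigned int rb)
{
	return ((xs & 0x1f) << 21) | (ra << 16) | (rb << 11) | (xs >> 5);
}

int main(void)
{
	/* e.g. stxvd2x vs63,r5,r3 and lxvd2x vs0,r5,r3 */
	printf("stxvd2x: 0x%08x\n", 0x7c000798 | vsx_xx1(63, 5, 3));
	printf("lxvd2x:  0x%08x\n", 0x7c000698 | vsx_xx1(0, 5, 3));
	return 0;
}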

 include/asm-powerpc/ppc_asm.h |  127 ++
 1 file changed, 127 insertions(+)

Index: linux-2.6-ozlabs/include/asm-powerpc/ppc_asm.h
===
--- linux-2.6-ozlabs.orig/include/asm-powerpc/ppc_asm.h
+++ linux-2.6-ozlabs/include/asm-powerpc/ppc_asm.h
@@ -74,6 +74,15 @@ END_FTR_SECTION_IFCLR(CPU_FTR_PURR); 
REST_10GPRS(22, base)
 #endif
 
+/*
+ * Define what the VSX XX1 form instructions will look like, then add
+ * the 128 bit load store instructions based on that.
+ */
+#define VSX_XX1(xs, ra, rb)	(((xs) & 0x1f) << 21 | ((ra) << 16) |	\
+				 ((rb) << 11) | (((xs) >> 5)))
+
+#define STXVD2X(xs, ra, rb).long (0x7c000798 | VSX_XX1((xs), (ra), (rb)))
+#define LXVD2X(xs, ra, rb) .long (0x7c000698 | VSX_XX1((xs), (ra), (rb)))
 
 #define SAVE_2GPRS(n, base)SAVE_GPR(n, base); SAVE_GPR(n+1, base)
 #define SAVE_4GPRS(n, base)SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base)
@@ -110,6 +119,57 @@ END_FTR_SECTION_IFCLR(CPU_FTR_PURR);   

 #define REST_16VRS(n,b,base)   REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
 #define REST_32VRS(n,b,base)   REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)
 
+/* Save the lower 32 VSRs in the thread VSR region */
+#define SAVE_VSR(n,b,base) li b,THREAD_VSR0+(16*(n));  STXVD2X(n,b,base)
+#define SAVE_2VSRS(n,b,base)   SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base)
+#define SAVE_4VSRS(n,b,base)   SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base)
+#define SAVE_8VSRS(n,b,base)   SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base)
+#define SAVE_16VSRS(n,b,base)  SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base)
+#define SAVE_32VSRS(n,b,base)  SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base)
+#define REST_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); LXVD2X(n,b,base)
+#define REST_2VSRS(n,b,base)   REST_VSR(n,b,base); REST_VSR(n+1,b,base)
+#define REST_4VSRS(n,b,base)   REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base)
+#define REST_8VSRS(n,b,base)   REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base)
+#define REST_16VSRS(n,b,base)  REST_8VSRS(n,b,base); REST_8VSRS(n+8,b,base)
+#define REST_32VSRS(n,b,base)  REST_16VSRS(n,b,base); REST_16VSRS(n+16,b,base)
+/* Save the upper 32 VSRs (32-63) in the thread VSX region (0-31) */
+#define SAVE_VSRU(n,b,base)li b,THREAD_VR0+(16*(n));  STXVD2X(n+32,b,base)
+#define SAVE_2VSRSU(n,b,base)  SAVE_VSRU(n,b,base); SAVE_VSRU(n+1,b,base)
+#define SAVE_4VSRSU(n,b,base)  SAVE_2VSRSU(n,b,base); SAVE_2VSRSU(n+2,b,base)
+#define SAVE_8VSRSU(n,b,base)  SAVE_4VSRSU(n,b,base); SAVE_4VSRSU(n+4,b,base)
+#define SAVE_16VSRSU(n,b,base) SAVE_8VSRSU(n,b,base); SAVE_8VSRSU(n+8,b,base)
+#define SAVE_32VSRSU(n,b,base) SAVE_16VSRSU(n,b,base); 
SAVE_16VSRSU(n+16,b,base)
+#define REST_VSRU(n,b,base)li b,THREAD_VR0+(16*(n)); LXVD2X(n+32,b,base)
+#define REST_2VSRSU(n,b,base)  REST_VSRU(n,b,base); REST_VSRU(n+1,b,base)
+#define REST_4VSRSU(n,b,base)  REST_2VSRSU(n,b,base); REST_2VSRSU(n+2,b,base)
+#define REST_8VSRSU(n,b,base)  REST_4VSRSU(n,b,base); REST_4VSRSU(n+4,b,base)
+#define REST_16VSRSU(n,b,base) REST_8VSRSU(n,b,base); REST_8VSRSU(n+8,b,base)
+#define REST_32VSRSU(n,b,base) REST_16VSRSU(n,b,base); 
REST_16VSRSU(n+16,b,base)
+
+#ifdef CONFIG_VSX
+#define REST_32FPVSRS(n,c,base)
\
+BEGIN_FTR_SECTION  \
+   b   2f; \
+END_FTR_SECTION_IFSET(CPU_FTR_VSX);\
+   REST_32FPRS(n,base);\
+   b   3f; \
+2: REST_32VSRS(n,c,base);  \
+3:
+
+#define SAVE_32FPVSRS(n,c,base)
\
+BEGIN_FTR_SECTION  \
+   b   2f; \
+END_FTR_SECTION_IFSET(CPU_FTR_VSX);\
+   SAVE_32FPRS(n,base);\
+   b   3f; \
+2: SAVE_32VSRS(n,c,base);  \
+3:
+
+#else
+#define REST_32FPVSRS(n,b,base)REST_32FPRS(n, base)
+#define SAVE_32FPVSRS(n,b,base)SAVE_32FPRS(n, base)
+#endif
+
 #define SAVE_EVR(n,s,base) evmergehi s,s,n; stw s,THREAD_EVR0+4*(n)(base)
 #define SAVE_2EVRS(n,s,base)   SAVE_EVR(n,s,base); SAVE_EVR(n+1,s,base)
 #define SAVE_4EVRS(n,s,base)   SAVE_2EVRS(n,s,base); 

[PATCH 1/1] Change the default link address for pSeries zImage kernels.

2008-06-23 Thread Tony Breeds
Currently we set the start of the .text section to be 4MB for pSeries.
In situations where the zImage is > 8MB we'll fail to boot (due to
overlapping with OF).  Move .text in a zImage from 4MB to 64MB (well past OF).

We still will not be able to load large zImages unless we also move OF;
to that end, add a note to the zImage ELF to move OF to 32MB.  If this
is the very first kernel booted then we'll need to move OF manually by
setting real-base.

Signed-off-by: Tony Breeds [EMAIL PROTECTED]
---
 arch/powerpc/boot/addnote.c |2 +-
 arch/powerpc/boot/oflib.c   |   15 +--
 arch/powerpc/boot/wrapper   |   14 --
 arch/powerpc/boot/zImage.coff.lds.S |1 -
 arch/powerpc/boot/zImage.lds.S  |1 -
 5 files changed, 26 insertions(+), 7 deletions(-)

diff --git a/arch/powerpc/boot/addnote.c b/arch/powerpc/boot/addnote.c
index 8041a98..b1e5611 100644
--- a/arch/powerpc/boot/addnote.c
+++ b/arch/powerpc/boot/addnote.c
@@ -25,7 +25,7 @@ char arch[] = "PowerPC";
 #define N_DESCR	6
 unsigned int descr[N_DESCR] = {
	0xffffffff,	/* real-mode = true */
-	0x00c00000,	/* real-base, i.e. where we expect OF to be */
+	0x02000000,	/* real-base, i.e. where we expect OF to be */
	0xffffffff,	/* real-size */
	0xffffffff,	/* virt-base */
	0xffffffff,	/* virt-size */
diff --git a/arch/powerpc/boot/oflib.c b/arch/powerpc/boot/oflib.c
index 95b8fd6..93a1a84 100644
--- a/arch/powerpc/boot/oflib.c
+++ b/arch/powerpc/boot/oflib.c
@@ -168,8 +168,19 @@ void *of_claim(unsigned long virt, unsigned long size, 
unsigned long align)
 
 void *of_vmlinux_alloc(unsigned long size)
 {
-   void *p = malloc(size);
-
+   unsigned long start = (unsigned long)_start, end = (unsigned long)_end;
+   void *addr;
+   void *p;
+
+   /* With some older POWER4 firmware the we need to claim the area
+* the kernel will reside in.  Newer firmwares don't need this so we
+* just ignore the return value.
+*/
+   addr = of_claim(start, end - start, 0);
+	printf("Trying to claim from 0x%lx to 0x%lx (0x%lx) got %p\r\n",
+  start, end, end - start, addr);
+
+   p = malloc(size);
if (!p)
		fatal("Can't allocate memory for kernel image!\n\r");
 
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index d6c96d9..22bc26e 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -138,14 +138,20 @@ objflags=-S
 tmp=$tmpdir/zImage.$$.o
 ksection=.kernel:vmlinux.strip
 isection=.kernel:initrd
+link_address='0x400000'
 
 case $platform in
-pmac|pseries|chrp)
+pseries)
+platformo=$object/of.o
+link_address='0x4000000'
+;;
+pmac|chrp)
 platformo=$object/of.o
 ;;
 coff)
 platformo=$object/of.o
 lds=$object/zImage.coff.lds
+link_address='0x500000'
 ;;
 miboot|uboot)
 # miboot and U-boot want just the bare bits, not an ELF binary
@@ -190,6 +196,7 @@ ps3)
 objflags=-O binary --set-section-flags=.bss=contents,alloc,load,data
 ksection=.kernel:vmlinux.bin
 isection=.kernel:initrd
+link_address=''
 ;;
 ep88xc|ep405|ep8248e)
 platformo=$object/fixed-head.o $object/$platform.o
@@ -268,7 +275,10 @@ if [ -n $dtb ]; then
 fi
 
 if [ $platform != miboot ]; then
-${CROSS}ld -m elf32ppc -T $lds -o $ofile \
+if [ -n $link_address ] ; then
+    text_start="-Ttext $link_address --defsym _start=$link_address"
+fi
+${CROSS}ld -m elf32ppc -T $lds $text_start -o $ofile \
$platformo $tmp $object/wrapper.a
 rm $tmp
 fi
diff --git a/arch/powerpc/boot/zImage.coff.lds.S 
b/arch/powerpc/boot/zImage.coff.lds.S
index fe87a90..856dc78 100644
--- a/arch/powerpc/boot/zImage.coff.lds.S
+++ b/arch/powerpc/boot/zImage.coff.lds.S
@@ -3,7 +3,6 @@ ENTRY(_zimage_start_opd)
 EXTERN(_zimage_start_opd)
 SECTIONS
 {
-  . = (5*1024*1024);
   _start = .;
   .text  :
   {
diff --git a/arch/powerpc/boot/zImage.lds.S b/arch/powerpc/boot/zImage.lds.S
index f6e380f..0962d62 100644
--- a/arch/powerpc/boot/zImage.lds.S
+++ b/arch/powerpc/boot/zImage.lds.S
@@ -3,7 +3,6 @@ ENTRY(_zimage_start)
 EXTERN(_zimage_start)
 SECTIONS
 {
-  . = (4*1024*1024);
   _start = .;
   .text  :
   {
-- 
1.5.5.4



Re: [PATCH 1/1] Change the default link address for pSeries zImage kernels.

2008-06-23 Thread Tony Breeds
On Mon, Jun 23, 2008 at 06:13:23PM +1000, Tony Breeds wrote:
 Currently we set the start of the .text section to be 4Mb for pSeries.
 In situations where the zImage is > 8Mb we'll fail to boot (due to
 overlapping with OF).  Move .text in a zImage from 4MB to 64MB (well past OF).
 
 We still will not be able to load large zImage unless we also move OF,
 to that end, add a note to the zImage ELF to move OF to 32Mb.  If this
 is the very first kernel booted then we'll need to moev OF manually by
 setting real-base.
 
 Signed-off-by: Tony Breeds [EMAIL PROTECTED]
 ---

Oops, I should have said that this has been boot tested on POWER3, 4,
5, 5+, 6 and JS20.  POWER4 firmware seems to have a problem manually
relocating real-base, but aside from that it booted.

Yours Tony

  linux.conf.auhttp://www.marchsouth.org/
  Jan 19 - 24 2009 The Australian Linux Technical Conference!



Re: [RFC 1/3] powerpc: __copy_tofrom_user tweaked for Cell

2008-06-23 Thread Gunnar von Boehn
Hi Sanjay,

 I have no idea how important unaligned or uncacheable
 copy perf is for Cell Linux. My experience is from Mac
 OS X for PPC, where we used dcbz in a general-purpose
 memcpy but were forced to pull that optimization because
 of the detrimental perf effect on important applications.

Interesting points.
Can you help me understand where the negative effect of DCBZ comes from?


 I may be missing something, but I don't see how Cell's microcoded shift
is much of a factor here.
 The problem is that the dcbz will generate the alignment exception
 regardless of whether the data is actually unaligned or not.
 Once you're on that code path, performance can't be good, can it?

In which case will DCBZ create an alignment exception?

If you want to see results on Cell, then here are the values you can expect
on 1 CPU:
On Cell the copy using the shift X-form achieves max 800 MB/sec.
The copy using a single byte loop achieves 800 MB/sec as well.

An unaligned copy using unrolled doublewords and cache prefetch achieves
about 2500 MB/sec.
The aligned case using unrolled doublewords and cache prefetch achieves
about 7000 MB/sec.


What hurts performance a lot on CELL (and on XBOX 360) are two things:
a) The first level cache latency, and the memory and 2nd level cache
latency.
Cell has a first level cache latency of 4.
Cell has a second level cache latency of 40.
Cell has a memory latency of 400.

To avoid the 1st level cache latency you need a 4-instruction distance
between your load and the use/store of the data.
Therefore a straight copy needs to be written like this:

.Loop:
  ld   r9, 0x08(r4)
  ld   r7, 0x10(r4)
  ld   r8, 0x18(r4)
  ldu  r0, 0x20(r4)
  std  r9, 0x08(r6)  // 4 instructions distance from load
  std  r7, 0x10(r6)
  std  r8, 0x18(r6)
  stdu r0, 0x20(r6)
  bdnz .Loop



b) A major pain in the back is that the shift instruction is
microcoded.
While the SHIFT X-form needs one clock on other PPC architectures, it needs
11 clocks on CELL.
In addition to taking 11 clocks in the issuing thread, the microcoded
instruction will also freeze the second hardware thread.
Using microcoded instructions in a work loop will really drain the
performance on CELL.
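To show where those X-form shifts come from in the unaligned case, here is a
C sketch of the idea only (not the kernel routine; it assumes a big-endian
PPC and a source offset of 1-7 bytes): every destination doubleword has to be
spliced out of two aligned source doublewords, and the splice amount is only
known at run time, so the asm version needs the variable sld/srd forms that
are microcoded on Cell.

#include <stddef.h>
#include <stdint.h>

/*
 * Sketch only: copy n doublewords to an aligned dst from a source that is
 * 'off' bytes (1..7) past the aligned address src_aligned points to.
 * All loads/stores are aligned; the cost moves into two variable shifts
 * per doubleword (sld/srd X-form in the asm version).
 * Note: reads src_aligned[0..n], i.e. one extra doubleword at the end.
 */
static void copy_from_misaligned(uint64_t *dst, const uint64_t *src_aligned,
				 size_t n, unsigned int off)
{
	unsigned int sh = off * 8;	/* big-endian byte order assumed */
	uint64_t cur = src_aligned[0];
	size_t i;

	for (i = 0; i < n; i++) {
		uint64_t next = src_aligned[i + 1];
		dst[i] = (cur << sh) | (next >> (64 - sh));
		cur = next;
	}
}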


I think if you want to use the same copy for uncacheable memory, and maybe
for other PPC platforms, then a good compromise would be to use the cache
prefetch version for the aligned case and the old SHIFT path for the
unaligned case.
This way you get maximum performance for aligned copies and good results
for the unaligned case.



   
From:    Sanjay Patel [EMAIL PROTECTED], 20/06/2008 19:46
To:      Gunnar von Boehn
Cc:      Arnd Bergmann [EMAIL PROTECTED], [EMAIL PROTECTED], Michael Ellerman [EMAIL PROTECTED], linuxppc-dev@ozlabs.org, Mark Nelson [EMAIL PROTECTED]
Subject: Re: [RFC 1/3] powerpc: __copy_tofrom_user tweaked for Cell





--- On Fri, 6/20/08, Gunnar von Boehn [EMAIL PROTECTED] wrote:
 How important is best performance for the unaligned copy
 to/from uncacheable memory?
 The challenge of the CELL chip is that X-form of the shift
 instructions are microcoded.
 The shifts are needed to implement a copy that reads and
 writes always aligned.

Hi Gunnar,

I have no idea how important unaligned or uncacheable copy perf is for Cell
Linux. My experience is from Mac OS X for PPC, where we used dcbz in a
general-purpose memcpy but were forced to pull that optimization because of
the detrimental perf effect on important applications.

I may be missing something, but I don't see how Cell's microcoded shift is
much of a factor here. The problem is that the dcbz will generate the
alignment exception regardless of whether the data is actually unaligned or

Re: [PATCH 1/1] Change the default link address for pSeries zImage kernels.

2008-06-23 Thread Adrian Reber
On Mon, Jun 23, 2008 at 06:13:23PM +1000, Tony Breeds wrote:
 Currently we set the start of the .text section to be 4Mb for pSeries.
 In situations where the zImage is > 8Mb we'll fail to boot (due to
 overlapping with OF).  Move .text in a zImage from 4MB to 64MB (well past OF).
 
 We still will not be able to load large zImage unless we also move OF,
 to that end, add a note to the zImage ELF to move OF to 32Mb.  If this
 is the very first kernel booted then we'll need to moev OF manually by
 setting real-base.

Does this change also affect kernels for SLOF based systems (JS20, JS21,
Bimini/Powerstation, QS21, QS22)?

To avoid exactly that problem SLOF moved to a bit below 256MB on all
those platforms (about 220MB). There should be still enough space
between 64MB and 220MB to boot large kernels. It is, however, decreased
by 60MB.

Adrian


starting with 2.6.26-rc1 cell_defconfig fails on QS22

2008-06-23 Thread Adrian Reber

I have a custom config to build a kernel which runs on JS21/QS21/QS22.
Starting with 2.6.26-rc1 that fails on JS21 (with SLOF). Bisecting led
me to the following commit:

 commit 366234f657879aeb7a1e2ca582f2f24f3fae9269
 Author: Kumar Gala [EMAIL PROTECTED]
 Date:   Wed Apr 16 05:52:28 2008 +1000

 [POWERPC] Update linker script to properly set physical addresses

I then tried it with cell_defconfig and it fails with almost the same
error on QS22. I have not restarted the bisecting on QS22 because the
error is so similar:

zImage starting: loaded at 0x0040 (sp: 0x0e16aea0)
Allocating 0x6790c8 bytes for kernel ...
OF version = 'IBM,SLOF,HEAD'
gunzipping (0x0140 - 0x00407000:0x0062ce4a)...done 0x60405a bytes

Linux/PowerPC load: 
Finalizing device tree... using OF tree (promptr=0e1004c4)
 

( 700 ) Program Exception [ e1004c4 ]


R0 .. R7   R8 .. R15 R16 .. R23 R24 .. R31
014073b0   0e974200       
0e16aea0   00638c04       
019e2b98   0e974200    0140   
   b0003000    00638804   
   2000    0e96f3c0   
0e1004c4       0e1004c4   
0e96f3c0          
00638804      0eac5d70    

CR / XER   LR / CTR  SRR0 / SRR1DAR / DSISR
8022   014073e8   0189e99c    
2000   0140   90083000    

I have tried it with gcc-3.4.2, gcc-4.1.1 and gcc-4.2.4.

Somehow I expect that I am doing something wrong, because nobody else
has reported something like this yet.

It looks like 2.6.25 was the last version which worked on SLOF based
systems.

Adrian


Re: [PATCH 1/1] Change the default link address for pSeries zImage kernels.

2008-06-23 Thread Benjamin Herrenschmidt
On Mon, 2008-06-23 at 11:30 +0200, Adrian Reber wrote:
 On Mon, Jun 23, 2008 at 06:13:23PM +1000, Tony Breeds wrote:
  Currently we set the start of the .text section to be 4Mb for pSeries.
  In situations where the zImage is > 8Mb we'll fail to boot (due to
  overlapping with OF).  Move .text in a zImage from 4MB to 64MB (well past 
  OF).
  
  We still will not be able to load large zImage unless we also move OF,
  to that end, add a note to the zImage ELF to move OF to 32Mb.  If this
  is the very first kernel booted then we'll need to moev OF manually by
  setting real-base.
 
 Does this change also affect kernels for SLOF based systems (JS20, JS21,
 Bimini/Powerstation, QS21, QS22)?

Yes, they use the same zImage.

 To avoid exactly that problem SLOF moved to a bit below 256MB on all
 those platforms (about 220MB). There should be still enough space
 between 64MB and 220MB to boot large kernels. It is, however, decreased
 by 60MB.

That should leave plenty of space...

Cheers,
Ben.


___
Linuxppc-dev mailing list
Linuxppc-dev@ozlabs.org
https://ozlabs.org/mailman/listinfo/linuxppc-dev


Re: starting with 2.6.26-rc1 cell_defconfig fails on QS22

2008-06-23 Thread Benjamin Herrenschmidt
On Mon, 2008-06-23 at 11:45 +0200, Adrian Reber wrote:
 I have a custom config to build a kernel which runs on JS21/QS21/QS22.
 Starting with 2.6.26-rc1 that fails on JS21 (with SLOF). Bisecting lead
 me to the following commit:
 
  commit 366234f657879aeb7a1e2ca582f2f24f3fae9269
  Author: Kumar Gala [EMAIL PROTECTED]
  Date:   Wed Apr 16 05:52:28 2008 +1000
 
  [POWERPC] Update linker script to properly set physical addresses
 
 I then tried it with cell_defconfig and it fails with almost the same
 error on QS22. I have not restared the bisecting on QS22 because the
 error is so similar:
 
 zImage starting: loaded at 0x0040 (sp: 0x0e16aea0)
 Allocating 0x6790c8 bytes for kernel ...
 OF version = 'IBM,SLOF,HEAD'
 gunzipping (0x0140 - 0x00407000:0x0062ce4a)...done 0x60405a bytes
 
 Linux/PowerPC load: 
 Finalizing device tree... using OF tree (promptr=0e1004c4)
  
 
 ( 700 ) Program Exception [ e1004c4 ]

The program check exception happens at 0xe1004c4 ? That looks like the
OF entry point (promptr)... could it be possible that it got corrupted
somewhat ? The only thing I see above there would be the stack but
I fail to see how it would use that much... 

 R0 .. R7   R8 .. R15 R16 .. R23 R24 .. R31
 014073b0   0e974200       
 0e16aea0   00638c04       
 019e2b98   0e974200    0140   
    b0003000    00638804   
    2000    0e96f3c0   
 0e1004c4       0e1004c4   
 0e96f3c0          
 00638804      0eac5d70    
 
 CR / XER   LR / CTR  SRR0 / SRR1DAR / DSISR
 8022   014073e8   0189e99c    
 2000   0140   90083000    
 
 I have tried it with gcc-3.4.2, gcc-4.1.1 and gcc-4.2.4.
 
 Somehow I expect that I am doing something wrong, because nobody else
 has reported something like this yet.
 
 It looks like 2.6.25 was the last version which worked on SLOF based
 systems.
 
   Adrian



Re: [PATCH 1/4] crypto/talitos: rm duplicate timeout definition

2008-06-23 Thread Herbert Xu
On Mon, Jun 16, 2008 at 11:10:40AM -0500, Kim Phillips wrote:
 Signed-off-by: Kim Phillips [EMAIL PROTECTED]

I've merged these patches into the original changeset.  Thanks!
-- 
Visit Openswan at http://www.openswan.org/
Email: Herbert Xu ~{PmVHI~} [EMAIL PROTECTED]
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


Re: [RFC 1/3] powerpc: __copy_tofrom_user tweaked for Cell

2008-06-23 Thread Geert Uytterhoeven
On Mon, 23 Jun 2008, Gunnar von Boehn wrote:
  The problem is that the dcbz will generate the alignment exception
  regardless of whether the data is actually unaligned or not.
  Once you're on that code path, performance can't be good, can it?
 
 In which case will DCBZ create an aligned exception?

When using dcbz on uncached memory, IIRC.

With kind regards,

Geert Uytterhoeven
Software Architect

Sony Techsoft Centre
The Corporate Village · Da Vincilaan 7-D1 · B-1935 Zaventem · Belgium

Phone:+32 (0)2 700 8453
Fax:  +32 (0)2 700 8622
E-mail:   [EMAIL PROTECTED]
Internet: http://www.sony-europe.com/

Sony Technology and Software Centre Europe
A division of Sony Service Centre (Europe) N.V.
Registered office: Technologielaan 7 · B-1840 Londerzeel · Belgium
VAT BE 0413.825.160 · RPR Brussels
Fortis 293-0376800-10 GEBA-BE-BB

Re: [PATCH 1/1] Change the default link address for pSeries zImage kernels.

2008-06-23 Thread Michael Ellerman
On Mon, 2008-06-23 at 18:13 +1000, Tony Breeds wrote:
 Currently we set the start of the .text section to be 4Mb for pSeries.
 In situations where the zImage is > 8Mb we'll fail to boot (due to
 overlapping with OF).  Move .text in a zImage from 4MB to 64MB (well past OF).
...
 diff --git a/arch/powerpc/boot/oflib.c b/arch/powerpc/boot/oflib.c
 index 95b8fd6..93a1a84 100644
 --- a/arch/powerpc/boot/oflib.c
 +++ b/arch/powerpc/boot/oflib.c
 @@ -168,8 +168,19 @@ void *of_claim(unsigned long virt, unsigned long size, 
 unsigned long align)
  
  void *of_vmlinux_alloc(unsigned long size)
  {
 - void *p = malloc(size);
 -
 + unsigned long start = (unsigned long)_start, end = (unsigned long)_end;
 + void *addr;
 + void *p;
 +
 + /* With some older POWER4 firmware the we need to claim the area

Sorry, typo/grammaro :/ ^^

cheers

-- 
Michael Ellerman
OzLabs, IBM Australia Development Lab

wwweb: http://michael.ellerman.id.au
phone: +61 2 6212 1183 (tie line 70 21183)

We do not inherit the earth from our ancestors,
we borrow it from our children. - S.M.A.R.T Person



rh_init exported twice (rheap vs. dm-region_hash)

2008-06-23 Thread Matt Sealey

Hi guys,

I keep getting export errors when building kernels, a conflict between
arch/powerpc/lib/rheap.c:rh_init() and drivers/dm/dm-region_hash.c:rh_init()

- in the event you want to use BestComm, MPC8610 DIU or other features
*and* a RAID algorithm, which subsystem would be best to patch away or
patch in a fix?

I was thinking of renaming the dm hash one to rh_hash_init() and then I
thought, maybe rh_heap_init() is better. Then I thought, why have rh in
both places, why not rename the entire API (rheap_ and rhash_) in both
cases?

What do you think is the best solution? Willing to hack and compile test
just so my kernel builds are cleaner..

--
Matt Sealey [EMAIL PROTECTED]
Genesi, Manager, Developer Relations


Re: rh_init exported twice (rheap vs. dm-region_hash)

2008-06-23 Thread Timur Tabi

Matt Sealey wrote:


I was thinking of renaming the dm hash one to rh_hash_init() and then I
thought, maybe rh_heap_init() is better. Then I thought, why have rh in
both places, why not rename the entire API (rheap_ and rhash_) in both
cases?


I'm okay with renaming the rh_xxx calls in rheap.c to rheap_xxx.  I also think 
renaming the rh_ calls to rhash is a good idea.


--
Timur Tabi
Linux Kernel Developer @ Freescale


[PATCH] ibm_newemac: Fixes entry of short packets

2008-06-23 Thread Stefan Roese
From: Sathya Narayanan [EMAIL PROTECTED]

Short packets have to be discarded by the driver, so this patch addresses
the issue of discarding short packets whose size is smaller than the
Ethernet header size.

Signed-off-by: Sathya Narayanan [EMAIL PROTECTED]
Signed-off-by: Stefan Roese [EMAIL PROTECTED]
---
 drivers/net/ibm_newemac/core.c |7 +++
 1 files changed, 7 insertions(+), 0 deletions(-)

diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 6dfc2c9..aa407b2 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -1652,6 +1652,13 @@ static int emac_poll_rx(void *param, int budget)
 
skb_put(skb, len);
push_packet:
+   if (skb->len < ETH_HLEN) {
+   dev_kfree_skb(skb);
+   printk(KERN_WARNING "%s: short packets dropped\n",
+  dev->ndev->name);
+   ++dev->estats.rx_dropped_stack;
+   goto next;
+   }
	skb->dev = dev->ndev;
	skb->protocol = eth_type_trans(skb, dev->ndev);
	emac_rx_csum(dev, skb, ctrl);
-- 
1.5.6



[PATCH] ibm_newemac: Fixes kernel crashes when speed of cable connected changes

2008-06-23 Thread Stefan Roese
From: Sathya Narayanan [EMAIL PROTECTED]

The descriptor pointers were not initialized to NULL, so they were
pointing to random addresses which were completely invalid. This fix
takes care of initializing the descriptor pointers to NULL and of only
freeing valid descriptors in the clean-ring operation.

Signed-off-by: Sathya Narayanan [EMAIL PROTECTED]
Signed-off-by: Stefan Roese [EMAIL PROTECTED]
---
 drivers/net/ibm_newemac/core.c |6 +-
 1 files changed, 5 insertions(+), 1 deletions(-)

diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 5d2108c..6dfc2c9 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -1025,7 +1025,7 @@ static void emac_clean_tx_ring(struct emac_instance *dev)
int i;
 
	for (i = 0; i < NUM_TX_BUFF; ++i) {
-   if (dev->tx_skb[i]) {
+   if (dev->tx_skb[i] && dev->tx_desc[i].data_ptr) {
	dev_kfree_skb(dev->tx_skb[i]);
	dev->tx_skb[i] = NULL;
	if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
@@ -2719,6 +2719,10 @@ static int __devinit emac_probe(struct of_device *ofdev,
/* Clean rings */
	memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
	memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
+   for (i = 0; i < NUM_TX_BUFF; i++)
+   dev->tx_skb[i] = NULL;
+   for (i = 0; i < NUM_RX_BUFF; i++)
+   dev->rx_skb[i] = NULL;
 
/* Attach to ZMII, if needed */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
-- 
1.5.6



[PATCH] ibm_newemac: Fixes memory leak in ibm_newemac ethernet driver

2008-06-23 Thread Stefan Roese
From: Sathya Narayanan [EMAIL PROTECTED]

This patch addresses a memory leak happening in the driver's transmit queue
under heavy load. Once the transmit queue becomes full, the driver does an
automatic wrap-around of the queue, during which the untransmitted SKBs are
lost without getting freed.

Signed-off-by: Sathya Narayanan [EMAIL PROTECTED]
Signed-off-by: Stefan Roese [EMAIL PROTECTED]
---
 drivers/net/ibm_newemac/core.c |7 +++
 1 files changed, 7 insertions(+), 0 deletions(-)

diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index aa407b2..ee868b6 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -1328,6 +1328,13 @@ static int emac_start_xmit(struct sk_buff *skb, struct 
net_device *ndev)
 
DBG2(dev, xmit(%u) %d NL, len, slot);
 
+   if (dev->tx_skb[slot] && dev->tx_desc[slot].data_ptr) {
+   dev_kfree_skb(dev->tx_skb[slot]);
+   dev->tx_skb[slot] = NULL;
+   dev->tx_cnt--;
+   ++dev->estats.tx_dropped;
+   }
+
	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
   skb->data, len,
-- 
1.5.6



a question of mpc8313, maybe simple

2008-06-23 Thread jumpingProgrammer

I am working with a board based on the MPC8313, using the default
mpc8313erdb.dts, but I do not know how to correctly use DMA to transfer
data from memory to a PCI device.
I tried this:
{
 .
 request_irq(IRQ,);
 request_dma(.);
 
}

In request_irq(), I do not know what the IRQ parameter should be.
Also, I did not find anything about DMA in mpc8313erdb.dts.
So:
1. What should the IRQ parameter be?
   Is it the IRQ requested when the DMA controller is initialized, or another one?
2. If I want to initialize DMA, what do I need to add to mpc8313erdb.dts?


-- 
View this message in context: 
http://www.nabble.com/a-question-of-mpc8313%2C-maybe-simple-tp18067094p18067094.html
Sent from the linuxppc-dev mailing list archive at Nabble.com.



Re: [PATCH 1/9] powerpc: Fix msr setting in 32 bit signal code

2008-06-23 Thread Kumar Gala


On Jun 23, 2008, at 2:38 AM, Michael Neuling wrote:


If we set the SPE MSR bit in save_user_regs we can blow away the VEC
bit.  This will never happen in reality (VMX and SPE will never be in
the same processor as their opcodes overlap), but it looks bad.  Also
when we add VSX here in a later patch, we can hit two of these at the
same time.

Signed-off-by: Michael Neuling [EMAIL PROTECTED]
---


I think it would also be good to comment about how this doesn't happen  
since they are the same MSR bit.  Having that comment might reduce  
confusion if anyone ever looks at this commit message in the future.   
(Plus you seem to have trailing white space in the commit message).


- k



Re: [PATCH][WIP][RFC] powerpc: fixup lwsync at runtime

2008-06-23 Thread Kumar Gala


On Jun 21, 2008, at 7:20 PM, Benjamin Herrenschmidt wrote:


On Sat, 2008-06-21 at 11:07 -0500, Kumar Gala wrote:



Remind me why we want to do that ? ie. can't we just use lwsync
unconditionally ? It's supposed to degrade to sync on CPUs that don't
support it, or is that broken on some parts ?


I believe its broken on e500v1/v2.  However I'll double check.


e500v1/v2 treat lwsync as a illop.


An option is that if you get a program check instead, you can fixup
the sync from the exception too...



We could.  However it just feels a bit dirty to illop in the kernel  
and fix it up.


Plus, I think David Woodhouse had some ideas about being able to
generate a single SMP/no-SMP kernel image like x86 apparently does
with some form of run-time fixup.  It would seem that would be in the
same realm as the lwsync fixup I'm looking at.
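For the archive, the runtime-fixup idea can be sketched roughly like this
(purely a hypothetical illustration; the section name, symbols and boot hook
are assumptions, not the WIP patch being discussed): every lwsync site
records its address in a dedicated section, and boot code rewrites those
sites to a plain sync on CPUs that lack lwsync.

#include <linux/init.h>
#include <asm/cacheflush.h>

#define PPC_INST_SYNC	0x7c0004ac	/* plain "sync" encoding */

/* Hypothetical: filled by a linker section that collects lwsync sites. */
extern unsigned int *__start___lwsync_fixup[];
extern unsigned int *__stop___lwsync_fixup[];

static void __init do_lwsync_fixups_sketch(int cpu_has_lwsync)
{
	unsigned int **site;

	if (cpu_has_lwsync)
		return;

	for (site = __start___lwsync_fixup;
	     site < __stop___lwsync_fixup; site++) {
		**site = PPC_INST_SYNC;		/* patch lwsync -> sync */
		flush_icache_range((unsigned long)*site,
				   (unsigned long)*site + 4);
	}
}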


- k



Virqs of cascaded interrupt controller.

2008-06-23 Thread Welch, Martyn (GE EntSol, Intelligent Platforms)
Hi,

I'm in the process of porting Linux to one of our boards based on an
8641D. Some of the interrupts of on-board devices are dealt with by a
custom interrupt controller in one of the onboard FPGAs, which cascades
into the 8641D's mpic. I'm trying to write a driver for it.

Looking at examples of cascaded interrupt handlers I've managed to get
to the point where the kernel is trying to register interrupts from the
DTB file I provide it. The problem is I don't know what virqs are being
assigned to the interrupts or how virqs are/should be assigned to a
cascaded interrupt controller.

Can anyone point me towards any documentation that my naïve googling is
missing or explain how this should work?

Martyn


Martyn Welch MEng MPhil MIET
Principal Software Engineer

GE Fanuc Intelligent Platforms
Tove Valley Business Park, Towcester,
Northants, NN12 6PF, United Kingdom

Telephone: +44 (0) 1327 359444
Direct Dial: +44 (0) 1327 322748
Fax: +44 (0) 1327 322800
email: [EMAIL PROTECTED]
web: www.gefanuc.com

GE Fanuc Intelligent Platforms Ltd, registered in England and Wales
(3828642) at 100 Barbirolli Square, Manchester, M2 3AB, VAT GB 729 849
476  

GE Fanuc Intelligent Platforms Confidential and Proprietary. If you have
received this message in error please notify us immediately and
permanently remove it from your system and destroy any printed
hardcopies.


Re: Virqs of cascaded interrupt controller.

2008-06-23 Thread Scott Wood
On Mon, Jun 23, 2008 at 03:00:35PM +0100, Welch, Martyn (GE EntSol, Intelligent 
Platforms) wrote:
 I'm in the process of porting Linux to one of our boards based on an
 8641D. Some of the interrupts of on-board devices are dealt with by a
 custom interrupt controller in one of the onboard FPGAs, which cascades
 into the 8641D's mpic. I'm trying to write a driver for it.
 
 Looking at examples of cascaded interrupt handlers I've managed to get
 to the point where the kernel is trying to register interrupts from the
 DTB file I provide it. The problem is I don't know what virqs are being
 assigned to the interrupts or how virqs are/should be assigned to a
 cascaded interrupt controller.

The virqs are created by irq_create_mapping() or irq_of_parse_and_map(). 
It is preferred to use the latter, with the interrupts expressed in the
device tree.
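For a rough idea of the shape, a hedged sketch only (function and node names
are placeholders; the real references are the drivers named below): the board
code maps the FPGA PIC's own cascade interrupt with irq_of_parse_and_map()
and installs a chained handler, and devices behind the FPGA get their virqs
the same way once the PIC has registered an irq_host.

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <asm/prom.h>

/* Sketch: handler that demuxes the FPGA PIC (details omitted). */
static void fpga_pic_cascade(unsigned int irq, struct irq_desc *desc)
{
	/* read FPGA status, generic_handle_irq() the mapped child virq, ack */
}

static int fpga_pic_attach(struct device_node *np)
{
	unsigned int cascade_virq;

	/* Turns the "interrupts" property of this node into a virq. */
	cascade_virq = irq_of_parse_and_map(np, 0);
	if (cascade_virq == NO_IRQ)
		return -EINVAL;

	set_irq_chained_handler(cascade_virq, fpga_pic_cascade);
	return 0;
}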

If that doesn't answer your question, could you be more specific about what
you're trying to find out, and/or what aspect of the existing cascaded irq
controllers (e.g. sysdev/qe_lib/qe_ic.c, platforms/82xx/pq2ads-pci-pic.c)
you want clarification of?  Do you want to know what to pass to request_irq,
how to turn virqs back into hwirqs in the cascade driver, or something else?

-Scott


[2.6 patch] drivers/macintosh/: possible cleanups

2008-06-23 Thread Adrian Bunk
On Tue, Jun 10, 2008 at 10:21:25AM +1000, Stephen Rothwell wrote:
 Hi Adrian,
 
 On Tue, 10 Jun 2008 01:23:12 +0300 Adrian Bunk [EMAIL PROTECTED] wrote:
 
  +++ b/drivers/macintosh/adbhid.c
  @@ -75,7 +75,7 @@ static struct notifier_block adbhid_adb_notifier = {
   #define ADB_KEY_POWER_OLD  0x7e
   #define ADB_KEY_POWER  0x7f
   
  -u16 adb_to_linux_keycodes[128] = {
  +static u16 adb_to_linux_keycodes[128] = {
 
 This could be const as well.

Updated patch below.

cu
Adrian


--  snip  --


This patch contains the following possible cleanups:
- make the following needlessly global code static:
  - adb.c: adb_controller
  - adb.c: adb_init()
  - adbhid.c: adb_to_linux_keycodes[]  (also make it const)
  - via-pmu68k.c: backlight_level
  - via-pmu68k.c: backlight_enabled
- remove the following unused code:
  - via-pmu68k.c: sleep_notifier_list

Signed-off-by: Adrian Bunk [EMAIL PROTECTED]

---

 drivers/macintosh/adb.c|5 ++---
 drivers/macintosh/adbhid.c |2 +-
 drivers/macintosh/via-pmu68k.c |5 ++---
 include/linux/adb.h|1 -
 4 files changed, 5 insertions(+), 8 deletions(-)

diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index dbaad39..61b62a6 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -46,7 +46,6 @@
 #endif
 
 
-EXPORT_SYMBOL(adb_controller);
 EXPORT_SYMBOL(adb_client_list);
 
 extern struct adb_driver via_macii_driver;
@@ -80,7 +79,7 @@ static struct adb_driver *adb_driver_list[] = {
 
 static struct class *adb_dev_class;
 
-struct adb_driver *adb_controller;
+static struct adb_driver *adb_controller;
 BLOCKING_NOTIFIER_HEAD(adb_client_list);
 static int adb_got_sleep;
 static int adb_inited;
@@ -290,7 +289,7 @@ static int adb_resume(struct platform_device *dev)
 }
 #endif /* CONFIG_PM */
 
-int __init adb_init(void)
+static int __init adb_init(void)
 {
struct adb_driver *driver;
int i;
diff --git a/drivers/macintosh/adbhid.c b/drivers/macintosh/adbhid.c
index ef4c117..af72f97 100644
--- a/drivers/macintosh/adbhid.c
+++ b/drivers/macintosh/adbhid.c
@@ -75,7 +75,7 @@ static struct notifier_block adbhid_adb_notifier = {
 #define ADB_KEY_POWER_OLD  0x7e
 #define ADB_KEY_POWER  0x7f
 
-u16 adb_to_linux_keycodes[128] = {
+static const u16 adb_to_linux_keycodes[128] = {
/* 0x00 */ KEY_A,   /*  30 */
/* 0x01 */ KEY_S,   /*  31 */
/* 0x02 */ KEY_D,   /*  32 */
diff --git a/drivers/macintosh/via-pmu68k.c b/drivers/macintosh/via-pmu68k.c
index e2f84da..b64741c 100644
--- a/drivers/macintosh/via-pmu68k.c
+++ b/drivers/macintosh/via-pmu68k.c
@@ -101,7 +101,6 @@ static int pmu_kind = PMU_UNKNOWN;
 static int pmu_fully_inited;
 
 int asleep;
-BLOCKING_NOTIFIER_HEAD(sleep_notifier_list);
 
 static int pmu_probe(void);
 static int pmu_init(void);
@@ -741,8 +740,8 @@ pmu_handle_data(unsigned char *data, int len)
}
 }
 
-int backlight_level = -1;
-int backlight_enabled = 0;
+static int backlight_level = -1;
+static int backlight_enabled = 0;
 
 #define LEVEL_TO_BRIGHT(lev)   ((lev)  1? 0x7f: 0x4a - ((lev)  1))
 
diff --git a/include/linux/adb.h b/include/linux/adb.h
index 64d8878..63bca50 100644
--- a/include/linux/adb.h
+++ b/include/linux/adb.h
@@ -84,7 +84,6 @@ enum adb_message {
 ADB_MSG_PRE_RESET, /* Called before resetting the bus */
 ADB_MSG_POST_RESET /* Called after resetting the bus (re-do init  
register) */
 };
-extern struct adb_driver *adb_controller;
 extern struct blocking_notifier_head adb_client_list;
 
 int adb_request(struct adb_request *req, void (*done)(struct adb_request *),

___
Linuxppc-dev mailing list
Linuxppc-dev@ozlabs.org
https://ozlabs.org/mailman/listinfo/linuxppc-dev


[2.6 patch] asm/ptrace.h userspace headers cleanup

2008-06-23 Thread Adrian Bunk
This patch contains the following cleanups for the asm/ptrace.h 
userspace headers:
- include/asm-generic/Kbuild.asm already lists ptrace.h, remove
  the superfluous listings in the Kbuild files of the following
  architectures:
  - cris
  - frv
  - powerpc
  - x86
- don't expose function prototypes and macros to userspace:
  - arm
  - blackfin
  - cris
  - mn10300
  - parisc
- remove #ifdef CONFIG_'s around #define's:
  - blackfin
  - m68knommu
- sh: AFAIK __SH5__ should work in both kernel and userspace,
  no need to leak CONFIG_SUPERH64 to userspace
- xtensa: cosmetical change to remove empty
#ifndef __ASSEMBLY__ #else #endif
  from the userspace headers

Signed-off-by: Adrian Bunk [EMAIL PROTECTED]

---

Not changed by this patch is the fact that the following architectures 
have a different struct pt_regs depending on CONFIG_ variables:
- h8300
- m68knommu
- mips

This does not work in userspace.


 include/asm-arm/ptrace.h   |6 ++
 include/asm-blackfin/ptrace.h  |6 --
 include/asm-cris/arch-v10/Kbuild   |1 -
 include/asm-cris/arch-v10/ptrace.h |4 
 include/asm-cris/arch-v32/Kbuild   |1 -
 include/asm-cris/arch-v32/ptrace.h |4 
 include/asm-cris/ptrace.h  |4 +++-
 include/asm-frv/Kbuild |1 -
 include/asm-m68knommu/ptrace.h |2 --
 include/asm-mn10300/ptrace.h   |8 ++--
 include/asm-parisc/ptrace.h|4 +++-
 include/asm-powerpc/Kbuild |1 -
 include/asm-sh/ptrace.h|2 +-
 include/asm-x86/Kbuild |1 -
 include/asm-xtensa/ptrace.h|   10 +-
 15 files changed, 32 insertions(+), 23 deletions(-)

fc14755b77cff7af5ff00e938a4c493a669e25cd diff --git a/include/asm-arm/ptrace.h 
b/include/asm-arm/ptrace.h
index 7aaa206..8382b75 100644
--- a/include/asm-arm/ptrace.h
+++ b/include/asm-arm/ptrace.h
@@ -139,8 +139,6 @@ static inline int valid_user_regs(struct pt_regs *regs)
return 0;
 }
 
-#endif /* __KERNEL__ */
-
 #define pc_pointer(v) \
((v)  ~PCMASK)
 
@@ -153,10 +151,10 @@ extern unsigned long profile_pc(struct pt_regs *regs);
 #define profile_pc(regs) instruction_pointer(regs)
 #endif
 
-#ifdef __KERNEL__
 #define predicate(x)   ((x)  0xf000)
 #define PREDICATE_ALWAYS   0xe000
-#endif
+
+#endif /* __KERNEL__ */
 
 #endif /* __ASSEMBLY__ */
 
diff --git a/include/asm-blackfin/ptrace.h b/include/asm-blackfin/ptrace.h
index b8346cd..a45a80e 100644
--- a/include/asm-blackfin/ptrace.h
+++ b/include/asm-blackfin/ptrace.h
@@ -83,14 +83,14 @@ struct pt_regs {
 #define PTRACE_GETREGS12
 #define PTRACE_SETREGS13   /* ptrace signal  */
 
-#ifdef CONFIG_BINFMT_ELF_FDPIC
 #define PTRACE_GETFDPIC   31
 #define PTRACE_GETFDPIC_EXEC  0
 #define PTRACE_GETFDPIC_INTERP1
-#endif
 
 #define PS_S  (0x0002)
 
+#ifdef __KERNEL__
+
 /* user_mode returns true if only one bit is set in IPEND, other than the
master interrupt enable.  */
 #define user_mode(regs) (!(((regs)-ipend  ~0x10)  (((regs)-ipend  ~0x10) 
- 1)))
@@ -98,6 +98,8 @@ struct pt_regs {
 #define profile_pc(regs) instruction_pointer(regs)
 extern void show_regs(struct pt_regs *);
 
+#endif  /*  __KERNEL__  */
+
 #endif /* __ASSEMBLY__ */
 
 /*
diff --git a/include/asm-cris/arch-v10/Kbuild b/include/asm-cris/arch-v10/Kbuild
index 60e7e1b..7a192e1 100644
--- a/include/asm-cris/arch-v10/Kbuild
+++ b/include/asm-cris/arch-v10/Kbuild
@@ -1,4 +1,3 @@
-header-y += ptrace.h
 header-y += user.h
 header-y += svinto.h
 header-y += sv_addr_ag.h
diff --git a/include/asm-cris/arch-v10/ptrace.h 
b/include/asm-cris/arch-v10/ptrace.h
index fb14c5e..2f464ea 100644
--- a/include/asm-cris/arch-v10/ptrace.h
+++ b/include/asm-cris/arch-v10/ptrace.h
@@ -106,10 +106,14 @@ struct switch_stack {
unsigned long return_ip; /* ip that _resume will return to */
 };
 
+#ifdef __KERNEL__
+
 /* bit 8 is user-mode flag */
 #define user_mode(regs) (((regs)-dccr  0x100) != 0)
 #define instruction_pointer(regs) ((regs)-irp)
 #define profile_pc(regs) instruction_pointer(regs)
 extern void show_regs(struct pt_regs *);
 
+#endif  /*  __KERNEL__  */
+
 #endif
diff --git a/include/asm-cris/arch-v32/Kbuild b/include/asm-cris/arch-v32/Kbuild
index a0ec545..35f2fc4 100644
--- a/include/asm-cris/arch-v32/Kbuild
+++ b/include/asm-cris/arch-v32/Kbuild
@@ -1,3 +1,2 @@
-header-y += ptrace.h
 header-y += user.h
 header-y += cryptocop.h
diff --git a/include/asm-cris/arch-v32/ptrace.h 
b/include/asm-cris/arch-v32/ptrace.h
index 516cc70..41f4e86 100644
--- a/include/asm-cris/arch-v32/ptrace.h
+++ b/include/asm-cris/arch-v32/ptrace.h
@@ -106,9 +106,13 @@ struct switch_stack {
unsigned long return_ip; /* ip that _resume will return to */
 };
 
+#ifdef __KERNEL__
+
 #define user_mode(regs) (((regs)-ccs  (1  (U_CCS_BITNR + CCS_SHIFT))) != 0)
 #define instruction_pointer(regs) ((regs)-erp)
 extern void show_regs(struct pt_regs 

[RFC: 2.6 patch] powerpc: don't export asm/asm-compat.h to userspace

2008-06-23 Thread Adrian Bunk
asm/asm-compat.h doesn't seem to be intended for userspace usage.

Signed-off-by: Adrian Bunk [EMAIL PROTECTED]

---

 include/asm-powerpc/Kbuild |1 -
 include/asm-powerpc/cputable.h |5 +++--
 2 files changed, 3 insertions(+), 3 deletions(-)

4c78f2fc562d3aae525c8f62706a80479228c50e diff --git 
a/include/asm-powerpc/Kbuild b/include/asm-powerpc/Kbuild
index 6920904..90cb216 100644
--- a/include/asm-powerpc/Kbuild
+++ b/include/asm-powerpc/Kbuild
@@ -24,7 +24,6 @@ header-y += sigcontext.h
 header-y += statfs.h
 header-y += ps3fb.h
 
-unifdef-y += asm-compat.h
 unifdef-y += bootx.h
 unifdef-y += byteorder.h
 unifdef-y += cputable.h
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index 1e79673..08b594a 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -1,8 +1,6 @@
 #ifndef __ASM_POWERPC_CPUTABLE_H
 #define __ASM_POWERPC_CPUTABLE_H
 
-#include asm/asm-compat.h
-
 #define PPC_FEATURE_32 0x8000
 #define PPC_FEATURE_64 0x4000
 #define PPC_FEATURE_601_INSTR  0x2000
@@ -31,6 +29,9 @@
 #define PPC_FEATURE_PPC_LE 0x0001
 
 #ifdef __KERNEL__
+
+#include asm/asm-compat.h
+
 #ifndef __ASSEMBLY__
 
 /* This structure can grow, it's real size is used by head.S code

___
Linuxppc-dev mailing list
Linuxppc-dev@ozlabs.org
https://ozlabs.org/mailman/listinfo/linuxppc-dev


[RFC: 2.6 patch] powerpc: asm/elf.h: reduce userspace header

2008-06-23 Thread Adrian Bunk
This patch makes asm/elf.h export less non-userspace stuff to userspace.

Signed-off-by: Adrian Bunk [EMAIL PROTECTED]

---

 include/asm-powerpc/elf.h |   12 ++--
 1 file changed, 6 insertions(+), 6 deletions(-)

6e868802e4b350a9dac071044b7cf52af7404134 diff --git a/include/asm-powerpc/elf.h 
b/include/asm-powerpc/elf.h
index 9080d85..d1e3bda 100644
--- a/include/asm-powerpc/elf.h
+++ b/include/asm-powerpc/elf.h
@@ -224,8 +224,6 @@ extern int dump_task_altivec(struct task_struct *, 
elf_vrregset_t *vrregs);
 #define ELF_CORE_XFPREG_TYPE NT_PPC_VMX
 #endif
 
-#endif /* __KERNEL__ */
-
 /* ELF_HWCAP yields a mask that user programs can use to figure out what
instruction set this cpu supports.  This could be done in userspace,
but it's not easy, and we've already done it here.  */
@@ -243,8 +241,6 @@ extern int dump_task_altivec(struct task_struct *, 
elf_vrregset_t *vrregs);
 } while (0)
 #endif /* __powerpc64__ */
 
-#ifdef __KERNEL__
-
 #ifdef __powerpc64__
 # define SET_PERSONALITY(ex, ibcs2)\
 do {   \
@@ -272,8 +268,6 @@ do {
\
 # define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX)
 #endif /* __powerpc64__ */
 
-#endif /* __KERNEL__ */
-
 extern int dcache_bsize;
 extern int icache_bsize;
 extern int ucache_bsize;
@@ -285,6 +279,8 @@ extern int arch_setup_additional_pages(struct linux_binprm 
*bprm,
   int executable_stack);
 #define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b);
 
+#endif /* __KERNEL__ */
+
 /*
  * The requirements here are:
  * - keep the final alignment of sp (sp  0xf)
@@ -422,6 +418,8 @@ do {
\
 /* Keep this the last entry.  */
 #define R_PPC64_NUM107
 
+#ifdef  __KERNEL__
+
 #ifdef CONFIG_SPU_BASE
 /* Notes used in ET_CORE. Note name is SPU/fd/filename. */
 #define NT_SPU 1
@@ -430,4 +428,6 @@ do {
\
 
 #endif /* CONFIG_SPU_BASE */
 
+#endif /* __KERNEL */
+
 #endif /* _ASM_POWERPC_ELF_H */

___
Linuxppc-dev mailing list
Linuxppc-dev@ozlabs.org
https://ozlabs.org/mailman/listinfo/linuxppc-dev


[PATCH] drivers/macintosh/smu.c: improve error handling

2008-06-23 Thread Julia Lawall
From: Julia Lawall [EMAIL PROTECTED]

This patch makes two changes:
* As noted by Akinobu Mita in patch
b1fceac2b9e04d278316b2faddf276015fc06e3b, alloc_bootmem never returns NULL
and always returns a zeroed region of memory.  Thus the error checking code
and memset after the call to alloc_bootmem are not necessary.
* The old error handling code consisted of setting a global variable to
NULL and returning an error code, which could cause previously allocated
resources never to be freed.  The patch adds calls to appropriate resource
deallocation functions.
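
For reference, a minimal sketch of the unwind pattern the patch moves to
(the resource names and helpers here are purely illustrative, not the SMU
code itself):

static int __init example_init(void)
{
	struct thing *a, *b;
	int ret = 0;

	a = get_thing_a();		/* hypothetical helper */
	if (!a)
		return -ENODEV;

	b = get_thing_b();		/* hypothetical helper */
	if (!b) {
		ret = -ENXIO;
		goto fail_a;		/* release only what we already hold */
	}

	return 0;

fail_a:
	put_thing_a(a);
	return ret;
}

Each failure label releases resources in the reverse order they were
acquired, so nothing allocated earlier can leak on a later error.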

 drivers/macintosh/smu.c |   34 +++---
 1 file changed, 19 insertions(+), 15 deletions(-)

Signed-off-by: Julia Lawall [EMAIL PROTECTED]
---

diff -u -p a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -474,6 +474,7 @@ int __init smu_init (void)
 {
struct device_node *np;
const u32 *data;
+   int ret = 0;
 
 np = of_find_node_by_type(NULL, smu);
 if (np == NULL)
@@ -483,16 +484,11 @@ int __init smu_init (void)
 
if (smu_cmdbuf_abs == 0) {
printk(KERN_ERR SMU: Command buffer not allocated !\n);
-   of_node_put(np);
-   return -EINVAL;
+   ret = -EINVAL;
+   goto fail_np;
}
 
smu = alloc_bootmem(sizeof(struct smu_device));
-   if (smu == NULL) {
-   of_node_put(np);
-   return -ENOMEM;
-   }
-   memset(smu, 0, sizeof(*smu));
 
spin_lock_init(smu-lock);
INIT_LIST_HEAD(smu-cmd_list);
@@ -510,14 +506,14 @@ int __init smu_init (void)
smu-db_node = of_find_node_by_name(NULL, smu-doorbell);
if (smu-db_node == NULL) {
printk(KERN_ERR SMU: Can't find doorbell GPIO !\n);
-   goto fail;
+   ret = -ENXIO;
+   goto fail_bootmem;
}
data = of_get_property(smu-db_node, reg, NULL);
if (data == NULL) {
-   of_node_put(smu-db_node);
-   smu-db_node = NULL;
printk(KERN_ERR SMU: Can't find doorbell GPIO address !\n);
-   goto fail;
+   ret = -ENXIO;
+   goto fail_db_node;
}
 
/* Current setup has one doorbell GPIO that does both doorbell
@@ -551,7 +547,8 @@ int __init smu_init (void)
smu-db_buf = ioremap(0x8000860c, 0x1000);
if (smu-db_buf == NULL) {
printk(KERN_ERR SMU: Can't map doorbell buffer pointer !\n);
-   goto fail;
+   ret = -ENXIO;
+   goto fail_msg_node;
}
 
/* U3 has an issue with NAP mode when issuing SMU commands */
@@ -562,10 +559,17 @@ int __init smu_init (void)
sys_ctrler = SYS_CTRLER_SMU;
return 0;
 
- fail:
+fail_msg_node:
+   if (smu-msg_node)
+   of_node_put(smu-msg_node);
+fail_db_node:
+   of_node_put(smu-db_node);
+fail_bootmem:
+   free_bootmem((unsigned long)smu, sizeof(struct smu_device));
smu = NULL;
-   return -ENXIO;
-
+fail_np:
+   of_node_put(np);
+   return ret;
 }
 
 
___
Linuxppc-dev mailing list
Linuxppc-dev@ozlabs.org
https://ozlabs.org/mailman/listinfo/linuxppc-dev


Re: [2.6 patch] drivers/macintosh/: possible cleanups

2008-06-23 Thread Geert Uytterhoeven
On Mon, 23 Jun 2008, Adrian Bunk wrote:
 On Tue, Jun 10, 2008 at 10:21:25AM +1000, Stephen Rothwell wrote:
  On Tue, 10 Jun 2008 01:23:12 +0300 Adrian Bunk [EMAIL PROTECTED] wrote:
  
   +++ b/drivers/macintosh/adbhid.c
   @@ -75,7 +75,7 @@ static struct notifier_block adbhid_adb_notifier = {
#define ADB_KEY_POWER_OLD0x7e
#define ADB_KEY_POWER0x7f

   -u16 adb_to_linux_keycodes[128] = {
   +static u16 adb_to_linux_keycodes[128] = {
  
  This could be const as well.
 
 Updated patch below.
 
 cu
 Adrian
 
 --  snip  --
 
 This patch contains the following possible cleanups:
 - make the following needlessly global code static:
   - adb.c: adb_controller
   - adb.c: adb_init()
   - adbhid.c: adb_to_linux_keycodes[]  (also make it const)
   - via-pmu68k.c: backlight_level
   - via-pmu68k.c: backlight_enabled
 - remove the following unused code:
   - via-pmu68k.c: sleep_notifier_list
 
 Signed-off-by: Adrian Bunk [EMAIL PROTECTED]

Acked-by: Geert Uytterhoeven [EMAIL PROTECTED]

I assume Paulus will take it, as he controls more Macs?

Gr{oetje,eeting}s,

Geert

--
Geert Uytterhoeven -- There's lots of Linux beyond ia32 -- [EMAIL PROTECTED]

In personal conversations with technical people, I call myself a hacker. But
when I'm talking to journalists I just say programmer or something like that.
-- Linus Torvalds
___
Linuxppc-dev mailing list
Linuxppc-dev@ozlabs.org
https://ozlabs.org/mailman/listinfo/linuxppc-dev


[PATCH V2] ibm_newemac: Fixes kernel crashes when speed of cable connected changes

2008-06-23 Thread Stefan Roese
From: Sathya Narayanan [EMAIL PROTECTED]

The descriptor pointers were not initialized to NULL, so they pointed to
random, completely invalid addresses. This fix initializes the descriptor
pointers to NULL and clears the valid descriptors during the clean-ring
operation.

Signed-off-by: Sathya Narayanan [EMAIL PROTECTED]
Signed-off-by: Stefan Roese [EMAIL PROTECTED]
---
Changed since V1:
- Fixed off-by-one error in for loops

 drivers/net/ibm_newemac/core.c |6 +-
 1 files changed, 5 insertions(+), 1 deletions(-)

diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 5d2108c..6dfc2c9 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -1025,7 +1025,7 @@ static void emac_clean_tx_ring(struct emac_instance *dev)
int i;
 
	for (i = 0; i < NUM_TX_BUFF; ++i) {
-		if (dev->tx_skb[i]) {
+		if (dev->tx_skb[i] && dev->tx_desc[i].data_ptr) {
			dev_kfree_skb(dev->tx_skb[i]);
			dev->tx_skb[i] = NULL;
			if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
@@ -2719,6 +2719,10 @@ static int __devinit emac_probe(struct of_device *ofdev,
/* Clean rings */
	memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
	memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
+	for (i = 0; i < NUM_TX_BUFF; i++)
+		dev->tx_skb[i] = NULL;
+	for (i = 0; i < NUM_RX_BUFF; i++)
+		dev->rx_skb[i] = NULL;
 
/* Attach to ZMII, if needed */
if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) 
-- 
1.5.6

___
Linuxppc-dev mailing list
Linuxppc-dev@ozlabs.org
https://ozlabs.org/mailman/listinfo/linuxppc-dev


Re: [PATCH] [RFC] powerpc: Xilinx: adding virtex5 powerpc 440 support

2008-06-23 Thread Josh Boyer
On Mon, 23 Jun 2008 10:28:53 -0600
John Linn [EMAIL PROTECTED] wrote:

 This is an early patch against the mainline that I wanted to start
 getting comments on.
 
 I would appreciate any feedback.
 
 I already see a few things that I need to look into myself.
 
 1.I'm not sure why we need to disable the interrupts in the
 bootstrap loader?
 2.I see some SecretLab copyright in new files that might be just a
 cut/paste type error.
 3.I don't see the cputable.c up to date with the Xilinx specific
 440.

Your patch got pretty word-wrapped because of the forward.  I'll give
it a go though.

 diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
 index 3934e26..94adfe1 100644
 --- a/arch/powerpc/Kconfig
 +++ b/arch/powerpc/Kconfig
 @@ -483,6 +483,81 @@ config SECCOMP
 
 If unsure, say Y. Only embedded should say N here.
 
 +config WANT_DEVICE_TREE
 + bool
 + default n

Not sure why you added this back.  We removed it entirely recently.

 +config BUILD_RAW_IMAGE
 + bool Build firmware-independent image
 + select WANT_DEVICE_TREE
 + help
 +   If this is enabled, a firmware independent raw image will be
 +   built, as zImage.raw.  This requires a completely filled-in
 +   device tree, with the following labels:
 +
 +   mem_size_cells: on /#address-cells
 +   memsize: on the size portion of /memory/reg
 +   timebase: on the boot CPU's timebase property
 +
 +config DEVICE_TREE
 + string Static device tree source file
 + depends on WANT_DEVICE_TREE
 + help
 +   This specifies the device tree source (.dts) file to be
 +   compiled and included when building the bootwrapper.  If a
 +   relative filename is given, then it will be relative to
 +   arch/powerpc/boot/dts.  If you are not using the bootwrapper,
 +   or do not need to build a dts into the bootwrapper, this
 +   field is ignored.
 +
 +   For example, this is required when building a cuImage target
 +   for an older U-Boot, which cannot pass a device tree itself.
 +   Such a kernel will not work with a newer U-Boot that tries to
 +   pass a device tree (unless you tell it not to).  If your
 U-Boot
 +   does not mention a device tree in help bootm, then use the
 +   cuImage target and specify a device tree here.  Otherwise, use
 +   the uImage target and leave this field blank.

This doesn't seem like it's needed any longer either.  I'm confused why
these changes were added back in (and similarly the Makefile changes).

 +config COMPRESSED_DEVICE_TREE
 + bool Use compressed device tree
 + depends on XILINX_VIRTEX
 + depends on WANT_DEVICE_TREE
 + help
 +   In Xilinx FPGAs, the hardware can change quite dramatically
 while
 +   still running the same kernel.  In this case and other similar
 +   ones, it is preferable to associate the device tree with a
 +   particular build of the hardware design.  This configuration
 +   option assumes that the device tree blob has been compressed
 and
 +   stored in Block RAM in the FPGA design.  Typically, such a
 block
 +   ram is available in order to provide a bootloop or other code
 +   close to the reset vector at the top of the address space.  By
 +   default, the parameter options associated with this
 configuration
 +   assumes that exactly one block ram (2KB) of storage is
 available,
 +   which should be sufficient for most designs.  If necessary in
 a
 +   particular design, due to boot code requirement or a large
 number
 +   of devices, this address (and the corresponding parameters in
 the
 +   EDK design) must be modified.
 +
 +   Note that in some highly area constrained designs, no block
 rams
 +   may be available in the design, and some other mechanism may
 be
 +   used to hold the processor in reset while external memory is
 +   initialized with processor code.  In such cases, that
 mechanism
 +   should also be used to load the device tree at an appropriate
 +   location, and the parameters associated with this
 configuration
 +   option should be modified to point to that location in
 external
 +   memory.
 +
 +config COMPRESSED_DTB_START
 + hex Start of compressed device tree
 + depends on COMPRESSED_DEVICE_TREE
 + default 0xf800
 +
 +config COMPRESSED_DTB_SIZE
 + hex Size of compressed device tree
 + depends on COMPRESSED_DEVICE_TREE
 + default 0x800
 +
 +
  endmenu
 
  config ISA_DMA_API

snip

 diff --git a/arch/powerpc/boot/dts/ml507.dts
 b/arch/powerpc/boot/dts/ml507.dts
 new file mode 100644
 index 000..43d8535
 --- /dev/null
 +++ b/arch/powerpc/boot/dts/ml507.dts
 @@ -0,0 +1,254 @@
 +/*
 + * (C) Copyright 2007-2008 Xilinx, Inc.
 + * (C) Copyright 2007 Michal Simek
 + *
 + * Michal SIMEK [EMAIL PROTECTED]
 + *
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public 

Re: [PATCH 17/19][v2] ibmveth: enable driver for CMO

2008-06-23 Thread Robert Jennings
Fixed patch formatting.

Enable ibmveth for Cooperative Memory Overcommitment (CMO).  For this driver
it means calculating a desired amount of IO memory based on the current MTU
and updating this value with the bus when MTU changes occur.  Because DMA
mappings can fail, we have added a bounce buffer for temporary cases where
the driver can not map IO memory for the buffer pool.

The following changes are made to enable the driver for CMO:
 * DMA mapping errors will not result in error messages if entitlement has
   been exceeded and resources were not available.
 * DMA mapping errors are handled gracefully, ibmveth_replenish_buffer_pool()
   is corrected to check the return from dma_map_single and fail gracefully.
 * The driver defines a get_io_entitlement function so that it can operate
   in a CMO environment.
 * When the MTU is changed, the driver will update the device IO entitlement

Signed-off-by: Robert Jennings [EMAIL PROTECTED]
Signed-off-by: Brian King [EMAIL PROTECTED]
Signed-off-by: Santiago Leon [EMAIL PROTECTED]

---
 drivers/net/ibmveth.c |  169 --
 drivers/net/ibmveth.h |5 +
 2 files changed, 140 insertions(+), 34 deletions(-)

Index: b/drivers/net/ibmveth.c
===
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -33,6 +33,7 @@
 */
 
 #include linux/module.h
+#include linux/moduleparam.h
 #include linux/types.h
 #include linux/errno.h
 #include linux/ioport.h
@@ -52,7 +53,9 @@
 #include asm/hvcall.h
 #include asm/atomic.h
 #include asm/vio.h
+#include asm/iommu.h
 #include asm/uaccess.h
+#include asm/firmware.h
 #include linux/seq_file.h
 
 #include ibmveth.h
@@ -94,8 +97,10 @@ static void ibmveth_proc_register_adapte
 static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
 static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
+static unsigned long ibmveth_get_io_entitlement(struct vio_dev *vdev);
 static struct kobj_type ktype_veth_pool;
 
+
 #ifdef CONFIG_PROC_FS
 #define IBMVETH_PROC_DIR ibmveth
 static struct proc_dir_entry *ibmveth_proc_dir;
@@ -226,16 +231,16 @@ static void ibmveth_replenish_buffer_poo
u32 i;
u32 count = pool-size - atomic_read(pool-available);
u32 buffers_added = 0;
+   struct sk_buff *skb;
+   unsigned int free_index, index;
+   u64 correlator;
+   unsigned long lpar_rc;
+   dma_addr_t dma_addr;
 
mb();
 
for(i = 0; i  count; ++i) {
-   struct sk_buff *skb;
-   unsigned int free_index, index;
-   u64 correlator;
union ibmveth_buf_desc desc;
-   unsigned long lpar_rc;
-   dma_addr_t dma_addr;
 
skb = alloc_skb(pool-buff_size, GFP_ATOMIC);
 
@@ -255,6 +260,9 @@ static void ibmveth_replenish_buffer_poo
dma_addr = dma_map_single(adapter-vdev-dev, skb-data,
pool-buff_size, DMA_FROM_DEVICE);
 
+   if (dma_mapping_error(dma_addr))
+   goto failure;
+
pool-free_map[free_index] = IBM_VETH_INVALID_MAP;
pool-dma_addr[index] = dma_addr;
pool-skbuff[index] = skb;
@@ -267,20 +275,9 @@ static void ibmveth_replenish_buffer_poo
 
lpar_rc = h_add_logical_lan_buffer(adapter-vdev-unit_address, 
desc.desc);
 
-   if(lpar_rc != H_SUCCESS) {
-   pool-free_map[free_index] = index;
-   pool-skbuff[index] = NULL;
-   if (pool-consumer_index == 0)
-   pool-consumer_index = pool-size - 1;
-   else
-   pool-consumer_index--;
-   dma_unmap_single(adapter-vdev-dev,
-   pool-dma_addr[index], pool-buff_size,
-   DMA_FROM_DEVICE);
-   dev_kfree_skb_any(skb);
-   adapter-replenish_add_buff_failure++;
-   break;
-   } else {
+   if (lpar_rc != H_SUCCESS)
+   goto failure;
+   else {
buffers_added++;
adapter-replenish_add_buff_success++;
}
@@ -288,6 +285,24 @@ static void ibmveth_replenish_buffer_poo
 
mb();
atomic_add(buffers_added, (pool-available));
+   return;
+
+failure:
+   pool-free_map[free_index] = index;
+   pool-skbuff[index] = NULL;
+   if (pool-consumer_index == 0)
+   pool-consumer_index = pool-size - 1;
+   else
+   pool-consumer_index--;
+   if (!dma_mapping_error(dma_addr))
+   dma_unmap_single(adapter-vdev-dev,
+pool-dma_addr[index], 

Re: [PATCH 16/19][v2] ibmveth: Automatically enable larger rx buffer pools for larger mtu

2008-06-23 Thread Robert Jennings
From: Santiago Leon [EMAIL PROTECTED]

Fixed patch formatting.

Activates larger rx buffer pools when the MTU is changed to a larger
value.  This patch de-activates the large rx buffer pools when the MTU
changes to a smaller value.

Signed-off-by: Santiago Leon [EMAIL PROTECTED]
Signed-off-by: Robert Jennings [EMAIL PROTECTED]

---

 drivers/net/ibmveth.c |   20 +---
 1 file changed, 13 insertions(+), 7 deletions(-)

Index: b/drivers/net/ibmveth.c
===
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -1054,7 +1054,6 @@ static int ibmveth_change_mtu(struct net
 {
struct ibmveth_adapter *adapter = dev-priv;
int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
-   int reinit = 0;
int i, rc;
 
if (new_mtu  IBMVETH_MAX_MTU)
@@ -1067,15 +1066,21 @@ static int ibmveth_change_mtu(struct net
if (i == IbmVethNumBufferPools)
return -EINVAL;
 
+   /* Deactivate all the buffer pools so that the next loop can activate
+  only the buffer pools necessary to hold the new MTU */
+   for (i = 0; i  IbmVethNumBufferPools; i++)
+   if (adapter-rx_buff_pool[i].active) {
+   ibmveth_free_buffer_pool(adapter,
+adapter-rx_buff_pool[i]);
+   adapter-rx_buff_pool[i].active = 0;
+   }
+
/* Look for an active buffer pool that can hold the new MTU */
for(i = 0; iIbmVethNumBufferPools; i++) {
-   if (!adapter-rx_buff_pool[i].active) {
-   adapter-rx_buff_pool[i].active = 1;
-   reinit = 1;
-   }
+   adapter-rx_buff_pool[i].active = 1;
 
if (new_mtu_oh  adapter-rx_buff_pool[i].buff_size) {
-   if (reinit  netif_running(adapter-netdev)) {
+   if (netif_running(adapter-netdev)) {
adapter-pool_config = 1;
ibmveth_close(adapter-netdev);
adapter-pool_config = 0;
@@ -1402,14 +1407,15 @@ const char * buf, size_t count)
return -EPERM;
}
 
-   pool-active = 0;
if (netif_running(netdev)) {
adapter-pool_config = 1;
ibmveth_close(netdev);
+   pool-active = 0;
adapter-pool_config = 0;
if ((rc = ibmveth_open(netdev)))
return rc;
}
+   pool-active = 0;
}
} else if (attr == veth_num_attr) {
if (value = 0 || value  IBMVETH_MAX_POOL_COUNT)
___
Linuxppc-dev mailing list
Linuxppc-dev@ozlabs.org
https://ozlabs.org/mailman/listinfo/linuxppc-dev


Re: [PATCH REPOST #2] IB/ehca: In case of lost interrupts, trigger EOI to reenable interrupts

2008-06-23 Thread Roland Dreier
ok, I queued this for 2.6.27.  thanks everyone
___
Linuxppc-dev mailing list
Linuxppc-dev@ozlabs.org
https://ozlabs.org/mailman/listinfo/linuxppc-dev


Re: [PATCH 12/19] powerpc: vio bus support for CMO

2008-06-23 Thread Robert Jennings
* Stephen Rothwell ([EMAIL PROTECTED]) wrote:
 Hi Robert,
 
 Firstly, can all this new stuff be ifdef'ed out if not needed as the
 vio infrastructure is also used on legacy iSeries and this adds quite a
 bit of stuff that won't ever be used there.

I've changed the patch to ifdef out CMO for legacy iSeries.  This should
keep things cleaner.

 On Thu, 12 Jun 2008 17:19:59 -0500 Robert Jennings [EMAIL PROTECTED] wrote:
 
  +static int vio_cmo_num_OF_devs(void)
  +{
  +   struct device_node *node_vroot;
  +   int count = 0;
  +
  +   /*
  +* Count the number of vdevice entries with an
  +* ibm,my-dma-window OF property
  +*/
  +   node_vroot = of_find_node_by_name(NULL, vdevice);
  +   if (node_vroot) {
  +   struct device_node *of_node;
  +   struct property *prop;
  +
  +   for (of_node = node_vroot-child; of_node != NULL;
  +   of_node = of_node-sibling) {
 
 Use:
   for_each_child_of_node(node_vroot, of_node) {

Fixed.
___
Linuxppc-dev mailing list
Linuxppc-dev@ozlabs.org
https://ozlabs.org/mailman/listinfo/linuxppc-dev


Re: [PATCH 12/19][v2] powerpc: vio bus support for CMO

2008-06-23 Thread Robert Jennings
From: Robert Jennings [EMAIL PROTECTED]

Enable bus level entitled memory accounting for Cooperative Memory
Overcommitment (CMO) environments.  The normal code path should not
be affected.

The following changes are made to the VIO bus layer for CMO:
 * add IO memory accounting per device structure.
 * add IO memory entitlement query function to driver structure (see the
   sketch after this list).
 * during vio bus probe, if CMO is enabled, check that driver has
   memory entitlement query function defined.  Fail if function not defined.
 * fail to register driver if io entitlement function not defined.
 * create set of dma_ops at vio level for CMO that will track allocations
   and return DMA failures once entitlement is reached.  Entitlement will be
   limited by overall system entitlement.  Devices will have a reserved
   quantity of memory that is guaranteed; the rest can be used as available.
 * expose entitlement, current allocation, desired allocation, and the
   allocation error counter for devices to the user through sysfs
 * provide mechanism for changing a device's desired entitlement at run time
   for devices as an exported function and sysfs tunable
 * track any DMA failures for entitled IO memory for each vio device.
 * check entitlement against available system entitlement on device add
 * track entitlement metrics (high water mark, current usage)
 * provide function to reset high water mark
 * provide minimum and desired entitlement numbers at a bus level
 * provide drivers with a minimum guaranteed entitlement
 * balance available entitlement between devices to satisfy their needs
 * handle system entitlement changes and device hotplug
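
A rough sketch of the driver-side hook described above (the callback name
follows this series' ibmveth patch; the body and the numbers in it are
illustrative only):

static unsigned long example_get_io_entitlement(struct vio_dev *vdev)
{
	/* Report the worst-case IO memory, in bytes, this device needs
	 * mapped at any one time -- e.g. derived from its ring and
	 * buffer sizes. */
	return 16 * 4096;	/* hypothetical: sixteen 4K buffers */
}

On a CMO system the vio bus probe would refuse to bind a driver that does
not supply such a callback, since it cannot otherwise size the device's
reserved entitlement.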

Signed-off-by: Robert Jennings [EMAIL PROTECTED]

---
 arch/powerpc/kernel/vio.c | 1030 ++
 include/asm-powerpc/vio.h |   30 +
 2 files changed, 1052 insertions(+), 8 deletions(-)

Index: b/arch/powerpc/kernel/vio.c
===
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -1,11 +1,12 @@
 /*
  * IBM PowerPC Virtual I/O Infrastructure Support.
  *
- *Copyright (c) 2003-2005 IBM Corp.
+ *Copyright (c) 2003,2008 IBM Corp.
  * Dave Engebretsen [EMAIL PROTECTED]
  * Santiago Leon [EMAIL PROTECTED]
  * Hollis Blanchard [EMAIL PROTECTED]
  * Stephen Rothwell
+ * Robert Jennings [EMAIL PROTECTED]
  *
  *  This program is free software; you can redistribute it and/or
  *  modify it under the terms of the GNU General Public License
@@ -46,6 +47,986 @@ static struct vio_dev vio_bus_device  = 
.dev.bus = vio_bus_type,
 };
 
+#ifdef CONFIG_PPC_PSERIES
+/**
+ * vio_cmo_pool - A pool of IO memory for CMO use
+ *
+ * @size: The size of the pool in bytes
+ * @free: The amount of free memory in the pool
+ */
+struct vio_cmo_pool {
+   size_t size;
+   size_t free;
+};
+
+/* How many ms to delay queued balance work */
+#define VIO_CMO_BALANCE_DELAY 100
+
+/* Portion out IO memory to CMO devices by this chunk size */
+#define VIO_CMO_BALANCE_CHUNK 131072
+
+/**
+ * vio_cmo_dev_entry - A device that is CMO-enabled and requires entitlement
+ *
+ * @vio_dev: struct vio_dev pointer
+ * @list: pointer to other devices on bus that are being tracked
+ */
+struct vio_cmo_dev_entry {
+   struct vio_dev *viodev;
+   struct list_head list;
+};
+
+/**
+ * vio_cmo - VIO bus accounting structure for CMO entitlement
+ *
+ * @lock: spinlock for entire structure
+ * @balance_q: work queue for balancing system entitlement
+ * @device_list: list of CMO-enabled devices requiring entitlement
+ * @entitled: total system entitlement in bytes
+ * @reserve: pool of memory from which devices reserve entitlement, incl. spare
+ * @excess: pool of excess entitlement not needed for device reserves or spare
+ * @spare: IO memory for device hotplug functionality
+ * @min: minimum necessary for system operation
+ * @desired: desired memory for system operation
+ * @curr: bytes currently allocated
+ * @high: high water mark for IO data usage
+ */
+struct vio_cmo {
+   spinlock_t lock;
+   struct delayed_work balance_q;
+   struct list_head device_list;
+   size_t entitled;
+   struct vio_cmo_pool reserve;
+   struct vio_cmo_pool excess;
+   size_t spare;
+   size_t min;
+   size_t desired;
+   size_t curr;
+   size_t high;
+} vio_cmo;
+
+/**
+ * vio_cmo_OF_devices - Count the number of OF devices that have DMA windows
+ */
+static int vio_cmo_num_OF_devs(void)
+{
+   struct device_node *node_vroot;
+   int count = 0;
+
+   /*
+* Count the number of vdevice entries with an
+* ibm,my-dma-window OF property
+*/
+   node_vroot = of_find_node_by_name(NULL, vdevice);
+   if (node_vroot) {
+   struct device_node *of_node;
+   struct property *prop;
+
+   for_each_child_of_node(node_vroot, of_node) {
+   prop = 

RE: [PATCH] [RFC] powerpc: Xilinx: adding virtex5 powerpc 440 support

2008-06-23 Thread Stephen Neuendorffer
  diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
  index 3934e26..94adfe1 100644
  --- a/arch/powerpc/Kconfig
  +++ b/arch/powerpc/Kconfig
  @@ -483,6 +483,81 @@ config SECCOMP
 
If unsure, say Y. Only embedded should say N here.
 
  +config WANT_DEVICE_TREE
  +   bool
  +   default n
 
 Not sure why you added this back.  We removed it entirely recently.

This should go.  Generally speaking, we need to clean up the 405 and 440
code to play nice with the new boot code structure.
 
  +config COMPRESSED_DEVICE_TREE
  +   bool Use compressed device tree
  +   depends on XILINX_VIRTEX
  +   depends on WANT_DEVICE_TREE
  +   help
  + In Xilinx FPGAs, the hardware can change quite dramatically
  while
  + still running the same kernel.  In this case and other similar
  + ones, it is preferable to associate the device tree with a
  + particular build of the hardware design.  This configuration
  + option assumes that the device tree blob has been compressed
  and
  + stored in Block RAM in the FPGA design.  Typically, such a
  block
  + ram is available in order to provide a bootloop or other code
  + close to the reset vector at the top of the address space.  By
  + default, the parameter options associated with this
  configuration
  + assumes that exactly one block ram (2KB) of storage is
  available,
  + which should be sufficient for most designs.  If necessary in
  a
  + particular design, due to boot code requirement or a large
  number
  + of devices, this address (and the corresponding parameters in
  the
  + EDK design) must be modified.
  +
  + Note that in some highly area constrained designs, no block
  rams
  + may be available in the design, and some other mechanism may
  be
  + used to hold the processor in reset while external memory is
  + initialized with processor code.  In such cases, that
  mechanism
  + should also be used to load the device tree at an appropriate
  + location, and the parameters associated with this
  configuration
  + option should be modified to point to that location in
  external
  + memory.
  +
  +config COMPRESSED_DTB_START
  +   hex Start of compressed device tree
  +   depends on COMPRESSED_DEVICE_TREE
  +   default 0xf800
  +
  +config COMPRESSED_DTB_SIZE
  +   hex Size of compressed device tree
  +   depends on COMPRESSED_DEVICE_TREE
  +   default 0x800
  +
  +

This needs to get added for 405 too.  More likely it's easier to generate
a separate patch for this.

  +
  +/ {
  +   #address-cells = 1;
 
 This doesn't look right.  440 has 36-bit physical addresses, so
 #address-cells should be 2.

In Virtex FPGAs, the 440 is only hooked up through a 32-bit bus.

  +   #size-cells = 1;
  +   compatible = xlnx,virtex;
  +   dcr-parent = ppc440_virtex5_0;
  +   model = testing;
  +   chosen {
  +   bootargs = console=ttyS0 ip=on root=/dev/ram;
  +   linux,stdout-path = /[EMAIL PROTECTED]/[EMAIL PROTECTED];
  +   } ;
  +   cpus {
  +   #address-cells = 1;
  +   #cpus = 1;
  +   #size-cells = 0;
  +   ppc440_virtex5_0: [EMAIL PROTECTED] {
  +   #address-cells = 1;
  +   #size-cells = 1;
 
 You don't need those in the cpu node itself.

I'll take care of removing them.

  +   clock-frequency = 17d78400;
  +   compatible = PowerPC,440, ibm,ppc440;
  +   d-cache-line-size = 20;
  +   d-cache-size = 8000;
  +   dcr-access-method = native;
  +   dcr-controller ;
  +   device_type = cpu;
  +   i-cache-line-size = 20;
  +   i-cache-size = 8000;
  +   model = PowerPC,440;
  +   reg = 0;
  +   timebase-frequency = 17d78400;
  +   xlnx,apu-control = 1;
  +   xlnx,apu-udi-0 = c07701;
  +   xlnx,apu-udi-1 = c47701;
  +   xlnx,apu-udi-10 = 0;
  +   xlnx,apu-udi-11 = 0;
  +   xlnx,apu-udi-12 = 0;
  +   xlnx,apu-udi-13 = 0;
  +   xlnx,apu-udi-14 = 0;
  +   xlnx,apu-udi-15 = 0;
  +   xlnx,apu-udi-2 = 0;
  +   xlnx,apu-udi-3 = 0;
  +   xlnx,apu-udi-4 = 0;
  +   xlnx,apu-udi-5 = 0;
  +   xlnx,apu-udi-6 = 0;
  +   xlnx,apu-udi-7 = 0;
  +   xlnx,apu-udi-8 = 0;
  +   xlnx,apu-udi-9 = 0;
  +   xlnx,dcr-autolock-enable = 1;
  +   xlnx,dcu-rd-ld-cache-plb-prio = 0;
  +   xlnx,dcu-rd-noncache-plb-prio = 0;
  +   xlnx,dcu-rd-touch-plb-prio = 0;
  +   xlnx,dcu-rd-urgent-plb-prio = 0;
  +   xlnx,dcu-wr-flush-plb-prio = 0;
  +   

RE: [PATCH] [RFC] powerpc: Xilinx: adding virtex5 powerpc 440 support

2008-06-23 Thread John Linn
Thanks for the comments Josh. 

I commented on the others that Stephen didn't comment on.

 diff --git a/arch/powerpc/boot/io.h b/arch/powerpc/boot/io.h
 index ccaedae..ec57ec9 100644
 --- a/arch/powerpc/boot/io.h
 +++ b/arch/powerpc/boot/io.h
 @@ -99,4 +99,11 @@ static inline void barrier(void)
  asm volatile( : : : memory);
  }
 
 +static inline void disable_irq(void)
 +{
 +int dummy;
  +asm volatile("mfmsr %0; rlwinm %0, %0, 0, ~(1<<15); mtmsr %0" :
  + "=r" (dummy) : : "memory");
 +}

As you said, figuring out why this is here and at least adding a
comment would be good.

Agreed.


  #endif /* _IO_H */
 diff --git a/arch/powerpc/boot/virtex.c b/arch/powerpc/boot/virtex.c
 new file mode 100644
 index 000..5d807c6
 --- /dev/null
 +++ b/arch/powerpc/boot/virtex.c
 @@ -0,0 +1,246 @@
 +/*
 + * Old U-boot compatibility for Walnut
 + *
 + * Author: Josh Boyer [EMAIL PROTECTED]

You can remove this.  I'm pretty sure I didn't write this file :)


Agreed ;)

 + *
 + * Copyright 2007 IBM Corporation
 + *   Based on cuboot-83xx.c, which is:
 + * Copyright (c) 2007 Freescale Semiconductor, Inc.
 + *
 + * This program is free software; you can redistribute it and/or
modify
 it
 + * under the terms of the GNU General Public License version 2 as
 published
 + * by the Free Software Foundation.
 + */
 +
 +#include stddef.h
 +#include stdio.h
 +#include ops.h
 +#include dcr.h
 +#include 4xx.h
 +#include io.h
 +#include reg.h
 +
 +BSS_STACK(4096);
 +
 +#include types.h
 +#include gunzip_util.h
 +#include libfdt.h
 +#include ../../../include/linux/autoconf.h

Ew.  We've never included the CONFIG_ variables from Kconfig in the
bootwrapper.  It's supposed to be independent of the kernel.


I'll try to better understand if we can detect the compressed device
tree and if we really have to disable the APU.

What's the reasoning for being independent of the kernel? Maybe it's
obvious to everyone but me.

 +#define UART_DLL0   /* Out: Divisor Latch Low */
 +#define UART_DLM1   /* Out: Divisor Latch High */
 +#define UART_FCR2   /* Out: FIFO Control Register */
 +#define UART_FCR_CLEAR_RCVR 0x02/* Clear the RCVR FIFO
*/
 +#define UART_FCR_CLEAR_XMIT 0x04/* Clear the XMIT FIFO */
 +#define UART_LCR3   /* Out: Line Control Register */
 +#define UART_MCR4   /* Out: Modem Control Register
 */
 +#define UART_MCR_RTS0x02/* RTS complement */
 +#define UART_MCR_DTR0x01/* DTR complement */
 +#define UART_LCR_DLAB   0x80/* Divisor latch access
bit */
 +#define UART_LCR_WLEN8  0x03/* Wordlength: 8 bits */
 +
 +/* This function is only needed when there is no boot loader to
 +   initialize the UART
 +*/
 +static int virtex_ns16550_console_init(void *devp)
 +{
 +int n;
 +unsigned long reg_phys;
 +unsigned char *regbase;
 +u32 regshift, clk, spd;
 +u16 divisor;
 +
 +n = getprop(devp, virtual-reg, regbase, sizeof(regbase));
 +if (n != sizeof(regbase)) {
 +if (!dt_xlate_reg(devp, 0, reg_phys, NULL))
 +return -1;
 +
 +regbase = (void *)reg_phys + 3;
 +}
 +regshift = 2;
 +
 +n = getprop(devp, current-speed, (void *)spd, sizeof(spd));
 +if (n != sizeof(spd))
 +spd = 9600;
 +
 +/* should there be a default clock rate?*/
 +n = getprop(devp, clock-frequency, (void *)clk, sizeof(clk));
 +if (n != sizeof(clk))
 +return -1;
 +
 +divisor = clk / (16 * spd);
 +
 +/* Access baud rate */
 +out_8(regbase + (UART_LCR  regshift), UART_LCR_DLAB);
 +
 +/* Baud rate based on input clock */
 +out_8(regbase + (UART_DLL  regshift), divisor  0xFF);
 +out_8(regbase + (UART_DLM  regshift), divisor  8);
 +
 +/* 8 data, 1 stop, no parity */
 +out_8(regbase + (UART_LCR  regshift), UART_LCR_WLEN8);
 +
 +/* RTS/DTR */
 +out_8(regbase + (UART_MCR  regshift), UART_MCR_RTS |
 UART_MCR_DTR);
 +
 +/* Clear transmitter and receiver */
 +out_8(regbase + (UART_FCR  regshift),
 +UART_FCR_CLEAR_XMIT |
 UART_FCR_CLEAR_RCVR);
 +return 0;
 +}
 +
 +/* For virtex, the kernel may be loaded without using a bootloader
and
 if so
 +   some UARTs need more setup than is provided in the normal console
 init
 +*/
 +static int virtex_serial_console_init(void)
 +{
 +void *devp;
 +char devtype[MAX_PROP_LEN];
 +char path[MAX_PATH_LEN];
 +
 +devp = finddevice(/chosen);
 +if (devp == NULL)
 +return -1;
 +
 +if (getprop(devp, linux,stdout-path, path, MAX_PATH_LEN)  0)
 {
 +devp = finddevice(path);
 +if (devp == NULL)
 +return -1;
 +
 +if ((getprop(devp, device_type, devtype,
 sizeof(devtype))  0)
 + !strcmp(devtype, serial)
 + (dt_is_compatible(devp, ns16550)))
 +

Re: [PATCH] ibm_newemac: Fixes kernel crashes when speed of cable connected changes

2008-06-23 Thread Benjamin Herrenschmidt
On Mon, 2008-06-23 at 14:54 +0200, Stefan Roese wrote:
 From: Sathya Narayanan [EMAIL PROTECTED]
 
 The descriptor pointers were not initialized to NULL, so they pointed to
 random, completely invalid addresses. This fix initializes the descriptor
 pointers to NULL and clears the valid descriptors during the clean-ring
 operation.
 
 Signed-off-by: Sathya Narayanan [EMAIL PROTECTED]
 Signed-off-by: Stefan Roese [EMAIL PROTECTED]
 ---
  drivers/net/ibm_newemac/core.c |6 +-
  1 files changed, 5 insertions(+), 1 deletions(-)
 
 diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
 index 5d2108c..6dfc2c9 100644
 --- a/drivers/net/ibm_newemac/core.c
 +++ b/drivers/net/ibm_newemac/core.c
 @@ -1025,7 +1025,7 @@ static void emac_clean_tx_ring(struct emac_instance 
 *dev)
   int i;
  
   for (i = 0; i  NUM_TX_BUFF; ++i) {
 - if (dev-tx_skb[i]) {
 + if (dev-tx_skb[i]  dev-tx_desc[i].data_ptr) {

Why change the test above?

   dev_kfree_skb(dev-tx_skb[i]);
   dev-tx_skb[i] = NULL;
   if (dev-tx_desc[i].ctrl  MAL_TX_CTRL_READY)
 @@ -2719,6 +2719,10 @@ static int __devinit emac_probe(struct of_device 
 *ofdev,
   /* Clean rings */
   memset(dev-tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
   memset(dev-rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
 + for (i = 0; i = NUM_TX_BUFF; i++)
 + dev-tx_skb[i] = NULL;
 + for (i = 0; i = NUM_RX_BUFF; i++)
 + dev-rx_skb[i] = NULL;

Why not use memset here too ?

   /* Attach to ZMII, if needed */
   if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) 

___
Linuxppc-dev mailing list
Linuxppc-dev@ozlabs.org
https://ozlabs.org/mailman/listinfo/linuxppc-dev


Re: [PATCH] ibm_newemac: Fixes entry of short packets

2008-06-23 Thread Benjamin Herrenschmidt
On Mon, 2008-06-23 at 14:55 +0200, Stefan Roese wrote:
 From: Sathya Narayanan [EMAIL PROTECTED]
 
 Short packets have to be discarded by the driver, so this patch addresses
 the issue of discarding packets smaller than the Ethernet header size.

You are freeing the skb, why ? Shouldn't we just keep the skb in the
ring for further rx ?

 Signed-off-by: Sathya Narayanan [EMAIL PROTECTED]
 Signed-off-by: Stefan Roese [EMAIL PROTECTED]
 ---
  drivers/net/ibm_newemac/core.c |7 +++
  1 files changed, 7 insertions(+), 0 deletions(-)
 
 diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
 index 6dfc2c9..aa407b2 100644
 --- a/drivers/net/ibm_newemac/core.c
 +++ b/drivers/net/ibm_newemac/core.c
 @@ -1652,6 +1652,13 @@ static int emac_poll_rx(void *param, int budget)
  
   skb_put(skb, len);
   push_packet:
  +	if (skb->len < ETH_HLEN) {
 + dev_kfree_skb(skb);
 + printk(KERN_WARNING %s: short packets dropped\n,
 +dev-ndev-name);
 + ++dev-estats.rx_dropped_stack;
 + goto next;
 + }
   skb-dev = dev-ndev;
   skb-protocol = eth_type_trans(skb, dev-ndev);
   emac_rx_csum(dev, skb, ctrl);

___
Linuxppc-dev mailing list
Linuxppc-dev@ozlabs.org
https://ozlabs.org/mailman/listinfo/linuxppc-dev


Re: [PATCH] ibm_newemac: Fixes memory leak in ibm_newemac ethernet driver

2008-06-23 Thread Benjamin Herrenschmidt
On Mon, 2008-06-23 at 14:55 +0200, Stefan Roese wrote:
 From: Sathya Narayanan [EMAIL PROTECTED]
 
 This patch addresses a memory leak in the driver's transmit queue under
 heavy load. Once the transmit queue becomes full, the driver automatically
 wraps the queue around, and the untransmitted skbs are lost without being
 freed.

This would be a bug. We should stop the queue when full instead.

 Signed-off-by: Sathya Narayanan [EMAIL PROTECTED]
 Signed-off-by: Stefan Roese [EMAIL PROTECTED]
 ---
  drivers/net/ibm_newemac/core.c |7 +++
  1 files changed, 7 insertions(+), 0 deletions(-)
 
 diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
 index aa407b2..ee868b6 100644
 --- a/drivers/net/ibm_newemac/core.c
 +++ b/drivers/net/ibm_newemac/core.c
 @@ -1328,6 +1328,13 @@ static int emac_start_xmit(struct sk_buff *skb, struct 
 net_device *ndev)
  
   DBG2(dev, xmit(%u) %d NL, len, slot);
  
 + if (dev-tx_skb[slot]  dev-tx_desc[slot].data_ptr) {
 + dev_kfree_skb(dev-tx_skb[slot]);
 + dev-tx_skb[slot] = NULL;
 + dev-tx_cnt--;
 + ++dev-estats.tx_dropped;
 + }
 +
   dev-tx_skb[slot] = skb;
   dev-tx_desc[slot].data_ptr = dma_map_single(dev-ofdev-dev,
skb-data, len,

___
Linuxppc-dev mailing list
Linuxppc-dev@ozlabs.org
https://ozlabs.org/mailman/listinfo/linuxppc-dev


Re: [RFC 1/3] powerpc: __copy_tofrom_user tweaked for Cell

2008-06-23 Thread Paul Mackerras
Gunnar von Boehn writes:

 Interesting points.
 Can you help me to understand where the negative effect of DCBZ comes
 from?

In my experience, dcbz slows down the hot-cache case because it adds a
few cycles to the execution time of the inner loop, and on most 64-bit
PowerPC implementations, it doesn't actually help even in the
cold-cache case because the store queue does enough write combining
that the cache doesn't end up reading the line from memory.  I don't
know whether the Cell PPE can do that, but I could believe that it
can't.
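
As a rough illustration of the inner-loop cost being discussed (a sketch
only, not the actual __copy_tofrom_user code; it assumes dst is cache-line
aligned and a 128-byte line as on the Cell PPE):

static inline void copy_line_with_dcbz(void *dst, const void *src)
{
	/* Claim the destination line without reading it from memory... */
	asm volatile("dcbz 0,%0" : : "r" (dst) : "memory");
	/* ...then fill the whole line with the source data. */
	memcpy(dst, src, 128);
}

In the hot-cache case the dcbz is pure overhead per line, which matches the
slowdown described above; whether it wins in the cold-cache case depends on
how well the store queue combines the subsequent stores.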

Paul.
___
Linuxppc-dev mailing list
Linuxppc-dev@ozlabs.org
https://ozlabs.org/mailman/listinfo/linuxppc-dev


Re: [2.6 patch] drivers/macintosh/: possible cleanups

2008-06-23 Thread Stephen Rothwell
On Mon, 23 Jun 2008 20:46:57 +0300 Adrian Bunk [EMAIL PROTECTED] wrote:

 On Tue, Jun 10, 2008 at 10:21:25AM +1000, Stephen Rothwell wrote:
  Hi Adrian,
  
  On Tue, 10 Jun 2008 01:23:12 +0300 Adrian Bunk [EMAIL PROTECTED] wrote:
  
   +++ b/drivers/macintosh/adbhid.c
   @@ -75,7 +75,7 @@ static struct notifier_block adbhid_adb_notifier = {
#define ADB_KEY_POWER_OLD0x7e
#define ADB_KEY_POWER0x7f

   -u16 adb_to_linux_keycodes[128] = {
   +static u16 adb_to_linux_keycodes[128] = {
  
  This could be const as well.
 
 Updated patch below.
 
 cu
 Adrian
 
 
 --  snip  --
 
 
 This patch contains the following possible cleanups:
 - make the following needlessly global code static:
   - adb.c: adb_controller
   - adb.c: adb_init()
   - adbhid.c: adb_to_linux_keycodes[]  (also make it const)
   - via-pmu68k.c: backlight_level
   - via-pmu68k.c: backlight_enabled
 - remove the following unused code:
   - via-pmu68k.c: sleep_notifier_list
 
 Signed-off-by: Adrian Bunk [EMAIL PROTECTED]

Acked-by: Stephen Rothwell [EMAIL PROTECTED]

-- 
Cheers,
Stephen Rothwell[EMAIL PROTECTED]
http://www.canb.auug.org.au/~sfr/


___
Linuxppc-dev mailing list
Linuxppc-dev@ozlabs.org
https://ozlabs.org/mailman/listinfo/linuxppc-dev

Re: [2.6 patch] asm/ptrace.h userspace headers cleanup

2008-06-23 Thread Greg Ungerer

Hi Adrian,

Adrian Bunk wrote:
This patch contains the following cleanups for the asm/ptrace.h 
userspace headers:

- include/asm-generic/Kbuild.asm already lists ptrace.h, remove
  the superfluous listings in the Kbuild files of the following
  architectures:
  - cris
  - frv
  - powerpc
  - x86
- don't expose function prototypes and macros to userspace:
  - arm
  - blackfin
  - cris
  - mn10300
  - parisc
- remove #ifdef CONFIG_'s around #define's:
  - blackfin
  - m68knommu
- sh: AFAIK __SH5__ should work in both kernel and userspace,
  no need to leak CONFIG_SUPERH64 to userspace
- xtensa: cosmetical change to remove empty
#ifndef __ASSEMBLY__ #else #endif
  from the userspace headers

Signed-off-by: Adrian Bunk [EMAIL PROTECTED]


The m68knommu looks ok.

Acked-by: Greg Ungerer [EMAIL PROTECTED]



---

Not changed by this patch is the fact that the following architectures 
have a different struct pt_regs depending on CONFIG_ variables:

- h8300
- m68knommu
- mips

This does not work in userspace.


Let me do something about that for m68knommu.

Regards
Greg




 include/asm-arm/ptrace.h   |6 ++
 include/asm-blackfin/ptrace.h  |6 --
 include/asm-cris/arch-v10/Kbuild   |1 -
 include/asm-cris/arch-v10/ptrace.h |4 
 include/asm-cris/arch-v32/Kbuild   |1 -
 include/asm-cris/arch-v32/ptrace.h |4 
 include/asm-cris/ptrace.h  |4 +++-
 include/asm-frv/Kbuild |1 -
 include/asm-m68knommu/ptrace.h |2 --
 include/asm-mn10300/ptrace.h   |8 ++--
 include/asm-parisc/ptrace.h|4 +++-
 include/asm-powerpc/Kbuild |1 -
 include/asm-sh/ptrace.h|2 +-
 include/asm-x86/Kbuild |1 -
 include/asm-xtensa/ptrace.h|   10 +-
 15 files changed, 32 insertions(+), 23 deletions(-)

fc14755b77cff7af5ff00e938a4c493a669e25cd diff --git a/include/asm-arm/ptrace.h 
b/include/asm-arm/ptrace.h
index 7aaa206..8382b75 100644
--- a/include/asm-arm/ptrace.h
+++ b/include/asm-arm/ptrace.h
@@ -139,8 +139,6 @@ static inline int valid_user_regs(struct pt_regs *regs)
return 0;
 }
 
-#endif	/* __KERNEL__ */

-
 #define pc_pointer(v) \
((v)  ~PCMASK)
 
@@ -153,10 +151,10 @@ extern unsigned long profile_pc(struct pt_regs *regs);

 #define profile_pc(regs) instruction_pointer(regs)
 #endif
 
-#ifdef __KERNEL__

 #define predicate(x)   ((x)  0xf000)
 #define PREDICATE_ALWAYS   0xe000
-#endif
+
+#endif /* __KERNEL__ */
 
 #endif /* __ASSEMBLY__ */
 
diff --git a/include/asm-blackfin/ptrace.h b/include/asm-blackfin/ptrace.h

index b8346cd..a45a80e 100644
--- a/include/asm-blackfin/ptrace.h
+++ b/include/asm-blackfin/ptrace.h
@@ -83,14 +83,14 @@ struct pt_regs {
 #define PTRACE_GETREGS12
 #define PTRACE_SETREGS13   /* ptrace signal  */
 
-#ifdef CONFIG_BINFMT_ELF_FDPIC

 #define PTRACE_GETFDPIC   31
 #define PTRACE_GETFDPIC_EXEC  0
 #define PTRACE_GETFDPIC_INTERP1
-#endif
 
 #define PS_S  (0x0002)
 
+#ifdef __KERNEL__

+
 /* user_mode returns true if only one bit is set in IPEND, other than the
master interrupt enable.  */
 #define user_mode(regs) (!(((regs)-ipend  ~0x10)  (((regs)-ipend  ~0x10) 
- 1)))
@@ -98,6 +98,8 @@ struct pt_regs {
 #define profile_pc(regs) instruction_pointer(regs)
 extern void show_regs(struct pt_regs *);
 
+#endif  /*  __KERNEL__  */

+
 #endif /* __ASSEMBLY__ */
 
 /*

diff --git a/include/asm-cris/arch-v10/Kbuild b/include/asm-cris/arch-v10/Kbuild
index 60e7e1b..7a192e1 100644
--- a/include/asm-cris/arch-v10/Kbuild
+++ b/include/asm-cris/arch-v10/Kbuild
@@ -1,4 +1,3 @@
-header-y += ptrace.h
 header-y += user.h
 header-y += svinto.h
 header-y += sv_addr_ag.h
diff --git a/include/asm-cris/arch-v10/ptrace.h 
b/include/asm-cris/arch-v10/ptrace.h
index fb14c5e..2f464ea 100644
--- a/include/asm-cris/arch-v10/ptrace.h
+++ b/include/asm-cris/arch-v10/ptrace.h
@@ -106,10 +106,14 @@ struct switch_stack {
unsigned long return_ip; /* ip that _resume will return to */
 };
 
+#ifdef __KERNEL__

+
 /* bit 8 is user-mode flag */
 #define user_mode(regs) (((regs)-dccr  0x100) != 0)
 #define instruction_pointer(regs) ((regs)-irp)
 #define profile_pc(regs) instruction_pointer(regs)
 extern void show_regs(struct pt_regs *);
 
+#endif  /*  __KERNEL__  */

+
 #endif
diff --git a/include/asm-cris/arch-v32/Kbuild b/include/asm-cris/arch-v32/Kbuild
index a0ec545..35f2fc4 100644
--- a/include/asm-cris/arch-v32/Kbuild
+++ b/include/asm-cris/arch-v32/Kbuild
@@ -1,3 +1,2 @@
-header-y += ptrace.h
 header-y += user.h
 header-y += cryptocop.h
diff --git a/include/asm-cris/arch-v32/ptrace.h 
b/include/asm-cris/arch-v32/ptrace.h
index 516cc70..41f4e86 100644
--- a/include/asm-cris/arch-v32/ptrace.h
+++ b/include/asm-cris/arch-v32/ptrace.h
@@ -106,9 +106,13 @@ struct switch_stack {
unsigned long return_ip; /* ip that _resume will return to */
 };
 

Re: a question of mpc8313, maybe simple

2008-06-23 Thread jumpingProgrammer

Thanks a lot for answering my question.
But I am a newcomer and not very clear about it.
Do you mean:
1. In this case I do not need to enable DMA when I run menuconfig?
The path is Device Drivers -> DMA.
2. In this case I do not need to add any DMA information to mpc8313erdb.dts?

Now I am reading the PCI drivers in kernel 2.6.20 to understand your meaning.


Scott Wood-2 wrote:
 
 On Mon, Jun 23, 2008 at 05:48:07AM -0700, jumpingProgrammer wrote:
 i am working with a board of mpc8313 using default mpc8313erdb.dts.
 But i do not know how can i correctly use DMA to transport data from
 memory
 to a PCI device.
 i tryed this
 {
  .
  request_irq(IRQ,);
  request_dma(.);
  
 }
 
 In request_irq() , i do not know what is the parameter IRQ.
 
  Use pci_dev->irq.
 
 And , i did not find any words about DMA in mpc8313erdb.dts .
 
 PCI devices can do bus-mastering DMA themselves; request_dma() is for ISA
 devices.
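
 (For illustration, a minimal sketch of what this means in practice on a
 2.6.20-era kernel -- my_isr, buf and len are hypothetical, and headers and
 error handling are trimmed:)

 static int mydev_setup(struct pci_dev *pdev, void *buf, size_t len)
 {
	dma_addr_t dma;
	int err;

	/* The PCI core has already assigned pdev->irq for this device. */
	err = request_irq(pdev->irq, my_isr, IRQF_SHARED, "mydev", pdev);
	if (err)
		return err;

	/* Streaming mapping for a buffer the device will DMA into;
	 * program `dma` into the device's own bus-master engine. */
	dma = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE);
	return 0;
 }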
 
 -Scott
 ___
 Linuxppc-dev mailing list
 Linuxppc-dev@ozlabs.org
 https://ozlabs.org/mailman/listinfo/linuxppc-dev
 
 

-- 
View this message in context: 
http://www.nabble.com/a-question-of-mpc8313%2C-maybe-simple-tp18069331p18081728.html
Sent from the linuxppc-dev mailing list archive at Nabble.com.

___
Linuxppc-dev mailing list
Linuxppc-dev@ozlabs.org
https://ozlabs.org/mailman/listinfo/linuxppc-dev


dtc: Testcase for /include/ directive

2008-06-23 Thread David Gibson
This patch adds a testcase for the /include/ directive.  It assembles
a sample dts file with many /include/ directives at a variety of
different lexical / grammatical contexts.

Signed-off-by: David Gibson [EMAIL PROTECTED]

Index: dtc/tests/include0.dts
===
--- /dev/null   1970-01-01 00:00:00.0 +
+++ dtc/tests/include0.dts  2008-06-24 11:19:42.0 +1000
@@ -0,0 +1 @@
+/include/ include1.dts
Index: dtc/tests/include1.dts
===
--- /dev/null   1970-01-01 00:00:00.0 +
+++ dtc/tests/include1.dts  2008-06-24 11:19:42.0 +1000
@@ -0,0 +1,23 @@
+/dts-v1/;
+
+/include/ include2.dts
+/memreserve/ /include/ include3.dts;
+
+/ {
+   /include/ include4.dts
+   /include/ include5.dts = 0xdeadbeef;
+   prop-str = /include/ include6.dts;
+
+   /include/ include7.dts
+
+   [EMAIL PROTECTED] {
+   linux,phandle = 0x2000;
+   prop-int = 123456789;
+
+   /include/ include8.dts
+   linux,phandle = 0x2001;
+   compatible = subsubnode2, subsubnode;
+   prop-int = 0726746425;
+   };
+   };
+};
Index: dtc/tests/include2.dts
===
--- /dev/null   1970-01-01 00:00:00.0 +
+++ dtc/tests/include2.dts  2008-06-24 11:19:42.0 +1000
@@ -0,0 +1 @@
+/memreserve/ 0xdeadbeef 0x10;
Index: dtc/tests/include3.dts
===
--- /dev/null   1970-01-01 00:00:00.0 +
+++ dtc/tests/include3.dts  2008-06-24 11:19:42.0 +1000
@@ -0,0 +1 @@
+123456789 01
Index: dtc/tests/include4.dts
===
--- /dev/null   1970-01-01 00:00:00.0 +
+++ dtc/tests/include4.dts  2008-06-24 11:19:42.0 +1000
@@ -0,0 +1 @@
+   compatible = test_tree1;
Index: dtc/tests/include5.dts
===
--- /dev/null   1970-01-01 00:00:00.0 +
+++ dtc/tests/include5.dts  2008-06-24 11:19:42.0 +1000
@@ -0,0 +1 @@
+prop-int
Index: dtc/tests/include6.dts
===
--- /dev/null   1970-01-01 00:00:00.0 +
+++ dtc/tests/include6.dts  2008-06-24 11:19:42.0 +1000
@@ -0,0 +1 @@
+hello world
Index: dtc/tests/include7.dts
===
--- /dev/null   1970-01-01 00:00:00.0 +
+++ dtc/tests/include7.dts  2008-06-24 11:19:42.0 +1000
@@ -0,0 +1,9 @@
+   [EMAIL PROTECTED] {
+   compatible = subnode1;
+   prop-int = [deadbeef];
+
+   subsubnode {
+   compatible = subsubnode1, subsubnode;
+   prop-int = 0xdeadbeef;
+   };
+   };
Index: dtc/tests/include8.dts
===
--- /dev/null   1970-01-01 00:00:00.0 +
+++ dtc/tests/include8.dts  2008-06-24 11:19:42.0 +1000
@@ -0,0 +1 @@
[EMAIL PROTECTED] {
\ No newline at end of file
Index: dtc/tests/run_tests.sh
===
--- dtc.orig/tests/run_tests.sh 2008-06-24 11:19:07.0 +1000
+++ dtc/tests/run_tests.sh  2008-06-24 11:19:42.0 +1000
@@ -207,6 +207,10 @@
 run_dtc_test -I dts -O dtb -o dtc_comments-cmp.test.dtb comments-cmp.dts
 run_test dtbs_equal_ordered dtc_comments.test.dtb dtc_comments-cmp.test.dtb
 
+# Check /include/ directive
+run_dtc_test -I dts -O dtb -o includes.test.dtb include0.dts
+run_test dtbs_equal_ordered includes.test.dtb test_tree1.dtb
+
 # Check /incbin/ directive
 run_dtc_test -I dts -O dtb -o incbin.test.dtb incbin.dts
 run_test incbin incbin.test.dtb

-- 
David Gibson| I'll have my music baroque, and my code
david AT gibson.dropbear.id.au  | minimalist, thank you.  NOT _the_ _other_
| _way_ _around_!
http://www.ozlabs.org/~dgibson
___
Linuxppc-dev mailing list
Linuxppc-dev@ozlabs.org
https://ozlabs.org/mailman/listinfo/linuxppc-dev


[PATCH 01/14] powerpc: Move code patching code into arch/powerpc/lib/code-patching.c

2008-06-23 Thread Michael Ellerman
We currently have a few routines for patching code in asm/system.h, because
they didn't fit anywhere else. I'd like to clean them up a little and add
some more, so first move them into a dedicated C file - they don't need to
be inlined.

While we're moving the code, drop create_function_call(); its intended
caller never got merged and will be replaced in future with something
different.

Signed-off-by: Michael Ellerman [EMAIL PROTECTED]
---

This series is also in my git tree:
http://git.kernel.org/?p=linux/kernel/git/mpe/linux-2.6.git


 arch/powerpc/kernel/crash_dump.c  |1 +
 arch/powerpc/lib/Makefile |2 +
 arch/powerpc/lib/code-patching.c  |   33 
 arch/powerpc/platforms/86xx/mpc86xx_smp.c |1 +
 arch/powerpc/platforms/powermac/smp.c |1 +
 include/asm-powerpc/code-patching.h   |   25 +++
 include/asm-powerpc/system.h  |   48 -
 7 files changed, 63 insertions(+), 48 deletions(-)

diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 9ee3c52..35b9a66 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -14,6 +14,7 @@
 #include linux/crash_dump.h
 #include linux/bootmem.h
 #include linux/lmb.h
+#include asm/code-patching.h
 #include asm/kdump.h
 #include asm/prom.h
 #include asm/firmware.h
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index c71d37d..305c7df 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -24,3 +24,5 @@ obj-$(CONFIG_SMP) += locks.o
 endif
 
 obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o
+
+obj-y  += code-patching.o
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
new file mode 100644
index 000..7afae88
--- /dev/null
+++ b/arch/powerpc/lib/code-patching.c
@@ -0,0 +1,33 @@
+/*
+ *  Copyright 2008 Michael Ellerman, IBM Corporation.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ */
+
+#include linux/kernel.h
+#include asm/code-patching.h
+
+
+void create_instruction(unsigned long addr, unsigned int instr)
+{
+   unsigned int *p;
+   p  = (unsigned int *)addr;
+   *p = instr;
+   asm (dcbst 0, %0; sync; icbi 0,%0; sync; isync : : r (p));
+}
+
+void create_branch(unsigned long addr, unsigned long target, int flags)
+{
+   unsigned int instruction;
+
+   if (! (flags  BRANCH_ABSOLUTE))
+   target = target - addr;
+
+   /* Mask out the flags and target, so they don't step on each other. */
+   instruction = 0x4800 | (flags  0x3) | (target  0x03FC);
+
+   create_instruction(addr, instruction);
+}
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_smp.c 
b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
index ba55b0f..63f5585 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_smp.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
@@ -15,6 +15,7 @@
 #include linux/init.h
 #include linux/delay.h
 
+#include asm/code-patching.h
 #include asm/page.h
 #include asm/pgtable.h
 #include asm/pci-bridge.h
diff --git a/arch/powerpc/platforms/powermac/smp.c 
b/arch/powerpc/platforms/powermac/smp.c
index cb2d894..bf202f7 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -36,6 +36,7 @@
 
 #include asm/ptrace.h
 #include asm/atomic.h
+#include asm/code-patching.h
 #include asm/irq.h
 #include asm/page.h
 #include asm/pgtable.h
diff --git a/include/asm-powerpc/code-patching.h 
b/include/asm-powerpc/code-patching.h
new file mode 100644
index 000..0b91fdf
--- /dev/null
+++ b/include/asm-powerpc/code-patching.h
@@ -0,0 +1,25 @@
+#ifndef _ASM_POWERPC_CODE_PATCHING_H
+#define _ASM_POWERPC_CODE_PATCHING_H
+
+/*
+ * Copyright 2008, Michael Ellerman, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/* Flags for create_branch:
+ * b   == create_branch(addr, target, 0);
+ * ba  == create_branch(addr, target, BRANCH_ABSOLUTE);
+ * bl  == create_branch(addr, target, BRANCH_SET_LINK);
+ * bla == create_branch(addr, target, BRANCH_ABSOLUTE | BRANCH_SET_LINK);
+ */
+#define BRANCH_SET_LINK0x1
+#define BRANCH_ABSOLUTE0x2
+
+extern void create_branch(unsigned long addr, unsigned long target, int flags);
+extern void create_instruction(unsigned long addr, unsigned int instr);
+
+#endif /* _ASM_POWERPC_CODE_PATCHING_H */
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index df781ad..d141e48 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h

[PATCH 02/14] powerpc: Allow create_branch() to return errors

2008-06-23 Thread Michael Ellerman
Currently create_branch() creates a branch instruction for you, and patches
it into the call site. In some circumstances it would be nice to be able to
create the instruction and patch it later, and also some code might want
to check for errors in the branch creation before doing the patching. A
future patch will change create_branch() to check for errors.

For callers that don't care, replace create_branch() with patch_branch(),
which just creates the branch and patches it directly.

While we're touching all the callers, change to using unsigned int *, as
this seems to match usage better. That allows (and requires) us to remove
the volatile in the definition of vector in powermac/smp.c and mpc86xx_smp.c;
that is correct because now that we pass vector as an unsigned int *,
the compiler knows that its value might change across the patch_branch()
call.
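
To make the split concrete, here is a sketch of the two usage styles.  The
call site and target are simply borrowed from the mpc86xx caller in the diff
below for illustration, and the error check on create_branch()'s return value
only becomes meaningful once a later patch in this series makes it return 0
for out-of-range targets:

	unsigned int instr;
	unsigned int *site = (unsigned int *)(KERNELBASE + 0x100);
	unsigned long target = (unsigned long)__secondary_start_mpc86xx;

	/* caller that wants to inspect or verify the instruction first */
	instr = create_branch(site, target, BRANCH_SET_LINK);
	if (instr)
		patch_instruction(site, instr);

	/* caller that doesn't care: create and patch in one step */
	patch_branch(site, target, BRANCH_SET_LINK);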

Signed-off-by: Michael Ellerman [EMAIL PROTECTED]
---
 arch/powerpc/kernel/crash_dump.c  |6 --
 arch/powerpc/lib/code-patching.c  |   20 
 arch/powerpc/platforms/86xx/mpc86xx_smp.c |5 ++---
 arch/powerpc/platforms/powermac/smp.c |5 ++---
 include/asm-powerpc/code-patching.h   |6 --
 5 files changed, 24 insertions(+), 18 deletions(-)

diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 35b9a66..2664854 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -34,6 +34,8 @@ void __init reserve_kdump_trampoline(void)
 
 static void __init create_trampoline(unsigned long addr)
 {
+   unsigned int *p = (unsigned int *)addr;
+
/* The maximum range of a single instruction branch, is the current
 * instruction's address + (32 MB - 4) bytes. For the trampoline we
 * need to branch to current address + 32 MB. So we insert a nop at
@@ -42,8 +44,8 @@ static void __init create_trampoline(unsigned long addr)
 * branch to addr we jump to (addr + 32 MB). Although it requires
 * two instructions it doesn't require any registers.
 */
-   create_instruction(addr, 0x6000); /* nop */
-   create_branch(addr + 4, addr + PHYSICAL_START, 0);
+   patch_instruction(p, 0x6000); /* nop */
+   patch_branch(++p, addr + PHYSICAL_START, 0);
 }
 
 void __init setup_kdump_trampoline(void)
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 7afae88..638dde3 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -11,23 +11,27 @@
 #include asm/code-patching.h
 
 
-void create_instruction(unsigned long addr, unsigned int instr)
+void patch_instruction(unsigned int *addr, unsigned int instr)
 {
-   unsigned int *p;
-   p  = (unsigned int *)addr;
-   *p = instr;
-   asm (dcbst 0, %0; sync; icbi 0,%0; sync; isync : : r (p));
+   *addr = instr;
+   asm (dcbst 0, %0; sync; icbi 0,%0; sync; isync : : r (addr));
 }
 
-void create_branch(unsigned long addr, unsigned long target, int flags)
+void patch_branch(unsigned int *addr, unsigned long target, int flags)
+{
+   patch_instruction(addr, create_branch(addr, target, flags));
+}
+
+unsigned int create_branch(const unsigned int *addr,
+  unsigned long target, int flags)
 {
unsigned int instruction;
 
if (! (flags  BRANCH_ABSOLUTE))
-   target = target - addr;
+   target = target - (unsigned long)addr;
 
/* Mask out the flags and target, so they don't step on each other. */
instruction = 0x4800 | (flags  0x3) | (target  0x03FC);
 
-   create_instruction(addr, instruction);
+   return instruction;
 }
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_smp.c 
b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
index 63f5585..835f2dc 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_smp.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
@@ -57,8 +57,7 @@ smp_86xx_kick_cpu(int nr)
unsigned int save_vector;
unsigned long target, flags;
int n = 0;
-   volatile unsigned int *vector
-= (volatile unsigned int *)(KERNELBASE + 0x100);
+   unsigned int *vector = (unsigned int *)(KERNELBASE + 0x100);
 
if (nr  0 || nr = NR_CPUS)
return;
@@ -72,7 +71,7 @@ smp_86xx_kick_cpu(int nr)
 
/* Setup fake reset vector to call __secondary_start_mpc86xx. */
target = (unsigned long) __secondary_start_mpc86xx;
-   create_branch((unsigned long)vector, target, BRANCH_SET_LINK);
+   patch_branch(vector, target, BRANCH_SET_LINK);
 
/* Kick that CPU */
smp_86xx_release_core(nr);
diff --git a/arch/powerpc/platforms/powermac/smp.c 
b/arch/powerpc/platforms/powermac/smp.c
index bf202f7..4ae3d00 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -787,8 +787,7 @@ static void __devinit smp_core99_kick_cpu(int nr)
 {
unsigned int save_vector;

[PATCH 03/14] powerpc: Make create_branch() return errors if the branch target is too large

2008-06-23 Thread Michael Ellerman
If you pass a target value to create_branch() which is more than 32MB - 4
above, or more than 32MB below, the branch site, then it's impossible to
create an immediate branch. The current code doesn't check, which will lead to us
creating a branch to somewhere else - which is bad.

For code that cares to check, we return 0, which is easy to check for; for
code that doesn't, at least we'll be creating an illegal instruction,
rather than a branch to some random address.
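
For reference, the reach works out like this (a sketch of the arithmetic, not
code from the patch): the I-form branch has a 24-bit LI field that is shifted
left by two and sign-extended, so the relative offset must satisfy

	offset = target - (unsigned long)addr;
	/* representable iff: -0x2000000 <= offset <= 0x1fffffc, and (offset & 3) == 0 */

i.e. anywhere from 32MB below to 32MB - 4 above the branch site, on a word
boundary.  Anything outside that now makes create_branch() return 0.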

Signed-off-by: Michael Ellerman [EMAIL PROTECTED]
---
 arch/powerpc/lib/code-patching.c |   10 --
 1 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 638dde3..430f4c1 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -26,12 +26,18 @@ unsigned int create_branch(const unsigned int *addr,
   unsigned long target, int flags)
 {
unsigned int instruction;
+   long offset;
 
+   offset = target;
if (! (flags  BRANCH_ABSOLUTE))
-   target = target - (unsigned long)addr;
+   offset = offset - (unsigned long)addr;
+
+   /* Check we can represent the target in the instruction format */
+   if (offset  -0x200 || offset  0x1fc || offset  0x3)
+   return 0;
 
/* Mask out the flags and target, so they don't step on each other. */
-   instruction = 0x4800 | (flags  0x3) | (target  0x03FC);
+   instruction = 0x4800 | (flags  0x3) | (offset  0x03FC);
 
return instruction;
 }
-- 
1.5.5

___
Linuxppc-dev mailing list
Linuxppc-dev@ozlabs.org
https://ozlabs.org/mailman/listinfo/linuxppc-dev


[PATCH 04/14] powerpc: Add ppc_function_entry() which gets the entry point for a function

2008-06-23 Thread Michael Ellerman
Because function pointers point to different things on 32-bit vs 64-bit,
add a macro that deals with dereferencing the OPD on 64-bit. The soon to
be merged ftrace wants this, as well as other code I am working on.
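
A hedged illustration of the intended use (my_func and site are made-up names):
on 64-bit, taking a function's address through a pointer gives you the OPD
entry, not the code, so anything that wants to branch to the function text
should go through the helper:

	/* 32-bit: the same as (unsigned long)my_func
	 * 64-bit: the first word of my_func's descriptor, i.e. its text address */
	unsigned long dest = ppc_function_entry((void *)my_func);

	patch_branch(site, dest, BRANCH_SET_LINK);	/* bl to my_func's code */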

Signed-off-by: Michael Ellerman [EMAIL PROTECTED]
---
 include/asm-powerpc/code-patching.h |   16 
 1 files changed, 16 insertions(+), 0 deletions(-)

diff --git a/include/asm-powerpc/code-patching.h 
b/include/asm-powerpc/code-patching.h
index fdb187c..a45a7ff 100644
--- a/include/asm-powerpc/code-patching.h
+++ b/include/asm-powerpc/code-patching.h
@@ -10,6 +10,8 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#include asm/types.h
+
 /* Flags for create_branch:
  * b   == create_branch(addr, target, 0);
  * ba  == create_branch(addr, target, BRANCH_ABSOLUTE);
@@ -24,4 +26,18 @@ unsigned int create_branch(const unsigned int *addr,
 void patch_branch(unsigned int *addr, unsigned long target, int flags);
 void patch_instruction(unsigned int *addr, unsigned int instr);
 
+static inline unsigned long ppc_function_entry(void *func)
+{
+#ifdef CONFIG_PPC64
+   /*
+* On PPC64 the function pointer actually points to the function's
+* descriptor. The first entry in the descriptor is the address
+* of the function text.
+*/
+   return ((func_descr_t *)func)-entry;
+#else
+   return (unsigned long)func;
+#endif
+}
+
 #endif /* _ASM_POWERPC_CODE_PATCHING_H */
-- 
1.5.5

___
Linuxppc-dev mailing list
Linuxppc-dev@ozlabs.org
https://ozlabs.org/mailman/listinfo/linuxppc-dev


[PATCH 05/14] powerpc: Add new code patching routines

2008-06-23 Thread Michael Ellerman
This commit adds some new routines for patching code; they will be used
in a following commit.

Signed-off-by: Michael Ellerman [EMAIL PROTECTED]
---
 arch/powerpc/lib/code-patching.c|  107 +++
 include/asm-powerpc/code-patching.h |8 +++
 2 files changed, 115 insertions(+), 0 deletions(-)

diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 430f4c1..27957c4 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -41,3 +41,110 @@ unsigned int create_branch(const unsigned int *addr,
 
return instruction;
 }
+
+unsigned int create_cond_branch(const unsigned int *addr,
+   unsigned long target, int flags)
+{
+   unsigned int instruction;
+   long offset;
+
+   offset = target;
+   if (! (flags  BRANCH_ABSOLUTE))
+   offset = offset - (unsigned long)addr;
+
+   /* Check we can represent the target in the instruction format */
+   if (offset  -0x8000 || offset  0x7FFF || offset  0x3)
+   return 0;
+
+   /* Mask out the flags and target, so they don't step on each other. */
+   instruction = 0x4000 | (flags  0x3FF0003) | (offset  0xFFFC);
+
+   return instruction;
+}
+
+static unsigned int branch_opcode(unsigned int instr)
+{
+   return (instr  26)  0x3F;
+}
+
+static int instr_is_branch_iform(unsigned int instr)
+{
+   return branch_opcode(instr) == 18;
+}
+
+static int instr_is_branch_bform(unsigned int instr)
+{
+   return branch_opcode(instr) == 16;
+}
+
+int instr_is_relative_branch(unsigned int instr)
+{
+   if (instr  BRANCH_ABSOLUTE)
+   return 0;
+
+   return instr_is_branch_iform(instr) || instr_is_branch_bform(instr);
+}
+
+static unsigned long branch_iform_target(const unsigned int *instr)
+{
+   signed long imm;
+
+   imm = *instr  0x3FC;
+
+   /* If the top bit of the immediate value is set this is negative */
+   if (imm  0x200)
+   imm -= 0x400;
+
+   if ((*instr  BRANCH_ABSOLUTE) == 0)
+   imm += (unsigned long)instr;
+
+   return (unsigned long)imm;
+}
+
+static unsigned long branch_bform_target(const unsigned int *instr)
+{
+   signed long imm;
+
+   imm = *instr  0xFFFC;
+
+   /* If the top bit of the immediate value is set this is negative */
+   if (imm  0x8000)
+   imm -= 0x1;
+
+   if ((*instr  BRANCH_ABSOLUTE) == 0)
+   imm += (unsigned long)instr;
+
+   return (unsigned long)imm;
+}
+
+unsigned long branch_target(const unsigned int *instr)
+{
+   if (instr_is_branch_iform(*instr))
+   return branch_iform_target(instr);
+   else if (instr_is_branch_bform(*instr))
+   return branch_bform_target(instr);
+
+   return 0;
+}
+
+int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr)
+{
+   if (instr_is_branch_iform(*instr) || instr_is_branch_bform(*instr))
+   return branch_target(instr) == addr;
+
+   return 0;
+}
+
+unsigned int translate_branch(const unsigned int *dest, const unsigned int 
*src)
+{
+   unsigned long target;
+
+   target = branch_target(src);
+
+   if (instr_is_branch_iform(*src))
+   return create_branch(dest, target, *src);
+   else if (instr_is_branch_bform(*src))
+   return create_cond_branch(dest, target, *src);
+
+   return 0;
+}
diff --git a/include/asm-powerpc/code-patching.h 
b/include/asm-powerpc/code-patching.h
index a45a7ff..40ad46b 100644
--- a/include/asm-powerpc/code-patching.h
+++ b/include/asm-powerpc/code-patching.h
@@ -23,9 +23,17 @@
 
 unsigned int create_branch(const unsigned int *addr,
   unsigned long target, int flags);
+unsigned int create_cond_branch(const unsigned int *addr,
+   unsigned long target, int flags);
 void patch_branch(unsigned int *addr, unsigned long target, int flags);
 void patch_instruction(unsigned int *addr, unsigned int instr);
 
+int instr_is_relative_branch(unsigned int instr);
+int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr);
+unsigned long branch_target(const unsigned int *instr);
+unsigned int translate_branch(const unsigned int *dest,
+ const unsigned int *src);
+
 static inline unsigned long ppc_function_entry(void *func)
 {
 #ifdef CONFIG_PPC64
-- 
1.5.5

___
Linuxppc-dev mailing list
Linuxppc-dev@ozlabs.org
https://ozlabs.org/mailman/listinfo/linuxppc-dev


[PATCH 06/14] powerpc: Add tests of the code patching routines

2008-06-23 Thread Michael Ellerman
Add tests of the existing code patching routines, as well as the new
routines added in the last commit. The self-tests are run late in boot
when CONFIG_CODE_PATCHING_SELFTEST=y, which depends on DEBUG_KERNEL=y.

Signed-off-by: Michael Ellerman [EMAIL PROTECTED]
---
 arch/powerpc/Kconfig.debug   |5 +
 arch/powerpc/lib/code-patching.c |  298 ++
 2 files changed, 303 insertions(+), 0 deletions(-)

diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index a7d24e6..dc58939 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -57,6 +57,11 @@ config KGDB
  debugger.  See http://kgdb.sourceforge.net/ for more information.
  Unless you are intending to debug the kernel, say N here.
 
+config CODE_PATCHING_SELFTEST
+   bool Run self-tests of the code-patching code.
+   depends on DEBUG_KERNEL
+   default n
+
 choice
prompt Serial Port
depends on KGDB
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 27957c4..0559fe0 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -8,6 +8,9 @@
  */
 
 #include linux/kernel.h
+#include linux/vmalloc.h
+#include linux/init.h
+#include asm/page.h
 #include asm/code-patching.h
 
 
@@ -148,3 +151,298 @@ unsigned int translate_branch(const unsigned int *dest, 
const unsigned int *src)
 
return 0;
 }
+
+
+#ifdef CONFIG_CODE_PATCHING_SELFTEST
+
+static void __init test_trampoline(void)
+{
+   asm (nop;\n);
+}
+
+#define check(x)   \
+   if (!(x)) printk(code-patching: test failed at line %d\n, __LINE__);
+
+static void __init test_branch_iform(void)
+{
+   unsigned int instr;
+   unsigned long addr;
+
+   addr = (unsigned long)instr;
+
+   /* The simplest case, branch to self, no flags */
+   check(instr_is_branch_iform(0x4800));
+   /* All bits of target set, and flags */
+   check(instr_is_branch_iform(0x4bff));
+   /* High bit of opcode set, which is wrong */
+   check(!instr_is_branch_iform(0xcbff));
+   /* Middle bits of opcode set, which is wrong */
+   check(!instr_is_branch_iform(0x7bff));
+
+   /* Simplest case, branch to self with link */
+   check(instr_is_branch_iform(0x4801));
+   /* All bits of targets set */
+   check(instr_is_branch_iform(0x4bfd));
+   /* Some bits of targets set */
+   check(instr_is_branch_iform(0x4bff00fd));
+   /* Must be a valid branch to start with */
+   check(!instr_is_branch_iform(0x7bfd));
+
+   /* Absolute branch to 0x100 */
+   instr = 0x48000103;
+   check(instr_is_branch_to_addr(instr, 0x100));
+   /* Absolute branch to 0x420fc */
+   instr = 0x480420ff;
+   check(instr_is_branch_to_addr(instr, 0x420fc));
+   /* Maximum positive relative branch, + 20MB - 4B */
+   instr = 0x49fc;
+   check(instr_is_branch_to_addr(instr, addr + 0x1FC));
+   /* Smallest negative relative branch, - 4B */
+   instr = 0x4bfc;
+   check(instr_is_branch_to_addr(instr, addr - 4));
+   /* Largest negative relative branch, - 32 MB */
+   instr = 0x4a00;
+   check(instr_is_branch_to_addr(instr, addr - 0x200));
+
+   /* Branch to self, with link */
+   instr = create_branch(instr, addr, BRANCH_SET_LINK);
+   check(instr_is_branch_to_addr(instr, addr));
+
+   /* Branch to self - 0x100, with link */
+   instr = create_branch(instr, addr - 0x100, BRANCH_SET_LINK);
+   check(instr_is_branch_to_addr(instr, addr - 0x100));
+
+   /* Branch to self + 0x100, no link */
+   instr = create_branch(instr, addr + 0x100, 0);
+   check(instr_is_branch_to_addr(instr, addr + 0x100));
+
+   /* Maximum relative negative offset, - 32 MB */
+   instr = create_branch(instr, addr - 0x200, BRANCH_SET_LINK);
+   check(instr_is_branch_to_addr(instr, addr - 0x200));
+
+   /* Out of range relative negative offset, - 32 MB + 4*/
+   instr = create_branch(instr, addr - 0x204, BRANCH_SET_LINK);
+   check(instr == 0);
+
+   /* Out of range relative positive offset, + 32 MB */
+   instr = create_branch(instr, addr + 0x200, BRANCH_SET_LINK);
+   check(instr == 0);
+
+   /* Unaligned target */
+   instr = create_branch(instr, addr + 3, BRANCH_SET_LINK);
+   check(instr == 0);
+
+   /* Check flags are masked correctly */
+   instr = create_branch(instr, addr, 0xFFFC);
+   check(instr_is_branch_to_addr(instr, addr));
+   check(instr == 0x4800);
+}
+
+static void __init test_create_function_call(void)
+{
+   unsigned int *iptr;
+   unsigned long dest;
+
+   /* Check we can create a function call */
+   iptr = (unsigned int *)ppc_function_entry(test_trampoline);
+   dest = ppc_function_entry(test_create_function_call);
+   patch_instruction(iptr, create_branch(iptr, 

[PATCH 07/14] powerpc: Add PPC_NOP_INSTR, a hash define for the preferred nop instruction

2008-06-23 Thread Michael Ellerman
A bunch of code has hard-coded the value for a nop instruction; it
would be nice to have a #define for it.

Signed-off-by: Michael Ellerman [EMAIL PROTECTED]
---
 arch/powerpc/kernel/cputable.c  |3 ++-
 arch/powerpc/kernel/crash_dump.c|2 +-
 arch/powerpc/kernel/module_64.c |3 ++-
 include/asm-powerpc/code-patching.h |2 ++
 4 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index e44d553..887e190 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -17,6 +17,7 @@
 #include linux/module.h
 
 #include asm/oprofile_impl.h
+#include asm/code-patching.h
 #include asm/cputable.h
 #include asm/prom.h  /* for PTRRELOC on ARCH=ppc */
 
@@ -1613,7 +1614,7 @@ void do_feature_fixups(unsigned long value, void 
*fixup_start, void *fixup_end)
pend = ((unsigned int *)fcur) + (fcur-end_off / 4);
 
for (p = pstart; p  pend; p++) {
-   *p = 0x6000u;
+   *p = PPC_NOP_INSTR;
asm volatile (dcbst 0, %0 : : r (p));
}
asm volatile (sync : : : memory);
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 2664854..e0debcc 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -44,7 +44,7 @@ static void __init create_trampoline(unsigned long addr)
 * branch to addr we jump to (addr + 32 MB). Although it requires
 * two instructions it doesn't require any registers.
 */
-   patch_instruction(p, 0x6000); /* nop */
+   patch_instruction(p, PPC_NOP_INSTR);
patch_branch(++p, addr + PHYSICAL_START, 0);
 }
 
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 3a82b02..d5e569a 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -24,6 +24,7 @@
 #include asm/module.h
 #include asm/uaccess.h
 #include asm/firmware.h
+#include asm/code-patching.h
 #include linux/sort.h
 
 #include setup.h
@@ -346,7 +347,7 @@ static unsigned long stub_for_addr(Elf64_Shdr *sechdrs,
restore r2. */
 static int restore_r2(u32 *instruction, struct module *me)
 {
-   if (*instruction != 0x6000) {
+   if (*instruction != PPC_NOP_INSTR) {
printk(%s: Expect noop after relocate, got %08x\n,
   me-name, *instruction);
return 0;
diff --git a/include/asm-powerpc/code-patching.h 
b/include/asm-powerpc/code-patching.h
index 40ad46b..ef3a5d1 100644
--- a/include/asm-powerpc/code-patching.h
+++ b/include/asm-powerpc/code-patching.h
@@ -12,6 +12,8 @@
 
 #include asm/types.h
 
+#define PPC_NOP_INSTR  0x6000
+
 /* Flags for create_branch:
  * b   == create_branch(addr, target, 0);
  * ba  == create_branch(addr, target, BRANCH_ABSOLUTE);
-- 
1.5.5

___
Linuxppc-dev mailing list
Linuxppc-dev@ozlabs.org
https://ozlabs.org/mailman/listinfo/linuxppc-dev


[PATCH 08/14] powerpc: Split out do_feature_fixups() from cputable.c

2008-06-23 Thread Michael Ellerman
The logic to patch CPU feature sections lives in cputable.c, but these days
it's used for CPU features as well as firmware features. Move it into
its own file for neatness and as preparation for some additions.

While we're moving the code, we pull the loop body logic into a separate
routine, and remove a comment which doesn't apply anymore.

Signed-off-by: Michael Ellerman [EMAIL PROTECTED]
---
 arch/powerpc/kernel/cputable.c|   36 ---
 arch/powerpc/lib/Makefile |1 +
 arch/powerpc/lib/feature-fixups.c |   56 +
 3 files changed, 57 insertions(+), 36 deletions(-)

diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 887e190..11943f0 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -17,7 +17,6 @@
 #include linux/module.h
 
 #include asm/oprofile_impl.h
-#include asm/code-patching.h
 #include asm/cputable.h
 #include asm/prom.h  /* for PTRRELOC on ARCH=ppc */
 
@@ -1588,38 +1587,3 @@ struct cpu_spec * __init identify_cpu(unsigned long 
offset, unsigned int pvr)
BUG();
return NULL;
 }
-
-void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
-{
-   struct fixup_entry {
-   unsigned long   mask;
-   unsigned long   value;
-   longstart_off;
-   longend_off;
-   } *fcur, *fend;
-
-   fcur = fixup_start;
-   fend = fixup_end;
-
-   for (; fcur  fend; fcur++) {
-   unsigned int *pstart, *pend, *p;
-
-   if ((value  fcur-mask) == fcur-value)
-   continue;
-
-   /* These PTRRELOCs will disappear once the new scheme for
-* modules and vdso is implemented
-*/
-   pstart = ((unsigned int *)fcur) + (fcur-start_off / 4);
-   pend = ((unsigned int *)fcur) + (fcur-end_off / 4);
-
-   for (p = pstart; p  pend; p++) {
-   *p = PPC_NOP_INSTR;
-   asm volatile (dcbst 0, %0 : : r (p));
-   }
-   asm volatile (sync : : : memory);
-   for (p = pstart; p  pend; p++)
-   asm volatile (icbi 0,%0 : : r (p));
-   asm volatile (sync; isync : : : memory);
-   }
-}
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 305c7df..4afcf3a 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -26,3 +26,4 @@ endif
 obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o
 
 obj-y  += code-patching.o
+obj-y  += feature-fixups.o
diff --git a/arch/powerpc/lib/feature-fixups.c 
b/arch/powerpc/lib/feature-fixups.c
new file mode 100644
index 000..f6fd5d2
--- /dev/null
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -0,0 +1,56 @@
+/*
+ *  Copyright (C) 2001 Ben. Herrenschmidt ([EMAIL PROTECTED])
+ *
+ *  Modifications for ppc64:
+ *  Copyright (C) 2003 Dave Engebretsen [EMAIL PROTECTED]
+ *
+ *  Copyright 2008 Michael Ellerman, IBM Corporation.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ */
+
+#include linux/kernel.h
+#include asm/cputable.h
+#include asm/code-patching.h
+
+
+struct fixup_entry {
+   unsigned long   mask;
+   unsigned long   value;
+   longstart_off;
+   longend_off;
+};
+
+static void patch_feature_section(unsigned long value, struct fixup_entry 
*fcur)
+{
+   unsigned int *pstart, *pend, *p;
+
+   if ((value  fcur-mask) == fcur-value)
+   return;
+
+   pstart = ((unsigned int *)fcur) + (fcur-start_off / 4);
+   pend = ((unsigned int *)fcur) + (fcur-end_off / 4);
+
+   for (p = pstart; p  pend; p++) {
+   *p = PPC_NOP_INSTR;
+   asm volatile (dcbst 0, %0 : : r (p));
+   }
+   asm volatile (sync : : : memory);
+   for (p = pstart; p  pend; p++)
+   asm volatile (icbi 0,%0 : : r (p));
+   asm volatile (sync; isync : : : memory);
+}
+
+void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
+{
+   struct fixup_entry *fcur, *fend;
+
+   fcur = fixup_start;
+   fend = fixup_end;
+
+   for (; fcur  fend; fcur++)
+   patch_feature_section(value, fcur);
+}
-- 
1.5.5

___
Linuxppc-dev mailing list
Linuxppc-dev@ozlabs.org
https://ozlabs.org/mailman/listinfo/linuxppc-dev


[PATCH 09/14] powerpc: Consolidate CPU and firmware feature fixup macros

2008-06-23 Thread Michael Ellerman
The CPU and firmware feature fixup macros are currently spread across
three files, firmware.h, cputable.h and asm-compat.h. Consolidate them
into their own file, feature-fixups.h

Signed-off-by: Michael Ellerman [EMAIL PROTECTED]
---
 include/asm-powerpc/asm-compat.h |   51 -
 include/asm-powerpc/cputable.h   |   14 +
 include/asm-powerpc/feature-fixups.h |  102 ++
 include/asm-powerpc/firmware.h   |   13 +
 4 files changed, 104 insertions(+), 76 deletions(-)

diff --git a/include/asm-powerpc/asm-compat.h b/include/asm-powerpc/asm-compat.h
index c19e736..8ec2e1d 100644
--- a/include/asm-powerpc/asm-compat.h
+++ b/include/asm-powerpc/asm-compat.h
@@ -15,57 +15,6 @@
 #endif
 
 
-/*
- * Feature section common macros
- *
- * Note that the entries now contain offsets between the table entry
- * and the code rather than absolute code pointers in order to be
- * useable with the vdso shared library. There is also an assumption
- * that values will be negative, that is, the fixup table has to be
- * located after the code it fixes up.
- */
-#ifdef CONFIG_PPC64
-#ifdef __powerpc64__
-/* 64 bits kernel, 64 bits code */
-#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect)  \
-99:\
-   .section sect,a;  \
-   .align 3;   \
-98:\
-   .llong msk; \
-   .llong val; \
-   .llong label##b-98b;\
-   .llong 99b-98b; \
-   .previous
-#else /* __powerpc64__ */
-/* 64 bits kernel, 32 bits code (ie. vdso32) */
-#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect)  \
-99:\
-   .section sect,a;  \
-   .align 3;   \
-98:\
-   .llong msk; \
-   .llong val; \
-   .long 0x;   \
-   .long label##b-98b; \
-   .long 0x;   \
-   .long 99b-98b;  \
-   .previous
-#endif /* !__powerpc64__ */
-#else /* CONFIG_PPC64 */
-/* 32 bits kernel, 32 bits code */
-#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect)  \
-99:\
-   .section sect,a;  \
-   .align 2;   \
-98:\
-   .long msk;  \
-   .long val;  \
-   .long label##b-98b; \
-   .long 99b-98b;  \
-   .previous
-#endif /* !CONFIG_PPC64 */
-
 #ifdef __powerpc64__
 
 /* operations for longs and pointers */
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index 1e79673..388b7de 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -2,6 +2,7 @@
 #define __ASM_POWERPC_CPUTABLE_H
 
 #include asm/asm-compat.h
+#include asm/feature-fixups.h
 
 #define PPC_FEATURE_32 0x8000
 #define PPC_FEATURE_64 0x4000
@@ -477,18 +478,5 @@ static inline int cpu_has_feature(unsigned long feature)
 
 #endif /* !__ASSEMBLY__ */
 
-#ifdef __ASSEMBLY__
-
-#define BEGIN_FTR_SECTION_NESTED(label)label:
-#define BEGIN_FTR_SECTION  BEGIN_FTR_SECTION_NESTED(97)
-#define END_FTR_SECTION_NESTED(msk, val, label) \
-   MAKE_FTR_SECTION_ENTRY(msk, val, label, __ftr_fixup)
-#define END_FTR_SECTION(msk, val)  \
-   END_FTR_SECTION_NESTED(msk, val, 97)
-
-#define END_FTR_SECTION_IFSET(msk) END_FTR_SECTION((msk), (msk))
-#define END_FTR_SECTION_IFCLR(msk) END_FTR_SECTION((msk), 0)
-#endif /* __ASSEMBLY__ */
-
 #endif /* __KERNEL__ */
 #endif /* __ASM_POWERPC_CPUTABLE_H */
diff --git a/include/asm-powerpc/feature-fixups.h 
b/include/asm-powerpc/feature-fixups.h
new file mode 100644
index 000..8597212
--- /dev/null
+++ b/include/asm-powerpc/feature-fixups.h
@@ -0,0 +1,102 @@
+#ifndef __ASM_POWERPC_FEATURE_FIXUPS_H
+#define __ASM_POWERPC_FEATURE_FIXUPS_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifdef __ASSEMBLY__
+
+/*
+ * Feature section common macros
+ *
+ * Note that the entries now contain offsets between the table entry
+ * and the code rather than 

[PATCH 10/14] powerpc: Consolidate feature fixup macros for 64/32 bit

2008-06-23 Thread Michael Ellerman
Currently we have three versions of MAKE_FTR_SECTION_ENTRY(), the macro that
generates a feature section entry. There is a 64bit version, a 32bit version
and a version for 32bit code built with a 64bit kernel.

Rather than triplicating (?) the MAKE_FTR_SECTION_ENTRY() logic, we can
move the 64bit/32bit differences into separate macros, and then only have
one version of MAKE_FTR_SECTION_ENTRY().

Signed-off-by: Michael Ellerman [EMAIL PROTECTED]
---
 include/asm-powerpc/feature-fixups.h |   54 -
 1 files changed, 13 insertions(+), 41 deletions(-)

diff --git a/include/asm-powerpc/feature-fixups.h 
b/include/asm-powerpc/feature-fixups.h
index 8597212..35f9278 100644
--- a/include/asm-powerpc/feature-fixups.h
+++ b/include/asm-powerpc/feature-fixups.h
@@ -19,55 +19,27 @@
  * that values will be negative, that is, the fixup table has to be
  * located after the code it fixes up.
  */
-#ifdef CONFIG_PPC64
-
-#ifdef __powerpc64__
-
-/* 64 bits kernel, 64 bits code */
-#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect)  \
-99:\
-   .section sect,a;  \
-   .align 3;   \
-98:\
-   .llong msk; \
-   .llong val; \
-   .llong label##b-98b;\
-   .llong 99b-98b; \
-   .previous
-
-#else /* __powerpc64__ */
-
+#if defined(CONFIG_PPC64)  !defined(__powerpc64__)
 /* 64 bits kernel, 32 bits code (ie. vdso32) */
+#define FTR_ENTRY_LONG .llong
+#define FTR_ENTRY_OFFSET   .long 0x; .long
+#else
+/* 64 bit kernel 64 bit code, or 32 bit kernel 32 bit code */
+#define FTR_ENTRY_LONG PPC_LONG
+#define FTR_ENTRY_OFFSET   PPC_LONG
+#endif
+
 #define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect)  \
 99:\
.section sect,a;  \
.align 3;   \
 98:\
-   .llong msk; \
-   .llong val; \
-   .long 0x;   \
-   .long label##b-98b; \
-   .long 0x;   \
-   .long 99b-98b;  \
-   .previous
-
-#endif /* !__powerpc64__ */
-
-#else /* CONFIG_PPC64 */
-
-/* 32 bits kernel, 32 bits code */
-#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect)  \
-99:\
-   .section sect,a;  \
-   .align 2;   \
-98:\
-   .long msk;  \
-   .long val;  \
-   .long label##b-98b; \
-   .long 99b-98b;  \
+   FTR_ENTRY_LONG msk; \
+   FTR_ENTRY_LONG val; \
+   FTR_ENTRY_OFFSET label##b-98b;  \
+   FTR_ENTRY_OFFSET 99b-98b;   \
.previous
 
-#endif /* !CONFIG_PPC64 */
 
 
 /* CPU feature dependent sections */
-- 
1.5.5

___
Linuxppc-dev mailing list
Linuxppc-dev@ozlabs.org
https://ozlabs.org/mailman/listinfo/linuxppc-dev


[PATCH 11/14] powerpc: Introduce infrastructure for feature sections with alternatives

2008-06-23 Thread Michael Ellerman
The current feature section logic only supports nop'ing out code. This means
that if you want to choose at runtime between instruction sequences, one or
both cases will have to execute the nop'ed out contents of the other section, eg:

BEGIN_FTR_SECTION
or  1,1,1
END_FTR_SECTION_IFSET(FOO)
BEGIN_FTR_SECTION
or  2,2,2
END_FTR_SECTION_IFCLR(FOO)

and the resulting code will be either,

or  1,1,1
nop

or,
nop
or  2,2,2

For small code segments this is fine, but for larger code blocks and in
performance critical code segments, it would be nice to avoid the nops.
This commit starts to implement logic to allow the following:

BEGIN_FTR_SECTION
or  1,1,1
FTR_SECTION_ELSE
or  2,2,2
ALT_FTR_SECTION_END_IFSET(FOO)

and the resulting code will be:

or  1,1,1
or,
or  2,2,2

We achieve this by extending the existing FTR macros. The current feature
section semantics just become a special case, ie. if the else case is empty
we nop out the default case.

The key limitation is that the size of the else case must be less than or
equal to the size of the default case. If the else case is smaller the
remainder of the section is nop'ed.

We let the linker put the else case code in with the rest of the text,
so that relative branches from the else case are more likely to link;
this has the disadvantage that we can't free the unused else cases.

This commit introduces the required macro and linker script changes, but
does not enable the patching of the alternative sections.

We also need to update two hand-made section entries in reg.h and timex.h

Signed-off-by: Michael Ellerman [EMAIL PROTECTED]
---
 arch/powerpc/kernel/vdso32/vdso32.lds.S |2 +-
 arch/powerpc/kernel/vdso64/vdso64.lds.S |2 +-
 arch/powerpc/kernel/vmlinux.lds.S   |2 +-
 arch/powerpc/lib/feature-fixups.c   |2 +
 include/asm-powerpc/feature-fixups.h|   68 +--
 include/asm-powerpc/reg.h   |2 +
 include/asm-powerpc/timex.h |2 +
 7 files changed, 64 insertions(+), 16 deletions(-)

diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S 
b/arch/powerpc/kernel/vdso32/vdso32.lds.S
index 9352ab5..2717935 100644
--- a/arch/powerpc/kernel/vdso32/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S
@@ -24,7 +24,7 @@ SECTIONS
 
. = ALIGN(16);
.text   : {
-   *(.text .stub .text.* .gnu.linkonce.t.*)
+   *(.text .stub .text.* .gnu.linkonce.t.* __ftr_alt_*)
}
PROVIDE(__etext = .);
PROVIDE(_etext = .);
diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S 
b/arch/powerpc/kernel/vdso64/vdso64.lds.S
index 7d6585f..e608d1b 100644
--- a/arch/powerpc/kernel/vdso64/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S
@@ -24,7 +24,7 @@ SECTIONS
 
. = ALIGN(16);
.text   : {
-   *(.text .stub .text.* .gnu.linkonce.t.*)
+   *(.text .stub .text.* .gnu.linkonce.t.* __ftr_alt_*)
*(.sfpr .glink)
}   :text
PROVIDE(__etext = .);
diff --git a/arch/powerpc/kernel/vmlinux.lds.S 
b/arch/powerpc/kernel/vmlinux.lds.S
index 0c3000b..3c07811 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -35,7 +35,7 @@ SECTIONS
ALIGN_FUNCTION();
*(.text.head)
_text = .;
-   *(.text .fixup .text.init.refok .exit.text.refok)
+   *(.text .fixup .text.init.refok .exit.text.refok __ftr_alt_*)
SCHED_TEXT
LOCK_TEXT
KPROBES_TEXT
diff --git a/arch/powerpc/lib/feature-fixups.c 
b/arch/powerpc/lib/feature-fixups.c
index f6fd5d2..973d547 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -22,6 +22,8 @@ struct fixup_entry {
unsigned long   value;
longstart_off;
longend_off;
+   longalt_start_off;
+   longalt_end_off;
 };
 
 static void patch_feature_section(unsigned long value, struct fixup_entry 
*fcur)
diff --git a/include/asm-powerpc/feature-fixups.h 
b/include/asm-powerpc/feature-fixups.h
index 35f9278..ab30129 100644
--- a/include/asm-powerpc/feature-fixups.h
+++ b/include/asm-powerpc/feature-fixups.h
@@ -29,24 +29,35 @@
 #define FTR_ENTRY_OFFSET   PPC_LONG
 #endif
 
+#define START_FTR_SECTION(label)   label##1:
+
+#define FTR_SECTION_ELSE_NESTED(label) \
+label##2:  \
+   .pushsection __ftr_alt_##label,a; \
+   .align 2;   \
+label##3:
+
 #define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect)  \
-99:\
-   .section sect,a;  \
+label##4:   

[PATCH 12/14] powerpc: Add logic to patch alternative feature sections

2008-06-23 Thread Michael Ellerman
This patch adds the logic to patch alternative sections. This is fairly
straightforward, except for branches. Relative branches that jump from
inside the else section to outside of it need to be translated as they're
moved; otherwise they will jump to the wrong location.
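
To illustrate with made-up addresses: a relative branch at 0xc0001000 whose
target is 0xc0002000 encodes an offset of +0x1000; copied unmodified into an
alternative site at 0xc0000100 it would instead land on 0xc0001100.  That is
what translate_branch() fixes up, roughly:

	/* sketch: moving one relative branch from 'src' to 'dest' */
	unsigned int instr = translate_branch(dest, src); /* re-encode against dest */
	if (!instr)
		return 1;	/* new offset out of range, patching must fail */
	patch_instruction(dest, instr);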

Signed-off-by: Michael Ellerman [EMAIL PROTECTED]
---
 arch/powerpc/lib/feature-fixups.c |   79 ++---
 1 files changed, 65 insertions(+), 14 deletions(-)

diff --git a/arch/powerpc/lib/feature-fixups.c 
b/arch/powerpc/lib/feature-fixups.c
index 973d547..174c1eb 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -26,24 +26,66 @@ struct fixup_entry {
longalt_end_off;
 };
 
-static void patch_feature_section(unsigned long value, struct fixup_entry 
*fcur)
+static unsigned int *calc_addr(struct fixup_entry *fcur, long offset)
 {
-   unsigned int *pstart, *pend, *p;
+   /*
+* We store the offset to the code as a negative offset from
+* the start of the alt_entry, to support the VDSO. This
+* routine converts that back into an actual address.
+*/
+   return (unsigned int *)((unsigned long)fcur + offset);
+}
+
+static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
+unsigned int *alt_start, unsigned int *alt_end)
+{
+   unsigned int instr;
+
+   instr = *src;
+
+   if (instr_is_relative_branch(*src)) {
+   unsigned int *target = (unsigned int *)branch_target(src);
+
+   /* Branch within the section doesn't need translating */
+   if (target  alt_start || target = alt_end) {
+   instr = translate_branch(dest, src);
+   if (!instr)
+   return 1;
+   }
+   }
+
+   patch_instruction(dest, instr);
+
+   return 0;
+}
+
+static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
+{
+   unsigned int *start, *end, *alt_start, *alt_end, *src, *dest;
+
+   start = calc_addr(fcur, fcur-start_off);
+   end = calc_addr(fcur, fcur-end_off);
+   alt_start = calc_addr(fcur, fcur-alt_start_off);
+   alt_end = calc_addr(fcur, fcur-alt_end_off);
+
+   if ((alt_end - alt_start)  (end - start))
+   return 1;
 
if ((value  fcur-mask) == fcur-value)
-   return;
+   return 0;
 
-   pstart = ((unsigned int *)fcur) + (fcur-start_off / 4);
-   pend = ((unsigned int *)fcur) + (fcur-end_off / 4);
+   src = alt_start;
+   dest = start;
 
-   for (p = pstart; p  pend; p++) {
-   *p = PPC_NOP_INSTR;
-   asm volatile (dcbst 0, %0 : : r (p));
+   for (; src  alt_end; src++, dest++) {
+   if (patch_alt_instruction(src, dest, alt_start, alt_end))
+   return 1;
}
-   asm volatile (sync : : : memory);
-   for (p = pstart; p  pend; p++)
-   asm volatile (icbi 0,%0 : : r (p));
-   asm volatile (sync; isync : : : memory);
+
+   for (; dest  end; dest++)
+   patch_instruction(dest, PPC_NOP_INSTR);
+
+   return 0;
 }
 
 void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
@@ -53,6 +95,15 @@ void do_feature_fixups(unsigned long value, void 
*fixup_start, void *fixup_end)
fcur = fixup_start;
fend = fixup_end;
 
-   for (; fcur  fend; fcur++)
-   patch_feature_section(value, fcur);
+   for (; fcur  fend; fcur++) {
+   if (patch_feature_section(value, fcur)) {
+   __WARN();
+   printk(Unable to patch feature section at %p - %p \
+with %p - %p\n,
+   calc_addr(fcur, fcur-start_off),
+   calc_addr(fcur, fcur-end_off),
+   calc_addr(fcur, fcur-alt_start_off),
+   calc_addr(fcur, fcur-alt_end_off));
+   }
+   }
 }
-- 
1.5.5

___
Linuxppc-dev mailing list
Linuxppc-dev@ozlabs.org
https://ozlabs.org/mailman/listinfo/linuxppc-dev


[PATCH 13/14] powerpc: Add self-tests of the feature fixup code

2008-06-23 Thread Michael Ellerman
This commit adds tests of the feature fixup code; they are run during
boot if CONFIG_FTR_FIXUP_SELFTEST=y. Some of the tests manually invoke
the patching routines to check their behaviour, and others use the
macros and so are patched during the normal patching done during boot.

Because we have two sets of macros with different names, we use a macro
to generate the tests of the macros, very nice.

Signed-off-by: Michael Ellerman [EMAIL PROTECTED]
---
 arch/powerpc/Kconfig.debug |5 +
 arch/powerpc/lib/Makefile  |1 +
 arch/powerpc/lib/feature-fixups-test.S |  727 
 arch/powerpc/lib/feature-fixups.c  |  206 +
 4 files changed, 939 insertions(+), 0 deletions(-)

diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index dc58939..2840ab6 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -62,6 +62,11 @@ config CODE_PATCHING_SELFTEST
depends on DEBUG_KERNEL
default n
 
+config FTR_FIXUP_SELFTEST
+   bool Run self-tests of the feature-fixup code.
+   depends on DEBUG_KERNEL
+   default n
+
 choice
prompt Serial Port
depends on KGDB
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 4afcf3a..9469f99 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -27,3 +27,4 @@ obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o
 
 obj-y  += code-patching.o
 obj-y  += feature-fixups.o
+obj-$(CONFIG_FTR_FIXUP_SELFTEST) += feature-fixups-test.o
diff --git a/arch/powerpc/lib/feature-fixups-test.S 
b/arch/powerpc/lib/feature-fixups-test.S
new file mode 100644
index 000..10d038b
--- /dev/null
+++ b/arch/powerpc/lib/feature-fixups-test.S
@@ -0,0 +1,727 @@
+/*
+ * Copyright 2008 Michael Ellerman, IBM Corporation.
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include asm/feature-fixups.h
+#include asm/ppc_asm.h
+
+   .text
+
+#define globl(x)   \
+   .globl x;   \
+x:
+
+globl(ftr_fixup_test1)
+   or  1,1,1
+   or  2,2,2   /* fixup will nop out this instruction */
+   or  3,3,3
+
+globl(end_ftr_fixup_test1)
+
+globl(ftr_fixup_test1_orig)
+   or  1,1,1
+   or  2,2,2
+   or  3,3,3
+
+globl(ftr_fixup_test1_expected)
+   or  1,1,1
+   nop
+   or  3,3,3
+
+globl(ftr_fixup_test2)
+   or  1,1,1
+   or  2,2,2   /* fixup will replace this with ftr_fixup_test2_alt */
+   or  3,3,3
+
+globl(end_ftr_fixup_test2)
+
+globl(ftr_fixup_test2_orig)
+   or  1,1,1
+   or  2,2,2
+   or  3,3,3
+
+globl(ftr_fixup_test2_alt)
+   or  31,31,31
+
+globl(ftr_fixup_test2_expected)
+   or  1,1,1
+   or  31,31,31
+   or  3,3,3
+
+globl(ftr_fixup_test3)
+   or  1,1,1
+   or  2,2,2   /* fixup will fail to replace this */
+   or  3,3,3
+
+globl(end_ftr_fixup_test3)
+
+globl(ftr_fixup_test3_orig)
+   or  1,1,1
+   or  2,2,2
+   or  3,3,3
+
+globl(ftr_fixup_test3_alt)
+   or  31,31,31
+   or  31,31,31
+
+globl(ftr_fixup_test4)
+   or  1,1,1
+   or  2,2,2
+   or  2,2,2
+   or  2,2,2
+   or  2,2,2
+   or  3,3,3
+
+globl(end_ftr_fixup_test4)
+
+globl(ftr_fixup_test4_expected)
+   or  1,1,1
+   or  31,31,31
+   or  31,31,31
+   nop
+   nop
+   or  3,3,3
+
+globl(ftr_fixup_test4_orig)
+   or  1,1,1
+   or  2,2,2
+   or  2,2,2
+   or  2,2,2
+   or  2,2,2
+   or  3,3,3
+
+globl(ftr_fixup_test4_alt)
+   or  31,31,31
+   or  31,31,31
+
+
+globl(ftr_fixup_test5)
+   or  1,1,1
+BEGIN_FTR_SECTION
+   or  2,2,2
+   or  2,2,2
+   or  2,2,2
+   or  2,2,2
+   or  2,2,2
+   or  2,2,2
+   or  2,2,2
+FTR_SECTION_ELSE
+2: b   3f
+3: or  5,5,5
+   beq 3b
+   b   1f
+   or  6,6,6
+   b   2b
+1: bdnz3b
+ALT_FTR_SECTION_END(0, 1)
+   or  1,1,1
+
+globl(end_ftr_fixup_test5)
+
+globl(ftr_fixup_test5_expected)
+   or  1,1,1
+2: b   3f
+3: or  5,5,5
+   beq 3b
+   b   1f
+   or  6,6,6
+   b   2b
+1: bdnz3b
+   or  1,1,1
+
+globl(ftr_fixup_test6)
+1: or  1,1,1
+BEGIN_FTR_SECTION
+   or  5,5,5
+2: cmpdi   r3,0
+   beq 4f
+   blt 2b
+   b   1b
+   b   4f
+FTR_SECTION_ELSE
+2: or  2,2,2
+   cmpdi   r3,1
+   beq 3f
+   blt 2b
+   b   3f
+   b   1b
+ALT_FTR_SECTION_END(0, 1)
+3: 

[PATCH 14/14] powerpc: Use an alternative feature section in entry_64.S

2008-06-23 Thread Michael Ellerman
Use an alternative feature section in _switch. There are three cases
handled here: either we don't have an SLB, in which case we jump over the
entire code section, or if we do, we either do or don't have 1TB segments.

Boot tested on Power3, Power5 and Power5+.

Signed-off-by: Michael Ellerman [EMAIL PROTECTED]
---
 arch/powerpc/kernel/entry_64.S |   12 ++--
 1 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index c0db5b7..6ca3044 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -383,16 +383,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 
ld  r8,KSP(r4)  /* new stack pointer */
 BEGIN_FTR_SECTION
-   b   2f
-END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
-BEGIN_FTR_SECTION
+  BEGIN_FTR_SECTION_NESTED(95)
clrrdi  r6,r8,28/* get its ESID */
clrrdi  r9,r1,28/* get current sp ESID */
-END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
-BEGIN_FTR_SECTION
+  FTR_SECTION_ELSE_NESTED(95)
clrrdi  r6,r8,40/* get its 1T ESID */
clrrdi  r9,r1,40/* get current sp 1T ESID */
-END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
+  ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_1T_SEGMENT, 95)
+FTR_SECTION_ELSE
+   b   2f
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_SLB)
clrldi. r0,r6,2 /* is new ESID c? */
cmpdcr1,r6,r9   /* or is new ESID the same as current ESID? */
croreq,4*cr1+eq,eq
-- 
1.5.5

___
Linuxppc-dev mailing list
Linuxppc-dev@ozlabs.org
https://ozlabs.org/mailman/listinfo/linuxppc-dev


Re: [PATCH] [RFC] powerpc: Xilinx: adding virtex5 powerpc 440 support

2008-06-23 Thread Josh Boyer
On Mon, 23 Jun 2008 15:30:35 -0600
John Linn [EMAIL PROTECTED] wrote:

  + *
  + * Copyright 2007 IBM Corporation
  + *   Based on cuboot-83xx.c, which is:
  + * Copyright (c) 2007 Freescale Semiconductor, Inc.
  + *
  + * This program is free software; you can redistribute it and/or
 modify
  it
  + * under the terms of the GNU General Public License version 2 as
  published
  + * by the Free Software Foundation.
  + */
  +
  +#include stddef.h
  +#include stdio.h
  +#include ops.h
  +#include dcr.h
  +#include 4xx.h
  +#include io.h
  +#include reg.h
  +
  +BSS_STACK(4096);
  +
  +#include types.h
  +#include gunzip_util.h
  +#include libfdt.h
  +#include ../../../include/linux/autoconf.h
 
 Ew.  We've never included the CONFIG_ variables from Kconfig in the
 bootwrapper.  It's supposed to be independent of the kernel.
 
 
 I'll try to better understand if we can detect the compressed device
 tree and if we really have to disable the APU.
 
 What's the reasoning for being independent of the kernel? Maybe it's
 obvious to everyone but me.

The intention, as I understand it, is that the wrapper utilities can be
installed stand-alone and used to wrap other kernels if needs be.  In
practice I've not seen this happen yet, as most PowerPC kernels
are built directly from the kernel source.  Fedora does have a
separate package for the wrapper bits, but I'm not entirely sure it's
used.

My understanding could be totally wrong, and if so I'll politely ask
Paul or anyone else to hit me with a cluebat :).

josh
___
Linuxppc-dev mailing list
Linuxppc-dev@ozlabs.org
https://ozlabs.org/mailman/listinfo/linuxppc-dev


Re: starting with 2.6.26-rc1 cell_defconfig fails on QS22

2008-06-23 Thread Segher Boessenkool

Finalizing device tree... using OF tree (promptr=0e1004c4)


( 700 ) Program Exception [ e1004c4 ]


The program check exception happens at 0xe1004c4 ? That looks like the
OF entry point (promptr)... could it be possible that it got corrupted
somewhat ? The only thing I see above there would be the stack but
I fail to see how it would use that much...


I don't know what this [ e1004c4 ] is; if I read the current public
SLOF code (for JS2x) correctly, it seems to be whatever was on the
stack (or just below the stack) below the error code that was thrown.
Maybe some errors put something interesting there, dunno.

/me looks deeper...  Nastiness :-)  So, a decrementer exception gives
the current decrementer value as reason code; an external exception
reads some BE-specific registers (on any system!); and all other
exceptions use whatever was in GPR4?

Anyway, the register dump shows:

 CR / XER   LR / CTR  SRR0 / SRR1DAR / DSISR
8022   014073e8   0189e99c
2000   0140   90083000

so SRR0 is 0189e99c, which is where the exception happened.  Does
objdump show what's going on?  It seems to happen almost immediately
after the kernel starts, given the CTR value.  If the SRR0 address
doesn't help, the LR address should.


I have tried it with gcc-3.4.2, gcc-4.1.1 and gcc-4.2.4.


The binutils version is more interesting here.  2.18?


Segher

___
Linuxppc-dev mailing list
Linuxppc-dev@ozlabs.org
https://ozlabs.org/mailman/listinfo/linuxppc-dev


Re: [PATCH 6/6] powerpc: support multiple huge page sizes

2008-06-23 Thread Nick Piggin
On Tue, May 13, 2008 at 12:25:27PM -0500, Jon Tollefson wrote:
 Instead of using the variable mmu_huge_psize to keep track of the huge
 page size we use an array of MMU_PAGE_* values.  For each supported
 huge page size we need to know the hugepte_shift value and have a
 pgtable_cache.  The hstate or an mmu_huge_psizes index is passed to
 functions so that they know which huge page size they should use.
 
 The hugepage sizes 16M and 64K are set up (if available on the
 hardware) so that they don't have to be set on the boot cmd line in
 order to use them.  The number of 16G pages has to be specified at
 boot-time though (e.g. hugepagesz=16G hugepages=5).
 
 
 Signed-off-by: Jon Tollefson [EMAIL PROTECTED]
 ---
 
 @@ -150,17 +191,25 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned 
 long addr)
   pud_t *pu;
   pmd_t *pm;
 
 - BUG_ON(get_slice_psize(mm, addr) != mmu_huge_psize);
 + unsigned int psize;
 + unsigned int shift;
 + unsigned long sz;
 + struct hstate *hstate;
 + psize = get_slice_psize(mm, addr);
 + shift = mmu_psize_to_shift(psize);
 + sz = ((1UL)  shift);
 + hstate = size_to_hstate(sz);
 
 - addr = HPAGE_MASK;
 + addr = hstate-mask;
 
   pg = pgd_offset(mm, addr);
   if (!pgd_none(*pg)) {
   pu = pud_offset(pg, addr);
   if (!pud_none(*pu)) {
 - pm = hpmd_offset(pu, addr);
 + pm = hpmd_offset(pu, addr, hstate);
   if (!pmd_none(*pm))
 - return hugepte_offset((hugepd_t *)pm, addr);
 + return hugepte_offset((hugepd_t *)pm, addr,
 +   hstate);
   }
   }

Hi Jon,

I just noticed in a few places like this, you might be doing more work
than really needed to get the HPAGE_MASK.

For a first-pass conversion, this is the right way to go (just manually
replace hugepage constants with hstate-> equivalents). However, in this
case, if you already know the page size, you should be able to work out
the shift from there, I think? That way you can avoid the size_to_hstate
call completely.

Anyway, just something to consider.

Thanks,
Nick
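
A minimal sketch of what that suggestion might look like, reusing only the
helpers already visible in this hunk (untested, and the hstate is still needed
where it is passed further down):

	psize = get_slice_psize(mm, addr);
	shift = mmu_psize_to_shift(psize);

	/* derive the mask directly from the shift, instead of calling
	 * size_to_hstate(1UL << shift) just to read hstate->mask */
	addr &= ~((1UL << shift) - 1);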
 
 @@ -173,16 +222,20 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned 
 long addr, unsigned long sz
   pud_t *pu;
   pmd_t *pm;
   hugepd_t *hpdp = NULL;
 + struct hstate *hstate;
 + unsigned int psize;
 + hstate = size_to_hstate(sz);
 
 - BUG_ON(get_slice_psize(mm, addr) != mmu_huge_psize);
 + psize = get_slice_psize(mm, addr);
 + BUG_ON(!mmu_huge_psizes[psize]);
 
 - addr = HPAGE_MASK;
 + addr = hstate-mask;
 
   pg = pgd_offset(mm, addr);
   pu = pud_alloc(mm, pg, addr);
 
   if (pu) {
 - pm = hpmd_alloc(mm, pu, addr);
 + pm = hpmd_alloc(mm, pu, addr, hstate);
   if (pm)
   hpdp = (hugepd_t *)pm;
   }
 @@ -190,10 +243,10 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned 
 long addr, unsigned long sz
   if (! hpdp)
   return NULL;
 
 - if (hugepd_none(*hpdp)  __hugepte_alloc(mm, hpdp, addr))
 + if (hugepd_none(*hpdp)  __hugepte_alloc(mm, hpdp, addr, psize))
   return NULL;
 
 - return hugepte_offset(hpdp, addr);
 + return hugepte_offset(hpdp, addr, hstate);
 }
 
 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
 @@ -201,19 +254,22 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned 
 long *addr, pte_t *ptep)
   return 0;
 }
 
 -static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp)
 +static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp,
 +unsigned int psize)
 {
   pte_t *hugepte = hugepd_page(*hpdp);
 
   hpdp->pd = 0;
   tlb->need_flush = 1;
 - pgtable_free_tlb(tlb, pgtable_free_cache(hugepte, HUGEPTE_CACHE_NUM,
 + pgtable_free_tlb(tlb, pgtable_free_cache(hugepte,
 +  HUGEPTE_CACHE_NUM+psize-1,
PGF_CACHENUM_MASK));
 }
 
 static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
  unsigned long addr, unsigned long end,
 -unsigned long floor, unsigned long 
 ceiling)
 +unsigned long floor, unsigned long 
 ceiling,
 +unsigned int psize)
 {
   pmd_t *pmd;
   unsigned long next;
 @@ -225,7 +281,7 @@ static void hugetlb_free_pmd_range(struct mmu_gather 
 *tlb, pud_t *pud,
   next = pmd_addr_end(addr, end);
   if (pmd_none(*pmd))
   continue;
 - free_hugepte_range(tlb, (hugepd_t *)pmd);
 + free_hugepte_range(tlb, (hugepd_t *)pmd, psize);
   } while (pmd++, addr = next, addr != end);
 
   start &= PUD_MASK;
 @@ -251,6 +307,9 

[PATCH v2] Change the default link address for pSeries zImage kernels.

2008-06-23 Thread Tony Breeds
Currently we set the start of the .text section to be 4Mb for pSeries.
In situations where the zImage is > 8Mb we'll fail to boot (due to
overlapping with OF).  Move .text in a zImage from 4MB to 64MB (well past OF).

We still will not be able to load a large zImage unless we also move OF;
to that end, add a note to the zImage ELF to move OF to 32Mb.  If this
is the very first kernel booted then we'll need to move OF manually by
setting real-base.
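
As a back-of-the-envelope sketch of the overlap described above: the numbers
come from the commit message and the addnote.c hunk below, but the macro
names and the helper function are invented for illustration and are not part
of the patch.

#define OLD_LINK_ADDR	0x00400000UL	/*  4 MB: old zImage .text start      */
#define OLD_OF_BASE	0x00c00000UL	/* 12 MB: old real-base, where OF sat */
#define NEW_LINK_ADDR	0x04000000UL	/* 64 MB: new pSeries .text start     */
#define NEW_OF_BASE	0x02000000UL	/* 32 MB: real-base set by the note   */

static int zimage_overlaps_of(unsigned long zimage_size)
{
	/* With the old layout, anything past 8 MB runs into Open Firmware. */
	return OLD_LINK_ADDR + zimage_size > OLD_OF_BASE;
}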

Signed-off-by: Tony Breeds [EMAIL PROTECTED]
---
Booted on:
3,4,5,5+,6,JS20 and JS21 (running slof 1.7.0-1)
Changes since v1:
typo fixes in commit message and comments

 arch/powerpc/boot/addnote.c |2 +-
 arch/powerpc/boot/oflib.c   |   15 +--
 arch/powerpc/boot/wrapper   |   14 --
 arch/powerpc/boot/zImage.coff.lds.S |1 -
 arch/powerpc/boot/zImage.lds.S  |1 -
 5 files changed, 26 insertions(+), 7 deletions(-)

diff --git a/arch/powerpc/boot/addnote.c b/arch/powerpc/boot/addnote.c
index 8041a98..b1e5611 100644
--- a/arch/powerpc/boot/addnote.c
+++ b/arch/powerpc/boot/addnote.c
@@ -25,7 +25,7 @@ char arch[] = "PowerPC";
 #define N_DESCR	6
 unsigned int descr[N_DESCR] = {
 	0xffffffff, /* real-mode = true */
-	0x00c00000, /* real-base, i.e. where we expect OF to be */
+	0x02000000, /* real-base, i.e. where we expect OF to be */
 	0xffffffff, /* real-size */
 	0xffffffff, /* virt-base */
 	0xffffffff, /* virt-size */
diff --git a/arch/powerpc/boot/oflib.c b/arch/powerpc/boot/oflib.c
index 95b8fd6..b0ec9cf 100644
--- a/arch/powerpc/boot/oflib.c
+++ b/arch/powerpc/boot/oflib.c
@@ -168,8 +168,19 @@ void *of_claim(unsigned long virt, unsigned long size, 
unsigned long align)
 
 void *of_vmlinux_alloc(unsigned long size)
 {
-   void *p = malloc(size);
-
+   unsigned long start = (unsigned long)_start, end = (unsigned long)_end;
+   void *addr;
+   void *p;
+
+   /* With some older POWER4 firmware we need to claim the area the kernel
+* will reside in.  Newer firmwares don't need this so we just ignore
+* the return value.
+*/
+   addr = of_claim(start, end - start, 0);
+   printf("Trying to claim from 0x%lx to 0x%lx (0x%lx) got %p\r\n",
+  start, end, end - start, addr);
+
+   p = malloc(size);
if (!p)
		fatal("Can't allocate memory for kernel image!\n\r");
 
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index d6c96d9..22bc26e 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -138,14 +138,20 @@ objflags=-S
 tmp=$tmpdir/zImage.$$.o
 ksection=.kernel:vmlinux.strip
 isection=.kernel:initrd
+link_address='0x400000'
 
 case $platform in
-pmac|pseries|chrp)
+pseries)
+platformo=$object/of.o
+link_address='0x4000000'
+;;
+pmac|chrp)
 platformo=$object/of.o
 ;;
 coff)
 platformo=$object/of.o
 lds=$object/zImage.coff.lds
+link_address='0x500000'
 ;;
 miboot|uboot)
 # miboot and U-boot want just the bare bits, not an ELF binary
@@ -190,6 +196,7 @@ ps3)
 objflags="-O binary --set-section-flags=.bss=contents,alloc,load,data"
 ksection=.kernel:vmlinux.bin
 isection=.kernel:initrd
+link_address=''
 ;;
 ep88xc|ep405|ep8248e)
 platformo=$object/fixed-head.o $object/$platform.o
@@ -268,7 +275,10 @@ if [ -n $dtb ]; then
 fi
 
 if [ $platform != miboot ]; then
-${CROSS}ld -m elf32ppc -T $lds -o $ofile \
+if [ -n "$link_address" ] ; then
+text_start="-Ttext $link_address --defsym _start=$link_address"
+fi
+${CROSS}ld -m elf32ppc -T $lds $text_start -o $ofile \
$platformo $tmp $object/wrapper.a
 rm $tmp
 fi
diff --git a/arch/powerpc/boot/zImage.coff.lds.S 
b/arch/powerpc/boot/zImage.coff.lds.S
index fe87a90..856dc78 100644
--- a/arch/powerpc/boot/zImage.coff.lds.S
+++ b/arch/powerpc/boot/zImage.coff.lds.S
@@ -3,7 +3,6 @@ ENTRY(_zimage_start_opd)
 EXTERN(_zimage_start_opd)
 SECTIONS
 {
-  . = (5*1024*1024);
   _start = .;
   .text  :
   {
diff --git a/arch/powerpc/boot/zImage.lds.S b/arch/powerpc/boot/zImage.lds.S
index f6e380f..0962d62 100644
--- a/arch/powerpc/boot/zImage.lds.S
+++ b/arch/powerpc/boot/zImage.lds.S
@@ -3,7 +3,6 @@ ENTRY(_zimage_start)
 EXTERN(_zimage_start)
 SECTIONS
 {
-  . = (4*1024*1024);
   _start = .;
   .text  :
   {
-- 
1.5.5.4

___
Linuxppc-dev mailing list
Linuxppc-dev@ozlabs.org
https://ozlabs.org/mailman/listinfo/linuxppc-dev


Re: [2.6 patch] asm/ptrace.h userspace headers cleanup

2008-06-23 Thread Paul Mundt
On Mon, Jun 23, 2008 at 08:48:09PM +0300, Adrian Bunk wrote:
 This patch contains the following cleanups for the asm/ptrace.h 
 userspace headers:
 - include/asm-generic/Kbuild.asm already lists ptrace.h, remove
   the superfluous listings in the Kbuild files of the following
   architectures:
   - cris
   - frv
   - powerpc
   - x86
 - don't expose function prototypes and macros to userspace:
   - arm
   - blackfin
   - cris
   - mn10300
   - parisc
 - remove #ifdef CONFIG_'s around #define's:
   - blackfin
   - m68knommu
 - sh: AFAIK __SH5__ should work in both kernel and userspace,
   no need to leak CONFIG_SUPERH64 to userspace

Yes, that's fine. We've generally avoided relying entirely on the gcc
builtin definitions due to the rampant stupidity surrounding
__SH4_NOFPU__, but it is true that __SH5__ is always defined at least.

Acked-by: Paul Mundt [EMAIL PROTECTED]
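
A minimal sketch of the sh point above, assuming an exported ptrace-style
header; the macro name and the register counts are invented for illustration:

/* __SH5__ comes from the compiler, so userspace sees it too;
 * CONFIG_SUPERH64 only exists while building the kernel. */
#ifdef __SH5__
# define EXAMPLE_GPR_COUNT	64
#else
# define EXAMPLE_GPR_COUNT	16
#endif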
___
Linuxppc-dev mailing list
Linuxppc-dev@ozlabs.org
https://ozlabs.org/mailman/listinfo/linuxppc-dev


Re: [2.6 patch] asm/ptrace.h userspace headers cleanup

2008-06-23 Thread Grant Grundler
On Mon, Jun 23, 2008 at 08:48:09PM +0300, Adrian Bunk wrote:
 This patch contains the following cleanups for the asm/ptrace.h 
 userspace headers:
 - include/asm-generic/Kbuild.asm already lists ptrace.h, remove
   the superfluous listings in the Kbuild files of the following
   architectures:
   - cris
   - frv
   - powerpc
   - x86
 - don't expose function prototypes and macros to userspace:
   - arm
   - blackfin
   - cris
   - mn10300
   - parisc
...
 diff --git a/include/asm-parisc/ptrace.h b/include/asm-parisc/ptrace.h
 index 93f990e..3e94c5d 100644
 --- a/include/asm-parisc/ptrace.h
 +++ b/include/asm-parisc/ptrace.h
 @@ -33,7 +33,6 @@ struct pt_regs {
   unsigned long ipsw; /* CR22 */
  };
  
 -#define task_regs(task) ((struct pt_regs *) ((char *)(task) + TASK_REGS))
  /*
   * The numbers chosen here are somewhat arbitrary but absolutely MUST
   * not overlap with any of the number assigned in linux/ptrace.h.
 @@ -43,8 +42,11 @@ struct pt_regs {
   * since we have taken branch traps too)
   */
  #define PTRACE_SINGLEBLOCK   12  /* resume execution until next branch */
 +
  #ifdef __KERNEL__
  
 +#define task_regs(task) ((struct pt_regs *) ((char *)(task) + TASK_REGS))
 +
  /* XXX should we use iaoq[1] or iaoq[0] ? */
  #define user_mode(regs)  (((regs)->iaoq[0] & 3) ? 1 : 0)
  #define user_space(regs) (((regs)->iasq[1] != 0) ? 1 : 0)

Looks fine to me.
Acked-by: Grant Grundler [EMAIL PROTECTED]
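
The parisc hunk above is one instance of the same pattern; a minimal sketch
of the split, with the kernel-only macro renamed to make clear it is only
illustrative:

/* Exported to userspace. */
#define PTRACE_SINGLEBLOCK	12	/* resume execution until next branch */

#ifdef __KERNEL__
/* Kernel-only; never reaches the exported header. */
#define example_task_regs(task) \
	((struct pt_regs *)((char *)(task) + TASK_REGS))
#endif /* __KERNEL__ */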
___
Linuxppc-dev mailing list
Linuxppc-dev@ozlabs.org
https://ozlabs.org/mailman/listinfo/linuxppc-dev


Re: Using DMA interrupt on MPC8313

2008-06-23 Thread jumpingProgrammer

I use this in 8313erdb.dts, but cannot find DMA info in
/proc/interrupts. Why?

[EMAIL PROTECTED] {
	#address-cells = <1>;
	#size-cells = <1>;
	compatible = "fsl,elo-dma";
	cell-index = <0>;
	reg = <8030 2d0>; /* DMA general status register */
	ranges = <0 8100 200>;

	[EMAIL PROTECTED] {
		compatible = "fsl,elo-dma-channel";
		cell-index = <0>;
		reg = <0 80>;
		interrupt-parent = <700>;
		interrupts = <47 8>;
	};
	[EMAIL PROTECTED] {
		compatible = "fsl,elo-dma-channel";
		cell-index = <1>;
		reg = <80 80>;
		interrupt-parent = <700>;
		interrupts = <47 8>;
	};
	[EMAIL PROTECTED] {
		compatible = "fsl,elo-dma-channel";
		cell-index = <2>;
		reg = <100 80>;
		interrupt-parent = <700>;
		interrupts = <47 8>;
	};
	[EMAIL PROTECTED] {
		compatible = "fsl,elo-dma-channel";
		cell-index = <3>;
		reg = <180 80>;
		interrupt-parent = <700>;
		interrupts = <47 8>;
	};
};



-- 
View this message in context: 
http://www.nabble.com/Using-DMA-interrupt-on-MPC8313-tp17808164p18084260.html
Sent from the linuxppc-dev mailing list archive at Nabble.com.

___
Linuxppc-dev mailing list
Linuxppc-dev@ozlabs.org
https://ozlabs.org/mailman/listinfo/linuxppc-dev