Now that thread_info is embedded at the beginning of task_struct, its address
is the same as the task's, which is already held in r2, so the
CURRENT_THREAD_INFO() macro is useless. This patch removes it.

At the same time, as the 'cpu' field is no longer in thread_info, this patch
renames TI_CPU to TASK_CPU.
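
As an illustration, a typical conversion (taken from the entry_32.S hunks
below) goes from:

	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_FLAGS(r9)

to a direct access through r2, which already points to current:

	lwz	r9,TI_FLAGS(r2)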

Signed-off-by: Christophe Leroy <christophe.le...@c-s.fr>
---
 arch/powerpc/include/asm/smp.h         |  2 +-
 arch/powerpc/include/asm/thread_info.h |  2 --
 arch/powerpc/kernel/asm-offsets.c      |  2 +-
 arch/powerpc/kernel/entry_32.S         | 43 ++++++++++++----------------------
 arch/powerpc/kernel/epapr_hcalls.S     |  5 ++--
 arch/powerpc/kernel/head_fsl_booke.S   |  5 ++--
 arch/powerpc/kernel/idle_6xx.S         |  8 +++----
 arch/powerpc/kernel/idle_e500.S        |  8 +++----
 arch/powerpc/kernel/misc_32.S          |  3 +--
 arch/powerpc/mm/hash_low_32.S          | 14 ++++-------
 arch/powerpc/sysdev/6xx-suspend.S      |  5 ++--
 11 files changed, 35 insertions(+), 62 deletions(-)

diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 202924a7c98c..1096633d0574 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -93,7 +93,7 @@ extern int smp_hw_index[];
 #define raw_smp_processor_id() 0
 #else
 #include <asm/asm-offsets.h>
-#define raw_smp_processor_id() (*(unsigned int*)((void*)current + TI_CPU))
+#define raw_smp_processor_id() (*(unsigned int*)((void*)current + TASK_CPU))
 #endif
 
 #define hard_smp_processor_id()        (smp_hw_index[smp_processor_id()])
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 62eb9ff31292..1c42df627bf3 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -19,8 +19,6 @@
 
 #ifdef CONFIG_PPC64
 #define CURRENT_THREAD_INFO(dest, sp)  stringify_in_c(ld dest, PACACURRENT(r13))
-#else
-#define CURRENT_THREAD_INFO(dest, sp)  stringify_in_c(mr dest, r2)
 #endif
 
 #ifndef __ASSEMBLY__
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 7bdd9fc21117..4d3aca1c9343 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -91,7 +91,7 @@ int main(void)
 #endif /* CONFIG_PPC64 */
        OFFSET(TASK_STACK, task_struct, stack);
 #ifdef CONFIG_SMP
-       OFFSET(TI_CPU, task_struct, cpu);
+       OFFSET(TASK_CPU, task_struct, cpu);
 #endif
 
 #ifdef CONFIG_LIVEPATCH
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 380c93b28090..f867885e43a2 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -168,8 +168,7 @@ transfer_to_handler:
        tophys(r11,r11)
        addi    r11,r11,global_dbcr0@l
 #ifdef CONFIG_SMP
-       CURRENT_THREAD_INFO(r9, r1)
-       lwz     r9,TI_CPU(r9)
+       lwz     r9,TASK_CPU(r2)
        slwi    r9,r9,3
        add     r11,r11,r9
 #endif
@@ -180,8 +179,7 @@ transfer_to_handler:
        stw     r12,4(r11)
 #endif
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-       CURRENT_THREAD_INFO(r9, r1)
-       tophys(r9, r9)
+       tophys(r9, r2)
        ACCOUNT_CPU_USER_ENTRY(r9, r11, r12)
 #endif
 
@@ -195,8 +193,7 @@ transfer_to_handler:
        ble-    stack_ovf               /* then the kernel stack overflowed */
 5:
 #if defined(CONFIG_6xx) || defined(CONFIG_E500)
-       CURRENT_THREAD_INFO(r9, r1)
-       tophys(r9,r9)                   /* check local flags */
+       tophys(r9,r2)                   /* check local flags */
        lwz     r12,TI_LOCAL_FLAGS(r9)
        mtcrf   0x01,r12
        bt-     31-TLF_NAPPING,4f
@@ -345,8 +342,7 @@ _GLOBAL(DoSyscall)
        mtmsr   r11
 1:
 #endif /* CONFIG_TRACE_IRQFLAGS */
-       CURRENT_THREAD_INFO(r10, r1)
-       lwz     r11,TI_FLAGS(r10)
+       lwz     r11,TI_FLAGS(r2)
        andi.   r11,r11,_TIF_SYSCALL_DOTRACE
        bne-    syscall_dotrace
 syscall_dotrace_cont:
@@ -379,13 +375,12 @@ ret_from_syscall:
        lwz     r3,GPR3(r1)
 #endif
        mr      r6,r3
-       CURRENT_THREAD_INFO(r12, r1)
        /* disable interrupts so current_thread_info()->flags can't change */
        LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
        /* Note: We don't bother telling lockdep about it */
        SYNC
        MTMSRD(r10)
-       lwz     r9,TI_FLAGS(r12)
+       lwz     r9,TI_FLAGS(r2)
        li      r8,-MAX_ERRNO
        andi.   r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
        bne-    syscall_exit_work
@@ -432,8 +427,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        andi.   r4,r8,MSR_PR
        beq     3f
-       CURRENT_THREAD_INFO(r4, r1)
-       ACCOUNT_CPU_USER_EXIT(r4, r5, r7)
+       ACCOUNT_CPU_USER_EXIT(r2, r5, r7)
 3:
 #endif
        lwz     r4,_LINK(r1)
@@ -526,7 +520,7 @@ syscall_exit_work:
        /* Clear per-syscall TIF flags if any are set.  */
 
        li      r11,_TIF_PERSYSCALL_MASK
-       addi    r12,r12,TI_FLAGS
+       addi    r12,r2,TI_FLAGS
 3:     lwarx   r8,0,r12
        andc    r8,r8,r11
 #ifdef CONFIG_IBM405_ERR77
@@ -534,7 +528,6 @@ syscall_exit_work:
 #endif
        stwcx.  r8,0,r12
        bne-    3b
-       subi    r12,r12,TI_FLAGS
        
 4:     /* Anything which requires enabling interrupts? */
        andi.   r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
@@ -813,8 +806,7 @@ ret_from_except:
 
 user_exc_return:               /* r10 contains MSR_KERNEL here */
        /* Check current_thread_info()->flags */
-       CURRENT_THREAD_INFO(r9, r1)
-       lwz     r9,TI_FLAGS(r9)
+       lwz     r9,TI_FLAGS(r2)
        andi.   r0,r9,_TIF_USER_WORK_MASK
        bne     do_work
 
@@ -827,8 +819,7 @@ restore_user:
        bnel-   load_dbcr0
 #endif
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-       CURRENT_THREAD_INFO(r9, r1)
-       ACCOUNT_CPU_USER_EXIT(r9, r10, r11)
+       ACCOUNT_CPU_USER_EXIT(r2, r10, r11)
 #endif
 
        b       restore
@@ -836,8 +827,7 @@ restore_user:
 /* N.B. the only way to get here is from the beq following ret_from_except. */
 resume_kernel:
        /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
-       CURRENT_THREAD_INFO(r9, r1)
-       lwz     r8,TI_FLAGS(r9)
+       lwz     r8,TI_FLAGS(r2)
        andis.  r0,r8,_TIF_EMULATE_STACK_STORE@h
        beq+    1f
 
@@ -863,7 +853,7 @@ resume_kernel:
 
        /* Clear _TIF_EMULATE_STACK_STORE flag */
        lis     r11,_TIF_EMULATE_STACK_STORE@h
-       addi    r5,r9,TI_FLAGS
+       addi    r5,r2,TI_FLAGS
 0:     lwarx   r8,0,r5
        andc    r8,r8,r11
 #ifdef CONFIG_IBM405_ERR77
@@ -875,7 +865,7 @@ resume_kernel:
 
 #ifdef CONFIG_PREEMPT
        /* check current_thread_info->preempt_count */
-       lwz     r0,TI_PREEMPT(r9)
+       lwz     r0,TI_PREEMPT(r2)
        cmpwi   0,r0,0          /* if non-zero, just restore regs and return */
        bne     restore
        andi.   r8,r8,_TIF_NEED_RESCHED
@@ -891,8 +881,7 @@ resume_kernel:
        bl      trace_hardirqs_off
 #endif
 1:     bl      preempt_schedule_irq
-       CURRENT_THREAD_INFO(r9, r1)
-       lwz     r3,TI_FLAGS(r9)
+       lwz     r3,TI_FLAGS(r2)
        andi.   r0,r3,_TIF_NEED_RESCHED
        bne-    1b
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -1191,8 +1180,7 @@ load_dbcr0:
        lis     r11,global_dbcr0@ha
        addi    r11,r11,global_dbcr0@l
 #ifdef CONFIG_SMP
-       CURRENT_THREAD_INFO(r9, r1)
-       lwz     r9,TI_CPU(r9)
+       lwz     r9,TASK_CPU(r2)
        slwi    r9,r9,3
        add     r11,r11,r9
 #endif
@@ -1232,8 +1220,7 @@ recheck:
        LOAD_MSR_KERNEL(r10,MSR_KERNEL)
        SYNC
        MTMSRD(r10)             /* disable interrupts */
-       CURRENT_THREAD_INFO(r9, r1)
-       lwz     r9,TI_FLAGS(r9)
+       lwz     r9,TI_FLAGS(r2)
        andi.   r0,r9,_TIF_NEED_RESCHED
        bne-    do_resched
        andi.   r0,r9,_TIF_USER_WORK_MASK
diff --git a/arch/powerpc/kernel/epapr_hcalls.S b/arch/powerpc/kernel/epapr_hcalls.S
index 52ca2471ee1a..d252f4663a23 100644
--- a/arch/powerpc/kernel/epapr_hcalls.S
+++ b/arch/powerpc/kernel/epapr_hcalls.S
@@ -21,10 +21,9 @@
 #ifndef CONFIG_PPC64
 /* epapr_ev_idle() was derived from e500_idle() */
 _GLOBAL(epapr_ev_idle)
-       CURRENT_THREAD_INFO(r3, r1)
-       PPC_LL  r4, TI_LOCAL_FLAGS(r3)  /* set napping bit */
+       PPC_LL  r4, TI_LOCAL_FLAGS(r2)  /* set napping bit */
        ori     r4, r4,_TLF_NAPPING     /* so when we take an exception */
-       PPC_STL r4, TI_LOCAL_FLAGS(r3)  /* it will return to our caller */
+       PPC_STL r4, TI_LOCAL_FLAGS(r2)  /* it will return to our caller */
 
        wrteei  1
 
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 21819bd64912..b5f8d0899ddc 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -244,8 +244,7 @@ set_ivor:
        stwu    r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
 
 #ifdef CONFIG_SMP
-       CURRENT_THREAD_INFO(r22, r1)
-       stw     r24, TI_CPU(r22)
+       stw     r24, TASK_CPU(r2)
 #endif
 
        bl      early_init
@@ -704,7 +703,7 @@ finish_tlb_load:
 
        /* Get the next_tlbcam_idx percpu var */
 #ifdef CONFIG_SMP
-       lwz     r15, TI_CPU-THREAD(r12)
+       lwz     r15, TASK_CPU-THREAD(r12)
        lis     r14, __per_cpu_offset@h
        ori     r14, r14, __per_cpu_offset@l
        rlwinm  r15, r15, 2, 0, 29
diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S
index ff026c9d3cab..5afd2e236990 100644
--- a/arch/powerpc/kernel/idle_6xx.S
+++ b/arch/powerpc/kernel/idle_6xx.S
@@ -136,10 +136,9 @@ BEGIN_FTR_SECTION
        DSSALL
        sync
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-       CURRENT_THREAD_INFO(r9, r1)
-       lwz     r8,TI_LOCAL_FLAGS(r9)   /* set napping bit */
+       lwz     r8,TI_LOCAL_FLAGS(r2)   /* set napping bit */
        ori     r8,r8,_TLF_NAPPING      /* so when we take an exception */
-       stw     r8,TI_LOCAL_FLAGS(r9)   /* it will return to our caller */
+       stw     r8,TI_LOCAL_FLAGS(r2)   /* it will return to our caller */
        mfmsr   r7
        ori     r7,r7,MSR_EE
        oris    r7,r7,MSR_POW@h
@@ -159,8 +158,7 @@ _GLOBAL(power_save_ppc32_restore)
        stw     r9,_NIP(r11)            /* make it do a blr */
 
 #ifdef CONFIG_SMP
-       CURRENT_THREAD_INFO(r12, r11)
-       lwz     r11,TI_CPU(r12)         /* get cpu number * 4 */
+       lwz     r11,TASK_CPU(r2)                /* get cpu number * 4 */
        slwi    r11,r11,2
 #else
        li      r11,0
diff --git a/arch/powerpc/kernel/idle_e500.S b/arch/powerpc/kernel/idle_e500.S
index 583e55ac7d26..69dfcd2ca011 100644
--- a/arch/powerpc/kernel/idle_e500.S
+++ b/arch/powerpc/kernel/idle_e500.S
@@ -22,10 +22,9 @@
        .text
 
 _GLOBAL(e500_idle)
-       CURRENT_THREAD_INFO(r3, r1)
-       lwz     r4,TI_LOCAL_FLAGS(r3)   /* set napping bit */
+       lwz     r4,TI_LOCAL_FLAGS(r2)   /* set napping bit */
        ori     r4,r4,_TLF_NAPPING      /* so when we take an exception */
-       stw     r4,TI_LOCAL_FLAGS(r3)   /* it will return to our caller */
+       stw     r4,TI_LOCAL_FLAGS(r2)   /* it will return to our caller */
 
 #ifdef CONFIG_PPC_E500MC
        wrteei  1
@@ -88,8 +87,7 @@ _GLOBAL(power_save_ppc32_restore)
        stw     r9,_NIP(r11)            /* make it do a blr */
 
 #ifdef CONFIG_SMP
-       CURRENT_THREAD_INFO(r12, r1)
-       lwz     r11,TI_CPU(r12)         /* get cpu number * 4 */
+       lwz     r11,TASK_CPU(r2)                /* get cpu number * 4 */
        slwi    r11,r11,2
 #else
        li      r11,0
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index d3ba56307eb4..0509c9a215a7 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -183,8 +183,7 @@ _GLOBAL(low_choose_750fx_pll)
 
 #ifdef CONFIG_SMP
        /* Store new HID1 image */
-       CURRENT_THREAD_INFO(r6, r1)
-       lwz     r6,TI_CPU(r6)
+       lwz     r6,TASK_CPU(r2)
        slwi    r6,r6,2
 #else
        li      r6, 0
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index 26acf6c8c20c..27edbfddebaa 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -185,8 +185,7 @@ _GLOBAL(add_hash_page)
        add     r3,r3,r0                /* note create_hpte trims to 24 bits */
 
 #ifdef CONFIG_SMP
-       CURRENT_THREAD_INFO(r8, r1)     /* use cpu number to make tag */
-       lwz     r8,TI_CPU(r8)           /* to go in mmu_hash_lock */
+       lwz     r8,TASK_CPU(r2)         /* to go in mmu_hash_lock */
        oris    r8,r8,12
 #endif /* CONFIG_SMP */
 
@@ -546,9 +545,8 @@ _GLOBAL(flush_hash_pages)
 #ifdef CONFIG_SMP
        addis   r9,r7,mmu_hash_lock@ha
        addi    r9,r9,mmu_hash_lock@l
-       CURRENT_THREAD_INFO(r8, r1)
-       add     r8,r8,r7
-       lwz     r8,TI_CPU(r8)
+       add     r8,r2,r7
+       lwz     r8,TASK_CPU(r8)
        oris    r8,r8,9
 10:    lwarx   r0,0,r9
        cmpi    0,r0,0
@@ -641,8 +639,7 @@ EXPORT_SYMBOL(flush_hash_pages)
  */
 _GLOBAL(_tlbie)
 #ifdef CONFIG_SMP
-       CURRENT_THREAD_INFO(r8, r1)
-       lwz     r8,TI_CPU(r8)
+       lwz     r8,TASK_CPU(r2)
        oris    r8,r8,11
        mfmsr   r10
        SYNC
@@ -679,8 +676,7 @@ _GLOBAL(_tlbie)
  */
 _GLOBAL(_tlbia)
 #if defined(CONFIG_SMP)
-       CURRENT_THREAD_INFO(r8, r1)
-       lwz     r8,TI_CPU(r8)
+       lwz     r8,TASK_CPU(r2)
        oris    r8,r8,10
        mfmsr   r10
        SYNC
diff --git a/arch/powerpc/sysdev/6xx-suspend.S b/arch/powerpc/sysdev/6xx-suspend.S
index cf48e9cb2575..6c4aec25c4ba 100644
--- a/arch/powerpc/sysdev/6xx-suspend.S
+++ b/arch/powerpc/sysdev/6xx-suspend.S
@@ -29,10 +29,9 @@ _GLOBAL(mpc6xx_enter_standby)
        ori     r5, r5, ret_from_standby@l
        mtlr    r5
 
-       CURRENT_THREAD_INFO(r5, r1)
-       lwz     r6, TI_LOCAL_FLAGS(r5)
+       lwz     r6, TI_LOCAL_FLAGS(r2)
        ori     r6, r6, _TLF_SLEEPING
-       stw     r6, TI_LOCAL_FLAGS(r5)
+       stw     r6, TI_LOCAL_FLAGS(r2)
 
        mfmsr   r5
        ori     r5, r5, MSR_EE
-- 
2.13.3
