Our inline assembly only clobbers the first condition register field,
but we mark all of them as being clobbered.
This will cause LLVM to save and restore the non volatile condition
register fields around the inline assembly, which is completely
unnecessary. A simple example:
void foo(void)
{
	asm volatile("" ::: "cc");
}
gives:
foo:
0: 26 00 80 7d 	mfcr	r12
4: 08 00 81 91 stw r12,8(r1)
8: 08 00 61 81 lwz r11,8(r1)
c: 20 01 72 7d mtocrf 32,r11
10: 20 01 71 7d mtocrf 16,r11
14: 20 81 70 7d mtocrf 8,r11
18: 20 00 80 4e blr
Replacing "cc" with "cr0":
foo:
0: 20 00 80 4e blr
This patch produces no difference to a kernel built with gcc.
Signed-off-by: Anton Blanchard an...@samba.org
---
arch/powerpc/include/asm/atomic.h| 36
arch/powerpc/include/asm/bitops.h| 4 ++--
arch/powerpc/include/asm/cmpxchg.h | 16 +++---
arch/powerpc/include/asm/futex.h | 2 +-
arch/powerpc/include/asm/kvm_book3s_64.h | 2 +-
arch/powerpc/include/asm/local.h | 12 +--
arch/powerpc/include/asm/mutex.h | 6 +++---
arch/powerpc/include/asm/pgtable-ppc32.h | 4 ++--
arch/powerpc/include/asm/pgtable-ppc64.h | 4 ++--
arch/powerpc/kvm/book3s_hv_rm_mmu.c | 2 +-
arch/powerpc/mm/pgtable_64.c | 4 ++--
11 files changed, 46 insertions(+), 46 deletions(-)
diff --git a/arch/powerpc/include/asm/atomic.h
b/arch/powerpc/include/asm/atomic.h
index 512d278..ef2172c 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -39,7 +39,7 @@ static __inline__ void atomic_##op(int a, atomic_t *v)
\
bne-1b\n \
: "=&r" (t), "+m" (v->counter) \
: "r" (a), "r" (&v->counter)\
- : "cc");\
+ : "cr0"); \
} \
#define ATOMIC_OP_RETURN(op, asm_op) \
@@ -57,7 +57,7 @@ static __inline__ int atomic_##op##_return(int a, atomic_t
*v)\
PPC_ATOMIC_EXIT_BARRIER \
: "=&r" (t) \
: "r" (a), "r" (&v->counter)\
- : "cc", "memory"); \
+ : "cr0", "memory"); \
\
return t; \
}
@@ -85,7 +85,7 @@ static __inline__ void atomic_inc(atomic_t *v)
bne-1b
: "=&r" (t), "+m" (v->counter)
: "r" (&v->counter)
- : "cc", "xer");
+ : "cr0", "xer");
}
static __inline__ int atomic_inc_return(atomic_t *v)
@@ -102,7 +102,7 @@ static __inline__ int atomic_inc_return(atomic_t *v)
PPC_ATOMIC_EXIT_BARRIER
: "=&r" (t)
: "r" (&v->counter)
- : "cc", "xer", "memory");
+ : "cr0", "xer", "memory");
return t;
}
@@ -129,7 +129,7 @@ static __inline__ void atomic_dec(atomic_t *v)
bne-1b
: "=&r" (t), "+m" (v->counter)
: "r" (&v->counter)
- : "cc", "xer");
+ : "cr0", "xer");
}
static __inline__ int atomic_dec_return(atomic_t *v)
@@ -146,7 +146,7 @@ static __inline__ int atomic_dec_return(atomic_t *v)
PPC_ATOMIC_EXIT_BARRIER
: "=&r" (t)
: "r" (&v->counter)
- : "cc", "xer", "memory");
+ : "cr0", "xer", "memory");
return t;
}
@@ -181,7 +181,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int
a, int u)
2:
: "=&r" (t)
: "r" (&v->counter), "r" (a), "r" (u)
- : "cc", "memory");
+ : "cr0", "memory");
return t;
}
@@ -211,7 +211,7 @@ static __inline__ int atomic_inc_not_zero(atomic_t *v)
2:
: "=&r" (t1), "=&r" (t2)
: "r" (&v->counter)
- : "cc", "xer", "memory");
+ : "cr0", "xer", "memory");
return t1;
}
@@ -242,7 +242,7 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
\n\
2:"	: "=&b" (t)
: "r" (&v->counter)
- : "cc", "memory");
+ : "cr0", "memory");
return t;
}
@@ -278,7 +278,7 @@ static __inline__ void atomic64_##op(long a, atomic64_t *v)
\
bne-1b\n \
: "=&r" (t), "+m" (v->counter) \
: "r" (a), "r" (&v->counter)\
- : "cc");\
+ : "cr0"); \
}
#define ATOMIC64_OP_RETURN(op, asm_op) \
@@ -295,7 +295,7 @@ static __inline__ long