msr_check_and_set() always performs an mfmsr() to determine if it needs
to perform an mtmsr(). As mfmsr() can be a costly operation,
msr_check_and_set() could return the MSR now present on the CPU so that
callers of msr_check_and_set() don't have to make their own mfmsr() call.
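For illustration, a hypothetical caller (not part of this patch, the
function name is made up) could then use the returned value instead of
re-reading the MSR itself:

	/*
	 * Hypothetical caller, for illustration only: the value returned
	 * by msr_check_and_set() is the MSR now on the CPU, so the caller
	 * can test bits in it without its own mfmsr().
	 */
	static void hypothetical_fp_user(void)
	{
		unsigned long msr;

		msr = msr_check_and_set(MSR_FP);

		/* Previously an extra mfmsr() would be needed for a check like this. */
		WARN_ON(!(msr & MSR_FP));
	}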

Signed-off-by: Cyril Bur <cyril...@gmail.com>
---
 arch/powerpc/include/asm/reg.h | 2 +-
 arch/powerpc/kernel/process.c  | 4 +++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 9dddabc..a8f63bc 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1250,7 +1250,7 @@ static inline void mtmsr_isync(unsigned long val)
                                     : "memory")
 #endif
 
-extern void msr_check_and_set(unsigned long bits);
+extern unsigned long msr_check_and_set(unsigned long bits);
 extern bool strict_msr_control;
 extern void __msr_check_and_clear(unsigned long bits);
 static inline void msr_check_and_clear(unsigned long bits)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 5029567..34ee5f2 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -111,7 +111,7 @@ static int __init enable_strict_msr_control(char *str)
 }
 early_param("ppc_strict_facility_enable", enable_strict_msr_control);
 
-void msr_check_and_set(unsigned long bits)
+unsigned long msr_check_and_set(unsigned long bits)
 {
        unsigned long oldmsr = mfmsr();
        unsigned long newmsr;
@@ -125,6 +125,8 @@ void msr_check_and_set(unsigned long bits)
 
        if (oldmsr != newmsr)
                mtmsr_isync(newmsr);
+
+       return newmsr;
 }
 
 void __msr_check_and_clear(unsigned long bits)
-- 
2.10.0
