No supported processor implements this mode. Setting the bit in
MSR values can be a bit confusing (and would prevent the bit from
ever being reused). Remove it.

Signed-off-by: Nicholas Piggin <npig...@gmail.com>
---
 arch/powerpc/include/asm/reg.h | 5 +----
 arch/powerpc/kernel/entry_64.S | 2 +-
 arch/powerpc/kernel/head_64.S  | 3 +--
 arch/powerpc/kvm/book3s_pr.c   | 2 +-
 4 files changed, 4 insertions(+), 8 deletions(-)

diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index f877a576b338..8885fbf4285b 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -29,7 +29,6 @@
 #include <asm/reg_8xx.h>
 
 #define MSR_SF_LG      63              /* Enable 64 bit mode */
-#define MSR_ISF_LG     61              /* Interrupt 64b mode valid on 630 */
 #define MSR_HV_LG      60              /* Hypervisor state */
 #define MSR_TS_T_LG    34              /* Trans Mem state: Transactional */
 #define MSR_TS_S_LG    33              /* Trans Mem state: Suspended */
@@ -69,13 +68,11 @@
 
 #ifdef CONFIG_PPC64
 #define MSR_SF         __MASK(MSR_SF_LG)       /* Enable 64 bit mode */
-#define MSR_ISF		__MASK(MSR_ISF_LG)	/* Interrupt 64b mode valid on 630 */
 #define MSR_HV                 __MASK(MSR_HV_LG)       /* Hypervisor state */
 #define MSR_S          __MASK(MSR_S_LG)        /* Secure state */
 #else
 /* so tests for these bits fail on 32-bit */
 #define MSR_SF         0
-#define MSR_ISF                0
 #define MSR_HV         0
 #define MSR_S          0
 #endif
@@ -134,7 +131,7 @@
 #define MSR_64BIT      MSR_SF
 
 /* Server variant */
-#define __MSR          (MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV)
+#define __MSR          (MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_HV)
 #ifdef __BIG_ENDIAN__
 #define MSR_           __MSR
 #define MSR_IDLE       (MSR_ME | MSR_SF | MSR_HV)
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 2f3846192ec7..479fb58844fa 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -967,7 +967,7 @@ _GLOBAL(enter_prom)
        mtsrr1  r11
        rfi
 #else /* CONFIG_PPC_BOOK3E */
-       LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
+       LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_LE)
        andc    r11,r11,r12
        mtsrr1  r11
        RFI_TO_KERNEL
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 1510b2a56669..4e2591cb4bd1 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -865,8 +865,7 @@ enable_64b_mode:
        oris    r11,r11,0x8000          /* CM bit set, we'll set ICM later */
        mtmsr   r11
 #else /* CONFIG_PPC_BOOK3E */
-       li      r12,(MSR_64BIT | MSR_ISF)@highest
-       sldi    r12,r12,48
+       LOAD_REG_IMMEDIATE(r12, MSR_64BIT)
        or      r11,r11,r12
        mtmsrd  r11
        isync
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index b1fefa63e125..913944dc3620 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -239,7 +239,7 @@ static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
        smsr |= (guest_msr & vcpu->arch.guest_owned_ext);
        /* 64-bit Process MSR values */
 #ifdef CONFIG_PPC_BOOK3S_64
-       smsr |= MSR_ISF | MSR_HV;
+       smsr |= MSR_HV;
 #endif
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        /*
-- 
2.23.0

Reply via email to