On LPAR guest systems, Linux registers a shadow SLB with the hypervisor
to indicate which SLB entries always have to be available.

Today we go through this shadow SLB on guest entry and clear the valid
bit of every ESID in it. However, pHyp doesn't like that approach very
much and rewards us with fancy machine checks.

Fortunately, the shadow SLB descriptor also has a field that indicates
the number of valid entries that follow. While a guest is running, we
can simply set that field to 0 and not have to worry about any SLB
restoration magic at all.
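
In case the byte poke looks magic: the buffer that PACA_SLBSHADOWPTR
points to is the per-CPU SLB shadow area. A sketch of its layout
(mirroring struct slb_shadow from arch/powerpc/include/asm/lppaca.h;
the offset comments are mine) shows why storing a single byte at
offset 3 is enough -- the entry count is the low byte of a big-endian
32-bit field at the very start of the buffer:

    struct slb_shadow {
        __be32  persistent;     /* number of valid (bolted) entries, 0x00 */
        __be32  buffer_length;  /* total shadow buffer length,       0x04 */
        __be64  reserved;       /*                                   0x08 */
        struct {
            __be64  esid;       /* SLBSHADOW_SAVEAREA points here,   0x10 */
            __be64  vsid;
        } save_area[SLB_NUM_BOLTED];
    };

So the "stb r8, 3(r11)" below flips the count between 0 (guest
running) and SLB_NUM_BOLTED (host running) without touching the
entries themselves.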

While we're touching the code, let's also make it more readable (get
rid of rldicl), let it deal with a dynamic number of bolted SLB
entries, and only do the shadow SLB swizzling on LPAR systems.
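
For readers who don't have the rotate-and-mask encodings memorized:
"rldicl. r0, r10, 37, 63" rotates the ESID left by 37 bits and masks
off everything but the lowest bit, i.e. it isolates bit 36 -- which
is exactly the SLB_ESID_V valid bit. A rough C equivalent of what
both the old and the new test compute:

    #define SLB_ESID_V  0x0000000008000000UL  /* entry valid (v=1) */

    /* old: rldicl. r0, r10, 37, 63  -- extract bit 36 of the ESID    */
    /* new: and.    r9, r10, r7      -- r7 preloaded with SLB_ESID_V  */
    static inline int slb_esid_valid(unsigned long esid)
    {
            return (esid & SLB_ESID_V) != 0;
    }

The and. variant costs one scratch register but spells out what is
actually being tested.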

Signed-off-by: Alexander Graf <[email protected]>
---
 arch/powerpc/kvm/book3s_64_slb.S | 90 +++++++++++++++++++---------------------
 1 file changed, 43 insertions(+), 47 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
index 596140e..aa2e2da 100644
--- a/arch/powerpc/kvm/book3s_64_slb.S
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -17,29 +17,9 @@
  * Authors: Alexander Graf <[email protected]>
  */
 
-#define SHADOW_SLB_ESID(num)   (SLBSHADOW_SAVEAREA + (num * 0x10))
-#define SHADOW_SLB_VSID(num)   (SLBSHADOW_SAVEAREA + (num * 0x10) + 0x8)
-#define UNBOLT_SLB_ENTRY(num) \
-       li      r11, SHADOW_SLB_ESID(num);      \
-       LDX_BE  r9, r12, r11;                   \
-       /* Invalid? Skip. */;                   \
-       rldicl. r0, r9, 37, 63;                 \
-       beq     slb_entry_skip_ ## num;         \
-       xoris   r9, r9, SLB_ESID_V@h;           \
-       STDX_BE r9, r12, r11;                   \
-  slb_entry_skip_ ## num:
-
-#define REBOLT_SLB_ENTRY(num) \
-       li      r8, SHADOW_SLB_ESID(num);       \
-       li      r7, SHADOW_SLB_VSID(num);       \
-       LDX_BE  r10, r11, r8;                   \
-       cmpdi   r10, 0;                         \
-       beq     slb_exit_skip_ ## num;          \
-       oris    r10, r10, SLB_ESID_V@h;         \
-       LDX_BE  r9, r11, r7;                    \
-       slbmte  r9, r10;                        \
-       STDX_BE r10, r11, r8;                   \
-slb_exit_skip_ ## num:
+#define SHADOW_SLB_ENTRY_LEN   0x10
+#define OFFSET_ESID(x)         (SHADOW_SLB_ENTRY_LEN * x)
+#define OFFSET_VSID(x)         ((SHADOW_SLB_ENTRY_LEN * x) + 8)
 
 /******************************************************************************
  *                                                                            *
@@ -63,20 +43,15 @@ slb_exit_skip_ ## num:
         * SVCPU[LR]  = guest LR
         */
 
-       /* Remove LPAR shadow entries */
+BEGIN_FW_FTR_SECTION
 
-#if SLB_NUM_BOLTED == 3
+       /* Declare SLB shadow as 0 entries big */
 
-       ld      r12, PACA_SLBSHADOWPTR(r13)
+       ld      r11, PACA_SLBSHADOWPTR(r13)
+       li      r8, 0
+       stb     r8, 3(r11)
 
-       /* Remove bolted entries */
-       UNBOLT_SLB_ENTRY(0)
-       UNBOLT_SLB_ENTRY(1)
-       UNBOLT_SLB_ENTRY(2)
-       
-#else
-#error unknown number of bolted entries
-#endif
+END_FW_FTR_SECTION_IFSET(FW_FEATURE_LPAR)
 
        /* Flush SLB */
 
@@ -86,6 +61,8 @@ slb_exit_skip_ ## num:
 
        /* Fill SLB with our shadow */
 
+       lis     r7, SLB_ESID_V@h
+
        lbz     r12, SVCPU_SLB_MAX(r3)
        mulli   r12, r12, 16
        addi    r12, r12, SVCPU_SLB
@@ -99,7 +76,7 @@ slb_loop_enter:
 
        ld      r10, 0(r11)
 
-       rldicl. r0, r10, 37, 63
+       and.    r9, r10, r7
        beq     slb_loop_enter_skip
 
        ld      r9, 8(r11)
@@ -110,6 +87,9 @@ slb_loop_enter_skip:
        cmpd    cr0, r11, r12
        blt     slb_loop_enter
 
+       isync
+       sync
+
 slb_do_enter:
 
 .endm
@@ -136,23 +116,39 @@ slb_do_enter:
         *
         */
 
-       /* Restore bolted entries from the shadow and fix it along the way */
-
-       /* We don't store anything in entry 0, so we don't need to take care of it */
+       /* Remove all SLB entries that are in use. */
        slbia
-       isync
 
-#if SLB_NUM_BOLTED == 3
+       /* Restore bolted entries from the shadow */
 
        ld      r11, PACA_SLBSHADOWPTR(r13)
 
-       REBOLT_SLB_ENTRY(0)
-       REBOLT_SLB_ENTRY(1)
-       REBOLT_SLB_ENTRY(2)
-       
-#else
-#error unknown number of bolted entries
-#endif
+BEGIN_FW_FTR_SECTION
+
+       /* Declare SLB shadow as SLB_NUM_BOLTED entries big */
+
+       li      r8, SLB_NUM_BOLTED
+       stb     r8, 3(r11)
+
+END_FW_FTR_SECTION_IFSET(FW_FEATURE_LPAR)
+
+       /* Manually load all entries from shadow SLB */
+
+       li      r8, SLBSHADOW_SAVEAREA
+       li      r7, SLBSHADOW_SAVEAREA + 8
+
+       .rept   SLB_NUM_BOLTED
+       LDX_BE  r10, r11, r8
+       cmpdi   r10, 0
+       beq     1f
+       LDX_BE  r9, r11, r7
+       slbmte  r9, r10
+1:     addi    r7, r7, SHADOW_SLB_ENTRY_LEN
+       addi    r8, r8, SHADOW_SLB_ENTRY_LEN
+       .endr
+
+       isync
+       sync
 
 slb_do_exit:
 
-- 
1.8.1.4
