Rather than hardcode a scratch register for the XPA case in iPTE_SW,
pass one through from the work registers allocated by the caller. This
allows the XPA path to function correctly regardless of which work
registers are in use.

Signed-off-by: Paul Burton <[email protected]>
---

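Not part of the patch -- a minimal sketch of the new calling convention,
for reviewers skimming the diff. It assumes the r4000 handlers' work
registers (wr.r1/wr.r2/wr.r3) as used in the hunks below, and shows only
the XPA branch of iPTE_SW:

	/* Caller side (as in build_r4000_tlb_load_handler): allocate the
	 * work registers once, then pass one of them down as the scratch. */
	struct work_registers wr = build_get_work_registers(&p);

	build_make_valid(&p, &r, wr.r1, wr.r2, wr.r3);	/* wr.r3 -> scratch */

	/* Callee side: iPTE_SW's XPA path emits its lui/or through whatever
	 * scratch register the caller handed in, rather than hardcoding $1. */
	if (config_enabled(CONFIG_XPA) && !cpu_has_64bits) {
		uasm_i_lui(p, scratch, (mode >> 16));
		uasm_i_or(p, pte, pte, scratch);
	}

The R3000 handlers keep passing -1, since they work directly in K0/K1 and
never reach the XPA path.
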
 arch/mips/mm/tlbex.c | 24 +++++++++++-------------
 1 file changed, 11 insertions(+), 13 deletions(-)

diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 004cd9f..d7a7b3d 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1526,14 +1526,12 @@ iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
 
 static void
 iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
-       unsigned int mode)
+       unsigned int mode, unsigned int scratch)
 {
 #ifdef CONFIG_PHYS_ADDR_T_64BIT
        unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
 
        if (config_enabled(CONFIG_XPA) && !cpu_has_64bits) {
-               const int scratch = 1; /* Our extra working register */
-
                uasm_i_lui(p, scratch, (mode >> 16));
                uasm_i_or(p, pte, pte, scratch);
        } else
@@ -1630,11 +1628,11 @@ build_pte_present(u32 **p, struct uasm_reloc **r,
 /* Make PTE valid, store result in PTR. */
 static void
 build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
-                unsigned int ptr)
+                unsigned int ptr, unsigned int scratch)
 {
        unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;
 
-       iPTE_SW(p, r, pte, ptr, mode);
+       iPTE_SW(p, r, pte, ptr, mode, scratch);
 }
 
 /*
@@ -1670,12 +1668,12 @@ build_pte_writable(u32 **p, struct uasm_reloc **r,
  */
 static void
 build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
-                unsigned int ptr)
+                unsigned int ptr, unsigned int scratch)
 {
        unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
                             | _PAGE_DIRTY);
 
-       iPTE_SW(p, r, pte, ptr, mode);
+       iPTE_SW(p, r, pte, ptr, mode, scratch);
 }
 
 /*
@@ -1780,7 +1778,7 @@ static void build_r3000_tlb_load_handler(void)
        build_r3000_tlbchange_handler_head(&p, K0, K1);
        build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl);
        uasm_i_nop(&p); /* load delay */
-       build_make_valid(&p, &r, K0, K1);
+       build_make_valid(&p, &r, K0, K1, -1);
        build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
 
        uasm_l_nopage_tlbl(&l, p);
@@ -1811,7 +1809,7 @@ static void build_r3000_tlb_store_handler(void)
        build_r3000_tlbchange_handler_head(&p, K0, K1);
        build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs);
        uasm_i_nop(&p); /* load delay */
-       build_make_write(&p, &r, K0, K1);
+       build_make_write(&p, &r, K0, K1, -1);
        build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
 
        uasm_l_nopage_tlbs(&l, p);
@@ -1842,7 +1840,7 @@ static void build_r3000_tlb_modify_handler(void)
        build_r3000_tlbchange_handler_head(&p, K0, K1);
        build_pte_modifiable(&p, &r, K0, K1,  -1, label_nopage_tlbm);
        uasm_i_nop(&p); /* load delay */
-       build_make_write(&p, &r, K0, K1);
+       build_make_write(&p, &r, K0, K1, -1);
        build_r3000_pte_reload_tlbwi(&p, K0, K1);
 
        uasm_l_nopage_tlbm(&l, p);
@@ -2010,7 +2008,7 @@ static void build_r4000_tlb_load_handler(void)
                }
                uasm_l_tlbl_goaround1(&l, p);
        }
-       build_make_valid(&p, &r, wr.r1, wr.r2);
+       build_make_valid(&p, &r, wr.r1, wr.r2, wr.r3);
        build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
 
 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
@@ -2124,7 +2122,7 @@ static void build_r4000_tlb_store_handler(void)
        build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
        if (m4kc_tlbp_war())
                build_tlb_probe_entry(&p);
-       build_make_write(&p, &r, wr.r1, wr.r2);
+       build_make_write(&p, &r, wr.r1, wr.r2, wr.r3);
        build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
 
 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
@@ -2180,7 +2178,7 @@ static void build_r4000_tlb_modify_handler(void)
        if (m4kc_tlbp_war())
                build_tlb_probe_entry(&p);
        /* Present and writable bits set, set accessed and dirty bits. */
-       build_make_write(&p, &r, wr.r1, wr.r2);
+       build_make_write(&p, &r, wr.r1, wr.r2, wr.r3);
        build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
 
 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
-- 
2.8.0
