This is a note to let you know that I've just added the patch titled

    sparc64: Adjust KTSB assembler to support larger physical addresses.

to the 3.14-stable tree which can be found at:
    
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     sparc64-adjust-ktsb-assembler-to-support-larger-physical-addresses.patch
and it can be found in the queue-3.14 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <[email protected]> know about it.


From foo@baz Tue Oct 28 11:13:19 CST 2014
From: "David S. Miller" <[email protected]>
Date: Wed, 17 Sep 2014 10:14:56 -0700
Subject: sparc64: Adjust KTSB assembler to support larger physical addresses.

From: "David S. Miller" <[email protected]>

[ Upstream commit 8c82dc0e883821c098c8b0b130ffebabf9aab5df ]

As currently coded, the KTSB accesses in the kernel only support up to
47 bits of physical addressing.

Adjust the instruction and patching sequence in order to support
arbitrary 64-bit addresses.
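
For reference, the 47-bit ceiling follows from the old sequence: sethi/or
can only materialize a 32-bit constant, and the single sllx by
KTSB_PHYS_SHIFT (15) extends that to at most 32 + 15 = 47 bits.  The
sketch below (plain C, illustrative only, not part of the patch) models
the difference between the two encodings:

/* Illustrative only: why the old KTSB sequence tops out at 47 bits and
 * how the new one can express a full 64-bit physical address.
 */
#include <stdio.h>
#include <stdint.h>

#define KTSB_PHYS_SHIFT	15	/* shift used by the old patched sequence */

int main(void)
{
	/* Old scheme: sethi %hi + or %lo materializes a 32-bit value
	 * (the physical address pre-shifted right by KTSB_PHYS_SHIFT),
	 * which is then shifted left again at runtime, so the largest
	 * encodable address is (2^32 - 1) << 15, i.e. below 2^47.
	 */
	uint64_t old_max = (uint64_t)0xffffffffUL << KTSB_PHYS_SHIFT;

	/* New scheme: two sethi/or pairs build the high and low 32-bit
	 * halves, joined at runtime with "sllx REG1, 32; or REG1, REG2",
	 * so any 64-bit physical address can be expressed.
	 */
	uint64_t pa      = 0x00a5123456789000UL;	/* example PA above 2^47 */
	uint64_t high    = (pa >> 32) & 0xffffffffUL;
	uint64_t low     = pa & 0xffffffffUL;
	uint64_t rebuilt = (high << 32) | low;

	printf("old scheme limit  : %#llx (%d bits)\n",
	       (unsigned long long)old_max, 32 + KTSB_PHYS_SHIFT);
	printf("new scheme rebuilt: %#llx (matches: %d)\n",
	       (unsigned long long)rebuilt, rebuilt == pa);
	return 0;
}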

Signed-off-by: David S. Miller <[email protected]>
Acked-by: Bob Picco <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
 arch/sparc/include/asm/tsb.h |   30 ++++++++++++------------------
 arch/sparc/mm/init_64.c      |   28 +++++++++++++++++++++++++---
 2 files changed, 37 insertions(+), 21 deletions(-)

--- a/arch/sparc/include/asm/tsb.h
+++ b/arch/sparc/include/asm/tsb.h
@@ -256,8 +256,6 @@ extern struct tsb_phys_patch_entry __tsb
        (KERNEL_TSB_SIZE_BYTES / 16)
 #define KERNEL_TSB4M_NENTRIES  4096
 
-#define KTSB_PHYS_SHIFT                15
-
        /* Do a kernel TSB lookup at tl>0 on VADDR+TAG, branch to OK_LABEL
         * on TSB hit.  REG1, REG2, REG3, and REG4 are used as temporaries
         * and the found TTE will be left in REG1.  REG3 and REG4 must
@@ -266,17 +264,15 @@ extern struct tsb_phys_patch_entry __tsb
         * VADDR and TAG will be preserved and not clobbered by this macro.
         */
 #define KERN_TSB_LOOKUP_TL1(VADDR, TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
-661:   sethi           %hi(swapper_tsb), REG1;                 \
-       or              REG1, %lo(swapper_tsb), REG1; \
+661:   sethi           %uhi(swapper_tsb), REG1; \
+       sethi           %hi(swapper_tsb), REG2; \
+       or              REG1, %ulo(swapper_tsb), REG1; \
+       or              REG2, %lo(swapper_tsb), REG2; \
        .section        .swapper_tsb_phys_patch, "ax"; \
        .word           661b; \
        .previous; \
-661:   nop; \
-       .section        .tsb_ldquad_phys_patch, "ax"; \
-       .word           661b; \
-       sllx            REG1, KTSB_PHYS_SHIFT, REG1; \
-       sllx            REG1, KTSB_PHYS_SHIFT, REG1; \
-       .previous; \
+       sllx            REG1, 32, REG1; \
+       or              REG1, REG2, REG1; \
        srlx            VADDR, PAGE_SHIFT, REG2; \
        and             REG2, (KERNEL_TSB_NENTRIES - 1), REG2; \
        sllx            REG2, 4, REG2; \
@@ -291,17 +287,15 @@ extern struct tsb_phys_patch_entry __tsb
         * we can make use of that for the index computation.
         */
 #define KERN_TSB4M_LOOKUP_TL1(TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
-661:   sethi           %hi(swapper_4m_tsb), REG1;           \
-       or              REG1, %lo(swapper_4m_tsb), REG1; \
+661:   sethi           %uhi(swapper_4m_tsb), REG1; \
+       sethi           %hi(swapper_4m_tsb), REG2; \
+       or              REG1, %ulo(swapper_4m_tsb), REG1; \
+       or              REG2, %lo(swapper_4m_tsb), REG2; \
        .section        .swapper_4m_tsb_phys_patch, "ax"; \
        .word           661b; \
        .previous; \
-661:   nop; \
-       .section        .tsb_ldquad_phys_patch, "ax"; \
-       .word           661b; \
-       sllx            REG1, KTSB_PHYS_SHIFT, REG1; \
-       sllx            REG1, KTSB_PHYS_SHIFT, REG1; \
-       .previous; \
+       sllx            REG1, 32, REG1; \
+       or              REG1, REG2, REG1; \
        and             TAG, (KERNEL_TSB4M_NENTRIES - 1), REG2; \
        sllx            REG2, 4, REG2; \
        add             REG1, REG2, REG2; \
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1727,19 +1727,41 @@ static void __init tsb_phys_patch(void)
 static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
 extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
 
+/* The swapper TSBs are loaded with a base sequence of:
+ *
+ *     sethi   %uhi(SYMBOL), REG1
+ *     sethi   %hi(SYMBOL), REG2
+ *     or      REG1, %ulo(SYMBOL), REG1
+ *     or      REG2, %lo(SYMBOL), REG2
+ *     sllx    REG1, 32, REG1
+ *     or      REG1, REG2, REG1
+ *
+ * When we use physical addressing for the TSB accesses, we patch the
+ * first four instructions in the above sequence.
+ */
+
 static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
 {
-       pa >>= KTSB_PHYS_SHIFT;
+       unsigned long high_bits, low_bits;
+
+       high_bits = (pa >> 32) & 0xffffffff;
+       low_bits = (pa >> 0) & 0xffffffff;
 
        while (start < end) {
                unsigned int *ia = (unsigned int *)(unsigned long)*start;
 
-               ia[0] = (ia[0] & ~0x3fffff) | (pa >> 10);
+               ia[0] = (ia[0] & ~0x3fffff) | (high_bits >> 10);
                __asm__ __volatile__("flush     %0" : : "r" (ia));
 
-               ia[1] = (ia[1] & ~0x3ff) | (pa & 0x3ff);
+               ia[1] = (ia[1] & ~0x3fffff) | (low_bits >> 10);
                __asm__ __volatile__("flush     %0" : : "r" (ia + 1));
 
+               ia[2] = (ia[2] & ~0x1fff) | (high_bits & 0x3ff);
+               __asm__ __volatile__("flush     %0" : : "r" (ia + 2));
+
+               ia[3] = (ia[3] & ~0x1fff) | (low_bits & 0x3ff);
+               __asm__ __volatile__("flush     %0" : : "r" (ia + 3));
+
                start++;
        }
 }
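
The field widths patched above follow the SPARC instruction encodings:
sethi carries a 22-bit immediate that supplies bits 31:10 of its
destination register (hence the ~0x3fffff mask and the ">> 10"), while
the or-with-immediate has a 13-bit simm field of which only the low 10
bits are needed here (hence the ~0x1fff mask and the "& 0x3ff").  A
stand-alone model of the split and the runtime reassembly (illustrative
C only, not kernel code):

/* Illustrative only: models how patch_one_ktsb_phys() splits a 64-bit
 * physical address into the four instruction immediates, and how the
 * patched sethi/sethi/or/or/sllx/or sequence puts it back together.
 */
#include <stdio.h>
#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t pa = 0x00a5123456789000UL;	/* example physical address */

	uint32_t high_bits = (uint32_t)(pa >> 32);
	uint32_t low_bits  = (uint32_t)pa;

	/* Immediates written into the four patched instructions. */
	uint32_t sethi_hi = high_bits >> 10;	/* 22-bit field of ia[0] */
	uint32_t sethi_lo = low_bits  >> 10;	/* 22-bit field of ia[1] */
	uint32_t or_hi    = high_bits & 0x3ff;	/* low 10 bits, into ia[2] */
	uint32_t or_lo    = low_bits  & 0x3ff;	/* low 10 bits, into ia[3] */

	/* What the patched code computes at runtime:
	 *   REG1 = (sethi_hi << 10) | or_hi;	sethi %uhi / or %ulo
	 *   REG2 = (sethi_lo << 10) | or_lo;	sethi %hi  / or %lo
	 *   REG1 = (REG1 << 32) | REG2;	sllx 32 / or
	 */
	uint64_t reg1 = ((uint64_t)sethi_hi << 10) | or_hi;
	uint64_t reg2 = ((uint64_t)sethi_lo << 10) | or_lo;
	uint64_t rebuilt = (reg1 << 32) | reg2;

	assert(rebuilt == pa);
	printf("pa = %#llx, rebuilt = %#llx\n",
	       (unsigned long long)pa, (unsigned long long)rebuilt);
	return 0;
}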


Patches currently in stable-queue which might be from [email protected] are

queue-3.14/sparc64-adjust-vmalloc-region-size-based-upon-available-virtual-address-bits.patch
queue-3.14/sparc64-fix-fpu-register-corruption-with-aes-crypto-offload.patch
queue-3.14/sparc64-move-request_irq-from-ldc_bind-to-ldc_alloc.patch
queue-3.14/sparc32-dma_alloc_coherent-must-honour-gfp-flags.patch
queue-3.14/sparc64-kill-unnecessary-tables-and-increase-max_banks.patch
queue-3.14/sparc-let-memset-return-the-address-argument.patch
queue-3.14/sparc64-use-kernel-page-tables-for-vmemmap.patch
queue-3.14/sparc64-sparse-irq.patch
queue-3.14/sparc64-fix-physical-memory-management-regressions-with-large-max_phys_bits.patch
queue-3.14/sparc64-fix-lockdep-warnings-on-reboot-on-ultra-5.patch
queue-3.14/sparc64-switch-to-4-level-page-tables.patch
queue-3.14/sparc64-sun4v-tlb-error-power-off-events.patch
queue-3.14/sparc64-increase-size-of-boot-string-to-1024-bytes.patch
queue-3.14/sparc64-find_node-adjustment.patch
queue-3.14/sparc64-fix-reversed-start-end-in-flush_tlb_kernel_range.patch
queue-3.14/sparc64-increase-max_phys_address_bits-to-53.patch
queue-3.14/sparc64-define-va-hole-at-run-time-rather-than-at-compile-time.patch
queue-3.14/sparc64-fix-register-corruption-in-top-most-kernel-stack-frame-during-boot.patch
queue-3.14/sparc64-do-not-disable-interrupts-in-nmi_cpu_busy.patch
queue-3.14/sparc64-support-m6-and-m7-for-building-cpu-distribution-map.patch
queue-3.14/sparc64-cpu-hardware-caps-support-for-sparc-m6-and-m7.patch
queue-3.14/sparc64-do-not-define-thread-fpregs-save-area-as-zero-length-array.patch
queue-3.14/sparc64-t5-pmu.patch
queue-3.14/sparc64-adjust-ktsb-assembler-to-support-larger-physical-addresses.patch
queue-3.14/sparc64-implement-__get_user_pages_fast.patch
queue-3.14/sparc64-fix-corrupted-thread-fault-code.patch
queue-3.14/sparc64-fix-hibernation-code-refrence-to-page_offset.patch
queue-3.14/sparc64-correctly-recognise-m6-and-m7-cpu-type.patch
queue-3.14/sparc64-fix-pcr_ops-initialization-and-usage-bugs.patch
--
To unsubscribe from this list: send the line "unsubscribe stable" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
