Callback handlers for alternative patching will shortly gain access to a
sequence of alternative instructions provided at assembly time. Update the
callback handler prototypes so that the number of instructions in that
sequence can be passed to the handler.
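As a purely illustrative sketch (example_copy_alts below is hypothetical and
not part of this patch), a handler built on the updated prototype could use
the extra argument to bound and sanity check the sequence it consumes:

	/*
	 * Hypothetical callback: copy the assembly-time alternative
	 * sequence verbatim, using nr_alts as the bound on the
	 * instructions that follow the embedded callback offset.
	 */
	static void example_copy_alts(struct alt_instr *alt, __le32 *origptr,
				      __le32 *updptr, int nr_inst, int nr_alts)
	{
		struct alt_instr_cb *cb = ALT_REPL_PTR(alt);
		int i;

		/* the alternative sequence must cover the patched site */
		BUG_ON(nr_alts < nr_inst);

		for (i = 0; i < nr_inst; i++)
			updptr[i] = cb->insn[i];
	}

Note that a real handler would still have to fix up PC-relative
instructions, as patch_alternative() does today.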
Signed-off-by: Ard Biesheuvel <[email protected]>
---
 arch/arm64/include/asm/alternative.h |  4 ++--
 arch/arm64/include/asm/kvm_mmu.h     |  4 ++--
 arch/arm64/kernel/alternative.c      | 11 +++++++----
 arch/arm64/kernel/cpu_errata.c       | 10 ++++------
 arch/arm64/kvm/va_layout.c           |  8 ++++----
 5 files changed, 19 insertions(+), 18 deletions(-)

diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index 77da798e888b..987c1514183a 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -29,8 +29,8 @@ struct alt_instr_cb {
 	__le32	insn[];		/* sequence of alternative instructions */
 };
 
-typedef void (*alternative_cb_t)(struct alt_instr *alt,
-				 __le32 *origptr, __le32 *updptr, int nr_inst);
+typedef void (*alternative_cb_t)(struct alt_instr *alt, __le32 *origptr,
+				 __le32 *updptr, int nr_inst, int nr_alts);
 
 void __init apply_alternatives_all(void);
 
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 658657367f2f..5e32e314b9f0 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -100,8 +100,8 @@ alternative_cb_end
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 
-void kvm_update_va_mask(struct alt_instr *alt,
-			__le32 *origptr, __le32 *updptr, int nr_inst);
+void kvm_update_va_mask(struct alt_instr *alt, __le32 *origptr,
+			__le32 *updptr, int nr_inst, int nr_alts);
 
 static inline unsigned long __kern_hyp_va(unsigned long v)
 {
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index a49930843784..f55afa0bbaa4 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -107,8 +107,8 @@ static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnp
 	return insn;
 }
 
-static void patch_alternative(struct alt_instr *alt,
-			      __le32 *origptr, __le32 *updptr, int nr_inst)
+static void patch_alternative(struct alt_instr *alt, __le32 *origptr,
+			      __le32 *updptr, int nr_inst, int nr_alts)
 {
 	__le32 *replptr;
 	int i;
@@ -154,7 +154,7 @@ static void __apply_alternatives(void *alt_region, bool is_module)
 	struct alt_instr_cb *alt_cb_insn;
 
 	for (alt = region->begin; alt < region->end; alt++) {
-		int nr_inst;
+		int nr_inst, nr_alts;
 
 		/* Use ARM64_CB_PATCH as an unconditional patch */
 		if (alt->cpufeature < ARM64_CB_PATCH &&
@@ -174,12 +174,15 @@ static void __apply_alternatives(void *alt_region, bool is_module)
 
 		if (alt->cpufeature < ARM64_CB_PATCH) {
 			alt_cb = patch_alternative;
+			nr_alts = alt->alt_len / AARCH64_INSN_SIZE;
 		} else {
 			alt_cb_insn = ALT_REPL_PTR(alt);
 			alt_cb = offset_to_ptr(&alt_cb_insn->cb_offset);
+			nr_alts = (alt->alt_len - sizeof(*alt_cb_insn)) /
+				  AARCH64_INSN_SIZE;
 		}
 
-		alt_cb(alt, origptr, updptr, nr_inst);
+		alt_cb(alt, origptr, updptr, nr_inst, nr_alts);
 
 		if (!is_module) {
 			clean_dcache_range_nopatch((u64)origptr,
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 6ad715d67df8..c5489b4612c5 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -305,9 +305,8 @@ static int __init ssbd_cfg(char *buf)
 }
 early_param("ssbd", ssbd_cfg);
 
-void __init arm64_update_smccc_conduit(struct alt_instr *alt,
-				       __le32 *origptr, __le32 *updptr,
-				       int nr_inst)
+void __init arm64_update_smccc_conduit(struct alt_instr *alt, __le32 *origptr,
+				       __le32 *updptr, int nr_inst, int nr_alts)
 {
 	u32 insn;
 
@@ -327,9 +326,8 @@ void __init arm64_update_smccc_conduit(struct alt_instr *alt,
 	*updptr = cpu_to_le32(insn);
 }
 
-void __init arm64_enable_wa2_handling(struct alt_instr *alt,
-				      __le32 *origptr, __le32 *updptr,
-				      int nr_inst)
+void __init arm64_enable_wa2_handling(struct alt_instr *alt, __le32 *origptr,
+				      __le32 *updptr, int nr_inst, int nr_alts)
 {
 	BUG_ON(nr_inst != 1);
 	/*
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index c712a7376bc1..db7ba73e306a 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -114,8 +114,8 @@ static u32 compute_instruction(int n, u32 rd, u32 rn)
 	return insn;
 }
 
-void __init kvm_update_va_mask(struct alt_instr *alt,
-			       __le32 *origptr, __le32 *updptr, int nr_inst)
+void __init kvm_update_va_mask(struct alt_instr *alt, __le32 *origptr,
+			       __le32 *updptr, int nr_inst, int nr_alts)
 {
 	int i;
 
@@ -154,8 +154,8 @@ void __init kvm_update_va_mask(struct alt_instr *alt,
 void *__kvm_bp_vect_base;
 int __kvm_harden_el2_vector_slot;
 
-void kvm_patch_vector_branch(struct alt_instr *alt,
-			     __le32 *origptr, __le32 *updptr, int nr_inst)
+void kvm_patch_vector_branch(struct alt_instr *alt, __le32 *origptr,
+			     __le32 *updptr, int nr_inst, int nr_alts)
 {
 	u64 addr;
 	u32 insn;
-- 
2.19.2

