The only remaining usage of get_cycles() is to provide random_get_entropy().
Switch mips over to the new scheme of selecting ARCH_HAS_RANDOM_ENTROPY and providing random_get_entropy() in asm/random.h. As a consequence this unearthed a nasty include dependency hell because arbitrary code relies on a magic include of asm/timex.h. Including the headers in asm/random.h turned out to be impossible as well. The only solution for now is to uninline random_get_entropy(). Fix up all other dependencies on the content of asm/timex.h in those files which really depend on it. Remove asm/timex.h as it has no functionality anymore. Signed-off-by: Thomas Gleixner <[email protected]> --- arch/mips/Kconfig | 1 arch/mips/generic/init.c | 1 arch/mips/include/asm/random.h | 7 +++ arch/mips/include/asm/timex.h | 92 ----------------------------------------- arch/mips/kernel/pm-cps.c | 1 arch/mips/kernel/proc.c | 1 arch/mips/kernel/relocate.c | 1 arch/mips/kernel/time.c | 53 +++++++++++++++++++++++ arch/mips/lib/dump_tlb.c | 1 arch/mips/mm/cache.c | 1 10 files changed, 66 insertions(+), 93 deletions(-) --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -19,6 +19,7 @@ config MIPS select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAS_UBSAN select ARCH_HAS_GCOV_PROFILE_ALL + select ARCH_HAS_RANDOM_ENTROPY select ARCH_KEEP_MEMBLOCK select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_CMPXCHG_LOCKREF if 64BIT --- a/arch/mips/generic/init.c +++ b/arch/mips/generic/init.c @@ -12,6 +12,7 @@ #include <linux/of_fdt.h> #include <asm/bootinfo.h> +#include <asm/cpu-type.h> #include <asm/fw/fw.h> #include <asm/irq_cpu.h> #include <asm/machine.h> --- /dev/null +++ b/arch/mips/include/asm/random.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef _ASM_RANDOM_H +#define _ASM_RANDOM_H + +unsigned long random_get_entropy(void); + +#endif /* _ASM_RANDOM_H */ --- a/arch/mips/include/asm/timex.h +++ /dev/null @@ -1,92 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. 
See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1998, 1999, 2003 by Ralf Baechle - * Copyright (C) 2014 by Maciej W. Rozycki - */ -#ifndef _ASM_TIMEX_H -#define _ASM_TIMEX_H - -#ifdef __KERNEL__ - -#include <linux/compiler.h> - -#include <asm/cpu.h> -#include <asm/cpu-features.h> -#include <asm/mipsregs.h> -#include <asm/cpu-type.h> - -/* - * Standard way to access the cycle counter. - * Currently only used on SMP for scheduling. - * - * Only the low 32 bits are available as a continuously counting entity. - * But this only means we'll force a reschedule every 8 seconds or so, - * which isn't an evil thing. - * - * We know that all SMP capable CPUs have cycle counters. - */ - -/* - * On R4000/R4400 an erratum exists such that if the cycle counter is - * read in the exact moment that it is matching the compare register, - * no interrupt will be generated. - * - * There is a suggested workaround and also the erratum can't strike if - * the compare interrupt isn't being used as the clock source device. - * However for now the implementation of this function doesn't get these - * fine details right. - */ -static inline int can_use_mips_counter(unsigned int prid) -{ - int comp = (prid & PRID_COMP_MASK) != PRID_COMP_LEGACY; - - if (__builtin_constant_p(cpu_has_counter) && !cpu_has_counter) - return 0; - else if (__builtin_constant_p(cpu_has_mips_r) && cpu_has_mips_r) - return 1; - else if (likely(!__builtin_constant_p(cpu_has_mips_r) && comp)) - return 1; - /* Make sure we don't peek at cpu_data[0].options in the fast path! 
*/ - if (!__builtin_constant_p(cpu_has_counter)) - asm volatile("" : "=m" (cpu_data[0].options)); - if (likely(cpu_has_counter && - prid > (PRID_IMP_R4000 | PRID_REV_ENCODE_44(15, 15)))) - return 1; - else - return 0; -} - -static inline cycles_t get_cycles(void) -{ - if (can_use_mips_counter(read_c0_prid())) - return read_c0_count(); - else - return 0; /* no usable counter */ -} -#define get_cycles get_cycles - -/* - * Like get_cycles - but where c0_count is not available we desperately - * use c0_random in an attempt to get at least a little bit of entropy. - */ -static inline unsigned long random_get_entropy(void) -{ - unsigned int c0_random; - - if (can_use_mips_counter(read_c0_prid())) - return read_c0_count(); - - if (cpu_has_3kex) - c0_random = (read_c0_random() >> 8) & 0x3f; - else - c0_random = read_c0_random() & 0x3f; - return (random_get_entropy_fallback() << 6) | (0x3f - c0_random); -} -#define random_get_entropy random_get_entropy - -#endif /* __KERNEL__ */ - -#endif /* _ASM_TIMEX_H */ --- a/arch/mips/kernel/pm-cps.c +++ b/arch/mips/kernel/pm-cps.c @@ -13,6 +13,7 @@ #include <asm/asm-offsets.h> #include <asm/cacheflush.h> #include <asm/cacheops.h> +#include <asm/cpu-type.h> #include <asm/idle.h> #include <asm/mips-cps.h> #include <asm/mipsmtregs.h> --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -11,6 +11,7 @@ #include <asm/bootinfo.h> #include <asm/cpu.h> #include <asm/cpu-features.h> +#include <asm/cpu-type.h> #include <asm/idle.h> #include <asm/mipsregs.h> #include <asm/processor.h> --- a/arch/mips/kernel/relocate.c +++ b/arch/mips/kernel/relocate.c @@ -13,7 +13,6 @@ #include <asm/fw/fw.h> #include <asm/sections.h> #include <asm/setup.h> -#include <asm/timex.h> #include <linux/elf.h> #include <linux/kernel.h> #include <linux/libfdt.h> --- a/arch/mips/kernel/time.c +++ b/arch/mips/kernel/time.c @@ -2,6 +2,7 @@ /* * Copyright 2001 MontaVista Software Inc. 
* Author: Jun Sun, [email protected] or [email protected] + * Copyright (C) 1998, 1999, 2003 by Ralf Baechle * Copyright (c) 2003, 2004 Maciej W. Rozycki * * Common time service routines for MIPS machines. @@ -21,9 +22,12 @@ #include <linux/cpufreq.h> #include <linux/delay.h> +#include <asm/cpu.h> #include <asm/cpu-features.h> #include <asm/cpu-type.h> #include <asm/div64.h> +#include <asm/mipsregs.h> +#include <asm/random.h> #include <asm/time.h> #ifdef CONFIG_CPU_FREQ @@ -150,6 +154,55 @@ static __init int cpu_has_mfc0_count_bug return 0; } + +/* + * On R4000/R4400 an erratum exists such that if the cycle counter is + * read in the exact moment that it is matching the compare register, + * no interrupt will be generated. + * + * There is a suggested workaround and also the erratum can't strike if + * the compare interrupt isn't being used as the clock source device. + * However for now the implementation of this function doesn't get these + * fine details right. + */ +static inline int can_use_mips_counter(unsigned int prid) +{ + int comp = (prid & PRID_COMP_MASK) != PRID_COMP_LEGACY; + + if (__builtin_constant_p(cpu_has_counter) && !cpu_has_counter) + return 0; + else if (__builtin_constant_p(cpu_has_mips_r) && cpu_has_mips_r) + return 1; + else if (likely(!__builtin_constant_p(cpu_has_mips_r) && comp)) + return 1; + /* Make sure we don't peek at cpu_data[0].options in the fast path! */ + if (!__builtin_constant_p(cpu_has_counter)) + asm volatile("" : "=m" (cpu_data[0].options)); + if (likely(cpu_has_counter && + prid > (PRID_IMP_R4000 | PRID_REV_ENCODE_44(15, 15)))) + return 1; + else + return 0; +} + +/* + * Like get_cycles - but where c0_count is not available we desperately + * use c0_random in an attempt to get at least a little bit of entropy. 
+ */ +unsigned long random_get_entropy(void) +{ + unsigned int c0_random; + + if (can_use_mips_counter(read_c0_prid())) + return read_c0_count(); + + if (cpu_has_3kex) + c0_random = (read_c0_random() >> 8) & 0x3f; + else + c0_random = read_c0_random() & 0x3f; + return (random_get_entropy_fallback() << 6) | (0x3f - c0_random); +} + void __init time_init(void) { plat_time_init(); --- a/arch/mips/lib/dump_tlb.c +++ b/arch/mips/lib/dump_tlb.c @@ -8,6 +8,7 @@ #include <linux/kernel.h> #include <linux/mm.h> +#include <asm/cpu-type.h> #include <asm/hazards.h> #include <asm/mipsregs.h> #include <asm/mmu_context.h> --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c @@ -22,6 +22,7 @@ #include <asm/processor.h> #include <asm/cpu.h> #include <asm/cpu-features.h> +#include <asm/cpu-type.h> #include <asm/setup.h> #include <asm/pgtable.h>

