Re: [PATCH 2/3] x86: add wrapper functions for mtrr functions handling also pat

2022-07-15 Thread Rafael J. Wysocki
On Fri, Jul 15, 2022 at 4:25 PM Juergen Gross <jgross@suse.com> wrote:
>
> There are several MTRR functions which also do PAT handling. In order
> to support PAT handling without MTRR in the future, add some wrappers
> for those functions.
>
> Cc: <stable@vger.kernel.org> # 5.17
> Fixes: bdd8b6c98239 ("drm/i915: replace X86_FEATURE_PAT with pat_enabled()")
> Signed-off-by: Juergen Gross <jgross@suse.com>

Do I understand correctly that this particular patch doesn't change
the behavior?

If so, it would be good to mention that in the changelog.

> ---
>  arch/x86/include/asm/mtrr.h  |  2 --
>  arch/x86/include/asm/processor.h |  7 +
>  arch/x86/kernel/cpu/common.c | 44 +++-
>  arch/x86/kernel/cpu/mtrr/mtrr.c  | 25 +++---
>  arch/x86/kernel/setup.c  |  5 +---
>  arch/x86/kernel/smpboot.c|  8 +++---
>  arch/x86/power/cpu.c |  2 +-
>  7 files changed, 59 insertions(+), 34 deletions(-)
>
> diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
> index 12a16caed395..900083ac9f60 100644
> --- a/arch/x86/include/asm/mtrr.h
> +++ b/arch/x86/include/asm/mtrr.h
> @@ -43,7 +43,6 @@ extern int mtrr_del(int reg, unsigned long base, unsigned long size);
>  extern int mtrr_del_page(int reg, unsigned long base, unsigned long size);
>  extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
>  extern void mtrr_ap_init(void);
> -extern void set_mtrr_aps_delayed_init(void);
>  extern void mtrr_aps_init(void);
>  extern void mtrr_bp_restore(void);
>  extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
> @@ -86,7 +85,6 @@ static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
>  {
>  }
>  #define mtrr_ap_init() do {} while (0)
> -#define set_mtrr_aps_delayed_init() do {} while (0)
>  #define mtrr_aps_init() do {} while (0)
>  #define mtrr_bp_restore() do {} while (0)
>  #define mtrr_disable() do {} while (0)
> diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
> index 5c934b922450..e2140204fb7e 100644
> --- a/arch/x86/include/asm/processor.h
> +++ b/arch/x86/include/asm/processor.h
> @@ -865,7 +865,14 @@ bool arch_is_platform_page(u64 paddr);
>  #define arch_is_platform_page arch_is_platform_page
>  #endif
>
> +extern bool cache_aps_delayed_init;
> +
>  void cache_disable(void);
>  void cache_enable(void);
> +void cache_bp_init(void);
> +void cache_ap_init(void);
> +void cache_set_aps_delayed_init(void);
> +void cache_aps_init(void);
> +void cache_bp_restore(void);
>
>  #endif /* _ASM_X86_PROCESSOR_H */
> diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
> index e43322f8a4ef..0a1bd14f7966 100644
> --- a/arch/x86/kernel/cpu/common.c
> +++ b/arch/x86/kernel/cpu/common.c
> @@ -1929,7 +1929,7 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
>  #ifdef CONFIG_X86_32
> enable_sep_cpu();
>  #endif
> -   mtrr_ap_init();
> +   cache_ap_init();
> validate_apic_and_package_id(c);
> x86_spec_ctrl_setup_ap();
> update_srbds_msr();
> @@ -2403,3 +2403,45 @@ void cache_enable(void) __releases(cache_disable_lock)
>
> raw_spin_unlock(&cache_disable_lock);
>  }
> +
> +void __init cache_bp_init(void)
> +{
> +   if (IS_ENABLED(CONFIG_MTRR))
> +   mtrr_bp_init();
> +   else
> +   pat_disable("PAT support disabled because CONFIG_MTRR is disabled in the kernel.");
> +}
> +
> +void cache_ap_init(void)
> +{
> +   if (cache_aps_delayed_init)
> +   return;
> +
> +   mtrr_ap_init();
> +}
> +
> +bool cache_aps_delayed_init;
> +
> +void cache_set_aps_delayed_init(void)
> +{
> +   cache_aps_delayed_init = true;
> +}
> +
> +void cache_aps_init(void)
> +{
> +   /*
> +* Check if someone has requested the delay of AP cache initialization,
> +* by doing cache_set_aps_delayed_init(), prior to this point. If not,
> +* then we are done.
> +*/
> +   if (!cache_aps_delayed_init)
> +   return;
> +
> +   mtrr_aps_init();
> +   cache_aps_delayed_init = false;
> +}
> +
> +void cache_bp_restore(void)
> +{
> +   mtrr_bp_restore();
> +}
> diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
> index 2746cac9d8a9..c1593cfae641 100644
> --- a/arch/x86/kernel/cpu/mtrr/mtrr.c
> +++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
> @@ -69,7 +69,6 @@ unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
>  static DEFINE_MUTEX(mtrr_mutex);
>
>  u64 size_or_mask, size_and_mask;
> -static bool mtrr_aps_delayed_init;
>
>  static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __ro_after_init;
>
> @@ -176,7 +175,8 @@ static int mtrr_rendezvous_handler(void *info)
> if (data->smp_reg != ~0U) {
> mtrr_if->set(data->smp_reg, data->smp_base,
>  data->smp_size, data->smp_type);
> -   } else if (mtrr_aps_delayed_init || !cpu_online(smp_processor_id())) {
> +   } else if ((use_intel() && cache_aps_delayed_init) ||
> +  !cpu_online(smp_processor_id())) {

[PATCH 2/3] x86: add wrapper functions for mtrr functions handling also pat

2022-07-15 Thread Juergen Gross
There are several MTRR functions which also do PAT handling. In order
to support PAT handling without MTRR in the future, add some wrappers
for those functions.

Cc: <stable@vger.kernel.org> # 5.17
Fixes: bdd8b6c98239 ("drm/i915: replace X86_FEATURE_PAT with pat_enabled()")
Signed-off-by: Juergen Gross <jgross@suse.com>
---
 arch/x86/include/asm/mtrr.h  |  2 --
 arch/x86/include/asm/processor.h |  7 +
 arch/x86/kernel/cpu/common.c | 44 +++-
 arch/x86/kernel/cpu/mtrr/mtrr.c  | 25 +++---
 arch/x86/kernel/setup.c  |  5 +---
 arch/x86/kernel/smpboot.c|  8 +++---
 arch/x86/power/cpu.c |  2 +-
 7 files changed, 59 insertions(+), 34 deletions(-)

diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index 12a16caed395..900083ac9f60 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -43,7 +43,6 @@ extern int mtrr_del(int reg, unsigned long base, unsigned long size);
 extern int mtrr_del_page(int reg, unsigned long base, unsigned long size);
 extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
 extern void mtrr_ap_init(void);
-extern void set_mtrr_aps_delayed_init(void);
 extern void mtrr_aps_init(void);
 extern void mtrr_bp_restore(void);
 extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
@@ -86,7 +85,6 @@ static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
 {
 }
 #define mtrr_ap_init() do {} while (0)
-#define set_mtrr_aps_delayed_init() do {} while (0)
 #define mtrr_aps_init() do {} while (0)
 #define mtrr_bp_restore() do {} while (0)
 #define mtrr_disable() do {} while (0)
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 5c934b922450..e2140204fb7e 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -865,7 +865,14 @@ bool arch_is_platform_page(u64 paddr);
 #define arch_is_platform_page arch_is_platform_page
 #endif
 
+extern bool cache_aps_delayed_init;
+
 void cache_disable(void);
 void cache_enable(void);
+void cache_bp_init(void);
+void cache_ap_init(void);
+void cache_set_aps_delayed_init(void);
+void cache_aps_init(void);
+void cache_bp_restore(void);
 
 #endif /* _ASM_X86_PROCESSOR_H */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index e43322f8a4ef..0a1bd14f7966 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1929,7 +1929,7 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
 #ifdef CONFIG_X86_32
enable_sep_cpu();
 #endif
-   mtrr_ap_init();
+   cache_ap_init();
validate_apic_and_package_id(c);
x86_spec_ctrl_setup_ap();
update_srbds_msr();
@@ -2403,3 +2403,45 @@ void cache_enable(void) __releases(cache_disable_lock)
 
raw_spin_unlock(&cache_disable_lock);
 }
+
+void __init cache_bp_init(void)
+{
+   if (IS_ENABLED(CONFIG_MTRR))
+   mtrr_bp_init();
+   else
+   pat_disable("PAT support disabled because CONFIG_MTRR is disabled in the kernel.");
+}
+
+void cache_ap_init(void)
+{
+   if (cache_aps_delayed_init)
+   return;
+
+   mtrr_ap_init();
+}
+
+bool cache_aps_delayed_init;
+
+void cache_set_aps_delayed_init(void)
+{
+   cache_aps_delayed_init = true;
+}
+
+void cache_aps_init(void)
+{
+   /*
+* Check if someone has requested the delay of AP cache initialization,
+* by doing cache_set_aps_delayed_init(), prior to this point. If not,
+* then we are done.
+*/
+   if (!cache_aps_delayed_init)
+   return;
+
+   mtrr_aps_init();
+   cache_aps_delayed_init = false;
+}
+
+void cache_bp_restore(void)
+{
+   mtrr_bp_restore();
+}
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
index 2746cac9d8a9..c1593cfae641 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -69,7 +69,6 @@ unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
 static DEFINE_MUTEX(mtrr_mutex);
 
 u64 size_or_mask, size_and_mask;
-static bool mtrr_aps_delayed_init;
 
 static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __ro_after_init;
 
@@ -176,7 +175,8 @@ static int mtrr_rendezvous_handler(void *info)
if (data->smp_reg != ~0U) {
mtrr_if->set(data->smp_reg, data->smp_base,
 data->smp_size, data->smp_type);
-   } else if (mtrr_aps_delayed_init || !cpu_online(smp_processor_id())) {
+   } else if ((use_intel() && cache_aps_delayed_init) ||
+  !cpu_online(smp_processor_id())) {
mtrr_if->set_all();
}
return 0;
@@ -789,7 +789,7 @@ void mtrr_ap_init(void)
if (!mtrr_enabled())
return;
 
-   if (!use_intel() || mtrr_aps_delayed_init)
+   if (!use_intel())
return;
 
/*
@@ -823,16 +823,6 @@ void mtrr_save_state(void)
smp_call_function_single(first_cpu, mtrr_save_fixed_ranges, NULL, 1);
 }
 
-void