When CONFIG_HAVE_MEMORYLESS_NODES is enabled, cpu_to_node()/numa_node_id()
may return a node without memory, and passing that node id to
kmalloc_node() and friends may later cause a system failure/panic.
So use cpu_to_mem()/numa_mem_id() instead to get the nearest node with
memory for the current cpu.

If CONFIG_HAVE_MEMORYLESS_NODES is disabled, cpu_to_mem()/numa_mem_id()
are identical to cpu_to_node()/numa_node_id().
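
For illustration only, a minimal sketch of the conversion pattern (struct
foo and foo_alloc() are made-up names, not taken from any file in this
patch):

	/*
	 * cpu_to_mem() resolves to the nearest node that actually has
	 * memory when CONFIG_HAVE_MEMORYLESS_NODES is enabled, whereas
	 * cpu_to_node() may hand back a memoryless node id.
	 */
	static struct foo *foo_alloc(int cpu)
	{
		/* was: kzalloc_node(..., GFP_KERNEL, cpu_to_node(cpu)); */
		return kzalloc_node(sizeof(struct foo), GFP_KERNEL,
				    cpu_to_mem(cpu));
	}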

Signed-off-by: Jiang Liu <[email protected]>
---
 arch/x86/kernel/cpu/perf_event_amd.c          |    2 +-
 arch/x86/kernel/cpu/perf_event_amd_uncore.c   |    2 +-
 arch/x86/kernel/cpu/perf_event_intel.c        |    2 +-
 arch/x86/kernel/cpu/perf_event_intel_ds.c     |    6 +++---
 arch/x86/kernel/cpu/perf_event_intel_rapl.c   |    2 +-
 arch/x86/kernel/cpu/perf_event_intel_uncore.c |    2 +-
 6 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index beeb7cc07044..ee5120ce3e98 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -347,7 +347,7 @@ static struct amd_nb *amd_alloc_nb(int cpu)
        struct amd_nb *nb;
        int i;
 
-       nb = kzalloc_node(sizeof(struct amd_nb), GFP_KERNEL, cpu_to_node(cpu));
+       nb = kzalloc_node(sizeof(struct amd_nb), GFP_KERNEL, cpu_to_mem(cpu));
        if (!nb)
                return NULL;
 
diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
index 3bbdf4cd38b9..1a7f4129bf4c 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
@@ -291,7 +291,7 @@ static struct pmu amd_l2_pmu = {
 static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
 {
        return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
-                       cpu_to_node(cpu));
+                       cpu_to_mem(cpu));
 }
 
 static void amd_uncore_cpu_up_prepare(unsigned int cpu)
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index adb02aa62af5..4f48d1bb7608 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1957,7 +1957,7 @@ struct intel_shared_regs *allocate_shared_regs(int cpu)
        int i;
 
        regs = kzalloc_node(sizeof(struct intel_shared_regs),
-                           GFP_KERNEL, cpu_to_node(cpu));
+                           GFP_KERNEL, cpu_to_mem(cpu));
        if (regs) {
                /*
                 * initialize the locks to keep lockdep happy
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 980970cb744d..bb0327411bf1 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -250,7 +250,7 @@ static DEFINE_PER_CPU(void *, insn_buffer);
 static int alloc_pebs_buffer(int cpu)
 {
        struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
-       int node = cpu_to_node(cpu);
+       int node = cpu_to_mem(cpu);
        int max, thresh = 1; /* always use a single PEBS record */
        void *buffer, *ibuffer;
 
@@ -304,7 +304,7 @@ static void release_pebs_buffer(int cpu)
 static int alloc_bts_buffer(int cpu)
 {
        struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
-       int node = cpu_to_node(cpu);
+       int node = cpu_to_mem(cpu);
        int max, thresh;
        void *buffer;
 
@@ -341,7 +341,7 @@ static void release_bts_buffer(int cpu)
 
 static int alloc_ds_buffer(int cpu)
 {
-       int node = cpu_to_node(cpu);
+       int node = cpu_to_mem(cpu);
        struct debug_store *ds;
 
        ds = kzalloc_node(sizeof(*ds), GFP_KERNEL, node);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
index 619f7699487a..9df1ec3b505d 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -547,7 +547,7 @@ static int rapl_cpu_prepare(int cpu)
        if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
                return -1;
 
-       pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
+       pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_mem(cpu));
        if (!pmu)
                return -1;
 
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 65bbbea38b9c..4b77ba4b4e36 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -4011,7 +4011,7 @@ static int uncore_cpu_prepare(int cpu, int phys_id)
                        if (pmu->func_id < 0)
                                pmu->func_id = j;
 
-                       box = uncore_alloc_box(type, cpu_to_node(cpu));
+                       box = uncore_alloc_box(type, cpu_to_mem(cpu));
                        if (!box)
                                return -ENOMEM;
 
-- 
1.7.10.4
