[PATCH AUTOSEL for 4.9 034/190] ACPI/processor: Replace racy task affinity logic

2018-03-07 Thread Sasha Levin
From: Thomas Gleixner 

[ Upstream commit 8153f9ac43897f9f4786b30badc134fcc1a4fb11 ]

acpi_processor_get_throttling() requires the getter function to be invoked
on the target CPU. This is achieved by temporarily setting the affinity of
the calling user space thread to the requested CPU and resetting it to the
original affinity afterwards.

That is racy vs. CPU hotplug and concurrent affinity changes for that
thread: the code can end up executing on the wrong CPU, and the restore can
overwrite a newly requested affinity setting.
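
For reference, the code being removed (shown in full further down in the
diff) follows roughly the affinity-bouncing pattern sketched below; this is
a condensed sketch with the error paths trimmed, not the verbatim code:

	cpumask_var_t saved_mask;
	int ret;

	if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
		return -ENOMEM;

	/* Remember the caller's affinity and pin the task to the target CPU */
	cpumask_copy(saved_mask, &current->cpus_allowed);
	if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
		free_cpumask_var(saved_mask);
		return -ENODEV;
	}

	ret = pr->throttling.acpi_processor_get_throttling(pr);

	/*
	 * Restore the saved mask. This silently discards any affinity change
	 * made concurrently, and nothing prevents pr->id from going offline
	 * in between.
	 */
	set_cpus_allowed_ptr(current, saved_mask);
	free_cpumask_var(saved_mask);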

acpi_processor_get_throttling() is invoked in two ways:

1) The CPU online callback, which is already running on the target CPU and
   obviously protected against hotplug and not affected by affinity
   settings.

2) The ACPI driver probe function, which is not protected against hotplug
   during modprobe.

Switch it over to work_on_cpu() and protect the probe function against CPU
hotplug.
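
Condensed, the result (a compact view of what the diff below installs, not
additional code) is a work_on_cpu() callback that runs the getter on the
target CPU, plus an online check that is stable because both call sites
hold the CPU hotplug lock; the probe path gets that protection from the
get_online_cpus()/put_online_cpus() pair added around
__acpi_processor_start():

	static long __acpi_processor_get_throttling(void *data)
	{
		struct acpi_processor *pr = data;

		/* Executes on pr->id in a CPU-bound kworker */
		return pr->throttling.acpi_processor_get_throttling(pr);
	}

	static int acpi_processor_get_throttling(struct acpi_processor *pr)
	{
		if (!pr)
			return -EINVAL;

		if (!pr->flags.throttling)
			return -ENODEV;

		/* Both call sites are protected against CPU hotplug */
		if (!cpu_online(pr->id))
			return -ENODEV;

		return work_on_cpu(pr->id, __acpi_processor_get_throttling, pr);
	}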

Signed-off-by: Thomas Gleixner 
Cc: Fenghua Yu 
Cc: Tony Luck 
Cc: Herbert Xu 
Cc: "Rafael J. Wysocki" 
Cc: Peter Zijlstra 
Cc: Benjamin Herrenschmidt 
Cc: Sebastian Siewior 
Cc: Lai Jiangshan 
Cc: linux-a...@vger.kernel.org
Cc: Viresh Kumar 
Cc: Michael Ellerman 
Cc: Tejun Heo 
Cc: "David S. Miller" 
Cc: Len Brown 
Link: http://lkml.kernel.org/r/20170412201042.785920...@linutronix.de
Signed-off-by: Thomas Gleixner 
Signed-off-by: Sasha Levin 
---
 drivers/acpi/processor_driver.c |  7 -
 drivers/acpi/processor_throttling.c | 62 +
 2 files changed, 42 insertions(+), 27 deletions(-)

diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index eab8cdad7dc3..8697a82bd465 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -262,11 +262,16 @@ static int __acpi_processor_start(struct acpi_device *device)
 static int acpi_processor_start(struct device *dev)
 {
struct acpi_device *device = ACPI_COMPANION(dev);
+   int ret;
 
if (!device)
return -ENODEV;
 
-   return __acpi_processor_start(device);
+   /* Protect against concurrent CPU hotplug operations */
+   get_online_cpus();
+   ret = __acpi_processor_start(device);
+   put_online_cpus();
+   return ret;
 }
 
 static int acpi_processor_stop(struct device *dev)
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index d51ca1c05619..207e9bbb9490 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -62,8 +62,8 @@ struct acpi_processor_throttling_arg {
 #define THROTTLING_POSTCHANGE  (2)
 
 static int acpi_processor_get_throttling(struct acpi_processor *pr);
-int acpi_processor_set_throttling(struct acpi_processor *pr,
-   int state, bool force);
+static int __acpi_processor_set_throttling(struct acpi_processor *pr,
+  int state, bool force, bool direct);
 
 static int acpi_processor_update_tsd_coord(void)
 {
@@ -891,7 +891,8 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Invalid throttling state, reset\n"));
state = 0;
-   ret = acpi_processor_set_throttling(pr, state, true);
+   ret = __acpi_processor_set_throttling(pr, state, true,
+ true);
if (ret)
return ret;
}
@@ -901,36 +902,31 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
return 0;
 }
 
-static int acpi_processor_get_throttling(struct acpi_processor *pr)
+static long __acpi_processor_get_throttling(void *data)
 {
-   cpumask_var_t saved_mask;
-   int ret;
+   struct acpi_processor *pr = data;
+
+   return pr->throttling.acpi_processor_get_throttling(pr);
+}
 
+static int acpi_processor_get_throttling(struct acpi_processor *pr)
+{
if (!pr)
return -EINVAL;
 
if (!pr->flags.throttling)
return -ENODEV;
 
-   if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
-   return -ENOMEM;
-
/*
-* Migrate task to the cpu pointed by pr.
+* This is either called from the CPU hotplug callback of
+* processor_driver or via the ACPI probe function. In the latter
+* case the CPU is not guaranteed to be online. Both call sites are
+* protected against CPU hotplug.
 */
-   cpumask_copy(saved_mask, &current->cpus_allowed);
-   /* FIXME: use work_on_cpu() */
-   if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
-   /* Can't migrate to the target pr->id CPU. Exit */
-   free_cpumask_var(saved_mask);
+   if (!cpu_online(pr->id))
return -ENODEV;
-   }
-   ret =