[PATCH v2 12/12] cpuset: export effective masks to userspace

2013-10-11 Thread Li Zefan
cpuset.cpus and cpuset.mems are the configured masks, and we need
to export effective masks to userspace, so users know the real
cpus_allowed and mems_allowed that apply to the tasks in a cpuset.

cpuset.effective_cpus and cpuset.effective_mems will be created
unconditionally (v1 created them for sane_behavior only).

v2:
- export those masks unconditionally, suggested by Tejun.

Signed-off-by: Li Zefan 
---
 kernel/cpuset.c | 34 ++++++++++++++++++++++++++++------
 1 file changed, 28 insertions(+), 6 deletions(-)

diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index a98723d..c8ba514 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1614,6 +1614,8 @@ typedef enum {
FILE_MEMORY_MIGRATE,
FILE_CPULIST,
FILE_MEMLIST,
+   FILE_EFFECTIVE_CPULIST,
+   FILE_EFFECTIVE_MEMLIST,
FILE_CPU_EXCLUSIVE,
FILE_MEM_EXCLUSIVE,
FILE_MEM_HARDWALL,
@@ -1762,23 +1764,23 @@ out_unlock:
  * across a page fault.
  */
 
-static size_t cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
+static size_t cpuset_sprintf_cpulist(char *page, struct cpumask *pmask)
 {
size_t count;
 
	mutex_lock(&callback_mutex);
-   count = cpulist_scnprintf(page, PAGE_SIZE, cs->cpus_allowed);
+   count = cpulist_scnprintf(page, PAGE_SIZE, pmask);
	mutex_unlock(&callback_mutex);
 
return count;
 }
 
-static size_t cpuset_sprintf_memlist(char *page, struct cpuset *cs)
+static size_t cpuset_sprintf_memlist(char *page, nodemask_t mask)
 {
size_t count;
 
	mutex_lock(&callback_mutex);
-   count = nodelist_scnprintf(page, PAGE_SIZE, cs->mems_allowed);
+   count = nodelist_scnprintf(page, PAGE_SIZE, mask);
	mutex_unlock(&callback_mutex);
 
return count;
@@ -1802,10 +1804,16 @@ static ssize_t cpuset_common_file_read(struct 
cgroup_subsys_state *css,
 
switch (type) {
case FILE_CPULIST:
-   s += cpuset_sprintf_cpulist(s, cs);
+   s += cpuset_sprintf_cpulist(s, cs->cpus_allowed);
break;
case FILE_MEMLIST:
-   s += cpuset_sprintf_memlist(s, cs);
+   s += cpuset_sprintf_memlist(s, cs->mems_allowed);
+   break;
+   case FILE_EFFECTIVE_CPULIST:
+   s += cpuset_sprintf_cpulist(s, cs->effective_cpus);
+   break;
+   case FILE_EFFECTIVE_MEMLIST:
+   s += cpuset_sprintf_memlist(s, cs->effective_mems);
break;
default:
retval = -EINVAL;
@@ -1880,6 +1888,13 @@ static struct cftype files[] = {
},
 
{
+   .name = "effective_cpus",
+   .read = cpuset_common_file_read,
+   .max_write_len = (100U + 6 * NR_CPUS),
+   .private = FILE_EFFECTIVE_CPULIST,
+   },
+
+   {
.name = "mems",
.read = cpuset_common_file_read,
.write_string = cpuset_write_resmask,
@@ -1888,6 +1903,13 @@ static struct cftype files[] = {
},
 
{
+   .name = "effective_mems",
+   .read = cpuset_common_file_read,
+   .max_write_len = (100U + 6 * MAX_NUMNODES),
+   .private = FILE_EFFECTIVE_MEMLIST,
+   },
+
+   {
.name = "cpu_exclusive",
.read_u64 = cpuset_read_u64,
.write_u64 = cpuset_write_u64,
-- 
1.8.0.2

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


[PATCH v2 12/12] cpuset: export effective masks to userspace

2013-10-11 Thread Li Zefan
cpuset.cpus and cpuset.mems are the configured masks, and we need
to export effective masks to userspace, so users know the real
cpus_allowed and mems_allowed that apply to the tasks in a cpuset.

cpuset.effective_cpus and cpuset.effective_mems will be created
unconditionally (v1 created them for sane_behavior only).

v2:
- export those masks unconditionally, suggested by Tejun.

Signed-off-by: Li Zefan <lize...@huawei.com>
---
 kernel/cpuset.c | 34 ++++++++++++++++++++++++++++------
 1 file changed, 28 insertions(+), 6 deletions(-)

diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index a98723d..c8ba514 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1614,6 +1614,8 @@ typedef enum {
FILE_MEMORY_MIGRATE,
FILE_CPULIST,
FILE_MEMLIST,
+   FILE_EFFECTIVE_CPULIST,
+   FILE_EFFECTIVE_MEMLIST,
FILE_CPU_EXCLUSIVE,
FILE_MEM_EXCLUSIVE,
FILE_MEM_HARDWALL,
@@ -1762,23 +1764,23 @@ out_unlock:
  * across a page fault.
  */
 
-static size_t cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
+static size_t cpuset_sprintf_cpulist(char *page, struct cpumask *pmask)
 {
size_t count;
 
	mutex_lock(&callback_mutex);
-   count = cpulist_scnprintf(page, PAGE_SIZE, cs->cpus_allowed);
+   count = cpulist_scnprintf(page, PAGE_SIZE, pmask);
	mutex_unlock(&callback_mutex);
 
return count;
 }
 
-static size_t cpuset_sprintf_memlist(char *page, struct cpuset *cs)
+static size_t cpuset_sprintf_memlist(char *page, nodemask_t mask)
 {
size_t count;
 
	mutex_lock(&callback_mutex);
-   count = nodelist_scnprintf(page, PAGE_SIZE, cs->mems_allowed);
+   count = nodelist_scnprintf(page, PAGE_SIZE, mask);
	mutex_unlock(&callback_mutex);
 
return count;
@@ -1802,10 +1804,16 @@ static ssize_t cpuset_common_file_read(struct 
cgroup_subsys_state *css,
 
switch (type) {
case FILE_CPULIST:
-   s += cpuset_sprintf_cpulist(s, cs);
+   s += cpuset_sprintf_cpulist(s, cs->cpus_allowed);
break;
case FILE_MEMLIST:
-   s += cpuset_sprintf_memlist(s, cs);
+   s += cpuset_sprintf_memlist(s, cs->mems_allowed);
+   break;
+   case FILE_EFFECTIVE_CPULIST:
+   s += cpuset_sprintf_cpulist(s, cs->effective_cpus);
+   break;
+   case FILE_EFFECTIVE_MEMLIST:
+   s += cpuset_sprintf_memlist(s, cs->effective_mems);
break;
default:
retval = -EINVAL;
@@ -1880,6 +1888,13 @@ static struct cftype files[] = {
},
 
{
+   .name = "effective_cpus",
+   .read = cpuset_common_file_read,
+   .max_write_len = (100U + 6 * NR_CPUS),
+   .private = FILE_EFFECTIVE_CPULIST,
+   },
+
+   {
	.name = "mems",
.read = cpuset_common_file_read,
.write_string = cpuset_write_resmask,
@@ -1888,6 +1903,13 @@ static struct cftype files[] = {
},
 
{
+   .name = "effective_mems",
+   .read = cpuset_common_file_read,
+   .max_write_len = (100U + 6 * MAX_NUMNODES),
+   .private = FILE_EFFECTIVE_MEMLIST,
+   },
+
+   {
	.name = "cpu_exclusive",
.read_u64 = cpuset_read_u64,
.write_u64 = cpuset_write_u64,
-- 
1.8.0.2

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/