From: Ben Widawsky
Now that preferred_nodes is just a mask, and policies are mutually
exclusive, there is no reason to have a separate mask.
This patch is optional. It definitely helps clean up code in future
patches, but there is no functional difference to leaving it with the
previous name. I do believe it helps demonstrate the exclusivity of the
fields.
Link: https://lore.kernel.org/r/20200630212517.308045-7-ben.widawsky@intel.com
Signed-off-by: Ben Widawsky
Signed-off-by: Feng Tang
---
include/linux/mempolicy.h | 6 +--
 mm/mempolicy.c            | 114 +++++++++++++++++++++++-----------------------
2 files changed, 56 insertions(+), 64 deletions(-)
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 23ee105..ec811c3 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -46,11 +46,7 @@ struct mempolicy {
atomic_t refcnt;
unsigned short mode;/* See MPOL_* above */
unsigned short flags; /* See set_mempolicy() MPOL_F_* above */
- union {
- nodemask_t preferred_nodes; /* preferred */
- nodemask_t nodes; /* interleave/bind */
- /* undefined for default */
- } v;
+ nodemask_t nodes; /* interleave/bind/many */
union {
nodemask_t cpuset_mems_allowed; /* relative to these nodes */
nodemask_t user_nodemask; /* nodemask passed by user */
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index fbfa3ce..eba207e 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -199,7 +199,7 @@ static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
if (nodes_empty(*nodes))
return -EINVAL;
- pol->v.nodes = *nodes;
+ pol->nodes = *nodes;
return 0;
}
@@ -211,7 +211,7 @@ static int mpol_new_preferred_many(struct mempolicy *pol,
else if (nodes_empty(*nodes))
return -EINVAL; /* no allowed nodes */
else
- pol->v.preferred_nodes = *nodes;
+ pol->nodes = *nodes;
return 0;
}
@@ -235,7 +235,7 @@ static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
if (nodes_empty(*nodes))
return -EINVAL;
- pol->v.nodes = *nodes;
+ pol->nodes = *nodes;
return 0;
}
@@ -352,15 +352,15 @@ static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
else if (pol->flags & MPOL_F_RELATIVE_NODES)
mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
else {
- nodes_remap(tmp, pol->v.nodes,pol->w.cpuset_mems_allowed,
- *nodes);
+ nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
+ *nodes);
pol->w.cpuset_mems_allowed = *nodes;
}
if (nodes_empty(tmp))
tmp = *nodes;
- pol->v.nodes = tmp;
+ pol->nodes = tmp;
}
static void mpol_rebind_preferred_common(struct mempolicy *pol,
@@ -373,17 +373,17 @@ static void mpol_rebind_preferred_common(struct mempolicy *pol,
int node = first_node(pol->w.user_nodemask);
if (node_isset(node, *nodes)) {
- pol->v.preferred_nodes = nodemask_of_node(node);
+ pol->nodes = nodemask_of_node(node);
pol->flags &= ~MPOL_F_LOCAL;
} else
pol->flags |= MPOL_F_LOCAL;
} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
- pol->v.preferred_nodes = tmp;
+ pol->nodes = tmp;
} else if (!(pol->flags & MPOL_F_LOCAL)) {
- nodes_remap(tmp, pol->v.preferred_nodes,
- pol->w.cpuset_mems_allowed, *preferred_nodes);
- pol->v.preferred_nodes = tmp;
+ nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
+ *preferred_nodes);
+ pol->nodes = tmp;
pol->w.cpuset_mems_allowed = *nodes;
}
}
@@ -963,14 +963,14 @@ static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
switch (p->mode) {
case MPOL_BIND:
case MPOL_INTERLEAVE:
- *nodes = p->v.nodes;
+ *nodes = p->nodes;
break;
case MPOL_PREFERRED_MANY:
- *nodes = p->v.preferred_nodes;
+ *nodes = p->nodes;
break;
case MPOL_PREFERRED:
if (!(p->flags & MPOL_F_LOCAL))
- *nodes = p->v.preferred_nodes;
+ *nodes = p->nodes;
/* else return empty node mask for local allocation */
break;
default:
@@ -1056,7 +1056,7 @@ static long do_get_mempolicy(int *pol