vcpu_deassign() has only one caller: _vcpu_remove().
Let's consolidate the two functions into one.
No functional change intended.
Signed-off-by: Dario Faggioli
---
Cc: George Dunlap
---
xen/common/sched_null.c | 76 ++-
1 file changed, 35 insertions(+), 41 deletions(-)
diff --git a/xen/common/sched_null.c b/xen/common/sched_null.c
index 784db71027..f372172c32 100644
--- a/xen/common/sched_null.c
+++ b/xen/common/sched_null.c
@@ -359,9 +359,14 @@ static void vcpu_assign(struct null_private *prv, struct vcpu *v,
     }
 }
 
-static void vcpu_deassign(struct null_private *prv, struct vcpu *v,
-                          unsigned int cpu)
+static void vcpu_deassign(struct null_private *prv, struct vcpu *v)
 {
+    unsigned int bs;
+    unsigned int cpu = v->processor;
+    struct null_vcpu *wvc;
+
+    ASSERT(list_empty(&null_vcpu(v)->waitq_elem));
+
     per_cpu(npc, cpu).vcpu = NULL;
     cpumask_set_cpu(cpu, &prv->cpus_free);
@@ -378,6 +383,32 @@ static void vcpu_deassign(struct null_private *prv, struct vcpu *v,
         d.cpu = cpu;
         __trace_var(TRC_SNULL_VCPU_DEASSIGN, 1, sizeof(d), &d);
     }
+
+    spin_lock(&prv->waitq_lock);
+
+    /*
+     * If v is assigned to a pCPU, let's see if there is someone waiting,
+     * suitable to be assigned to it (prioritizing vcpus that have
+     * soft-affinity with cpu).
+     */
+    for_each_affinity_balance_step( bs )
+    {
+        list_for_each_entry( wvc, &prv->waitq, waitq_elem )
+        {
+            if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(wvc->vcpu) )
+                continue;
+
+            if ( vcpu_check_affinity(wvc->vcpu, cpu, bs) )
+            {
+                list_del_init(&wvc->waitq_elem);
+                vcpu_assign(prv, wvc->vcpu, cpu);
+                cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
+                spin_unlock(&prv->waitq_lock);
+                return;
+            }
+        }
+    }
+    spin_unlock(&prv->waitq_lock);
 }
/* Change the scheduler of cpu to us (null). */
@@ -469,43 +500,6 @@ static void null_vcpu_insert(const struct scheduler *ops, struct vcpu *v)
     SCHED_STAT_CRANK(vcpu_insert);
 }
-static void _vcpu_remove(struct null_private *prv, struct vcpu *v)
-{
-    unsigned int bs;
-    unsigned int cpu = v->processor;
-    struct null_vcpu *wvc;
-
-    ASSERT(list_empty(&null_vcpu(v)->waitq_elem));
-
-    vcpu_deassign(prv, v, cpu);
-
-    spin_lock(&prv->waitq_lock);
-
-    /*
-     * If v is assigned to a pCPU, let's see if there is someone waiting,
-     * suitable to be assigned to it (prioritizing vcpus that have
-     * soft-affinity with cpu).
-     */
-    for_each_affinity_balance_step( bs )
-    {
-        list_for_each_entry( wvc, &prv->waitq, waitq_elem )
-        {
-            if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(wvc->vcpu) )
-                continue;
-
-            if ( vcpu_check_affinity(wvc->vcpu, cpu, bs) )
-            {
-                list_del_init(&wvc->waitq_elem);
-                vcpu_assign(prv, wvc->vcpu, cpu);
-                cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
-                spin_unlock(&prv->waitq_lock);
-                return;
-            }
-        }
-    }
-    spin_unlock(&prv->waitq_lock);
-}
-
 static void null_vcpu_remove(const struct scheduler *ops, struct vcpu *v)
 {
     struct null_private *prv = null_priv(ops);
@@ -529,7 +523,7 @@ static void null_vcpu_remove(const struct scheduler *ops, struct vcpu *v)
     ASSERT(per_cpu(npc, v->processor).vcpu == v);
     ASSERT(!cpumask_test_cpu(v->processor, &prv->cpus_free));
 
-    _vcpu_remove(prv, v);
+    vcpu_deassign(prv, v);
 
  out:
     vcpu_schedule_unlock_irq(lock, v);
@@ -615,7 +609,7 @@ static void null_vcpu_migrate(const struct scheduler *ops, struct vcpu *v,
      */
     if ( likely(list_empty(&nvc->waitq_elem)) )
     {
-        _vcpu_remove(prv, v);
+        vcpu_deassign(prv, v);
         SCHED_STAT_CRANK(migrate_running);
     }
     else
___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel