Several places are still lacking support for multiple vcpus per sched
item. Add that missing support (with the exception of initial
allocation) and the helpers needed for it.

Signed-off-by: Juergen Gross <jgr...@suse.com>
---
RFC V2: fix vcpu_runstate_helper()
---
 xen/common/schedule.c      | 26 ++++++++--------
 xen/include/xen/sched-if.h | 74 ++++++++++++++++++++++++++++++++++++----------
 2 files changed, 73 insertions(+), 27 deletions(-)
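
Note on context: the helpers touched below all walk the vcpus of a sched
item via for_each_sched_item_vcpu(), which is introduced earlier in this
series. A minimal sketch of the assumed shape (next_in_list is an
illustrative field name, not necessarily the one the series actually
uses):

    /* Sketch only: iterate over all vcpus assigned to a sched item,
     * assuming they are chained in a list starting at item->vcpu. */
    #define for_each_sched_item_vcpu(i, v) \
        for ( (v) = (i)->vcpu; (v) != NULL; (v) = (v)->next_in_list )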

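Similarly, the new item_running() relies on item->runstate_cnt[], an
array (introduced earlier in the series) counting the item's vcpus per
runstate. A sketch of the bookkeeping assumed to happen on each runstate
change, e.g. in vcpu_runstate_change():

    /* Sketch only: keep per-item counts of vcpus in each runstate. */
    item->runstate_cnt[v->runstate.state]--;
    item->runstate_cnt[new_state]++;

With that invariant, item_running(item) is non-zero iff at least one of
the item's vcpus is currently in RUNSTATE_running.
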
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 6ba6e70338..1134733314 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -180,8 +180,9 @@ static inline void vcpu_runstate_change(
     s_time_t delta;
     struct sched_item *item = v->sched_item;
 
-    ASSERT(v->runstate.state != new_state);
     ASSERT(spin_is_locked(per_cpu(sched_res, v->processor)->schedule_lock));
+    if ( v->runstate.state == new_state )
+        return;
 
     vcpu_urgent_count_update(v);
 
@@ -203,15 +204,16 @@ static inline void vcpu_runstate_change(
 static inline void sched_item_runstate_change(struct sched_item *item,
     bool running, s_time_t new_entry_time)
 {
-    struct vcpu *v = item->vcpu;
+    struct vcpu *v;
 
-    if ( running )
-        vcpu_runstate_change(v, v->new_state, new_entry_time);
-    else
-        vcpu_runstate_change(v,
-            ((v->pause_flags & VPF_blocked) ? RUNSTATE_blocked :
-             (vcpu_runnable(v) ? RUNSTATE_runnable : RUNSTATE_offline)),
-            new_entry_time);
+    for_each_sched_item_vcpu( item, v )
+        if ( running )
+            vcpu_runstate_change(v, v->new_state, new_entry_time);
+        else
+            vcpu_runstate_change(v,
+                ((v->pause_flags & VPF_blocked) ? RUNSTATE_blocked :
+                 (vcpu_runnable(v) ? RUNSTATE_runnable : RUNSTATE_offline)),
+                new_entry_time);
 }
 
 void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate)
@@ -1580,7 +1582,7 @@ static void sched_switch_items(struct sched_resource *sd,
              (next->vcpu->runstate.state == RUNSTATE_runnable) ?
              (now - next->state_entry_time) : 0, prev->next_time);
 
-    ASSERT(prev->vcpu->runstate.state == RUNSTATE_running);
+    ASSERT(item_running(prev));
 
     TRACE_4D(TRC_SCHED_SWITCH, prev->domain->domain_id, prev->item_id,
              next->domain->domain_id, next->item_id);
@@ -1588,7 +1590,7 @@ static void sched_switch_items(struct sched_resource *sd,
     sched_item_runstate_change(prev, false, now);
     prev->last_run_time = now;
 
-    ASSERT(next->vcpu->runstate.state != RUNSTATE_running);
+    ASSERT(!item_running(next));
     sched_item_runstate_change(next, true, now);
 
     /*
@@ -1703,7 +1705,7 @@ void sched_context_switched(struct vcpu *vprev, struct vcpu *vnext)
             while ( atomic_read(&next->rendezvous_out_cnt) )
                 cpu_relax();
     }
-    else if ( vprev != vnext )
+    else if ( vprev != vnext && sched_granularity == 1 )
         context_saved(vprev);
 }
 
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 755b0f8f74..88fbc06860 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -55,29 +55,55 @@ static inline bool is_idle_item(const struct sched_item *item)
     return is_idle_vcpu(item->vcpu);
 }
 
+static inline unsigned int item_running(const struct sched_item *item)
+{
+    return item->runstate_cnt[RUNSTATE_running];
+}
+
 static inline bool item_runnable(const struct sched_item *item)
 {
-    return vcpu_runnable(item->vcpu);
+    struct vcpu *v;
+
+    for_each_sched_item_vcpu( item, v )
+        if ( vcpu_runnable(v) )
+            return true;
+
+    return false;
 }
 
 static inline bool item_runnable_state(const struct sched_item *item)
 {
     struct vcpu *v;
-    bool runnable;
+    bool runnable, ret = false;
+
+    for_each_sched_item_vcpu( item, v )
+    {
+        runnable = vcpu_runnable(v);
+
+        v->new_state = runnable ? RUNSTATE_running
+                                : (v->pause_flags & VPF_blocked)
+                                  ? RUNSTATE_blocked : RUNSTATE_offline;
 
-    v = item->vcpu;
-    runnable = vcpu_runnable(v);
+        if ( runnable )
+            ret = true;
+    }
 
-    v->new_state = runnable ? RUNSTATE_running
-                            : (v->pause_flags & VPF_blocked)
-                              ? RUNSTATE_blocked : RUNSTATE_offline;
-    return runnable;
+    return ret;
 }
 
 static inline void sched_set_res(struct sched_item *item,
                                  struct sched_resource *res)
 {
-    item->vcpu->processor = res->processor;
+    int cpu = cpumask_first(res->cpus);
+    struct vcpu *v;
+
+    for_each_sched_item_vcpu( item, v )
+    {
+        ASSERT(cpu < nr_cpu_ids);
+        v->processor = cpu;
+        cpu = cpumask_next(cpu, res->cpus);
+    }
+
     item->res = res;
 }
 
@@ -89,25 +115,37 @@ static inline unsigned int sched_item_cpu(struct sched_item *item)
 static inline void sched_set_pause_flags(struct sched_item *item,
                                          unsigned int bit)
 {
-    __set_bit(bit, &item->vcpu->pause_flags);
+    struct vcpu *v;
+
+    for_each_sched_item_vcpu( item, v )
+        __set_bit(bit, &v->pause_flags);
 }
 
 static inline void sched_clear_pause_flags(struct sched_item *item,
                                            unsigned int bit)
 {
-    __clear_bit(bit, &item->vcpu->pause_flags);
+    struct vcpu *v;
+
+    for_each_sched_item_vcpu( item, v )
+        __clear_bit(bit, &v->pause_flags);
 }
 
 static inline void sched_set_pause_flags_atomic(struct sched_item *item,
                                                 unsigned int bit)
 {
-    set_bit(bit, &item->vcpu->pause_flags);
+    struct vcpu *v;
+
+    for_each_sched_item_vcpu( item, v )
+        set_bit(bit, &v->pause_flags);
 }
 
 static inline void sched_clear_pause_flags_atomic(struct sched_item *item,
                                                   unsigned int bit)
 {
-    clear_bit(bit, &item->vcpu->pause_flags);
+    struct vcpu *v;
+
+    for_each_sched_item_vcpu( item, v )
+        clear_bit(bit, &v->pause_flags);
 }
 
 static inline struct sched_item *sched_idle_item(unsigned int cpu)
@@ -468,12 +506,18 @@ static inline int sched_adjust_cpupool(const struct scheduler *s,
 
 static inline void sched_item_pause_nosync(struct sched_item *item)
 {
-    vcpu_pause_nosync(item->vcpu);
+    struct vcpu *v;
+
+    for_each_sched_item_vcpu( item, v )
+        vcpu_pause_nosync(v);
 }
 
 static inline void sched_item_unpause(struct sched_item *item)
 {
-    vcpu_unpause(item->vcpu);
+    struct vcpu *v;
+
+    for_each_sched_item_vcpu( item, v )
+        vcpu_unpause(v);
 }
 
 #define REGISTER_SCHEDULER(x) static const struct scheduler *x##_entry \
-- 
2.16.4

