Let's move all CFS_CPULIMIT-related functions under the CONFIG_CFS_CPULIMIT
ifdef. This will ease further patching.
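
For illustration, here is a minimal sketch (hypothetical names, not the
actual kernel code) of the two patterns applied below: feature-specific
helpers live entirely under the config ifdef, while generic callers keep a
single definition and only guard the feature-specific part of their body,
falling back to the default when the feature is compiled out:

	#ifdef CONFIG_CFS_CPULIMIT
	/* real helper, only built when the feature is enabled */
	static inline int feature_check(int cpu)
	{
		return cpu >= 0;
	}
	#endif /* CONFIG_CFS_CPULIMIT */

	/* generic caller: single definition, feature-specific part guarded */
	static inline int can_do_work(int cpu)
	{
	#ifdef CONFIG_CFS_CPULIMIT
		if (!feature_check(cpu))
			return 0;
	#endif
		return 1;	/* default when the feature is off */
	}

This avoids having to keep a parallel set of no-op stubs in the !CONFIG
branch for every helper that only the feature-enabled code path calls.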

Signed-off-by: Vladimir Davydov <[email protected]>
---
 kernel/sched/fair.c | 39 ++++++++++++++++++---------------------
 1 file changed, 18 insertions(+), 21 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 52365f6a4e36..2ff38fc1d600 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -560,11 +560,6 @@ static inline int check_cpulimit_spread(struct task_group *tg, int target_cpu)
        return cfs_rq_active(tg->cfs_rq[target_cpu]) ? 0 : -1;
 }
 #else /* !CONFIG_CFS_CPULIMIT */
-static inline int cfs_rq_active(struct cfs_rq *cfs_rq)
-{
-       return 1;
-}
-
 static inline void inc_nr_active_cfs_rqs(struct cfs_rq *cfs_rq)
 {
 }
@@ -572,16 +567,6 @@ static inline void inc_nr_active_cfs_rqs(struct cfs_rq *cfs_rq)
 static inline void dec_nr_active_cfs_rqs(struct cfs_rq *cfs_rq, int postpone)
 {
 }
-
-static inline enum hrtimer_restart sched_cfs_active_timer(struct hrtimer *timer)
-{
-       return 0;
-}
-
-static inline int check_cpulimit_spread(struct task_group *tg, int target_cpu)
-{
-       return 1;
-}
 #endif /* CONFIG_CFS_CPULIMIT */
 
 static __always_inline
@@ -4716,6 +4701,7 @@ done:
 
 static inline bool select_runnable_cpu(struct task_struct *p, int *new_cpu)
 {
+#ifdef CONFIG_CFS_CPULIMIT
        struct task_group *tg;
        struct sched_domain *sd;
        int prev_cpu = task_cpu(p);
@@ -4741,6 +4727,7 @@ static inline bool select_runnable_cpu(struct task_struct *p, int *new_cpu)
                        }
                }
        }
+#endif
        return false;
 }
 
@@ -5461,14 +5448,10 @@ static inline bool migrate_degrades_locality(struct task_struct *p,
 }
 #endif
 
-/*
- * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
- */
-static
-int can_migrate_task(struct task_struct *p, struct lb_env *env)
+static inline int can_migrate_task_cpulimit(struct task_struct *p, struct lb_env *env)
 {
+#ifdef CONFIG_CFS_CPULIMIT
        struct task_group *tg = top_cfs_rq_of(&p->se)->tg;
-       int tsk_cache_hot = 0;
 
        if (check_cpulimit_spread(tg, env->dst_cpu) < 0) {
                int cpu;
@@ -5490,6 +5473,20 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
                }
                return 0;
        }
+#endif
+       return 1;
+}
+
+/*
+ * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
+ */
+static
+int can_migrate_task(struct task_struct *p, struct lb_env *env)
+{
+       int tsk_cache_hot = 0;
+
+       if (!can_migrate_task_cpulimit(p, env))
+               return 0;
 
        /*
         * We do not migrate tasks that are:
-- 
2.1.4
