Re: [PATCH 1/7 v4] sched: factorize attach entity

2016-10-12 Thread Dietmar Eggemann
On 12/10/16 11:59, Vincent Guittot wrote:
> On 7 October 2016 at 01:11, Vincent Guittot  
> wrote:
>>
>> On 5 October 2016 at 11:38, Dietmar Eggemann  
>> wrote:
>>> On 09/26/2016 01:19 PM, Vincent Guittot wrote:

[...]

 -static void attach_task_cfs_rq(struct task_struct *p)
 +static void attach_entity_cfs_rq(struct sched_entity *se)
  {
 - struct sched_entity *se = &p->se;
   struct cfs_rq *cfs_rq = cfs_rq_of(se);
>>>
>>>
>>> Both callers of attach_entity_cfs_rq() already use cfs_rq_of(se). You
>>> could pass it into attach_entity_cfs_rq().
>>
>> Yes that would make sense
> 
> In fact there is a 3rd caller, online_fair_sched_group, which calls
> attach_entity_cfs_rq and doesn't already use cfs_rq_of(se), so I
> wonder if it's worth doing the interface change.

OK, this change gets in w/ patch 6/7. Yeah, skip it, it's not so important.
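
For reference, here is a sketch of what the skipped interface change would have looked like, written as a diff on top of this patch. It was never posted as part of the series; it only illustrates the suggestion and why it was dropped:

-static void attach_entity_cfs_rq(struct sched_entity *se)
+static void attach_entity_cfs_rq(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 	u64 now = cfs_rq_clock_task(cfs_rq);

 	/* Synchronize entity with its cfs_rq */
 	update_cfs_rq_load_avg(now, cfs_rq, false);
 	attach_entity_load_avg(cfs_rq, se);
 	update_tg_load_avg(cfs_rq, false);
 }

 static void attach_task_cfs_rq(struct task_struct *p)
 {
 	struct sched_entity *se = &p->se;
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);

-	attach_entity_cfs_rq(se);
+	attach_entity_cfs_rq(cfs_rq, se);

 	if (!vruntime_normalized(p))
 		se->vruntime += cfs_rq->min_vruntime;
 }

post_init_entity_util_avg() and attach_task_cfs_rq() both have the cfs_rq at hand, but the caller that arrives with patch 6/7 starts from a sched_entity only, so the single-argument form is kept.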


Re: [PATCH 1/7 v4] sched: factorize attach entity

2016-10-12 Thread Vincent Guittot
On 7 October 2016 at 01:11, Vincent Guittot  wrote:
>
> On 5 October 2016 at 11:38, Dietmar Eggemann  wrote:
> > On 09/26/2016 01:19 PM, Vincent Guittot wrote:
> >>
> >> Factorize post_init_entity_util_avg and part of attach_task_cfs_rq
> >> in one function attach_entity_cfs_rq
> >>
> >> Signed-off-by: Vincent Guittot 
> >> ---
> >>  kernel/sched/fair.c | 19 +++++++++++--------
> >>  1 file changed, 11 insertions(+), 8 deletions(-)
> >>
> >> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> >> index 986c10c..e8ed8d1 100644
> >> --- a/kernel/sched/fair.c
> >> +++ b/kernel/sched/fair.c
> >> @@ -697,9 +697,7 @@ void init_entity_runnable_average(struct sched_entity
> >> *se)
> >>  }
> >>
> >>  static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
> >> -static int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool
> >> update_freq);
> >> -static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force);
> >> -static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct
> >> sched_entity *se);
> >> +static void attach_entity_cfs_rq(struct sched_entity *se);
> >>
> >>  /*
> >>   * With new tasks being created, their initial util_avgs are extrapolated
> >> @@ -764,9 +762,7 @@ void post_init_entity_util_avg(struct sched_entity
> >> *se)
> >>   }
> >>   }
> >
> >
> > You could now move the 'u64 now = cfs_rq_clock_task(cfs_rq);' into the
> > if condition to handle !fair_sched_class tasks.
>
> yes
>
> >
> >> - update_cfs_rq_load_avg(now, cfs_rq, false);
> >> - attach_entity_load_avg(cfs_rq, se);
> >> - update_tg_load_avg(cfs_rq, false);
> >> + attach_entity_cfs_rq(se);
> >>  }
> >>
> >>  #else /* !CONFIG_SMP */
> >> @@ -8501,9 +8497,8 @@ static void detach_task_cfs_rq(struct task_struct
> >> *p)
> >>   update_tg_load_avg(cfs_rq, false);
> >>  }
> >>
> >> -static void attach_task_cfs_rq(struct task_struct *p)
> >> +static void attach_entity_cfs_rq(struct sched_entity *se)
> >>  {
> >> - struct sched_entity *se = &p->se;
> >>   struct cfs_rq *cfs_rq = cfs_rq_of(se);
> >
> >
> > Both callers of attach_entity_cfs_rq() already use cfs_rq_of(se). You
> > could pass it into attach_entity_cfs_rq().
>
> Yes that would make sense

In fact there is a 3rd caller, online_fair_sched_group, which calls
attach_entity_cfs_rq and doesn't already use cfs_rq_of(se), so I
wonder if it's worth doing the interface change.


>
>
> >
> >>   u64 now = cfs_rq_clock_task(cfs_rq);
> >> @@ -8519,6 +8514,14 @@ static void attach_task_cfs_rq(struct task_struct
> >> *p)
> >
> >
> > The old comment /* Synchronize task ... */ should be changed to /*
> > Synchronize entity ... */
>
> yes
>
> >
> >>   update_cfs_rq_load_avg(now, cfs_rq, false);
> >>   attach_entity_load_avg(cfs_rq, se);
> >>   update_tg_load_avg(cfs_rq, false);
> >> +}
> >> +
> >> +static void attach_task_cfs_rq(struct task_struct *p)
> >> +{
> >> + struct sched_entity *se = &p->se;
> >> + struct cfs_rq *cfs_rq = cfs_rq_of(se);
> >> +
> >> + attach_entity_cfs_rq(se);
> >>
> >>   if (!vruntime_normalized(p))
> >>   se->vruntime += cfs_rq->min_vruntime;
> >>
> >
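
The third caller mentioned above arrives with patch 6/7: online_fair_sched_group() attaches each of a task group's per-CPU entities. A rough sketch of that call site follows; the locking detail is an assumption taken from the surrounding mainline code rather than something quoted in this thread, but it shows why only the sched_entity is at hand there:

void online_fair_sched_group(struct task_group *tg)
{
	struct sched_entity *se;
	struct rq *rq;
	int i;

	for_each_possible_cpu(i) {
		rq = cpu_rq(i);
		se = tg->se[i];

		raw_spin_lock_irq(&rq->lock);
		/*
		 * Only the entity is available here; a cfs_rq parameter
		 * would force an extra tg->cfs_rq[i] (or cfs_rq_of(se))
		 * lookup just for this call site.
		 */
		attach_entity_cfs_rq(se);
		raw_spin_unlock_irq(&rq->lock);
	}
}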


Re: [PATCH 1/7 v4] sched: factorize attach entity

2016-10-06 Thread Vincent Guittot
On 5 October 2016 at 11:38, Dietmar Eggemann  wrote:
> On 09/26/2016 01:19 PM, Vincent Guittot wrote:
>>
>> Factorize post_init_entity_util_avg and part of attach_task_cfs_rq
>> in one function attach_entity_cfs_rq
>>
>> Signed-off-by: Vincent Guittot 
>> ---
>>  kernel/sched/fair.c | 19 +++++++++++--------
>>  1 file changed, 11 insertions(+), 8 deletions(-)
>>
>> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
>> index 986c10c..e8ed8d1 100644
>> --- a/kernel/sched/fair.c
>> +++ b/kernel/sched/fair.c
>> @@ -697,9 +697,7 @@ void init_entity_runnable_average(struct sched_entity
>> *se)
>>  }
>>
>>  static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
>> -static int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool
>> update_freq);
>> -static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force);
>> -static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct
>> sched_entity *se);
>> +static void attach_entity_cfs_rq(struct sched_entity *se);
>>
>>  /*
>>   * With new tasks being created, their initial util_avgs are extrapolated
>> @@ -764,9 +762,7 @@ void post_init_entity_util_avg(struct sched_entity
>> *se)
>>   }
>>   }
>
>
> You could now move the 'u64 now = cfs_rq_clock_task(cfs_rq);' into the
> if condition to handle !fair_sched_class tasks.

yes

>
>> - update_cfs_rq_load_avg(now, cfs_rq, false);
>> - attach_entity_load_avg(cfs_rq, se);
>> - update_tg_load_avg(cfs_rq, false);
>> + attach_entity_cfs_rq(se);
>>  }
>>
>>  #else /* !CONFIG_SMP */
>> @@ -8501,9 +8497,8 @@ static void detach_task_cfs_rq(struct task_struct
>> *p)
>>   update_tg_load_avg(cfs_rq, false);
>>  }
>>
>> -static void attach_task_cfs_rq(struct task_struct *p)
>> +static void attach_entity_cfs_rq(struct sched_entity *se)
>>  {
>> - struct sched_entity *se = &p->se;
>>   struct cfs_rq *cfs_rq = cfs_rq_of(se);
>
>
> Both callers of attach_entity_cfs_rq() already use cfs_rq_of(se). You
> could pass it into attach_entity_cfs_rq().

Yes that would make sense

>
>>   u64 now = cfs_rq_clock_task(cfs_rq);
>> @@ -8519,6 +8514,14 @@ static void attach_task_cfs_rq(struct task_struct
>> *p)
>
>
> The old comment /* Synchronize task ... */ should be changed to /*
> Synchronize entity ... */

yes

>
>>   update_cfs_rq_load_avg(now, cfs_rq, false);
>>   attach_entity_load_avg(cfs_rq, se);
>>   update_tg_load_avg(cfs_rq, false);
>> +}
>> +
>> +static void attach_task_cfs_rq(struct task_struct *p)
>> +{
>> + struct sched_entity *se = &p->se;
>> + struct cfs_rq *cfs_rq = cfs_rq_of(se);
>> +
>> + attach_entity_cfs_rq(se);
>>
>>   if (!vruntime_normalized(p))
>>   se->vruntime += cfs_rq->min_vruntime;
>>
>
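
A minimal sketch of the agreed change to post_init_entity_util_avg(): with the fair-class path going through attach_entity_cfs_rq(), which reads the clock itself, 'now' is only needed in the !fair_sched_class branch. The branch body is paraphrased from the pre-patch function and is an assumption as far as this thread goes:

void post_init_entity_util_avg(struct sched_entity *se)
{
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	/* ... initial util_avg extrapolation, unchanged ... */

	if (entity_is_task(se)) {
		struct task_struct *p = task_of(se);

		if (p->sched_class != &fair_sched_class) {
			/*
			 * 'now' is only read on this path, so compute it
			 * here instead of at the top of the function.
			 */
			u64 now = cfs_rq_clock_task(cfs_rq);

			se->avg.last_update_time = now;
			return;
		}
	}

	attach_entity_cfs_rq(se);
}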


Re: [PATCH 1/7 v4] sched: factorize attach entity

2016-10-05 Thread Dietmar Eggemann

On 09/26/2016 01:19 PM, Vincent Guittot wrote:

Factorize post_init_entity_util_avg and part of attach_task_cfs_rq
in one function attach_entity_cfs_rq

Signed-off-by: Vincent Guittot 
---
 kernel/sched/fair.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 986c10c..e8ed8d1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -697,9 +697,7 @@ void init_entity_runnable_average(struct sched_entity *se)
 }

 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
-static int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool 
update_freq);
-static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force);
-static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity 
*se);
+static void attach_entity_cfs_rq(struct sched_entity *se);

 /*
  * With new tasks being created, their initial util_avgs are extrapolated
@@ -764,9 +762,7 @@ void post_init_entity_util_avg(struct sched_entity *se)
  }
  }


You could now move the 'u64 now = cfs_rq_clock_task(cfs_rq);' into the
if condition to handle !fair_sched_class tasks.


- update_cfs_rq_load_avg(now, cfs_rq, false);
- attach_entity_load_avg(cfs_rq, se);
- update_tg_load_avg(cfs_rq, false);
+ attach_entity_cfs_rq(se);
 }

 #else /* !CONFIG_SMP */
@@ -8501,9 +8497,8 @@ static void detach_task_cfs_rq(struct task_struct *p)
  update_tg_load_avg(cfs_rq, false);
 }

-static void attach_task_cfs_rq(struct task_struct *p)
+static void attach_entity_cfs_rq(struct sched_entity *se)
 {
- struct sched_entity *se = &p->se;
  struct cfs_rq *cfs_rq = cfs_rq_of(se);


Both callers of attach_entity_cfs_rq() already use cfs_rq_of(se). You
could pass it into attach_entity_cfs_rq().


  u64 now = cfs_rq_clock_task(cfs_rq);
@@ -8519,6 +8514,14 @@ static void attach_task_cfs_rq(struct task_struct *p)


The old comment /* Synchronize task ... */ should be changed to /*
Synchronize entity ... */


  update_cfs_rq_load_avg(now, cfs_rq, false);
  attach_entity_load_avg(cfs_rq, se);
  update_tg_load_avg(cfs_rq, false);
+}
+
+static void attach_task_cfs_rq(struct task_struct *p)
+{
+ struct sched_entity *se = &p->se;
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+ attach_entity_cfs_rq(se);

  if (!vruntime_normalized(p))
  se->vruntime += cfs_rq->min_vruntime;
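
Putting the hunks above together, the factorized pair ends up looking roughly like this, with the comment already switched to the "entity" wording agreed elsewhere in the thread. The CONFIG_FAIR_GROUP_SCHED depth reset sits in context the diff does not show; it is reproduced here from memory and should be treated as an assumption:

static void attach_entity_cfs_rq(struct sched_entity *se)
{
	struct cfs_rq *cfs_rq = cfs_rq_of(se);
	u64 now = cfs_rq_clock_task(cfs_rq);

#ifdef CONFIG_FAIR_GROUP_SCHED
	/*
	 * Since the real depth could have been changed (only FAIR
	 * class maintains a depth value), reset depth properly.
	 */
	se->depth = se->parent ? se->parent->depth + 1 : 0;
#endif

	/* Synchronize entity with its cfs_rq */
	update_cfs_rq_load_avg(now, cfs_rq, false);
	attach_entity_load_avg(cfs_rq, se);
	update_tg_load_avg(cfs_rq, false);
}

static void attach_task_cfs_rq(struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	attach_entity_cfs_rq(se);

	if (!vruntime_normalized(p))
		se->vruntime += cfs_rq->min_vruntime;
}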





