Re: [PATCH v3 4/5][RESEND] page_alloc: Make movablecore_map has higher priority

2012-12-12 Thread Simon Jeons
On Wed, 2012-12-12 at 17:34 +0800, Tang Chen wrote:
> Hi Simon,
> 
> Thanks for reviewing. This logic is aimed at making movablecore_map
> coexist with kernelcore/movablecore.
> 
> Please see below. :)

Hi Chen,

Thanks for your detailed explanation. The logic looks reasonable to me.
But how do you guarantee the below changelog in your patchset?
1) If the range is involved in a single node, then from ss to the end of
the node will be ZONE_MOVABLE.
2) If the range covers two or more nodes, then from ss to the end of the
node will be ZONE_MOVABLE, and all the other nodes will only have
ZONE_MOVABLE.

> 
> On 12/12/2012 09:33 AM, Simon Jeons wrote:
> >> @@ -4839,9 +4839,17 @@ static void __init 
> >> find_zone_movable_pfns_for_nodes(void)
> >>required_kernelcore = max(required_kernelcore, corepages);
> >>}
> >>
> >> -  /* If kernelcore was not specified, there is no ZONE_MOVABLE */
> >> -  if (!required_kernelcore)
> >> +  /*
> >> +   * If neither kernelcore/movablecore nor movablecore_map is specified,
> >> +   * there is no ZONE_MOVABLE. But if movablecore_map is specified, the
> >> +   * start pfn of ZONE_MOVABLE has been stored in zone_movable_limit[].
> >> +   */
> >> +  if (!required_kernelcore) {
> >> +  if (movablecore_map.nr_map)
> >> +  memcpy(zone_movable_pfn, zone_movable_limit,
> >> +  sizeof(zone_movable_pfn));
> 
> If users didn't specify the kernelcore option, then zone_movable_pfn[]
> and zone_movable_limit[] are all the same. We skip the logic.
> 
> >>goto out;
> >> +  }
> >>
> >>/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
> >>usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
> >> @@ -4871,10 +4879,24 @@ restart:
> >>for_each_mem_pfn_range(i, nid,_pfn,_pfn, NULL) {
> >>unsigned long size_pages;
> >>
> >> +  /*
> >> +   * Find more memory for kernelcore in
> >> +   * [zone_movable_pfn[nid], zone_movable_limit[nid]).
> >> +   */
> >>start_pfn = max(start_pfn, zone_movable_pfn[nid]);
> >>if (start_pfn>= end_pfn)
> >>continue;
> >>
> >
> > Hi Chen,
> >
> >> +  if (zone_movable_limit[nid]) {
> 
> If users didn't give any limitation of ZONE_MOVABLE on node i, we could
> skip the logic too.
> 
> >> +  end_pfn = min(end_pfn, zone_movable_limit[nid]);
> 
> In order to reuse the original kernelcore/movablecore logic, we keep
> end_pfn <= zone_movable_limit[nid]. We divide [start_pfn, end_pfn) into
> two parts:
> [start_pfn, zone_movable_limit[nid])
> and
> [zone_movable_limit[nid], end_pfn).
> 
> We just remove the second part, and go on to the original logic.
> 
> >> +  /* No range left for kernelcore in this node */
> >> +  if (start_pfn>= end_pfn) {
> 
> Since we re-evaluated end_pfn, if we have crossed the limitation, we
> should stop.
> 
> >> +  zone_movable_pfn[nid] =
> >> +  zone_movable_limit[nid];
> 
> Here, we found the real limitation. That means, the lowest pfn of
> ZONE_MOVABLE is either zone_movable_limit[nid] or the value the original
> logic calculates out, which is below zone_movable_limit[nid].
> 
> >> +  break;
> 
> Then we break and go on to the next node.
> 
> >> +  }
> >> +  }
> >> +
> >
> > Could you explain this part of codes? hard to understand.
> >
> >>/* Account for what is only usable for kernelcore */
> >>if (start_pfn<  usable_startpfn) {
> >>unsigned long kernel_pages;
> >> @@ -4934,12 +4956,12 @@ restart:
> >>if (usable_nodes&&  required_kernelcore>  usable_nodes)
> >>goto restart;
> >>
> >> +out:
> >>/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
> >>for (nid = 0; nid<  MAX_NUMNODES; nid++)
> >>zone_movable_pfn[nid] =
> >>roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
> >>
> >> -out:
> >>/* restore the node_state */
> >>node_states[N_HIGH_MEMORY] = saved_node_state;
> >>   }
> >
> >
> >
> 


--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [PATCH v3 4/5][RESEND] page_alloc: Make movablecore_map has higher priority

2012-12-12 Thread Tang Chen

Hi Simon,

Thanks for reviewing. This logic is aimed at making movablecore_map
coexist with kernelcore/movablecore.

Please see below. :)

On 12/12/2012 09:33 AM, Simon Jeons wrote:

@@ -4839,9 +4839,17 @@ static void __init find_zone_movable_pfns_for_nodes(void)
required_kernelcore = max(required_kernelcore, corepages);
}

-   /* If kernelcore was not specified, there is no ZONE_MOVABLE */
-   if (!required_kernelcore)
+   /*
+* If neither kernelcore/movablecore nor movablecore_map is specified,
+* there is no ZONE_MOVABLE. But if movablecore_map is specified, the
+* start pfn of ZONE_MOVABLE has been stored in zone_movable_limit[].
+*/
+   if (!required_kernelcore) {
+   if (movablecore_map.nr_map)
+   memcpy(zone_movable_pfn, zone_movable_limit,
+   sizeof(zone_movable_pfn));


If users didn't specify the kernelcore option, then zone_movable_pfn[]
and zone_movable_limit[] are all the same. We skip the logic.


goto out;
+   }

/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
@@ -4871,10 +4879,24 @@ restart:
for_each_mem_pfn_range(i, nid,_pfn,_pfn, NULL) {
unsigned long size_pages;

+   /*
+* Find more memory for kernelcore in
+* [zone_movable_pfn[nid], zone_movable_limit[nid]).
+*/
start_pfn = max(start_pfn, zone_movable_pfn[nid]);
if (start_pfn>= end_pfn)
continue;



Hi Chen,


+   if (zone_movable_limit[nid]) {


If users didn't give any limitation of ZONE_MOVABLE on node i, we could
skip the logic too.


+   end_pfn = min(end_pfn, zone_movable_limit[nid]);


In order to reuse the original kernelcore/movablecore logic, we keep
end_pfn <= zone_movable_limit[nid]. We divide [start_pfn, end_pfn) into
two parts:
[start_pfn, zone_movable_limit[nid])
and
[zone_movable_limit[nid], end_pfn).

We just remove the second part, and go on to the original logic.


+   /* No range left for kernelcore in this node */
+   if (start_pfn>= end_pfn) {


Since we re-evaluated end_pfn, if we have crossed the limitation, we
should stop.


+   zone_movable_pfn[nid] =
+   zone_movable_limit[nid];


Here, we found the real limitation. That means, the lowest pfn of
ZONE_MOVABLE is either zone_movable_limit[nid] or the value the original
logic calculates out, which is below zone_movable_limit[nid].


+   break;


Then we break and go on to the next node.


+   }
+   }
+


Could you explain this part of codes? hard to understand.


/* Account for what is only usable for kernelcore */
if (start_pfn<  usable_startpfn) {
unsigned long kernel_pages;
@@ -4934,12 +4956,12 @@ restart:
if (usable_nodes&&  required_kernelcore>  usable_nodes)
goto restart;

+out:
/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
for (nid = 0; nid<  MAX_NUMNODES; nid++)
zone_movable_pfn[nid] =
roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);

-out:
/* restore the node_state */
node_states[N_HIGH_MEMORY] = saved_node_state;
  }






--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [PATCH v3 4/5][RESEND] page_alloc: Make movablecore_map has higher priority

2012-12-12 Thread Tang Chen

Hi Simon,

Thanks for reviewing. This logic is aimed at making movablecore_map
coexist with kernelcore/movablecore.

Please see below. :)

On 12/12/2012 09:33 AM, Simon Jeons wrote:

@@ -4839,9 +4839,17 @@ static void __init find_zone_movable_pfns_for_nodes(void)
required_kernelcore = max(required_kernelcore, corepages);
}

-   /* If kernelcore was not specified, there is no ZONE_MOVABLE */
-   if (!required_kernelcore)
+   /*
+* If neither kernelcore/movablecore nor movablecore_map is specified,
+* there is no ZONE_MOVABLE. But if movablecore_map is specified, the
+* start pfn of ZONE_MOVABLE has been stored in zone_movable_limit[].
+*/
+   if (!required_kernelcore) {
+   if (movablecore_map.nr_map)
+   memcpy(zone_movable_pfn, zone_movable_limit,
+   sizeof(zone_movable_pfn));


If users didn't specify the kernelcore option, then zone_movable_pfn[]
and zone_movable_limit[] are all the same. We skip the logic.


goto out;
+   }

/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
@@ -4871,10 +4879,24 @@ restart:
for_each_mem_pfn_range(i, nid,start_pfn,end_pfn, NULL) {
unsigned long size_pages;

+   /*
+* Find more memory for kernelcore in
+* [zone_movable_pfn[nid], zone_movable_limit[nid]).
+*/
start_pfn = max(start_pfn, zone_movable_pfn[nid]);
if (start_pfn >= end_pfn)
continue;



Hi Chen,


+   if (zone_movable_limit[nid]) {


If users didn't give any limitation of ZONE_MOVABLE on node i, we could
skip the logic too.


+   end_pfn = min(end_pfn, zone_movable_limit[nid]);


In order to reuse the original kernelcore/movablecore logic, we keep
end_pfn <= zone_movable_limit[nid]. We divide [start_pfn, end_pfn) into
two parts:
[start_pfn, zone_movable_limit[nid])
and
[zone_movable_limit[nid], end_pfn).

We just remove the second part, and go on to the original logic.


+   /* No range left for kernelcore in this node */
+   if (start_pfn >= end_pfn) {


Since we re-evaluated end_pfn, if we have crossed the limitation, we
should stop.


+   zone_movable_pfn[nid] =
+   zone_movable_limit[nid];


Here, we found the real limitation. That means, the lowest pfn of
ZONE_MOVABLE is either zone_movable_limit[nid] or the value the original
logic calculates out, which is below zone_movable_limit[nid].


+   break;


Then we break and go on to the next node.


+   }
+   }
+


Could you explain this part of codes? hard to understand.


/* Account for what is only usable for kernelcore */
if (start_pfn < usable_startpfn) {
unsigned long kernel_pages;
@@ -4934,12 +4956,12 @@ restart:
if (usable_nodes && required_kernelcore > usable_nodes)
goto restart;

+out:
/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
for (nid = 0; nid < MAX_NUMNODES; nid++)
zone_movable_pfn[nid] =
roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);

-out:
/* restore the node_state */
node_states[N_HIGH_MEMORY] = saved_node_state;
  }






--
To unsubscribe from this list: send the line unsubscribe linux-kernel in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [PATCH v3 4/5][RESEND] page_alloc: Make movablecore_map has higher priority

2012-12-12 Thread Simon Jeons
On Wed, 2012-12-12 at 17:34 +0800, Tang Chen wrote:
 Hi Simon,
 
 Thanks for reviewing. This logic is aimed at making movablecore_map
 coexist with kernelcore/movablecore.
 
 Please see below. :)

Hi Chen,

Thanks for your detailed explanation. The logic looks reasonable to me.
But how do you guarantee the below changelog in your patchset?
1) If the range is involved in a single node, then from ss to the end of
the node will be ZONE_MOVABLE.
2) If the range covers two or more nodes, then from ss to the end of the
node will be ZONE_MOVABLE, and all the other nodes will only have
ZONE_MOVABLE.

 
 On 12/12/2012 09:33 AM, Simon Jeons wrote:
  @@ -4839,9 +4839,17 @@ static void __init 
  find_zone_movable_pfns_for_nodes(void)
 required_kernelcore = max(required_kernelcore, corepages);
 }
 
  -  /* If kernelcore was not specified, there is no ZONE_MOVABLE */
  -  if (!required_kernelcore)
  +  /*
  +   * If neither kernelcore/movablecore nor movablecore_map is specified,
  +   * there is no ZONE_MOVABLE. But if movablecore_map is specified, the
  +   * start pfn of ZONE_MOVABLE has been stored in zone_movable_limit[].
  +   */
  +  if (!required_kernelcore) {
  +  if (movablecore_map.nr_map)
  +  memcpy(zone_movable_pfn, zone_movable_limit,
  +  sizeof(zone_movable_pfn));
 
 If users didn't specify the kernelcore option, then zone_movable_pfn[]
 and zone_movable_limit[] are all the same. We skip the logic.
 
 goto out;
  +  }
 
 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
  @@ -4871,10 +4879,24 @@ restart:
 for_each_mem_pfn_range(i, nid,start_pfn,end_pfn, NULL) {
 unsigned long size_pages;
 
  +  /*
  +   * Find more memory for kernelcore in
  +   * [zone_movable_pfn[nid], zone_movable_limit[nid]).
  +   */
 start_pfn = max(start_pfn, zone_movable_pfn[nid]);
 if (start_pfn >= end_pfn)
 continue;
 
 
  Hi Chen,
 
  +  if (zone_movable_limit[nid]) {
 
 If users didn't give any limitation of ZONE_MOVABLE on node i, we could
 skip the logic too.
 
  +  end_pfn = min(end_pfn, zone_movable_limit[nid]);
 
 In order to reuse the original kernelcore/movablecore logic, we keep
 end_pfn <= zone_movable_limit[nid]. We divide [start_pfn, end_pfn) into
 two parts:
 [start_pfn, zone_movable_limit[nid])
 and
 [zone_movable_limit[nid], end_pfn).
 
 We just remove the second part, and go on to the original logic.
 
  +  /* No range left for kernelcore in this node */
  +  if (start_pfn >= end_pfn) {
 
 Since we re-evaluated end_pfn, if we have crossed the limitation, we
 should stop.
 
  +  zone_movable_pfn[nid] =
  +  zone_movable_limit[nid];
 
 Here, we found the real limitation. That means, the lowest pfn of
 ZONE_MOVABLE is either zone_movable_limit[nid] or the value the original
 logic calculates out, which is below zone_movable_limit[nid].
 
  +  break;
 
 Then we break and go on to the next node.
 
  +  }
  +  }
  +
 
  Could you explain this part of codes? hard to understand.
 
 /* Account for what is only usable for kernelcore */
 if (start_pfn < usable_startpfn) {
 unsigned long kernel_pages;
  @@ -4934,12 +4956,12 @@ restart:
 if (usable_nodes && required_kernelcore > usable_nodes)
 goto restart;
 
  +out:
 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
 for (nid = 0; nid < MAX_NUMNODES; nid++)
 zone_movable_pfn[nid] =
 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
 
  -out:
 /* restore the node_state */
 node_states[N_HIGH_MEMORY] = saved_node_state;
}
 
 
 
 


--
To unsubscribe from this list: send the line unsubscribe linux-kernel in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [PATCH v3 4/5][RESEND] page_alloc: Make movablecore_map has higher priority

2012-12-11 Thread Simon Jeons
On Tue, 2012-12-11 at 12:56 +0800, Tang Chen wrote:
> If kernelcore or movablecore is specified at the same time
> with movablecore_map, movablecore_map will have higher
> priority to be satisfied.
> This patch will make find_zone_movable_pfns_for_nodes()
> calculate zone_movable_pfn[] with the limit from
> zone_movable_limit[].
> 
> change log:
> Move find_usable_zone_for_movable() to free_area_init_nodes()
> so that sanitize_zone_movable_limit() in patch 3 could use
> initialized movable_zone.
> 
> Reported-by: Wu Jianguo 
> 
> Signed-off-by: Tang Chen 
> Reviewed-by: Wen Congyang 
> Reviewed-by: Lai Jiangshan 
> Tested-by: Lin Feng 
> ---
>  mm/page_alloc.c |   28 +---
>  1 files changed, 25 insertions(+), 3 deletions(-)
> 
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 52c368e..00fa67d 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -4839,9 +4839,17 @@ static void __init 
> find_zone_movable_pfns_for_nodes(void)
>   required_kernelcore = max(required_kernelcore, corepages);
>   }
>  
> - /* If kernelcore was not specified, there is no ZONE_MOVABLE */
> - if (!required_kernelcore)
> + /*
> +  * If neither kernelcore/movablecore nor movablecore_map is specified,
> +  * there is no ZONE_MOVABLE. But if movablecore_map is specified, the
> +  * start pfn of ZONE_MOVABLE has been stored in zone_movable_limit[].
> +  */
> + if (!required_kernelcore) {
> + if (movablecore_map.nr_map)
> + memcpy(zone_movable_pfn, zone_movable_limit,
> + sizeof(zone_movable_pfn));
>   goto out;
> + }
>  
>   /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
>   usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
> @@ -4871,10 +4879,24 @@ restart:
>   for_each_mem_pfn_range(i, nid, _pfn, _pfn, NULL) {
>   unsigned long size_pages;
>  
> + /*
> +  * Find more memory for kernelcore in
> +  * [zone_movable_pfn[nid], zone_movable_limit[nid]).
> +  */
>   start_pfn = max(start_pfn, zone_movable_pfn[nid]);
>   if (start_pfn >= end_pfn)
>   continue;
>  

Hi Chen,

> + if (zone_movable_limit[nid]) {
> + end_pfn = min(end_pfn, zone_movable_limit[nid]);
> + /* No range left for kernelcore in this node */
> + if (start_pfn >= end_pfn) {
> + zone_movable_pfn[nid] =
> + zone_movable_limit[nid];
> + break;
> + }
> + }
> +

Could you explain this part of codes? hard to understand.

>   /* Account for what is only usable for kernelcore */
>   if (start_pfn < usable_startpfn) {
>   unsigned long kernel_pages;
> @@ -4934,12 +4956,12 @@ restart:
>   if (usable_nodes && required_kernelcore > usable_nodes)
>   goto restart;
>  
> +out:
>   /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
>   for (nid = 0; nid < MAX_NUMNODES; nid++)
>   zone_movable_pfn[nid] =
>   roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
>  
> -out:
>   /* restore the node_state */
>   node_states[N_HIGH_MEMORY] = saved_node_state;
>  }


--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [PATCH v3 4/5][RESEND] page_alloc: Make movablecore_map has higher priority

2012-12-11 Thread Simon Jeons
On Tue, 2012-12-11 at 12:56 +0800, Tang Chen wrote:
 If kernelcore or movablecore is specified at the same time
 with movablecore_map, movablecore_map will have higher
 priority to be satisfied.
 This patch will make find_zone_movable_pfns_for_nodes()
 calculate zone_movable_pfn[] with the limit from
 zone_movable_limit[].
 
 change log:
 Move find_usable_zone_for_movable() to free_area_init_nodes()
 so that sanitize_zone_movable_limit() in patch 3 could use
 initialized movable_zone.
 
 Reported-by: Wu Jianguo wujian...@huawei.com
 
 Signed-off-by: Tang Chen tangc...@cn.fujitsu.com
 Reviewed-by: Wen Congyang we...@cn.fujitsu.com
 Reviewed-by: Lai Jiangshan la...@cn.fujitsu.com
 Tested-by: Lin Feng linf...@cn.fujitsu.com
 ---
  mm/page_alloc.c |   28 +---
  1 files changed, 25 insertions(+), 3 deletions(-)
 
 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
 index 52c368e..00fa67d 100644
 --- a/mm/page_alloc.c
 +++ b/mm/page_alloc.c
 @@ -4839,9 +4839,17 @@ static void __init 
 find_zone_movable_pfns_for_nodes(void)
   required_kernelcore = max(required_kernelcore, corepages);
   }
  
 - /* If kernelcore was not specified, there is no ZONE_MOVABLE */
 - if (!required_kernelcore)
 + /*
 +  * If neither kernelcore/movablecore nor movablecore_map is specified,
 +  * there is no ZONE_MOVABLE. But if movablecore_map is specified, the
 +  * start pfn of ZONE_MOVABLE has been stored in zone_movable_limit[].
 +  */
 + if (!required_kernelcore) {
 + if (movablecore_map.nr_map)
 + memcpy(zone_movable_pfn, zone_movable_limit,
 + sizeof(zone_movable_pfn));
   goto out;
 + }
  
   /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
   usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
 @@ -4871,10 +4879,24 @@ restart:
   for_each_mem_pfn_range(i, nid, start_pfn, end_pfn, NULL) {
   unsigned long size_pages;
  
 + /*
 +  * Find more memory for kernelcore in
 +  * [zone_movable_pfn[nid], zone_movable_limit[nid]).
 +  */
   start_pfn = max(start_pfn, zone_movable_pfn[nid]);
   if (start_pfn >= end_pfn)
   continue;
  

Hi Chen,

 + if (zone_movable_limit[nid]) {
 + end_pfn = min(end_pfn, zone_movable_limit[nid]);
 + /* No range left for kernelcore in this node */
 + if (start_pfn >= end_pfn) {
 + zone_movable_pfn[nid] =
 + zone_movable_limit[nid];
 + break;
 + }
 + }
 +

Could you explain this part of codes? hard to understand.

   /* Account for what is only usable for kernelcore */
   if (start_pfn < usable_startpfn) {
   unsigned long kernel_pages;
 @@ -4934,12 +4956,12 @@ restart:
   if (usable_nodes && required_kernelcore > usable_nodes)
   goto restart;
  
 +out:
   /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
   for (nid = 0; nid < MAX_NUMNODES; nid++)
   zone_movable_pfn[nid] =
   roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
  
 -out:
   /* restore the node_state */
   node_states[N_HIGH_MEMORY] = saved_node_state;
  }


--
To unsubscribe from this list: send the line unsubscribe linux-kernel in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


[PATCH v3 4/5][RESEND] page_alloc: Make movablecore_map has higher priority

2012-12-10 Thread Tang Chen
If kernelcore or movablecore is specified at the same time
with movablecore_map, movablecore_map will have higher
priority to be satisfied.
This patch will make find_zone_movable_pfns_for_nodes()
calculate zone_movable_pfn[] with the limit from
zone_movable_limit[].

change log:
Move find_usable_zone_for_movable() to free_area_init_nodes()
so that sanitize_zone_movable_limit() in patch 3 could use
initialized movable_zone.

Reported-by: Wu Jianguo 

Signed-off-by: Tang Chen 
Reviewed-by: Wen Congyang 
Reviewed-by: Lai Jiangshan 
Tested-by: Lin Feng 
---
 mm/page_alloc.c |   28 +---
 1 files changed, 25 insertions(+), 3 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 52c368e..00fa67d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4839,9 +4839,17 @@ static void __init find_zone_movable_pfns_for_nodes(void)
required_kernelcore = max(required_kernelcore, corepages);
}
 
-   /* If kernelcore was not specified, there is no ZONE_MOVABLE */
-   if (!required_kernelcore)
+   /*
+* If neither kernelcore/movablecore nor movablecore_map is specified,
+* there is no ZONE_MOVABLE. But if movablecore_map is specified, the
+* start pfn of ZONE_MOVABLE has been stored in zone_movable_limit[].
+*/
+   if (!required_kernelcore) {
+   if (movablecore_map.nr_map)
+   memcpy(zone_movable_pfn, zone_movable_limit,
+   sizeof(zone_movable_pfn));
goto out;
+   }
 
/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
@@ -4871,10 +4879,24 @@ restart:
for_each_mem_pfn_range(i, nid, _pfn, _pfn, NULL) {
unsigned long size_pages;
 
+   /*
+* Find more memory for kernelcore in
+* [zone_movable_pfn[nid], zone_movable_limit[nid]).
+*/
start_pfn = max(start_pfn, zone_movable_pfn[nid]);
if (start_pfn >= end_pfn)
continue;
 
+   if (zone_movable_limit[nid]) {
+   end_pfn = min(end_pfn, zone_movable_limit[nid]);
+   /* No range left for kernelcore in this node */
+   if (start_pfn >= end_pfn) {
+   zone_movable_pfn[nid] =
+   zone_movable_limit[nid];
+   break;
+   }
+   }
+
/* Account for what is only usable for kernelcore */
if (start_pfn < usable_startpfn) {
unsigned long kernel_pages;
@@ -4934,12 +4956,12 @@ restart:
if (usable_nodes && required_kernelcore > usable_nodes)
goto restart;
 
+out:
/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
for (nid = 0; nid < MAX_NUMNODES; nid++)
zone_movable_pfn[nid] =
roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
 
-out:
/* restore the node_state */
node_states[N_HIGH_MEMORY] = saved_node_state;
 }
-- 
1.7.1

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


[PATCH v3 4/5][RESEND] page_alloc: Make movablecore_map has higher priority

2012-12-10 Thread Tang Chen
If kernelcore or movablecore is specified at the same time
with movablecore_map, movablecore_map will have higher
priority to be satisfied.
This patch will make find_zone_movable_pfns_for_nodes()
calculate zone_movable_pfn[] with the limit from
zone_movable_limit[].

change log:
Move find_usable_zone_for_movable() to free_area_init_nodes()
so that sanitize_zone_movable_limit() in patch 3 could use
initialized movable_zone.

Reported-by: Wu Jianguo wujian...@huawei.com

Signed-off-by: Tang Chen tangc...@cn.fujitsu.com
Reviewed-by: Wen Congyang we...@cn.fujitsu.com
Reviewed-by: Lai Jiangshan la...@cn.fujitsu.com
Tested-by: Lin Feng linf...@cn.fujitsu.com
---
 mm/page_alloc.c |   28 +---
 1 files changed, 25 insertions(+), 3 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 52c368e..00fa67d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4839,9 +4839,17 @@ static void __init find_zone_movable_pfns_for_nodes(void)
required_kernelcore = max(required_kernelcore, corepages);
}
 
-   /* If kernelcore was not specified, there is no ZONE_MOVABLE */
-   if (!required_kernelcore)
+   /*
+* If neither kernelcore/movablecore nor movablecore_map is specified,
+* there is no ZONE_MOVABLE. But if movablecore_map is specified, the
+* start pfn of ZONE_MOVABLE has been stored in zone_movable_limit[].
+*/
+   if (!required_kernelcore) {
+   if (movablecore_map.nr_map)
+   memcpy(zone_movable_pfn, zone_movable_limit,
+   sizeof(zone_movable_pfn));
goto out;
+   }
 
/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
@@ -4871,10 +4879,24 @@ restart:
for_each_mem_pfn_range(i, nid, start_pfn, end_pfn, NULL) {
unsigned long size_pages;
 
+   /*
+* Find more memory for kernelcore in
+* [zone_movable_pfn[nid], zone_movable_limit[nid]).
+*/
start_pfn = max(start_pfn, zone_movable_pfn[nid]);
if (start_pfn >= end_pfn)
continue;
 
+   if (zone_movable_limit[nid]) {
+   end_pfn = min(end_pfn, zone_movable_limit[nid]);
+   /* No range left for kernelcore in this node */
+   if (start_pfn >= end_pfn) {
+   zone_movable_pfn[nid] =
+   zone_movable_limit[nid];
+   break;
+   }
+   }
+
/* Account for what is only usable for kernelcore */
if (start_pfn < usable_startpfn) {
unsigned long kernel_pages;
@@ -4934,12 +4956,12 @@ restart:
if (usable_nodes && required_kernelcore > usable_nodes)
goto restart;
 
+out:
/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
for (nid = 0; nid < MAX_NUMNODES; nid++)
zone_movable_pfn[nid] =
roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
 
-out:
/* restore the node_state */
node_states[N_HIGH_MEMORY] = saved_node_state;
 }
-- 
1.7.1

--
To unsubscribe from this list: send the line unsubscribe linux-kernel in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/