[PATCH v8 38/46] x86, mm: use limit_pfn for end pfn

From: Yinghai Lu
Date: 2012-11-16
Compute limit_pfn once with PFN_DOWN(end) and use it as the end pfn,
instead of shifting end every time it is needed.

Signed-off-by: Yinghai Lu <ying...@kernel.org>
---
 arch/x86/mm/init.c |   20 +++++++++++---------
 1 files changed, 11 insertions(+), 9 deletions(-)
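
Note for reviewers (not part of the patch): the conversion relies on
round_down() over pfns giving the same result as rounding the byte
address first and shifting afterwards, which holds because PMD_SIZE and
PUD_SIZE are whole multiples of PAGE_SIZE. A minimal userspace sketch of
that identity, with the constants defined locally as assumptions rather
than taken from kernel headers:

	#include <assert.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PMD_SIZE	(1UL << 21)		/* 2M */
	#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
	/* simplified power-of-two round_down */
	#define round_down(x, y)	((x) & ~((y) - 1))

	int main(void)
	{
		unsigned long end = 0x7fff000UL;	/* arbitrary, not 2M aligned */
		unsigned long limit_pfn = PFN_DOWN(end);

		/* old form: round the byte address, then shift to a pfn */
		unsigned long old_pfn = PFN_DOWN(round_down(end, PMD_SIZE));
		/* new form: shift once up front, then round the pfn */
		unsigned long new_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));

		assert(old_pfn == new_pfn);
		printf("limit_pfn=%#lx 2M-aligned end_pfn=%#lx\n",
		       limit_pfn, new_pfn);
		return 0;
	}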

diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 4bf1c53..f410dc6 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -203,10 +203,12 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
 unsigned long start,
 unsigned long end)
 {
-   unsigned long start_pfn, end_pfn;
+   unsigned long start_pfn, end_pfn, limit_pfn;
unsigned long pfn;
int i;
 
+   limit_pfn = PFN_DOWN(end);
+
/* head if not big page alignment ? */
pfn = start_pfn = PFN_DOWN(start);
 #ifdef CONFIG_X86_32
@@ -223,8 +225,8 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
 #else /* CONFIG_X86_64 */
end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
 #endif
-   if (end_pfn > PFN_DOWN(end))
-   end_pfn = PFN_DOWN(end);
+   if (end_pfn > limit_pfn)
+   end_pfn = limit_pfn;
if (start_pfn < end_pfn) {
nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
pfn = end_pfn;
@@ -233,11 +235,11 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
/* big page (2M) range */
start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
 #ifdef CONFIG_X86_32
-   end_pfn = PFN_DOWN(round_down(end, PMD_SIZE));
+   end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
 #else /* CONFIG_X86_64 */
end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
-   if (end_pfn > PFN_DOWN(round_down(end, PMD_SIZE)))
-   end_pfn = PFN_DOWN(round_down(end, PMD_SIZE));
+   if (end_pfn > round_down(limit_pfn, PFN_DOWN(PMD_SIZE)))
+   end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
 #endif
 
if (start_pfn < end_pfn) {
@@ -249,7 +251,7 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
 #ifdef CONFIG_X86_64
/* big page (1G) range */
start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
-   end_pfn = PFN_DOWN(round_down(end, PUD_SIZE));
+   end_pfn = round_down(limit_pfn, PFN_DOWN(PUD_SIZE));
if (start_pfn < end_pfn) {
nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
page_size_mask &
@@ -259,7 +261,7 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
 
/* tail is not big page (1G) alignment */
start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
-   end_pfn = PFN_DOWN(round_down(end, PMD_SIZE));
+   end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
if (start_pfn < end_pfn) {
nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
@@ -269,7 +271,7 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
 
	/* tail is not big page (2M) alignment */
	start_pfn = pfn;
-	end_pfn = PFN_DOWN(end);
+	end_pfn = limit_pfn;
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
 
	/* try to merge same page size and continuous */
-- 
1.7.7
