Re: [PATCH 4/5] [PPC] update lmb for hotplug memory add/remove

2008-05-15 Thread Benjamin Herrenschmidt

On Thu, 2008-03-27 at 16:39 -0800, Badari Pulavarty wrote:
 The ppc kernel maintains information about logical memory blocks in the
 lmb.memory structure at boot time. It is not updated for hotplug memory
 add/remove. The hotplug memory notifier for memory add/remove now
 updates lmb.memory.
 
 This information is useful for the eHEA driver to find out the memory
 layout and holes.
 
 NOTE: No special locking is needed for lmb_add() and lmb_remove().
 Calls to these are serialized by the caller (pSeries_reconfig_chain).

My worry here is the lack of locking...

 Signed-off-by: Badari Pulavarty [EMAIL PROTECTED]
 ---
  arch/powerpc/platforms/pseries/hotplug-memory.c |   43 +++
  include/linux/lmb.h |3 -
  lib/lmb.c   |   66 
 
  3 files changed, 102 insertions(+), 10 deletions(-)
 
 Index: linux-2.6.25-rc3/lib/lmb.c
 ===
 --- linux-2.6.25-rc3.orig/lib/lmb.c   2008-03-05 10:44:29.0 -0800
 +++ linux-2.6.25-rc3/lib/lmb.c2008-03-05 10:44:56.0 -0800
 @@ -54,14 +54,13 @@ void lmb_dump_all(void)
  #endif /* DEBUG */
  }
  
 -static unsigned long __init lmb_addrs_overlap(u64 base1,
 - u64 size1, u64 base2, u64 size2)
 +static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
 + u64 size2)
  {
   return ((base1 < (base2+size2)) && (base2 < (base1+size1)));
  }
  
 -static long __init lmb_addrs_adjacent(u64 base1, u64 size1,
 - u64 base2, u64 size2)
 +static long lmb_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
  {
   if (base2 == base1 + size1)
   return 1;
 @@ -71,7 +70,7 @@ static long __init lmb_addrs_adjacent(u6
   return 0;
  }
  
 -static long __init lmb_regions_adjacent(struct lmb_region *rgn,
 +static long lmb_regions_adjacent(struct lmb_region *rgn,
   unsigned long r1, unsigned long r2)
  {
   u64 base1 = rgn->region[r1].base;
 @@ -82,7 +81,7 @@ static long __init lmb_regions_adjacent(
   return lmb_addrs_adjacent(base1, size1, base2, size2);
  }
  
 -static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r)
 +static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
  {
   unsigned long i;
  
 @@ -94,7 +93,7 @@ static void __init lmb_remove_region(str
  }
  
   /* Assumption: base addr of region 1 < base addr of region 2 */
 -static void __init lmb_coalesce_regions(struct lmb_region *rgn,
 +static void lmb_coalesce_regions(struct lmb_region *rgn,
   unsigned long r1, unsigned long r2)
  {
   rgn->region[r1].size += rgn->region[r2].size;
 @@ -129,7 +128,7 @@ void __init lmb_analyze(void)
  }
  
  /* This routine called with relocation disabled. */
 -static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
 +static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
  {
   unsigned long coalesced = 0;
   long adjacent, i;
 @@ -195,7 +194,7 @@ static long __init lmb_add_region(struct
  }
  
  /* This routine may be called with relocation disabled. */
 -long __init lmb_add(u64 base, u64 size)
 +long lmb_add(u64 base, u64 size)
  {
   struct lmb_region *_rgn = &(lmb.memory);
  
 @@ -207,6 +206,55 @@ long __init lmb_add(u64 base, u64 size)
  
  }
  
 +long lmb_remove(u64 base, u64 size)
 +{
  + struct lmb_region *rgn = &(lmb.memory);
 + u64 rgnbegin, rgnend;
 + u64 end = base + size;
 + int i;
 +
  + rgnbegin = rgnend = 0; /* suppress gcc warnings */
  +
  + /* Find the region where (base, size) belongs to */
  + for (i=0; i < rgn->cnt; i++) {
  + rgnbegin = rgn->region[i].base;
  + rgnend = rgnbegin + rgn->region[i].size;
  +
  + if ((rgnbegin <= base) && (end <= rgnend))
 + break;
 + }
 +
 + /* Didn't find the region */
  + if (i == rgn->cnt)
 + return -1;
 +
 + /* Check to see if we are removing entire region */
  + if ((rgnbegin == base) && (rgnend == end)) {
 + lmb_remove_region(rgn, i);
 + return 0;
 + }
 +
 + /* Check to see if region is matching at the front */
 + if (rgnbegin == base) {
  + rgn->region[i].base = end;
  + rgn->region[i].size -= size;
 + return 0;
 + }
 +
 + /* Check to see if the region is matching at the end */
 + if (rgnend == end) {
  + rgn->region[i].size -= size;
 + return 0;
 + }
 +
 + /*
  +  * We need to split the entry - adjust the current one to the
  +  * beginning of the hole and add the region after the hole.
  +  */
  + rgn->region[i].size = base - rgn->region[i].base;
 + return lmb_add_region(rgn, end, rgnend - end);
 +}
 +
  long __init lmb_reserve(u64 base, u64 size)
  {
   struct lmb_region *_rgn = &(lmb.reserved);
 Index: linux-2.6.25-rc3/arch/powerpc/platforms/pseries/hotplug-memory.c
 

Re: [PATCH 4/5] [PPC] update lmb for hotplug memory add/remove

2008-03-28 Thread Badari Pulavarty

Kumar Gala wrote:


On Mar 27, 2008, at 7:39 PM, Badari Pulavarty wrote:

The ppc kernel maintains information about logical memory blocks in the
lmb.memory structure at boot time. It is not updated for hotplug memory
add/remove. The hotplug memory notifier for memory add/remove now
updates lmb.memory.

This information is useful for the eHEA driver to find out the memory
layout and holes.

NOTE: No special locking is needed for lmb_add() and lmb_remove().
Calls to these are serialized by the caller (pSeries_reconfig_chain).

Signed-off-by: Badari Pulavarty [EMAIL PROTECTED]
---
arch/powerpc/platforms/pseries/hotplug-memory.c |   43 +++
include/linux/lmb.h |3 -
lib/lmb.c   |   66 


3 files changed, 102 insertions(+), 10 deletions(-)


How is lmb_remove different than lmb_alloc?

- k

lmb_remove() can be used to punch a hole into an existing memory block.
lmb_alloc() tries to allocate for a given alignment; I don't think it can
adjust the existing entries, can it?
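
As an illustration (a minimal sketch, not part of the patch; the helper
name example_forget_range() and the -EINVAL mapping are invented for the
example), a caller might punch such a hole like this:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/lmb.h>

/*
 * Sketch only: forget a hot-unplugged range.  If (base, size) sits in
 * the middle of an lmb.memory region, lmb_remove() shrinks that region
 * to end at 'base' and adds a new region starting after the hole.
 */
static int example_forget_range(u64 base, u64 size)
{
	/* lmb_remove() returns -1 when no single region covers the range */
	if (lmb_remove(base, size) < 0)
		return -EINVAL;
	return 0;
}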

Thanks,
Badari



[PATCH 4/5] [PPC] update lmb for hotplug memory add/remove

2008-03-27 Thread Badari Pulavarty
The ppc kernel maintains information about logical memory blocks in the
lmb.memory structure at boot time. It is not updated for hotplug memory
add/remove. The hotplug memory notifier for memory add/remove now
updates lmb.memory.

This information is useful for the eHEA driver to find out the memory
layout and holes.

NOTE: No special locking is needed for lmb_add() and lmb_remove().
Calls to these are serialized by the caller (pSeries_reconfig_chain).

Signed-off-by: Badari Pulavarty [EMAIL PROTECTED]
---
 arch/powerpc/platforms/pseries/hotplug-memory.c |   43 +++
 include/linux/lmb.h |3 -
 lib/lmb.c   |   66 
 3 files changed, 102 insertions(+), 10 deletions(-)

Index: linux-2.6.25-rc3/lib/lmb.c
===
--- linux-2.6.25-rc3.orig/lib/lmb.c 2008-03-05 10:44:29.0 -0800
+++ linux-2.6.25-rc3/lib/lmb.c  2008-03-05 10:44:56.0 -0800
@@ -54,14 +54,13 @@ void lmb_dump_all(void)
 #endif /* DEBUG */
 }
 
-static unsigned long __init lmb_addrs_overlap(u64 base1,
-   u64 size1, u64 base2, u64 size2)
+static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
+   u64 size2)
 {
	return ((base1 < (base2+size2)) && (base2 < (base1+size1)));
 }
 
-static long __init lmb_addrs_adjacent(u64 base1, u64 size1,
-   u64 base2, u64 size2)
+static long lmb_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
 {
if (base2 == base1 + size1)
return 1;
@@ -71,7 +70,7 @@ static long __init lmb_addrs_adjacent(u6
return 0;
 }
 
-static long __init lmb_regions_adjacent(struct lmb_region *rgn,
+static long lmb_regions_adjacent(struct lmb_region *rgn,
unsigned long r1, unsigned long r2)
 {
	u64 base1 = rgn->region[r1].base;
@@ -82,7 +81,7 @@ static long __init lmb_regions_adjacent(
return lmb_addrs_adjacent(base1, size1, base2, size2);
 }
 
-static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r)
+static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
 {
unsigned long i;
 
@@ -94,7 +93,7 @@ static void __init lmb_remove_region(str
 }
 
 /* Assumption: base addr of region 1 < base addr of region 2 */
-static void __init lmb_coalesce_regions(struct lmb_region *rgn,
+static void lmb_coalesce_regions(struct lmb_region *rgn,
unsigned long r1, unsigned long r2)
 {
	rgn->region[r1].size += rgn->region[r2].size;
@@ -129,7 +128,7 @@ void __init lmb_analyze(void)
 }
 
 /* This routine called with relocation disabled. */
-static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
+static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
 {
unsigned long coalesced = 0;
long adjacent, i;
@@ -195,7 +194,7 @@ static long __init lmb_add_region(struct
 }
 
 /* This routine may be called with relocation disabled. */
-long __init lmb_add(u64 base, u64 size)
+long lmb_add(u64 base, u64 size)
 {
	struct lmb_region *_rgn = &(lmb.memory);
 
@@ -207,6 +206,55 @@ long __init lmb_add(u64 base, u64 size)
 
 }
 
+long lmb_remove(u64 base, u64 size)
+{
+   struct lmb_region *rgn = &(lmb.memory);
+   u64 rgnbegin, rgnend;
+   u64 end = base + size;
+   int i;
+
+   rgnbegin = rgnend = 0; /* suppress gcc warnings */
+
+   /* Find the region where (base, size) belongs to */
+   for (i=0; i < rgn->cnt; i++) {
+   rgnbegin = rgn->region[i].base;
+   rgnend = rgnbegin + rgn->region[i].size;
+
+   if ((rgnbegin <= base) && (end <= rgnend))
+   break;
+   }
+
+   /* Didn't find the region */
+   if (i == rgn->cnt)
+   return -1;
+
+   /* Check to see if we are removing entire region */
+   if ((rgnbegin == base) && (rgnend == end)) {
+   lmb_remove_region(rgn, i);
+   return 0;
+   }
+
+   /* Check to see if region is matching at the front */
+   if (rgnbegin == base) {
+   rgn->region[i].base = end;
+   rgn->region[i].size -= size;
+   return 0;
+   }
+
+   /* Check to see if the region is matching at the end */
+   if (rgnend == end) {
+   rgn->region[i].size -= size;
+   return 0;
+   }
+
+   /*
+* We need to split the entry - adjust the current one to the
+* beginning of the hole and add the region after the hole.
+*/
+   rgn->region[i].size = base - rgn->region[i].base;
+   return lmb_add_region(rgn, end, rgnend - end);
+}
+
 long __init lmb_reserve(u64 base, u64 size)
 {
	struct lmb_region *_rgn = &(lmb.reserved);
Index: linux-2.6.25-rc3/arch/powerpc/platforms/pseries/hotplug-memory.c
===
--- linux-2.6.25-rc3.orig/arch/powerpc/platforms/pseries/hotplug-memory.c  

Re: [PATCH 4/5] [PPC] update lmb for hotplug memory add/remove

2008-03-27 Thread Kumar Gala


On Mar 27, 2008, at 7:39 PM, Badari Pulavarty wrote:

The ppc kernel maintains information about logical memory blocks in the
lmb.memory structure at boot time. It is not updated for hotplug memory
add/remove. The hotplug memory notifier for memory add/remove now
updates lmb.memory.

This information is useful for the eHEA driver to find out the memory
layout and holes.

NOTE: No special locking is needed for lmb_add() and lmb_remove().
Calls to these are serialized by the caller (pSeries_reconfig_chain).

Signed-off-by: Badari Pulavarty [EMAIL PROTECTED]
---
arch/powerpc/platforms/pseries/hotplug-memory.c |   43 +++
include/linux/lmb.h |3 -
lib/lmb.c   |   66 ++++

3 files changed, 102 insertions(+), 10 deletions(-)


How is lmb_remove different than lmb_alloc?

- k


[PATCH 4/5] [PPC] update lmb for hotplug memory add/remove

2008-03-06 Thread Badari Pulavarty
The ppc kernel maintains information about logical memory blocks in the
lmb.memory structure at boot time. It is not updated for hotplug memory
add/remove. The hotplug memory notifier for memory add/remove now
updates lmb.memory.

This information is useful for the eHEA driver to find out the memory
layout and holes.

NOTE: Calls to lmb_add() and lmb_remove() need to be serialized by
the caller. In this case the blocking notifier chain
(pSeries_reconfig_chain) does that.
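
The hotplug-memory.c hunk is truncated below, so as an illustration only
(a sketch under the serialization assumption above; pseries_get_mem_range()
is a hypothetical helper, not from the patch), a pSeries reconfig notifier
that keeps lmb.memory in sync could look roughly like this:

#include <linux/notifier.h>
#include <linux/lmb.h>
#include <asm/pseries_reconfig.h>

/* Hypothetical helper (not from the patch): extract the base address and
 * size of the memory node being added or removed. */
static int pseries_get_mem_range(void *node, u64 *base, u64 *size);

static int pseries_memory_notifier(struct notifier_block *nb,
				   unsigned long action, void *node)
{
	u64 base, size;
	int err = NOTIFY_OK;

	if (pseries_get_mem_range(node, &base, &size))
		return NOTIFY_BAD;

	switch (action) {
	case PSERIES_RECONFIG_ADD:
		if (lmb_add(base, size) < 0)
			err = NOTIFY_BAD;
		break;
	case PSERIES_RECONFIG_REMOVE:
		if (lmb_remove(base, size) < 0)
			err = NOTIFY_BAD;
		break;
	}

	/* No extra locking here: calls arrive through the blocking
	 * pSeries_reconfig_chain, which already serializes them. */
	return err;
}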

Signed-off-by: Badari Pulavarty [EMAIL PROTECTED]
---
 arch/powerpc/platforms/pseries/hotplug-memory.c |   43 +++
 include/linux/lmb.h |3 -
 lib/lmb.c   |   66 
 3 files changed, 102 insertions(+), 10 deletions(-)

Index: linux-2.6.25-rc3/lib/lmb.c
===
--- linux-2.6.25-rc3.orig/lib/lmb.c 2008-03-05 10:44:29.0 -0800
+++ linux-2.6.25-rc3/lib/lmb.c  2008-03-05 10:44:56.0 -0800
@@ -54,14 +54,13 @@ void lmb_dump_all(void)
 #endif /* DEBUG */
 }
 
-static unsigned long __init lmb_addrs_overlap(u64 base1,
-   u64 size1, u64 base2, u64 size2)
+static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
+   u64 size2)
 {
	return ((base1 < (base2+size2)) && (base2 < (base1+size1)));
 }
 
-static long __init lmb_addrs_adjacent(u64 base1, u64 size1,
-   u64 base2, u64 size2)
+static long lmb_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
 {
if (base2 == base1 + size1)
return 1;
@@ -71,7 +70,7 @@ static long __init lmb_addrs_adjacent(u6
return 0;
 }
 
-static long __init lmb_regions_adjacent(struct lmb_region *rgn,
+static long lmb_regions_adjacent(struct lmb_region *rgn,
unsigned long r1, unsigned long r2)
 {
	u64 base1 = rgn->region[r1].base;
@@ -82,7 +81,7 @@ static long __init lmb_regions_adjacent(
return lmb_addrs_adjacent(base1, size1, base2, size2);
 }
 
-static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r)
+static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
 {
unsigned long i;
 
@@ -94,7 +93,7 @@ static void __init lmb_remove_region(str
 }
 
 /* Assumption: base addr of region 1 < base addr of region 2 */
-static void __init lmb_coalesce_regions(struct lmb_region *rgn,
+static void lmb_coalesce_regions(struct lmb_region *rgn,
unsigned long r1, unsigned long r2)
 {
	rgn->region[r1].size += rgn->region[r2].size;
@@ -129,7 +128,7 @@ void __init lmb_analyze(void)
 }
 
 /* This routine called with relocation disabled. */
-static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
+static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
 {
unsigned long coalesced = 0;
long adjacent, i;
@@ -195,7 +194,7 @@ static long __init lmb_add_region(struct
 }
 
 /* This routine may be called with relocation disabled. */
-long __init lmb_add(u64 base, u64 size)
+long lmb_add(u64 base, u64 size)
 {
	struct lmb_region *_rgn = &(lmb.memory);
 
@@ -207,6 +206,55 @@ long __init lmb_add(u64 base, u64 size)
 
 }
 
+long lmb_remove(u64 base, u64 size)
+{
+   struct lmb_region *rgn = &(lmb.memory);
+   u64 rgnbegin, rgnend;
+   u64 end = base + size;
+   int i;
+
+   rgnbegin = rgnend = 0; /* suppress gcc warnings */
+
+   /* Find the region where (base, size) belongs to */
+   for (i=0; i < rgn->cnt; i++) {
+   rgnbegin = rgn->region[i].base;
+   rgnend = rgnbegin + rgn->region[i].size;
+
+   if ((rgnbegin <= base) && (end <= rgnend))
+   break;
+   }
+
+   /* Didn't find the region */
+   if (i == rgn->cnt)
+   return -1;
+
+   /* Check to see if we are removing entire region */
+   if ((rgnbegin == base) && (rgnend == end)) {
+   lmb_remove_region(rgn, i);
+   return 0;
+   }
+
+   /* Check to see if region is matching at the front */
+   if (rgnbegin == base) {
+   rgn->region[i].base = end;
+   rgn->region[i].size -= size;
+   return 0;
+   }
+
+   /* Check to see if the region is matching at the end */
+   if (rgnend == end) {
+   rgn->region[i].size -= size;
+   return 0;
+   }
+
+   /*
+* We need to split the entry - adjust the current one to the
+* beginning of the hole and add the region after the hole.
+*/
+   rgn->region[i].size = base - rgn->region[i].base;
+   return lmb_add_region(rgn, end, rgnend - end);
+}
+
 long __init lmb_reserve(u64 base, u64 size)
 {
	struct lmb_region *_rgn = &(lmb.reserved);
Index: linux-2.6.25-rc3/arch/powerpc/platforms/pseries/hotplug-memory.c
===
---