[PATCH 08/33] mm: emergency pool

2007-10-30, Peter Zijlstra
Provide a means to reserve a specific number of pages.

The emergency pool is separated from the min watermark because ALLOC_HARDER
and ALLOC_HIGH modify the watermark in a relative way and thus do not ensure
a strict minimum.
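
For illustration (not part of the patch): zone_watermark_ok() lowers the
watermark for these flags roughly as sketched below, so all they guarantee
is a fraction of whatever pages_min happens to be. ALLOC_HIGH is set for
__GFP_HIGH allocations and ALLOC_HARDER for rt tasks and atomic (!wait)
allocations; the numbers are made up for the example:

	unsigned long min = 1024;		/* example zone->pages_min */

	if (alloc_flags & ALLOC_HIGH)		/* __GFP_HIGH */
		min -= min / 2;			/* floor now 512 pages */
	if (alloc_flags & ALLOC_HARDER)		/* rt task or !wait */
		min -= min / 4;			/* floor now 384 pages */

Since the floor can drop to 3/8 of pages_min depending on who is
allocating, a strict reserve has to live outside this scaling; the
zone_watermark_ok() hunk below adds zone->pages_emerg after the flag
adjustments for exactly that reason.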

Signed-off-by: Peter Zijlstra <[EMAIL PROTECTED]>
---
 include/linux/mmzone.h |    3 +
 mm/page_alloc.c        |   82 +++--
 mm/vmstat.c            |    6 +--
 3 files changed, 78 insertions(+), 13 deletions(-)

Index: linux-2.6/include/linux/mmzone.h
===================================================================
--- linux-2.6.orig/include/linux/mmzone.h
+++ linux-2.6/include/linux/mmzone.h
@@ -213,7 +213,7 @@ enum zone_type {
 
 struct zone {
/* Fields commonly accessed by the page allocator */
-   unsigned long   pages_min, pages_low, pages_high;
+   unsigned long   pages_emerg, pages_min, pages_low, pages_high;
/*
 * We don't know if the memory that we're going to allocate will be freeable
 * or/and it will be released eventually, so to avoid totally wasting several
@@ -682,6 +682,7 @@ int sysctl_min_unmapped_ratio_sysctl_han
struct file *, void __user *, size_t *, loff_t *);
 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
struct file *, void __user *, size_t *, loff_t *);
+int adjust_memalloc_reserve(int pages);
 
 extern int numa_zonelist_order_handler(struct ctl_table *, int,
struct file *, void __user *, size_t *, loff_t *);
Index: linux-2.6/mm/page_alloc.c
===================================================================
--- linux-2.6.orig/mm/page_alloc.c
+++ linux-2.6/mm/page_alloc.c
@@ -118,6 +118,8 @@ static char * const zone_names[MAX_NR_ZO
 
 static DEFINE_SPINLOCK(min_free_lock);
 int min_free_kbytes = 1024;
+static DEFINE_MUTEX(var_free_mutex);
+int var_free_kbytes;
 
 unsigned long __meminitdata nr_kernel_pages;
 unsigned long __meminitdata nr_all_pages;
@@ -1252,7 +1254,7 @@ int zone_watermark_ok(struct zone *z, in
if (alloc_flags & ALLOC_HARDER)
min -= min / 4;
 
-   if (free_pages <= min + z->lowmem_reserve[classzone_idx])
+   if (free_pages <= min + z->lowmem_reserve[classzone_idx] + z->pages_emerg)
return 0;
for (o = 0; o < order; o++) {
/* At the next order, this order's pages become unavailable */
@@ -1733,8 +1735,8 @@ nofail_alloc:
 nopage:
if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
printk(KERN_WARNING "%s: page allocation failure."
-   " order:%d, mode:0x%x\n",
-   p->comm, order, gfp_mask);
+   " order:%d, mode:0x%x, alloc_flags:0x%x, pflags:0x%x\n",
+   p->comm, order, gfp_mask, alloc_flags, p->flags);
dump_stack();
show_mem();
}
@@ -1952,9 +1954,9 @@ void show_free_areas(void)
"\n",
zone->name,
K(zone_page_state(zone, NR_FREE_PAGES)),
-   K(zone->pages_min),
-   K(zone->pages_low),
-   K(zone->pages_high),
+   K(zone->pages_emerg + zone->pages_min),
+   K(zone->pages_emerg + zone->pages_low),
+   K(zone->pages_emerg + zone->pages_high),
K(zone_page_state(zone, NR_ACTIVE)),
K(zone_page_state(zone, NR_INACTIVE)),
K(zone->present_pages),
@@ -4113,7 +4115,7 @@ static void calculate_totalreserve_pages
}
 
/* we treat pages_high as reserved pages. */
-   max += zone->pages_high;
+   max += zone->pages_high + zone->pages_emerg;
 
if (max > zone->present_pages)
max = zone->present_pages;
@@ -4170,7 +4172,8 @@ static void setup_per_zone_lowmem_reserv
  */
 static void __setup_per_zone_pages_min(void)
 {
-   unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
+   unsigned pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
+   unsigned pages_emerg = var_free_kbytes >> (PAGE_SHIFT - 10);
unsigned long lowmem_pages = 0;
struct zone *zone;
unsigned long flags;
@@ -4182,11 +4185,13 @@ static void __setup_per_zone_pages_min(v
}
 
for_each_zone(zone) {
-   u64 tmp;
+   u64 tmp, tmp_emerg;
 
spin_lock_irqsave(&zone->lru_lock, flags);
tmp = (u64)pages_min * zone->present_pages;
do_div(tmp, lowmem_pages);
+   tmp_emerg = (u64)pages_emerg * zone->present_pages;
+   do_div(tmp_emerg, lowmem_pages);
if (is_highmem(zone)) {
/*
 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
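
The body of adjust_memalloc_reserve() is not part of the hunks quoted
above, so the following is only a sketch of the intended usage. It
assumes the function lives in mm/page_alloc.c next to var_free_mutex,
that a negative total reserve is rejected, and that
__setup_per_zone_pages_min() is what redistributes the new reserve;
those details are assumptions, not taken from the patch:

	int adjust_memalloc_reserve(int pages)
	{
		int err = 0;

		mutex_lock(&var_free_mutex);
		/* keep the same kbyte units as min_free_kbytes */
		var_free_kbytes += pages << (PAGE_SHIFT - 10);
		if (var_free_kbytes < 0) {
			/* assumed: refuse to shrink the reserve below zero */
			var_free_kbytes -= pages << (PAGE_SHIFT - 10);
			err = -EINVAL;
		} else {
			__setup_per_zone_pages_min();	/* respread pages_emerg */
		}
		mutex_unlock(&var_free_mutex);
		return err;
	}

A user that needs N pages guaranteed would call adjust_memalloc_reserve(N)
and return them with adjust_memalloc_reserve(-N). Note how the
__setup_per_zone_pages_min() hunk spreads the reserve proportionally to
zone size, giving each zone pages_emerg * zone->present_pages /
lowmem_pages; a 256-page reserve over two zones with a 3:1 present_pages
split, for example, becomes 192 and 64 pages respectively.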
