[PATCH 05/29] mm: emergency pool
Provide means to reserve a specific amount of pages. The emergency pool is separated from the min watermark because ALLOC_HARDER and ALLOC_HIGH modify the watermark in a relative way and thus do not ensure a strict minimum. Signed-off-by: Peter Zijlstra <[EMAIL PROTECTED]> --- include/linux/mmzone.h |3 +- mm/page_alloc.c| 52 - mm/vmstat.c|6 ++--- 3 files changed, 48 insertions(+), 13 deletions(-) Index: linux-2.6-git/include/linux/mmzone.h === --- linux-2.6-git.orig/include/linux/mmzone.h 2007-02-12 09:40:51.0 +0100 +++ linux-2.6-git/include/linux/mmzone.h2007-02-12 11:13:58.0 +0100 @@ -178,7 +178,7 @@ enum zone_type { struct zone { /* Fields commonly accessed by the page allocator */ - unsigned long pages_min, pages_low, pages_high; + unsigned long pages_emerg, pages_min, pages_low, pages_high; /* * We don't know if the memory that we're going to allocate will be freeable * or/and it will be released eventually, so to avoid totally wasting several @@ -562,6 +562,7 @@ int sysctl_min_unmapped_ratio_sysctl_han struct file *, void __user *, size_t *, loff_t *); int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); +void adjust_memalloc_reserve(int pages); #include <linux/topology.h> /* Returns the number of the current Node. 
*/ Index: linux-2.6-git/mm/page_alloc.c === --- linux-2.6-git.orig/mm/page_alloc.c 2007-02-12 11:13:35.0 +0100 +++ linux-2.6-git/mm/page_alloc.c 2007-02-12 11:14:16.0 +0100 @@ -101,6 +101,7 @@ static char * const zone_names[MAX_NR_ZO static DEFINE_SPINLOCK(min_free_lock); int min_free_kbytes = 1024; +int var_free_kbytes; unsigned long __meminitdata nr_kernel_pages; unsigned long __meminitdata nr_all_pages; @@ -995,7 +996,8 @@ int zone_watermark_ok(struct zone *z, in if (alloc_flags & ALLOC_HARDER) min -= min / 4; - if (free_pages <= min + z->lowmem_reserve[classzone_idx]) + if (free_pages <= min + z->lowmem_reserve[classzone_idx] + + z->pages_emerg) return 0; for (o = 0; o < order; o++) { /* At the next order, this order's pages become unavailable */ @@ -1348,8 +1350,8 @@ nofail_alloc: nopage: if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) { printk(KERN_WARNING "%s: page allocation failure." - " order:%d, mode:0x%x\n", - p->comm, order, gfp_mask); + " order:%d, mode:0x%x, alloc_flags:0x%x, pflags:0x%lx\n", + p->comm, order, gfp_mask, alloc_flags, p->flags); dump_stack(); show_mem(); } @@ -1562,9 +1564,9 @@ void show_free_areas(void) "\n", zone->name, K(zone_page_state(zone, NR_FREE_PAGES)), - K(zone->pages_min), - K(zone->pages_low), - K(zone->pages_high), + K(zone->pages_emerg + zone->pages_min), + K(zone->pages_emerg + zone->pages_low), + K(zone->pages_emerg + zone->pages_high), K(zone_page_state(zone, NR_ACTIVE)), K(zone_page_state(zone, NR_INACTIVE)), K(zone->present_pages), @@ -3000,7 +3002,7 @@ static void calculate_totalreserve_pages } /* we treat pages_high as reserved pages. 
*/ - max += zone->pages_high; + max += zone->pages_high + zone->pages_emerg; if (max > zone->present_pages) max = zone->present_pages; @@ -3057,7 +3059,8 @@ static void setup_per_zone_lowmem_reserv */ static void __setup_per_zone_pages_min(void) { - unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); + unsigned pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); + unsigned pages_emerg = var_free_kbytes >> (PAGE_SHIFT - 10); unsigned long lowmem_pages = 0; struct zone *zone; unsigned long flags; @@ -3069,11 +3072,13 @@ static void __setup_per_zone_pages_min(v } for_each_zone(zone) { - u64 tmp; + u64 tmp, tmp_emerg; spin_lock_irqsave(&zone->lru_lock, flags); tmp = (u64)pages_min * zone->present_pages; do_div(tmp, lowmem_pages); + tmp_emerg = (u64)pages_emerg * zone->present_pages; + do_div(tmp_emerg, lowmem_pages); if
[PATCH 05/29] mm: emergency pool
Provide means to reserve a specific amount of pages. The emergency pool is separated from the min watermark because ALLOC_HARDER and ALLOC_HIGH modify the watermark in a relative way and thus do not ensure a strict minimum. Signed-off-by: Peter Zijlstra <[EMAIL PROTECTED]> --- include/linux/mmzone.h |3 +- mm/page_alloc.c| 52 - mm/vmstat.c|6 ++--- 3 files changed, 48 insertions(+), 13 deletions(-) Index: linux-2.6-git/include/linux/mmzone.h === --- linux-2.6-git.orig/include/linux/mmzone.h 2007-02-12 09:40:51.0 +0100 +++ linux-2.6-git/include/linux/mmzone.h2007-02-12 11:13:58.0 +0100 @@ -178,7 +178,7 @@ enum zone_type { struct zone { /* Fields commonly accessed by the page allocator */ - unsigned long pages_min, pages_low, pages_high; + unsigned long pages_emerg, pages_min, pages_low, pages_high; /* * We don't know if the memory that we're going to allocate will be freeable * or/and it will be released eventually, so to avoid totally wasting several @@ -562,6 +562,7 @@ int sysctl_min_unmapped_ratio_sysctl_han struct file *, void __user *, size_t *, loff_t *); int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); +void adjust_memalloc_reserve(int pages); #include <linux/topology.h> /* Returns the number of the current Node. 
*/ Index: linux-2.6-git/mm/page_alloc.c === --- linux-2.6-git.orig/mm/page_alloc.c 2007-02-12 11:13:35.0 +0100 +++ linux-2.6-git/mm/page_alloc.c 2007-02-12 11:14:16.0 +0100 @@ -101,6 +101,7 @@ static char * const zone_names[MAX_NR_ZO static DEFINE_SPINLOCK(min_free_lock); int min_free_kbytes = 1024; +int var_free_kbytes; unsigned long __meminitdata nr_kernel_pages; unsigned long __meminitdata nr_all_pages; @@ -995,7 +996,8 @@ int zone_watermark_ok(struct zone *z, in if (alloc_flags & ALLOC_HARDER) min -= min / 4; - if (free_pages <= min + z->lowmem_reserve[classzone_idx]) + if (free_pages <= min + z->lowmem_reserve[classzone_idx] + + z->pages_emerg) return 0; for (o = 0; o < order; o++) { /* At the next order, this order's pages become unavailable */ @@ -1348,8 +1350,8 @@ nofail_alloc: nopage: if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) { printk(KERN_WARNING "%s: page allocation failure." - " order:%d, mode:0x%x\n", - p->comm, order, gfp_mask); + " order:%d, mode:0x%x, alloc_flags:0x%x, pflags:0x%lx\n", + p->comm, order, gfp_mask, alloc_flags, p->flags); dump_stack(); show_mem(); } @@ -1562,9 +1564,9 @@ void show_free_areas(void) "\n", zone->name, K(zone_page_state(zone, NR_FREE_PAGES)), - K(zone->pages_min), - K(zone->pages_low), - K(zone->pages_high), + K(zone->pages_emerg + zone->pages_min), + K(zone->pages_emerg + zone->pages_low), + K(zone->pages_emerg + zone->pages_high), K(zone_page_state(zone, NR_ACTIVE)), K(zone_page_state(zone, NR_INACTIVE)), K(zone->present_pages), @@ -3000,7 +3002,7 @@ static void calculate_totalreserve_pages } /* we treat pages_high as reserved pages. 
*/ - max += zone->pages_high; + max += zone->pages_high + zone->pages_emerg; if (max > zone->present_pages) max = zone->present_pages; @@ -3057,7 +3059,8 @@ static void setup_per_zone_lowmem_reserv */ static void __setup_per_zone_pages_min(void) { - unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); + unsigned pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); + unsigned pages_emerg = var_free_kbytes >> (PAGE_SHIFT - 10); unsigned long lowmem_pages = 0; struct zone *zone; unsigned long flags; @@ -3069,11 +3072,13 @@ static void __setup_per_zone_pages_min(v } for_each_zone(zone) { - u64 tmp; + u64 tmp, tmp_emerg; spin_lock_irqsave(&zone->lru_lock, flags); tmp = (u64)pages_min * zone->present_pages; do_div(tmp, lowmem_pages); + tmp_emerg = (u64)pages_emerg * zone->present_pages; + do_div(tmp_emerg, lowmem_pages); if (is_highmem(zone)) {