The PLMC memory controller. There is only one change in mm/vmscan.c, just to remove a comment; otherwise patch1+patch2 do not compile.
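For reviewers who want the accounting model at a glance, below is a minimal sketch (plain user-space C, not part of the patch) of the idea behind rg_mem_add_page(), rg_mem_remove_page() and check_guarantee(): each page carries a bitmap of the groups it is charged to, each group counts its pages in use, and a global mask remembers which groups are still at or below their guaranteed minimum so reclaim can skip their pages. The names used here (group, page_acct, guarantee_mask, add_page, remove_page) are simplified stand-ins, not the kernel identifiers.

/*
 * Illustrative sketch only -- NOT part of the patch.  Models the
 * per-page bitmap accounting and the guarantee mask in user space.
 */
#include <stdio.h>

#define GROUP_A 0x1UL

struct group {
        unsigned long bit_id;           /* this group's bit in the page bitmap */
        long pg_inuse;                  /* pages currently charged to the group */
        long pg_min_shares;             /* guaranteed minimum, in pages */
};

struct page_acct {
        unsigned long rg_bitmap;        /* groups this page is charged to */
};

static unsigned long guarantee_mask;    /* groups still under their guarantee */

static void check_guarantee(struct group *g)
{
        if (g->pg_inuse <= g->pg_min_shares)
                guarantee_mask |= g->bit_id;    /* keep this group's pages */
        else
                guarantee_mask &= ~g->bit_id;   /* group is above its minimum */
}

static void add_page(struct page_acct *p, struct group *g)
{
        if (p->rg_bitmap & g->bit_id)
                return;                         /* already charged once */
        p->rg_bitmap |= g->bit_id;
        g->pg_inuse++;
        check_guarantee(g);
}

static void remove_page(struct page_acct *p, struct group *g)
{
        if (!(p->rg_bitmap & g->bit_id))
                return;
        p->rg_bitmap &= ~g->bit_id;
        g->pg_inuse--;
        check_guarantee(g);
}

int main(void)
{
        struct group a = { .bit_id = GROUP_A, .pg_min_shares = 2 };
        struct page_acct p = { 0 };

        add_page(&p, &a);
        /* reclaim would skip this page: its bitmap intersects guarantee_mask */
        printf("inuse=%ld protected=%s\n", a.pg_inuse,
               (p.rg_bitmap & guarantee_mask) ? "yes" : "no");
        remove_page(&p, &a);
        printf("inuse=%ld\n", a.pg_inuse);
        return 0;
}

The real code does the same bookkeeping on page->rg_bitmap with atomic counters, under the zone lru_lock; the mm/vmscan.c hunk is where the mask (rgroup_guarantee there) turns into "do not reclaim" decisions.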
Signed-off-by: Patrick Le Dot <[EMAIL PROTECTED]>
---
 include/linux/mem_rc_inline.h |    8 -
 kernel/res_group/Makefile     |    2 
 kernel/res_group/memcore.c    |    4 
 kernel/res_group/memctlr.c    |  193 ++++++++++++++++++++++++++++++++++++++++++
 mm/vmscan.c                   |    2 
 5 files changed, 201 insertions(+), 8 deletions(-)

diff -Naurp a3/include/linux/mem_rc_inline.h b3/include/linux/mem_rc_inline.h
--- a3/include/linux/mem_rc_inline.h    2006-10-03 09:57:26.000000000 +0200
+++ b3/include/linux/mem_rc_inline.h    2006-10-03 09:55:24.000000000 +0200
@@ -52,12 +52,12 @@ static inline void res_group_inc_active_
                 ?: mem_root_res_group;
         if (res == NULL)
                 return;
-        // rg_mem_add_page(page, res);
+        rg_mem_add_page(page, res);
 }
 
 static inline void res_group_dec_active_list(struct page *page)
 {
-        // rg_mem_remove_page(page, NULL);
+        rg_mem_remove_page(page, NULL);
 }
 
 static inline void res_group_inc_inactive_list(struct page *page)
@@ -67,12 +67,12 @@ static inline void res_group_inc_inactiv
                 ?: mem_root_res_group;
         if (res == NULL)
                 return;
-        // rg_mem_add_page(page, res);
+        rg_mem_add_page(page, res);
 }
 
 static inline void res_group_dec_inactive_list(struct page *page)
 {
-        // rg_mem_remove_page(page, NULL);
+        rg_mem_remove_page(page, NULL);
 }
 
 static inline void res_group_page_init(struct page *page)
diff -Naurp a3/kernel/res_group/Makefile b3/kernel/res_group/Makefile
--- a3/kernel/res_group/Makefile        2006-10-03 09:57:26.000000000 +0200
+++ b3/kernel/res_group/Makefile        2006-10-03 09:55:24.000000000 +0200
@@ -1,4 +1,4 @@
 obj-y = res_group.o shares.o task.o
 obj-$(CONFIG_RES_GROUPS_NUMTASKS) += numtasks.o
-obj-$(CONFIG_RES_GROUPS_MEM_RC) += memcore.o
+obj-$(CONFIG_RES_GROUPS_MEM_RC) += memcore.o memctlr.o
 obj-$(CONFIG_RGCS) += rgcs.o
diff -Naurp a3/kernel/res_group/memcore.c b3/kernel/res_group/memcore.c
--- a3/kernel/res_group/memcore.c       2006-10-03 09:57:26.000000000 +0200
+++ b3/kernel/res_group/memcore.c       2006-10-03 09:55:24.000000000 +0200
@@ -81,7 +81,7 @@ static void set_tot_pages(void)
 
 static void mem_res_init_one(struct mem_res_group *mem_res)
 {
-        mem_res->shares.min_shares = SHARE_UNSUPPORTED;
+        mem_res->shares.min_shares = SHARE_DONT_CARE;
         mem_res->shares.max_shares = SHARE_UNSUPPORTED;
         mem_res->shares.child_shares_divisor = SHARE_DEFAULT_DIVISOR;
         mem_res->shares.unused_min_shares = SHARE_DEFAULT_DIVISOR;
@@ -322,7 +322,7 @@ static void mem_move_task(struct task_st
                 return;
 
         mm = task->active_mm;
-        // rg_mem_migrate_mm(mm, oldres, newres);
+        rg_mem_migrate_mm(mm, oldres, newres);
 
         return;
 }
diff -Naurp a3/kernel/res_group/memctlr.c b3/kernel/res_group/memctlr.c
--- a3/kernel/res_group/memctlr.c       1970-01-01 01:00:00.000000000 +0100
+++ b3/kernel/res_group/memctlr.c       2006-10-03 09:55:24.000000000 +0200
@@ -0,0 +1,193 @@
+/* memctlr.c - Basic routines for the Resource Groups memory controller
+ *
+ * Copyright (C) Jiantao Kong, IBM Corp. 2003
+ *           (C) Chandra Seetharaman, IBM Corp. 2004
+ *           (C) Patrick Le Dot <[EMAIL PROTECTED]@bull.net> 2006
+ *
+ * Provides a Memory Resource controller for Resource Groups
+ *
+ * Latest version, more details at http://ckrm.sf.net
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/swap.h>
+#include <linux/pagemap.h>
+#include <linux/mem_rc_inline.h>
+
+static void check_guarantee(struct page *page, struct mem_res_group *res)
+{
+        if (atomic_read(&res->pg_inuse) <= res->pg_min_shares) {
+                if ((rgroup_guarantee & res->bit_id) == 0)
+                        rgroup_guarantee = (rgroup_guarantee ^ res->bit_id);
+        } else {
+                if ((rgroup_guarantee & res->bit_id) != 0)
+                        rgroup_guarantee = (rgroup_guarantee & ~res->bit_id);
+        }
+}
+
+void rg_mem_add_page(struct page *page, struct mem_res_group *res)
+{
+        if (!res)
+                return;
+        if (page->rg_bitmap & res->bit_id)
+                return;
+        page->rg_bitmap = (page->rg_bitmap ^ res->bit_id);
+        atomic_inc(&res->pg_inuse);
+        res->max_pg_used = max(res->max_pg_used, atomic_read(&res->pg_inuse));
+        if (res != mem_root_res_group)
+                check_guarantee(page, res);
+}
+
+static void dec_and_update(struct page *page, struct mem_res_group *res)
+{
+        page->rg_bitmap = (page->rg_bitmap & ~res->bit_id);
+        atomic_dec(&res->pg_inuse);
+        if (res != mem_root_res_group)
+                check_guarantee(page, res);
+}
+
+void rg_mem_remove_page(struct page *page, struct mem_res_group *res)
+{
+        struct mem_res_group *mem_rg;
+
+        if (page->rg_bitmap == 0)
+                return;
+        if (!res) {
+                // remove the page from all groups
+                list_for_each_entry(mem_rg, &mem_res_group_list, res_list) {
+                        if (page->rg_bitmap & mem_rg->bit_id)
+                                dec_and_update(page, mem_rg);
+                }
+        } else
+                dec_and_update(page, res);
+}
+
+static inline void
+rg_mem_change_page(struct page *page,
+                   struct mem_res_group *old_res,
+                   struct mem_res_group *new_res)
+{
+        if (!new_res) {
+                if (!mem_root_res_group)
+                        return;
+                new_res = mem_root_res_group;
+        }
+
+        if (old_res == new_res)
+                return;
+
+        if (old_res) {
+                rg_mem_remove_page(page, old_res);
+        }
+
+        rg_mem_add_page(page, new_res);
+}
+
+static inline int
+rg_mem_migrate_pmd(struct vm_area_struct* vma, pmd_t* pmdir,
+                   unsigned long address, unsigned long end,
+                   struct mem_res_group *old, struct mem_res_group *new)
+{
+        pte_t *pte;
+        unsigned long pmd_end;
+
+        if (pmd_none(*pmdir))
+                return 0;
+        BUG_ON(pmd_bad(*pmdir));
+
+        pmd_end = (address + PMD_SIZE) & PMD_MASK;
+        if (end > pmd_end)
+                end = pmd_end;
+
+        do {
+                pte = pte_offset_map(pmdir, address);
+                if (pte_present(*pte)) {
+                        struct page *page = pte_page(*pte);
+                        if (page->mapping) {
+                                struct zone *zone = page_zone(page);
+                                spin_lock_irq(&zone->lru_lock);
+                                rg_mem_change_page(page, old, new);
+                                spin_unlock_irq(&zone->lru_lock);
+                        }
+                }
+                address += PAGE_SIZE;
+                pte_unmap(pte);
+                pte++;
+        } while(address && (address < end));
+        return 0;
+}
+
+static inline int
+rg_mem_migrate_pgd(struct vm_area_struct* vma, pgd_t* pgdir,
+                   unsigned long address, unsigned long end,
+                   struct mem_res_group *old, struct mem_res_group *new)
+{
+        pmd_t* pmd;
+        pud_t* pud;
+        unsigned long pgd_end;
+
+        if (pgd_none(*pgdir))
+                return 0;
+        BUG_ON(pgd_bad(*pgdir));
+
+        pud = pud_offset(pgdir, address);
+        if (pud_none(*pud))
+                return 0;
+        BUG_ON(pud_bad(*pud));
+        pmd = pmd_offset(pud, address);
+        pgd_end = (address + PGDIR_SIZE) & PGDIR_MASK;
+
+        if (pgd_end && (end > pgd_end))
+                end = pgd_end;
+
+        do {
+                rg_mem_migrate_pmd(vma, pmd, address, end, old, new);
+                address = (address + PMD_SIZE) & PMD_MASK;
+                pmd++;
+        } while (address && (address < end));
+        return 0;
+}
+
+static inline int
+rg_mem_migrate_vma(struct vm_area_struct* vma, struct mem_res_group *old,
+                   struct mem_res_group *new)
+{
+        pgd_t* pgdir;
+        unsigned long address, end;
+
+        address = vma->vm_start;
+        end = vma->vm_end;
+
+        pgdir = pgd_offset(vma->vm_mm, address);
+        do {
+                rg_mem_migrate_pgd(vma, pgdir, address, end, old, new);
+                address = (address + PGDIR_SIZE) & PGDIR_MASK;
+                pgdir++;
+        } while(address && (address < end));
+        return 0;
+}
+
+void rg_mem_migrate_mm(struct mm_struct* mm, struct mem_res_group *old,
+                       struct mem_res_group *new)
+{
+        struct vm_area_struct *vma;
+
+        if (new) {
+                /* Go through all VMA to migrate pages */
+                down_read(&mm->mmap_sem);
+                spin_lock(&mm->page_table_lock);
+                vma = mm->mmap;
+                while(vma) {
+                        rg_mem_migrate_vma(vma, old, new);
+                        vma = vma->vm_next;
+                }
+                spin_unlock(&mm->page_table_lock);
+                up_read(&mm->mmap_sem);
+        }
+        return;
+}
diff -Naurp a3/mm/vmscan.c b3/mm/vmscan.c
--- a3/mm/vmscan.c      2006-10-03 09:57:26.000000000 +0200
+++ b3/mm/vmscan.c      2006-10-03 09:56:07.000000000 +0200
@@ -556,7 +556,7 @@ free_it:
                 if ((page->rg_bitmap & rgroup_guarantee) != 0)
                         goto keep_locked;
                 else
-                        // rg_mem_remove_page(page, NULL);
+                        rg_mem_remove_page(page, NULL);
 #endif
                 unlock_page(page);
                 nr_reclaimed++;

+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
 Patrick Le Dot                  mailto: [EMAIL PROTECTED]@bull.net
 Centre UNIX de BULL SAS         Phone : +33 4 76 29 73 20
 1, Rue de Provence BP 208       Fax   : +33 4 76 29 76 00
 38130 ECHIROLLES Cedex FRANCE
 Bull, Architect of an Open World TM     www.bull.com