2 changes here :
- the accounting in the kernel code : linux/mm_inline.h
- the struct page

The accounting is done every time a page is added/removed
from the active/inactive list.

This is IMHO not efficient, because a page can move from one
list to the other while remaining in the same group; in that
case no accounting is necessary.

To be discussed :-)


The struct page seems to be the best place to add a mark
indicating which groups use this page.
kswapd can then decide whether a page should be removed from
memory: this is done in get_page_unless_zero()
and in shrink_page_list().

This implementation introduces only an integer in the struct page
and no other struct.
The PLMC controller uses it like a bitmap: 1 bit per group.
On IA32 this limits the controller to a maximum of 32
groups.

What I'm pushing for is a change in the struct page; I
don't care yet whether this change is an int, a long or a
pointer to a container struct...


Signed-off-by: Patrick Le Dot <[EMAIL PROTECTED]>
---

 include/linux/mm.h        |   12 ++++++++++++
 include/linux/mm_inline.h |    7 +++++++
 mm/vmscan.c               |    7 +++++++
 3 files changed, 26 insertions(+)

diff -Naurp a2/include/linux/mm.h b2/include/linux/mm.h
--- a2/include/linux/mm.h       2006-10-03 09:50:56.000000000 +0200
+++ b2/include/linux/mm.h       2006-10-03 09:49:04.000000000 +0200
@@ -40,6 +40,10 @@ extern int sysctl_legacy_va_layout;
 
 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
 
+#ifdef CONFIG_RES_GROUPS_MEM_RC
+extern unsigned int rgroup_guarantee;
+#endif
+
 /*
  * Linux kernel virtual memory manager primitives.
  * The idea being to have a "virtual" mm in the same way
@@ -267,6 +271,9 @@ struct page {
        void *virtual;                  /* Kernel virtual address (NULL if
                                           not kmapped, ie. highmem) */
 #endif /* WANT_PAGE_VIRTUAL */
+#ifdef CONFIG_RES_GROUPS_MEM_RC
+       unsigned int rg_bitmap;         /* 1 bit per rgroup using this page */
+#endif
 };
 
 #define page_private(page)             ((page)->private)
@@ -307,6 +314,11 @@ static inline int put_page_testzero(stru
  */
 static inline int get_page_unless_zero(struct page *page)
 {
+#ifdef CONFIG_RES_GROUPS_MEM_RC
+       if (rgroup_guarantee & page->rg_bitmap)
+               return 0;
+       else
+#endif
        return atomic_inc_not_zero(&page->_count);
 }
 
diff -Naurp a2/include/linux/mm_inline.h b2/include/linux/mm_inline.h
--- a2/include/linux/mm_inline.h        2006-10-03 09:50:57.000000000 +0200
+++ b2/include/linux/mm_inline.h        2006-10-03 09:49:04.000000000 +0200
@@ -1,9 +1,11 @@
+#include <linux/mem_rc_inline.h>
 
 static inline void
 add_page_to_active_list(struct zone *zone, struct page *page)
 {
        list_add(&page->lru, &zone->active_list);
        zone->nr_active++;
+       res_group_inc_active_list(page);
 }
 
 static inline void
@@ -11,6 +13,7 @@ add_page_to_inactive_list(struct zone *z
 {
        list_add(&page->lru, &zone->inactive_list);
        zone->nr_inactive++;
+       res_group_inc_inactive_list(page);
 }
 
 static inline void
@@ -18,6 +21,7 @@ del_page_from_active_list(struct zone *z
 {
        list_del(&page->lru);
        zone->nr_active--;
+       res_group_dec_active_list(page);
 }
 
 static inline void
@@ -25,6 +29,7 @@ del_page_from_inactive_list(struct zone 
 {
        list_del(&page->lru);
        zone->nr_inactive--;
+       res_group_dec_inactive_list(page);
 }
 
 static inline void
@@ -34,8 +39,10 @@ del_page_from_lru(struct zone *zone, str
        if (PageActive(page)) {
                __ClearPageActive(page);
                zone->nr_active--;
+               res_group_dec_active_list(page);
        } else {
                zone->nr_inactive--;
+               res_group_dec_inactive_list(page);
        }
 }
 
diff -Naurp a2/mm/vmscan.c b2/mm/vmscan.c
--- a2/mm/vmscan.c      2006-10-03 09:50:57.000000000 +0200
+++ b2/mm/vmscan.c      2006-10-03 09:49:04.000000000 +0200
@@ -551,6 +551,13 @@ static unsigned long shrink_page_list(st
                        goto keep_locked;
 
 free_it:
+#ifdef CONFIG_RES_GROUPS_MEM_RC
+               /* guarantee can be changed by the previous "free_it" */
+               if ((page->rg_bitmap & rgroup_guarantee) != 0)
+                       goto keep_locked;
+               else
+                       // rg_mem_remove_page(page, NULL);
+#endif
                unlock_page(page);
                nr_reclaimed++;
                if (!pagevec_add(&freed_pvec, page))


+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+    Patrick Le Dot
 mailto: [EMAIL PROTECTED]@bull.net         Centre UNIX de BULL SAS
 Phone : +33 4 76 29 73 20               1, Rue de Provence     BP 208
 Fax   : +33 4 76 29 76 00               38130 ECHIROLLES Cedex FRANCE
 Bull, Architect of an Open World TM
 www.bull.com

-------------------------------------------------------------------------
Take Surveys. Earn Cash. Influence the Future of IT
Join SourceForge.net's Techsay panel and you'll get the chance to share your
opinions on IT & business topics through brief surveys -- and earn cash
http://www.techsay.com/default.php?page=join.php&p=sourceforge&CID=DEVDEV
_______________________________________________
ckrm-tech mailing list
https://lists.sourceforge.net/lists/listinfo/ckrm-tech

Reply via email to