The current relock logic switches the lru_lock whenever it finds a new
lruvec, so if two memcgs are reading files or allocating pages at a
similar rate, they end up taking the lru_lock alternately, page after page.

This patch records the lruvec each page belongs to and then takes each
needed lru_lock only once, adding all pages of that lruvec in one go.
That reduces the lock contention in the above scenario.
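
As a rough sketch of the idea (simplified from the diff below; the
bookkeeping that finds an lruvec's slot j in vecs[]/lists[] is elided):

	/* Pass 1: sort the pagevec's pages into per-lruvec lists. */
	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct lruvec *lruvec = mem_cgroup_page_lruvec(page,
							page_pgdat(page));
		/* find (or append) lruvec's slot j in vecs[]/lists[] */
		list_add(&page->lru, &lists[j]);
	}

	/*
	 * Pass 2: take each distinct lru_lock exactly once and drain
	 * its list, instead of relocking on every lruvec change.
	 */
	for (i = 0; i < total; i++) {
		spin_lock_irqsave(&vecs[i]->lru_lock, flags);
		/* __pagevec_lru_add_fn() for every page on lists[i] */
		spin_unlock_irqrestore(&vecs[i]->lru_lock, flags);
	}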

Suggested-by: Konstantin Khlebnikov <[email protected]>
Signed-off-by: Alex Shi <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: [email protected]
Cc: [email protected]
---
 mm/swap.c | 43 ++++++++++++++++++++++++++++++++++++-------
 1 file changed, 36 insertions(+), 7 deletions(-)

diff --git a/mm/swap.c b/mm/swap.c
index 2ac78e8fab71..fe53449fa1b8 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -958,24 +958,53 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec)
        trace_mm_lru_insertion(page, lru);
 }
 
+struct add_lruvecs {
+       struct list_head lists[PAGEVEC_SIZE];
+       struct lruvec *vecs[PAGEVEC_SIZE];
+};
+
 /*
  * Add the passed pages to the LRU, then drop the caller's refcount
  * on them.  Reinitialises the caller's pagevec.
  */
 void __pagevec_lru_add(struct pagevec *pvec)
 {
-       int i;
+       int i, j, total;
        struct lruvec *lruvec = NULL;
        unsigned long flags = 0;
+       struct page *page;
+       struct add_lruvecs lruvecs;
+
+       lruvecs.vecs[0] = NULL;
+       for (i = total = 0; i < pagevec_count(pvec); i++) {
+               page = pvec->pages[i];
+               lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
+
+               /* Try to find a matching lruvec */
+               for (j = 0; j <= total; j++)
+                       if (lruvec == lruvecs.vecs[j])
+                               break;
+               /* A new lruvec */
+               if (j > total) {
+                       INIT_LIST_HEAD(&lruvecs.lists[total]);
+                       lruvecs.vecs[total] = lruvec;
+                       j = total++;
+                       lruvecs.vecs[total] = NULL;
+               }
 
-       for (i = 0; i < pagevec_count(pvec); i++) {
-               struct page *page = pvec->pages[i];
+               list_add(&page->lru, &lruvecs.lists[j]);
+       }
 
-               lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
-               __pagevec_lru_add_fn(page, lruvec);
+       for (i = 0; i < total; i++) {
+               spin_lock_irqsave(&lruvecs.vecs[i]->lru_lock, flags);
+               while (!list_empty(&lruvecs.lists[i])) {
+                       page = lru_to_page(&lruvecs.lists[i]);
+                       list_del(&page->lru);
+                       __pagevec_lru_add_fn(page, lruvecs.vecs[i]);
+               }
+               spin_unlock_irqrestore(&lruvecs.vecs[i]->lru_lock, flags);
        }
-       if (lruvec)
-               unlock_page_lruvec_irqrestore(lruvec, flags);
+
        release_pages(pvec->pages, pvec->nr);
        pagevec_reinit(pvec);
 }
-- 
1.8.3.1
