On 11/20/20 9:27 AM, Alex Shi wrote:
The current relock logic switches the lru_lock whenever a new lruvec
is found, so if two memcgs are reading files or allocating pages at
the same time, they can end up taking the lru_lock alternately,
waiting on each other because of the fairness of the ticket spinlock.

This patch sorts the pages in the pagevec by lruvec, so that in the
above scenario each lru_lock is taken only once. That avoids the
fairness-induced waiting on lock re-acquisition. With it,
vm-scalability/case-lru-file-readtwice gains ~5% performance on my
2P*20core*HT machine.

Hm, once you sort the pages like this, it's a shame not to splice each
sub-list in one go instead of doing more list_del() + list_add()
iterations. update_lru_size() could also be called once per batch?
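
Something like the sketch below, perhaps. It is a hypothetical,
untested helper (lru_add_sublist() is made up, not in the tree); it
assumes the sort has already narrowed the sub-list down to pages that
share one LRU list and one zone, and that per-page setup such as
SetPageLRU() was done before the splice:

/*
 * Sketch: splice a pre-sorted sub-list onto its lruvec in one go.
 * 'lru', 'zid' and 'nr' describe the whole batch, so the accounting
 * can be updated once instead of per page.
 */
static void lru_add_sublist(struct lruvec *lruvec, struct list_head *pages,
                            enum lru_list lru, enum zone_type zid, int nr)
{
        unsigned long flags;

        spin_lock_irqsave(&lruvec->lru_lock, flags);
        /* One accounting update for the whole batch ... */
        update_lru_size(lruvec, lru, zid, nr);
        /* ... and one splice instead of nr list_add() calls. */
        list_splice_init(pages, &lruvec->lists[lru]);
        spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}

That would turn up to PAGEVEC_SIZE list_add() calls plus per-page size
updates into a single splice and a single update per lruvec batch.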

Suggested-by: Konstantin Khlebnikov <koc...@gmail.com>
Signed-off-by: Alex Shi <alex....@linux.alibaba.com>
Cc: Konstantin Khlebnikov <koc...@gmail.com>
Cc: Andrew Morton <a...@linux-foundation.org>
Cc: Hugh Dickins <hu...@google.com>
Cc: Yu Zhao <yuz...@google.com>
Cc: Michal Hocko <mho...@suse.com>
Cc: linux...@kvack.org
Cc: linux-kernel@vger.kernel.org
---
  mm/swap.c | 57 +++++++++++++++++++++++++++++++++++++++++++++++--------
  1 file changed, 49 insertions(+), 8 deletions(-)

diff --git a/mm/swap.c b/mm/swap.c
index 490553f3f9ef..c787b38bf9c0 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -1009,24 +1009,65 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec)
        trace_mm_lru_insertion(page, lru);
  }
 
+struct lruvecs {
+       struct list_head lists[PAGEVEC_SIZE];
+       struct lruvec *vecs[PAGEVEC_SIZE + 1];  /* +1 for the NULL sentinel */
+};
+
+/* Sort pvec pages on their lruvec */
+static int sort_page_lruvec(struct lruvecs *lruvecs, struct pagevec *pvec)
+{
+       int i, j, nr_lruvec;
+       struct page *page;
+       struct lruvec *lruvec = NULL;
+
+       lruvecs->vecs[0] = NULL;        /* sentinel; terminates the search below */
+       for (i = nr_lruvec = 0; i < pagevec_count(pvec); i++) {
+               page = pvec->pages[i];
+               lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
+
+               /* Look for an existing sub-list for this lruvec */
+               for (j = 0; j <= nr_lruvec; j++)
+                       if (lruvec == lruvecs->vecs[j])
+                               break;
+
+               /* A new lruvec: open a sub-list and move the sentinel up */
+               if (j > nr_lruvec) {
+                       INIT_LIST_HEAD(&lruvecs->lists[nr_lruvec]);
+                       lruvecs->vecs[nr_lruvec] = lruvec;
+                       j = nr_lruvec++;
+                       lruvecs->vecs[nr_lruvec] = NULL;
+               }
+
+               list_add_tail(&page->lru, &lruvecs->lists[j]);
+       }
+
+       return nr_lruvec;
+}
+
  /*
   * Add the passed pages to the LRU, then drop the caller's refcount
   * on them.  Reinitialises the caller's pagevec.
   */
  void __pagevec_lru_add(struct pagevec *pvec)
  {
-       int i;
-       struct lruvec *lruvec = NULL;
+       int i, nr_lruvec;
        unsigned long flags = 0;
+       struct page *page;
+       struct lruvecs lruvecs;
 
-       for (i = 0; i < pagevec_count(pvec); i++) {
-               struct page *page = pvec->pages[i];
+       nr_lruvec = sort_page_lruvec(&lruvecs, pvec);
 
-               lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
-               __pagevec_lru_add_fn(page, lruvec);
+       for (i = 0; i < nr_lruvec; i++) {
+               spin_lock_irqsave(&lruvecs.vecs[i]->lru_lock, flags);
+               while (!list_empty(&lruvecs.lists[i])) {
+                       page = lru_to_page(&lruvecs.lists[i]);
+                       list_del(&page->lru);
+                       __pagevec_lru_add_fn(page, lruvecs.vecs[i]);
+               }
+               spin_unlock_irqrestore(&lruvecs.vecs[i]->lru_lock, flags);
        }
-       if (lruvec)
-               unlock_page_lruvec_irqrestore(lruvec, flags);
+
        release_pages(pvec->pages, pvec->nr);
        pagevec_reinit(pvec);
  }

