Since pgdat->lru_lock has been replaced by lruvec->lru_lock, fix the
now-stale comments in the code that still refer to it. Also fix a few
ancient comments that still mention zone->lru_lock, left over from when
the lock moved from the zone to the node.
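
A minimal sketch of the locking change these comments now describe (the
call site below is simplified for illustration only; the exact list
helpers and irq-disabling variants differ across the LRU paths):

	/* before: one per-node lock protected every LRU list on the node */
	spin_lock_irq(&pgdat->lru_lock);
	list_del(&page->lru);
	spin_unlock_irq(&pgdat->lru_lock);

	/* after: each lruvec (per memcg, per node) has its own lru_lock */
	spin_lock_irq(&lruvec->lru_lock);
	list_del(&page->lru);
	spin_unlock_irq(&lruvec->lru_lock);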

Signed-off-by: Alex Shi <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Jason Gunthorpe <[email protected]>
Cc: Dan Williams <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Ira Weiny <[email protected]>
Cc: Jesper Dangaard Brouer <[email protected]>
Cc: Andrey Ryabinin <[email protected]>
Cc: Jann Horn <[email protected]>
Cc: Logan Gunthorpe <[email protected]>
Cc: Souptick Joarder <[email protected]>
Cc: Ralph Campbell <[email protected]>
Cc: "Tobin C. Harding" <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Oscar Salvador <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Wei Yang <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Pavel Tatashin <[email protected]>
Cc: Arun KS <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: "Darrick J. Wong" <[email protected]>
Cc: Amir Goldstein <[email protected]>
Cc: Dave Chinner <[email protected]>
Cc: Josef Bacik <[email protected]>
Cc: "Kirill A. Shutemov" <[email protected]>
Cc: "Jérôme Glisse" <[email protected]>
Cc: Mike Kravetz <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Kirill Tkhai <[email protected]>
Cc: Daniel Jordan <[email protected]>
Cc: Yafang Shao <[email protected]>
Cc: Yang Shi <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
---
 include/linux/mm_types.h | 2 +-
 include/linux/mmzone.h   | 4 ++--
 mm/filemap.c             | 4 ++--
 mm/rmap.c                | 2 +-
 mm/vmscan.c              | 6 +++---
 5 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 6a7a1083b6fb..f9f990d8f08f 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -79,7 +79,7 @@ struct page {
                struct {        /* Page cache and anonymous pages */
                        /**
                         * @lru: Pageout list, eg. active_list protected by
-                        * pgdat->lru_lock.  Sometimes used as a generic list
+                        * lruvec->lru_lock.  Sometimes used as a generic list
                         * by the page owner.
                         */
                        struct list_head lru;
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 8d0076d084be..d2f782263e42 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -159,7 +159,7 @@ static inline bool free_area_empty(struct free_area *area, int migratetype)
 struct pglist_data;
 
 /*
- * zone->lock and the zone lru_lock are two of the hottest locks in the kernel.
+ * zone->lock and the lru_lock are two of the hottest locks in the kernel.
  * So add a wild amount of padding here to ensure that they fall into separate
  * cachelines.  There are very few zone structures in the machine, so space
  * consumption is not a concern here.
@@ -295,7 +295,7 @@ struct zone_reclaim_stat {
 
 struct lruvec {
        struct list_head                lists[NR_LRU_LISTS];
-       /* move lru_lock to per lruvec for memcg */
+       /* per lruvec lru_lock for memcg */
        spinlock_t                      lru_lock;
 
        struct zone_reclaim_stat        reclaim_stat;
diff --git a/mm/filemap.c b/mm/filemap.c
index d0cf700bf201..0a604c8284f2 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -100,8 +100,8 @@
  *    ->swap_lock              (try_to_unmap_one)
  *    ->private_lock           (try_to_unmap_one)
  *    ->i_pages lock           (try_to_unmap_one)
- *    ->pgdat->lru_lock                (follow_page->mark_page_accessed)
- *    ->pgdat->lru_lock                (check_pte_range->isolate_lru_page)
+ *    ->lruvec->lru_lock       (follow_page->mark_page_accessed)
+ *    ->lruvec->lru_lock       (check_pte_range->isolate_lru_page)
  *    ->private_lock           (page_remove_rmap->set_page_dirty)
  *    ->i_pages lock           (page_remove_rmap->set_page_dirty)
  *    bdi.wb->list_lock                (page_remove_rmap->set_page_dirty)
diff --git a/mm/rmap.c b/mm/rmap.c
index 003377e24232..6bee4aebced6 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -27,7 +27,7 @@
  *         mapping->i_mmap_rwsem
  *           anon_vma->rwsem
  *             mm->page_table_lock or pte_lock
- *               pgdat->lru_lock (in mark_page_accessed, isolate_lru_page)
+ *               lruvec->lru_lock (in mark_page_accessed, isolate_lru_page)
  *               swap_lock (in swap_duplicate, swap_info_get)
  *                 mmlist_lock (in mmput, drain_mmlist and others)
  *                 mapping->private_lock (in __set_page_dirty_buffers)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ea5c2f3f2567..1328eb182a3e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1662,7 +1662,7 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
 }
 
 /**
- * pgdat->lru_lock is heavily contended.  Some of the functions that
+ * lruvec->lru_lock is heavily contended.  Some of the functions that
  * shrink the lists perform better by taking out a batch of pages
  * and working on them outside the LRU lock.
  *
@@ -1864,9 +1864,9 @@ static int too_many_isolated(struct pglist_data *pgdat, int file,
  * processes, from rmap.
  *
  * If the pages are mostly unmapped, the processing is fast and it is
- * appropriate to hold zone_lru_lock across the whole operation.  But if
+ * appropriate to hold lru_lock across the whole operation.  But if
  * the pages are mapped, the processing is slow (page_referenced()) so we
- * should drop zone_lru_lock around each page.  It's impossible to balance
+ * should drop lru_lock around each page.  It's impossible to balance
  * this, so instead we remove the pages from the LRU while processing them.
  * It is safe to rely on PG_active against the non-LRU pages in here because
  * nobody will play with that bit on a non-LRU page.
-- 
1.8.3.1
