[PATCH v2 5/6] mm: introduce zone_gup_lock, for dma-pinned pages

2018-11-10 Thread john . hubbard
From: John Hubbard 

The page->dma_pinned_flags and _count fields require
lock protection. A lock at approximately the granularity
of the zone_lru_lock is called for, but adding to the
locking contention of zone_lru_lock is undesirable,
because that is a pre-existing hot spot. Fortunately,
these new dma_pinned_* fields can use an independent
lock, so this patch creates an entirely new lock, right
next to the zone_lru_lock.

Why "zone_gup_lock"?

Most of the naming refers to "DMA-pinned pages", but
"zone DMA lock" has other meanings already, so this is
called zone_gup_lock instead. The "dma pinning" is a result
of get_user_pages (gup) being called, so the name still
helps explain its use.

Cc: Matthew Wilcox 
Cc: Michal Hocko 
Cc: Christopher Lameter 
Cc: Jason Gunthorpe 
Cc: Dan Williams 
Cc: Jan Kara 
Signed-off-by: John Hubbard 
---
 include/linux/mmzone.h | 6 ++++++
 mm/page_alloc.c        | 1 +
 2 files changed, 7 insertions(+)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 847705a6d0ec..125a6f34f6ba 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -660,6 +660,7 @@ typedef struct pglist_data {
enum zone_type kswapd_classzone_idx;
 
int kswapd_failures;/* Number of 'reclaimed == 0' runs */
+   spinlock_t pinned_dma_lock;
 
 #ifdef CONFIG_COMPACTION
int kcompactd_max_order;
@@ -729,6 +730,11 @@ static inline spinlock_t *zone_lru_lock(struct zone *zone)
	return &zone->zone_pgdat->lru_lock;
 }
 
+static inline spinlock_t *zone_gup_lock(struct zone *zone)
+{
+	return &zone->zone_pgdat->pinned_dma_lock;
+}
+
 static inline struct lruvec *node_lruvec(struct pglist_data *pgdat)
 {
	return &pgdat->lruvec;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a919ba5cb3c8..7cc0d9bdba17 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6305,6 +6305,7 @@ static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
 
pgdat_page_ext_init(pgdat);
	spin_lock_init(&pgdat->lru_lock);
+	spin_lock_init(&pgdat->pinned_dma_lock);
lruvec_init(node_lruvec(pgdat));
 }
 
-- 
2.19.1



[PATCH v2 5/6] mm: introduce zone_gup_lock, for dma-pinned pages

2018-11-10 Thread john . hubbard
From: John Hubbard 

The page->dma_pinned_flags and _count fields require
lock protection. A lock at approximately the granularity
of the zone_lru_lock is called for, but adding to the
locking contention of zone_lru_lock is undesirable,
because that is a pre-existing hot spot. Fortunately,
these new dma_pinned_* fields can use an independent
lock, so this patch creates an entirely new lock, right
next to the zone_lru_lock.

Why "zone_gup_lock"?

Most of the naming refers to "DMA-pinned pages", but
"zone DMA lock" has other meanings already, so this is
called zone_gup_lock instead. The "dma pinning" is a result
of get_user_pages (gup) being called, so the name still
helps explain its use.

Cc: Matthew Wilcox 
Cc: Michal Hocko 
Cc: Christopher Lameter 
Cc: Jason Gunthorpe 
Cc: Dan Williams 
Cc: Jan Kara 
Signed-off-by: John Hubbard 
---
 include/linux/mmzone.h | 6 ++++++
 mm/page_alloc.c        | 1 +
 2 files changed, 7 insertions(+)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 847705a6d0ec..125a6f34f6ba 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -660,6 +660,7 @@ typedef struct pglist_data {
enum zone_type kswapd_classzone_idx;
 
int kswapd_failures;/* Number of 'reclaimed == 0' runs */
+   spinlock_t pinned_dma_lock;
 
 #ifdef CONFIG_COMPACTION
int kcompactd_max_order;
@@ -729,6 +730,11 @@ static inline spinlock_t *zone_lru_lock(struct zone *zone)
	return &zone->zone_pgdat->lru_lock;
 }
 
+static inline spinlock_t *zone_gup_lock(struct zone *zone)
+{
+	return &zone->zone_pgdat->pinned_dma_lock;
+}
+
 static inline struct lruvec *node_lruvec(struct pglist_data *pgdat)
 {
	return &pgdat->lruvec;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a919ba5cb3c8..7cc0d9bdba17 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6305,6 +6305,7 @@ static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
 
pgdat_page_ext_init(pgdat);
	spin_lock_init(&pgdat->lru_lock);
+	spin_lock_init(&pgdat->pinned_dma_lock);
lruvec_init(node_lruvec(pgdat));
 }
 
-- 
2.19.1