The current dax_lock_page() locks a dax entry by obtaining the mapping and
index from the page.  To support 1-to-N RMAP in NVDIMM, we need a new
function that locks a specific dax entry corresponding to a given file's
mapping and index, and outputs the page corresponding to that dax entry
for the caller's use.

Signed-off-by: Shiyang Ruan <ruansy.f...@fujitsu.com>
Reviewed-by: Christoph Hellwig <h...@lst.de>
Reviewed-by: Darrick J. Wong <djw...@kernel.org>
---
 fs/dax.c            | 63 +++++++++++++++++++++++++++++++++++++++++++++
 include/linux/dax.h | 15 +++++++++++
 2 files changed, 78 insertions(+)

diff --git a/fs/dax.c b/fs/dax.c
index 4155a6107fa1..65e44d78b3bb 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -455,6 +455,69 @@ void dax_unlock_page(struct page *page, dax_entry_t cookie)
        dax_unlock_entry(&xas, (void *)cookie);
 }
 
+/**
+ * dax_lock_mapping_entry - Lock the DAX entry corresponding to a mapping
+ * @mapping: the file's mapping whose entry we want to lock
+ * @index: the offset within this file
+ * @page: output the dax page corresponding to this dax entry
+ *
+ * Return: A cookie to pass to dax_unlock_mapping_entry(); ~0UL if there is
+ * no entry to lock at @index (not an error, @page is not output); or 0 if
+ * the entry could not be locked because @mapping is not a DAX mapping.
+ */
+dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, pgoff_t index,
+               struct page **page)
+{
+       XA_STATE(xas, NULL, 0);
+       void *entry;
+
+       rcu_read_lock();
+       for (;;) {
+               entry = NULL;
+               if (!dax_mapping(mapping))
+                       break;
+
+               xas.xa = &mapping->i_pages;
+               xas_lock_irq(&xas);
+               xas_set(&xas, index);
+               entry = xas_load(&xas);
+               if (dax_is_locked(entry)) {
+                       rcu_read_unlock();
+                       wait_entry_unlocked(&xas, entry);
+                       rcu_read_lock();
+                       continue;
+               }
+               if (!entry ||
+                   dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
+                       /*
+                        * Because we look the entry up by the file's mapping
+                        * and index, it may not have been inserted yet, or it
+                        * may be a zero/empty entry.  This is not an error,
+                        * so return a special cookie and do not output @page.
+                        */
+                       entry = (void *)~0UL;
+               } else {
+                       *page = pfn_to_page(dax_to_pfn(entry));
+                       dax_lock_entry(&xas, entry);
+               }
+               xas_unlock_irq(&xas);
+               break;
+       }
+       rcu_read_unlock();
+       return (dax_entry_t)entry;
+}
+
+/**
+ * dax_unlock_mapping_entry - Unlock the DAX entry locked by
+ * dax_lock_mapping_entry()
+ * @mapping: the file's mapping whose entry was locked
+ * @index: the offset within this file
+ * @cookie: the cookie returned by dax_lock_mapping_entry()
+ */
+void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index,
+               dax_entry_t cookie)
+{
+       XA_STATE(xas, &mapping->i_pages, index);
+
+       /* ~0UL means dax_lock_mapping_entry() found no entry to lock. */
+       if (cookie == ~0UL)
+               return;
+
+       dax_unlock_entry(&xas, (void *)cookie);
+}
+
 /*
  * Find page cache entry at given index. If it is a DAX entry, return it
  * with the entry locked. If the page cache doesn't contain an entry at
diff --git a/include/linux/dax.h b/include/linux/dax.h
index cf85fc36da5f..7116681b48c0 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -161,6 +161,10 @@ struct page *dax_layout_busy_page(struct address_space *mapping);
 struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t start, loff_t end);
 dax_entry_t dax_lock_page(struct page *page);
 void dax_unlock_page(struct page *page, dax_entry_t cookie);
+dax_entry_t dax_lock_mapping_entry(struct address_space *mapping,
+               unsigned long index, struct page **page);
+void dax_unlock_mapping_entry(struct address_space *mapping,
+               unsigned long index, dax_entry_t cookie);
 #else
 static inline struct page *dax_layout_busy_page(struct address_space *mapping)
 {
@@ -188,6 +192,17 @@ static inline dax_entry_t dax_lock_page(struct page *page)
 static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
 {
 }
+
+/* !CONFIG_FS_DAX stub: return 0 ("entry could not be locked"). */
+static inline dax_entry_t dax_lock_mapping_entry(struct address_space *mapping,
+               unsigned long index, struct page **page)
+{
+       return 0;
+}
+
+/* !CONFIG_FS_DAX stub: nothing was locked, so nothing to unlock. */
+static inline void dax_unlock_mapping_entry(struct address_space *mapping,
+               unsigned long index, dax_entry_t cookie)
+{
+}
 #endif
 
 int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
-- 
2.36.1




Reply via email to