Re: [PATCH v9 3/8] fsdax: Replace mmap entry in case of CoW

2021-09-16 Thread kernel test robot
Hi Shiyang,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on linus/master]
[also build test ERROR on v5.15-rc1 next-20210916]
[cannot apply to xfs-linux/for-next hch-configfs/for-next]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:
https://github.com/0day-ci/linux/commits/Shiyang-Ruan/fsdax-xfs-Add-reflink-dedupe-support-for-fsdax/20210915-184743
base:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git 
3ca706c189db861b2ca2019a0901b94050ca49d8
config: hexagon-randconfig-r045-20210916 (attached as .config)
compiler: clang version 14.0.0 (https://github.com/llvm/llvm-project 
c8b3d7d6d6de37af68b2f379d0e37304f78e115f)
reproduce (this is a W=1 build):
wget 
https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O 
~/bin/make.cross
chmod +x ~/bin/make.cross
# 
https://github.com/0day-ci/linux/commit/34b16b56bacb2d3e1e98f9ed47d20b545358bdcd
git remote add linux-review https://github.com/0day-ci/linux
git fetch --no-tags linux-review 
Shiyang-Ruan/fsdax-xfs-Add-reflink-dedupe-support-for-fsdax/20210915-184743
git checkout 34b16b56bacb2d3e1e98f9ed47d20b545358bdcd
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 
ARCH=hexagon 

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot 

All errors (new ones prefixed by >>):

>> fs/dax.c:1483:38: error: incompatible pointer types passing 'const struct 
>> iomap_iter *' to parameter of type 'const struct iomap *' 
>> [-Werror,-Wincompatible-pointer-types]
   return dax_pmd_load_hole(xas, vmf, iter, entry);
  ^~~~
   fs/dax.c:1206:23: note: passing argument to parameter 'iomap' here
   const struct iomap *iomap, void **entry)
   ^
   1 error generated.


vim +1483 fs/dax.c

  1451  
  1452  /**
  1453   * dax_fault_iter - Common actor to handle pfn insertion in PTE/PMD 
fault.
  1454   * @vmf:vm fault instance
  1455   * @iter:   iomap iter
  1456   * @pfnp:   pfn to be returned
  1457   * @xas:the dax mapping tree of a file
  1458   * @entry:  an unlocked dax entry to be inserted
  1459   * @pmd:distinguish whether it is a pmd fault
  1460   */
  1461  static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
  1462  const struct iomap_iter *iter, pfn_t *pfnp,
  1463  struct xa_state *xas, void **entry, bool pmd)
  1464  {
  1465  const struct iomap *iomap = &iter->iomap;
  1466  const struct iomap *srcmap = &iter->srcmap;
  1467  size_t size = pmd ? PMD_SIZE : PAGE_SIZE;
  1468  loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT;
  1469  bool write = iter->flags & IOMAP_WRITE;
  1470  unsigned long entry_flags = pmd ? DAX_PMD : 0;
  1471  int err = 0;
  1472  pfn_t pfn;
  1473  void *kaddr;
  1474  
  1475  if (!pmd && vmf->cow_page)
  1476  return dax_fault_cow_page(vmf, iter);
  1477  
  1478  /* if we are reading UNWRITTEN and HOLE, return a hole. */
  1479  if (!write &&
  1480  (iomap->type == IOMAP_UNWRITTEN || iomap->type == 
IOMAP_HOLE)) {
  1481  if (!pmd)
  1482  return dax_load_hole(xas, vmf, iter, entry);
> 1483  return dax_pmd_load_hole(xas, vmf, iter, entry);
  1484  }
  1485  
  1486  if (iomap->type != IOMAP_MAPPED && !(iomap->flags & 
IOMAP_F_SHARED)) {
  1487  WARN_ON_ONCE(1);
  1488  return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS;
  1489  }
  1490  
  1491  err = dax_iomap_direct_access(iomap, pos, size, &kaddr, &pfn);
  1492  if (err)
  1493  return pmd ? VM_FAULT_FALLBACK : dax_fault_return(err);
  1494  
  1495  *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, 
entry_flags);
  1496  
  1497  if (write &&
  1498  srcmap->addr != IOMAP_HOLE && srcmap->addr != iomap->addr) {
  1499  err = dax_iomap_cow_copy(pos, size, size, srcmap, 
kaddr);
  1500  if (err)
  1501  return dax_fault_return(err);
  1502  }
  1503  
  1504  if (dax_fault_is_synchronous(iter, vmf->vma))
  1505  return dax_fault_synchronous_pfnp(pfnp, pfn);
  1506  
  1507  /* insert PMD pfn */
  1508  if (pmd)
  1509  return vmf_insert_pfn_pmd(vmf, pfn, write);
  1510  
  1511  /* insert PTE pfn */
  1512  if (write)
  1513  return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, 
pfn);
  1514  return vmf_insert_mixed(vmf->vma, vmf->address, pfn);

[PATCH v9 3/8] fsdax: Replace mmap entry in case of CoW

2021-09-15 Thread Shiyang Ruan
Replace the existing entry to the newly allocated one in case of CoW.
Also, we mark the entry as PAGECACHE_TAG_TOWRITE so writeback marks this
entry as writeprotected.  This helps us snapshots so new write
pagefaults after snapshots trigger a CoW.

Signed-off-by: Goldwyn Rodrigues 
Signed-off-by: Shiyang Ruan 
Reviewed-by: Christoph Hellwig 
Reviewed-by: Ritesh Harjani 
Reviewed-by: Darrick J. Wong 
---
 fs/dax.c | 73 +++-
 1 file changed, 40 insertions(+), 33 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index dded08be54dc..41c93929f20b 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -734,6 +734,23 @@ static int copy_cow_page_dax(struct block_device *bdev, 
struct dax_device *dax_d
return 0;
 }
 
+/*
+ * MAP_SYNC on a dax mapping guarantees dirty metadata is
+ * flushed on write-faults (non-cow), but not read-faults.
+ */
+static bool dax_fault_is_synchronous(const struct iomap_iter *iter,
+   struct vm_area_struct *vma)
+{
+   return (iter->flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC) &&
+   (iter->iomap.flags & IOMAP_F_DIRTY);
+}
+
+static bool dax_fault_is_cow(const struct iomap_iter *iter)
+{
+   return (iter->flags & IOMAP_WRITE) &&
+   (iter->iomap.flags & IOMAP_F_SHARED);
+}
+
 /*
  * By this point grab_mapping_entry() has ensured that we have a locked entry
  * of the appropriate size so we don't have to worry about downgrading PMDs to
@@ -741,16 +758,19 @@ static int copy_cow_page_dax(struct block_device *bdev, 
struct dax_device *dax_d
  * already in the tree, we will skip the insertion and just dirty the PMD as
  * appropriate.
  */
-static void *dax_insert_entry(struct xa_state *xas,
-   struct address_space *mapping, struct vm_fault *vmf,
-   void *entry, pfn_t pfn, unsigned long flags, bool dirty)
+static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
+   const struct iomap_iter *iter, void *entry, pfn_t pfn,
+   unsigned long flags)
 {
+   struct address_space *mapping = vmf->vma->vm_file->f_mapping;
void *new_entry = dax_make_entry(pfn, flags);
+   bool dirty = !dax_fault_is_synchronous(iter, vmf->vma);
+   bool cow = dax_fault_is_cow(iter);
 
if (dirty)
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 
-   if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) {
+   if (cow || (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE))) {
unsigned long index = xas->xa_index;
/* we are replacing a zero page with block mapping */
if (dax_is_pmd_entry(entry))
@@ -762,7 +782,7 @@ static void *dax_insert_entry(struct xa_state *xas,
 
xas_reset(xas);
xas_lock_irq(xas);
-   if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
+   if (cow || dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
void *old;
 
dax_disassociate_entry(entry, mapping, false);
@@ -786,6 +806,9 @@ static void *dax_insert_entry(struct xa_state *xas,
if (dirty)
xas_set_mark(xas, PAGECACHE_TAG_DIRTY);
 
+   if (cow)
+   xas_set_mark(xas, PAGECACHE_TAG_TOWRITE);
+
xas_unlock_irq(xas);
return entry;
 }
@@ -1111,17 +1134,15 @@ static int dax_iomap_cow_copy(loff_t pos, uint64_t 
length, size_t align_size,
  * If this page is ever written to we will re-fault and change the mapping to
  * point to real DAX storage instead.
  */
-static vm_fault_t dax_load_hole(struct xa_state *xas,
-   struct address_space *mapping, void **entry,
-   struct vm_fault *vmf)
+static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf,
+   const struct iomap_iter *iter, void **entry)
 {
-   struct inode *inode = mapping->host;
+   struct inode *inode = iter->inode;
unsigned long vaddr = vmf->address;
pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
vm_fault_t ret;
 
-   *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
-   DAX_ZERO_PAGE, false);
+   *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE);
 
ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
trace_dax_load_hole(inode, vmf, ret);
@@ -1130,7 +1151,7 @@ static vm_fault_t dax_load_hole(struct xa_state *xas,
 
 #ifdef CONFIG_FS_DAX_PMD
 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
-   const struct iomap *iomap, void **entry)
+   const struct iomap_iter *iter, void **entry)
 {
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
unsigned long pmd_addr = vmf->address & PMD_MASK;
@@ -1148,8 +1169,8 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, 
struct vm_fault *vmf,
goto fallback;
 
pfn = page_to_pfn_t(zero_page);
-   *entry =