Re: [PATCH v3 02/10] fsdax: Factor helper: dax_fault_actor()

2021-04-02 Thread Christoph Hellwig
> + if (!pmd)
> + return dax_load_hole(xas, mapping, &entry, vmf);
> + else
> + return dax_pmd_load_hole(xas, vmf, iomap, &entry);

> + if (pmd)
> + return vmf_insert_pfn_pmd(vmf, pfn, write);
> + if (write)
> + return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
> + else
> + return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
> +}

No need for else statements after returning.

Otherwise looks good:

Reviewed-by: Christoph Hellwig 


RE: [PATCH v3 02/10] fsdax: Factor helper: dax_fault_actor()

2021-03-30 Thread ruansy.f...@fujitsu.com


> -Original Message-
> From: Ritesh Harjani 
> Sent: Tuesday, March 23, 2021 11:48 PM
> Subject: Re: [PATCH v3 02/10] fsdax: Factor helper: dax_fault_actor()
> 
> 
> 
> On 3/19/21 7:22 AM, Shiyang Ruan wrote:
> > The core logic in the two dax page fault functions is similar. So,
> > move the logic into a common helper function. Also, to facilitate the
> > addition of new features, such as CoW, switch-case is no longer used
> > to handle different iomap types.
> >
> > Signed-off-by: Shiyang Ruan 
> > ---
> >   fs/dax.c | 291 +++
> >   1 file changed, 145 insertions(+), 146 deletions(-)
> >
> > diff --git a/fs/dax.c b/fs/dax.c
> > index 7031e4302b13..33ddad0f3091 100644
> > --- a/fs/dax.c
> > +++ b/fs/dax.c
> > @@ -1053,6 +1053,66 @@ static vm_fault_t dax_load_hole(struct xa_state
> *xas,
> > return ret;
> >   }
> >
> > +#ifdef CONFIG_FS_DAX_PMD
> > +static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault
> *vmf,
> > +   struct iomap *iomap, void **entry)
> > +{
> > +   struct address_space *mapping = vmf->vma->vm_file->f_mapping;
> > +   unsigned long pmd_addr = vmf->address & PMD_MASK;
> > +   struct vm_area_struct *vma = vmf->vma;
> > +   struct inode *inode = mapping->host;
> > +   pgtable_t pgtable = NULL;
> > +   struct page *zero_page;
> > +   spinlock_t *ptl;
> > +   pmd_t pmd_entry;
> > +   pfn_t pfn;
> > +
> > +   zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
> > +
> > +   if (unlikely(!zero_page))
> > +   goto fallback;
> > +
> > +   pfn = page_to_pfn_t(zero_page);
> > +   *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
> > +   DAX_PMD | DAX_ZERO_PAGE, false);
> > +
> > +   if (arch_needs_pgtable_deposit()) {
> > +   pgtable = pte_alloc_one(vma->vm_mm);
> > +   if (!pgtable)
> > +   return VM_FAULT_OOM;
> > +   }
> > +
> > +   ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
> > +   if (!pmd_none(*(vmf->pmd))) {
> > +   spin_unlock(ptl);
> > +   goto fallback;
> > +   }
> > +
> > +   if (pgtable) {
> > +   pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
> > +   mm_inc_nr_ptes(vma->vm_mm);
> > +   }
> > +   pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
> > +   pmd_entry = pmd_mkhuge(pmd_entry);
> > +   set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
> > +   spin_unlock(ptl);
> > +   trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
> > +   return VM_FAULT_NOPAGE;
> > +
> > +fallback:
> > +   if (pgtable)
> > +   pte_free(vma->vm_mm, pgtable);
> > +   trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
> > +   return VM_FAULT_FALLBACK;
> > +}
> > +#else
> > +static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault
> *vmf,
> > +   struct iomap *iomap, void **entry)
> > +{
> > +   return VM_FAULT_FALLBACK;
> > +}
> > +#endif /* CONFIG_FS_DAX_PMD */
> > +
> >   s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
> >   {
> > sector_t sector = iomap_sector(iomap, pos & PAGE_MASK); @@
> -1289,6
> > +1349,61 @@ static int dax_fault_cow_page(struct vm_fault *vmf, struct
> iomap *iomap,
> > return 0;
> >   }
> >
> > +/**
> > + * dax_fault_actor - Common actor to handle pfn insertion in PTE/PMD fault.
> > + * @vmf:   vm fault instance
> > + * @pfnp:  pfn to be returned
> > + * @xas:   the dax mapping tree of a file
> > + * @entry: an unlocked dax entry to be inserted
> > + * @pmd:   distinguish whether it is a pmd fault
> > + * @flags: iomap flags
> > + * @iomap: from iomap_begin()
> > + * @srcmap: from iomap_begin(), not equal to iomap if it is a CoW
> > + */
> > +static vm_fault_t dax_fault_actor(struct vm_fault *vmf, pfn_t *pfnp,
> > +   struct xa_state *xas, void *entry, bool pmd, unsigned int flags,
> > +   struct iomap *iomap, struct iomap *srcmap) {
> > +   struct address_space *mapping = vmf->vma->vm_file->f_mapping;
> > +   size_t size = pmd ? PMD_SIZE : PAGE_SIZE;
> > +   loff_t pos = (loff_t)xas->xa_offset << PAGE_SHIFT;
> 
> shouldn't we use xa_index here for pos ?
> (loff_t)xas->xa_index << PAGE_SHIFT;
Yes.
> 
> > +   bool write = vmf->flags & FAULT_FLAG_WRITE;

Re: [PATCH v3 02/10] fsdax: Factor helper: dax_fault_actor()

2021-03-23 Thread Ritesh Harjani




On 3/19/21 7:22 AM, Shiyang Ruan wrote:

The core logic in the two dax page fault functions is similar. So, move
the logic into a common helper function. Also, to facilitate the
addition of new features, such as CoW, switch-case is no longer used to
handle different iomap types.

Signed-off-by: Shiyang Ruan 
---
  fs/dax.c | 291 +++
  1 file changed, 145 insertions(+), 146 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index 7031e4302b13..33ddad0f3091 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1053,6 +1053,66 @@ static vm_fault_t dax_load_hole(struct xa_state *xas,
return ret;
  }
  
+#ifdef CONFIG_FS_DAX_PMD

+static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
+   struct iomap *iomap, void **entry)
+{
+   struct address_space *mapping = vmf->vma->vm_file->f_mapping;
+   unsigned long pmd_addr = vmf->address & PMD_MASK;
+   struct vm_area_struct *vma = vmf->vma;
+   struct inode *inode = mapping->host;
+   pgtable_t pgtable = NULL;
+   struct page *zero_page;
+   spinlock_t *ptl;
+   pmd_t pmd_entry;
+   pfn_t pfn;
+
+   zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
+
+   if (unlikely(!zero_page))
+   goto fallback;
+
+   pfn = page_to_pfn_t(zero_page);
+   *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
+   DAX_PMD | DAX_ZERO_PAGE, false);
+
+   if (arch_needs_pgtable_deposit()) {
+   pgtable = pte_alloc_one(vma->vm_mm);
+   if (!pgtable)
+   return VM_FAULT_OOM;
+   }
+
+   ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
+   if (!pmd_none(*(vmf->pmd))) {
+   spin_unlock(ptl);
+   goto fallback;
+   }
+
+   if (pgtable) {
+   pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
+   mm_inc_nr_ptes(vma->vm_mm);
+   }
+   pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
+   pmd_entry = pmd_mkhuge(pmd_entry);
+   set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
+   spin_unlock(ptl);
+   trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
+   return VM_FAULT_NOPAGE;
+
+fallback:
+   if (pgtable)
+   pte_free(vma->vm_mm, pgtable);
+   trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
+   return VM_FAULT_FALLBACK;
+}
+#else
+static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
+   struct iomap *iomap, void **entry)
+{
+   return VM_FAULT_FALLBACK;
+}
+#endif /* CONFIG_FS_DAX_PMD */
+
  s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
  {
sector_t sector = iomap_sector(iomap, pos & PAGE_MASK);
@@ -1289,6 +1349,61 @@ static int dax_fault_cow_page(struct vm_fault *vmf, 
struct iomap *iomap,
return 0;
  }
  
+/**

+ * dax_fault_actor - Common actor to handle pfn insertion in PTE/PMD fault.
+ * @vmf:   vm fault instance
+ * @pfnp:  pfn to be returned
+ * @xas:   the dax mapping tree of a file
+ * @entry: an unlocked dax entry to be inserted
+ * @pmd:   distinguish whether it is a pmd fault
+ * @flags: iomap flags
+ * @iomap: from iomap_begin()
+ * @srcmap: from iomap_begin(), not equal to iomap if it is a CoW
+ */
+static vm_fault_t dax_fault_actor(struct vm_fault *vmf, pfn_t *pfnp,
+   struct xa_state *xas, void *entry, bool pmd, unsigned int flags,
+   struct iomap *iomap, struct iomap *srcmap)
+{
+   struct address_space *mapping = vmf->vma->vm_file->f_mapping;
+   size_t size = pmd ? PMD_SIZE : PAGE_SIZE;
+   loff_t pos = (loff_t)xas->xa_offset << PAGE_SHIFT;


shouldn't we use xa_index here for pos ?
(loff_t)xas->xa_index << PAGE_SHIFT;


+   bool write = vmf->flags & FAULT_FLAG_WRITE;
+   bool sync = dax_fault_is_synchronous(flags, vmf->vma, iomap);
+   int err = 0;
+   pfn_t pfn;
+
+   /* if we are reading UNWRITTEN and HOLE, return a hole. */
+   if (!write &&
+   (iomap->type == IOMAP_UNWRITTEN || iomap->type == IOMAP_HOLE)) {
+   if (!pmd)
+   return dax_load_hole(xas, mapping, &entry, vmf);
+   else
+   return dax_pmd_load_hole(xas, vmf, iomap, &entry);
+   }
+
+   if (iomap->type != IOMAP_MAPPED) {
+   WARN_ON_ONCE(1);
+   return VM_FAULT_SIGBUS;
+   }


So now, in case the mapping is not mapped, we always return
VM_FAULT_SIGBUS. But earlier we were only doing WARN_ON_ONCE(1).
Can you please help explain why the change in behavior?





+
+   err = dax_iomap_pfn(iomap, pos, size, &pfn);
+   if (err)
+   return dax_fault_return(err);


Same case here as well. This could return SIGBUS, while earlier we were
only returning FALLBACK — I am not sure why.




+
+   entry = dax_insert_entry(xas, mapping, vmf, entry, pfn, 0,
+

[PATCH v3 02/10] fsdax: Factor helper: dax_fault_actor()

2021-03-18 Thread Shiyang Ruan
The core logic in the two dax page fault functions is similar. So, move
the logic into a common helper function. Also, to facilitate the
addition of new features, such as CoW, switch-case is no longer used to
handle different iomap types.

Signed-off-by: Shiyang Ruan 
---
 fs/dax.c | 291 +++
 1 file changed, 145 insertions(+), 146 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index 7031e4302b13..33ddad0f3091 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1053,6 +1053,66 @@ static vm_fault_t dax_load_hole(struct xa_state *xas,
return ret;
 }
 
+#ifdef CONFIG_FS_DAX_PMD
+static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
+   struct iomap *iomap, void **entry)
+{
+   struct address_space *mapping = vmf->vma->vm_file->f_mapping;
+   unsigned long pmd_addr = vmf->address & PMD_MASK;
+   struct vm_area_struct *vma = vmf->vma;
+   struct inode *inode = mapping->host;
+   pgtable_t pgtable = NULL;
+   struct page *zero_page;
+   spinlock_t *ptl;
+   pmd_t pmd_entry;
+   pfn_t pfn;
+
+   zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
+
+   if (unlikely(!zero_page))
+   goto fallback;
+
+   pfn = page_to_pfn_t(zero_page);
+   *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
+   DAX_PMD | DAX_ZERO_PAGE, false);
+
+   if (arch_needs_pgtable_deposit()) {
+   pgtable = pte_alloc_one(vma->vm_mm);
+   if (!pgtable)
+   return VM_FAULT_OOM;
+   }
+
+   ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
+   if (!pmd_none(*(vmf->pmd))) {
+   spin_unlock(ptl);
+   goto fallback;
+   }
+
+   if (pgtable) {
+   pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
+   mm_inc_nr_ptes(vma->vm_mm);
+   }
+   pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
+   pmd_entry = pmd_mkhuge(pmd_entry);
+   set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
+   spin_unlock(ptl);
+   trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
+   return VM_FAULT_NOPAGE;
+
+fallback:
+   if (pgtable)
+   pte_free(vma->vm_mm, pgtable);
+   trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
+   return VM_FAULT_FALLBACK;
+}
+#else
+static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
+   struct iomap *iomap, void **entry)
+{
+   return VM_FAULT_FALLBACK;
+}
+#endif /* CONFIG_FS_DAX_PMD */
+
 s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
 {
sector_t sector = iomap_sector(iomap, pos & PAGE_MASK);
@@ -1289,6 +1349,61 @@ static int dax_fault_cow_page(struct vm_fault *vmf, 
struct iomap *iomap,
return 0;
 }
 
+/**
+ * dax_fault_actor - Common actor to handle pfn insertion in PTE/PMD fault.
+ * @vmf:   vm fault instance
+ * @pfnp:  pfn to be returned
+ * @xas:   the dax mapping tree of a file
+ * @entry: an unlocked dax entry to be inserted
+ * @pmd:   distinguish whether it is a pmd fault
+ * @flags: iomap flags
+ * @iomap: from iomap_begin()
+ * @srcmap: from iomap_begin(), not equal to iomap if it is a CoW
+ */
+static vm_fault_t dax_fault_actor(struct vm_fault *vmf, pfn_t *pfnp,
+   struct xa_state *xas, void *entry, bool pmd, unsigned int flags,
+   struct iomap *iomap, struct iomap *srcmap)
+{
+   struct address_space *mapping = vmf->vma->vm_file->f_mapping;
+   size_t size = pmd ? PMD_SIZE : PAGE_SIZE;
+   loff_t pos = (loff_t)xas->xa_offset << PAGE_SHIFT;
+   bool write = vmf->flags & FAULT_FLAG_WRITE;
+   bool sync = dax_fault_is_synchronous(flags, vmf->vma, iomap);
+   int err = 0;
+   pfn_t pfn;
+
+   /* if we are reading UNWRITTEN and HOLE, return a hole. */
+   if (!write &&
+   (iomap->type == IOMAP_UNWRITTEN || iomap->type == IOMAP_HOLE)) {
+   if (!pmd)
+   return dax_load_hole(xas, mapping, &entry, vmf);
+   else
+   return dax_pmd_load_hole(xas, vmf, iomap, &entry);
+   }
+
+   if (iomap->type != IOMAP_MAPPED) {
+   WARN_ON_ONCE(1);
+   return VM_FAULT_SIGBUS;
+   }
+
+   err = dax_iomap_pfn(iomap, pos, size, &pfn);
+   if (err)
+   return dax_fault_return(err);
+
+   entry = dax_insert_entry(xas, mapping, vmf, entry, pfn, 0,
+write && !sync);
+
+   if (sync)
+   return dax_fault_synchronous_pfnp(pfnp, pfn);
+
+   if (pmd)
+   return vmf_insert_pfn_pmd(vmf, pfn, write);
+   if (write)
+   return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
+   else
+   return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
+}
+
 static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf,