Re: [PATCH v6] fs: dax: Adding new return type vm_fault_t

2018-04-27 Thread Matthew Wilcox
On Fri, Apr 27, 2018 at 10:54:53AM +0530, Souptick Joarder wrote:
> > I noticed that we have the following status translation now in 4 places in 2
> > files:
> >
> > 	if (err == -ENOMEM)
> > 		return VM_FAULT_OOM;
> > 	if (err < 0 && err != -EBUSY)
> > 		return VM_FAULT_SIGBUS;
> > 	return VM_FAULT_NOPAGE;
> >
> >
> > This happens in vmf_insert_mixed_mkwrite(), vmf_insert_page(),
> > vmf_insert_mixed() and vmf_insert_pfn().
> >
> > I think it'd be a good idea to consolidate this translation into an inline
> > helper, in the spirit of dax_fault_return().  This will ensure that if/when we
> > start changing this status translation, we won't accidentally miss some of the
> > places which would make them get out of sync.  No need to fold this into this
> > patch - it should be a separate change.
> 
> Sure, I will send this as a separate patch.

No, this will entirely go away when vm_insert_foo() is removed.  Here's what
it'll look like instead:

@@ -1703,23 +1703,23 @@ pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
  * old drivers should use this, and they needed to mark their
  * pages reserved for the old functions anyway.
  */
-static int insert_page(struct vm_area_struct *vma, unsigned long addr,
+static vm_fault_t insert_page(struct vm_area_struct *vma, unsigned long addr,
struct page *page, pgprot_t prot)
 {
struct mm_struct *mm = vma->vm_mm;
-   int retval;
+   vm_fault_t ret;
pte_t *pte;
spinlock_t *ptl;
 
-   retval = -EINVAL;
+   ret = VM_FAULT_SIGBUS;
if (PageAnon(page))
goto out;
-   retval = -ENOMEM;
+   ret = VM_FAULT_OOM;
flush_dcache_page(page);
pte = get_locked_pte(mm, addr, &ptl);
if (!pte)
goto out;
-   retval = -EBUSY;
+   ret = 0;
if (!pte_none(*pte))
goto out_unlock;
 
@@ -1729,17 +1729,14 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
page_add_file_rmap(page, false);
set_pte_at(mm, addr, pte, mk_pte(page, prot));
 
-   retval = 0;
-   pte_unmap_unlock(pte, ptl);
-   return retval;
 out_unlock:
pte_unmap_unlock(pte, ptl);
 out:
-   return retval;
+   return ret;
 }
 
 /**
- * vm_insert_page - insert single page into user vma
+ * vmf_insert_page - insert single page into user vma
  * @vma: user vma to map to
  * @addr: target user address of this page
  * @page: source kernel page
@@ -1765,13 +1762,13 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
  * Caller must set VM_MIXEDMAP on vma if it wants to call this
  * function from other places, for example from page-fault handler.
  */
-int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
+vm_fault_t vmf_insert_page(struct vm_area_struct *vma, unsigned long addr,
struct page *page)
 {
if (addr < vma->vm_start || addr >= vma->vm_end)
-   return -EFAULT;
+   return VM_FAULT_SIGBUS;
if (!page_count(page))
-   return -EINVAL;
+   return VM_FAULT_SIGBUS;
if (!(vma->vm_flags & VM_MIXEDMAP)) {
BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
BUG_ON(vma->vm_flags & VM_PFNMAP);
@@ -1779,21 +1776,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
}
return insert_page(vma, addr, page, vma->vm_page_prot);
 }
-EXPORT_SYMBOL(vm_insert_page);
+EXPORT_SYMBOL(vmf_insert_page);
 
-static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
pfn_t pfn, pgprot_t prot, bool mkwrite)
 {
struct mm_struct *mm = vma->vm_mm;
-   int retval;
+   vm_fault_t ret;
pte_t *pte, entry;
spinlock_t *ptl;
 
-   retval = -ENOMEM;
+   ret = VM_FAULT_OOM;
pte = get_locked_pte(mm, addr, &ptl);
if (!pte)
goto out;
-   retval = -EBUSY;
+   ret = VM_FAULT_SIGBUS;
if (!pte_none(*pte)) {
if (mkwrite) {
/*
@@ -1826,20 +1823,20 @@ static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
set_pte_at(mm, addr, pte, entry);
update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
 
-   retval = 0;
+   ret = 0;
 out_unlock:
pte_unmap_unlock(pte, ptl);
 out:
-   return retval;
+   return ret;
 }
 
 /**
- * vm_insert_pfn - insert single pfn into user vma
+ * vmf_insert_pfn - insert single pfn into user vma
  * @vma: user vma to map to
  * @addr: target user address of this page
  * @pfn: source kernel pfn
  *
- * Similar to vm_insert_page, this allows drivers to insert individual pages
+ * Similar to vmf_insert_page(), this allows drivers to insert individual pages
  * they've 

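(To see why this conversion helps drivers, here is a minimal sketch of
a fault handler built on the converted API; my_dev, its pages[] array,
and my_dev_fault() are hypothetical, not from any posted patch:)

	/* The vm_fault_t from vmf_insert_page() is returned directly;
	 * no errno-to-VM_FAULT translation is left in the driver. */
	static vm_fault_t my_dev_fault(struct vm_fault *vmf)
	{
		struct my_dev *dev = vmf->vma->vm_private_data;

		if (vmf->pgoff >= dev->nr_pages)
			return VM_FAULT_SIGBUS;
		return vmf_insert_page(vmf->vma, vmf->address,
				       dev->pages[vmf->pgoff]);
	}
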
Re: [PATCH v6] fs: dax: Adding new return type vm_fault_t

2018-04-26 Thread Souptick Joarder
> I noticed that we have the following status translation now in 4 places in 2
> files:
>
> 	if (err == -ENOMEM)
> 		return VM_FAULT_OOM;
> 	if (err < 0 && err != -EBUSY)
> 		return VM_FAULT_SIGBUS;
> 	return VM_FAULT_NOPAGE;
>
>
> This happens in vmf_insert_mixed_mkwrite(), vmf_insert_page(),
> vmf_insert_mixed() and vmf_insert_pfn().
>
> I think it'd be a good idea to consolidate this translation into an inline
> helper, in the spirit of dax_fault_return().  This will ensure that if/when we
> start changing this status translation, we won't accidentally miss some of the
> places which would make them get out of sync.  No need to fold this into this
> patch - it should be a separate change.

Sure, I will send this as a separate patch.


Re: [PATCH v6] fs: dax: Adding new return type vm_fault_t

2018-04-26 Thread Ross Zwisler
On Tue, Apr 24, 2018 at 10:17:51PM +0530, Souptick Joarder wrote:
> Use the new return type vm_fault_t for fault handlers. For
> now, this just documents that the function returns
> a VM_FAULT value rather than an errno. Once all instances
> are converted, vm_fault_t will become a distinct type.
> 
> commit 1c8f422059ae ("mm: change return type to vm_fault_t")
> 
> There was an existing bug inside dax_load_hole():
> if vm_insert_mixed() failed to allocate a page table,
> we'd return VM_FAULT_NOPAGE instead of VM_FAULT_OOM.
> With the new vmf_insert_mixed() this issue is addressed.
> 
> vm_insert_mixed_mkwrite() is inefficient when it returns
> an error value: the driver has to convert it to a
> vm_fault_t. With the new vmf_insert_mixed_mkwrite()
> this limitation is addressed.
> 
> Signed-off-by: Souptick Joarder 
> Reviewed-by: Jan Kara 
> Reviewed-by: Matthew Wilcox 

Sure, this looks correct.  You can add:

Reviewed-by: Ross Zwisler 

I noticed that we have the following status translation now in 4 places in 2
files:

	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err < 0 && err != -EBUSY)
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;


This happens in vmf_insert_mixed_mkwrite(), vmf_insert_page(),
vmf_insert_mixed() and vmf_insert_pfn().

I think it'd be a good idea to consolidate this translation into an inline
helper, in the spirit of dax_fault_return().  This will ensure that if/when we
start changing this status translation, we won't accidentally miss some of the
places which would make them get out of sync.  No need to fold this into this
patch - it should be a separate change.
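
(For reference, a minimal sketch of such a helper; the name
vmf_translate_error() is hypothetical, not from any posted patch:)

	static inline vm_fault_t vmf_translate_error(int err)
	{
		if (err == -ENOMEM)
			return VM_FAULT_OOM;
		if (err < 0 && err != -EBUSY)
			return VM_FAULT_SIGBUS;
		return VM_FAULT_NOPAGE;
	}

Each of the four call sites above could then become a one-line
"return vmf_translate_error(err);".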


[PATCH v6] fs: dax: Adding new return type vm_fault_t

2018-04-24 Thread Souptick Joarder
Use the new return type vm_fault_t for fault handlers. For
now, this just documents that the function returns
a VM_FAULT value rather than an errno. Once all instances
are converted, vm_fault_t will become a distinct type.

commit 1c8f422059ae ("mm: change return type to vm_fault_t")
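
(A sketch of what the distinct type can look like once the conversion
is complete - e.g. a sparse __bitwise typedef, so that mixing errnos
with VM_FAULT codes produces a warning; the exact form is an
assumption, not part of this patch:)

	typedef __bitwise unsigned int vm_fault_t;

	#define VM_FAULT_OOM	((__force vm_fault_t)0x0001)
	#define VM_FAULT_SIGBUS	((__force vm_fault_t)0x0002)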

There was an existing bug inside dax_load_hole():
if vm_insert_mixed() failed to allocate a page table,
we'd return VM_FAULT_NOPAGE instead of VM_FAULT_OOM.
With the new vmf_insert_mixed() this issue is addressed.

vm_insert_mixed_mkwrite() is inefficient when it returns
an error value: the driver has to convert it to a
vm_fault_t. With the new vmf_insert_mixed_mkwrite()
this limitation is addressed.
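
(A sketch of the difference at a hypothetical call site:)

	/* Before: the driver translates the errno itself */
	err = vm_insert_mixed_mkwrite(vma, addr, pfn);
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err < 0 && err != -EBUSY)
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;

	/* After: the result is already a vm_fault_t */
	return vmf_insert_mixed_mkwrite(vma, addr, pfn);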

Signed-off-by: Souptick Joarder 
Reviewed-by: Jan Kara 
Reviewed-by: Matthew Wilcox 
---
v2: vm_insert_mixed_mkwrite is replaced by new
vmf_insert_mixed_mkwrite

v3: Addressed Matthew's comment. One patch which
changes both at the same time. The history
should be bisectable so that it compiles and
works at every point.

v4: Updated the change log

v5: Updated the change log

v6: Added comment in source file

 fs/dax.c| 78 +
 include/linux/dax.h |  4 +--
 include/linux/mm.h  |  4 +--
 mm/memory.c | 21 ---
 4 files changed, 58 insertions(+), 49 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index aaec72de..821986c 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -905,12 +905,12 @@ static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
  * If this page is ever written to we will re-fault and change the mapping to
  * point to real DAX storage instead.
  */
-static int dax_load_hole(struct address_space *mapping, void *entry,
+static vm_fault_t dax_load_hole(struct address_space *mapping, void *entry,
 struct vm_fault *vmf)
 {
struct inode *inode = mapping->host;
unsigned long vaddr = vmf->address;
-   int ret = VM_FAULT_NOPAGE;
+   vm_fault_t ret = VM_FAULT_NOPAGE;
struct page *zero_page;
void *entry2;
pfn_t pfn;
@@ -929,7 +929,7 @@ static int dax_load_hole(struct address_space *mapping, void *entry,
goto out;
}
 
-   vm_insert_mixed(vmf->vma, vaddr, pfn);
+   ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
 out:
trace_dax_load_hole(inode, vmf, ret);
return ret;
@@ -1112,7 +1112,7 @@ int __dax_zero_page_range(struct block_device *bdev,
 }
 EXPORT_SYMBOL_GPL(dax_iomap_rw);
 
-static int dax_fault_return(int error)
+static vm_fault_t dax_fault_return(int error)
 {
if (error == 0)
return VM_FAULT_NOPAGE;
@@ -1132,7 +1132,7 @@ static bool dax_fault_is_synchronous(unsigned long flags,
&& (iomap->flags & IOMAP_F_DIRTY);
 }
 
-static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
+static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
   int *iomap_errp, const struct iomap_ops *ops)
 {
struct vm_area_struct *vma = vmf->vma;
@@ -1145,18 +1145,18 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
int error, major = 0;
bool write = vmf->flags & FAULT_FLAG_WRITE;
bool sync;
-   int vmf_ret = 0;
+   vm_fault_t ret = 0;
void *entry;
pfn_t pfn;
 
-   trace_dax_pte_fault(inode, vmf, vmf_ret);
+   trace_dax_pte_fault(inode, vmf, ret);
/*
 * Check whether offset isn't beyond end of file now. Caller is supposed
 * to hold locks serializing us with truncate / punch hole so this is
 * a reliable test.
 */
if (pos >= i_size_read(inode)) {
-   vmf_ret = VM_FAULT_SIGBUS;
+   ret = VM_FAULT_SIGBUS;
goto out;
}
 
@@ -1165,7 +1165,7 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 
entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
if (IS_ERR(entry)) {
-   vmf_ret = dax_fault_return(PTR_ERR(entry));
+   ret = dax_fault_return(PTR_ERR(entry));
goto out;
}
 
@@ -1176,7 +1176,7 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 * retried.
 */
if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
-   vmf_ret = VM_FAULT_NOPAGE;
+   ret = VM_FAULT_NOPAGE;
goto unlock_entry;
}
 
@@ -1189,7 +1189,7 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
if (iomap_errp)
*iomap_errp = error;
if (error) {
-   vmf_ret = dax_fault_return(error);
+   ret = dax_fault_return(error);
goto unlock_entry;
}
if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
@@ -1219,9 +1219,9 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
	goto