[PATCH 1/5] userfaultfd: use ENOENT instead of EFAULT if the atomic copy user fails

2018-11-26 Thread Andrea Arcangeli
We internally used EFAULT to communicate with the caller, switch to
ENOENT, so EFAULT can be used as a non internal retval.

Reviewed-by: Mike Rapoport 
Reviewed-by: Hugh Dickins 
Cc: stable@vger.kernel.org
Fixes: 4c27fe4c4c84 ("userfaultfd: shmem: add shmem_mcopy_atomic_pte for userfaultfd support")
Signed-off-by: Andrea Arcangeli 
---
 mm/hugetlb.c | 2 +-
 mm/shmem.c   | 2 +-
 mm/userfaultfd.c | 6 +++---
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7f2a28ab46d5..705a3e9cc910 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4080,7 +4080,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 
/* fallback to copy_from_user outside mmap_sem */
if (unlikely(ret)) {
-   ret = -EFAULT;
+   ret = -ENOENT;
*pagep = page;
/* don't free the page */
goto out;
diff --git a/mm/shmem.c b/mm/shmem.c
index d44991ea5ed4..353287412c25 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2236,7 +2236,7 @@ static int shmem_mfill_atomic_pte(struct mm_struct 
*dst_mm,
*pagep = page;
shmem_inode_unacct_blocks(inode, 1);
/* don't free the page */
-   return -EFAULT;
+   return -ENOENT;
}
} else {/* mfill_zeropage_atomic */
clear_highpage(page);
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 5029f241908f..46c8949e5f8f 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -48,7 +48,7 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
 
/* fallback to copy_from_user outside mmap_sem */
if (unlikely(ret)) {
-   ret = -EFAULT;
+   ret = -ENOENT;
*pagep = page;
/* don't free the page */
goto out;
@@ -274,7 +274,7 @@ static __always_inline ssize_t 
__mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
 
cond_resched();
 
-   if (unlikely(err == -EFAULT)) {
+   if (unlikely(err == -ENOENT)) {
up_read(&dst_mm->mmap_sem);
BUG_ON(!page);
 
@@ -530,7 +530,7 @@ static __always_inline ssize_t __mcopy_atomic(struct 
mm_struct *dst_mm,
   src_addr, &page, zeropage);
cond_resched();
 
-   if (unlikely(err == -EFAULT)) {
+   if (unlikely(err == -ENOENT)) {
void *page_kaddr;
 
up_read(&dst_mm->mmap_sem);


[PATCH 1/5] userfaultfd: use ENOENT instead of EFAULT if the atomic copy user fails

2018-11-26 Thread Andrea Arcangeli
We internally used EFAULT to communicate with the caller, switch to
ENOENT, so EFAULT can be used as a non internal retval.

Reviewed-by: Mike Rapoport 
Reviewed-by: Hugh Dickins 
Cc: stable@vger.kernel.org
Fixes: 4c27fe4c4c84 ("userfaultfd: shmem: add shmem_mcopy_atomic_pte for userfaultfd support")
Signed-off-by: Andrea Arcangeli 
---
 mm/hugetlb.c | 2 +-
 mm/shmem.c   | 2 +-
 mm/userfaultfd.c | 6 +++---
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7f2a28ab46d5..705a3e9cc910 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4080,7 +4080,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 
/* fallback to copy_from_user outside mmap_sem */
if (unlikely(ret)) {
-   ret = -EFAULT;
+   ret = -ENOENT;
*pagep = page;
/* don't free the page */
goto out;
diff --git a/mm/shmem.c b/mm/shmem.c
index d44991ea5ed4..353287412c25 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2236,7 +2236,7 @@ static int shmem_mfill_atomic_pte(struct mm_struct 
*dst_mm,
*pagep = page;
shmem_inode_unacct_blocks(inode, 1);
/* don't free the page */
-   return -EFAULT;
+   return -ENOENT;
}
} else {/* mfill_zeropage_atomic */
clear_highpage(page);
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 5029f241908f..46c8949e5f8f 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -48,7 +48,7 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
 
/* fallback to copy_from_user outside mmap_sem */
if (unlikely(ret)) {
-   ret = -EFAULT;
+   ret = -ENOENT;
*pagep = page;
/* don't free the page */
goto out;
@@ -274,7 +274,7 @@ static __always_inline ssize_t 
__mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
 
cond_resched();
 
-   if (unlikely(err == -EFAULT)) {
+   if (unlikely(err == -ENOENT)) {
up_read(&dst_mm->mmap_sem);
BUG_ON(!page);
 
@@ -530,7 +530,7 @@ static __always_inline ssize_t __mcopy_atomic(struct 
mm_struct *dst_mm,
   src_addr, &page, zeropage);
cond_resched();
 
-   if (unlikely(err == -EFAULT)) {
+   if (unlikely(err == -ENOENT)) {
void *page_kaddr;
 
up_read(&dst_mm->mmap_sem);