[PATCH v2 hmm 2/9] mm/hmm: return the fault type from hmm_pte_need_fault()

2020-03-30 Thread Jason Gunthorpe
From: Jason Gunthorpe 

Using two bools instead of a flags return value is not necessary and leads
to bugs. Returning a value is easier for the compiler to check and easier
to pass around in the code flow.

Convert the two bools into flags and push the change to all callers.

Signed-off-by: Jason Gunthorpe 
---
 mm/hmm.c | 183 ---
 1 file changed, 81 insertions(+), 102 deletions(-)

diff --git a/mm/hmm.c b/mm/hmm.c
index 3a2610e0713329..d208ddd351066f 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -32,6 +32,12 @@ struct hmm_vma_walk {
	unsigned int		flags;
 };
 
+enum {
+   HMM_NEED_FAULT = 1 << 0,
+   HMM_NEED_WRITE_FAULT = 1 << 1,
+   HMM_NEED_ALL_BITS = HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT,
+};
+
 static int hmm_pfns_fill(unsigned long addr, unsigned long end,
struct hmm_range *range, enum hmm_pfn_value_e value)
 {
@@ -49,8 +55,7 @@ static int hmm_pfns_fill(unsigned long addr, unsigned long end,
  * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
  * @addr: range virtual start address (inclusive)
  * @end: range virtual end address (exclusive)
- * @fault: should we fault or not ?
- * @write_fault: write fault ?
+ * @required_fault: HMM_NEED_* flags
  * @walk: mm_walk structure
  * Return: -EBUSY after page fault, or page fault error
  *
@@ -58,8 +63,7 @@ static int hmm_pfns_fill(unsigned long addr, unsigned long end,
  * or whenever there is no page directory covering the virtual address range.
  */
 static int hmm_vma_fault(unsigned long addr, unsigned long end,
- bool fault, bool write_fault,
- struct mm_walk *walk)
+unsigned int required_fault, struct mm_walk *walk)
 {
struct hmm_vma_walk *hmm_vma_walk = walk->private;
struct hmm_range *range = hmm_vma_walk->range;
@@ -68,13 +72,13 @@ static int hmm_vma_fault(unsigned long addr, unsigned long end,
unsigned long i = (addr - range->start) >> PAGE_SHIFT;
unsigned int fault_flags = FAULT_FLAG_REMOTE;
 
-   WARN_ON_ONCE(!fault && !write_fault);
+   WARN_ON_ONCE(!required_fault);
hmm_vma_walk->last = addr;
 
if (!vma)
goto out_error;
 
-   if (write_fault) {
+   if (required_fault & HMM_NEED_WRITE_FAULT) {
if (!(vma->vm_flags & VM_WRITE))
return -EPERM;
fault_flags |= FAULT_FLAG_WRITE;
@@ -91,14 +95,13 @@ static int hmm_vma_fault(unsigned long addr, unsigned long end,
return -EFAULT;
 }
 
-static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
- uint64_t pfns, uint64_t cpu_flags,
- bool *fault, bool *write_fault)
+static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
+  uint64_t pfns, uint64_t cpu_flags)
 {
struct hmm_range *range = hmm_vma_walk->range;
 
if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT)
-   return;
+   return 0;
 
/*
 * So we not only consider the individual per page request we also
@@ -114,37 +117,37 @@ static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
 
/* We aren't ask to do anything ... */
if (!(pfns & range->flags[HMM_PFN_VALID]))
-   return;
+   return 0;
 
-   /* If CPU page table is not valid then we need to fault */
-   *fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
/* Need to write fault ? */
if ((pfns & range->flags[HMM_PFN_WRITE]) &&
-   !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
-   *write_fault = true;
-   *fault = true;
-   }
+   !(cpu_flags & range->flags[HMM_PFN_WRITE]))
+   return HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT;
+
+   /* If CPU page table is not valid then we need to fault */
+   if (!(cpu_flags & range->flags[HMM_PFN_VALID]))
+   return HMM_NEED_FAULT;
+   return 0;
 }
 
-static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
-const uint64_t *pfns, unsigned long npages,
-uint64_t cpu_flags, bool *fault,
-bool *write_fault)
+static unsigned int
+hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
+const uint64_t *pfns, unsigned long npages,
+uint64_t cpu_flags)
 {
+   unsigned int required_fault = 0;
unsigned long i;
 
-   if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT) {
-   *fault = *write_fault = false;
-   return;
-   }
+   if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT)
+   return 0;
 
-   *fault = *write_fault = false;
for (i = 0; i < npages; ++i) {
-   
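
As a minimal, standalone illustration of the conversion the commit message
describes (plain userspace C, not kernel code -- the REQ_* request bits and
the need_fault_* helpers are hypothetical stand-ins for the HMM_PFN_* flags
and hmm_pte_need_fault()):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the HMM_PFN_VALID / HMM_PFN_WRITE request bits. */
#define REQ_VALID (1ULL << 0)
#define REQ_WRITE (1ULL << 1)

/* Flag bits mirroring the HMM_NEED_* enum added by the patch. */
enum {
	NEED_FAULT = 1 << 0,
	NEED_WRITE_FAULT = 1 << 1,
};

/*
 * Old style: two bool out-parameters.  Every caller must remember to
 * initialize both, and the compiler cannot tell when one is left unset.
 */
static void need_fault_bools(uint64_t req, uint64_t cpu,
			     bool *fault, bool *write_fault)
{
	*fault = *write_fault = false;
	if ((req & REQ_WRITE) && !(cpu & REQ_WRITE)) {
		*fault = *write_fault = true;
		return;
	}
	if (!(cpu & REQ_VALID))
		*fault = true;
}

/*
 * New style: a single returned flags word.  Per-page results can simply be
 * OR'd together, and a missing return draws a compiler warning.
 */
static unsigned int need_fault_flags(uint64_t req, uint64_t cpu)
{
	if ((req & REQ_WRITE) && !(cpu & REQ_WRITE))
		return NEED_FAULT | NEED_WRITE_FAULT;
	if (!(cpu & REQ_VALID))
		return NEED_FAULT;
	return 0;
}

int main(void)
{
	bool fault, write_fault;
	unsigned int flags;

	/* Page requested writable but currently mapped read-only. */
	need_fault_bools(REQ_VALID | REQ_WRITE, REQ_VALID, &fault, &write_fault);
	flags = need_fault_flags(REQ_VALID | REQ_WRITE, REQ_VALID);

	printf("bools: fault=%d write=%d\n", fault, write_fault);
	printf("flags: fault=%d write=%d\n",
	       !!(flags & NEED_FAULT), !!(flags & NEED_WRITE_FAULT));
	return 0;
}

Returning flags is also what lets hmm_range_need_fault() build up and return
a single required_fault value instead of threading two bool pointers through
every call.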

Re: [PATCH v2 hmm 2/9] mm/hmm: return the fault type from hmm_pte_need_fault()

2020-03-25 Thread Jason Gunthorpe
On Tue, Mar 24, 2020 at 08:27:12AM +0100, Christoph Hellwig wrote:
> On Mon, Mar 23, 2020 at 10:14:50PM -0300, Jason Gunthorpe wrote:
> > +enum {
> > +   HMM_NEED_FAULT = 1 << 0,
> > +   HMM_NEED_WRITE_FAULT = HMM_NEED_FAULT | (1 << 1),
> > +   HMM_NEED_ALL_BITS = HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT,
> 
> I have to say I find the compound version of HMM_NEED_WRITE_FAULT
> way harder to understand than the logic in the previous version,
> and would prefer keeping separate bits here.
> 
> Mostly because of statements like this:
> 
> > +   if ((required_fault & HMM_NEED_WRITE_FAULT) == HMM_NEED_WRITE_FAULT) {
> 
> which seems rather weird.

Okay, I checked it over, and there is one weird statement above but
only one place that |'s them together, so it is overall simpler to
split the enum.

I'll keep the HMM_NEED_ALL_BITS, I think that purpose is clear enough.

Thanks,
Jason
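
To make the point concrete, here is a small standalone sketch (the V2_*/V3_*
names are mine, not from the patch) of why the compound definition forces the
full-value compare, while the independent bits used in the 2020-03-30 posting
above allow a plain mask test:

#include <stdio.h>

/* v2 definition under discussion: the write flag is a compound value. */
enum {
	V2_NEED_FAULT = 1 << 0,
	V2_NEED_WRITE_FAULT = V2_NEED_FAULT | (1 << 1),	/* 0x3 */
};

/* Definition after the review: independent bits. */
enum {
	V3_NEED_FAULT = 1 << 0,
	V3_NEED_WRITE_FAULT = 1 << 1,
};

int main(void)
{
	unsigned int v2 = V2_NEED_FAULT;	/* a plain read fault */
	unsigned int v3 = V3_NEED_FAULT;

	/*
	 * With the compound value, "& V2_NEED_WRITE_FAULT" is already true
	 * for a read fault, so the code must compare against the full value.
	 */
	printf("compound, mask test:    %d\n", !!(v2 & V2_NEED_WRITE_FAULT));	/* 1, misleading */
	printf("compound, full compare: %d\n",
	       (v2 & V2_NEED_WRITE_FAULT) == V2_NEED_WRITE_FAULT);		/* 0 */

	/* With independent bits a simple mask test is enough. */
	printf("separate bits:          %d\n", !!(v3 & V3_NEED_WRITE_FAULT));	/* 0 */
	return 0;
}

With independent bits, hmm_pte_need_fault() returns
HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT for a write fault and hmm_vma_fault()
can test HMM_NEED_WRITE_FAULT with a plain &, as the 2020-03-30 posting above
does.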


[PATCH v2 hmm 2/9] mm/hmm: return the fault type from hmm_pte_need_fault()

2020-03-24 Thread Jason Gunthorpe
From: Jason Gunthorpe 

Using two bools instead of a flags return value is not necessary and leads
to bugs. Returning a value is easier for the compiler to check and easier
to pass around in the code flow.

Convert the two bools into flags and push the change to all callers.

Signed-off-by: Jason Gunthorpe 
---
 mm/hmm.c | 183 ---
 1 file changed, 81 insertions(+), 102 deletions(-)

diff --git a/mm/hmm.c b/mm/hmm.c
index 3a2610e0713329..2a0eda1534bcda 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -32,6 +32,12 @@ struct hmm_vma_walk {
	unsigned int		flags;
 };
 
+enum {
+   HMM_NEED_FAULT = 1 << 0,
+   HMM_NEED_WRITE_FAULT = HMM_NEED_FAULT | (1 << 1),
+   HMM_NEED_ALL_BITS = HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT,
+};
+
 static int hmm_pfns_fill(unsigned long addr, unsigned long end,
struct hmm_range *range, enum hmm_pfn_value_e value)
 {
@@ -49,8 +55,7 @@ static int hmm_pfns_fill(unsigned long addr, unsigned long end,
  * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
  * @addr: range virtual start address (inclusive)
  * @end: range virtual end address (exclusive)
- * @fault: should we fault or not ?
- * @write_fault: write fault ?
+ * @required_fault: HMM_NEED_* flags
  * @walk: mm_walk structure
  * Return: -EBUSY after page fault, or page fault error
  *
@@ -58,8 +63,7 @@ static int hmm_pfns_fill(unsigned long addr, unsigned long end,
  * or whenever there is no page directory covering the virtual address range.
  */
 static int hmm_vma_fault(unsigned long addr, unsigned long end,
- bool fault, bool write_fault,
- struct mm_walk *walk)
+unsigned int required_fault, struct mm_walk *walk)
 {
struct hmm_vma_walk *hmm_vma_walk = walk->private;
struct hmm_range *range = hmm_vma_walk->range;
@@ -68,13 +72,13 @@ static int hmm_vma_fault(unsigned long addr, unsigned long end,
unsigned long i = (addr - range->start) >> PAGE_SHIFT;
unsigned int fault_flags = FAULT_FLAG_REMOTE;
 
-   WARN_ON_ONCE(!fault && !write_fault);
+   WARN_ON_ONCE(!required_fault);
hmm_vma_walk->last = addr;
 
if (!vma)
goto out_error;
 
-   if (write_fault) {
+   if ((required_fault & HMM_NEED_WRITE_FAULT) == HMM_NEED_WRITE_FAULT) {
if (!(vma->vm_flags & VM_WRITE))
return -EPERM;
fault_flags |= FAULT_FLAG_WRITE;
@@ -91,14 +95,13 @@ static int hmm_vma_fault(unsigned long addr, unsigned long end,
return -EFAULT;
 }
 
-static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
- uint64_t pfns, uint64_t cpu_flags,
- bool *fault, bool *write_fault)
+static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
+  uint64_t pfns, uint64_t cpu_flags)
 {
struct hmm_range *range = hmm_vma_walk->range;
 
if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT)
-   return;
+   return 0;
 
/*
 * So we not only consider the individual per page request we also
@@ -114,37 +117,37 @@ static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
 
/* We aren't ask to do anything ... */
if (!(pfns & range->flags[HMM_PFN_VALID]))
-   return;
+   return 0;
 
-   /* If CPU page table is not valid then we need to fault */
-   *fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
/* Need to write fault ? */
if ((pfns & range->flags[HMM_PFN_WRITE]) &&
-   !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
-   *write_fault = true;
-   *fault = true;
-   }
+   !(cpu_flags & range->flags[HMM_PFN_WRITE]))
+   return HMM_NEED_WRITE_FAULT;
+
+   /* If CPU page table is not valid then we need to fault */
+   if (!(cpu_flags & range->flags[HMM_PFN_VALID]))
+   return HMM_NEED_FAULT;
+   return 0;
 }
 
-static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
-const uint64_t *pfns, unsigned long npages,
-uint64_t cpu_flags, bool *fault,
-bool *write_fault)
+static unsigned int
+hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
+const uint64_t *pfns, unsigned long npages,
+uint64_t cpu_flags)
 {
+   unsigned int required_fault = 0;
unsigned long i;
 
-   if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT) {
-   *fault = *write_fault = false;
-   return;
-   }
+   if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT)
+   return 0;
 
-   *fault = *write_fault = false;
for (i = 0; i < npages; ++i) {