Re: [PATCH 07/14] mm/hmm: use uint64_t for HMM pfn instead of defining hmm_pfn_t to ulong

2018-03-16 Thread John Hubbard
On 03/16/2018 12:14 PM, jgli...@redhat.com wrote:
> From: Jérôme Glisse 
> 

Hi Jerome,

This one looks great. A couple of trivial typo fixes are listed below.

You can add:

Reviewed-by: John Hubbard 

> All device driver we care about are using 64bits page table entry. In
> order to match this and to avoid useless define convert all HMM pfn to
> directly use uint64_t. It is a first step on the road to allow driver
> to directly use pfn value return by HMM (saving memory and CPU cycles
> use for convertion between the two).

  used for conversion
> 
> Signed-off-by: Jérôme Glisse 
> Cc: Evgeny Baskakov 
> Cc: Ralph Campbell 
> Cc: Mark Hairgrove 
> Cc: John Hubbard 
> ---
>  include/linux/hmm.h | 46 +-
>  mm/hmm.c            | 26 +-
>  2 files changed, 34 insertions(+), 38 deletions(-)
> 



> @@ -104,14 +100,14 @@ typedef unsigned long hmm_pfn_t;
>  #define HMM_PFN_SHIFT 6
>  
>  /*
> - * hmm_pfn_t_to_page() - return struct page pointed to by a valid hmm_pfn_t
> - * @pfn: hmm_pfn_t to convert to struct page
> - * Returns: struct page pointer if pfn is a valid hmm_pfn_t, NULL otherwise
> + * hmm_pfn_to_page() - return struct page pointed to by a valid HMM pfn
> + * @pfn: HMM pfn value to get corresponding struct page from
> + * Returns: struct page pointer if pfn is a valid HMM pfn, NULL otherwise
>   *
> - * If the hmm_pfn_t is valid (ie valid flag set) then return the struct page
> - * matching the pfn value stored in the hmm_pfn_t. Otherwise return NULL.
> + * If the uint64_t is valid (ie valid flag set) then return the struct page

  If the HMM pfn is valid



>  
> @@ -634,8 +634,8 @@ EXPORT_SYMBOL(hmm_vma_range_done);
>   * This is similar to a regular CPU page fault except that it will not trigger
>   * any memory migration if the memory being faulted is not accessible by CPUs.
>   *
> - * On error, for one virtual address in the range, the function will set the
> - * hmm_pfn_t error flag for the corresponding pfn entry.
> + * On error, for one virtual address in the range, the function will mark the
> + * correspond HMM pfn entry with error flag.

  corresponding HMM pfn entry with an error flag.

thanks,
-- 
John Hubbard
NVIDIA
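
[A minimal standalone sketch of the encoding this patch moves to plain
uint64_t, for readers skimming the thread: it reuses only the HMM_PFN_*
layout visible in the diff below and is ordinary userspace C, not the
kernel code itself.]

#include <stdint.h>
#include <stdio.h>

#define HMM_PFN_VALID (1 << 0)
#define HMM_PFN_WRITE (1 << 1)
#define HMM_PFN_SHIFT 6

/* Pack a raw pfn into the flag-carrying 64-bit value. */
static uint64_t hmm_pfn_from_pfn(unsigned long pfn)
{
	return ((uint64_t)pfn << HMM_PFN_SHIFT) | HMM_PFN_VALID;
}

/* Extract the raw pfn again; -1UL signals an invalid entry. */
static unsigned long hmm_pfn_to_pfn(uint64_t pfn)
{
	if (!(pfn & HMM_PFN_VALID))
		return -1UL;
	return (unsigned long)(pfn >> HMM_PFN_SHIFT);
}

int main(void)
{
	uint64_t entry = hmm_pfn_from_pfn(0x1234) | HMM_PFN_WRITE;

	printf("pfn=0x%lx writable=%d\n",
	       hmm_pfn_to_pfn(entry), !!(entry & HMM_PFN_WRITE));
	return 0;
}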



[PATCH 07/14] mm/hmm: use uint64_t for HMM pfn instead of defining hmm_pfn_t to ulong

2018-03-16 Thread jglisse
From: Jérôme Glisse 

All device driver we care about are using 64bits page table entry. In
order to match this and to avoid useless define convert all HMM pfn to
directly use uint64_t. It is a first step on the road to allow driver
to directly use pfn value return by HMM (saving memory and CPU cycles
use for convertion between the two).

Signed-off-by: Jérôme Glisse 
Cc: Evgeny Baskakov 
Cc: Ralph Campbell 
Cc: Mark Hairgrove 
Cc: John Hubbard 
---
 include/linux/hmm.h | 46 +-
 mm/hmm.c            | 26 +-
 2 files changed, 34 insertions(+), 38 deletions(-)

diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index 4bdc58ffe9f3..78b3ed6d7977 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -80,8 +80,6 @@
 struct hmm;
 
 /*
- * hmm_pfn_t - HMM uses its own pfn type to keep several flags per page
- *
  * Flags:
  * HMM_PFN_VALID: pfn is valid
  * HMM_PFN_WRITE: CPU page table has write permission set
@@ -93,8 +91,6 @@ struct hmm;
  *  set and the pfn value is undefined.
  * HMM_PFN_DEVICE_UNADDRESSABLE: unaddressable device memory (ZONE_DEVICE)
  */
-typedef unsigned long hmm_pfn_t;
-
 #define HMM_PFN_VALID (1 << 0)
 #define HMM_PFN_WRITE (1 << 1)
 #define HMM_PFN_ERROR (1 << 2)
@@ -104,14 +100,14 @@ typedef unsigned long hmm_pfn_t;
 #define HMM_PFN_SHIFT 6
 
 /*
- * hmm_pfn_t_to_page() - return struct page pointed to by a valid hmm_pfn_t
- * @pfn: hmm_pfn_t to convert to struct page
- * Returns: struct page pointer if pfn is a valid hmm_pfn_t, NULL otherwise
+ * hmm_pfn_to_page() - return struct page pointed to by a valid HMM pfn
+ * @pfn: HMM pfn value to get corresponding struct page from
+ * Returns: struct page pointer if pfn is a valid HMM pfn, NULL otherwise
  *
- * If the hmm_pfn_t is valid (ie valid flag set) then return the struct page
- * matching the pfn value stored in the hmm_pfn_t. Otherwise return NULL.
+ * If the uint64_t is valid (ie valid flag set) then return the struct page
+ * matching the pfn value stored in the HMM pfn. Otherwise return NULL.
  */
-static inline struct page *hmm_pfn_t_to_page(hmm_pfn_t pfn)
+static inline struct page *hmm_pfn_to_page(uint64_t pfn)
 {
 	if (!(pfn & HMM_PFN_VALID))
 		return NULL;
@@ -119,11 +115,11 @@ static inline struct page *hmm_pfn_t_to_page(hmm_pfn_t pfn)
 }
 
 /*
- * hmm_pfn_t_to_pfn() - return pfn value store in a hmm_pfn_t
- * @pfn: hmm_pfn_t to extract pfn from
- * Returns: pfn value if hmm_pfn_t is valid, -1UL otherwise
+ * hmm_pfn_to_pfn() - return pfn value store in a HMM pfn
+ * @pfn: HMM pfn value to extract pfn from
+ * Returns: pfn value if HMM pfn is valid, -1UL otherwise
  */
-static inline unsigned long hmm_pfn_t_to_pfn(hmm_pfn_t pfn)
+static inline unsigned long hmm_pfn_to_pfn(uint64_t pfn)
 {
 	if (!(pfn & HMM_PFN_VALID))
 		return -1UL;
@@ -131,21 +127,21 @@ static inline unsigned long hmm_pfn_t_to_pfn(hmm_pfn_t pfn)
 }
 
 /*
- * hmm_pfn_t_from_page() - create a valid hmm_pfn_t value from struct page
- * @page: struct page pointer for which to create the hmm_pfn_t
- * Returns: valid hmm_pfn_t for the page
+ * hmm_pfn_from_page() - create a valid HMM pfn value from struct page
+ * @page: struct page pointer for which to create the HMM pfn
+ * Returns: valid HMM pfn for the page
  */
-static inline hmm_pfn_t hmm_pfn_t_from_page(struct page *page)
+static inline uint64_t hmm_pfn_from_page(struct page *page)
 {
 	return (page_to_pfn(page) << HMM_PFN_SHIFT) | HMM_PFN_VALID;
 }
 
 /*
- * hmm_pfn_t_from_pfn() - create a valid hmm_pfn_t value from pfn
- * @pfn: pfn value for which to create the hmm_pfn_t
- * Returns: valid hmm_pfn_t for the pfn
+ * hmm_pfn_from_pfn() - create a valid HMM pfn value from pfn
+ * @pfn: pfn value for which to create the HMM pfn
+ * Returns: valid HMM pfn for the pfn
  */
-static inline hmm_pfn_t hmm_pfn_t_from_pfn(unsigned long pfn)
+static inline uint64_t hmm_pfn_from_pfn(unsigned long pfn)
 {
 	return (pfn << HMM_PFN_SHIFT) | HMM_PFN_VALID;
 }
@@ -284,7 +280,7 @@ struct hmm_range {
 	struct list_head	list;
 	unsigned long		start;
 	unsigned long		end;
-	hmm_pfn_t		*pfns;
+	uint64_t		*pfns;
 	bool			valid;
 };
 
@@ -307,7 +303,7 @@ bool hmm_vma_range_done(struct hmm_range *range);
 
 /*
  * Fault memory on behalf of device driver. Unlike handle_mm_fault(), this will
- * not migrate any device memory back to system memory. The hmm_pfn_t array will
+ * not migrate any device memory back to system memory. The HMM pfn array will
  * be updated with the fault result and current snapshot of the CPU page table
  * for the range.
  *
@@ -316,7 +312,7 @@ bool hmm_vma_range_done(struct hmm_range *range);
  * function returns -EAGAIN.
  *
 * Return value does not reflect if the fault was successful for every single
- * address or not. Therefore, the caller must to inspect the
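
[For driver authors following along, a rough caller-side sketch of the
per-entry error check described in the comment above. The hmm_range fields
and the HMM_PFN_ERROR flag come from the diff; the hmm_vma_fault() name and
its signature are assumptions, since the excerpt cuts off before the
function itself.]

static int fault_and_check(struct hmm_range *range)
{
	unsigned long i, npages = (range->end - range->start) >> PAGE_SHIFT;
	int ret;

	ret = hmm_vma_fault(range, true);	/* assumed signature */
	if (ret)
		return ret;

	/*
	 * The return value alone does not say which addresses faulted
	 * successfully, so walk the uint64_t pfn array and look for
	 * entries flagged with HMM_PFN_ERROR.
	 */
	for (i = 0; i < npages; i++)
		if (range->pfns[i] & HMM_PFN_ERROR)
			return -EFAULT;

	return 0;
}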