On Fri 21-07-17 16:39:55, Ross Zwisler wrote:
> Now that we no longer insert struct page pointers in DAX radix trees the
> page cache code no longer needs to know anything about DAX exceptional
> entries.  Move all the DAX exceptional entry definitions from dax.h to
> fs/dax.c.
> 
> Signed-off-by: Ross Zwisler <ross.zwis...@linux.intel.com>
> Suggested-by: Jan Kara <j...@suse.cz>

Looks good. You can add:

Reviewed-by: Jan Kara <j...@suse.cz>
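
As an aside, for anyone who wants to see the entry encoding in action: below
is a minimal userspace sketch of the two helpers being moved. It is not
kernel code; it assumes RADIX_TREE_EXCEPTIONAL_ENTRY == 2 and
RADIX_TREE_EXCEPTIONAL_SHIFT == 2, their values in radix-tree.h at the time,
and just shows that the sector round-trips through the high bits while the
low bits carry the lock and size flags.

#include <stdio.h>

typedef unsigned long long sector_t;

/* Assumed values from <linux/radix-tree.h> at the time of this patch. */
#define RADIX_TREE_EXCEPTIONAL_ENTRY    2UL
#define RADIX_TREE_EXCEPTIONAL_SHIFT    2

/* Mirrors the definitions this patch moves into fs/dax.c. */
#define RADIX_DAX_SHIFT         (RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
#define RADIX_DAX_ENTRY_LOCK    (1UL << RADIX_TREE_EXCEPTIONAL_SHIFT)
#define RADIX_DAX_PMD           (1UL << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
#define RADIX_DAX_ZERO_PAGE     (1UL << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
#define RADIX_DAX_EMPTY         (1UL << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))

/* Pack sector + flags into a locked exceptional entry. */
static void *dax_radix_locked_entry(sector_t sector, unsigned long flags)
{
	return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
			((unsigned long)sector << RADIX_DAX_SHIFT) |
			RADIX_DAX_ENTRY_LOCK);
}

/* Recover the sector from the bits above RADIX_DAX_SHIFT. */
static unsigned long dax_radix_sector(void *entry)
{
	return (unsigned long)entry >> RADIX_DAX_SHIFT;
}

int main(void)
{
	void *entry = dax_radix_locked_entry(0x1234, RADIX_DAX_PMD);

	printf("sector: %#lx\n", dax_radix_sector(entry));        /* 0x1234 */
	printf("locked: %d\n",
	       !!((unsigned long)entry & RADIX_DAX_ENTRY_LOCK));  /* 1 */
	printf("pmd:    %d\n",
	       !!((unsigned long)entry & RADIX_DAX_PMD));         /* 1 */
	return 0;
}

The point of the layout is that the sector always lives above
RADIX_DAX_SHIFT, so PTE and PMD entries share one encoding and only the flag
bits below differ.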

                                                                Honza


> ---
>  fs/dax.c            | 34 ++++++++++++++++++++++++++++++++++
>  include/linux/dax.h | 41 -----------------------------------------
>  2 files changed, 34 insertions(+), 41 deletions(-)
> 
> diff --git a/fs/dax.c b/fs/dax.c
> index 0e27d90..e7acc45 100644
> --- a/fs/dax.c
> +++ b/fs/dax.c
> @@ -54,6 +54,40 @@ static int __init init_dax_wait_table(void)
>  }
>  fs_initcall(init_dax_wait_table);
>  
> +/*
> + * We use lowest available bit in exceptional entry for locking, one bit for
> + * the entry size (PMD) and two more to tell us if the entry is a zero page or
> + * an empty entry that is just used for locking.  In total four special bits.
> + *
> + * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
> + * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
> + * block allocation.
> + */
> +#define RADIX_DAX_SHIFT		(RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
> +#define RADIX_DAX_ENTRY_LOCK	(1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
> +#define RADIX_DAX_PMD		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
> +#define RADIX_DAX_ZERO_PAGE	(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
> +#define RADIX_DAX_EMPTY		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))
> +
> +static unsigned long dax_radix_sector(void *entry)
> +{
> +     return (unsigned long)entry >> RADIX_DAX_SHIFT;
> +}
> +
> +static void *dax_radix_locked_entry(sector_t sector, unsigned long flags)
> +{
> +     return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
> +                     ((unsigned long)sector << RADIX_DAX_SHIFT) |
> +                     RADIX_DAX_ENTRY_LOCK);
> +}
> +
> +static unsigned int dax_radix_order(void *entry)
> +{
> +     if ((unsigned long)entry & RADIX_DAX_PMD)
> +             return PMD_SHIFT - PAGE_SHIFT;
> +     return 0;
> +}
> +
>  static int dax_is_pmd_entry(void *entry)
>  {
>       return (unsigned long)entry & RADIX_DAX_PMD;
> diff --git a/include/linux/dax.h b/include/linux/dax.h
> index afa99bb..d0e3272 100644
> --- a/include/linux/dax.h
> +++ b/include/linux/dax.h
> @@ -88,33 +88,6 @@ void dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
>  		size_t size);
>  void dax_write_cache(struct dax_device *dax_dev, bool wc);
>  
> -/*
> - * We use lowest available bit in exceptional entry for locking, one bit for
> - * the entry size (PMD) and two more to tell us if the entry is a zero page or
> - * an empty entry that is just used for locking.  In total four special bits.
> - *
> - * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
> - * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
> - * block allocation.
> - */
> -#define RADIX_DAX_SHIFT      (RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
> -#define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
> -#define RADIX_DAX_PMD (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
> -#define RADIX_DAX_ZERO_PAGE (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
> -#define RADIX_DAX_EMPTY (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))
> -
> -static inline unsigned long dax_radix_sector(void *entry)
> -{
> -     return (unsigned long)entry >> RADIX_DAX_SHIFT;
> -}
> -
> -static inline void *dax_radix_locked_entry(sector_t sector, unsigned long flags)
> -{
> -     return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
> -                     ((unsigned long)sector << RADIX_DAX_SHIFT) |
> -                     RADIX_DAX_ENTRY_LOCK);
> -}
> -
>  ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
>               const struct iomap_ops *ops);
>  int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
> @@ -136,20 +109,6 @@ static inline int __dax_zero_page_range(struct block_device *bdev,
>  }
>  #endif
>  
> -#ifdef CONFIG_FS_DAX_PMD
> -static inline unsigned int dax_radix_order(void *entry)
> -{
> -     if ((unsigned long)entry & RADIX_DAX_PMD)
> -             return PMD_SHIFT - PAGE_SHIFT;
> -     return 0;
> -}
> -#else
> -static inline unsigned int dax_radix_order(void *entry)
> -{
> -     return 0;
> -}
> -#endif
> -
>  static inline bool dax_mapping(struct address_space *mapping)
>  {
>       return mapping->host && IS_DAX(mapping->host);
> -- 
> 2.9.4
> 
-- 
Jan Kara <j...@suse.com>
SUSE Labs, CR