Joonsoo Kim <iamjoonsoo....@lge.com> writes:

> PPC KVM's CMA area management needs an alignment constraint on the
> CMA region, so add support for it in preparation for generalizing
> the CMA area management functionality.
>
> Additionally, add comments explaining why the alignment constraint
> is needed on the CMA region.
>
> Signed-off-by: Joonsoo Kim <iamjoonsoo....@lge.com>

Reviewed-by: Aneesh Kumar K.V <aneesh.ku...@linux.vnet.ibm.com>

>
> diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
> index 8a44c82..bc4c171 100644
> --- a/drivers/base/dma-contiguous.c
> +++ b/drivers/base/dma-contiguous.c
> @@ -32,6 +32,7 @@
>  #include <linux/swap.h>
>  #include <linux/mm_types.h>
>  #include <linux/dma-contiguous.h>
> +#include <linux/log2.h>
>
>  struct cma {
>       unsigned long   base_pfn;
> @@ -219,6 +220,7 @@ core_initcall(cma_init_reserved_areas);
>   * @size: Size of the reserved area (in bytes),
>   * @base: Base address of the reserved area optional, use 0 for any
>   * @limit: End address of the reserved memory (optional, 0 for any).
> + * @alignment: Alignment for the contiguous memory area, should be a power of 2
>   * @res_cma: Pointer to store the created cma region.
>   * @fixed: hint about where to place the reserved area
>   *
> @@ -233,15 +235,15 @@ core_initcall(cma_init_reserved_areas);
>   */
>  static int __init __dma_contiguous_reserve_area(phys_addr_t size,
>                               phys_addr_t base, phys_addr_t limit,
> +                             phys_addr_t alignment,
>                               struct cma **res_cma, bool fixed)
>  {
>       struct cma *cma = &cma_areas[cma_area_count];
> -     phys_addr_t alignment;
>       int ret = 0;
>
> -     pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
> -              (unsigned long)size, (unsigned long)base,
> -              (unsigned long)limit);
> +     pr_debug("%s(size %lx, base %08lx, limit %08lx alignment %08lx)\n",
> +             __func__, (unsigned long)size, (unsigned long)base,
> +             (unsigned long)limit, (unsigned long)alignment);
>
>       /* Sanity checks */
>       if (cma_area_count == ARRAY_SIZE(cma_areas)) {
> @@ -253,8 +255,17 @@ static int __init __dma_contiguous_reserve_area(phys_addr_t size,
>       if (!size)
>               return -EINVAL;
>
> -     /* Sanitise input arguments */
> -     alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
> +     if (alignment && !is_power_of_2(alignment))
> +             return -EINVAL;
> +
> +     /*
> +      * Sanitise input arguments.
> +      * The CMA area should be at least MAX_ORDER - 1 aligned. Otherwise,
> +      * its pages could be merged into other MIGRATE_TYPEs by the buddy
> +      * allocator and the CMA property would be broken.
> +      */
> +     alignment = max(alignment,
> +             (phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
>       base = ALIGN(base, alignment);
>       size = ALIGN(size, alignment);
>       limit &= ~(alignment - 1);
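
To spell out what the new sanity checks end up doing, here is a minimal
userspace sketch (not the kernel code; PAGE_SIZE, MAX_ORDER and
pageblock_order are hard-coded to typical defaults, and the request
values are made up for illustration):

	#include <stdbool.h>
	#include <stdio.h>

	/* illustrative values only: 4 KiB pages, MAX_ORDER = 11, pageblock_order = 10 */
	#define PAGE_SIZE	4096UL
	#define MAX_ORDER	11
	#define PAGEBLOCK_ORDER	10

	#define MAX(a, b)	((a) > (b) ? (a) : (b))
	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	static bool is_power_of_2(unsigned long n)
	{
		return n != 0 && (n & (n - 1)) == 0;
	}

	int main(void)
	{
		unsigned long size = 0x1200000;		/* 18 MiB request */
		unsigned long base = 0, limit = 0;
		unsigned long alignment = 0x1000000;	/* caller asks for 16 MiB */

		/* 0 means "use the default"; anything else must be a power of 2 */
		if (alignment && !is_power_of_2(alignment))
			return 1;	/* the kernel returns -EINVAL here */

		/* never drop below the MAX_ORDER - 1 / pageblock floor (4 MiB here) */
		alignment = MAX(alignment,
				PAGE_SIZE << MAX(MAX_ORDER - 1, PAGEBLOCK_ORDER));

		base = ALIGN(base, alignment);
		size = ALIGN(size, alignment);
		limit &= ~(alignment - 1);

		printf("alignment %#lx, base %#lx, size %#lx, limit %#lx\n",
		       alignment, base, size, limit);
		return 0;
	}

With those numbers the floor is 4 MiB, the caller's 16 MiB wins, and the
18 MiB request gets rounded up to 32 MiB.
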
> @@ -302,7 +313,8 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
>  {
>       int ret;
>
> -     ret = __dma_contiguous_reserve_area(size, base, limit, res_cma, fixed);
> +     ret = __dma_contiguous_reserve_area(size, base, limit, 0,
> +                                             res_cma, fixed);
>       if (ret)
>               return ret;
>
> -- 
> 1.7.9.5
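
As a usage note (my own sketch, not something in this patch): once this
lands, an in-file caller with a stricter alignment requirement could do
something along the lines of

	struct cma *cma;
	int ret;

	/* hypothetical: reserve 32 MiB aligned to 16 MiB, anywhere in memory */
	ret = __dma_contiguous_reserve_area(32 * SZ_1M, 0, 0,
					    16 * SZ_1M, &cma, false);

(SZ_1M is from <linux/sizes.h>), while existing callers keep the old
behaviour by passing 0 for the alignment, as the last hunk shows.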
