> -----Original Message-----
> From: [email protected] [mailto:lng-odp-
> [email protected]] On Behalf Of ext Maxim Uvarov
> Sent: Thursday, January 22, 2015 6:39 PM
> To: [email protected]
> Subject: [lng-odp] [PATCHv3 3/3] hugepages: align mmap size for hugepages
>
> In the case of huge pages, munmap() requires a size aligned to the huge page size.
>
> Signed-off-by: Maxim Uvarov <[email protected]>
> ---
> platform/linux-generic/odp_shared_memory.c | 18 ++++++++++++++----
> test/validation/odp_shm.c | 4 ++++
> 2 files changed, 18 insertions(+), 4 deletions(-)
>
> diff --git a/platform/linux-generic/odp_shared_memory.c b/platform/linux-generic/odp_shared_memory.c
> index 23a9ceb..d2b28bc 100644
> --- a/platform/linux-generic/odp_shared_memory.c
> +++ b/platform/linux-generic/odp_shared_memory.c
> @@ -179,11 +179,22 @@ odp_shm_t odp_shm_reserve(const char *name, uint64_t size, uint64_t align,
> int map_flag = MAP_SHARED;
> /* If already exists: O_EXCL: error, O_TRUNC: truncate to zero */
> int oflag = O_RDWR | O_CREAT | O_TRUNC;
> - uint64_t alloc_size = size + align;
> + uint64_t alloc_size;
> uint64_t page_sz, huge_sz;
> + int need_huge_page = 0;
>
> - huge_sz = odp_sys_huge_page_size();
> page_sz = odp_sys_page_size();
> + alloc_size = size + align;
> +
> +#ifdef MAP_HUGETLB
> + huge_sz = odp_sys_huge_page_size();
> + need_huge_page = (huge_sz && alloc_size > page_sz);
> + if (need_huge_page) {
> + /* munmap for huge pages requires sizes round up by page */
> + alloc_size = (size + align + (huge_sz - 1))
> + & (-huge_sz);
> + }
> +#endif
>
> if (flags & ODP_SHM_PROC) {
> /* Creates a file to /dev/shm */
> @@ -235,12 +246,11 @@ odp_shm_t odp_shm_reserve(const char *name, uint64_t size, uint64_t align,
>
> #ifdef MAP_HUGETLB
> /* Try first huge pages */
> - if (huge_sz && alloc_size > page_sz) {
> + if (need_huge_page) {
> addr = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
> map_flag | MAP_HUGETLB, fd, 0);
> }
> #endif
> -
Small tuning still needed: if we run out of huge pages (the mmap above fails), we want to fall back to normal pages with the original alloc_size. The huge page size may be e.g. 1 GB, so a 5 kB alloc_size would have been rounded up to 1 GB. alloc_size needs to be reset here, so that the fallback mmap below does not consume an excessive number of normal pages.
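
Something along these lines (untested sketch reusing the names from the patch; the exact map_flag/fd arguments of the fallback mmap are my assumption based on the earlier hunk):

    /* Use normal pages for small or failed huge page allocations */
    if (addr == MAP_FAILED) {
            /* Reset to the original, page-granular size: with e.g. a
             * 1 GB huge page, a 5 kB size + align would otherwise stay
             * rounded up to 1 GB by (size + align + huge_sz - 1) & -huge_sz. */
            alloc_size = size + align;
            addr = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
                        map_flag, fd, 0);
    }
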
-Petri
> /* Use normal pages for small or failed huge page allocations */
> if (addr == MAP_FAILED) {
> addr = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
> diff --git a/test/validation/odp_shm.c b/test/validation/odp_shm.c
> index c26925b..4b1a38e 100644
> --- a/test/validation/odp_shm.c
> +++ b/test/validation/odp_shm.c
> @@ -32,7 +32,11 @@ static void *run_shm_thread(void *arg)
> CU_ASSERT(0 == info.flags);
> CU_ASSERT(test_shared_data == info.addr);
> CU_ASSERT(sizeof(test_shared_data_t) <= info.size);
> +#ifdef MAP_HUGETLB
> + CU_ASSERT(odp_sys_huge_page_size() == info.page_size);
> +#else
> CU_ASSERT(odp_sys_page_size() == info.page_size);
> +#endif
> odp_shm_print_all();
>
> fflush(stdout);
> --
> 1.8.5.1.163.gd7aced9
>
>
_______________________________________________
lng-odp mailing list
[email protected]
http://lists.linaro.org/mailman/listinfo/lng-odp