Convert all the heap-based tests to use get_mapping_page_size() to check
whether a mapping is backed by huge pages, now that morecore uses MAP_HUGETLB.
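
[Note for reviewers: the checks below assume get_mapping_page_size() returns
the size of the page backing the mapping that contains the given address.  A
rough, purely illustrative sketch of one way such a helper could work, by
reading the KernelPageSize field of the covering VMA from /proc/self/smaps.
The function name and details here are hypothetical, not the test suite's
actual implementation:

#include <stdio.h>
#include <stdint.h>

/*
 * Hypothetical sketch only: scan /proc/self/smaps for the VMA that
 * contains addr and return its KernelPageSize in bytes (0 on failure).
 * The helper actually used by the tests may differ in name and detail.
 */
static unsigned long long mapping_page_size_sketch(void *addr)
{
	unsigned long long target = (uintptr_t)addr;
	unsigned long long start, end, kb, ret = 0;
	char line[256];
	int in_vma = 0;
	FILE *f = fopen("/proc/self/smaps", "r");

	if (!f)
		return 0;

	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "%llx-%llx", &start, &end) == 2) {
			/* VMA header line: note whether it covers addr */
			in_vma = (target >= start && target < end);
		} else if (in_vma &&
			   sscanf(line, "KernelPageSize: %llu kB", &kb) == 1) {
			ret = kb * 1024;	/* smaps reports sizes in kB */
			break;
		}
	}
	fclose(f);
	return ret;
}

With a helper along these lines, an address backed by 2 MB or 16 MB huge
pages reports a page size well above any base page size, which is why the
heapshrink/malloc tests below compare against a 64 kB MIN_PAGE_SIZE instead
of calling gethugepagesize().]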
Signed-off-by: Eric B Munson <emun...@mgebm.net>
---
 tests/heap-overflow.c    |    7 +++++--
 tests/heapshrink.c       |   15 +++++++++++++--
 tests/malloc.c           |   15 +++++++++++++--
 tests/malloc_manysmall.c |   14 ++++++++++++--
 4 files changed, 43 insertions(+), 8 deletions(-)

diff --git a/tests/heap-overflow.c b/tests/heap-overflow.c
index 93196ec..044c3fd 100644
--- a/tests/heap-overflow.c
+++ b/tests/heap-overflow.c
@@ -41,6 +41,7 @@ int main(int argc, char **argv)
 	long size1, size2;
 	void *p1, *p2;
 	int st, pid, rv;
+	unsigned long long mapping_size;
 
 	test_init(argc, argv);
 
@@ -68,7 +69,8 @@ int main(int argc, char **argv)
 	p1 = malloc(size1);
 	if (!p1)
 		FAIL("Couldn't malloc %ld bytes", size1);
-	if (!test_addr_huge(p1))
+	mapping_size = get_mapping_page_size(p1);
+	if (mapping_size != hpagesize)
 		FAIL("First allocation %p not on hugepages", p1);
 
 	/*
@@ -78,7 +80,8 @@ int main(int argc, char **argv)
 	p2 = malloc(size2);
 	if (!p2)
 		FAIL("Couldn't malloc %ld bytes", size2);
-	st = test_addr_huge(p2);
+	mapping_size = get_mapping_page_size(p2);
+	st = (mapping_size == hpagesize);
 
 	verbose_printf("Second allocation %p huge? %s\n", p2,
 		       st < 0 ? "??" : (st ? "yes" : "no"));
diff --git a/tests/heapshrink.c b/tests/heapshrink.c
index 9c83210..0644c78 100644
--- a/tests/heapshrink.c
+++ b/tests/heapshrink.c
@@ -22,11 +22,20 @@
 #include <string.h>
 #include "hugetests.h"
 
+/*
+ * We cannot test mapping size against huge page size because we are not linked
+ * against libhugetlbfs so gethugepagesize() won't work. So instead we define
+ * our MIN_PAGE_SIZE as 64 kB (the largest base page available) and make sure
+ * the mapping page size is larger than this.
+ */
+#define MIN_PAGE_SIZE 65536
+
 #define SIZE (32 * 1024 * 1024)
 
 int main(int argc, char **argv)
 {
 	int is_huge, have_env, shrink_ok, have_helper;
+	unsigned long long mapping_size;
 	void *p;
 
 	test_init(argc, argv);
@@ -45,7 +54,8 @@ int main(int argc, char **argv)
 		FAIL("malloc(%d) failed\n", SIZE);
 	}
 	memset(p, 0, SIZE);
-	is_huge = test_addr_huge(p+SIZE-1) == 1;
+	mapping_size = get_mapping_page_size(p);
+	is_huge = (mapping_size > MIN_PAGE_SIZE);
 	if (have_env && !is_huge) {
 		if (shrink_ok && have_helper) {
 			/* Hitting unexpected behavior in malloc() */
@@ -57,7 +67,8 @@ int main(int argc, char **argv)
 			FAIL("Heap unexpectedly on hugepages");
 
 	free(p);
-	if (shrink_ok && test_addr_huge(p+SIZE-1) == 1)
+	mapping_size = get_mapping_page_size(p+SIZE-1);
+	if (shrink_ok && mapping_size > MIN_PAGE_SIZE)
 		FAIL("Heap did not shrink");
 	PASS();
 }
diff --git a/tests/malloc.c b/tests/malloc.c
index 98635d4..a50c99b 100644
--- a/tests/malloc.c
+++ b/tests/malloc.c
@@ -25,6 +25,14 @@
 
 #include "hugetests.h"
 
+/*
+ * We cannot test mapping size against huge page size because we are not linked
+ * against libhugetlbfs so gethugepagesize() won't work. So instead we define
+ * our MIN_PAGE_SIZE as 64 kB (the largest base page available) and make sure
+ * the mapping page size is larger than this.
+ */
+#define MIN_PAGE_SIZE 65536
+
 static int block_sizes[] = {
 	sizeof(int), 1024, 128*1024, 1024*1024, 16*1024*1024,
 	32*1024*1024,
@@ -47,6 +55,7 @@ int main(int argc, char *argv[])
 
 	for (i = 0; i < NUM_SIZES; i++) {
 		int size = block_sizes[i];
+		unsigned long long mapping_size;
 
 		p = malloc(size);
 		if (! p)
@@ -56,9 +65,11 @@ int main(int argc, char *argv[])
 
 		memset(p, 0, size);
 
-		if (expect_hugepage && (test_addr_huge(p) != 1))
+		mapping_size = get_mapping_page_size(p);
+
+		if (expect_hugepage && (mapping_size <= MIN_PAGE_SIZE))
 			FAIL("Address is not hugepage");
-		if (!expect_hugepage && (test_addr_huge(p) == 1))
+		if (!expect_hugepage && (mapping_size > MIN_PAGE_SIZE))
 			FAIL("Address is unexpectedly huge");
 
 		free(p);
diff --git a/tests/malloc_manysmall.c b/tests/malloc_manysmall.c
index ae10b80..25086a8 100644
--- a/tests/malloc_manysmall.c
+++ b/tests/malloc_manysmall.c
@@ -25,6 +25,14 @@
 
 #include "hugetests.h"
 
+/*
+ * We cannot test mapping size against huge page size because we are not linked
+ * against libhugetlbfs so gethugepagesize() won't work. So instead we define
+ * our MIN_PAGE_SIZE as 64 kB (the largest base page available) and make sure
+ * the mapping page size is larger than this.
+ */
+#define MIN_PAGE_SIZE 65536
+
 #define ALLOC_SIZE	(128)
 #define NUM_ALLOCS	(262144)
 
@@ -55,9 +63,11 @@ int main(int argc, char *argv[])
 		if ((i % 157) == 0) {
 			/* With this many allocs, testing every one
 			 * takes forever */
-			if (expect_hugepage && (test_addr_huge(p) != 1))
+			unsigned long long mapping_size =
+				get_mapping_page_size(p);
+			if (expect_hugepage && (mapping_size <= MIN_PAGE_SIZE))
 				FAIL("Address is not hugepage");
-			if (!expect_hugepage && (test_addr_huge(p) == 1))
+			if (!expect_hugepage && (mapping_size > MIN_PAGE_SIZE))
 				FAIL("Address is unexpectedly huge");
 		}
 	}
-- 
1.7.1
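
[Aside, for context on "morecore uses MAP_HUGETLB" in the commit message: the
glibc __morecore hook is pointed at an allocator that obtains its heap memory
from anonymous huge page mappings, conceptually along the lines of the sketch
below.  This is illustrative only, not the libhugetlbfs morecore code:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2 * 1024 * 1024;	/* one 2 MB huge page on x86_64 */
	void *p;

	/* Illustrative only: grab heap-style memory backed by huge pages */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");	/* needs huge pages reserved */
		return 1;
	}

	/* Memory obtained this way is what the converted tests should now
	 * report as huge via the mapping page size. */
	munmap(p, len);
	return 0;
}
]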