Hello community, here is the log from the commit of package makedumpfile for openSUSE:Factory checked in at 2014-06-19 13:08:27 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Comparing /work/SRC/openSUSE:Factory/makedumpfile (Old) and /work/SRC/openSUSE:Factory/.makedumpfile.new (New) ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "makedumpfile" Changes: -------- --- /work/SRC/openSUSE:Factory/makedumpfile/makedumpfile.changes 2014-05-10 08:32:18.000000000 +0200 +++ /work/SRC/openSUSE:Factory/.makedumpfile.new/makedumpfile.changes 2014-06-19 13:08:28.000000000 +0200 @@ -1,0 +2,18 @@ +Mon Jun 16 07:49:20 UTC 2014 - [email protected] + +- makedumpfile-fix-free-bitmap_buffer_cyclic.patch: Fix free + bitmap_buffer_cyclic error. + +------------------------------------------------------------------- +Fri Jun 13 10:00:24 UTC 2014 - [email protected] + +- makedumpfile-generic-multi-page-excl.patch: Generic handling of + multi-page exclusions (bnc#873232). +- makedumpfile-remove-overrun-adj.patch: Get rid of overrun + adjustments (bnc#873232). +- makedumpfile-isCompoundHead.patch: Add isCompoundHead() macro to + check for compound pages (bnc#873232). +- makedumpfile-exclude-compound-pages.patch: Treat compound pages + as a single entity (bnc#873232). + +------------------------------------------------------------------- New: ---- makedumpfile-exclude-compound-pages.patch makedumpfile-fix-free-bitmap_buffer_cyclic.patch makedumpfile-generic-multi-page-excl.patch makedumpfile-isCompoundHead.patch makedumpfile-remove-overrun-adj.patch ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Other differences: ------------------ ++++++ makedumpfile.spec ++++++ --- /var/tmp/diff_new_pack.FFrk8i/_old 2014-06-19 13:08:29.000000000 +0200 +++ /var/tmp/diff_new_pack.FFrk8i/_new 2014-06-19 13:08:29.000000000 +0200 @@ -48,6 +48,11 @@ Source: %{name}-%{version}.tar.bz2 Source1: README.static Patch0: %{name}-coptflags.diff +Patch1: %{name}-generic-multi-page-excl.patch +Patch2: %{name}-remove-overrun-adj.patch +Patch3: %{name}-isCompoundHead.patch +Patch4: %{name}-exclude-compound-pages.patch +Patch5: %{name}-fix-free-bitmap_buffer_cyclic.patch BuildRoot: %{_tmppath}/%{name}-%{version}-build ExclusiveArch: %ix86 x86_64 ia64 ppc ppc64 ppc64le s390x %arm @@ -66,6 +71,11 @@ %prep %setup -q 
%patch0 -p1 +%patch1 -p1 +%patch2 -p1 +%patch3 -p1 +%patch4 -p1 +%patch5 -p1 %build LIBS_STATIC= ++++++ makedumpfile-exclude-compound-pages.patch ++++++ From: Petr Tesarik <[email protected]> Date: Fri Jun 13 11:57:03 2014 +0200 Subject: Treat compound pages as a single entity References: bnc#873232 Patch-mainline: not yet For a compound page, the head page determines whether the page should be kept or excluded. Signed-off-by: Petr Tesarik <[email protected]> --- makedumpfile.c | 57 +++++++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 43 insertions(+), 14 deletions(-) --- a/makedumpfile.c +++ b/makedumpfile.c @@ -4652,6 +4652,7 @@ __exclude_unnecessary_pages(unsigned lon unsigned char *pcache; unsigned int _count, _mapcount = 0; unsigned long flags, mapping, private = 0; + int nr_pages; /* * If a multi-page exclusion is pending, do it first @@ -4671,7 +4672,13 @@ __exclude_unnecessary_pages(unsigned lon pfn_read_start = ULONGLONG_MAX; pfn_read_end = 0; - for (pfn = pfn_start; pfn < pfn_end; pfn++, mem_map += SIZE(page)) { + for (pfn = pfn_start; pfn < pfn_end; pfn += nr_pages, mem_map += nr_pages * SIZE(page)) { + unsigned long long *pfn_counter; + + /* + * Process one page by default. + */ + nr_pages = 1; /* * If this pfn doesn't belong to target region, skip this pfn. @@ -4723,18 +4730,32 @@ __exclude_unnecessary_pages(unsigned lon private = ULONG(pcache + OFFSET(page.private)); /* + * Check for compound pages. 
+ */ + if (isCompoundHead(flags) && (pfn + 1 <= pfn_read_end)) { + unsigned char *ptail; + unsigned long order; + + ptail = pcache + SIZE(page); + order = ULONG(ptail + OFFSET(page.lru) + + OFFSET(list_head.prev)); + if (order && order < sizeof(unsigned long) * 8 && + (pfn & ((1UL << order) - 1)) == 0 && + (OFFSET(page.private) == NOT_FOUND_STRUCTURE || + ULONG(ptail + OFFSET(page.private)) == mem_map)) { + nr_pages = 1UL << order; + } + } + + /* * Exclude the free page managed by a buddy * Use buddy identification of free pages whether cyclic or not. */ if ((info->dump_level & DL_EXCLUDE_FREE) && info->page_is_buddy && info->page_is_buddy(flags, _mapcount, private, _count)) { - int nr_pages = 1 << private; - - exclude_range(&pfn_free, pfn, pfn + nr_pages, cycle); - - pfn += nr_pages - 1; - mem_map += (nr_pages - 1) * SIZE(page); + nr_pages = 1 << private; + pfn_counter = &pfn_free; } /* * Exclude the cache page without the private page. @@ -4742,8 +4763,7 @@ __exclude_unnecessary_pages(unsigned lon else if ((info->dump_level & DL_EXCLUDE_CACHE) && (isLRU(flags) || isSwapCache(flags)) && !isPrivate(flags) && !isAnon(mapping)) { - if (clear_bit_on_2nd_bitmap_for_kernel(pfn, cycle)) - pfn_cache++; + pfn_counter = &pfn_cache; } /* * Exclude the cache page with the private page. @@ -4751,23 +4771,32 @@ __exclude_unnecessary_pages(unsigned lon else if ((info->dump_level & DL_EXCLUDE_CACHE_PRI) && (isLRU(flags) || isSwapCache(flags)) && !isAnon(mapping)) { - if (clear_bit_on_2nd_bitmap_for_kernel(pfn, cycle)) - pfn_cache_private++; + pfn_counter = &pfn_cache_private; } /* * Exclude the data page of the user process. */ else if ((info->dump_level & DL_EXCLUDE_USER_DATA) && isAnon(mapping)) { - if (clear_bit_on_2nd_bitmap_for_kernel(pfn, cycle)) - pfn_user++; + pfn_counter = &pfn_user; } /* * Exclude the hwpoison page. 
*/ else if (isHWPOISON(flags)) { + pfn_counter = &pfn_hwpoison; + } + /* + * Page not excluded + */ + else + continue; + + if (nr_pages == 1) { if (clear_bit_on_2nd_bitmap_for_kernel(pfn, cycle)) - pfn_hwpoison++; + (*pfn_counter)++; + } else { + exclude_range(pfn_counter, pfn, pfn + nr_pages, cycle); } } return TRUE; ++++++ makedumpfile-fix-free-bitmap_buffer_cyclic.patch ++++++ From: Arthur Zou <[email protected]> Date: Wed May 7 17:54:16 2014 +0900 Subject: Fix free bitmap_buffer_cyclic error. Patch-mainline: v1.5.7 Git-commit: 0e7b1a6e3c1919c9222b662d458637ddf802dd04 Description: In create_dump_bitmap() and write_kdump_pages_and_bitmap_cyclic(), what should be freed is info->partial_bitmap instead of info->bitmap. Solution: Add two functions to free the bitmap_buffer_cyclic. info->partial_bitmap1 is freed by free_bitmap1_buffer_cyclic(). info->partial_bitmap2 is freed by free_bitmap2_buffer_cyclic(). At the same time, remove those frees that free partial_bitmap1 or partial_bitmap2 at the end of main(), because partial_bitmap1 and partial_bitmap2 have already been freed after the dump file has been written out, so there is no need to free them again at the end of main.
Signed-off-by: Arthur Zou <[email protected]> Acked-by: Petr Tesarik <[email protected]> diff --git a/makedumpfile.c b/makedumpfile.c index 16081a5..ef8a750 100644 --- a/makedumpfile.c +++ b/makedumpfile.c @@ -5130,6 +5130,31 @@ free_bitmap_buffer(void) free_bitmap2_buffer(); } +void +free_bitmap1_buffer_cyclic() +{ + if (info->partial_bitmap1 != NULL){ + free(info->partial_bitmap1); + info->partial_bitmap1 = NULL; + } +} + +void +free_bitmap2_buffer_cyclic() +{ + if (info->partial_bitmap2 != NULL){ + free(info->partial_bitmap2); + info->partial_bitmap2 = NULL; + } +} + +void +free_bitmap_buffer_cyclic() +{ + free_bitmap1_buffer_cyclic(); + free_bitmap2_buffer_cyclic(); +} + int create_dump_bitmap(void) { @@ -5147,8 +5172,7 @@ create_dump_bitmap(void) goto out; info->num_dumpable = get_num_dumpable_cyclic(); - - free_bitmap2_buffer(); + free_bitmap2_buffer_cyclic(); } } else { @@ -6190,6 +6214,8 @@ write_elf_pages_cyclic(struct cache_data *cd_header, struct cache_data *cd_page) if (!write_cache_bufsz(cd_page)) return FALSE; + free_bitmap_buffer_cyclic(); + /* * print [100 %] */ @@ -6947,7 +6973,7 @@ write_kdump_pages_and_bitmap_cyclic(struct cache_data *cd_header, struct cache_d } - free_bitmap1_buffer(); + free_bitmap1_buffer_cyclic(); if (!prepare_bitmap2_buffer_cyclic()) return FALSE; @@ -6970,7 +6996,7 @@ write_kdump_pages_and_bitmap_cyclic(struct cache_data *cd_header, struct cache_d return FALSE; } - + free_bitmap2_buffer_cyclic(); gettimeofday(&tv_start, NULL); @@ -9349,10 +9375,6 @@ out: free(info->splitting_info); if (info->p2m_mfn_frame_list != NULL) free(info->p2m_mfn_frame_list); - if (info->partial_bitmap1 != NULL) - free(info->partial_bitmap1); - if (info->partial_bitmap2 != NULL) - free(info->partial_bitmap2); free(info); } free_elf_info(); ++++++ makedumpfile-generic-multi-page-excl.patch ++++++ From: Petr Tesarik <[email protected]> Date: Tue May 27 08:58:18 2014 +0900 Subject: Generic handling of multi-page exclusions. 
References: bnc#873232 Patch-mainline: v1.5.7 Git-commit: 6c19012f352d13147211cb3573e70d051a109a41 When multiple pages are excluded from the dump, store the extents in struct cycle and check if anything is still pending on the next invocation of __exclude_unnecessary_pages. This assumes that: 1. after __exclude_unnecessary_pages is called for a struct mem_map_data that extends beyond the current cycle, it is not called again during that cycle, 2. in the next cycle, __exclude_unnecessary_pages is not called before this final struct mem_map_data. Both assumptions are met if struct mem_map_data segments: 1. do not overlap, 2. are sorted by physical address in ascending order. These two conditions are true for all supported memory models. Note that the start PFN of the excluded extent is set to the end of the current cycle (which is equal to the start of the next cycle, see update_cycle), so only the part of the excluded region which falls beyond current cycle buffer is valid. If the excluded region is completely processed in the current cycle, the start PFN is bigger than the end PFN and no work is done at the beginning of the next cycle. After processing the leftover from last cycle, pfn_start and mem_map are adjusted to skip the excluded pages. There is no check whether the adjusted pfn_start is within the current cycle. Nothing bad happens if it isn't, because pages outside the current cyclic region are ignored by the subsequent loop, and the remainder is postponed to the next cycle by exclude_range(). 
Signed-off-by: Petr Tesarik <[email protected]> --- makedumpfile.c | 52 ++++++++++++++++++++++++++++++++++++++-------------- makedumpfile.h | 5 +++++ 2 files changed, 43 insertions(+), 14 deletions(-) --- a/makedumpfile.c +++ b/makedumpfile.c @@ -44,6 +44,9 @@ static void first_cycle(unsigned long lo if (cycle->end_pfn > max) cycle->end_pfn = max; + + cycle->exclude_pfn_start = 0; + cycle->exclude_pfn_end = 0; } static void update_cycle(unsigned long long max, struct cycle *cycle) @@ -4664,6 +4667,26 @@ initialize_2nd_bitmap_cyclic(struct cycl return TRUE; } +static void +exclude_range(unsigned long long *counter, unsigned long long pfn, unsigned long long endpfn, + struct cycle *cycle) +{ + if (cycle) { + cycle->exclude_pfn_start = cycle->end_pfn; + cycle->exclude_pfn_end = endpfn; + cycle->exclude_pfn_counter = counter; + + if (cycle->end_pfn < endpfn) + endpfn = cycle->end_pfn; + } + + while (pfn < endpfn) { + if (clear_bit_on_2nd_bitmap_for_kernel(pfn, cycle)) + (*counter)++; + ++pfn; + } +} + int __exclude_unnecessary_pages(unsigned long mem_map, unsigned long long pfn_start, unsigned long long pfn_end, struct cycle *cycle) @@ -4676,6 +4699,18 @@ __exclude_unnecessary_pages(unsigned lon unsigned long flags, mapping, private = 0; /* + * If a multi-page exclusion is pending, do it first + */ + if (cycle && cycle->exclude_pfn_start < cycle->exclude_pfn_end) { + exclude_range(cycle->exclude_pfn_counter, + cycle->exclude_pfn_start, cycle->exclude_pfn_end, + cycle); + + mem_map += (cycle->exclude_pfn_end - pfn_start) * SIZE(page); + pfn_start = cycle->exclude_pfn_end; + } + + /* * Refresh the buffer of struct page, when changing mem_map. 
*/ pfn_read_start = ULONGLONG_MAX; @@ -4739,21 +4774,10 @@ __exclude_unnecessary_pages(unsigned lon if ((info->dump_level & DL_EXCLUDE_FREE) && info->page_is_buddy && info->page_is_buddy(flags, _mapcount, private, _count)) { - int i, nr_pages = 1 << private; + int nr_pages = 1 << private; + + exclude_range(&pfn_free, pfn, pfn + nr_pages, cycle); - for (i = 0; i < nr_pages; ++i) { - /* - * According to combination of - * MAX_ORDER and size of cyclic - * buffer, this clearing bit operation - * can overrun the cyclic buffer. - * - * See check_cyclic_buffer_overrun() - * for the detail. - */ - if (clear_bit_on_2nd_bitmap_for_kernel((pfn + i), cycle)) - pfn_free++; - } pfn += nr_pages - 1; mem_map += (nr_pages - 1) * SIZE(page); } --- a/makedumpfile.h +++ b/makedumpfile.h @@ -1591,6 +1591,11 @@ int get_xen_info_ia64(void); struct cycle { unsigned long long start_pfn; unsigned long long end_pfn; + + /* for excluding multi-page regions */ + unsigned long long exclude_pfn_start; + unsigned long long exclude_pfn_end; + unsigned long long *exclude_pfn_counter; }; static inline int ++++++ makedumpfile-isCompoundHead.patch ++++++ From: Petr Tesarik <[email protected]> Date: Thu Jun 12 16:57:22 2014 +0200 Subject: Add isCompoundHead() macro to check for compound pages References: bnc#873232 Patch-mainline: not yet This patch adds the necessary infrastructure to handle an additional page flag. It is slightly different from the other page flags, because the page flag name depends on CONFIG_PAGEFLAGS_EXTENDED (PG_head vs PG_compound), and the kernel decided to export the resulting bit mask rather than the individual page flag. When reading VMCOREINFO, it means less work (makedumpfile always gets the appropriate mask under the name PG_head_mask), but when generating VMCOREINFO, the code must check which enum value is defined and compute the resulting mask. Note PG_head_mask cannot be simply taken from DWARF, because: 1. it is a macro, not an enum value, 2. 
for !CONFIG_PAGEFLAGS_EXTENDED it is defined only by kernels after commit <TBD>. Signed-off-by: Petr Tesarik <[email protected]> --- makedumpfile.c | 14 ++++++++++++++ makedumpfile.h | 3 +++ 2 files changed, 17 insertions(+) --- a/makedumpfile.c +++ b/makedumpfile.c @@ -1184,6 +1184,8 @@ get_symbol_info(void) int get_structure_info(void) { + long PG_head; + /* * Get offsets of the page_discriptor's members. */ @@ -1290,6 +1292,14 @@ get_structure_info(void) ENUM_NUMBER_INIT(PG_slab, "PG_slab"); ENUM_NUMBER_INIT(PG_hwpoison, "PG_hwpoison"); + PG_head = get_enum_number("PG_head"); + if (PG_head == FAILED_DWARFINFO) { + PG_head = get_enum_number("PG_compound"); + if (PG_head == FAILED_DWARFINFO) + return FALSE; + } + NUMBER(PG_head_mask) = 1L << PG_head; + ENUM_TYPE_SIZE_INIT(pageflags, "pageflags"); TYPEDEF_SIZE_INIT(nodemask_t, "nodemask_t"); @@ -1524,6 +1534,8 @@ get_value_for_old_linux(void) NUMBER(PG_swapcache) = PG_swapcache_ORIGINAL; if (NUMBER(PG_slab) == NOT_FOUND_NUMBER) NUMBER(PG_slab) = PG_slab_ORIGINAL; + if (NUMBER(PG_head_mask) == NOT_FOUND_NUMBER) + NUMBER(PG_head_mask) = 1L << PG_compound_ORIGINAL; /* * The values from here are for free page filtering based on * mem_map array. 
These are minimum effort to cover old @@ -1784,6 +1796,7 @@ write_vmcoreinfo_data(void) WRITE_NUMBER("PG_buddy", PG_buddy); WRITE_NUMBER("PG_slab", PG_slab); WRITE_NUMBER("PG_hwpoison", PG_hwpoison); + WRITE_NUMBER("PG_head_mask", PG_head_mask); WRITE_NUMBER("PAGE_BUDDY_MAPCOUNT_VALUE", PAGE_BUDDY_MAPCOUNT_VALUE); @@ -2110,6 +2123,7 @@ read_vmcoreinfo(void) READ_NUMBER("PG_slab", PG_slab); READ_NUMBER("PG_buddy", PG_buddy); READ_NUMBER("PG_hwpoison", PG_hwpoison); + READ_NUMBER("PG_head_mask", PG_head_mask); READ_SRCFILE("pud_t", pud_t); --- a/makedumpfile.h +++ b/makedumpfile.h @@ -74,6 +74,7 @@ int get_mem_type(void); #define PG_lru_ORIGINAL (5) #define PG_slab_ORIGINAL (7) #define PG_private_ORIGINAL (11) /* Has something at ->private */ +#define PG_compound_ORIGINAL (14) /* Is part of a compound page */ #define PG_swapcache_ORIGINAL (15) /* Swap page: swp_entry_t in private */ #define PAGE_BUDDY_MAPCOUNT_VALUE_v2_6_38 (-2) @@ -151,6 +152,7 @@ test_bit(int nr, unsigned long addr) #define isSwapCache(flags) test_bit(NUMBER(PG_swapcache), flags) #define isHWPOISON(flags) (test_bit(NUMBER(PG_hwpoison), flags) \ && (NUMBER(PG_hwpoison) != NOT_FOUND_NUMBER)) +#define isCompoundHead(flags) (!!((flags) & NUMBER(PG_head_mask))) static inline int isAnon(unsigned long mapping) @@ -1430,6 +1432,7 @@ struct number_table { long PG_buddy; long PG_slab; long PG_hwpoison; + long PG_head_mask; long PAGE_BUDDY_MAPCOUNT_VALUE; }; ++++++ makedumpfile-remove-overrun-adj.patch ++++++ From: Petr Tesarik <[email protected]> Date: Tue May 27 08:58:36 2014 +0900 Subject: Get rid of overrun adjustments. References: bnc#873232 Patch-mainline: v1.5.7 Git-commit: 22a614a0d891dbab0e7901f8dbf387a882c1d5cf Thanks to the previous commit, __exclude_unnecessary_pages does not require any specific size of the cycle. 
Signed-off-by: Petr Tesarik <[email protected]> --- makedumpfile.c | 59 --------------------------------------------------------- 1 file changed, 59 deletions(-) --- a/makedumpfile.c +++ b/makedumpfile.c @@ -91,7 +91,6 @@ do { \ *ptr_long_table = value; \ } while (0) -static void check_cyclic_buffer_overrun(void); static void setup_page_is_buddy(void); void @@ -3254,9 +3253,6 @@ out: !sadump_generate_elf_note_from_dumpfile()) return FALSE; - if (info->flag_cyclic && info->dump_level & DL_EXCLUDE_FREE) - check_cyclic_buffer_overrun(); - } else { if (!get_mem_map_without_mm()) return FALSE; @@ -4281,61 +4277,6 @@ exclude_free_page(struct cycle *cycle) } /* - * Let C be a cyclic buffer size and B a bitmap size used for - * representing maximum block size managed by buddy allocator. - * - * For some combinations of C and B, clearing operation can overrun - * the cyclic buffer. Let's consider three cases. - * - * - If C == B, this is trivially safe. - * - * - If B > C, overrun can easily happen. - * - * - In case of C > B, if C mod B != 0, then there exist n > m > 0, - * B > b > 0 such that n x C = m x B + b. This means that clearing - * operation overruns cyclic buffer (B - b)-bytes in the - * combination of n-th cycle and m-th block. - * - * Note that C mod B != 0 iff (m x C) mod B != 0 for some m. - * - * If C == B, C mod B == 0 always holds. Again, if B > C, C mod B != 0 - * always holds. Hence, it's always sufficient to check the condition - * C mod B != 0 in order to determine whether overrun can happen or - * not. - * - * The bitmap size used for maximum block size B is calculated from - * MAX_ORDER as: - * - * B := DIVIDE_UP((1 << (MAX_ORDER - 1)), BITS_PER_BYTE) - * - * Normally, MAX_ORDER is 11 at default. This is configurable through - * CONFIG_FORCE_MAX_ZONEORDER. 
- */ -static void -check_cyclic_buffer_overrun(void) -{ - int max_order = ARRAY_LENGTH(zone.free_area); - int max_order_nr_pages = 1 << (max_order - 1); - unsigned long max_block_size = divideup(max_order_nr_pages, BITPERBYTE); - - if (info->bufsize_cyclic % max_block_size) { - unsigned long bufsize; - - if (max_block_size > info->bufsize_cyclic) { - MSG("WARNING: some free pages are not filtered.\n"); - return; - } - - bufsize = info->bufsize_cyclic; - info->bufsize_cyclic = round(bufsize, max_block_size); - info->pfn_cyclic = info->bufsize_cyclic * BITPERBYTE; - - MSG("cyclic buffer size has been changed: %lu => %lu\n", - bufsize, info->bufsize_cyclic); - } -} - -/* * For the kernel versions from v2.6.17 to v2.6.37. */ static int -- To unsubscribe, e-mail: [email protected] For additional commands, e-mail: [email protected]
