Because "order" will never be negative in __rmqueue_fallback(),
so just make "order" unsigned int.
And modify trace_mm_page_alloc_extfrag() accordingly.
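
For illustration, a rough standalone sketch of the motivation follows; the
function names below are made up for this example and are not the actual
mm/page_alloc.c helpers:

/*
 * Minimal userspace sketch (illustrative names only): the callers
 * already hold "order" as unsigned int, so a signed parameter just
 * adds an implicit unsigned -> signed conversion at every call site.
 */
#include <stdio.h>

/* Before: signed parameter although "order" can never be negative. */
static int fallback_signed(int order)
{
	return order;
}

/* After: the parameter type matches what callers actually pass. */
static unsigned int fallback_unsigned(unsigned int order)
{
	return order;
}

int main(void)
{
	unsigned int order = 3;	/* allocation order, always >= 0 */

	printf("signed callee:   %d\n", fallback_signed(order));	/* implicit conversion */
	printf("unsigned callee: %u\n", fallback_unsigned(order));	/* types agree */
	return 0;
}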

Signed-off-by: Pengfei Li <[email protected]>
---
 include/trace/events/kmem.h | 6 +++---
 mm/page_alloc.c             | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index eb57e3037deb..31f4d09aa31f 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -277,7 +277,7 @@ TRACE_EVENT(mm_page_pcpu_drain,
 TRACE_EVENT(mm_page_alloc_extfrag,
 
        TP_PROTO(struct page *page,
-               int alloc_order, int fallback_order,
+               unsigned int alloc_order, int fallback_order,
                int alloc_migratetype, int fallback_migratetype),
 
        TP_ARGS(page,
@@ -286,7 +286,7 @@ TRACE_EVENT(mm_page_alloc_extfrag,
 
        TP_STRUCT__entry(
                __field(        unsigned long,  pfn                     )
-               __field(        int,            alloc_order             )
+               __field(        unsigned int,   alloc_order             )
                __field(        int,            fallback_order          )
                __field(        int,            alloc_migratetype       )
                __field(        int,            fallback_migratetype    )
@@ -303,7 +303,7 @@ TRACE_EVENT(mm_page_alloc_extfrag,
                                        get_pageblock_migratetype(page));
        ),
 
-       TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d 
pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d 
change_ownership=%d",
+       TP_printk("page=%p pfn=%lu alloc_order=%u fallback_order=%d 
pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d 
change_ownership=%d",
                pfn_to_page(__entry->pfn),
                __entry->pfn,
                __entry->alloc_order,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 75c18f4fd66a..1432cbcd87cd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2631,8 +2631,8 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
  * condition simpler.
  */
 static __always_inline bool
-__rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
-                                               unsigned int alloc_flags)
+__rmqueue_fallback(struct zone *zone, unsigned int order,
+               int start_migratetype, unsigned int alloc_flags)
 {
        struct free_area *area;
        int current_order;
-- 
2.21.0
