Updated this patch according to the comments from Rafael.

Function shrink_all_memory_for_cma tries to free `nr_to_reclaim' pages of
memory, system-wide.  The CMA aggressive shrink code will call this function
to reclaim that amount of memory.  The common reclaim logic is factored out of
shrink_all_memory() into a new helper, __shrink_all_memory(), which is shared
by both callers.

Signed-off-by: Hui Zhu <zhu...@xiaomi.com>
---
 mm/vmscan.c | 58 +++++++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 43 insertions(+), 15 deletions(-)
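
Note: a hypothetical caller in the CMA aggressive shrink path might look like
the sketch below.  This is only an illustration of the intended usage of
shrink_all_memory_for_cma(); cma_aggressive_shrink() and its placement are
made up for this example and are not part of this patch:

	/* Hypothetical helper: reclaim memory ahead of a CMA allocation. */
	static void cma_aggressive_shrink(unsigned long nr_pages)
	{
		/*
		 * Ask vmscan to reclaim roughly `nr_pages' pages system-wide
		 * so that a subsequent CMA allocation can more easily migrate
		 * movable pages out of the CMA area.
		 */
		if (nr_pages)
			shrink_all_memory_for_cma(nr_pages);
	}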

diff --git a/mm/vmscan.c b/mm/vmscan.c
index dcb4707..658dc8d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3404,6 +3404,28 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
        wake_up_interruptible(&pgdat->kswapd_wait);
 }
 
+#if defined CONFIG_HIBERNATION || defined CONFIG_CMA_AGGRESSIVE
+static unsigned long __shrink_all_memory(struct scan_control *sc)
+{
+       struct reclaim_state reclaim_state;
+       struct zonelist *zonelist = node_zonelist(numa_node_id(), sc->gfp_mask);
+       struct task_struct *p = current;
+       unsigned long nr_reclaimed;
+
+       p->flags |= PF_MEMALLOC;
+       lockdep_set_current_reclaim_state(sc->gfp_mask);
+       reclaim_state.reclaimed_slab = 0;
+       p->reclaim_state = &reclaim_state;
+
+       nr_reclaimed = do_try_to_free_pages(zonelist, sc);
+
+       p->reclaim_state = NULL;
+       lockdep_clear_current_reclaim_state();
+       p->flags &= ~PF_MEMALLOC;
+
+       return nr_reclaimed;
+}
+
 #ifdef CONFIG_HIBERNATION
 /*
  * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
@@ -3415,7 +3437,6 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
  */
 unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
 {
-       struct reclaim_state reclaim_state;
        struct scan_control sc = {
                .nr_to_reclaim = nr_to_reclaim,
                .gfp_mask = GFP_HIGHUSER_MOVABLE,
@@ -3425,24 +3446,31 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
                .may_swap = 1,
                .hibernation_mode = 1,
        };
-       struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
-       struct task_struct *p = current;
-       unsigned long nr_reclaimed;
-
-       p->flags |= PF_MEMALLOC;
-       lockdep_set_current_reclaim_state(sc.gfp_mask);
-       reclaim_state.reclaimed_slab = 0;
-       p->reclaim_state = &reclaim_state;
 
-       nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+       return __shrink_all_memory(&sc);
+}
+#endif /* CONFIG_HIBERNATION */
 
-       p->reclaim_state = NULL;
-       lockdep_clear_current_reclaim_state();
-       p->flags &= ~PF_MEMALLOC;
+#ifdef CONFIG_CMA_AGGRESSIVE
+/*
+ * Try to free `nr_to_reclaim' of memory, system-wide, for CMA aggressive
+ * shrink function.
+ */
+void shrink_all_memory_for_cma(unsigned long nr_to_reclaim)
+{
+       struct scan_control sc = {
+               .nr_to_reclaim = nr_to_reclaim,
+               .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_HIGHMEM,
+               .priority = DEF_PRIORITY,
+               .may_writepage = !laptop_mode,
+               .may_unmap = 1,
+               .may_swap = 1,
+       };
 
-       return nr_reclaimed;
+       __shrink_all_memory(&sc);
 }
-#endif /* CONFIG_HIBERNATION */
+#endif /* CONFIG_CMA_AGGRESSIVE */
+#endif /* CONFIG_HIBERNATION || CONFIG_CMA_AGGRESSIVE */
 
 /* It's optimal to keep kswapds on the same CPUs as their memory, but
    not required for correctness.  So if the last cpu in a node goes
-- 
1.9.1
