https://git.reactos.org/?p=reactos.git;a=commitdiff;h=066ee4db3bdd2b7f430a33a0c988872cec78744d

commit 066ee4db3bdd2b7f430a33a0c988872cec78744d
Author:     Thomas Faber <thomas.fa...@reactos.org>
AuthorDate: Mon Jan 28 22:12:35 2019 +0100
Commit:     Thomas Faber <thomas.fa...@reactos.org>
CommitDate: Sun Feb 3 12:54:36 2019 +0100

    [NTOS:MM] Dump pool consumers at most once per second.
    
    This should avoid some log spam during kmtest:ExPools, which
    intentionally depletes pool.
---
 ntoskrnl/mm/ARM3/expool.c | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)

diff --git a/ntoskrnl/mm/ARM3/expool.c b/ntoskrnl/mm/ARM3/expool.c
index 4ec3329f06..9625e50f44 100644
--- a/ntoskrnl/mm/ARM3/expool.c
+++ b/ntoskrnl/mm/ARM3/expool.c
@@ -47,6 +47,7 @@ KSPIN_LOCK ExpLargePoolTableLock;
 ULONG ExpPoolBigEntriesInUse;
 ULONG ExpPoolFlags;
 ULONG ExPoolFailures;
+ULONGLONG MiLastPoolDumpTime;
 
 /* Pool block/header/list access macros */
 #define POOL_ENTRY(x)       (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
@@ -1937,11 +1938,14 @@ ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
             // Out of memory, display current consumption
             // Let's consider that if the caller wanted more
             // than a hundred pages, that's a bogus caller
-            // and we are not out of memory
+            // and we are not out of memory. Dump at most
+            // once a second to avoid spamming the log.
             //
-            if (NumberOfBytes < 100 * PAGE_SIZE)
+            if (NumberOfBytes < 100 * PAGE_SIZE &&
+                KeQueryInterruptTime() >= MiLastPoolDumpTime + 10000000)
             {
                 MiDumpPoolConsumers(FALSE, 0, 0, 0);
+                MiLastPoolDumpTime = KeQueryInterruptTime();
             }
 #endif
 
@@ -2276,11 +2280,14 @@ ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
         // Out of memory, display current consumption
         // Let's consider that if the caller wanted more
         // than a hundred pages, that's a bogus caller
-        // and we are not out of memory
+        // and we are not out of memory. Dump at most
+        // once a second to avoid spamming the log.
         //
-        if (NumberOfBytes < 100 * PAGE_SIZE)
+        if (NumberOfBytes < 100 * PAGE_SIZE &&
+            KeQueryInterruptTime() >= MiLastPoolDumpTime + 10000000)
         {
             MiDumpPoolConsumers(FALSE, 0, 0, 0);
+            MiLastPoolDumpTime = KeQueryInterruptTime();
         }
 #endif
 

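For context, KeQueryInterruptTime() returns the interrupt time in 100-nanosecond units, so the 10000000 constant above corresponds to one second. Below is a minimal user-mode sketch of the same throttling pattern; it is not part of the commit. The names query_time_100ns, dump_consumers, report_failure, LastDumpTime and TICKS_PER_SECOND are hypothetical stand-ins (clock_gettime(CLOCK_MONOTONIC) is used in place of the kernel-only KeQueryInterruptTime, and a plain printout in place of MiDumpPoolConsumers), but the check mirrors the one in the diff.

/*
 * Sketch only: user-mode analogue of the "dump at most once per second"
 * throttle added in this commit. All names here are illustrative.
 */
#include <stdio.h>
#include <time.h>

#define TICKS_PER_SECOND 10000000ULL   /* 100-ns ticks in one second */

static unsigned long long LastDumpTime; /* analogous to MiLastPoolDumpTime */

/* Monotonic time in 100-ns units (stand-in for KeQueryInterruptTime). */
static unsigned long long query_time_100ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (unsigned long long)ts.tv_sec * TICKS_PER_SECOND
         + (unsigned long long)ts.tv_nsec / 100;
}

/* Stand-in for MiDumpPoolConsumers: something expensive we want to rate-limit. */
static void dump_consumers(void)
{
    puts("pool consumers dumped");
}

/* Called on every allocation failure; dumps at most once per second. */
static void report_failure(void)
{
    if (query_time_100ns() >= LastDumpTime + TICKS_PER_SECOND)
    {
        dump_consumers();
        LastDumpTime = query_time_100ns();
    }
}

int main(void)
{
    /* Simulate a burst of failures; only the first one triggers a dump. */
    for (int i = 0; i < 5; i++)
        report_failure();
    return 0;
}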