keith-turner commented on code in PR #4881:
URL: https://github.com/apache/accumulo/pull/4881#discussion_r1757672071


##########
server/monitor/src/main/java/org/apache/accumulo/monitor/util/logging/RecentLogs.java:
##########
@@ -38,53 +40,44 @@ public class RecentLogs {
 
   private static final int MAX_LOGS = 50;
 
+  private final Cache<String,DedupedEvent> events =
+      Caffeine.newBuilder().maximumSize(MAX_LOGS).build();
+
   /**
    * Internal class for keeping the current count and most recent event that 
matches a given cache
    * key (derived from the event's application, logger, level, and message 
fields).
    */
   private static class DedupedEvent {
     private final SingleLogEvent event;
-    private final int count;
+    private final AtomicInteger count;
 
-    private DedupedEvent(SingleLogEvent event, int count) {
+    private DedupedEvent(SingleLogEvent event) {
       this.event = event;
-      this.count = count;
+      this.count = new AtomicInteger();
     }
   }
 
-  private final LinkedHashMap<String,DedupedEvent> events =
-      new LinkedHashMap<>(MAX_LOGS + 1, (float) .75, true) {
-
-        private static final long serialVersionUID = 1L;
-
-        @Override
-        protected boolean removeEldestEntry(Map.Entry<String,DedupedEvent> 
eldest) {
-          return size() > MAX_LOGS;
-        }
-      };
-
-  public synchronized void addEvent(SingleLogEvent event) {
+  public void addEvent(SingleLogEvent event) {
     String key = event.application + ":" + event.logger + ":" + event.level + 
":" + event.message;
-    int count = events.containsKey(key) ? events.remove(key).count + 1 : 1;
-    events.put(key, new DedupedEvent(event, count));
+    events.asMap().computeIfAbsent(key, k -> new 
DedupedEvent(event)).count.incrementAndGet();
   }
 
-  public synchronized void clearEvents() {
-    events.clear();
+  public void clearEvents() {
+    events.invalidateAll();
   }
 
-  public synchronized int numEvents() {
-    return events.size();
+  public int numEvents() {
+    return events.asMap().size();
   }
 
-  public synchronized boolean eventsIncludeErrors() {
-    return events.values().stream().anyMatch(
+  public boolean eventsIncludeErrors() {
+    return events.asMap().values().stream().anyMatch(
         x -> x.event.level.equalsIgnoreCase("ERROR") || 
x.event.level.equalsIgnoreCase("FATAL"));
   }
 
-  public synchronized List<SanitizedLogEvent> getSanitizedEvents() {
-    return events.values().stream().map(ev -> new SanitizedLogEvent(ev.event, 
ev.count))
-        .collect(Collectors.toList());
+  public List<SanitizedLogEvent> getSanitizedEvents() {
+    return events.asMap().values().stream()
+        .map(ev -> new SanitizedLogEvent(ev.event, 
ev.count.get())).collect(Collectors.toList());

Review Comment:
   This is probably not needed. With the old implementation the map could 
never exceed 50 entries, but with this more concurrent code it might 
occasionally go a bit over 50. I am not sure whether the code that calls this 
cares if it exceeds 50; adding the limit defends against that in case the 
calling code expects no more than 50.
   
   ```suggestion
           .map(ev -> new SanitizedLogEvent(ev.event, 
ev.count.get())).limit(MAX_LOGS).collect(Collectors.toList());
   ```



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to