This is an automated email from the ASF dual-hosted git repository.

ctubbsii pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/accumulo.git


The following commit(s) were added to refs/heads/main by this push:
     new 5f20e38  Avoid NPE when comparing equality (#2326)
5f20e38 is described below

commit 5f20e38628abd6385a5ee652634f4d748ecd013f
Author: EdColeman <d...@etcoleman.com>
AuthorDate: Mon Oct 25 13:29:22 2021 -0400

    Avoid NPE when comparing equality (#2326)
    
    * Use Objects.equals(a,b) instead of a.equals(b) to avoid NPE when a is null
    * Apply other minor IDE suggested checkstyle corrections
    
    Co-authored-by: Ed Coleman etcoleman <edcole...@apache.org>
---
 .../apache/accumulo/gc/SimpleGarbageCollector.java | 43 ++++++++++------------
 1 file changed, 20 insertions(+), 23 deletions(-)
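
For readers less familiar with the change, here is a minimal standalone sketch (illustrative only, not part of the patch; the class name and sample values are made up) of why Objects.equals is the null-safe replacement the commit message describes:

    import java.util.Objects;

    public class EqualsNullDemo {
      public static void main(String[] args) {
        String a = null;            // e.g. a volume that could not be resolved
        String b = "file:/vol1";

        // a.equals(b) would throw a NullPointerException here because a is null.
        // Objects.equals(a, b) tolerates null on either side and returns false instead.
        System.out.println(Objects.equals(a, b));       // prints: false
        System.out.println(Objects.equals(null, null)); // prints: true
      }
    }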

diff --git a/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java b/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java
index 3b7761f..2c67b7c 100644
--- a/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java
+++ b/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java
@@ -31,6 +31,7 @@ import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map.Entry;
+import java.util.Objects;
 import java.util.Set;
 import java.util.SortedMap;
 import java.util.UUID;
@@ -97,7 +98,6 @@ import org.apache.accumulo.server.rpc.ThriftServerType;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
 import org.apache.htrace.impl.ProbabilitySampler;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
@@ -110,19 +110,17 @@ import com.google.protobuf.InvalidProtocolBufferException;
 
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 
-// Could/Should implement HighlyAvaialbleService but the Thrift server is already started before
+// Could/Should implement HighlyAvailableService but the Thrift server is already started before
 // the ZK lock is acquired. The server is only for metrics, there are no concerns about clients
 // using the service before the lock is acquired.
 public class SimpleGarbageCollector extends AbstractServer implements Iface {
 
   private static final Logger log = LoggerFactory.getLogger(SimpleGarbageCollector.class);
 
-  private ServiceLock lock;
-
-  private GCStatus status =
+  private final GCStatus status =
       new GCStatus(new GcCycleStats(), new GcCycleStats(), new GcCycleStats(), new GcCycleStats());
 
-  private GcCycleMetrics gcCycleMetrics = new GcCycleMetrics();
+  private final GcCycleMetrics gcCycleMetrics = new GcCycleMetrics();
 
   public static void main(String[] args) throws Exception {
     try (SimpleGarbageCollector gc = new SimpleGarbageCollector(new ServerOpts(), args)) {
@@ -201,14 +199,14 @@ public class SimpleGarbageCollector extends AbstractServer implements Iface {
 
   private class GCEnv implements GarbageCollectionEnvironment {
 
-    private DataLevel level;
+    private final DataLevel level;
 
     GCEnv(Ample.DataLevel level) {
       this.level = level;
     }
 
     @Override
-    public Iterator<String> getCandidates() throws TableNotFoundException {
+    public Iterator<String> getCandidates() {
       return getContext().getAmple().getGcCandidates(level);
     }
 
@@ -262,7 +260,7 @@ public class SimpleGarbageCollector extends AbstractServer implements Iface {
             .checkConsistency().fetch(DIR, FILES, SCANS).build().stream();
       }
 
-      Stream<Reference> refStream = tabletStream.flatMap(tm -> {
+      return tabletStream.flatMap(tm -> {
         Stream<Reference> refs = Stream.concat(tm.getFiles().stream(), tm.getScans().stream())
             .map(f -> new Reference(tm.getTableId(), f.getMetaUpdateDelete(), false));
         if (tm.getDirName() != null) {
@@ -271,8 +269,6 @@ public class SimpleGarbageCollector extends AbstractServer implements Iface {
         }
         return refs;
       });
-
-      return refStream;
     }
 
     @Override
@@ -323,7 +319,7 @@ public class SimpleGarbageCollector extends AbstractServer implements Iface {
               // volume switching when something needs to be deleted. Since the rest of the code
               // uses suffixes to compare delete entries, there is no danger
               // of deleting something that should not be deleted. Must not change value of delete
-              // variable because thats whats stored in metadata table.
+              // variable because that's what's stored in metadata table.
               log.debug("Volume replaced {} -> {}", delete, switchedDelete);
               fullPath = TabletFileUtil.validate(switchedDelete);
             } else {
@@ -402,7 +398,7 @@ public class SimpleGarbageCollector extends AbstractServer implements Iface {
       // if dir exist and is empty, then empty list is returned...
       // hadoop 2.0 will throw an exception if the file does not exist
       for (String dir : getContext().getTablesDirs()) {
-        FileStatus[] tabletDirs = null;
+        FileStatus[] tabletDirs;
         try {
           tabletDirs = fs.listStatus(new Path(dir + "/" + tableID));
         } catch (FileNotFoundException ex) {
@@ -482,7 +478,7 @@ public class SimpleGarbageCollector extends AbstractServer implements Iface {
         TraceUtil.probabilitySampler(getConfiguration().getFraction(Property.GC_TRACE_PERCENT));
 
     // This is created outside of the run loop and passed to the walogCollector so that
-    // only a single timed task is created (internal to LiveTServerSet using SimpleTimer.
+    // only a single timed task is created (internal to LiveTServerSet) using SimpleTimer.
     final LiveTServerSet liveTServerSet =
         new LiveTServerSet(getContext(), (current, deleted, added) -> {
           log.debug("Number of current servers {}, tservers added {}, removed 
{}",
@@ -494,8 +490,8 @@ public class SimpleGarbageCollector extends AbstractServer implements Iface {
         });
 
     while (true) {
-      try (TraceScope gcOuterSpan = Trace.startSpan("gc", sampler)) {
-        try (TraceScope gcSpan = Trace.startSpan("loop")) {
+      try (var ignored = Trace.startSpan("gc", sampler)) {
+        try (var ignored1 = Trace.startSpan("loop")) {
           final long tStart = System.nanoTime();
           try {
             System.gc(); // make room
@@ -529,7 +525,7 @@ public class SimpleGarbageCollector extends AbstractServer implements Iface {
           * are no longer referenced in the metadata table before running
           * GarbageCollectWriteAheadLogs to ensure we delete as many files as possible.
            */
-          try (TraceScope replSpan = Trace.startSpan("replicationClose")) {
+          try (var ignored2 = Trace.startSpan("replicationClose")) {
             CloseWriteAheadLogReferences closeWals = new CloseWriteAheadLogReferences(getContext());
             closeWals.run();
           } catch (Exception e) {
@@ -537,7 +533,7 @@ public class SimpleGarbageCollector extends AbstractServer implements Iface {
           }
 
           // Clean up any unused write-ahead logs
-          try (TraceScope waLogs = Trace.startSpan("walogs")) {
+          try (var ignored2 = Trace.startSpan("walogs")) {
             GarbageCollectWriteAheadLogs walogCollector =
                 new GarbageCollectWriteAheadLogs(getContext(), fs, liveTServerSet, isUsingTrash());
             log.info("Beginning garbage collection of write-ahead logs");
@@ -567,7 +563,7 @@ public class SimpleGarbageCollector extends AbstractServer implements Iface {
               accumuloClient.tableOperations().flush(RootTable.NAME, null, null, true);
               break;
             default:
-              log.trace("\'none - no action\' or invalid value provided: {}", action);
+              log.trace("'none - no action' or invalid value provided: {}", action);
           }
 
           final long actionComplete = System.nanoTime();
@@ -633,7 +629,8 @@ public class SimpleGarbageCollector extends AbstractServer implements Iface {
 
     UUID zooLockUUID = UUID.randomUUID();
     while (true) {
-      lock = new ServiceLock(getContext().getZooReaderWriter().getZooKeeper(), path, zooLockUUID);
+      ServiceLock lock =
+          new ServiceLock(getContext().getZooReaderWriter().getZooKeeper(), path, zooLockUUID);
       if (lock.tryLock(lockWatcher,
           new ServerServices(addr.toString(), Service.GC_CLIENT).toString().getBytes())) {
         log.debug("Got GC ZooKeeper lock");
@@ -696,8 +693,8 @@ public class SimpleGarbageCollector extends AbstractServer implements Iface {
       List<String> processedDeletes, VolumeManager fs) {
     Set<Path> seenVolumes = new HashSet<>();
 
-    // when deleting a dir and all files in that dir, only need to delete the dir
-    // the dir will sort right before the files... so remove the files in this case
+    // when deleting a dir and all files in that dir, only need to delete the dir.
+    // The dir will sort right before the files... so remove the files in this case
     // to minimize namenode ops
     Iterator<Entry<String,String>> cdIter = confirmedDeletes.entrySet().iterator();
 
@@ -729,7 +726,7 @@ public class SimpleGarbageCollector extends AbstractServer implements Iface {
               }
             }
           } else {
-            sameVol = FileType.TABLE.getVolume(lastDirAbs).equals(vol);
+            sameVol = Objects.equals(FileType.TABLE.getVolume(lastDirAbs), vol);
           }
 
           if (sameVol) {
