This is an automated email from the ASF dual-hosted git repository.

janh pushed a commit to branch branch-1
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-1 by this push:
     new 99a59cf  HBASE-23622 Reduced the number of Checkstyle violations in hbase-common
99a59cf is described below

commit 99a59cf9e7122d5dd25cd464cea5ac4f6c96cdb0
Author: Jan Hentschel <j...@apache.org>
AuthorDate: Sun Jan 19 18:02:08 2020 +0100

    HBASE-23622 Reduced the number of Checkstyle violations in hbase-common
    
    Signed-off-by: Sean Busbey <bus...@apache.org>
---
 .../resources/hbase/checkstyle-suppressions.xml    |   2 +
 .../org/apache/hadoop/hbase/CellComparator.java    |   2 +-
 .../java/org/apache/hadoop/hbase/net/Address.java  |   7 +-
 .../hadoop/hbase/trace/SpanReceiverHost.java       |  10 +-
 .../apache/hadoop/hbase/util/ByteRangeUtils.java   |  17 +-
 .../apache/hadoop/hbase/util/CommonFSUtils.java    | 178 +++++----------------
 .../hadoop/hbase/util/ConcatenatedLists.java       |   5 +-
 .../java/org/apache/hadoop/hbase/util/Order.java   |  41 +++--
 8 files changed, 96 insertions(+), 166 deletions(-)

diff --git a/hbase-checkstyle/src/main/resources/hbase/checkstyle-suppressions.xml b/hbase-checkstyle/src/main/resources/hbase/checkstyle-suppressions.xml
index 72358ab..cdd88e0 100644
--- a/hbase-checkstyle/src/main/resources/hbase/checkstyle-suppressions.xml
+++ b/hbase-checkstyle/src/main/resources/hbase/checkstyle-suppressions.xml
@@ -34,4 +34,6 @@
   <suppress checks="." files=".*/generated/.*\.java"/>
   <suppress checks="." files=".*/generated-jamon/.*\.java"/>
   <suppress checks="MagicNumberCheck" files=".*Test\.java"/>
+  <!-- Will not have a private constructor, because it is InterfaceAudience.Public -->
+  <suppress checks="HideUtilityClassConstructor" files="org.apache.hadoop.hbase.util.ByteRangeUtils"/>
 </suppressions>
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
index d8fcfe4..a2c6bdc 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
@@ -55,7 +55,7 @@ public class CellComparator implements Comparator<Cell>, Serializable {
    * @param a
    * @param b
   * @param ignoreSequenceid True if we are to compare the key portion only and ignore
-   * the sequenceid. Set to false to compare key and consider sequenceid.
+   *    the sequenceid. Set to false to compare key and consider sequenceid.
    * @return 0 if equal, -1 if a &lt; b, and +1 if a &gt; b.
    */
   public static int compare(final Cell a, final Cell b, boolean ignoreSequenceid) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java
index 15b4960..e6ee60b 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java
@@ -26,7 +26,7 @@ import com.google.common.net.HostAndPort;
  * An immutable type to hold a hostname and port combo, like an Endpoint
  * or java.net.InetSocketAddress (but without danger of our calling
  * resolve -- we do NOT want a resolve happening every time we want
- * to hold a hostname and port combo). This class is also <<Comparable>>.
+ * to hold a hostname and port combo). This class is also {@link Comparable}
 * <p>In implementation this class is a facade over Guava's {@link HostAndPort}.
  * We cannot have Guava classes in our API hence this Type.
  */
@@ -83,7 +83,10 @@ public class Address implements Comparable<Address> {
   @Override
   public int compareTo(Address that) {
     int compare = this.getHostname().compareTo(that.getHostname());
-    if (compare != 0) return compare;
+    if (compare != 0) {
+      return compare;
+    }
+
     return this.getPort() - that.getPort();
   }
 }
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java
index 4818efc..63241c0 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java
@@ -43,7 +43,7 @@ public class SpanReceiverHost {
   private boolean closed = false;
 
   @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="SE_BAD_FIELD")
-  private static enum SingletonHolder {
+  private enum SingletonHolder {
     INSTANCE;
     final Object lock = new Object();
     SpanReceiverHost host = null; // FindBugs: SE_BAD_FIELD
@@ -64,14 +64,13 @@ public class SpanReceiverHost {
   }
 
   SpanReceiverHost(Configuration conf) {
-    receivers = new HashSet<SpanReceiver>();
+    receivers = new HashSet<>();
     this.conf = conf;
   }
 
   /**
    * Reads the names of classes specified in the {@code hbase.trace.spanreceiver.classes} property
    * and instantiates and registers them with the Tracer.
-   *
    */
   public void loadSpanReceivers() {
     String[] receiverNames = conf.getStrings(SPAN_RECEIVERS_CONF_KEY);
@@ -98,7 +97,10 @@ public class SpanReceiverHost {
    * Calls close() on all SpanReceivers created by this SpanReceiverHost.
    */
   public synchronized void closeReceivers() {
-    if (closed) return;
+    if (closed) {
+      return;
+    }
+
     closed = true;
     for (SpanReceiver rcvr : receivers) {
       try {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRangeUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRangeUtils.java
index 7de1b13..f33e3ab 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRangeUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRangeUtils.java
@@ -15,9 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.util;
 
+import com.google.common.collect.Lists;
+
 import java.io.IOException;
 import java.io.OutputStream;
 import java.util.ArrayList;
@@ -26,19 +27,18 @@ import java.util.Collection;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 
-import com.google.common.collect.Lists;
-
 /**
  * Utility methods for working with {@link ByteRange}.
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class ByteRangeUtils {
-
   public static int numEqualPrefixBytes(ByteRange left, ByteRange right, int rightInnerOffset) {
     int maxCompares = Math.min(left.getLength(), right.getLength() - rightInnerOffset);
-    final byte[] lbytes = left.getBytes(), rbytes = right.getBytes();
-    final int loffset = left.getOffset(), roffset = right.getOffset();
+    final byte[] lbytes = left.getBytes();
+    final byte[] rbytes = right.getBytes();
+    final int loffset = left.getOffset();
+    final int roffset = right.getOffset();
     for (int i = 0; i < maxCompares; ++i) {
       if (lbytes[loffset + i] != rbytes[roffset + rightInnerOffset + i]) {
         return i;
@@ -49,7 +49,7 @@ public class ByteRangeUtils {
 
   public static ArrayList<byte[]> copyToNewArrays(Collection<ByteRange> ranges) {
     if (ranges == null) {
-      return new ArrayList<byte[]>(0);
+      return new ArrayList<>(0);
     }
     ArrayList<byte[]> arrays = Lists.newArrayListWithCapacity(ranges.size());
     for (ByteRange range : ranges) {
@@ -60,7 +60,7 @@ public class ByteRangeUtils {
 
   public static ArrayList<ByteRange> fromArrays(Collection<byte[]> arrays) {
     if (arrays == null) {
-      return new ArrayList<ByteRange>(0);
+      return new ArrayList<>(0);
     }
     ArrayList<ByteRange> ranges = Lists.newArrayListWithCapacity(arrays.size());
     for (byte[] array : arrays) {
@@ -78,5 +78,4 @@ public class ByteRangeUtils {
     os.write(byteRange.getBytes(), byteRange.getOffset() + byteRangeInnerOffset,
       byteRange.getLength() - byteRangeInnerOffset);
   }
-
 }
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
index b52d3dc..ef6d489 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
@@ -44,10 +44,10 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.ipc.RemoteException;
 
 /**
  * Utility methods for interacting with the underlying file system.
@@ -137,8 +137,7 @@ public abstract class CommonFSUtils {
    * @return True if deleted <code>dir</code>
    * @throws IOException e
    */
-  public static boolean deleteDirectory(final FileSystem fs, final Path dir)
-  throws IOException {
+  public static boolean deleteDirectory(final FileSystem fs, final Path dir) throws IOException {
     return fs.exists(dir) && fs.delete(dir, true);
   }
 
@@ -146,69 +145,22 @@ public abstract class CommonFSUtils {
    * Return the number of bytes that large input files should be optimally
    * be split into to minimize i/o time.
    *
-   * use reflection to search for getDefaultBlockSize(Path f)
-   * if the method doesn't exist, fall back to using getDefaultBlockSize()
-   *
    * @param fs filesystem object
    * @return the default block size for the path's filesystem
-   * @throws IOException e
    */
-  public static long getDefaultBlockSize(final FileSystem fs, final Path path) throws IOException {
-    Method m = null;
-    Class<? extends FileSystem> cls = fs.getClass();
-    try {
-      m = cls.getMethod("getDefaultBlockSize", new Class<?>[] { Path.class });
-    } catch (NoSuchMethodException e) {
-      LOG.info("FileSystem doesn't support getDefaultBlockSize");
-    } catch (SecurityException e) {
-      LOG.info("Doesn't have access to getDefaultBlockSize on FileSystems", e);
-      m = null; // could happen on setAccessible()
-    }
-    if (m == null) {
-      return fs.getDefaultBlockSize(path);
-    } else {
-      try {
-        Object ret = m.invoke(fs, path);
-        return ((Long)ret).longValue();
-      } catch (Exception e) {
-        throw new IOException(e);
-      }
-    }
+  public static long getDefaultBlockSize(final FileSystem fs, final Path path) {
+    return fs.getDefaultBlockSize(path);
   }
 
   /*
    * Get the default replication.
    *
-   * use reflection to search for getDefaultReplication(Path f)
-   * if the method doesn't exist, fall back to using getDefaultReplication()
-   *
    * @param fs filesystem object
    * @param f path of file
    * @return default replication for the path's filesystem
-   * @throws IOException e
    */
-  public static short getDefaultReplication(final FileSystem fs, final Path path)
-      throws IOException {
-    Method m = null;
-    Class<? extends FileSystem> cls = fs.getClass();
-    try {
-      m = cls.getMethod("getDefaultReplication", new Class<?>[] { Path.class 
});
-    } catch (NoSuchMethodException e) {
-      LOG.info("FileSystem doesn't support getDefaultReplication");
-    } catch (SecurityException e) {
-      LOG.info("Doesn't have access to getDefaultReplication on FileSystems", 
e);
-      m = null; // could happen on setAccessible()
-    }
-    if (m == null) {
-      return fs.getDefaultReplication(path);
-    } else {
-      try {
-        Object ret = m.invoke(fs, path);
-        return ((Number)ret).shortValue();
-      } catch (Exception e) {
-        throw new IOException(e);
-      }
-    }
+  public static short getDefaultReplication(final FileSystem fs, final Path path) {
+    return fs.getDefaultReplication(path);
   }
 
   /**
@@ -358,11 +310,11 @@ public abstract class CommonFSUtils {
     return p.makeQualified(fs);
   }
 
-  public static void setRootDir(final Configuration c, final Path root) throws IOException {
+  public static void setRootDir(final Configuration c, final Path root) {
     c.set(HConstants.HBASE_DIR, root.toString());
   }
 
-  public static void setFsDefault(final Configuration c, final Path root) throws IOException {
+  public static void setFsDefault(final Configuration c, final Path root) {
     c.set("fs.defaultFS", root.toString());    // for hadoop 0.21+
   }
 
@@ -387,7 +339,7 @@ public abstract class CommonFSUtils {
   }
 
   @VisibleForTesting
-  public static void setWALRootDir(final Configuration c, final Path root) throws IOException {
+  public static void setWALRootDir(final Configuration c, final Path root) {
     c.set(HBASE_WAL_DIR, root.toString());
   }
 
@@ -481,8 +433,7 @@ public abstract class CommonFSUtils {
     setStoragePolicy(fs, path, storagePolicy);
   }
 
-  private static final Map<FileSystem, Boolean> warningMap =
-      new ConcurrentHashMap<FileSystem, Boolean>();
+  private static final Map<FileSystem, Boolean> warningMap = new ConcurrentHashMap<>();
 
   /**
    * Sets storage policy for given path.
@@ -557,71 +508,43 @@ public abstract class CommonFSUtils {
    */
   private static void invokeSetStoragePolicy(final FileSystem fs, final Path path,
       final String storagePolicy) throws IOException {
-    Method m = null;
     Exception toThrow = null;
+
     try {
-      m = fs.getClass().getDeclaredMethod("setStoragePolicy",
-        new Class<?>[] { Path.class, String.class });
-      m.setAccessible(true);
-    } catch (NoSuchMethodException e) {
-      toThrow = e;
-      final String msg = "FileSystem doesn't support setStoragePolicy; 
HDFS-6584 not available";
-      if (!warningMap.containsKey(fs)) {
-        warningMap.put(fs, true);
-        LOG.warn(msg, e);
-      } else if (LOG.isDebugEnabled()) {
-        LOG.debug(msg, e);
+      fs.setStoragePolicy(path, storagePolicy);
+
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Set storagePolicy=" + storagePolicy + " for path=" + path);
       }
-      m = null;
-    } catch (SecurityException e) {
+    } catch (Exception e) {
       toThrow = e;
-      final String msg = "No access to setStoragePolicy on FileSystem; 
HDFS-6584 not available";
+      // This swallows FNFE, should we be throwing it? seems more likely to indicate dev
+      // misuse than a runtime problem with HDFS.
       if (!warningMap.containsKey(fs)) {
         warningMap.put(fs, true);
-        LOG.warn(msg, e);
+        LOG.warn("Unable to set storagePolicy=" + storagePolicy + " for path=" 
+ path, e);
       } else if (LOG.isDebugEnabled()) {
-        LOG.debug(msg, e);
+        LOG.debug("Unable to set storagePolicy=" + storagePolicy + " for 
path=" + path, e);
       }
-      m = null; // could happen on setAccessible()
-    }
-    if (m != null) {
-      try {
-        m.invoke(fs, path, storagePolicy);
+
+      // check for lack of HDFS-7228
+      if (e instanceof RemoteException &&
+          HadoopIllegalArgumentException.class.getName().equals(
+            ((RemoteException)e).getClassName())) {
         if (LOG.isDebugEnabled()) {
-          LOG.debug("Set storagePolicy=" + storagePolicy + " for path=" + 
path);
-        }
-      } catch (Exception e) {
-        toThrow = e;
-        // This swallows FNFE, should we be throwing it? seems more likely to indicate dev
-        // misuse than a runtime problem with HDFS.
-        if (!warningMap.containsKey(fs)) {
-          warningMap.put(fs, true);
-          LOG.warn("Unable to set storagePolicy=" + storagePolicy + " for 
path=" + path, e);
-        } else if (LOG.isDebugEnabled()) {
-          LOG.debug("Unable to set storagePolicy=" + storagePolicy + " for 
path=" + path, e);
-        }
-        // check for lack of HDFS-7228
-        if (e instanceof InvocationTargetException) {
-          final Throwable exception = e.getCause();
-          if (exception instanceof RemoteException &&
-              HadoopIllegalArgumentException.class.getName().equals(
-                ((RemoteException)exception).getClassName())) {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Given storage policy, '" +storagePolicy +"', was 
rejected and probably " +
-                "isn't a valid policy for the version of Hadoop you're 
running. I.e. if you're " +
-                "trying to use SSD related policies then you're likely missing 
HDFS-7228. For " +
-                "more information see the 'ArchivalStorage' docs for your 
Hadoop release.");
-            }
-          }
+          LOG.debug("Given storage policy, '" +storagePolicy +"', was rejected 
and probably " +
+            "isn't a valid policy for the version of Hadoop you're running. 
I.e. if you're " +
+            "trying to use SSD related policies then you're likely missing 
HDFS-7228. For " +
+            "more information see the 'ArchivalStorage' docs for your Hadoop 
release.");
         }
       }
     }
+
     if (toThrow != null) {
       throw new IOException(toThrow);
     }
   }
 
-
   /**
    * @param conf must not be null
    * @return True if this filesystem whose scheme is 'hdfs'.
@@ -647,8 +570,7 @@ public abstract class CommonFSUtils {
    * @return Returns the filesystem of the hbase rootdir.
    * @throws IOException from underlying FileSystem
    */
-  public static FileSystem getCurrentFileSystem(Configuration conf)
-  throws IOException {
+  public static FileSystem getCurrentFileSystem(Configuration conf) throws IOException {
     return getRootDir(conf).getFileSystem(conf);
   }
 
@@ -666,7 +588,7 @@ public abstract class CommonFSUtils {
    * @param filter path filter
    * @return null if dir is empty or doesn't exist, otherwise FileStatus array
    */
-  public static FileStatus [] listStatus(final FileSystem fs,
+  public static FileStatus[] listStatus(final FileSystem fs,
       final Path dir, final PathFilter filter) throws IOException {
     FileStatus [] status = null;
     try {
@@ -753,13 +675,13 @@ public abstract class CommonFSUtils {
    * Log the current state of the filesystem from a certain root directory
    * @param fs filesystem to investigate
    * @param root root file/directory to start logging from
-   * @param LOG log to output information
+   * @param log log to output information
    * @throws IOException if an unexpected exception occurs
    */
-  public static void logFileSystemState(final FileSystem fs, final Path root, Log LOG)
+  public static void logFileSystemState(final FileSystem fs, final Path root, Log log)
       throws IOException {
-    LOG.debug("Current file system:");
-    logFSTree(LOG, fs, root, "|-");
+    log.debug("Current file system:");
+    logFSTree(log, fs, root, "|-");
   }
 
   /**
@@ -767,7 +689,7 @@ public abstract class CommonFSUtils {
    *
    * @see #logFileSystemState(FileSystem, Path, Log)
    */
-  private static void logFSTree(Log LOG, final FileSystem fs, final Path root, String prefix)
+  private static void logFSTree(Log log, final FileSystem fs, final Path root, String prefix)
       throws IOException {
     FileStatus[] files = listStatus(fs, root, null);
     if (files == null) {
@@ -776,10 +698,10 @@ public abstract class CommonFSUtils {
 
     for (FileStatus file : files) {
       if (file.isDirectory()) {
-        LOG.debug(prefix + file.getPath().getName() + "/");
-        logFSTree(LOG, fs, file.getPath(), prefix + "---");
+        log.debug(prefix + file.getPath().getName() + "/");
+        logFSTree(log, fs, file.getPath(), prefix + "---");
       } else {
-        LOG.debug(prefix + file.getPath().getName());
+        log.debug(prefix + file.getPath().getName());
       }
     }
   }
@@ -792,25 +714,6 @@ public abstract class CommonFSUtils {
   }
 
   /**
-   * Do our short circuit read setup.
-   * Checks buffer size to use and whether to do checksumming in hbase or hdfs.
-   * @param conf must not be null
-   */
-  public static void setupShortCircuitRead(final Configuration conf) {
-    // Check that the user has not set the "dfs.client.read.shortcircuit.skip.checksum" property.
-    boolean shortCircuitSkipChecksum =
-      conf.getBoolean("dfs.client.read.shortcircuit.skip.checksum", false);
-    boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
-    if (shortCircuitSkipChecksum) {
-      LOG.warn("Configuration \"dfs.client.read.shortcircuit.skip.checksum\" 
should not " +
-        "be set to true." + (useHBaseChecksum ? " HBase checksum doesn't 
require " +
-        "it, see https://issues.apache.org/jira/browse/HBASE-6868."; : ""));
-      assert !shortCircuitSkipChecksum; //this will fail if assertions are on
-    }
-    checkShortCircuitReadBufferSize(conf);
-  }
-
-  /**
    * Check if short circuit read buffer size is set and if not, set it to hbase value.
    * @param conf must not be null
    */
@@ -912,5 +815,4 @@ public abstract class CommonFSUtils {
       super(message);
     }
   }
-
 }
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcatenatedLists.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcatenatedLists.java
index f6fb4b9..93c29f9 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcatenatedLists.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcatenatedLists.java
@@ -79,7 +79,10 @@ public class ConcatenatedLists<T> extends AbstractCollection<T> {
       if (!components.isEmpty()) {
         this.nextWasCalled = true;
         List<T> src = components.get(currentComponent);
-        if (++indexWithinComponent < src.size()) return src.get(indexWithinComponent);
+        if (++indexWithinComponent < src.size()) {
+          return src.get(indexWithinComponent);
+        }
+
         if (++currentComponent < components.size()) {
           indexWithinComponent = 0;
           src = components.get(currentComponent);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Order.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Order.java
index 9cbbe7e..57d0aed 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Order.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Order.java
@@ -29,22 +29,31 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public enum Order {
-
   ASCENDING {
     @Override
-    public int cmp(int cmp) { /* noop */ return cmp; }
+    public int cmp(int cmp) {
+      /* noop */ return cmp;
+    }
 
     @Override
-    public byte apply(byte val) { /* noop */ return val; }
+    public byte apply(byte val) {
+      /* noop */ return val;
+    }
 
     @Override
-    public void apply(byte[] val) { /* noop */ }
+    public void apply(byte[] val) {
+      /* noop */
+    }
 
     @Override
-    public void apply(byte[] val, int offset, int length) { /* noop */ }
+    public void apply(byte[] val, int offset, int length) {
+      /* noop */
+    }
 
     @Override
-    public String toString() { return "ASCENDING"; }
+    public String toString() {
+      return "ASCENDING";
+    }
   },
 
   DESCENDING {
@@ -55,23 +64,33 @@ public enum Order {
     private static final byte MASK = (byte) 0xff;
 
     @Override
-    public int cmp(int cmp) { return -1 * cmp; }
+    public int cmp(int cmp) {
+      return -1 * cmp;
+    }
 
     @Override
-    public byte apply(byte val) { return (byte) (val ^ MASK); }
+    public byte apply(byte val) {
+      return (byte) (val ^ MASK);
+    }
 
     @Override
     public void apply(byte[] val) {
-      for (int i = 0; i < val.length; i++) { val[i] ^= MASK; }
+      for (int i = 0; i < val.length; i++) {
+        val[i] ^= MASK;
+      }
     }
 
     @Override
     public void apply(byte[] val, int offset, int length) {
-      for (int i = 0; i < length; i++) { val[offset + i] ^= MASK; }
+      for (int i = 0; i < length; i++) {
+        val[offset + i] ^= MASK;
+      }
     }
 
     @Override
-    public String toString() { return "DESCENDING"; }
+    public String toString() {
+      return "DESCENDING";
+    }
   };
 
   /**
