Repository: accumulo
Updated Branches:
  refs/heads/master 66998005c -> c5f80656a


ACCUMULO-2411 Fix warnings in 1.5 branch

  Remove useless/no-op/broken javadocs and resolve unused warnings. Isolate
  and suppress the deprecation warning related to Hadoop FileSystem
  replication support across HDFS versions.


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/e2efee6d
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/e2efee6d
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/e2efee6d

Branch: refs/heads/master
Commit: e2efee6d58f1ad67554134dddbfb3f28df025f3a
Parents: 6258018
Author: Christopher Tubbs <ctubb...@apache.org>
Authored: Wed Feb 26 14:35:36 2014 -0500
Committer: Christopher Tubbs <ctubb...@apache.org>
Committed: Wed Feb 26 14:35:36 2014 -0500

----------------------------------------------------------------------
 .../apache/accumulo/fate/util/AddressUtil.java  | 27 ++++++------
 .../server/tabletserver/log/DfsLogger.java      | 44 +++++++-------------
 .../server/watcher/MonitorLog4jWatcher.java     | 10 +----
 3 files changed, 30 insertions(+), 51 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/e2efee6d/fate/src/main/java/org/apache/accumulo/fate/util/AddressUtil.java
----------------------------------------------------------------------
diff --git a/fate/src/main/java/org/apache/accumulo/fate/util/AddressUtil.java b/fate/src/main/java/org/apache/accumulo/fate/util/AddressUtil.java
index 7a8c269..fce7a62 100644
--- a/fate/src/main/java/org/apache/accumulo/fate/util/AddressUtil.java
+++ b/fate/src/main/java/org/apache/accumulo/fate/util/AddressUtil.java
@@ -16,7 +16,7 @@
  */
 package org.apache.accumulo.fate.util;
 
-import java.net.InetAddress; // workaround to enable @see/@link hyperlink
+import java.net.InetAddress;
 import java.net.UnknownHostException;
 import java.security.Security;
 
@@ -27,28 +27,29 @@ public class AddressUtil {
   private static final Logger log = Logger.getLogger(AddressUtil.class);
 
   /**
-   * Fetch the security value that determines how long DNS failures are cached.
-   * Looks up the security property 'networkaddress.cache.negative.ttl'. Should that fail returns
-   * the default value used in the Oracle JVM 1.4+, which is 10 seconds.
-   *
-   * @param originalException the host lookup that is the source of needing this lookup. maybe be null.
+   * Fetch the security value that determines how long DNS failures are cached. Looks up the security property 'networkaddress.cache.negative.ttl'. Should that
+   * fail returns the default value used in the Oracle JVM 1.4+, which is 10 seconds.
+   * 
+   * @param originalException
+   *          the host lookup that is the source of needing this lookup. maybe be null.
    * @return positive integer number of seconds
-   * @see java.net.InetAddress
-   * @throws IllegalArgumentException if dns failures are cached forever
+   * @see InetAddress
+   * @throws IllegalArgumentException
+   *           if dns failures are cached forever
    */
  static public int getAddressCacheNegativeTtl(UnknownHostException originalException) {
     int negativeTtl = 10;
     try {
      negativeTtl = Integer.parseInt(Security.getProperty("networkaddress.cache.negative.ttl"));
     } catch (NumberFormatException exception) {
-      log.warn("Failed to get JVM negative DNS respones cache TTL due to 
format problem (e.g. this JVM might not have the " +
-                "property). Falling back to default based on Oracle JVM 1.6 
(10s)", exception);
+      log.warn("Failed to get JVM negative DNS respones cache TTL due to 
format problem (e.g. this JVM might not have the "
+          + "property). Falling back to default based on Oracle JVM 1.6 
(10s)", exception);
     } catch (SecurityException exception) {
       log.warn("Failed to get JVM negative DNS response cache TTL due to 
security manager. Falling back to default based on Oracle JVM 1.6 (10s)", 
exception);
     }
     if (-1 == negativeTtl) {
-      log.error("JVM negative DNS repsonse cache TTL is set to 'forever' and 
host lookup failed. TTL can be changed with security property " +
-                "'networkaddress.cache.negative.ttl', see 
java.net.InetAddress.", originalException);
+      log.error("JVM negative DNS repsonse cache TTL is set to 'forever' and 
host lookup failed. TTL can be changed with security property "
+          + "'networkaddress.cache.negative.ttl', see java.net.InetAddress.", 
originalException);
       throw new IllegalArgumentException(originalException);
     } else if (0 > negativeTtl) {
       log.warn("JVM specified negative DNS response cache TTL was negative 
(and not 'forever'). Falling back to default based on Oracle JVM 1.6 (10s)");
@@ -56,5 +57,5 @@ public class AddressUtil {
     }
     return negativeTtl;
   }
-  
+
 }
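
For context, the getAddressCacheNegativeTtl() method above reads the JVM
security property 'networkaddress.cache.negative.ttl', for which -1 means
failed DNS lookups are cached forever, 0 means they are never cached, and a
positive value is the cache time in seconds (10 is the documented Oracle JVM
default). A minimal standalone sketch of that lookup, assuming only the
standard java.security.Security API (the class below is hypothetical and not
part of this commit):

    import java.security.Security;

    public class NegativeDnsCacheTtl {
      public static void main(String[] args) {
        String raw = Security.getProperty("networkaddress.cache.negative.ttl");
        int ttl = 10; // documented default when the property is unset
        try {
          ttl = Integer.parseInt(raw);
        } catch (NumberFormatException e) {
          // property missing or malformed; keep the default
        }
        // -1 == cache failures forever, 0 == never cache, >0 == seconds to cache
        System.out.println("negative DNS cache TTL: " + ttl);
      }
    }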

http://git-wip-us.apache.org/repos/asf/accumulo/blob/e2efee6d/server/src/main/java/org/apache/accumulo/server/tabletserver/log/DfsLogger.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/tabletserver/log/DfsLogger.java b/server/src/main/java/org/apache/accumulo/server/tabletserver/log/DfsLogger.java
index 64bac29..120c844 100644
--- a/server/src/main/java/org/apache/accumulo/server/tabletserver/log/DfsLogger.java
+++ b/server/src/main/java/org/apache/accumulo/server/tabletserver/log/DfsLogger.java
@@ -58,8 +58,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.util.Progressable;
 import org.apache.log4j.Logger;
-//import org.apache.hadoop.fs.CreateFlag;
-//import org.apache.hadoop.fs.Syncable;
 
 /**
  * Wrap a connection to a logger.
@@ -180,11 +178,6 @@ public class DfsLogger {
     }
   }
 
-  /*
-   * (non-Javadoc)
-   * 
-   * @see org.apache.accumulo.server.tabletserver.log.IRemoteLogger#equals(java.lang.Object)
-   */
   @Override
   public boolean equals(Object obj) {
     // filename is unique
@@ -195,11 +188,6 @@ public class DfsLogger {
     return false;
   }
 
-  /*
-   * (non-Javadoc)
-   * 
-   * @see org.apache.accumulo.server.tabletserver.log.IRemoteLogger#hashCode()
-   */
   @Override
   public int hashCode() {
     // filename is unique
@@ -247,6 +235,12 @@ public class DfsLogger {
     }
   }
 
+  @SuppressWarnings("deprecation")
+  private static short _getReplication(FileSystem fs) {
+    // use fs.getDefaultReplication(logPath) in hadoop 1.2 or greater
+    return fs.getDefaultReplication();
+  }
+
   public synchronized void open(String address) throws IOException {
     String filename = UUID.randomUUID().toString();
     logger = StringUtil.join(Arrays.asList(address.split(":")), "+");
@@ -258,7 +252,7 @@ public class DfsLogger {
       FileSystem fs = conf.getFileSystem();
      short replication = (short) conf.getConfiguration().getCount(Property.TSERV_WAL_REPLICATION);
      if (replication == 0)
-        replication = fs.getDefaultReplication();  // use fs.getDefaultReplication(logPath) in hadoop 1.2 or greater
+        replication = _getReplication(fs);
      long blockSize = conf.getConfiguration().getMemoryInBytes(Property.TSERV_WAL_BLOCKSIZE);
      if (blockSize == 0)
        blockSize = (long) (conf.getConfiguration().getMemoryInBytes(Property.TSERV_WALOG_MAX_SIZE) * 1.1);
@@ -363,11 +357,6 @@ public class DfsLogger {
     }
   }
 
-  /*
-   * (non-Javadoc)
-   * 
-   * @see org.apache.accumulo.server.tabletserver.log.IRemoteLogger#toString()
-   */
   @Override
   public String toString() {
     return getLogger() + "/" + getFileName();
@@ -426,11 +415,6 @@ public class DfsLogger {
     }
   }
 
-  /**
-   * @param key
-   * @param empty2
-   * @throws IOException
-   */
  private synchronized void write(LogFileKey key, LogFileValue value) throws IOException {
     key.write(encryptingLogFile);
     value.write(encryptingLogFile);
@@ -439,8 +423,8 @@ public class DfsLogger {
  public LoggerOperation log(int seq, int tid, Mutation mutation) throws IOException {
    return logManyTablets(Collections.singletonList(new TabletMutations(tid, seq, Collections.singletonList(mutation))));
   }
-  
-  private LoggerOperation logFileData(List<Pair<LogFileKey, LogFileValue>> keys) throws IOException {
+
+  private LoggerOperation logFileData(List<Pair<LogFileKey,LogFileValue>> keys) throws IOException {
     DfsLogger.LogWork work = new DfsLogger.LogWork(new CountDownLatch(1));
     synchronized (DfsLogger.this) {
       try {
@@ -452,7 +436,7 @@ public class DfsLogger {
         work.exception = e;
       }
     }
-    
+
     synchronized (closeLock) {
      // use a different lock for close check so that adding to work queue does not need
       // to wait on walog I/O operations
@@ -466,7 +450,7 @@ public class DfsLogger {
   }
 
  public LoggerOperation logManyTablets(List<TabletMutations> mutations) throws IOException {
-    List<Pair<LogFileKey, LogFileValue>> data = new ArrayList<Pair<LogFileKey, LogFileValue>>();
+    List<Pair<LogFileKey,LogFileValue>> data = new ArrayList<Pair<LogFileKey,LogFileValue>>();
     for (TabletMutations tabletMutations : mutations) {
       LogFileKey key = new LogFileKey();
       key.event = MANY_MUTATIONS;
@@ -474,7 +458,7 @@ public class DfsLogger {
       key.tid = tabletMutations.getTid();
       LogFileValue value = new LogFileValue();
       value.mutations = tabletMutations.getMutations();
-      data.add(new Pair<LogFileKey, LogFileValue>(key, value));
+      data.add(new Pair<LogFileKey,LogFileValue>(key, value));
     }
     return logFileData(data);
   }
@@ -484,7 +468,7 @@ public class DfsLogger {
     key.event = COMPACTION_FINISH;
     key.seq = seq;
     key.tid = tid;
-    return logFileData(Collections.singletonList(new Pair<LogFileKey, LogFileValue>(key, EMPTY)));
+    return logFileData(Collections.singletonList(new Pair<LogFileKey,LogFileValue>(key, EMPTY)));
   }
 
  public LoggerOperation minorCompactionStarted(int seq, int tid, String fqfn) throws IOException {
@@ -493,7 +477,7 @@ public class DfsLogger {
     key.seq = seq;
     key.tid = tid;
     key.filename = fqfn;
-    return logFileData(Collections.singletonList(new Pair<LogFileKey, LogFileValue>(key, EMPTY)));
+    return logFileData(Collections.singletonList(new Pair<LogFileKey,LogFileValue>(key, EMPTY)));
   }
 
 }
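
The new _getReplication() helper above shows the pattern this change uses to
isolate the deprecation warning: the @SuppressWarnings("deprecation")
annotation is scoped to a one-line wrapper around the deprecated no-argument
FileSystem.getDefaultReplication(), so the body of open() keeps full warning
coverage. A minimal sketch of the same idea, assuming Hadoop's FileSystem API
(the class and method names below are illustrative, not Accumulo code; per
the comment above, Hadoop 1.2 or greater could use the path-based
getDefaultReplication(logPath) overload instead):

    import org.apache.hadoop.fs.FileSystem;

    final class ReplicationDefaults {
      // Only this one deprecated call is covered by the suppression.
      @SuppressWarnings("deprecation")
      static short defaultReplication(FileSystem fs) {
        return fs.getDefaultReplication();
      }
    }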

http://git-wip-us.apache.org/repos/asf/accumulo/blob/e2efee6d/server/src/main/java/org/apache/accumulo/server/watcher/MonitorLog4jWatcher.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/watcher/MonitorLog4jWatcher.java b/server/src/main/java/org/apache/accumulo/server/watcher/MonitorLog4jWatcher.java
index d0ca27f..ac3426e 100644
--- a/server/src/main/java/org/apache/accumulo/server/watcher/MonitorLog4jWatcher.java
+++ b/server/src/main/java/org/apache/accumulo/server/watcher/MonitorLog4jWatcher.java
@@ -43,12 +43,6 @@ public class MonitorLog4jWatcher extends FileWatchdog implements Watcher {
   private boolean loggingDisabled = false;
   protected String path;
 
-  /**
-   * @param zkPath
-   * @param filename
-   * @param delay
-   * @param propertyName
-   */
   public MonitorLog4jWatcher(String instance, String filename, int delay) {
     super(filename);
     setDelay(delay);
@@ -78,7 +72,7 @@ public class MonitorLog4jWatcher extends FileWatchdog implements Watcher {
       resetLogger();
       return;
     }
-    
+
     synchronized (lock) {
       // We might triggered by file-reloading or from ZK update.
       // Either way will result in log-forwarding being restarted
@@ -87,7 +81,7 @@ public class MonitorLog4jWatcher extends FileWatchdog implements Watcher {
       resetLogger();
     }
   }
-  
+
   private void resetLogger() {
     // Force a reset on the logger's configuration
     LogManager.resetConfiguration();
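
For context, MonitorLog4jWatcher extends log4j 1.x's FileWatchdog, and the
resetLogger() method above begins by forcing a reset of the logger's
configuration with LogManager.resetConfiguration(). A minimal sketch of the
general watch-and-reload pattern, assuming a log4j 1.x properties file on
disk (the watcher class below is hypothetical, not the Accumulo
implementation):

    import org.apache.log4j.LogManager;
    import org.apache.log4j.PropertyConfigurator;
    import org.apache.log4j.helpers.FileWatchdog;

    class ReloadingLog4jWatcher extends FileWatchdog {
      ReloadingLog4jWatcher(String filename) {
        super(filename);
        setDelay(5000); // re-check the file every 5 seconds
      }

      @Override
      public void doOnChange() {
        // Drop the current log4j configuration, then reload from the watched file.
        LogManager.resetConfiguration();
        PropertyConfigurator.configure(filename);
      }
    }

Calling start() on such a watchdog begins polling in a daemon thread, and
doOnChange() runs whenever the watched file's modification time changes.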
