Author: stack
Date: Thu Dec 17 19:03:36 2009
New Revision: 891843

URL: http://svn.apache.org/viewvc?rev=891843&view=rev
Log:
HBASE-2049  Cleanup HLog binary log output
Modified:
    hadoop/hbase/trunk/CHANGES.txt
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=891843&r1=891842&r2=891843&view=diff
==============================================================================
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Thu Dec 17 19:03:36 2009
@@ -235,6 +235,7 @@
    HBASE-2031  When starting HQuorumPeer, try to match on more than 1 address
    HBASE-2043  Shell's scan broken
    HBASE-2044  HBASE-1822 removed not-deprecated APIs
+   HBASE-2049  Cleanup HLog binary log output (Dave Latham via Stack)

  NEW FEATURES
    HBASE-1901  "General" partitioner for "hbase-48" bulk (behind the api, write

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java?rev=891843&r1=891842&r2=891843&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java Thu Dec 17 19:03:36 2009
@@ -480,7 +480,7 @@
         LOG.debug("Found " + sequenceNumbers.size() + " hlogs to remove " +
           " out of total " + this.outputfiles.size() + "; " +
           "oldest outstanding seqnum is " + oldestOutstandingSeqNum +
-          " from region " + Bytes.toString(oldestRegion));
+          " from region " + Bytes.toStringBinary(oldestRegion));
       }
       if (sequenceNumbers.size() > 0) {
         for (Long seq : sequenceNumbers) {
@@ -493,7 +493,7 @@
         oldestRegion: getOldestRegion(oldestOutstandingSeqNum);
       LOG.info("Too many hlogs: logs=" + countOfLogs + ", maxlogs=" +
         this.maxLogs + "; forcing flush of region with oldest edits: " +
-        Bytes.toString(regionToFlush));
+        Bytes.toStringBinary(regionToFlush));
     }
     return regionToFlush;
   }
@@ -1098,7 +1098,7 @@
         LinkedList<HLogEntry> queue = logEntries.get(regionName);
         if (queue == null) {
           queue = new LinkedList<HLogEntry>();
-          LOG.debug("Adding queue for " + Bytes.toString(regionName));
+          LOG.debug("Adding queue for " + Bytes.toStringBinary(regionName));
           logEntries.put(regionName, queue);
         }
         HLogEntry hle = new HLogEntry(val, key);
@@ -1145,7 +1145,7 @@
       ExecutorService threadPool =
         Executors.newFixedThreadPool(logWriterThreads);
       for (final byte[] key : logEntries.keySet()) {
-        Thread thread = new Thread(Bytes.toString(key)) {
+        Thread thread = new Thread(Bytes.toStringBinary(key)) {
          @Override
          public void run() {
            LinkedList<HLogEntry> entries = logEntries.get(key);
@@ -1188,7 +1188,7 @@
                  logWriters.put(key, wap);
                  if (LOG.isDebugEnabled()) {
                    LOG.debug("Creating new hlog file writer for path "
-                      + logfile + " and region " + Bytes.toString(key));
+                      + logfile + " and region " + Bytes.toStringBinary(key));
                  }

                  if (old != null) {
@@ -1211,12 +1211,12 @@
              }
              if (LOG.isDebugEnabled()) {
                LOG.debug("Applied " + count + " total edits to "
-                  + Bytes.toString(key) + " in "
+                  + Bytes.toStringBinary(key) + " in "
                  + (System.currentTimeMillis() - threadTime) + "ms");
              }
            } catch (IOException e) {
              e = RemoteExceptionHandler.checkIOException(e);
-              LOG.warn("Got while writing region " + Bytes.toString(key)
+              LOG.warn("Got while writing region " + Bytes.toStringBinary(key)
                + " log " + e);
              e.printStackTrace();
            }
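
Note on the change: region names are arbitrary byte arrays, so Bytes.toString (plain UTF-8 decoding) can emit unreadable control characters in the HLog output, while Bytes.toStringBinary escapes non-printable bytes as \xNN. Below is a minimal standalone sketch, not part of this commit, contrasting the two helpers; the class name and sample bytes are made up for illustration.

    import org.apache.hadoop.hbase.util.Bytes;

    public class ToStringBinaryDemo {
      public static void main(String[] args) {
        // Hypothetical region name: a printable prefix followed by raw binary bytes.
        byte[] regionName = Bytes.add(Bytes.toBytes("usertable,"),
            new byte[] { 0x00, 0x01, (byte) 0xFF });

        // Plain UTF-8 decoding: the trailing bytes render as garbled characters.
        System.out.println("toString:       " + Bytes.toString(regionName));

        // Binary-safe rendering: non-printable bytes are escaped as \xNN,
        // which is the form the HLog log statements now emit.
        System.out.println("toStringBinary: " + Bytes.toStringBinary(regionName));
      }
    }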