Author: stack
Date: Mon Apr 21 16:54:25 2008
New Revision: 650324

URL: http://svn.apache.org/viewvc?rev=650324&view=rev
Log:
HBASE-586 HRegion runs HStore memcache snapshotting -- fix it so only HStore
knows about workings of memcache

Modified:
    hadoop/hbase/branches/0.1/CHANGES.txt
    hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HLog.java
    hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java
    hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStore.java
    hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStoreFile.java
    hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/util/FSUtils.java
    hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/TestCompaction.java
    hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/TestHLog.java
    hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/TestHMemcache.java

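In outline, the flush path this patch establishes works as below. The following
is a condensed, self-contained sketch for illustration only: FlushFlowSketch and
its String-keyed maps are made-up stand-ins, not the real HBase classes (the
real code keys memcaches by HStoreKey, takes its sequence id from
HLog.startCacheFlush(), and uses synchronized sorted maps).

    import java.util.Map;
    import java.util.TreeMap;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.atomic.AtomicLong;

    // Condensed model of the flush path this patch establishes.  HRegion
    // snapshots and flushes through the HStore interface only; the memcache
    // itself is an implementation detail of the store.
    public class FlushFlowSketch {
      static class Store {
        final TreeMap<String, byte[]> memcache = new TreeMap<String, byte[]>();
        TreeMap<String, byte[]> snapshotMap = new TreeMap<String, byte[]>();

        // Memcache.snapshot() in the patch: void, a no-op if an uncleared
        // snapshot is still pending.
        synchronized void snapshot() {
          if (snapshotMap.isEmpty() && !memcache.isEmpty()) {
            snapshotMap.putAll(memcache);
            memcache.clear();
          }
        }

        // flushCache(sequenceId) in the patch: returns count of bytes flushed.
        synchronized long flushCache(long sequenceId) {
          long flushed = 0;
          for (Map.Entry<String, byte[]> e : snapshotMap.entrySet()) {
            flushed += e.getKey().length() + e.getValue().length; // "write" it
          }
          snapshotMap.clear(); // clearSnapshot(...) in the real code
          return flushed;
        }
      }

      final Map<String, Store> stores = new ConcurrentHashMap<String, Store>();
      final AtomicLong memcacheSize = new AtomicLong();
      final Object updateLock = new Object();

      boolean internalFlushcache() {
        synchronized (updateLock) {   // block updates only while snapshotting
          for (Store s : stores.values()) {
            s.snapshot();
          }
        }
        long sequenceId = 42L;        // stands in for log.startCacheFlush()
        for (Store s : stores.values()) {
          long size = memcacheSize.addAndGet(-s.flushCache(sequenceId));
          if (size < 0) {
            memcacheSize.set(0);      // same guard as the patch adds
          }
        }
        return true;
      }

      public static void main(String[] args) {
        FlushFlowSketch region = new FlushFlowSketch();
        Store s = new Store();
        s.memcache.put("row1", new byte[]{1});
        region.stores.put("colfamily1", s);
        region.memcacheSize.addAndGet(5);   // 4 key chars + 1 value byte
        region.internalFlushcache();
        System.out.println(region.memcacheSize.get()); // prints 0
      }
    }

The division of labor is the point of the change: HRegion only iterates its
stores, snapshots them under the update lock, and balances the books with what
each store reports flushed; the memcache itself stays private to the store.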
Modified: hadoop/hbase/branches/0.1/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/CHANGES.txt?rev=650324&r1=650323&r2=650324&view=diff
==============================================================================
--- hadoop/hbase/branches/0.1/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.1/CHANGES.txt Mon Apr 21 16:54:25 2008
@@ -19,6 +19,8 @@
                directory if configuration is not correct
   HBASE-595   RowFilterInterface.rowProcessed() is called *before* the final
                filtering decision is made (Clint Morgan via Stack)
+   HBASE-586   HRegion runs HStore memcache snapshotting -- fix it so only HStore
+               knows about workings of memcache
 
   IMPROVEMENTS
    HBASE-559   MR example job to count table rows

Modified: hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HLog.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HLog.java?rev=650324&r1=650323&r2=650324&view=diff
==============================================================================
--- hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HLog.java (original)
+++ hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HLog.java Mon Apr 21 16:54:25 2008
@@ -43,6 +43,7 @@
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.SequenceFile.CompressionType;
 import org.apache.hadoop.io.SequenceFile.Reader;
+import org.apache.hadoop.hbase.util.FSUtils;
 
 /**
  * HLog stores all the edits to the HStore.
@@ -114,14 +115,14 @@
    */
   final Map<Text, Long> lastSeqWritten = new ConcurrentHashMap<Text, Long>();
 
-  volatile boolean closed = false;
+  private volatile boolean closed = false;
 
   private final Integer sequenceLock = new Integer(0);
-  volatile long logSeqNum = 0;
+  private volatile long logSeqNum = 0;
 
-  volatile long filenum = 0;
+  private volatile long filenum = 0;
 
-  volatile int numEntries = 0;
+  private volatile int numEntries = 0;
 
   // This lock prevents starting a log roll during a cache flush.
   // synchronized is insufficient because a cache flush spans two method calls.
@@ -159,7 +160,15 @@
     fs.mkdirs(dir);
     rollWriter();
   }
-  
+
+  /*
+   * Accessor for tests.
+   * @return Current state of the monotonically increasing file id.
+   */
+  long getFilenum() {
+    return this.filenum;
+  }
+
   /**
    * Get the compression type for the hlog files.
    * @param c Configuration to use.
@@ -183,7 +192,7 @@
       if (newvalue > logSeqNum) {
         if (LOG.isDebugEnabled()) {
           LOG.debug("changing sequence number from " + logSeqNum + " to " +
-              newvalue);
+            newvalue);
         }
         logSeqNum = newvalue;
       }
@@ -218,8 +227,7 @@
           this.writer.close();
           Path p = computeFilename(filenum - 1);
           if (LOG.isDebugEnabled()) {
-            LOG.debug("Closing current log writer " + p.toString() +
-            " to get a new one");
+            LOG.debug("Closing current log writer " + FSUtils.getPath(p));
           }
           if (filenum > 0) {
             synchronized (this.sequenceLock) {
@@ -230,7 +238,7 @@
         Path newPath = computeFilename(filenum++);
         this.writer = SequenceFile.createWriter(this.fs, this.conf, newPath,
             HLogKey.class, HLogEdit.class, getCompressionType(this.conf));
-        LOG.info("new log writer created at " + newPath);
+        LOG.info("New log writer created at " + FSUtils.getPath(newPath));
 
         // Can we delete any of the old log files?
         if (this.outputfiles.size() > 0) {
@@ -285,7 +293,7 @@
   }
   
  private void deleteLogFile(final Path p, final Long seqno) throws IOException {
-    LOG.info("removing old log file " + p.toString() +
+    LOG.info("removing old log file " + FSUtils.getPath(p) +
       " whose highest sequence/edit id is " + seqno);
     this.fs.delete(p);
   }

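The fields made private above stay reachable from tests through the new
package-private getFilenum() accessor (TestHLog below is updated accordingly).
A minimal stand-alone illustration of the pattern, with HLogLike as a made-up
stand-in for the real HLog:

    // Minimal illustration of the private-field-plus-test-accessor pattern.
    // HLogLike is hypothetical; only the filenum handling is modeled.
    public class AccessorSketch {
      static class HLogLike {
        private volatile long filenum = 0;  // no longer package-visible

        long getFilenum() {                 // package-private accessor for tests
          return this.filenum;
        }

        void rollWriter() {
          this.filenum++;                   // monotonically increasing file id
        }
      }

      public static void main(String[] args) {
        HLogLike log = new HLogLike();
        log.rollWriter();
        // A test derives the just-closed log's name from getFilenum() - 1,
        // as TestHLog now does instead of reading log.filenum directly.
        System.out.println("last file id: " + (log.getFilenum() - 1)); // 0
      }
    }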
Modified: hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java?rev=650324&r1=650323&r2=650324&view=diff
==============================================================================
--- hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java (original)
+++ hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java Mon Apr 21 16:54:25 2008
@@ -49,6 +49,7 @@
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.hbase.util.FSUtils;
 
 /**
  * HRegion stores data for a certain region of a table.  It stores all columns
@@ -60,7 +61,6 @@
  * they make up all the data for the rows.  
  *
  * <p>Each HRegion has a 'startKey' and 'endKey'.
- *   
  * <p>The first is inclusive, the second is exclusive (except for
  * the final region)  The endKey of region 0 is the same as
  * startKey for region 1 (if it exists).  The startKey for the
@@ -194,8 +194,7 @@
      makeColumnFamilyDirs(fs, basedir, encodedRegionName, colFamily, tabledesc);
       
      // Because we compacted the source regions we should have no more than two
-      // HStoreFiles per family and there will be no reference stores
-
+      // HStoreFiles per family and there will be no reference store
       List<HStoreFile> srcFiles = es.getValue();
       if (srcFiles.size() == 2) {
         long seqA = srcFiles.get(0).loadInfo(fs);
@@ -203,8 +202,9 @@
         if (seqA == seqB) {
           // We can't have duplicate sequence numbers
           if (LOG.isDebugEnabled()) {
-            LOG.debug("Adjusting sequence number of storeFile " +
-                srcFiles.get(1));
+            LOG.debug("Adjusting sequence id of storeFile " + srcFiles.get(1) +
+              " down by one; sequence id A=" + seqA + ", sequence id B=" +
+              seqB);
           }
           srcFiles.get(1).writeInfo(fs, seqB - 1);
         }
@@ -401,7 +401,11 @@
     this.threadWakeFrequency = conf.getLong(THREAD_WAKE_FREQUENCY, 10 * 1000);
     this.regiondir = new Path(basedir, this.regionInfo.getEncodedName());
     Path oldLogFile = new Path(regiondir, HREGION_OLDLOGFILE_NAME);
-
+    
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Opening region " + this.regionInfo.getRegionName() + "/" +
+        this.regionInfo.getEncodedName());
+    }
     this.regionCompactionDir =
       new Path(getCompactionDir(basedir), this.regionInfo.getEncodedName());
 
@@ -430,7 +434,8 @@
       fs.delete(oldLogFile);
     }
     
-    this.minSequenceId = maxSeqId;
+    // Add one to the current maximum sequence id so new edits are beyond.
+    this.minSequenceId = maxSeqId + 1;
     if (LOG.isDebugEnabled()) {
       LOG.debug("Next sequence id for region " + regionInfo.getRegionName() +
         " is " + this.minSequenceId);
@@ -460,7 +465,8 @@
     // HRegion is ready to go!
     this.writestate.compacting = false;
     this.lastFlushTime = System.currentTimeMillis();
-    LOG.info("region " + this.regionInfo.getRegionName() + " available");
+    LOG.info("region " + this.regionInfo.getRegionName() + "/" +
+      this.regionInfo.getEncodedName() + " available");
   }
   
   /**
@@ -578,7 +584,7 @@
         
         // Don't flush the cache if we are aborting
         if (!abort) {
-          internalFlushcache(snapshotMemcaches());
+          internalFlushcache();
         }
 
         List<HStoreFile> result = new ArrayList<HStoreFile>();
@@ -780,7 +786,7 @@
       // Cleanup
       boolean deleted = fs.delete(splits);    // Get rid of splits directory
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Cleaned up " + splits.toString() + " " + deleted);
+        LOG.debug("Cleaned up " + FSUtils.getPath(splits) + " " + deleted);
       }
       HRegion regions[] = new HRegion [] {regionA, regionB};
       return regions;
@@ -977,11 +983,7 @@
     try {
       lock.readLock().lock();                      // Prevent splits and closes
       try {
-        long startTime = -1;
-        synchronized (updateLock) {// Stop updates while we snapshot the memcaches
-          startTime = snapshotMemcaches();
-        }
-        return internalFlushcache(startTime);
+        return internalFlushcache();
       } finally {
         lock.readLock().unlock();
       }
@@ -993,32 +995,6 @@
     }
   }
 
-  /*
-   * It is assumed that updates are blocked for the duration of this method
-   */
-  private long snapshotMemcaches() {
-    if (this.memcacheSize.get() == 0) {
-      return -1;
-    }
-    long startTime = System.currentTimeMillis();
-    
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("Started memcache flush for region " +
-        this.regionInfo.getRegionName() + ". Size " +
-        StringUtils.humanReadableInt(this.memcacheSize.get()));
-    }
-
-    // We reset the aggregate memcache size here so that subsequent updates
-    // will add to the unflushed size
-    
-    this.memcacheSize.set(0L);
-    
-    for (HStore hstore: stores.values()) {
-      hstore.snapshotMemcache();
-    }
-    return startTime;
-  }
-
   /**
    * Flushing the cache is a little tricky. We have a lot of updates in the
    * HMemcache, all of which have also been written to the log. We need to
@@ -1045,45 +1021,53 @@
    * 
    * <p> This method may block for some time.
    * 
-   * @param startTime the time the cache was snapshotted or -1 if a flush is
-   * not needed
-   * 
    * @return true if the cache was flushed
    * 
    * @throws IOException
    * @throws DroppedSnapshotException Thrown when replay of hlog is required
    * because a Snapshot was not properly persisted.
    */
-  private boolean internalFlushcache(long startTime) throws IOException {
-    if (startTime == -1) {
-      return false;
-    }
+  private boolean internalFlushcache() throws IOException {
+    final long startTime = System.currentTimeMillis();
+
+    // Record latest flush time
+    this.lastFlushTime = startTime;
+  
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Started memcache flush for region " +
+          this.regionInfo.getRegionName() + ". Current region memcache size " +
+          StringUtils.humanReadableInt(this.memcacheSize.get()));
+    }
 
-    // We pass the log to the HMemcache, so we can lock down both
-    // simultaneously.  We only have to do this for a moment: we need the
-    // HMemcache state at the time of a known log sequence number. Since
-    // multiple HRegions may write to a single HLog, the sequence numbers may
-    // zoom past unless we lock it.
-    //
-    // When execution returns from snapshotMemcacheForLog() with a non-NULL
-    // value, the HMemcache will have a snapshot object stored that must be
-    // explicitly cleaned up using a call to deleteSnapshot() or by calling
-    // abort.
-    //
+    // Stop updates while we snapshot the memcache of all stores. We only have
+    // to do this for a moment.  It's quick.  The subsequent sequence id that
+    // goes into the HLog after we've flushed all these snapshots also goes
+    // into the info file that sits beside the flushed files.
+    synchronized (updateLock) {
+      for (HStore s: stores.values()) {
+        s.memcache.snapshot();
+      }
+    }
     long sequenceId = log.startCacheFlush();
 
     // Any failure from here on out will be catastrophic requiring server
     // restart so hlog content can be replayed and put back into the memcache.
     // Otherwise, the snapshot content while backed up in the hlog, it will not
     // be part of the current running servers state.
-
     try {
       // A.  Flush memcache to all the HStores.
       // Keep running vector of all store files that includes both old and the
       // just-made new flush store file.
-      
       for (HStore hstore: stores.values()) {
-        hstore.flushCache(sequenceId);
+        long flushed = hstore.flushCache(sequenceId);
+        // Subtract amount flushed.
+        long size = this.memcacheSize.addAndGet(-flushed);
+        if (size < 0) {
+           if (LOG.isDebugEnabled()) {
+             LOG.warn("Memcache size went negative " + size + "; resetting");
+           }
+           this.memcacheSize.set(0);
+        }
       }
     } catch (IOException e) {
       // An exception here means that the snapshot was not persisted.
@@ -1111,7 +1095,7 @@
     if (LOG.isDebugEnabled()) {
       LOG.debug("Finished memcache flush for region " +
           this.regionInfo.getRegionName() + " in " +
-          (System.currentTimeMillis() - startTime) + "ms, sequenceid=" +
+          (System.currentTimeMillis() - startTime) + "ms, sequence id=" +
           sequenceId);
     }
     return true;
@@ -1605,8 +1589,7 @@
       for (Map.Entry<HStoreKey, byte[]> e: updatesByColumn.entrySet()) {
         HStoreKey key = e.getKey();
         byte[] val = e.getValue();
-        size = this.memcacheSize.addAndGet(key.getSize() +
-            (val == null ? 0 : val.length));
+        size = this.memcacheSize.addAndGet(getEntrySize(key, val));
         stores.get(HStoreKey.extractFamily(key.getColumn())).add(key, val);
       }
       if (this.flushListener != null && size > this.memcacheFlushSize) {
@@ -1615,6 +1598,19 @@
       }
     }
   }
+  
+  /*
+   * Calculate size of passed key/value pair.
+   * Used here when we update region to figure what to add to this.memcacheSize
+   * Also used in Store when flushing calculating size of flush.  Both need to
+   * use same method making size calculation.
+   * @param key
+   * @param value
+   * @return Size of the passed key + value
+   */
+  static long getEntrySize(final HStoreKey key, byte [] value) {
+    return key.getSize() + (value == null ? 0 : value.length);
+  }
 
   //////////////////////////////////////////////////////////////////////////////
   // Support code
@@ -1926,7 +1922,6 @@
       }
     }
 
-    /** {@inheritDoc} */
     public Iterator<Entry<HStoreKey, SortedMap<Text, byte[]>>> iterator() {
       throw new UnsupportedOperationException("Unimplemented serverside. " +
         "next(HStoreKey, StortedMap(...) is more efficient");
@@ -1961,10 +1956,13 @@
   }
   
   /**
-   * Convenience method to open a HRegion.
+   * Convenience method to open a HRegion outside of an HRegionServer context.
    * @param info Info for region to be opened.
    * @param rootDir Root directory for HBase instance
-   * @param log HLog for region to use
+   * @param log HLog for region to use. This method will call
+   * HLog#setSequenceNumber(long) passing the result of the call to
+   * HRegion#getMinSequenceId() to ensure the log id is properly kept
+   * up.  HRegionServer does this every time it opens a new region.
    * @param conf
    * @return new HRegion
    * 
@@ -1975,9 +1973,16 @@
     if (LOG.isDebugEnabled()) {
       LOG.debug("Opening region: " + info);
     }
-    return new HRegion(
+    if (info == null) {
+      throw new NullPointerException("Passed region info is null");
+    }
+    HRegion r = new HRegion(
         HTableDescriptor.getTableDir(rootDir, info.getTableDesc().getName()),
         log, FileSystem.get(conf), conf, info, null, null);
+    if (log != null) {
+      log.setSequenceNumber(r.getMinSequenceId());
+    }
+    return r;
   }
   
   /**

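Two details in the HRegion changes are worth noting. openHRegion() now seeds the
passed HLog with the opened region's minimum sequence id, so new edits sort
after replayed ones. And updates and flushes now share one size formula,
getEntrySize(), so the running memcacheSize drains back to zero across a full
flush. A self-contained sketch of that bookkeeping invariant, with a String key
standing in for HStoreKey (whose getSize() the real method uses):

    import java.util.concurrent.atomic.AtomicLong;

    // Sketch of the shared size bookkeeping: the region adds getEntrySize(...)
    // on update and subtracts the amount the store reports flushed, so the
    // running total drains to zero after a full flush.
    public class EntrySizeSketch {
      // Same formula as HRegion.getEntrySize; a String key stands in for
      // HStoreKey.
      static long getEntrySize(String key, byte[] value) {
        return key.length() + (value == null ? 0 : value.length);
      }

      public static void main(String[] args) {
        AtomicLong memcacheSize = new AtomicLong();
        String key = "row1/colfamily1:qual";
        byte[] value = {1, 2, 3};
        memcacheSize.addAndGet(getEntrySize(key, value));  // HRegion update path
        memcacheSize.addAndGet(-getEntrySize(key, value)); // HStore flush path
        System.out.println(memcacheSize.get());            // prints 0
      }
    }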
Modified: hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStore.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStore.java?rev=650324&r1=650323&r2=650324&view=diff
==============================================================================
--- hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStore.java (original)
+++ hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStore.java Mon Apr 21 16:54:25 2008
@@ -55,6 +55,7 @@
 import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.onelab.filter.BloomFilter;
 import org.onelab.filter.CountingBloomFilter;
 import org.onelab.filter.Filter;
@@ -101,26 +102,26 @@
     }
 
     /**
-     * Creates a snapshot of the current Memcache
+     * Creates a snapshot of the current Memcache.
+     * Snapshot must be cleared by call to {@link #clearSnapshot(SortedMap)}
      */
-    SortedMap<HStoreKey, byte[]> snapshot() {
+    void snapshot() {
       this.mc_lock.writeLock().lock();
       try {
-        // If snapshot has entries, then flusher failed or didn't call cleanup.
+        // If snapshot currently has entries, then flusher failed or didn't call
+        // cleanup.  Log a warning.
         if (this.snapshot.size() > 0) {
-          LOG.debug("Returning existing snapshot. Either the snapshot was run " +
-            "by the region -- normal operation but to be fixed -- or there is " +
-            "another ongoing flush or did we fail last attempt?");
-          return this.snapshot;
-        }
-        // We used to synchronize on the memcache here but we're inside a
-        // write lock so removed it. Comment is left in case removal was a
-        // mistake. St.Ack
-        if (this.mc.size() != 0) {
-          this.snapshot = this.mc;
-          this.mc = createSynchronizedSortedMap();
+          LOG.debug("Snapshot called again without clearing previous. " +
+            "Doing nothing. Another ongoing flush or did we fail last 
attempt?");
+        } else {
+          // We used to synchronize on the memcache here but we're inside a
+          // write lock so removed it. Comment is left in case removal was a
+          // mistake. St.Ack
+          if (this.mc.size() != 0) {
+            this.snapshot = this.mc;
+            this.mc = createSynchronizedSortedMap();
+          }
         }
-        return this.snapshot;
       } finally {
         this.mc_lock.writeLock().unlock();
       }
@@ -128,6 +129,8 @@
 
    /**
     * Return the current snapshot.
+    * Called by flusher when it wants to clean up snapshot made by a previous
+    * call to {@link snapshot}
     * @return Return snapshot.
     * @see {@link #snapshot()}
     * @see {@link #clearSnapshot(SortedMap)}
@@ -787,10 +790,6 @@
       this.bloomFilter = loadOrCreateBloomFilter();
     }
 
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("starting " + storeName);
-    }
-
     // Go through the 'mapdir' and 'infodir' together, make sure that all 
     // MapFiles are in a reliable state.  Every entry in 'mapdir' must have a 
     // corresponding one in 'loginfodir'. Without a corresponding log info
@@ -811,8 +810,8 @@
     
     this.maxSeqId = getMaxSequenceId(hstoreFiles);
     if (LOG.isDebugEnabled()) {
-      LOG.debug("maximum sequence id for hstore " + storeName + " is " +
-          this.maxSeqId);
+      LOG.debug("Loaded " + hstoreFiles.size() + " file(s) in hstore " +
+        this.storeName + ", max sequence id " + this.maxSeqId);
     }
     
     try {
@@ -836,9 +835,6 @@
     // taking updates as soon as possible (Once online, can take updates even
     // during a compaction).
 
-    // Move maxSeqId on by one. Why here?  And not in HRegion?
-    this.maxSeqId += 1;
-    
     // Finally, start up all the map readers! (There could be more than one
     // since we haven't compacted yet.)
     for(Map.Entry<Long, HStoreFile> e: this.storefiles.entrySet()) {
@@ -957,10 +953,6 @@
    */
   private List<HStoreFile> loadHStoreFiles(Path infodir, Path mapdir)
   throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("infodir: " + infodir.toString() + " mapdir: " +
-          mapdir.toString());
-    }
     // Look first at info files.  If a reference, these contain info we need
     // to create the HStoreFile.
     Path infofiles[] = fs.listPaths(new Path[] {infodir});
@@ -999,6 +991,10 @@
       
       // Found map and sympathetic info file.  Add this hstorefile to result.
       results.add(curfile);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("loaded " + FSUtils.getPath(p) + ", isReference=" +
+          isReference);
+      }
       // Keep list of sympathetic data mapfiles for cleaning info dir in next
       // section.  Make sure path is fully qualified for compare.
       mapfiles.add(mapfile);
@@ -1125,7 +1121,6 @@
     lock.readLock().lock();
     try {
       this.memcache.add(key, value);
-      
     } finally {
       lock.readLock().unlock();
     }
@@ -1170,29 +1165,31 @@
   }
     
   /**
-   * Write out a brand-new set of items to the disk.
-   *
-   * We should only store key/vals that are appropriate for the data-columns 
-   * stored in this HStore.
-   *
-   * Also, we are not expecting any reads of this MapFile just yet.
-   *
-   * Return the entire list of HStoreFiles currently used by the HStore.
-   *
+   * Write out current snapshot.  Presumes {@link #snapshot()} has been called
+   * previously.
    * @param logCacheFlushId flush sequence number
+   * @return count of bytes flushed
    * @throws IOException
    */
-  void flushCache(final long logCacheFlushId) throws IOException {
-    SortedMap<HStoreKey, byte []> cache = this.memcache.snapshot();
-    internalFlushCache(cache, logCacheFlushId);
+  long flushCache(final long logCacheFlushId) throws IOException {
+    // Get the snapshot to flush.  Presumes that a call to
+    // this.memcache.snapshot() has happened earlier up in the chain.
+    SortedMap<HStoreKey, byte []> cache = this.memcache.getSnapshot();
+    long flushed = internalFlushCache(cache, logCacheFlushId);
     // If an exception happens flushing, we let it out without clearing
     // the memcache snapshot.  The old snapshot will be returned when we say
     // 'snapshot', the next time flush comes around.
     this.memcache.clearSnapshot(cache);
+    return flushed;
   }
   
-  private void internalFlushCache(SortedMap<HStoreKey, byte []> cache,
+  private long internalFlushCache(SortedMap<HStoreKey, byte []> cache,
       long logCacheFlushId) throws IOException {
+    long flushed = 0;
+    // Don't flush if there are no entries.
+    if (cache.size() == 0) {
+      return flushed;
+    }
     
     // TODO:  We can fail in the below block before we complete adding this
     // flush to list of store files.  Add cleanup of anything put on filesystem
@@ -1223,6 +1220,7 @@
           if (f.equals(this.family.getFamilyName())) {
             entries++;
             out.append(curkey, new ImmutableBytesWritable(es.getValue()));
+            flushed += HRegion.getEntrySize(curkey, es.getValue());
           }
         }
       } finally {
@@ -1247,15 +1245,15 @@
             flushedFile.getReader(this.fs, this.bloomFilter));
         this.storefiles.put(flushid, flushedFile);
         if(LOG.isDebugEnabled()) {
-          LOG.debug("Added " + flushedFile.toString() + " with " + entries +
-            " entries, sequence id " + logCacheFlushId + ", and size " +
-            StringUtils.humanReadableInt(flushedFile.length()) + " for " +
-            this.storeName);
+          LOG.debug("Added " + FSUtils.getPath(flushedFile.getMapFilePath()) +
+            " with " + entries +
+            " entries, sequence id " + logCacheFlushId + ", data size " +
+            StringUtils.humanReadableInt(flushed));
         }
       } finally {
         this.lock.writeLock().unlock();
       }
-      return;
+      return flushed;
     }
   }
 
@@ -1306,16 +1304,18 @@
    */
   boolean compact() throws IOException {
     synchronized (compactLock) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("started compaction of " + storefiles.size() +
-          " files using " + compactionDir.toString() + " for " +
-          this.storeName);
-      }
-
       // Storefiles are keyed by sequence id. The oldest file comes first.
       // We need to return out of here a List that has the newest file first.
       List<HStoreFile> filesToCompact =
         new ArrayList<HStoreFile>(this.storefiles.values());
+      if (filesToCompact.size() == 0) {
+        return true;
+      }
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("started compaction of " + storefiles.size() +
+          " files " + filesToCompact.toString() + " into " +
+              FSUtils.getPath(compactionDir));
+      }
       Collections.reverse(filesToCompact);
       if (filesToCompact.size() < 1 ||
         (filesToCompact.size() == 1 && !filesToCompact.get(0).isReference())) {
@@ -1379,10 +1379,7 @@
         // Add info about which file threw exception. It may not be in the
         // exception message so output a message here where we know the
         // culprit.
-        LOG.warn("Failed with " + e.toString() + ": HStoreFile=" +
-          hsf.toString() + (hsf.isReference()? ", Reference=" +
-          hsf.getReference().toString() : "") + " for Store=" +
-          this.storeName);
+        LOG.warn("Failed with " + e.toString() + ": " + hsf.toString());
         closeCompactionReaders(rdrs);
         throw e;
       }
@@ -1638,14 +1635,13 @@
         HStoreFile finalCompactedFile = new HStoreFile(conf, fs, basedir,
             info.getEncodedName(), family.getFamilyName(), -1, null);
         if(LOG.isDebugEnabled()) {
-          LOG.debug("moving " + compactedFile.toString() + " in " +
-              this.compactionDir.toString() + " to " +
-              finalCompactedFile.toString() + " in " + basedir.toString() +
-              " for " + this.storeName);
+          LOG.debug("moving " +
+            FSUtils.getPath(compactedFile.getMapFilePath()) +
+            " to " + FSUtils.getPath(finalCompactedFile.getMapFilePath()));
         }
         if (!compactedFile.rename(this.fs, finalCompactedFile)) {
           LOG.error("Failed move of compacted file " +
-              finalCompactedFile.toString() + " for " + this.storeName);
+            finalCompactedFile.getMapFilePath().toString());
           return;
         }
 
@@ -2697,7 +2693,7 @@
 
               // NOTE: We used to do results.putAll(resultSets[i]);
               // but this had the effect of overwriting newer
-              // values with older onms. So now we only insert
+              // values with older ones. So now we only insert
               // a result if the map does not contain the key.
               HStoreKey hsk = new HStoreKey(key.getRow(), EMPTY_TEXT,
                 key.getTimestamp());

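The net effect in HStore.Memcache is a three-step flusher protocol: snapshot()
(now void), getSnapshot() to fetch what to write out, and clearSnapshot(...)
only after the flush file is safely on disk, so a failed flush leaves the
snapshot in place for the next attempt. Below is a simplified, self-contained
model of that protocol; String keys and plain TreeMaps stand in for HStoreKey
and the synchronized maps the real Memcache builds, and the locking is reduced
to a single mc_lock-style read/write lock:

    import java.util.SortedMap;
    import java.util.TreeMap;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // Simplified model of the snapshot lifecycle settled on in this patch.
    public class MemcacheSketch {
      private volatile SortedMap<String, byte[]> mc =
        new TreeMap<String, byte[]>();
      private volatile SortedMap<String, byte[]> snapshot =
        new TreeMap<String, byte[]>();
      private final ReentrantReadWriteLock mcLock =
        new ReentrantReadWriteLock();

      // Step 1: swap the live map aside.  Void now; does nothing if an
      // uncleared snapshot is still pending from a failed flush.
      void snapshot() {
        mcLock.writeLock().lock();
        try {
          if (this.snapshot.size() == 0 && this.mc.size() != 0) {
            this.snapshot = this.mc;
            this.mc = new TreeMap<String, byte[]>();
          }
        } finally {
          mcLock.writeLock().unlock();
        }
      }

      // Step 2: the flusher fetches the pending snapshot to write out.
      SortedMap<String, byte[]> getSnapshot() {
        return this.snapshot;
      }

      // Step 3: cleared only after the flush file is safely on disk.
      void clearSnapshot(SortedMap<String, byte[]> ss) {
        mcLock.writeLock().lock();
        try {
          if (ss == this.snapshot) {
            this.snapshot = new TreeMap<String, byte[]>();
          }
        } finally {
          mcLock.writeLock().unlock();
        }
      }
    }

TestHMemcache below exercises exactly this snapshot()/getSnapshot()/
clearSnapshot() sequence.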
Modified: hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStoreFile.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStoreFile.java?rev=650324&r1=650323&r2=650324&view=diff
==============================================================================
--- hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStoreFile.java (original)
+++ hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStoreFile.java Mon Apr 21 16:54:25 2008
@@ -421,7 +421,7 @@
   @Override
   public String toString() {
     return encodedRegionName + "/" + colFamily + "/" + fileId +
-      (isReference()? "/" + reference.toString(): "");
+      (isReference()? "-" + reference.toString(): "");
   }
   
   /**

Modified: hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/util/FSUtils.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/util/FSUtils.java?rev=650324&r1=650323&r2=650324&view=diff
==============================================================================
--- hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/util/FSUtils.java (original)
+++ hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/util/FSUtils.java Mon Apr 21 16:54:25 2008
@@ -143,4 +143,18 @@
       throw io;
     }
   }
+
+  /**
+   * Return the 'path' component of a Path.  In Hadoop, a Path is a URI.  This
+   * method returns the 'path' component of a Path's URI: e.g. If a Path is
+   * <code>hdfs://example.org:9000/hbase_trunk/TestTable/compaction.dir</code>,
+   * this method returns <code>/hbase_trunk/TestTable/compaction.dir</code>.
+   * This method is useful if you want to print out a Path without the
+   * qualifying Filesystem instance.
+   * @param p Filesystem Path whose 'path' component we are to return.
+   * @return Path portion of the Filesystem Path.
+   */
+  public static String getPath(Path p) {
+    return p.toUri().getPath();
+  }
 }

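Because Hadoop's Path wraps a java.net.URI, the new FSUtils.getPath() amounts
to URI.getPath(); its behavior can be shown without Hadoop on the classpath
(hypothetical host and port, same example path as the javadoc above):

    import java.net.URI;

    // getPath(Path) above returns the URI's path component; Hadoop's Path is
    // backed by a java.net.URI, so plain URI shows the same behavior.
    public class GetPathSketch {
      public static void main(String[] args) {
        URI u = URI.create(
          "hdfs://example.org:9000/hbase_trunk/TestTable/compaction.dir");
        System.out.println(u.getPath()); // /hbase_trunk/TestTable/compaction.dir
      }
    }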
Modified: hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/TestCompaction.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/TestCompaction.java?rev=650324&r1=650323&r2=650324&view=diff
==============================================================================
--- hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/TestCompaction.java (original)
+++ hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/TestCompaction.java Mon Apr 21 16:54:25 2008
@@ -162,6 +162,7 @@
     // compacted store and the flush above when we added deletes.  Add more
     // content to be certain.
     createSmallerStoreFile(this.r);
+    LOG.debug("Checking if compaction needed");
     assertTrue(this.r.compactIfNeeded());
     // Assert that the first row is still deleted.
     bytes = this.r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/);

Modified: hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/TestHLog.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/TestHLog.java?rev=650324&r1=650323&r2=650324&view=diff
==============================================================================
--- hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/TestHLog.java (original)
+++ hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/TestHLog.java Mon Apr 21 16:54:25 2008
@@ -113,7 +113,7 @@
       long logSeqId = log.startCacheFlush();
       log.completeCacheFlush(regionName, tableName, logSeqId);
       log.close();
-      Path filename = log.computeFilename(log.filenum - 1);
+      Path filename = log.computeFilename(log.getFilenum() - 1);
       log = null;
       // Now open a reader on the log and assert append worked.
       reader = new SequenceFile.Reader(fs, filename, conf);

Modified: hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/TestHMemcache.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/TestHMemcache.java?rev=650324&r1=650323&r2=650324&view=diff
==============================================================================
--- hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/TestHMemcache.java (original)
+++ hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/TestHMemcache.java Mon Apr 21 16:54:25 2008
@@ -77,7 +77,8 @@
   throws UnexpectedException {
     // Save off old state.
     int oldHistorySize = hmc.getSnapshot().size();
-    SortedMap<HStoreKey, byte[]> ss = hmc.snapshot();
+    hmc.snapshot();
+    SortedMap<HStoreKey, byte[]> ss = hmc.getSnapshot();
     // Make some assertions about what just happened.
     assertTrue("History size has not increased", oldHistorySize < ss.size());
     hmc.clearSnapshot(ss);

