svn commit: r618518 - in /hadoop/hbase/trunk: ./ src/java/org/apache/hadoop/hbase/hql/ src/java/org/apache/hadoop/hbase/hql/generated/ src/java/org/apache/hadoop/hbase/thrift/ src/java/org/apache/hado

2008-02-04 Thread jimk
Author: jimk
Date: Mon Feb  4 18:36:26 2008
New Revision: 618518

URL: http://svn.apache.org/viewvc?rev=618518&view=rev
Log:
2008/02/04 HBase is now a subproject of Hadoop. The first HBase release as a 
subproject will be release 0.1.0 which will be equivalent to the version of 
HBase included in Hadoop 0.16.0. In order to accomplish this, the HBase portion 
of HBASE-288 (formerly HADOOP-1398) has been backed out. Once 0.1.0 is frozen 
(depending mostly on changes to infrastructure due to becoming a sub project 
instead of a contrib project), this patch will re-appear on HBase trunk.

Removed:

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/io/TestBlockFSInputStream.java
Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/hql/HQLParser.jj

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/hql/generated/HQLParser.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/hql/generated/HQLParserConstants.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/hql/generated/HQLParserTokenManager.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/thrift/Hbase.thrift

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestToString.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=618518&r1=618517&r2=618518&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Mon Feb  4 18:36:26 2008
@@ -28,6 +28,15 @@
 
 Release 0.16.0
 
+  2008/02/04   HBase is now a subproject of Hadoop. The first HBase release as
+   a subproject will be release 0.1.0 which will be equivalent to
+   the version of HBase included in Hadoop 0.16.0. In order to
+   accomplish this, the HBase portion of HBASE-288 (formerly 
+   HADOOP-1398) has been backed out. Once 0.1.0 is frozen 
(depending
+   mostly on changes to infrastructure due to becoming a sub 
project
+   instead of a contrib project), this patch will re-appear on 
HBase
+   trunk.
+
   INCOMPATIBLE CHANGES
HADOOP-2056 A table with row keys containing colon fails to split regions
HADOOP-2079 Fix generated HLog, HRegion names

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/hql/HQLParser.jj
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/hql/HQLParser.jj?rev=618518&r1=618517&r2=618518&view=diff
==
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/hql/HQLParser.jj 
(original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/hql/HQLParser.jj Mon 
Feb  4 18:36:26 2008
@@ -118,7 +118,6 @@
| 
| 
| 
-   | 
| 
| 
| 
@@ -353,11 +352,6 @@
  
 { 
   columnSpec.put("IN_MEMORY", true); 
-} 
-   |  
- 
-{ 
-  columnSpec.put("BLOCK_CACHE_ENABLED", true); 
 } 
|  
 

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/hql/generated/HQLParser.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/hql/generated/HQLParser.java?rev=618518&r1=618517&r2=618518&view=diff
==
--- 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/hql/generated/HQLParser.java
 (original)
+++ 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/hql/generated/HQLParser.java
 Mon Feb  4 18:36:26 2008
@@ -75,7 +75,7 @@
 case SELECT:
 case ENABLE:
 case DISABLE:
-case 69:
+case 68:
   switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
   case HELP:
   case ALTER:
@@ -100,7 +100,7 @@
 jj_la1[0] = jj_gen;
 ;
   }
-  jj_consume_token(69);
+  jj_consume_token(68);
   break;
 case 0:
   jj_consume_token(0);
@@ -390,7 +390,6 @@
   case MAX_LENGTH:
   case COMPRESSION:
   case IN_MEMORY:
-  case BLOCK_CACHE_ENABLED:
   case BLOOMFILTER:
   case VECTOR_SIZE:
   case NUM_HASH:
@@ -441,10 +440,6 @@
 jj_consume_token(IN_MEMORY);
   columnSpec.put("IN_MEMORY", true);
 break;
-  case BLOCK_CACHE_ENABLED:
-jj_consume_token(BLOCK_CACHE_ENABLED);
-  columnSpec.put("BLOCK_CACHE_ENABLED", true);
-break;
   case BLOOMFILTER:
 jj_consume_token(BLOOMFILTER);
 jj_consume_token(EQUALS);
@@ -1085,33 +1080,33 @@
 finally { jj_save(0, xla); }
   }
 
-  final private boolean jj_3_1() {
-if (jj_scan_token(ADD)) return true;
-if (jj_3R_10()) return true;
+  final private boolean jj_3R_11() {
+if (jj_scan_token(ID)) return true;
 ret

svn commit: r619288 - in /hadoop/hbase/trunk: CHANGES.txt src/java/org/apache/hadoop/hbase/HMaster.java src/java/org/apache/hadoop/hbase/HRegionServer.java src/java/org/apache/hadoop/hbase/Leases.java

2008-02-06 Thread jimk
Author: jimk
Date: Wed Feb  6 22:35:09 2008
New Revision: 619288

URL: http://svn.apache.org/viewvc?rev=619288&view=rev
Log:
HBASE-415   Rewrite leases to use DelayedBlockingQueue instead of polling

Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HMaster.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionServer.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/Leases.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=619288&r1=619287&r2=619288&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Wed Feb  6 22:35:09 2008
@@ -27,6 +27,7 @@
HADOOP-2555 Refactor the HTable#get and HTable#getRow methods to avoid
repetition of retry-on-failure logic (thanks to Peter Dolan and
Bryan Duxbury)
+   HBASE-415   Rewrite leases to use DelayedBlockingQueue instead of polling
 
 Release 0.16.0
 

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HMaster.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HMaster.java?rev=619288&r1=619287&r2=619288&view=diff
==
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HMaster.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HMaster.java Wed Feb  6 
22:35:09 2008
@@ -46,6 +46,7 @@
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
@@ -372,10 +373,10 @@
 Path p = HStoreFile.getMapDir(tabledir, split.getEncodedName(),
 family.getFamilyName());
 
-// Look for reference files.  Call listPaths with an anonymous
+// Look for reference files.  Call listStatus with an anonymous
 // instance of PathFilter.
 
-Path [] ps = fs.listPaths(p,
+FileStatus [] ps = fs.listStatus(p,
 new PathFilter () {
   public boolean accept(Path path) {
 return HStore.isReference(path);
@@ -1306,8 +1307,7 @@
 loadToServers.put(load, servers);
 
 if (!closed.get()) {
-  long serverLabel = getServerLabel(s);
-  serverLeases.createLease(serverLabel, serverLabel, new ServerExpirer(s));
+  serverLeases.createLease(s, new ServerExpirer(s));
 }
 
 return createConfigurationSubset();
@@ -1327,15 +1327,10 @@
 return mw;
   }
 
-  private long getServerLabel(final String s) {
-return s.hashCode();
-  }
-
   /** [EMAIL PROTECTED] */
   public HMsg[] regionServerReport(HServerInfo serverInfo, HMsg msgs[])
   throws IOException {
 String serverName = serverInfo.getServerAddress().toString().trim();
-long serverLabel = getServerLabel(serverName);
 if (msgs.length > 0) {
   if (msgs[0].getMsg() == HMsg.MSG_REPORT_EXITING) {
 synchronized (serversToServerInfo) {
@@ -1348,7 +1343,7 @@
   ": MSG_REPORT_EXITING -- cancelling lease");
 }
 
-if (cancelLease(serverName, serverLabel)) {
+if (cancelLease(serverName)) {
   // Only process the exit message if the server still has a lease.
   // Otherwise we could end up processing the server exit twice.
   LOG.info("Region server " + serverName +
@@ -1428,7 +1423,7 @@
   }
 
   synchronized (serversToServerInfo) {
-cancelLease(serverName, serverLabel);
+cancelLease(serverName);
 serversToServerInfo.notifyAll();
   }
   return new HMsg[]{new HMsg(HMsg.MSG_REGIONSERVER_STOP)};
@@ -1439,7 +1434,7 @@
   // This will always succeed; otherwise, the fetch of serversToServerInfo
   // would have failed above.
 
-  serverLeases.renewLease(serverLabel, serverLabel);
+  serverLeases.renewLease(serverName);
 
   // Refresh the info object and the load information
 
@@ -1476,7 +1471,7 @@
   }
 
   /** Cancel a server's lease and update its load information */
-  private boolean cancelLease(final String serverName, final long serverLabel) 
{
+  private boolean cancelLease(final String serverName) {
 boolean leaseCancelled = false;
 HServerInfo info = serversToServerInfo.remove(serverName);
 if (info != null) {
@@ -1487,7 +1482,7 @@
 unassignRootRegion();
   }
   LOG.info("Cancelling lease for " + serverName);
-  serverLeases.cancelLease(serverLabel, serverLabel);
+  serverLeases.cancelLease(serverName);
   leaseCancelled = true;
 
   // update load information
@@ -3120,20 +3115,20 @@
   /*
* Data structure used to return results out of the

svn commit: r619618 - in /hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase: HRegion.java HRegionInterface.java

2008-02-07 Thread jimk
Author: jimk
Date: Thu Feb  7 12:09:29 2008
New Revision: 619618

URL: http://svn.apache.org/viewvc?rev=619618&view=rev
Log:
HBASE-35 Fix javadoc warnings introduced by patch

Modified:
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegion.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionInterface.java

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegion.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegion.java?rev=619618&r1=619617&r2=619618&view=diff
==
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegion.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegion.java Thu Feb  7 
12:09:29 2008
@@ -1205,7 +1205,6 @@
   
//
   
   /**
-   * @param timestamp
* @param b
* @throws IOException
*/

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionInterface.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionInterface.java?rev=619618&r1=619617&r2=619618&view=diff
==
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionInterface.java 
(original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionInterface.java 
Thu Feb  7 12:09:29 2008
@@ -140,7 +140,6 @@
* Applies a batch of updates via one RPC
* 
* @param regionName name of the region to update
-   * @param timestamp the time to be associated with the changes
* @param b BatchUpdate
* @throws IOException
*/




svn commit: r619635 - in /hadoop/hbase/trunk: CHANGES.txt build.xml

2008-02-07 Thread jimk
Author: jimk
Date: Thu Feb  7 12:57:23 2008
New Revision: 619635

URL: http://svn.apache.org/viewvc?rev=619635&view=rev
Log:
HBASE-410   Speed up the test suite (make test timeout 5 minutes instead of 15).

Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/build.xml

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=619635&r1=619634&r2=619635&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Thu Feb  7 12:57:23 2008
@@ -18,6 +18,8 @@
HBASE-409   Add build path to svn:ignore list (Edward Yoon via Stack)
HBASE-408   Add .classpath and .project to svn:ignore list
(Edward Yoon via Stack)
+   HBASE-410   Speed up the test suite (make test timeout 5 minutes instead of
+   15).
 
 Branch 0.1
 

Modified: hadoop/hbase/trunk/build.xml
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/build.xml?rev=619635&r1=619634&r2=619635&view=diff
==
--- hadoop/hbase/trunk/build.xml (original)
+++ hadoop/hbase/trunk/build.xml Thu Feb  7 12:57:23 2008
@@ -38,7 +38,7 @@
   
 
   
-  
+  
 
   
   




svn commit: r619768 - in /hadoop/hbase/trunk: CHANGES.txt build.xml src/test/hbase-site.xml src/test/org/apache/hadoop/hbase/TestRegionServerExit.java

2008-02-07 Thread jimk
Author: jimk
Date: Thu Feb  7 20:58:00 2008
New Revision: 619768

URL: http://svn.apache.org/viewvc?rev=619768&view=rev
Log:
HBASE-421   TestRegionServerExit broken

Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/build.xml
hadoop/hbase/trunk/src/test/hbase-site.xml

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestRegionServerExit.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=619768&r1=619767&r2=619768&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Thu Feb  7 20:58:00 2008
@@ -15,6 +15,7 @@
(Dave Simpson via Bryan Duxbury via Stack)
HBASE-2 hlog numbers should wrap around when they reach 999
(Bryan Duxbury via Stack)
+   HBASE-421   TestRegionServerExit broken
 
   IMPROVEMENTS
HBASE-415   Rewrite leases to use DelayedBlockingQueue instead of polling

Modified: hadoop/hbase/trunk/build.xml
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/build.xml?rev=619768&r1=619767&r2=619768&view=diff
==
--- hadoop/hbase/trunk/build.xml (original)
+++ hadoop/hbase/trunk/build.xml Thu Feb  7 20:58:00 2008
@@ -38,7 +38,7 @@
   
 
   
-  
+  
 
   
   
@@ -277,9 +277,13 @@
   
   
   
-
 
 
+
+
 
 
 
@@ -309,7 +313,7 @@
 
 
   

Modified: hadoop/hbase/trunk/src/test/hbase-site.xml
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/hbase-site.xml?rev=619768&r1=619767&r2=619768&view=diff
==
--- hadoop/hbase/trunk/src/test/hbase-site.xml (original)
+++ hadoop/hbase/trunk/src/test/hbase-site.xml Thu Feb  7 20:58:00 2008
@@ -89,20 +89,6 @@
 
   
   
-hbase.master.info.port
--1
-The port for the hbase master web UI
-Set to -1 if you do not want the info server to run.
-
-  
-  
-hbase.regionserver.info.port
--1
-The port for the hbase regionserver web UI
-Set to -1 if you do not want the info server to run.
-
-  
-  
 hbase.master.lease.thread.wakefrequency
 3000
 The interval between checks for expired region server leases.

Modified: 
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestRegionServerExit.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestRegionServerExit.java?rev=619768&r1=619767&r2=619768&view=diff
==
--- 
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestRegionServerExit.java 
(original)
+++ 
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestRegionServerExit.java 
Thu Feb  7 20:58:00 2008
@@ -28,6 +28,8 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.Text;
 
+import org.apache.hadoop.hbase.io.BatchUpdate;
+
 /**
  * Tests region server failover when a region server exits both cleanly and
  * when it aborts.
@@ -95,10 +97,10 @@
 // put some values in the table
 this.table = new HTable(conf, new Text(tableName));
 final Text row = new Text("row1");
-long lockid = table.startUpdate(row);
-table.put(lockid, HConstants.COLUMN_FAMILY,
+BatchUpdate b = new BatchUpdate(row);
+b.put(HConstants.COLUMN_FAMILY,
 tableName.getBytes(HConstants.UTF8_ENCODING));
-table.commit(lockid);
+table.commit(b);
 return row;
   }
 
@@ -179,8 +181,9 @@
 HConstants.UTF8_ENCODING)));
   }
   LOG.info("Success!");
-} catch (IOException e) {
+} catch (Exception e) {
   e.printStackTrace();
+  fail();
 } finally {
   if (scanner != null) {
 LOG.info("Closing scanner " + scanner);




svn commit: r619780 - in /hadoop/hbase/branches/0.1: ./ CHANGES.txt build.xml src/test/hbase-site.xml

2008-02-07 Thread jimk
Author: jimk
Date: Thu Feb  7 22:36:26 2008
New Revision: 619780

URL: http://svn.apache.org/viewvc?rev=619780&view=rev
Log:
HBase-421   TestRegionServerExit broken - back ported to HBase 0.1

Modified:
hadoop/hbase/branches/0.1/   (props changed)
hadoop/hbase/branches/0.1/CHANGES.txt
hadoop/hbase/branches/0.1/build.xml
hadoop/hbase/branches/0.1/src/test/hbase-site.xml

Propchange: hadoop/hbase/branches/0.1/
--
--- svn:ignore (original)
+++ svn:ignore Thu Feb  7 22:36:26 2008
@@ -1,2 +1,3 @@
-.project
-.classpath
+.project
+.classpath
+build

Modified: hadoop/hbase/branches/0.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/CHANGES.txt?rev=619780&r1=619779&r2=619780&view=diff
==
--- hadoop/hbase/branches/0.1/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.1/CHANGES.txt Thu Feb  7 22:36:26 2008
@@ -23,6 +23,7 @@
HADOOP-2773 Master marks region offline when it is recovering from a region
server death
HBASE-425   Fix doc. so it accommodates new hbase untethered context
+   HBase-421   TestRegionServerExit broken
 
   IMPROVEMENTS
HADOOP-2555 Refactor the HTable#get and HTable#getRow methods to avoid

Modified: hadoop/hbase/branches/0.1/build.xml
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/build.xml?rev=619780&r1=619779&r2=619780&view=diff
==
--- hadoop/hbase/branches/0.1/build.xml (original)
+++ hadoop/hbase/branches/0.1/build.xml Thu Feb  7 22:36:26 2008
@@ -1,348 +1,352 @@
-
-
-
-
-
-  
-  
-  
-  
- 
-  
-  
-  
-  
-
-  
-  
-  
-  
-
-  
-  
-  
-
-  
-  
-
-  
-  
-  
-  
-  
-  
-  
-  
-  
-  
-  
-  
-
-  
-  
-  
-
-  
-
-  
-  
-
-  http://java.sun.com/j2se/1.5/docs/api/"/>
-  
-
-
-  
-  
-  
-
-
-  
-
-
-
-  
-
-  
-
-
-
-
-
-
-
-
-  
-
-
-
-  
-
-
-
-
-  
-
-
-
-  
-
-
-
-  
-
-
-  
-
-
-   
-
-  
-
-  
-  
-  
- 
- 
-  
-
-  
-   
-   
- 
-   
-  
-   
-  
-  
-
-  
-  
-
-  
-
-  
-  
-  
-   
-
-
-  
-
-
-  
-
-
-
-  
-
-
-
-  
-
-
-
-  
-
-
-
-  
-
-
-  
-
-
-
-  
-
-  
-
-
-
-  
-
-  
-
-  
-  
-  
-  
-
-
-
-  
-  
-  
-
-  
-  
-
-  
-
-  
-  
-
-
-  
-
-  
-
-  
-
-  
-
-  
-
-  
-  
-  
-  
-
-
-  
-
-  
-
-  
-
-  
-  
-  
-  
-
-
-   
-
-
-  
-  
-
-   
-
-  
-
-  
-  
-  
-  
-
-
-
-
-
-
-  
-
-   
- 
- 
-
-
-  
-  
-  
-  
-
-  
-
-  
-
-  
-
-
-
-  
-  
-  
-  
-  
-  
-  
-  
-  
-  
-   
-  
-  
-  
-
-  
-  
-
-  
-
-Tests failed!
-  
-
-  
-  
-  
-  
-
-  
-
+
+
+
+
+
+  
+  
+  
+  
+ 
+  
+  
+  
+  
+
+  
+  
+  
+  
+
+  
+  
+  
+
+  
+  
+
+  
+  
+  
+  
+  
+  
+  
+  
+  
+  
+  
+  
+
+  
+  
+  
+
+  
+
+  
+  
+
+  h

svn commit: r620232 - in /hadoop/hbase/trunk: CHANGES.txt build.xml

2008-02-09 Thread jimk
Author: jimk
Date: Sat Feb  9 17:52:53 2008
New Revision: 620232

URL: http://svn.apache.org/viewvc?rev=620232&view=rev
Log:
HBASE-410   Speed up the test suite - Apparently test timeout was too 
aggressive for Hudson. TestLogRolling timed out even though it was operating 
properly. Change test timeout to 10 minutes.

Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/build.xml

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=620232&r1=620231&r2=620232&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Sat Feb  9 17:52:53 2008
@@ -37,6 +37,9 @@
(Bryan Duxbury via Stack)
HBASE-418   Move HMaster and related classes into master package
(Bryan Duxbury via Stack)
+   HBASE-410   Speed up the test suite - Apparently test timeout was too
+   aggressive for Hudson. TestLogRolling timed out even though it
+   was operating properly. Change test timeout to 10 minutes.
 
 
 Branch 0.1

Modified: hadoop/hbase/trunk/build.xml
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/build.xml?rev=620232&r1=620231&r2=620232&view=diff
==
--- hadoop/hbase/trunk/build.xml (original)
+++ hadoop/hbase/trunk/build.xml Sat Feb  9 17:52:53 2008
@@ -38,7 +38,7 @@
   
 
   
-  
+  
 
   
   




svn commit: r627197 - /hadoop/hbase/trunk/CHANGES.txt

2008-02-12 Thread jimk
Author: jimk
Date: Tue Feb 12 16:18:29 2008
New Revision: 627197

URL: http://svn.apache.org/viewvc?rev=627197&view=rev
Log:
HBASE-434, HBASE-435 TestTableIndex and TestTableMapReduce failed in Hudson 
builds

Modified:
hadoop/hbase/trunk/CHANGES.txt

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=627197&r1=627196&r2=627197&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Tue Feb 12 16:18:29 2008
@@ -18,7 +18,7 @@
HBASE-421   TestRegionServerExit broken
HBASE-426   hbase can't find remote filesystem
HBASE-437   Clear Command should use system.out (Edward Yoon via Stack)
-   HBASE-43[45] TestTableIndex and TestTableMapReduce failed in Hudson builds
+   HBASE-434, HBASE-435 TestTableIndex and TestTableMapReduce failed in Hudson 
builds
 
   IMPROVEMENTS
HBASE-415   Rewrite leases to use DelayedBlockingQueue instead of polling




svn commit: r627152 - in /hadoop/hbase/trunk: CHANGES.txt conf/hbase-default.xml src/java/org/apache/hadoop/hbase/HRegionServer.java

2008-02-12 Thread jimk
Author: jimk
Date: Tue Feb 12 14:19:24 2008
New Revision: 627152

URL: http://svn.apache.org/viewvc?rev=627152&view=rev
Log:
HBASE-440   Add optional log roll interval so that log files are garbage 
collected

Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/conf/hbase-default.xml
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionServer.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=627152&r1=627151&r2=627152&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Tue Feb 12 14:19:24 2008
@@ -44,6 +44,8 @@
HBASE-436   website: http://hadoop.apache.org/hbase
HBASE-417   Factor TableOperation and subclasses into separate files from
HMaster (Bryan Duxbury via Stack)
+   HBASE-440   Add optional log roll interval so that log files are garbage
+   collected
 
 
 Branch 0.1

Modified: hadoop/hbase/trunk/conf/hbase-default.xml
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/conf/hbase-default.xml?rev=627152&r1=627151&r2=627152&view=diff
==
--- hadoop/hbase/trunk/conf/hbase-default.xml (original)
+++ hadoop/hbase/trunk/conf/hbase-default.xml Tue Feb 12 14:19:24 2008
@@ -156,6 +156,16 @@
 
   
   
+hbase.regionserver.optionallogrollinterval
+180
+
+Amount of time to wait since the last time the region server's log was
+rolled before invoking an optional log roll (an optional log roll is
+one in which the log does not contain hbase.regionserver.maxlogentries).
+Default: 30 minutes (in milliseconds)
+
+  
+  
 hbase.hregion.memcache.flush.size
 67108864
 

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionServer.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionServer.java?rev=627152&r1=627151&r2=627152&view=diff
==
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionServer.java 
(original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionServer.java Tue 
Feb 12 14:19:24 2008
@@ -508,12 +508,17 @@
   /** Runs periodically to determine if the HLog should be rolled */
   class LogRoller extends Thread implements LogRollListener {
 private final Integer rollLock = new Integer(0);
+private final long optionalLogRollInterval;
+private long lastLogRollTime;
 private volatile boolean rollLog;
 
 /** constructor */
 public LogRoller() {
   super();
+  this.optionalLogRollInterval = conf.getLong(
+  "hbase.regionserver.optionallogrollinterval", 30L * 60L * 1000L);
   this.rollLog = false;
+  lastLogRollTime = System.currentTimeMillis();
 }
  
 /** [EMAIL PROTECTED] */
@@ -521,12 +526,18 @@
 public void run() {
   while (!stopRequested.get()) {
 while (!rollLog && !stopRequested.get()) {
-  synchronized (rollLock) {
-try {
-  rollLock.wait(threadWakeFrequency);
+  long now = System.currentTimeMillis();
+  if (this.lastLogRollTime + this.optionalLogRollInterval <= now) {
+rollLog = true;
+this.lastLogRollTime = now;
+  } else {
+synchronized (rollLock) {
+  try {
+rollLock.wait(threadWakeFrequency);
 
-} catch (InterruptedException e) {
-  continue;
+  } catch (InterruptedException e) {
+continue;
+  }
 }
   }
 }




svn commit: r627195 - in /hadoop/hbase/trunk: CHANGES.txt src/test/org/apache/hadoop/hbase/MultiRegionTable.java src/test/org/apache/hadoop/hbase/mapred/TestTableIndex.java src/test/org/apache/hadoop/

2008-02-12 Thread jimk
Author: jimk
Date: Tue Feb 12 16:09:52 2008
New Revision: 627195

URL: http://svn.apache.org/viewvc?rev=627195&view=rev
Log:
HBASE-43[45] TestTableIndex and TestTableMapReduce failed in Hudson builds

Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MultiRegionTable.java

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/mapred/TestTableIndex.java

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=627195&r1=627194&r2=627195&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Tue Feb 12 16:09:52 2008
@@ -18,6 +18,7 @@
HBASE-421   TestRegionServerExit broken
HBASE-426   hbase can't find remote filesystem
HBASE-437   Clear Command should use system.out (Edward Yoon via Stack)
+   HBASE-43[45] TestTableIndex and TestTableMapReduce failed in Hudson builds
 
   IMPROVEMENTS
HBASE-415   Rewrite leases to use DelayedBlockingQueue instead of polling

Modified: 
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MultiRegionTable.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MultiRegionTable.java?rev=627195&r1=627194&r2=627195&view=diff
==
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MultiRegionTable.java 
(original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MultiRegionTable.java 
Tue Feb 12 16:09:52 2008
@@ -88,11 +88,17 @@
 // with EMPTY_START_ROW will be one of the unsplittable daughters.
 HRegionInfo hri = null;
 HRegion r = null;
+HRegionServer server = 
cluster.getRegionThreads().get(0).getRegionServer(); 
 for (int i = 0; i < 30; i++) {
-  hri = t.getRegionLocation(HConstants.EMPTY_START_ROW).getRegionInfo();
+  try {
+hri = t.getRegionLocation(HConstants.EMPTY_START_ROW).getRegionInfo();
+  } catch (IOException e) {
+e = RemoteExceptionHandler.checkIOException(e);
+e.printStackTrace();
+continue;
+  }
   LOG.info("Region location: " + hri);
-  r = cluster.getRegionThreads().get(0).getRegionServer().
-onlineRegions.get(hri.getRegionName());
+  r = server.onlineRegions.get(hri.getRegionName());
   if (r != null) {
 break;
   }
@@ -102,10 +108,10 @@
 LOG.warn("Waiting on region to come online", e);
   }
 }
+assertNotNull(r);
 
 // Flush the cache
-
cluster.getRegionThreads().get(0).getRegionServer().getCacheFlushListener().
-  flushRequested(r);
+server.getCacheFlushListener().flushRequested(r);
 
 // Now, wait until split makes it into the meta table.
 int oldCount = count;
@@ -158,7 +164,8 @@
 // still has references.
 while (true) {
   data = getSplitParentInfo(meta, parent);
-  if (data == null || data.size() == 3) {
+  if (data != null && data.size() == 3) {
+LOG.info("Waiting for splitA to release reference to parent");
 try {
   Thread.sleep(waitTime);
 } catch (InterruptedException e) {
@@ -168,7 +175,9 @@
   }
   break;
 }
-LOG.info("Parent split info returned " + data.keySet().toString());
+if (data != null) {
+  LOG.info("Parent split info returned " + data.keySet().toString());
+}
   }
 
   if (splitB == null) {
@@ -199,8 +208,10 @@
 
 for (int i = 0; i < retries; i++) {
   if (!fs.exists(parentDir)) {
+LOG.info("Parent directory was deleted. tries=" + i);
 break;
   }
+  LOG.info("Waiting for parent directory to be deleted. tries=" + i);
   try {
 Thread.sleep(waitTime);
   } catch (InterruptedException e) {
@@ -260,8 +271,7 @@
   continue;
 }
 // Make sure I get the parent.
-if (hri.getRegionName().toString().
-equals(parent.getRegionName().toString()) &&
+if (hri.getRegionName().equals(parent.getRegionName()) &&
   hri.getRegionId() == parent.getRegionId()) {
   return curVals;
 }
@@ -316,8 +326,7 @@
* @throws IOException
*/
   protected static void compact(final MiniHBaseCluster cluster,
-  final HRegionInfo r)
-  throws IOException {
+  final HRegionInfo r) throws IOException {
 if (r == null) {
   LOG.debug("Passed region is null");
   return;
@@ -332,8 +341,7 @@
   for (int i = 0; i < 10; i++) {
 try {
   for (HRegion online: regions.values()) {
-if (online.g

svn commit: r627839 - in /hadoop/hbase/branches/0.1: CHANGES.txt src/java/org/apache/hadoop/hbase/HMaster.java src/java/org/apache/hadoop/hbase/HRegionServer.java src/java/overview.html

2008-02-14 Thread jimk
Author: jimk
Date: Thu Feb 14 11:00:17 2008
New Revision: 627839

URL: http://svn.apache.org/viewvc?rev=627839&view=rev
Log:
HBASE-446   Fully qualified hbase.rootdir doesn't work

Modified:
hadoop/hbase/branches/0.1/CHANGES.txt
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HMaster.java

hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegionServer.java
hadoop/hbase/branches/0.1/src/java/overview.html

Modified: hadoop/hbase/branches/0.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/CHANGES.txt?rev=627839&r1=627838&r2=627839&view=diff
==
--- hadoop/hbase/branches/0.1/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.1/CHANGES.txt Thu Feb 14 11:00:17 2008
@@ -25,6 +25,7 @@
HBASE-425   Fix doc. so it accommodates new hbase untethered context
HBase-421   TestRegionServerExit broken
HBASE-426   hbase can't find remote filesystem
+   HBASE-446   Fully qualified hbase.rootdir doesn't work
 
   IMPROVEMENTS
HADOOP-2555 Refactor the HTable#get and HTable#getRow methods to avoid

Modified: 
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HMaster.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HMaster.java?rev=627839&r1=627838&r2=627839&view=diff
==
--- hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HMaster.java 
(original)
+++ hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HMaster.java Thu 
Feb 14 11:00:17 2008
@@ -864,10 +864,12 @@
*/
   public HMaster(Path rd, HServerAddress address, HBaseConfiguration conf)
   throws IOException {
-
 this.conf = conf;
+this.rootdir = rd;
+// The filesystem hbase wants to use is probably not what is set into
+// fs.default.name; its value is probably the default.
+this.conf.set("fs.default.name", this.rootdir.toString());
 this.fs = FileSystem.get(conf);
-this.rootdir = this.fs.makeQualified(rd);
 this.conf.set(HConstants.HBASE_DIR, this.rootdir.toString());
 this.rand = new Random();
 Path rootRegionDir =

Modified: 
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegionServer.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegionServer.java?rev=627839&r1=627838&r2=627839&view=diff
==
--- 
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegionServer.java 
(original)
+++ 
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegionServer.java 
Thu Feb 14 11:00:17 2008
@@ -857,6 +857,12 @@
 }
 this.conf.set(key, value);
   }
+  // Master sent us hbase.rootdir to use. Should be fully qualified
+  // path with file system specification included.  Set 'fs.default.name'
+  // to match the filesystem on hbase.rootdir else underlying hadoop hdfs
+  // accessors will be going against wrong filesystem (unless all is set
+  // to defaults).
+  this.conf.set("fs.default.name", this.conf.get("hbase.rootdir"));
   this.fs = FileSystem.get(this.conf);
   this.rootDir = new Path(this.conf.get(HConstants.HBASE_DIR));
   this.log = setupHLog();

Modified: hadoop/hbase/branches/0.1/src/java/overview.html
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/overview.html?rev=627839&r1=627838&r2=627839&view=diff
==
--- hadoop/hbase/branches/0.1/src/java/overview.html (original)
+++ hadoop/hbase/branches/0.1/src/java/overview.html Thu Feb 14 11:00:17 2008
@@ -75,7 +75,7 @@
 
   <property>
 <name>hbase.master</name>
-<value>http://localhost:6</value>;
+<value>localhost:6</value>
 <description>The host and port that the HBase master runs at.
 </description>
   </property>




svn commit: r627837 - in /hadoop/hbase/trunk: CHANGES.txt src/java/org/apache/hadoop/hbase/HRegionServer.java src/java/org/apache/hadoop/hbase/master/HMaster.java src/java/overview.html

2008-02-14 Thread jimk
Author: jimk
Date: Thu Feb 14 10:49:00 2008
New Revision: 627837

URL: http://svn.apache.org/viewvc?rev=627837&view=rev
Log:
HBASE-446 Fully qualified hbase.rootdir doesn't work

Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionServer.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java
hadoop/hbase/trunk/src/java/overview.html

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=627837&r1=627836&r2=627837&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Thu Feb 14 10:49:00 2008
@@ -19,6 +19,7 @@
HBASE-426   hbase can't find remote filesystem
HBASE-437   Clear Command should use system.out (Edward Yoon via Stack)
HBASE-434, HBASE-435 TestTableIndex and TestTableMapReduce failed in Hudson 
builds
+   HBASE-446   Fully qualified hbase.rootdir doesn't work
 
   IMPROVEMENTS
HBASE-415   Rewrite leases to use DelayedBlockingQueue instead of polling

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionServer.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionServer.java?rev=627837&r1=627836&r2=627837&view=diff
==
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionServer.java 
(original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionServer.java Thu 
Feb 14 10:49:00 2008
@@ -872,6 +872,12 @@
 }
 this.conf.set(key, value);
   }
+  // Master sent us hbase.rootdir to use. Should be fully qualified
+  // path with file system specification included.  Set 'fs.default.name'
+  // to match the filesystem on hbase.rootdir else underlying hadoop hdfs
+  // accessors will be going against wrong filesystem (unless all is set
+  // to defaults).
+  this.conf.set("fs.default.name", this.conf.get("hbase.rootdir"));
   this.fs = FileSystem.get(this.conf);
   this.rootDir = new Path(this.conf.get(HConstants.HBASE_DIR));
   this.log = setupHLog();

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java?rev=627837&r1=627836&r2=627837&view=diff
==
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java 
(original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java Thu 
Feb 14 10:49:00 2008
@@ -255,10 +255,12 @@
*/
   public HMaster(Path rd, HServerAddress address, HBaseConfiguration conf)
   throws IOException {
-
 this.conf = conf;
+this.rootdir = rd;
+// The filesystem hbase wants to use is probably not what is set into
+// fs.default.name; its value is probably the default.
+this.conf.set("fs.default.name", this.rootdir.toString());
 this.fs = FileSystem.get(conf);
-this.rootdir = this.fs.makeQualified(rd);
 this.conf.set(HConstants.HBASE_DIR, this.rootdir.toString());
 this.rand = new Random();
 Path rootRegionDir =

Modified: hadoop/hbase/trunk/src/java/overview.html
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/overview.html?rev=627837&r1=627836&r2=627837&view=diff
==
--- hadoop/hbase/trunk/src/java/overview.html (original)
+++ hadoop/hbase/trunk/src/java/overview.html Thu Feb 14 10:49:00 2008
@@ -75,7 +75,7 @@
 
   <property>
 <name>hbase.master</name>
-<value>http://localhost:6</value>;
+<value>localhost:6</value>
 <description>The host and port that the HBase master runs at.
 </description>
   </property>




svn commit: r630383 - in /hadoop/hbase/trunk: CHANGES.txt src/test/org/apache/hadoop/hbase/util/TestMigrate.java

2008-02-22 Thread jimk
Author: jimk
Date: Fri Feb 22 19:18:44 2008
New Revision: 630383

URL: http://svn.apache.org/viewvc?rev=630383&view=rev
Log:
HBASE-460   TestMigrate broken when HBase moved to subproject

Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMigrate.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=630383&r1=630382&r2=630383&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Fri Feb 22 19:18:44 2008
@@ -24,7 +24,7 @@
HBASE-8 Delete table does not remove the table directory in the FS
HBASE-428   Under continuous upload of rows, WrongRegionExceptions are 
thrown
that reach the client even after retries
-   
+   HBASE-460   TestMigrate broken when HBase moved to subproject   

   IMPROVEMENTS
HBASE-415   Rewrite leases to use DelayedBlockingQueue instead of polling

Modified: 
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMigrate.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMigrate.java?rev=630383&r1=630382&r2=630383&view=diff
==
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMigrate.java 
(original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMigrate.java 
Fri Feb 22 19:18:44 2008
@@ -86,7 +86,7 @@
* First load files from an old style HBase file structure
*/
   
-  // Current directory is 
.../workspace/project/build/contrib/hbase/test/data
+  // Current directory is .../project/build/test/data
   
   FileSystem localfs = FileSystem.getLocal(conf);
   
@@ -96,11 +96,11 @@
   
   // this path is for running test with ant
   
-  
"../../../../../src/contrib/hbase/src/testdata/HADOOP-2478-testdata.zip")
+  "../../../src/testdata/HADOOP-2478-testdata.zip")
   
   // and this path is for when you want to run inside eclipse
   
-  /*"src/contrib/hbase/src/testdata/HADOOP-2478-testdata.zip")*/
+  /*"src/testdata/HADOOP-2478-testdata.zip")*/
   );
   
   ZipInputStream zip = new ZipInputStream(hs);




svn commit: r630389 - in /hadoop/hbase/trunk: CHANGES.txt src/java/org/apache/hadoop/hbase/Leases.java

2008-02-22 Thread jimk
Author: jimk
Date: Fri Feb 22 20:44:11 2008
New Revision: 630389

URL: http://svn.apache.org/viewvc?rev=630389&view=rev
Log:
HBASE-461   Simplify leases.

Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/Leases.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=630389&r1=630388&r2=630389&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Fri Feb 22 20:44:11 2008
@@ -59,7 +59,8 @@
HBASE-414   Move client classes into client package
HBASE-79When HBase needs to be migrated, it should display a message on
stdout, not just in the logs
-   
+   HBASE-461   Simplify leases.
+
 Branch 0.1
 
   INCOMPATIBLE CHANGES

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/Leases.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/Leases.java?rev=630389&r1=630388&r2=630389&view=diff
==
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/Leases.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/Leases.java Fri Feb 22 
20:44:11 2008
@@ -50,8 +50,6 @@
   private volatile DelayQueue leaseQueue = new DelayQueue();
 
   protected final Map leases = new HashMap();
-  protected final Map listeners =
-new HashMap();
   private volatile boolean stopRequested = false;
 
   /**
@@ -84,17 +82,14 @@
 continue;
   }
   // A lease expired
-  LeaseListener listener = null;
   synchronized (leaseQueue) {
-String leaseName = lease.getLeaseName();
-leases.remove(leaseName);
-listener = listeners.remove(leaseName);
-if (listener == null) {
-  LOG.error("lease listener is null for lease " + leaseName);
+leases.remove(lease.getLeaseName());
+if (lease.getListener() == null) {
+  LOG.error("lease listener is null for lease " + 
lease.getLeaseName());
   continue;
 }
   }
-  listener.leaseExpired();
+  lease.getListener().leaseExpired();
 }
 close();
   }
@@ -120,7 +115,6 @@
 synchronized (leaseQueue) {
   leaseQueue.clear();
   leases.clear();
-  listeners.clear();
   leaseQueue.notifyAll();
 }
 LOG.info(Thread.currentThread().getName() + " closed leases");
@@ -136,14 +130,14 @@
 if (stopRequested) {
   return;
 }
-Lease lease = new Lease(leaseName, System.currentTimeMillis() + 
leasePeriod);
+Lease lease = new Lease(leaseName, listener,
+System.currentTimeMillis() + leasePeriod);
 synchronized (leaseQueue) {
   if (leases.containsKey(leaseName)) {
 throw new IllegalStateException("lease '" + leaseName +
 "' already exists");
   }
   leases.put(leaseName, lease);
-  listeners.put(leaseName, listener);
   leaseQueue.add(lease);
 }
   }
@@ -179,17 +173,18 @@
 "' does not exist");
   }
   leaseQueue.remove(lease);
-  listeners.remove(leaseName);
 }
   }
 
   /** This class tracks a single Lease. */
   private static class Lease implements Delayed {
 private final String leaseName;
+private final LeaseListener listener;
 private long expirationTime;
 
-Lease(final String leaseName, long expirationTime) {
+Lease(final String leaseName, LeaseListener listener, long expirationTime) 
{
   this.leaseName = leaseName;
+  this.listener = listener;
   this.expirationTime = expirationTime;
 }
 
@@ -197,6 +192,11 @@
 public String getLeaseName() {
   return leaseName;
 }
+
+/** @return listener */
+public LeaseListener getListener() {
+  return this.listener;
+}
 
 /** [EMAIL PROTECTED] */
 @Override
@@ -219,16 +219,9 @@
 /** [EMAIL PROTECTED] */
 public int compareTo(Delayed o) {
   long delta = this.getDelay(TimeUnit.MILLISECONDS) -
-  o.getDelay(TimeUnit.MILLISECONDS);
+o.getDelay(TimeUnit.MILLISECONDS);
 
-  int value = 0;
-  if (delta > 0) {
-value = 1;
-
-  } else if (delta < 0) {
-value = -1;
-  }
-  return value;
+  return this.equals(o) ? 0 : (delta > 0 ? 1 : -1);
 }
 
 /** @param expirationTime the expirationTime to set */




svn commit: r630394 - in /hadoop/hbase/trunk: ./ src/java/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/hbase/mapred/ src/java/org/apache/hadoop/hbase/master/ src/java/org/apache/hadoop/hbase/ut

2008-02-22 Thread jimk
Author: jimk
Date: Fri Feb 22 22:11:44 2008
New Revision: 630394

URL: http://svn.apache.org/viewvc?rev=630394&view=rev
Log:
HBASE-462   Update migration tool

Other miscellaneous changes included:

IdentityTableReduce
- Added SuppressWarnings("unused") for reporter argument
- Removed unnecessary cast.
AbstractMergeTestBase
- Removed unnecessary compaction
StaticTestEnvironment
- Change logging level for client connections which are too noisy in most cases
TestBloomFilters
- Removed unnecessary config settings
- Modified to use BatchUpdate instead of deprecated startUpdate, etc.
TestScannerAPI
- Modified to use BatchUpdate instead of deprecated startUpdate, etc.

Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConstants.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/FSUtils.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/AbstractMergeTestBase.java

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/StaticTestEnvironment.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestBloomFilters.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestScannerAPI.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=630394&r1=630393&r2=630394&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Fri Feb 22 22:11:44 2008
@@ -25,6 +25,7 @@
HBASE-428   Under continuous upload of rows, WrongRegionExceptions are 
thrown
that reach the client even after retries
HBASE-460   TestMigrate broken when HBase moved to subproject   
+   HBASE-462   Update migration tool

   IMPROVEMENTS
HBASE-415   Rewrite leases to use DelayedBlockingQueue instead of polling

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConstants.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConstants.java?rev=630394&r1=630393&r2=630394&view=diff
==
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConstants.java 
(original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConstants.java Fri Feb 
22 22:11:44 2008
@@ -25,7 +25,8 @@
  * HConstants holds a bunch of HBase-related constants
  */
 public interface HConstants {
-  
+
+  /** long constant for zero */
   static final Long ZERO_L = Long.valueOf(0L);
   
   // For migration
@@ -34,7 +35,7 @@
   static final String VERSION_FILE_NAME = "hbase.version";
   
   /** version of file system */
-  static final String FILE_SYSTEM_VERSION = "0.1";
+  static final String FILE_SYSTEM_VERSION = "2";
   
   // Configuration parameters
   

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java?rev=630394&r1=630393&r2=630394&view=diff
==
--- 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java
 (original)
+++ 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java
 Fri Feb 22 22:11:44 2008
@@ -39,11 +39,12 @@
*/
   @Override
   public void reduce(Text key, Iterator values,
-  OutputCollector output, Reporter reporter)
+  OutputCollector output,
+  @SuppressWarnings("unused") Reporter reporter)
   throws IOException {
 
 while(values.hasNext()) {
-  MapWritable r = (MapWritable)values.next();
+  MapWritable r = values.next();
   output.collect(key, r);
 }
   }

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java?rev=630394&r1=630393&r2=630394&view=diff
==
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java 
(original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java Fri 
Feb 22 22:11:44 2008
@@ -148,6 +148,7 @@
   /** Name of master server */
   public static final String MASTER = "master";
 
+  /** @return InfoServer object */
   public InfoServer getInfoServer() {
 return infoServer;
   }
@@ -270,16 +271,21 @@
 try {
   // Make sure the root directory exists!
   if(! fs.exists(rootdir)) {
-

svn commit: r632533 - in /hadoop/hbase/trunk: CHANGES.txt src/test/org/apache/hadoop/hbase/regionserver/TestLogRolling.java

2008-02-29 Thread jimk
Author: jimk
Date: Fri Feb 29 17:52:59 2008
New Revision: 632533

URL: http://svn.apache.org/viewvc?rev=632533&view=rev
Log:
HBASE-479   Speed up TestLogRolling

Modified:
hadoop/hbase/trunk/CHANGES.txt

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestLogRolling.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=632533&r1=632532&r2=632533&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Fri Feb 29 17:52:59 2008
@@ -70,6 +70,7 @@
HBASE-442   Move internal classes out of HRegionServer
HBASE-466   Move HMasterInterface, HRegionInterface, and 
HMasterRegionInterface into o.a.h.h.ipc
+   HBASE-479   Speed up TestLogRolling

 Branch 0.1
 

Modified: 
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestLogRolling.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestLogRolling.java?rev=632533&r1=632532&r2=632533&view=diff
==
--- 
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestLogRolling.java
 (original)
+++ 
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestLogRolling.java
 Fri Feb 29 17:52:59 2008
@@ -35,6 +35,7 @@
 import org.apache.hadoop.hbase.StaticTestEnvironment;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.io.BatchUpdate;
 
 /**
  * Test log deletion as logs are rolled.
@@ -65,8 +66,8 @@
   // Force a region split after every 768KB
   conf.setLong("hbase.hregion.max.filesize", 768L * 1024L);
 
-  // We roll the log after every 256 writes
-  conf.setInt("hbase.regionserver.maxlogentries", 256);
+  // We roll the log after every 32 writes
+  conf.setInt("hbase.regionserver.maxlogentries", 32);
 
   // For less frequently updated regions flush after every 2 flushes
   conf.setInt("hbase.hregion.memcache.optionalflushcount", 2);
@@ -102,11 +103,11 @@
   @Override
   public void setUp() throws Exception {
 try {
-  super.setUp();
   dfs = new MiniDFSCluster(conf, 2, true, (String[]) null);
   // Set the hbase.rootdir to be the home directory in mini dfs.
   this.conf.set(HConstants.HBASE_DIR,
 this.dfs.getFileSystem().getHomeDirectory().toString());
+  super.setUp();
 } catch (Exception e) {
   StaticTestEnvironment.shutdownDfs(dfs);
   LOG.fatal("error during setUp: ", e);
@@ -141,7 +142,7 @@
 this.log = server.getLog();
 
 // When the META table can be opened, the region servers are running
-HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
+new HTable(conf, HConstants.META_TABLE_NAME);
 
 // Create the test table and open it
 HTableDescriptor desc = new HTableDescriptor(tableName);
@@ -150,14 +151,14 @@
 admin.createTable(desc);
 HTable table = new HTable(conf, new Text(tableName));
 
-for (int i = 1; i <= 2048; i++) {// 2048 writes should cause 8 log 
rolls
-  long lockid =
-table.startUpdate(new Text("row" + String.format("%1$04d", i)));
-  table.put(lockid, HConstants.COLUMN_FAMILY, value);
-  table.commit(lockid);
+for (int i = 1; i <= 256; i++) {// 256 writes should cause 8 log rolls
+  BatchUpdate b =
+new BatchUpdate(new Text("row" + String.format("%1$04d", i)));
+  b.put(HConstants.COLUMN_FAMILY, value);
+  table.commit(b);
 
-  if (i % 256 == 0) {
-// After every 256 writes sleep to let the log roller run
+  if (i % 32 == 0) {
+// After every 32 writes sleep to let the log roller run
 
 try {
   Thread.sleep(2000);
@@ -193,7 +194,7 @@
   int count = log.getNumLogFiles();
   LOG.info("after flushing all regions and rolling logs there are " +
   log.getNumLogFiles() + " log files");
-  assertTrue(count <= 2);
+  assertTrue(("actual count: " + count), count <= 2);
 } catch (Exception e) {
   LOG.fatal("unexpected exception", e);
   throw e;




svn commit: r634168 - in /hadoop/hbase/trunk: ./ lib/ src/test/ src/test/org/apache/hadoop/hbase/

2008-03-05 Thread jimk
Author: jimk
Date: Wed Mar  5 21:09:33 2008
New Revision: 634168

URL: http://svn.apache.org/viewvc?rev=634168&view=rev
Log:
HBASE-492   hbase TRUNK does not build against hadoop TRUNK

Added:
hadoop/hbase/trunk/lib/hadoop-0.17.0-dev.2008-03-04_15-19-00-core.jar   
(with props)
hadoop/hbase/trunk/lib/hadoop-0.17.0-dev.2008-03-04_15-19-00-test.jar   
(with props)
Removed:
hadoop/hbase/trunk/lib/hadoop-0.17.0-dev.2008-02-07_12-01-58-core.jar
hadoop/hbase/trunk/lib/hadoop-0.17.0-dev.2008-02-07_12-01-58-test.jar
Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/src/test/hbase-site.xml
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MultiRegionTable.java

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=634168&r1=634167&r2=634168&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Wed Mar  5 21:09:33 2008
@@ -29,6 +29,7 @@
HBASE-473   When a table is deleted, master sends multiple close messages to
the region server
HBASE-490   Doubly-assigned .META.; master uses one and clients another
+   HBASE-492   hbase TRUNK does not build against hadoop TRUNK

   IMPROVEMENTS
HBASE-415   Rewrite leases to use DelayedBlockingQueue instead of polling

Added: hadoop/hbase/trunk/lib/hadoop-0.17.0-dev.2008-03-04_15-19-00-core.jar
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/lib/hadoop-0.17.0-dev.2008-03-04_15-19-00-core.jar?rev=634168&view=auto
==
Binary file - no diff available.

Propchange: 
hadoop/hbase/trunk/lib/hadoop-0.17.0-dev.2008-03-04_15-19-00-core.jar
--
svn:mime-type = application/octet-stream

Added: hadoop/hbase/trunk/lib/hadoop-0.17.0-dev.2008-03-04_15-19-00-test.jar
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/lib/hadoop-0.17.0-dev.2008-03-04_15-19-00-test.jar?rev=634168&view=auto
==
Binary file - no diff available.

Propchange: 
hadoop/hbase/trunk/lib/hadoop-0.17.0-dev.2008-03-04_15-19-00-test.jar
--
svn:executable = *

Propchange: 
hadoop/hbase/trunk/lib/hadoop-0.17.0-dev.2008-03-04_15-19-00-test.jar
--
svn:mime-type = application/octet-stream

Modified: hadoop/hbase/trunk/src/test/hbase-site.xml
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/hbase-site.xml?rev=634168&r1=634167&r2=634168&view=diff
==
--- hadoop/hbase/trunk/src/test/hbase-site.xml (original)
+++ hadoop/hbase/trunk/src/test/hbase-site.xml Wed Mar  5 21:09:33 2008
@@ -107,4 +107,8 @@
 Keep the maximum filesize small so we split more often in tests.
 
   
+  
+hadoop.log.dir
+${user.dir}/logs
+  
 

Modified: 
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MultiRegionTable.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MultiRegionTable.java?rev=634168&r1=634167&r2=634168&view=diff
==
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MultiRegionTable.java 
(original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MultiRegionTable.java 
Wed Mar  5 21:09:33 2008
@@ -42,6 +42,15 @@
 public class MultiRegionTable extends HBaseTestCase {
   static final Log LOG = LogFactory.getLog(MultiRegionTable.class.getName());
 
+  /** [EMAIL PROTECTED] */
+  @Override
+  public void setUp() throws Exception {
+// These are needed for the new and improved Map/Reduce framework
+System.setProperty("hadoop.log.dir", conf.get("hadoop.log.dir"));
+conf.set("mapred.output.dir", conf.get("hadoop.tmp.dir"));
+super.setUp();
+  }
+
   /**
* Make a multi-region table.  Presumption is that table already exists and
* that there is only one regionserver. Makes it multi-region by filling with
@@ -187,7 +196,7 @@
   if (splitB == null) {
 LOG.info("splitB was already null. Assuming it was previously 
compacted.");
   } else {
-LOG.info("Daughter splitB: " + splitA.getRegionName());
+LOG.info("Daughter splitB: " + splitB.getRegionName());
 
 // Call second split.
 compact(cluster, splitB);

Modified: 
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java
URL: 
http://svn.apache.org/vie

svn commit: r634787 - in /hadoop/hbase/branches/0.1: CHANGES.txt src/java/org/apache/hadoop/hbase/HMaster.java

2008-03-07 Thread jimk
Author: jimk
Date: Fri Mar  7 11:34:58 2008
New Revision: 634787

URL: http://svn.apache.org/viewvc?rev=634787&view=rev
Log:
HBASE-79When HBase needs to be migrated, it should display a message on 
stdout, not just in the logs

Modified:
hadoop/hbase/branches/0.1/CHANGES.txt
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HMaster.java

Modified: hadoop/hbase/branches/0.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/CHANGES.txt?rev=634787&r1=634786&r2=634787&view=diff
==
--- hadoop/hbase/branches/0.1/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.1/CHANGES.txt Fri Mar  7 11:34:58 2008
@@ -33,6 +33,8 @@
HBASE-490   Doubly-assigned .META.; master uses one and clients another
HBASE-496   impossible state for createLease writes 400k lines in about 
15mins
HBASE-472   Passing on edits, we dump all to log
+   HBASE-79When HBase needs to be migrated, it should display a message on
+   stdout, not just in the logs
 
   IMPROVEMENTS
HADOOP-2555 Refactor the HTable#get and HTable#getRow methods to avoid

Modified: 
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HMaster.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HMaster.java?rev=634787&r1=634786&r2=634787&view=diff
==
--- hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HMaster.java 
(original)
+++ hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HMaster.java Fri 
Mar  7 11:34:58 2008
@@ -878,8 +878,11 @@
 fs.mkdirs(rootdir);
 FSUtils.setVersion(fs, rootdir);
   } else if (!FSUtils.checkVersion(fs, rootdir)) {
-throw new IOException("File system needs upgrade. Run " +
-  "the '${HBASE_HOME}/bin/hbase migrate' script");
+// Output on stdout so user sees it in terminal.
+String message = "File system needs to be upgraded. Run " +
+  "the '${HBASE_HOME}/bin/hbase migrate' script.";
+System.out.println("WARNING! " + message + " Master shutting down...");
+throw new IOException(message);
   }
 
   if (!fs.exists(rootRegionDir)) {




svn commit: r636361 - in /hadoop/hbase/branches/0.1: CHANGES.txt src/java/org/apache/hadoop/hbase/HLog.java src/java/org/apache/hadoop/hbase/HRegion.java

2008-03-12 Thread jimk
Author: jimk
Date: Wed Mar 12 08:21:52 2008
New Revision: 636361

URL: http://svn.apache.org/viewvc?rev=636361&view=rev
Log:
HBASE-433 HBASE-251 Region server should delete restore log after successful 
restore, Stuck replaying the edits of crashed machine.

HLog

- don't overwrite oldlogfile in splitLog if it already exists. Rename it and 
copy it into the new oldlogfile. Then delete it once it has been copied.
- use FileUtil.fullyDelete to delete region server log directory.

HRegion

- delete oldlogfile once it has been successfully processed

Modified:
hadoop/hbase/branches/0.1/CHANGES.txt
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HLog.java
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java

Modified: hadoop/hbase/branches/0.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/CHANGES.txt?rev=636361&r1=636360&r2=636361&view=diff
==
--- hadoop/hbase/branches/0.1/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.1/CHANGES.txt Wed Mar 12 08:21:52 2008
@@ -36,6 +36,8 @@
HBASE-79When HBase needs to be migrated, it should display a message on
stdout, not just in the logs
HBASE-495   No server address listed in .META.
+   HBASE-433 HBASE-251 Region server should delete restore log after successful
+   restore, Stuck replaying the edits of crashed machine.
 
   IMPROVEMENTS
HADOOP-2555 Refactor the HTable#get and HTable#getRow methods to avoid

Modified: hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HLog.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HLog.java?rev=636361&r1=636360&r2=636361&view=diff
==
--- hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HLog.java 
(original)
+++ hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HLog.java Wed 
Mar 12 08:21:52 2008
@@ -19,7 +19,6 @@
  */
 package org.apache.hadoop.hbase;
 
-import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Collections;
@@ -35,6 +34,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
@@ -494,7 +494,15 @@
*/
   static void splitLog(Path rootDir, Path srcDir, FileSystem fs,
 Configuration conf) throws IOException {
-Path logfiles[] = fs.listPaths(new Path[] { srcDir });
+if (!fs.exists(srcDir)) {
+  // Nothing to do
+  return;
+}
+FileStatus logfiles[] = fs.listStatus(srcDir);
+if (logfiles == null || logfiles.length == 0) {
+  // Nothing to do
+  return;
+}
 LOG.info("splitting " + logfiles.length + " log(s) in " +
   srcDir.toString());
 Map logWriters =
@@ -503,17 +511,18 @@
   for (int i = 0; i < logfiles.length; i++) {
 if (LOG.isDebugEnabled()) {
   LOG.debug("Splitting " + i + " of " + logfiles.length + ": " +
-logfiles[i]);
+logfiles[i].getPath());
 }
 // Check for empty file.
-if (fs.getFileStatus(logfiles[i]).getLen() <= 0) {
+if (logfiles[i].getLen() <= 0) {
   LOG.info("Skipping " + logfiles[i].toString() +
-" because zero length");
+  " because zero length");
   continue;
 }
 HLogKey key = new HLogKey();
 HLogEdit val = new HLogEdit();
-SequenceFile.Reader in = new SequenceFile.Reader(fs, logfiles[i], 
conf);
+SequenceFile.Reader in =
+  new SequenceFile.Reader(fs, logfiles[i].getPath(), conf);
 try {
   int count = 0;
   for (; in.next(key, val); count++) {
@@ -528,6 +537,17 @@
   ),
   HREGION_OLDLOGFILE_NAME
   );
+  
+  Path oldlogfile = null;
+  SequenceFile.Reader old = null;
+  if (fs.exists(logfile)) {
+LOG.warn("Old log file " + logfile +
+" already exists. Copying existing file to new file");
+oldlogfile = new Path(logfile.toString() + ".old");
+fs.rename(logfile, oldlogfile);
+old = new SequenceFile.Reader(fs, oldlogfile, conf);
+  }
+  
   if (LOG.isDebugEnabled()) {
 LOG.debug("Creating new log file writer for path " + logfile +
   "; map content " + logWriters.toString());
@@ -537,8 +557,22 @@
   // Use copy

svn commit: r636396 - in /hadoop/hbase/trunk: CHANGES.txt src/java/org/apache/hadoop/hbase/regionserver/HLog.java src/java/org/apache/hadoop/hbase/regionserver/HRegion.java

2008-03-12 Thread jimk
Author: jimk
Date: Wed Mar 12 09:20:07 2008
New Revision: 636396

URL: http://svn.apache.org/viewvc?rev=636396&view=rev
Log:
HBASE-433 HBASE-251 Region server should delete restore log after successful 
restore, Stuck replaying the edits of crashed machine.

HLog

- don't overwrite oldlogfile in splitLog if it already exists. Rename it and 
copy it into the new oldlogfile. Then delete it once it has been copied.
- use FileUtil.fullyDelete to delete region server log directory.

HRegion

- delete oldlogfile once it has been successfully processed

Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLog.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=636396&r1=636395&r2=636396&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Wed Mar 12 09:20:07 2008
@@ -33,6 +33,8 @@
HBASE-496   impossible state for createLease writes 400k lines in about 
15mins
HBASE-472   Passing on edits, we dump all to log
HBASE-495   No server address listed in .META.
+   HBASE-433 HBASE-251 Region server should delete restore log after successful
+   restore, Stuck replaying the edits of crashed machine.

   IMPROVEMENTS
HBASE-415   Rewrite leases to use DelayedBlockingQueue instead of polling

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLog.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLog.java?rev=636396&r1=636395&r2=636396&view=diff
==
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLog.java 
(original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLog.java 
Wed Mar 12 09:20:07 2008
@@ -19,7 +19,6 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Collections;
@@ -49,6 +48,7 @@
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.RemoteExceptionHandler;
 
 /**
  * HLog stores all the edits to the HStore.
@@ -304,7 +304,7 @@
* file-number.
*/
   Path computeFilename(final long fn) {
-return new Path(dir, HLOG_DATFILE + new Long(fn).toString());
+return new Path(dir, HLOG_DATFILE + fn);
   }
 
   /**
@@ -503,7 +503,15 @@
*/
   public static void splitLog(Path rootDir, Path srcDir, FileSystem fs,
 Configuration conf) throws IOException {
+if (!fs.exists(srcDir)) {
+  // Nothing to do
+  return;
+}
 FileStatus logfiles[] = fs.listStatus(srcDir);
+if (logfiles == null || logfiles.length == 0) {
+  // Nothing to do
+  return;
+}
 LOG.info("splitting " + logfiles.length + " log(s) in " +
   srcDir.toString());
 Map logWriters =
@@ -512,12 +520,12 @@
   for (int i = 0; i < logfiles.length; i++) {
 if (LOG.isDebugEnabled()) {
   LOG.debug("Splitting " + i + " of " + logfiles.length + ": " +
-logfiles[i]);
+logfiles[i].getPath());
 }
 // Check for empty file.
 if (logfiles[i].getLen() <= 0) {
   LOG.info("Skipping " + logfiles[i].toString() +
-" because zero length");
+  " because zero length");
   continue;
 }
 HLogKey key = new HLogKey();
@@ -538,6 +546,17 @@
   ),
   HREGION_OLDLOGFILE_NAME
   );
+  
+  Path oldlogfile = null;
+  SequenceFile.Reader old = null;
+  if (fs.exists(logfile)) {
+LOG.warn("Old log file " + logfile +
+" already exists. Copying existing file to new file");
+oldlogfile = new Path(logfile.toString() + ".old");
+fs.rename(logfile, oldlogfile);
+old = new SequenceFile.Reader(fs, oldlogfile, conf);
+  }
+
   if (LOG.isDebugEnabled()) {
 LOG.debug("Creating new log file writer for path " + logfile +
   "; map content " + logWriters.toString());
@@ -547,8 +566,22 @@
   // Use copy of regionName; regionName object is reused inside in
   // HStoreKey.getRegionName so its content changes as we iterate.
   logWriters.put(new Text(regionName), w);
+  
+  if (old != null) {

svn commit: r636438 - in /hadoop/hbase/branches/0.1: CHANGES.txt src/java/org/apache/hadoop/hbase/HMaster.java src/java/org/apache/hadoop/hbase/HRegion.java src/test/org/apache/hadoop/hbase/TestEmptyM

2008-03-12 Thread jimk
Author: jimk
Date: Wed Mar 12 11:00:37 2008
New Revision: 636438

URL: http://svn.apache.org/viewvc?rev=636438&view=rev
Log:
HBASE-27 hregioninfo cell empty in meta table

Summary of changes:

HMaster:

- When a row has an empty HRegionInfo (info:regioninfo), log it with the row 
name and the other keys still in the row.

- Log the number of rows with empty HRegionInfo

- Delete the rows

- Make RowMap inner class static, change methods to have package scope to avoid 
synthetic accessors.

- Provide row name to getHRegionInfo so it can issue better log messages

- add method deleteEmptyMetaRows to remove rows with empty HRegionInfo

HRegion

- change removeRegionFromMETA to use deleteAll rather than using a BatchUpdate 
containing deletes for each cell.

TestEmptyMetaInfo

- new test case


Added:

hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/TestEmptyMetaInfo.java
Modified:
hadoop/hbase/branches/0.1/CHANGES.txt
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HMaster.java
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java

Modified: hadoop/hbase/branches/0.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/CHANGES.txt?rev=636438&r1=636437&r2=636438&view=diff
==
--- hadoop/hbase/branches/0.1/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.1/CHANGES.txt Wed Mar 12 11:00:37 2008
@@ -38,6 +38,7 @@
HBASE-495   No server address listed in .META.
HBASE-433 HBASE-251 Region server should delete restore log after successful
restore, Stuck replaying the edits of crashed machine.
+   HBASE-27hregioninfo cell empty in meta table
 
   IMPROVEMENTS
HADOOP-2555 Refactor the HTable#get and HTable#getRow methods to avoid

Modified: 
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HMaster.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HMaster.java?rev=636438&r1=636437&r2=636438&view=diff
==
--- hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HMaster.java 
(original)
+++ hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HMaster.java Wed 
Mar 12 11:00:37 2008
@@ -215,6 +215,7 @@
   // scan we go check if parents can be removed.
   Map> splitParents =
 new HashMap>();
+  List emptyRows = new ArrayList();
   try {
 regionServer = connection.getHRegionConnection(region.getServer());
 scannerId =
@@ -229,10 +230,13 @@
   }
 
   // TODO: Why does this have to be a sorted map?
-  SortedMap results = toRowMap(values).getMap();
-  
-  HRegionInfo info = getHRegionInfo(results);
+  RowMap m = toRowMap(values);
+  SortedMap results = m.getMap();
+
+  Text row = m.getRow();
+  HRegionInfo info = getHRegionInfo(row, results);
   if (info == null) {
+emptyRows.add(row);
 continue;
   }
 
@@ -272,12 +276,23 @@
   }
 } catch (IOException e) {
   LOG.error("Closing scanner",
-RemoteExceptionHandler.checkIOException(e));
+  RemoteExceptionHandler.checkIOException(e));
 }
   }
 
-  // Scan is finished.  Take a look at split parents to see if any we can
-  // clean up.
+  // Scan is finished.
+  
+  // First clean up any meta region rows which had null HRegionInfo's
+
+  if (emptyRows.size() > 0) {
+LOG.warn("Found " + emptyRows.size() +
+" rows with empty HRegionInfo while scanning meta region " +
+region.getRegionName());
+deleteEmptyMetaRows(regionServer, region.getRegionName(), emptyRows);
+  }
+
+  // Take a look at split parents to see if any we can clean up.
+  
   if (splitParents.size() > 0) {
 for (Map.Entry> e:
 splitParents.entrySet()) {
@@ -356,7 +371,7 @@
  * @return True if still has references to parent.
  * @throws IOException
  */
-protected boolean hasReferences(final Text metaRegionName, 
+private boolean hasReferences(final Text metaRegionName, 
   final HRegionInterface srvr, final Text parent,
   SortedMap rowContent, final Text splitColumn)
 throws IOException {
@@ -1263,6 +1278,7 @@
* HMasterRegionInterface
*/
 
+  /** [EMAIL PROTECTED] */
   @SuppressWarnings("unused")
   public HbaseMapWritable regionServerStartup(HServerInfo serverInfo)
   throws IOException {
@@ -2002,8 +2018,9 @@
 private void scanMetaRegion(HRegionInterface server, long scannerId,
 Text regionName)
 throws IOException {
-  ArrayList toDoList = new ArrayList();
-  HashSet regions = new HashSet();
+  List toDoList = new ArrayList();

svn commit: r636589 - in /hadoop/hbase/trunk: ./ src/java/org/apache/hadoop/hbase/master/ src/test/org/apache/hadoop/hbase/

2008-03-12 Thread jimk
Author: jimk
Date: Wed Mar 12 17:33:13 2008
New Revision: 636589

URL: http://svn.apache.org/viewvc?rev=636589&view=rev
Log:
HBASE-27 hregioninfo cell empty in meta table

Summary of changes:

HMaster:

- When a row has an empty HRegionInfo (info:regioninfo), log it with the row 
name and the other keys still in the row.

- Log the number of rows with empty HRegionInfo

- Delete the rows

- Make RowMap inner class static, change methods to have package scope to avoid 
synthetic accessors.

- Provide row name to getHRegionInfo so it can issue better log messages

- add method deleteEmptyMetaRows to remove rows with empty HRegionInfo

HRegion

- change removeRegionFromMETA to use deleteAll rather than using a BatchUpdate 
containing deletes for each cell.

TestEmptyMetaInfo

- new test case


Added:
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestEmptyMetaInfo.java
Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/BaseScanner.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/RowMap.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ServerManager.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/TableOperation.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=636589&r1=636588&r2=636589&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Wed Mar 12 17:33:13 2008
@@ -37,6 +37,7 @@
HBASE-495   No server address listed in .META.
HBASE-433 HBASE-251 Region server should delete restore log after successful
restore, Stuck replaying the edits of crashed machine.
+   HBASE-27hregioninfo cell empty in meta table

   IMPROVEMENTS
HBASE-415   Rewrite leases to use DelayedBlockingQueue instead of polling

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/BaseScanner.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/BaseScanner.java?rev=636589&r1=636588&r2=636589&view=diff
==
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/BaseScanner.java 
(original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/BaseScanner.java 
Wed Mar 12 17:33:13 2008
@@ -21,7 +21,8 @@
 
 import java.io.IOException;
 import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.SortedMap;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Map;
 import java.util.HashMap;
 
@@ -36,7 +37,6 @@
 import org.apache.hadoop.hbase.HServerInfo;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.io.HbaseMapWritable;
 import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.UnknownScannerException;
@@ -124,6 +124,7 @@
 this.initialScanComplete = false;
   }
   
+  /** @return true if initial scan completed successfully */
   public boolean isInitialScanComplete() {
 return initialScanComplete;
   }
@@ -152,6 +153,7 @@
 // scan we go check if parents can be removed.
 Map splitParents =
   new HashMap();
+List emptyRows = new ArrayList();
 try {
   regionServer = 
master.connection.getHRegionConnection(region.getServer());
   scannerId =
@@ -165,8 +167,9 @@
   break;
 }
 
-HRegionInfo info = master.getHRegionInfo(values);
+HRegionInfo info = master.getHRegionInfo(values.getRow(), values);
 if (info == null) {
+  emptyRows.add(values.getRow());
   continue;
 }
 
@@ -206,12 +209,24 @@
 }
   } catch (IOException e) {
 LOG.error("Closing scanner",
-  RemoteExceptionHandler.checkIOException(e));
+RemoteExceptionHandler.checkIOException(e));
   }
 }
 
-// Scan is finished.  Take a look at split parents to see if any we can
-// clean up.
+// Scan is finished.
+
+// First clean up any meta region rows which had null HRegionInfos
+
+if (emptyRows.size() > 0) {
+  LOG.warn("Found " + emptyRows.size() +
+  " rows with empty HRegionInfo while scanning meta region " +
+  region.getRegionName());
+  master.deleteEmptyMetaRows(regionServer, region.getRegionName(),
+  emptyRows);
+}
+
+// Take a look at split parents to see if any we can clean up.
+
 if (splitParents.size() > 0) {
   for (Map.Entry e : splitParents.entrySet()) {
 HRegionInfo h

svn commit: r636999 - in /hadoop/hbase/branches/0.1: CHANGES.txt src/java/org/apache/hadoop/hbase/HStoreFile.java

2008-03-13 Thread jimk
Author: jimk
Date: Thu Mar 13 22:43:34 2008
New Revision: 636999

URL: http://svn.apache.org/viewvc?rev=636999&view=rev
Log:
HBASE-516   HStoreFile.finalKey does not update the final key if it is not the 
top region of a split region

Modified HStoreFile$HalfMapFileReader.finalKey


Modified:
hadoop/hbase/branches/0.1/CHANGES.txt
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStoreFile.java

Modified: hadoop/hbase/branches/0.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/CHANGES.txt?rev=636999&r1=636998&r2=636999&view=diff
==
--- hadoop/hbase/branches/0.1/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.1/CHANGES.txt Thu Mar 13 22:43:34 2008
@@ -41,6 +41,8 @@
HBASE-27hregioninfo cell empty in meta table
HBASE-501   Empty region server address in info:server entry and a
startcode of -1 in .META.
+   HBASE-516   HStoreFile.finalKey does not update the final key if it is not
+   the top region of a split region
 
   IMPROVEMENTS
HADOOP-2555 Refactor the HTable#get and HTable#getRow methods to avoid

Modified: 
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStoreFile.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStoreFile.java?rev=636999&r1=636998&r2=636999&view=diff
==
--- hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStoreFile.java 
(original)
+++ hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStoreFile.java 
Thu Mar 13 22:43:34 2008
@@ -1,880 +1,890 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.DataInput;
-import java.io.DataInputStream;
-import java.io.DataOutput;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.UnsupportedEncodingException;
-import java.util.List;
-import java.util.Random;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.util.Writables;
-import org.apache.hadoop.io.MapFile;
-import org.apache.hadoop.io.SequenceFile;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableComparable;
-import org.onelab.filter.Filter;
-import org.onelab.filter.Key;
-
-
-/**
- * A HStore data file.  HStores usually have one or more of these files.  They
- * are produced by flushing the memcache to disk.
- *
- * Each HStore maintains a bunch of different data files. The filename is a
- * mix of the parent dir, the region name, the column name, and a file
- * identifier. The name may also be a reference to a store file located
- * elsewhere. This class handles all that path-building stuff for you.
- * 
- * An HStoreFile usually tracks 4 things: its parent dir, the region
- * identifier, the column family, and the file identifier.  If you know those
- * four things, you know how to obtain the right HStoreFile.  HStoreFiles may
- * also refernce store files in another region serving either from
- * the top-half of the remote file or from the bottom-half.  Such references
- * are made fast splitting regions.
- * 
- * Plain HStoreFiles are named for a randomly generated id as in:
- * 1278437856009925445  A file by this name is made in both the
- * mapfiles and info subdirectories of a
- * HStore columnfamily directoy: E.g. If the column family is 'anchor:', then
- * under the region directory there is a subdirectory named 'anchor' within
- * which is a 'mapfiles' and 'info' subdirectory.  In each will be found a
- * file named something like 1278437856009925445, one to hold the
- * data in 'mapfiles' and one under 'info&

svn commit: r637002 - in /hadoop/hbase/trunk: CHANGES.txt src/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java

2008-03-13 Thread jimk
Author: jimk
Date: Thu Mar 13 22:57:54 2008
New Revision: 637002

URL: http://svn.apache.org/viewvc?rev=637002&view=rev
Log:
HBASE-516   HStoreFile.finalKey does not update the final key if it is not the 
top region of a split region

Modified HStoreFile$HalfMapFileReader.finalKey

Modified:
hadoop/hbase/trunk/CHANGES.txt

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=637002&r1=637001&r2=637002&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Thu Mar 13 22:57:54 2008
@@ -40,6 +40,8 @@
HBASE-27hregioninfo cell empty in meta table
HBASE-501   Empty region server address in info:server entry and a
startcode of -1 in .META.
+   HBASE-516   HStoreFile.finalKey does not update the final key if it is not
+   the top region of a split region

   IMPROVEMENTS
HBASE-415   Rewrite leases to use DelayedBlockingQueue instead of polling

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java?rev=637002&r1=637001&r2=637002&view=diff
==
--- 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
 (original)
+++ 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
 Thu Mar 13 22:57:54 2008
@@ -19,9 +19,12 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
 import java.io.DataInput;
 import java.io.DataInputStream;
 import java.io.DataOutput;
+import java.io.DataOutputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.UnsupportedEncodingException;
@@ -892,7 +895,14 @@
   } else {
 reset();
 Writable value = new ImmutableBytesWritable();
-key = super.getClosest(midkey, value, true);
+WritableComparable k = super.getClosest(midkey, value, true);
+ByteArrayOutputStream byteout = new ByteArrayOutputStream();
+DataOutputStream out = new DataOutputStream(byteout);
+k.write(out);
+ByteArrayInputStream bytein =
+  new ByteArrayInputStream(byteout.toByteArray());
+DataInputStream in = new DataInputStream(bytein);
+key.readFields(in);
   }
 }
 




svn commit: r639852 - in /hadoop/hbase/branches/0.1: CHANGES.txt src/java/org/apache/hadoop/hbase/HMaster.java

2008-03-21 Thread jimk
Author: jimk
Date: Fri Mar 21 14:24:31 2008
New Revision: 639852

URL: http://svn.apache.org/viewvc?rev=639852&view=rev
Log:
HBASE-537   Wait for hdfs to exit safe mode

Modified:
hadoop/hbase/branches/0.1/CHANGES.txt
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HMaster.java

Modified: hadoop/hbase/branches/0.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/CHANGES.txt?rev=639852&r1=639851&r2=639852&view=diff
==
--- hadoop/hbase/branches/0.1/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.1/CHANGES.txt Fri Mar 21 14:24:31 2008
@@ -47,6 +47,7 @@
the top region of a split region
HBASE-524   Problems with getFull
HBASE-514   table 'does not exist' when it does
+   HBASE-537   Wait for hdfs to exit safe mode

   IMPROVEMENTS
HADOOP-2555 Refactor the HTable#get and HTable#getRow methods to avoid

Modified: 
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HMaster.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HMaster.java?rev=639852&r1=639851&r2=639852&view=diff
==
--- hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HMaster.java 
(original)
+++ hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HMaster.java Fri 
Mar 21 14:24:31 2008
@@ -46,6 +46,8 @@
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.dfs.DistributedFileSystem;
+import org.apache.hadoop.dfs.FSConstants;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -876,10 +878,25 @@
   throws IOException {
 this.conf = conf;
 this.rootdir = rd;
+this.threadWakeFrequency = conf.getInt(THREAD_WAKE_FREQUENCY, 10 * 1000);
 // The filesystem hbase wants to use is probably not what is set into
 // fs.default.name; its value is probably the default.
 this.conf.set("fs.default.name", this.rootdir.toString());
 this.fs = FileSystem.get(conf);
+if (this.fs instanceof DistributedFileSystem) {
+  // Make sure dfs is not in safe mode
+  String message = "Waiting for dfs to exit safe mode...";
+  while (((DistributedFileSystem) fs).setSafeMode(
+  FSConstants.SafeModeAction.SAFEMODE_GET)) {
+System.out.println(message);
+LOG.info(message);
+try {
+  Thread.sleep(this.threadWakeFrequency);
+} catch (InterruptedException e) {
+  //continue
+}
+  }
+}
 this.conf.set(HConstants.HBASE_DIR, this.rootdir.toString());
 this.rand = new Random();
 Path rootRegionDir =
@@ -920,7 +937,6 @@
   throw e;
 }
 
-this.threadWakeFrequency = conf.getInt(THREAD_WAKE_FREQUENCY, 10 * 1000);
 this.numRetries =  conf.getInt("hbase.client.retries.number", 2);
 this.maxRegionOpenTime =
   conf.getLong("hbase.hbasemaster.maxregionopen", 60 * 1000);




svn commit: r639858 - in /hadoop/hbase/trunk: CHANGES.txt src/java/org/apache/hadoop/hbase/master/HMaster.java

2008-03-21 Thread jimk
Author: jimk
Date: Fri Mar 21 14:31:23 2008
New Revision: 639858

URL: http://svn.apache.org/viewvc?rev=639858&view=rev
Log:
HBASE-537   Wait for hdfs to exit safe mode

Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=639858&r1=639857&r2=639858&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Fri Mar 21 14:31:23 2008
@@ -49,7 +49,8 @@
HBASE-528   table 'does not exist' when it does
HBASE-531   Merge tool won't merge two overlapping regions (port HBASE-483 
to
trunk)
-   
+   HBASE-537   Wait for hdfs to exit safe mode
+  
   IMPROVEMENTS
HBASE-415   Rewrite leases to use DelayedBlockingQueue instead of polling
HBASE-35Make BatchUpdate public in the API

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java?rev=639858&r1=639857&r2=639858&view=diff
==
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java 
(original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java Fri 
Mar 21 14:31:23 2008
@@ -36,6 +36,8 @@
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.dfs.DistributedFileSystem;
+import org.apache.hadoop.dfs.FSConstants;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.io.Cell;
@@ -171,10 +173,25 @@
   throws IOException {
 this.conf = conf;
 this.rootdir = rd;
+this.threadWakeFrequency = conf.getInt(THREAD_WAKE_FREQUENCY, 10 * 1000);
 // The filesystem hbase wants to use is probably not what is set into
 // fs.default.name; its value is probably the default.
 this.conf.set("fs.default.name", this.rootdir.toString());
 this.fs = FileSystem.get(conf);
+if (this.fs instanceof DistributedFileSystem) {
+  // Make sure dfs is not in safe mode
+  String message = "Waiting for dfs to exit safe mode...";
+  while (((DistributedFileSystem) fs).setSafeMode(
+  FSConstants.SafeModeAction.SAFEMODE_GET)) {
+System.out.println(message);
+LOG.info(message);
+try {
+  Thread.sleep(this.threadWakeFrequency);
+} catch (InterruptedException e) {
+  //continue
+}
+  }
+}
 this.conf.set(HConstants.HBASE_DIR, this.rootdir.toString());
 this.rand = new Random();
 Path rootRegionDir =
@@ -215,7 +232,6 @@
   throw e;
 }
 
-this.threadWakeFrequency = conf.getInt(THREAD_WAKE_FREQUENCY, 10 * 1000);
 this.numRetries =  conf.getInt("hbase.client.retries.number", 2);
 this.maxRegionOpenTime =
   conf.getLong("hbase.hbasemaster.maxregionopen", 60 * 1000);




svn commit: r640092 - in /hadoop/hbase/branches/0.1: ./ src/java/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/hbase/filter/ src/test/org/apache/hadoop/hbase/filter/

2008-03-22 Thread jimk
Author: jimk
Date: Sat Mar 22 15:06:52 2008
New Revision: 640092

URL: http://svn.apache.org/viewvc?rev=640092&view=rev
Log:
HBASE-476   RegexpRowFilter behaves incorrectly when there are multiple store 
files (Clint Morgan via Jim Kellerman)
HBASE-527   RegexpRowFilter does not work when there are columns from multiple 
families (Clint Morgan via Jim Kellerman)

Added:

hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/filter/TestRowFilterAfterWrite.java

hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/filter/TestRowFilterOnMultipleFamilies.java
Modified:
hadoop/hbase/branches/0.1/CHANGES.txt
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStore.java

hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/filter/InclusiveStopRowFilter.java

hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/filter/PageRowFilter.java

hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/filter/RegExpRowFilter.java

hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/filter/RowFilterInterface.java

hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/filter/RowFilterSet.java

hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/filter/StopRowFilter.java

hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/filter/WhileMatchRowFilter.java

Modified: hadoop/hbase/branches/0.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/CHANGES.txt?rev=640092&r1=640091&r2=640092&view=diff
==
--- hadoop/hbase/branches/0.1/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.1/CHANGES.txt Sat Mar 22 15:06:52 2008
@@ -62,6 +62,10 @@
HBASE-497   RegionServer needs to recover if datanode goes down
HBASE-456   Clearly state which ports need to be opened in order to run 
HBase
HBASE-483   Merge tool won't merge two overlapping regions
+   HBASE-476   RegexpRowFilter behaves incorectly when there are multiple store
+   files (Clint Morgan via Jim Kellerman)
+   HBASE-527   RegexpRowFilter does not work when there are columns from 
+   multiple families (Clint Morgan via Jim Kellerman)
   
 Release 0.16.0
 

Modified: 
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java?rev=640092&r1=640091&r2=640092&view=diff
==
--- hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java 
(original)
+++ hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java Sat 
Mar 22 15:06:52 2008
@@ -1717,12 +1717,14 @@
 private HInternalScannerInterface[] scanners;
 private TreeMap[] resultSets;
 private HStoreKey[] keys;
+private RowFilterInterface filter;
 
 /** Create an HScanner with a handle on many HStores. */
 @SuppressWarnings("unchecked")
 HScanner(Text[] cols, Text firstRow, long timestamp, HStore[] stores,
 RowFilterInterface filter)
 throws IOException {
+  this.filter = filter;
   this.scanners = new HInternalScannerInterface[stores.length];
   try {
 for (int i = 0; i < stores.length; i++) {
@@ -1733,8 +1735,8 @@
   // At least WhileMatchRowFilter will mess up the scan if only
   // one shared across many rows. See HADOOP-2467.
   scanners[i] = stores[i].getScanner(timestamp, cols, firstRow,
-(i > 0 && filter != null)?
-  (RowFilterInterface)WritableUtils.clone(filter, conf): filter);
+filter != null ?
+  (RowFilterInterface)WritableUtils.clone(filter, conf) : filter);
 }
   } catch(IOException e) {
 for (int i = 0; i < this.scanners.length; i++) {
@@ -1835,6 +1837,12 @@
   }
 }
   }
+  
+  if (filter != null && filter.filterNotNull(results)) {
+  LOG.warn("Filter return true on assembled Results in hstore");
+  return moreToFollow == true && this.next(key, results);
+  }
+
   return moreToFollow;
 }
 

Modified: hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStore.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStore.java?rev=640092&r1=640091&r2=640092&view=diff
==
--- hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStore.java 
(original)
+++ hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStore.java Sat 
Mar 22 15:06:52 2008
@@ -30,8 +30,6 @@
 import java.util.SortedMap;
 import java.util.Tre

svn commit: r640106 - in /hadoop/hbase/trunk: ./ src/java/org/apache/hadoop/hbase/filter/ src/java/org/apache/hadoop/hbase/regionserver/ src/test/org/apache/hadoop/hbase/filter/

2008-03-22 Thread jimk
Author: jimk
Date: Sat Mar 22 15:35:36 2008
New Revision: 640106

URL: http://svn.apache.org/viewvc?rev=640106&view=rev
Log:
HBASE-476   RegexpRowFilter behaves incorrectly when there are multiple store 
files (Clint Morgan via Jim Kellerman)
HBASE-527   RegexpRowFilter does not work when there are columns from multiple 
families (Clint Morgan via Jim Kellerman)

Added:

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestRowFilterAfterWrite.java

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestRowFilterOnMultipleFamilies.java
Modified:
hadoop/hbase/trunk/CHANGES.txt

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/InclusiveStopRowFilter.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/PageRowFilter.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RegExpRowFilter.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RowFilterInterface.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RowFilterSet.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/StopRowFilter.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/WhileMatchRowFilter.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStoreScanner.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=640106&r1=640105&r2=640106&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Sat Mar 22 15:35:36 2008
@@ -50,6 +50,10 @@
HBASE-531   Merge tool won't merge two overlapping regions (port HBASE-483 
to
trunk)
HBASE-537   Wait for hdfs to exit safe mode
+   HBASE-476   RegexpRowFilter behaves incorectly when there are multiple store
+   files (Clint Morgan via Jim Kellerman)
+   HBASE-527   RegexpRowFilter does not work when there are columns from 
+   multiple families (Clint Morgan via Jim Kellerman)
   
   IMPROVEMENTS
HBASE-415   Rewrite leases to use DelayedBlockingQueue instead of polling

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/InclusiveStopRowFilter.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/InclusiveStopRowFilter.java?rev=640106&r1=640105&r2=640106&view=diff
==
--- 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/InclusiveStopRowFilter.java
 (original)
+++ 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/InclusiveStopRowFilter.java
 Sat Mar 22 15:35:36 2008
@@ -19,17 +19,9 @@
  */
 package org.apache.hadoop.hbase.filter;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.TreeMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.Text;
 
-
-/*
+/**
  * Subclass of StopRowFilter that filters rows > the stop row,
  * making it include up to the last row but no further.
  */
@@ -49,6 +41,8 @@
 super(stopRowKey);
   }
   
+  /** [EMAIL PROTECTED] */
+  @Override
   public boolean filter(final Text rowKey) {
 if (rowKey == null) {
   if (this.stopRowKey == null) {

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/PageRowFilter.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/PageRowFilter.java?rev=640106&r1=640105&r2=640106&view=diff
==
--- 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/PageRowFilter.java 
(original)
+++ 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/PageRowFilter.java 
Sat Mar 22 15:35:36 2008
@@ -22,7 +22,7 @@
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
-import java.util.TreeMap;
+import java.util.SortedMap;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -137,7 +137,7 @@
* [EMAIL PROTECTED]
*/
   public boolean filterNotNull(@SuppressWarnings("unused")
-  final TreeMap columns) {
+  final SortedMap columns) {
 return filterAllRemaining();
   }
 

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RegExpRowFilter.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RegExpRowFilter.java?rev=640106&r1=640105&r2=640106&view=diff
==
--- 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RegExpRowFilter.java 
(original)
+++ 
hadoop/hbase/trunk/src/java

svn commit: r643487 - in /hadoop/hbase/branches/0.1: CHANGES.txt src/java/org/onelab/filter/BloomFilter.java src/java/org/onelab/filter/DynamicBloomFilter.java src/java/org/onelab/filter/Key.java

2008-04-01 Thread jimk
Author: jimk
Date: Tue Apr  1 10:56:58 2008
New Revision: 643487

URL: http://svn.apache.org/viewvc?rev=643487&view=rev
Log:
HBASE-552   Fix bloom filter bugs (Andrzej Bialecki via Jim Kellerman)

Modified:
hadoop/hbase/branches/0.1/CHANGES.txt
hadoop/hbase/branches/0.1/src/java/org/onelab/filter/BloomFilter.java
hadoop/hbase/branches/0.1/src/java/org/onelab/filter/DynamicBloomFilter.java
hadoop/hbase/branches/0.1/src/java/org/onelab/filter/Key.java

Modified: hadoop/hbase/branches/0.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/CHANGES.txt?rev=643487&r1=643486&r2=643487&view=diff
==
--- hadoop/hbase/branches/0.1/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.1/CHANGES.txt Tue Apr  1 10:56:58 2008
@@ -6,6 +6,7 @@
iteration, edits are aggregated up into the millions
HBASE-505   Region assignments should never time out so long as the region
server reports that it is processing the open request
+   HBASE-552   Fix bloom filter bugs (Andrzej Bialecki via Jim Kellerman)
 
   NEW FEATURES
HBASE-548   Tool to online single region

Modified: hadoop/hbase/branches/0.1/src/java/org/onelab/filter/BloomFilter.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/onelab/filter/BloomFilter.java?rev=643487&r1=643486&r2=643487&view=diff
==
--- hadoop/hbase/branches/0.1/src/java/org/onelab/filter/BloomFilter.java 
(original)
+++ hadoop/hbase/branches/0.1/src/java/org/onelab/filter/BloomFilter.java Tue 
Apr  1 10:56:58 2008
@@ -1,242 +1,240 @@
-/**
- *
- * Copyright (c) 2005, European Commission project OneLab under contract 
034819 (http://www.one-lab.org)
- * All rights reserved.
- * Redistribution and use in source and binary forms, with or 
- * without modification, are permitted provided that the following 
- * conditions are met:
- *  - Redistributions of source code must retain the above copyright 
- *notice, this list of conditions and the following disclaimer.
- *  - Redistributions in binary form must reproduce the above copyright 
- *notice, this list of conditions and the following disclaimer in 
- *the documentation and/or other materials provided with the distribution.
- *  - Neither the name of the University Catholique de Louvain - UCL
- *nor the names of its contributors may be used to endorse or 
- *promote products derived from this software without specific prior 
- *written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
- * POSSIBILITY OF SUCH DAMAGE.
- */
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.onelab.filter;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import java.util.BitSet;
-
-/**
- * Implements a Bloom filter, as defined by Bloom in 1970.
- * 
- * The Bloom filter is a data structure that was introduced in 1970 and that 
has been adopted by 
- * the networking research community in the past decade thanks to the 
bandwidth efficiencies that it
- * offers for the transmission of set membership information between networked 
hosts.  A sender encodes 
- * the information into a bit vector, the Bloom filter, that is more compact 
than a conventional 
- * representation. Computation and space co

svn commit: r643555 - in /hadoop/hbase/branches/0.1: CHANGES.txt src/java/org/apache/hadoop/hbase/HMaster.java

2008-04-01 Thread jimk
Author: jimk
Date: Tue Apr  1 13:32:02 2008
New Revision: 643555

URL: http://svn.apache.org/viewvc?rev=643555&view=rev
Log:
HBASE-507   Add sleep between retries

Modified:
hadoop/hbase/branches/0.1/CHANGES.txt
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HMaster.java

Modified: hadoop/hbase/branches/0.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/CHANGES.txt?rev=643555&r1=643554&r2=643555&view=diff
==
--- hadoop/hbase/branches/0.1/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.1/CHANGES.txt Tue Apr  1 13:32:02 2008
@@ -7,6 +7,7 @@
HBASE-505   Region assignments should never time out so long as the region
server reports that it is processing the open request
HBASE-552   Fix bloom filter bugs (Andrzej Bialecki via Jim Kellerman)
+   HBASE-507   Add sleep between retries
 
   NEW FEATURES
HBASE-548   Tool to online single region

Modified: 
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HMaster.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HMaster.java?rev=643555&r1=643554&r2=643555&view=diff
==
--- hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HMaster.java 
(original)
+++ hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HMaster.java Tue 
Apr  1 13:32:02 2008
@@ -2212,6 +2212,7 @@
   throw RemoteExceptionHandler.checkIOException(e);
 }
   }
+  sleeper.sleep();
 }
 if (LOG.isDebugEnabled()) {
   LOG.debug("process server shutdown scanning root region on " +
@@ -2268,6 +2269,7 @@
 throw RemoteExceptionHandler.checkIOException(e);
   }
 }
+sleeper.sleep();
   }
   return true;
 }
@@ -2404,8 +2406,8 @@
   if (tries == numRetries - 1) {
 throw RemoteExceptionHandler.checkIOException(e);
   }
-  continue;
 }
+sleeper.sleep();
   }
 
   if (reassignRegion) {
@@ -2511,6 +2513,7 @@
 throw RemoteExceptionHandler.checkIOException(e);
   }
 }
+sleeper.sleep();
   }
   return true;
 }
@@ -2556,6 +2559,7 @@
   throw RemoteExceptionHandler.checkIOException(e);
 }
   }
+  sleeper.sleep();
 }
   }
 
@@ -2802,6 +2806,7 @@
 checkFileSystem();
 throw RemoteExceptionHandler.checkIOException(e);
   }
+  sleeper.sleep();
   continue;
 }
 break;




svn commit: r643761 [2/2] - in /hadoop/hbase/trunk: ./ src/java/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/hbase/master/ src/java/org/apache/hadoop/hbase/regionserver/ src/java/org/apache/had

2008-04-01 Thread jimk
Modified: 
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHRegion.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHRegion.java?rev=643761&r1=643760&r2=643761&view=diff
==
--- 
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHRegion.java
 (original)
+++ 
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHRegion.java
 Tue Apr  1 23:58:26 2008
@@ -19,7 +19,6 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Iterator;
@@ -57,7 +56,7 @@
*/
   public void testHRegion() throws IOException {
 try {
-  setup();
+  init();
   locks();
   badPuts();
   basic();
@@ -65,14 +64,7 @@
   batchWrite();
   splitAndMerge();
   read();
-  cleanup();
 } finally {
-  if (r != null) {
-r.close();
-  }
-  if (log != null) {
-log.closeAndDelete();
-  }
   StaticTestEnvironment.shutdownDfs(cluster);
 }
   }
@@ -96,22 +88,36 @@
   HRegionIncommon region = null;
   
   private static int numInserted = 0;
-
-  // Create directories, start mini cluster, etc.
   
-  private void setup() throws IOException {
+  /** [EMAIL PROTECTED] */
+  @Override
+  public void setUp() throws Exception {
+this.conf.set("hbase.hstore.compactionThreshold", "2");
+
+if (!StaticTestEnvironment.debugging) {
+  conf.setLong("hbase.hregion.max.filesize", 65536);
+}
 
 cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
+fs = cluster.getFileSystem();
+
 // Set the hbase.rootdir to be the home directory in mini dfs.
 this.conf.set(HConstants.HBASE_DIR,
   this.cluster.getFileSystem().getHomeDirectory().toString());
+
+super.setUp();
+  }
 
+  // Create directories, start mini cluster, etc.
+  
+  private void init() throws IOException {
 desc = new HTableDescriptor("test");
 desc.addFamily(new HColumnDescriptor("contents:"));
 desc.addFamily(new HColumnDescriptor("anchor:"));
 r = createNewHRegion(desc, null, null);
 log = r.getLog();
 region = new HRegionIncommon(r);
+LOG.info("setup completed.");
   }
 
   // Test basic functionality. Writes to contents:basic and anchor:anchornum-*
@@ -129,7 +135,7 @@
   (ANCHORSTR + k).getBytes(HConstants.UTF8_ENCODING));
   region.commit(writeid, System.currentTimeMillis());
 }
-System.out.println("Write " + NUM_VALS + " rows. Elapsed time: "
+LOG.info("Write " + NUM_VALS + " rows. Elapsed time: "
 + ((System.currentTimeMillis() - startTime) / 1000.0));
 
 // Flush cache
@@ -138,7 +144,7 @@
 
 region.flushcache();
 
-System.out.println("Cache flush elapsed time: "
+LOG.info("Cache flush elapsed time: "
 + ((System.currentTimeMillis() - startTime) / 1000.0));
 
 // Read them back in
@@ -165,8 +171,10 @@
   bodystr, teststr);
 }
 
-System.out.println("Read " + NUM_VALS + " rows. Elapsed time: "
+LOG.info("Read " + NUM_VALS + " rows. Elapsed time: "
 + ((System.currentTimeMillis() - startTime) / 1000.0));
+
+LOG.info("basic completed.");
   }
   
   private void badPuts() {
@@ -198,6 +206,7 @@
   }
 }
 assertTrue("Bad family", exceptionThrown);
+LOG.info("badPuts completed.");
   }
   
   /**
@@ -253,6 +262,7 @@
 }
   }
 }
+LOG.info("locks completed.");
   }
 
   // Test scanners. Writes contents:firstcol and anchor:secondcol
@@ -283,7 +293,7 @@
   numInserted += 2;
 }
 
-System.out.println("Write " + (vals1.length / 2) + " elapsed time: "
+LOG.info("Write " + (vals1.length / 2) + " elapsed time: "
 + ((System.currentTimeMillis() - startTime) / 1000.0));
 
 // 2.  Scan from cache
@@ -321,7 +331,7 @@
 }
 assertEquals("Inserted " + numInserted + " values, but fetched " + 
numFetched, numInserted, numFetched);
 
-System.out.println("Scanned " + (vals1.length / 2)
+LOG.info("Scanned " + (vals1.length / 2)
 + " rows from cache. Elapsed time: "
 + ((System.currentTimeMillis() - startTime) / 1000.0));
 
@@ -331,7 +341,7 @@
 
 region.flushcache();
 
-System.out.println("Cache flush elapsed time: "
+LOG.info("Cache flush elapsed time: "
 + ((System.currentTimeMillis() - startTime) / 1000.0));
 
 // 4.  Scan from disk
@@ -368,7 +378,7 @@
 }
 assertEquals("Inserted " + numInserted + " values, but fetched " + 
numFetched, numInserted, numFetched);
 
-System.out.println("Scanned " + (vals1.length / 2)
+LOG.info("Scanned " + (vals1.length / 2)
 + " rows from disk. Elapsed time: "
 + ((System.currentTimeMillis() - startTime) / 1000.0));
 
@@ -386,7 +396,7 @@
   numInserted += 2;
 }
 
-System.out.println("Write " + (val

svn commit: r644572 - in /hadoop/hbase/branches/0.1: CHANGES.txt lib/hadoop-0.16.0-core.jar lib/hadoop-0.16.0-examples.jar lib/hadoop-0.16.0-test.jar lib/hadoop-0.16.2-core.jar lib/hadoop-0.16.2-examp

2008-04-03 Thread jimk
Author: jimk
Date: Thu Apr  3 18:28:19 2008
New Revision: 644572

URL: http://svn.apache.org/viewvc?rev=644572&view=rev
Log:
HBASE-556   Add 0.16.2 to hbase branch -- if it works

Tested HBase-0.1.1 with Hadoop-0.16.2. All regression tests pass and the 
torture tests (PerformanceEvaluation {sequential,random}Write tests) also work.

HBase-0.1.1 will be built against Hadoop-0.16.2.

Added:
hadoop/hbase/branches/0.1/lib/hadoop-0.16.2-core.jar   (with props)
hadoop/hbase/branches/0.1/lib/hadoop-0.16.2-examples.jar   (with props)
hadoop/hbase/branches/0.1/lib/hadoop-0.16.2-test.jar   (with props)
Removed:
hadoop/hbase/branches/0.1/lib/hadoop-0.16.0-core.jar
hadoop/hbase/branches/0.1/lib/hadoop-0.16.0-examples.jar
hadoop/hbase/branches/0.1/lib/hadoop-0.16.0-test.jar
Modified:
hadoop/hbase/branches/0.1/CHANGES.txt

Modified: hadoop/hbase/branches/0.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/CHANGES.txt?rev=644572&r1=644571&r2=644572&view=diff
==
--- hadoop/hbase/branches/0.1/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.1/CHANGES.txt Thu Apr  3 18:28:19 2008
@@ -12,6 +12,7 @@
havoc of reassignments because open processing is done in series
HBASE-547   UI shows hadoop version, not hbase version
HBASE-561   HBase package does not include LICENSE.txt nor build.xml
+   HBASE-556   Add 0.16.2 to hbase branch -- if it works
 
   NEW FEATURES
HBASE-548   Tool to online single region

Added: hadoop/hbase/branches/0.1/lib/hadoop-0.16.2-core.jar
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/lib/hadoop-0.16.2-core.jar?rev=644572&view=auto
==
Binary file - no diff available.

Propchange: hadoop/hbase/branches/0.1/lib/hadoop-0.16.2-core.jar
--
svn:mime-type = application/octet-stream

Added: hadoop/hbase/branches/0.1/lib/hadoop-0.16.2-examples.jar
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/lib/hadoop-0.16.2-examples.jar?rev=644572&view=auto
==
Binary file - no diff available.

Propchange: hadoop/hbase/branches/0.1/lib/hadoop-0.16.2-examples.jar
--
svn:mime-type = application/octet-stream

Added: hadoop/hbase/branches/0.1/lib/hadoop-0.16.2-test.jar
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/lib/hadoop-0.16.2-test.jar?rev=644572&view=auto
==
Binary file - no diff available.

Propchange: hadoop/hbase/branches/0.1/lib/hadoop-0.16.2-test.jar
--
svn:mime-type = application/octet-stream




svn commit: r644905 - in /hadoop/hbase/trunk: ./ lib/

2008-04-04 Thread jimk
Author: jimk
Date: Fri Apr  4 13:48:46 2008
New Revision: 644905

URL: http://svn.apache.org/viewvc?rev=644905&view=rev
Log:
HBASE-541   Update hadoop jars.

Added:
hadoop/hbase/trunk/lib/hadoop-0.17.0-dev-2008.04.04-13.34.00-core.jar   
(with props)
hadoop/hbase/trunk/lib/hadoop-0.17.0-dev-2008.04.04-13.34.00-test.jar   
(with props)
Removed:
hadoop/hbase/trunk/lib/hadoop-0.17.0-dev.2008-03-04_15-19-00-core.jar
hadoop/hbase/trunk/lib/hadoop-0.17.0-dev.2008-03-04_15-19-00-test.jar
Modified:
hadoop/hbase/trunk/CHANGES.txt

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=644905&r1=644904&r2=644905&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Fri Apr  4 13:48:46 2008
@@ -20,7 +20,8 @@
HBASE-544   Purge startUpdate from internal code and test cases
HBASE-557   HTable.getRow() should receive RowResult objects
HBASE-452   "region offline" should throw IOException, not 
IllegalStateException
-
+   HBASE-541   Update hadoop jars.
+   
 Release 0.1.0
 
   INCOMPATIBLE CHANGES

Added: hadoop/hbase/trunk/lib/hadoop-0.17.0-dev-2008.04.04-13.34.00-core.jar
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/lib/hadoop-0.17.0-dev-2008.04.04-13.34.00-core.jar?rev=644905&view=auto
==
Binary file - no diff available.

Propchange: 
hadoop/hbase/trunk/lib/hadoop-0.17.0-dev-2008.04.04-13.34.00-core.jar
--
svn:mime-type = application/octet-stream

Added: hadoop/hbase/trunk/lib/hadoop-0.17.0-dev-2008.04.04-13.34.00-test.jar
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/lib/hadoop-0.17.0-dev-2008.04.04-13.34.00-test.jar?rev=644905&view=auto
==
Binary file - no diff available.

Propchange: 
hadoop/hbase/trunk/lib/hadoop-0.17.0-dev-2008.04.04-13.34.00-test.jar
--
svn:mime-type = application/octet-stream




svn commit: r644948 - in /hadoop/hbase/trunk: CHANGES.txt src/test/org/apache/hadoop/hbase/filter/TestRowFilterAfterWrite.java

2008-04-04 Thread jimk
Author: jimk
Date: Fri Apr  4 14:55:47 2008
New Revision: 644948

URL: http://svn.apache.org/viewvc?rev=644948&view=rev
Log:
HBASE-563   TestRowFilterAfterWrite erroneously sets master address to 
0.0.0.0:60100 rather than relying on conf

Modified:
hadoop/hbase/trunk/CHANGES.txt

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestRowFilterAfterWrite.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=644948&r1=644947&r2=644948&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Fri Apr  4 14:55:47 2008
@@ -10,6 +10,8 @@
havoc of reassignments because open processing is done in series
HBASE-547   UI shows hadoop version, not hbase version
HBASE-561   HBase package does not include LICENSE.txt nor build.xml
+   HBASE-563   TestRowFilterAfterWrite erroneously sets master address to
+   0.0.0.0:60100 rather than relying on conf
 
   NEW FEATURES
HBASE-548   Tool to online single region

Modified: 
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestRowFilterAfterWrite.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestRowFilterAfterWrite.java?rev=644948&r1=644947&r2=644948&view=diff
==
--- 
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestRowFilterAfterWrite.java
 (original)
+++ 
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestRowFilterAfterWrite.java
 Fri Apr  4 14:55:47 2008
@@ -92,19 +92,6 @@
* [EMAIL PROTECTED]
*/
   @Override
-  public void setUp() throws Exception {
-// this.conf.set(HConstants.HBASE_DIR, 
"file:///opt/benchmark/hadoop/hbase");
-this.conf.set(HConstants.MASTER_ADDRESS, "0.0.0.0:60100");
-// Must call super.setup() after starting mini dfs cluster. Otherwise
-// we get a local file system instead of hdfs
-
-super.setUp();
-  }
-
-  /**
-   * [EMAIL PROTECTED]
-   */
-  @Override
   public void tearDown() throws Exception {
 super.tearDown();
   }




svn commit: r644949 - in /hadoop/hbase/branches/0.1: CHANGES.txt src/test/org/apache/hadoop/hbase/filter/TestRowFilterAfterWrite.java

2008-04-04 Thread jimk
Author: jimk
Date: Fri Apr  4 14:57:55 2008
New Revision: 644949

URL: http://svn.apache.org/viewvc?rev=644949&view=rev
Log:
HBASE-563   TestRowFilterAfterWrite erroneously sets master address to 
0.0.0.0:60100 rather than relying on conf

Modified:
hadoop/hbase/branches/0.1/CHANGES.txt

hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/filter/TestRowFilterAfterWrite.java

Modified: hadoop/hbase/branches/0.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/CHANGES.txt?rev=644949&r1=644948&r2=644949&view=diff
==
--- hadoop/hbase/branches/0.1/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.1/CHANGES.txt Fri Apr  4 14:57:55 2008
@@ -13,6 +13,8 @@
HBASE-547   UI shows hadoop version, not hbase version
HBASE-561   HBase package does not include LICENSE.txt nor build.xml
HBASE-556   Add 0.16.2 to hbase branch -- if it works
+   HBASE-563   TestRowFilterAfterWrite erroneously sets master address to
+   0.0.0.0:60100 rather than relying on conf
 
   NEW FEATURES
HBASE-548   Tool to online single region

Modified: 
hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/filter/TestRowFilterAfterWrite.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/filter/TestRowFilterAfterWrite.java?rev=644949&r1=644948&r2=644949&view=diff
==
--- 
hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/filter/TestRowFilterAfterWrite.java
 (original)
+++ 
hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/filter/TestRowFilterAfterWrite.java
 Fri Apr  4 14:57:55 2008
@@ -91,19 +91,6 @@
* [EMAIL PROTECTED]
*/
   @Override
-  public void setUp() throws Exception {
-// this.conf.set(HConstants.HBASE_DIR, 
"file:///opt/benchmark/hadoop/hbase");
-this.conf.set(HConstants.MASTER_ADDRESS, "0.0.0.0:60100");
-// Must call super.setup() after starting mini dfs cluster. Otherwise
-// we get a local file system instead of hdfs
-
-super.setUp();
-  }
-
-  /**
-   * [EMAIL PROTECTED]
-   */
-  @Override
   public void tearDown() throws Exception {
 super.tearDown();
   }




svn commit: r645014 - in /hadoop/hbase/trunk: ./ src/java/org/apache/hadoop/hbase/master/

2008-04-04 Thread jimk
Author: jimk
Date: Fri Apr  4 19:15:34 2008
New Revision: 645014

URL: http://svn.apache.org/viewvc?rev=645014&view=rev
Log:
HBASE-507   Use Callable pattern to sleep between retries

Added:

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/RetryableMetaOperation.java
Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessRegionClose.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessRegionStatusChange.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/TableOperation.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=645014&r1=645013&r2=645014&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Fri Apr  4 19:15:34 2008
@@ -12,6 +12,7 @@
HBASE-561   HBase package does not include LICENSE.txt nor build.xml
HBASE-563   TestRowFilterAfterWrite erroneously sets master address to
0.0.0.0:60100 rather than relying on conf
+   HBASE-507   Use Callable pattern to sleep between retries
 
   NEW FEATURES
HBASE-548   Tool to online single region

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java?rev=645014&r1=645013&r2=645014&view=diff
==
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java 
(original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java Fri 
Apr  4 19:15:34 2008
@@ -591,6 +591,7 @@
 if (tries == numRetries - 1) {
   throw RemoteExceptionHandler.checkIOException(e);
 }
+sleeper.sleep();
   }
 }
   }

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessRegionClose.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessRegionClose.java?rev=645014&r1=645013&r2=645014&view=diff
==
--- 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessRegionClose.java
 (original)
+++ 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessRegionClose.java
 Fri Apr  4 19:15:34 2008
@@ -21,9 +21,9 @@
 
 import java.io.IOException;
 
+import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.RemoteExceptionHandler;
 
 /**
  * ProcessRegionClose is the way we do post-processing on a closed region. We
@@ -34,8 +34,8 @@
  * necessary.
  */
 class ProcessRegionClose extends ProcessRegionStatusChange {
-  private boolean offlineRegion;
-  private boolean deleteRegion;
+  protected final  boolean offlineRegion;
+  protected final boolean deleteRegion;
 
   /**
   * @param master
@@ -61,41 +61,34 @@
 
   @Override
   protected boolean process() throws IOException {
-for (int tries = 0; tries < numRetries; tries++) {
-  if (master.closed.get()) {
-return true;
-  }
-  LOG.info("region closed: " + regionInfo.getRegionName());
-
-  // Mark the Region as unavailable in the appropriate meta table
-
-  if (!metaRegionAvailable()) {
-// We can't proceed unless the meta region we are going to update
-// is online. metaRegionAvailable() has put this operation on the
-// delayedToDoQueue, so return true so the operation is not put 
-// back on the toDoQueue
-return true;
-  }
-
-  try {
-if (deleteRegion) {
-  HRegion.removeRegionFromMETA(getMetaServer(), metaRegionName,
-regionInfo.getRegionName());
-} else if (offlineRegion) {
-  // offline the region in meta and then note that we've offlined the
-  // region. 
-  HRegion.offlineRegionInMETA(getMetaServer(), metaRegionName,
-regionInfo);
-  master.regionManager.regionOfflined(regionInfo.getRegionName());
-}
-break;
-  } catch (IOException e) {
-if (tries == numRetries - 1) {
-  throw RemoteExceptionHandler.checkIOException(e);
+Boolean result =
+  new RetryableMetaOperation(this.metaRegion, this.master) {
+public Boolean call() throws IOException {
+  LOG.info("region closed: " + regionInfo.getRegionName());
+
+  // Mar

svn commit: r645690 - in /hadoop/hbase/trunk: CHANGES.txt src/java/org/apache/hadoop/hbase/regionserver/HStore.java

2008-04-07 Thread jimk
Author: jimk
Date: Mon Apr  7 14:08:48 2008
New Revision: 645690

URL: http://svn.apache.org/viewvc?rev=645690&view=rev
Log:
HBASE-564   Don't do a cache flush if there are zero entries in the cache.

Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=645690&r1=645689&r2=645690&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Mon Apr  7 14:08:48 2008
@@ -13,6 +13,7 @@
HBASE-563   TestRowFilterAfterWrite erroneously sets master address to
0.0.0.0:60100 rather than relying on conf
HBASE-507   Use Callable pattern to sleep between retries
+   HBASE-564   Don't do a cache flush if there are zero entries in the cache.
 
   NEW FEATURES
HBASE-548   Tool to online single region

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java?rev=645690&r1=645689&r2=645690&view=diff
==
--- 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java 
(original)
+++ 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java 
Mon Apr  7 14:08:48 2008
@@ -617,6 +617,11 @@
   private void internalFlushCache(SortedMap cache,
   long logCacheFlushId) throws IOException {
 
+// Don't flush if there are no entries.
+if (cache.size() == 0) {
+  return;
+}
+
 synchronized(flushLock) {
   // A. Write the Maps out to the disk
   HStoreFile flushedFile = new HStoreFile(conf, fs, basedir,




svn commit: r646016 - in /hadoop/hbase/branches/0.1: CHANGES.txt src/java/org/apache/hadoop/hbase/HRegion.java

2008-04-08 Thread jimk
Author: jimk
Date: Tue Apr  8 10:58:14 2008
New Revision: 646016

URL: http://svn.apache.org/viewvc?rev=646016&view=rev
Log:
HBASE-544   filters generate StackOverflowException

Modified:
hadoop/hbase/branches/0.1/CHANGES.txt
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java

Modified: hadoop/hbase/branches/0.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/CHANGES.txt?rev=646016&r1=646015&r2=646016&view=diff
==
--- hadoop/hbase/branches/0.1/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.1/CHANGES.txt Tue Apr  8 10:58:14 2008
@@ -15,6 +15,7 @@
HBASE-556   Add 0.16.2 to hbase branch -- if it works
HBASE-563   TestRowFilterAfterWrite erroneously sets master address to
0.0.0.0:60100 rather than relying on conf
+   HBASE-544   filters generate StackOverflowException
 
   NEW FEATURES
HBASE-548   Tool to online single region

Modified: 
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java?rev=646016&r1=646015&r2=646016&view=diff
==
--- hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java 
(original)
+++ hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java Tue 
Apr  8 10:58:14 2008
@@ -1792,69 +1792,73 @@
 }
 
 /** [EMAIL PROTECTED] */
+@SuppressWarnings("null")
 public boolean next(HStoreKey key, SortedMap results)
 throws IOException {
   boolean moreToFollow = false;
 
-  // Find the lowest-possible key.
+  do {
+// Find the lowest-possible key.
 
-  Text chosenRow = null;
-  long chosenTimestamp = -1;
-  for (int i = 0; i < this.keys.length; i++) {
-if (scanners[i] != null &&
-(chosenRow == null ||
-(keys[i].getRow().compareTo(chosenRow) < 0) ||
-((keys[i].getRow().compareTo(chosenRow) == 0) &&
-(keys[i].getTimestamp() > chosenTimestamp {
-  chosenRow = new Text(keys[i].getRow());
-  chosenTimestamp = keys[i].getTimestamp();
+Text chosenRow = null;
+long chosenTimestamp = -1;
+for (int i = 0; i < this.keys.length; i++) {
+  if (scanners[i] != null &&
+  (chosenRow == null ||
+  (keys[i].getRow().compareTo(chosenRow) < 0) ||
+  ((keys[i].getRow().compareTo(chosenRow) == 0) &&
+  (keys[i].getTimestamp() > chosenTimestamp {
+chosenRow = new Text(keys[i].getRow());
+chosenTimestamp = keys[i].getTimestamp();
+  }
 }
-  }
-
-  // Store the key and results for each sub-scanner. Merge them as
-  // appropriate.
-  if (chosenTimestamp >= 0) {
-// Here we are setting the passed in key with current row+timestamp
-key.setRow(chosenRow);
-key.setVersion(chosenTimestamp);
-key.setColumn(HConstants.EMPTY_TEXT);
 
-for (int i = 0; i < scanners.length; i++) {
-  if (scanners[i] != null && keys[i].getRow().compareTo(chosenRow) == 
0) {
-// NOTE: We used to do results.putAll(resultSets[i]);
-// but this had the effect of overwriting newer
-// values with older ones. So now we only insert
-// a result if the map does not contain the key.
-for (Map.Entry e : resultSets[i].entrySet()) {
-  if (!results.containsKey(e.getKey())) {
-results.put(e.getKey(), e.getValue());
+// Store the key and results for each sub-scanner. Merge them as
+// appropriate.
+if (chosenTimestamp >= 0) {
+  // Here we are setting the passed in key with current row+timestamp
+  key.setRow(chosenRow);
+  key.setVersion(chosenTimestamp);
+  key.setColumn(HConstants.EMPTY_TEXT);
+
+  for (int i = 0; i < scanners.length; i++) {
+if (scanners[i] != null &&
+keys[i].getRow().compareTo(chosenRow) == 0) {
+  // NOTE: We used to do results.putAll(resultSets[i]);
+  // but this had the effect of overwriting newer
+  // values with older ones. So now we only insert
+  // a result if the map does not contain the key.
+  for (Map.Entry e : resultSets[i].entrySet()) {
+if (!results.containsKey(e.getKey())) {
+  results.put(e.getKey(), e.getValue());
+}
+  }
+  resultSets[i].clear();
+  if (!scanners[i].next(keys[i], resultSets[i])) {
+closeScanner(i);
   }
 }
+ 

svn commit: r646031 - in /hadoop/hbase/trunk: CHANGES.txt src/java/org/apache/hadoop/hbase/regionserver/HRegion.java

2008-04-08 Thread jimk
Author: jimk
Date: Tue Apr  8 11:56:36 2008
New Revision: 646031

URL: http://svn.apache.org/viewvc?rev=646031&view=rev
Log:
HBASE-544   filters generate StackOverflowException

Modified:
hadoop/hbase/trunk/CHANGES.txt

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=646031&r1=646030&r2=646031&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Tue Apr  8 11:56:36 2008
@@ -14,6 +14,7 @@
0.0.0.0:60100 rather than relying on conf
HBASE-507   Use Callable pattern to sleep between retries
HBASE-564   Don't do a cache flush if there are zero entries in the cache.
+   HBASE-544   filters generate StackOverflowException
 
   NEW FEATURES
HBASE-548   Tool to online single region

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=646031&r1=646030&r2=646031&view=diff
==
--- 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
(original)
+++ 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
Tue Apr  8 11:56:36 2008
@@ -1739,69 +1739,73 @@
 }
 
 /** [EMAIL PROTECTED] */
+@SuppressWarnings("null")
 public boolean next(HStoreKey key, SortedMap results)
 throws IOException {
   boolean moreToFollow = false;
 
-  // Find the lowest-possible key.
+  do {
+// Find the lowest-possible key.
 
-  Text chosenRow = null;
-  long chosenTimestamp = -1;
-  for (int i = 0; i < this.keys.length; i++) {
-if (scanners[i] != null &&
-(chosenRow == null ||
-(keys[i].getRow().compareTo(chosenRow) < 0) ||
-((keys[i].getRow().compareTo(chosenRow) == 0) &&
-(keys[i].getTimestamp() > chosenTimestamp {
-  chosenRow = new Text(keys[i].getRow());
-  chosenTimestamp = keys[i].getTimestamp();
+Text chosenRow = null;
+long chosenTimestamp = -1;
+for (int i = 0; i < this.keys.length; i++) {
+  if (scanners[i] != null &&
+  (chosenRow == null ||
+  (keys[i].getRow().compareTo(chosenRow) < 0) ||
+  ((keys[i].getRow().compareTo(chosenRow) == 0) &&
+  (keys[i].getTimestamp() > chosenTimestamp {
+chosenRow = new Text(keys[i].getRow());
+chosenTimestamp = keys[i].getTimestamp();
+  }
 }
-  }
-
-  // Store the key and results for each sub-scanner. Merge them as
-  // appropriate.
-  if (chosenTimestamp >= 0) {
-// Here we are setting the passed in key with current row+timestamp
-key.setRow(chosenRow);
-key.setVersion(chosenTimestamp);
-key.setColumn(HConstants.EMPTY_TEXT);
 
-for (int i = 0; i < scanners.length; i++) {
-  if (scanners[i] != null && keys[i].getRow().compareTo(chosenRow) == 
0) {
-// NOTE: We used to do results.putAll(resultSets[i]);
-// but this had the effect of overwriting newer
-// values with older ones. So now we only insert
-// a result if the map does not contain the key.
-for (Map.Entry e : resultSets[i].entrySet()) {
-  if (!results.containsKey(e.getKey())) {
-results.put(e.getKey(), e.getValue());
+// Store the key and results for each sub-scanner. Merge them as
+// appropriate.
+if (chosenTimestamp >= 0) {
+  // Here we are setting the passed in key with current row+timestamp
+  key.setRow(chosenRow);
+  key.setVersion(chosenTimestamp);
+  key.setColumn(HConstants.EMPTY_TEXT);
+
+  for (int i = 0; i < scanners.length; i++) {
+if (scanners[i] != null &&
+keys[i].getRow().compareTo(chosenRow) == 0) {
+  // NOTE: We used to do results.putAll(resultSets[i]);
+  // but this had the effect of overwriting newer
+  // values with older ones. So now we only insert
+  // a result if the map does not contain the key.
+  for (Map.Entry e : resultSets[i].entrySet()) {
+if (!results.containsKey(e.getKey())) {
+  results.put(e.getKey(), e.getValue());
+}
+  }
+  resultSets[i].clear();
+  if (!scanners[i].next(keys[i], resultSets[i])) {
+closeScanner(i);
   }
 

svn commit: r646033 - /hadoop/hbase/trunk/CHANGES.txt

2008-04-08 Thread jimk
Author: jimk
Date: Tue Apr  8 11:59:06 2008
New Revision: 646033

URL: http://svn.apache.org/viewvc?rev=646033&view=rev
Log:
HBASE-554   filters generate StackOverflowException - fix typo other commit 
went to 544

Modified:
hadoop/hbase/trunk/CHANGES.txt

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=646033&r1=646032&r2=646033&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Tue Apr  8 11:59:06 2008
@@ -14,7 +14,7 @@
0.0.0.0:60100 rather than relying on conf
HBASE-507   Use Callable pattern to sleep between retries
HBASE-564   Don't do a cache flush if there are zero entries in the cache.
-   HBASE-544   filters generate StackOverflowException
+   HBASE-554   filters generate StackOverflowException
 
   NEW FEATURES
HBASE-548   Tool to online single region




svn commit: r646034 - /hadoop/hbase/branches/0.1/CHANGES.txt

2008-04-08 Thread jimk
Author: jimk
Date: Tue Apr  8 12:00:15 2008
New Revision: 646034

URL: http://svn.apache.org/viewvc?rev=646034&view=rev
Log:
HBASE-554   filters generate StackOverflowException - fix typo other commit 
went to 544

Modified:
hadoop/hbase/branches/0.1/CHANGES.txt

Modified: hadoop/hbase/branches/0.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/CHANGES.txt?rev=646034&r1=646033&r2=646034&view=diff
==
--- hadoop/hbase/branches/0.1/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.1/CHANGES.txt Tue Apr  8 12:00:15 2008
@@ -15,7 +15,7 @@
HBASE-556   Add 0.16.2 to hbase branch -- if it works
HBASE-563   TestRowFilterAfterWrite erroneously sets master address to
0.0.0.0:60100 rather than relying on conf
-   HBASE-544   filters generate StackOverflowException
+   HBASE-554   filters generate StackOverflowException
HBASE-567   Reused BatchUpdate instances accumulate BatchOperations
 
   NEW FEATURES




svn commit: r646098 - in /hadoop/hbase/branches/0.1: CHANGES.txt build.xml

2008-04-08 Thread jimk
Author: jimk
Date: Tue Apr  8 15:04:43 2008
New Revision: 646098

URL: http://svn.apache.org/viewvc?rev=646098&view=rev
Log:
Preparing for release 0.1.1

Modified:
hadoop/hbase/branches/0.1/CHANGES.txt
hadoop/hbase/branches/0.1/build.xml

Modified: hadoop/hbase/branches/0.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/CHANGES.txt?rev=646098&r1=646097&r2=646098&view=diff
==
--- hadoop/hbase/branches/0.1/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.1/CHANGES.txt Tue Apr  8 15:04:43 2008
@@ -1,5 +1,7 @@
 HBase Change Log
 
+Release 0.1.1
+
   BUG FIXES
HBASE-550   EOF trying to read reconstruction log stops region deployment
HBASE-551   Master stuck splitting server logs in shutdown loop; on each

Modified: hadoop/hbase/branches/0.1/build.xml
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/build.xml?rev=646098&r1=646097&r2=646098&view=diff
==
--- hadoop/hbase/branches/0.1/build.xml (original)
+++ hadoop/hbase/branches/0.1/build.xml Tue Apr  8 15:04:43 2008
@@ -18,7 +18,7 @@
 -->
 
 
-  
+  
   
   
   




svn commit: r646100 - /hadoop/hbase/branches/0.1/build.xml

2008-04-08 Thread jimk
Author: jimk
Date: Tue Apr  8 15:13:05 2008
New Revision: 646100

URL: http://svn.apache.org/viewvc?rev=646100&view=rev
Log:
Preparing for release 0.1.1

Modified:
hadoop/hbase/branches/0.1/build.xml

Modified: hadoop/hbase/branches/0.1/build.xml
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/build.xml?rev=646100&r1=646099&r2=646100&view=diff
==
--- hadoop/hbase/branches/0.1/build.xml (original)
+++ hadoop/hbase/branches/0.1/build.xml Tue Apr  8 15:13:05 2008
@@ -18,7 +18,7 @@
 -->
 
 
-  
+  
   
   
   




svn commit: r646116 - /hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/TestSerialization.java

2008-04-08 Thread jimk
Author: jimk
Date: Tue Apr  8 15:46:53 2008
New Revision: 646116

URL: http://svn.apache.org/viewvc?rev=646116&view=rev
Log:
HBASE-567 Remove TestSerialization from 0.1 branch. API is totally different in 
HTable.

Removed:

hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/TestSerialization.java



svn commit: r646398 - /hadoop/hbase/branches/0.1/CHANGES.txt

2008-04-09 Thread jimk
Author: jimk
Date: Wed Apr  9 08:11:27 2008
New Revision: 646398

URL: http://svn.apache.org/viewvc?rev=646398&view=rev
Log:
HBASE-554 Added attribution for patch. Thanks Clint and thanks David for 
testing it!

Modified:
hadoop/hbase/branches/0.1/CHANGES.txt

Modified: hadoop/hbase/branches/0.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/CHANGES.txt?rev=646398&r1=646397&r2=646398&view=diff
==
--- hadoop/hbase/branches/0.1/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.1/CHANGES.txt Wed Apr  9 08:11:27 2008
@@ -17,7 +17,8 @@
HBASE-556   Add 0.16.2 to hbase branch -- if it works
HBASE-563   TestRowFilterAfterWrite erroneously sets master address to
0.0.0.0:60100 rather than relying on conf
-   HBASE-554   filters generate StackOverflowException
+   HBASE-554   filters generate StackOverflowException (Clint Morgan via
+   Jim Kellerman)
HBASE-567   Reused BatchUpdate instances accumulate BatchOperations
 
   NEW FEATURES




svn commit: r647341 - /hadoop/hbase/tags/0.1.1/

2008-04-11 Thread jimk
Author: jimk
Date: Fri Apr 11 15:48:27 2008
New Revision: 647341

URL: http://svn.apache.org/viewvc?rev=647341&view=rev
Log:
HBase 0.1.1 release

Added:
hadoop/hbase/tags/0.1.1/
  - copied from r647340, hadoop/hbase/branches/0.1/



svn commit: r647342 - in /hadoop/hbase/branches/0.1: CHANGES.txt build.xml

2008-04-11 Thread jimk
Author: jimk
Date: Fri Apr 11 15:54:10 2008
New Revision: 647342

URL: http://svn.apache.org/viewvc?rev=647342&view=rev
Log:
Open 0.1 branch for changes for HBase-0.1.2

Modified:
hadoop/hbase/branches/0.1/CHANGES.txt
hadoop/hbase/branches/0.1/build.xml

Modified: hadoop/hbase/branches/0.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/CHANGES.txt?rev=647342&r1=647341&r2=647342&view=diff
==
--- hadoop/hbase/branches/0.1/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.1/CHANGES.txt Fri Apr 11 15:54:10 2008
@@ -1,5 +1,8 @@
 HBase Change Log
 
+Unreleased changes
+  BUG FIXES
+
 Release 0.1.1
 
   BUG FIXES

Modified: hadoop/hbase/branches/0.1/build.xml
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/build.xml?rev=647342&r1=647341&r2=647342&view=diff
==
--- hadoop/hbase/branches/0.1/build.xml (original)
+++ hadoop/hbase/branches/0.1/build.xml Fri Apr 11 15:54:10 2008
@@ -18,7 +18,7 @@
 -->
 
 
-  
+  
   
   
   




svn commit: r647359 - in /hadoop/hbase/site: author/src/documentation/content/xdocs/releases.xml author/src/documentation/content/xdocs/site.xml publish/docs/ publish/docs/current

2008-04-11 Thread jimk
Author: jimk
Date: Fri Apr 11 18:02:00 2008
New Revision: 647359

URL: http://svn.apache.org/viewvc?rev=647359&view=rev
Log:
Updated site for release 0.1.1

Modified:
hadoop/hbase/site/author/src/documentation/content/xdocs/releases.xml
hadoop/hbase/site/author/src/documentation/content/xdocs/site.xml
hadoop/hbase/site/publish/docs/   (props changed)
hadoop/hbase/site/publish/docs/current

Modified: hadoop/hbase/site/author/src/documentation/content/xdocs/releases.xml
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/site/author/src/documentation/content/xdocs/releases.xml?rev=647359&r1=647358&r2=647359&view=diff
==
--- hadoop/hbase/site/author/src/documentation/content/xdocs/releases.xml 
(original)
+++ hadoop/hbase/site/author/src/documentation/content/xdocs/releases.xml Fri 
Apr 11 18:02:00 2008
@@ -35,6 +35,12 @@
 
   News
   
+11 April, 2008: release 0.1.1 available
+This release contains bugfixes that enhance the reliability of 
HBase.
+See the release notes (above) for details. 
+We recommend all upgraded to this latest version.
+  
+  
 27 March, 2008: release 0.1.0 available
This is our first release as a subproject of Hadoop.
 Previously, HBase was a Hadoop contrib project. HBase 0.1.0 is

Modified: hadoop/hbase/site/author/src/documentation/content/xdocs/site.xml
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/site/author/src/documentation/content/xdocs/site.xml?rev=647359&r1=647358&r2=647359&view=diff
==
--- hadoop/hbase/site/author/src/documentation/content/xdocs/site.xml (original)
+++ hadoop/hbase/site/author/src/documentation/content/xdocs/site.xml Fri Apr 
11 18:02:00 2008
@@ -24,7 +24,7 @@
   
 

-
+
   
 
   
@@ -42,8 +42,8 @@
 http://www.apache.org/foundation/thanks.html"/>
 http://hadoop.apache.org/hbase/hadoop-default.html"; 
/>
 http://hadoop.apache.org/hbase/docs/";>
-  
-  
+  
+  
 
   
  

Propchange: hadoop/hbase/site/publish/docs/
--
--- svn:externals (original)
+++ svn:externals Fri Apr 11 18:02:00 2008
@@ -1 +1 @@
-r0.1.0 http://svn.apache.org/repos/asf/hadoop/hbase/tags/0.1.0/docs
+r0.1.1 http://svn.apache.org/repos/asf/hadoop/hbase/tags/0.1.1/docs

Modified: hadoop/hbase/site/publish/docs/current
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/site/publish/docs/current?rev=647359&r1=647358&r2=647359&view=diff
==
--- hadoop/hbase/site/publish/docs/current (original)
+++ hadoop/hbase/site/publish/docs/current Fri Apr 11 18:02:00 2008
@@ -1 +1 @@
-link r0.1.0
\ No newline at end of file
+link r0.1.1
\ No newline at end of file




svn commit: r647517 - in /hadoop/hbase/site/publish: ./ skin/images/

2008-04-12 Thread jimk
Author: jimk
Date: Sat Apr 12 19:28:34 2008
New Revision: 647517

URL: http://svn.apache.org/viewvc?rev=647517&view=rev
Log:
Updated site for release 0.1.1

Modified:
hadoop/hbase/site/publish/credits.html
hadoop/hbase/site/publish/index.html
hadoop/hbase/site/publish/index.pdf
hadoop/hbase/site/publish/irc.html
hadoop/hbase/site/publish/issue_tracking.html
hadoop/hbase/site/publish/linkmap.html
hadoop/hbase/site/publish/mailing_lists.html
hadoop/hbase/site/publish/releases.html
hadoop/hbase/site/publish/releases.pdf
hadoop/hbase/site/publish/skin/images/rc-b-l-15-1body-2menu-3menu.png
hadoop/hbase/site/publish/skin/images/rc-b-r-15-1body-2menu-3menu.png

hadoop/hbase/site/publish/skin/images/rc-b-r-5-1header-2tab-selected-3tab-selected.png

hadoop/hbase/site/publish/skin/images/rc-t-l-5-1header-2searchbox-3searchbox.png

hadoop/hbase/site/publish/skin/images/rc-t-l-5-1header-2tab-selected-3tab-selected.png

hadoop/hbase/site/publish/skin/images/rc-t-l-5-1header-2tab-unselected-3tab-unselected.png
hadoop/hbase/site/publish/skin/images/rc-t-r-15-1body-2menu-3menu.png

hadoop/hbase/site/publish/skin/images/rc-t-r-5-1header-2searchbox-3searchbox.png

hadoop/hbase/site/publish/skin/images/rc-t-r-5-1header-2tab-selected-3tab-selected.png

hadoop/hbase/site/publish/skin/images/rc-t-r-5-1header-2tab-unselected-3tab-unselected.png
hadoop/hbase/site/publish/version_control.html

Modified: hadoop/hbase/site/publish/credits.html
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/site/publish/credits.html?rev=647517&r1=647516&r2=647517&view=diff
==
--- hadoop/hbase/site/publish/credits.html (original)
+++ hadoop/hbase/site/publish/credits.html Sat Apr 12 19:28:34 2008
@@ -117,7 +117,7 @@
 Documentation
 
 
-http://hadoop.apache.org/hbase/docs/r0.1.0/";>Current
+Current
 
 
 Developers

Modified: hadoop/hbase/site/publish/index.html
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/site/publish/index.html?rev=647517&r1=647516&r2=647517&view=diff
==
--- hadoop/hbase/site/publish/index.html (original)
+++ hadoop/hbase/site/publish/index.html Sat Apr 12 19:28:34 2008
@@ -117,7 +117,7 @@
 Documentation
 
 
-http://hadoop.apache.org/hbase/docs/r0.1.0/";>Current
+Current
 
 
 Developers
@@ -188,7 +188,7 @@
 
 
 
-http://hadoop.apache.org/hbase/docs/r0.1.0/";>Learn about HBase by 
reading the documentation.
+http://hadoop.apache.org/hbase/docs/r0.1.1/";>Learn about HBase by 
reading the documentation.
 
 
 Download HBase from the release page.

Modified: hadoop/hbase/site/publish/index.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/site/publish/index.pdf?rev=647517&r1=647516&r2=647517&view=diff
==
--- hadoop/hbase/site/publish/index.pdf (original)
+++ hadoop/hbase/site/publish/index.pdf Sat Apr 12 19:28:34 2008
@@ -115,7 +115,7 @@
 /Rect [ 108.0 504.066 165.648 492.066 ]
 /C [ 0 0 0 ]
 /Border [ 0 0 0 ]
-/A << /URI (http://hadoop.apache.org/hbase/docs/r0.1.0/)
+/A << /URI (http://hadoop.apache.org/hbase/docs/r0.1.1/)
 /S /URI >>
 /H /I
 >>

Modified: hadoop/hbase/site/publish/irc.html
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/site/publish/irc.html?rev=647517&r1=647516&r2=647517&view=diff
==
--- hadoop/hbase/site/publish/irc.html (original)
+++ hadoop/hbase/site/publish/irc.html Sat Apr 12 19:28:34 2008
@@ -117,7 +117,7 @@
 Documentation
 
 
-http://hadoop.apache.org/hbase/docs/r0.1.0/";>Current
+Current
 
 
 Developers

Modified: hadoop/hbase/site/publish/issue_tracking.html
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/site/publish/issue_tracking.html?rev=647517&r1=647516&r2=647517&view=diff
==
--- hadoop/hbase/site/publish/issue_tracking.html (original)
+++ hadoop/hbase/site/publish/issue_tracking.html Sat Apr 12 19:28:34 2008
@@ -117,7 +117,7 @@
 Documentation
 
 
-http://hadoop.apache.org/hbase/docs/r0.1.0/";>Current
+Current
 
 
 Developers

Modified: hadoop/hbase/site/publish/linkmap.html
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/site/publish/linkmap.html?rev=647517&r1=647516&r2=647517&view=diff
==
--- hadoop/hbase/site/publish/linkmap.html (original)
+++ hadoop/hbase/site/publish/linkmap.html Sat Apr 12 19:28:34 2008
@@ -117,7 +117,7 @@
 Documentation
 
 
-http://hadoop.apache.org/hbase/docs/r0.1.0/";>Current
+Current
 
 
 Developers
@@ -207,7 +207,7 @@
 
 
 
-http://hadoop.apache.org/hbase/docs/r0.1.

svn commit: r647530 - /hadoop/hbase/site/publish/docs/current

2008-04-12 Thread jimk
Author: jimk
Date: Sat Apr 12 22:43:36 2008
New Revision: 647530

URL: http://svn.apache.org/viewvc?rev=647530&view=rev
Log:
Fix release docs

Removed:
hadoop/hbase/site/publish/docs/current



svn commit: r647531 - /hadoop/hbase/site/publish/index.html

2008-04-12 Thread jimk
Author: jimk
Date: Sat Apr 12 23:34:10 2008
New Revision: 647531

URL: http://svn.apache.org/viewvc?rev=647531&view=rev
Log:
Fix site

Modified:
hadoop/hbase/site/publish/index.html

Modified: hadoop/hbase/site/publish/index.html
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/site/publish/index.html?rev=647531&r1=647530&r2=647531&view=diff
==
--- hadoop/hbase/site/publish/index.html (original)
+++ hadoop/hbase/site/publish/index.html Sat Apr 12 23:34:10 2008
@@ -117,7 +117,7 @@
 Documentation
 
 
-Current
+Current
 
 
 Developers




svn commit: r647944 - in /hadoop/hbase/branches/0.1: CHANGES.txt src/java/org/apache/hadoop/hbase/HRegionServer.java

2008-04-14 Thread jimk
Author: jimk
Date: Mon Apr 14 12:23:24 2008
New Revision: 647944

URL: http://svn.apache.org/viewvc?rev=647944&view=rev
Log:
HBASE-577   NPE getting scanner

Modified:
hadoop/hbase/branches/0.1/CHANGES.txt

hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegionServer.java

Modified: hadoop/hbase/branches/0.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/CHANGES.txt?rev=647944&r1=647943&r2=647944&view=diff
==
--- hadoop/hbase/branches/0.1/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.1/CHANGES.txt Mon Apr 14 12:23:24 2008
@@ -2,6 +2,7 @@
 
 Unreleased changes
   BUG FIXES
+   HBASE-577   NPE getting scanner
 
 Release 0.1.1
 

Modified: 
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegionServer.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegionServer.java?rev=647944&r1=647943&r2=647944&view=diff
==
--- 
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegionServer.java 
(original)
+++ 
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegionServer.java 
Mon Apr 14 12:23:24 2008
@@ -1475,6 +1475,19 @@
   final long timestamp, final RowFilterInterface filter)
 throws IOException {
 checkOpen();
+NullPointerException npe = null;
+if (regionName == null) {
+  npe = new NullPointerException("regionName is null");
+} else if (cols == null) {
+  npe = new NullPointerException("columns to scan is null");
+} else if (firstRow == null) {
+  npe = new NullPointerException("firstRow for scanner is null");
+}
+if (npe != null) {
+  IOException io = new IOException("Invalid arguments to openScanner");
+  io.initCause(npe);
+  throw io;
+}
 requestCount.incrementAndGet();
 try {
   HRegion r = getRegion(regionName);




svn commit: r647953 - in /hadoop/hbase/trunk: CHANGES.txt src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java

2008-04-14 Thread jimk
Author: jimk
Date: Mon Apr 14 12:27:17 2008
New Revision: 647953

URL: http://svn.apache.org/viewvc?rev=647953&view=rev
Log:
NPE getting scanner

Modified:
hadoop/hbase/trunk/CHANGES.txt

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=647953&r1=647952&r2=647953&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Mon Apr 14 12:27:17 2008
@@ -1,6 +1,7 @@
 Hbase Change Log
   INCOMPATIBLE CHANGES
HBASE-521   Improve client scanner interface
+   HBASE-577   NPE getting scanner
   
   BUG FIXES
HBASE-550   EOF trying to read reconstruction log stops region deployment

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java?rev=647953&r1=647952&r2=647953&view=diff
==
--- 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 (original)
+++ 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 Mon Apr 14 12:27:17 2008
@@ -1114,6 +1114,19 @@
 final long timestamp, final RowFilterInterface filter)
   throws IOException {
 checkOpen();
+NullPointerException npe = null;
+if (regionName == null) {
+  npe = new NullPointerException("regionName is null");
+} else if (cols == null) {
+  npe = new NullPointerException("columns to scan is null");
+} else if (firstRow == null) {
+  npe = new NullPointerException("firstRow for scanner is null");
+}
+if (npe != null) {
+  IOException io = new IOException("Invalid arguments to openScanner");
+  io.initCause(npe);
+  throw io;
+}
 requestCount.incrementAndGet();
 try {
   HRegion r = getRegion(regionName);




svn commit: r647963 - /hadoop/hbase/trunk/CHANGES.txt

2008-04-14 Thread jimk
Author: jimk
Date: Mon Apr 14 12:36:44 2008
New Revision: 647963

URL: http://svn.apache.org/viewvc?rev=647963&view=rev
Log:
Update CHANGES.txt - bug fix was listed in wrong section

Modified:
hadoop/hbase/trunk/CHANGES.txt

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=647963&r1=647962&r2=647963&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Mon Apr 14 12:36:44 2008
@@ -6,7 +6,6 @@
 
   INCOMPATIBLE CHANGES
HBASE-521   Improve client scanner interface
-   HBASE-577   NPE getting scanner
   
   BUG FIXES
HBASE-550   EOF trying to read reconstruction log stops region deployment
@@ -24,6 +23,7 @@
HBASE-564   Don't do a cache flush if there are zero entries in the cache.
HBASE-554   filters generate StackOverflowException
HBASE-567   Reused BatchUpdate instances accumulate BatchOperations
+   HBASE-577   NPE getting scanner
 
   NEW FEATURES
HBASE-548   Tool to online single region




svn commit: r648000 - in /hadoop/hbase/branches/0.1: CHANGES.txt src/java/org/apache/hadoop/hbase/HLog.java

2008-04-14 Thread jimk
Author: jimk
Date: Mon Apr 14 14:12:07 2008
New Revision: 648000

URL: http://svn.apache.org/viewvc?rev=648000&view=rev
Log:
Unexpected exits corrupt DFS

Modified:
hadoop/hbase/branches/0.1/CHANGES.txt
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HLog.java

Modified: hadoop/hbase/branches/0.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/CHANGES.txt?rev=648000&r1=647999&r2=648000&view=diff
==
--- hadoop/hbase/branches/0.1/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.1/CHANGES.txt Mon Apr 14 14:12:07 2008
@@ -4,6 +4,8 @@
   BUG FIXES
HBASE-577   NPE getting scanner
HBASE-574   HBase does not load hadoop native libs (Rong-En Fan via Stack).
+   HBASE-11Unexpected exits corrupt DFS - best we can do until we have at
+   least a subset of HADOOP-1700
HBASE-573   HBase does not read hadoop-*.xml for dfs configuration after
moving out hadoop/contrib
 

Modified: hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HLog.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HLog.java?rev=648000&r1=647999&r2=648000&view=diff
==
--- hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HLog.java 
(original)
+++ hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HLog.java Mon 
Apr 14 14:12:07 2008
@@ -20,6 +20,7 @@
 package org.apache.hadoop.hbase;
 
 import java.io.FileNotFoundException;
+import java.io.EOFException;
 import java.io.IOException;
 import java.util.Collections;
 import java.util.HashMap;
@@ -589,8 +590,11 @@
 LOG.debug("Applied " + count + " total edits");
   }
 } catch (IOException e) {
-  LOG.warn("Exception processing " + logfiles[i].getPath() +
-" -- continuing. Possible DATA LOSS!", e);
+  e = RemoteExceptionHandler.checkIOException(e);
+  if (!(e instanceof EOFException)) {
+LOG.warn("Exception processing " + logfiles[i].getPath() +
+" -- continuing. Possible DATA LOSS!", e);
+  }
 } finally {
   try {
 in.close();




svn commit: r648030 - in /hadoop/hbase/trunk: CHANGES.txt src/java/org/apache/hadoop/hbase/regionserver/HLog.java

2008-04-14 Thread jimk
Author: jimk
Date: Mon Apr 14 15:07:11 2008
New Revision: 648030

URL: http://svn.apache.org/viewvc?rev=648030&view=rev
Log:
HBASE-11Unexpected exits corrupt DFS

Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLog.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=648030&r1=648029&r2=648030&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Mon Apr 14 15:07:11 2008
@@ -3,6 +3,7 @@
HBASE-574   HBase does not load hadoop native libs (Rong-En Fan via Stack)
HBASE-573   HBase does not read hadoop-*.xml for dfs configuration after 
moving out hadoop/contrib
+   HBASE-11Unexpected exits corrupt DFS
 
 Release 0.1.1 - 04/11/2008
 

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLog.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLog.java?rev=648030&r1=648029&r2=648030&view=diff
==
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLog.java 
(original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLog.java 
Mon Apr 14 15:07:11 2008
@@ -20,6 +20,7 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import java.io.FileNotFoundException;
+import java.io.EOFException;
 import java.io.IOException;
 import java.util.Collections;
 import java.util.HashMap;
@@ -296,7 +297,7 @@
   private void deleteLogFile(final Path p, final Long seqno) throws 
IOException {
 LOG.info("removing old log file " + p.toString() +
   " whose highest sequence/edit id is " + seqno);
-this.fs.delete(p);
+this.fs.delete(p, true);
   }
 
   /**
@@ -314,7 +315,7 @@
*/
   public void closeAndDelete() throws IOException {
 close();
-fs.delete(dir);
+fs.delete(dir, true);
   }
 
   /**
@@ -588,7 +589,7 @@
   w.append(oldkey, oldval);
 }
 old.close();
-fs.delete(oldlogfile);
+fs.delete(oldlogfile, true);
   }
 }
 w.append(key, val);
@@ -597,8 +598,11 @@
 LOG.debug("Applied " + count + " total edits");
   }
 } catch (IOException e) {
-  LOG.warn("Exception processing " + logfiles[i].getPath() +
-" -- continuing. Possible DATA LOSS!", e);
+  e = RemoteExceptionHandler.checkIOException(e);
+  if (!(e instanceof EOFException)) {
+LOG.warn("Exception processing " + logfiles[i].getPath() +
+" -- continuing. Possible DATA LOSS!", e);
+  }
 } finally {
   try {
 in.close();
@@ -610,7 +614,7 @@
   // nothing we can do about it. Replaying it, it could work but we
   // could be stuck replaying for ever. Just continue though we
   // could have lost some edits.
-  fs.delete(logfiles[i].getPath());
+  fs.delete(logfiles[i].getPath(), true);
 }
   }
 } finally {




svn commit: r648425 - in /hadoop/hbase/branches/0.1: CHANGES.txt src/java/org/apache/hadoop/hbase/HMaster.java src/java/org/apache/hadoop/hbase/util/FSUtils.java src/test/org/apache/hadoop/hbase/util/

2008-04-15 Thread jimk
Author: jimk
Date: Tue Apr 15 14:44:13 2008
New Revision: 648425

URL: http://svn.apache.org/viewvc?rev=648425&view=rev
Log:
HBASE-575   master dies with stack overflow error if rootdir isn't qualified

Added:

hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/util/TestRootPath.java
Modified:
hadoop/hbase/branches/0.1/CHANGES.txt
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HMaster.java
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/util/FSUtils.java

Modified: hadoop/hbase/branches/0.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/CHANGES.txt?rev=648425&r1=648424&r2=648425&view=diff
==
--- hadoop/hbase/branches/0.1/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.1/CHANGES.txt Tue Apr 15 14:44:13 2008
@@ -10,6 +10,7 @@
moving out hadoop/contrib
HBASE-12when hbase regionserver restarts, it says "impossible state for
createLease()"
+   HBASE-575   master dies with stack overflow error if rootdir isn't qualified
 
   IMPROVEMENTS
HBASE-559   MR example job to count table rows

Modified: 
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HMaster.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HMaster.java?rev=648425&r1=648424&r2=648425&view=diff
==
--- hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HMaster.java 
(original)
+++ hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HMaster.java Tue 
Apr 15 14:44:13 2008
@@ -880,6 +880,14 @@
   public HMaster(Path rd, HServerAddress address, HBaseConfiguration conf)
   throws IOException {
 this.conf = conf;
+try {
+  FSUtils.validateRootPath(rd);
+} catch (IOException e) {
+  LOG.fatal("Not starting HMaster because the root directory path '" +
+  rd.toString() + "' is not valid. Check the setting of the" +
+  " configuration parameter '" + HBASE_DIR + "'", e);
+  throw e;
+}
 this.rootdir = rd;
 this.threadWakeFrequency = conf.getInt(THREAD_WAKE_FREQUENCY, 10 * 1000);
 // The filesystem hbase wants to use is probably not what is set into

Modified: 
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/util/FSUtils.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/util/FSUtils.java?rev=648425&r1=648424&r2=648425&view=diff
==
--- 
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/util/FSUtils.java 
(original)
+++ 
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/util/FSUtils.java 
Tue Apr 15 14:44:13 2008
@@ -21,6 +21,8 @@
 
 import java.io.DataInputStream;
 import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -121,5 +123,24 @@
 s.writeUTF(HConstants.FILE_SYSTEM_VERSION);
 s.close();
   }
-
+  
+  /**
+   * Verifies root directory path is a valid URI with a scheme
+   * 
+   * @param root root directory path
+   * @throws IOException if not a valid URI with a scheme
+   */
+  public static void validateRootPath(Path root) throws IOException {
+try {
+  URI rootURI = new URI(root.toString());
+  String scheme = rootURI.getScheme();
+  if (scheme == null) {
+throw new IOException("Root directory does not contain a scheme");
+  }
+} catch (URISyntaxException e) {
+  IOException io = new IOException("Root directory path is not a valid 
URI");
+  io.initCause(e);
+  throw io;
+}
+  }
 }

Added: 
hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/util/TestRootPath.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/util/TestRootPath.java?rev=648425&view=auto
==
--- 
hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/util/TestRootPath.java
 (added)
+++ 
hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/util/TestRootPath.java
 Tue Apr 15 14:44:13 2008
@@ -0,0 +1,63 @@
+/**
+ * Copyright 2008 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License a

svn commit: r648427 - in /hadoop/hbase/trunk: CHANGES.txt src/java/org/apache/hadoop/hbase/master/HMaster.java src/java/org/apache/hadoop/hbase/util/FSUtils.java src/test/org/apache/hadoop/hbase/util/

2008-04-15 Thread jimk
Author: jimk
Date: Tue Apr 15 14:45:28 2008
New Revision: 648427

URL: http://svn.apache.org/viewvc?rev=648427&view=rev
Log:
HBASE-575   master dies with stack overflow error if rootdir isn't qualified

Added:
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestRootPath.java
Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/FSUtils.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=648427&r1=648426&r2=648427&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Tue Apr 15 14:45:28 2008
@@ -6,6 +6,7 @@
HBASE-11Unexpected exits corrupt DFS
HBASE-12When hbase regionserver restarts, it says "impossible state for
createLease()"
+   HBASE-575   master dies with stack overflow error if rootdir isn't qualified
 
   IMPROVEMENTS
HBASE-559   MR example job to count table rows

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java?rev=648427&r1=648426&r2=648427&view=diff
==
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java 
(original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java Tue 
Apr 15 14:45:28 2008
@@ -172,6 +172,14 @@
   public HMaster(Path rd, HServerAddress address, HBaseConfiguration conf)
   throws IOException {
 this.conf = conf;
+try {
+  FSUtils.validateRootPath(rd);
+} catch (IOException e) {
+  LOG.fatal("Not starting HMaster because the root directory path '" +
+  rd.toString() + "' is not valid. Check the setting of the" +
+  " configuration parameter '" + HBASE_DIR + "'", e);
+  throw e;
+}
 this.rootdir = rd;
 this.threadWakeFrequency = conf.getInt(THREAD_WAKE_FREQUENCY, 10 * 1000);
 // The filesystem hbase wants to use is probably not what is set into

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/FSUtils.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/FSUtils.java?rev=648427&r1=648426&r2=648427&view=diff
==
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/FSUtils.java 
(original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/FSUtils.java Tue 
Apr 15 14:45:28 2008
@@ -21,6 +21,8 @@
 
 import java.io.DataInputStream;
 import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -140,4 +142,23 @@
 s.close();
   }
 
+  /**
+   * Verifies root directory path is a valid URI with a scheme
+   * 
+   * @param root root directory path
+   * @throws IOException if not a valid URI with a scheme
+   */
+  public static void validateRootPath(Path root) throws IOException {
+try {
+  URI rootURI = new URI(root.toString());
+  String scheme = rootURI.getScheme();
+  if (scheme == null) {
+throw new IOException("Root directory does not contain a scheme");
+  }
+} catch (URISyntaxException e) {
+  IOException io = new IOException("Root directory path is not a valid 
URI");
+  io.initCause(e);
+  throw io;
+}
+  }
 }

Added: 
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestRootPath.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestRootPath.java?rev=648427&view=auto
==
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestRootPath.java 
(added)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestRootPath.java 
Tue Apr 15 14:45:28 2008
@@ -0,0 +1,63 @@
+/**
+ * Copyright 2008 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the Lic

svn commit: r648757 - in /hadoop/hbase/branches/0.1: CHANGES.txt src/java/org/apache/hadoop/hbase/HRegionServer.java

2008-04-16 Thread jimk
Author: jimk
Date: Wed Apr 16 10:16:59 2008
New Revision: 648757

URL: http://svn.apache.org/viewvc?rev=648757&view=rev
Log:
HBASE-500   Regionserver stuck on exit

This change may not address the root cause, but it does fix an obvious error.
Leaving issue open in case it occurs again after patch.

Modified:
hadoop/hbase/branches/0.1/CHANGES.txt

hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegionServer.java

Modified: hadoop/hbase/branches/0.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/CHANGES.txt?rev=648757&r1=648756&r2=648757&view=diff
==
--- hadoop/hbase/branches/0.1/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.1/CHANGES.txt Wed Apr 16 10:16:59 2008
@@ -11,6 +11,7 @@
HBASE-12when hbase regionserver restarts, it says "impossible state for
createLease()"
HBASE-575   master dies with stack overflow error if rootdir isn't qualified
+   HBASE-500   Regionserver stuck on exit
 
   IMPROVEMENTS
HBASE-559   MR example job to count table rows

Modified: 
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegionServer.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegionServer.java?rev=648757&r1=648756&r2=648757&view=diff
==
--- 
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegionServer.java 
(original)
+++ 
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegionServer.java 
Wed Apr 16 10:16:59 2008
@@ -22,7 +22,6 @@
 import java.io.IOException;
 import java.lang.Thread.UncaughtExceptionHandler;
 import java.lang.reflect.Constructor;
-import java.lang.reflect.Member;
 import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
 import java.util.ArrayList;
@@ -50,7 +49,6 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.dfs.AlreadyBeingCreatedException;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.filter.RowFilterInterface;
@@ -254,8 +252,10 @@
   if (e == null) {
 continue;
   }
-  e.getRegion().compactIfNeeded();
-  split(e.getRegion());
+  synchronized (compactSplitLock) { // Don't interrupt us while working
+e.getRegion().compactIfNeeded();
+split(e.getRegion());
+  }
 } catch (InterruptedException ex) {
   continue;
 } catch (IOException ex) {




svn commit: r649298 - in /hadoop/hbase/branches/0.1: CHANGES.txt lib/hadoop-0.16.2-core.jar lib/hadoop-0.16.2-test.jar lib/hadoop-0.16.3-core.jar lib/hadoop-0.16.3-test.jar lib/native/Linux-amd64-64/l

2008-04-17 Thread jimk
Author: jimk
Date: Thu Apr 17 14:54:37 2008
New Revision: 649298

URL: http://svn.apache.org/viewvc?rev=649298&view=rev
Log:
HBASE-578   Upgrade branch to 0.16.3 hadoop.

Added:
hadoop/hbase/branches/0.1/lib/hadoop-0.16.3-core.jar   (with props)
hadoop/hbase/branches/0.1/lib/hadoop-0.16.3-test.jar   (with props)
Removed:
hadoop/hbase/branches/0.1/lib/hadoop-0.16.2-core.jar
hadoop/hbase/branches/0.1/lib/hadoop-0.16.2-test.jar
Modified:
hadoop/hbase/branches/0.1/CHANGES.txt
hadoop/hbase/branches/0.1/lib/native/Linux-amd64-64/libhadoop.a
hadoop/hbase/branches/0.1/lib/native/Linux-i386-32/libhadoop.a

Modified: hadoop/hbase/branches/0.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/CHANGES.txt?rev=649298&r1=649297&r2=649298&view=diff
==
--- hadoop/hbase/branches/0.1/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.1/CHANGES.txt Thu Apr 17 14:54:37 2008
@@ -18,6 +18,7 @@
 
   IMPROVEMENTS
HBASE-559   MR example job to count table rows
+   HBASE-578   Upgrade branch to 0.16.3 hadoop.
 
 
 Release 0.1.1 - 04/11/2008

Added: hadoop/hbase/branches/0.1/lib/hadoop-0.16.3-core.jar
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/lib/hadoop-0.16.3-core.jar?rev=649298&view=auto
==
Binary file - no diff available.

Propchange: hadoop/hbase/branches/0.1/lib/hadoop-0.16.3-core.jar
--
svn:mime-type = application/octet-stream

Added: hadoop/hbase/branches/0.1/lib/hadoop-0.16.3-test.jar
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/lib/hadoop-0.16.3-test.jar?rev=649298&view=auto
==
Binary file - no diff available.

Propchange: hadoop/hbase/branches/0.1/lib/hadoop-0.16.3-test.jar
--
svn:mime-type = application/octet-stream

Modified: hadoop/hbase/branches/0.1/lib/native/Linux-amd64-64/libhadoop.a
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/lib/native/Linux-amd64-64/libhadoop.a?rev=649298&r1=649297&r2=649298&view=diff
==
Binary files - no diff available.

Modified: hadoop/hbase/branches/0.1/lib/native/Linux-i386-32/libhadoop.a
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/lib/native/Linux-i386-32/libhadoop.a?rev=649298&r1=649297&r2=649298&view=diff
==
Binary files - no diff available.




svn commit: r649308 - in /hadoop/hbase/branches/0.1: CHANGES.txt src/java/org/apache/hadoop/hbase/util/Migrate.java

2008-04-17 Thread jimk
Author: jimk
Date: Thu Apr 17 15:16:24 2008
New Revision: 649308

URL: http://svn.apache.org/viewvc?rev=649308&view=rev
Log:
HBase migration tool does not get correct FileSystem or root directory if 
configuration is not correct

Modified:
hadoop/hbase/branches/0.1/CHANGES.txt
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/util/Migrate.java

Modified: hadoop/hbase/branches/0.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/CHANGES.txt?rev=649308&r1=649307&r2=649308&view=diff
==
--- hadoop/hbase/branches/0.1/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.1/CHANGES.txt Thu Apr 17 15:16:24 2008
@@ -15,6 +15,8 @@
HBASE-582   HBase 554 forgot to clear results on each iteration caused by a 
filter
(Clint Morgan via Stack)
HBASE-532   Odd interaction between HRegion.get, HRegion.deleteAll and 
compactions
+   HBASE-590   HBase migration tool does not get correct FileSystem or root
+   directory if configuration is not correct
 
   IMPROVEMENTS
HBASE-559   MR example job to count table rows

Modified: 
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/util/Migrate.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/util/Migrate.java?rev=649308&r1=649307&r2=649308&view=diff
==
--- 
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/util/Migrate.java 
(original)
+++ 
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/util/Migrate.java 
Thu Apr 17 15:16:24 2008
@@ -123,6 +123,20 @@
   return -1;
 }
 
+// Validate root directory path
+
+Path rd = new Path(conf.get(HConstants.HBASE_DIR));
+try {
+  // Validate root directory path
+  FSUtils.validateRootPath(rd);
+} catch (IOException e) {
+  LOG.fatal("Not starting migration because the root directory path '" +
+  rd.toString() + "' is not valid. Check the setting of the" +
+  " configuration parameter '" + HConstants.HBASE_DIR + "'", e);
+  return -1;
+}
+this.conf.set("fs.default.name", rd.toString());
+
 try {
   // Verify file system is up.
   fs = FileSystem.get(this.conf);   // get DFS handle




svn commit: r649373 - in /hadoop/hbase/trunk: ./ src/java/org/apache/hadoop/hbase/regionserver/

2008-04-17 Thread jimk
Author: jimk
Date: Thu Apr 17 22:32:23 2008
New Revision: 649373

URL: http://svn.apache.org/viewvc?rev=649373&view=rev
Log:
HBASE-10    HRegionServer hangs upon exit due to DFSClient Exception

Modified:
hadoop/hbase/trunk/CHANGES.txt

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Flusher.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/LogRoller.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=649373&r1=649372&r2=649373&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Thu Apr 17 22:32:23 2008
@@ -10,7 +10,8 @@
HBASE-582   HBase 554 forgot to clear results on each iteration caused by a 
filter
(Clint Morgan via Stack)
HBASE-532   Odd interaction between HRegion.get, HRegion.deleteAll and 
compactions
-
+   HBASE-10HRegionServer hangs upon exit due to DFSClient Exception
+   
   IMPROVEMENTS
HBASE-559   MR example job to count table rows
 

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java?rev=649373&r1=649372&r2=649373&view=diff
==
--- 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
 (original)
+++ 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
 Thu Apr 17 22:32:23 2008
@@ -24,6 +24,7 @@
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.ReentrantLock;
 
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.util.StringUtils;
@@ -48,7 +49,7 @@
   private HTable meta = null;
   private volatile long startTime;
   private final long frequency;
-  private final Integer lock = new Integer(0);
+  private final ReentrantLock lock = new ReentrantLock();
   
   private final HRegionServer server;
   private final HBaseConfiguration conf;
@@ -79,12 +80,15 @@
   synchronized (regionsInQueue) {
 regionsInQueue.remove(r);
   }
-  synchronized (lock) {
+  lock.lock();
+  try {
 // Don't interrupt us while we are working
 Text midKey = r.compactStores();
 if (midKey != null) {
   split(r, midKey);
 }
+  } finally {
+lock.unlock();
   }
 }
   } catch (InterruptedException ex) {
@@ -218,9 +222,9 @@
   /**
* Only interrupt once it's done with a run through the work loop.
*/ 
-  void interruptPolitely() {
-synchronized (lock) {
-  interrupt();
+  void interruptIfNecessary() {
+if (lock.tryLock()) {
+  this.interrupt();
 }
   }
 }

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Flusher.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Flusher.java?rev=649373&r1=649372&r2=649373&view=diff
==
--- 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Flusher.java 
(original)
+++ 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Flusher.java 
Thu Apr 17 22:32:23 2008
@@ -22,6 +22,7 @@
 import java.io.IOException;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.locks.ReentrantLock;
 import java.util.concurrent.TimeUnit;
 import java.util.HashSet;
 import java.util.Set;
@@ -48,7 +49,7 @@
   private final long threadWakeFrequency;
   private final long optionalFlushPeriod;
   private final HRegionServer server;
-  private final Integer lock = new Integer(0);
+  private final ReentrantLock lock = new ReentrantLock();
   private final Integer memcacheSizeLock = new Integer(0);  
   private long lastOptionalCheck = System.currentTimeMillis();
 
@@ -84,7 +85,10 @@
   try {
 enqueueOptionalFlushRegions();
 r = flushQueue.poll(threadWakeFrequency, TimeUnit.MILLISECONDS);
-if (!flushImmediately(r)) {
+if (r == null) {
+  continue;
+}
+if (!flushRegion(r, false)) {
   break;
 }
   } catch (InterruptedException ex) {
@@ -118,49 +122,72 @@
   /**
* Only interrupt once it's done with a run through the work loop.
*/ 
-  void interruptPolitely() {
-synchronized (l

svn commit: r649668 - /hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java

2008-04-18 Thread jimk
Author: jimk
Date: Fri Apr 18 12:47:21 2008
New Revision: 649668

URL: http://svn.apache.org/viewvc?rev=649668&view=rev
Log:
HBASE-590 HBase migration tool does not get correct FileSystem or root 
directory if configuration is not correct.
Commit change to trunk.

Modified:
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java?rev=649668&r1=649667&r2=649668&view=diff
==
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java 
(original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java Fri 
Apr 18 12:47:21 2008
@@ -124,6 +124,20 @@
   return -1;
 }
 
+// Validate root directory path
+
+Path rd = new Path(conf.get(HConstants.HBASE_DIR));
+try {
+  // Validate root directory path
+  FSUtils.validateRootPath(rd);
+} catch (IOException e) {
+  LOG.fatal("Not starting migration because the root directory path '" +
+  rd.toString() + "' is not valid. Check the setting of the" +
+  " configuration parameter '" + HConstants.HBASE_DIR + "'", e);
+  return -1;
+}
+this.conf.set("fs.default.name", rd.toString());
+
 try {
   // Verify file system is up.
   fs = FileSystem.get(conf);// get DFS handle
@@ -315,13 +329,13 @@
   LOG.info(message + " ignoring");
 } else if (action == ACTION.DELETE) {
   LOG.info(message + " deleting");
-  fs.delete(p);
+  fs.delete(p, true);
 } else {
   // ACTION.PROMPT
   String response = prompt(message + " delete? [y/n]");
   if (response.startsWith("Y") || response.startsWith("y")) {
 LOG.info(message + " deleting");
-fs.delete(p);
+fs.delete(p, true);
   }
 }
   }




svn commit: r651067 - in /hadoop/hbase/branches/0.1: ./ src/java/org/apache/hadoop/hbase/ src/test/org/apache/hadoop/hbase/

2008-04-23 Thread jimk
Author: jimk
Date: Wed Apr 23 14:20:42 2008
New Revision: 651067

URL: http://svn.apache.org/viewvc?rev=651067&view=rev
Log:
HBASE-572   Backport HBASE-512 to 0.1 branch

Added:

hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/FlushRequester.java

hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/TestGlobalMemcacheLimit.java
Removed:

hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/CacheFlushListener.java
Modified:
hadoop/hbase/branches/0.1/CHANGES.txt
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java

hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegionServer.java

hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/LocalHBaseCluster.java

hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java

hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/MultiRegionTable.java

Modified: hadoop/hbase/branches/0.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/CHANGES.txt?rev=651067&r1=651066&r2=651067&view=diff
==
--- hadoop/hbase/branches/0.1/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.1/CHANGES.txt Wed Apr 23 14:20:42 2008
@@ -21,6 +21,7 @@
filtering decision is made (Clint Morgan via Stack)
HBASE-586   HRegion runs HStore memcache snapshotting -- fix it so only 
HStore
knows about workings of memcache
+   HBASE-572   Backport HBASE-512 to 0.1 branch
 
   IMPROVEMENTS
HBASE-559   MR example job to count table rows

Added: 
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/FlushRequester.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/FlushRequester.java?rev=651067&view=auto
==
--- 
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/FlushRequester.java 
(added)
+++ 
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/FlushRequester.java 
Wed Apr 23 14:20:42 2008
@@ -0,0 +1,36 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+/**
+ * Implementors of this interface want to be notified when an HRegion
+ * determines that a cache flush is needed. A CacheFlushListener (or null)
+ * must be passed to the HRegion constructor.
+ */
+public interface FlushRequester {
+
+  /**
+   * Tell the listener the cache needs to be flushed.
+   * 
+   * @param region the HRegion requesting the cache flush
+   */
+  void request(HRegion region);
+}

Modified: 
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java?rev=651067&r1=651066&r2=651067&view=diff
==
--- hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java 
(original)
+++ hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java Wed 
Apr 23 14:20:42 2008
@@ -321,15 +321,19 @@
 volatile boolean writesEnabled = true;
   }
 
-  volatile WriteState writestate = new WriteState();
+  private volatile WriteState writestate = new WriteState();
 
   final int memcacheFlushSize;
   private volatile long lastFlushTime;
-  final CacheFlushListener flushListener;
-  final int blockingMemcacheSize;
-  protected final long threadWakeFrequency;
-  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
-  private final Integer updateLock = new Integer(0);
+  final FlushRequester flushListener;
+  private final int blockingMemcacheSize;
+  final long threadWakeFrequency;
+  // Used to guard splits and closes
+  private final ReentrantReadWriteLock splitsAndClosesLock =
+new ReentrantReadWriteLock();
+  // Stop updates lock
+  private final ReentrantReadWriteLock updateLock =
+new ReentrantReadWriteLock();
   private final Integer splitLock = new Integer(0);
   private final long desiredMaxFile

svn commit: r652563 - in /hadoop/hbase/branches/0.1: ./ src/test/org/apache/hadoop/hbase/ src/test/org/apache/hadoop/hbase/mapred/

2008-05-01 Thread jimk
Author: jimk
Date: Thu May  1 09:08:09 2008
New Revision: 652563

URL: http://svn.apache.org/viewvc?rev=652563&view=rev
Log:
HBASE-607 MultiRegionTable.makeMultiRegionTable is not deterministic enough for 
regression tests

M MultiRegionTable

  Make deterministic by creating the regions directly and not rely on
  the asychronous nature of cache flushes, compactions and splits. The
  regions are small, but the point of this class is to generate a
  table with multiple regions so we can test map / reduce, region
  onlining / offlining, etc.

  Removed PUNCTUATION from row keys. Not sure why it was there in the
  first place, other than perhaps to verify that a row key can have
  punctuation in it provided it is not the first character. This will
  become moot when row keys change from Text to byte[] anyways.

  Incorporate repeated code
{code}
region.close();
region.getLog().closeAndDelete();
{code}
  into private method closeRegionAndDeleteLog

M TestSplit

  extends HBaseTestCase instead of MultiRegionTable. It didn't use the
  output of MultiRegionTable, so all that work was just wasted by this
  test.

M TestTableIndex, TestTableMapReduce

  The only two tests that currently use MultiRegionTable. Minor
  modifications needed because MultiRegionTable now handles starting
  and stopping of the mini-DFS cluster. With the new MultiRegionTable
  class, if these tests fail now it will be because something they are
  testing has regressed and not because MultiRegionTable failed.


Modified:
hadoop/hbase/branches/0.1/CHANGES.txt

hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/MultiRegionTable.java
hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/TestSplit.java

hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/mapred/TestTableIndex.java

hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java

Modified: hadoop/hbase/branches/0.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/CHANGES.txt?rev=652563&r1=652562&r2=652563&view=diff
==
--- hadoop/hbase/branches/0.1/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.1/CHANGES.txt Thu May  1 09:08:09 2008
@@ -28,6 +28,8 @@
HBASE-608   HRegionServer::getThisIP() checks hadoop config var for dns 
interface name
(Jim R. Wilson via Stack)
HBASE-609   Master doesn't see regionserver edits because of clock skew
+   HBASE-607   MultiRegionTable.makeMultiRegionTable is not deterministic 
enough
+   for regression tests
 
   IMPROVEMENTS
HBASE-559   MR example job to count table rows

Modified: 
hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/MultiRegionTable.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/MultiRegionTable.java?rev=652563&r1=652562&r2=652563&view=diff
==
--- 
hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/MultiRegionTable.java
 (original)
+++ 
hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/MultiRegionTable.java
 Thu May  1 09:08:09 2008
@@ -20,330 +20,113 @@
 package org.apache.hadoop.hbase;
 
 import java.io.IOException;
-import java.util.ConcurrentModificationException;
-import java.util.Map;
-import java.util.SortedMap;
-import java.util.TreeMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.util.Writables;
+
+import org.apache.hadoop.dfs.MiniDFSCluster;
 import org.apache.hadoop.io.Text;
 
 /**
  * Utility class to build a table of multiple regions.
  */
 public class MultiRegionTable extends HBaseTestCase {
-  static final Log LOG = LogFactory.getLog(MultiRegionTable.class.getName());
+  private static final Text[] KEYS = {
+null,
+new Text("bbb"),
+new Text("ccc"),
+new Text("ddd"),
+new Text("eee"),
+new Text("fff"),
+new Text("ggg"),
+new Text("hhh"),
+new Text("iii"),
+new Text("jjj"),
+new Text("kkk"),
+new Text("lll"),
+new Text("mmm"),
+new Text("nnn"),
+new Text("ooo"),
+new Text("ppp"),
+new Text("qqq"),
+new Text("rrr"),
+new Text("sss"),
+new Text("ttt"),
+new Text("uuu"),
+new Text("vvv"),
+new Text("www"),
+new Text("xxx"),
+new Text("yyy")
+  };
+  
+  protected final String columnName;
+  protected HTableDescriptor desc;
+  protected MiniDFSCluster dfsCluster = null;
 
   /**
-   * Make a multi-region table.  Presumption

svn commit: r652587 - in /hadoop/hbase/trunk: ./ src/test/org/apache/hadoop/hbase/ src/test/org/apache/hadoop/hbase/mapred/ src/test/org/apache/hadoop/hbase/regionserver/

2008-05-01 Thread jimk
Author: jimk
Date: Thu May  1 10:37:24 2008
New Revision: 652587

URL: http://svn.apache.org/viewvc?rev=652587&view=rev
Log:
HBASE-607 MultiRegionTable.makeMultiRegionTable is not deterministic enough for 
regression tests

M MultiRegionTable

  Make deterministic by creating the regions directly and not rely on
  the asynchronous nature of cache flushes, compactions and splits. The
  regions are small, but the point of this class is to generate a
  table with multiple regions so we can test map / reduce, region
  onlining / offlining, etc.

  Removed PUNCTUATION from row keys. Not sure why it was there in the
  first place, other than perhaps to verify that a row key can have
  punctuation in it provided it is not the first character. This will
  become moot when row keys change from Text to byte[] anyways.

  Incorporate repeated code
{code}
region.close();
region.getLog().closeAndDelete();
{code}
  into private method closeRegionAndDeleteLog

M TestSplit

  extends HBaseClusterTestCase instead of MultiRegionTable. It didn't
  use the output of MultiRegionTable, so all that work was just wasted
  by this test.

M TestTableIndex, TestTableMapReduce

  The only two tests that currently use MultiRegionTable. Minor
  modifications needed because MultiRegionTable now handles starting
  and stopping of the mini-DFS cluster. With the new MultiRegionTable
  class, if these tests fail now it will be because something they are
  testing has regressed and not because MultiRegionTable failed.


Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MultiRegionTable.java

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/mapred/TestTableIndex.java

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestSplit.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=652587&r1=652586&r2=652587&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Thu May  1 10:37:24 2008
@@ -28,6 +28,8 @@
HBASE-608   HRegionServer::getThisIP() checks hadoop config var for dns 
interface name
(Jim R. Wilson via Stack)
HBASE-609   Master doesn't see regionserver edits because of clock skew
+   HBASE-607   MultiRegionTable.makeMultiRegionTable is not deterministic 
enough
+   for regression tests
 
   IMPROVEMENTS
HBASE-559   MR example job to count table rows

Modified: 
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MultiRegionTable.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MultiRegionTable.java?rev=652587&r1=652586&r2=652587&view=diff
==
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MultiRegionTable.java 
(original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MultiRegionTable.java 
Thu May  1 10:37:24 2008
@@ -20,349 +20,101 @@
 package org.apache.hadoop.hbase;
 
 import java.io.IOException;
-import java.util.ConcurrentModificationException;
-import java.util.Map;
-import java.util.TreeMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.util.Writables;
+
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.hbase.io.Cell;
-import org.apache.hadoop.hbase.io.RowResult;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Scanner;
 
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
 
 /**
  * Utility class to build a table of multiple regions.
  */
 public class MultiRegionTable extends HBaseClusterTestCase {
-  static final Log LOG = LogFactory.getLog(MultiRegionTable.class.getName());
+  private static final Text[] KEYS = {
+null,
+new Text("bbb"),
+new Text("ccc"),
+new Text("ddd"),
+new Text("eee"),
+new Text("fff"),
+new Text("ggg"),
+new Text("hhh"),
+new Text("iii"),
+new Text("jjj"),
+new Text("kkk"),
+new Text("lll"),
+new Text("mmm"),
+new Text("nnn"),
+new Text("ooo"),
+new Text("ppp"),
+new Text("qqq"),
+new Text("rrr"),
+new Text("sss"),
+new Text("ttt"),
+new Text("uuu"),
+new Text("vvv"),
+new Text("www"),
+new Text("xxx"),
+new Text("yyy")
+  };
+  
+  protected final

svn commit: r653642 - in /hadoop/hbase/branches/0.1: CHANGES.txt src/java/org/apache/hadoop/hbase/HMaster.java src/java/org/apache/hadoop/hbase/HStore.java src/test/org/apache/hadoop/hbase/TestHBaseCl

2008-05-05 Thread jimk
Author: jimk
Date: Mon May  5 16:56:31 2008
New Revision: 653642

URL: http://svn.apache.org/viewvc?rev=653642&view=rev
Log:
HBASE-478   offlining of table does not run reliably

Modified:
hadoop/hbase/branches/0.1/CHANGES.txt
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HMaster.java
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStore.java

hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java

Modified: hadoop/hbase/branches/0.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/CHANGES.txt?rev=653642&r1=653641&r2=653642&view=diff
==
--- hadoop/hbase/branches/0.1/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.1/CHANGES.txt Mon May  5 16:56:31 2008
@@ -30,7 +30,8 @@
HBASE-609   Master doesn't see regionserver edits because of clock skew
HBASE-607   MultiRegionTable.makeMultiRegionTable is not deterministic 
enough
for regression tests
-
+   HBASE-478   offlining of table does not run reliably
+   
   IMPROVEMENTS
HBASE-559   MR example job to count table rows
HBASE-578   Upgrade branch to 0.16.3 hadoop.

Modified: 
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HMaster.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HMaster.java?rev=653642&r1=653641&r2=653642&view=diff
==
--- hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HMaster.java 
(original)
+++ hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HMaster.java Mon 
May  5 16:56:31 2008
@@ -430,10 +430,10 @@
 throws IOException {
   // Skip region - if ...
   if (info.isOffline() // offline
-  || killedRegions.contains(info.getRegionName())  // queued for 
offline
-  || regionsToDelete.contains(info.getRegionName())) { // queued for 
delete
+  || killedRegions.contains(info.getRegionName())) {  // queued for 
offline
 
 unassignedRegions.remove(info);
+pendingRegions.remove(info);
 return;
   }
   HServerInfo storedInfo = null;
@@ -846,13 +846,6 @@
   volatile Set killedRegions =
 Collections.synchronizedSet(new HashSet());
 
-  /**
-   * 'regionsToDelete' contains regions that need to be deleted, but cannot be
-   * until the region server closes it
-   */
-  volatile Set regionsToDelete =
-Collections.synchronizedSet(new HashSet());
-
   /** Set of tables currently in creation. */
   private volatile Set tableInCreation = 
 Collections.synchronizedSet(new HashSet());
@@ -1414,8 +1407,9 @@
   } else if (info.isMetaTable()) {
 onlineMetaRegions.remove(info.getStartKey());
   }
-
-  this.unassignedRegions.put(info, ZERO_L);
+  if (!killedRegions.remove(info.getRegionName())) {
+this.unassignedRegions.put(info, ZERO_L);
+  }
 }
   }
 }
@@ -1678,14 +1672,9 @@
   unassignRootRegion();
 } else {
   boolean reassignRegion = !region.isOffline();
-  boolean deleteRegion = false;
   if (killedRegions.remove(region.getRegionName())) {
 reassignRegion = false;
   }
-  if (regionsToDelete.remove(region.getRegionName())) {
-reassignRegion = false;
-deleteRegion = true;
-  }
   if (region.isMetaTable()) {
 // Region is part of the meta table. Remove it from 
onlineMetaRegions
 onlineMetaRegions.remove(region.getStartKey());
@@ -1696,8 +1685,7 @@
   //   reassigned before the close is processed.
   unassignedRegions.remove(region);
   try {
-toDoQueue.put(new ProcessRegionClose(region, reassignRegion,
-deleteRegion));
+toDoQueue.put(new ProcessRegionClose(region, reassignRegion));
 
   } catch (InterruptedException e) {
 throw new RuntimeException(
@@ -2008,13 +1996,11 @@
 private boolean rootRescanned;
 
 private class ToDoEntry {
-  boolean deleteRegion;
   boolean regionOffline;
   Text row;
   HRegionInfo info;
 
   ToDoEntry(Text row, HRegionInfo info) {
-this.deleteRegion = false;
 this.regionOffline = false;
 this.row = row;
 this.info = info;
@@ -2106,35 +2092,26 @@
   ToDoEntry todo = new ToDoEntry(row, info);
   toDoList.add(todo);
 
-  if (killList.containsKey(deadServerName)) {
-HashMap regionsToKill =
-  new HashMap();
-synchronized (killList) {
+  synchronized (killList) {
+if (killList.containsKey(deadServerName)) {
+  H

svn commit: r653941 - in /hadoop/hbase/trunk: CHANGES.txt src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java

2008-05-06 Thread jimk
Author: jimk
Date: Tue May  6 15:18:24 2008
New Revision: 653941

URL: http://svn.apache.org/viewvc?rev=653941&view=rev
Log:
HBASE-405   TIF and TOF use log4j directly rather than apache commons-logging

Modified:
hadoop/hbase/trunk/CHANGES.txt

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=653941&r1=653940&r2=653941&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Tue May  6 15:18:24 2008
@@ -30,6 +30,7 @@
HBASE-609   Master doesn't see regionserver edits because of clock skew
HBASE-607   MultiRegionTable.makeMultiRegionTable is not deterministic 
enough
for regression tests
+   HBASE-405   TIF and TOF use log4j directly rather than apache 
commons-logging
 
   IMPROVEMENTS
HBASE-559   MR example job to count table rows

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java?rev=653941&r1=653940&r2=653941&view=diff
==
--- 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
 (original)
+++ 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
 Tue May  6 15:18:24 2008
@@ -1,121 +1,109 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.mapred;
-
-import java.io.IOException;
-import java.util.Map;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.io.MapWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.mapred.FileAlreadyExistsException;
-import org.apache.hadoop.mapred.InvalidJobConfException;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.OutputFormatBase;
-import org.apache.hadoop.mapred.RecordWriter;
-import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.util.Progressable;
-
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.io.BatchUpdate;
-
-import org.apache.log4j.Logger;
-
-/**
- * Convert Map/Reduce output and write it to an HBase table
- */
-public class TableOutputFormat
-  extends OutputFormatBase {
-
-  /** JobConf parameter that specifies the output table */
-  public static final String OUTPUT_TABLE = "hbase.mapred.outputtable";
-
-  static final Logger LOG = 
Logger.getLogger(TableOutputFormat.class.getName());
-
-  /** constructor */
-  public TableOutputFormat() {
-super();
-  }
-
-  /**
-   * Convert Reduce output (key, value) to (HStoreKey, KeyedDataArrayWritable) 
-   * and write to an HBase table
-   */
-  protected class TableRecordWriter
-implements RecordWriter {
-private HTable m_table;
-
-/**
- * Instantiate a TableRecordWriter with the HBase HClient for writing.
- * 
- * @param table
- */
-public TableRecordWriter(HTable table) {
-  m_table = table;
-}
-
-/** [EMAIL PROTECTED] */
-public void close(@SuppressWarnings("unused") Reporter reporter) {
-  // Nothing to do.
-}
-
-/** [EMAIL PROTECTED] */
-public void write(Text key, BatchUpdate value) throws IOException {
-  m_table.commit(value);
-}
-  }
-  
-  /** [EMAIL PROTECTED] */
-  @Override
-  @SuppressWarnings("unchecked")
-  public RecordWriter getRecordWriter(
-  @SuppressWarnings("unused") FileSystem ignored,
-  JobConf job,
-  @SuppressWarnings("unused") String name,
-  @SuppressWarnings("unused") Progressable progress) throws IOException {
-
-// expecting exactly one path
-
-Text tableName = new Text(job.get(OUTP

svn commit: r654193 - in /hadoop/hbase/trunk: ./ src/java/org/apache/hadoop/hbase/master/ src/java/org/apache/hadoop/hbase/regionserver/ src/test/org/apache/hadoop/hbase/

2008-05-07 Thread jimk
Author: jimk
Date: Wed May  7 10:56:06 2008
New Revision: 654193

URL: http://svn.apache.org/viewvc?rev=654193&view=rev
Log:
HBASE-478   offlining of table does not run reliably

Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/BaseScanner.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ChangeTableState.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ModifyColumn.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessRegionClose.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/RegionManager.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/RetryableMetaOperation.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ServerManager.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/TableDelete.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/TableOperation.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=654193&r1=654192&r2=654193&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Wed May  7 10:56:06 2008
@@ -33,6 +33,7 @@
HBASE-405   TIF and TOF use log4j directly rather than apache 
commons-logging
HBASE-618   We always compact if 2 files, regardless of the compaction 
threshold setting
HBASE-619   Fix 'logs' link in UI
+   HBASE-478   offlining of table does not run reliably
 
   IMPROVEMENTS
HBASE-559   MR example job to count table rows

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/BaseScanner.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/BaseScanner.java?rev=654193&r1=654192&r2=654193&view=diff
==
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/BaseScanner.java 
(original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/BaseScanner.java 
Wed May  7 10:56:06 2008
@@ -357,10 +357,10 @@
 
 // Skip region - if ...
 if(info.isOffline() // offline
-  || regionManager.isClosing(info.getRegionName()) // queued for offline
-  || regionManager.isMarkedForDeletion(info.getRegionName())) { // queued 
for delete
+  || regionManager.isClosing(info.getRegionName())) { // queued for offline
 
   regionManager.noLongerUnassigned(info);
+  regionManager.noLongerPending(info.getRegionName());
   return;
 }
 HServerInfo storedInfo = null;

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ChangeTableState.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ChangeTableState.java?rev=654193&r1=654192&r2=654193&view=diff
==
--- 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ChangeTableState.java
 (original)
+++ 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ChangeTableState.java
 Wed May  7 10:56:06 2008
@@ -71,8 +71,8 @@
 for (HRegionInfo i: unservedRegions) {
   if (i.isOffline() && i.isSplit()) {
 if (LOG.isDebugEnabled()) {
-  LOG.debug("Skipping region " + i.toString() + " because it is " +
-  "offline because it has been split");
+  LOG.debug("Skipping region " + i.toString() +
+  " because it is offline because it has been split");
 }
 continue;
   }
@@ -94,6 +94,7 @@
 
   if (online) {
 // Bring offline regions on-line
+master.regionManager.noLongerClosing(i.getRegionName());
 if (!master.regionManager.isUnassigned(i)) {
   master.regionManager.setUnassigned(i);
 }
@@ -119,23 +120,21 @@
 
   HashMap localKillList =
 new HashMap();
-
-  Map killedRegions = 
-master.regionManager.getMarkedToClose(serverName);
-  if (killedRegions != null) {
-localKillList.putAll(killedRegions);
-  }
-  
+
   for (HRegionInfo i: e.getValue()) {
 if (LOG.isDebugEnabled()) {
-  LOG.debug("adding region " + i.getRegionName() +
-  " to kill list");
+  LOG.debug("adding region " + i.getRegionName() + " to kill list");
 }
 // this marks the regions to be closed
 localKillList.

svn commit: r654229 - in /hadoop/hbase/trunk: ./ src/java/org/apache/hadoop/hbase/filter/

2008-05-07 Thread jimk
Author: jimk
Date: Wed May  7 12:35:47 2008
New Revision: 654229

URL: http://svn.apache.org/viewvc?rev=654229&view=rev
Log:
HBASE-600   Filters have excessive DEBUG logging

Modified:
hadoop/hbase/trunk/CHANGES.txt

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/InclusiveStopRowFilter.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/PageRowFilter.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RegExpRowFilter.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RowFilterSet.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/StopRowFilter.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/WhileMatchRowFilter.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=654229&r1=654228&r2=654229&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Wed May  7 12:35:47 2008
@@ -44,6 +44,7 @@
the exception with a RetriesExhaustedException
HBASE-47Option to set TTL for columns in hbase
(Andrew Purtell via Bryan Duxbury and Stack)
+   HBASE-600   Filters have excessive DEBUG logging
 
 Release 0.1.1 - 04/11/2008
 

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/InclusiveStopRowFilter.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/InclusiveStopRowFilter.java?rev=654229&r1=654228&r2=654229&view=diff
==
--- 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/InclusiveStopRowFilter.java
 (original)
+++ 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/InclusiveStopRowFilter.java
 Wed May  7 12:35:47 2008
@@ -50,12 +50,7 @@
   }
   return false;
 }
-boolean result = this.stopRowKey.compareTo(rowKey) < 0;
-if (LOG.isDebugEnabled()) {
-  LOG.debug("Filter result for rowKey: " + rowKey + ".  Result: " + 
-result);
-}
-return result;
+return this.stopRowKey.compareTo(rowKey) < 0;
   }
   
 }

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/PageRowFilter.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/PageRowFilter.java?rev=654229&r1=654228&r2=654229&view=diff
==
--- 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/PageRowFilter.java 
(original)
+++ 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/PageRowFilter.java 
Wed May  7 12:35:47 2008
@@ -24,8 +24,6 @@
 import java.io.IOException;
 import java.util.SortedMap;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.Text;
 
 /**
@@ -46,8 +44,6 @@
   private long pageSize = Long.MAX_VALUE;
   private int rowsAccepted = 0;
 
-  static final Log LOG = LogFactory.getLog(PageRowFilter.class);
-  
   /**
* Default constructor, filters nothing. Required though for RPC
* deserialization.
@@ -86,10 +82,6 @@
   @SuppressWarnings("unused") Text rowKey) {
 if (!filtered) {
   this.rowsAccepted++;
-  if (LOG.isDebugEnabled()) {
-LOG.debug("rowProcessed incremented rowsAccepted to " + 
-  this.rowsAccepted);
-  }
 }
   }
 
@@ -106,12 +98,7 @@
* [EMAIL PROTECTED]
*/
   public boolean filterAllRemaining() {
-boolean result = this.rowsAccepted > this.pageSize;
-if (LOG.isDebugEnabled()) {
-  LOG.debug("filtering decision is " + result + " with rowsAccepted: " + 
-this.rowsAccepted);
-}
-return result;
+return this.rowsAccepted > this.pageSize;
   }
 
   /**

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RegExpRowFilter.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RegExpRowFilter.java?rev=654229&r1=654228&r2=654229&view=diff
==
--- 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RegExpRowFilter.java 
(original)
+++ 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RegExpRowFilter.java 
Wed May  7 12:35:47 2008
@@ -31,8 +31,6 @@
 import java.util.Map.Entry;
 import java.util.regex.Pattern;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.Text;
 
 import org.apache.hadoop.hbase.regionserver.HLogEdit;
@@ -50,8 +48,6 @@
   private Map equalsMap = new HashMap();
   private Set nullColumns = new HashSet();
 
-  static final Log LOG = LogFactory.getLog(RegExpRowFilter.cla

svn commit: r654256 - in /hadoop/hbase/trunk: CHANGES.txt src/java/org/apache/hadoop/hbase/ipc/HbaseRPC.java

2008-05-07 Thread jimk
Author: jimk
Date: Wed May  7 13:19:53 2008
New Revision: 654256

URL: http://svn.apache.org/viewvc?rev=654256&view=rev
Log:
HBASE-453   undeclared throwable exception from HTable.get

Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HbaseRPC.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=654256&r1=654255&r2=654256&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Wed May  7 13:19:53 2008
@@ -34,6 +34,7 @@
HBASE-618   We always compact if 2 files, regardless of the compaction 
threshold setting
HBASE-619   Fix 'logs' link in UI
HBASE-478   offlining of table does not run reliably
+   HBASE-453   undeclared throwable exception from HTable.get
 
   IMPROVEMENTS
HBASE-559   MR example job to count table rows

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HbaseRPC.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HbaseRPC.java?rev=654256&r1=654255&r2=654256&view=diff
==
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HbaseRPC.java 
(original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HbaseRPC.java Wed 
May  7 13:19:53 2008
@@ -205,13 +205,24 @@
 }
 
 public Object invoke(Object proxy, Method method, Object[] args)
-  throws Throwable {
+  throws IOException {
   long startTime = System.currentTimeMillis();
-  HbaseObjectWritable value = (HbaseObjectWritable)
-client.call(new Invocation(method, args), address, ticket);
-  long callTime = System.currentTimeMillis() - startTime;
-  LOG.debug("Call: " + method.getName() + " " + callTime);
-  return value.get();
+  try {
+HbaseObjectWritable value = (HbaseObjectWritable)
+  client.call(new Invocation(method, args), address, ticket);
+long callTime = System.currentTimeMillis() - startTime;
+LOG.debug("Call: " + method.getName() + " " + callTime);
+return value.get();
+  } catch (Throwable t) {
+IOException e;
+if (t instanceof IOException) {
+  e = (IOException) t;
+} else {
+  e = new IOException("error during RPC call");
+  e.initCause(t);
+}
+throw e;
+  }
 }
   }
 




svn commit: r654267 - /hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HbaseRPC.java

2008-05-07 Thread jimk
Author: jimk
Date: Wed May  7 13:39:14 2008
New Revision: 654267

URL: http://svn.apache.org/viewvc?rev=654267&view=rev
Log:
HBASE-453 undeclared throwable exception from HTable.get

Instead of wrapping UndeclaredThrowableException in a new IOException, wrap 
UndeclaredThrowableException.getCause

Modified:
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HbaseRPC.java

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HbaseRPC.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HbaseRPC.java?rev=654267&r1=654266&r2=654267&view=diff
==
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HbaseRPC.java 
(original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HbaseRPC.java Wed 
May  7 13:39:14 2008
@@ -219,7 +219,7 @@
   e = (IOException) t;
 } else {
   e = new IOException("error during RPC call");
-  e.initCause(t);
+  e.initCause(t.getCause());
 }
 throw e;
   }




svn commit: r654301 - in /hadoop/hbase/trunk: CHANGES.txt src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java

2008-05-07 Thread jimk
Author: jimk
Date: Wed May  7 15:08:21 2008
New Revision: 654301

URL: http://svn.apache.org/viewvc?rev=654301&view=rev
Log:
HBASE-611   regionserver should do basic health check before reporting 
alls-well to the master

Modified:
hadoop/hbase/trunk/CHANGES.txt

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=654301&r1=654300&r2=654301&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Wed May  7 15:08:21 2008
@@ -46,6 +46,8 @@
HBASE-47Option to set TTL for columns in hbase
(Andrew Purtell via Bryan Duxbury and Stack)
HBASE-600   Filters have excessive DEBUG logging
+   HBASE-611   regionserver should do basic health check before reporting
+   alls-well to the master
 
 Release 0.1.1 - 04/11/2008
 

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java?rev=654301&r1=654300&r2=654301&view=diff
==
--- 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 (original)
+++ 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 Wed May  7 15:08:21 2008
@@ -270,7 +270,7 @@
   init(reportForDuty(sleeper));
   long lastMsg = 0;
   // Now ask master what it wants us to do and tell it what we have done
-  for (int tries = 0; !stopRequested.get();) {
+  for (int tries = 0; !stopRequested.get() && isHealthy();) {
 long now = System.currentTimeMillis();
 if (lastMsg != 0 && (now - lastMsg) >= serverLeaseTimeout) {
   // It has been way too long since we last reported to the master.
@@ -576,7 +576,26 @@
 serverInfo.getServerAddress().toString());
   }
 
-  /* Run some housekeeping tasks before we go into 'hibernation' sleeping at
+  /*
+   * Verify that server is healthy
+   */
+  private boolean isHealthy() {
+if (!fsOk) {
+  // File system problem
+  return false;
+}
+// Verify that all threads are alive
+if (!(leases.isAlive() && compactSplitThread.isAlive() &&
+cacheFlusher.isAlive() && logRoller.isAlive() &&
+workerThread.isAlive())) {
+  // One or more threads are no longer alive - shut down
+  stop();
+  return false;
+}
+return true;
+  }
+  /*
+   * Run some housekeeping tasks before we go into 'hibernation' sleeping at
* the end of the main HRegionServer run loop.
*/
   private void housekeeping() {




svn commit: r654653 - in /hadoop/hbase/trunk: CHANGES.txt src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java

2008-05-08 Thread jimk
Author: jimk
Date: Thu May  8 16:41:47 2008
New Revision: 654653

URL: http://svn.apache.org/viewvc?rev=654653&view=rev
Log:
HBASE-614   Retiring regions is not used; exploit or remove

Modified:
hadoop/hbase/trunk/CHANGES.txt

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=654653&r1=654652&r2=654653&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Thu May  8 16:41:47 2008
@@ -49,7 +49,8 @@
HBASE-600   Filters have excessive DEBUG logging
HBASE-611   regionserver should do basic health check before reporting
alls-well to the master
-
+   HBASE-614   Retiring regions is not used; exploit or remove
+   
 Release 0.1.1 - 04/11/2008
 
   INCOMPATIBLE CHANGES

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java?rev=654653&r1=654652&r2=654653&view=diff
==
--- 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
 (original)
+++ 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
 Thu May  8 16:41:47 2008
@@ -190,33 +190,11 @@
   }
   
   /** [EMAIL PROTECTED] */
-  public void closing(final Text regionName) {
-startTime = System.currentTimeMillis();
-server.getWriteLock().lock();
-try {
-  // Remove region from regions Map and add it to the Map of retiring
-  // regions.
-  server.setRegionClosing(regionName);
-  if (LOG.isDebugEnabled()) {
-LOG.debug(regionName.toString() + " closing (" +
-  "Adding to retiringRegions)");
-  }
-} finally {
-  server.getWriteLock().unlock();
-}
+  public void closing(@SuppressWarnings("unused") final Text regionName) {
   }
   
   /** [EMAIL PROTECTED] */
-  public void closed(final Text regionName) {
-server.getWriteLock().lock();
-try {
-  server.setRegionClosed(regionName);
-  if (LOG.isDebugEnabled()) {
-LOG.debug(regionName.toString() + " closed");
-  }
-} finally {
-  server.getWriteLock().unlock();
-}
+  public void closed(@SuppressWarnings("unused") final Text regionName) {
   }
 
   /**

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java?rev=654653&r1=654652&r2=654653&view=diff
==
--- 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 (original)
+++ 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 Thu May  8 16:41:47 2008
@@ -115,8 +115,6 @@
   // region name -> HRegion
   protected volatile Map onlineRegions =
 new ConcurrentHashMap();
-  protected volatile Map retiringRegions =
-new ConcurrentHashMap();
  
   protected final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
   private volatile List outboundMsgs =
@@ -1296,42 +1294,10 @@
*/
   protected HRegion getRegion(final Text regionName)
   throws NotServingRegionException {
-return getRegion(regionName, false);
-  }
-  
-  /** Move a region from online to closing. */
-  void setRegionClosing(final Text regionName) {
-retiringRegions.put(regionName, onlineRegions.remove(regionName));
-  }
-  
-  /** Set a region as closed. */
-  void setRegionClosed(final Text regionName) {
-retiringRegions.remove(regionName);
-  }
-  
-  /** 
-   * Protected utility method for safely obtaining an HRegion handle.
-   * @param regionName Name of online [EMAIL PROTECTED] HRegion} to return
-   * @param checkRetiringRegions Set true if we're to check retiring regions
-   * as well as online regions.
-   * @return [EMAIL PROTECTED] HRegion} for regionName
-   * @throws NotServingRegionException
-   */
-  protected HRegion getRegion(final Text regionName,
-final boolean checkRetiringRegions)
-  throws NotServingRegionException {
 HRegion region = null;
 this.lock.readLock().lock();
 try {
   region = onlineRegions.get(regionName);
-  if (region == null && checkRetiringRegions) {
-region = this.retiringRegions.get(regionName);
-if (LOG.isDebugEnabled()) {
-  if (region != null) {
-LOG.debug("Found region " + reg

svn commit: r655650 - in /hadoop/hbase/trunk: ./ src/test/ src/test/org/apache/hadoop/hbase/ src/test/org/apache/hadoop/hbase/regionserver/ src/test/org/apache/hadoop/hbase/util/

2008-05-12 Thread jimk
Author: jimk
Date: Mon May 12 14:22:38 2008
New Revision: 655650

URL: http://svn.apache.org/viewvc?rev=655650&view=rev
Log:
HBASE-622   Remove StaticTestEnvironment and put a log4j.properties in src/test

Added:
hadoop/hbase/trunk/src/test/log4j.properties
Removed:

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/StaticTestEnvironment.java
Modified:
hadoop/hbase/trunk/CHANGES.txt

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/HBaseClusterTestCase.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/HBaseTestCase.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MultiRegionTable.java

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestDeleteAll.java

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestDeleteFamily.java

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestGet.java

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHLog.java

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHRegion.java

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestLogRolling.java

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMergeTool.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMigrate.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=655650&r1=655649&r2=655650&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Mon May 12 14:22:38 2008
@@ -36,6 +36,7 @@
HBASE-478   offlining of table does not run reliably
HBASE-453   undeclared throwable exception from HTable.get
HBASE-620   testmergetool failing in branch and trunk since hbase-618 went 
in
+   HBASE-622   Remove StaticTestEnvironment and put a log4j.properties in 
src/test
 
   IMPROVEMENTS
HBASE-559   MR example job to count table rows

Added: hadoop/hbase/trunk/src/test/log4j.properties
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/log4j.properties?rev=655650&view=auto
==
--- hadoop/hbase/trunk/src/test/log4j.properties (added)
+++ hadoop/hbase/trunk/src/test/log4j.properties Mon May 12 14:22:38 2008
@@ -0,0 +1,63 @@
+# Define some default values that can be overridden by system properties
+hbase.root.logger=INFO,console
+hbase.log.dir=.
+hbase.log.file=hbase.log
+
+# Define the root logger to the system property "hbase.root.logger".
+log4j.rootLogger=${hbase.root.logger}
+
+# Logging Threshold
+log4j.threshhold=ALL
+
+#
+# Daily Rolling File Appender
+#
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}
+
+# Rollver at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+# Debugging Pattern format
+log4j.appender.DRFA.layout.ConversionPattern=%d %-5p [%t] %C{2}(%L): %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this 
+#
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d %-5p [%t] %C{2}(%L): %m%n
+
+# Custom Logging levels
+
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+
+#log4j.logger.org.apache.hadoop=WARN
+
+log4j.logger.org.apache.hadoop.hbase.PerformanceEvaluation=WARN
+log4j.logger.org.apache.hadoop.hbase.client=DEBUG
+log4j.logger.org.apache.hadoop.hbase.filter=INFO
+log4j.logger.org.apache.hadoop.hbase.generated=INFO
+log4j.logger.org.apache.hadoop.hbase.hql=INFO
+log4j.logger.org.apache.hadoop.hbase.io=INFO
+log4j.logger.org.apache.hadoop.hbase.ipc=INFO
+log4j.logger.org.apache.hadoop.hbase.mapred=INFO
+log4j.logger.org.apache.hadoop.hbase.master=DEBUG
+log4j.logger.org.apache.hadoop.hbase.regionserver=DEBUG
+log4j.logger.org.apache.hadoop.hbase.rest=INFO
+log4j.logger.org.apache.hadoop.hbase.thrift=INFO
+log4j.logger.org.apache.hadoop.hbase.util=INFO
+
+log4j.logger.org.apache.hadoop.mapred=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG

Modified: 
hadoop/hbas

svn commit: r655907 - /hadoop/hbase/trunk/src/test/log4j.properties

2008-05-13 Thread jimk
Author: jimk
Date: Tue May 13 08:18:59 2008
New Revision: 655907

URL: http://svn.apache.org/viewvc?rev=655907&view=rev
Log:
HBASE-622 Remove StaticTestEnvironment and put a log4j.properties in src/test

Set logging level for Hadoop to WARN

Modified:
hadoop/hbase/trunk/src/test/log4j.properties

Modified: hadoop/hbase/trunk/src/test/log4j.properties
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/log4j.properties?rev=655907&r1=655906&r2=655907&view=diff
==
--- hadoop/hbase/trunk/src/test/log4j.properties (original)
+++ hadoop/hbase/trunk/src/test/log4j.properties Tue May 13 08:18:59 2008
@@ -42,7 +42,7 @@
 
 #log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
 
-#log4j.logger.org.apache.hadoop=WARN
+log4j.logger.org.apache.hadoop=WARN
 
 log4j.logger.org.apache.hadoop.hbase.PerformanceEvaluation=WARN
 log4j.logger.org.apache.hadoop.hbase.client=DEBUG




svn commit: r655962 - in /hadoop/hbase/trunk: CHANGES.txt src/java/org/apache/hadoop/hbase/master/HMaster.java src/java/org/apache/hadoop/hbase/master/ServerManager.java src/java/org/apache/hadoop/hba

2008-05-13 Thread jimk
Author: jimk
Date: Tue May 13 11:25:31 2008
New Revision: 655962

URL: http://svn.apache.org/viewvc?rev=655962&view=rev
Log:
HBASE-624   Master will shut down if number of active region servers is zero 
even if shutdown was not requested

M HMaster

- Moved HMaster.quiescedMetaServers to ServerManager.quiescedServers and 
changed name since only servers serving user regions get quiesced.
- Removed HMaster.tableInCreation - not used

M ServerManager

- Don't check if quiescedServers.get() >= serversToServerInfo.size() unless 
master.shutdownRequested is true.

M HRegionServer

- Change order of checks in main loop of HRegionServer.run, so that booleans 
are checked before we check the number of messages to process
- Don't break out of main loop if restart or stop requested - stop is checked 
at top of loop and we need to continue rather than break out of main loop if 
restart was requested


Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ServerManager.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=655962&r1=655961&r2=655962&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Tue May 13 11:25:31 2008
@@ -37,6 +37,8 @@
HBASE-453   undeclared throwable exception from HTable.get
HBASE-620   testmergetool failing in branch and trunk since hbase-618 went 
in
HBASE-622   Remove StaticTestEnvironment and put a log4j.properties in 
src/test
+   HBASE-624   Master will shut down if number of active region servers is zero
+   even if shutdown was not requested
 
   IMPROVEMENTS
HBASE-559   MR example job to count table rows

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java?rev=655962&r1=655961&r2=655962&view=diff
==
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java 
(original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java Tue 
May 13 11:25:31 2008
@@ -21,18 +21,14 @@
 
 import java.io.IOException;
 import java.lang.reflect.Constructor;
-import java.util.Collections;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
-import java.util.Set;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.DelayQueue;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -104,7 +100,6 @@
   // hosting class
   volatile AtomicBoolean closed = new AtomicBoolean(true);
   volatile boolean shutdownRequested = false;
-  volatile AtomicInteger quiescedMetaServers = new AtomicInteger(0);
   volatile boolean fsOk = true;
   final Path rootdir;
   final HBaseConfiguration conf;
@@ -142,10 +137,6 @@
 return infoServer;
   }
 
-  /** Set of tables currently in creation. */
-  private volatile Set tableInCreation = 
-Collections.synchronizedSet(new HashSet());
-
   ServerManager serverManager;
   RegionManager regionManager;
   

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ServerManager.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ServerManager.java?rev=655962&r1=655961&r2=655962&view=diff
==
--- 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ServerManager.java 
(original)
+++ 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ServerManager.java 
Tue May 13 11:25:31 2008
@@ -28,6 +28,7 @@
 import java.util.HashSet;
 import java.util.ArrayList;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.Collections;
 
 import org.apache.commons.logging.Log;
@@ -50,6 +51,8 @@
 class ServerManager implements HConstants {
   static final Log LOG = LogFactory.getLog(ServerManager.class.getName());
   
+  private final AtomicInteger quiescedServers = new AtomicInteger(0);
+
   /** The map of known server names to server info */
   final Map serversToServerInfo =
 new ConcurrentHashMap();
@@ -161,25 +164,27 @@
 return new HMsg[0];
   } else if (msgs[0].getMsg() == HMsg.MSG_REPORT_QUIESCED) {
   

svn commit: r656341 - in /hadoop/hbase/trunk: ./ src/java/org/apache/hadoop/hbase/client/

2008-05-14 Thread jimk
Author: jimk
Date: Wed May 14 11:07:03 2008
New Revision: 656341

URL: http://svn.apache.org/viewvc?rev=656341&view=rev
Log:
HBASE-538   Improve exceptions that come out on client-side

Added:

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/ScannerCallable.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/ServerCallable.java
Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnection.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTable.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/RegionOfflineException.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/Scanner.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=656341&r1=656340&r2=656341&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Wed May 14 11:07:03 2008
@@ -53,6 +53,7 @@
HBASE-611   regionserver should do basic health check before reporting
alls-well to the master
HBASE-614   Retiring regions is not used; exploit or remove
+   HBASE-538   Improve exceptions that come out on client-side

 Release 0.1.1 - 04/11/2008
 

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnection.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnection.java?rev=656341&r1=656340&r2=656341&view=diff
==
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnection.java 
(original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnection.java 
Wed May 14 11:07:03 2008
@@ -20,7 +20,6 @@
 package org.apache.hadoop.hbase.client;
 
 import java.io.IOException;
-import java.util.SortedMap;
 
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hbase.ipc.HMasterInterface;
@@ -69,6 +68,7 @@
* @param row row key you're trying to find the region of
* @return HRegionLocation that describes where to find the reigon in 
* question
+   * @throws IOException
*/
   public HRegionLocation locateRegion(Text tableName, Text row)
   throws IOException;
@@ -80,6 +80,7 @@
* @param row row key you're trying to find the region of
* @return HRegionLocation that describes where to find the reigon in 
* question
+   * @throws IOException
*/
   public HRegionLocation relocateRegion(Text tableName, Text row)
   throws IOException;  
@@ -92,4 +93,29 @@
*/
   public HRegionInterface getHRegionConnection(HServerAddress regionServer)
   throws IOException;
-}
+  
+  /**
+   * Find region location hosting passed row
+   * @param tableName
+   * @param row Row to find.
+   * @param reload If true do not use cache, otherwise bypass.
+   * @return Location of row.
+   * @throws IOException
+   */
+  HRegionLocation getRegionLocation(Text tableName, Text row, boolean reload)
+  throws IOException;
+
+  /**
+   * Pass in a ServerCallable with your particular bit of logic defined and 
+   * this method will manage the process of doing retries with timed waits 
+   * and refinds of missing regions.
+   *
+   * @param <T> the type of the return value
+   * @param callable
+   * @return an object of type T
+   * @throws IOException
+   * @throws RuntimeException
+   */
+  public <T> T getRegionServerWithRetries(ServerCallable<T> callable) 
+  throws IOException, RuntimeException;
+}
\ No newline at end of file

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java?rev=656341&r1=656340&r2=656341&view=diff
==
--- 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
 (original)
+++ 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
 Wed May 14 11:07:03 2008
@@ -20,9 +20,12 @@
 package org.apache.hadoop.hbase.client;
 
 import java.io.IOException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 
@@ -240,62 +243,46 @@
 }
 
 /** {@inheritDoc} */
+public HRegionLocation getRegionLocation(Text tableName, Text row,
+boolean reload) throws IOException {
+  return

svn commit: r657226 - in /hadoop/hbase/trunk: ./ src/java/org/apache/hadoop/hbase/regionserver/ src/test/org/apache/hadoop/hbase/regionserver/

2008-05-16 Thread jimk
Author: jimk
Date: Fri May 16 14:45:11 2008
New Revision: 657226

URL: http://svn.apache.org/viewvc?rev=657226&view=rev
Log:
HBASE-629   Split reports incorrect elapsed time

Removed:

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/RegionUnavailableListener.java
Modified:
hadoop/hbase/trunk/CHANGES.txt

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHRegion.java

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestSplit.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=657226&r1=657225&r2=657226&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Fri May 16 14:45:11 2008
@@ -14,6 +14,7 @@
HBASE-622   Remove StaticTestEnvironment and put a log4j.properties in 
src/test
HBASE-624   Master will shut down if number of active region servers is zero
even if shutdown was not requested
+   HBASE-629   Split reports incorrect elapsed time
 
   IMPROVEMENTS
HBASE-559   MR example job to count table rows

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java?rev=657226&r1=657225&r2=657226&view=diff
==
--- 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
 (original)
+++ 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
 Fri May 16 14:45:11 2008
@@ -41,13 +41,11 @@
 /** 
  * Compact region on request and then run split if appropriate
  */
-class CompactSplitThread extends Thread 
-implements RegionUnavailableListener, HConstants {
+class CompactSplitThread extends Thread implements HConstants {
   static final Log LOG = LogFactory.getLog(CompactSplitThread.class);
   
   private HTable root = null;
   private HTable meta = null;
-  private volatile long startTime;
   private final long frequency;
   private final ReentrantLock lock = new ReentrantLock();
   
@@ -132,7 +130,8 @@
   private void split(final HRegion region, final byte [] midKey)
   throws IOException {
 final HRegionInfo oldRegionInfo = region.getRegionInfo();
-final HRegion[] newRegions = region.splitRegion(this, midKey);
+final long startTime = System.currentTimeMillis();
+final HRegion[] newRegions = region.splitRegion(midKey);
 if (newRegions == null) {
   // Didn't need to be split
   return;
@@ -190,16 +189,6 @@
 // Do not serve the new regions. Let the Master assign them.
   }
   
-  /** {@inheritDoc} */
-  public void closing(@SuppressWarnings("unused") final byte [] regionName) {
-// continue
-  }
-  
-  /** {@inheritDoc} */
-  public void closed(@SuppressWarnings("unused") final byte [] regionName) {
-// continue
-  }
-
   /**
* Only interrupt once it's done with a run through the work loop.
*/ 

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=657226&r1=657225&r2=657226&view=diff
==
--- 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
(original)
+++ 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
Fri May 16 14:45:11 2008
@@ -357,7 +357,7 @@
 new ReentrantReadWriteLock();
   private final Integer splitLock = new Integer(0);
   private final long minSequenceId;
-  private final AtomicInteger activeScannerCount = new AtomicInteger(0);
+  final AtomicInteger activeScannerCount = new AtomicInteger(0);
 
   
//
   // Constructor
@@ -525,7 +525,7 @@
* @throws IOException
*/
   public List close() throws IOException {
-return close(false, null);
+return close(false);
   }
   
   /**
@@ -536,15 +536,13 @@
* time-sensitive thread.
* 
* @param abort true if server is aborting (only during testing)
-   * @param listener call back to alert caller on close status
* @return Vector of all the storage files that the HRegion's component 
* HStores make use of.  It's a list of HStoreFile objects.  Can be null if
* we are not to close at this time or we a

svn commit: r659245 - in /hadoop/hbase/trunk: ./ lib/ src/java/org/apache/hadoop/hbase/filter/ src/java/org/apache/hadoop/hbase/mapred/ src/java/org/apache/hadoop/hbase/master/ src/java/org/apache/had

2008-05-22 Thread jimk
Author: jimk
Date: Thu May 22 13:32:25 2008
New Revision: 659245

URL: http://svn.apache.org/viewvc?rev=659245&view=rev
Log:
HBASE-589 Remove references to deprecated methods in Hadoop once hadoop-0.17.0 
is released
HBASE-579 Add hadoop 0.17.0

Added:
hadoop/hbase/trunk/lib/hadoop-0.17.0-core.jar   (with props)
hadoop/hbase/trunk/lib/hadoop-0.17.0-test.jar   (with props)
Removed:
hadoop/hbase/trunk/lib/hadoop-0.17.0-dev-2008.04.04-13.34.00-core.jar
hadoop/hbase/trunk/lib/hadoop-0.17.0-dev-2008.04.04-13.34.00-test.jar

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/DisabledTestScanner2.java
Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/build.xml

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RegExpRowFilter.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/BuildTableIndex.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/IndexOutputFormat.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableMap.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/DeleteColumn.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/TableDelete.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLog.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/HBaseClusterTestCase.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/HBaseTestCase.java

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MapFilePerformanceEvaluation.java

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/mapred/TestTableIndex.java

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHLog.java

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHRegion.java

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMigrate.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=659245&r1=659244&r2=659245&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Thu May 22 13:32:25 2008
@@ -17,6 +17,8 @@
HBASE-629   Split reports incorrect elapsed time
HBASE-623   Migration script for hbase-82
HBASE-630   Default hbase.rootdir is garbage
+   HBASE-589   Remove references to deprecated methods in Hadoop once
+   hadoop-0.17.0 is released
 
   IMPROVEMENTS
HBASE-559   MR example job to count table rows
@@ -48,6 +50,7 @@
HBASE-583   RangeRowFilter/ColumnValueFilter to allow choice of rows based 
on
a (lexicographic) comparison to column's values
(Clint Morgan via Stack)
+   HBASE-579   Add hadoop 0.17.x
 
 Release 0.1.2 - 05/13/2008


Modified: hadoop/hbase/trunk/build.xml
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/build.xml?rev=659245&r1=659244&r2=659245&view=diff
==
--- hadoop/hbase/trunk/build.xml (original)
+++ hadoop/hbase/trunk/build.xml Thu May 22 13:32:25 2008
@@ -355,7 +355,8 @@
srcdir="${src.test}" 
includes="**/*.java" 
destdir="${build.test}" 
-   debug="${javac.debug}"> 
+   debug="${javac.debug}"
+   deprecation="${javac.deprecation}"> 
  
 
 

Added: hadoop/hbase/trunk/lib/hadoop-0.17.0-core.jar
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/lib/hadoop-0.17.0-core.jar?rev=659245&view=auto
==
Binary file - no diff available.

Propchange: hadoop/hbase/trunk/lib/hadoop-0.17.0-core.jar
--
svn:mime-type = application/octet-stream

Added: hadoop/hbase/trunk/lib/hadoop-0.17.0-test.jar
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/lib/hadoop-0.17.0-test.jar?rev=659245&view=auto
==
Binary file - no diff available.

Propchange: hadoop/hbase/trunk/lib/hadoop-0.17.0-test.jar
---

svn commit: r662862 - in /hadoop/hbase/branches/0.1: CHANGES.txt src/java/org/apache/hadoop/hbase/HRegion.java

2008-06-03 Thread jimk
Author: jimk
Date: Tue Jun  3 12:25:08 2008
New Revision: 662862

URL: http://svn.apache.org/viewvc?rev=662862&view=rev
Log:
HBASE-663   Incorrect sequence number for cache flush

Modified:
hadoop/hbase/branches/0.1/CHANGES.txt
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java

Modified: hadoop/hbase/branches/0.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/CHANGES.txt?rev=662862&r1=662861&r2=662862&view=diff
==
--- hadoop/hbase/branches/0.1/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.1/CHANGES.txt Tue Jun  3 12:25:08 2008
@@ -9,6 +9,7 @@
HBASE-646   EOFException opening HStoreFile info file (spin on HBASE-645 
and 550)
HBASE-648   If mapfile index is empty, run repair
HBASE-659   HLog#cacheFlushLock not cleared; hangs a region
+   HBASE-663   Incorrect sequence number for cache flush
 
 Release 0.1.2 - 05/13/2008
 

Modified: 
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java?rev=662862&r1=662861&r2=662862&view=diff
==
--- hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java 
(original)
+++ hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java Tue 
Jun  3 12:25:08 2008
@@ -1078,15 +1078,16 @@
 // to do this for a moment.  Its quick.  The subsequent sequence id that
 // goes into the HLog after we've flushed all these snapshots also goes
 // into the info file that sits beside the flushed files.
+long sequenceId = -1L;
 updateLock.writeLock().lock();
 try {
   for (HStore s: stores.values()) {
 s.memcache.snapshot();
   }
+  sequenceId = log.startCacheFlush();
 } finally {
   updateLock.writeLock().unlock();
 }
-long sequenceId = log.startCacheFlush();
 
 // Any failure from here on out will be catastrophic requiring server
 // restart so hlog content can be replayed and put back into the memcache.




svn commit: r662865 - in /hadoop/hbase/trunk: CHANGES.txt src/java/org/apache/hadoop/hbase/regionserver/HRegion.java

2008-06-03 Thread jimk
Author: jimk
Date: Tue Jun  3 12:27:30 2008
New Revision: 662865

URL: http://svn.apache.org/viewvc?rev=662865&view=rev
Log:
HBASE-663   Incorrect sequence number for cache flush

Modified:
hadoop/hbase/trunk/CHANGES.txt

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=662865&r1=662864&r2=662865&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Tue Jun  3 12:27:30 2008
@@ -35,7 +35,8 @@
HBASE-650   Add String versions of get, scanner, put in HTable
HBASE-656   Do not retry exceptions such as unknown scanner or illegal 
argument
HBASE-659   HLog#cacheFlushLock not cleared; hangs a region
-
+   HBASE-663   Incorrect sequence number for cache flush
+   
   IMPROVEMENTS
HBASE-559   MR example job to count table rows
HBASE-596   DemoClient.py (Ivan Begtin via Stack)

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=662865&r1=662864&r2=662865&view=diff
==
--- 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
(original)
+++ 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
Tue Jun  3 12:27:30 2008
@@ -980,15 +980,16 @@
 // to do this for a moment.  Its quick.  The subsequent sequence id that
 // goes into the HLog after we've flushed all these snapshots also goes
 // into the info file that sits beside the flushed files.
+long sequenceId = -1L;
 this.updatesLock.writeLock().lock();
 try {
   for (HStore s: stores.values()) {
 s.snapshot();
   }
+  sequenceId = log.startCacheFlush();
 } finally {
   this.updatesLock.writeLock().unlock();
 }
-long sequenceId = log.startCacheFlush();
 
 // Any failure from here on out will be catastrophic requiring server
 // restart so hlog content can be replayed and put back into the memcache.




svn commit: r662905 - in /hadoop/hbase/trunk: CHANGES.txt src/java/org/apache/hadoop/hbase/client/HTable.java src/java/org/apache/hadoop/hbase/master/HMaster.java src/java/org/apache/hadoop/hbase/regi

2008-06-03 Thread jimk
Author: jimk
Date: Tue Jun  3 15:16:43 2008
New Revision: 662905

URL: http://svn.apache.org/viewvc?rev=662905&view=rev
Log:
HBASE-655   Need programmatic way to add column family: need programmatic way 
to enable/disable table

Added HTable.isTableOnline and HTable.isTableOffline

Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTable.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=662905&r1=662904&r2=662905&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Tue Jun  3 15:16:43 2008
@@ -36,6 +36,8 @@
HBASE-656   Do not retry exceptions such as unknown scanner or illegal 
argument
HBASE-659   HLog#cacheFlushLock not cleared; hangs a region
HBASE-663   Incorrect sequence number for cache flush
+   HBASE-655   Need programmatic way to add column family: need programmatic 
way
+   to enable/disable table

   IMPROVEMENTS
HBASE-559   MR example job to count table rows

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTable.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTable.java?rev=662905&r1=662904&r2=662905&view=diff
==
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTable.java 
(original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTable.java Tue 
Jun  3 15:16:43 2008
@@ -42,13 +42,13 @@
 import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.io.RowResult;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.io.Text;
 
 /**
  * Used to communicate with a single HBase table
  */
 public class HTable {
-  private final Log LOG = LogFactory.getLog(this.getClass());
   private final HConnection connection;
   private final byte [] tableName;
   private HBaseConfiguration configuration;
@@ -126,6 +126,174 @@
   }
 
   /**
+   * @param tableName name of table to check
+   * @return true if table is on-line
+   * @throws IOException
+   */
+  public static boolean isTableOnline(Text tableName) throws IOException {
+return isTableOnline(tableName.getBytes());
+  }
+  /**
+   * @param tableName name of table to check
+   * @return true if table is on-line
+   * @throws IOException
+   */
+  public static boolean isTableOnline(String tableName) throws IOException {
+return isTableOnline(Bytes.toBytes(tableName));
+  }
+  /**
+   * @param tableName name of table to check
+   * @return true if table is on-line
+   * @throws IOException
+   */
+  public static boolean isTableOnline(byte[] tableName) throws IOException {
+return isTableOnline(new HBaseConfiguration(), tableName);
+  }
+  
+  /**
+   * @param conf HBaseConfiguration object
+   * @param tableName name of table to check
+   * @return true if table is on-line
+   * @throws IOException
+   */
+  public static boolean isTableOnline(HBaseConfiguration conf, Text tableName)
+  throws IOException {
+return isTableOnline(conf, tableName.getBytes());
+  }
+  
+  /**
+   * @param conf HBaseConfiguration object
+   * @param tableName name of table to check
+   * @return true if table is on-line
+   * @throws IOException
+   */
+  public static boolean isTableOnline(HBaseConfiguration conf, String 
tableName)
+  throws IOException {
+return isTableOnline(conf, Bytes.toBytes(tableName));
+  }
+
+  /**
+   * @param conf HBaseConfiguration object
+   * @param tableName name of table to check
+   * @return true if table is on-line
+   * @throws IOException
+   */
+  public static boolean isTableOnline(HBaseConfiguration conf, byte[] 
tableName)
+  throws IOException {
+boolean online = true;
+if (Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME)) {
+  // The root region is always on-line
+  return true;
+}
+HTable meta = new HTable(conf,
+Bytes.equals(tableName, HConstants.META_TABLE_NAME) ?
+HConstants.ROOT_TABLE_NAME : HConstants.META_TABLE_NAME);
+Scanner s = meta.getScanner(HConstants.COL_REGIONINFO_ARRAY,
+HRegionInfo.createRegionName(tableName, null, HConstants.NINES));
+try {
+  RowResult r = null;
+  while ((r = s.next()) != null) {
+Cell c = r.get(HConstants.COL_REGIONINFO);
+if (c != null) {
+  HRegionInfo info = Writables.getHRegionInfoOrNull(c.getValue());
+  if (info != null) {
+if (info.isOffline()) {
+  online = false;
+  break;
+}
+  }
+}
+  }
+  

svn commit: r662975 - in /hadoop/hbase/trunk: ./ src/java/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/hbase/client/ src/test/org/apache/hadoop/hbase/client/

2008-06-03 Thread jimk
Author: jimk
Date: Tue Jun  3 21:54:47 2008
New Revision: 662975

URL: http://svn.apache.org/viewvc?rev=662975&view=rev
Log:
HBASE-639   Add HBaseAdmin.getTableDescriptor function
HBASE-632   HTable.getMetadata is very inefficient
HBASE-654   API HTable.getMetadata().addFamily shouldn't be exposed to user

Added:

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java
Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConstants.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HTableDescriptor.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnection.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTable.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/MetaScanner.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestHTable.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=662975&r1=662974&r2=662975&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Tue Jun  3 21:54:47 2008
@@ -38,6 +38,7 @@
HBASE-663   Incorrect sequence number for cache flush
HBASE-655   Need programmatic way to add column family: need programmatic 
way
to enable/disable table
+   HBASE-654   API HTable.getMetadata().addFamily shouldn't be exposed to user

   IMPROVEMENTS
HBASE-559   MR example job to count table rows
@@ -71,7 +72,11 @@
(Clint Morgan via Stack)
HBASE-579   Add hadoop 0.17.x
HBASE-660   [Migration] addColumn/deleteColumn functionality in MetaUtils
+   HBASE-632   HTable.getMetadata is very inefficient
 
+  NEW FEATURES
+   HBASE-639   Add HBaseAdmin.getTableDescriptor function
+   
 Release 0.1.2 - 05/13/2008

   BUG FIXES

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConstants.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConstants.java?rev=662975&r1=662974&r2=662975&view=diff
==
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConstants.java 
(original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConstants.java Tue Jun 
 3 21:54:47 2008
@@ -31,6 +31,7 @@
   static final Long ZERO_L = Long.valueOf(0L);
   
   static final String NINES = "99";
+  static final String ZEROES = "00";
   
   // For migration
 

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HTableDescriptor.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HTableDescriptor.java?rev=662975&r1=662974&r2=662975&view=diff
==
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HTableDescriptor.java 
(original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HTableDescriptor.java 
Tue Jun  3 21:54:47 2008
@@ -182,6 +182,7 @@
* descriptors.
* @see #getNameAsString()
*/
+  @Override
   public String toString() {
 return "name: " + Bytes.toString(this.name) + ", families: " +
   this.families.values();

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnection.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnection.java?rev=662975&r1=662974&r2=662975&view=diff
==
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnection.java 
(original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnection.java 
Tue Jun  3 21:54:47 2008
@@ -61,6 +61,14 @@
   public HTableDescriptor[] listTables() throws IOException;
   
   /**
+   * @param tableName
+   * @return table metadata 
+   * @throws IOException
+   */
+  public HTableDescriptor getHTableDescriptor(byte[] tableName)
+  throws IOException;
+  
+  /**
* Find the location of the region of tableName that row
* lives in.
* @param tableName name of the table row is in

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java?rev=662975&r1=662974&r2=662975&view=diff
==
--- 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnectionMan

svn commit: r663350 - in /hadoop/hbase/trunk: CHANGES.txt src/java/org/apache/hadoop/hbase/HRegionInfo.java src/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java src/java/org/apache/had

2008-06-04 Thread jimk
Author: jimk
Date: Wed Jun  4 12:24:40 2008
New Revision: 663350

URL: http://svn.apache.org/viewvc?rev=663350&view=rev
Log:
HBASE-666   UnmodifyableHRegionInfo gives the wrong encoded name

Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionInfo.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=663350&r1=663349&r2=663350&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Wed Jun  4 12:24:40 2008
@@ -39,6 +39,7 @@
HBASE-655   Need programmatic way to add column family: need programmatic 
way
to enable/disable table
HBASE-654   API HTable.getMetadata().addFamily shouldn't be exposed to user
+   HBASE-666   UnmodifyableHRegionInfo gives the wrong encoded name

   IMPROVEMENTS
HBASE-559   MR example job to count table rows

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionInfo.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionInfo.java?rev=663350&r1=663349&r2=663350&view=diff
==
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionInfo.java 
(original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionInfo.java Wed 
Jun  4 12:24:40 2008
@@ -82,7 +82,7 @@
   private String regionNameStr = "";
   private boolean split = false;
   private byte [] startKey = HConstants.EMPTY_BYTE_ARRAY;
-  private HTableDescriptor tableDesc = null;
+  protected HTableDescriptor tableDesc = null;
   private int hashCode = -1;
   public static final int NO_HASH = -1;
   private volatile int encodedName = NO_HASH;
@@ -156,6 +156,24 @@
 setHashCode();
   }
   
+  /**
+   * Construct a copy of another HRegionInfo
+   * 
+   * @param other
+   */
+  public HRegionInfo(HRegionInfo other) {
+this.endKey = other.getEndKey();
+this.offLine = other.isOffline();
+this.regionId = other.getRegionId();
+this.regionName = other.getRegionName();
+this.regionNameStr = Bytes.toString(this.regionName);
+this.split = other.isSplit();
+this.startKey = other.getStartKey();
+this.tableDesc = other.getTableDesc();
+this.hashCode = other.hashCode();
+this.encodedName = other.getEncodedName();
+  }
+  
   private static byte [] createRegionName(final byte [] tableName,
   final byte [] startKey, final long regionid) {
 return createRegionName(tableName, startKey, Long.toString(regionid));

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java?rev=663350&r1=663349&r2=663350&view=diff
==
--- 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java
 (original)
+++ 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java
 Wed Jun  4 12:24:40 2008
@@ -24,49 +24,14 @@
 import org.apache.hadoop.hbase.HTableDescriptor;
 
 class UnmodifyableHRegionInfo extends HRegionInfo {
-  /* Default constructor - creates empty object */
-  UnmodifyableHRegionInfo() {
-super(new UnmodifyableHTableDescriptor(), null, null);
-  }
-  
-  /*
-   * Construct HRegionInfo with explicit parameters
-   * 
-   * @param tableDesc the table descriptor
-   * @param startKey first key in region
-   * @param endKey end of key range
-   * @throws IllegalArgumentException
-   */
-  UnmodifyableHRegionInfo(final HTableDescriptor tableDesc,
-  final byte [] startKey, final byte [] endKey)
-  throws IllegalArgumentException {
-super(new UnmodifyableHTableDescriptor(tableDesc), startKey, endKey, 
false);
-  }
-
-  /*
-   * Construct HRegionInfo with explicit parameters
-   * 
-   * @param tableDesc the table descriptor
-   * @param startKey first key in region
-   * @param endKey end of key range
-   * @param split true if this region has split and we have daughter regions
-   * regions that may or may not hold references to this region.
-   * @throws IllegalArgumentException
-   */
-  UnmodifyableHRegionInfo(HTableDescriptor tableDesc,
-  final byte [] startKey, final byte [] endKey, final boolean split)
-  throws IllegalArgumentException {
-super(new UnmodifyableHTableDescriptor(tableDesc), startKey, endKey, 
split);
-  }
-  
   /*
* Creates an unmodifyable copy of an HRegionInfo
* 
* @param info
*/
   Unm

svn commit: r668262 - in /hadoop/hbase/branches/0.1: CHANGES.txt src/java/org/apache/hadoop/hbase/HStore.java

2008-06-16 Thread jimk
Author: jimk
Date: Mon Jun 16 11:55:05 2008
New Revision: 668262

URL: http://svn.apache.org/viewvc?rev=668262&view=rev
Log:
HBASE-681   NPE in Memcache (Clint Morgan via Jim Kellerman)

Modified:
hadoop/hbase/branches/0.1/CHANGES.txt
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStore.java

Modified: hadoop/hbase/branches/0.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/CHANGES.txt?rev=668262&r1=668261&r2=668262&view=diff
==
--- hadoop/hbase/branches/0.1/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.1/CHANGES.txt Mon Jun 16 11:55:05 2008
@@ -15,6 +15,7 @@
HBASE-652   Dropping table fails silently if table isn't disabled 
HBASE-674   Memcache size unreliable
HBASE-665   server side scanner doesn't honor stop row
+   HBASE-681   NPE in Memcache (Clint Morgan via Jim Kellerman)
 
 Release 0.1.2 - 05/13/2008
 

Modified: hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStore.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStore.java?rev=668262&r1=668261&r2=668262&view=diff
==
--- hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStore.java 
(original)
+++ hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStore.java Mon 
Jun 16 11:55:05 2008
@@ -533,7 +533,7 @@
 HStoreKey key = es.getKey();
 
 // if there's no column name, then compare rows and timestamps
-if (origin.getColumn().toString().equals("")) {
+if (origin.getColumn() == null || origin.getColumn().getLength() == 0) 
{
   // if the current and origin row don't match, then we can jump
   // out of the loop entirely.
   if (!key.getRow().equals(origin.getRow())) {




svn commit: r668750 - in /hadoop/hbase/branches/0.1: CHANGES.txt src/java/org/apache/hadoop/hbase/HStore.java src/test/org/apache/hadoop/hbase/TestHMemcache.java

2008-06-17 Thread jimk
Author: jimk
Date: Tue Jun 17 10:11:31 2008
New Revision: 668750

URL: http://svn.apache.org/viewvc?rev=668750&view=rev
Log:
HBASE-686   MemcacheScanner didn't return the first row(if it exists), because 
HScannerInterface's output incorrect (LN via Jim Kellerman)

Modified:
hadoop/hbase/branches/0.1/CHANGES.txt
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStore.java

hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/TestHMemcache.java

Modified: hadoop/hbase/branches/0.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/CHANGES.txt?rev=668750&r1=668749&r2=668750&view=diff
==
--- hadoop/hbase/branches/0.1/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.1/CHANGES.txt Tue Jun 17 10:11:31 2008
@@ -21,6 +21,8 @@
(LN via Stack)
HBASE-684   unnecessary iteration in HMemcache.internalGet? got much better
reading performance after break it (LN via Stack)
+   HBASE-686   MemcacheScanner didn't return the first row(if it exists),
+   because HScannerInterface's output incorrect (LN via Jim 
Kellerman)
 
 
 Release 0.1.2 - 05/13/2008

Modified: hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStore.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStore.java?rev=668750&r1=668749&r2=668750&view=diff
==
--- hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStore.java 
(original)
+++ hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStore.java Tue 
Jun 17 10:11:31 2008
@@ -631,8 +631,7 @@
if (results.size() > 0) {
  results.clear();
}
-   while (results.size() <= 0 &&
-   (this.currentRow = getNextRow(this.currentRow)) != null) {
+   while (results.size() <= 0 && this.currentRow != null) {
  if (deletes.size() > 0) {
deletes.clear();
  }
@@ -661,6 +660,7 @@
}
results.put(column, c);
  }
+ this.currentRow = getNextRow(this.currentRow);
}
return results.size() > 0;
  }

Modified: 
hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/TestHMemcache.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/TestHMemcache.java?rev=668750&r1=668749&r2=668750&view=diff
==
--- 
hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/TestHMemcache.java 
(original)
+++ 
hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/TestHMemcache.java 
Tue Jun 17 10:11:31 2008
@@ -196,7 +196,7 @@
   }
   
   /** For HBASE-514 **/
-  public void testGetRowKeyAtOrBefore() throws IOException {
+  public void testGetRowKeyAtOrBefore() {
 // set up some test data
 Text t10 = new Text("010");
 Text t20 = new Text("020");
@@ -240,4 +240,40 @@
   private HStoreKey getHSKForRow(Text row) {
 return new HStoreKey(row, new Text("test_col:"), 
HConstants.LATEST_TIMESTAMP);
   }
+
+  /**
+   * Test memcache scanner scanning cached rows, HBASE-686
+   * @throws IOException
+   */
+  public void testScanner_686() throws IOException {
+addRows(this.hmemcache);
+long timestamp = System.currentTimeMillis();
+Text[] cols = new Text[COLUMNS_COUNT * ROW_COUNT];
+for (int i = 0; i < ROW_COUNT; i++) {
+  for (int ii = 0; ii < COLUMNS_COUNT; ii++) {
+cols[(ii + (i * COLUMNS_COUNT))] = getColumnName(i, ii);
+  }
+}
+//starting from each row, validate results should contain the starting row
+for (int startRowId = 0; startRowId < ROW_COUNT; startRowId++) {
+  HInternalScannerInterface scanner = this.hmemcache.getScanner(timestamp,
+  cols, new Text(getRowName(startRowId)));
+  HStoreKey key = new HStoreKey();
+  TreeMap results = new TreeMap();
+  for (int i = 0; scanner.next(key, results); i++) {
+int rowId = startRowId + i;
+assertTrue("Row name",
+key.toString().startsWith(getRowName(rowId).toString()));
+assertEquals("Count of columns", COLUMNS_COUNT, results.size());
+TreeMap row = new TreeMap();
+for (Map.Entry e : results.entrySet()) {
+  row.put(e.getKey(), e.getValue());
+}
+isExpectedRow(rowId, row);
+// Clear out set.  Otherwise row results accumulate.
+results.clear();
+  }
+}
+  }
+
 }
\ No newline at end of file




svn commit: r668822 - in /hadoop/hbase/trunk: CHANGES.txt src/java/org/apache/hadoop/hbase/regionserver/Memcache.java src/test/org/apache/hadoop/hbase/regionserver/TestHMemcache.java

2008-06-17 Thread jimk
Author: jimk
Date: Tue Jun 17 13:49:10 2008
New Revision: 668822

URL: http://svn.apache.org/viewvc?rev=668822&view=rev
Log:
HBASE-686   MemcacheScanner didn't return the first row(if it exists), because 
HScannerInterface's output incorrect (LN via Jim Kellerman)

Modified:
hadoop/hbase/trunk/CHANGES.txt

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHMemcache.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=668822&r1=668821&r2=668822&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Tue Jun 17 13:49:10 2008
@@ -52,6 +52,8 @@
(LN via Stack)
HBASE-682   Unnecessary iteration in HMemcache.internalGet? got much better
reading performance after break it (LN via Stack)
+   HBASE-686   MemcacheScanner didn't return the first row(if it exists),
+   because HScannerInterface's output incorrect (LN via Jim 
Kellerman)

   IMPROVEMENTS
HBASE-559   MR example job to count table rows

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java?rev=668822&r1=668821&r2=668822&view=diff
==
--- 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java 
(original)
+++ 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java 
Tue Jun 17 13:49:10 2008
@@ -421,8 +421,7 @@
   // take note of the row of this candidate so that we'll know when
   // we cross the row boundary into the previous row.
   if (!HLogEdit.isDeleted(headMap.get(thisKey))) {
-if (ttl == HConstants.FOREVER ||
-now < found_key.getTimestamp() + ttl) {
+if (ttl == HConstants.FOREVER) {
   lastRowFound = thisKey.getRow();
   candidateKeys.put(stripTimestamp(thisKey), 
 new Long(thisKey.getTimestamp()));
@@ -698,8 +697,7 @@
   if (results.size() > 0) {
 results.clear();
   }
-  while (results.size() <= 0 &&
-  (this.currentRow = getNextRow(this.currentRow)) != null) {
+  while (results.size() <= 0 && this.currentRow != null) {
 if (deletes.size() > 0) {
   deletes.clear();
 }
@@ -727,6 +725,8 @@
   }
   results.put(column, c.getValue());
 }
+this.currentRow = getNextRow(this.currentRow);
+
   }
   return results.size() > 0;
 }

Modified: 
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHMemcache.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHMemcache.java?rev=668822&r1=668821&r2=668822&view=diff
==
--- 
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHMemcache.java
 (original)
+++ 
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHMemcache.java
 Tue Jun 17 13:49:10 2008
@@ -304,4 +304,42 @@
   private HStoreKey getHSKForRow(byte [] row) {
 return new HStoreKey(row, Bytes.toBytes("test_col:"), 
HConstants.LATEST_TIMESTAMP);
   }
+
+  /**
+   * Test memcache scanner scanning cached rows, HBASE-686
+   * @throws IOException
+   */
+  public void testScanner_686() throws IOException {
+addRows(this.hmemcache);
+long timestamp = System.currentTimeMillis();
+byte[][] cols = new byte[COLUMNS_COUNT * ROW_COUNT][];
+for (int i = 0; i < ROW_COUNT; i++) {
+  for (int ii = 0; ii < COLUMNS_COUNT; ii++) {
+cols[(ii + (i * COLUMNS_COUNT))] = getColumnName(i, ii);
+  }
+}
+//starting from each row, validate results should contain the starting row
+for (int startRowId = 0; startRowId < ROW_COUNT; startRowId++) {
+  InternalScanner scanner = this.hmemcache.getScanner(timestamp,
+  cols, getRowName(startRowId));
+  HStoreKey key = new HStoreKey();
+  TreeMap results =
+new TreeMap(Bytes.BYTES_COMPARATOR);
+  for (int i = 0; scanner.next(key, results); i++) {
+int rowId = startRowId + i;
+assertTrue("Row name",
+key.toString().startsWith(Bytes.toString(getRowName(rowId))));
+assertEquals("Count of columns", COLUMNS_COUNT, results.size());
+TreeMap row =
+  new TreeMap(Bytes.BYTES_COMPARATOR);
+for (Map.Entry e : results.entrySet()) {
+  row.put(e.getKey(),
+  new Cell(e.g

svn commit: r670104 - in /hadoop/hbase/branches/0.1: CHANGES.txt src/java/org/apache/hadoop/hbase/HStore.java src/test/org/apache/hadoop/hbase/TestScanMultipleVersions.java

2008-06-20 Thread jimk
Author: jimk
Date: Fri Jun 20 17:37:49 2008
New Revision: 670104

URL: http://svn.apache.org/viewvc?rev=670104&view=rev
Log:
HBASE-613   Timestamp-anchored scanning fails to find all records

Added:

hadoop/hbase/branches/0.1/src/test/org/apache/hadoop/hbase/TestScanMultipleVersions.java
Modified:
hadoop/hbase/branches/0.1/CHANGES.txt
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStore.java

Modified: hadoop/hbase/branches/0.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/CHANGES.txt?rev=670104&r1=670103&r2=670104&view=diff
==
--- hadoop/hbase/branches/0.1/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.1/CHANGES.txt Fri Jun 20 17:37:49 2008
@@ -23,7 +23,7 @@
reading performance after break it (LN via Stack)
HBASE-686   MemcacheScanner didn't return the first row(if it exists),
because HScannerInterface's output incorrect (LN via Jim 
Kellerman)
-
+   HBASE-613   Timestamp-anchored scanning fails to find all records
 
 Release 0.1.2 - 05/13/2008
 

Modified: hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStore.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStore.java?rev=670104&r1=670103&r2=670104&view=diff
==
--- hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStore.java 
(original)
+++ hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStore.java Fri 
Jun 20 17:37:49 2008
@@ -271,36 +271,49 @@
  * The returned object should map column names to byte arrays (byte[]).
  * @param key
  * @param results
+ * @return most recent timestamp found
  */
-void getFull(HStoreKey key, Map deletes, 
+long getFull(HStoreKey key, Map deletes, 
   SortedMap results) {
+  long rowtime = -1L;
   
   this.mc_lock.readLock().lock();
   try {
 synchronized (mc) {
-  internalGetFull(mc, key, deletes, results);
+  long ts = internalGetFull(mc, key, deletes, results);
+  if (ts != HConstants.LATEST_TIMESTAMP && ts > rowtime) {
+rowtime = ts;
+  }
 }
 synchronized (snapshot) {
-  internalGetFull(snapshot, key, deletes, results);
+  long ts = internalGetFull(snapshot, key, deletes, results);
+  if (ts != HConstants.LATEST_TIMESTAMP && ts > rowtime) {
+rowtime = ts;
+  }
 }
-
+return rowtime;
   } finally {
 this.mc_lock.readLock().unlock();
   }
 }
 
-private void internalGetFull(SortedMap map, HStoreKey 
key, 
+private long internalGetFull(SortedMap map, HStoreKey 
key, 
   Map deletes, SortedMap results) {
 
   if (map.isEmpty() || key == null) {
-return;
+return -1L;
   }
 
+  long rowtime = -1L;
   SortedMap tailMap = map.tailMap(key);
   for (Map.Entry es: tailMap.entrySet()) {
 HStoreKey itKey = es.getKey();
 Text itCol = itKey.getColumn();
 if (results.get(itCol) == null && key.matchesWithoutColumn(itKey)) {
+  if (itKey.getTimestamp() != HConstants.LATEST_TIMESTAMP &&
+  itKey.getTimestamp() > rowtime) {
+rowtime = itKey.getTimestamp();
+  }
   byte [] val = tailMap.get(itKey);
 
   if (HLogEdit.isDeleted(val)) {
@@ -316,6 +329,7 @@
   break;
 }
   }
+  return rowtime;
 }
 
 /**
@@ -631,6 +645,7 @@
if (results.size() > 0) {
  results.clear();
}
+   long ts = -1L;
while (results.size() <= 0 && this.currentRow != null) {
  if (deletes.size() > 0) {
deletes.clear();
@@ -640,7 +655,7 @@
  }
  key.setRow(this.currentRow);
  key.setVersion(this.timestamp);
- getFull(key, deletes, rowResults);
+ ts = getFull(key, deletes, rowResults);
  for (Text column: deletes.keySet()) {
rowResults.put(column, HLogEdit.deleteBytes.get());
  }
@@ -662,6 +677,12 @@
  }
  this.currentRow = getNextRow(this.currentRow);
}
+   // Set the timestamp to the largest one for the row if we would 
otherwise
+   // return HConstants.LATEST_TIMESTAMP
+   if (key.getTimestamp() == HConstants.LATEST_TIMESTAMP &&
+   ts != -1L) {
+ key.setVersion(ts);
+   }
return results.size() > 0;
  }
   
@@ -2512,7 +2533,7 @@
  // Advance the readers to the first pos.
  for (i = 0; i < sfsReaders.length; i++) {
keys[i] = new HStoreKey();
-   if (firstRow.getLength() != 0) {
+   if (firstRow != null && firstRow.getLength() != 0) {
  if (findF

svn commit: r670124 - in /hadoop/hbase/trunk: ./ src/java/org/apache/hadoop/hbase/regionserver/ src/test/org/apache/hadoop/hbase/ src/test/org/apache/hadoop/hbase/util/

2008-06-20 Thread jimk
Author: jimk
Date: Fri Jun 20 19:52:35 2008
New Revision: 670124

URL: http://svn.apache.org/viewvc?rev=670124&view=rev
Log:
HBASE-613 Timestamp-anchored scanning fails to find all records
HBASE-681 NPE in Memcache

HAbstractScanner
- remove HAbstactScanner.iterator() - iterator is not a method on 
InternalScanner

HRegion
- make getScanner more efficient by iterating only once to find the stores we 
need to scan
- only pass columns relevant to a store to a HStoreScanner
- remove HScanner.iterator() - iterator is not a method on InternalScanner

Memcache, MemcacheScanner
- Fix NPE in Memcache
- never return HConstants.LATEST_TIMESTAMP as the timestamp value for a row. 
Instead use the largest timestamp from the cells being returned. This allows a 
scanner to determine a timestamp that can be used to fetch the same data again 
should new versions be inserted later.

StoreFileScanner
- getNextViableRow would find a row that matched the row key, but did not 
consider the requested timestamp. Now if the row it finds has a timestamp 
greater than the one desired it advances to determine if a row with a timestamp 
less than or equal to the requested one exists since timestamps are sorted 
descending.
- removed an unnecessary else

testScanMultipleVersions
- Test program that fails on current trunk but passes when this patch is 
applied.

Added:

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestScanMultipleVersions.java
Modified:
hadoop/hbase/trunk/CHANGES.txt

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HAbstractScanner.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/AbstractMergeTestBase.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/HBaseTestCase.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MultiRegionTable.java

hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestRegionRebalancing.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMergeTool.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=670124&r1=670123&r2=670124&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Fri Jun 20 19:52:35 2008
@@ -62,6 +62,9 @@
(Rong-En Fan via Stack)
HBASE-699   Fix TestMigrate up on Hudson
HBASE-615   Region balancer oscillates during cluster startup
+   HBASE-613   Timestamp-anchored scanning fails to find all records
+   HBASE-681   NPE in Memcache
+   

   IMPROVEMENTS
HBASE-559   MR example job to count table rows

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HAbstractScanner.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HAbstractScanner.java?rev=670124&r1=670123&r2=670124&view=diff
==
--- 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HAbstractScanner.java
 (original)
+++ 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HAbstractScanner.java
 Fri Jun 20 19:52:35 2008
@@ -21,11 +21,9 @@
 
 import java.io.IOException;
 import java.util.HashMap;
-import java.util.Iterator;
 import java.util.Map;
 import java.util.SortedMap;
 import java.util.Vector;
-import java.util.Map.Entry;
 import java.util.regex.Pattern;
 
 import org.apache.commons.logging.Log;
@@ -183,12 +181,9 @@
 return this.multipleMatchers;
   }
 
+  /** {@inheritDoc} */
   public abstract boolean next(HStoreKey key,
-SortedMap results)
+  SortedMap results)
   throws IOException;
   
-  public Iterator>> iterator() {
-throw new UnsupportedOperationException("Unimplemented serverside. " +
-  "next(HStoreKey, StortedMap(...) is more efficient");
-  }
 }

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=670124&r1=670123&r2=670124&view=diff
==
--- 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
(original)
+++ 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
Fri Jun 20 19:52:35 2008
@@ -23,7 +23,7 @@
 import java.io.UnsupportedEncodingException;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Iterator;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 impo

svn commit: r671676 - in /hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase: HRegionServer.java HTable.java

2008-06-25 Thread jimk
Author: jimk
Date: Wed Jun 25 15:45:33 2008
New Revision: 671676

URL: http://svn.apache.org/viewvc?rev=671676&view=rev
Log:
HBASE-613   Timestamp-anchored scanning fails to find all records

Problem was two fold:
- HRegionServer did not return null if no results were found, it returned empty 
results
- HTable$ClientScanner.next loop termination condition was incorrect

Modified:

hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegionServer.java
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HTable.java

Modified: 
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegionServer.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegionServer.java?rev=671676&r1=671675&r2=671676&view=diff
==
--- 
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegionServer.java 
(original)
+++ 
hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegionServer.java 
Wed Jun 25 15:45:33 2008
@@ -1523,7 +1523,7 @@
 // No data for this row, go get another.
 results.clear();
   }
-  return values;
+  return values.size() == 0 ? null : values;
   
 } catch (IOException e) {
   checkFileSystem();

Modified: hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HTable.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HTable.java?rev=671676&r1=671675&r2=671676&view=diff
==
--- hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HTable.java 
(original)
+++ hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HTable.java Wed 
Jun 25 15:45:33 2008
@@ -848,7 +848,7 @@
 startRow : oldLocation.getRegionInfo().getEndKey();
 
   // advance to the region that starts with the current region's end key
-  LOG.debug("Advancing internal scanner to startKey " + localStartKey);
+  LOG.debug("Advancing internal scanner to startKey '" + localStartKey + 
"'");
   this.currentRegionLocation = getRegionLocation(localStartKey);
   
   LOG.debug("New region: " + this.currentRegionLocation);
@@ -924,7 +924,7 @@
   results.clear();
   do {
 values = server.next(scannerId);
-  } while (values != null && values.size() == 0 && nextScanner());
+  } while ((values == null || values.size() == 0) && nextScanner());
 
   if (values != null && values.size() != 0) {
 for (Map.Entry e: values.entrySet()) {
@@ -1046,16 +1046,15 @@
   " attempts.\n";
 int i = 1;
 for (IOException e2 : exceptions) {
-  message = message + "Exception " + i + ":\n" + e;
+  message = message + "Exception " + i + ":\n" + e2;
 }
 LOG.debug(message);
   }
   throw e;
-} else {
-  if (LOG.isDebugEnabled()) {
-exceptions.add(e);
-LOG.debug("reloading table servers because: " + e.getMessage());
-  }
+}
+if (LOG.isDebugEnabled()) {
+  exceptions.add(e);
+  LOG.debug("reloading table servers because: " + e.getMessage());
 }
 
   } catch (Exception e) {




svn commit: r671731 - in /hadoop/hbase/trunk/src: java/org/apache/hadoop/hbase/client/HTable.java java/org/apache/hadoop/hbase/regionserver/HRegionServer.java test/org/apache/hadoop/hbase/util/TestMer

2008-06-25 Thread jimk
Author: jimk
Date: Wed Jun 25 21:50:54 2008
New Revision: 671731

URL: http://svn.apache.org/viewvc?rev=671731&view=rev
Log:
HBASE-613   Timestamp-anchored scanning fails to find all records

Three problems:
- HRegionServer.next did not return null if there were no results
- HTable$ClientScanner.next had wrong loop termination
- TestMergeTool did not correctly set fs, hbase.rootdir

Modified:
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTable.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMergeTool.java

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTable.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTable.java?rev=671731&r1=671730&r2=671731&view=diff
==
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTable.java 
(original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTable.java Wed 
Jun 25 21:50:54 2008
@@ -1343,8 +1343,8 @@
   byte [] localStartKey = oldRegion == null? startRow: 
oldRegion.getEndKey();
 
   if (CLIENT_LOG.isDebugEnabled()) {
-CLIENT_LOG.debug("Advancing internal scanner to startKey at " +
-  Bytes.toString(localStartKey));
+CLIENT_LOG.debug("Advancing internal scanner to startKey at '" +
+  Bytes.toString(localStartKey) + "'");
   }
 
   try {
@@ -1387,7 +1387,7 @@
   RowResult values = null;
   do {
 values = getConnection().getRegionServerWithRetries(callable);
-  } while (values != null && values.size() == 0 && nextScanner());
+  } while ((values == null || values.size() == 0) && nextScanner());
 
   if (values != null && values.size() != 0) {
 return values;

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java?rev=671731&r1=671730&r2=671731&view=diff
==
--- 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 (original)
+++ 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 Wed Jun 25 21:50:54 2008
@@ -1128,7 +1128,7 @@
 // No data for this row, go get another.
 results.clear();
   }
-  return new RowResult(key.getRow(), values);
+  return values.size() == 0 ? null : new RowResult(key.getRow(), values);
 } catch (IOException e) {
   checkFileSystem();
   throw e;

Modified: 
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMergeTool.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMergeTool.java?rev=671731&r1=671730&r2=671731&view=diff
==
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMergeTool.java 
(original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMergeTool.java 
Wed Jun 25 21:50:54 2008
@@ -25,7 +25,6 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.dfs.MiniDFSCluster;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestCase;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -48,7 +47,6 @@
   private HTableDescriptor desc;
   private byte [][][] rows;
   private MiniDFSCluster dfsCluster = null;
-  private FileSystem fs;
   
   /** [EMAIL PROTECTED] */
   @Override
@@ -101,13 +99,19 @@
 // Start up dfs
 this.dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
 this.fs = this.dfsCluster.getFileSystem();
-
+conf.set("fs.default.name", fs.getUri().toString());
+Path parentdir = fs.getHomeDirectory();
+conf.set(HConstants.HBASE_DIR, parentdir.toString());
+fs.mkdirs(parentdir);
+FSUtils.setVersion(fs, parentdir);
+
 // Note: we must call super.setUp after starting the mini cluster or
 // we will end up with a local file system
 
 super.setUp();
-
 try {
+  // Create root and meta regions
+  createRootAndMetaRegions();
   /*
* Create the regions we will merge
*/
@@ -123,11 +127,6 @@
   b.put(COLUMN_NAME, new ImmutableBytesWritable(row).get());
   regions[i].batchUpdate(b);
 }
-  }
-  // Create root and meta regions
-  createRootAndMetaRegions();
-  // Insert the regions we created into the meta
-  for(int i = 0; i < regions.length; i++) {
 HRegion.addRegionToMETA(meta, regions[i]);
   }
   // Close root and meta regions




svn commit: r672423 - in /hadoop/hbase/trunk: ./ src/java/org/apache/hadoop/hbase/client/ src/java/org/apache/hadoop/hbase/master/

2008-06-27 Thread jimk
Author: jimk
Date: Fri Jun 27 16:13:26 2008
New Revision: 672423

URL: http://svn.apache.org/viewvc?rev=672423&view=rev
Log:
HBASE-627   Disable table doesn't work reliably

Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnection.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTable.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/TableOperation.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=672423&r1=672422&r2=672423&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Fri Jun 27 16:13:26 2008
@@ -71,6 +71,7 @@
HBASE-710   If clocks are way off, then we can have daughter split come
before rather than after its parent in .META.
HBASE-714   Showing bytes in log when should be string (2)
+   HBASE-627   Disable table doesn't work reliably


   IMPROVEMENTS

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java?rev=672423&r1=672422&r2=672423&view=diff
==
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java 
(original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java 
Fri Jun 27 16:13:26 2008
@@ -21,7 +21,6 @@
 
 import java.io.IOException;
 import java.util.Map;
-import java.util.NoSuchElementException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -314,78 +313,17 @@
   throw new MasterNotRunningException("master has been shut down");
 }
 HTableDescriptor.isLegalTableName(tableName);
-HRegionLocation firstMetaServer = getFirstMetaServerForTable(tableName);
-
 try {
   this.master.enableTable(tableName);
-  
 } catch (RemoteException e) {
   throw RemoteExceptionHandler.decodeRemoteException(e);
 }
 
-// Wait until first region is enabled
+// Wait until all regions are enabled
 
-HRegionInterface server =
-  connection.getHRegionConnection(firstMetaServer.getServerAddress());
-
-HRegionInfo info = new HRegionInfo();
-for (int tries = 0; tries < numRetries; tries++) {
-  int valuesfound = 0;
-  long scannerId = -1L;
-  try {
-scannerId =
-  server.openScanner(firstMetaServer.getRegionInfo().getRegionName(),
-HConstants.COL_REGIONINFO_ARRAY, tableName,
-HConstants.LATEST_TIMESTAMP, null);
-boolean isenabled = false;
-
-while (true) {
-  RowResult values = server.next(scannerId);
-  if (values == null || values.size() == 0) {
-if (valuesfound == 0) {
-  throw new NoSuchElementException(
-  "table " + Bytes.toString(tableName) + " not found");
-}
-break;
-  }
-  valuesfound += 1;
-  for (Map.Entry e: values.entrySet()) {
-if (Bytes.equals(e.getKey(), HConstants.COL_REGIONINFO)) {
-  info = (HRegionInfo) Writables.getWritable(
-e.getValue().getValue(), info);
-
-  isenabled = !info.isOffline();
-  break;
-}
-  }
-  if (isenabled) {
-break;
-  }
-}
-if (isenabled) {
-  break;
-}
-
-  } catch (IOException e) {
-if (tries == numRetries - 1) {  // no more retries
-  if (e instanceof RemoteException) {
-e = RemoteExceptionHandler.decodeRemoteException((RemoteException) 
e);
-  }
-  throw e;
-}
-
-  } finally {
-if (scannerId != -1L) {
-  try {
-server.close(scannerId);
-
-  } catch (Exception e) {
-LOG.warn(e);
-  }
-}
-  }
+while (!isTableEnabled(tableName)) {
   if (LOG.isDebugEnabled()) {
-LOG.debug("Sleep. Waiting for first region to be enabled from " +
+LOG.debug("Sleep. Waiting for all regions to be enabled from " +
   Bytes.toString(tableName));
   }
   try {
@@ -395,7 +333,7 @@
 // continue
   }
   if (LOG.isDebugEnabled()) {
-LOG.debug("Wake. Waiting for first region to be enabled from " +
+LOG.debug("Wake. Waiting for all regions to be enabled from " +

svn commit: r672456 - in /hadoop/hbase/trunk: CHANGES.txt src/java/org/apache/hadoop/hbase/regionserver/HStore.java

2008-06-27 Thread jimk
Author: jimk
Date: Fri Jun 27 19:32:28 2008
New Revision: 672456

URL: http://svn.apache.org/viewvc?rev=672456&view=rev
Log:
HBASE-716   TestGet2.testGetClosestBefore fails with hadoop-0.17.1

Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=672456&r1=672455&r2=672456&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Fri Jun 27 19:32:28 2008
@@ -72,6 +72,7 @@
before rather than after its parent in .META.
HBASE-714   Showing bytes in log when should be string (2)
HBASE-627   Disable table doesn't work reliably
+   HBASE-716   TestGet2.testGetClosestBefore fails with hadoop-0.17.1


   IMPROVEMENTS

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java?rev=672456&r1=672455&r2=672456&view=diff
==
--- 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java 
(original)
+++ 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java 
Fri Jun 27 19:32:28 2008
@@ -1544,7 +1544,6 @@
   private void rowAtOrBeforeFromMapFile(MapFile.Reader map, final byte [] row, 
 SortedMap candidateKeys)
   throws IOException {
-HStoreKey searchKey = null;
 ImmutableBytesWritable readval = new ImmutableBytesWritable();
 HStoreKey readkey = new HStoreKey();
 
@@ -1585,52 +1584,72 @@
   return;
 }
 
-// seek to the exact row, or the one that would be immediately before it
-readkey = (HStoreKey)map.getClosest(searchKey, readval, true);
-  
-if (readkey == null) {
-  // didn't find anything that would match, so return
-  return;
-}
-  
-do {
-  // if we have an exact match on row, and it's not a delete, save this
-  // as a candidate key
-  if (Bytes.equals(readkey.getRow(), row)) {
-if (!HLogEdit.isDeleted(readval.get())) {
-  if (ttl == HConstants.FOREVER || 
+HStoreKey deletedOrExpiredRow = null;
+boolean foundCandidate = false;
+while (!foundCandidate) {
+  // seek to the exact row, or the one that would be immediately before it
+  readkey = (HStoreKey)map.getClosest(searchKey, readval, true);
+
+  if (readkey == null) {
+// didn't find anything that would match, so return
+return;
+  }
+
+  do {
+// if we have an exact match on row, and it's not a delete, save this
+// as a candidate key
+if (Bytes.equals(readkey.getRow(), row)) {
+  if (!HLogEdit.isDeleted(readval.get())) {
+if (ttl == HConstants.FOREVER || 
 now < readkey.getTimestamp() + ttl) {
-candidateKeys.put(stripTimestamp(readkey), 
-  new Long(readkey.getTimestamp()));
-  } else {
+  candidateKeys.put(stripTimestamp(readkey), 
+  new Long(readkey.getTimestamp()));
+  foundCandidate = true;
+  continue;
+}
 if (LOG.isDebugEnabled()) {
   LOG.debug("rowAtOrBeforeFromMapFile:" + readkey +
-": expired, skipped");
+  ": expired, skipped");
 }
   }
-}
-  } else if (Bytes.compareTo(readkey.getRow(), row) > 0 ) {
-// if the row key we just read is beyond the key we're searching for,
-// then we're done. return.
-return;
-  } else {
-// so, the row key doesn't match, but we haven't gone past the row
-// we're seeking yet, so this row is a candidate for closest
-// (assuming that it isn't a delete).
-if (!HLogEdit.isDeleted(readval.get())) {
-  if (ttl == HConstants.FOREVER || 
-  now < readkey.getTimestamp() + ttl) {
-candidateKeys.put(stripTimestamp(readkey), 
-  new Long(readkey.getTimestamp()));
-  } else {
+  deletedOrExpiredRow = stripTimestamp(readkey);
+} else if (Bytes.compareTo(readkey.getRow(), row) > 0 ) {
+  // if the row key we just read is beyond the key we're searching for,
+  // then we're done. return.
+  break;
+} else {
+  // so, the row key doesn't match, but we haven't gone past the row
+  // we're seeking yet, so this row is a candidate for closest
+  // (assuming that it isn't a delete).
+  if (!HLogEdit.isDeleted(readval.get())) {
+ 

svn commit: r672457 - in /hadoop/hbase/trunk: CHANGES.txt lib/hadoop-0.17.0-core.jar lib/hadoop-0.17.0-test.jar lib/hadoop-0.17.1-core.jar lib/hadoop-0.17.1-test.jar

2008-06-27 Thread jimk
Author: jimk
Date: Fri Jun 27 19:36:24 2008
New Revision: 672457

URL: http://svn.apache.org/viewvc?rev=672457&view=rev
Log:
HBASE-715   Base HBase 0.2 on Hadoop 0.17.1

Added:
hadoop/hbase/trunk/lib/hadoop-0.17.1-core.jar   (with props)
hadoop/hbase/trunk/lib/hadoop-0.17.1-test.jar   (with props)
Removed:
hadoop/hbase/trunk/lib/hadoop-0.17.0-core.jar
hadoop/hbase/trunk/lib/hadoop-0.17.0-test.jar
Modified:
hadoop/hbase/trunk/CHANGES.txt

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=672457&r1=672456&r2=672457&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Fri Jun 27 19:36:24 2008
@@ -73,7 +73,7 @@
HBASE-714   Showing bytes in log when should be string (2)
HBASE-627   Disable table doesn't work reliably
HBASE-716   TestGet2.testGetClosestBefore fails with hadoop-0.17.1
-   
+   HBASE-715   Base HBase 0.2 on Hadoop 0.17.1

   IMPROVEMENTS
HBASE-559   MR example job to count table rows

Added: hadoop/hbase/trunk/lib/hadoop-0.17.1-core.jar
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/lib/hadoop-0.17.1-core.jar?rev=672457&view=auto
==
Binary file - no diff available.

Propchange: hadoop/hbase/trunk/lib/hadoop-0.17.1-core.jar
--
svn:mime-type = application/octet-stream

Added: hadoop/hbase/trunk/lib/hadoop-0.17.1-test.jar
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/lib/hadoop-0.17.1-test.jar?rev=672457&view=auto
==
Binary file - no diff available.

Propchange: hadoop/hbase/trunk/lib/hadoop-0.17.1-test.jar
--
svn:mime-type = application/octet-stream




svn commit: r673503 - /hadoop/hbase/trunk/CHANGES.txt

2008-07-02 Thread jimk
Author: jimk
Date: Wed Jul  2 14:23:15 2008
New Revision: 673503

URL: http://svn.apache.org/viewvc?rev=673503&view=rev
Log:
Make changes.txt accurately reflect changes in trunk.

Modified:
hadoop/hbase/trunk/CHANGES.txt

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=673503&r1=673502&r2=673503&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Wed Jul  2 14:23:15 2008
@@ -1,11 +1,6 @@
 Hbase Change Log
-  INCOMPATIBLE CHANGES
-
-  BUG FIXES
-   HBASE-718   hbase shell help info
-   HBASE-717   alter table broke with new shell returns 
InvalidColumnNameException
 
-Release 0.1.3 - 07/25/2008
+Trunk (unreleased changes)
 
   INCOMPATIBLE CHANGES
HBASE-584   Names in the filter interface are confusing (Clint Morgan via
@@ -16,6 +11,9 @@
HBASE-76Purge servers of Text (Done as part of HBASE-82 commit).
HBASE-487   Replace hql w/ a hbase-friendly jirb or jython shell
Part 1: purge of hql and added raw jirb in its place.
+   HBASE-521   Improve client scanner interface
+   HBASE-288   Add in-memory caching of data. Required update of hadoop to 
+   0.17.0-dev.2008-02-07_12-01-58. (Tom White via Stack) 
 
   BUG FIXES
HBASE-574   HBase does not load hadoop native libs (Rong-En Fan via Stack)
@@ -81,55 +79,8 @@
HBASE-627   Disable table doesn't work reliably
HBASE-716   TestGet2.testGetClosestBefore fails with hadoop-0.17.1
HBASE-715   Base HBase 0.2 on Hadoop 0.17.1
-   
-  IMPROVEMENTS
-   HBASE-559   MR example job to count table rows
-   HBASE-596   DemoClient.py (Ivan Begtin via Stack)
-   HBASE-581   Allow adding filters to TableInputFormat (At same time, ensure 
TIF
-   is subclassable) (David Alves via Stack)
-   HBASE-603   When an exception bubbles out of getRegionServerWithRetries, 
wrap 
-   the exception with a RetriesExhaustedException
-   HBASE-600   Filters have excessive DEBUG logging
-   HBASE-611   regionserver should do basic health check before reporting
-   alls-well to the master
-   HBASE-614   Retiring regions is not used; exploit or remove
-   HBASE-538   Improve exceptions that come out on client-side
-   HBASE-569   DemoClient.php (Jim R. Wilson via Stack)
-   HBASE-522   Where new Text(string) might be used in client side method 
calls,
-   add an overload that takes String (Done as part of HBASE-82)
-   HBASE-570   Remove HQL unit test (Done as part of HBASE-82 commit).
-   HBASE-626   Use Visitor pattern in MetaRegion to reduce code clones in 
HTable
-   and HConnectionManager (Jean-Daniel Cryans via Stack)
-   HBASE-621   Make MAX_VERSIONS work like TTL: In scans and gets, check
-   MAX_VERSIONs setting and return that many only rather than wait 
on
-   compaction (Jean-Daniel Cryans via Stack)
-   HBASE-504   Allow HMsg's carry a payload: e.g. exception that happened over
-   on the remote side.
-   HBASE-583   RangeRowFilter/ColumnValueFilter to allow choice of rows based 
on
-   a (lexicographic) comparison to column's values
-   (Clint Morgan via Stack)
-   HBASE-579   Add hadoop 0.17.x
-   HBASE-660   [Migration] addColumn/deleteColumn functionality in MetaUtils
-   HBASE-632   HTable.getMetadata is very inefficient
-   HBASE-671   New UI page displaying all regions in a table should be sorted
-   HBASE-672   Sort regions in the regionserver UI
-   HBASE-677   Make HTable, HRegion, HRegionServer, HStore, and 
HColumnDescriptor
-   subclassable (Clint Morgan via Stack)
-   HBASE-682   Regularize toString
-   HBASE-672   Sort regions in the regionserver UI
-
-  NEW FEATURES
-   HBASE-47Option to set TTL for columns in hbase
-   (Andrew Purtell via Bryan Duxbury and Stack)
-   HBASE-23UI listing regions should be sorted by address and show 
additional
-   region state (Jean-Daniel Cryans via Stack)
-   HBASE-639   Add HBaseAdmin.getTableDescriptor function
-   HBASE-533   Region Historian
-   HBASE-487   Replace hql w/ a hbase-friendly jirb or jython shell
-   
-Release 0.1.2 - 05/13/2008
-   
-  BUG FIXES
+   HBASE-718   hbase shell help info
+   HBASE-717   alter table broke with new shell returns 
InvalidColumnNameException
HBASE-573   HBase does not read hadoop-*.xml for dfs configuration after 
moving out hadoop/contrib
HBASE-11Unexpected exits corrupt DFS
@@ -157,22 +108,11 @@
HBASE-478   offlining of table does not run reliably
HBASE-453   undeclared throwable exception from HTable.get
HBASE-620   testmergetool failing in branch and trunk since hbase-618 went 
in
-
-
-Release 0.1.1 - 04/11/2008
-
-  INCOMPATIBLE CHANGES
-   HBASE-521   Improve client scanner interface
-  
-  BUG FIXES
   

svn commit: r673524 - in /hadoop/hbase/trunk: CHANGES.txt src/java/org/apache/hadoop/hbase/regionserver/HRegion.java src/java/org/apache/hadoop/hbase/regionserver/HStore.java

2008-07-02 Thread jimk
Author: jimk
Date: Wed Jul  2 15:13:41 2008
New Revision: 673524

URL: http://svn.apache.org/viewvc?rev=673524&view=rev
Log:
HBASE-712   midKey found compacting is the first, not necessarily the optimal

Modified:
hadoop/hbase/trunk/CHANGES.txt

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=673524&r1=673523&r2=673524&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Wed Jul  2 15:13:41 2008
@@ -163,6 +163,7 @@
HBASE-527   RegexpRowFilter does not work when there are columns from 
multiple families (Clint Morgan via Jim Kellerman)
HBASE-534   Double-assignment at SPLIT-time
+   HBASE-712   midKey found compacting is the first, not necessarily the 
optimal

   IMPROVEMENTS
HBASE-559   MR example job to count table rows

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=673524&r1=673523&r2=673524&view=diff
==
--- 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
(original)
+++ 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
Wed Jul  2 15:13:41 2008
@@ -876,10 +876,12 @@
   LOG.info("starting compaction on region " + this);
   long startTime = System.currentTimeMillis();
   doRegionCompactionPrep();
+  long maxSize = -1;
   for (HStore store: stores.values()) {
-final byte [] key = store.compact(force);
-if (key != null && midKey == null) {
-  midKey = key;
+final HStore.StoreSize size = store.compact(force);
+if (size != null && size.getSize() > maxSize) {
+  maxSize = size.getSize();
+  midKey = size.getKey();
 }
   }
   doRegionCompactionCleanup();

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java?rev=673524&r1=673523&r2=673524&view=diff
==
--- 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java 
(original)
+++ 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java 
Wed Jul  2 15:13:41 2008
@@ -854,7 +854,7 @@
* @return mid key if a split is needed, null otherwise
* @throws IOException
*/
-  byte [] compact(final boolean force) throws IOException {
+  StoreSize compact(final boolean force) throws IOException {
 synchronized (compactLock) {
   long maxId = -1;
   List filesToCompact = null;
@@ -1811,9 +1811,9 @@
   /**
* Determines if HStore can be split
* 
-   * @return midKey if store can be split, null otherwise
+   * @return a StoreSize if store can be split, null otherwise
*/
-  byte [] checkSplit() {
+  StoreSize checkSplit() {
 if (this.storefiles.size() <= 0) {
   return null;
 }
@@ -1865,7 +1865,7 @@
 Bytes.equals(mk.getRow(), lastKey.getRow())) {
   return null;
 }
-return mk.getRow();
+return new StoreSize(maxSize, mk.getRow());
   }
 } catch(IOException e) {
   LOG.warn("Failed getting store size for " + this.storeNameStr, e);
@@ -1931,4 +1931,22 @@
   return copy;
 }
   }
+  
+  class StoreSize {
+private final long size;
+private final byte[] key;
+StoreSize(long size, byte[] key) {
+  this.size = size;
+  this.key = new byte[key.length];
+  System.arraycopy(key, 0, this.key, 0, key.length);
+}
+/* @return the size */
+long getSize() {
+  return size;
+}
+/* @return the key */
+byte[] getKey() {
+  return key;
+}
+  }
 }
\ No newline at end of file




svn commit: r674108 - in /hadoop/hbase/trunk: CHANGES.txt src/java/org/apache/hadoop/hbase/regionserver/HRegion.java src/java/org/apache/hadoop/hbase/regionserver/HStore.java src/java/org/apache/hadoo

2008-07-04 Thread jimk
Author: jimk
Date: Fri Jul  4 12:15:16 2008
New Revision: 674108

URL: http://svn.apache.org/viewvc?rev=674108&view=rev
Log:
HBASE-674   Memcache size unreliable

Modified:
hadoop/hbase/trunk/CHANGES.txt

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java

hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=674108&r1=674107&r2=674108&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Fri Jul  4 12:15:16 2008
@@ -167,6 +167,7 @@
HBASE-719   Find out why users have network problems in HBase and not in 
Hadoop
and HConnectionManager (Jean-Daniel Cryans via Stack)
HBASE-703   Invalid regions listed by regionserver.jsp (Izaak Rubin via 
Stack)
+   HBASE-674   Memcache size unreliable

   IMPROVEMENTS
HBASE-559   MR example job to count table rows

Modified: 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=674108&r1=674107&r2=674108&view=diff
==
--- 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
(original)
+++ 
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
Fri Jul  4 12:15:16 2008
@@ -504,7 +504,7 @@
   long getMinSequenceId() {
 return this.minSequenceId;
   }
-
+  
   /** @return a HRegionInfo object for this region */
   public HRegionInfo getRegionInfo() {
 return this.regionInfo;
@@ -1002,6 +1002,9 @@
 // to do this for a moment.  Its quick.  The subsequent sequence id that
 // goes into the HLog after we've flushed all these snapshots also goes
 // into the info file that sits beside the flushed files.
+// We also set the memcache size to zero here before we allow updates
+// again so its value will represent the size of the updates received
+// during the flush
 long sequenceId = -1L;
 this.updatesLock.writeLock().lock();
 try {
@@ -1009,6 +1012,7 @@
 s.snapshot();
   }
   sequenceId = log.startCacheFlush();
+  this.memcacheSize.set(0);
 } finally {
   this.updatesLock.writeLock().unlock();
 }
@@ -1017,20 +1021,13 @@
 // restart so hlog content can be replayed and put back into the memcache.
 // Otherwise, the snapshot content while backed up in the hlog, it will not
 // be part of the current running servers state.
+long flushed = 0;
 try {
   // A.  Flush memcache to all the HStores.
   // Keep running vector of all store files that includes both old and the
   // just-made new flush store file.
   for (HStore hstore: stores.values()) {
-long flushed = hstore.flushCache(sequenceId);
-// Subtract amount flushed.
-long size = this.memcacheSize.addAndGet(-flushed);
-if (size < 0) {
-   if (LOG.isDebugEnabled()) {
- LOG.warn("Memcache size went negative " + size + "; resetting");
-   }
-   this.memcacheSize.set(0);
-}
+flushed += hstore.flushCache(sequenceId);
   }
 } catch (Throwable t) {
   // An exception here means that the snapshot was not persisted.
@@ -1068,7 +1065,7 @@
 " in " +
   (System.currentTimeMillis() - startTime) + "ms, sequence id=" +
   sequenceId + ", " +
-  StringUtils.humanReadableInt(this.memcacheSize.get()));
+  StringUtils.humanReadableInt(flushed));
   if (!regionInfo.isMetaRegion()) {
 this.historian.addRegionFlush(regionInfo, timeTaken);
   }
@@ -1374,7 +1371,7 @@
*/
   private synchronized void checkResources() {
 boolean blocked = false;
-
+
 while (this.memcacheSize.get() >= this.blockingMemcacheSize) {
   if (!blocked) {
 LOG.info("Blocking updates for '" + Thread.currentThread().getName() +
@@ -1538,9 +1535,8 @@
   long size = 0;
   for (Map.Entry e: updatesByColumn.entrySet()) {
 HStoreKey key = e.getKey();
-byte[] val = e.getValue();
-size = this.memcacheSize.addAndGet(getEntrySize(key, val));
-getStore(key.getColumn()).add(key, val);
+size = this.memcacheSize.addAndGet(
+getStore(key.getColumn()).add(key, e.getValue()));
   }
   flush = this.flushListener != null && !this.flushRequested &&
 size > this.memcacheFlushSize;
@@ -1578,19 +1574,6 @@
 return this.stores.get(HStoreKey.getFamilyMapKey(colum

svn commit: r676059 - in /hadoop/hbase/trunk/lib/native: Linux-amd64-64/ Linux-i386-32/

2008-07-11 Thread jimk
Author: jimk
Date: Fri Jul 11 12:43:41 2008
New Revision: 676059

URL: http://svn.apache.org/viewvc?rev=676059&view=rev
Log:
HBASE-715 Base HBase 0.2 on Hadoop 0.17.1 --- update native libraries

Modified:
hadoop/hbase/trunk/lib/native/Linux-amd64-64/libhadoop.a
hadoop/hbase/trunk/lib/native/Linux-amd64-64/libhadoop.so
hadoop/hbase/trunk/lib/native/Linux-amd64-64/libhadoop.so.1
hadoop/hbase/trunk/lib/native/Linux-amd64-64/libhadoop.so.1.0.0
hadoop/hbase/trunk/lib/native/Linux-i386-32/libhadoop.a
hadoop/hbase/trunk/lib/native/Linux-i386-32/libhadoop.so
hadoop/hbase/trunk/lib/native/Linux-i386-32/libhadoop.so.1
hadoop/hbase/trunk/lib/native/Linux-i386-32/libhadoop.so.1.0.0

Modified: hadoop/hbase/trunk/lib/native/Linux-amd64-64/libhadoop.a
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/lib/native/Linux-amd64-64/libhadoop.a?rev=676059&r1=676058&r2=676059&view=diff
==
Binary files - no diff available.

Modified: hadoop/hbase/trunk/lib/native/Linux-amd64-64/libhadoop.so
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/lib/native/Linux-amd64-64/libhadoop.so?rev=676059&r1=676058&r2=676059&view=diff
==
Binary files - no diff available.

Modified: hadoop/hbase/trunk/lib/native/Linux-amd64-64/libhadoop.so.1
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/lib/native/Linux-amd64-64/libhadoop.so.1?rev=676059&r1=676058&r2=676059&view=diff
==
Binary files - no diff available.

Modified: hadoop/hbase/trunk/lib/native/Linux-amd64-64/libhadoop.so.1.0.0
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/lib/native/Linux-amd64-64/libhadoop.so.1.0.0?rev=676059&r1=676058&r2=676059&view=diff
==
Binary files - no diff available.

Modified: hadoop/hbase/trunk/lib/native/Linux-i386-32/libhadoop.a
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/lib/native/Linux-i386-32/libhadoop.a?rev=676059&r1=676058&r2=676059&view=diff
==
Binary files - no diff available.

Modified: hadoop/hbase/trunk/lib/native/Linux-i386-32/libhadoop.so
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/lib/native/Linux-i386-32/libhadoop.so?rev=676059&r1=676058&r2=676059&view=diff
==
Binary files - no diff available.

Modified: hadoop/hbase/trunk/lib/native/Linux-i386-32/libhadoop.so.1
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/lib/native/Linux-i386-32/libhadoop.so.1?rev=676059&r1=676058&r2=676059&view=diff
==
Binary files - no diff available.

Modified: hadoop/hbase/trunk/lib/native/Linux-i386-32/libhadoop.so.1.0.0
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/lib/native/Linux-i386-32/libhadoop.so.1.0.0?rev=676059&r1=676058&r2=676059&view=diff
==
Binary files - no diff available.




svn commit: r676728 - in /hadoop/hbase/trunk: CHANGES.txt src/java/org/onelab/filter/BloomFilter.java src/java/org/onelab/filter/Filter.java src/test/org/onelab/test/TestFilter.java

2008-07-14 Thread jimk
Author: jimk
Date: Mon Jul 14 13:46:55 2008
New Revision: 676728

URL: http://svn.apache.org/viewvc?rev=676728&view=rev
Log:
HBASE-744   BloomFilter serialization/deserialization broken

Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/src/java/org/onelab/filter/BloomFilter.java
hadoop/hbase/trunk/src/java/org/onelab/filter/Filter.java
hadoop/hbase/trunk/src/test/org/onelab/test/TestFilter.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=676728&r1=676727&r2=676728&view=diff
==
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Mon Jul 14 13:46:55 2008
@@ -183,6 +183,7 @@
HBASE-742   Rename getMetainfo in HTable as getTableDescriptor
HBASE-739   HBaseAdmin.createTable() using old HTableDescription doesn't 
work
(Izaak Rubin via Stack)
+   HBASE-744   BloomFilter serialization/deserialization broken
 
   IMPROVEMENTS
HBASE-559   MR example job to count table rows

Modified: hadoop/hbase/trunk/src/java/org/onelab/filter/BloomFilter.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/onelab/filter/BloomFilter.java?rev=676728&r1=676727&r2=676728&view=diff
==
--- hadoop/hbase/trunk/src/java/org/onelab/filter/BloomFilter.java (original)
+++ hadoop/hbase/trunk/src/java/org/onelab/filter/BloomFilter.java Mon Jul 14 
13:46:55 2008
@@ -226,6 +226,7 @@
   @Override
   public void readFields(DataInput in) throws IOException {
 super.readFields(in);
+bits = new BitSet(this.vectorSize);
 byte[] bytes = new byte[getNBytes()];
 in.readFully(bytes);
 for(int i = 0, byteIndex = 0, bitIndex = 0; i < vectorSize; i++, 
bitIndex++) {
@@ -233,9 +234,6 @@
 bitIndex = 0;
 byteIndex++;
   }
-  if (bitIndex == 0) {
-bytes[byteIndex] = 0;
-  }
   if ((bytes[byteIndex] & bitvalues[bitIndex]) != 0) {
 bits.set(i);
   }

Modified: hadoop/hbase/trunk/src/java/org/onelab/filter/Filter.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/onelab/filter/Filter.java?rev=676728&r1=676727&r2=676728&view=diff
==
--- hadoop/hbase/trunk/src/java/org/onelab/filter/Filter.java (original)
+++ hadoop/hbase/trunk/src/java/org/onelab/filter/Filter.java Mon Jul 14 
13:46:55 2008
@@ -76,13 +76,13 @@
  */
 public abstract class Filter implements Writable {
   /** The vector size of this filter. */
-  int vectorSize;
+  protected int vectorSize;
 
   /** The hash function used to map a key to several positions in the vector. 
*/
   protected HashFunction hash;
 
   /** The number of hash function to consider. */
-  int nbHash;
+  protected int nbHash;
 
   protected Filter() {}
   

Modified: hadoop/hbase/trunk/src/test/org/onelab/test/TestFilter.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/onelab/test/TestFilter.java?rev=676728&r1=676727&r2=676728&view=diff
==
--- hadoop/hbase/trunk/src/test/org/onelab/test/TestFilter.java (original)
+++ hadoop/hbase/trunk/src/test/org/onelab/test/TestFilter.java Mon Jul 14 
13:46:55 2008
@@ -48,9 +48,16 @@
  */
 package org.onelab.test;
 
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
 import java.io.UnsupportedEncodingException;
 import junit.framework.TestCase;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.onelab.filter.*;
 
 /**
@@ -61,22 +68,196 @@
  * @version 1.0 - 8 Feb. 07
  */
 public class TestFilter extends TestCase {
+  private static final Log LOG = LogFactory.getLog(TestFilter.class);
   
   /** Test a BloomFilter
* @throws UnsupportedEncodingException
+   * @throws IOException
*/
-  public void testBloomFilter() throws UnsupportedEncodingException {
-Filter bf = new BloomFilter(8, 2);
-Key key = new StringKey("toto");
-Key k2 = new StringKey("lulu");
-Key k3 = new StringKey("mama");
-bf.add(key);
-bf.add(k2);
-bf.add(k3);
-assertTrue(bf.membershipTest(key));
-assertTrue(bf.membershipTest(new StringKey("graknyl")));
-assertFalse(bf.membershipTest(new StringKey("xyzzy")));
-assertFalse(bf.membershipTest(new StringKey("abcd")));
+  public void testBloomFilter() throws UnsupportedEncodingException,
+  IOException {
+final StringKey[] inserted = {
+new StringKey("wmjwjzyv"),
+new StringKey("baietibz"),
+new StringKey("guh

  1   2   3   >