Repository: crunch
Updated Branches:
  refs/heads/master ffca004e9 -> 1b2c058c4


http://git-wip-us.apache.org/repos/asf/crunch/blob/1b2c058c/crunch-spark/src/it/java/org/apache/crunch/SparkWordCountHBaseIT.java
----------------------------------------------------------------------
diff --git a/crunch-spark/src/it/java/org/apache/crunch/SparkWordCountHBaseIT.java b/crunch-spark/src/it/java/org/apache/crunch/SparkWordCountHBaseIT.java
index 9accfed..9df9c65 100644
--- a/crunch-spark/src/it/java/org/apache/crunch/SparkWordCountHBaseIT.java
+++ b/crunch-spark/src/it/java/org/apache/crunch/SparkWordCountHBaseIT.java
@@ -33,12 +33,13 @@ import org.apache.crunch.test.TemporaryPath;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.mapreduce.MultiTableInputFormat;
 import org.apache.hadoop.hbase.mapreduce.MultiTableInputFormatBase;
@@ -103,9 +104,9 @@ public class SparkWordCountHBaseIT {
     @Before
     public void setUp() throws Exception {
         Configuration conf = HBaseConfiguration.create(tmpDir.getDefaultConfiguration());
+        conf.set(HConstants.TEMPORARY_FS_DIRECTORY_KEY, tmpDir.getFile("hbase-staging").getAbsolutePath());
         hbaseTestUtil = new HBaseTestingUtility(conf);
-        hbaseTestUtil.startMiniZKCluster();
-        hbaseTestUtil.startMiniHBaseCluster(1, 1);
+        hbaseTestUtil.startMiniCluster();
     }
 
     @Test
@@ -123,8 +124,7 @@ public class SparkWordCountHBaseIT {
 
     @After
     public void tearDown() throws Exception {
-        hbaseTestUtil.shutdownMiniHBaseCluster();
-        hbaseTestUtil.shutdownMiniZKCluster();
+        hbaseTestUtil.shutdownMiniCluster();
     }
 
     public void run(Pipeline pipeline) throws Exception {
@@ -135,17 +135,17 @@ public class SparkWordCountHBaseIT {
 
         Random rand = new Random();
         int postFix = rand.nextInt() & 0x7FFFFFFF;
-        String inputTableName = "crunch_words_" + postFix;
-        String outputTableName = "crunch_counts_" + postFix;
-        String otherTableName = "crunch_other_" + postFix;
+        TableName inputTableName = TableName.valueOf("crunch_words_" + postFix);
+        TableName outputTableName = TableName.valueOf("crunch_counts_" + postFix);
+        TableName otherTableName = TableName.valueOf("crunch_other_" + postFix);
 
-        HTable inputTable = hbaseTestUtil.createTable(Bytes.toBytes(inputTableName), WORD_COLFAM);
+        Table inputTable = hbaseTestUtil.createTable(inputTableName, WORD_COLFAM);
 
         int key = 0;
         key = put(inputTable, key, "cat");
         key = put(inputTable, key, "cat");
         key = put(inputTable, key, "dog");
-        inputTable.flushCommits();
+        inputTable.close();
 
         //Setup scan using multiple scans that simply cut the rows in half.
         Scan scan = new Scan();
@@ -158,7 +158,7 @@ public class SparkWordCountHBaseIT {
 
         HBaseSourceTarget source = null;
         if (clazz == null) {
-            source = new HBaseSourceTarget(TableName.valueOf(inputTableName), scan, scan2);
+            source = new HBaseSourceTarget(inputTableName, scan, scan2);
         } else {
             source = new HBaseSourceTarget(inputTableName, clazz, new Scan[]{scan, scan2});
         }
@@ -172,9 +172,9 @@ public class SparkWordCountHBaseIT {
         pipeline.done();
     }
 
-    protected int put(HTable table, int key, String value) throws IOException {
+    protected int put(Table table, int key, String value) throws IOException {
         Put put = new Put(Bytes.toBytes(key));
-        put.add(WORD_COLFAM, null, Bytes.toBytes(value));
+        put.addColumn(WORD_COLFAM, null, Bytes.toBytes(value));
         table.put(put);
         return key + 1;
     }
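
For readers following the HBase 2.0 migration above: the diff replaces the removed HTable class with the Table interface, Put.add with Put.addColumn, and the two-step mini-cluster startup with a single startMiniCluster() call. Below is a minimal standalone sketch of that HBase 2.x pattern, not part of the commit; the staging path and the table and family names are illustrative only.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MiniClusterSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // HBase 2.x needs a writable local staging directory;
            // this path is illustrative only.
            conf.set(HConstants.TEMPORARY_FS_DIRECTORY_KEY, "/tmp/hbase-staging");
            HBaseTestingUtility util = new HBaseTestingUtility(conf);
            // One call now stands up ZooKeeper, the master, and a region
            // server, replacing startMiniZKCluster() + startMiniHBaseCluster(1, 1).
            util.startMiniCluster();
            try {
                // createTable takes a TableName and returns the Table
                // interface; HTable is gone from the public API in 2.0.
                Table table = util.createTable(TableName.valueOf("words"), Bytes.toBytes("word"));
                Put put = new Put(Bytes.toBytes(0));
                // Put.add(family, qualifier, value) was removed in 2.0;
                // addColumn is the replacement.
                put.addColumn(Bytes.toBytes("word"), null, Bytes.toBytes("cat"));
                // Table.put is synchronous, so there is no flushCommits();
                // closing the table releases its resources.
                table.put(put);
                table.close();
            } finally {
                util.shutdownMiniCluster();
            }
        }
    }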

http://git-wip-us.apache.org/repos/asf/crunch/blob/1b2c058c/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 9af5374..e3b6d8d 100644
--- a/pom.xml
+++ b/pom.xml
@@ -100,8 +100,8 @@ under the License.
     <mockito.version>1.9.0</mockito.version>
     <pkg>org.apache.crunch</pkg>
 
-    <hadoop.version>2.6.0</hadoop.version>
-    <hbase.version>1.0.0</hbase.version>
+    <hadoop.version>2.7.1</hadoop.version>
+    <hbase.version>2.0.1</hbase.version>
     <avro.classifier>hadoop2</avro.classifier>
     <hive.version>2.1.0</hive.version>
 
@@ -211,7 +211,6 @@ under the License.
         <artifactId>guava</artifactId>
         <version>${guava.version}</version>
       </dependency>
-
       <dependency>
         <groupId>org.apache.avro</groupId>
         <artifactId>avro</artifactId>
@@ -355,6 +354,12 @@ under the License.
 
       <dependency>
         <groupId>org.apache.hbase</groupId>
+        <artifactId>hbase-mapreduce</artifactId>
+        <version>${hbase.version}</version>
+      </dependency>
+
+      <dependency>
+        <groupId>org.apache.hbase</groupId>
         <artifactId>hbase-testing-util</artifactId>
         <version>${hbase.version}</version>
       </dependency>
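
The new hbase-mapreduce dependency is needed because HBase 2.0 moved the MapReduce integration classes (MultiTableInputFormat, TableMapReduceUtil, TableMapper, and friends) out of hbase-server into their own artifact; the import paths are unchanged, only the jar that provides them. As context, here is a minimal sketch of driving a job from two scans over one table, roughly what the test exercises through HBaseSourceTarget; the WordMapper class, table name, and job name are hypothetical.

    import java.io.IOException;
    import java.util.Arrays;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.hbase.mapreduce.TableMapper;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Job;

    public class MultiScanSketch {

        // Hypothetical mapper: emits each stored word with a count of 1,
        // reading the family/null-qualifier cell the test's put() helper writes.
        static class WordMapper extends TableMapper<Text, IntWritable> {
            @Override
            protected void map(ImmutableBytesWritable row, Result result, Context ctx)
                    throws IOException, InterruptedException {
                byte[] value = result.getValue(Bytes.toBytes("word"), null);
                ctx.write(new Text(Bytes.toString(value)), new IntWritable(1));
            }
        }

        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            Job job = Job.getInstance(conf, "multi-scan-word-count");

            TableName table = TableName.valueOf("words");
            Scan first = new Scan();   // e.g. rows up to a split point
            Scan second = new Scan();  // e.g. rows from the split point on
            // When several scans feed one job, each carries its source
            // table as a scan attribute.
            first.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, table.getName());
            second.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, table.getName());

            // Wires MultiTableInputFormat and the serialized scans into the
            // job; in HBase 2.0 these classes ship in hbase-mapreduce.
            TableMapReduceUtil.initTableMapperJob(
                    Arrays.asList(first, second), WordMapper.class,
                    Text.class, IntWritable.class, job);
        }
    }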
