Repository: hive
Updated Branches:
  refs/heads/master 7ea12e731 -> a3e872822


http://git-wip-us.apache.org/repos/asf/hive/blob/a3e87282/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/TestPigHBaseStorageHandler.java
----------------------------------------------------------------------
diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/TestPigHBaseStorageHandler.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/TestPigHBaseStorageHandler.java
index f8f18b3..120b4af 100644
--- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/TestPigHBaseStorageHandler.java
+++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/TestPigHBaseStorageHandler.java
@@ -34,12 +34,16 @@ import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hive.cli.CliSessionState;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -94,10 +98,17 @@ public class TestPigHBaseStorageHandler extends SkeletonHBaseTest {
 
   }
 
-  private void populateHBaseTable(String tName) throws IOException {
+  private void populateHBaseTable(String tName, Connection connection) throws IOException {
     List<Put> myPuts = generatePuts(tName);
-    HTable table = new HTable(getHbaseConf(), Bytes.toBytes(tName));
-    table.put(myPuts);
+    Table table = null;
+    try {
+      table = connection.getTable(TableName.valueOf(tName));
+      table.put(myPuts);
+    } finally {
+      if (table != null) {
+        table.close();
+      }
+    }
   }
 
   private List<Put> generatePuts(String tableName) throws IOException {
@@ -107,8 +118,8 @@ public class TestPigHBaseStorageHandler extends SkeletonHBaseTest {
     myPuts = new ArrayList<Put>();
     for (int i = 1; i <=10; i++) {
       Put put = new Put(Bytes.toBytes(i));
-      put.add(FAMILY, QUALIFIER1, 1, Bytes.toBytes("textA-" + i));
-      put.add(FAMILY, QUALIFIER2, 1, Bytes.toBytes("textB-" + i));
+      put.addColumn(FAMILY, QUALIFIER1, 1, Bytes.toBytes("textA-" + i));
+      put.addColumn(FAMILY, QUALIFIER2, 1, Bytes.toBytes("textB-" + i));
       myPuts.add(put);
     }
     return myPuts;
@@ -165,8 +176,22 @@ public class TestPigHBaseStorageHandler extends SkeletonHBaseTest {
 
     CommandProcessorResponse responseThree = driver.run(tableQuery);
 
-    HBaseAdmin hAdmin = new HBaseAdmin(getHbaseConf());
-    boolean doesTableExist = hAdmin.tableExists(hbaseTableName);
+    Connection connection = null;
+    Admin hAdmin = null;
+    boolean doesTableExist = false;
+    try {
+      connection = ConnectionFactory.createConnection(getHbaseConf());
+      hAdmin = connection.getAdmin();
+      doesTableExist = hAdmin.tableExists(TableName.valueOf(hbaseTableName));
+    } finally {
+      if (hAdmin != null) {
+        hAdmin.close();
+      }
+      if (connection != null) {
+        connection.close();
+      }
+    }
+
     assertTrue(doesTableExist);
 
     PigServer server = new PigServer(ExecType.LOCAL,hcatConf.getAllProperties());
@@ -220,17 +245,39 @@ public class TestPigHBaseStorageHandler extends SkeletonHBaseTest {
 
     CommandProcessorResponse responseThree = driver.run(tableQuery);
 
-    HBaseAdmin hAdmin = new HBaseAdmin(getHbaseConf());
-    boolean doesTableExist = hAdmin.tableExists(hbaseTableName);
-    assertTrue(doesTableExist);
+    Connection connection = null;
+    Admin hAdmin = null;
+    Table table = null;
+    ResultScanner scanner = null;
+    boolean doesTableExist = false;
+    try {
+      connection = ConnectionFactory.createConnection(getHbaseConf());
+      hAdmin = connection.getAdmin();
+      doesTableExist = hAdmin.tableExists(TableName.valueOf(hbaseTableName));
+
+      assertTrue(doesTableExist);
+
+      populateHBaseTable(hbaseTableName, connection);
 
-    populateHBaseTable(hbaseTableName);
+      table = connection.getTable(TableName.valueOf(hbaseTableName));
+      Scan scan = new Scan();
+      scan.addFamily(Bytes.toBytes("testFamily"));
+      scanner = table.getScanner(scan);
+    } finally {
+      if (scanner != null) {
+        scanner.close();
+      }
+      if (table != null ) {
+        table.close();
+      }
+      if (hAdmin != null) {
+        hAdmin.close();
+      }
+      if (connection != null) {
+        connection.close();
+      }
+    }
 
-    Configuration conf = new Configuration(getHbaseConf());
-    HTable table = new HTable(conf, hbaseTableName);
-    Scan scan = new Scan();
-    scan.addFamily(Bytes.toBytes("testFamily"));
-    ResultScanner scanner = table.getScanner(scan);
     int index=1;
 
     PigServer server = new PigServer(ExecType.LOCAL,hcatConf.getAllProperties());
@@ -288,59 +335,80 @@ public class TestPigHBaseStorageHandler extends SkeletonHBaseTest {
 
     CommandProcessorResponse responseThree = driver.run(tableQuery);
 
-    HBaseAdmin hAdmin = new HBaseAdmin(getHbaseConf());
-    boolean doesTableExist = hAdmin.tableExists(hbaseTableName);
-    assertTrue(doesTableExist);
-
-
-    createTestDataFile(POPTXT_FILE_NAME);
-
-    PigServer server = new PigServer(ExecType.LOCAL,hcatConf.getAllProperties());
-    server.registerQuery("A = load '"+POPTXT_FILE_NAME+"' using PigStorage() as (key:int, testqualifier1:float, testqualifier2:chararray);");
-    server.registerQuery("B = filter A by (key > 2) AND (key < 8) ;");
-    server.registerQuery("store B into '"+databaseName.toLowerCase()+"."+tableName.toLowerCase()+"' using  org.apache.hive.hcatalog.pig.HCatStorer();");
-    server.registerQuery("C = load '"+databaseName.toLowerCase()+"."+tableName.toLowerCase()+"' using org.apache.hive.hcatalog.pig.HCatLoader();");
-    // Schema should be same
-    Schema dumpedBSchema = server.dumpSchema("C");
-
-    List<FieldSchema> fields = dumpedBSchema.getFields();
-    assertEquals(3, fields.size());
-
-    assertEquals(DataType.INTEGER,fields.get(0).type);
-    assertEquals("key",fields.get(0).alias.toLowerCase());
-
-    assertEquals( DataType.FLOAT,fields.get(1).type);
-    assertEquals("testQualifier1".toLowerCase(), 
fields.get(1).alias.toLowerCase());
-
-    assertEquals( DataType.CHARARRAY,fields.get(2).type);
-    assertEquals("testQualifier2".toLowerCase(), 
fields.get(2).alias.toLowerCase());
-
-    //Query the hbase table and check the key is valid and only 5  are present
-    Configuration conf = new Configuration(getHbaseConf());
-    HTable table = new HTable(conf, hbaseTableName);
-    Scan scan = new Scan();
-    scan.addFamily(Bytes.toBytes("testFamily"));
-    byte[] familyNameBytes = Bytes.toBytes("testFamily");
-    ResultScanner scanner = table.getScanner(scan);
-    int index=3;
-    int count=0;
-    for(Result result: scanner) {
-      //key is correct
-      assertEquals(index,Bytes.toInt(result.getRow()));
-      //first column exists
-      assertTrue(result.containsColumn(familyNameBytes,Bytes.toBytes("testQualifier1")));
-      //value is correct
-      assertEquals((index+f),Bytes.toFloat(result.getValue(familyNameBytes,Bytes.toBytes("testQualifier1"))),0);
-
-      //second column exists
-      assertTrue(result.containsColumn(familyNameBytes,Bytes.toBytes("testQualifier2")));
-      //value is correct
-      assertEquals(("textB-"+index).toString(),Bytes.toString(result.getValue(familyNameBytes,Bytes.toBytes("testQualifier2"))));
-      index++;
-      count++;
+    Connection connection = null;
+    Admin hAdmin = null;
+    Table table = null;
+    ResultScanner scanner = null;
+    boolean doesTableExist = false;
+    try {
+      connection = ConnectionFactory.createConnection(getHbaseConf());
+      hAdmin = connection.getAdmin();
+      doesTableExist = hAdmin.tableExists(TableName.valueOf(hbaseTableName));
+
+      assertTrue(doesTableExist);
+
+
+      createTestDataFile(POPTXT_FILE_NAME);
+
+      PigServer server = new PigServer(ExecType.LOCAL,hcatConf.getAllProperties());
+      server.registerQuery("A = load '"+POPTXT_FILE_NAME+"' using PigStorage() as (key:int, testqualifier1:float, testqualifier2:chararray);");
+      server.registerQuery("B = filter A by (key > 2) AND (key < 8) ;");
+      server.registerQuery("store B into '"+databaseName.toLowerCase()+"."+tableName.toLowerCase()+"' using  org.apache.hive.hcatalog.pig.HCatStorer();");
+      server.registerQuery("C = load '"+databaseName.toLowerCase()+"."+tableName.toLowerCase()+"' using org.apache.hive.hcatalog.pig.HCatLoader();");
+      // Schema should be same
+      Schema dumpedBSchema = server.dumpSchema("C");
+
+      List<FieldSchema> fields = dumpedBSchema.getFields();
+      assertEquals(3, fields.size());
+
+      assertEquals(DataType.INTEGER,fields.get(0).type);
+      assertEquals("key",fields.get(0).alias.toLowerCase());
+
+      assertEquals( DataType.FLOAT,fields.get(1).type);
+      assertEquals("testQualifier1".toLowerCase(), 
fields.get(1).alias.toLowerCase());
+
+      assertEquals( DataType.CHARARRAY,fields.get(2).type);
+      assertEquals("testQualifier2".toLowerCase(), 
fields.get(2).alias.toLowerCase());
+
+      //Query the hbase table and check the key is valid and only 5  are present
+      table = connection.getTable(TableName.valueOf(hbaseTableName));
+      Scan scan = new Scan();
+      scan.addFamily(Bytes.toBytes("testFamily"));
+      byte[] familyNameBytes = Bytes.toBytes("testFamily");
+      scanner = table.getScanner(scan);
+      int index=3;
+      int count=0;
+      for(Result result: scanner) {
+        //key is correct
+        assertEquals(index,Bytes.toInt(result.getRow()));
+        //first column exists
+        assertTrue(result.containsColumn(familyNameBytes,Bytes.toBytes("testQualifier1")));
+        //value is correct
+        assertEquals((index+f),Bytes.toFloat(result.getValue(familyNameBytes,Bytes.toBytes("testQualifier1"))),0);
+
+        //second column exists
+        assertTrue(result.containsColumn(familyNameBytes,Bytes.toBytes("testQualifier2")));
+        //value is correct
+        assertEquals(("textB-"+index).toString(),Bytes.toString(result.getValue(familyNameBytes,Bytes.toBytes("testQualifier2"))));
+        index++;
+        count++;
+      }
+      // 5 rows should be returned
+      assertEquals(count,5);
+    } finally {
+      if (scanner != null) {
+        scanner.close();
+      }
+      if (table != null ) {
+        table.close();
+      }
+      if (hAdmin != null) {
+        hAdmin.close();
+      }
+      if (connection != null) {
+        connection.close();
+      }
     }
-    // 5 rows should be returned
-    assertEquals(count,5);
 
     //Check if hive returns results correctly
     driver.run(selectQuery);
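
The test changes above are the HBase 2 client migration in miniature: the HTable and HBaseAdmin constructors are gone, so everything goes through a Connection obtained from ConnectionFactory. A minimal, self-contained sketch of that pattern follows; the table, family and qualifier names are placeholders, and try-with-resources stands in for the explicit finally blocks used in the patch (Connection, Admin and Table are all Closeable).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ConnectionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Connection, Admin and Table are Closeable, so try-with-resources replaces
        // the explicit finally { close() } blocks used in the test code above.
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          TableName name = TableName.valueOf("my_table");        // placeholder table name
          if (admin.tableExists(name)) {
            try (Table table = connection.getTable(name)) {
              Put put = new Put(Bytes.toBytes(1));               // row key
              put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
              table.put(put);
            }
          }
        }
      }
    }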

http://git-wip-us.apache.org/repos/asf/hive/blob/a3e87282/itests/hive-minikdc/pom.xml
----------------------------------------------------------------------
diff --git a/itests/hive-minikdc/pom.xml b/itests/hive-minikdc/pom.xml
index 95d2614..689e679 100644
--- a/itests/hive-minikdc/pom.xml
+++ b/itests/hive-minikdc/pom.xml
@@ -191,7 +191,7 @@
             <artifactId>commons-logging</artifactId>
           </exclusion>
       </exclusions>
-   </dependency>
+    </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-server</artifactId>
@@ -199,6 +199,12 @@
       <scope>test</scope>
     </dependency>
     <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-mapreduce</artifactId>
+      <version>${hbase.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-minicluster</artifactId>
       <scope>test</scope>

http://git-wip-us.apache.org/repos/asf/hive/blob/a3e87282/itests/hive-unit-hadoop2/pom.xml
----------------------------------------------------------------------
diff --git a/itests/hive-unit-hadoop2/pom.xml b/itests/hive-unit-hadoop2/pom.xml
index 339a194..f862dac 100644
--- a/itests/hive-unit-hadoop2/pom.xml
+++ b/itests/hive-unit-hadoop2/pom.xml
@@ -198,6 +198,12 @@
       <scope>test</scope>
     </dependency>
     <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-mapreduce</artifactId>
+      <version>${hbase.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-minicluster</artifactId>
       <scope>test</scope>

http://git-wip-us.apache.org/repos/asf/hive/blob/a3e87282/itests/hive-unit/pom.xml
----------------------------------------------------------------------
diff --git a/itests/hive-unit/pom.xml b/itests/hive-unit/pom.xml
index 1440983..ea5b7b9 100644
--- a/itests/hive-unit/pom.xml
+++ b/itests/hive-unit/pom.xml
@@ -125,6 +125,18 @@
             <groupId>commmons-logging</groupId>
             <artifactId>commons-logging</artifactId>
           </exclusion>
+          <exclusion>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-mapreduce-client-common</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-yarn-api</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-yarn-common</artifactId>
+          </exclusion>
       </exclusions>
     </dependency>
     <!-- dependencies are always listed in sorted order by groupId, artifectId 
-->
@@ -227,7 +239,7 @@
             <artifactId>commons-logging</artifactId>
           </exclusion>
       </exclusions>
-   </dependency>
+    </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-mapreduce-client-core</artifactId>
@@ -253,6 +265,19 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-common</artifactId>
+      <version>${hbase.version}</version>
+      <classifier>tests</classifier>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-procedure</artifactId>
+      <version>${hbase.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-hadoop-compat</artifactId>
       <version>${hbase.version}</version>
       <type>test-jar</type>
@@ -366,6 +391,10 @@
             <groupId>commmons-logging</groupId>
             <artifactId>commons-logging</artifactId>
           </exclusion>
+          <exclusion>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-yarn-server-web-proxy</artifactId>
+          </exclusion>
       </exclusions>
     </dependency>
     <dependency>

http://git-wip-us.apache.org/repos/asf/hive/blob/a3e87282/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java
index dbfc235..65a1ed1 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.hive.ql.io.HiveInputFormat;
 import org.apache.hadoop.hive.ql.lockmgr.TestDbTxnManager2;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
 import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.tez.mapreduce.hadoop.MRJobConfig;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -106,6 +107,8 @@ public class TestAcidOnTez {
         .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
             "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
     TxnDbUtil.setConfValues(hiveConf);
+    hiveConf.setInt(MRJobConfig.MAP_MEMORY_MB, 1024);
+    hiveConf.setInt(MRJobConfig.REDUCE_MEMORY_MB, 1024);
     TxnDbUtil.prepDb(hiveConf);
     File f = new File(TEST_WAREHOUSE_DIR);
     if (f.exists()) {

http://git-wip-us.apache.org/repos/asf/hive/blob/a3e87282/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
index 707bcd1..7103fb9 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
@@ -1207,7 +1207,7 @@ public class TestCompactor {
     t.init(stop, looped);
     t.run();
     JobConf job = t.getMrJob();
-    Assert.assertEquals("2048", job.get("mapreduce.map.memory.mb"));  // 2048 
comes from tblproperties
+    Assert.assertEquals(2048, job.getMemoryForMapTask());  // 2048 comes from 
tblproperties
     // Compact ttp1
     stop = new AtomicBoolean(true);
     t = new Worker();
@@ -1217,7 +1217,7 @@ public class TestCompactor {
     t.init(stop, looped);
     t.run();
     job = t.getMrJob();
-    Assert.assertEquals("1024", job.get("mapreduce.map.memory.mb"));  // 1024 
is the default value
+    Assert.assertEquals(1024, job.getMemoryForMapTask());  // 1024 is the 
default value
     // Clean up
     runCleaner(conf);
     rsp = txnHandler.showCompact(new ShowCompactRequest());
@@ -1269,7 +1269,7 @@ public class TestCompactor {
     t.init(stop, looped);
     t.run();
     job = t.getMrJob();
-    Assert.assertEquals("3072", job.get("mapreduce.map.memory.mb"));
+    Assert.assertEquals(3072, job.getMemoryForMapTask());
     Assert.assertTrue(job.get("hive.compactor.table.props").contains("orc.compress.size4:8192"));
   }
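
The compactor assertions above stop reading the raw "mapreduce.map.memory.mb" string, presumably because on Hadoop 3 the property can sit at a placeholder until the framework derives a value; JobConf.getMemoryForMapTask() returns the resolved megabytes either way. A small sketch of the getter/setter pair, assuming only a locally constructed JobConf:

    import org.apache.hadoop.mapred.JobConf;

    public class MapMemorySketch {
      public static void main(String[] args) {
        JobConf job = new JobConf();
        // setMemoryForMapTask writes mapreduce.map.memory.mb; the getter returns the
        // effective value in MB rather than the raw (possibly unset) string.
        job.setMemoryForMapTask(2048);
        System.out.println(job.getMemoryForMapTask());   // 2048
      }
    }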
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a3e87282/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithLocalClusterSpark.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithLocalClusterSpark.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithLocalClusterSpark.java
index cabddea..dd24f02 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithLocalClusterSpark.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithLocalClusterSpark.java
@@ -70,6 +70,11 @@ public class TestJdbcWithLocalClusterSpark {
     conf.set("hive.execution.engine", "spark");
     conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer");
     conf.set("spark.master", "local-cluster[2,2,1024]");
+    // FIXME: Hadoop3 made the incompatible change for dfs.client.datanode-restart.timeout
+    // while spark2 is still using Hadoop2.
+    // Spark requires Hive to support Hadoop3 first then Spark can start
+    // working on Hadoop3 support. Remove this after Spark supports Hadoop3.
+    conf.set("dfs.client.datanode-restart.timeout", "30");
     return conf;
   }
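
The FIXME above refers to Hadoop 3 expressing dfs.client.datanode-restart.timeout as a unit-suffixed duration (e.g. "30s"), which the Hadoop 2 client bundled with Spark reads as a plain long and cannot parse; pinning a bare number keeps both readers happy. A small sketch of the two read paths, assuming nothing beyond a stock Configuration:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;

    public class TimeoutParsingSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        conf.set("dfs.client.datanode-restart.timeout", "30");   // bare number, readable by both
        // Hadoop 3 style read: accepts "30", "30s", "1m", ...
        long h3 = conf.getTimeDuration("dfs.client.datanode-restart.timeout", 30, TimeUnit.SECONDS);
        // Hadoop 2 style read: only a plain number parses; "30s" would throw NumberFormatException
        long h2 = conf.getLong("dfs.client.datanode-restart.timeout", 30);
        System.out.println(h3 + " " + h2);
      }
    }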
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a3e87282/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestMultiSessionsHS2WithLocalClusterSpark.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestMultiSessionsHS2WithLocalClusterSpark.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestMultiSessionsHS2WithLocalClusterSpark.java
index e3f9646..2156f4b 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestMultiSessionsHS2WithLocalClusterSpark.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestMultiSessionsHS2WithLocalClusterSpark.java
@@ -79,6 +79,11 @@ public class TestMultiSessionsHS2WithLocalClusterSpark {
     conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer");
     conf.set("spark.master", "local-cluster[2,2,1024]");
     conf.set("spark.deploy.defaultCores", "2");
+    // FIXME: Hadoop3 made the incompatible change for dfs.client.datanode-restart.timeout
+    // while spark2 is still using Hadoop2.
+    // Spark requires Hive to support Hadoop3 first then Spark can start
+    // working on Hadoop3 support. Remove this after Spark supports Hadoop3.
+    conf.set("dfs.client.datanode-restart.timeout", "30");
     return conf;
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a3e87282/itests/qtest-accumulo/pom.xml
----------------------------------------------------------------------
diff --git a/itests/qtest-accumulo/pom.xml b/itests/qtest-accumulo/pom.xml
index 40d0a74..29ea428 100644
--- a/itests/qtest-accumulo/pom.xml
+++ b/itests/qtest-accumulo/pom.xml
@@ -40,6 +40,7 @@
     <skip.accumulo.tests>true</skip.accumulo.tests>
     <!-- Must correspond with the Accumulo version specified in the pom -->
     <accumulo-thrift.version>0.9.1</accumulo-thrift.version>
+    <accumulo-htrace.version>3.1.0-incubating</accumulo-htrace.version>
     <test.dfs.mkdir>-mkdir -p</test.dfs.mkdir>
   </properties>
 
@@ -260,23 +261,9 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-common</artifactId>
-      <version>${hbase.version}</version>
-      <scope>test</scope>
-      <classifier>tests</classifier>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-hadoop-compat</artifactId>
-      <version>${hbase.version}</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-hadoop-compat</artifactId>
       <version>${hbase.version}</version>
       <scope>test</scope>
-      <classifier>tests</classifier>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
@@ -286,10 +273,9 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-hadoop2-compat</artifactId>
+      <artifactId>hbase-mapreduce</artifactId>
       <version>${hbase.version}</version>
       <scope>test</scope>
-      <classifier>tests</classifier>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
@@ -298,10 +284,9 @@
       <scope>test</scope>
     </dependency>
     <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-server</artifactId>
-      <version>${hbase.version}</version>
-      <classifier>tests</classifier>
+      <groupId>org.apache.htrace</groupId>
+      <artifactId>htrace-core</artifactId>
+      <version>${accumulo-htrace.version}</version>
       <scope>test</scope>
     </dependency>
     <dependency>
@@ -385,9 +370,15 @@
       </properties>
       <dependencies>
        <dependency>
-               <groupId>org.apache.accumulo</groupId>
-               <artifactId>accumulo-minicluster</artifactId>
-               <scope>test</scope>
+         <groupId>org.apache.accumulo</groupId>
+         <artifactId>accumulo-minicluster</artifactId>
+         <scope>test</scope>
+          <exclusions>
+            <exclusion>
+              <groupId>commons-beanutils</groupId>
+              <artifactId>commons-beanutils-core</artifactId>
+            </exclusion>
+          </exclusions>
         </dependency>
         <!-- Override the thrift dependency pulled in for metaserver -->
         <dependency>

http://git-wip-us.apache.org/repos/asf/hive/blob/a3e87282/itests/qtest-spark/pom.xml
----------------------------------------------------------------------
diff --git a/itests/qtest-spark/pom.xml b/itests/qtest-spark/pom.xml
index a506f7f..b9b17b6 100644
--- a/itests/qtest-spark/pom.xml
+++ b/itests/qtest-spark/pom.xml
@@ -34,7 +34,6 @@
     <test.console.log.level>OFF</test.console.log.level>
 
     <!-- The following are to match the latest in spark project, overriding 
hive's versions -->
-    <spark.jetty.version>8.1.14.v20131031</spark.jetty.version>
     <spark.kryo.version>2.21</spark.kryo.version>
     <qfile></qfile>
     <qfile_regex></qfile_regex>
@@ -67,25 +66,31 @@
     <dependency>
       <groupId>org.eclipse.jetty</groupId>
       <artifactId>jetty-util</artifactId>
-      <version>${spark.jetty.version}</version>
+      <version>${jetty.version}</version>
       <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.eclipse.jetty</groupId>
       <artifactId>jetty-security</artifactId>
-      <version>${spark.jetty.version}</version>
+      <version>${jetty.version}</version>
       <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.eclipse.jetty</groupId>
       <artifactId>jetty-plus</artifactId>
-      <version>${spark.jetty.version}</version>
+      <version>${jetty.version}</version>
       <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.eclipse.jetty</groupId>
       <artifactId>jetty-server</artifactId>
-      <version>${spark.jetty.version}</version>
+      <version>${jetty.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.eclipse.jetty</groupId>
+      <artifactId>jetty-servlet</artifactId>
+      <version>${jetty.version}</version>
       <scope>test</scope>
     </dependency>
     <dependency>
@@ -316,6 +321,12 @@
       <scope>test</scope>
     </dependency>
     <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-mapreduce</artifactId>
+      <version>${hbase.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
       <version>${junit.version}</version>

http://git-wip-us.apache.org/repos/asf/hive/blob/a3e87282/itests/qtest/pom.xml
----------------------------------------------------------------------
diff --git a/itests/qtest/pom.xml b/itests/qtest/pom.xml
index 02664f3..7f7d5f3 100644
--- a/itests/qtest/pom.xml
+++ b/itests/qtest/pom.xml
@@ -328,6 +328,12 @@
       <scope>test</scope>
     </dependency>
     <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-mapreduce</artifactId>
+      <version>${hbase.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
       <groupId>org.apache.tez</groupId>
       <artifactId>tez-tests</artifactId>
       <version>${tez.version}</version>

http://git-wip-us.apache.org/repos/asf/hive/blob/a3e87282/itests/util/pom.xml
----------------------------------------------------------------------
diff --git a/itests/util/pom.xml b/itests/util/pom.xml
index e6dc09f..16118b5 100644
--- a/itests/util/pom.xml
+++ b/itests/util/pom.xml
@@ -38,7 +38,7 @@
       <groupId>org.apache.accumulo</groupId>
       <artifactId>accumulo-minicluster</artifactId>
         <exclusions>
-             <exclusion>
+          <exclusion>
             <groupId>org.slf4j</groupId>
             <artifactId>slf4j-log4j12</artifactId>
           </exclusion>
@@ -46,6 +46,10 @@
             <groupId>commmons-logging</groupId>
             <artifactId>commons-logging</artifactId>
           </exclusion>
+          <exclusion>
+            <groupId>commons-beanutils</groupId>
+            <artifactId>commons-beanutils-core</artifactId>
+          </exclusion>
       </exclusions>
    </dependency>
     <dependency>
@@ -143,6 +147,11 @@
       <artifactId>hbase-server</artifactId>
       <version>${hbase.version}</version>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-mapreduce</artifactId>
+      <version>${hbase.version}</version>
+    </dependency>
     <!-- test inter-project -->
     <dependency>
       <groupId>junit</groupId>
@@ -170,12 +179,18 @@
             <artifactId>commons-logging</artifactId>
           </exclusion>
       </exclusions>
-   </dependency>
+    </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-server</artifactId>
       <version>${hbase.version}</version>
       <classifier>tests</classifier>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-mapreduce</artifactId>
+      <version>${hbase.version}</version>
+      <classifier>tests</classifier>
+    </dependency>
   </dependencies>
 </project>

http://git-wip-us.apache.org/repos/asf/hive/blob/a3e87282/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseQTestUtil.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseQTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseQTestUtil.java
index 0cc9a89..e5d72e0 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseQTestUtil.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseQTestUtil.java
@@ -17,14 +17,12 @@
  */
 package org.apache.hadoop.hive.hbase;
 
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.ql.QTestUtil;
 
-import java.util.List;
-
 /**
  * HBaseQTestUtil initializes HBase-specific test fixtures.
  */
@@ -37,7 +35,7 @@ public class HBaseQTestUtil extends QTestUtil {
   public static String HBASE_SRC_SNAPSHOT_NAME = "src_hbase_snapshot";
 
   /** A handle to this harness's cluster */
-  private final HConnection conn;
+  private final Connection conn;
 
   private HBaseTestSetup hbaseSetup = null;
 
@@ -53,19 +51,6 @@ public class HBaseQTestUtil extends QTestUtil {
     super.init();
   }
 
-  /** return true when HBase table snapshot exists, false otherwise. */
-  private static boolean hbaseTableSnapshotExists(HBaseAdmin admin, String snapshotName) throws
-      Exception {
-    List<HBaseProtos.SnapshotDescription> snapshots =
-      admin.listSnapshots(".*" + snapshotName + ".*");
-    for (HBaseProtos.SnapshotDescription sn : snapshots) {
-      if (sn.getName().equals(HBASE_SRC_SNAPSHOT_NAME)) {
-        return true;
-      }
-    }
-    return false;
-  }
-
   @Override
   public void init() throws Exception {
     // defer
@@ -93,10 +78,10 @@ public class HBaseQTestUtil extends QTestUtil {
     runCmd("INSERT OVERWRITE TABLE " + HBASE_SRC_NAME + " SELECT * FROM src");
 
     // create a snapshot
-    HBaseAdmin admin = null;
+    Admin admin = null;
     try {
-      admin = new HBaseAdmin(conn.getConfiguration());
-      admin.snapshot(HBASE_SRC_SNAPSHOT_NAME, HBASE_SRC_NAME);
+      admin = conn.getAdmin();
+      admin.snapshot(HBASE_SRC_SNAPSHOT_NAME, TableName.valueOf(HBASE_SRC_NAME));
     } finally {
       if (admin != null) admin.close();
     }
@@ -111,12 +96,10 @@ public class HBaseQTestUtil extends QTestUtil {
     // drop in case leftover from unsuccessful run
     db.dropTable(Warehouse.DEFAULT_DATABASE_NAME, HBASE_SRC_NAME);
 
-    HBaseAdmin admin = null;
+    Admin admin = null;
     try {
-      admin = new HBaseAdmin(conn.getConfiguration());
-      if (hbaseTableSnapshotExists(admin, HBASE_SRC_SNAPSHOT_NAME)) {
-        admin.deleteSnapshot(HBASE_SRC_SNAPSHOT_NAME);
-      }
+      admin = conn.getAdmin();
+      admin.deleteSnapshots(HBASE_SRC_SNAPSHOT_NAME);
     } finally {
       if (admin != null) admin.close();
     }
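
The deleted hbaseTableSnapshotExists helper is no longer needed because Admin.deleteSnapshots takes a name pattern and simply deletes whatever matches, so a missing snapshot is not an error. A minimal sketch of the two snapshot calls as used above; the connection setup and the table/snapshot names here are illustrative only:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // snapshot a table, then clean up every snapshot whose name matches the pattern
          admin.snapshot("src_hbase_snapshot", TableName.valueOf("src_hbase"));
          admin.deleteSnapshots("src_hbase_snapshot");   // no-op if nothing matches
        }
      }
    }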

http://git-wip-us.apache.org/repos/asf/hive/blob/a3e87282/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseTestSetup.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseTestSetup.java b/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseTestSetup.java
index 4f8fa05..5db44d2 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseTestSetup.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseTestSetup.java
@@ -28,12 +28,12 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
-import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.mapred.JobConf;
@@ -48,11 +48,11 @@ public class HBaseTestSetup {
   private MiniHBaseCluster hbaseCluster;
   private int zooKeeperPort;
   private String hbaseRoot;
-  private HConnection hbaseConn;
+  private Connection hbaseConn;
 
   private static final int NUM_REGIONSERVERS = 1;
 
-  public HConnection getConnection() {
+  public Connection getConnection() {
     return this.hbaseConn;
   }
 
@@ -94,12 +94,15 @@ public class HBaseTestSetup {
     hbaseConf.setInt("hbase.master.info.port", -1);
     hbaseConf.setInt("hbase.regionserver.port", findFreePort());
     hbaseConf.setInt("hbase.regionserver.info.port", -1);
+    // Fix needed due to dependency for hbase-mapreduce module
+    System.setProperty("org.apache.hadoop.hbase.shaded.io.netty.packagePrefix",
+        "org.apache.hadoop.hbase.shaded.");
     hbaseCluster = new MiniHBaseCluster(hbaseConf, NUM_REGIONSERVERS);
     conf.set("hbase.master", 
hbaseCluster.getMaster().getServerName().getHostAndPort());
-    hbaseConn = HConnectionManager.createConnection(hbaseConf);
+    hbaseConn = ConnectionFactory.createConnection(hbaseConf);
 
     // opening the META table ensures that cluster is running
-    HTableInterface meta = null;
+    Table meta = null;
     try {
       meta = hbaseConn.getTable(TableName.META_TABLE_NAME);
     } finally {
@@ -110,7 +113,7 @@ public class HBaseTestSetup {
 
   private void createHBaseTable() throws IOException {
     final String HBASE_TABLE_NAME = "HiveExternalTable";
-    HTableDescriptor htableDesc = new HTableDescriptor(HBASE_TABLE_NAME.getBytes());
+    HTableDescriptor htableDesc = new HTableDescriptor(TableName.valueOf(HBASE_TABLE_NAME));
     HColumnDescriptor hcolDesc = new HColumnDescriptor("cf".getBytes());
     htableDesc.addFamily(hcolDesc);
 
@@ -123,16 +126,16 @@ public class HBaseTestSetup {
     float [] floats = new float [] { Float.MIN_VALUE, -1.0F, Float.MAX_VALUE };
     double [] doubles = new double [] { Double.MIN_VALUE, -1.0, Double.MAX_VALUE };
 
-    HBaseAdmin hbaseAdmin = null;
-    HTableInterface htable = null;
+    Admin hbaseAdmin = null;
+    Table htable = null;
     try {
-      hbaseAdmin = new HBaseAdmin(hbaseConn.getConfiguration());
+      hbaseAdmin = hbaseConn.getAdmin();
       if (Arrays.asList(hbaseAdmin.listTables()).contains(htableDesc)) {
         // if table is already in there, don't recreate.
         return;
       }
       hbaseAdmin.createTable(htableDesc);
-      htable = hbaseConn.getTable(HBASE_TABLE_NAME);
+      htable = hbaseConn.getTable(TableName.valueOf(HBASE_TABLE_NAME));
 
       // data
       Put[] puts = new Put[]{
@@ -140,14 +143,14 @@ public class HBaseTestSetup {
 
       // store data
       for (int i = 0; i < puts.length; i++) {
-        puts[i].add("cf".getBytes(), "cq-boolean".getBytes(), 
Bytes.toBytes(booleans[i]));
-        puts[i].add("cf".getBytes(), "cq-byte".getBytes(), new 
byte[]{bytes[i]});
-        puts[i].add("cf".getBytes(), "cq-short".getBytes(), 
Bytes.toBytes(shorts[i]));
-        puts[i].add("cf".getBytes(), "cq-int".getBytes(), 
Bytes.toBytes(ints[i]));
-        puts[i].add("cf".getBytes(), "cq-long".getBytes(), 
Bytes.toBytes(longs[i]));
-        puts[i].add("cf".getBytes(), "cq-string".getBytes(), 
Bytes.toBytes(strings[i]));
-        puts[i].add("cf".getBytes(), "cq-float".getBytes(), 
Bytes.toBytes(floats[i]));
-        puts[i].add("cf".getBytes(), "cq-double".getBytes(), 
Bytes.toBytes(doubles[i]));
+        puts[i].addColumn("cf".getBytes(), "cq-boolean".getBytes(), 
Bytes.toBytes(booleans[i]));
+        puts[i].addColumn("cf".getBytes(), "cq-byte".getBytes(), new 
byte[]{bytes[i]});
+        puts[i].addColumn("cf".getBytes(), "cq-short".getBytes(), 
Bytes.toBytes(shorts[i]));
+        puts[i].addColumn("cf".getBytes(), "cq-int".getBytes(), 
Bytes.toBytes(ints[i]));
+        puts[i].addColumn("cf".getBytes(), "cq-long".getBytes(), 
Bytes.toBytes(longs[i]));
+        puts[i].addColumn("cf".getBytes(), "cq-string".getBytes(), 
Bytes.toBytes(strings[i]));
+        puts[i].addColumn("cf".getBytes(), "cq-float".getBytes(), 
Bytes.toBytes(floats[i]));
+        puts[i].addColumn("cf".getBytes(), "cq-double".getBytes(), 
Bytes.toBytes(doubles[i]));
 
         htable.put(puts[i]);
       }
@@ -170,7 +173,6 @@ public class HBaseTestSetup {
       hbaseConn = null;
     }
     if (hbaseCluster != null) {
-      HConnectionManager.deleteAllConnections(true);
       hbaseCluster.shutdown();
       hbaseCluster = null;
     }
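
The setup above keeps HTableDescriptor/HColumnDescriptor, which HBase 2 still accepts but marks deprecated. For comparison only, a sketch of the builder-style equivalent, assuming the HBase 2 TableDescriptorBuilder/ColumnFamilyDescriptorBuilder API; this is not what the patch does, just the newer spelling of the same table definition:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        // same table shape as createHBaseTable(): one table with a single family "cf"
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("HiveExternalTable"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            .build();
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          if (!admin.tableExists(desc.getTableName())) {
            admin.createTable(desc);
          }
        }
      }
    }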

http://git-wip-us.apache.org/repos/asf/hive/blob/a3e87282/llap-server/pom.xml
----------------------------------------------------------------------
diff --git a/llap-server/pom.xml b/llap-server/pom.xml
index 47a04cc..176110d 100644
--- a/llap-server/pom.xml
+++ b/llap-server/pom.xml
@@ -82,8 +82,8 @@
     </dependency>
     <dependency>
       <groupId>io.netty</groupId>
-      <artifactId>netty</artifactId>
-      <version>3.6.2.Final</version>
+      <artifactId>netty-all</artifactId>
+      <version>${netty.version}</version>
     </dependency>
     <dependency>
       <groupId>org.apache.avro</groupId>
@@ -153,6 +153,18 @@
           <groupId>commmons-logging</groupId>
           <artifactId>commons-logging</artifactId>
         </exclusion>
+        <exclusion>
+          <groupId>io.netty</groupId>
+          <artifactId>netty-all</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-yarn-client</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-yarn-api</artifactId>
+        </exclusion>
       </exclusions>
     </dependency>
     <dependency>
@@ -169,6 +181,14 @@
           <groupId>commmons-logging</groupId>
           <artifactId>commons-logging</artifactId>
         </exclusion>
+        <exclusion>
+          <groupId>io.netty</groupId>
+          <artifactId>netty-all</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-yarn-api</artifactId>
+        </exclusion>
       </exclusions>
     </dependency>
     <dependency>
@@ -234,6 +254,16 @@
       <groupId>org.codehaus.jettison</groupId>
       <artifactId>jettison</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.eclipse.jetty</groupId>
+      <artifactId>jetty-server</artifactId>
+      <version>${jetty.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.eclipse.jetty</groupId>
+      <artifactId>jetty-util</artifactId>
+      <version>${jetty.version}</version>
+    </dependency>
 
     <!-- test intra-project -->
     <dependency>
@@ -266,6 +296,12 @@
       <artifactId>hadoop-hdfs</artifactId>
       <version>${hadoop.version}</version>
       <scope>test</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>io.netty</groupId>
+          <artifactId>netty-all</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>
@@ -279,6 +315,12 @@
       <version>${hadoop.version}</version>
       <classifier>tests</classifier>
       <scope>test</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>io.netty</groupId>
+          <artifactId>netty-all</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>junit</groupId>
@@ -326,6 +368,21 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-mapreduce</artifactId>
+      <version>${hbase.version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>org.slf4j</groupId>
+          <artifactId>slf4j-log4j12</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commmons-logging</groupId>
+          <artifactId>commons-logging</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-common</artifactId>
       <version>${hbase.version}</version>
     </dependency>

http://git-wip-us.apache.org/repos/asf/hive/blob/a3e87282/llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java b/llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java
index 6b08da6..51fc1c5 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java
@@ -698,9 +698,9 @@ public class ShuffleHandler implements AttemptRegistrationListener {
       }
       // Check whether the shuffle version is compatible
       if (!ShuffleHeader.DEFAULT_HTTP_HEADER_NAME.equals(
-          request.getHeader(ShuffleHeader.HTTP_HEADER_NAME))
+          request.headers().get(ShuffleHeader.HTTP_HEADER_NAME))
           || !ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION.equals(
-              request.getHeader(ShuffleHeader.HTTP_HEADER_VERSION))) {
+              request.headers().get(ShuffleHeader.HTTP_HEADER_VERSION))) {
         sendError(ctx, "Incompatible shuffle request version", BAD_REQUEST);
       }
       final Map<String,List<String>> q =
@@ -904,12 +904,12 @@ public class ShuffleHandler implements AttemptRegistrationListener {
         boolean keepAliveParam, long contentLength) {
       if (!connectionKeepAliveEnabled && !keepAliveParam) {
         LOG.info("Setting connection close header...");
-        response.setHeader(HttpHeaders.Names.CONNECTION, CONNECTION_CLOSE);
+        response.headers().add(HttpHeaders.Names.CONNECTION, CONNECTION_CLOSE);
       } else {
-        response.setHeader(HttpHeaders.Names.CONTENT_LENGTH,
+        response.headers().add(HttpHeaders.Names.CONTENT_LENGTH,
           String.valueOf(contentLength));
-        response.setHeader(HttpHeaders.Names.CONNECTION, HttpHeaders.Values.KEEP_ALIVE);
-        response.setHeader(HttpHeaders.Values.KEEP_ALIVE, "timeout="
+        response.headers().add(HttpHeaders.Names.CONNECTION, HttpHeaders.Values.KEEP_ALIVE);
+        response.headers().add(HttpHeaders.Values.KEEP_ALIVE, "timeout="
             + connectionKeepAliveTimeOut);
         LOG.debug("Content Length in shuffle : " + contentLength);
       }
@@ -937,7 +937,7 @@ public class ShuffleHandler implements AttemptRegistrationListener {
       String enc_str = SecureShuffleUtils.buildMsgFrom(requestUri);
       // hash from the fetcher
       String urlHashStr =
-        request.getHeader(SecureShuffleUtils.HTTP_HEADER_URL_HASH);
+        request.headers().get(SecureShuffleUtils.HTTP_HEADER_URL_HASH);
       if (urlHashStr == null) {
         LOG.info("Missing header hash for " + appid);
         throw new IOException("fetcher cannot be authenticated");
@@ -953,11 +953,11 @@ public class ShuffleHandler implements AttemptRegistrationListener {
       String reply =
         SecureShuffleUtils.generateHash(urlHashStr.getBytes(Charsets.UTF_8), 
             tokenSecret);
-      response.setHeader(SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH, reply);
+      response.headers().add(SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH, reply);
       // Put shuffle version into http header
-      response.setHeader(ShuffleHeader.HTTP_HEADER_NAME,
+      response.headers().add(ShuffleHeader.HTTP_HEADER_NAME,
           ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
-      response.setHeader(ShuffleHeader.HTTP_HEADER_VERSION,
+      response.headers().add(ShuffleHeader.HTTP_HEADER_VERSION,
           ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
       if (LOG.isDebugEnabled()) {
         int len = reply.length();
@@ -1025,11 +1025,11 @@ public class ShuffleHandler implements AttemptRegistrationListener {
     protected void sendError(ChannelHandlerContext ctx, String message,
         HttpResponseStatus status) {
       HttpResponse response = new DefaultHttpResponse(HTTP_1_1, status);
-      response.setHeader(CONTENT_TYPE, "text/plain; charset=UTF-8");
+      response.headers().add(CONTENT_TYPE, "text/plain; charset=UTF-8");
       // Put shuffle version into http header
-      response.setHeader(ShuffleHeader.HTTP_HEADER_NAME,
+      response.headers().add(ShuffleHeader.HTTP_HEADER_NAME,
           ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
-      response.setHeader(ShuffleHeader.HTTP_HEADER_VERSION,
+      response.headers().add(ShuffleHeader.HTTP_HEADER_VERSION,
           ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
       response.setContent(
         ChannelBuffers.copiedBuffer(message, CharsetUtil.UTF_8));
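
Every ShuffleHandler change above is the same substitution: the per-message getHeader/setHeader accessors give way to the mutable HttpHeaders view returned by headers(). A standalone sketch of that view, written here against Netty 4's io.netty classes as an assumption (the handler itself may still use the org.jboss.netty 3.x equivalents, where headers() has the same shape); the header names are placeholders:

    import io.netty.handler.codec.http.DefaultHttpResponse;
    import io.netty.handler.codec.http.HttpResponse;
    import io.netty.handler.codec.http.HttpResponseStatus;
    import io.netty.handler.codec.http.HttpVersion;

    public class HeadersSketch {
      public static void main(String[] args) {
        HttpResponse response = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
        // write through the headers() view instead of response.setHeader(...)
        response.headers().add("Content-Type", "text/plain; charset=UTF-8");
        response.headers().add("name", "mapreduce");             // placeholder shuffle header
        // read the same way instead of request.getHeader(...)
        String contentType = response.headers().get("Content-Type");
        System.out.println(contentType);
      }
    }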

http://git-wip-us.apache.org/repos/asf/hive/blob/a3e87282/metastore/pom.xml
----------------------------------------------------------------------
diff --git a/metastore/pom.xml b/metastore/pom.xml
index 04c6f47..c8f919c 100644
--- a/metastore/pom.xml
+++ b/metastore/pom.xml
@@ -155,7 +155,7 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs</artifactId>
+      <artifactId>hadoop-hdfs-client</artifactId>
       <version>${hadoop.version}</version>
       <optional>true</optional>
       <exclusions>

http://git-wip-us.apache.org/repos/asf/hive/blob/a3e87282/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 006e8f8..dc31bd5 100644
--- a/pom.xml
+++ b/pom.xml
@@ -144,10 +144,10 @@
     <guava.version>14.0.1</guava.version>
     <groovy.version>2.4.11</groovy.version>
     <h2database.version>1.3.166</h2database.version>
-    <hadoop.version>2.8.1</hadoop.version>
+    <hadoop.version>3.0.0-beta1</hadoop.version>
     
<hadoop.bin.path>${basedir}/${hive.path.to.root}/testutils/hadoop</hadoop.bin.path>
     <hamcrest.version>1.3</hamcrest.version>
-    <hbase.version>1.1.1</hbase.version>
+    <hbase.version>2.0.0-alpha3</hbase.version>
     <!-- required for logging test to avoid including hbase which pulls 
disruptor transitively -->
     <disruptor.version>3.3.0</disruptor.version>
     <hikaricp.version>2.6.1</hikaricp.version>
@@ -194,7 +194,7 @@
     <slf4j.version>1.7.10</slf4j.version>
     <ST4.version>4.0.4</ST4.version>
     <storage-api.version>3.0.0-SNAPSHOT</storage-api.version>
-    <tez.version>0.9.0</tez.version>
+    <tez.version>0.9.1-SNAPSHOT</tez.version>
     <slider.version>0.92.0-incubating</slider.version>
     <super-csv.version>2.2.0</super-csv.version>
     <spark.version>2.0.0</spark.version>
@@ -754,6 +754,10 @@
             <groupId>commmons-logging</groupId>
             <artifactId>commons-logging</artifactId>
           </exclusion>
+          <exclusion>
+            <groupId>com.codahale.metrics</groupId>
+            <artifactId>metrics-core</artifactId>
+          </exclusion>
         </exclusions>
      </dependency>
       <dependency>
@@ -835,6 +839,68 @@
         <groupId>org.apache.hbase</groupId>
         <artifactId>hbase-hadoop2-compat</artifactId>
         <version>${hbase.version}</version>
+        <exclusions>
+          <exclusion>
+            <groupId>javax.servlet</groupId>
+            <artifactId>servlet-api</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>javax.servlet.jsp</groupId>
+            <artifactId>jsp-api</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>org.jruby</groupId>
+            <artifactId>jruby-complete</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>org.jboss.netty</groupId>
+            <artifactId>netty</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>io.netty</groupId>
+            <artifactId>netty</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>org.mortbay.jetty</groupId>
+            <artifactId>jsp-2.1</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>org.mortbay.jetty</groupId>
+            <artifactId>jsp-api-2.1</artifactId>
+         </exclusion>
+          <exclusion>
+            <groupId>org.mortbay.jetty</groupId>
+            <artifactId>servlet-api-2.5</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>org.mortbay.jetty</groupId>
+            <artifactId>servlet-api-2.5</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>com.sun.jersey</groupId>
+            <artifactId>jersey-core</artifactId>
+         </exclusion>
+          <exclusion>
+            <groupId>com.sun.jersey</groupId>
+            <artifactId>jersey-json</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>com.sun.jersey</groupId>
+            <artifactId>jersey-server</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>org.mortbay.jetty</groupId>
+            <artifactId>jetty</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>org.mortbay.jetty</groupId>
+            <artifactId>jetty-util</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>com.codahale.metrics</groupId>
+            <artifactId>metrics-core</artifactId>
+          </exclusion>
+        </exclusions>
       </dependency>
       <dependency>
         <groupId>org.apache.hbase</groupId>
@@ -842,6 +908,11 @@
         <version>${hbase.version}</version>
       </dependency>
       <dependency>
+        <groupId>org.apache.hbase</groupId>
+        <artifactId>hbase-mapreduce</artifactId>
+        <version>${hbase.version}</version>
+      </dependency>
+      <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-minicluster</artifactId>
         <version>${hadoop.version}</version>

http://git-wip-us.apache.org/repos/asf/hive/blob/a3e87282/ql/src/test/org/apache/hadoop/hive/ql/io/TestRCFile.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/TestRCFile.java b/ql/src/test/org/apache/hadoop/hive/ql/io/TestRCFile.java
index 19b97e4..dc83a4b 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/TestRCFile.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/TestRCFile.java
@@ -61,6 +61,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.StructField;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.compress.CompressionCodec;
@@ -101,7 +102,7 @@ public class TestRCFile {
   private final BytesRefArrayWritable patialS = new BytesRefArrayWritable();
   private byte[][] bytesArray;
   private BytesRefArrayWritable s;
-
+  private int numRepeat = 1000;
   @Before
   public void setup() throws Exception {
     conf = new Configuration();
@@ -143,6 +144,8 @@ public class TestRCFile {
       // LazyString has no so-called NULL sequence. The value is empty string if not.
       patialS.set(7, new BytesRefWritable("".getBytes("UTF-8")));
 
+      numRepeat = (int) Math.ceil((double)SequenceFile.SYNC_INTERVAL / (double)bytesArray.length);
+
     } catch (UnsupportedEncodingException e) {
       throw new RuntimeException(e);
     }
@@ -659,24 +662,24 @@ public class TestRCFile {
   }
 
   private void splitBeforeSync() throws IOException {
-    writeThenReadByRecordReader(600, 1000, 2, 1, null);
+    writeThenReadByRecordReader(600, numRepeat, 2, 1, null);
   }
 
   private void splitRightBeforeSync() throws IOException {
-    writeThenReadByRecordReader(500, 1000, 2, 17750, null);
+    writeThenReadByRecordReader(500, numRepeat, 2, 17750, null);
   }
 
   private void splitInMiddleOfSync() throws IOException {
-    writeThenReadByRecordReader(500, 1000, 2, 17760, null);
+    writeThenReadByRecordReader(500, numRepeat, 2, 17760, null);
 
   }
 
   private void splitRightAfterSync() throws IOException {
-    writeThenReadByRecordReader(500, 1000, 2, 17770, null);
+    writeThenReadByRecordReader(500, numRepeat, 2, 17770, null);
   }
 
   private void splitAfterSync() throws IOException {
-    writeThenReadByRecordReader(500, 1000, 2, 19950, null);
+    writeThenReadByRecordReader(500, numRepeat, 2, 19950, null);
   }
 
   private void writeThenReadByRecordReader(int intervalRecordCount,
@@ -711,7 +714,7 @@ public class TestRCFile {
     jonconf.set("mapred.input.dir", testDir.toString());
     HiveConf.setLongVar(jonconf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, minSplitSize);
     InputSplit[] splits = inputFormat.getSplits(jonconf, splitNumber);
-    assertEquals("splits length should be " + splitNumber, splits.length, 
splitNumber);
+    assertEquals("splits length should be " + splitNumber, splitNumber, 
splits.length);
     int readCount = 0;
     for (int i = 0; i < splits.length; i++) {
       int previousReadCount = readCount;

http://git-wip-us.apache.org/repos/asf/hive/blob/a3e87282/ql/src/test/results/clientnegative/exim_00_unsupported_schema.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/exim_00_unsupported_schema.q.out b/ql/src/test/results/clientnegative/exim_00_unsupported_schema.q.out
index b582471..db0ab8d 100644
--- a/ql/src/test/results/clientnegative/exim_00_unsupported_schema.q.out
+++ b/ql/src/test/results/clientnegative/exim_00_unsupported_schema.q.out
@@ -19,4 +19,4 @@ POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@exim_department
 #### A masked pattern was here ####
-FAILED: SemanticException [Error 10320]: Error while performing IO operation : No FileSystem for scheme: nosuchschema
+FAILED: SemanticException [Error 10320]: Error while performing IO operation : No FileSystem for scheme "nosuchschema"

http://git-wip-us.apache.org/repos/asf/hive/blob/a3e87282/ql/src/test/results/clientnegative/external1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/external1.q.out b/ql/src/test/results/clientnegative/external1.q.out
index 661d669..f2bc9c6 100644
--- a/ql/src/test/results/clientnegative/external1.q.out
+++ b/ql/src/test/results/clientnegative/external1.q.out
@@ -3,4 +3,4 @@ PREHOOK: type: CREATETABLE
 #### A masked pattern was here ####
 PREHOOK: Output: database:default
 PREHOOK: Output: default@external1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. java.io.IOException: No FileSystem for scheme: invalidscheme
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. org.apache.hadoop.fs.UnsupportedFileSystemException: No FileSystem for scheme "invalidscheme"

http://git-wip-us.apache.org/repos/asf/hive/blob/a3e87282/ql/src/test/results/clientnegative/external2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/external2.q.out b/ql/src/test/results/clientnegative/external2.q.out
index eb5518c..05ddc28 100644
--- a/ql/src/test/results/clientnegative/external2.q.out
+++ b/ql/src/test/results/clientnegative/external2.q.out
@@ -10,4 +10,4 @@ POSTHOOK: Output: default@external2
 PREHOOK: type: ALTERTABLE_ADDPARTS
 #### A masked pattern was here ####
 PREHOOK: Output: default@external2
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. java.io.IOException: No FileSystem for scheme: invalidscheme
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. org.apache.hadoop.fs.UnsupportedFileSystemException: No FileSystem for scheme "invalidscheme"

http://git-wip-us.apache.org/repos/asf/hive/blob/a3e87282/serde/pom.xml
----------------------------------------------------------------------
diff --git a/serde/pom.xml b/serde/pom.xml
index 7419cfb..0247c32 100644
--- a/serde/pom.xml
+++ b/serde/pom.xml
@@ -152,6 +152,12 @@
    </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs-client</artifactId>
+      <version>${hadoop.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdfs</artifactId>
       <version>${hadoop.version}</version>
       <scope>test</scope>

http://git-wip-us.apache.org/repos/asf/hive/blob/a3e87282/shims/0.23/pom.xml
----------------------------------------------------------------------
diff --git a/shims/0.23/pom.xml b/shims/0.23/pom.xml
index 3ff1d38..61cf459 100644
--- a/shims/0.23/pom.xml
+++ b/shims/0.23/pom.xml
@@ -64,6 +64,12 @@
    </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs-client</artifactId>
+      <version>${hadoop.version}</version>
+      <optional>true</optional>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdfs</artifactId>
       <version>${hadoop.version}</version>
       <optional>true</optional>
@@ -198,6 +204,12 @@
      <version>${hadoop.version}</version>
       <optional>true</optional>
      <type>test-jar</type>
+     <exclusions>
+       <exclusion>
+         <groupId>com.codahale.metrics</groupId>
+         <artifactId>metrics-core</artifactId>
+       </exclusion>
+     </exclusions>
    </dependency>
    <dependency>
      <groupId>org.apache.hadoop</groupId>

http://git-wip-us.apache.org/repos/asf/hive/blob/a3e87282/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
----------------------------------------------------------------------
diff --git a/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java b/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
index e9445eb..1f86e76 100644
--- a/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
+++ b/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
@@ -296,8 +296,8 @@ public class Hadoop23Shims extends HadoopShimsSecure {
       JobConf jConf = new JobConf(conf);
       jConf.set("yarn.scheduler.capacity.root.queues", "default");
       jConf.set("yarn.scheduler.capacity.root.default.capacity", "100");
-      jConf.setInt(MRJobConfig.MAP_MEMORY_MB, 128);
-      jConf.setInt(MRJobConfig.REDUCE_MEMORY_MB, 128);
+      jConf.setInt(MRJobConfig.MAP_MEMORY_MB, 512);
+      jConf.setInt(MRJobConfig.REDUCE_MEMORY_MB, 512);
       jConf.setInt(MRJobConfig.MR_AM_VMEM_MB, 128);
       jConf.setInt(YarnConfiguration.YARN_MINICLUSTER_NM_PMEM_MB, 512);
       jConf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 128);
@@ -329,8 +329,8 @@ public class Hadoop23Shims extends HadoopShimsSecure {
       for (Map.Entry<String, String> pair: jConf) {
         conf.set(pair.getKey(), pair.getValue());
       }
-      conf.setInt(MRJobConfig.MAP_MEMORY_MB, 128);
-      conf.setInt(MRJobConfig.REDUCE_MEMORY_MB, 128);
+      conf.setInt(MRJobConfig.MAP_MEMORY_MB, 512);
+      conf.setInt(MRJobConfig.REDUCE_MEMORY_MB, 512);
       conf.setInt(MRJobConfig.MR_AM_VMEM_MB, 128);
     }
   }
@@ -1128,10 +1128,11 @@ public class Hadoop23Shims extends HadoopShimsSecure {
 
   @Override
   public boolean runDistCp(List<Path> srcPaths, Path dst, Configuration conf) throws IOException {
-    DistCpOptions options = new DistCpOptions(srcPaths, dst);
-    options.setSyncFolder(true);
-    options.setSkipCRC(true);
-    options.preserve(FileAttribute.BLOCKSIZE);
+    DistCpOptions options = new DistCpOptions.Builder(srcPaths, dst)
+        .withSyncFolder(true)
+        .withCRC(true)
+        .preserve(FileAttribute.BLOCKSIZE)
+        .build();
 
     // Creates the command-line parameters for distcp
     List<String> params = constructDistCpParams(srcPaths, dst, conf);
@@ -1207,18 +1208,24 @@ public class Hadoop23Shims extends HadoopShimsSecure {
       if(!"hdfs".equalsIgnoreCase(path.toUri().getScheme())) {
         return false;
       }
-      try {
-        return (hdfsAdmin.getEncryptionZoneForPath(fullPath) != null);
-      } catch (FileNotFoundException fnfe) {
-        LOG.debug("Failed to get EZ for non-existent path: "+ fullPath, fnfe);
-        return false;
-      }
+
+      return (getEncryptionZoneForPath(fullPath) != null);
+    }
+
+    private EncryptionZone getEncryptionZoneForPath(Path path) throws IOException {
+      if (path.getFileSystem(conf).exists(path)) {
+        return hdfsAdmin.getEncryptionZoneForPath(path);
+      } else if (!path.getParent().equals(path)) {
+        return getEncryptionZoneForPath(path.getParent());
+      } else {
+        return null;
+      }
     }
 
     @Override
     public boolean arePathsOnSameEncryptionZone(Path path1, Path path2) throws IOException {
-      return equivalentEncryptionZones(hdfsAdmin.getEncryptionZoneForPath(path1),
-                                       hdfsAdmin.getEncryptionZoneForPath(path2));
+      return equivalentEncryptionZones(getEncryptionZoneForPath(path1),
+                                       getEncryptionZoneForPath(path2));
     }
 
     private boolean equivalentEncryptionZones(EncryptionZone zone1, EncryptionZone zone2) {
@@ -1256,8 +1263,8 @@ public class Hadoop23Shims extends HadoopShimsSecure {
     public int comparePathKeyStrength(Path path1, Path path2) throws IOException {
       EncryptionZone zone1, zone2;
 
-      zone1 = hdfsAdmin.getEncryptionZoneForPath(path1);
-      zone2 = hdfsAdmin.getEncryptionZoneForPath(path2);
+      zone1 = getEncryptionZoneForPath(path1);
+      zone2 = getEncryptionZoneForPath(path2);
 
       if (zone1 == null && zone2 == null) {
         return 0;

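For readers following the encryption-zone change above: the removed catch block shows that hdfsAdmin.getEncryptionZoneForPath can throw FileNotFoundException for paths that do not yet exist, while callers such as comparePathKeyStrength are often handed destinations that have not been created, so the new helper walks up to the nearest existing ancestor instead. A standalone sketch of that lookup, with names of my own choosing and an extra null check on getParent() that the patch itself does not carry (the hdfsAdmin and conf arguments stand in for the shim's fields):

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.client.HdfsAdmin;
    import org.apache.hadoop.hdfs.protocol.EncryptionZone;

    public class EncryptionZoneProbe {
      // Returns the encryption zone of the path, or of its nearest existing
      // ancestor, or null if nothing up to the root could be resolved.
      public static EncryptionZone zoneForPathOrAncestor(HdfsAdmin hdfsAdmin, Configuration conf,
          Path path) throws IOException {
        if (path.getFileSystem(conf).exists(path)) {
          return hdfsAdmin.getEncryptionZoneForPath(path);
        }
        Path parent = path.getParent();
        if (parent != null && !parent.equals(path)) {
          return zoneForPathOrAncestor(hdfsAdmin, conf, parent);  // recurse toward the root
        }
        return null;
      }
    }
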
http://git-wip-us.apache.org/repos/asf/hive/blob/a3e87282/shims/common/src/main/java/org/apache/hadoop/fs/ProxyFileSystem.java
----------------------------------------------------------------------
diff --git a/shims/common/src/main/java/org/apache/hadoop/fs/ProxyFileSystem.java b/shims/common/src/main/java/org/apache/hadoop/fs/ProxyFileSystem.java
index 2c37a51..a82b2f0 100644
--- a/shims/common/src/main/java/org/apache/hadoop/fs/ProxyFileSystem.java
+++ b/shims/common/src/main/java/org/apache/hadoop/fs/ProxyFileSystem.java
@@ -23,6 +23,7 @@ import java.net.URI;
 import java.net.URISyntaxException;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.Shell;
@@ -181,6 +182,12 @@ public class ProxyFileSystem extends FilterFileSystem {
   }
 
   @Override
+  protected void rename(Path src, Path dst, Rename... options)
+      throws IOException {
+    super.rename(swizzleParamPath(src), swizzleParamPath(dst), options);
+  }
+
+  @Override
   public boolean delete(Path f, boolean recursive) throws IOException {
     return super.delete(swizzleParamPath(f), recursive);
   }
@@ -264,6 +271,11 @@ public class ProxyFileSystem extends FilterFileSystem {
   }
 
   @Override
+  public FileStatus getFileLinkStatus(Path f) throws IOException {
+    return swizzleFileStatus(super.getFileLinkStatus(swizzleParamPath(f)), false);
+  }
+
+  @Override
   public FileStatus getFileStatus(Path f) throws IOException {
     return swizzleFileStatus(super.getFileStatus(swizzleParamPath(f)), false);
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/a3e87282/shims/scheduler/pom.xml
----------------------------------------------------------------------
diff --git a/shims/scheduler/pom.xml b/shims/scheduler/pom.xml
index 0eadb69..076e2ee 100644
--- a/shims/scheduler/pom.xml
+++ b/shims/scheduler/pom.xml
@@ -87,8 +87,14 @@
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-yarn-server-tests</artifactId>
      <version>${hadoop.version}</version>
-      <optional>true</optional>
+     <optional>true</optional>
      <type>test-jar</type>
+     <exclusions>
+       <exclusion>
+         <groupId>com.codahale.metrics</groupId>
+         <artifactId>metrics-core</artifactId>
+       </exclusion>
+     </exclusions>
    </dependency>
   </dependencies>
 </project>

http://git-wip-us.apache.org/repos/asf/hive/blob/a3e87282/spark-client/pom.xml
----------------------------------------------------------------------
diff --git a/spark-client/pom.xml b/spark-client/pom.xml
index 784d908..b38c6fa 100644
--- a/spark-client/pom.xml
+++ b/spark-client/pom.xml
@@ -85,6 +85,10 @@
          <groupId>commmons-logging</groupId>
          <artifactId>commons-logging</artifactId>
        </exclusion>
+        <exclusion>
+          <groupId>com.fasterxml.jackson.core</groupId>
+          <artifactId>jackson-databind</artifactId>
+        </exclusion>
      </exclusions>
    </dependency>
     <dependency>

http://git-wip-us.apache.org/repos/asf/hive/blob/a3e87282/standalone-metastore/pom.xml
----------------------------------------------------------------------
diff --git a/standalone-metastore/pom.xml b/standalone-metastore/pom.xml
index 924286f..d002f47 100644
--- a/standalone-metastore/pom.xml
+++ b/standalone-metastore/pom.xml
@@ -62,7 +62,7 @@
    <dropwizard-metrics-hadoop-metrics2-reporter.version>0.1.2</dropwizard-metrics-hadoop-metrics2-reporter.version>
     <dropwizard.version>3.1.0</dropwizard.version>
     <guava.version>14.0.1</guava.version>
-    <hadoop.version>2.8.0</hadoop.version>
+    <hadoop.version>3.0.0-beta1</hadoop.version>
     <hikaricp.version>2.6.1</hikaricp.version>
     <jackson.new.version>2.6.5</jackson.new.version>
     <javolution.version>5.5.1</javolution.version>
@@ -190,6 +190,22 @@
         </exclusion>
       </exclusions>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs-client</artifactId>
+      <version>${hadoop.version}</version>
+      <optional>true</optional>
+      <exclusions>
+        <exclusion>
+          <groupId>org.slf4j</groupId>
+          <artifactId>slf4j-log4j12</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commmons-logging</groupId>
+          <artifactId>commons-logging</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
     <!-- This is our one and only Hive dependency.-->
     <dependency>
       <groupId>org.apache.hive</groupId>

http://git-wip-us.apache.org/repos/asf/hive/blob/a3e87282/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/HdfsUtils.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/HdfsUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/HdfsUtils.java
index ecbddc3..8d560e7 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/HdfsUtils.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/HdfsUtils.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.tools.DistCp;
 import org.apache.hadoop.tools.DistCpOptions;
+import org.apache.hadoop.tools.DistCpOptions.FileAttribute;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -152,10 +153,11 @@ public class HdfsUtils {
 
   public static boolean runDistCp(List<Path> srcPaths, Path dst, Configuration conf)
       throws IOException {
-    DistCpOptions options = new DistCpOptions(srcPaths, dst);
-    options.setSyncFolder(true);
-    options.setSkipCRC(true);
-    options.preserve(DistCpOptions.FileAttribute.BLOCKSIZE);
+    DistCpOptions options = new DistCpOptions.Builder(srcPaths, dst)
+        .withSyncFolder(true)
+        .withCRC(true)
+        .preserve(FileAttribute.BLOCKSIZE)
+        .build();
 
     // Creates the command-line parameters for distcp
     List<String> params = constructDistCpParams(srcPaths, dst, conf);

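Both copies of runDistCp (in Hadoop23Shims above and in HdfsUtils here) now build their options through DistCpOptions.Builder, the builder API the Hadoop 3 dependency bump expects; withSyncFolder(true) takes over from the old setSyncFolder(true) and withCRC(true) from setSkipCRC(true). A self-contained sketch of the pattern with placeholder paths (not the committed code):

    import java.util.Collections;
    import java.util.List;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.tools.DistCpOptions;
    import org.apache.hadoop.tools.DistCpOptions.FileAttribute;

    public class DistCpOptionsExample {
      public static DistCpOptions buildOptions() {
        List<Path> srcPaths = Collections.singletonList(new Path("/tmp/src"));  // placeholder source
        Path dst = new Path("/tmp/dst");                                        // placeholder target
        return new DistCpOptions.Builder(srcPaths, dst)
            .withSyncFolder(true)               // same intent as the old setSyncFolder(true)
            .withCRC(true)                      // same intent as the old setSkipCRC(true)
            .preserve(FileAttribute.BLOCKSIZE)  // keep block size on the copied files
            .build();                           // assemble the final options object
      }
    }
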
http://git-wip-us.apache.org/repos/asf/hive/blob/a3e87282/testutils/ptest2/src/main/resources/batch-exec.vm
----------------------------------------------------------------------
diff --git a/testutils/ptest2/src/main/resources/batch-exec.vm b/testutils/ptest2/src/main/resources/batch-exec.vm
index 2d16ca3..4ff74f4 100644
--- a/testutils/ptest2/src/main/resources/batch-exec.vm
+++ b/testutils/ptest2/src/main/resources/batch-exec.vm
@@ -35,7 +35,7 @@ then
   export PATH=$JAVA_HOME/bin/:$PATH
 fi
 export ANT_OPTS="-Xmx1g -XX:MaxPermSize=256m -Djava.io.tmpdir=$logDir/tmp ${antEnvOpts}"
-export MAVEN_OPTS="-Xmx256m -Djava.io.tmpdir=$logDir/tmp ${mavenEnvOpts}"
+export MAVEN_OPTS="-Xmx1g -Djava.io.tmpdir=$logDir/tmp ${mavenEnvOpts}"
 export HADOOP_ROOT_LOGGER=INFO,console
 export HADOOP_OPTS="-Dhive.log.dir=$logDir -Dhive.query.id=hadoop -Djava.io.tmpdir=$logDir/tmp"
 cd $localDir/$instanceName/${repositoryName}-source || exit 1
