IMPALA-4277: temporary hack to avoid compile errors for HDFS block location API

This is a temporary workaround to get Impala building against HDFS 3.0
that can be undone once IMPALA-4172 is committed.

This builds if I put together a directory with the minimum files required to
build the backend against HDFS:

hadoop-3.0.0-alpha1-cdh6.x-SNAPSHOT/lib/native/libgplcompression.a
hadoop-3.0.0-alpha1-cdh6.x-SNAPSHOT/lib/native/libhdfs.so.0.0.0
hadoop-3.0.0-alpha1-cdh6.x-SNAPSHOT/lib/native/libgplcompression.so
hadoop-3.0.0-alpha1-cdh6.x-SNAPSHOT/lib/native/libgplcompression.so.0.0.0
hadoop-3.0.0-alpha1-cdh6.x-SNAPSHOT/lib/native/libhdfs.so
hadoop-3.0.0-alpha1-cdh6.x-SNAPSHOT/lib/native/libgplcompression.la
hadoop-3.0.0-alpha1-cdh6.x-SNAPSHOT/lib/native/libhdfs.a
hadoop-3.0.0-alpha1-cdh6.x-SNAPSHOT/lib/native/libgplcompression.so.0
hadoop-3.0.0-alpha1-cdh6.x-SNAPSHOT/include/hdfs.h

Change-Id: Ice765b1be62c0c3d1b99282d8c652dd35d3d766d
Reviewed-on: http://gerrit.cloudera.org:8080/4701
Reviewed-by: Tim Armstrong <tarmstr...@cloudera.com>
Tested-by: Tim Armstrong <tarmstr...@cloudera.com>


Project: http://git-wip-us.apache.org/repos/asf/incubator-impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-impala/commit/cc603eb1
Tree: http://git-wip-us.apache.org/repos/asf/incubator-impala/tree/cc603eb1
Diff: http://git-wip-us.apache.org/repos/asf/incubator-impala/diff/cc603eb1

Branch: refs/heads/hadoop-next
Commit: cc603eb152eaa374c543b934c861fe8560032b1f
Parents: 51c6fde
Author: Tim Armstrong <tarmstr...@cloudera.com>
Authored: Tue Oct 11 16:54:42 2016 -0700
Committer: Tim Armstrong <tarmstr...@cloudera.com>
Committed: Tue Oct 18 16:50:36 2016 +0000

----------------------------------------------------------------------
 bin/impala-config.sh                            |  2 +-
 .../org/apache/impala/catalog/HdfsTable.java    | 30 +++++++++++++-------
 .../org/apache/impala/service/JniFrontend.java  |  9 +++---
 3 files changed, 26 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/cc603eb1/bin/impala-config.sh
----------------------------------------------------------------------
diff --git a/bin/impala-config.sh b/bin/impala-config.sh
index 6425d90..dd0ab3f 100755
--- a/bin/impala-config.sh
+++ b/bin/impala-config.sh
@@ -295,7 +295,7 @@ if [[ $OSTYPE == "darwin"* ]]; then
   IMPALA_THRIFT_JAVA_VERSION=0.9.2
 fi
 
-export IMPALA_HADOOP_VERSION=${IMPALA_HADOOP_VERSION:-2.6.0-cdh5.10.0-SNAPSHOT}
+export IMPALA_HADOOP_VERSION=${IMPALA_HADOOP_VERSION:-3.0.0-alpha1-cdh6.x-SNAPSHOT}
 export IMPALA_HBASE_VERSION=${IMPALA_HBASE_VERSION:-2.0.0-cdh6.x-SNAPSHOT}
 export IMPALA_HIVE_VERSION=${IMPALA_HIVE_VERSION:-2.1.0-cdh6.x-SNAPSHOT}
 export IMPALA_SENTRY_VERSION=${IMPALA_SENTRY_VERSION:-1.5.1-cdh6.x-SNAPSHOT}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/cc603eb1/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java b/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
index e664f6f..607b764 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
@@ -33,11 +33,9 @@ import java.util.TreeMap;
 import org.apache.avro.Schema;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
-import org.apache.hadoop.fs.BlockStorageLocation;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.VolumeId;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -110,6 +108,17 @@ import com.google.common.collect.ImmutableMap;
  *
  */
 public class HdfsTable extends Table {
+
+  /** Dummy class to get it to compile */
+  public class BlockStorageLocation {
+
+  }
+
+  /** Dummy class to get it to compile */
+  public class VolumeId {
+
+  }
+
   // hive's default value for table property 'serialization.null.format'
   private static final String DEFAULT_NULL_COLUMN_VALUE = "\\N";
 
@@ -444,14 +453,14 @@ public class HdfsTable extends Table {
       FileBlocksInfo blockLists = perFsFileBlocks.get(fsKey);
       Preconditions.checkNotNull(blockLists);
       BlockStorageLocation[] storageLocs = null;
-      try {
+      //try {
         // Get the BlockStorageLocations for all the blocks
-        storageLocs = dfs.getFileBlockStorageLocations(blockLists.locations);
-      } catch (IOException e) {
-        LOG.error("Couldn't determine block storage locations for filesystem " +
-            fs + ":\n" + e.getMessage());
-        continue;
-      }
+        // storageLocs = dfs.getFileBlockStorageLocations(blockLists.locations);
+      //} catch (IOException e) {
+      //  LOG.error("Couldn't determine block storage locations for filesystem " +
+      //      fs + ":\n" + e.getMessage());
+      //  continue;
+      //}
       if (storageLocs == null || storageLocs.length == 0) {
         LOG.warn("Attempted to get block locations for filesystem " + fs +
             " but the call returned no results");
@@ -468,7 +477,8 @@ public class HdfsTable extends Table {
       // Attach volume IDs given by the storage location to the corresponding
       // THdfsFileBlocks.
       for (int locIdx = 0; locIdx < storageLocs.length; ++locIdx) {
-        VolumeId[] volumeIds = storageLocs[locIdx].getVolumeIds();
+        //VolumeId[] volumeIds = storageLocs[locIdx].getVolumeIds();
+        VolumeId[] volumeIds = new VolumeId[0];
         THdfsFileBlock block = blockLists.blocks.get(locIdx);
         // Convert opaque VolumeId to 0 based ids.
         // TODO: the diskId should be eventually retrievable from Hdfs when the

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/cc603eb1/fe/src/main/java/org/apache/impala/service/JniFrontend.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/service/JniFrontend.java b/fe/src/main/java/org/apache/impala/service/JniFrontend.java
index 07d6ec6..dbabaf5 100644
--- a/fe/src/main/java/org/apache/impala/service/JniFrontend.java
+++ b/fe/src/main/java/org/apache/impala/service/JniFrontend.java
@@ -715,13 +715,14 @@ public class JniFrontend {
     }
 
    // dfs.client.file-block-storage-locations.timeout.millis should be >= 10 seconds
-    int dfsClientFileBlockStorageLocationsTimeoutMs = conf.getInt(
-        DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS,
-        DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS_DEFAULT);
+    int dfsClientFileBlockStorageLocationsTimeoutMs = 0;
+        //conf.getInt(
+        // DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS,
+        // DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS_DEFAULT);
     if (dfsClientFileBlockStorageLocationsTimeoutMs <
         MIN_DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS) {
       errorCause.append(prefix);
-      errorCause.append(DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS);
+      //errorCause.append(DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS);
       errorCause.append(" is too low. It should be at least 10 seconds.\n");
     }
 

Reply via email to