Repository: incubator-impala
Updated Branches:
  refs/heads/hadoop-next 32dfcebfa -> 32705b748
Partially "IMPALA-4277: temporary hack to avoid compile errors for HDFS block location API" Revert the code changes, keep the Hadoop version. This commit is not expected to compile. Change-Id: I183017f37ffd5af6c9a5b79a682f415e4c2a1d89 Reviewed-on: http://gerrit.cloudera.org:8080/5527 Reviewed-by: Tim Armstrong <[email protected]> Tested-by: Tim Armstrong <[email protected]> Project: http://git-wip-us.apache.org/repos/asf/incubator-impala/repo Commit: http://git-wip-us.apache.org/repos/asf/incubator-impala/commit/32705b74 Tree: http://git-wip-us.apache.org/repos/asf/incubator-impala/tree/32705b74 Diff: http://git-wip-us.apache.org/repos/asf/incubator-impala/diff/32705b74 Branch: refs/heads/hadoop-next Commit: 32705b74822e9e598df69f982426129121acfb7e Parents: 32dfceb Author: Tim Armstrong <[email protected]> Authored: Thu Dec 15 13:41:38 2016 -0800 Committer: Tim Armstrong <[email protected]> Committed: Thu Dec 15 22:49:47 2016 +0000 ---------------------------------------------------------------------- .../org/apache/impala/catalog/HdfsTable.java | 30 +++++++------------- .../org/apache/impala/service/JniFrontend.java | 9 +++--- 2 files changed, 14 insertions(+), 25 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/32705b74/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java b/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java index 6530502..7268cf1 100644 --- a/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java +++ b/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java @@ -33,9 +33,11 @@ import java.util.TreeMap; import org.apache.avro.Schema; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; +import org.apache.hadoop.fs.BlockStorageLocation; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.VolumeId; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DistributedFileSystem; @@ -105,17 +107,6 @@ import com.google.common.collect.Sets; * */ public class HdfsTable extends Table { - - /** Dummy class to get it to compile */ - public class BlockStorageLocation { - - } - - /** Dummy class to get it to compile */ - public class VolumeId { - - } - // hive's default value for table property 'serialization.null.format' private static final String DEFAULT_NULL_COLUMN_VALUE = "\\N"; @@ -444,14 +435,14 @@ public class HdfsTable extends Table { FileBlocksInfo blockLists = perFsFileBlocks.get(fsKey); Preconditions.checkNotNull(blockLists); BlockStorageLocation[] storageLocs = null; - //try { + try { // Get the BlockStorageLocations for all the blocks - // storageLocs = dfs.getFileBlockStorageLocations(blockLists.locations); - //} catch (IOException e) { - // LOG.error("Couldn't determine block storage locations for filesystem " + - // fs + ":\n" + e.getMessage()); - // continue; - //} + storageLocs = dfs.getFileBlockStorageLocations(blockLists.locations); + } catch (IOException e) { + LOG.error("Couldn't determine block storage locations for filesystem " + + fs + ":\n" + e.getMessage()); + continue; + } if (storageLocs == null || storageLocs.length == 0) { LOG.warn("Attempted to get block locations for filesystem " + fs + " but the call returned no 
results"); @@ -468,8 +459,7 @@ public class HdfsTable extends Table { // Attach volume IDs given by the storage location to the corresponding // THdfsFileBlocks. for (int locIdx = 0; locIdx < storageLocs.length; ++locIdx) { - //VolumeId[] volumeIds = storageLocs[locIdx].getVolumeIds(); - VolumeId[] volumeIds = new VolumeId[0]; + VolumeId[] volumeIds = storageLocs[locIdx].getVolumeIds(); THdfsFileBlock block = blockLists.blocks.get(locIdx); // Convert opaque VolumeId to 0 based ids. // TODO: the diskId should be eventually retrievable from Hdfs when the http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/32705b74/fe/src/main/java/org/apache/impala/service/JniFrontend.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/service/JniFrontend.java b/fe/src/main/java/org/apache/impala/service/JniFrontend.java index 4e3c09a..5e41af4 100644 --- a/fe/src/main/java/org/apache/impala/service/JniFrontend.java +++ b/fe/src/main/java/org/apache/impala/service/JniFrontend.java @@ -720,14 +720,13 @@ public class JniFrontend { } // dfs.client.file-block-storage-locations.timeout.millis should be >= 10 seconds - int dfsClientFileBlockStorageLocationsTimeoutMs = 0; - //conf.getInt( - // DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS, - // DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS_DEFAULT); + int dfsClientFileBlockStorageLocationsTimeoutMs = conf.getInt( + DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS, + DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS_DEFAULT); if (dfsClientFileBlockStorageLocationsTimeoutMs < MIN_DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS) { errorCause.append(prefix); - //errorCause.append(DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS); + errorCause.append(DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS); errorCause.append(" is too low. It should be at least 10 seconds.\n"); }

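[Editor's note] The JniFrontend change restores a plain configuration check: read
dfs.client.file-block-storage-locations.timeout.millis via Configuration#getInt and flag
values below a 10-second floor. A minimal sketch of the same pattern follows, assuming
Hadoop 2.x DFSConfigKeys (the two constants are the ones referenced in the diff; the
class name, checkTimeout, and MIN_TIMEOUT_MS are illustrative).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class BlockStorageTimeoutCheck {
  // 10-second floor, matching the minimum the JniFrontend check enforces.
  private static final int MIN_TIMEOUT_MS = 10 * 1000;

  /** Returns an error message if the timeout is too low, else null. */
  static String checkTimeout(Configuration conf) {
    int timeoutMs = conf.getInt(
        DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS,
        DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS_DEFAULT);
    if (timeoutMs < MIN_TIMEOUT_MS) {
      return DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS
          + " is too low. It should be at least 10 seconds.";
    }
    return null;  // Configuration is acceptable.
  }
}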