IMPALA-4160: Remove some leftover Llama references

Change-Id: I62e12363ab3ecca42bf7a82be3c2df01bc47cdca
Reviewed-on: http://gerrit.cloudera.org:8080/4493
Reviewed-by: Matthew Jacobs <[email protected]>
Reviewed-by: Henry Robinson <[email protected]>
Tested-by: Internal Jenkins
Project: http://git-wip-us.apache.org/repos/asf/incubator-impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-impala/commit/1b9d9ea7
Tree: http://git-wip-us.apache.org/repos/asf/incubator-impala/tree/1b9d9ea7
Diff: http://git-wip-us.apache.org/repos/asf/incubator-impala/diff/1b9d9ea7

Branch: refs/heads/master
Commit: 1b9d9ea7c151c35b034b5e2097017794849a1b7b
Parents: 57fcbf7
Author: Henry Robinson <[email protected]>
Authored: Tue Sep 20 23:13:21 2016 -0700
Committer: Internal Jenkins <[email protected]>
Committed: Thu Sep 22 02:10:32 2016 +0000

----------------------------------------------------------------------
 testdata/cluster/admin | 27 ++-------------------------
 1 file changed, 2 insertions(+), 25 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/1b9d9ea7/testdata/cluster/admin
----------------------------------------------------------------------
diff --git a/testdata/cluster/admin b/testdata/cluster/admin
index d263999..98969ca 100755
--- a/testdata/cluster/admin
+++ b/testdata/cluster/admin
@@ -18,7 +18,7 @@
 # specific language governing permissions and limitations
 # under the License.
 
-# This will create/control/destroy a local hdfs+yarn+llama cluster.
+# This will create/control/destroy a local hdfs+yarn cluster.
 #
 # The original idea was to run each node on a different loopback address but
 # https://jira.cloudera.com/browse/CDH-16602 makes that impossible for now. So all roles
@@ -79,7 +79,6 @@ KUDU_TS_RPC_FREE_PORT_START=31200
 # existing cluster.
 export HDFS_WEBUI_PORT=5070   # changed from 50070 so it is not ephemeral
 export YARN_WEBUI_PORT=8088   # same as default
-export LLAMA_WEBUI_PORT=1501  # same as default
 export KMS_WEBUI_PORT=16000   # same as default
 export KUDU_WEBUI_PORT=8051   # same as default
 
@@ -205,24 +204,6 @@ function create_cluster {
     ${MINIKDC_INIT} stop
   fi
 
-  # Llama needs a mapping of DataNodes to NodeManagers and for that we'll need to know
-  # the hostname hadoop has chosen. It should be the first entry in /etc/hosts for
-  # 127.0.0.1.
-  #
-  # It is possible this hostname must also mactch the impala hostname, which will
-  # never be "localhost".
-  if $IS_OSX; then
-    HADOOP_HOSTNAME=$(dscacheutil -q host -a ip_address 127.0.0.1 | head -n 1 \
-        | awk '{print $2}')
-  else
-    if getent hosts 127.0.0.1 1>/dev/null; then
-      HADOOP_HOSTNAME=$(getent hosts 127.0.0.1 | awk '{print $2}')
-    else
-      # This may be an error case...
-      HADOOP_HOSTNAME=127.0.0.1
-    fi
-  fi
-
   # For consistency, the first node will host all the master roles.
   for ((NODE_IDX=$NODE_COUNT; NODE_IDX >= 1; NODE_IDX--)); do
     NODE=${NODE_PREFIX}$NODE_IDX
@@ -259,11 +240,7 @@ function create_cluster {
     echo "$NODE will use ports DATANODE_PORT=$DATANODE_PORT," \
         "NODEMANAGER_PORT=$NODEMANAGER_PORT, and KUDU_TS_RPC_PORT=$KUDU_TS_RPC_PORT"
 
-    # Escape the first : to workaround https://jira.cloudera.com/browse/CDH-16840
-    LLAMA_PORT_MAPPINGS+="$HADOOP_HOSTNAME\\:$DATANODE_PORT="
-    LLAMA_PORT_MAPPINGS+="$HADOOP_HOSTNAME:$NODEMANAGER_PORT
-"
-    export NODE NODE_DIR DATANODE_PORT NODEMANAGER_PORT LLAMA_PORT_MAPPINGS
+    export NODE NODE_DIR DATANODE_PORT NODEMANAGER_PORT
     export KUDU_TS_RPC_PORT
     for TEMPLATE_PATH in $(find "$NODE_DIR" -name "*$TEMPLATE_SUFFIX"); do
      ACTUAL_PATH="${TEMPLATE_PATH%$TEMPLATE_SUFFIX}"
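
For context on the deleted block: its only purpose was to discover the hostname Hadoop resolves for 127.0.0.1 (via dscacheutil on OS X, getent elsewhere), which Llama needed to build its DataNode-to-NodeManager port mappings. A minimal standalone sketch of that lookup follows; it is illustrative only and not part of this change, and the uname-based platform check standing in for the script's own IS_OSX flag is an assumption.

  #!/usr/bin/env bash
  # Sketch only: mirrors the hostname lookup removed by this commit.
  # The uname check is assumed; testdata/cluster/admin maintains its own IS_OSX flag.
  if [[ "$(uname)" == "Darwin" ]]; then
    # OS X: ask the Directory Service cache which name maps to 127.0.0.1.
    HADOOP_HOSTNAME=$(dscacheutil -q host -a ip_address 127.0.0.1 | head -n 1 | awk '{print $2}')
  elif getent hosts 127.0.0.1 1>/dev/null; then
    # Linux: getent prints "127.0.0.1  <first name in /etc/hosts> ...".
    HADOOP_HOSTNAME=$(getent hosts 127.0.0.1 | awk '{print $2}')
  else
    # No mapping found; fall back to the bare address.
    HADOOP_HOSTNAME=127.0.0.1
  fi
  echo "Hadoop will identify this host as: $HADOOP_HOSTNAME"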
