This is an automated email from the ASF dual-hosted git repository.
wuzhiguo pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/bigtop-manager.git
The following commit(s) were added to refs/heads/main by this push:
new 47b9aeb6 BIGTOP-4132: Add Hive component on Bigtop-3.3.0 stack (#132)
47b9aeb6 is described below
commit 47b9aeb65f3e52e1192ab5a9fd93ff00d413cc3e
Author: Zhiguo Wu <[email protected]>
AuthorDate: Sat Dec 21 21:29:10 2024 +0800
BIGTOP-4132: Add Hive component on Bigtop-3.3.0 stack (#132)
---
.../services/hadoop/configuration/hdfs-site.xml | 4 +
.../stacks/bigtop/3.3.0/services/hadoop/order.json | 7 +-
.../services/hive/configuration/beeline-log4j2.xml | 80 ++
.../3.3.0/services/hive/configuration/hive-env.xml | 112 ++
.../hive/configuration/hive-exec-log4j2.xml | 101 ++
.../services/hive/configuration/hive-log4j2.xml | 118 ++
.../services/hive/configuration/hive-site.xml | 1253 ++++++++++++++++++++
.../services/hive/configuration/hive.conf.xml | 64 +
.../hive/configuration/llap-cli-log4j2.xml | 126 ++
.../hive/configuration/llap-daemon-log4j2.xml | 192 +++
.../3.3.0/services/{tez => hive}/metainfo.xml | 39 +-
.../stacks/bigtop/3.3.0/services/hive/order.json | 22 +
.../stacks/bigtop/3.3.0/services/tez/metainfo.xml | 2 +-
.../stacks/infra/1.0.0/services/mysql/order.json | 6 +-
.../stack/bigtop/v3_3_0/hive/HiveClientScript.java | 52 +
.../bigtop/v3_3_0/hive/HiveMetastoreScript.java | 142 +++
.../stack/bigtop/v3_3_0/hive/HiveParams.java | 162 +++
.../bigtop/v3_3_0/hive/HiveServer2Script.java | 105 ++
.../stack/bigtop/v3_3_0/hive/HiveSetup.java | 126 ++
.../stack/core/tarball/TarballDownloader.java | 2 +-
.../stack/core/utils/linux/LinuxFileUtils.java | 23 +
21 files changed, 2724 insertions(+), 14 deletions(-)
diff --git
a/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hadoop/configuration/hdfs-site.xml
b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hadoop/configuration/hdfs-site.xml
index 85fd05d4..13f2c2d0 100644
---
a/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hadoop/configuration/hdfs-site.xml
+++
b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hadoop/configuration/hdfs-site.xml
@@ -92,6 +92,10 @@
regardless of whether 'dfs.namenode.checkpoint.period' has expired.
</description>
</property>
+ <property>
+ <name>hadoop.proxyuser.hive.hosts</name>
+ <value>*</value>
+ </property>
<property>
<name>dfs.replication.max</name>
<value>512</value>
diff --git
a/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hadoop/order.json
b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hadoop/order.json
index 88d0990f..4d7b8d3c 100644
---
a/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hadoop/order.json
+++
b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hadoop/order.json
@@ -11,7 +11,8 @@
"NAMENODE-STOP": [
"DATANODE-STOP",
"SECONDARYNAMENODE-STOP",
- "HBASE_MASTER-STOP"
+ "HBASE_MASTER-STOP",
+ "HIVE_METASTORE-STOP"
],
"NAMENODE-START": [
"ZKFC-START",
@@ -43,6 +44,10 @@
"NODEMANAGER-RESTART": [
"NAMENODE-RESTART"
],
+ "NODEMANAGER-STOP": [
+ "HIVE_METASTORE-STOP",
+ "HIVESERVER2-STOP"
+ ],
"HISTORY_SERVER-START": ["NAMENODE-START", "DATANODE-START"],
"HISTORY_SERVER-RESTART": ["NAMENODE-RESTART"]
}
\ No newline at end of file
diff --git
a/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hive/configuration/beeline-log4j2.xml
b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hive/configuration/beeline-log4j2.xml
new file mode 100644
index 00000000..11140c1d
--- /dev/null
+++
b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hive/configuration/beeline-log4j2.xml
@@ -0,0 +1,80 @@
+<?xml version="1.0"?>
+<!--
+ ~ Licensed to the Apache Software Foundation (ASF) under one
+ ~ or more contributor license agreements. See the NOTICE file
+ ~ distributed with this work for additional information
+ ~ regarding copyright ownership. The ASF licenses this file
+ ~ to you under the Apache License, Version 2.0 (the
+ ~ "License"); you may not use this file except in compliance
+ ~ with the License. You may obtain a copy of the License at
+ ~
+ ~ http://www.apache.org/licenses/LICENSE-2.0
+ ~
+ ~ Unless required by applicable law or agreed to in writing,
+ ~ software distributed under the License is distributed on an
+ ~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ ~ KIND, either express or implied. See the License for the
+ ~ specific language governing permissions and limitations
+ ~ under the License.
+-->
+
+<configuration>
+ <property>
+ <name>content</name>
+ <display-name>beeline-log4j2 template</display-name>
+ <description>Custom beeline-log4j2.properties</description>
+ <value><![CDATA[
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+status = INFO
+name = BeelineLog4j2
+packages = org.apache.hadoop.hive.ql.log
+
+<#noparse>
+# list of properties
+property.hive.log.level = WARN
+property.hive.root.logger = console
+
+# list of all appenders
+appenders = console
+
+# console appender
+appender.console.type = Console
+appender.console.name = console
+appender.console.target = SYSTEM_ERR
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = %d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n
+
+# list of all loggers
+loggers = HiveConnection
+
+# HiveConnection logs useful info for dynamic service discovery
+logger.HiveConnection.name = org.apache.hive.jdbc.HiveConnection
+logger.HiveConnection.level = INFO
+
+# root logger
+rootLogger.level = ${sys:hive.log.level}
+rootLogger.appenderRefs = root
+rootLogger.appenderRef.root.ref = ${sys:hive.root.logger}
+</#noparse>
+]]>
+ </value>
+ <attrs>
+ <type>longtext</type>
+ </attrs>
+ </property>
+</configuration>
diff --git
a/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hive/configuration/hive-env.xml
b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hive/configuration/hive-env.xml
new file mode 100644
index 00000000..73a63891
--- /dev/null
+++
b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hive/configuration/hive-env.xml
@@ -0,0 +1,112 @@
+<?xml version="1.0"?>
+<!--
+ ~ Licensed to the Apache Software Foundation (ASF) under one
+ ~ or more contributor license agreements. See the NOTICE file
+ ~ distributed with this work for additional information
+ ~ regarding copyright ownership. The ASF licenses this file
+ ~ to you under the Apache License, Version 2.0 (the
+ ~ "License"); you may not use this file except in compliance
+ ~ with the License. You may obtain a copy of the License at
+ ~
+ ~ http://www.apache.org/licenses/LICENSE-2.0
+ ~
+ ~ Unless required by applicable law or agreed to in writing,
+ ~ software distributed under the License is distributed on an
+ ~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ ~ KIND, either express or implied. See the License for the
+ ~ specific language governing permissions and limitations
+ ~ under the License.
+-->
+
+<configuration>
+ <property>
+ <name>hive_log_dir</name>
+ <value>/var/log/hive</value>
+ <display-name>Hive Log Dir</display-name>
+ <description>Directory for Hive Log files.</description>
+ </property>
+ <property>
+ <name>hive_pid_dir</name>
+ <value>/var/run/hive</value>
+ <display-name>Hive PID Dir</display-name>
+ <description>Hive PID Dir.</description>
+ </property>
+ <property>
+ <name>hive_heapsize</name>
+ <value>512</value>
+ <description>Hive Java heap size</description>
+ <display-name>HiveServer2 Heap Size</display-name>
+ </property>
+ <property>
+ <name>hive_metastore_heapsize</name>
+ <value>1024</value>
+ <description>Hive Metastore Java heap size</description>
+ <display-name>Metastore Heap Size</display-name>
+ </property>
+ <property>
+ <name>heap_dump_path</name>
+ <value>/tmp</value>
+ <description>Path for heap dump file</description>
+ <display-name>Heap dump path</display-name>
+ </property>
+
+ <!-- hive-env.sh -->
+ <property>
+ <name>content</name>
+ <display-name>hive-env template</display-name>
+ <description>This is the freemarker template for hive-env.sh
file</description>
+ <value><![CDATA[
+# The heap size of the jvm, and jvm args stared by hive shell script can be
controlled via:
+if [ "$SERVICE" = "metastore" ]; then
+
+export HADOOP_HEAPSIZE=${hive_metastore_heapsize} # Setting for HiveMetastore
+export HADOOP_OPTS="$HADOOP_OPTS
-Xloggc:${hive_log_dir}/hivemetastore-gc-%t.log
+-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCCause
-XX:+UseGCLogFileRotation
+-XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=10M -XX:+HeapDumpOnOutOfMemoryError
+-XX:HeapDumpPath=${hive_log_dir}/hms_heapdump.hprof
-Dhive.log.dir=${hive_log_dir}
+-Dhive.log.file=hivemetastore.log"
+
+fi
+
+if [ "$SERVICE" = "hiveserver2" ]; then
+
+export HADOOP_HEAPSIZE=${hive_heapsize} # Setting for HiveServer2 and Client
+export HADOOP_OPTS="$HADOOP_OPTS -Xloggc:${hive_log_dir}/hiveserver2-gc-%t.log
+-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCCause
-XX:+UseGCLogFileRotation
+-XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=10M -XX:+HeapDumpOnOutOfMemoryError
+-XX:HeapDumpPath=${hive_log_dir}/hs2_heapdump.hprof
-Dhive.log.dir=${hive_log_dir}
+-Dhive.log.file=hiveserver2.log"
+
+fi
+
+<#noparse>
+export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS -Xms${HADOOP_HEAPSIZE}m
-Xmx${HADOOP_HEAPSIZE}m"
+</#noparse>
+export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS -XX:+HeapDumpOnOutOfMemoryError
-XX:HeapDumpPath=${heap_dump_path}"
+
+# Larger heap size may be required when running queries over large number of
files or partitions.
+# By default hive shell scripts use a heap size of 256 (MB). Larger heap size
would also be
+# appropriate for hive server (hwi etc).
+
+
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=${hadoop_home}
+
+export HIVE_HOME=${hive_home}
+
+# Hive Configuration Directory can be controlled by:
+export HIVE_CONF_DIR=${hive_conf_dir}
+
+# Folder containing extra libraries required for hive compilation/execution
can be controlled by:
+if [ "$HIVE_AUX_JARS_PATH" != "" ]; then
+export HIVE_AUX_JARS_PATH=$HIVE_AUX_JARS_PATH
+fi
+
+export METASTORE_PORT=${hive_metastore_port}
+]]>
+ </value>
+ <attrs>
+ <type>longtext</type>
+ </attrs>
+ </property>
+</configuration>
diff --git
a/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hive/configuration/hive-exec-log4j2.xml
b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hive/configuration/hive-exec-log4j2.xml
new file mode 100644
index 00000000..1568c59a
--- /dev/null
+++
b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hive/configuration/hive-exec-log4j2.xml
@@ -0,0 +1,101 @@
+<?xml version="1.0"?>
+<!--
+ ~ Licensed to the Apache Software Foundation (ASF) under one
+ ~ or more contributor license agreements. See the NOTICE file
+ ~ distributed with this work for additional information
+ ~ regarding copyright ownership. The ASF licenses this file
+ ~ to you under the Apache License, Version 2.0 (the
+ ~ "License"); you may not use this file except in compliance
+ ~ with the License. You may obtain a copy of the License at
+ ~
+ ~ http://www.apache.org/licenses/LICENSE-2.0
+ ~
+ ~ Unless required by applicable law or agreed to in writing,
+ ~ software distributed under the License is distributed on an
+ ~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ ~ KIND, either express or implied. See the License for the
+ ~ specific language governing permissions and limitations
+ ~ under the License.
+-->
+
+<configuration>
+ <property>
+ <name>content</name>
+ <display-name>hive-exec-log4j2 template</display-name>
+ <description>Custom hive-exec-log4j2.properties</description>
+ <value><![CDATA[
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+status = INFO
+name = HiveExecLog4j2
+packages = org.apache.hadoop.hive.ql.log
+
+<#noparse>
+# list of properties
+property.hive.log.level = INFO
+property.hive.root.logger = FA
+property.hive.query.id = hadoop
+property.hive.log.dir = ${sys:java.io.tmpdir}/${sys:user.name}
+property.hive.log.file = ${sys:hive.query.id}.log
+
+# list of all appenders
+appenders = console, FA
+
+# console appender
+appender.console.type = Console
+appender.console.name = console
+appender.console.target = SYSTEM_ERR
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = %d{ISO8601} %5p [%t] %c{2}: %m%n
+
+# simple file appender
+appender.FA.type = RandomAccessFile
+appender.FA.name = FA
+appender.FA.fileName = ${sys:hive.log.dir}/${sys:hive.log.file}
+appender.FA.layout.type = PatternLayout
+appender.FA.layout.pattern = %d{ISO8601} %5p [%t] %c{2}: %m%n
+
+# list of all loggers
+loggers = NIOServerCnxn, ClientCnxnSocketNIO, DataNucleus, Datastore, JPOX
+
+logger.NIOServerCnxn.name = org.apache.zookeeper.server.NIOServerCnxn
+logger.NIOServerCnxn.level = WARN
+
+logger.ClientCnxnSocketNIO.name = org.apache.zookeeper.ClientCnxnSocketNIO
+logger.ClientCnxnSocketNIO.level = WARN
+
+logger.DataNucleus.name = DataNucleus
+logger.DataNucleus.level = ERROR
+
+logger.Datastore.name = Datastore
+logger.Datastore.level = ERROR
+
+logger.JPOX.name = JPOX
+logger.JPOX.level = ERROR
+
+# root logger
+rootLogger.level = ${sys:hive.log.level}
+rootLogger.appenderRefs = root
+rootLogger.appenderRef.root.ref = ${sys:hive.root.logger}
+</#noparse>
+]]>
+ </value>
+ <attrs>
+ <type>longtext</type>
+ </attrs>
+ </property>
+</configuration>
diff --git
a/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hive/configuration/hive-log4j2.xml
b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hive/configuration/hive-log4j2.xml
new file mode 100644
index 00000000..f8796d0c
--- /dev/null
+++
b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hive/configuration/hive-log4j2.xml
@@ -0,0 +1,118 @@
+<?xml version="1.0"?>
+<!--
+ ~ Licensed to the Apache Software Foundation (ASF) under one
+ ~ or more contributor license agreements. See the NOTICE file
+ ~ distributed with this work for additional information
+ ~ regarding copyright ownership. The ASF licenses this file
+ ~ to you under the Apache License, Version 2.0 (the
+ ~ "License"); you may not use this file except in compliance
+ ~ with the License. You may obtain a copy of the License at
+ ~
+ ~ http://www.apache.org/licenses/LICENSE-2.0
+ ~
+ ~ Unless required by applicable law or agreed to in writing,
+ ~ software distributed under the License is distributed on an
+ ~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ ~ KIND, either express or implied. See the License for the
+ ~ specific language governing permissions and limitations
+ ~ under the License.
+-->
+
+<configuration>
+ <property>
+ <name>content</name>
+ <display-name>hive-log4j2 template</display-name>
+ <description>Custom hive-log4j2.properties</description>
+ <value><![CDATA[
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+status = INFO
+name = HiveLog4j2
+packages = org.apache.hadoop.hive.ql.log
+
+<#noparse>
+# list of properties
+property.hive.log.level = INFO
+property.hive.root.logger = DRFA
+property.hive.log.dir = ${sys:java.io.tmpdir}/${sys:user.name}
+property.hive.log.file = hive.log
+property.hive.perflogger.log.level = INFO
+
+# list of all appenders
+appenders = console, DRFA
+
+# console appender
+appender.console.type = Console
+appender.console.name = console
+appender.console.target = SYSTEM_ERR
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = %d{ISO8601} %5p [%t] %c{2}: %m%n
+
+# daily rolling file appender
+appender.DRFA.type = RollingRandomAccessFile
+appender.DRFA.name = DRFA
+appender.DRFA.fileName = ${sys:hive.log.dir}/${sys:hive.log.file}
+# Use %pid in the filePattern to append <process-id>@<host-name> to the
filename if you want separate log files for different CLI session
+appender.DRFA.filePattern =
${sys:hive.log.dir}/${sys:hive.log.file}.%d{yyyy-MM-dd}
+appender.DRFA.layout.type = PatternLayout
+appender.DRFA.layout.pattern = %d{ISO8601} %5p [%t] %c{2}: %m%n
+appender.DRFA.policies.type = Policies
+appender.DRFA.policies.time.type = TimeBasedTriggeringPolicy
+appender.DRFA.policies.time.interval = 1
+appender.DRFA.policies.time.modulate = true
+appender.DRFA.strategy.type = DefaultRolloverStrategy
+appender.DRFA.strategy.max = 30
+
+# list of all loggers
+loggers = NIOServerCnxn, ClientCnxnSocketNIO, DataNucleus, Datastore, JPOX,
PerfLogger, AmazonAws, ApacheHttp
+
+logger.NIOServerCnxn.name = org.apache.zookeeper.server.NIOServerCnxn
+logger.NIOServerCnxn.level = WARN
+
+logger.ClientCnxnSocketNIO.name = org.apache.zookeeper.ClientCnxnSocketNIO
+logger.ClientCnxnSocketNIO.level = WARN
+
+logger.DataNucleus.name = DataNucleus
+logger.DataNucleus.level = ERROR
+
+logger.Datastore.name = Datastore
+logger.Datastore.level = ERROR
+
+logger.JPOX.name = JPOX
+logger.JPOX.level = ERROR
+
+logger.AmazonAws.name=com.amazonaws
+logger.AmazonAws.level = INFO
+
+logger.ApacheHttp.name=org.apache.http
+logger.ApacheHttp.level = INFO
+
+logger.PerfLogger.name = org.apache.hadoop.hive.ql.log.PerfLogger
+logger.PerfLogger.level = ${sys:hive.perflogger.log.level}
+
+# root logger
+rootLogger.level = ${sys:hive.log.level}
+rootLogger.appenderRefs = root
+rootLogger.appenderRef.root.ref = ${sys:hive.root.logger}
+</#noparse>
+]]>
+ </value>
+ <attrs>
+ <type>longtext</type>
+ </attrs>
+ </property>
+</configuration>
diff --git
a/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hive/configuration/hive-site.xml
b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hive/configuration/hive-site.xml
new file mode 100644
index 00000000..32aa7b70
--- /dev/null
+++
b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hive/configuration/hive-site.xml
@@ -0,0 +1,1253 @@
+<?xml version="1.0"?>
+<!--
+ ~ Licensed to the Apache Software Foundation (ASF) under one
+ ~ or more contributor license agreements. See the NOTICE file
+ ~ distributed with this work for additional information
+ ~ regarding copyright ownership. The ASF licenses this file
+ ~ to you under the Apache License, Version 2.0 (the
+ ~ "License"); you may not use this file except in compliance
+ ~ with the License. You may obtain a copy of the License at
+ ~
+ ~ http://www.apache.org/licenses/LICENSE-2.0
+ ~
+ ~ Unless required by applicable law or agreed to in writing,
+ ~ software distributed under the License is distributed on an
+ ~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ ~ KIND, either express or implied. See the License for the
+ ~ specific language governing permissions and limitations
+ ~ under the License.
+-->
+
+<configuration>
+ <property>
+ <name>tez.session.am.dag.submit.timeout.secs</name>
+ <value>0</value>
+ <description>
+ Time (in seconds) for which the Tez AM should wait for a DAG to be
submitted before shutting down
+ </description>
+ </property>
+ <property>
+ <name>hive.server2.materializedviews.registry.impl</name>
+ <value>DUMMY</value>
+ <description>
+ Expects one of [default, dummy].
+ The implementation that we should use for the materialized views
registry.
+ DEFAULT: Default cache for materialized views
+ DUMMY: Do not cache materialized views and hence forward requests
to metastore
+ </description>
+ </property>
+ <property>
+ <name>hive.server2.max.start.attempts</name>
+ <description>
+ This number of times HiveServer2 will attempt to start before
exiting, sleeping 60 seconds between retries.
+ </description>
+ <value>5</value>
+ </property>
+ <property>
+ <name>hive.server2.transport.mode</name>
+ <value>binary</value>
+ <description>Expects one of [binary, http]. Transport mode of
HiveServer2.</description>
+ </property>
+ <property>
+ <name>hive.default.fileformat</name>
+ <value>TextFile</value>
+ <description>Default file format for CREATE TABLE
statement.</description>
+ <display-name>Default File Format</display-name>
+ </property>
+ <property>
+ <name>hive.metastore.sasl.enabled</name>
+ <value>false</value>
+ <description>
+ If true, the metastore thrift interface will be secured with SASL.
Clients must authenticate with Kerberos.
+ </description>
+ </property>
+ <property>
+ <name>hive.metastore.execute.setugi</name>
+ <value>true</value>
+ <description>
+ In unsecure mode, setting this property to true will cause the
metastore to execute DFS operations
+ using the client's reported user and group permissions. Note that
this property must be set on both the
+ client and server sides. Further note that its best effort. If
client sets its to true and server sets it to
+ false, client setting will be ignored.
+ </description>
+ </property>
+ <property>
+ <name>hive.optimize.bucketmapjoin.sortedmerge</name>
+ <value>false</value>
+ <description>
+ If the tables being joined are sorted and bucketized on the join
columns, and they have the same number
+ of buckets, a sort-merge join can be performed by setting this
parameter as true.
+ </description>
+ </property>
+ <property>
+ <name>hive.tez.container.size</name>
+ <value>682</value>
+ <description>
+ By default, Tez uses the java options from map tasks. Use this
property to override that value.
+ </description>
+ <display-name>Tez Container Size</display-name>
+ </property>
+ <property>
+ <name>hive.tez.input.format</name>
+ <value>org.apache.hadoop.hive.ql.io.HiveInputFormat</value>
+ <description>The default input format for Tez. Tez groups splits in
the Application Master.</description>
+ </property>
+ <property>
+ <name>hive.tez.java.opts</name>
+ <value>-server -Djava.net.preferIPv4Stack=true -XX:NewRatio=8
-XX:+UseNUMA -XX:+UseG1GC -XX:+ResizeTLAB -XX:+PrintGCDetails -verbose:gc
-XX:+PrintGCTimeStamps</value>
+ <description>Java command line options for Tez.</description>
+ </property>
+ <property>
+ <name>hive.txn.timeout</name>
+ <value>300</value>
+ <description>
+ Time after which transactions are declared aborted if the client
has not sent a heartbeat, in seconds.
+ </description>
+ </property>
+ <property>
+ <name>hive.compactor.initiator.on</name>
+ <value>true</value>
+ <description>
+ Whether to run the compactor's initiator thread in this metastore
instance or not. If there is more
+ than one instance of the thrift metastore this should be set to
true on only one instance. Setting true on
+ only one host can be achieved by creating a config-group
containing the metastore host, and overriding the
+ default value to true in it.
+ </description>
+ <display-name>Run Compactor</display-name>
+ </property>
+ <property>
+ <name>hive.compactor.worker.threads</name>
+ <value>5</value>
+ <description>
+ Number of compactor worker threads to run on this metastore
instance. Can be different values on different metastore instances.
+ </description>
+ <display-name>Number of threads used by Compactor</display-name>
+ </property>
+ <property>
+ <name>hive.create.as.insert.only</name>
+ <value>false</value>
+ <description>
+ Whether the eligible tables should be created as ACID insert-only
by default.
+ Does not apply to external tables, the ones using storage
handlers, etc.
+ </description>
+ <display-name>Create Tables as ACID Insert Only</display-name>
+ </property>
+ <property>
+ <name>metastore.create.as.acid</name>
+ <value>false</value>
+ <description>
+ Whether the eligible tables should be created as full ACID by
default.
+ Does not apply to external tables, the ones using storage
handlers, etc.
+ </description>
+ <display-name>Create Tables as Full ACID</display-name>
+ </property>
+ <property>
+ <name>hive.compactor.delta.num.threshold</name>
+ <value>10</value>
+ <description>
+ Number of delta files that must exist in a directory before the
compactor will attempt a minor compaction.
+ </description>
+ </property>
+ <property>
+ <name>hive.compactor.abortedtxn.threshold</name>
+ <value>1000</value>
+ <description>
+ Number of aborted transactions involving a particular table or
partition before major compaction is initiated.
+ </description>
+ </property>
+ <property>
+ <name>datanucleus.cache.level2.type</name>
+ <value>none</value>
+ <description>
+ Determines caching mechanism DataNucleus L2 cache will use. It is
strongly recommended to use
+ default value of 'none' as other values may cause consistency
errors in Hive.
+ </description>
+ </property>
+ <property>
+ <name>hive.metastore.connect.retries</name>
+ <value>24</value>
+ <description>Number of retries while opening a connection to
metastore</description>
+ </property>
+ <property>
+ <name>hive.metastore.failure.retries</name>
+ <value>24</value>
+ <description>Number of retries upon failure of Thrift metastore
calls</description>
+ </property>
+ <property>
+ <name>hive.metastore.client.connect.retry.delay</name>
+ <value>5s</value>
+ <description>
+ Expects a time value with unit (d/day, h/hour, m/min, s/sec,
ms/msec, us/usec, ns/nsec), which is sec if not specified.
+ Number of seconds for the client to wait between consecutive
connection attempts
+ </description>
+ </property>
+ <property>
+ <name>hive.metastore.client.socket.timeout</name>
+ <value>1800s</value>
+ <description>
+ Expects a time value with unit (d/day, h/hour, m/min, s/sec,
ms/msec, us/usec, ns/nsec), which is sec if not specified.
+ MetaStore Client socket timeout in seconds
+ </description>
+ </property>
+ <property>
+ <name>hive.mapjoin.bucket.cache.size</name>
+ <value>10000</value>
+ <description/>
+ </property>
+ <property>
+ <name>hive.cluster.delegation.token.store.class</name>
+ <value>org.apache.hadoop.hive.thrift.ZooKeeperTokenStore</value>
+ <description>
+ The delegation token store implementation. Set to
org.apache.hadoop.hive.thrift.ZooKeeperTokenStore for load-balanced cluster.
+ </description>
+ </property>
+ <property>
+ <name>hive.server2.support.dynamic.service.discovery</name>
+ <value>true</value>
+ <description>
+ Whether HiveServer2 supports dynamic service discovery for its
clients.
+ To support this, each instance of HiveServer2 currently uses
ZooKeeper to register itself,
+ when it is brought up. JDBC/ODBC clients should use the ZooKeeper
ensemble: hive.zookeeper.quorum
+ in their connection string.
+ </description>
+ </property>
+ <property>
+ <name>hive.exec.scratchdir</name>
+ <display-name>Hive Exec Scratchdir</display-name>
+ <value>/tmp/hive</value>
+ <description>
+ HDFS root scratch dir for Hive jobs which gets created with write
all (733) permission. For each
+ connecting user, an HDFS scratch dir:
${hive.exec.scratchdir}/<username> is created, with
+ ${hive.scratch.dir.permission}.
+ </description>
+ </property>
+ <property>
+ <name>hive.exec.submitviachild</name>
+ <value>false</value>
+ <description/>
+ </property>
+ <property>
+ <name>hive.exec.submit.local.task.via.child</name>
+ <value>true</value>
+ <description>
+ Determines whether local tasks (typically mapjoin hashtable
generation phase) runs in
+ separate JVM (true recommended) or not.
+ Avoids the overhead of spawning new JVM, but can lead to
out-of-memory issues.
+ </description>
+ </property>
+ <property>
+ <name>hive.exec.compress.output</name>
+ <value>false</value>
+ <description>
+ This controls whether the final outputs of a query (to a
local/HDFS file or a Hive table) is compressed.
+ The compression codec and other options are determined from Hadoop
config variables mapred.output.compress*
+ </description>
+ </property>
+ <property>
+ <name>hive.exec.compress.intermediate</name>
+ <value>false</value>
+ <description>
+ This controls whether intermediate files produced by Hive between
multiple map-reduce jobs are compressed.
+ The compression codec and other options are determined from Hadoop
config variables mapred.output.compress*
+ </description>
+ </property>
+ <property>
+ <name>hive.exec.reducers.bytes.per.reducer</name>
+ <value>67108864</value>
+ <description>
+ Defines the size per reducer. For example, if it is set to 64M,
given 256M input size, 4 reducers will be used.
+ </description>
+ <display-name>Data per Reducer</display-name>
+ </property>
+ <property>
+ <name>hive.exec.reducers.max</name>
+ <value>1009</value>
+ <description>
+ max number of reducers will be used. If the one specified in the
configuration parameter mapred.reduce.tasks is
+ negative, Hive will use this one as the max number of reducers
when automatically determine number of reducers.
+ </description>
+ </property>
+ <property>
+ <name>hive.exec.pre.hooks</name>
+ <value />
+ <description>
+ Comma-separated list of pre-execution hooks to be invoked for each
statement.
+ A pre-execution hook is specified as the name of a Java class
which implements the
+ org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
+ </description>
+ </property>
+ <property>
+ <name>hive.exec.post.hooks</name>
+ <value />
+ <description>
+ Comma-separated list of post-execution hooks to be invoked for
each statement.
+ A post-execution hook is specified as the name of a Java class
which implements the
+ org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
+ </description>
+ </property>
+ <property>
+ <name>hive.exec.failure.hooks</name>
+ <value />
+ <description>
+ Comma-separated list of on-failure hooks to be invoked for each
statement.
+ An on-failure hook is specified as the name of Java class which
implements the
+ org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
+ </description>
+ </property>
+ <property>
+ <name>hive.exec.parallel</name>
+ <value>false</value>
+ <description>Whether to execute jobs in parallel</description>
+ </property>
+ <property>
+ <name>hive.exec.parallel.thread.number</name>
+ <value>8</value>
+ <description>How many jobs at most can be executed in
parallel</description>
+ </property>
+ <property>
+ <name>hive.mapred.reduce.tasks.speculative.execution</name>
+ <value>false</value>
+ <description>Whether speculative execution for reducers should be
turned on.</description>
+ </property>
+ <property>
+ <name>hive.exec.dynamic.partition</name>
+ <value>true</value>
+ <description>Whether or not to allow dynamic partitions in
DML/DDL.</description>
+ </property>
+ <property>
+ <name>hive.exec.dynamic.partition.mode</name>
+ <value>nonstrict</value>
+ <description>
+ In strict mode, the user must specify at least one static partition
+ in case the user accidentally overwrites all partitions.
+ NonStrict allows all partitions of a table to be dynamic.
+ </description>
+ <display-name>Allow all partitions to be Dynamic</display-name>
+ </property>
+ <property>
+ <name>hive.exec.max.dynamic.partitions</name>
+ <value>5000</value>
+ <description>Maximum number of dynamic partitions allowed to be
created in total.</description>
+ </property>
+ <property>
+ <name>hive.exec.max.dynamic.partitions.pernode</name>
+ <value>2000</value>
+ <description>Maximum number of dynamic partitions allowed to be
created in each mapper/reducer node.</description>
+ </property>
+ <property>
+ <name>hive.exec.max.created.files</name>
+ <value>100000</value>
+ <description>Maximum number of HDFS files created by all
mappers/reducers in a MapReduce job.</description>
+ </property>
+ <property>
+ <name>hive.metastore.warehouse.dir</name>
+ <display-name>Hive Metastore Warehouse directory</display-name>
+ <value>/warehouse/tablespace/managed/hive</value>
+ <description>location of default database for the
warehouse</description>
+ </property>
+ <property>
+ <name>hive.metastore.warehouse.external.dir</name>
+ <display-name>Hive Metastore Warehouse External
directory</display-name>
+ <value>/warehouse/tablespace/external/hive</value>
+ <description>location of default database for the warehouse of
external tables</description>
+ </property>
+ <property>
+ <name>hive.lock.manager</name>
+ <value/>
+ </property>
+ <property>
+ <name>hive.metastore.uris</name>
+ <value>thrift://localhost:9083</value>
+ <description>
+ Thrift URI for the remote metastore. Used by metastore client to
connect to remote metastore.
+ </description>
+ </property>
+ <property>
+ <name>hive.metastore.server.max.threads</name>
+ <value>100000</value>
+ <description>Maximum number of worker threads in the Thrift server's
pool.</description>
+ </property>
+ <property>
+ <name>hive.metastore.kerberos.keytab.file</name>
+ <value>/etc/security/keytabs/hive.service.keytab</value>
+ <description>
+ The path to the Kerberos Keytab file containing the metastore
Thrift server's service principal.
+ </description>
+ </property>
+ <property>
+ <name>hive.cluster.delegation.token.store.zookeeper.znode</name>
+ <value>/hive/cluster/delegation</value>
+ <description>The root path for token store data.</description>
+ </property>
+ <property>
+ <name>hive.metastore.cache.pinobjtypes</name>
+ <value>Table,Database,Type,FieldSchema,Order</value>
+ <description>List of comma separated metastore object types that
should be pinned in the cache</description>
+ </property>
+ <property>
+ <name>hive.metastore.pre.event.listeners</name>
+
<value>org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener</value>
+ <description>List of comma separated listeners for metastore
events.</description>
+ </property>
+ <property>
+ <name>hive.metastore.authorization.storage.checks</name>
+ <value>false</value>
+ <description>
+ Should the metastore do authorization checks against the
underlying storage (usually hdfs)
+ for operations like drop-partition (disallow the drop-partition if
the user in
+ question doesn't have permissions to delete the corresponding
directory
+ on the storage).
+ </description>
+ </property>
+ <property>
+ <name>hive.server2.idle.session.timeout</name>
+ <value>1d</value>
+ </property>
+ <property>
+ <name>hive.server2.idle.operation.timeout</name>
+ <value>6h</value>
+ </property>
+ <property>
+ <name>hive.strict.managed.tables</name>
+ <value>false</value>
+ </property>
+ <property>
+ <name>hive.txn.strict.locking.mode</name>
+ <value>false</value>
+ </property>
+ <property>
+ <name>hive.materializedview.rewriting.incremental</name>
+ <value>false</value>
+ </property>
+ <property>
+ <name>hive.map.aggr</name>
+ <value>true</value>
+ <description>Whether to use map-side aggregation in Hive Group By
queries</description>
+ </property>
+ <property>
+ <name>hive.cbo.enable</name>
+ <value>true</value>
+ <description>Flag to control enabling Cost Based Optimizations using
Calcite framework.</description>
+ <display-name>Enable Cost Based Optimizer</display-name>
+ </property>
+ <property>
+ <name>hive.mapjoin.optimized.hashtable</name>
+ <value>true</value>
+ <description>
+ Whether Hive should use memory-optimized hash table for MapJoin.
Only works on Tez,
+ because memory-optimized hashtable cannot be serialized.
+ </description>
+ </property>
+ <property>
+ <name>hive.smbjoin.cache.rows</name>
+ <value>10000</value>
+ <description>How many rows with the same key value should be cached in
memory per smb joined table.</description>
+ </property>
+ <property>
+ <name>hive.map.aggr.hash.percentmemory</name>
+ <value>0.5</value>
+ <description>Portion of total memory to be used by map-side group
aggregation hash table</description>
+ </property>
+ <property>
+ <name>hive.map.aggr.hash.force.flush.memory.threshold</name>
+ <value>0.9</value>
+ <description>
+ The max memory to be used by map-side group aggregation hash table.
+ If the memory usage is higher than this number, force to flush data
+ </description>
+ </property>
+ <property>
+ <name>hive.map.aggr.hash.min.reduction</name>
+ <value>0.5</value>
+ <description>
+ Hash aggregation will be turned off if the ratio between hash
table size and input rows is bigger than this number.
+ Set to 1 to make sure hash aggregation is never turned off.
+ </description>
+ </property>
+ <property>
+ <name>hive.merge.mapfiles</name>
+ <value>true</value>
+ <description>Merge small files at the end of a map-only
job</description>
+ </property>
+ <property>
+ <name>hive.merge.mapredfiles</name>
+ <value>false</value>
+ <description>Merge small files at the end of a map-reduce
job</description>
+ </property>
+ <property>
+ <name>hive.merge.tezfiles</name>
+ <value>false</value>
+ <description>Merge small files at the end of a Tez DAG</description>
+ </property>
+ <property>
+ <name>hive.merge.size.per.task</name>
+ <value>256000000</value>
+ <description>Size of merged files at the end of the job</description>
+ </property>
+ <property>
+ <name>hive.merge.smallfiles.avgsize</name>
+ <value>16000000</value>
+ <description>
+ When the average output file size of a job is less than this
number, Hive will start an additional
+ map-reduce job to merge the output files into bigger files. This
is only done for map-only jobs
+ if hive.merge.mapfiles is true, and for map-reduce jobs if
hive.merge.mapredfiles is true.
+ </description>
+ </property>
+ <property>
+ <name>hive.merge.rcfile.block.level</name>
+ <value>true</value>
+ <description/>
+ </property>
+ <property>
+ <name>hive.merge.orcfile.stripe.level</name>
+ <value>true</value>
+ <description>
+ When hive.merge.mapfiles or hive.merge.mapredfiles is enabled
while writing a
+ table with ORC file format, enabling this config will do stripe
level fast merge
+ for small ORC files. Note that enabling this config will not honor
padding tolerance
+ config (hive.exec.orc.block.padding.tolerance).
+ </description>
+ </property>
+ <property>
+ <name>hive.orc.splits.include.file.footer</name>
+ <value>false</value>
+ <description>
+ If turned on splits generated by orc will include metadata about
the stripes in the file. This
+ data is read remotely (from the client or HS2 machine) and sent to
all the tasks.
+ </description>
+ </property>
+ <property>
+ <name>hive.orc.compute.splits.num.threads</name>
+ <value>10</value>
+ <description>How many threads orc should use to create splits in
parallel.</description>
+ </property>
+ <property>
+ <name>hive.auto.convert.join</name>
+ <value>true</value>
+ <description>
+ Whether Hive enables the optimization about converting common join
into mapjoin based on the input file size
+ </description>
+ </property>
+ <property>
+ <name>hive.auto.convert.join.noconditionaltask</name>
+ <value>true</value>
+ <description>
+ Whether Hive enables the optimization about converting common join
into mapjoin based on the input file size.
+ If this parameter is on, and the sum of size for n-1 of the
tables/partitions for a n-way join is smaller than the
+ specified size, the join is directly converted to a mapjoin (there
is no conditional task).
+ </description>
+ </property>
+ <property>
+ <name>hive.limit.optimize.enable</name>
+ <value>true</value>
+ <description>Whether to enable to optimization to trying a smaller
subset of data for simple LIMIT first.</description>
+ </property>
+ <property>
+ <name>hive.tez.cpu.vcores</name>
+ <value>-1</value>
+ <description>
+ By default Tez will ask for however many cpus map-reduce is
configured to use per container. This can be used to overwrite.
+ </description>
+ </property>
+ <property>
+ <name>hive.tez.log.level</name>
+ <value>INFO</value>
+ <description>
+ The log level to use for tasks executing as part of the DAG.
+ Used only if hive.tez.java.opts is used to configure Java options.
+ </description>
+ </property>
+ <property>
+ <name>hive.enforce.sortmergebucketmapjoin</name>
+ <value>true</value>
+ <description>
+ If the user asked for sort-merge bucketed map-side join, and it
cannot be performed, should the query fail or not ?
+ </description>
+ </property>
+ <property>
+ <name>hive.auto.convert.sortmerge.join</name>
+ <value>true</value>
+ <description>
+ Will the join be automatically converted to a sort-merge join, if
the joined tables pass the criteria for sort-merge join.
+ </description>
+ </property>
+ <property>
+ <name>hive.auto.convert.sortmerge.join.to.mapjoin</name>
+ <value>true</value>
+ <description>
+ If hive.auto.convert.sortmerge.join is set to true, and a join was
converted to a sort-merge join,
+ this parameter decides whether each table should be tried as a big
table, and effectively a map-join should be
+ tried. That would create a conditional task with n+1 children for
a n-way join (1 child for each table as the
+ big table), and the backup task will be the sort-merge join. In
some cases, a map-join would be faster than a
+ sort-merge join, if there is no advantage of having the output
bucketed and sorted. For example, if a very big sorted
+ and bucketed table with few files (say 10 files) is being joined
with a very small sorted and bucketed table
+ with few files (10 files), the sort-merge join will only use 10
mappers, and a simple map-only join might be faster
+ if the complete small table can fit in memory, and a map-join can
be performed.
+ </description>
+ </property>
+ <property>
+ <name>hive.optimize.constant.propagation</name>
+ <value>true</value>
+ <description>Whether to enable constant propagation
optimizer</description>
+ </property>
+ <property>
+ <name>hive.optimize.metadataonly</name>
+ <value>true</value>
+ <description/>
+ </property>
+ <property>
+ <name>hive.optimize.null.scan</name>
+ <value>true</value>
+ <description>Don't scan relations which are guaranteed to not generate
any rows</description>
+ </property>
+ <property>
+ <name>hive.optimize.bucketmapjoin</name>
+ <value>true</value>
+ <description>Whether to try bucket mapjoin</description>
+ </property>
+ <property>
+ <name>hive.optimize.reducededuplication</name>
+ <value>true</value>
+ <description>
+ Remove extra map-reduce jobs if the data is already clustered by
the same key which needs to be used again.
+ This should always be set to true. Since it is a new feature, it
has been made configurable.
+ </description>
+ </property>
+ <property>
+ <name>hive.optimize.reducededuplication.min.reducer</name>
+ <value>4</value>
+ <description>
+ Reduce deduplication merges two RSs by moving
key/parts/reducer-num of the child RS to parent RS.
+ That means if reducer-num of the child RS is fixed (order by or
forced bucketing) and small, it can make
+ the query very slow by forcing a single MR job.
+ The optimization will be automatically disabled if number of
reducers would be less than specified value.
+ </description>
+ </property>
+ <property>
+ <name>hive.optimize.sort.dynamic.partition</name>
+ <value>false</value>
+ <description>
+ When enabled dynamic partitioning column will be globally sorted.
+ This way we can keep only one record writer open for each
partition value
+ in the reducer thereby reducing the memory pressure on reducers.
+ </description>
+ <display-name>Sort Partitions Dynamically</display-name>
+ </property>
+ <property>
+ <name>hive.stats.autogather</name>
+ <value>true</value>
+ <description>A flag to gather statistics automatically during the
INSERT OVERWRITE command.</description>
+ </property>
+ <property>
+ <name>hive.stats.dbclass</name>
+ <value>fs</value>
+ <description>
+ Expects one of the pattern in [jdbc(:.*), hbase, counter, custom,
fs].
+ The storage that stores temporary Hive statistics. Currently,
jdbc, hbase, counter and custom type are
+ supported.
+ </description>
+ </property>
+ <property>
+ <name>hive.stats.fetch.partition.stats</name>
+ <value>true</value>
+ <description>
+ Annotation of operator tree with statistics information requires
partition level basic
+ statistics like number of rows, data size and file size. Partition
statistics are fetched from
+ metastore. Fetching partition statistics for each needed partition
can be expensive when the
+ number of partitions is high. This flag can be used to disable
fetching of partition statistics
+ from metastore. When this flag is disabled, Hive will make calls
to filesystem to get file sizes
+ and will estimate the number of rows from row schema.
+ </description>
+ <display-name>Fetch partition stats at compiler</display-name>
+ </property>
+ <property>
+ <name>hive.stats.fetch.column.stats</name>
+ <value>false</value>
+ <description>
+ Annotation of operator tree with statistics information requires
column statistics.
+ Column statistics are fetched from metastore. Fetching column
statistics for each needed column
+ can be expensive when the number of columns is high. This flag can
be used to disable fetching
+ of column statistics from metastore.
+ </description>
+ <display-name>Fetch column stats at compiler</display-name>
+ </property>
+ <property>
+ <name>hive.zookeeper.namespace</name>
+ <value>hive_zookeeper_namespace</value>
+ <description>The parent node under which all ZooKeeper nodes are
created.</description>
+ </property>
+ <property>
+ <name>hive.txn.manager</name>
+ <value>org.apache.hadoop.hive.ql.lockmgr.DbTxnManager</value>
+ <description/>
+ <display-name>Transaction Manager</display-name>
+ </property>
+ <property>
+ <name>hive.txn.max.open.batch</name>
+ <value>1000</value>
+ <description>
+ Maximum number of transactions that can be fetched in one call to
open_txns().
+ Increasing this will decrease the number of delta files created
when
+ streaming data into Hive. But it will also increase the number of
+ open transactions at any given time, possibly impacting read
performance.
+ </description>
+ </property>
+ <property>
+ <name>hive.support.concurrency</name>
+ <value>true</value>
+ <description>Support concurrency and use locks, needed for
Transactions. Requires Zookeeper.</description>
+ <display-name>Use Locking</display-name>
+ </property>
+ <property>
+ <name>hive.cli.print.header</name>
+ <value>false</value>
+ <description>Whether to print the names of the columns in query
output.</description>
+ </property>
+ <property>
+ <name>hive.compactor.worker.timeout</name>
+ <value>86400</value>
+ <description>
+ Expects a time value with unit (d/day, h/hour, m/min, s/sec,
ms/msec, us/usec, ns/nsec), which is sec if not
+ specified.
+ Time before a given compaction in working state is declared a
failure
+ and returned to the initiated state.
+ </description>
+ </property>
+ <property>
+ <name>hive.compactor.check.interval</name>
+ <value>300</value>
+ <description>
+ Expects a time value with unit (d/day, h/hour, m/min, s/sec,
ms/msec, us/usec, ns/nsec), which is sec if not
+ specified.
+ Time between checks to see if any partitions need to be compacted.
+ This should be kept high because each check for compaction
requires many calls against the NameNode.
+ </description>
+ </property>
+ <property>
+ <name>hive.compactor.delta.pct.threshold</name>
+ <value>0.1f</value>
+ <description>Percentage (by size) of base that deltas can be before
major compaction is initiated.</description>
+ </property>
+ <property>
+ <name>hive.fetch.task.conversion</name>
+ <value>more</value>
+ <description>
+ Expects one of [none, minimal, more].
+ Some select queries can be converted to single FETCH task
minimizing latency.
+ Currently the query should be single sourced not having any
subquery and should not have
+ any aggregations or distincts (which incurs RS), lateral views and
joins.
+ 0. none : disable hive.fetch.task.conversion
+ 1. minimal : SELECT STAR, FILTER on partition columns, LIMIT only
+ 2. more : SELECT, FILTER, LIMIT only (support TABLESAMPLE and
virtual columns)
+ </description>
+ </property>
+ <property>
+ <name>hive.fetch.task.conversion.threshold</name>
+ <value>1073741824</value>
+ <description>
+ Input threshold for applying hive.fetch.task.conversion. If target
table is native, input length
+ is calculated by summation of file lengths. If it's not native,
storage handler for the table
+ can optionally implement
org.apache.hadoop.hive.ql.metadata.InputEstimator interface.
+ </description>
+ </property>
+ <property>
+ <name>hive.fetch.task.aggr</name>
+ <value>false</value>
+ <description>
+ Aggregation queries with no group-by clause (for example, select
count(*) from src) execute
+ final aggregations in single reduce task. If this is set true,
Hive delegates final aggregation
+ stage to fetch task, possibly decreasing the query time.
+ </description>
+ </property>
+ <property>
+ <name>hive.security.metastore.authorization.manager</name>
+ <display-name>Hive Authorization Manager</display-name>
+
<value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
+ <description>
+ authorization manager class name to be used in the metastore for
authorization.
+ The user defined authorization class should implement interface
+
org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.
+ </description>
+ </property>
+ <property>
+ <name>hive.security.metastore.authorization.auth.reads</name>
+ <value>true</value>
+ <description>If this is true, metastore authorizer authorizes read
actions on database, table</description>
+ </property>
+ <property>
+ <name>hive.security.metastore.authenticator.manager</name>
+
<value>org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator</value>
+ <description>
+ authenticator manager class name to be used in the metastore for
authentication.
+ The user defined authenticator should implement interface
+ org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.
+ </description>
+ </property>
+ <property>
+ <name>hive.server2.logging.operation.enabled</name>
+ <value>true</value>
+ <description>When true, HS2 will save operation logs</description>
+ </property>
+ <property>
+ <name>hive.server2.logging.operation.log.location</name>
+ <display-name>HiveServer2 Logging Operation Log Location</display-name>
+ <value>/tmp/hive/operation_logs</value>
+ <description>Top level directory where operation logs are stored if
logging functionality is enabled</description>
+ </property>
+ <property>
+ <name>hive.server2.zookeeper.namespace</name>
+ <value>hiveserver2</value>
+ <description>The parent node in ZooKeeper used by HiveServer2 when
supporting dynamic service discovery.</description>
+ </property>
+ <property>
+ <name>hive.server2.thrift.http.port</name>
+ <value>10001</value>
+ <description>Port number of HiveServer2 Thrift interface when
hive.server2.transport.mode is 'http'.</description>
+ </property>
+ <property>
+ <name>hive.server2.thrift.port</name>
+ <value>10000</value>
+ <display-name>HiveServer2 Port</display-name>
+ <description>TCP port number to listen on, default 10000.</description>
+ </property>
+ <property>
+ <name>hive.server2.thrift.sasl.qop</name>
+ <value>auth</value>
+ <description>
+ Expects one of [auth, auth-int, auth-conf].
+ Sasl QOP value; Set it to one of following values to enable higher
levels of
+ protection for HiveServer2 communication with clients.
+ "auth" - authentication only (default)
+ "auth-int" - authentication plus integrity protection
+ "auth-conf" - authentication plus integrity and confidentiality
protection
+ This is applicable only if HiveServer2 is configured to use
Kerberos authentication.
+ </description>
+ </property>
+ <property>
+ <name>hive.server2.thrift.max.worker.threads</name>
+ <value>500</value>
+ <description>Maximum number of Thrift worker threads</description>
+ </property>
+ <property>
+ <name>hive.server2.allow.user.substitution</name>
+ <value>true</value>
+ <description>Allow alternate user to be specified as part of
HiveServer2 open connection request.</description>
+ </property>
+ <property>
+ <name>hive.server2.authentication.spnego.keytab</name>
+ <value>/etc/security/keytabs/spnego.service.keytab</value>
+ <description>
+ keytab file for SPNego principal, optional,
+ typical value would look like
/etc/security/keytabs/spnego.service.keytab,
+ This keytab would be used by HiveServer2 when Kerberos security is
enabled and
+ HTTP transport mode is used.
+ This needs to be set only if SPNEGO is to be used in
authentication.
+ SPNego authentication would be honored only if valid
+ hive.server2.authentication.spnego.principal
+ and
+ hive.server2.authentication.spnego.keytab
+ are specified.
+ </description>
+ </property>
+ <property>
+ <name>hive.server2.authentication</name>
+ <description>Authentication mode, default NONE. Options are NONE,
NOSASL, KERBEROS, LDAP, PAM and CUSTOM
+ </description>
+ <value>NONE</value>
+ <display-name>HiveServer2 Authentication</display-name>
+ </property>
+ <property>
+ <name>hive.metastore.event.db.notification.api.auth</name>
+ <value>true</value>
+ <description>
+ Should metastore do authorization against database notification
related APIs such as get_next_notification.
+ If set to true, then only the superusers in proxy settings have
the permission.
+ </description>
+ </property>
+ <property>
+ <name>hive.server2.enable.doAs</name>
+ <value>true</value>
+ <description>
+ Setting this property to true will have HiveServer2 execute
+ Hive operations as the user making the calls to it.
+ </description>
+ <display-name>Run as end user instead of Hive user</display-name>
+ </property>
+ <property>
+ <name>hive.server2.table.type.mapping</name>
+ <value>CLASSIC</value>
+ <description>
+ Expects one of [classic, hive].
+ This setting reflects how HiveServer2 will report the table types
for JDBC and other
+ client implementations that retrieve the available tables and
supported table types
+ HIVE : Exposes Hive's native table types like MANAGED_TABLE,
EXTERNAL_TABLE, VIRTUAL_VIEW
+ CLASSIC : More generic types like TABLE and VIEW
+ </description>
+ </property>
+ <property>
+ <name>hive.server2.use.SSL</name>
+ <value>false</value>
+ <description>Set this to true for using SSL encryption in
HiveServer2.</description>
+ <display-name>Use SSL</display-name>
+ </property>
+ <property>
+ <name>hive.user.install.directory</name>
+ <display-name>Hive User Install directory</display-name>
+ <value>/user/</value>
+ <description>
+ If hive (in tez mode only) cannot find a usable hive jar in
"hive.jar.directory",
+ it will upload the hive jar to
"hive.user.install.directory/user.name"
+ and use it to run queries.
+ </description>
+ </property>
+ <property>
+ <name>hive.vectorized.groupby.maxentries</name>
+ <value>100000</value>
+ <description>
+ Max number of entries in the vector group by aggregation
hashtables.
+ Exceeding this will trigger a flush irrelevant of memory pressure
condition.
+ </description>
+ </property>
+ <property>
+ <name>hive.merge.nway.joins</name>
+ <value>false</value>
+ <description>Merge adjacent joins into a single n-way
join</description>
+ </property>
+ <property>
+ <name>hive.prewarm.enabled</name>
+ <value>false</value>
+ <description>Enables container prewarm for Tez (Hadoop 2
only)</description>
+ <display-name>Hold Containers to Reduce Latency</display-name>
+ </property>
+ <property>
+ <name>hive.prewarm.numcontainers</name>
+ <value>3</value>
+ <description>Controls the number of containers to prewarm for Tez
(Hadoop 2 only)</description>
+ <display-name>Number of Containers Held</display-name>
+ </property>
+ <property>
+ <name>hive.convert.join.bucket.mapjoin.tez</name>
+ <value>false</value>
+ <description>
+ Whether joins can be automatically converted to bucket map joins
in hive
+ when tez is used as the execution engine.
+ </description>
+ </property>
+ <property>
+ <name>hive.tez.auto.reducer.parallelism</name>
+ <value>true</value>
+ <description>
+ Turn on Tez' auto reducer parallelism feature. When enabled, Hive
will still estimate data sizes
+ and set parallelism estimates. Tez will sample source vertices'
output sizes and adjust the estimates at
+ runtime as necessary.
+ </description>
+ <display-name>Allow dynamic numbers of reducers</display-name>
+ </property>
+ <property>
+ <name>hive.tez.max.partition.factor</name>
+ <value>2.0</value>
+ <description>
+ When auto reducer parallelism is enabled this factor will be used
to over-partition data in shuffle edges.
+ </description>
+ </property>
+ <property>
+ <name>hive.tez.min.partition.factor</name>
+ <value>0.25</value>
+ <description>
+ When auto reducer parallelism is enabled this factor will be used
to put a lower limit to the number
+ of reducers that tez specifies.
+ </description>
+ </property>
+ <property>
+ <name>hive.tez.dynamic.partition.pruning</name>
+ <value>true</value>
+ <description>
+ When dynamic pruning is enabled, joins on partition keys will be
processed by sending events from
+ the processing vertices to the tez application master. These
events will be used to prune unnecessary
+ partitions.
+ </description>
+ <display-name>Allow dynamic partition pruning</display-name>
+ </property>
+ <property>
+ <name>hive.tez.dynamic.partition.pruning.max.event.size</name>
+ <value>1048576</value>
+ <description>
+ Maximum size of events sent by processors in dynamic pruning. If
this size is crossed no pruning will take place.
+ </description>
+ </property>
+ <property>
+ <name>hive.tez.dynamic.partition.pruning.max.data.size</name>
+ <value>104857600</value>
+ <description>Maximum total data size of events in dynamic
pruning.</description>
+ </property>
+ <property>
+ <name>hive.tez.smb.number.waves</name>
+ <value>0.5</value>
+ <description>
+ The number of waves in which to run the SMB join. Account for
cluster being occupied. Ideally should be 1 wave.
+ </description>
+ </property>
+ <property>
+ <name>hive.vectorized.execution.enabled</name>
+ <value>true</value>
+ <description>
+ This flag should be set to true to enable vectorized mode of query
execution.
+ The default value is false.
+ </description>
+ <display-name>Enable Vectorization and Map Vectorization</display-name>
+ </property>
+ <property>
+ <name>hive.auto.convert.join.noconditionaltask.size</name>
+ <value>52428800</value>
+ <description>
+ If hive.auto.convert.join.noconditionaltask is off, this parameter
does not take effect. However, if it
+ is on, and the sum of size for n-1 of the tables/partitions for a
n-way join is smaller than this size, the join is directly
+ converted to a mapjoin(there is no conditional task).
+ </description>
+ <display-name>For Map Join, per Map memory threshold</display-name>
+ </property>
+ <property>
+ <name>hive.optimize.index.filter</name>
+ <value>true</value>
+ <description>Whether to enable automatic use of indexes</description>
+ <display-name>Push Filters to Storage</display-name>
+ </property>
+ <property>
+ <name>hive.vectorized.groupby.checkinterval</name>
+ <value>4096</value>
+ <description>
+ Number of entries added to the group by aggregation hash before a
recomputation of average entry size is performed.
+ </description>
+ </property>
+ <property>
+ <name>hive.vectorized.groupby.flush.percent</name>
+ <value>0.1</value>
+ <description>Percent of entries in the group by aggregation hash
flushed when the memory threshold is exceeded.</description>
+ </property>
+ <property>
+ <name>hive.compute.query.using.stats</name>
+ <value>true</value>
+ <description>
+ When set to true Hive will answer a few queries like count(1)
purely using stats
+ stored in metastore. For basic stats collection turn on the config
hive.stats.autogather to true.
+ For more advanced stats collection need to run analyze table
queries.
+ </description>
+ <display-name>Compute simple queries using stats only</display-name>
+ </property>
+ <property>
+ <name>hive.limit.pushdown.memory.usage</name>
+ <value>0.04</value>
+ <description>The max memory to be used for hash in RS operator for top
K selection.</description>
+ </property>
+ <property>
+ <name>hive.server2.tez.sessions.per.default.queue</name>
+ <value>1</value>
+ <description>
+ A positive integer that determines the number of Tez sessions that
should be
+ launched on each of the queues specified by
"hive.server2.tez.default.queues".
+ Determines the parallelism on each queue.
+ </description>
+ <display-name>Session per queue</display-name>
+ </property>
+ <property>
+ <name>hive.driver.parallel.compilation</name>
+ <value>true</value>
+ <description>This flag allows HiveServer2 to compile queries in
parallel.</description>
+ <display-name>Compile queries in parallel</display-name>
+ </property>
+ <property>
+ <name>hive.server2.tez.initialize.default.sessions</name>
+ <value>false</value>
+ <description>
+ This flag is used in HiveServer2 to enable a user to use
HiveServer2 without
+ turning on Tez for HiveServer2. The user could potentially want to
run queries
+ over Tez without the pool of sessions.
+ </description>
+ <display-name>Start Tez session at Initialization</display-name>
+ </property>
+ <property>
+ <name>hive.server2.tez.default.queues</name>
+ <display-name>Default query queues</display-name>
+ <value>default</value>
+ <description>
+ A list of comma separated values corresponding to YARN queues of
the same name.
+ When HiveServer2 is launched in Tez mode, this configuration needs
to be set
+ for multiple Tez sessions to run in parallel on the cluster.
+ </description>
+ </property>
+ <property>
+ <name>hive.server2.webui.port</name>
+ <value>10002</value>
+ <description>Web UI port address</description>
+ </property>
+ <property>
+ <name>hive.server2.webui.use.ssl</name>
+ <value>false</value>
+ <description>Enable SSL for HiveServer2 Interactive</description>
+ </property>
+ <property>
+ <name>hive.server2.webui.enable.cors</name>
+ <value>true</value>
+ <description>Enable cross origin requests (CORS)</description>
+ </property>
+ <property>
+ <name>hive.server2.webui.cors.allowed.headers</name>
+
<value>X-Requested-With,Content-Type,Accept,Origin,X-Requested-By,x-requested-by</value>
+ <description>Comma separated list of http headers that are allowed
when CORS is enabled.</description>
+ </property>
+ <property>
+ <name>hive.exec.orc.split.strategy</name>
+ <value>HYBRID</value>
+ <description>
+ This is not a user level config. BI strategy is used when the
requirement is to spend less time in split
+ generation
+ as opposed to query execution (split generation does not read or
cache file footers).
+ ETL strategy is used when spending little more time in split
generation is acceptable
+ (split generation reads and caches file footers). HYBRID chooses
between the above strategies
+ based on heuristics.
+ </description>
+ </property>
+ <property>
+ <name>hive.vectorized.execution.reduce.enabled</name>
+ <value>true</value>
+ <description>
+ This flag should be set to true to enable vectorized mode of the
reduce-side of
+ query execution.
+ </description>
+ <display-name>Enable Reduce Vectorization</display-name>
+ </property>
+ <property>
+ <name>hive.default.fileformat.managed</name>
+ <value>ORC</value>
+ <description>
+ Default file format for CREATE TABLE statement applied to managed
tables only.
+ External tables will be created with default file format. Leaving
this null
+ will result in using the default file format for all tables.
+ </description>
+ </property>
+ <property>
+ <name>hive.hook.proto.base-directory</name>
+
<value>${hive.metastore.warehouse.external.dir}/sys.db/query_data/</value>
+ <description>Base directory for hive proto hook.</description>
+ </property>
+ <property>
+ <name>hive.execution.mode</name>
+ <value>container</value>
+ <description>Chooses whether query fragments will run in container or
in llap</description>
+ </property>
+ <property>
+ <name>hive.tez.input.generate.consistent.splits</name>
+ <value>true</value>
+ <description>Whether to generate consistent split locations when
generating splits in the AM</description>
+ </property>
+ <property>
+ <name>hive.tez.exec.print.summary</name>
+ <value>true</value>
+ <description>Display breakdown of execution steps, for every query
executed by the shell.</description>
+ </property>
+ <property>
+ <name>hive.vectorized.execution.mapjoin.native.enabled</name>
+ <value>true</value>
+ <description>
+ This flag should be set to true to enable native (i.e. non-pass
through) vectorization
+ of queries using MapJoin.
+ </description>
+ </property>
+ <property>
+ <name>hive.vectorized.execution.mapjoin.minmax.enabled</name>
+ <value>true</value>
+ <description>
+ This flag should be set to true to enable vector map join hash
tables to
+ use min / max filtering for integer join queries using MapJoin.
+ </description>
+ </property>
+ <property>
+
<name>hive.vectorized.execution.mapjoin.native.fast.hashtable.enabled</name>
+ <value>true</value>
+ <description>
+ This flag should be set to true to enable use of native fast
vector map join hash tables in
+ queries using MapJoin.
+ </description>
+ </property>
+ <property>
+ <name>hive.optimize.dynamic.partition.hashjoin</name>
+ <value>true</value>
+ <description>
+ Whether to enable dynamically partitioned hash join optimization.
+ This setting is also dependent on enabling hive.auto.convert.join
+ </description>
+ </property>
+ <property>
+ <name>hive.metastore.event.listeners</name>
+ <value/>
+ <description>Listeners for metastore events</description>
+ </property>
+ <property>
+ <name>hive.mapjoin.hybridgrace.hashtable</name>
+ <value>false</value>
+ <description>
+ Whether to use hybrid grace hash join as the join method for
mapjoin.
+ Applies to dynamically partitioned joins when running in LLAP, but
not to regular
+ broadcast(map) joins. hive.llap.enable.grace.join.in.llap is used
for this.
+ </description>
+ </property>
+ <property>
+ <name>hive.tez.cartesian-product.enabled</name>
+ <value>true</value>
+ <description>Use Tez cartesian product edge for Hive cartesian
product</description>
+ </property>
+ <property>
+ <name>hive.tez.bucket.pruning</name>
+ <value>true</value>
+ <description>
+ When pruning is enabled, filters on bucket columns will be
processed by
+ filtering the splits against a bitset of included buckets. This
needs predicates
+ produced by hive.optimize.ppd and hive.optimize.index.filters.
+ </description>
+ </property>
+ <property>
+ <name>hive.service.metrics.codahale.reporter.classes</name>
+
<value>org.apache.hadoop.hive.common.metrics.metrics2.JsonFileMetricsReporter,org.apache.hadoop.hive.common.metrics.metrics2.JmxMetricsReporter,org.apache.hadoop.hive.common.metrics.metrics2.Metrics2Reporter</value>
+ <description>Comma separated list of reporter implementation classes
for metric class</description>
+ </property>
+ <property>
+ <name>hive.metastore.dml.events</name>
+ <value>true</value>
+ <description>If true, the metastore will be asked to fire events for
DML operations</description>
+ </property>
+ <property>
+ <name>hive.repl.cm.enabled</name>
+ <value/>
+ <description>Turn on ChangeManager, so delete files will go to
cmrootdir.</description>
+ </property>
+ <property>
+ <name>hive.metastore.transactional.event.listeners</name>
+ <value>org.apache.hive.hcatalog.listener.DbNotificationListener</value>
+ <description>
+ A comma separated list of Java classes that implement the
+ org.apache.hadoop.hive.metastore.MetaStoreEventListener interface.
Both the metastore event and
+ corresponding listener method will be invoked in the same JDO
transaction.
+ </description>
+ </property>
+ <property>
+ <name>hive.repl.cmrootdir</name>
+ <value/>
+ <description>Root dir for ChangeManager, used for deleted
files.</description>
+ </property>
+ <property>
+ <name>hive.repl.rootdir</name>
+ <value/>
+ <description>HDFS root dir for all replication dumps.</description>
+ </property>
+ <property>
+ <name>hive.vectorized.adaptor.usage.mode</name>
+ <value>chosen</value>
+ <description>
+ Specifies the extent to which the VectorUDFAdaptor will be used
for UDFs that do not have a corresponding
+ vectorized class.
+ 0. none : disable any usage of VectorUDFAdaptor
+ 1. chosen : use VectorUDFAdaptor for a small set of UDFs that were
chosen for good performance
+ 2. all : use VectorUDFAdaptor for all UDFs
+ </description>
+ </property>
+</configuration>
diff --git
a/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hive/configuration/hive.conf.xml
b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hive/configuration/hive.conf.xml
new file mode 100644
index 00000000..c7bce78f
--- /dev/null
+++
b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hive/configuration/hive.conf.xml
@@ -0,0 +1,64 @@
+<?xml version="1.0"?>
+<!--
+ ~ Licensed to the Apache Software Foundation (ASF) under one
+ ~ or more contributor license agreements. See the NOTICE file
+ ~ distributed with this work for additional information
+ ~ regarding copyright ownership. The ASF licenses this file
+ ~ to you under the Apache License, Version 2.0 (the
+ ~ "License"); you may not use this file except in compliance
+ ~ with the License. You may obtain a copy of the License at
+ ~
+ ~ http://www.apache.org/licenses/LICENSE-2.0
+ ~
+ ~ Unless required by applicable law or agreed to in writing,
+ ~ software distributed under the License is distributed on an
+ ~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ ~ KIND, either express or implied. See the License for the
+ ~ specific language governing permissions and limitations
+ ~ under the License.
+-->
+
+<configuration>
+ <property>
+ <name>hive_user_nofile_limit</name>
+ <value>128000</value>
+ <description>Max open files limit setting for Hive user.</description>
+ </property>
+ <property>
+ <name>hive_user_nproc_limit</name>
+ <value>65536</value>
+ <description>Max number of processes limit setting for Hive
user.</description>
+ </property>
+ <property>
+ <name>content</name>
+ <display-name>hive.conf template</display-name>
+ <description>This is the freemarker template for the Hive user limits configuration file</description>
+ <value><![CDATA[
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+${hive_user} - nofile ${hive_user_nofile_limit}
+${hive_user} - nproc ${hive_user_nproc_limit}
+
+]]>
+ </value>
+ <attrs>
+ <type>longtext</type>
+ </attrs>
+ </property>
+</configuration>
\ No newline at end of file
diff --git
a/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hive/configuration/llap-cli-log4j2.xml
b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hive/configuration/llap-cli-log4j2.xml
new file mode 100644
index 00000000..901816f7
--- /dev/null
+++
b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hive/configuration/llap-cli-log4j2.xml
@@ -0,0 +1,126 @@
+<?xml version="1.0"?>
+<!--
+ ~ Licensed to the Apache Software Foundation (ASF) under one
+ ~ or more contributor license agreements. See the NOTICE file
+ ~ distributed with this work for additional information
+ ~ regarding copyright ownership. The ASF licenses this file
+ ~ to you under the Apache License, Version 2.0 (the
+ ~ "License"); you may not use this file except in compliance
+ ~ with the License. You may obtain a copy of the License at
+ ~
+ ~ http://www.apache.org/licenses/LICENSE-2.0
+ ~
+ ~ Unless required by applicable law or agreed to in writing,
+ ~ software distributed under the License is distributed on an
+ ~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ ~ KIND, either express or implied. See the License for the
+ ~ specific language governing permissions and limitations
+ ~ under the License.
+-->
+
+<configuration>
+ <property>
+ <name>content</name>
+ <display-name>llap-cli-log4j2 template</display-name>
+ <description>Custom llap-cli-log4j2.properties</description>
+ <value><![CDATA[
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+status = WARN
+name = LlapCliLog4j2
+packages = org.apache.hadoop.hive.ql.log
+
+<#noparse>
+# list of properties
+property.hive.log.level = WARN
+property.hive.root.logger = console
+property.hive.log.dir = ${sys:java.io.tmpdir}/${sys:user.name}
+property.hive.log.file = llap-cli.log
+property.hive.llapstatus.consolelogger.level = INFO
+
+# list of all appenders
+appenders = console, DRFA, llapstatusconsole
+
+# console appender
+appender.console.type = Console
+appender.console.name = console
+appender.console.target = SYSTEM_ERR
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = %p %c{2}: %m%n
+
+# llapstatusconsole appender
+appender.llapstatusconsole.type = Console
+appender.llapstatusconsole.name = llapstatusconsole
+appender.llapstatusconsole.target = SYSTEM_ERR
+appender.llapstatusconsole.layout.type = PatternLayout
+appender.llapstatusconsole.layout.pattern = %m%n
+
+# daily rolling file appender
+appender.DRFA.type = RollingRandomAccessFile
+appender.DRFA.name = DRFA
+appender.DRFA.fileName = ${sys:hive.log.dir}/${sys:hive.log.file}
+# Use %pidn in the filePattern to append <process-id>@<host-name> to the
filename if you want separate log files for different CLI sessions
+appender.DRFA.filePattern =
${sys:hive.log.dir}/${sys:hive.log.file}.%d{yyyy-MM-dd}
+appender.DRFA.layout.type = PatternLayout
+appender.DRFA.layout.pattern = %d{ISO8601} %5p [%t] %c{2}: %m%n
+appender.DRFA.policies.type = Policies
+appender.DRFA.policies.time.type = TimeBasedTriggeringPolicy
+appender.DRFA.policies.time.interval = 1
+appender.DRFA.policies.time.modulate = true
+appender.DRFA.strategy.type = DefaultRolloverStrategy
+appender.DRFA.strategy.max = 30
+
+# list of all loggers
+loggers = ZooKeeper, DataNucleus, Datastore, JPOX, HadoopConf,
LlapStatusServiceDriverConsole
+
+logger.ZooKeeper.name = org.apache.zookeeper
+logger.ZooKeeper.level = WARN
+
+logger.DataNucleus.name = DataNucleus
+logger.DataNucleus.level = ERROR
+
+logger.Datastore.name = Datastore
+logger.Datastore.level = ERROR
+
+logger.JPOX.name = JPOX
+logger.JPOX.level = ERROR
+
+logger.HadoopConf.name = org.apache.hadoop.conf.Configuration
+logger.HadoopConf.level = ERROR
+
+logger.LlapStatusServiceDriverConsole.name = LlapStatusServiceDriverConsole
+logger.LlapStatusServiceDriverConsole.additivity = false
+logger.LlapStatusServiceDriverConsole.level =
${sys:hive.llapstatus.consolelogger.level}
+
+
+# root logger
+rootLogger.level = ${sys:hive.log.level}
+rootLogger.appenderRefs = root, DRFA
+rootLogger.appenderRef.root.ref = ${sys:hive.root.logger}
+rootLogger.appenderRef.DRFA.ref = DRFA
+logger.LlapStatusServiceDriverConsole.appenderRefs = llapstatusconsole, DRFA
+logger.LlapStatusServiceDriverConsole.appenderRef.llapstatusconsole.ref =
llapstatusconsole
+logger.LlapStatusServiceDriverConsole.appenderRef.DRFA.ref = DRFA
+</#noparse>
+]]>
+ </value>
+ <attrs>
+ <type>longtext</type>
+ </attrs>
+ </property>
+</configuration>
diff --git
a/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hive/configuration/llap-daemon-log4j2.xml
b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hive/configuration/llap-daemon-log4j2.xml
new file mode 100644
index 00000000..0ef8491e
--- /dev/null
+++
b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hive/configuration/llap-daemon-log4j2.xml
@@ -0,0 +1,192 @@
+<?xml version="1.0"?>
+<!--
+ ~ Licensed to the Apache Software Foundation (ASF) under one
+ ~ or more contributor license agreements. See the NOTICE file
+ ~ distributed with this work for additional information
+ ~ regarding copyright ownership. The ASF licenses this file
+ ~ to you under the Apache License, Version 2.0 (the
+ ~ "License"); you may not use this file except in compliance
+ ~ with the License. You may obtain a copy of the License at
+ ~
+ ~ http://www.apache.org/licenses/LICENSE-2.0
+ ~
+ ~ Unless required by applicable law or agreed to in writing,
+ ~ software distributed under the License is distributed on an
+ ~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ ~ KIND, either express or implied. See the License for the
+ ~ specific language governing permissions and limitations
+ ~ under the License.
+-->
+
+<configuration>
+ <property>
+ <name>content</name>
+ <display-name>llap-daemon-log4j2 template</display-name>
+ <description>Custom llap-daemon-log4j2.properties</description>
+ <value><![CDATA[
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# This is the log4j2 properties file used by llap-daemons. There are several
loggers defined, which
+# can be selected while configuring LLAP.
+# Based on the one selected - UI links etc need to be manipulated in the
system.
+# Note: Some names and logic are common to this file and llap LogHelpers. Make
sure to change that
+# as well, if changing this file.
+
+status = INFO
+name = LlapDaemonLog4j2
+packages = org.apache.hadoop.hive.ql.log
+
+<#noparse>
+# list of properties
+property.llap.daemon.log.level = INFO
+property.llap.daemon.root.logger = console
+property.llap.daemon.log.dir = .
+property.llap.daemon.log.file = llapdaemon.log
+property.llap.daemon.historylog.file = llapdaemon_history.log
+property.llap.daemon.log.maxfilesize = 256MB
+property.llap.daemon.log.maxbackupindex = 240
+
+# list of all appenders
+appenders = console, RFA, HISTORYAPPENDER, query-routing
+
+# console appender
+appender.console.type = Console
+appender.console.name = console
+appender.console.target = SYSTEM_ERR
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = %d{ISO8601} %5p [%t (%X{fragmentId})] %c{2}:
%m%n
+
+# rolling file appender
+appender.RFA.type = RollingRandomAccessFile
+appender.RFA.name = RFA
+appender.RFA.fileName = ${sys:llap.daemon.log.dir}/${sys:llap.daemon.log.file}
+appender.RFA.filePattern =
${sys:llap.daemon.log.dir}/${sys:llap.daemon.log.file}_%d{yyyy-MM-dd-HH}_%i.done
+appender.RFA.layout.type = PatternLayout
+appender.RFA.layout.pattern = %d{ISO8601} %-5p [%t (%X{fragmentId})] %c: %m%n
+appender.RFA.policies.type = Policies
+appender.RFA.policies.time.type = TimeBasedTriggeringPolicy
+appender.RFA.policies.time.interval = 1
+appender.RFA.policies.time.modulate = true
+appender.RFA.policies.size.type = SizeBasedTriggeringPolicy
+appender.RFA.policies.size.size = ${sys:llap.daemon.log.maxfilesize}
+appender.RFA.strategy.type = DefaultRolloverStrategy
+appender.RFA.strategy.max = ${sys:llap.daemon.log.maxbackupindex}
+
+# history file appender
+appender.HISTORYAPPENDER.type = RollingRandomAccessFile
+appender.HISTORYAPPENDER.name = HISTORYAPPENDER
+appender.HISTORYAPPENDER.fileName =
${sys:llap.daemon.log.dir}/${sys:llap.daemon.historylog.file}
+appender.HISTORYAPPENDER.filePattern =
${sys:llap.daemon.log.dir}/${sys:llap.daemon.historylog.file}_%d{yyyy-MM-dd-HH}_%i.done
+appender.HISTORYAPPENDER.layout.type = PatternLayout
+appender.HISTORYAPPENDER.layout.pattern = %m%n
+appender.HISTORYAPPENDER.policies.type = Policies
+appender.HISTORYAPPENDER.policies.size.type = SizeBasedTriggeringPolicy
+appender.HISTORYAPPENDER.policies.size.size =
${sys:llap.daemon.log.maxfilesize}
+appender.HISTORYAPPENDER.policies.time.type = TimeBasedTriggeringPolicy
+appender.HISTORYAPPENDER.policies.time.interval = 1
+appender.HISTORYAPPENDER.policies.time.modulate = true
+appender.HISTORYAPPENDER.strategy.type = DefaultRolloverStrategy
+appender.HISTORYAPPENDER.strategy.max = ${sys:llap.daemon.log.maxbackupindex}
+
+# queryId based routing file appender
+appender.query-routing.type = Routing
+appender.query-routing.name = query-routing
+appender.query-routing.routes.type = Routes
+appender.query-routing.routes.pattern = $${ctx:queryId}
+#Purge policy for query-based Routing Appender
+appender.query-routing.purgePolicy.type = IdlePurgePolicy
+appender.query-routing.purgePolicy.timeToLive = 60
+appender.query-routing.purgePolicy.timeUnit = SECONDS
+# default route
+appender.query-routing.routes.route-default.type = Route
+appender.query-routing.routes.route-default.key = $${ctx:queryId}
+appender.query-routing.routes.route-default.ref = RFA
+# queryId based route
+appender.query-routing.routes.route-mdc.type = Route
+appender.query-routing.routes.route-mdc.file-mdc.type =
LlapRandomAccessFileAppender
+appender.query-routing.routes.route-mdc.file-mdc.name = query-file-appender
+appender.query-routing.routes.route-mdc.file-mdc.fileName =
${sys:llap.daemon.log.dir}/${ctx:queryId}-${ctx:dagId}.log
+appender.query-routing.routes.route-mdc.file-mdc.layout.type = PatternLayout
+appender.query-routing.routes.route-mdc.file-mdc.layout.pattern = %d{ISO8601}
%5p [%t (%X{fragmentId})] %c{2}: %m%n
+
+# list of all loggers
+loggers = PerfLogger, EncodedReader, NIOServerCnxn, ClientCnxnSocketNIO,
DataNucleus, Datastore, JPOX, HistoryLogger, LlapIoImpl, LlapIoOrc,
LlapIoCache, LlapIoLocking, TezSM, TezSS, TezHC, LlapDaemon
+
+logger.LlapDaemon.name = org.apache.hadoop.hive.llap.daemon.impl.LlapDaemon
+logger.LlapDaemon.level = INFO
+
+# shut up the Tez logs that log debug-level stuff on INFO
+
+logger.TezSM.name =
org.apache.tez.runtime.library.common.shuffle.impl.ShuffleManager.fetch
+logger.TezSM.level = WARN
+logger.TezSS.name =
org.apache.tez.runtime.library.common.shuffle.orderedgrouped.ShuffleScheduler.fetch
+logger.TezSS.level = WARN
+logger.TezHC.name = org.apache.tez.http.HttpConnection.url
+logger.TezHC.level = WARN
+
+logger.PerfLogger.name = org.apache.hadoop.hive.ql.log.PerfLogger
+logger.PerfLogger.level = DEBUG
+
+logger.EncodedReader.name =
org.apache.hadoop.hive.ql.io.orc.encoded.EncodedReaderImpl
+logger.EncodedReader.level = INFO
+
+logger.LlapIoImpl.name = LlapIoImpl
+logger.LlapIoImpl.level = INFO
+
+logger.LlapIoOrc.name = LlapIoOrc
+logger.LlapIoOrc.level = WARN
+
+logger.LlapIoCache.name = LlapIoCache
+logger.LlapIoCache.level = WARN
+
+logger.LlapIoLocking.name = LlapIoLocking
+logger.LlapIoLocking.level = WARN
+
+logger.NIOServerCnxn.name = org.apache.zookeeper.server.NIOServerCnxn
+logger.NIOServerCnxn.level = WARN
+
+logger.ClientCnxnSocketNIO.name = org.apache.zookeeper.ClientCnxnSocketNIO
+logger.ClientCnxnSocketNIO.level = WARN
+
+logger.DataNucleus.name = DataNucleus
+logger.DataNucleus.level = ERROR
+
+logger.Datastore.name = Datastore
+logger.Datastore.level = ERROR
+
+logger.JPOX.name = JPOX
+logger.JPOX.level = ERROR
+
+logger.HistoryLogger.name = org.apache.hadoop.hive.llap.daemon.HistoryLogger
+logger.HistoryLogger.level = INFO
+logger.HistoryLogger.additivity = false
+logger.HistoryLogger.appenderRefs = HistoryAppender
+logger.HistoryLogger.appenderRef.HistoryAppender.ref = HISTORYAPPENDER
+
+# root logger
+rootLogger.level = ${sys:llap.daemon.log.level}
+rootLogger.appenderRefs = root
+rootLogger.appenderRef.root.ref = ${sys:llap.daemon.root.logger}
+</#noparse>
+]]>
+ </value>
+ <attrs>
+ <type>longtext</type>
+ </attrs>
+ </property>
+</configuration>
diff --git
a/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/tez/metainfo.xml
b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hive/metainfo.xml
similarity index 56%
copy from
bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/tez/metainfo.xml
copy to
bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hive/metainfo.xml
index 509db831..7ce431aa 100644
---
a/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/tez/metainfo.xml
+++
b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hive/metainfo.xml
@@ -20,16 +20,34 @@
<metainfo>
<service>
- <name>tez</name>
- <display-name>Tez</display-name>
- <desc>Tez is the next generation Hadoop Query Processing framework
written on top of YARN.</desc>
- <version>0.10.2-1</version>
- <user>tez</user>
+ <name>hive</name>
+ <display-name>Hive</display-name>
+ <desc>
+ Apache Hive is a distributed, fault-tolerant data warehouse
system
+ that enables analytics at a massive scale and facilitates reading,
writing,
+ and managing petabytes of data residing in distributed storage
using SQL.
+ </desc>
+ <version>3.1.3-1</version>
+ <user>hive</user>
<components>
<component>
- <name>tez_client</name>
- <display-name>Tez Client</display-name>
+ <name>hiveserver2</name>
+ <display-name>HiveServer2</display-name>
+ <category>server</category>
+ <cardinality>1+</cardinality>
+ </component>
+
+ <component>
+ <name>hive_metastore</name>
+ <display-name>Hive Metastore</display-name>
+ <category>server</category>
+ <cardinality>1+</cardinality>
+ </component>
+
+ <component>
+ <name>hive_client</name>
+ <display-name>Hive Client</display-name>
<category>client</category>
<cardinality>1+</cardinality>
</component>
@@ -43,15 +61,16 @@
</architectures>
<packages>
<package>
- <name>tez-0.10.2-1.tar.gz</name>
-
<checksum>SHA-256:ebc2c195780fc76fb9b12e8987e48c301ca0b1916dca515f5c7a1122d74f397e</checksum>
+ <name>hive-3.1.3-1.tar.gz</name>
+
<checksum>SHA-256:1118e8c485ccc52dbf06a54604659c2bdbd7b4d4ba366aa40c000585303fcbf9</checksum>
</package>
</packages>
</package-specific>
</package-specifics>
<required-services>
+ <service>mysql</service>
<service>hadoop</service>
</required-services>
</service>
-</metainfo>
+</metainfo>
\ No newline at end of file
diff --git
a/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hive/order.json
b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hive/order.json
new file mode 100644
index 00000000..19452cf0
--- /dev/null
+++
b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hive/order.json
@@ -0,0 +1,22 @@
+{
+ "HIVE_METASTORE-START": [
+ "MYSQL_SERVER-START",
+ "NAMENODE-START",
+ "NODEMANAGER-START"
+ ],
+ "HIVE_METASTORE-RESTART": [
+ "MYSQL_SERVER-RESTART",
+ "NAMENODE-RESTART",
+ "NODEMANAGER-RESTART"
+ ],
+ "HIVESERVER2-START": [
+ "NODEMANAGER-START",
+ "ZOOKEEPER_SERVER-START",
+ "HIVE_METASTORE-START"
+ ],
+ "HIVESERVER2-RESTART": [
+ "NODEMANAGER-RESTART",
+ "ZOOKEEPER_SERVER-RESTART",
+ "HIVE_METASTORE-RESTART"
+ ]
+}
\ No newline at end of file
diff --git
a/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/tez/metainfo.xml
b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/tez/metainfo.xml
index 509db831..d35a1973 100644
---
a/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/tez/metainfo.xml
+++
b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/tez/metainfo.xml
@@ -51,7 +51,7 @@
</package-specifics>
<required-services>
- <service>hadoop</service>
+ <service>hive</service>
</required-services>
</service>
</metainfo>
diff --git
a/bigtop-manager-server/src/main/resources/stacks/infra/1.0.0/services/mysql/order.json
b/bigtop-manager-server/src/main/resources/stacks/infra/1.0.0/services/mysql/order.json
index 9e26dfee..2462a50a 100644
---
a/bigtop-manager-server/src/main/resources/stacks/infra/1.0.0/services/mysql/order.json
+++
b/bigtop-manager-server/src/main/resources/stacks/infra/1.0.0/services/mysql/order.json
@@ -1 +1,5 @@
-{}
\ No newline at end of file
+{
+ "MYSQL_SERVER-STOP": [
+ "HIVE_METASTORE-STOP"
+ ]
+}
\ No newline at end of file
diff --git
a/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hive/HiveClientScript.java
b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hive/HiveClientScript.java
new file mode 100644
index 00000000..2456ddf0
--- /dev/null
+++
b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hive/HiveClientScript.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.bigtop.manager.stack.bigtop.v3_3_0.hive;
+
+import org.apache.bigtop.manager.common.shell.ShellResult;
+import org.apache.bigtop.manager.stack.core.spi.param.Params;
+import org.apache.bigtop.manager.stack.core.spi.script.AbstractClientScript;
+import org.apache.bigtop.manager.stack.core.spi.script.Script;
+
+import com.google.auto.service.AutoService;
+import lombok.extern.slf4j.Slf4j;
+
+import java.util.Properties;
+
+@Slf4j
+@AutoService(Script.class)
+public class HiveClientScript extends AbstractClientScript {
+
+ @Override
+ public ShellResult add(Params params) {
+ Properties properties = new Properties();
+ properties.setProperty(PROPERTY_KEY_SKIP_LEVELS, "1");
+
+ return super.add(params, properties);
+ }
+
+ @Override
+ public ShellResult configure(Params params) {
+ return HiveSetup.configure(params);
+ }
+
+ @Override
+ public String getComponentName() {
+ return "hive_client";
+ }
+}
diff --git
a/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hive/HiveMetastoreScript.java
b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hive/HiveMetastoreScript.java
new file mode 100644
index 00000000..181ab29a
--- /dev/null
+++
b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hive/HiveMetastoreScript.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.bigtop.manager.stack.bigtop.v3_3_0.hive;
+
+import org.apache.bigtop.manager.common.constants.Constants;
+import org.apache.bigtop.manager.common.constants.MessageConstants;
+import org.apache.bigtop.manager.common.message.entity.pojo.RepoInfo;
+import org.apache.bigtop.manager.common.shell.ShellResult;
+import org.apache.bigtop.manager.common.utils.os.OSDetection;
+import org.apache.bigtop.manager.stack.core.exception.StackException;
+import org.apache.bigtop.manager.stack.core.spi.param.Params;
+import org.apache.bigtop.manager.stack.core.spi.script.AbstractServerScript;
+import org.apache.bigtop.manager.stack.core.spi.script.Script;
+import org.apache.bigtop.manager.stack.core.tarball.TarballDownloader;
+import org.apache.bigtop.manager.stack.core.utils.LocalSettings;
+import org.apache.bigtop.manager.stack.core.utils.linux.LinuxFileUtils;
+import org.apache.bigtop.manager.stack.core.utils.linux.LinuxOSUtils;
+
+import com.google.auto.service.AutoService;
+import lombok.extern.slf4j.Slf4j;
+
+import java.io.IOException;
+import java.text.MessageFormat;
+import java.util.Properties;
+
+@Slf4j
+@AutoService(Script.class)
+public class HiveMetastoreScript extends AbstractServerScript {
+
+ @Override
+ public ShellResult add(Params params) {
+ Properties properties = new Properties();
+ properties.setProperty(PROPERTY_KEY_SKIP_LEVELS, "1");
+
+ ShellResult shellResult = super.add(params, properties);
+
+ // Download mysql jdbc driver
+ RepoInfo repoInfo = LocalSettings.repos().stream()
+ .filter(r -> OSDetection.getArch().equals(r.getArch()) &&
r.getType() == 2)
+ .findFirst()
+ .orElseThrow(() -> new RuntimeException("Cannot find repo for
os: [" + OSDetection.getOS()
+ + "] and arch: [" + OSDetection.getArch() + "]"));
+ String mysqlDriver = repoInfo.getBaseUrl() +
"/mysql-connector-j-8.0.33.jar";
+ TarballDownloader.download(mysqlDriver, params.stackHome());
+ LinuxFileUtils.moveFile(params.stackHome() +
"/mysql-connector-j-8.0.33.jar", params.serviceHome() + "/lib/");
+ LinuxFileUtils.updateOwner(params.serviceHome() + "/lib",
params.user(), params.group(), true);
+ LinuxFileUtils.updatePermissions(params.serviceHome() + "/lib",
Constants.PERMISSION_755, true);
+
+ return shellResult;
+ }
+
+ @Override
+ public ShellResult configure(Params params) {
+ return HiveSetup.configure(params);
+ }
+
+ @Override
+ public ShellResult start(Params params) {
+ configure(params);
+ HiveParams hiveParams = (HiveParams) params;
+ try {
+ initSchema(params);
+ String cmd = MessageFormat.format(
+ "{0}/hive-service.sh metastore " +
hiveParams.getHiveMetastorePidFile(),
+ hiveParams.serviceHome() + "/bin");
+ ShellResult shellResult = LinuxOSUtils.sudoExecCmd(cmd,
hiveParams.user());
+ if (shellResult.getExitCode() != 0) {
+ throw new StackException("Failed to start HiveMetastore: {0}",
shellResult.getErrMsg());
+ }
+ long startTime = System.currentTimeMillis();
+ long maxWaitTime = 5000;
+ long pollInterval = 500;
+
+ while (System.currentTimeMillis() - startTime < maxWaitTime) {
+ ShellResult statusResult = status(params);
+ if (statusResult.getExitCode() == 0) {
+ return statusResult;
+ }
+ Thread.sleep(pollInterval);
+ }
+ return status(params);
+ } catch (Exception e) {
+ throw new StackException(e);
+ }
+ }
+
+ @Override
+ public ShellResult stop(Params params) {
+ HiveParams hiveParams = (HiveParams) params;
+ int pid = Integer.parseInt(
+
LinuxFileUtils.readFile(hiveParams.getHiveMetastorePidFile()).replaceAll("\r|\n",
""));
+ String cmd = "kill -9 " + pid;
+ try {
+ return LinuxOSUtils.sudoExecCmd(cmd, hiveParams.user());
+ } catch (IOException e) {
+ throw new StackException(e);
+ }
+ }
+
+ @Override
+ public ShellResult status(Params params) {
+ HiveParams hiveParams = (HiveParams) params;
+ return LinuxOSUtils.checkProcess(hiveParams.getHiveMetastorePidFile());
+ }
+
+ private void initSchema(Params params) throws Exception {
+ HiveParams hiveParams = (HiveParams) params;
+ String cmd = hiveParams.serviceHome() + "/bin/schematool -validate
-dbType mysql";
+ ShellResult shellResult = LinuxOSUtils.sudoExecCmd(cmd,
hiveParams.user());
+ String clusterName = LocalSettings.cluster().getName();
+ if (shellResult.getExitCode() != MessageConstants.SUCCESS_CODE
+ && shellResult.getErrMsg().contains("Table '" + clusterName +
"_hive.VERSION' doesn't exist")) {
+ // init schema
+ cmd = hiveParams.serviceHome() + "/bin/schematool -initSchema
-dbType mysql";
+ shellResult = LinuxOSUtils.sudoExecCmd(cmd, hiveParams.user());
+ if (shellResult.getExitCode() != MessageConstants.SUCCESS_CODE) {
+ throw new StackException(shellResult.getErrMsg());
+ }
+ }
+ }
+
+ @Override
+ public String getComponentName() {
+ return "hive_metastore";
+ }
+}
diff --git
a/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hive/HiveParams.java
b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hive/HiveParams.java
new file mode 100644
index 00000000..68abf5b2
--- /dev/null
+++
b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hive/HiveParams.java
@@ -0,0 +1,162 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.bigtop.manager.stack.bigtop.v3_3_0.hive;
+
+import org.apache.bigtop.manager.common.message.entity.payload.CommandPayload;
+import org.apache.bigtop.manager.stack.bigtop.param.BigtopParams;
+import org.apache.bigtop.manager.stack.core.annotations.GlobalParams;
+import org.apache.bigtop.manager.stack.core.spi.param.Params;
+import org.apache.bigtop.manager.stack.core.utils.LocalSettings;
+
+import com.google.auto.service.AutoService;
+import lombok.Getter;
+import lombok.NoArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+
+import java.util.List;
+import java.util.Map;
+
@Getter
@Slf4j
@AutoService(Params.class)
@NoArgsConstructor
public class HiveParams extends BigtopParams {

    // Defaults; overwritten from the hive-env configuration in hiveEnv().
    private String hiveLogDir = "/var/log/hive";
    private String hivePidDir = "/var/run/hive";

    // NOTE(review): these are derived from hivePidDir in the constructor, BEFORE
    // hiveEnv() may overwrite hivePidDir from configuration — if hive_pid_dir is
    // customized, the pid-file paths here keep the default prefix. Confirm intended.
    private String hiveserver2PidFile;
    private String hiveMetastorePidFile;

    // Parsed out of hive.metastore.uris in hiveSite().
    private Integer metastorePort;
    private String hiveEnvContent;
    private String hiveLog4j2Content;
    private String beelineLog4j2Content;
    private String hiveExecLog4j2Content;
    private String llapCliLog4j2Content;
    private String llapDaemonLog4j2Content;

    // Tiny launcher script: starts "hive --service <name>" in the background and
    // records its pid into the file given as the second argument.
    private final String hiveShellContent =
            "dir=$(dirname $0)\n$dir/hive --service $1 > /dev/null 2>&1 &\necho $! > $2";

    public HiveParams(CommandPayload commandPayload) {
        super(commandPayload);
        // Values exposed to configuration templates rendered by HiveSetup.
        globalParamsMap.put("java_home", javaHome());
        globalParamsMap.put("hadoop_home", hadoopHome());
        globalParamsMap.put("hive_home", serviceHome());
        globalParamsMap.put("hive_conf_dir", confDir());
        globalParamsMap.put("security_enabled", false);
        globalParamsMap.put("hive_user", user());
        globalParamsMap.put("hive_group", group());

        hiveserver2PidFile = hivePidDir + "/hiveserver2.pid";
        hiveMetastorePidFile = hivePidDir + "/hive-metastore.pid";
    }

    /** Returns the raw content of the hive.conf limits file from configuration. */
    public String hiveLimits() {
        Map<String, Object> hiveConf = LocalSettings.configurations(getServiceName(), "hive.conf");
        return (String) hiveConf.get("content");
    }

    /**
     * Builds the effective hive-site.xml properties: parses the metastore port from
     * hive.metastore.uris, injects zookeeper quorum/port, and wires the MySQL-backed
     * metastore database (URL, driver, root credentials) for this cluster.
     */
    @GlobalParams
    public Map<String, Object> hiveSite() {
        Map<String, Object> configurations = LocalSettings.configurations(getServiceName(), "hive-site");
        String metastoreUris = configurations.get("hive.metastore.uris").toString();

        // Port is whatever follows the last ':' in the URI (e.g. thrift://host:9083).
        String[] split = metastoreUris.split(":");
        metastorePort = Integer.parseInt(split[split.length - 1]);
        globalParamsMap.put("hive_metastore_port", metastorePort);

        // Auto generate zookeeper properties for hive-site.xml
        Map<String, Object> zooCfg = LocalSettings.configurations("zookeeper", "zoo.cfg");
        List<String> zookeeperQuorum = LocalSettings.hosts("zookeeper_server");

        configurations.put("hive.zookeeper.client.port", zooCfg.get("clientPort"));
        configurations.put("hive.zookeeper.quorum", String.join(",", zookeeperQuorum));

        // Auto generate database properties for hive-site.xml
        // NOTE(review): the MySQL port 3306 and root user are hard-coded here.
        String mysqlHost = LocalSettings.hosts("mysql_server").get(0);
        String mysqlPassword = LocalSettings.configurations("mysql", "common")
                .get("root_password")
                .toString();
        String clusterName = LocalSettings.cluster().getName();
        configurations.put("hive.metastore.db.type", "mysql");
        configurations.put(
                "javax.jdo.option.ConnectionURL",
                "jdbc:mysql://" + mysqlHost + ":3306/" + clusterName
                        + "_hive?createDatabaseIfNotExist=true&useSSL=false&allowPublicKeyRetrieval=true");
        configurations.put("javax.jdo.option.ConnectionDriverName", "com.mysql.cj.jdbc.Driver");
        configurations.put("javax.jdo.option.ConnectionUserName", "root");
        configurations.put("javax.jdo.option.ConnectionPassword", mysqlPassword);
        return configurations;
    }

    /** Loads hive-env, caching the pid/log dirs and the template content. */
    @GlobalParams
    public Map<String, Object> hiveEnv() {
        Map<String, Object> hiveEnv = LocalSettings.configurations(getServiceName(), "hive-env");
        hivePidDir = (String) hiveEnv.get("hive_pid_dir");
        hiveLogDir = (String) hiveEnv.get("hive_log_dir");
        hiveEnvContent = (String) hiveEnv.get("content");
        return hiveEnv;
    }

    /** Caches the hive-log4j2 template content for HiveSetup. */
    @GlobalParams
    public Map<String, Object> hiveLog4j2() {
        Map<String, Object> configurations = LocalSettings.configurations(getServiceName(), "hive-log4j2");
        hiveLog4j2Content = (String) configurations.get("content");
        return configurations;
    }

    /** Caches the beeline-log4j2 template content for HiveSetup. */
    @GlobalParams
    public Map<String, Object> beelineLog4j2() {
        Map<String, Object> configurations = LocalSettings.configurations(getServiceName(), "beeline-log4j2");
        beelineLog4j2Content = (String) configurations.get("content");
        return configurations;
    }

    /** Caches the hive-exec-log4j2 template content for HiveSetup. */
    @GlobalParams
    public Map<String, Object> hiveExecLog4j2() {
        Map<String, Object> configurations = LocalSettings.configurations(getServiceName(), "hive-exec-log4j2");
        hiveExecLog4j2Content = (String) configurations.get("content");
        return configurations;
    }

    /** Caches the llap-cli-log4j2 template content for HiveSetup. */
    @GlobalParams
    public Map<String, Object> llapCliLog4j2() {
        Map<String, Object> configurations = LocalSettings.configurations(getServiceName(), "llap-cli-log4j2");
        llapCliLog4j2Content = (String) configurations.get("content");
        return configurations;
    }

    /** Caches the llap-daemon-log4j2 template content for HiveSetup. */
    @GlobalParams
    public Map<String, Object> llapDaemonLog4j2() {
        Map<String, Object> configurations = LocalSettings.configurations(getServiceName(), "llap-daemon-log4j2");
        llapDaemonLog4j2Content = (String) configurations.get("content");
        return configurations;
    }

    /** Hadoop install root within the stack home; exported as hadoop_home. */
    public String hadoopHome() {
        return stackHome() + "/hadoop";
    }

    @Override
    public String getServiceName() {
        return "hive";
    }
}
diff --git
a/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hive/HiveServer2Script.java
b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hive/HiveServer2Script.java
new file mode 100644
index 00000000..20b65b4e
--- /dev/null
+++
b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hive/HiveServer2Script.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.bigtop.manager.stack.bigtop.v3_3_0.hive;
+
+import org.apache.bigtop.manager.common.shell.ShellResult;
+import org.apache.bigtop.manager.stack.core.exception.StackException;
+import org.apache.bigtop.manager.stack.core.spi.param.Params;
+import org.apache.bigtop.manager.stack.core.spi.script.AbstractServerScript;
+import org.apache.bigtop.manager.stack.core.spi.script.Script;
+import org.apache.bigtop.manager.stack.core.utils.linux.LinuxFileUtils;
+import org.apache.bigtop.manager.stack.core.utils.linux.LinuxOSUtils;
+
+import com.google.auto.service.AutoService;
+import lombok.extern.slf4j.Slf4j;
+
+import java.io.IOException;
+import java.text.MessageFormat;
+import java.util.Properties;
+
+@Slf4j
+@AutoService(Script.class)
+public class HiveServer2Script extends AbstractServerScript {
+
+ @Override
+ public ShellResult add(Params params) {
+ Properties properties = new Properties();
+ properties.setProperty(PROPERTY_KEY_SKIP_LEVELS, "1");
+
+ return super.add(params, properties);
+ }
+
+ @Override
+ public ShellResult configure(Params params) {
+ return HiveSetup.configure(params);
+ }
+
+ @Override
+ public ShellResult start(Params params) {
+ configure(params);
+ HiveParams hiveParams = (HiveParams) params;
+ String cmd = MessageFormat.format(
+ "{0}/hive-service.sh hiveserver2 " +
hiveParams.getHiveserver2PidFile(),
+ hiveParams.serviceHome() + "/bin");
+ try {
+ ShellResult shellResult = LinuxOSUtils.sudoExecCmd(cmd,
hiveParams.user());
+ if (shellResult.getExitCode() != 0) {
+ throw new StackException("Failed to start HiveServer2: {0}",
shellResult.getErrMsg());
+ }
+ long startTime = System.currentTimeMillis();
+ long maxWaitTime = 5000;
+ long pollInterval = 500;
+
+ while (System.currentTimeMillis() - startTime < maxWaitTime) {
+ ShellResult statusResult = status(params);
+ if (statusResult.getExitCode() == 0) {
+ return statusResult;
+ }
+ Thread.sleep(pollInterval);
+ }
+ return status(params);
+ } catch (Exception e) {
+ throw new StackException(e);
+ }
+ }
+
+ @Override
+ public ShellResult stop(Params params) {
+ HiveParams hiveParams = (HiveParams) params;
+ int pid = Integer.parseInt(
+
LinuxFileUtils.readFile(hiveParams.getHiveserver2PidFile()).replaceAll("\r|\n",
""));
+ String cmd = "kill -9 " + pid;
+ try {
+ return LinuxOSUtils.sudoExecCmd(cmd, hiveParams.user());
+ } catch (IOException e) {
+ throw new StackException(e);
+ }
+ }
+
+ @Override
+ public ShellResult status(Params params) {
+ HiveParams hiveParams = (HiveParams) params;
+ return LinuxOSUtils.checkProcess(hiveParams.getHiveserver2PidFile());
+ }
+
+ @Override
+ public String getComponentName() {
+ return "hiveserver2";
+ }
+}
diff --git
a/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hive/HiveSetup.java
b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hive/HiveSetup.java
new file mode 100644
index 00000000..6ba97337
--- /dev/null
+++
b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hive/HiveSetup.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.bigtop.manager.stack.bigtop.v3_3_0.hive;
+
+import org.apache.bigtop.manager.common.constants.Constants;
+import org.apache.bigtop.manager.common.shell.ShellResult;
+import org.apache.bigtop.manager.stack.bigtop.v3_3_0.hadoop.HadoopParams;
+import org.apache.bigtop.manager.stack.core.enums.ConfigType;
+import org.apache.bigtop.manager.stack.core.spi.param.Params;
+import org.apache.bigtop.manager.stack.core.utils.linux.LinuxFileUtils;
+
+import lombok.AccessLevel;
+import lombok.NoArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+
+import java.text.MessageFormat;
+
+import static
org.apache.bigtop.manager.common.constants.Constants.PERMISSION_755;
+
+@Slf4j
+@NoArgsConstructor(access = AccessLevel.PRIVATE)
+public class HiveSetup {
+
+ public static ShellResult configure(Params params) {
+ log.info("Configuring Hive");
+ HiveParams hiveParams = (HiveParams) params;
+
+ String confDir = hiveParams.confDir();
+ String hiveUser = hiveParams.user();
+ String hiveGroup = hiveParams.group();
+
+ LinuxFileUtils.createDirectories(hiveParams.getHiveLogDir(), hiveUser,
hiveGroup, PERMISSION_755, true);
+ LinuxFileUtils.createDirectories(hiveParams.getHivePidDir(), hiveUser,
hiveGroup, PERMISSION_755, true);
+
+ LinuxFileUtils.toFile(
+ ConfigType.CONTENT,
+ MessageFormat.format("{0}/hive-service.sh",
hiveParams.serviceHome() + "/bin"),
+ hiveUser,
+ hiveGroup,
+ Constants.PERMISSION_755,
+ hiveParams.getHiveShellContent());
+
+ LinuxFileUtils.toFileByTemplate(
+ hiveParams.hiveLimits(),
+ MessageFormat.format("{0}/hive.conf",
HadoopParams.LIMITS_CONF_DIR),
+ Constants.ROOT_USER,
+ Constants.ROOT_USER,
+ Constants.PERMISSION_644,
+ hiveParams.getGlobalParamsMap());
+
+ LinuxFileUtils.toFileByTemplate(
+ hiveParams.getHiveEnvContent(),
+ MessageFormat.format("{0}/hive-env.sh", confDir),
+ hiveUser,
+ hiveGroup,
+ Constants.PERMISSION_644,
+ hiveParams.getGlobalParamsMap());
+
+ LinuxFileUtils.toFile(
+ ConfigType.XML,
+ MessageFormat.format("{0}/hive-site.xml", confDir),
+ hiveUser,
+ hiveGroup,
+ Constants.PERMISSION_644,
+ hiveParams.hiveSite());
+
+ LinuxFileUtils.toFileByTemplate(
+ hiveParams.getHiveLog4j2Content(),
+ MessageFormat.format("{0}/hive-log4j2.properties", confDir),
+ hiveUser,
+ hiveGroup,
+ Constants.PERMISSION_644,
+ hiveParams.getGlobalParamsMap());
+
+ LinuxFileUtils.toFileByTemplate(
+ hiveParams.getBeelineLog4j2Content(),
+ MessageFormat.format("{0}/beeline-log4j2.properties", confDir),
+ hiveUser,
+ hiveGroup,
+ Constants.PERMISSION_644,
+ hiveParams.getGlobalParamsMap());
+
+ LinuxFileUtils.toFileByTemplate(
+ hiveParams.getHiveExecLog4j2Content(),
+ MessageFormat.format("{0}/hive-exec-log4j2.properties",
confDir),
+ hiveUser,
+ hiveGroup,
+ Constants.PERMISSION_644,
+ hiveParams.getGlobalParamsMap());
+
+ LinuxFileUtils.toFileByTemplate(
+ hiveParams.getLlapCliLog4j2Content(),
+ MessageFormat.format("{0}/llap-cli-log4j2.properties",
confDir),
+ hiveUser,
+ hiveGroup,
+ Constants.PERMISSION_644,
+ hiveParams.getGlobalParamsMap());
+
+ LinuxFileUtils.toFileByTemplate(
+ hiveParams.getLlapDaemonLog4j2Content(),
+ MessageFormat.format("{0}/llap-daemon-log4j2.properties",
confDir),
+ hiveUser,
+ hiveGroup,
+ Constants.PERMISSION_644,
+ hiveParams.getGlobalParamsMap());
+
+ log.info("Successfully configured Hive");
+ return ShellResult.success();
+ }
+}
diff --git
a/bigtop-manager-stack/bigtop-manager-stack-core/src/main/java/org/apache/bigtop/manager/stack/core/tarball/TarballDownloader.java
b/bigtop-manager-stack/bigtop-manager-stack-core/src/main/java/org/apache/bigtop/manager/stack/core/tarball/TarballDownloader.java
index 363ce53e..4bf31665 100644
---
a/bigtop-manager-stack/bigtop-manager-stack-core/src/main/java/org/apache/bigtop/manager/stack/core/tarball/TarballDownloader.java
+++
b/bigtop-manager-stack/bigtop-manager-stack-core/src/main/java/org/apache/bigtop/manager/stack/core/tarball/TarballDownloader.java
@@ -61,7 +61,7 @@ public class TarballDownloader {
log.info("Checksum validate successfully for [{}]",
localFile.getAbsolutePath());
}
- private static void download(String remoteUrl, String saveDir) {
+ public static void download(String remoteUrl, String saveDir) {
int i = 1;
while (true) {
Boolean downloaded = downloadFile(remoteUrl, saveDir);
diff --git
a/bigtop-manager-stack/bigtop-manager-stack-core/src/main/java/org/apache/bigtop/manager/stack/core/utils/linux/LinuxFileUtils.java
b/bigtop-manager-stack/bigtop-manager-stack-core/src/main/java/org/apache/bigtop/manager/stack/core/utils/linux/LinuxFileUtils.java
index c0912fc9..87070926 100644
---
a/bigtop-manager-stack/bigtop-manager-stack-core/src/main/java/org/apache/bigtop/manager/stack/core/utils/linux/LinuxFileUtils.java
+++
b/bigtop-manager-stack/bigtop-manager-stack-core/src/main/java/org/apache/bigtop/manager/stack/core/utils/linux/LinuxFileUtils.java
@@ -275,6 +275,29 @@ public class LinuxFileUtils {
}
}
    /**
     * Overwrites {@code source} with {@code content} by invoking
     * {@code echo <content> > <source>} through {@code sudoExecCmd}.
     *
     * NOTE(review): the {@code ">"} token only performs redirection if
     * {@code sudoExecCmd} runs the parameter list through a shell; if it exec()s
     * the arguments directly, {@code echo} receives {@code ">"} and the path as
     * literal arguments and no file is written — confirm sudoExecCmd semantics.
     * Also, {@code content} is passed unescaped, so shell metacharacters in it
     * could break or alter the command.
     *
     * @param source  target file path; must not be blank
     * @param content text to write
     * @return stdout of the command
     * @throws StackException if source is blank, the command fails, or I/O fails
     */
    public static String writeFile(String source, String content) {
        if (StringUtils.isBlank(source)) {
            throw new StackException("source must not be empty");
        }

        List<String> builderParameters = new ArrayList<>();
        builderParameters.add("echo");
        builderParameters.add(content);
        builderParameters.add(">");
        builderParameters.add(source);

        try {
            ShellResult shellResult = sudoExecCmd(builderParameters);
            if (shellResult.getExitCode() != MessageConstants.SUCCESS_CODE) {
                throw new StackException(shellResult.getErrMsg());
            }

            return shellResult.getOutput();
        } catch (IOException e) {
            throw new StackException(e);
        }
    }
+
/**
* create symbolic link
*