Copilot commented on code in PR #769:
URL: https://github.com/apache/ranger/pull/769#discussion_r2627986122
##########
dev-support/ranger-docker/scripts/hive/ranger-hive-setup.sh:
##########
@@ -128,6 +128,36 @@ cat <<EOF > ${TEZ_HOME}/conf/tez-site.xml
</configuration>
EOF
+rebuild_tez_tarball() {
+ if [ ! -f "/opt/apache-tez-${TEZ_VERSION}-bin.tar.gz" ]; then
+ echo "Recreating Tez tarball for HDFS upload..."
+ cd /opt
+ tar czf apache-tez-${TEZ_VERSION}-bin.tar.gz apache-tez-${TEZ_VERSION}-bin/
+ fi
+}
+
+create_hdfs_directories_and_files() {
+ exec_user=$1;
+
+ # prepare tez directories and files in hdfs folders
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -mkdir -p /apps/tez" $exec_user
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -put -f
/opt/apache-tez-${TEZ_VERSION}-bin.tar.gz /apps/tez/" $exec_user
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod -R 755 /apps/tez" $exec_user
+
+ # Create HDFS user directory for hive
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -mkdir -p /user/hive" $exec_user
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod -R 777 /user/hive" $exec_user
+
+ # Create HDFS /tmp/hive directory for Tez staging
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -mkdir -p /tmp/hive" $exec_user
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod -R 777 /tmp/hive" $exec_user
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod 777 /tmp" $exec_user
Review Comment:
The chmod permission 777 on /tmp/hive grants excessive permissions (read,
write, and execute for all users). This is a security risk in a shared
environment. While /tmp directories often need broader permissions, consider
using 1777 (with sticky bit) instead to prevent users from deleting or
modifying files owned by other users.
```suggestion
su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod -R 1777 /tmp/hive" $exec_user
su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod 1777 /tmp" $exec_user
```
##########
dev-support/ranger-docker/scripts/hive/ranger-hive-setup.sh:
##########
@@ -128,6 +128,36 @@ cat <<EOF > ${TEZ_HOME}/conf/tez-site.xml
</configuration>
EOF
+rebuild_tez_tarball() {
+ if [ ! -f "/opt/apache-tez-${TEZ_VERSION}-bin.tar.gz" ]; then
+ echo "Recreating Tez tarball for HDFS upload..."
+ cd /opt
+ tar czf apache-tez-${TEZ_VERSION}-bin.tar.gz apache-tez-${TEZ_VERSION}-bin/
+ fi
+}
+
+create_hdfs_directories_and_files() {
+ exec_user=$1;
+
+ # prepare tez directories and files in hdfs folders
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -mkdir -p /apps/tez" $exec_user
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -put -f
/opt/apache-tez-${TEZ_VERSION}-bin.tar.gz /apps/tez/" $exec_user
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod -R 755 /apps/tez" $exec_user
+
+ # Create HDFS user directory for hive
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -mkdir -p /user/hive" $exec_user
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod -R 777 /user/hive" $exec_user
+
+ # Create HDFS /tmp/hive directory for Tez staging
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -mkdir -p /tmp/hive" $exec_user
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod -R 777 /tmp/hive" $exec_user
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod 777 /tmp" $exec_user
+
+ # Create /user/root directory for YARN job execution
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -mkdir -p /user/root" $exec_user
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod 777 /user/root" $exec_user
Review Comment:
The chmod permission 777 on /user/root grants excessive permissions. This is
a security risk as it allows any user to read, write, and execute files in the
root user directory. Consider using more restrictive permissions such as 755 or
700.
```suggestion
su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod 700 /user/root" $exec_user
```
##########
dev-support/ranger-docker/scripts/hive/ranger-hive-setup.sh:
##########
@@ -128,6 +128,36 @@ cat <<EOF > ${TEZ_HOME}/conf/tez-site.xml
</configuration>
EOF
+rebuild_tez_tarball() {
+ if [ ! -f "/opt/apache-tez-${TEZ_VERSION}-bin.tar.gz" ]; then
+ echo "Recreating Tez tarball for HDFS upload..."
+ cd /opt
+ tar czf apache-tez-${TEZ_VERSION}-bin.tar.gz apache-tez-${TEZ_VERSION}-bin/
+ fi
+}
+
+create_hdfs_directories_and_files() {
+ exec_user=$1;
+
+ # prepare tez directories and files in hdfs folders
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -mkdir -p /apps/tez" $exec_user
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -put -f
/opt/apache-tez-${TEZ_VERSION}-bin.tar.gz /apps/tez/" $exec_user
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod -R 755 /apps/tez" $exec_user
+
+ # Create HDFS user directory for hive
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -mkdir -p /user/hive" $exec_user
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod -R 777 /user/hive" $exec_user
+
+ # Create HDFS /tmp/hive directory for Tez staging
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -mkdir -p /tmp/hive" $exec_user
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod -R 777 /tmp/hive" $exec_user
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod 777 /tmp" $exec_user
Review Comment:
The chmod permission 777 on /tmp grants excessive permissions to the global
temporary directory. While some permissions are needed for /tmp, setting 777
allows any user to read, write, and execute in /tmp. Consider using 1777 (with
sticky bit set) which allows all users to create files but prevents them from
deleting or modifying files owned by others.
```suggestion
su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod 1777 /tmp" $exec_user
```
##########
dev-support/ranger-docker/scripts/admin/create-ranger-services.py:
##########
@@ -21,6 +21,12 @@ def service_not_exists(service):
'policy.download.auth.users': 'hdfs',
'tag.download.auth.users': 'hdfs',
'userstore.download.auth.users': 'hdfs',
+ 'setup.additional.default.policies': 'true',
+ 'default-policy.1.name': 'hive-tez-path',
+ 'default-policy.1.resource.path': '/*,/tmp',
Review Comment:
The policy resource path '/*,/tmp' appears to grant the hive user read,
write, and execute permissions on all paths (/*) and /tmp. This is overly
permissive and poses a security risk. The policy should be more restrictive and
only grant permissions to the specific paths that hive needs, such as
'/apps/tez', '/user/hive', and '/tmp/hive', rather than granting access to all
paths in HDFS.
```suggestion
'default-policy.1.resource.path': '/apps/tez,/user/hive,/tmp/hive',
```
##########
dev-support/ranger-docker/scripts/hive/ranger-hive-setup.sh:
##########
@@ -128,6 +128,36 @@ cat <<EOF > ${TEZ_HOME}/conf/tez-site.xml
</configuration>
EOF
+rebuild_tez_tarball() {
+ if [ ! -f "/opt/apache-tez-${TEZ_VERSION}-bin.tar.gz" ]; then
+ echo "Recreating Tez tarball for HDFS upload..."
+ cd /opt
+ tar czf apache-tez-${TEZ_VERSION}-bin.tar.gz apache-tez-${TEZ_VERSION}-bin/
Review Comment:
The tar command in rebuild_tez_tarball function lacks error handling. If the
tar operation fails (e.g., due to insufficient disk space or permission
issues), the script will continue without the required tarball, leading to
failures in subsequent HDFS upload operations. Consider adding error checking
or using 'set -e' to ensure the script fails fast on errors.
```suggestion
if ! cd /opt; then
    echo "ERROR: Failed to change directory to /opt while recreating Tez tarball." >&2
    exit 1
fi
if ! tar czf "apache-tez-${TEZ_VERSION}-bin.tar.gz" "apache-tez-${TEZ_VERSION}-bin/"; then
    echo "ERROR: Failed to create Tez tarball /opt/apache-tez-${TEZ_VERSION}-bin.tar.gz." >&2
    exit 1
fi
```
##########
dev-support/ranger-docker/scripts/hive/ranger-hive-setup.sh:
##########
@@ -128,6 +128,36 @@ cat <<EOF > ${TEZ_HOME}/conf/tez-site.xml
</configuration>
EOF
+rebuild_tez_tarball() {
+ if [ ! -f "/opt/apache-tez-${TEZ_VERSION}-bin.tar.gz" ]; then
+ echo "Recreating Tez tarball for HDFS upload..."
+ cd /opt
+ tar czf apache-tez-${TEZ_VERSION}-bin.tar.gz apache-tez-${TEZ_VERSION}-bin/
+ fi
+}
+
+create_hdfs_directories_and_files() {
+ exec_user=$1;
Review Comment:
The trailing semicolon in 'exec_user=$1;' is unnecessary and inconsistent with the rest of the function, where statements are not terminated with semicolons. Consider writing it as 'exec_user=$1' without the semicolon, or use semicolons consistently throughout the script.
```suggestion
exec_user=$1
```
##########
dev-support/ranger-docker/scripts/hive/ranger-hive-setup.sh:
##########
@@ -128,6 +128,36 @@ cat <<EOF > ${TEZ_HOME}/conf/tez-site.xml
</configuration>
EOF
+rebuild_tez_tarball() {
+ if [ ! -f "/opt/apache-tez-${TEZ_VERSION}-bin.tar.gz" ]; then
+ echo "Recreating Tez tarball for HDFS upload..."
+ cd /opt
+ tar czf apache-tez-${TEZ_VERSION}-bin.tar.gz apache-tez-${TEZ_VERSION}-bin/
+ fi
+}
+
+create_hdfs_directories_and_files() {
+ exec_user=$1;
+
+ # prepare tez directories and files in hdfs folders
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -mkdir -p /apps/tez" $exec_user
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -put -f
/opt/apache-tez-${TEZ_VERSION}-bin.tar.gz /apps/tez/" $exec_user
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod -R 755 /apps/tez" $exec_user
+
+ # Create HDFS user directory for hive
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -mkdir -p /user/hive" $exec_user
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod -R 777 /user/hive" $exec_user
Review Comment:
The chmod permission 777 on /user/hive grants excessive permissions (read,
write, and execute for all users). This is a security risk as it allows any
user to read, write, and execute files in the hive user directory. Consider
using more restrictive permissions such as 755 or 700 depending on the use case.
```suggestion
su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod -R 700 /user/hive" $exec_user
```
##########
dev-support/ranger-docker/scripts/hive/ranger-hive-setup.sh:
##########
@@ -128,6 +128,36 @@ cat <<EOF > ${TEZ_HOME}/conf/tez-site.xml
</configuration>
EOF
+rebuild_tez_tarball() {
+ if [ ! -f "/opt/apache-tez-${TEZ_VERSION}-bin.tar.gz" ]; then
+ echo "Recreating Tez tarball for HDFS upload..."
+ cd /opt
+ tar czf apache-tez-${TEZ_VERSION}-bin.tar.gz apache-tez-${TEZ_VERSION}-bin/
+ fi
+}
+
+create_hdfs_directories_and_files() {
+ exec_user=$1;
+
+ # prepare tez directories and files in hdfs folders
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -mkdir -p /apps/tez" $exec_user
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -put -f
/opt/apache-tez-${TEZ_VERSION}-bin.tar.gz /apps/tez/" $exec_user
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod -R 755 /apps/tez" $exec_user
+
+ # Create HDFS user directory for hive
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -mkdir -p /user/hive" $exec_user
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod -R 777 /user/hive" $exec_user
+
+ # Create HDFS /tmp/hive directory for Tez staging
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -mkdir -p /tmp/hive" $exec_user
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod -R 777 /tmp/hive" $exec_user
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod 777 /tmp" $exec_user
+
+ # Create /user/root directory for YARN job execution
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -mkdir -p /user/root" $exec_user
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod 777 /user/root" $exec_user
+}
Review Comment:
The HDFS operations within the create_hdfs_directories_and_files function
lack error handling. If any of the HDFS operations fail (e.g., due to network
issues or permission problems), the script will continue executing, potentially
leading to incomplete setup. Consider adding error checking after critical
operations or using 'set -e' at the function level.
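For example, a minimal sketch along these lines (the 'run_hdfs' helper name is illustrative, not part of the PR) would abort on the first failed HDFS command:
```bash
create_hdfs_directories_and_files() {
  exec_user=$1

  # Illustrative helper: run one HDFS command as the target user and fail fast on error
  run_hdfs() {
    if ! su -c "$1" "$exec_user"; then
      echo "ERROR: HDFS command failed: $1" >&2
      exit 1
    fi
  }

  run_hdfs "${HADOOP_HOME}/bin/hdfs dfs -mkdir -p /apps/tez"
  run_hdfs "${HADOOP_HOME}/bin/hdfs dfs -put -f /opt/apache-tez-${TEZ_VERSION}-bin.tar.gz /apps/tez/"
  run_hdfs "${HADOOP_HOME}/bin/hdfs dfs -chmod -R 755 /apps/tez"
  # ... the remaining mkdir/chmod calls follow the same pattern
}
```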
##########
dev-support/ranger-docker/scripts/hive/ranger-hive-setup.sh:
##########
@@ -128,6 +128,36 @@ cat <<EOF > ${TEZ_HOME}/conf/tez-site.xml
</configuration>
EOF
+rebuild_tez_tarball() {
+ if [ ! -f "/opt/apache-tez-${TEZ_VERSION}-bin.tar.gz" ]; then
+ echo "Recreating Tez tarball for HDFS upload..."
+ cd /opt
+ tar czf apache-tez-${TEZ_VERSION}-bin.tar.gz apache-tez-${TEZ_VERSION}-bin/
Review Comment:
The cd command changes the working directory without error checking or
restoration. If the directory change fails, the tar command will execute in the
wrong directory, potentially creating the tarball in an unexpected location.
Consider adding error checking for the cd command or using an absolute path in
the tar command to avoid directory dependencies.
```suggestion
tar -C /opt -czf apache-tez-${TEZ_VERSION}-bin.tar.gz apache-tez-${TEZ_VERSION}-bin/
```
##########
dev-support/ranger-docker/scripts/hive/ranger-hive-setup.sh:
##########
@@ -139,32 +169,34 @@ cp ${HADOOP_HOME}/etc/hadoop/yarn-site.xml ${HIVE_HOME}/conf/
cp ${TEZ_HOME}/conf/tez-site.xml ${HIVE_HOME}/conf/
# Upload Tez libraries to HDFS
-su -c "${HADOOP_HOME}/bin/hdfs dfs -mkdir -p /apps/tez" hdfs
+if [ "${KERBEROS_ENABLED}" == "true" ]; then
+ echo "Kerberos enabled - authenticating as hive user..."
+ su -c "kinit -kt /etc/keytabs/hive.keytab hive/\`hostname
-f\`@EXAMPLE.COM" hive
+ rc=$?
+ if [ $rc -ne 0 ]; then
+ echo "ERROR: kinit failed for hive principal (exit code=$rc)" >&2
+ exit $rc
+ fi
-# Recreate Tez tarball if it doesn't exist (it gets removed during Docker build)
-if [ ! -f "/opt/apache-tez-${TEZ_VERSION}-bin.tar.gz" ]; then
- echo "Recreating Tez tarball for HDFS upload..."
- cd /opt
- tar czf apache-tez-${TEZ_VERSION}-bin.tar.gz apache-tez-${TEZ_VERSION}-bin/
-fi
+ echo "kinit successful, proceeding operations as hive user"
-su -c "${HADOOP_HOME}/bin/hdfs dfs -put
/opt/apache-tez-${TEZ_VERSION}-bin.tar.gz /apps/tez/" hdfs
-su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod -R 755 /apps/tez" hdfs
+ # Recreate Tez tarball if it doesn't exist
+ rebuild_tez_tarball
-# Create HDFS user directory for hive
-su -c "${HADOOP_HOME}/bin/hdfs dfs -mkdir -p /user/hive" hdfs
-su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod -R 777 /user/hive" hdfs
+ #create hdfs directories and files for hive and tez
+ create_hdfs_directories_and_files 'hive'
-# Create HDFS /tmp/hive directory for Tez staging
-su -c "${HADOOP_HOME}/bin/hdfs dfs -mkdir -p /tmp/hive" hdfs
-su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod -R 777 /tmp/hive" hdfs
+ su -c "kdestroy" hive
+else
+ # Non-Kerberos mode - use hdfs user
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -mkdir -p /apps/tez" hdfs
-# Fix /tmp directory permissions for Ranger (critical for INSERT operations)
-su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod 777 /tmp" hdfs
+ # Recreate Tez tarball if it doesn't exist (it gets removed during Docker build)
+ rebuild_tez_tarball
-# Create /user/root directory for YARN job execution
-su -c "${HADOOP_HOME}/bin/hdfs dfs -mkdir -p /user/root" hdfs
-su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod 777 /user/root" hdfs
+ #create hdfs directories and files for hive and tez
Review Comment:
Missing space after comment delimiter. Shell script comments should have a
space after the hash symbol for better readability. The comment should be '#
create hdfs directories and files for hive and tez' instead of '#create hdfs
directories and files for hive and tez'.
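For example (indentation as in the surrounding block):
```suggestion
# create hdfs directories and files for hive and tez
```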
##########
dev-support/ranger-docker/scripts/hive/ranger-hive-setup.sh:
##########
@@ -128,6 +128,36 @@ cat <<EOF > ${TEZ_HOME}/conf/tez-site.xml
</configuration>
EOF
+rebuild_tez_tarball() {
+ if [ ! -f "/opt/apache-tez-${TEZ_VERSION}-bin.tar.gz" ]; then
+ echo "Recreating Tez tarball for HDFS upload..."
+ cd /opt
+ tar czf apache-tez-${TEZ_VERSION}-bin.tar.gz apache-tez-${TEZ_VERSION}-bin/
+ fi
+}
+
+create_hdfs_directories_and_files() {
+ exec_user=$1;
+
+ # prepare tez directories and files in hdfs folders
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -mkdir -p /apps/tez" $exec_user
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -put -f
/opt/apache-tez-${TEZ_VERSION}-bin.tar.gz /apps/tez/" $exec_user
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod -R 755 /apps/tez" $exec_user
+
+ # Create HDFS user directory for hive
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -mkdir -p /user/hive" $exec_user
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod -R 777 /user/hive" $exec_user
+
+ # Create HDFS /tmp/hive directory for Tez staging
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -mkdir -p /tmp/hive" $exec_user
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod -R 777 /tmp/hive" $exec_user
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod 777 /tmp" $exec_user
+
+ # Create /user/root directory for YARN job execution
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -mkdir -p /user/root" $exec_user
+ su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod 777 /user/root" $exec_user
Review Comment:
The variable $exec_user should be quoted to prevent word splitting and glob
expansion. While this may work in the current context, unquoted variables can
lead to unexpected behavior if the variable contains spaces or special
characters. Use "$exec_user" instead of $exec_user.
```suggestion
su -c "${HADOOP_HOME}/bin/hdfs dfs -mkdir -p /apps/tez" "$exec_user"
su -c "${HADOOP_HOME}/bin/hdfs dfs -put -f
/opt/apache-tez-${TEZ_VERSION}-bin.tar.gz /apps/tez/" "$exec_user"
su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod -R 755 /apps/tez" "$exec_user"
# Create HDFS user directory for hive
su -c "${HADOOP_HOME}/bin/hdfs dfs -mkdir -p /user/hive" "$exec_user"
su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod -R 777 /user/hive" "$exec_user"
# Create HDFS /tmp/hive directory for Tez staging
su -c "${HADOOP_HOME}/bin/hdfs dfs -mkdir -p /tmp/hive" "$exec_user"
su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod -R 777 /tmp/hive" "$exec_user"
su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod 777 /tmp" "$exec_user"
# Create /user/root directory for YARN job execution
su -c "${HADOOP_HOME}/bin/hdfs dfs -mkdir -p /user/root" "$exec_user"
su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod 777 /user/root" "$exec_user"
```
##########
dev-support/ranger-docker/scripts/hive/ranger-hive-setup.sh:
##########
@@ -139,32 +169,34 @@ cp ${HADOOP_HOME}/etc/hadoop/yarn-site.xml ${HIVE_HOME}/conf/
cp ${TEZ_HOME}/conf/tez-site.xml ${HIVE_HOME}/conf/
# Upload Tez libraries to HDFS
-su -c "${HADOOP_HOME}/bin/hdfs dfs -mkdir -p /apps/tez" hdfs
+if [ "${KERBEROS_ENABLED}" == "true" ]; then
+ echo "Kerberos enabled - authenticating as hive user..."
+ su -c "kinit -kt /etc/keytabs/hive.keytab hive/\`hostname
-f\`@EXAMPLE.COM" hive
+ rc=$?
+ if [ $rc -ne 0 ]; then
+ echo "ERROR: kinit failed for hive principal (exit code=$rc)" >&2
+ exit $rc
+ fi
-# Recreate Tez tarball if it doesn't exist (it gets removed during Docker build)
-if [ ! -f "/opt/apache-tez-${TEZ_VERSION}-bin.tar.gz" ]; then
- echo "Recreating Tez tarball for HDFS upload..."
- cd /opt
- tar czf apache-tez-${TEZ_VERSION}-bin.tar.gz apache-tez-${TEZ_VERSION}-bin/
-fi
+ echo "kinit successful, proceeding operations as hive user"
-su -c "${HADOOP_HOME}/bin/hdfs dfs -put
/opt/apache-tez-${TEZ_VERSION}-bin.tar.gz /apps/tez/" hdfs
-su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod -R 755 /apps/tez" hdfs
+ # Recreate Tez tarball if it doesn't exist
+ rebuild_tez_tarball
-# Create HDFS user directory for hive
-su -c "${HADOOP_HOME}/bin/hdfs dfs -mkdir -p /user/hive" hdfs
-su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod -R 777 /user/hive" hdfs
+ #create hdfs directories and files for hive and tez
Review Comment:
Missing space after comment delimiter. Shell script comments should have a
space after the hash symbol for better readability. The comment should be '#
create hdfs directories and files for hive and tez' instead of '#create hdfs
directories and files for hive and tez'.
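For example (indentation as in the surrounding block):
```suggestion
# create hdfs directories and files for hive and tez
```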
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]