This is an automated email from the ASF dual-hosted git repository.

madhan pushed a commit to branch ranger-2.6
in repository https://gitbox.apache.org/repos/asf/ranger.git

commit 8f14fc68539469ac3404caaa2748aeafdb0a8377
Author: Abhishek Kumar <[email protected]>
AuthorDate: Tue Sep 10 23:07:43 2024 -0700

    RANGER-3801: Add support for Ozone in docker (#377)
    
    Includes the following changes:
    - Add support for Ozone in docker with ranger-ozone plugin enabled
    - Docker CI updated to include ozone containers
    - download-archives.sh updated to allow download of specific service 
tarballs
    - Updated docker README
    
    (cherry picked from commit f49413f9731af7192b8cb4fa564fef1f2756510d)
---
 .github/workflows/maven.yml                        |  12 +-
 dev-support/ranger-docker/.dockerignore            |   1 +
 dev-support/ranger-docker/.env                     |   5 +
 dev-support/ranger-docker/Dockerfile.ranger-base   |   1 +
 dev-support/ranger-docker/Dockerfile.ranger-ozone  |  30 ++
 dev-support/ranger-docker/README.md                | 129 +++---
 .../ranger-docker/config/ozone/docker-config       |  68 +++
 .../config/ozone/enable-ozone-plugin.sh            | 497 +++++++++++++++++++++
 .../ozone/ranger-ozone-plugin-install.properties   |  83 ++++
 .../config/ozone/ranger-ozone-setup.sh             |  34 ++
 .../ranger-docker/docker-compose.ranger-ozone.yml  |  88 ++++
 dev-support/ranger-docker/download-archives.sh     |  45 +-
 .../scripts/create-ranger-services.py              |  10 +-
 .../scripts/ozone-plugin-docker-setup.sh           |  28 ++
 plugin-ozone/pom.xml                               |   5 +
 15 files changed, 964 insertions(+), 72 deletions(-)

diff --git a/.github/workflows/maven.yml b/.github/workflows/maven.yml
index 6f0a0602a..e8d3191e6 100644
--- a/.github/workflows/maven.yml
+++ b/.github/workflows/maven.yml
@@ -90,7 +90,7 @@ jobs:
       - name: Run download-archives.sh
         run: |
           cd dev-support/ranger-docker
-          chmod +x download-archives.sh && ./download-archives.sh
+          ./download-archives.sh hadoop hive hbase kafka knox ozone
       
       - name: Build all ranger-service images
         run: |
@@ -109,10 +109,13 @@ jobs:
           -f docker-compose.ranger-hbase.yml \
           -f docker-compose.ranger-kafka.yml \
           -f docker-compose.ranger-hive.yml \
-          -f docker-compose.ranger-knox.yml build
+          -f docker-compose.ranger-knox.yml \
+          -f docker-compose.ranger-ozone.yml build
+
       - name: Bring up containers
         run: |
           cd dev-support/ranger-docker
+          ./scripts/ozone-plugin-docker-setup.sh
           export RANGER_DB_TYPE=postgres
           docker compose \
           -f docker-compose.ranger-${RANGER_DB_TYPE}.yml \
@@ -124,11 +127,12 @@ jobs:
           -f docker-compose.ranger-hbase.yml \
           -f docker-compose.ranger-kafka.yml \
           -f docker-compose.ranger-hive.yml \
-          -f docker-compose.ranger-knox.yml up -d
+          -f docker-compose.ranger-knox.yml \
+          -f docker-compose.ranger-ozone.yml up -d
       - name: Check status of containers and remove them
         run: | 
           sleep 60
-          containers=(ranger ranger-zk ranger-solr ranger-postgres 
ranger-usersync ranger-tagsync ranger-kms ranger-hadoop ranger-hbase 
ranger-kafka ranger-hive ranger-knox);
+          containers=(ranger ranger-zk ranger-solr ranger-postgres 
ranger-usersync ranger-tagsync ranger-kms ranger-hadoop ranger-hbase 
ranger-kafka ranger-hive ranger-knox ozone-om ozone-scm ozone-datanode);
           flag=true;
           for container in "${containers[@]}"; do
               if [[ $(docker inspect -f '{{.State.Running}}' $container 
2>/dev/null) == "true" ]]; then
diff --git a/dev-support/ranger-docker/.dockerignore 
b/dev-support/ranger-docker/.dockerignore
index a994d013c..1125a66ff 100644
--- a/dev-support/ranger-docker/.dockerignore
+++ b/dev-support/ranger-docker/.dockerignore
@@ -12,5 +12,6 @@
 !dist/ranger-*-kafka-plugin.tar.gz
 !dist/ranger-*-knox-plugin.tar.gz
 !dist/ranger-*-trino-plugin.tar.gz
+!dist/ranger-*-ozone-plugin.tar.gz
 !downloads/*
 !scripts/*
diff --git a/dev-support/ranger-docker/.env b/dev-support/ranger-docker/.env
index d227d566e..e273422a0 100644
--- a/dev-support/ranger-docker/.env
+++ b/dev-support/ranger-docker/.env
@@ -47,6 +47,10 @@ HIVE_HADOOP_VERSION=3.1.1
 KAFKA_VERSION=2.8.2
 KNOX_VERSION=2.0.0
 TRINO_VERSION=377
+OZONE_VERSION=1.4.0
+OZONE_RUNNER_VERSION=20230615-1
+OZONE_RUNNER_IMAGE=apache/ozone-runner
+OZONE_OPTS=
 
 # versions of ranger services
 RANGER_VERSION=2.6.0-SNAPSHOT
@@ -62,6 +66,7 @@ HBASE_PLUGIN_VERSION=2.6.0-SNAPSHOT
 KAFKA_PLUGIN_VERSION=2.6.0-SNAPSHOT
 KNOX_PLUGIN_VERSION=2.6.0-SNAPSHOT
 TRINO_PLUGIN_VERSION=2.6.0-SNAPSHOT
+OZONE_PLUGIN_VERSION=2.6.0-SNAPSHOT
 
 # To enable debug logs
 DEBUG_ADMIN=false
diff --git a/dev-support/ranger-docker/Dockerfile.ranger-base 
b/dev-support/ranger-docker/Dockerfile.ranger-base
index e9e0f1aae..4414c13dc 100644
--- a/dev-support/ranger-docker/Dockerfile.ranger-base
+++ b/dev-support/ranger-docker/Dockerfile.ranger-base
@@ -50,6 +50,7 @@ RUN groupadd ranger && \
     useradd -g hadoop -ms /bin/bash hive && \
     useradd -g hadoop -ms /bin/bash hbase && \
     useradd -g hadoop -ms /bin/bash kafka && \
+    useradd -g hadoop -ms /bin/bash ozone && \
     groupadd knox && \
     useradd -g knox -ms /bin/bash knox && \
     mkdir -p /home/ranger/dist && \
diff --git a/dev-support/ranger-docker/Dockerfile.ranger-ozone 
b/dev-support/ranger-docker/Dockerfile.ranger-ozone
new file mode 100644
index 000000000..3c1f6ef6f
--- /dev/null
+++ b/dev-support/ranger-docker/Dockerfile.ranger-ozone
@@ -0,0 +1,30 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ARG OZONE_RUNNER_IMAGE
+ARG OZONE_RUNNER_VERSION
+FROM ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
+
+ARG OZONE_HOME
+ARG OZONE_PLUGIN_VERSION
+
+USER root
+
+RUN useradd -g hadoop -ms /bin/bash ozone
+RUN mkdir -p -m 755 /var/log/ozone
+
+# Required to locate the plugin jars and the config files
+ENV 
OZONE_MANAGER_CLASSPATH="${OZONE_HOME}/ranger-ozone-plugin/lib/libext/*:${OZONE_HOME}/ranger-ozone-plugin/conf"
diff --git a/dev-support/ranger-docker/README.md 
b/dev-support/ranger-docker/README.md
index 4ebaf27c8..43769081a 100644
--- a/dev-support/ranger-docker/README.md
+++ b/dev-support/ranger-docker/README.md
@@ -19,71 +19,94 @@ under the License.
 
 ## Overview
 
-Docker files in this folder create docker images and run them to build Apache 
Ranger, deploy Apache Ranger and dependent services in containers.
+Use Dockerfiles in this directory to create docker images and run them to 
build Apache Ranger, deploy Apache Ranger and dependent services in containers.
 
-## Usage
+### Environment Setup
 
-1. Ensure that you have recent version of Docker installed from 
[docker.io](http://www.docker.io) (as of this writing: Engine 20.10.5, Compose 
1.28.5).
+- Ensure that you have recent version of Docker installed from 
[docker.io](http://www.docker.io) (as of this writing: Engine 20.10.5, Compose 
1.28.5).
    Make sure to configure docker with at least 6gb of memory.
 
-2. Update environment variables in ```.env``` file, if necessary
+- Update environment variables in ```.env``` file, if necessary
 
-3. Set ```dev-support/ranger-docker``` as your working directory.
+- Set ```dev-support/ranger-docker``` as your working directory.
 
-4. Execute following command to download necessary archives to setup 
Ranger/HDFS/Hive/HBase/Kafka/Knox services:
+- Execute following command to download necessary archives to setup 
Ranger/HDFS/Hive/HBase/Kafka/Knox/Ozone services:
    ~~~
-   chmod +x download-archives.sh && ./download-archives.sh
+   chmod +x download-archives.sh
+   # use a subset of the below to download specific services
+   ./download-archives.sh hadoop hive hbase kafka knox ozone
    ~~~
 
-5. Execute following commands to set environment variables to build Apache 
Ranger docker containers:
+- Execute following commands to set environment variables to build Apache 
Ranger docker containers:
    ~~~
    export DOCKER_BUILDKIT=1
    export COMPOSE_DOCKER_CLI_BUILD=1
    export RANGER_DB_TYPE=postgres
    ~~~
 
-6. Build Apache Ranger in containers using docker-compose
-
-   1. Execute following command to build Apache Ranger:
-      ~~~
-      docker-compose -f docker-compose.ranger-base.yml -f 
docker-compose.ranger-build.yml up
-      ~~~
-
-      Time taken to complete the build might vary (upto an hour), depending on 
status of ```${HOME}/.m2``` directory cache.
-
-   2. Alternatively, the following commands can be executed from the parent 
directory
-      1. To generate tarballs:```mvn clean package -DskipTests```
-
-      2. Copy the tarballs and version file to 
```dev-support/ranger-docker/dist```
-         ~~~
-         cp target/ranger-* dev-support/ranger-docker/dist/
-         cp target/version dev-support/ranger-docker/dist/
-         ~~~
-
-      3. Build the ranger-base image:
-         ~~~
-         # ubuntu base image:
-         docker-compose -f docker-compose.ranger-base.yml build --no-cache
-         # OR
-         # ubi base image:
-         docker-compose -f docker-compose.ranger-base-ubi.yml build --no-cache
-         ~~~
-7. To enable file based sync source for usersync execute: ```export 
ENABLE_FILE_SYNC_SOURCE=true```
-
-8. Execute following command to start Ranger, Ranger Usersync, Ranger Tagsync, 
Ranger enabled HDFS/YARN/HBase/Hive/Kafka/Knox and dependent services (Solr, 
DB) in containers:
-   ~~~
-   docker-compose -f docker-compose.ranger-base.yml -f 
docker-compose.ranger.yml -f docker-compose.ranger-${RANGER_DB_TYPE}.yml -f 
docker-compose.ranger-usersync.yml -f docker-compose.ranger-tagsync.yml -f 
docker-compose.ranger-kms.yml -f docker-compose.ranger-hadoop.yml -f 
docker-compose.ranger-hbase.yml -f docker-compose.ranger-kafka.yml -f 
docker-compose.ranger-hive.yml -f docker-compose.ranger-knox.yml up -d
-   ~~~
-
-       - valid values for RANGER_DB_TYPE: mysql or postgres
-9. To run ranger enabled Trino in containers (Requires docker build with JDK 
11):
-   ~~~
-   docker-compose -f docker-compose.ranger-base.yml -f 
docker-compose.ranger.yml -f docker-compose.ranger-${RANGER_DB_TYPE}.yml -f 
docker-compose.ranger-trino.yml up -d
-   ~~~
-
-10. To rebuild specific images and start containers with the new image, use 
following command:
-   ~~~
-   docker-compose -f docker-compose.ranger-base.yml -f 
docker-compose.ranger.yml -f docker-compose.ranger-usersync.yml -f 
docker-compose.ranger-tagsync.yml -f docker-compose.ranger-kms.yml -f 
docker-compose.ranger-hadoop.yml -f docker-compose.ranger-hbase.yml -f 
docker-compose.ranger-kafka.yml -f docker-compose.ranger-hive.yml -f 
docker-compose.ranger-trino.yml -f docker-compose.ranger-knox.yml up -d 
--no-deps --force-recreate --build <service-1> <service-2>
-   ~~~
-
-9. Ranger Admin can be accessed at http://localhost:6080 (admin/rangerR0cks!)
+### Apache Ranger Build
+
+#### In containers using docker-compose
+
+Execute following command to build Apache Ranger:
+~~~
+docker-compose -f docker-compose.ranger-base.yml -f 
docker-compose.ranger-build.yml up
+~~~
+Time taken to complete the build might vary (upto an hour), depending on 
status of ```${HOME}/.m2``` directory cache.  
+
+
+#### OR
+#### Regular build
+
+~~~
+cd ./../../
+mvn clean package -DskipTests
+cp target/ranger-* dev-support/ranger-docker/dist/
+cp target/version dev-support/ranger-docker/dist/
+cd dev-support/ranger-docker
+~~~
+
+### Docker Image Build
+
+#### Prerequisite: ranger-base image build
+~~~
+# ubuntu base image:
+docker-compose -f docker-compose.ranger-base.yml build --no-cache
+# OR
+# ubi base image:
+docker-compose -f docker-compose.ranger-base-ubi.yml build --no-cache
+~~~
+#### Bring up ranger, usersync and tagsync containers
+~~~
+# To enable file based sync source for usersync do:
+# export ENABLE_FILE_SYNC_SOURCE=true
+
+# valid values for RANGER_DB_TYPE: mysql/postgres
+
+docker-compose -f docker-compose.ranger.yml -f 
docker-compose.ranger-${RANGER_DB_TYPE}.yml -f 
docker-compose.ranger-usersync.yml -f docker-compose.ranger-tagsync.yml up -d
+
+# Ranger Admin can be accessed at http://localhost:6080 (admin/rangerR0cks!)
+~~~
+#### Bring up hive container
+~~~
+docker-compose -f docker-compose.ranger.yml -f 
docker-compose.ranger-${RANGER_DB_TYPE}.yml -f docker-compose.ranger-hadoop.yml 
-f docker-compose.ranger-hive.yml up -d
+~~~
+#### Bring up hbase container
+~~~
+docker-compose -f docker-compose.ranger.yml -f 
docker-compose.ranger-${RANGER_DB_TYPE}.yml -f docker-compose.ranger-hadoop.yml 
-f docker-compose.ranger-hbase.yml up -d
+~~~
+#### Bring up ozone containers
+~~~
+./scripts/ozone-plugin-docker-setup.sh
+docker-compose -f docker-compose.ranger.yml -f 
docker-compose.ranger-${RANGER_DB_TYPE}.yml -f docker-compose.ranger-ozone.yml 
up -d
+~~~
+#### Bring up trino container (requires docker build with jdk 11):
+~~~
+docker-compose -f docker-compose.ranger.yml -f 
docker-compose.ranger-${RANGER_DB_TYPE}.yml -f docker-compose.ranger-trino.yml 
up -d
+~~~
+Similarly, check the `depends` section of the 
`docker-compose.ranger-service.yaml` file and add docker-compose files for 
these services when trying to bring up the `service` container.
+
+#### To rebuild specific images and start containers with the new image:
+~~~
+docker-compose -f docker-compose.ranger.yml -f 
docker-compose.ranger-usersync.yml -f docker-compose.ranger-tagsync.yml -f 
docker-compose.ranger-kms.yml -f docker-compose.ranger-hadoop.yml -f 
docker-compose.ranger-hbase.yml -f docker-compose.ranger-kafka.yml -f 
docker-compose.ranger-hive.yml -f docker-compose.ranger-trino.yml -f 
docker-compose.ranger-knox.yml up -d --no-deps --force-recreate --build 
<service-1> <service-2>
+~~~
diff --git a/dev-support/ranger-docker/config/ozone/docker-config 
b/dev-support/ranger-docker/config/ozone/docker-config
new file mode 100644
index 000000000..ae06dde87
--- /dev/null
+++ b/dev-support/ranger-docker/config/ozone/docker-config
@@ -0,0 +1,68 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+CORE-SITE.XML_fs.defaultFS=ofs://om
+CORE-SITE.XML_fs.trash.interval=1
+# For HttpFS service it is required to enable proxying users.
+CORE-SITE.XML_hadoop.proxyuser.hadoop.hosts=*
+CORE-SITE.XML_hadoop.proxyuser.hadoop.groups=*
+
+OZONE-SITE.XML_ozone.om.address=om
+OZONE-SITE.XML_ozone.om.http-address=om:9874
+OZONE-SITE.XML_ozone.scm.http-address=scm:9876
+OZONE-SITE.XML_ozone.scm.container.size=1GB
+OZONE-SITE.XML_ozone.scm.block.size=1MB
+OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB
+OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s
+OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1
+OZONE-SITE.XML_ozone.scm.names=scm
+OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data
+OZONE-SITE.XML_ozone.scm.block.client.address=scm
+OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
+#OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon
+OZONE-SITE.XML_ozone.scm.client.address=scm
+OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
+OZONE-SITE.XML_hdds.datanode.volume.min.free.space=100MB
+#OZONE-SITE.XML_ozone.recon.address=recon:9891
+#OZONE-SITE.XML_ozone.recon.http-address=0.0.0.0:9888
+#OZONE-SITE.XML_ozone.recon.https-address=0.0.0.0:9889
+#OZONE-SITE.XML_ozone.recon.om.snapshot.task.interval.delay=1m
+OZONE-SITE.XML_ozone.datanode.pipeline.limit=1
+OZONE-SITE.XML_hdds.scmclient.max.retry.timeout=30s
+OZONE-SITE.XML_hdds.container.report.interval=60s
+OZONE-SITE.XML_ozone.scm.stale.node.interval=30s
+OZONE-SITE.XML_ozone.scm.dead.node.interval=45s
+OZONE-SITE.XML_hdds.heartbeat.interval=5s
+OZONE-SITE.XML_ozone.scm.close.container.wait.duration=5s
+OZONE-SITE.XML_hdds.scm.replication.thread.interval=15s
+OZONE-SITE.XML_hdds.scm.replication.under.replicated.interval=5s
+OZONE-SITE.XML_hdds.scm.replication.over.replicated.interval=5s
+OZONE-SITE.XML_hdds.scm.wait.time.after.safemode.exit=30s
+OZONE-SITE.XML_ozone.http.basedir=/tmp/ozone_http
+
+OZONE-SITE.XML_hdds.container.ratis.datastream.enabled=true
+
+OZONE_CONF_DIR=/etc/hadoop
+OZONE_LOG_DIR=/var/log/hadoop
+
+no_proxy=om,scm,s3g,recon,kdc,localhost,127.0.0.1
+
+# Explicitly enable filesystem snapshot feature for this Docker compose cluster
+OZONE-SITE.XML_ozone.filesystem.snapshot.enabled=true
+
+# To enable Ranger as the Authorizer in Ozone
+OZONE-SITE.XML_ozone.acl.enabled=true
+OZONE-SITE.XML_ozone.acl.authorizer.class=org.apache.ranger.authorization.ozone.authorizer.RangerOzoneAuthorizer
diff --git a/dev-support/ranger-docker/config/ozone/enable-ozone-plugin.sh 
b/dev-support/ranger-docker/config/ozone/enable-ozone-plugin.sh
new file mode 100644
index 000000000..ac78458a3
--- /dev/null
+++ b/dev-support/ranger-docker/config/ozone/enable-ozone-plugin.sh
@@ -0,0 +1,497 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+function getInstallProperty() {
+    local propertyName=$1
+    local propertyValue=""
+
+    for file in "${COMPONENT_INSTALL_ARGS}" "${INSTALL_ARGS}"
+    do
+        if [ -f "${file}" ]
+        then
+            propertyValue=`grep "^${propertyName}[ \t]*=" ${file} | awk -F= '{ 
 sub("^[ \t]*", "", $2); sub("[ \t]*$", "", $2); print $2 }'`
+            if [ "${propertyValue}" != "" ]
+            then
+                break
+            fi
+        fi
+    done
+
+    echo ${propertyValue}
+}
+
+#
+# Base env variable for Ranger related files/directories
+#
+PROJ_NAME=ranger
+
+#
+# The script should be run by "root" user
+#
+
+if [ ! -w /etc/passwd ]
+then
+    echo "ERROR: $0 script should be run as root."
+    exit 1
+fi
+
+#Check for JAVA_HOME
+if [ "${JAVA_HOME}" == "" ]
+then
+    echo "ERROR: JAVA_HOME environment property not defined, aborting 
installation."
+    exit 1
+fi
+
+#
+# Identify the component, action from the script file
+#
+
+basedir=`dirname $0`
+if [ "${basedir}" = "." ]
+then
+    basedir=`pwd`
+elif [ "${basedir}" = ".." ]
+then
+    basedir=`(cd .. ;pwd)`
+fi
+
+#
+# As this script is common to all component, find the component name based on 
the script-name
+#
+
+COMPONENT_NAME=`basename $0 | cut -d. -f1 | sed -e 's:^disable-::' | sed -e 
's:^enable-::'`
+
+echo "${COMPONENT_NAME}" | grep 'plugin' > /dev/null 2>&1
+
+if [ $? -ne 0 ]
+then
+       echo "$0 : is not applicable for component [${COMPONENT_NAME}]. It is 
applicable only for ranger plugin component; Exiting ..."
+       exit 0
+fi
+
+HCOMPONENT_NAME=`echo ${COMPONENT_NAME} | sed -e 's:-plugin::'`
+
+CFG_OWNER_INF="${HCOMPONENT_NAME}:${HCOMPONENT_NAME}"
+
+if [ "${HCOMPONENT_NAME}" = "hdfs" ]
+then
+       HCOMPONENT_NAME="hadoop"
+fi
+
+#
+# Based on script name, identify if the action is enabled or disabled
+#
+
+basename $0 | cut -d. -f1 | grep '^enable-' > /dev/null 2>&1
+
+if [ $? -eq 0 ]
+then
+       action=enable
+else
+       action=disable
+fi
+
+
+#
+# environment variables for enable|disable scripts
+#
+
+PROJ_INSTALL_DIR=`(cd ${basedir} ; pwd)`
+SET_ENV_SCRIPT_NAME=set-${COMPONENT_NAME}-env.sh
+SET_ENV_SCRIPT_TEMPLATE=${PROJ_INSTALL_DIR}/install/conf.templates/enable/${SET_ENV_SCRIPT_NAME}
+DEFAULT_XML_CONFIG=${PROJ_INSTALL_DIR}/install/conf.templates/default/configuration.xml
+PROJ_LIB_DIR=${PROJ_INSTALL_DIR}/lib
+PROJ_INSTALL_LIB_DIR="${PROJ_INSTALL_DIR}/install/lib"
+INSTALL_ARGS="${PROJ_INSTALL_DIR}/install.properties"
+COMPONENT_INSTALL_ARGS="${PROJ_INSTALL_DIR}/${COMPONENT_NAME}-install.properties"
+JAVA=$JAVA_HOME/bin/java
+
+PLUGIN_DEPENDENT_LIB_DIR=lib/"${PROJ_NAME}-${COMPONENT_NAME}-impl"
+PROJ_LIB_PLUGIN_DIR=${PROJ_INSTALL_DIR}/${PLUGIN_DEPENDENT_LIB_DIR}
+
+HCOMPONENT_INSTALL_DIR_NAME=$(getInstallProperty 'COMPONENT_INSTALL_DIR_NAME')
+
+CUSTOM_USER=$(getInstallProperty 'CUSTOM_USER')
+CUSTOM_USER=${CUSTOM_USER// }
+
+CUSTOM_GROUP=$(getInstallProperty 'CUSTOM_GROUP')
+CUSTOM_GROUP=${CUSTOM_GROUP// }
+
+CUSTOM_GROUP_STATUS=${CUSTOM_GROUP};
+CUSTOM_USER_STATUS=${CUSTOM_USER};
+egrep "^$CUSTOM_GROUP" /etc/group >& /dev/null
+if [ $? -ne 0 ]
+then
+       CUSTOM_GROUP_STATUS=""
+fi
+id -u ${CUSTOM_USER} > /dev/null 2>&1
+if [ $? -ne 0 ]
+then
+       CUSTOM_USER_STATUS=""
+fi
+
+if [ ! -z "${CUSTOM_USER_STATUS}" ] && [ ! -z "${CUSTOM_GROUP_STATUS}" ]
+then
+  echo "Custom user and group is available, using custom user and group."
+  CFG_OWNER_INF="${CUSTOM_USER}:${CUSTOM_GROUP}"
+elif [ ! -z "${CUSTOM_USER_STATUS}" ] && [ -z "${CUSTOM_GROUP_STATUS}" ]
+then
+  echo "Custom user is available, using custom user and default group."
+  CFG_OWNER_INF="${CUSTOM_USER}:${HCOMPONENT_NAME}"
+elif [ -z  "${CUSTOM_USER_STATUS}" ] && [ ! -z  "${CUSTOM_GROUP_STATUS}" ]
+then
+  echo "Custom group is available, using default user and custom group."
+  CFG_OWNER_INF="${HCOMPONENT_NAME}:${CUSTOM_GROUP}"
+else
+  echo "Custom user and group are not available, using default user and group."
+  CFG_OWNER_INF="${HCOMPONENT_NAME}:${HCOMPONENT_NAME}"
+fi
+
+if [ "${HCOMPONENT_INSTALL_DIR_NAME}" = "" ]
+then
+  HCOMPONENT_INSTALL_DIR_NAME=${HCOMPONENT_NAME}
+fi
+
+firstletter=${HCOMPONENT_INSTALL_DIR_NAME:0:1}
+if [ "$firstletter" = "/" ]; then
+    hdir=${HCOMPONENT_INSTALL_DIR_NAME}
+else
+    hdir=${PROJ_INSTALL_DIR}/../${HCOMPONENT_INSTALL_DIR_NAME}
+fi
+
+#
+# TEST - START
+#
+if [ ! -d ${hdir} ]
+then
+       mkdir -p ${hdir}
+fi
+#
+# TEST - END
+#
+HCOMPONENT_INSTALL_DIR=`(cd ${hdir} ; pwd)`
+HCOMPONENT_LIB_DIR=${HCOMPONENT_INSTALL_DIR}/lib
+if [ "${HCOMPONENT_NAME}" = "hadoop" ] ||
+     [ "${HCOMPONENT_NAME}" = "yarn" ]; then
+    HCOMPONENT_LIB_DIR=${HCOMPONENT_INSTALL_DIR}/share/hadoop/hdfs/lib
+fi
+
+HCOMPONENT_CONF_DIR=${HCOMPONENT_INSTALL_DIR}/conf
+HCOMPONENT_ARCHIVE_CONF_DIR=${HCOMPONENT_CONF_DIR}/.archive
+SET_ENV_SCRIPT=${HCOMPONENT_CONF_DIR}/${SET_ENV_SCRIPT_NAME}
+
+
+if [ ! -d "${HCOMPONENT_INSTALL_DIR}" ]
+then
+       echo "ERROR: Unable to find the install directory of component 
[${HCOMPONENT_NAME}]; dir [${HCOMPONENT_INSTALL_DIR}] not found."
+       echo "Exiting installation."
+       exit 1
+fi
+
+if [ ! -d "${HCOMPONENT_CONF_DIR}" ]
+then
+       echo "ERROR: Unable to find the conf directory of component 
[${HCOMPONENT_NAME}]; dir [${HCOMPONENT_CONF_DIR}] not found."
+       echo "Exiting installation."
+       exit 1
+fi
+
+if [ ! -d "${HCOMPONENT_LIB_DIR}" ]
+then
+    mkdir -p "${HCOMPONENT_LIB_DIR}"
+    if [ ! -d "${HCOMPONENT_LIB_DIR}" ]
+    then
+        echo "ERROR: Unable to find the lib directory of component 
[${HCOMPONENT_NAME}];  dir [${HCOMPONENT_LIB_DIR}] not found."
+        echo "Exiting installation."
+        exit 1
+    fi
+fi
+
+#
+# Common functions used by all enable/disable scripts
+#
+
+log() {
+       echo "+ `date` : $*"
+}
+
+
+create_jceks() {
+
+       alias=$1
+       pass=$2
+       jceksFile=$3
+
+       if [ -f "${jceksFile}" ]
+       then
+               jcebdir=`dirname ${jceksFile}`
+               jcebname=`basename ${jceksFile}`
+               archive_jce=${jcebdir}/.${jcebname}.`date '+%Y%m%d%H%M%S'`
+               log "Saving current JCE file: ${jceksFile} to ${archive_jce} 
..."
+               cp ${jceksFile} ${archive_jce}
+       fi
+
+       tempFile=/tmp/jce.$$.out
+
+  $JAVA_HOME/bin/java -cp ":${PROJ_INSTALL_LIB_DIR}/*:" 
org.apache.ranger.credentialapi.buildks create "${alias}" -value "${pass}" 
-provider "jceks://file${jceksFile}" > ${tempFile} 2>&1
+
+       if [ $? -ne 0 ]
+       then
+               echo "Unable to store password in non-plain text format. Error: 
[`cat ${tempFile}`]"
+               echo "Exiting plugin installation"
+               rm -f ${tempFile}
+               exit 0
+       fi
+
+       rm -f ${tempFile}
+}
+
+log "${HCOMPONENT_NAME}: lib folder=$HCOMPONENT_LIB_DIR conf 
folder=$HCOMPONENT_CONF_DIR"
+
+#
+# If there is a set-ranger-${COMPONENT}-env.sh, install it
+#
+dt=`date '+%Y%m%d-%H%M%S'`
+
+if [ -f "${SET_ENV_SCRIPT_TEMPLATE}" ]
+then
+       #
+       # If the setenv script already exists, move it to the archive folder
+       #
+       if [ -f "${SET_ENV_SCRIPT}" ]
+       then
+               if [ ! -d "${HCOMPONENT_ARCHIVE_CONF_DIR}" ]
+               then
+                       mkdir -p ${HCOMPONENT_ARCHIVE_CONF_DIR}
+               fi
+               log "Saving current ${SET_ENV_SCRIPT_NAME} to 
${HCOMPONENT_ARCHIVE_CONF_DIR} ..."
+               mv ${SET_ENV_SCRIPT} 
${HCOMPONENT_ARCHIVE_CONF_DIR}/${SET_ENV_SCRIPT_NAME}.${dt}
+       fi
+
+       if [ "${action}" = "enable" ]
+       then
+
+               cp ${SET_ENV_SCRIPT_TEMPLATE} ${SET_ENV_SCRIPT}
+
+               
DEST_SCRIPT_FILE=${HCOMPONENT_INSTALL_DIR}/libexec/${HCOMPONENT_NAME}-config.sh
+
+               
DEST_SCRIPT_ARCHIVE_FILE=${HCOMPONENT_INSTALL_DIR}/libexec/.${HCOMPONENT_NAME}-config.sh.${dt}
+
+               if [ -f "${DEST_SCRIPT_FILE}" ]
+               then
+
+                       log "Saving current ${DEST_SCRIPT_FILE} to 
${DEST_SCRIPT_ARCHIVE_FILE} ..."
+
+                       cp ${DEST_SCRIPT_FILE} ${DEST_SCRIPT_ARCHIVE_FILE}
+
+                       grep 'xasecure-.*-env.sh' ${DEST_SCRIPT_FILE} > 
/dev/null 2>&1
+                       if [ $? -eq 0 ]
+                       then
+                               ts=`date '+%Y%m%d%H%M%S'`
+                               grep -v 'xasecure-.*-env.sh' 
${DEST_SCRIPT_FILE} > ${DEST_SCRIPT_FILE}.${ts}
+                               if [ $? -eq 0 ]
+                               then
+                                       log "Removing old reference to xasecure 
setenv source ..."
+                                       cat ${DEST_SCRIPT_FILE}.${ts} > 
${DEST_SCRIPT_FILE}
+                                       rm -f ${DEST_SCRIPT_FILE}.${ts}
+                               fi
+                       fi
+
+                       grep "[ \t]*.[ \t]*${SET_ENV_SCRIPT}" 
${DEST_SCRIPT_FILE} > /dev/null
+                       if [ $? -ne 0 ]
+                       then
+                               log "Appending sourcing script, 
${SET_ENV_SCRIPT_NAME} in the file: ${DEST_SCRIPT_FILE} "
+                               cat >> ${DEST_SCRIPT_FILE} <<!
+if [ -f ${SET_ENV_SCRIPT} ]
+then
+       .  ${SET_ENV_SCRIPT}
+fi
+!
+                       else
+                               log "INFO: ${DEST_SCRIPT_FILE} is being sourced 
from file: ${HCOMPONENT_CONF_DIR}/${HCOMPONENT_NAME}-env.sh "
+                       fi
+               fi
+       fi
+fi
+
+#
+# Run, the enable|disable ${COMPONENT} configurations
+#
+
+# Apply (action=enable) or revert (action=disable) the plugin's bundled
+# config templates into the component's conf directory, then merge each
+# *-changes.cfg into its matching XML via XmlConfigChanger.
+if [ -d "${PROJ_INSTALL_DIR}/install/conf.templates/${action}" ]
+then
+       INSTALL_CP="${PROJ_INSTALL_LIB_DIR}/*"
+       if [ "${action}" = "enable" ]
+       then
+               # NOTE(review): plain 'echo' does not expand \n without -e, so
+               # ranger-security.xml is written as one literal line — confirm intended.
+               echo "<ranger>\n<enabled>`date`</enabled>\n</ranger>" > 
${HCOMPONENT_CONF_DIR}/ranger-security.xml
+               chown ${CFG_OWNER_INF} 
${HCOMPONENT_CONF_DIR}/ranger-security.xml
+               chmod a+r ${HCOMPONENT_CONF_DIR}/ranger-security.xml
+               # Back up any pre-existing conf file (timestamped dot-file),
+               # then install the template copy, world-readable.
+               for cf in 
${PROJ_INSTALL_DIR}/install/conf.templates/${action}/*.xml
+               do
+                       cfb=`basename ${cf}`
+                       if [ -f "${HCOMPONENT_CONF_DIR}/${cfb}" ]
+                       then
+                               log "Saving ${HCOMPONENT_CONF_DIR}/${cfb} to 
${HCOMPONENT_CONF_DIR}/.${cfb}.${dt} ..."
+                               cp ${HCOMPONENT_CONF_DIR}/${cfb} 
${HCOMPONENT_CONF_DIR}/.${cfb}.${dt}
+                       fi
+                       cp ${cf} ${HCOMPONENT_CONF_DIR}/
+                       chown ${CFG_OWNER_INF} ${HCOMPONENT_CONF_DIR}/${cfb}
+                       chmod a+r ${HCOMPONENT_CONF_DIR}/${cfb}
+               done
+    else
+               # disable: archive the enable-marker file instead of deleting it
+               if [ -f ${HCOMPONENT_CONF_DIR}/ranger-security.xml ]
+               then
+                       mv ${HCOMPONENT_CONF_DIR}/ranger-security.xml 
${HCOMPONENT_CONF_DIR}/.ranger-security.xml.`date '+%Y%m%d%H%M%S'`
+               fi
+       fi
+
+       #
+       # Ensure that POLICY_CACHE_FILE_PATH is accessible
+       #
+       REPO_NAME=$(getInstallProperty 'REPOSITORY_NAME')
+       export POLICY_CACHE_FILE_PATH=/etc/${PROJ_NAME}/${REPO_NAME}/policycache
+       export 
CREDENTIAL_PROVIDER_FILE=/etc/${PROJ_NAME}/${REPO_NAME}/cred.jceks
+       if [ ! -d ${POLICY_CACHE_FILE_PATH} ]
+       then
+               mkdir -p ${POLICY_CACHE_FILE_PATH}
+       fi
+       chmod a+rx /etc/${PROJ_NAME}
+       chmod a+rx /etc/${PROJ_NAME}/${REPO_NAME}
+       chmod a+rx ${POLICY_CACHE_FILE_PATH}
+       chown -R ${CFG_OWNER_INF} /etc/${PROJ_NAME}/${REPO_NAME}
+
+       # For each <name>-changes.cfg: archive the current <name>.xml, run
+       # XmlConfigChanger to produce a merged copy, and overwrite the live
+       # file only if the merge actually changed it (diff -w).
+       for f in ${PROJ_INSTALL_DIR}/install/conf.templates/${action}/*.cfg
+       do
+               if [ -f "${f}" ]
+               then
+                       fn=`basename $f`
+               orgfn=`echo $fn | sed -e 's:-changes.cfg:.xml:'`
+               fullpathorgfn="${HCOMPONENT_CONF_DIR}/${orgfn}"
+               if [ ! -f ${fullpathorgfn} ]
+               then
+                               if [ -f ${DEFAULT_XML_CONFIG} ]
+                               then
+                                       log "Creating default file from 
[${DEFAULT_XML_CONFIG}] for [${fullpathorgfn}] .."
+                                       cp ${DEFAULT_XML_CONFIG} 
${fullpathorgfn}
+                                       chown ${CFG_OWNER_INF} ${fullpathorgfn}
+                                       chmod a+r ${fullpathorgfn}
+                               else
+                               echo "ERROR: Unable to find ${fullpathorgfn}"
+                               exit 1
+                               fi
+               fi
+                       archivefn="${HCOMPONENT_CONF_DIR}/.${orgfn}.${dt}"
+               newfn="${HCOMPONENT_CONF_DIR}/.${orgfn}-new.${dt}"
+                       log "Saving current config file: ${fullpathorgfn} to 
${archivefn} ..."
+            cp ${fullpathorgfn} ${archivefn}
+                       if [ $? -eq 0 ]
+                       then
+                               ${JAVA} -cp "${INSTALL_CP}" 
org.apache.ranger.utils.install.XmlConfigChanger -i ${archivefn} -o ${newfn} -c 
${f} -p  ${INSTALL_ARGS}
+                               if [ $? -eq 0 ]
+                then
+                       diff -w ${newfn} ${fullpathorgfn} > /dev/null 2>&1
+                    if [ $? -ne 0 ]
+                    then
+                       cat ${newfn} > ${fullpathorgfn}
+                    fi
+
+                       else
+                                   echo "ERROR: Unable to make changes to 
config. file: ${fullpathorgfn}"
+                    echo "exiting ...."
+                    exit 1
+                               fi
+                       else
+                               echo "ERROR: Unable to save config. file: 
${fullpathorgfn}  to ${archivefn}"
+                echo "exiting ...."
+                exit 1
+                       fi
+               fi
+       done
+fi
+
+#
+# Create library link
+#
+if [ "${action}" = "enable" ]
+then
+       dt=`date '+%Y%m%d%H%M%S'`
+       #
+       # Encrypt the password and keep it secure in Credential Provider API
+       #
+       CredFile=${CREDENTIAL_PROVIDER_FILE}
+       if ! [ `echo ${CredFile} | grep '^/.*'` ]
+       then
+               echo "ERROR:Please enter the Credential File Store with proper 
file path"
+               exit 1
+       fi
+
+       pardir=`dirname ${CredFile}`
+
+       if [ ! -d "${pardir}" ]
+       then
+               mkdir -p "${pardir}"
+               if [ $? -ne 0 ]
+               then
+               echo "ERROR: Unable to create credential store file path"
+                       exit 1
+               fi
+               chmod a+rx "${pardir}"
+       fi
+fi
+
+
# Check whether a property is present in a file.
# $1 -> propertyName; $2 -> fileName
# Comment lines (starting with '#') are ignored. Returns 1 if the property
# is not present, 0 otherwise. Note: the match is a prefix match ("^$1"),
# so 'foo' also matches 'foobar=...' — kept for upstream compatibility.
checkPropertyInFile(){
	# Quote the filename so paths containing spaces do not word-split.
	validate=$(sed '/^\#/d' "$2" | grep "^$1" | tail -n 1 | cut -d "=" -f1-) # for validation
	if test -z "$validate" ; then return 1; fi
}
+
# Append a property to a file and verify it was written.
# $1 -> propertyName; $2 -> newPropertyValue; $3 -> fileName
# Exits 1 (via log) if the property cannot be read back afterwards.
addPropertyToFile(){
	# Quote expansions so values containing spaces or glob chars are
	# written intact and the target path may contain spaces.
	echo "$1=$2" >> "$3"
	validate=$(sed '/^\#/d' "$3" | grep "^$1" | tail -n 1 | cut -d "=" -f2-) # for validation
	if test -z "$validate" ; then log "[E] Failed to add properties '$1' to $3 file!"; exit 1; fi
	echo "Property $1 added successfully with : '$2'"
}
+
# Update an existing property in a file, in place.
# $1 -> propertyName; $2 -> newPropertyValue; $3 -> fileName
# Exits 1 (via log) if the property is absent after the edit.
# NOTE: '@' is the sed delimiter, so property names/values containing '@'
# are not supported (same limitation as the original).
updatePropertyToFile(){
	# Original pattern used '[^ ]*$', which silently failed to replace
	# values containing spaces while the post-check still reported
	# success; '.*' replaces the whole remainder of the line.
	sed -i "s@^$1=.*@$1=$2@g" "$3"
	validate=$(sed '/^\#/d' "$3" | grep "^$1" | tail -n 1 | cut -d "=" -f2-) # for validation
	if test -z "$validate" ; then log "[E] '$1' not found in $3 file while Updating....!!"; exit 1; fi
	echo "Property $1 updated successfully with : '$2'"
}
+
# Upsert a property: update it when present, append it otherwise.
# $1 -> propertyName; $2 -> newPropertyValue; $3 -> fileName
addOrUpdatePropertyToFile(){
	if checkPropertyInFile $1 $3
	then
		updatePropertyToFile $1 $2 $3
	else
		addPropertyToFile $1 $2 $3
	fi
}
+
+
+# Set notice to restart the ${HCOMPONENT_NAME}
+echo "Ranger Plugin for ${HCOMPONENT_NAME} has been ${action}d. Please restart 
${HCOMPONENT_NAME} to ensure that changes are effective."
+
+exit 0
diff --git 
a/dev-support/ranger-docker/config/ozone/ranger-ozone-plugin-install.properties 
b/dev-support/ranger-docker/config/ozone/ranger-ozone-plugin-install.properties
new file mode 100644
index 000000000..b0e4ee195
--- /dev/null
+++ 
b/dev-support/ranger-docker/config/ozone/ranger-ozone-plugin-install.properties
@@ -0,0 +1,83 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+POLICY_MGR_URL=http://ranger:6080
+REPOSITORY_NAME=dev_ozone
+COMPONENT_INSTALL_DIR_NAME=ranger-ozone-plugin
+
+CUSTOM_USER=ozone
+CUSTOM_GROUP=hadoop
+
+XAAUDIT.SOLR.IS_ENABLED=true
+XAAUDIT.SOLR.MAX_QUEUE_SIZE=1
+XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS=1000
+XAAUDIT.SOLR.SOLR_URL=http://ranger-solr:8983/solr/ranger_audits
+
+XAAUDIT.HDFS.IS_ENABLED=false
+XAAUDIT.HDFS.DESTINATION_DIRECTORY=/ranger/audit
+XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY=__REPLACE__LOG_DIR/ozone/audit
+XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY=__REPLACE__LOG_DIR/ozone/audit/archive
+XAAUDIT.HDFS.DESTINTATION_FILE=%hostname%-audit.log
+XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS=900
+XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS=86400
+XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS=60
+XAAUDIT.HDFS.LOCAL_BUFFER_FILE=%time:yyyyMMdd-HHmm.ss%.log
+XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS=60
+XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS=600
+XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT=10
+
+XAAUDIT.SUMMARY.ENABLE=true
+
+XAAUDIT.SOLR.ENABLE=true
+XAAUDIT.SOLR.URL=http://ranger-solr:8983/solr/ranger_audits
+XAAUDIT.SOLR.USER=NONE
+XAAUDIT.SOLR.PASSWORD=NONE
+XAAUDIT.SOLR.ZOOKEEPER=NONE
+XAAUDIT.SOLR.FILE_SPOOL_DIR=/var/log/ozone/audit/solr/spool
+
+XAAUDIT.ELASTICSEARCH.ENABLE=false
+XAAUDIT.ELASTICSEARCH.URL=NONE
+XAAUDIT.ELASTICSEARCH.USER=NONE
+XAAUDIT.ELASTICSEARCH.PASSWORD=NONE
+XAAUDIT.ELASTICSEARCH.INDEX=NONE
+XAAUDIT.ELASTICSEARCH.PORT=NONE
+XAAUDIT.ELASTICSEARCH.PROTOCOL=NONE
+
+XAAUDIT.HDFS.ENABLE=false
+XAAUDIT.HDFS.HDFS_DIR=hdfs://ranger-hadoop:9000/ranger/audit
+XAAUDIT.HDFS.FILE_SPOOL_DIR=/var/log/ozone/audit/hdfs/spool
+
+XAAUDIT.HDFS.AZURE_ACCOUNTNAME=__REPLACE_AZURE_ACCOUNT_NAME
+XAAUDIT.HDFS.AZURE_ACCOUNTKEY=__REPLACE_AZURE_ACCOUNT_KEY
+XAAUDIT.HDFS.AZURE_SHELL_KEY_PROVIDER=__REPLACE_AZURE_SHELL_KEY_PROVIDER
+XAAUDIT.HDFS.AZURE_ACCOUNTKEY_PROVIDER=__REPLACE_AZURE_ACCOUNT_KEY_PROVIDER
+
+XAAUDIT.LOG4J.ENABLE=true
+XAAUDIT.LOG4J.IS_ASYNC=false
+XAAUDIT.LOG4J.ASYNC.MAX.QUEUE.SIZE=10240
+XAAUDIT.LOG4J.ASYNC.MAX.FLUSH.INTERVAL.MS=30000
+XAAUDIT.LOG4J.DESTINATION.LOG4J=true
+XAAUDIT.LOG4J.DESTINATION.LOG4J.LOGGER=xaaudit
+
+XAAUDIT.AMAZON_CLOUDWATCH.ENABLE=false
+XAAUDIT.AMAZON_CLOUDWATCH.LOG_GROUP=NONE
+XAAUDIT.AMAZON_CLOUDWATCH.LOG_STREAM_PREFIX=NONE
+XAAUDIT.AMAZON_CLOUDWATCH.FILE_SPOOL_DIR=NONE
+XAAUDIT.AMAZON_CLOUDWATCH.REGION=NONE
+
+SSL_KEYSTORE_FILE_PATH=/etc/hadoop/conf/ranger-plugin-keystore.jks
+SSL_KEYSTORE_PASSWORD=myKeyFilePassword
+SSL_TRUSTSTORE_FILE_PATH=/etc/hadoop/conf/ranger-plugin-truststore.jks
+SSL_TRUSTSTORE_PASSWORD=changeit
diff --git a/dev-support/ranger-docker/config/ozone/ranger-ozone-setup.sh 
b/dev-support/ranger-docker/config/ozone/ranger-ozone-setup.sh
new file mode 100644
index 000000000..8c52474fd
--- /dev/null
+++ b/dev-support/ranger-docker/config/ozone/ranger-ozone-setup.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# One-shot plugin installer for the Ozone Manager container: runs
+# enable-ozone-plugin.sh once, then drops a .setupDone marker so container
+# restarts skip re-installation.
+cd "${OZONE_HOME}"/ranger-ozone-plugin || exit
+
+if [[ ! -f "${OZONE_HOME}"/.setupDone ]];
+then
+  if [ ! -d conf ]; then
+    mkdir -p conf
+    echo "conf directory created!"
+  else
+    echo "conf directory exists already!"
+  fi
+  echo "export JAVA_HOME=${JAVA_HOME}" >> conf/ozone-env.sh
+  # NOTE(review): JAVA_HOME is hardcoded to /usr/lib/jvm/jre/ for the sudo
+  # invocation, ignoring the value exported just above — confirm intended.
+  sudo JAVA_HOME=/usr/lib/jvm/jre/ ./enable-ozone-plugin.sh
+  touch "${OZONE_HOME}"/.setupDone
+else
+  echo "Ranger Ozone Plugin Installation is already complete!"
+fi
diff --git a/dev-support/ranger-docker/docker-compose.ranger-ozone.yml 
b/dev-support/ranger-docker/docker-compose.ranger-ozone.yml
new file mode 100644
index 000000000..a0ee4abc9
--- /dev/null
+++ b/dev-support/ranger-docker/docker-compose.ranger-ozone.yml
@@ -0,0 +1,88 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+version: "3"
+services:
+  datanode:
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
+    container_name: ozone-datanode
+    volumes:
+      - ./downloads/ozone-${OZONE_VERSION}:/opt/hadoop
+    networks:
+      - ranger
+    ports:
+      - 9864
+    command: ["/opt/hadoop/bin/ozone","datanode"]
+    env_file:
+      - ./config/ozone/docker-config
+    environment:
+      OZONE_OPTS:
+  om:
+    build:
+      context: .
+      dockerfile: Dockerfile.ranger-ozone
+      args:
+        - OZONE_RUNNER_IMAGE=${OZONE_RUNNER_IMAGE}
+        - OZONE_RUNNER_VERSION=${OZONE_RUNNER_VERSION}
+        - OZONE_HOME=/opt/hadoop
+        - OZONE_PLUGIN_VERSION=${OZONE_PLUGIN_VERSION}
+    image: ranger-ozone:latest
+    container_name: ozone-om
+    hostname: om
+    volumes:
+      - ./downloads/ozone-${OZONE_VERSION}:/opt/hadoop
+      - 
./dist/ranger-${OZONE_PLUGIN_VERSION}-ozone-plugin:/opt/hadoop/ranger-ozone-plugin
+    networks:
+      - ranger
+    ports:
+      - 9874:9874
+      - 9862:9862
+    depends_on:
+      ranger:
+        condition: service_started
+      ranger-solr:
+        condition: service_started
+    environment:
+      ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
+      OZONE_OPTS: -Dcom.sun.net.ssl.checkRevocation=false
+      OZONE_HOME: /opt/hadoop
+      OZONE_PLUGIN_VERSION: ${OZONE_PLUGIN_VERSION}
+      OZONE_VERSION: ${OZONE_VERSION}
+    env_file:
+      - ./config/ozone/docker-config
+    command: bash -c "/opt/hadoop/ranger-ozone-plugin/ranger-ozone-setup.sh && 
/opt/hadoop/bin/ozone om"
+  scm:
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
+    container_name: ozone-scm
+    hostname: scm
+    volumes:
+      - ./downloads/ozone-${OZONE_VERSION}:/opt/hadoop
+    networks:
+      - ranger
+    ports:
+      - 9876:9876
+      - 9860:9860
+    env_file:
+      - ./config/ozone/docker-config
+    environment:
+      ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
+      OZONE-SITE.XML_hdds.scm.safemode.min.datanode: 
"${OZONE_SAFEMODE_MIN_DATANODES:-1}"
+      OZONE_OPTS:
+    command: ["/opt/hadoop/bin/ozone","scm"]
+
+networks:
+  ranger:
+    name: rangernw
diff --git a/dev-support/ranger-docker/download-archives.sh 
b/dev-support/ranger-docker/download-archives.sh
index 4b2736fb6..53cce79fc 100755
--- a/dev-support/ranger-docker/download-archives.sh
+++ b/dev-support/ranger-docker/download-archives.sh
@@ -17,12 +17,10 @@
 # limitations under the License.
 
 #
-# Downloads HDFS/Hive/HBase/Kafka/.. archives to a local cache directory.
-# The downloaded archives will be used while building docker images that
-# run these services
+# Downloads HDFS/Hive/HBase/Kafka/Knox/Ozone archives to a local cache 
directory.
+# The downloaded archives will be used while building docker images that run 
these services.
 #
 
-
 #
 # source .env file to get versions to download
 #
@@ -43,14 +41,35 @@ downloadIfNotPresent() {
   fi
 }
 
-downloadIfNotPresent hadoop-${HADOOP_VERSION}.tar.gz        
https://archive.apache.org/dist/hadoop/common/hadoop-${HADOOP_VERSION}
-downloadIfNotPresent hbase-${HBASE_VERSION}-bin.tar.gz      
https://archive.apache.org/dist/hbase/${HBASE_VERSION}
-downloadIfNotPresent kafka_2.12-${KAFKA_VERSION}.tgz        
https://archive.apache.org/dist/kafka/${KAFKA_VERSION}
-downloadIfNotPresent apache-hive-${HIVE_VERSION}-bin.tar.gz 
https://archive.apache.org/dist/hive/hive-${HIVE_VERSION}
-downloadIfNotPresent hadoop-${HIVE_HADOOP_VERSION}.tar.gz   
https://archive.apache.org/dist/hadoop/common/hadoop-${HIVE_HADOOP_VERSION}
-downloadIfNotPresent postgresql-42.2.16.jre7.jar            
https://search.maven.org/remotecontent?filepath=org/postgresql/postgresql/42.2.16.jre7
-downloadIfNotPresent mysql-connector-java-8.0.28.jar        
https://search.maven.org/remotecontent?filepath=mysql/mysql-connector-java/8.0.28
+downloadIfNotPresent postgresql-42.2.16.jre7.jar            
"https://search.maven.org/remotecontent?filepath=org/postgresql/postgresql/42.2.16.jre7";
+downloadIfNotPresent mysql-connector-java-8.0.28.jar        
"https://search.maven.org/remotecontent?filepath=mysql/mysql-connector-java/8.0.28";
 downloadIfNotPresent log4jdbc-1.2.jar                       
https://repo1.maven.org/maven2/com/googlecode/log4jdbc/log4jdbc/1.2
 
-downloadIfNotPresent knox-${KNOX_VERSION}.tar.gz            
https://archive.apache.org/dist/knox/${KNOX_VERSION}
-
# Download only the service tarballs named on the command line
# (hadoop|hbase|hive|kafka|knox|ozone); unknown names are reported.
for arg in "$@"; do
  case "$arg" in
    hadoop)
      downloadIfNotPresent hadoop-${HADOOP_VERSION}.tar.gz        https://archive.apache.org/dist/hadoop/common/hadoop-${HADOOP_VERSION}
      ;;
    hbase)
      downloadIfNotPresent hbase-${HBASE_VERSION}-bin.tar.gz      https://archive.apache.org/dist/hbase/${HBASE_VERSION}
      ;;
    hive)
      downloadIfNotPresent apache-hive-${HIVE_VERSION}-bin.tar.gz https://archive.apache.org/dist/hive/hive-${HIVE_VERSION}
      downloadIfNotPresent hadoop-${HIVE_HADOOP_VERSION}.tar.gz   https://archive.apache.org/dist/hadoop/common/hadoop-${HIVE_HADOOP_VERSION}
      ;;
    kafka)
      downloadIfNotPresent kafka_2.12-${KAFKA_VERSION}.tgz        https://archive.apache.org/dist/kafka/${KAFKA_VERSION}
      ;;
    knox)
      downloadIfNotPresent knox-${KNOX_VERSION}.tar.gz            https://archive.apache.org/dist/knox/${KNOX_VERSION}
      ;;
    ozone)
      downloadIfNotPresent ozone-${OZONE_VERSION}.tar.gz          https://archive.apache.org/dist/ozone/${OZONE_VERSION}
      # Ozone is also unpacked: the compose file bind-mounts the extracted tree.
      if [ ! -d downloads/ozone-${OZONE_VERSION} ]
      then
        tar xvfz downloads/ozone-${OZONE_VERSION}.tar.gz --directory=downloads/
      fi
      ;;
    *)
      echo "Passed argument $arg is invalid!"
      ;;
  esac
done
diff --git a/dev-support/ranger-docker/scripts/create-ranger-services.py 
b/dev-support/ranger-docker/scripts/create-ranger-services.py
index 057b067dc..45d6a7791 100644
--- a/dev-support/ranger-docker/scripts/create-ranger-services.py
+++ b/dev-support/ranger-docker/scripts/create-ranger-services.py
@@ -58,7 +58,14 @@ trino = RangerService({'name': 'dev_trino',
                            'jdbc.url': 'jdbc:trino://ranger-trino:8080',
                        }})
 
-services = [hdfs, yarn, hive, hbase, kafka, knox, kms, trino]
+# Ranger service definition for the dockerized Ozone cluster; 'om' is the
+# Ozone Manager container hostname from docker-compose.ranger-ozone.yml.
+ozone = RangerService({'name': 'dev_ozone',
+                       'type': 'ozone',
+                       'displayName': 'dev_ozone',
+                       'configs': {'username': 'hdfs', 'password': 'hdfs',
+                                   'ozone.om.http-address': 'http://om:9874',
+                                   'hadoop.security.authentication': 
'simple'}})
+
+services = [hdfs, yarn, hive, hbase, kafka, knox, kms, trino, ozone]
 for service in services:
     try:
         if service_not_exists(service):
@@ -66,4 +73,3 @@ for service in services:
             print(f" {service.name} service created!")
     except Exception as e:
         print(f"An exception occured: {e}")
-
diff --git a/dev-support/ranger-docker/scripts/ozone-plugin-docker-setup.sh 
b/dev-support/ranger-docker/scripts/ozone-plugin-docker-setup.sh
new file mode 100755
index 000000000..295fb41d7
--- /dev/null
+++ b/dev-support/ranger-docker/scripts/ozone-plugin-docker-setup.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source .env
+
+if [ ! -d dist/ranger-${OZONE_PLUGIN_VERSION}-ozone-plugin ]
+then
+  tar xvfz dist/ranger-${OZONE_PLUGIN_VERSION}-ozone-plugin.tar.gz 
--directory=dist/
+fi
+
+cp -f config/ozone/ranger-ozone-plugin-install.properties 
dist/ranger-${OZONE_PLUGIN_VERSION}-ozone-plugin/install.properties
+cp -f config/ozone/ranger-ozone-setup.sh 
dist/ranger-${OZONE_PLUGIN_VERSION}-ozone-plugin/
+cp -f config/ozone/enable-ozone-plugin.sh 
dist/ranger-${OZONE_PLUGIN_VERSION}-ozone-plugin/
+chmod +x dist/ranger-${OZONE_PLUGIN_VERSION}-ozone-plugin/ranger-ozone-setup.sh
diff --git a/plugin-ozone/pom.xml b/plugin-ozone/pom.xml
index 8d88dd308..a9582a257 100644
--- a/plugin-ozone/pom.xml
+++ b/plugin-ozone/pom.xml
@@ -143,5 +143,10 @@ limitations under the License.
             <version>${slf4j.version}</version>
            <scope>test</scope>
         </dependency>
+        <dependency>
+            <groupId>org.apache.zookeeper</groupId>
+            <artifactId>zookeeper</artifactId>
+            <version>${zookeeper.version}</version>
+        </dependency>
     </dependencies>
 </project>


Reply via email to