Repository: ambari
Updated Branches:
  refs/heads/trunk 01b3af1b7 -> 31cdf9fab


http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py
new file mode 100644
index 0000000..bb7d94c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
+from resource_management import *
+
+config  = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+hdp_stack_version         = str(config['hostLevelParams']['stack_version'])
+hdp_stack_version         = format_hdp_stack_version(hdp_stack_version)
+stack_is_hdp22_or_further = hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0
+
+if stack_is_hdp22_or_further:
+       ranger_home    = '/usr/hdp/current/ranger-admin'
+       ranger_stop    = '/usr/bin/ranger-admin-stop'
+       ranger_start   = '/usr/bin/ranger-admin-start'
+       usersync_home  = '/usr/hdp/current/ranger-usersync'
+       usersync_start = '/usr/bin/ranger-usersync-start'
+       usersync_stop  = '/usr/bin/ranger-usersync-stop'
+else:
+       pass
+
+java_home = config['hostLevelParams']['java_home']
+unix_user  = default("/configurations/ranger-env/unix_user", "ranger")
+unix_group = default("/configurations/ranger-env/unix_group", "ranger")
+
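
The stack gate in params.py only resolves the /usr/hdp/current/* Ranger paths when the reported stack version is 2.2 or later. A minimal standalone sketch of that gate, using a hypothetical stand-in for compare_versions (the real helper comes from resource_management.libraries.functions.version) and an assumed version string:

    # illustrative stand-in, not the Ambari implementation
    def compare_versions(v1, v2):
        # compare dotted version strings piecewise, e.g. '2.2.0.0' vs '2.2'
        a = [int(p) for p in v1.split('.') if p.isdigit()]
        b = [int(p) for p in v2.split('.') if p.isdigit()]
        return (a > b) - (a < b)

    hdp_stack_version = '2.2.0.0'   # hypothetical value from hostLevelParams
    stack_is_hdp22_or_further = hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0
    if stack_is_hdp22_or_further:
        ranger_home = '/usr/hdp/current/ranger-admin'   # matches the path set above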

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py
new file mode 100644
index 0000000..c916162
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from setup_ranger import setup_ranger
+
+class RangerAdmin(Script):
+    def install(self, env):
+        self.install_packages(env)
+        setup_ranger(env)
+
+    def stop(self, env):
+        import params
+        env.set_params(params)
+        Execute(format('{params.ranger_stop}'))
+
+    def start(self, env):
+        import params
+        setup_ranger(env)
+        Execute(format('{params.ranger_start}'))
+     
+    def status(self, env):
+        pass
+
+    def configure(self, env):
+        import params
+        env.set_params(params)
+
+
+if __name__ == "__main__":
+  RangerAdmin().execute()
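
RangerAdmin above only wires the Ambari command lifecycle to the wrapper scripts resolved in params.py: install() installs packages and runs setup_ranger, while start()/stop() execute ranger-admin-start/ranger-admin-stop. A rough standalone equivalent of the start/stop handlers, assuming the HDP 2.2 paths above (illustrative only; inside Ambari this goes through Execute()):

    import subprocess

    # hypothetical stand-ins for RangerAdmin.start()/stop() outside of Ambari
    def start_ranger_admin():
        subprocess.check_call(['/usr/bin/ranger-admin-start'])

    def stop_ranger_admin():
        subprocess.check_call(['/usr/bin/ranger-admin-stop'])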

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_usersync.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_usersync.py b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_usersync.py
new file mode 100644
index 0000000..c4ed7ea
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_usersync.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import sys
+from resource_management import *
+from setup_ranger import setup_usersync
+
+class RangerUsersync(Script):
+    def install(self, env):
+        self.install_packages(env)
+        setup_usersync(env)        
+
+    def stop(self, env):
+        import params
+        Execute(format('{params.usersync_stop}'))
+
+    def start(self, env):
+        import params
+        setup_usersync(env)
+        Execute(format('{params.usersync_start}'))
+     
+    def status(self, env):
+        pass
+
+    def configure(self, env):
+        import params
+        env.set_params(params)
+
+
+if __name__ == "__main__":
+  RangerUsersync().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger.py b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger.py
new file mode 100644
index 0000000..265018d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger.py
@@ -0,0 +1,132 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+import fileinput
+from resource_management import *
+from resource_management.core.logger import Logger
+
+def setup_ranger(env):
+    import params
+    env.set_params(params)
+
+    if check_db_connnection(env):
+        file_path = params.ranger_home + '/install.properties'
+        write_properties_to_file(file_path, params.config['configurations']['admin-properties'])
+    
+        cmd = format('cd {ranger_home} && sh setup.sh')
+        Execute(cmd, environment={'JAVA_HOME': params.java_home}, logoutput=True)
+
+def setup_usersync(env):
+    import params
+    env.set_params(params)
+
+    file_path = params.usersync_home + '/install.properties'
+    write_properties_to_file(file_path, usersync_properties(params))
+    
+    cmd = format('cd {usersync_home} && sh setup.sh')
+    Execute(cmd, environment={'JAVA_HOME': params.java_home}, logoutput=True)
+
+def write_properties_to_file(file_path, value):
+    for key in value:
+      modify_config(file_path, key, value[key])        
+
+def modify_config(filepath, variable, setting):
+    var_found = False
+    already_set = False
+    V=str(variable)
+    S=str(setting)
+
+    if ' ' in S:
+        S = '%s' % S
+
+    for line in fileinput.input(filepath, inplace = 1):
+        if not line.lstrip(' ').startswith('#') and '=' in line:
+            _infile_var = str(line.split('=')[0].rstrip(' '))
+            _infile_set = str(line.split('=')[1].lstrip(' ').rstrip())
+            if var_found == False and _infile_var.rstrip(' ') == V:
+                var_found = True
+                if _infile_set.lstrip(' ') == S:
+                    already_set = True
+                else:
+                    line = "%s=%s\n" % (V, S)
+
+        sys.stdout.write(line)
+
+    if not var_found:
+        with open(filepath, "a") as f:
+            f.write("%s=%s\n" % (V, S))
+    elif already_set == True:
+        pass
+    else:
+        pass
+
+    return
+
+def usersync_properties(params):
+    d = dict()
+
+    d['POLICY_MGR_URL'] = params.config['configurations']['admin-properties']['policymgr_external_url']
+
+    d['SYNC_SOURCE'] = params.config['configurations']['usersync-properties']['SYNC_SOURCE']
+    d['MIN_UNIX_USER_ID_TO_SYNC'] = params.config['configurations']['usersync-properties']['MIN_UNIX_USER_ID_TO_SYNC']
+    d['SYNC_INTERVAL'] = params.config['configurations']['usersync-properties']['SYNC_INTERVAL']
+    d['SYNC_LDAP_URL'] = params.config['configurations']['usersync-properties']['SYNC_LDAP_URL']
+    d['SYNC_LDAP_BIND_DN'] = params.config['configurations']['usersync-properties']['SYNC_LDAP_BIND_DN']
+    d['SYNC_LDAP_BIND_PASSWORD'] = params.config['configurations']['usersync-properties']['SYNC_LDAP_BIND_PASSWORD']
+    d['CRED_KEYSTORE_FILENAME'] = params.config['configurations']['usersync-properties']['CRED_KEYSTORE_FILENAME']
+    d['SYNC_LDAP_USER_SEARCH_BASE'] = params.config['configurations']['usersync-properties']['SYNC_LDAP_USER_SEARCH_BASE']
+    d['SYNC_LDAP_USER_SEARCH_SCOPE'] = params.config['configurations']['usersync-properties']['SYNC_LDAP_USER_SEARCH_SCOPE']
+    d['SYNC_LDAP_USER_OBJECT_CLASS'] = params.config['configurations']['usersync-properties']['SYNC_LDAP_USER_OBJECT_CLASS']
+    d['SYNC_LDAP_USER_SEARCH_FILTER'] = params.config['configurations']['usersync-properties']['SYNC_LDAP_USER_SEARCH_FILTER']
+    d['SYNC_LDAP_USER_NAME_ATTRIBUTE'] = params.config['configurations']['usersync-properties']['SYNC_LDAP_USER_NAME_ATTRIBUTE']
+    d['SYNC_LDAP_USER_GROUP_NAME_ATTRIBUTE'] = params.config['configurations']['usersync-properties']['SYNC_LDAP_USER_GROUP_NAME_ATTRIBUTE']
+    d['SYNC_LDAP_USERNAME_CASE_CONVERSION'] = params.config['configurations']['usersync-properties']['SYNC_LDAP_USERNAME_CASE_CONVERSION']
+    d['SYNC_LDAP_GROUPNAME_CASE_CONVERSION'] = params.config['configurations']['usersync-properties']['SYNC_LDAP_GROUPNAME_CASE_CONVERSION']
+    d['logdir'] = params.config['configurations']['usersync-properties']['logdir']
+
+    return d
+
+def check_db_connnection(env):
+    import params
+    env.set_params(params)
+    
+    db_root_password = params.config['configurations']['admin-properties']["db_root_password"]
+    db_root_user = params.config['configurations']['admin-properties']["db_root_user"]
+    db_host = params.config['configurations']['admin-properties']['db_host']
+    sql_command_invoker = params.config['configurations']['admin-properties']['SQL_COMMAND_INVOKER']
+
+    Logger.info('Checking MySQL root password')
+
+    cmd_str = "\""+sql_command_invoker+"\""+" -u "+db_root_user+" --password="+db_root_password+" -h "+db_host+" -s -e \"select version();\""
+    status, output = get_status_output(cmd_str)
+
+    if status == 0:
+        Logger.info('Checking MYSQL root password DONE')
+        return True 
+    else:
+        Logger.info('Ranger Admin installation failed: the Ranger host requires a DB client installed and running to set up the DB on the given host')
+        sys.exit(1)
+
+def get_status_output(cmd):
+    import subprocess
+
+    ret = subprocess.call(cmd, shell=True)
+    return ret, ret
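
write_properties_to_file()/modify_config() above rewrite install.properties in place: every key already present in the file gets its value replaced, and any key that is missing is appended. A compact standalone sketch of that behavior (illustrative only, without the fileinput-based in-place editing used above):

    # hypothetical helper mirroring the key=value rewrite performed above
    def write_properties(path, props):
        with open(path) as f:
            lines = f.readlines()
        seen, out = set(), []
        for line in lines:
            if '=' in line and not line.lstrip().startswith('#'):
                key = line.split('=', 1)[0].strip()
                if key in props:
                    out.append('%s=%s\n' % (key, props[key]))
                    seen.add(key)
                    continue
            out.append(line)
        for key, value in props.items():
            if key not in seen:
                out.append('%s=%s\n' % (key, value))
        with open(path, 'w') as f:
            f.writelines(out)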

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json b/ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json
index e8bbe32..9b6465a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json
@@ -3,6 +3,19 @@
   "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, 
blockerRole2-blockerCommand2, ...]",
   "general_deps" : {
     "_comment" : "dependencies for all cases",
+    "NAMENODE-INSTALL" : ["RANGER_ADMIN-INSTALL"],
+    "DATANODE-INSTALL" : ["RANGER_ADMIN-INSTALL"],
+    "SECONDARY_NAMENODE-INSTALL" : ["RANGER_ADMIN-INSTALL"],
+    "HDFS_CLIENT-INSTALL" : ["RANGER_ADMIN-INSTALL"],
+    "HBASE_MASTER-INSTALL" : ["RANGER_ADMIN-INSTALL"],
+    "HBASE_REGIONSERVER-INSTALL" : ["RANGER_ADMIN-INSTALL"],
+    "HIVE_SERVER-INSTALL" : ["RANGER_ADMIN-INSTALL"],
+    "KNOX_GATEWAY-INSTALL" : ["RANGER_ADMIN-INSTALL"],
+    "RANGER_USERSYNC-INSTALL" : ["RANGER_ADMIN-INSTALL"],
+    "NAMENODE-START" : ["RANGER_ADMIN-START"],
+    "HBASE_MASTER-START" : ["RANGER_ADMIN-START"],
+    "HIVE_SERVER-START" : ["RANGER_ADMIN-START"],
+    "KNOX_GATEWAY-START" : ["RANGER_ADMIN-START"],    
     "FALCON_SERVER-START": ["NAMENODE-START", "DATANODE-START", 
"OOZIE_SERVER-START"],
     "WEBHCAT_SERVICE_CHECK-SERVICE_CHECK": ["WEBHCAT_SERVER-START"],
     "FLUME_SERVICE_CHECK-SERVICE_CHECK": ["FLUME_HANDLER-START"],

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/ranger-hbase-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/ranger-hbase-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/ranger-hbase-plugin-properties.xml
new file mode 100644
index 0000000..d393d97
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/ranger-hbase-plugin-properties.xml
@@ -0,0 +1,150 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+
+        <property>
+                <name>ranger-hbase-plugin-enabled</name>
+                <value>Yes</value>
+                <description>Enable ranger hbase plugin ?</description>
+        </property>
+
+       <property>
+               <name>REPOSITORY_CONFIG_USERNAME</name>
+               <value>hbase</value>
+               <description></description>
+       </property>     
+
+       <property>
+               <name>REPOSITORY_CONFIG_PASSWORD</name>
+               <value>hbase</value>
+               <property-type>PASSWORD</property-type>
+               <description></description>
+       </property>     
+
+       <property>
+               <name>XAAUDIT.DB.IS_ENABLED</name>
+               <value>true</value>
+               <description></description>
+       </property>     
+
+       <property>
+               <name>XAAUDIT.HDFS.IS_ENABLED</name>
+               <value>false</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>XAAUDIT.HDFS.DESTINATION_DIRECTORY</name>
+               <value>hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY</name>
+               <value>__REPLACE__LOG_DIR/hadoop/%app-type%/audit</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY</name>
+               <value>__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>XAAUDIT.HDFS.DESTINTATION_FILE</name>
+               <value>%hostname%-audit.log</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS</name>
+               <value>900</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS</name>
+               <value>86400</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS</name>
+               <value>60</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>XAAUDIT.HDFS.LOCAL_BUFFER_FILE</name>
+               <value>%time:yyyyMMdd-HHmm.ss%.log</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS</name>
+               <value>60</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS</name>
+               <value>600</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT</name>
+               <value>10</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>SSL_KEYSTORE_FILE_PATH</name>
+               <value>/etc/hadoop/conf/ranger-plugin-keystore.jks</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>SSL_KEYSTORE_PASSWORD</name>
+               <value>myKeyFilePassword</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>SSL_TRUSTSTORE_FILE_PATH</name>
+               <value>/etc/hadoop/conf/ranger-plugin-truststore.jks</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>SSL_TRUSTSTORE_PASSWORD</name>
+               <value>changeit</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>UPDATE_XAPOLICIES_ON_GRANT_REVOKE</name>
+               <value>true</value>
+               <description></description>
+       </property>
+
+</configuration>       

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
new file mode 100644
index 0000000..4b549f9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
@@ -0,0 +1,144 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+
+        <property>
+               <name>ranger-hdfs-plugin-enabled</name>
+               <value>Yes</value>
+               <description>Enable ranger hdfs plugin ?</description>
+        </property>
+
+       <property>
+               <name>REPOSITORY_CONFIG_USERNAME</name>
+               <value>hadoop</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>REPOSITORY_CONFIG_PASSWORD</name>
+               <value>hadoop</value>
+               <property-type>PASSWORD</property-type>
+               <description></description>
+       </property>     
+
+       <property>
+               <name>XAAUDIT.DB.IS_ENABLED</name>
+               <value>true</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>XAAUDIT.HDFS.IS_ENABLED</name>
+               <value>false</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>XAAUDIT.HDFS.DESTINATION_DIRECTORY</name>
+               <value>hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY</name>
+               <value>__REPLACE__LOG_DIR/hadoop/%app-type%/audit</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY</name>
+               <value>__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>XAAUDIT.HDFS.DESTINTATION_FILE</name>
+               <value>%hostname%-audit.log</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS</name>
+               <value>900</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS</name>
+               <value>86400</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS</name>
+               <value>60</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>XAAUDIT.HDFS.LOCAL_BUFFER_FILE</name>
+               <value>%time:yyyyMMdd-HHmm.ss%.log</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS</name>
+               <value>60</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS</name>
+               <value>600</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT</name>
+               <value>10</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>SSL_KEYSTORE_FILE_PATH</name>
+               <value>/etc/hadoop/conf/ranger-plugin-keystore.jks</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>SSL_KEYSTORE_PASSWORD</name>
+               <value>myKeyFilePassword</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>SSL_TRUSTSTORE_FILE_PATH</name>
+               <value>/etc/hadoop/conf/ranger-plugin-truststore.jks</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>SSL_TRUSTSTORE_PASSWORD</name>
+               <value>changeit</value>
+               <description></description>
+       </property>
+
+</configuration>       

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/ranger-hive-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/ranger-hive-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/ranger-hive-plugin-properties.xml
new file mode 100644
index 0000000..6bf38de
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/ranger-hive-plugin-properties.xml
@@ -0,0 +1,150 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+
+        <property>
+                <name>ranger-hive-plugin-enabled</name>
+                <value>Yes</value>
+                <description>Enable ranger hive plugin ?</description>
+        </property>
+
+       <property>
+               <name>REPOSITORY_CONFIG_USERNAME</name>
+               <value>hive</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>REPOSITORY_CONFIG_PASSWORD</name>
+               <value>hive</value>
+               <property-type>PASSWORD</property-type>
+               <description></description>
+       </property>
+
+       <property>
+               <name>XAAUDIT.DB.IS_ENABLED</name>
+               <value>true</value>
+               <description></description>
+       </property>     
+
+       <property>
+               <name>XAAUDIT.HDFS.IS_ENABLED</name>
+               <value>false</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>XAAUDIT.HDFS.DESTINATION_DIRECTORY</name>
+               <value>hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY</name>
+               <value>__REPLACE__LOG_DIR/hadoop/%app-type%/audit</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY</name>
+               <value>__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>XAAUDIT.HDFS.DESTINTATION_FILE</name>
+               <value>%hostname%-audit.log</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS</name>
+               <value>900</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS</name>
+               <value>86400</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS</name>
+               <value>60</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>XAAUDIT.HDFS.LOCAL_BUFFER_FILE</name>
+               <value>%time:yyyyMMdd-HHmm.ss%.log</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS</name>
+               <value>60</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS</name>
+               <value>600</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT</name>
+               <value>10</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>SSL_KEYSTORE_FILE_PATH</name>
+               <value>/etc/hadoop/conf/ranger-plugin-keystore.jks</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>SSL_KEYSTORE_PASSWORD</name>
+               <value>myKeyFilePassword</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>SSL_TRUSTSTORE_FILE_PATH</name>
+               <value>/etc/hadoop/conf/ranger-plugin-truststore.jks</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>SSL_TRUSTSTORE_PASSWORD</name>
+               <value>changeit</value>
+               <description></description>
+       </property>
+
+       <property>
+               <name>UPDATE_XAPOLICIES_ON_GRANT_REVOKE</name>
+               <value>true</value>
+               <description></description>
+       </property>     
+
+</configuration>       

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/stacks/HDP/2.2/services/RANGER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/RANGER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/RANGER/metainfo.xml
new file mode 100644
index 0000000..5f91087
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/RANGER/metainfo.xml
@@ -0,0 +1,29 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<metainfo>
+    <schemaVersion>2.0</schemaVersion>
+    <services>
+        <service>
+            <name>RANGER</name>
+            <extends>common-services/RANGER/0.4.0</extends>            
+        </service>
+    </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json b/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json
index 05aba97..b1f5d73 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json
@@ -9,7 +9,7 @@
                 "dfs.support.append": "true", 
                 "dfs.namenode.http-address": "true"
             }
-        }, 
+        },
         "yarn-log4j": {}, 
         "hadoop-policy": {}, 
         "hdfs-log4j": {}, 
@@ -326,7 +326,13 @@
             "hadoop.proxyuser.oozie.hosts": "c6402.ambari.apache.org", 
             "ipc.client.connection.maxidletime": "30000", 
             "ipc.client.connect.max.retries": "50"
-        }, 
+        },
+        "ranger-hdfs-plugin-properties" : {
+            "ranger-hdfs-plugin-enabled":"yes"
+        },
+        "ranger-hbase-plugin-properties" : {
+            "ranger-hbase-plugin-enabled":"yes"
+        },
         "yarn-env": {
             "yarn_pid_dir_prefix": "/var/run/hadoop-yarn", 
             "apptimelineserver_heapsize": "1024", 
@@ -521,7 +527,8 @@
             "content": "\n# Set environment variables here.\n\n# The java 
implementation to use. Java 1.6 required.\nexport 
JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport 
HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH 
elements. Optional.\nexport HBASE_CLASSPATH=${HBASE_CLASSPATH}\n\n\n# The 
maximum amount of heap to use, in MB. Default is 1000.\n# export 
HBASE_HEAPSIZE=1000\n\n# Extra Java runtime options.\n# Below are what we set 
by default. May only work with SUN JVM.\n# For more on why as well as other 
possible settings,\n# see 
http://wiki.apache.org/hadoop/PerformanceTuning\nexport 
SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps 
-Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to enable 
java garbage collection logging.\n# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCDateStamps 
-Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to enable 
 JMX exporting\n# See jmxremote.password and jmxremote.access in 
$JRE_HOME/lib/management to configure remote password access.\n# More details 
at: 
http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# 
export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false 
-Dcom.sun.management.jmxremote.authenticate=false\"\n# If you want to configure 
BucketCache, specify '-XX: MaxDirectMemorySize=' with proper direct memory 
size\n# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE 
-Dcom.sun.management.jmxremote.port=10103\"\n# export 
HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE 
-Dcom.sun.management.jmxremote.port=10104\"\n\n# File naming hosts on which 
HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\nexport 
HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n\n# Extra ssh options. 
Empty by default.\n# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o 
SendEnv=HBASE_CONF_DIR\"\n\n# Where log files are stored. $HBASE_HOME/logs by 
default.\nexport HBASE_LOG_DIR={{l
 og_dir}}\n\n# A string representing this instance of hbase. $USER by 
default.\n# export HBASE_IDENT_STRING=$USER\n\n# The scheduling priority for 
daemon processes. See 'man nice'.\n# export HBASE_NICENESS=10\n\n# The 
directory where pid files are stored. /tmp by default.\nexport 
HBASE_PID_DIR={{pid_dir}}\n\n# Seconds to sleep between slave commands. Unset 
by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs 
can\n# otherwise arrive faster than the master can service them.\n# export 
HBASE_SLAVE_SLEEP=0.1\n\n# Tell HBase whether it should manage it's own 
instance of Zookeeper or not.\nexport HBASE_MANAGES_ZK=false\n\n{% if 
security_enabled %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{log_dir}}/hs_err_pid%p.log 
-Djava.security.auth.login.config={{client_jaas_config_file}}\"\nexport 
HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} 
-Djava.security.auth.login.config={{master_jaas_config_file}}\"\nexport 
HBASE_REGIONSERV
 ER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} 
-XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} 
-Xmx{{regionserver_heapsize}} 
-Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\n{% else 
%}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{log_dir}}/hs_err_pid%p.log\"\nexport 
HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}}\"\nexport 
HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS 
-Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  
-Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n{% endif %}", 
             "hbase_regionserver_heapsize": "1024m", 
             "hbase_log_dir": "/var/log/hbase"
-        }, 
+        },
+
         "zoo.cfg": {
             "clientPort": "2181", 
             "autopurge.purgeInterval": "24", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
index 82b485b..a70e90e 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
@@ -371,7 +371,13 @@
             "hive.server2.enable.doAs": "true",
             "hive.server2.authentication": "NOSASL",
             "hive.optimize.mapjoin.mapreduce": "true"
-        }, 
+        },
+        "ranger-hive-plugin-properties": {
+            "ranger-hive-plugin-enabled":"yes"
+        },
+        "ranger-knox-plugin-properties": {
+            "ranger-knox-plugin-enabled":"yes"
+        },
         "yarn-site": {
             "yarn.nodemanager.disk-health-checker.min-healthy-disks": "0.25", 
             "yarn.nodemanager.container-executor.class": 
"org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json
index f544b88..b0c962f 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json
@@ -330,7 +330,10 @@
             "hadoop.proxyuser.hcat.groups": "users", 
             "ipc.client.connection.maxidletime": "30000", 
             "ipc.client.connect.max.retries": "50"
-        }, 
+        },
+        "ranger-hive-plugin-properties" : {
+            "ranger-hive-plugin-enabled":"yes"
+        },
         "hive-site": {
             "hive.enforce.sorting": "true", 
             "javax.jdo.option.ConnectionPassword": "!`\"' 1", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
index 253747a..fdef520 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
@@ -227,7 +227,10 @@
             "ha.zookeeper.quorum": 
"c6401.ambari.apache.org:2181,c6402.ambari.apache.org:2181,c6403.ambari.apache.org:2181",
 
             "ipc.client.connection.maxidletime": "30000", 
             "hadoop.security.auth_to_local": "\n        
RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/\n        
RULE:[2:$1@$0](jhs@.*)s/.*/mapred/\n        
RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/\n        RULE:[2:$1@$0](hm@.*)s/.*/hbase/\n  
      RULE:[2:$1@$0](rs@.*)s/.*/hbase/\n        DEFAULT"
-        }, 
+        },
+         "ranger-hdfs-plugin-properties" : {
+            "ranger-hdfs-plugin-enabled":"yes"
+         },
         "hdfs-log4j": {
             "log4j.appender.DRFA.layout": "org.apache.log4j.PatternLayout", 
             "log4j.appender.DRFA.layout.ConversionPattern": "%d{ISO8601} %p 
%c: %m%n", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json
index 8e6b3d4..99fe020 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json
@@ -532,7 +532,10 @@
             "content": "\n# Set environment variables here.\n\n# The java 
implementation to use. Java 1.6 required.\nexport 
JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport 
HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH 
elements. Optional.\nexport HBASE_CLASSPATH=${HBASE_CLASSPATH}\n\n\n# The 
maximum amount of heap to use, in MB. Default is 1000.\n# export 
HBASE_HEAPSIZE=1000\n\n# Extra Java runtime options.\n# Below are what we set 
by default. May only work with SUN JVM.\n# For more on why as well as other 
possible settings,\n# see 
http://wiki.apache.org/hadoop/PerformanceTuning\nexport 
SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps 
-Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to enable 
java garbage collection logging.\n# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCDateStamps 
-Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to enable 
 JMX exporting\n# See jmxremote.password and jmxremote.access in 
$JRE_HOME/lib/management to configure remote password access.\n# More details 
at: 
http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# 
export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false 
-Dcom.sun.management.jmxremote.authenticate=false\"\n# If you want to configure 
BucketCache, specify '-XX: MaxDirectMemorySize=' with proper direct memory 
size\n# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE 
-Dcom.sun.management.jmxremote.port=10103\"\n# export 
HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE 
-Dcom.sun.management.jmxremote.port=10104\"\n\n# File naming hosts on which 
HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\nexport 
HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n\n# Extra ssh options. 
Empty by default.\n# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o 
SendEnv=HBASE_CONF_DIR\"\n\n# Where log files are stored. $HBASE_HOME/logs by 
default.\nexport HBASE_LOG_DIR={{l
 og_dir}}\n\n# A string representing this instance of hbase. $USER by 
default.\n# export HBASE_IDENT_STRING=$USER\n\n# The scheduling priority for 
daemon processes. See 'man nice'.\n# export HBASE_NICENESS=10\n\n# The 
directory where pid files are stored. /tmp by default.\nexport 
HBASE_PID_DIR={{pid_dir}}\n\n# Seconds to sleep between slave commands. Unset 
by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs 
can\n# otherwise arrive faster than the master can service them.\n# export 
HBASE_SLAVE_SLEEP=0.1\n\n# Tell HBase whether it should manage it's own 
instance of Zookeeper or not.\nexport HBASE_MANAGES_ZK=false\n\n{% if 
security_enabled %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{log_dir}}/hs_err_pid%p.log 
-Djava.security.auth.login.config={{client_jaas_config_file}}\"\nexport 
HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} 
-Djava.security.auth.login.config={{master_jaas_config_file}}\"\nexport 
HBASE_REGIONSERV
 ER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} 
-XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} 
-Xmx{{regionserver_heapsize}} 
-Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\n{% else 
%}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{log_dir}}/hs_err_pid%p.log\"\nexport 
HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}}\"\nexport 
HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS 
-Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  
-Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n{% endif %}", 
             "hbase_regionserver_heapsize": "1024m", 
             "hbase_log_dir": "/var/log/hbase"
-        }, 
+        },
+        "ranger-hbase-plugin-properties": {
+            "ranger-hbase-plugin-enabled":"yes"
+        },
         "ganglia-env": {
             "gmond_user": "nobody", 
             "ganglia_runtime_dir": "/var/run/ganglia/hdp", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json
index 410e70e..f48863b 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json
@@ -527,7 +527,10 @@
             "content": "\n# Set environment variables here.\n\n# The java 
implementation to use. Java 1.6 required.\nexport 
JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport 
HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH 
elements. Optional.\nexport HBASE_CLASSPATH=${HBASE_CLASSPATH}\n\n\n# The 
maximum amount of heap to use, in MB. Default is 1000.\n# export 
HBASE_HEAPSIZE=1000\n\n# Extra Java runtime options.\n# Below are what we set 
by default. May only work with SUN JVM.\n# For more on why as well as other 
possible settings,\n# see 
http://wiki.apache.org/hadoop/PerformanceTuning\nexport 
SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps 
-Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to enable 
java garbage collection logging.\n# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCDateStamps 
-Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to enable 
 JMX exporting\n# See jmxremote.password and jmxremote.access in 
$JRE_HOME/lib/management to configure remote password access.\n# More details 
at: 
http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# 
export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false 
-Dcom.sun.management.jmxremote.authenticate=false\"\n# If you want to configure 
BucketCache, specify '-XX: MaxDirectMemorySize=' with proper direct memory 
size\n# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE 
-Dcom.sun.management.jmxremote.port=10103\"\n# export 
HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE 
-Dcom.sun.management.jmxremote.port=10104\"\n\n# File naming hosts on which 
HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\nexport 
HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n\n# Extra ssh options. 
Empty by default.\n# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o 
SendEnv=HBASE_CONF_DIR\"\n\n# Where log files are stored. $HBASE_HOME/logs by 
default.\nexport HBASE_LOG_DIR={{l
 og_dir}}\n\n# A string representing this instance of hbase. $USER by 
default.\n# export HBASE_IDENT_STRING=$USER\n\n# The scheduling priority for 
daemon processes. See 'man nice'.\n# export HBASE_NICENESS=10\n\n# The 
directory where pid files are stored. /tmp by default.\nexport 
HBASE_PID_DIR={{pid_dir}}\n\n# Seconds to sleep between slave commands. Unset 
by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs 
can\n# otherwise arrive faster than the master can service them.\n# export 
HBASE_SLAVE_SLEEP=0.1\n\n# Tell HBase whether it should manage it's own 
instance of Zookeeper or not.\nexport HBASE_MANAGES_ZK=false\n\n{% if 
security_enabled %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{log_dir}}/hs_err_pid%p.log 
-Djava.security.auth.login.config={{client_jaas_config_file}}\"\nexport 
HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} 
-Djava.security.auth.login.config={{master_jaas_config_file}}\"\nexport 
HBASE_REGIONSERV
 ER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} 
-XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} 
-Xmx{{regionserver_heapsize}} 
-Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\n{% else 
%}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{log_dir}}/hs_err_pid%p.log\"\nexport 
HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}}\"\nexport 
HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS 
-Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  
-Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n{% endif %}", 
             "hbase_regionserver_heapsize": "1024m", 
             "hbase_log_dir": "/var/log/hbase"
-        }, 
+        },
+        "ranger-hbase-plugin-properties": {
+            "ranger-hbase-plugin-enabled":"yes"
+        },
         "ganglia-env": {
             "gmond_user": "nobody", 
             "ganglia_runtime_dir": "/var/run/ganglia/hdp", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-preupgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-preupgrade.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-preupgrade.json
index ee46527..1a1af3e 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-preupgrade.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-preupgrade.json
@@ -81,7 +81,10 @@
             "content": "\n# Set environment variables here.\n\n# The java 
implementation to use. Java 1.6 required.\nexport 
JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport 
HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH 
elements. Optional.\nexport HBASE_CLASSPATH=${HBASE_CLASSPATH}\n\n\n# The 
maximum amount of heap to use, in MB. Default is 1000.\n# export 
HBASE_HEAPSIZE=1000\n\n# Extra Java runtime options.\n# Below are what we set 
by default. May only work with SUN JVM.\n# For more on why as well as other 
possible settings,\n# see 
http://wiki.apache.org/hadoop/PerformanceTuning\nexport 
SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps 
-Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to enable 
java garbage collection logging.\n# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCDateStamps 
-Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to enable 
 JMX exporting\n# See jmxremote.password and jmxremote.access in 
$JRE_HOME/lib/management to configure remote password access.\n# More details 
at: 
http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# 
export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false 
-Dcom.sun.management.jmxremote.authenticate=false\"\n# If you want to configure 
BucketCache, specify '-XX: MaxDirectMemorySize=' with proper direct memory 
size\n# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE 
-Dcom.sun.management.jmxremote.port=10103\"\n# export 
HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE 
-Dcom.sun.management.jmxremote.port=10104\"\n\n# File naming hosts on which 
HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\nexport 
HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n\n# Extra ssh options. 
Empty by default.\n# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o 
SendEnv=HBASE_CONF_DIR\"\n\n# Where log files are stored. $HBASE_HOME/logs by 
default.\nexport HBASE_LOG_DIR={{l
 og_dir}}\n\n# A string representing this instance of hbase. $USER by 
default.\n# export HBASE_IDENT_STRING=$USER\n\n# The scheduling priority for 
daemon processes. See 'man nice'.\n# export HBASE_NICENESS=10\n\n# The 
directory where pid files are stored. /tmp by default.\nexport 
HBASE_PID_DIR={{pid_dir}}\n\n# Seconds to sleep between slave commands. Unset 
by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs 
can\n# otherwise arrive faster than the master can service them.\n# export 
HBASE_SLAVE_SLEEP=0.1\n\n# Tell HBase whether it should manage it's own 
instance of Zookeeper or not.\nexport HBASE_MANAGES_ZK=false\n\n{% if 
security_enabled %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{log_dir}}/hs_err_pid%p.log 
-Djava.security.auth.login.config={{client_jaas_config_file}}\"\nexport 
HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} 
-Djava.security.auth.login.config={{master_jaas_config_file}}\"\nexport 
HBASE_REGIONSERV
 ER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} 
-XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} 
-Xmx{{regionserver_heapsize}} 
-Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\n{% else 
%}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{log_dir}}/hs_err_pid%p.log\"\nexport 
HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}}\"\nexport 
HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS 
-Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  
-Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n{% endif %}", 
             "hbase_regionserver_heapsize": "1024m", 
             "hbase_log_dir": "/var/log/hbase"
-        }, 
+        },
+        "ranger-hbase-plugin-properties": {
+            "ranger-hbase-plugin-enabled":"yes"
+        },
         "cluster-env": {
             "security_enabled": "false", 
             "hive_tar_source": "/usr/hdp/current/hive-client/hive.tar.gz", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json
index 89face6..4cf5cd7 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json
@@ -532,7 +532,10 @@
             "content": "\n# Set environment variables here.\n\n# The java 
implementation to use. Java 1.6 required.\nexport 
JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport 
HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH 
elements. Optional.\nexport HBASE_CLASSPATH=${HBASE_CLASSPATH}\n\n\n# The 
maximum amount of heap to use, in MB. Default is 1000.\n# export 
HBASE_HEAPSIZE=1000\n\n# Extra Java runtime options.\n# Below are what we set 
by default. May only work with SUN JVM.\n# For more on why as well as other 
possible settings,\n# see 
http://wiki.apache.org/hadoop/PerformanceTuning\nexport 
SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps 
-Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to enable 
java garbage collection logging.\n# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCDateStamps 
-Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to enable 
 JMX exporting\n# See jmxremote.password and jmxremote.access in 
$JRE_HOME/lib/management to configure remote password access.\n# More details 
at: 
http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# 
export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false 
-Dcom.sun.management.jmxremote.authenticate=false\"\n# If you want to configure 
BucketCache, specify '-XX: MaxDirectMemorySize=' with proper direct memory 
size\n# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE 
-Dcom.sun.management.jmxremote.port=10103\"\n# export 
HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE 
-Dcom.sun.management.jmxremote.port=10104\"\n\n# File naming hosts on which 
HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\nexport 
HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n\n# Extra ssh options. 
Empty by default.\n# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o 
SendEnv=HBASE_CONF_DIR\"\n\n# Where log files are stored. $HBASE_HOME/logs by 
default.\nexport HBASE_LOG_DIR={{log_dir}}\n\n# A string representing this instance of hbase. $USER by 
default.\n# export HBASE_IDENT_STRING=$USER\n\n# The scheduling priority for 
daemon processes. See 'man nice'.\n# export HBASE_NICENESS=10\n\n# The 
directory where pid files are stored. /tmp by default.\nexport 
HBASE_PID_DIR={{pid_dir}}\n\n# Seconds to sleep between slave commands. Unset 
by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs 
can\n# otherwise arrive faster than the master can service them.\n# export 
HBASE_SLAVE_SLEEP=0.1\n\n# Tell HBase whether it should manage it's own 
instance of Zookeeper or not.\nexport HBASE_MANAGES_ZK=false\n\n{% if 
security_enabled %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{log_dir}}/hs_err_pid%p.log 
-Djava.security.auth.login.config={{client_jaas_config_file}}\"\nexport 
HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} 
-Djava.security.auth.login.config={{master_jaas_config_file}}\"\nexport 
HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} 
-XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} 
-Xmx{{regionserver_heapsize}} 
-Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\n{% else 
%}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{log_dir}}/hs_err_pid%p.log\"\nexport 
HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}}\"\nexport 
HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS 
-Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  
-Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n{% endif %}", 
             "hbase_regionserver_heapsize": "1024m", 
             "hbase_log_dir": "/var/log/hbase"
-        }, 
+        },
+        "ranger-hbase-plugin-properties": {
+            "ranger-hbase-plugin-enabled":"yes"
+        },
         "ganglia-env": {
             "gmond_user": "nobody", 
             "ganglia_runtime_dir": "/var/run/ganglia/hdp", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json 
b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
index d48b0ab..8e93823 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
@@ -367,7 +367,13 @@
             "hadoop.security.auth_to_local": 
"RULE:[2:$1@$0](rm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](dn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](hbase@.*EXAMPLE.COM)s/.*/hbase/\nRULE:[2:$1@$0](hbase@.*EXAMPLE.COM)s/.*/hbase/\nRULE:[2:$1@$0](oozie@.*EXAMPLE.COM)s/.*/oozie/\nRULE:[2:$1@$0](jhs@.*EXAMPLE.COM)s/.*/mapred/\nRULE:[2:$1@$0](jn/_HOST@.*EXAMPLE.COM)s/.*/hdfs/\nDEFAULT",
 
             "hadoop.proxyuser.oozie.hosts": "c6402.ambari.apache.org", 
             "ipc.client.connection.maxidletime": "30000"
-        }, 
+        },
+        "ranger-hdfs-plugin-properties" : {
+            "ranger-hdfs-plugin-enabled":"yes"
+        },
+        "ranger-hive-plugin-properties" : {
+            "ranger-hive-plugin-enabled":"yes"
+        },
         "hive-site": {
             "hive.enforce.sorting": "true", 
             "javax.jdo.option.ConnectionPassword": "!`\"' 1", 

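A quick way to sanity-check the edited fixtures is to parse them and assert that the new blocks are present. The snippet below is only a sketch; the file paths and the top-level "configurations" key are assumptions based on how these test configs are usually laid out:

# Hedged sketch: confirm the new Ranger plugin blocks parse and carry "yes".
import json

fixtures = {
  "ambari-server/src/test/python/stacks/2.0.6/configs/secured.json":
      ["ranger-hdfs-plugin-properties", "ranger-hive-plugin-properties"],
  "ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json":
      ["ranger-hive-plugin-properties"],
}

for path, blocks in fixtures.items():
  with open(path) as f:
    cfg = json.load(f)
  for block in blocks:
    # e.g. ranger-hive-plugin-properties -> ranger-hive-plugin-enabled
    flag = block.replace("-properties", "-enabled")
    assert cfg["configurations"][block][flag] == "yes", path
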
http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json 
b/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json
index 69c7b4d..6714686 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json
@@ -365,7 +365,10 @@
             "hadoop.security.auth_to_local": 
"RULE:[2:$1@$0](rm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](dn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](hbase@.*EXAMPLE.COM)s/.*/hbase/\nRULE:[2:$1@$0](hbase@.*EXAMPLE.COM)s/.*/hbase/\nRULE:[2:$1@$0](oozie@.*EXAMPLE.COM)s/.*/oozie/\nRULE:[2:$1@$0](jhs@.*EXAMPLE.COM)s/.*/mapred/\nRULE:[2:$1@$0](jn/_HOST@.*EXAMPLE.COM)s/.*/hdfs/\nDEFAULT",
 
             "hadoop.proxyuser.oozie.hosts": "c6402.ambari.apache.org", 
             "ipc.client.connection.maxidletime": "30000"
-        }, 
+        },
+        "ranger-hive-plugin-properties" : {
+            "ranger-hive-plugin-enabled":"yes"
+        },
         "hive-site": {
             "hive.enforce.sorting": "true", 
             "javax.jdo.option.ConnectionPassword": "!`\"' 1", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/test/python/stacks/2.2/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/default.json 
b/ambari-server/src/test/python/stacks/2.2/configs/default.json
index 28a17ae..d37011d 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/default.json
@@ -149,8 +149,13 @@
         "kafka.ganglia.metrics.port": "8649",
         "log.index.interval.bytes": "4096",
         "log.retention.hours": "168"
-      }
-
+      },
+      "ranger-hbase-plugin-properties": {
+            "ranger-hbase-plugin-enabled":"yes"
+      },
+      "ranger-hive-plugin-properties": {
+            "ranger-hive-plugin-enabled":"yes"
+        }
 
     },
     "configuration_attributes": {

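Once a toggle like ranger-hbase-plugin-enabled is surfaced in params, a service script can branch on it. The sketch below shows that pattern only for illustration; params.enable_ranger_hbase and the log messages are hypothetical and not part of this commit:

# Hedged sketch: act on the plugin toggle inside a service script.
from resource_management.core.logger import Logger
import params  # hypothetical module exposing enable_ranger_hbase (see earlier sketch)

def setup_ranger_hbase():
  if params.enable_ranger_hbase:
    Logger.info("Ranger HBase plugin enabled; applying plugin configuration")
    # Plugin wiring would go here; omitted from this sketch.
  else:
    Logger.info("Ranger HBase plugin disabled; skipping plugin setup")
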
http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json 
b/ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json
index 35aedc0..8783b64 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json
@@ -312,6 +312,11 @@
             "hadoop.security.auth_to_local": "\n        
RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/\n        
RULE:[2:$1@$0](jhs@.*)s/.*/mapred/\n        
RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/\n        RULE:[2:$1@$0](hm@.*)s/.*/hbase/\n  
      RULE:[2:$1@$0](rs@.*)s/.*/hbase/\n        DEFAULT",
             "ipc.client.connection.maxidletime": "30000"
         },
+
+        "ranger-hive-plugin-properties" : {
+            "ranger-hive-plugin-enabled":"yes"
+        },
+
         "hadoop-env": {
             "dtnode_heapsize": "1024m",
             "namenode_opt_maxnewsize": "200m",

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-web/app/config.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/config.js b/ambari-web/app/config.js
index 8e2e562..a22430a 100644
--- a/ambari-web/app/config.js
+++ b/ambari-web/app/config.js
@@ -61,6 +61,7 @@ App.supports = {
   autoRollbackHA: false,
   alwaysEnableManagedMySQLForHive: false,
   automatedKerberos: false,
+  ranger: false,
   customizeAgentUserAccount: false,
   installGanglia: false
 };
