AMBARI-14628 - Create JAAS config for Atlas. (tbeerbower)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7cacc409
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7cacc409
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7cacc409

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 7cacc4093285db5a7271df2d8939c767556fda46
Parents: b641d52
Author: tbeerbower <[email protected]>
Authored: Wed Jan 13 16:26:28 2016 -0500
Committer: tbeerbower <[email protected]>
Committed: Wed Jan 13 16:26:28 2016 -0500

----------------------------------------------------------------------
 .../ATLAS/0.1.0.2.3/configuration/atlas-env.xml |   6 +-
 .../ATLAS/0.1.0.2.3/package/scripts/metadata.py |   5 +
 .../ATLAS/0.1.0.2.3/package/scripts/params.py   |   9 +
 .../package/templates/atlas_jaas.conf.j2        |  26 ++
 .../stacks/2.3/ATLAS/test_metadata_server.py    |  15 +
 .../test/python/stacks/2.3/configs/secure.json  | 308 +++++++++++++++++++
 6 files changed, 368 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/7cacc409/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/configuration/atlas-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/configuration/atlas-env.xml b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/configuration/atlas-env.xml
index 2935e8f..c3cb367 100644
--- a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/configuration/atlas-env.xml
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/configuration/atlas-env.xml
@@ -69,9 +69,13 @@ export JAVA_HOME={{java64_home}}
 
 # any additional java opts you want to set. This will apply to both client and server operations
 
+{% if security_enabled %}
+export METADATA_OPTS="{{metadata_opts}} -Djava.security.auth.login.config={{atlas_jaas_file}}"
+{% else %}
 export METADATA_OPTS="{{metadata_opts}}"
+{% endif %}
 
-# metadata configuration directory
+# metadata configuration directory
 export METADATA_CONF={{conf_dir}}
 
 # Where log files are stored. Defatult is logs directory under the base install location
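For reviewers who want to sanity-check the new conditional: it is plain Jinja2, so it can be previewed outside Ambari. A minimal sketch (not part of this commit) using the jinja2 package; the option values below are illustrative assumptions, not stack defaults.

from jinja2 import Template

# Sketch only: preview how the atlas-env.sh snippet renders in each branch.
env = Template(
    '{% if security_enabled %}\n'
    'export METADATA_OPTS="{{metadata_opts}} -Djava.security.auth.login.config={{atlas_jaas_file}}"\n'
    '{% else %}\n'
    'export METADATA_OPTS="{{metadata_opts}}"\n'
    '{% endif %}',
    trim_blocks=True)

# Secure cluster: the JAAS file location is appended to the daemon's JVM options.
print(env.render(security_enabled=True, metadata_opts='-Xmx1024m',
                 atlas_jaas_file='/etc/atlas/conf/atlas_jaas.conf'))
# export METADATA_OPTS="-Xmx1024m -Djava.security.auth.login.config=/etc/atlas/conf/atlas_jaas.conf"

# Non-secure cluster: behavior is unchanged.
print(env.render(security_enabled=False, metadata_opts='-Xmx1024m'))
# export METADATA_OPTS="-Xmx1024m"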
http://git-wip-us.apache.org/repos/asf/ambari/blob/7cacc409/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata.py b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata.py
index 8c17214..6df47b0 100644
--- a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata.py
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata.py
@@ -20,6 +20,7 @@ limitations under the License.
 from resource_management import Directory, Fail, Logger, File, \
     InlineTemplate, PropertiesFile, StaticFile
 from resource_management.libraries.functions import format
+from resource_management.libraries.resources.template_config import TemplateConfig
 
 
 def metadata():
@@ -89,3 +90,7 @@ def metadata():
        group=params.user_group,
        content=StaticFile('atlas-log4j.xml')
   )
+
+  if params.security_enabled:
+    TemplateConfig(format(params.atlas_jaas_file),
+                   owner=params.metadata_user)


http://git-wip-us.apache.org/repos/asf/ambari/blob/7cacc409/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
index df8b772..1a0c67b 100644
--- a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
@@ -31,6 +31,12 @@ config = Script.get_config()
 
 # security enabled
 security_enabled = status_params.security_enabled
+if security_enabled:
+  _hostname_lowercase = config['hostname'].lower()
+  _atlas_principal_name = config['configurations']['application-properties']['atlas.authentication.principal']
+  atlas_jaas_principal = _atlas_principal_name.replace('_HOST',_hostname_lowercase)
+  atlas_keytab_path = config['configurations']['application-properties']['atlas.authentication.keytab']
+
 stack_name = default("/hostLevelParams/stack_name", None)
 
 # New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
@@ -54,6 +60,9 @@ conf_dir = status_params.conf_dir # "/etc/metadata/conf"
 
 # service locations
 hadoop_conf_dir = os.path.join(os.environ["HADOOP_HOME"], "conf") if 'HADOOP_HOME' in os.environ else '/etc/hadoop/conf'
+# some commands may need to supply the JAAS location when running as atlas
+atlas_jaas_file = format("{conf_dir}/atlas_jaas.conf")
+
 # user and status
 metadata_user = status_params.metadata_user
 user_group = config['configurations']['cluster-env']['user_group']
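The one non-obvious step above is the _HOST expansion in params.py: Kerberos principals in Ambari configurations may carry a _HOST placeholder that each host swaps for its own lowercased hostname, so a single configured value works across the cluster. A standalone sketch of that logic (the principal and hostname values are hypothetical examples):

# Sketch of the _HOST expansion performed in params.py (hypothetical values).
def expand_principal(principal, hostname):
  # Each host substitutes its own lowercased FQDN for the _HOST placeholder.
  return principal.replace('_HOST', hostname.lower())

print(expand_principal('atlas/[email protected]', 'C6401.AMBARI.APACHE.ORG'))
# atlas/[email protected]

# A principal with no placeholder (like the plain "atlas" in the secure.json
# fixture below) passes through unchanged.
print(expand_principal('atlas', 'C6401.AMBARI.APACHE.ORG'))
# atlas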
http://git-wip-us.apache.org/repos/asf/ambari/blob/7cacc409/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/templates/atlas_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/templates/atlas_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/templates/atlas_jaas.conf.j2
new file mode 100644
index 0000000..68eb088
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/templates/atlas_jaas.conf.j2
@@ -0,0 +1,26 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+Client {
+  com.sun.security.auth.module.Krb5LoginModule required
+  useKeyTab=true
+  useTicketCache=false
+  storeKey=true
+  doNotPrompt=false
+  keyTab="{{atlas_keytab_path}}"
+  principal="{{atlas_jaas_principal}}";
+};
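Rendered, this template yields the standard Krb5LoginModule Client section that the JVM resolves once -Djava.security.auth.login.config points at the file. A quick preview sketch with the jinja2 package (the template path, keytab, and principal are sample assumptions; in the commit itself the rendering is done by the TemplateConfig resource):

from jinja2 import Template

# Sketch: render atlas_jaas.conf.j2 locally to preview the generated file.
with open('atlas_jaas.conf.j2') as f:
  jaas = Template(f.read())

print(jaas.render(atlas_keytab_path='/etc/security/keytabs/atlas.service.keytab',
                  atlas_jaas_principal='atlas/[email protected]'))
# Expected shape of the output:
# Client {
#   com.sun.security.auth.module.Krb5LoginModule required
#   useKeyTab=true
#   useTicketCache=false
#   storeKey=true
#   doNotPrompt=false
#   keyTab="/etc/security/keytabs/atlas.service.keytab"
#   principal="atlas/[email protected]";
# };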
#\n # Licensed to the Apache Software Foundation (ASF) under one\n # or more contributor license agreements. See the NOTICE file\n # distributed with this work for additional information\n # regarding copyright ownership. The ASF licenses this file\n # to you under the Apache License, Version 2.0 (the\n # \"License\"); you may not use this file except in compliance\n # with the License. You may obtain a copy of the License at\n #\n # http://www.apache.org/licenses/LICENSE-2.0\n #\n # Unless required by applicable law or agreed to in writing,\n # software distributed under the License is distributed on an\n # \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n # KIND, either express or implied. See the License for the\n # specific language governing permissions a nd limitations\n # under the License.\n #\n #\n #\n\n # Set everything to be logged to the console\n log4j.rootCategory=WARN, console\n log4j.appender.console=org.apache.log4j.ConsoleAppender\n log4j.appender.console.target=System.err\n log4j.appender.console.layout=org.apache.log4j.PatternLayout\n log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n\n\n # Settings to quiet third party logs that are too verbose\n log4j.logger.org.eclipse.jetty=WARN\n log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=WARN\n log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=WARN" + }, + "hadoop-env": { + "hdfs_user": "hdfs" + }, + "core-site": { + "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020" + }, + "hdfs-site": { + "a": "b" + }, + "yarn-site": { + "yarn.application.classpath": "/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*", + "yarn.resourcemanager.address": "c6401.ambari.apache.org:8050", + "yarn.resourcemanager.scheduler.address": "c6401.ambari.apache.org:8030" + }, + "cluster-env": { + "security_enabled": "true", + "ignore_groupsusers_create": "false", + "smokeuser": "ambari-qa", + "kerberos_domain": "EXAMPLE.COM", + "user_group": "hadoop" + }, + "webhcat-site": { + "templeton.jar": "/usr/hdp/current/hive-webhcat/share/webhcat/svr/lib/hive-webhcat-*.jar", + "templeton.pig.archive": "hdfs:///hdp/apps/{{ hdp_stack_version }}/pig/pig.tar.gz", + "templeton.hive.archive": "hdfs:///hdp/apps/{{ hdp_stack_version }}/hive/hive.tar.gz", + "templeton.sqoop.archive": "hdfs:///hdp/apps/{{ hdp_stack_version }}/sqoop/sqoop.tar.gz", + "templeton.streaming.jar": "hdfs:///hdp/apps/{{ hdp_stack_version }}/mr/hadoop-streaming.jar" + }, + "slider-log4j": { + "content": "log4jproperties\nline2" + }, + "slider-env": { + "content": "envproperties\nline2" + }, + "gateway-site": { + "java.security.auth.login.config": "/etc/knox/conf/krb5JAASLogin.conf", + "gateway.hadoop.kerberos.secured": "false", + "gateway.gateway.conf.dir": "deployments", + "gateway.path": "gateway", + "sun.security.krb5.debug": "true", + "java.security.krb5.conf": "/etc/knox/conf/krb5.conf", + "gateway.port": "8443" + }, + + "users-ldif": { + "content": "\n # Licensed to the Apache Software Foundation (ASF) under one\n # or more contributor license agreements. See the NOTICE file\n # distributed with this work for additional information\n # regarding copyright ownership. The ASF licenses this file\n # to you under the Apache License, Version 2.0 (the\n # \"License\"); you may not use this file except in compliance\n # with the License. 
You may obtain a copy of the License at\n #\n # http://www.apache.org/licenses/LICENSE-2.0\n #\n # Unless required by applicable law or agreed to in writing, software\n # distributed under the License is distributed on an \"AS IS\" BASIS,\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the Lice nse.\n\n version: 1\n\n # Please replace with site specific values\n dn: dc=hadoop,dc=apache,dc=org\n objectclass: organization\n objectclass: dcObject\n o: Hadoop\n dc: hadoop\n\n # Entry for a sample people container\n # Please replace with site specific values\n dn: ou=people,dc=hadoop,dc=apache,dc=org\n objectclass:top\n objectclass:organizationalUnit\n ou: people\n\n # Entry for a sample end user\n # Please replace with site specific values\n dn: uid=guest,ou=people,dc=hadoop,dc=apache,dc=org\n objectclass:top\n objectclass:person\n objectclass:organizationalPerson\n objectclass:inetOrgPerson\n cn: Guest\n sn: User\n uid: guest\n userPassword:guest-password\n\n # entry for sample user admin\n dn: uid=admin,ou=people,dc=hadoop,dc=apache,dc=org\n objectclass:top\n objectclass:person\n objectclass:organizationalPerson\n objectclass:inetOrgPerson\n cn: Admin\n sn: Admin\n uid: admin\n userPassword:admin-password\n\n # entry for sample user sam\n dn: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org\n objectclass:top\n objectclass:person\n objectclass:organizationalPerson\n objectclass:inetOrgPerson\n cn: sam\n sn: sam\n uid: sam\n userPassword:sam-password\n\n # entry for sample user tom\n dn: uid=tom,ou=people,dc=hadoop,dc=apache,dc=org\n objectclass:top\n objectclass:person\n objectclass:organizationalPerson\n objectclass:inetOrgPerson\n cn: tom\n sn: tom\n uid: tom\n userPasswor d:tom-password\n\n # create FIRST Level groups branch\n dn: ou=groups,dc=hadoop,dc=apache,dc=org\n objectclass:top\n objectclass:organizationalUnit\n ou: groups\n description: generic groups branch\n\n # create the analyst group under groups\n dn: cn=analyst,ou=groups,dc=hadoop,dc=apache,dc=org\n objectclass:top\n objectclass: groupofnames\n cn: analyst\n description:analyst group\n member: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org\n member: uid=tom,ou=people,dc=hadoop,dc=apache,dc=org\n\n\n # create the scientist group under groups\n dn: cn=scientist,ou=groups,dc=hadoop,dc=apache,dc=org\n objectclass:top\n objectclass: groupofnames\n cn: scientist\n description: scientist group\n member: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org" + }, + + "topology": { + "content": "\n <topology>\n\n <gateway>\n\n <provider>\n <role>authentication</role>\n <name>ShiroProvider</name>\n <enabled>true</enabled>\n <param>\n <name>sessionTimeout</name>\n <value>30</value>\n </param>\n <param>\n <name>main.ldapRealm</name>\n <value>org.apache.hadoop.gateway.shirorealm.KnoxLdapRealm</value>\n </param>\n <param>\n <name>main.ldapRealm.userDnTemplate</name>\n <value>uid={0},ou=people,dc=hadoop,dc=apache,dc=org</value>\n </param>\n <param>\n <name>main.ldapRealm.contextFactory.url</name>\n <value>ldap://{{knox_host_name}}:33389</value>\n </param>\n <param>\n <name>main.ldapRealm.contextFactory.authenticationMechanism</name>\n <value>simple</value>\n </param>\n <param>\n <name>urls./**</name>\n <value>authcBasic</value>\n </param>\n </provider>\n\n <provider>\n <role>identity-assertion</role>\n <name>Default</name>\n <enabled>true</enabled>\n </provider>\n\n </gateway>\n\n <service>\n <role>NAMENODE</role>\n 
<url>hdfs://{{namenode_host}}:{{namenode_rpc_port}}</url>\n </service>\n\n <service>\n <role>JOBTRACKER</role>\n <url>rpc://{{rm_host}}:{{jt_rpc_port}}</url>\n </service>\n\n <service>\n <role>WEBHDFS</role >\n ><url>http://{{namenode_host}}:{{namenode_http_port}}/webhdfs</url>\n > </service>\n\n <service>\n <role>WEBHCAT</role>\n > ><url>http://{{webhcat_server_host}}:{{templeton_port}}/templeton</url>\n > </service>\n\n <service>\n ><role>OOZIE</role>\n ><url>http://{{oozie_server_host}}:{{oozie_server_port}}/oozie</url>\n > </service>\n\n <service>\n ><role>WEBHBASE</role>\n ><url>http://{{hbase_master_host}}:{{hbase_master_port}}</url>\n ></service>\n\n <service>\n <role>HIVE</role>\n > ><url>http://{{hive_server_host}}:{{hive_http_port}}/{{hive_http_path}}</url>\n > </service>\n\n <service>\n ><role>RESOURCEMANAGER</role>\n ><url>http://{{rm_host}}:{{rm_port}}/ws</url>\n </service>\n ></topology>" + }, + + "ldap-log4j": { + "content": "\n # Licensed to the Apache Software Foundation (ASF) under one\n # or more contributor license agreements. See the NOTICE file\n # distributed with this work for additional information\n # regarding copyright ownership. The ASF licenses this file\n # to you under the Apache License, Version 2.0 (the\n # \"License\"); you may not use this file except in compliance\n # with the License. You may obtain a copy of the License at\n #\n # http://www.apache.org/licenses/LICENSE-2.0\n #\n # Unless required by applicable law or agreed to in writing, software\n # distributed under the License is distributed on an \"AS IS\" BASIS,\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n #testing\n\n app.log.dir=${launcher.dir }/../logs\n app.log.file=${launcher.name}.log\n\n log4j.rootLogger=ERROR, drfa\n log4j.logger.org.apache.directory.server.ldap.LdapServer=INFO\n log4j.logger.org.apache.directory=WARN\n\n log4j.appender.stdout=org.apache.log4j.ConsoleAppender\n log4j.appender.stdout.layout=org.apache.log4j.PatternLayout\n log4j.appender.stdout.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender\n log4j.appender.drfa.File=${app.log.dir}/${app.log.file}\n log4j.appender.drfa.DatePattern=.yyyy-MM-dd\n log4j.appender.drfa.layout=org.apache.log4j.PatternLayout\n log4j.appender.drfa.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n" + }, + + "gateway-log4j": { + "content": "\n\n # Licensed to the Apache Software Foundation (ASF) under one\n # or more contributor license agreements. See the NOTICE file\n # distributed with this work for additional information\n # regarding copyright ownership. The ASF licenses this file\n # to you under the Apache License, Version 2.0 (the\n # \"License\"); you may not use this file except in compliance\n # with the License. 
You may obtain a copy of the License at\n #\n # http://www.apache.org/licenses/LICENSE-2.0\n #\n # Unless required by applicable law or agreed to in writing, software\n # distributed under the License is distributed on an \"AS IS\" BASIS,\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n\n app.log.dir=${launcher.dir}/../logs\n app.log.file=${launcher.name}.log\n app.audit.file=${launcher.name}-audit.log\n\n log4j.rootLogger=ERROR, drfa\n\n log4j.logger.org.apache.hadoop.gateway=INFO\n #log4j.logger.org.apache.hadoop.gateway=DEBUG\n\n #log4j.logger.org.eclipse.jetty=DEBUG\n #log4j.logger.org.apache.shiro=DEBUG\n #log4j.logger.org.apache.http=DEBUG\n #log4j.logger.org.apache.http.client=DEBUG\n #log4j.logger.org.apache.http.headers=DEBUG\n #log4j.logger.org.apache.http.wire=DEBUG\n\n log4j.appender.stdout=org.apache.log4j.ConsoleAppender\n log4j.appender.stdout.layout=org.apache.log4j.PatternLayout\n log4j.appender.stdout.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender\n log4j.appender.drfa.File=${app.log.dir}/${app.log.file}\n log4j.appender.drfa.DatePattern=.yyyy-MM-dd\n log4j.appender.drfa.layout=org.apache.log4j.PatternLayout\n log4j.appender.drfa.layout.ConversionPattern=% d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n log4j.logger.audit=INFO, auditfile\n log4j.appender.auditfile=org.apache.log4j.DailyRollingFileAppender\n log4j.appender.auditfile.File=${app.log.dir}/${app.audit.file}\n log4j.appender.auditfile.Append = true\n log4j.appender.auditfile.DatePattern = '.'yyyy-MM-dd\n log4j.appender.auditfile.layout = org.apache.hadoop.gateway.audit.log4j.layout.AuditLayout" + }, + "knox-env": { + "knox_master_secret": "sa", + "knox_group": "knox", + "knox_pid_dir": "/var/run/knox", + "knox_user": "knox" + }, + "kafka-env": { + "content": "\n#!/bin/bash\n\n# Set KAFKA specific environment variables here.\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}\nexport PATH=$PATH:$JAVA_HOME/bin", + "kafka_user": "kafka", + "kafka_log_dir": "/var/log/kafka", + "kafka_pid_dir": "/var/run/kafka" + }, + "kafka-log4j": { + "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\nkafka.logs.dir=logs\n\nlog4j.rootLogger=INFO, stdout\n\nlog4j.appender.stdout=org.apache.log4j.ConsoleAppender\nlog4j.appender.stdout.layout=org.apache.log4j .PatternLayout\nlog4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log\nlog4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log\nlog4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-reques t.log\nlog4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log\nlog4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log\nlog4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\n# Turn on all our debugging info\n#log4j.logger.kafka.producer.async.DefaultEventHandler=DEBUG, kafkaAppender\n#log4j.logge r.kafka.client.ClientUtils=DEBUG, kafkaAppender\n#log4j.logger.kafka.perf=DEBUG, kafkaAppender\n#log4j.logger.kafka.perf.ProducerPerformance$ProducerThread=DEBUG, kafkaAppender\n#log4j.logger.org.I0Itec.zkclient.ZkClient=DEBUG\nlog4j.logger.kafka=INFO, kafkaAppender\nlog4j.logger.kafka.network.RequestChannel$=WARN, requestAppender\nlog4j.additivity.kafka.network.RequestChannel$=false\n\n#log4j.logger.kafka.network.Processor=TRACE, requestAppender\n#log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender\n#log4j.additivity.kafka.server.KafkaApis=false\nlog4j.logger.kafka.request.logger=WARN, requestAppender\nlog4j.additivity.kafka.request.logger=false\n\nlog4j.logger.kafka.controller=TRACE, controllerAppender\nlog4j.additivity.kafka.controller=false\n\nlog4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender\nlog4j.additivity.kafka.log.LogCleaner=false\n\nlog4j.logger.state.change.logger=TRACE, stateChangeAppender\nlog4j.additivity.state.change.logger=false" + }, + "kafka-broker": { + "log.segment.bytes": "1073741824", + "socket.send.buffer.bytes": "102400", + "num.network.threads": "3", + "log.flush.scheduler.interval.ms": "3000", + "kafka.ganglia.metrics.host": "localhost", + "zookeeper.session.timeout.ms": "6000", + "replica.lag.time.max.ms": "10000", + 
"num.io.threads": "8", + "kafka.ganglia.metrics.group": "kafka", + "replica.lag.max.messages": "4000", + "port": "6667", + "log.retention.bytes": "-1", + "fetch.purgatory.purge.interval.requests": "10000", + "producer.purgatory.purge.interval.requests": "10000", + "default.replication.factor": "1", + "replica.high.watermark.checkpoint.interval.ms": "5000", + "zookeeper.connect": "c6402.ambari.apache.org:2181", + "controlled.shutdown.retry.backoff.ms": "5000", + "num.partitions": "1", + "log.flush.interval.messages": "10000", + "replica.fetch.min.bytes": "1", + "queued.max.requests": "500", + "controlled.shutdown.max.retries": "3", + "replica.fetch.wait.max.ms": "500", + "controlled.shutdown.enable": "false", + "log.roll.hours": "168", + "log.cleanup.interval.mins": "10", + "replica.socket.receive.buffer.bytes": "65536", + "zookeeper.connection.timeout.ms": "6000", + "replica.fetch.max.bytes": "1048576", + "num.replica.fetchers": "1", + "socket.request.max.bytes": "104857600", + "message.max.bytes": "1000000", + "zookeeper.sync.time.ms": "2000", + "socket.receive.buffer.bytes": "102400", + "controller.message.queue.size": "10", + "log.flush.interval.ms": "3000", + "log.dirs": "/tmp/log/dir", + "controller.socket.timeout.ms": "30000", + "replica.socket.timeout.ms": "30000", + "auto.create.topics.enable": "true", + "log.index.size.max.bytes": "10485760", + "kafka.ganglia.metrics.port": "8649", + "log.index.interval.bytes": "4096", + "log.retention.hours": "168" + }, + "application-properties": { + "atlas.graph.storage.backend": "berkeleyje", + "atlas.graph.storage.directory": "data/berkley", + "atlas.graph.index.search.backend": "elasticsearch", + "atlas.graph.index.search.directory": "data/es", + "atlas.graph.index.search.elasticsearch.client-only": false, + "atlas.graph.index.search.elasticsearch.local-mode": true, + "atlas.lineage.hive.table.type.name": "Table", + "atlas.lineage.hive.column.type.name": "Column", + "atlas.lineage.hive.table.column.name": "columns", + "atlas.lineage.hive.process.type.name": "LoadProcess", + "atlas.lineage.hive.process.inputs.name": "inputTables", + "atlas.lineage.hive.process.outputs.name": "outputTables", + "atlas.enableTLS": false, + "atlas.authentication.method": "simple", + "atlas.authentication.principal": "atlas", + "atlas.authentication.keytab": "/etc/security/keytabs/atlas.service.keytab", + "atlas.http.authentication.enabled": false, + "atlas.http.authentication.type": "simple", + "atlas.http.authentication.kerberos.principal": "HTTP/[email protected]", + "atlas.http.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", + "atlas.http.authentication.kerberos.name.rules": "DEFAULT" + }, + "atlas-env": { + "content": "# The java implementation to use. If JAVA_HOME is not found we expect java and jar to be in path\nexport JAVA_HOME={{java64_home}}\n# any additional java opts you want to set. This will apply to both client and server operations\nexport METADATA_OPTS={{metadata_opts}}\n# metadata configuration directory\nexport METADATA_CONF={{conf_dir}}\n# Where log files are stored. Defatult is logs directory under the base install location\nexport METADATA_LOG_DIR={{log_dir}}\n# additional classpath entries\nexport METADATACPPATH={{metadata_classpath}}\n# data dir\nexport METADATA_DATA_DIR={{data_dir}}\n# Where do you want to expand the war file. 
By Default it is in /server/webapp dir under the base install dir.\nexport METADATA_EXPANDED_WEBAPP_DIR={{expanded_war_dir}}", + "metadata_user": "atlas", + "metadata_port": 21000, + "metadata_pid_dir": "/var/run/atlas", + "metadata_log_dir": "/var/log/atlas", + "metadata_data_dir": "/var/lib/atlas/data", + "metadata_expanded_war_dir": "/var/lib/atlas/server/webapp" + }, + "ranger-hbase-plugin-properties": { + "ranger-hbase-plugin-enabled":"yes" + }, + "ranger-hive-plugin-properties": { + "ranger-hive-plugin-enabled":"yes" + }, + "ranger-env": { + "xml_configurations_supported" : "true" + } + }, + "configuration_attributes": { + "yarn-site": { + "final": { + "yarn.nodemanager.disk-health-checker.min-healthy-disks": "true", + "yarn.nodemanager.container-executor.class": "true", + "yarn.nodemanager.local-dirs": "true" + } + }, + "yarn-site": { + "final": { + "is_supported_yarn_ranger": "true" + } + }, + "hdfs-site": { + "final": { + "dfs.web.ugi": "true", + "dfs.support.append": "true", + "dfs.cluster.administrators": "true" + } + }, + "core-site": { + "final": { + "hadoop.proxyuser.hive.groups": "true", + "webinterface.private.actions": "true", + "hadoop.proxyuser.oozie.hosts": "true" + } + }, + "knox-env": {}, + "gateway-site": {}, + "users-ldif": {}, + "kafka-env": {}, + "kafka-log4j": {}, + "kafka-broker": {}, + "metadata-env": {} + }, + "configurationTags": { + "slider-client": { + "tag": "version1" + }, + "slider-log4j": { + "tag": "version1" + }, + "slider-env": { + "tag": "version1" + }, + "core-site": { + "tag": "version1" + }, + "hdfs-site": { + "tag": "version1" + }, + "yarn-site": { + "tag": "version1" + }, + "gateway-site": { + "tag": "version1" + }, + "topology": { + "tag": "version1" + }, + "users-ldif": { + "tag": "version1" + }, + "kafka-env": { + "tag": "version1" + }, + "kafka-log4j": { + "tag": "version1" + }, + "kafka-broker": { + "tag": "version1" + }, + "metadata-env": { + "tag": "version1" + } + }, + "commandId": "7-1", + "clusterHostInfo": { + "ambari_server_host": [ + "c6401.ambari.apache.org" + ], + "all_ping_ports": [ + "8670", + "8670" + ], + "rm_host": [ + "c6402.ambari.apache.org" + ], + "all_hosts": [ + "c6401.ambari.apache.org", + "c6402.ambari.apache.org" + ], + "knox_gateway_hosts": [ + "jaimin-knox-1.c.pramod-thangali.internal" + ], + "kafka_broker_hosts": [ + "c6401.ambari.apache.org" + ], + "zookeeper_hosts": [ + "c6401.ambari.apache.org" + ] + + } +}
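As a cross-check on how the pieces fit together: test_configure_secure drives the configure command with this fixture, and params.py derives its JAAS inputs from the same dictionaries. A standalone sketch mirroring that derivation (the relative path is an assumption about running it from an ambari checkout):

import json

# Sketch: pull the JAAS parameters out of the secure.json fixture, mirroring
# what params.py computes when security is enabled.
with open('ambari-server/src/test/python/stacks/2.3/configs/secure.json') as f:
  config = json.load(f)

props = config['configurations']['application-properties']
if config['configurations']['cluster-env']['security_enabled'] == 'true':
  principal = props['atlas.authentication.principal'].replace(
      '_HOST', config['hostname'].lower())
  keytab = props['atlas.authentication.keytab']
  print(principal, keytab)
# atlas /etc/security/keytabs/atlas.service.keytab

This is why the test expects TemplateConfig to write /etc/atlas/conf/atlas_jaas.conf owned by 'atlas': security_enabled is "true" in the fixture, and conf_dir resolves to /etc/atlas/conf in this test setup.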
