[ https://issues.apache.org/jira/browse/AMBARI-12442?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14630366#comment-14630366 ]

Irina Easterling commented on AMBARI-12442:
-------------------------------------------

#DataNode stderr:
{code}
Traceback (most recent call last):
  File "/var/lib/ambari-agent/cache/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py", line 153, in <module>
    DataNode().execute()
  File "/usr/lib/python2.6/site-packages/resource_management/libraries/script/script.py", line 218, in execute
    method(env)
  File "/var/lib/ambari-agent/cache/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py", line 47, in start
    datanode(action="start")
  File "/usr/lib/python2.6/site-packages/ambari_commons/os_family_impl.py", line 89, in thunk
    return fn(*args, **kwargs)
  File "/var/lib/ambari-agent/cache/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_datanode.py", line 58, in datanode
    create_log_dir=True
  File "/var/lib/ambari-agent/cache/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py", line 271, in service
    environment=hadoop_env_exports
  File "/usr/lib/python2.6/site-packages/resource_management/core/base.py", line 157, in __init__
    self.env.run()
  File "/usr/lib/python2.6/site-packages/resource_management/core/environment.py", line 152, in run
    self.run_action(resource, action)
  File "/usr/lib/python2.6/site-packages/resource_management/core/environment.py", line 118, in run_action
    provider_action()
  File "/usr/lib/python2.6/site-packages/resource_management/core/providers/system.py", line 258, in action_run
    tries=self.resource.tries, try_sleep=self.resource.try_sleep)
  File "/usr/lib/python2.6/site-packages/resource_management/core/shell.py", line 70, in inner
    result = function(command, **kwargs)
  File "/usr/lib/python2.6/site-packages/resource_management/core/shell.py", line 92, in checked_call
    tries=tries, try_sleep=try_sleep)
  File "/usr/lib/python2.6/site-packages/resource_management/core/shell.py", line 140, in _call_wrapper
    result = _call(command, **kwargs_copy)
  File "/usr/lib/python2.6/site-packages/resource_management/core/shell.py", line 291, in _call
    raise Fail(err_msg)
resource_management.core.exceptions.Fail: Execution of 'ambari-sudo.sh  -H -E /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /usr/hdp/current/hadoop-client/conf start datanode' returned 1. starting datanode, logging to /var/opt/teradata/log/hadoop/hdfs/hadoop-hdfs-datanode-baron3.out

{code}
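
The traceback bottoms out in {{shell.py}}'s {{checked_call}}: the resource provider shells out to {{hadoop-daemon.sh}} and raises {{Fail}} whenever the command exits non-zero. A minimal sketch of that pattern (my simplification for illustration, not the literal {{resource_management}} source):

{code}
# Simplified sketch of the checked_call/Fail pattern seen in the traceback
# above (illustrative only; the real code lives in
# resource_management/core/shell.py and also handles retries, sudo, env, etc.)
import subprocess


class Fail(Exception):
    """Raised when a managed command exits non-zero."""


def checked_call(command):
    # Run the command through a shell, merging stderr into stdout.
    proc = subprocess.Popen(command, shell=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    out, _ = proc.communicate()
    if proc.returncode != 0:
        # This is the raise that surfaces as the Fail in the stderr above.
        raise Fail("Execution of '%s' returned %d. %s"
                   % (command, proc.returncode, out))
    return proc.returncode, out
{code}

Note that the {{Fail}} message only echoes hadoop-daemon.sh's startup banner, so the actual reason for the exit code 1 has to be read from the datanode {{.out}}/{{.log}} files under /var/opt/teradata/log/hadoop/hdfs on baron3.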

#DataNode stdout:

{code}
2015-07-16 16:33:44,851 - Group['hadoop'] {'ignore_failures': False}
2015-07-16 16:33:44,852 - Group['users'] {'ignore_failures': False}
2015-07-16 16:33:44,852 - User['hive'] {'gid': 'hadoop', 'ignore_failures': False, 'groups': ['hadoop']}
2015-07-16 16:33:44,853 - User['zookeeper'] {'gid': 'hadoop', 'ignore_failures': False, 'groups': ['hadoop']}
2015-07-16 16:33:44,853 - User['oozie'] {'gid': 'hadoop', 'ignore_failures': False, 'groups': ['users']}
2015-07-16 16:33:44,854 - User['ams'] {'gid': 'hadoop', 'ignore_failures': False, 'groups': ['hadoop']}
2015-07-16 16:33:44,854 - User['falcon'] {'gid': 'hadoop', 'ignore_failures': False, 'groups': ['users']}
2015-07-16 16:33:44,854 - User['tez'] {'gid': 'hadoop', 'ignore_failures': False, 'groups': ['users']}
2015-07-16 16:33:44,855 - User['mahout'] {'gid': 'hadoop', 'ignore_failures': False, 'groups': ['hadoop']}
2015-07-16 16:33:44,855 - User['ambari-qa'] {'gid': 'hadoop', 'ignore_failures': False, 'groups': ['users']}
2015-07-16 16:33:44,856 - User['flume'] {'gid': 'hadoop', 'ignore_failures': False, 'groups': ['hadoop']}
2015-07-16 16:33:44,856 - User['hdfs'] {'gid': 'hadoop', 'ignore_failures': False, 'groups': ['hadoop']}
2015-07-16 16:33:44,857 - Modifying user hdfs
2015-07-16 16:33:44,877 - User['sqoop'] {'gid': 'hadoop', 'ignore_failures': False, 'groups': ['hadoop']}
2015-07-16 16:33:44,878 - User['yarn'] {'gid': 'hadoop', 'ignore_failures': False, 'groups': ['hadoop']}
2015-07-16 16:33:44,878 - User['mapred'] {'gid': 'hadoop', 'ignore_failures': False, 'groups': ['hadoop']}
2015-07-16 16:33:44,879 - User['hbase'] {'gid': 'hadoop', 'ignore_failures': False, 'groups': ['hadoop']}
2015-07-16 16:33:44,879 - User['hcat'] {'gid': 'hadoop', 'ignore_failures': False, 'groups': ['hadoop']}
2015-07-16 16:33:44,880 - File['/var/lib/ambari-agent/data/tmp/changeUid.sh'] {'content': StaticFile('changeToSecureUid.sh'), 'mode': 0555}
2015-07-16 16:33:44,881 - Execute['/var/lib/ambari-agent/data/tmp/changeUid.sh ambari-qa /tmp/hadoop-ambari-qa,/tmp/hsperfdata_ambari-qa,/home/ambari-qa,/tmp/ambari-qa,/tmp/sqoop-ambari-qa'] {'not_if': '(test $(id -u ambari-qa) -gt 1000) || (false)'}
2015-07-16 16:33:44,886 - Skipping Execute['/var/lib/ambari-agent/data/tmp/changeUid.sh ambari-qa /tmp/hadoop-ambari-qa,/tmp/hsperfdata_ambari-qa,/home/ambari-qa,/tmp/ambari-qa,/tmp/sqoop-ambari-qa'] due to not_if
2015-07-16 16:33:44,887 - Directory['/tmp/hbase-hbase'] {'owner': 'hbase', 'recursive': True, 'mode': 0775, 'cd_access': 'a'}
2015-07-16 16:33:44,887 - File['/var/lib/ambari-agent/data/tmp/changeUid.sh'] {'content': StaticFile('changeToSecureUid.sh'), 'mode': 0555}
2015-07-16 16:33:44,888 - Execute['/var/lib/ambari-agent/data/tmp/changeUid.sh hbase /home/hbase,/tmp/hbase,/usr/bin/hbase,/var/log/hbase,/tmp/hbase-hbase'] {'not_if': '(test $(id -u hbase) -gt 1000) || (false)'}
2015-07-16 16:33:44,893 - Skipping Execute['/var/lib/ambari-agent/data/tmp/changeUid.sh hbase /home/hbase,/tmp/hbase,/usr/bin/hbase,/var/log/hbase,/tmp/hbase-hbase'] due to not_if
2015-07-16 16:33:44,894 - Group['hdfs'] {'ignore_failures': False}
2015-07-16 16:33:44,894 - User['hdfs'] {'ignore_failures': False, 'groups': ['hadoop', 'hdfs', 'hdfs']}
2015-07-16 16:33:44,895 - Modifying user hdfs
2015-07-16 16:33:44,914 - Directory['/etc/hadoop'] {'mode': 0755}
2015-07-16 16:33:44,928 - File['/usr/hdp/current/hadoop-client/conf/hadoop-env.sh'] {'content': InlineTemplate(...), 'owner': 'root', 'group': 'hadoop'}
2015-07-16 16:33:44,940 - Execute['('setenforce', '0')'] {'not_if': '(! which getenforce ) || (which getenforce && getenforce | grep -q Disabled)', 'sudo': True, 'only_if': 'test -f /selinux/enforce'}
2015-07-16 16:33:44,945 - Skipping Execute['('setenforce', '0')'] due to not_if
2015-07-16 16:33:44,945 - Directory['/var/opt/teradata/log/hadoop'] {'owner': 'root', 'mode': 0775, 'group': 'hadoop', 'recursive': True, 'cd_access': 'a'}
2015-07-16 16:33:44,948 - Directory['/var/run/hadoop'] {'owner': 'root', 'group': 'root', 'recursive': True, 'cd_access': 'a'}
2015-07-16 16:33:44,948 - Changing owner for /var/run/hadoop from 511 to root
2015-07-16 16:33:44,948 - Changing group for /var/run/hadoop from 1001 to root
2015-07-16 16:33:44,948 - Directory['/tmp/hadoop-hdfs'] {'owner': 'hdfs', 'recursive': True, 'cd_access': 'a'}
2015-07-16 16:33:44,953 - File['/usr/hdp/current/hadoop-client/conf/commons-logging.properties'] {'content': Template('commons-logging.properties.j2'), 'owner': 'root'}
2015-07-16 16:33:44,954 - File['/usr/hdp/current/hadoop-client/conf/health_check'] {'content': Template('health_check.j2'), 'owner': 'root'}
2015-07-16 16:33:44,955 - File['/usr/hdp/current/hadoop-client/conf/log4j.properties'] {'content': '...', 'owner': 'hdfs', 'group': 'hadoop', 'mode': 0644}
2015-07-16 16:33:44,964 - File['/usr/hdp/current/hadoop-client/conf/hadoop-metrics2.properties'] {'content': Template('hadoop-metrics2.properties.j2'), 'owner': 'hdfs'}
2015-07-16 16:33:44,964 - File['/usr/hdp/current/hadoop-client/conf/task-log4j.properties'] {'content': StaticFile('task-log4j.properties'), 'mode': 0755}
2015-07-16 16:33:44,965 - File['/usr/hdp/current/hadoop-client/conf/configuration.xsl'] {'owner': 'hdfs', 'group': 'hadoop'}
2015-07-16 16:33:44,970 - File['/etc/hadoop/conf/topology_mappings.data'] {'owner': 'hdfs', 'content': Template('topology_mappings.data.j2'), 'group': 'hadoop'}
2015-07-16 16:33:44,971 - File['/etc/hadoop/conf/topology_script.py'] {'content': StaticFile('topology_script.py'), 'mode': 0755}
2015-07-16 16:33:45,115 - Directory['/etc/security/limits.d'] {'owner': 'root', 'group': 'root', 'recursive': True}
2015-07-16 16:33:45,122 - File['/etc/security/limits.d/hdfs.conf'] {'content': Template('hdfs.conf.j2'), 'owner': 'root', 'group': 'root', 'mode': 0644}
2015-07-16 16:33:45,123 - XmlConfig['hadoop-policy.xml'] {'owner': 'hdfs', 'group': 'hadoop', 'conf_dir': '/usr/hdp/current/hadoop-client/conf', 'configuration_attributes': {}, 'configurations': ...}
2015-07-16 16:33:45,134 - Generating config: /usr/hdp/current/hadoop-client/conf/hadoop-policy.xml
2015-07-16 16:33:45,134 - File['/usr/hdp/current/hadoop-client/conf/hadoop-policy.xml'] {'owner': 'hdfs', 'content': InlineTemplate(...), 'group': 'hadoop', 'mode': None, 'encoding': 'UTF-8'}
2015-07-16 16:33:45,146 - Writing File['/usr/hdp/current/hadoop-client/conf/hadoop-policy.xml'] because contents don't match
2015-07-16 16:33:45,147 - XmlConfig['ssl-client.xml'] {'owner': 'hdfs', 'group': 'hadoop', 'conf_dir': '/usr/hdp/current/hadoop-client/conf', 'configuration_attributes': {}, 'configurations': ...}
2015-07-16 16:33:45,157 - Generating config: /usr/hdp/current/hadoop-client/conf/ssl-client.xml
2015-07-16 16:33:45,158 - File['/usr/hdp/current/hadoop-client/conf/ssl-client.xml'] {'owner': 'hdfs', 'content': InlineTemplate(...), 'group': 'hadoop', 'mode': None, 'encoding': 'UTF-8'}
2015-07-16 16:33:45,163 - Writing File['/usr/hdp/current/hadoop-client/conf/ssl-client.xml'] because contents don't match
2015-07-16 16:33:45,164 - Directory['/usr/hdp/current/hadoop-client/conf/secure'] {'owner': 'root', 'group': 'hadoop', 'recursive': True, 'cd_access': 'a'}
2015-07-16 16:33:45,164 - XmlConfig['ssl-client.xml'] {'owner': 'hdfs', 'group': 'hadoop', 'conf_dir': '/usr/hdp/current/hadoop-client/conf/secure', 'configuration_attributes': {}, 'configurations': ...}
2015-07-16 16:33:45,174 - Generating config: /usr/hdp/current/hadoop-client/conf/secure/ssl-client.xml
2015-07-16 16:33:45,175 - File['/usr/hdp/current/hadoop-client/conf/secure/ssl-client.xml'] {'owner': 'hdfs', 'content': InlineTemplate(...), 'group': 'hadoop', 'mode': None, 'encoding': 'UTF-8'}
2015-07-16 16:33:45,181 - Writing File['/usr/hdp/current/hadoop-client/conf/secure/ssl-client.xml'] because contents don't match
2015-07-16 16:33:45,181 - XmlConfig['ssl-server.xml'] {'owner': 'hdfs', 'group': 'hadoop', 'conf_dir': '/usr/hdp/current/hadoop-client/conf', 'configuration_attributes': {}, 'configurations': ...}
2015-07-16 16:33:45,191 - Generating config: /usr/hdp/current/hadoop-client/conf/ssl-server.xml
2015-07-16 16:33:45,192 - File['/usr/hdp/current/hadoop-client/conf/ssl-server.xml'] {'owner': 'hdfs', 'content': InlineTemplate(...), 'group': 'hadoop', 'mode': None, 'encoding': 'UTF-8'}
2015-07-16 16:33:45,199 - Writing File['/usr/hdp/current/hadoop-client/conf/ssl-server.xml'] because contents don't match
2015-07-16 16:33:45,199 - XmlConfig['hdfs-site.xml'] {'owner': 'hdfs', 'group': 'hadoop', 'conf_dir': '/usr/hdp/current/hadoop-client/conf', 'configuration_attributes': {}, 'configurations': ...}
2015-07-16 16:33:45,209 - Generating config: /usr/hdp/current/hadoop-client/conf/hdfs-site.xml
2015-07-16 16:33:45,209 - File['/usr/hdp/current/hadoop-client/conf/hdfs-site.xml'] {'owner': 'hdfs', 'content': InlineTemplate(...), 'group': 'hadoop', 'mode': None, 'encoding': 'UTF-8'}
2015-07-16 16:33:45,270 - Writing File['/usr/hdp/current/hadoop-client/conf/hdfs-site.xml'] because contents don't match
2015-07-16 16:33:45,271 - XmlConfig['core-site.xml'] {'group': 'hadoop', 'conf_dir': '/usr/hdp/current/hadoop-client/conf', 'mode': 0644, 'configuration_attributes': {}, 'owner': 'hdfs', 'configurations': ...}
2015-07-16 16:33:45,280 - Generating config: /usr/hdp/current/hadoop-client/conf/core-site.xml
2015-07-16 16:33:45,281 - File['/usr/hdp/current/hadoop-client/conf/core-site.xml'] {'owner': 'hdfs', 'content': InlineTemplate(...), 'group': 'hadoop', 'mode': 0644, 'encoding': 'UTF-8'}
2015-07-16 16:33:45,308 - Writing File['/usr/hdp/current/hadoop-client/conf/core-site.xml'] because contents don't match
2015-07-16 16:33:45,310 - File['/usr/hdp/current/hadoop-client/conf/slaves'] {'content': Template('slaves.j2'), 'owner': 'root'}
2015-07-16 16:33:45,311 - Directory['/var/lib/hadoop-hdfs'] {'owner': 'hdfs', 'group': 'hadoop', 'mode': 0751, 'recursive': True}
2015-07-16 16:33:45,315 - Host contains mounts: ['/', '/proc', '/sys', '/sys/kernel/debug', '/dev', '/dev/shm', '/dev/pts', '/var', '/var/opt/teradata', '/data1', '/data2', '/data3', '/data4', '/data5', '/data6', '/data7', '/data8', '/data9', '/data10', '/data11', '/data12', '/sys/fs/fuse/connections', '/var/lib/ntp/proc'].
2015-07-16 16:33:45,316 - Mount point for directory /data1/hadoop/hdfs/data is /
2015-07-16 16:33:45,316 - Mount point for directory /data2/hadoop/hdfs/data is /
2015-07-16 16:33:45,316 - Mount point for directory /data3/hadoop/hdfs/data is /
2015-07-16 16:33:45,316 - Mount point for directory /data4/hadoop/hdfs/data is /
2015-07-16 16:33:45,316 - Mount point for directory /data5/hadoop/hdfs/data is /
2015-07-16 16:33:45,316 - Mount point for directory /data6/hadoop/hdfs/data is /
2015-07-16 16:33:45,316 - Mount point for directory /data7/hadoop/hdfs/data is /
2015-07-16 16:33:45,316 - Mount point for directory /data8/hadoop/hdfs/data is /
2015-07-16 16:33:45,316 - Mount point for directory /data9/hadoop/hdfs/data is /
2015-07-16 16:33:45,317 - Mount point for directory /data10/hadoop/hdfs/data is /
2015-07-16 16:33:45,317 - Mount point for directory /data11/hadoop/hdfs/data is /
2015-07-16 16:33:45,317 - Mount point for directory /data12/hadoop/hdfs/data is /
2015-07-16 16:33:45,318 - Directory['/var/run/hadoop'] {'owner': 'hdfs', 'group': 'hadoop', 'mode': 0755}
2015-07-16 16:33:45,318 - Changing owner for /var/run/hadoop from 0 to hdfs
2015-07-16 16:33:45,318 - Changing group for /var/run/hadoop from 0 to hadoop
2015-07-16 16:33:45,318 - Directory['/var/run/hadoop/hdfs'] {'owner': 'hdfs', 'recursive': True}
2015-07-16 16:33:45,319 - Directory['/var/opt/teradata/log/hadoop/hdfs'] {'owner': 'hdfs', 'recursive': True}
2015-07-16 16:33:45,319 - File['/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid'] {'action': ['delete'], 'not_if': 'ambari-sudo.sh  -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid && ambari-sudo.sh  -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid'}
2015-07-16 16:33:45,331 - Deleting File['/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid']
2015-07-16 16:33:45,332 - Execute['ambari-sudo.sh  -H -E /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /usr/hdp/current/hadoop-client/conf start datanode'] {'environment': {'HADOOP_LIBEXEC_DIR': '/usr/hdp/current/hadoop-client/libexec'}, 'not_if': 'ambari-sudo.sh  -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid && ambari-sudo.sh  -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid'}
{code}
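
The tail of the stdout shows the guard pattern around the start command: the stale pid file is deleted unless {{pgrep}} finds a live process matching it, and the same {{not_if}} check gates the {{Execute}}. Roughly, in the {{resource_management}} DSL (a paraphrase of what utils.py does, with names and wiring simplified, not the literal code):

{code}
# Paraphrase of the final steps in the stdout above, written against the
# resource_management DSL (simplified for illustration).
from resource_management.core.resources.system import Execute, File

pid_file = '/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid'

# True only when the pid file exists AND pgrep finds a matching live
# process, i.e. the datanode is already running.
datanode_is_running = (
    'ambari-sudo.sh -H -E test -f %s && '
    'ambari-sudo.sh -H -E pgrep -F %s' % (pid_file, pid_file))

# Delete a stale pid file unless the daemon is genuinely up.
File(pid_file, action='delete', not_if=datanode_is_running)

# Start the datanode; skipped entirely when not_if succeeds.
Execute('ambari-sudo.sh -H -E '
        '/usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh '
        '--config /usr/hdp/current/hadoop-client/conf start datanode',
        environment={'HADOOP_LIBEXEC_DIR':
                     '/usr/hdp/current/hadoop-client/libexec'},
        not_if=datanode_is_running)
{code}

Both guards passed here (the pid file was deleted and the Execute actually ran), so the exit code 1 comes from hadoop-daemon.sh's own startup of the Kerberos-enabled datanode, not from Ambari's orchestration.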

>  DataNodes and JournalNodes failed to start after enabling Kerberos via the
> Ambari Wizard.
> --------------------------------------------------------------------------------------------
>
>                 Key: AMBARI-12442
>                 URL: https://issues.apache.org/jira/browse/AMBARI-12442
>             Project: Ambari
>          Issue Type: Bug
>          Components: ambari-web
>    Affects Versions: 2.1.0
>         Environment: ambari-server-2.1.0-1462
>            Reporter: Irina Easterling
>            Priority: Blocker
>
> On an HDP-2.3 cluster, the DataNodes and JournalNodes failed to start
> after enabling Kerberos via the Ambari Wizard.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)
