Ottomata has submitted this change and it was merged.
Change subject: Puppetizing hue
......................................................................
Puppetizing hue
Change-Id: Ic523aa456df115e3944126f2ed4b411f16ad21d1
---
M README.md
M TODO.md
A files/hue/hue.init.d.sh
A manifests/hue.pp
A manifests/hue/defaults.pp
A templates/hue/hue.ini.erb
6 files changed, 1,051 insertions(+), 2 deletions(-)
Approvals:
Faidon: Looks good to me, approved
jenkins-bot: Verified
diff --git a/README.md b/README.md
index 2233d94..b73cea6 100644
--- a/README.md
+++ b/README.md
@@ -129,3 +129,27 @@
jdbc_password -> $secret_password,
}
```
+
+## Hue
+
+To install hue server, simply:
+
+```puppet
+class { 'cdh4::hue':
+ secret_key => 'ii7nnoCGtP0wjub6nqnRfQx93YUV3iWG', # your secret key here.
+}
+```
+
+There are many more parameters to the `cdh4::hue` class. See the class
+documentation in manifests/hue.pp.
+
+Note that while much of this puppet-cdh4 module supports MRv1, this Hue
+puppetization currently does not. (Feel free to submit a patch to add
+MRv1 support though!)
+
+If you include the `cdh4::hive` or `cdh4::oozie` classes on this node,
+Hue will be configured to run its Hive and Oozie apps.
+
+Hue Impala is not currently supported, since Impala hasn't been puppetized
+in this module yet.
+
diff --git a/TODO.md b/TODO.md
index 7f1f572..7201943 100644
--- a/TODO.md
+++ b/TODO.md
@@ -15,8 +15,6 @@
## Oozie
-## Hue
-
## HBase
## Zookeeper
diff --git a/files/hue/hue.init.d.sh b/files/hue/hue.init.d.sh
new file mode 100644
index 0000000..952c328
--- /dev/null
+++ b/files/hue/hue.init.d.sh
@@ -0,0 +1,277 @@
+#!/bin/bash
+
+# NOTE: This file is managed by Puppet.
+# This file has been modified by the wikimedia/puppet-cdh4 module.
+# It adds a --chuid flag to start-stop-daemon. See the comment below,
+# and https://issues.cloudera.org/browse/HUE-1398 for more info.
+
+#
+# (c) Copyright 2011 Cloudera, Inc.
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+### BEGIN INIT INFO
+# Provides: hue
+# Required-Start: $network $local_fs
+# Required-Stop:
+# Should-Start: $named
+# Should-Stop:
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Hue
+# Description: Hue Web Interface
+### END INIT INFO
+
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin

# NOTE: the comment continuations below were rejoined onto their lines;
# in the reviewed paste they had been hard-wrapped mid-line, which is a
# syntax error in a real shell script.
DAEMON=/usr/share/hue/build/env/bin/supervisor  # Introduce the server's location here
NAME=hue                  # Introduce the short server's name here
DESC="Hue for Hadoop"     # Introduce a short description here
LOGDIR=/var/log/hue       # Log directory to use

PIDFILE=/var/run/hue/supervisor.pid

# Nothing to do if the supervisor binary is not installed.
test -x $DAEMON || exit 0

. /lib/lsb/init-functions

# Default options, these can be overriden by the information
# at /etc/default/$NAME
DAEMON_OPTS="-p $PIDFILE -d -l $LOGDIR"  # Additional options given to the server

DIETIME=10  # Time to wait for the server to die, in seconds
            # If this value is set too low you might not
            # let some servers to die gracefully and
            # 'restart' will not work

STARTTIME=5 # Time to wait for the server to start, in seconds
            # If this value is set each time the server is
            # started (on start or restart) the script will
            # stall to try to determine if it is running
            # If it is not set and the server takes time
            # to setup a pid file the log message might
            # be a false positive (says it did not start
            # when it actually did)

DAEMONUSER=hue  # User to run the daemons as. If this value
                # is set start-stop-daemon will chuid the server

# Include defaults if available
if [ -f /etc/default/$NAME ] ; then
  . /etc/default/$NAME
fi

# Use this if you want the user to explicitly set 'RUN' in
# /etc/default/
#if [ "x$RUN" != "xyes" ] ; then
#  log_failure_msg "$NAME disabled, please adjust the configuration to your needs "
#  log_failure_msg "and then set RUN to 'yes' in /etc/default/$NAME to enable it."
#  exit 1
#fi

# Check that the user exists (if we set a user)
# Does the user exist?
if [ -n "$DAEMONUSER" ] ; then
  if getent passwd | grep -q "^$DAEMONUSER:"; then
    # Obtain the uid and gid
    DAEMONUID=$(getent passwd | grep "^$DAEMONUSER:" | awk -F : '{print $3}')
    DAEMONGID=$(getent passwd | grep "^$DAEMONUSER:" | awk -F : '{print $4}')
  else
    log_failure_msg "The user $DAEMONUSER, required to run $NAME does not exist."
    exit 1
  fi
fi


set -e
+
running_pid() {
# Check if a given process pid's cmdline matches a given name.
# Returns 0 only when $1 is a live pid whose command contains "python"
# (the hue supervisor is a python process).
# NOTE: $pid is intentionally NOT local -- other helpers in this script
# read the global after calling us.
  pid=$1
  [ -z "$pid" ] && return 1
  [ ! -d "/proc/$pid" ] && return 1
  # First NUL-separated word of the cmdline, up to any ':' suffix.
  # (Reads the file directly instead of the original's useless 'cat |'.)
  cmd=$(tr "\000" "\n" < "/proc/$pid/cmdline" | head -n 1 | cut -d : -f 1)
  echo "$cmd" | grep -q python || return 1
  return 0
}
+
running() {
# Report whether the daemon recorded in $PIDFILE is alive, by looking
# at /proc (works for all users).  Leaves the pid it read in the
# global $pid, which other helpers rely on.
  [ -f "$PIDFILE" ] || return 1   # no pidfile, probably no daemon present
  pid=$(cat "$PIDFILE")
  running_pid $pid || return 1
  return 0
}
+
start_server() {
# Start the hue supervisor via start-stop-daemon.
# Prepares the runtime directories (pid dir, log dir, python egg cache)
# and chowns them to $DAEMONUSER before launching, so the daemon can
# write to them after dropping privileges.
  export PYTHON_EGG_CACHE='/tmp/.hue-python-eggs'
  mkdir -p /usr/share/hue/pids/
  mkdir -p ${PYTHON_EGG_CACHE}
  mkdir -p $(dirname $PIDFILE) $LOGDIR
  chown -R $DAEMONUSER $(dirname $PIDFILE) $LOGDIR ${PYTHON_EGG_CACHE}
  # dont setuid, since supervisor will drop privileges on its
  # own.
  # start-stop-daemon --start --quiet --pidfile $PIDFILE \
  # --exec $DAEMON -- $DAEMON_OPTS

  # ===== wikimedia/puppet-cdh4 patch =====
  # THIS IS A BUG! We need to honor $DAEMONUSER here.
  # supervisor.py is smart enough to know what to do.
  # See https://issues.cloudera.org/browse/HUE-1398.
  # This init.d script will be removed when a newer
  # version of hue (hopefully) fixes this.
  start-stop-daemon --start --quiet --pidfile $PIDFILE \
    --chuid $DAEMONUSER --exec $DAEMON -- $DAEMON_OPTS

  # NOTE(review): 'set -e' does not abort here because callers invoke
  # start_server inside an 'if', which suppresses errexit, so $? is
  # captured correctly.
  errcode=$?
  return $errcode
}
+
stop_server() {
# Stop the daemon tracked by $PIDFILE using the LSB killproc helper;
# propagate killproc's exit status to the caller.
  killproc -p "$PIDFILE" "$DAEMON"
}
+
reload_server() {
# Ask the daemon to reload its configuration by sending SIGHUP.
# Returns 1 when there is no pidfile, otherwise kill's exit status.
  [ ! -f "$PIDFILE" ] && return 1
  # BUG FIX: the original line was 'pid=pidofproc $PIDFILE', which does
  # not call pidofproc at all -- the shell parses it as the assignment
  # pid=pidofproc prefixed to a command, i.e. it tries to *execute the
  # pidfile*.  Read the daemon's pid from the pidfile instead, as the
  # other helpers in this script do.
  pid=$(cat "$PIDFILE")
  # Send a SIGHUP
  kill -1 $pid
  return $?
}
+
force_stop() {
# Force the process to die: SIGTERM first, wait $DIETIME, then SIGKILL.
# Gives up (exit 1) if the process survives a SIGKILL; removes the
# pidfile on success.
  [ ! -e "$PIDFILE" ] && return
  if running ; then
    # Read the pid explicitly rather than relying on the side effect of
    # 'running' leaving it in the global $pid (fragile coupling in the
    # original).
    pid=$(cat "$PIDFILE")
    kill -15 $pid
    # Is it really dead?
    sleep "$DIETIME"s
    if running ; then
      kill -9 $pid
      sleep "$DIETIME"s
      if running ; then
        echo "Cannot kill $NAME (pid=$pid)!"
        exit 1
      fi
    fi
  fi
  rm -f $PIDFILE
}
+
+
case "$1" in
  start)
    log_daemon_msg "Starting $DESC " "$NAME"
    # Check if it's running first
    if running ; then
      log_progress_msg "apparently already running"
      log_end_msg 0
      exit 0
    fi
    if start_server ; then
      # NOTE: Some servers might die some time after they start,
      # this code will detect this issue if STARTTIME is set
      # to a reasonable value
      [ -n "$STARTTIME" ] && sleep $STARTTIME # Wait some time
      if running ; then
        # It's ok, the server started and is running
        log_end_msg 0
      else
        # It is not running after we did start
        log_end_msg 1
      fi
    else
      # Either we could not start it
      log_end_msg 1
    fi
    ;;
  stop)
    log_daemon_msg "Stopping $DESC" "$NAME"
    if running ; then
      # Only stop the server if we see it running
      errcode=0
      stop_server || errcode=$?
      log_end_msg $errcode
    else
      # If it's not running don't do anything
      log_progress_msg "apparently not running"
      log_end_msg 0
      exit 0
    fi
    ;;
  force-stop)
    # First try to stop gracefully the program
    $0 stop
    errcode=0
    if running; then
      # If it's still running try to kill it more forcefully
      log_daemon_msg "Stopping (force) $DESC" "$NAME"
      force_stop || errcode=$?
    fi
    # if there are still processes running as hue, just kill them.
    # we only do this if the user is hue, in case it's been changed
    # to nobody - we don't want to go and kill a webserver
    # BUG FIX: the original test used '-eq', which is an *integer*
    # comparison and always fails with "integer expression expected"
    # for the string value 'hue'.  Use the string equality operator
    # '=' instead.
    if [ "$DAEMONUSER" = "hue" ] && ps -u hue | grep -q build/env/bin ; then
      killall -9 -u hue
      errcode=$?
    fi
    log_end_msg $errcode
    ;;
  restart|force-reload)
    log_daemon_msg "Restarting $DESC" "$NAME"
    errcode=0
    stop_server || errcode=$?
    # Wait some sensible amount, some server need this
    [ -n "$DIETIME" ] && sleep $DIETIME
    start_server || errcode=$?
    [ -n "$STARTTIME" ] && sleep $STARTTIME
    running || errcode=$?
    log_end_msg $errcode
    ;;
  status)
    log_daemon_msg "Checking status of $DESC" "$NAME"
    if running ; then
      log_progress_msg "running"
      log_end_msg 0
    else
      log_progress_msg "apparently not running"
      log_end_msg 1
      exit 1
    fi
    ;;
  # Use this if the daemon cannot reload
  reload)
    log_warning_msg "Reloading $NAME daemon: not implemented, as the daemon"
    log_warning_msg "cannot re-read the config file (use restart)."
    ;;
  *)
    N=/etc/init.d/$NAME
    echo "Usage: $N {start|stop|force-stop|restart|force-reload|status}" >&2
    exit 1
    ;;
esac
+
+exit 0
diff --git a/manifests/hue.pp b/manifests/hue.pp
new file mode 100644
index 0000000..f16e2e0
--- /dev/null
+++ b/manifests/hue.pp
@@ -0,0 +1,173 @@
# == Class cdh4::hue
#
# Installs hue, sets up the hue.ini file
# and ensures that hue server is running.
# This requires that cdh4::hadoop is included.
#
# If cdh4::hive and/or cdh4::oozie are included
# on this node, hue will be configured to interface
# with hive and oozie.
#
# == Parameters
# $http_host              - IP for webservice to bind.
# $http_port              - Port for webservice to bind.
# $secret_key             - Secret key used for session hashing.
#
# $oozie_url              - URL for Oozie API.  If cdh4::oozie is included,
#                           this will be inferred.  Else this will be disabled.
# $oozie_security_enabled - Default: false.
#
# $smtp_host              - SMTP host for email notifications.
#                           Default: undef, SMTP will not be configured.
# $smtp_port              - SMTP port.  Default: 25
# $smtp_from_email        - Sender email address of notifications.
#                           Default: undef
# $smtp_user              - Username for SMTP authentication.  Default: undef
# $smtp_password          - Password for SMTP authentication.  Default: undef
#
# $httpfs_enabled         - If true, Hue will be configured to interact with
#                           HDFS via HttpFS rather than the default WebHDFS.
#                           You must manually configure HttpFS on your
#                           namenode.
#
# $ssl_certificate        - Path to SSL certificate.
#                           Default: /etc/ssl/certs/ssl-cert-snakeoil.pem
# $ssl_private_key        - Path to SSL private key.
#                           Default: /etc/ssl/private/ssl-cert-snakeoil.key
#                           If ssl_certificate and ssl_private_key are set to
#                           the defaults, the snakeoil certificates will be
#                           generated automatically for you.
#
# $hue_ini_template       - ERB template used to render /etc/hue/hue.ini.
#
# === LDAP parameters:
# See hue.ini comments for documentation. By default these are undefined.
#
# $ldap_url
# $ldap_cert
# $ldap_nt_domain
# $ldap_bind_dn
# $ldap_base_dn
# $ldap_bind_password
# $ldap_username_pattern
# $ldap_user_filter
# $ldap_user_name_attr
# $ldap_group_filter
# $ldap_group_name_attr
# $ldap_group_member_attr
#
class cdh4::hue(
    $http_host              = $cdh4::hue::defaults::http_host,
    $http_port              = $cdh4::hue::defaults::http_port,
    $secret_key             = $cdh4::hue::defaults::secret_key,

    $oozie_url              = $cdh4::hue::defaults::oozie_url,
    $oozie_security_enabled = $cdh4::hue::defaults::oozie_security_enabled,

    $smtp_host              = $cdh4::hue::defaults::smtp_host,
    $smtp_port              = $cdh4::hue::defaults::smtp_port,
    $smtp_user              = $cdh4::hue::defaults::smtp_user,
    $smtp_password          = $cdh4::hue::defaults::smtp_password,
    $smtp_from_email        = $cdh4::hue::defaults::smtp_from_email,

    $httpfs_enabled         = $cdh4::hue::defaults::httpfs_enabled,

    $ssl_certificate        = $cdh4::hue::defaults::ssl_certificate,
    $ssl_private_key        = $cdh4::hue::defaults::ssl_private_key,

    $ldap_url               = $cdh4::hue::defaults::ldap_url,
    $ldap_cert              = $cdh4::hue::defaults::ldap_cert,
    $ldap_nt_domain         = $cdh4::hue::defaults::ldap_nt_domain,
    $ldap_bind_dn           = $cdh4::hue::defaults::ldap_bind_dn,
    $ldap_base_dn           = $cdh4::hue::defaults::ldap_base_dn,
    $ldap_bind_password     = $cdh4::hue::defaults::ldap_bind_password,
    $ldap_username_pattern  = $cdh4::hue::defaults::ldap_username_pattern,
    $ldap_user_filter       = $cdh4::hue::defaults::ldap_user_filter,
    $ldap_user_name_attr    = $cdh4::hue::defaults::ldap_user_name_attr,
    $ldap_group_filter      = $cdh4::hue::defaults::ldap_group_filter,
    $ldap_group_name_attr   = $cdh4::hue::defaults::ldap_group_name_attr,
    $ldap_group_member_attr = $cdh4::hue::defaults::ldap_group_member_attr,

    $hue_ini_template       = $cdh4::hue::defaults::hue_ini_template
) inherits cdh4::hue::defaults
{
    # Hadoop must be fully configured before Hue can be set up.
    Class['cdh4::hadoop'] -> Class['cdh4::hue']

    package { ['hue', 'hue-server']:
        ensure => 'installed'
    }

    # Managing the hue user here so we can add
    # it to the hive group if hive-site.xml is
    # not world readable.
    user { 'hue':
        gid        => 'hue',
        comment    => 'Hue daemon',
        home       => '/usr/share/hue',
        shell      => '/bin/false',
        managehome => false,
        system     => true,
        require    => [Package['hue'], Package['hue-server']],
    }
    # hive-site.xml might not be world readable.
    if (defined(Class['cdh4::hive'])) {
        # make sure cdh4::hive is applied before cdh4::hue.
        Class['cdh4::hive'] -> Class['cdh4::hue']
        # Add the hue user to the hive group.
        User['hue'] { groups +> 'hive'}

        # Growl. The packaged hue init.d script
        # has a bug where it doesn't --chuid to hue.
        # this causes hue not to be able to read the
        # hive-site.xml file here, even though it is
        # in the hive group. Install our own patched
        # init.d instead. This will be removed once
        # Cloudera fixes the problem.
        # See: https://issues.cloudera.org/browse/HUE-1398
        file { '/etc/init.d/hue':
            source  => 'puppet:///modules/cdh4/hue/hue.init.d.sh',
            mode    => '0755',
            owner   => 'root',
            group   => 'root',
            require => Package['hue'],
            notify  => Service['hue'],
        }
    }

    if ($ssl_certificate and $ssl_private_key) {
        # pyOpenSSL is needed for Hue to serve HTTPS.
        if (!defined(Package['python-openssl'])) {
            package{ 'python-openssl':
                ensure => 'installed',
            }
        }

        # If the ssl settings are left at the defaults (snakeoil),
        # then run make-ssl-cert to generate the default snakeoil cert.
        if (($ssl_certificate == $cdh4::hue::defaults::ssl_certificate) and
            ($ssl_private_key == $cdh4::hue::defaults::ssl_private_key)) {

            if (!defined(Package['ssl-cert'])) {
                package{ 'ssl-cert':
                    ensure => 'installed',
                }
            }

            exec { 'generate_hue_snakeoil_ssl_cert':
                command => '/usr/sbin/make-ssl-cert generate-default-snakeoil',
                creates => $ssl_certificate,
                require => Package['ssl-cert'],
            }

            # generate the cert before hue is started.
            Exec['generate_hue_snakeoil_ssl_cert'] -> Service['hue']
        }
    }

    # $namenode_hostname is referenced by the hue.ini ERB template.
    $namenode_hostname = $cdh4::hadoop::namenode_hostname
    # NOTE(review): the default template also references @timezone, which
    # is not a parameter of this class -- confirm it is available in
    # scope, otherwise time_zone will render empty in hue.ini.
    file { '/etc/hue/hue.ini':
        content => template($hue_ini_template),
        require => Package['hue-server'],
    }

    service { 'hue':
        ensure     => 'running',
        enable     => true,
        hasrestart => true,
        hasstatus  => true,
        subscribe  => File['/etc/hue/hue.ini'],
        require    => [Package['hue-server'], User['hue']],
    }
}
diff --git a/manifests/hue/defaults.pp b/manifests/hue/defaults.pp
new file mode 100644
index 0000000..310f68e
--- /dev/null
+++ b/manifests/hue/defaults.pp
@@ -0,0 +1,49 @@
# == Class cdh4::hue::defaults
#
# Default parameter values for the cdh4::hue class.
# Do not use this class directly; set parameters on cdh4::hue instead.
#
class cdh4::hue::defaults {
    $http_host  = '0.0.0.0'
    $http_port  = 8888
    $secret_key = undef

    # Set Hue Oozie defaults to those already
    # set in the cdh4::oozie class.
    if (defined(Class['cdh4::oozie'])) {
        $oozie_url = $cdh4::oozie::url
        # BUG FIX: this previously read its own (not yet defined) value,
        # $cdh4::hue::defaults::oozie_security_enabled, which always
        # evaluated to undef.  Default to false, matching the documented
        # default ("Default: false.") in cdh4::hue.
        $oozie_security_enabled = false
    }
    # Otherwise disable Oozie interface for Hue.
    else {
        $oozie_url              = undef
        $oozie_security_enabled = undef
    }

    $smtp_host       = 'localhost'
    $smtp_port       = 25
    $smtp_user       = undef
    $smtp_password   = undef
    $smtp_from_email = undef

    # Debian/Ubuntu "snakeoil" certificate pair; if left at these values,
    # cdh4::hue will generate them via make-ssl-cert.
    $ssl_certificate = '/etc/ssl/certs/ssl-cert-snakeoil.pem'
    $ssl_private_key = '/etc/ssl/private/ssl-cert-snakeoil.key'

    # if httpfs is enabled, the default httpfs port
    # will be used, instead of the webhdfs port.
    $httpfs_enabled = false

    # LDAP authentication stays disabled unless these are set.
    $ldap_url               = undef
    $ldap_cert              = undef
    $ldap_nt_domain         = undef
    $ldap_bind_dn           = undef
    $ldap_base_dn           = undef
    $ldap_bind_password     = undef
    $ldap_username_pattern  = undef
    $ldap_user_filter       = undef
    $ldap_user_name_attr    = undef
    $ldap_group_filter      = undef
    $ldap_group_name_attr   = undef
    $ldap_group_member_attr = undef

    $hue_ini_template = 'cdh4/hue/hue.ini.erb'
}
diff --git a/templates/hue/hue.ini.erb b/templates/hue/hue.ini.erb
new file mode 100644
index 0000000..cbc6803
--- /dev/null
+++ b/templates/hue/hue.ini.erb
@@ -0,0 +1,528 @@
+# Note: This file is managed by Puppet.
+
+# Hue configuration file
+# ===================================
+#
+# For complete documentation about the contents of this file, run
+# $ <hue_root>/build/env/bin/hue config_help
+#
+# All .ini files under the current directory are treated equally. Their
+# contents are merged to form the Hue configuration, which can
+# can be viewed on the Hue at
+# http://<hue_host>:<port>/dump_config
+
+
+###########################################################################
+# General configuration for core Desktop features (authentication, etc)
+###########################################################################
+
[desktop]

  # Set this to a random string, the longer the better.
  # This is used for secure hashing in the session store.
  <%# Consistency fix: the truthy branch previously used the bare local
      'secret_key' (deprecated template variable access) while the test
      used @secret_key; use the instance variable in both places. -%>
  secret_key=<%= @secret_key ? @secret_key : "" %>

  # Webserver listens on this address and port
  http_host=<%= @http_host %>
  http_port=<%= @http_port %>

  # Time zone name
  <%# NOTE(review): @timezone is not a parameter of cdh4::hue -- confirm
      it is set elsewhere in scope, otherwise this renders empty. -%>
  time_zone=<%= @timezone %>

  # Turn off debug
  django_debug_mode=0

  # Turn off backtrace for server error
  http_500_debug_mode=0
+
+ # Server email for internal error messages
+ ## django_server_email='[email protected]'
+
+ # Email backend
+ ## django_email_backend=django.core.mail.backends.smtp.EmailBackend
+
+ # Set to true to use CherryPy as the webserver, set to false
+ # to use Spawning as the webserver. Defaults to Spawning if
+ # key is not specified.
+ ## use_cherrypy_server = false
+
+ # Webserver runs as this user
+ ## server_user=hue
+ ## server_group=hue
+
+ # If set to false, runcpserver will not actually start the web server.
+ # Used if Apache is being used as a WSGI container.
+ ## enable_server=yes
+
+ # Number of threads used by the CherryPy web server
+ ## cherrypy_server_threads=10
+
+ # Filename of SSL Certificate
+ <%= @ssl_certificate ? "ssl_certificate=\"#{@ssl_certificate}\"" : "##
base_dn=" %>
+
+ # Filename of SSL RSA Private Key
+ ## ssl_private_key=
+ <%= @ssl_private_key ? "ssl_private_key=\"#{@ssl_private_key}\"" : "##
base_dn=" %>
+
+
+ # Default encoding for site data
+ ## default_site_encoding=utf-8
+
+ # Administrators
+ # ----------------
+ [[django_admins]]
+ ## [[[admin1]]]
+ ## name=john
+ ## [email protected]
+
+ # UI customizations
+ # -------------------
+ [[custom]]
+
+ # Top banner HTML code
+ ## banner_top_html=
+
+ # Configuration options for user authentication into the web application
+ # ------------------------------------------------------------------------
+ [[auth]]
+
+ # Authentication backend. Common settings are:
+ # - django.contrib.auth.backends.ModelBackend (entirely Django backend)
+ # - desktop.auth.backend.AllowAllBackend (allows everyone)
+ # - desktop.auth.backend.AllowFirstUserDjangoBackend
+ # (Default. Relies on Django and user manager, after the first login)
+ # - desktop.auth.backend.LdapBackend
+ # - desktop.auth.backend.PamBackend
+ # - desktop.auth.backend.SpnegoDjangoBackend
+ # - desktop.auth.backend.RemoteUserDjangoBackend
+ ## backend=desktop.auth.backend.AllowFirstUserDjangoBackend
+ <%= @ldap_url ? "backend=desktop.auth.backend.LdapBackend" : "##
backend=desktop.auth.backend.AllowFirstUserDjangoBackend" %>
+
+ ## pam_service=login
+
+ # When using the desktop.auth.backend.RemoteUserDjangoBackend, this sets
+ # the normalized name of the header that contains the remote user.
+ # The HTTP header in the request is converted to a key by converting
+ # all characters to uppercase, replacing any hyphens with underscores
+ # and adding an HTTP_ prefix to the name. So, for example, if the header
+ # is called Remote-User that would be configured as HTTP_REMOTE_USER
+ #
+ # Defaults to HTTP_REMOTE_USER
+ ## remote_user_header=HTTP_REMOTE_USER
+
+ # Configuration options for connecting to LDAP and Active Directory
+ # -------------------------------------------------------------------
+ [[ldap]]
+
+ # The search base for finding users and groups
+ <%= @ldap_base_dn ? "base_dn=\"#{ldap_base_dn}\""
: "## base_dn=\"DC=mycompany,DC=com\"" %>
+
+ # The NT domain to connect to (only for use with Active Directory)
+ <%= @ldap_nt_domain ? "nt_domain=#{ldap_nt_domain}"
: "## nt_domain=mycompany.com" %>
+
+ # URL of the LDAP server
+ <%= @ldap_url ? "ldap_url=\"#{ldap_url}\""
: "## ldap_url=ldap://auth.mycompany.com" %>
+
+ # Path to certificate for authentication over TLS
+ <%= @ldap_cert ? "ldap_cert=#{ldap_cert}"
: "## ldap_cert=" %>
+
+ # Distinguished name of the user to bind as -- not necessary if the LDAP
server
+ # supports anonymous searches
+ <%= @ldap_bind_dn ? "bind_dn=\"#{ldap_bind_dn}\""
: "## bind_dn=\"CN=ServiceAccount,DC=mycompany,DC=com\"" %>
+
+ # Password of the bind user -- not necessary if the LDAP server supports
+ # anonymous searches
+ <%= @ldap_bind_password ? "bind_password=#{ldap_bind_password}"
: "## bind_password=" %>
+
+ # Pattern for searching for usernames -- Use <username> for the parameter
+ # For use when using LdapBackend for Hue authentication
+ <%= @ldap_username_pattern ?
"ldap_username_pattern=\"#{ldap_username_pattern}\"" : "##
ldap_username_pattern=\"uid=<username>,ou=People,dc=mycompany,dc=com\"" %>
+
+ # Create users in Hue when they try to login with their LDAP credentials
+ # For use when using LdapBackend for Hue authentication
+ ## create_users_on_login = true
+
+ [[[users]]]
+
+ # Base filter for searching for users
+ <%= @ldap_user_filter ? "user_filter=\"#{ldap_user_filter}\""
: "## user_filter=\"objectclass=*\"" %>
+
+ # The username attribute in the LDAP schema
+ <%= @ldap_user_name_attr ?
"user_name_attr=\"#{ldap_user_name_attr}\"" : "##
user_name_attr=sAMAccountName" %>
+
+ [[[groups]]]
+
+ # Base filter for searching for groups
+ <%= @ldap_group_filter ? "group_filter=\"#{ldap_group_filter}\""
: "## group_filter=\"objectclass=*\"" %>
+
+ # The group name attribute in the LDAP schema
+ <%= @ldap_group_name_attr ?
"group_name_attr=\"#{ldap_group_name_attr}\"" : "## group_name_attr=cn" %>
+
+ # The attribute of the group object which identifies the members of the
group
+ <%= @ldap_group_member_attr ?
"group_member_attr=\"#{ldap_group_member_attr}\"" : "##
group_member_attr=members" %>
+
+ # Configuration options for specifying the Desktop Database. For more info,
+ # see http://docs.djangoproject.com/en/1.1/ref/settings/#database-engine
+ # ------------------------------------------------------------------------
+ [[database]]
+ # Database engine is typically one of:
+ # postgresql_psycopg2, mysql, or sqlite3
+ #
+ # Note that for sqlite3, 'name', below is a filename;
+ # for other backends, it is the database name.
+ ## engine=sqlite3
+ ## host=
+ ## port=
+ ## user=
+ ## password=
+ ## name=
+
+
+ # Configuration options for connecting to an external SMTP server
+ # ------------------------------------------------------------------------
+ [[smtp]]
+
+ # The SMTP server information for email notification delivery
+ host=<%= @smtp_host %>
+ port=<%= @smtp_port %>
+ user=<%= @smtp_user %>
+ password=<%= @smtp_password %>
+
+ # Whether to use a TLS (secure) connection when talking to the SMTP server
+ tls=<%= (@smtp_user and !@smtp_user.empty?) ? 'yes' : 'no' %>
+
+ # Default email address to use for various automated notification from Hue
+ <%= @smtp_from_email ? "default_from_email=#{smtp_from_email}" : "##
default_from_email=hue@localhost" %>
+
+
+ # Configuration options for Kerberos integration for secured Hadoop clusters
+ # ------------------------------------------------------------------------
+ [[kerberos]]
+
+ # Path to Hue's Kerberos keytab file
+ ## hue_keytab=
+ # Kerberos principal name for Hue
+ ## hue_principal=hue/hostname.foo.com
+ # Path to kinit
+ ## kinit_path=/path/to/kinit
+
+
+###########################################################################
+# Settings to configure your Hadoop cluster.
+###########################################################################
+
+[hadoop]
+
+ # Configuration for HDFS NameNode
+ # ------------------------------------------------------------------------
+ [[hdfs_clusters]]
+
+ [[[default]]]
+ # Enter the filesystem uri
+ fs_defaultfs=hdfs://<%= @namenode_hostname %>/
+
+ # Change this if your HDFS cluster is Kerberos-secured
+ ## security_enabled=false
+
+ # Use WebHdfs/HttpFs as the communication mechanism.
+ # This should be the web service root URL, such as
+ # http://namenode:50070/webhdfs/v1
+ webhdfs_url=http://<%= @namenode_hostname %>:<%= @httpfs_enabled ?
'14000' : '50070' %>/webhdfs/v1/
+
+ # Settings about this HDFS cluster. If you install HDFS in a
+ # different location, you need to set the following.
+
+ # Defaults to $HADOOP_HDFS_HOME or /usr/lib/hadoop-hdfs
+ ## hadoop_hdfs_home=/usr/lib/hadoop-hdfs
+
+ # Defaults to $HADOOP_BIN or /usr/bin/hadoop
+ ## hadoop_bin=/usr/bin/hadoop
+
+ # Defaults to $HADOOP_CONF_DIR or /etc/hadoop/conf
+ ## hadoop_conf_dir=/etc/hadoop/conf
+<%
+# This hue puppetization does not support MRv1.
+if false
+%>
+ # Configuration for MapReduce 0.20 JobTracker (MR1)
+ # ------------------------------------------------------------------------
+ [[mapred_clusters]]
+
+ [[[default]]]
+ # Enter the host on which you are running the Hadoop JobTracker
+ jobtracker_host=localhost
+ # The port where the JobTracker IPC listens on
+ jobtracker_port=8021
+ # Thrift plug-in port for the JobTracker
+ ## thrift_port=9290
+ # Whether to submit jobs to this cluster
+ ## submit_to=True
+
+ # Change this if your MapReduce cluster is Kerberos-secured
+ ## security_enabled=false
+
+ # Settings about this MR1 cluster. If you install MR1 in a
+ # different location, you need to set the following.
+
+ # Defaults to $HADOOP_MR1_HOME or /usr/lib/hadoop-0.20-mapreduce
+ ## hadoop_mapred_home=/usr/lib/hadoop-0.20-mapreduce
+
+ # Defaults to $HADOOP_BIN or /usr/bin/hadoop
+ ## hadoop_bin=/usr/bin/hadoop
+
+ # Defaults to $HADOOP_CONF_DIR or /etc/hadoop/conf
+ ## hadoop_conf_dir=/etc/hadoop/conf
+<% end -%>
+
+ # Configuration for YARN (MR2)
+ # ------------------------------------------------------------------------
+ [[yarn_clusters]]
+
+ [[[default]]]
+ # Enter the host on which you are running the ResourceManager
+ resourcemanager_host=<%= @namenode_hostname %>
+ # The port where the ResourceManager IPC listens on
+ resourcemanager_port=8032
+ # Whether to submit jobs to this cluster
+ submit_to=True
+
+ # Change this if your YARN cluster is Kerberos-secured
+ ## security_enabled=false
+
+ # Settings about this MR2 cluster. If you install MR2 in a
+ # different location, you need to set the following.
+
+ # Defaults to $HADOOP_MR2_HOME or /usr/lib/hadoop-mapreduce
+ ## hadoop_mapred_home=/usr/lib/hadoop-mapreduce
+
+ # Defaults to $HADOOP_BIN or /usr/bin/hadoop
+ ## hadoop_bin=/usr/bin/hadoop
+
+ # Defaults to $HADOOP_CONF_DIR or /etc/hadoop/conf
+ ## hadoop_conf_dir=/etc/hadoop/conf
+
+ # URL of the ResourceManager API
+ ## resourcemanager_api_url=http://<%= @namenode_hostname %>:8088
+
+ # URL of the ProxyServer API
+ proxy_api_url=http://<%= @namenode_hostname %>:8088
+
+ # URL of the HistoryServer API
+ history_server_api_url=http://<%= @namenode_hostname %>:19888
+
+
+<%
+# Enable Hue <-> Oozie API interface if @oozie_url is set.
+if @oozie_url
+-%>
+###########################################################################
+# Settings to configure liboozie
+###########################################################################
+
+[liboozie]
+ # The URL where the Oozie service runs on. This is required in order for
+ # users to submit jobs.
+ oozie_url=<%= @oozie_url %>
+
+ <%= @oozie_security_enabled ? "security_enabled=true" : "##
security_enabled=false" %>
+
+ # Location on HDFS where the workflows/coordinator are deployed when
submitted.
+ ## remote_deployement_dir=/user/hue/oozie/deployments
+
+
+###########################################################################
+# Settings to configure the Oozie app
+###########################################################################
+
+[oozie]
+ # Location on local FS where the examples are stored.
+ ## local_data_dir=..../examples
+
+ # Location on local FS where the data for the examples is stored.
+ ## sample_data_dir=...thirdparty/sample_data
+
+ # Location on HDFS where the oozie examples and workflows are stored.
+ ## remote_data_dir=/user/hue/oozie/workspaces
+
+ # Share workflows and coordinators information with all users. If set to
false,
+ # they will be visible only to the owner and administrators.
+ ## share_jobs=True
+
+ # Maximum of Oozie workflows or coodinators to retrieve in one API call.
+ ## oozie_jobs_count=100
+<% end # if @oozie_url -%>
+
+###########################################################################
+# Settings to configure Beeswax
+###########################################################################
+
+[beeswax]
+
+ # Host where Beeswax server Thrift daemon is running.
+ # If Kerberos security is enabled, the fully-qualified domain name (FQDN) is
+ # required, even if the Thrift daemon is running on the same host as Hue.
+ ## beeswax_server_host=
+
+ # Port where Beeswax Thrift server runs on.
+ ## beeswax_server_port=8002
+
+ # Host where internal metastore Thrift daemon is running.
+ ## beeswax_meta_server_host=localhost
+
+ # Configure the port the internal metastore daemon runs on.
+ # Used only if hive.metastore.local is true.
+ ## beeswax_meta_server_port=8003
+
+ # Hive home directory
+ ## hive_home_dir=/usr/lib/hive
+
+ # Hive configuration directory, where hive-site.xml is located
+ ## hive_conf_dir=/etc/hive/conf
+
+ # Timeout in seconds for thrift calls to beeswax service
+ ## beeswax_server_conn_timeout=120
+
+ # Timeout in seconds for thrift calls to the hive metastore
+ ## metastore_conn_timeout=10
+
+ # Maximum Java heapsize (in megabytes) used by Beeswax Server.
+ # Note that the setting of HADOOP_HEAPSIZE in $HADOOP_CONF_DIR/hadoop-env.sh
+ # may override this setting.
+ ## beeswax_server_heapsize=1000
+
+ # Share saved queries with all users. If set to false, saved queries are
+ # visible only to the owner and administrators.
+ ## share_saved_queries=true
+
+ # The backend to contact for queries/metadata requests.
+ # Choices are 'beeswax' (default), 'hiveserver2'.
+ ## server_interface=beeswax
+
+<%
+# This puppet module does not yet support Impala.
+if false
+-%>
+###########################################################################
+# Settings to configure Impala
+###########################################################################
+
+[impala]
+
+ # Host of the Impala Server
+ ## server_host=localhost
+
+ # Port of the Impala Server
+ ## server_port=21000
+
+<% end -%>
+
+###########################################################################
+# Settings to configure Job Designer
+###########################################################################
+
+[jobsub]
+ # Location on HDFS where the jobsub examples and templates are stored.
+ ## remote_data_dir=/user/hue/jobsub
+
+ # Location on local FS where examples and template are stored.
+ ## local_data_dir=..../data
+
+ # Location on local FS where sample data is stored
+ ## sample_data_dir=...thirdparty/sample_data
+
+
+###########################################################################
+# Settings to configure Job Browser.
+###########################################################################
+
+[jobbrowser]
+ # Share submitted jobs information with all users. If set to false,
+ # submitted jobs are visible only to the owner and administrators.
+ ## share_jobs=true
+
+
+###########################################################################
+# Settings to configure the Shell application
+###########################################################################
+
+[shell]
+  # The shell_buffer_amount specifies the number of bytes of output per shell
+  # that the Shell app will keep in memory. If not specified, it defaults to
+  # 524288 (512 KiB).
+  ## shell_buffer_amount=100
+
+ # If you run Hue against a Hadoop cluster with Kerberos security enabled, the
+ # Shell app needs to acquire delegation tokens for the subprocesses to work
+ # correctly. These delegation tokens are stored as temporary files in some
+ # directory. You can configure this directory here. If not specified, it
+ # defaults to /tmp/hue_delegation_tokens.
+ ## shell_delegation_token_dir=/tmp/hue_delegation_tokens
+
+ [[ shelltypes ]]
+
+ # Define and configure a new shell type "flume"
+ # ------------------------------------------------------------------------
+ # [[[ flume ]]]
+ # nice_name = "Flume Shell"
+ # command = "/usr/bin/flume shell"
+ # help = "The command-line Flume client interface."
+ #
+ # [[[[ environment ]]]]
+ # # You can specify environment variables for the Flume shell
+ # # in this section.
+
+ # Define and configure a new shell type "pig"
+ # ------------------------------------------------------------------------
+ [[[ pig ]]]
+ nice_name = "Pig Shell (Grunt)"
+ command = "/usr/bin/pig -l /dev/null"
+ help = "The command-line interpreter for Pig"
+
+ [[[[ environment ]]]]
+ # You can specify environment variables for the Pig shell
+ # in this section. Note that JAVA_HOME must be configured
+ # for the Pig shell to run.
+
+ [[[[[ JAVA_HOME ]]]]]
+ value = "/usr/lib/jvm/java-6-sun"
+
+ # Define and configure a new shell type "sqoop2"
+ # ------------------------------------------------------------------------
+ [[[ sqoop2 ]]]
+ nice_name = "Sqoop2 Shell"
+ command = "/usr/bin/sqoop2"
+ help = "The command-line Sqoop2 client."
+
+ [[[[ environment ]]]]
+ # You can configure environment variables for the Sqoop2 shell
+ # in this section.
+
+ # Define and configure a new shell type "hbase"
+ # ------------------------------------------------------------------------
+ [[[ hbase ]]]
+ nice_name = "HBase Shell"
+ command = "/usr/bin/hbase shell"
+ help = "The command-line HBase client interface."
+
+ [[[[ environment ]]]]
+ # You can configure environment variables for the HBase shell
+ # in this section.
+
+ # Define and configure a new shell type "hive"
+ # ------------------------------------------------------------------------
+ [[[ hive ]]]
+ nice_name = "Hive Shell"
+ command = "/usr/bin/hive"
+ help = "The command-line Hive client interface."
+
+ [[[[ environment ]]]]
+ # You can configure environment variables for the Hive shell
+ # in this section.
+
+###########################################################################
+# Settings for the User Admin application
+###########################################################################
+
+[useradmin]
+ # The name of the default user group that users will be a member of
+ ## default_user_group=default
--
To view, visit https://gerrit.wikimedia.org/r/69805
To unsubscribe, visit https://gerrit.wikimedia.org/r/settings
Gerrit-MessageType: merged
Gerrit-Change-Id: Ic523aa456df115e3944126f2ed4b411f16ad21d1
Gerrit-PatchSet: 5
Gerrit-Project: operations/puppet/cdh4
Gerrit-Branch: master
Gerrit-Owner: Ottomata <[email protected]>
Gerrit-Reviewer: Akosiaris <[email protected]>
Gerrit-Reviewer: Faidon <[email protected]>
Gerrit-Reviewer: Ottomata <[email protected]>
Gerrit-Reviewer: jenkins-bot
_______________________________________________
MediaWiki-commits mailing list
[email protected]
https://lists.wikimedia.org/mailman/listinfo/mediawiki-commits