This is an automated email from the ASF dual-hosted git repository.

mwalch pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/fluo-muchos.git


The following commit(s) were added to refs/heads/master by this push:
     new 9a232dd  Spark installation is now optional (#226)
9a232dd is described below

commit 9a232dd7fabfd23c30a312ecb684a827b325f579
Author: Mike Walch <[email protected]>
AuthorDate: Tue Jul 17 10:52:07 2018 -0400

    Spark installation is now optional (#226)
    
    * Users must now assign the spark service to a node to install Spark
    * The default Spark version is now 2.2.2
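    
    For example (illustrative hostnames, following the updated
    conf/muchos.props.example below), a node assignment such as
    
        leader1 = namenode,resourcemanager,accumulomaster,zookeeper
        leader2 = metrics,spark
    
    opts the cluster in to Spark: it is installed on all nodes and the
    Spark History server runs on leader2.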
---
 README.md                                    |  5 ++++-
 ansible/common.yml                           | 15 ---------------
 ansible/hadoop.yml                           |  9 +++++++++
 ansible/roles/hadoop/tasks/main.yml          |  3 ++-
 ansible/roles/hadoop/templates/yarn-site.xml | 11 +++++++----
 ansible/roles/proxy/tasks/main.yml           |  1 -
 ansible/roles/spark/tasks/download.yml       | 18 ++++++++++++++++++
 ansible/roles/spark/tasks/main.yml           |  2 +-
 ansible/spark.yml                            |  9 +++++++++
 ansible/zookeeper.yml                        |  6 ++++++
 conf/hosts/example/example_cluster           |  8 ++++----
 conf/muchos.props.example                    | 15 +++++++--------
 lib/muchos/config.py                         |  2 +-
 lib/muchos/main.py                           |  7 ++++++-
 lib/tests/test_config.py                     | 27 ++++++++++++---------------
 15 files changed, 86 insertions(+), 52 deletions(-)

diff --git a/README.md b/README.md
index 8533616..17aafef 100644
--- a/README.md
+++ b/README.md
@@ -90,7 +90,10 @@ section below.
 metrics are sent to InfluxDB using collectd and are viewable in Grafana.  If Fluo is running, its
 metrics will also be viewable in Grafana.
 
-3. `mesosmaster` - If specified, a Mesos master will be started on this node and Mesos slaves will
+3. `spark` - If specified on a node, Apache Spark will be installed on all nodes and the Spark History
+server will be run on this node.
+
+4. `mesosmaster` - If specified, a Mesos master will be started on this node and Mesos slaves will
 be started on all workers nodes. The Mesos status page will be viewable at
 `http://<MESOS_MASTER_NODE>:5050/`. Marathon will also be started on this node and will be viewable
 at `http://<MESOS_MASTER_NODE>:8080/`.
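
By analogy with the Mesos URLs above, the Spark History server UI should be reachable at Spark's stock default port, roughly:

    http://<SPARK_NODE>:18080/

(18080 is an assumption based on Spark's defaults; this commit does not configure the port.)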
diff --git a/ansible/common.yml b/ansible/common.yml
index df171c3..b9214f4 100644
--- a/ansible/common.yml
+++ b/ansible/common.yml
@@ -16,18 +16,3 @@
   handlers:
     - name: "update network settings"
       command: /sbin/ifup-local {{ network_interface }}
-- hosts: all
-  roles:
-    - spark
-    - hadoop
-    - zookeeper
-- hosts: namenode
-  tasks:
-    - import_tasks: roles/hadoop/tasks/start-hdfs.yml
-- hosts: resourcemanager
-  tasks:
-    - import_tasks: roles/hadoop/tasks/start-yarn.yml
-    - import_tasks: roles/spark/tasks/start-spark-history.yml
-- hosts: zookeepers
-  tasks:
-    - import_tasks: roles/zookeeper/tasks/start-zookeeper.yml
diff --git a/ansible/hadoop.yml b/ansible/hadoop.yml
new file mode 100644
index 0000000..63c5ce9
--- /dev/null
+++ b/ansible/hadoop.yml
@@ -0,0 +1,9 @@
+- hosts: all
+  roles:
+    - hadoop
+- hosts: namenode
+  tasks:
+    - import_tasks: roles/hadoop/tasks/start-hdfs.yml
+- hosts: resourcemanager
+  tasks:
+    - import_tasks: roles/hadoop/tasks/start-yarn.yml
diff --git a/ansible/roles/hadoop/tasks/main.yml b/ansible/roles/hadoop/tasks/main.yml
index 23ef7e7..e091d62 100644
--- a/ansible/roles/hadoop/tasks/main.yml
+++ b/ansible/roles/hadoop/tasks/main.yml
@@ -9,7 +9,8 @@
     - mapred-site.xml
     - slaves
 - name: "copy spark yarn shuffle jar to hadoop lib"
-  command: cp {{ spark_home }}/lib/spark-{{ spark_version }}-yarn-shuffle.jar {{ hadoop_prefix }}/share/hadoop/yarn/lib/ creates={{ hadoop_prefix }}/share/hadoop/yarn/lib/spark-{{ spark_version }}-yarn-shuffle.jar
+  command: cp {{ spark_home }}/yarn/spark-{{ spark_version }}-yarn-shuffle.jar {{ hadoop_prefix }}/share/hadoop/yarn/lib/ creates={{ hadoop_prefix }}/share/hadoop/yarn/lib/spark-{{ spark_version }}-yarn-shuffle.jar
+  when: "'spark' in groups"
 - name: "setup hadoop short circuit socket dir"
   file: path=/var/lib/hadoop-hdfs state=directory owner={{ cluster_user }} group={{ cluster_user }} mode=0755
   become: yes
diff --git a/ansible/roles/hadoop/templates/yarn-site.xml b/ansible/roles/hadoop/templates/yarn-site.xml
index d8b8d4d..603dc31 100644
--- a/ansible/roles/hadoop/templates/yarn-site.xml
+++ b/ansible/roles/hadoop/templates/yarn-site.xml
@@ -32,10 +32,7 @@
     <name>yarn.nodemanager.log-dirs</name>
     <value>{{ worker_data_dirs[0] }}/hadoop/yarn/logs</value>
   </property>
-  <property>
-    <name>yarn.nodemanager.aux-services</name>
-    <value>mapreduce_shuffle</value>
-  </property>
+  {% if 'spark' in groups %}
   <property>
     <name>yarn.nodemanager.aux-services</name>
     <value>mapreduce_shuffle,spark_shuffle</value>
@@ -44,6 +41,12 @@
     <name>yarn.nodemanager.aux-services.spark_shuffle.class</name>
     <value>org.apache.spark.network.yarn.YarnShuffleService</value>
   </property>
+  {% else %}
+  <property>
+    <name>yarn.nodemanager.aux-services</name>
+    <value>mapreduce_shuffle</value>
+  </property>
+  {% endif %}
   <property>
     <name>yarn.nodemanager.resource.memory-mb</name>
     <value>{{ yarn_nm_mem_mb }}</value>
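
Since `groups` is the generated Ansible inventory, the conditional above registers YARN's Spark shuffle service only when a spark group exists. Rendered output in that case (a sketch assembled verbatim from the template):

    <property>
      <name>yarn.nodemanager.aux-services</name>
      <value>mapreduce_shuffle,spark_shuffle</value>
    </property>
    <property>
      <name>yarn.nodemanager.aux-services.spark_shuffle.class</name>
      <value>org.apache.spark.network.yarn.YarnShuffleService</value>
    </property>

Without a spark group, only mapreduce_shuffle is configured.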
diff --git a/ansible/roles/proxy/tasks/main.yml b/ansible/roles/proxy/tasks/main.yml
index bed6171..8ebc7f7 100644
--- a/ansible/roles/proxy/tasks/main.yml
+++ b/ansible/roles/proxy/tasks/main.yml
@@ -19,5 +19,4 @@
     - { urlp: "{{ apache_mirror.stdout }}/zookeeper/zookeeper-{{ 
zookeeper_version }}", fn: "{{ zookeeper_tarball }}", sum: "{{ zookeeper_sha256 
}}" }
     - { urlp: "{{ apache_mirror.stdout }}/hadoop/common/hadoop-{{ 
hadoop_version }}", fn: "{{ hadoop_tarball }}", sum: "{{ hadoop_sha256 }}" }
     - { urlp: "{{ apache_mirror.stdout }}/maven/maven-3/{{ maven_version 
}}/binaries", fn: "{{ maven_tarball }}", sum: "{{ maven_sha256 }}" }
-    - { urlp: "{{ apache_mirror.stdout }}/spark/spark-{{ spark_version }}", 
fn: "{{ spark_tarball }}", sum: "{{ spark_sha256 }}" }
     - { urlp: "https://github.com/github/hub/releases/download/v{{ hub_version 
}}", fn: "{{ hub_tarball }}", sum: "{{ hub_sha256 }}" } 
diff --git a/ansible/roles/spark/tasks/download.yml b/ansible/roles/spark/tasks/download.yml
new file mode 100644
index 0000000..c22dbc3
--- /dev/null
+++ b/ansible/roles/spark/tasks/download.yml
@@ -0,0 +1,18 @@
+- name: "determine best apache mirror to use"
+  shell: curl -sk https://apache.org/mirrors.cgi?as_json | grep preferred | cut -d \" -f 4
+  args:
+    warn: no
+  register: apache_mirror
+  failed_when: "'http' not in apache_mirror.stdout"
+  changed_when: False
+- name: "check if Spark tarball was uploaded to proxy"
+  stat: path={{ tarballs_dir }}/{{ spark_tarball }}
+  register: spark
+- name: "download Spark tarball to proxy"
+  get_url: url={{ item.urlp }}/{{ item.fn }} dest={{ tarballs_dir }}/{{ item.fn }} sha256sum={{ item.sum }} force=no
+  register: gresult
+  until: "'OK' in gresult.msg or 'file already exists' in gresult.msg"
+  retries: 3
+  with_items:
+    - { urlp: "{{ apache_mirror.stdout }}/spark/spark-{{ spark_version }}", 
fn: "{{ spark_tarball }}", sum: "{{ spark_sha256 }}" }
+  when: spark.stat.exists == False
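
For context, mirrors.cgi?as_json returns a small JSON document whose "preferred" entry names the chosen mirror, so the grep | cut pipeline above yields a bare URL. An illustrative run (mirror host invented for the example):

    $ curl -sk https://apache.org/mirrors.cgi?as_json | grep preferred | cut -d \" -f 4
    http://mirror.example.org/

The failed_when guard above then simply verifies that something http-like came back.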
diff --git a/ansible/roles/spark/tasks/main.yml b/ansible/roles/spark/tasks/main.yml
index 79e4000..6bd432d 100644
--- a/ansible/roles/spark/tasks/main.yml
+++ b/ansible/roles/spark/tasks/main.yml
@@ -1,5 +1,5 @@
 - name: "install spark tarball"
-  unarchive: src={{ tarballs_dir }}/{{ spark_tarball }} dest={{ install_dir }} creates={{ spark_home }} copy=yes owner={{ cluster_user }} group={{ cluster_user }}
+  unarchive: src={{ tarballs_dir }}/{{ spark_tarball }} dest={{ install_dir }} creates={{ spark_home }} copy=yes
 - name: "configure spark"
   template: src={{ item }} dest={{ spark_home }}/conf/{{ item }} owner={{ cluster_user }} group={{ cluster_user }}
   with_items:
diff --git a/ansible/spark.yml b/ansible/spark.yml
new file mode 100644
index 0000000..1c1ba03
--- /dev/null
+++ b/ansible/spark.yml
@@ -0,0 +1,9 @@
+- hosts: proxy
+  tasks:
+    - import_tasks: roles/spark/tasks/download.yml
+- hosts: all
+  roles:
+    - spark
+- hosts: spark
+  tasks:
+    - import_tasks: roles/spark/tasks/start-spark-history.yml
diff --git a/ansible/zookeeper.yml b/ansible/zookeeper.yml
new file mode 100644
index 0000000..2e001d2
--- /dev/null
+++ b/ansible/zookeeper.yml
@@ -0,0 +1,6 @@
+- hosts: all
+  roles:
+    - zookeeper
+- hosts: zookeepers
+  tasks:
+    - import_tasks: roles/zookeeper/tasks/start-zookeeper.yml
diff --git a/conf/hosts/example/example_cluster b/conf/hosts/example/example_cluster
index 74d7e15..3109cd1 100644
--- a/conf/hosts/example/example_cluster
+++ b/conf/hosts/example/example_cluster
@@ -4,7 +4,7 @@
 # <Hostname> <Private IP> [<Public IP>]
 leader1 10.0.0.0 23.0.0.0
 leader2 10.0.0.1
-leader3 10.0.0.2
-worker1 10.0.0.3
-worker2 10.0.0.4
-worker3 10.0.0.5
+worker1 10.0.0.2
+worker2 10.0.0.3
+worker3 10.0.0.4
+worker4 10.0.0.5
diff --git a/conf/muchos.props.example b/conf/muchos.props.example
index b729350..b7fe36a 100644
--- a/conf/muchos.props.example
+++ b/conf/muchos.props.example
@@ -36,14 +36,14 @@ accumulo_password = secret
 # Software versions
 hadoop_version = 2.8.4
 zookeeper_version = 3.4.12
-spark_version = 1.6.3
+spark_version = 2.2.2
 fluo_version = 1.2.0
 fluo_yarn_version = 1.0.0
 accumulo_version = 1.9.1
 # Software sha256 checksums
 hadoop_sha256 = 6b545972fdd73173887cdbc3e1cbd3cc72068271924edea82a0e7e653199b115
 zookeeper_sha256 = c686f9319050565b58e642149cb9e4c9cc8c7207aacc2cb70c5c0672849594b9
-spark_sha256 = d13358a2d45e78d7c8cf22656d63e5715a5900fab33b3340df9e11ce3747e314
+spark_sha256 = 023b2fea378b3dd0fee2d5d1de6bfaf2d8349aefe7be97a9cbcf03bbacc428d7
 fluo_sha256 = 037f89cd2bfdaf76a1368256c52de46d6b9a85c9c1bfc776ec4447d02c813fb2
 fluo_yarn_sha256 = c6220d35cf23127272f3b5638c44586504dc17a46f5beecdfee5027b5ff874b0
 accumulo_sha256 = f9cebff3ff85cacb8c80263725663b047ef239916cb4490c93c62509d62e1e76
@@ -158,12 +158,11 @@ yarn_nm_mem_mb=16384
 # Where:
 #   Hostname = Must be unique.  Will be used for hostname in EC2 or should match hostname on your own cluster
 #   Service = Service to run on node (possible values: zookeeper, namenode, resourcemanager, accumulomaster,
-#             mesosmaster, worker, fluo, metrics)
-#   All services are required below except for mesosmaster, fluo, fluo_yarn & metrics which are optional
-leader1 = namenode,zookeeper,fluo,fluo_yarn
-leader2 = resourcemanager,zookeeper
-leader3 = accumulomaster,zookeeper
-metrics = metrics
+#             mesosmaster, worker, fluo, metrics, spark). The following services are required: namenode, resourcemanager,
+#             accumulomaster, zookeeper & worker
+leader1 = namenode,resourcemanager,accumulomaster,zookeeper
+leader2 = metrics
 worker1 = worker
 worker2 = worker
 worker3 = worker
+worker4 = worker
diff --git a/lib/muchos/config.py b/lib/muchos/config.py
index 7196aa0..3048aa2 100644
--- a/lib/muchos/config.py
+++ b/lib/muchos/config.py
@@ -17,7 +17,7 @@ from sys import exit
 from util import get_ephemeral_devices, get_arch
 import os
 
-SERVICES = ['zookeeper', 'namenode', 'resourcemanager', 'accumulomaster', 'mesosmaster', 'worker', 'fluo', 'fluo_yarn', 'metrics']
+SERVICES = ['zookeeper', 'namenode', 'resourcemanager', 'accumulomaster', 'mesosmaster', 'worker', 'fluo', 'fluo_yarn', 'metrics', 'spark']
 
 
 class DeployConfig(ConfigParser):
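
With 'spark' registered here, a muchos.props node list may include it, and the has_service/get_service_hostnames calls used in main.py pick it up. A rough sketch (constructor arguments abbreviated; see the real call in the test changes below):

    c = DeployConfig("muchos", 'conf/muchos.props', 'conf/hosts/example/example_cluster', ...)
    if c.has_service('spark'):
        # the first node listing the spark service hosts the Spark History server
        history_node = c.get_service_hostnames('spark')[0]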
diff --git a/lib/muchos/main.py b/lib/muchos/main.py
index 26c7025..49bbb4a 100644
--- a/lib/muchos/main.py
+++ b/lib/muchos/main.py
@@ -223,6 +223,10 @@ class MuchosCluster:
 
         with open(join(config.deploy_path, "ansible/site.yml"), 'w') as site_file:
             print >>site_file, "- import_playbook: common.yml"
+            if config.has_service("spark"):
+                print >>site_file, "- import_playbook: spark.yml"
+            print >>site_file, "- import_playbook: hadoop.yml"
+            print >>site_file, "- import_playbook: zookeeper.yml"
             if config.has_service("metrics"):
                 print >>site_file, "- import_playbook: metrics.yml"
             print >>site_file, "- import_playbook: accumulo.yml"
@@ -239,9 +243,10 @@ class MuchosCluster:
             print >>hosts_file, "\n[accumulomaster]\n{0}".format(config.get_service_hostnames("accumulomaster")[0])
             print >>hosts_file, "\n[namenode]\n{0}".format(config.get_service_hostnames("namenode")[0])
             print >>hosts_file, "\n[resourcemanager]\n{0}".format(config.get_service_hostnames("resourcemanager")[0])
+            if config.has_service("spark"):
+                print >>hosts_file, "\n[spark]\n{0}".format(config.get_service_hostnames("spark")[0])
             if config.has_service("mesosmaster"):
                 print >>hosts_file, "\n[mesosmaster]\n{0}".format(config.get_service_hostnames("mesosmaster")[0])
-
             if config.has_service("metrics"):
                 print >>hosts_file, "\n[metrics]\n{0}".format(config.get_service_hostnames("metrics")[0])
 
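Against the example cluster in this commit, with spark hypothetically assigned to leader1, the relevant generated inventory groups would be:

    [namenode]
    leader1

    [resourcemanager]
    leader1

    [spark]
    leader1

These are the groups that the hadoop.yml and spark.yml playbooks above target.
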
diff --git a/lib/tests/test_config.py b/lib/tests/test_config.py
index 6ef2f32..738036f 100644
--- a/lib/tests/test_config.py
+++ b/lib/tests/test_config.py
@@ -31,17 +31,16 @@ def test_defaults():
     assert not c.has_option('ec2', 'subnet_id')
     assert c.get('ec2', 'key_name') == 'my_aws_key'
     assert c.instance_tags() == {}
-    assert len(c.nodes()) == 7
-    assert c.get_node('leader1') == ['namenode', 'zookeeper', 'fluo', 'fluo_yarn']
+    assert len(c.nodes()) == 6
+    assert c.get_node('leader1') == ['namenode', 'resourcemanager', 'accumulomaster', 'zookeeper']
     assert c.get_node('worker1') == ['worker']
     assert c.get_node('worker2') == ['worker']
     assert c.get_node('worker3') == ['worker']
-    assert c.has_service('fluo')
-    assert c.get_service_hostnames('worker') == ['worker1', 'worker2', 'worker3']
-    assert c.get_service_hostnames('zookeeper') == ['leader1', 'leader2', 'leader3']
-    assert c.get_hosts() == {'leader2': ('10.0.0.1', None), 'leader3': ('10.0.0.2', None),
-                             'leader1': ('10.0.0.0', '23.0.0.0'), 'worker1': ('10.0.0.3', None),
-                             'worker3': ('10.0.0.5', None), 'worker2': ('10.0.0.4', None)}
+    assert c.has_service('accumulomaster')
+    assert not c.has_service('fluo')
+    assert c.get_service_hostnames('worker') == ['worker1', 'worker2', 'worker3', 'worker4']
+    assert c.get_service_hostnames('zookeeper') == ['leader1']
+    assert c.get_hosts() == {'leader2': ('10.0.0.1', None), 'leader1': ('10.0.0.0', '23.0.0.0'), 'worker1': ('10.0.0.2', None), 'worker3': ('10.0.0.4', None), 'worker2': ('10.0.0.3', None), 'worker4': ('10.0.0.5', None)}
     assert c.get_public_ip('leader1') == '23.0.0.0'
     assert c.get_private_ip('leader1') == '10.0.0.0'
     assert c.cluster_name == 'mycluster'
@@ -49,18 +48,16 @@ def test_defaults():
     assert c.version("fluo").startswith('1.')
     assert c.version("hadoop").startswith('2.')
     assert c.version("zookeeper").startswith('3.')
-    assert c.get_service_private_ips("worker") == ['10.0.0.3', '10.0.0.4', 
'10.0.0.5']
+    assert c.get_service_private_ips("worker") == ['10.0.0.2', '10.0.0.3', 
'10.0.0.4', '10.0.0.5']
     assert c.get('general', 'proxy_hostname') == "leader1"
     assert c.proxy_public_ip() == "23.0.0.0"
     assert c.proxy_private_ip() == "10.0.0.0"
     assert c.get('general', 'cluster_basedir') == "/home/centos"
     assert c.get('general', 'cluster_user') == "centos"
-    assert c.get_non_proxy() == [('10.0.0.1', 'leader2'), ('10.0.0.2', 'leader3'), ('10.0.0.3', 'worker1'),
-                                 ('10.0.0.4', 'worker2'), ('10.0.0.5', 'worker3')]
-    assert c.get_host_services() == [('leader1', 'namenode zookeeper fluo fluo_yarn'), ('leader2', 'resourcemanager zookeeper'),
-                                     ('leader3', 'accumulomaster zookeeper'), ('metrics', 'metrics'),
-                                     ('worker1', 'worker'), ('worker2', 'worker'), ('worker3', 'worker')]
-
+    assert c.get_non_proxy() == [('10.0.0.1', 'leader2'), ('10.0.0.2', 'worker1'), ('10.0.0.3', 'worker2'),
+                                 ('10.0.0.4', 'worker3'), ('10.0.0.5', 'worker4')]
+    assert c.get_host_services() == [('leader1', 'namenode resourcemanager accumulomaster zookeeper'), ('leader2', 'metrics'),
+            ('worker1', 'worker'), ('worker2', 'worker'), ('worker3', 'worker'), ('worker4', 'worker')]
 
 def test_case_sensitive():
     c = DeployConfig("muchos", '../conf/muchos.props.example', 
'../conf/hosts/example/example_cluster',
