This is an automated email from the ASF dual-hosted git repository.
lfrolov pushed a commit to branch DATALAB-2091
in repository https://gitbox.apache.org/repos/asf/incubator-datalab.git
The following commit(s) were added to refs/heads/DATALAB-2091 by this push:
new de5fcf3 [DATALAB-2091]: fixed some spark reconfiguration errors
de5fcf3 is described below
commit de5fcf34c7cfad447861d3f970fab727d15866b9
Author: leonidfrolov <[email protected]>
AuthorDate: Tue May 25 17:35:48 2021 +0300
[DATALAB-2091]: fixed some spark reconfiguration errors
---
infrastructure-provisioning/src/general/lib/azure/actions_lib.py | 8 ++++----
infrastructure-provisioning/src/general/lib/gcp/actions_lib.py | 3 ++-
2 files changed, 6 insertions(+), 5 deletions(-)
diff --git a/infrastructure-provisioning/src/general/lib/azure/actions_lib.py b/infrastructure-provisioning/src/general/lib/azure/actions_lib.py
index 27e5488..f5924d2 100644
--- a/infrastructure-provisioning/src/general/lib/azure/actions_lib.py
+++ b/infrastructure-provisioning/src/general/lib/azure/actions_lib.py
@@ -1191,10 +1191,10 @@ def configure_dataengine_spark(cluster_name, jars_dir, cluster_dir, datalake_ena
     if os.path.exists('{0}'.format(cluster_dir)):
         subprocess.run('cp -f /tmp/{0}/notebook_spark-defaults_local.conf {1}spark/conf/spark-defaults.conf'.format(cluster_name, cluster_dir), shell=True, check=True)
-    if datalake_enabled == 'false':
-        subprocess.run('cp -f /opt/spark/conf/core-site.xml {}spark/conf/'.format(cluster_dir), shell=True, check=True)
-    else:
-        subprocess.run('cp -f /opt/hadoop/etc/hadoop/core-site.xml {}hadoop/etc/hadoop/core-site.xml'.format(cluster_dir), shell=True, check=True)
+        if datalake_enabled == 'false':
+            subprocess.run('cp -f /opt/spark/conf/core-site.xml {}spark/conf/'.format(cluster_dir), shell=True, check=True)
+        else:
+            subprocess.run('cp -f /opt/hadoop/etc/hadoop/core-site.xml {}hadoop/etc/hadoop/core-site.xml'.format(cluster_dir), shell=True, check=True)
     if spark_configs and os.path.exists('{0}'.format(cluster_dir)):
         datalab_header = subprocess.run('cat /tmp/{0}/notebook_spark-defaults_local.conf | grep "^#"'.format(cluster_name), capture_output=True, shell=True, check=True).stdout.decode('UTF-8').rstrip("\n\r")
diff --git a/infrastructure-provisioning/src/general/lib/gcp/actions_lib.py b/infrastructure-provisioning/src/general/lib/gcp/actions_lib.py
index 4589980..a2429bc 100644
--- a/infrastructure-provisioning/src/general/lib/gcp/actions_lib.py
+++ b/infrastructure-provisioning/src/general/lib/gcp/actions_lib.py
@@ -1523,7 +1523,8 @@ def configure_dataengine_spark(cluster_name, jars_dir, cluster_dir, datalake_ena
     if os.path.exists('{0}'.format(cluster_dir)):
         subprocess.run('cp -f /tmp/{0}/notebook_spark-defaults_local.conf {1}spark/conf/spark-defaults.conf'.format(cluster_name, cluster_dir), shell=True, check=True)
-    subprocess.run('cp -f /opt/spark/conf/core-site.xml {}spark/conf/'.format(cluster_dir), shell=True, check=True)
+    if os.path.exists('{0}'.format(cluster_dir)):
+        subprocess.run('cp -f /opt/spark/conf/core-site.xml {}spark/conf/'.format(cluster_dir), shell=True, check=True)
     if spark_configs and os.path.exists('{0}'.format(cluster_dir)):
         datalab_header = subprocess.run('cat /tmp/{0}/notebook_spark-defaults_local.conf | grep "^#"'.format(cluster_name), capture_output=True, shell=True, check=True).stdout.decode('UTF-8').rstrip("\n\r")
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]