This is an automated email from the ASF dual-hosted git repository.

lfrolov pushed a commit to branch DATALAB-2587
in repository https://gitbox.apache.org/repos/asf/incubator-datalab.git


The following commit(s) were added to refs/heads/DATALAB-2587 by this push:
     new 3af65bd  [DATALAB-2587]: fixed not defined variable error
3af65bd is described below

commit 3af65bd19682d08c1d974c84adb3cbaa734a3978
Author: leonidfrolov <[email protected]>
AuthorDate: Fri Nov 19 10:50:35 2021 +0200

    [DATALAB-2587]: fixed not defined variable error
---
 .../scripts/os/zeppelin_dataengine_create_configs.py     | 16 ++++++++--------
 .../scripts/os/zeppelin_install_dataengine_kernels.py    |  4 ++--
 2 files changed, 10 insertions(+), 10 deletions(-)

diff --git 
a/infrastructure-provisioning/src/general/scripts/os/zeppelin_dataengine_create_configs.py
 
b/infrastructure-provisioning/src/general/scripts/os/zeppelin_dataengine_create_configs.py
index 4f31d74..1f2abcc 100644
--- 
a/infrastructure-provisioning/src/general/scripts/os/zeppelin_dataengine_create_configs.py
+++ 
b/infrastructure-provisioning/src/general/scripts/os/zeppelin_dataengine_create_configs.py
@@ -45,6 +45,7 @@ parser.add_argument('--region', type=str, default='')
 parser.add_argument('--datalake_enabled', type=str, default='')
 parser.add_argument('--r_enabled', type=str, default='')
 parser.add_argument('--spark_configurations', type=str, default='')
+parser.add_argument('--python_version', type=str, default='')
 args = parser.parse_args()
 
 cluster_dir = '/opt/' + args.cluster_name + '/'
@@ -55,7 +56,7 @@ spark_link = "https://archive.apache.org/dist/spark/spark-" + spark_version + "/
              "-bin-hadoop" + hadoop_version + ".tgz"
 
 
-def configure_zeppelin_dataengine_interpreter(cluster_name, cluster_dir, 
os_user, multiple_clusters, spark_master):
+def configure_zeppelin_dataengine_interpreter(cluster_name, cluster_dir, 
os_user, multiple_clusters, spark_master, python_version):
     try:
         port_number_found = False
         zeppelin_restarted = False
@@ -123,23 +124,22 @@ def 
configure_zeppelin_dataengine_interpreter(cluster_name, cluster_dir, os_user
             subprocess.run('sudo systemctl start livy-server-' + 
str(livy_port), shell=True, check=True)
         else:
             template_file = 
"/tmp/{}/dataengine_interpreter.json".format(args.cluster_name)
-            p_version = os.environ['notebook_python_venv_version']
             fr = open(template_file, 'r+')
             text = fr.read()
             text = text.replace('CLUSTERNAME', cluster_name)
-            text = text.replace('PYTHONVERSION', p_version[:3])
-            text = text.replace('PYTHONVER_FULL', p_version)
+            text = text.replace('PYTHONVERSION', python_version[:3])
+            text = text.replace('PYTHONVER_FULL', python_version)
             text = text.replace('SPARK_HOME', cluster_dir + 'spark/')
-            text = text.replace('PYTHONVER_SHORT', p_version[:1])
+            text = text.replace('PYTHONVER_SHORT', python_version[:1])
             text = text.replace('MASTER', str(spark_master))
-            tmp_file = "/tmp/dataengine_spark_py" + p_version + 
"_interpreter.json"
+            tmp_file = "/tmp/dataengine_spark_py" + python_version + 
"_interpreter.json"
             fw = open(tmp_file, 'w')
             fw.write(text)
             fw.close()
             for _ in range(5):
                 try:
                     subprocess.run("curl --noproxy localhost -H 'Content-Type: 
application/json' -X POST -d " +
-                            "@/tmp/dataengine_spark_py" + p_version +
+                            "@/tmp/dataengine_spark_py" + python_version +
                             "_interpreter.json 
http://localhost:8080/api/interpreter/setting";, shell=True, check=True)
                     break
                 except:
@@ -175,5 +175,5 @@ if __name__ == "__main__":
     if args.multiple_clusters == 'true':
         install_remote_livy(args)
     configure_zeppelin_dataengine_interpreter(args.cluster_name, cluster_dir, 
args.os_user,
-                                              args.multiple_clusters, 
args.spark_master)
+                                              args.multiple_clusters, 
args.spark_master, args.python_version)
     update_zeppelin_interpreters(args.multiple_clusters, args.r_enabled)
\ No newline at end of file
diff --git 
a/infrastructure-provisioning/src/general/scripts/os/zeppelin_install_dataengine_kernels.py
 
b/infrastructure-provisioning/src/general/scripts/os/zeppelin_install_dataengine_kernels.py
index 643cf8f..0f2bb6a 100644
--- 
a/infrastructure-provisioning/src/general/scripts/os/zeppelin_install_dataengine_kernels.py
+++ 
b/infrastructure-provisioning/src/general/scripts/os/zeppelin_install_dataengine_kernels.py
@@ -93,7 +93,7 @@ if __name__ == "__main__":
     conn.sudo('/usr/bin/python3 
/usr/local/bin/zeppelin_dataengine_create_configs.py '
          '--cluster_name {} --spark_version {} --hadoop_version {} --os_user 
{} --spark_master {} --keyfile {} \
          --notebook_ip {} --livy_version {} --multiple_clusters {} --region {} 
--datalake_enabled {} '
-         '--r_enabled {} --spark_configurations "{}"'.
+         '--r_enabled {} --spark_configurations "{}" --python_version {}'.
          format(args.cluster_name, args.spark_version, args.hadoop_version, 
args.os_user, args.spark_master,
                 args.keyfile, args.notebook_ip, livy_version, 
os.environ['notebook_multiple_clusters'], region,
-                args.datalake_enabled, r_enabled, 
os.environ['spark_configurations']))
+                args.datalake_enabled, r_enabled, 
os.environ['spark_configurations'], os.environ['notebook_python_venv_version']))

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to