Repository: ambari
Updated Branches:
  refs/heads/feature-branch-AMBARI-21307 16e9ab751 -> 2e5b3e67a (forced update)


AMBARI-21585. Remove environment variables from zeppelin-env.sh (Prabhjyot Singh
via Venkata Sairam)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8dfdf5ec
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8dfdf5ec
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8dfdf5ec

Branch: refs/heads/feature-branch-AMBARI-21307
Commit: 8dfdf5ec275849597884508ad1bc1b5846d007cf
Parents: d260c89
Author: Venkata Sairam <venkatasairam.la...@gmail.com>
Authored: Tue Aug 22 16:48:39 2017 +0530
Committer: Venkata Sairam <venkatasairam.la...@gmail.com>
Committed: Tue Aug 22 16:48:39 2017 +0530

----------------------------------------------------------------------
 .../ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml          | 5 +----
 .../ZEPPELIN/0.6.0.3.0/configuration/zeppelin-env.xml          | 6 +-----
 2 files changed, 2 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/8dfdf5ec/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml
 
b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml
index 80ac2bb..7f0d9e4 100644
--- 
a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml
+++ 
b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml
@@ -66,7 +66,6 @@ export JAVA_HOME={{java64_home}}
 export MASTER=yarn-client
 export SPARK_YARN_JAR={{spark_jar}}
 # export ZEPPELIN_JAVA_OPTS                   # Additional jvm options. for 
example, export ZEPPELIN_JAVA_OPTS="-Dspark.executor.memory=8g 
-Dspark.cores.max=16"
-export ZEPPELIN_JAVA_OPTS="-Dspark.executor.memory={{executor_mem}} 
-Dspark.executor.instances={{executor_instances}} 
-Dspark.yarn.queue={{spark_queue}}"
 # export ZEPPELIN_MEM                         # Zeppelin jvm mem options 
Default -Xms1024m -Xmx1024m -XX:MaxPermSize=512m
 # export ZEPPELIN_INTP_MEM                    # zeppelin interpreter process 
jvm mem options. Default -Xms1024m -Xmx1024m -XX:MaxPermSize=512m
 # export ZEPPELIN_INTP_JAVA_OPTS              # zeppelin interpreter process 
jvm options.
@@ -101,7 +100,7 @@ export KERBEROS_REFRESH_INTERVAL=1d
 ## defining SPARK_HOME makes Zeppelin run spark interpreter process using 
spark-submit
 ##
 # export SPARK_HOME                           # (required) When it is defined, 
load it instead of Zeppelin embedded Spark libraries
-#export SPARK_HOME={{spark_home}}
+# export SPARK_HOME={{spark_home}}
 # export SPARK_SUBMIT_OPTIONS                 # (optional) extra options to 
pass to spark submit. eg) "--driver-memory 512M --executor-memory 1G".
 # export SPARK_APP_NAME                       # (optional) The name of spark 
application.
 
@@ -117,8 +116,6 @@ export HADOOP_CONF_DIR=/etc/hadoop/conf
 # export PYSPARK_PYTHON                       # path to the python command. 
must be the same path on the driver(Zeppelin) and all workers.
 # export PYTHONPATH
 
-export 
PYTHONPATH="${SPARK_HOME}/python:${SPARK_HOME}/python/lib/py4j-0.8.2.1-src.zip"
-export SPARK_YARN_USER_ENV="PYTHONPATH=${PYTHONPATH}"
 
 ## Spark interpreter options ##
 ##

http://git-wip-us.apache.org/repos/asf/ambari/blob/8dfdf5ec/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.3.0/configuration/zeppelin-env.xml
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.3.0/configuration/zeppelin-env.xml
 
b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.3.0/configuration/zeppelin-env.xml
index 7bd597f..1edd991 100644
--- 
a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.3.0/configuration/zeppelin-env.xml
+++ 
b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.3.0/configuration/zeppelin-env.xml
@@ -68,7 +68,6 @@ export JAVA_HOME={{java64_home}}
 export MASTER=yarn-client
 export SPARK_YARN_JAR={{spark_jar}}
 # export ZEPPELIN_JAVA_OPTS                   # Additional jvm options. for 
example, export ZEPPELIN_JAVA_OPTS="-Dspark.executor.memory=8g 
-Dspark.cores.max=16"
-export ZEPPELIN_JAVA_OPTS="-Dhdp.version={{full_stack_version}} 
-Dspark.executor.memory={{executor_mem}} 
-Dspark.executor.instances={{executor_instances}} 
-Dspark.yarn.queue={{spark_queue}}"
 # export ZEPPELIN_MEM                         # Zeppelin jvm mem options 
Default -Xms1024m -Xmx1024m -XX:MaxPermSize=512m
 # export ZEPPELIN_INTP_MEM                    # zeppelin interpreter process 
jvm mem options. Default -Xms1024m -Xmx1024m -XX:MaxPermSize=512m
 # export ZEPPELIN_INTP_JAVA_OPTS              # zeppelin interpreter process 
jvm options.
@@ -99,7 +98,7 @@ export 
ZEPPELIN_INTP_CLASSPATH_OVERRIDES="{{external_dependency_conf}}"
 ## defining SPARK_HOME makes Zeppelin run spark interpreter process using 
spark-submit
 ##
 # export SPARK_HOME                           # (required) When it is defined, 
load it instead of Zeppelin embedded Spark libraries
-#export SPARK_HOME={{spark_home}}
+# export SPARK_HOME={{spark_home}}
 # export SPARK_SUBMIT_OPTIONS                 # (optional) extra options to 
pass to spark submit. eg) "--driver-memory 512M --executor-memory 1G".
 # export SPARK_APP_NAME                       # (optional) The name of spark 
application.
 
@@ -115,9 +114,6 @@ export HADOOP_CONF_DIR=/etc/hadoop/conf
 # export PYSPARK_PYTHON                       # path to the python command. 
must be the same path on the driver(Zeppelin) and all workers.
 # export PYTHONPATH
 
-export 
PYTHONPATH="${SPARK_HOME}/python:${SPARK_HOME}/python/lib/py4j-0.8.2.1-src.zip"
-export SPARK_YARN_USER_ENV="PYTHONPATH=${PYTHONPATH}"
-
 ## Spark interpreter options ##
 ##
 # export ZEPPELIN_SPARK_USEHIVECONTEXT        # Use HiveContext instead of 
SQLContext if set true. true by default.

Reply via email to