[jira] [Assigned] (SPARK-19038) Can't find keytab file when using Hive catalog

2017-01-05, Apache Spark (JIRA)

 [ https://issues.apache.org/jira/browse/SPARK-19038?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Apache Spark reassigned SPARK-19038:


Assignee: Apache Spark

> Can't find keytab file when using Hive catalog
> ----------------------------------------------
>
> Key: SPARK-19038
> URL: https://issues.apache.org/jira/browse/SPARK-19038
> Project: Spark
>  Issue Type: Bug
>  Components: YARN
>Affects Versions: 2.0.2
> Environment: Hadoop / YARN 2.6, pyspark, yarn-client mode
>Reporter: Peter Parente
>Assignee: Apache Spark
>  Labels: hive, kerberos, pyspark
>
> h2. Stack Trace
> {noformat}
> Py4JJavaError Traceback (most recent call last)
> <ipython-input-...> in <module>()
> ----> 1 sdf = sql.createDataFrame(df)
> /opt/spark2/python/pyspark/sql/context.py in createDataFrame(self, data, schema, samplingRatio, verifySchema)
>     307 Py4JJavaError: ...
>     308 """
> --> 309 return self.sparkSession.createDataFrame(data, schema, samplingRatio, verifySchema)
>     310
>     311 @since(1.3)
> /opt/spark2/python/pyspark/sql/session.py in createDataFrame(self, data, schema, samplingRatio, verifySchema)
>     524 rdd, schema = self._createFromLocal(map(prepare, data), schema)
>     525 jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
> --> 526 jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
>     527 df = DataFrame(jdf, self._wrapped)
>     528 df._schema = schema
> /opt/spark2/python/lib/py4j-0.10.3-src.zip/py4j/java_gateway.py in __call__(self, *args)
>    1131 answer = self.gateway_client.send_command(command)
>    1132 return_value = get_return_value(
> -> 1133 answer, self.gateway_client, self.target_id, self.name)
>    1134
>    1135 for temp_arg in temp_args:
> /opt/spark2/python/pyspark/sql/utils.py in deco(*a, **kw)
>      61 def deco(*a, **kw):
>      62 try:
> ---> 63 return f(*a, **kw)
>      64 except py4j.protocol.Py4JJavaError as e:
>      65 s = e.java_exception.toString()
> /opt/spark2/python/lib/py4j-0.10.3-src.zip/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)
>     317 raise Py4JJavaError(
>     318 "An error occurred while calling {0}{1}{2}.\n".
> --> 319 format(target_id, ".", name), value)
>     320 else:
>     321 raise Py4JError(
> Py4JJavaError: An error occurred while calling o47.applySchemaToPythonRDD.
> : org.apache.spark.SparkException: Keytab file: .keytab-f0b9b814-460e-4fa8-8e7d-029186b696c4 specified in spark.yarn.keytab does not exist
>   at org.apache.spark.sql.hive.client.HiveClientImpl.<init>(HiveClientImpl.scala:113)
>   at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
>   at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
>   at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
>   at java.lang.reflect.Constructor.newInstance(Constructor.java:423)
>   at org.apache.spark.sql.hive.client.IsolatedClientLoader.createClient(IsolatedClientLoader.scala:258)
>   at org.apache.spark.sql.hive.HiveUtils$.newClientForMetadata(HiveUtils.scala:359)
>   at org.apache.spark.sql.hive.HiveUtils$.newClientForMetadata(HiveUtils.scala:263)
>   at org.apache.spark.sql.hive.HiveSharedState.metadataHive$lzycompute(HiveSharedState.scala:39)
>   at org.apache.spark.sql.hive.HiveSharedState.metadataHive(HiveSharedState.scala:38)
>   at org.apache.spark.sql.hive.HiveSharedState.externalCatalog$lzycompute(HiveSharedState.scala:46)
>   at org.apache.spark.sql.hive.HiveSharedState.externalCatalog(HiveSharedState.scala:45)
>   at org.apache.spark.sql.hive.HiveSessionState.catalog$lzycompute(HiveSessionState.scala:50)
>   at org.apache.spark.sql.hive.HiveSessionState.catalog(HiveSessionState.scala:48)
>   at org.apache.spark.sql.hive.HiveSessionState$$anon$1.<init>(HiveSessionState.scala:63)
>   at org.apache.spark.sql.hive.HiveSessionState.analyzer$lzycompute(HiveSessionState.scala:63)
>   at org.apache.spark.sql.hive.HiveSessionState.analyzer(HiveSessionState.scala:62)
>   at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:49)
>   at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:64)
>   at org.apache.spark.sql.SparkSession.applySchemaToPythonRDD(SparkSession.scala:666)
>   at org.apache.spark.sql.SparkSession.applySchemaToPythonRDD(SparkSession.scala:656)
>   at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
>   at ...
> {noformat}
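The UUID-suffixed name in the error ({{.keytab-f0b9b814-...}}) matches the renamed copy of the keytab that Spark stages for the YARN application master, which suggests the driver-side HiveClientImpl is resolving {{spark.yarn.keytab}} against its local filesystem, where only the original file exists. Below is a minimal sketch of the kind of yarn-client session setup that reaches this code path; the principal, keytab path, and app name are placeholders, not values from this report.

{code:python}
# Sketch only: principal, keytab path, and app name are placeholders.
from pyspark.sql import SparkSession

spark = (
    SparkSession.builder
    .master("yarn")  # yarn-client mode (client deploy mode is the default)
    .appName("keytab-repro-sketch")
    .config("spark.yarn.principal", "user@EXAMPLE.COM")     # placeholder
    .config("spark.yarn.keytab", "/home/user/user.keytab")  # placeholder
    .enableHiveSupport()  # Hive catalog => HiveClientImpl on first catalog use
    .getOrCreate()
)

# createDataFrame goes through applySchemaToPythonRDD, which lazily builds the
# Hive-backed catalog; that is where the stack trace above raises SparkException
# if the path named in spark.yarn.keytab does not exist on the driver.
df = spark.createDataFrame([(1, "a")], ["id", "val"])
{code}

In practice the credentials are usually passed at launch ({{spark-submit --principal ... --keytab ...}}); Spark then renames the keytab for distribution and updates {{spark.yarn.keytab}} accordingly, which matches the UUID-suffixed name the Hive client fails to find locally here.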

[jira] [Assigned] (SPARK-19038) Can't find keytab file when using Hive catalog

2017-01-05, Apache Spark (JIRA)

 [ https://issues.apache.org/jira/browse/SPARK-19038?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Apache Spark reassigned SPARK-19038:


Assignee: (was: Apache Spark)
