Hi,

Can you try it without creating the SQLContext manually?

# sqlContext = SQLContext(sc)
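The %sql interpreter queries the SQLContext that Zeppelin creates for the notebook. Calling SQLContext(sc) in %pyspark builds a second context with its own temporary-table catalog, so tables registered on it are invisible to %sql. A minimal sketch of the change (assuming the stock Zeppelin Spark interpreter, which injects sc and sqlContext into %pyspark):

%pyspark
# Don't build a second context; reuse the sqlContext that Zeppelin
# injects, which is the same one the %sql interpreter queries.
# sqlContext = SQLContext(sc)

tweet = sqlContext.read.json("/temp/twitter_data_small/")
tweet.registerTempTable("tweet")  # registers into the shared catalog

After that, select * from tweet in a %sql paragraph should resolve against the same catalog.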
Thanks,
moon

On Tue, Aug 4, 2015 at 8:20 AM Braden Callahan1 <brad...@us.ibm.com> wrote:

> I have an issue where I create a table in Python (pyspark) and the sql
> interpreter does not see the table. If I create it using Scala, both
> Python and SQL pick it up without an issue.
>
> Example:
>
> %pyspark
> sqlContext = SQLContext(sc)
>
> tweet = sqlContext.read.json("/temp/twitter_data_small/")
> tweet.registerTempTable("tweet")
>
> # this works fine:
> %pyspark
> print tweet.take(5)
>
> %sql
> select * from tweet
>
> java.lang.RuntimeException: Table Not Found: tweet
>   at scala.sys.package$.error(package.scala:27)
>   at org.apache.spark.sql.catalyst.analysis.SimpleCatalog$$anonfun$1.apply(Catalog.scala:115)
>   at org.apache.spark.sql.catalyst.analysis.SimpleCatalog$$anonfun$1.apply(Catalog.scala:115)
>   at scala.collection.MapLike$class.getOrElse(MapLike.scala:128)
>   at scala.collection.AbstractMap.getOrElse(Map.scala:58)
>   at org.apache.spark.sql.catalyst.analysis.SimpleCatalog.lookupRelation(Catalog.scala:115)
>   at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.getTable(Analyzer.scala:222)
>   at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$$anonfun$apply$7.applyOrElse(Analyzer.scala:233)
>   at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$$anonfun$apply$7.applyOrElse(Analyzer.scala:229)
>   at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:222)
>   at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:222)
>   at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:51)
>   at org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:221)
>   at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$4.apply(TreeNode.scala:242)
>   at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
>   at scala.collection.Iterator$class.foreach(Iterator.scala:727)
>   at scala.collection.AbstractIterator.foreach(Iterator.scala:1157)
>   at scala.collection.generic.Growable$class.$plus$plus$eq(Growable.scala:48)
>   at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:103)
>   at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:47)
>   at scala.collection.TraversableOnce$class.to(TraversableOnce.scala:273)
>   at scala.collection.AbstractIterator.to(Iterator.scala:1157)
>   at scala.collection.TraversableOnce$class.toBuffer(TraversableOnce.scala:265)
>   at scala.collection.AbstractIterator.toBuffer(Iterator.scala:1157)
>   at scala.collection.TraversableOnce$class.toArray(TraversableOnce.scala:252)
>   at scala.collection.AbstractIterator.toArray(Iterator.scala:1157)
>   at org.apache.spark.sql.catalyst.trees.TreeNode.transformChildrenDown(TreeNode.scala:272)
>   at org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:227)
>   at org.apache.spark.sql.catalyst.trees.TreeNode.transform(TreeNode.scala:212)
>   at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.apply(Analyzer.scala:229)
>   at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.apply(Analyzer.scala:219)
>   at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1$$anonfun$apply$1.apply(RuleExecutor.scala:61)
>   at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1$$anonfun$apply$1.apply(RuleExecutor.scala:59)
>   at scala.collection.LinearSeqOptimized$class.foldLeft(LinearSeqOptimized.scala:111)
>   at scala.collection.immutable.List.foldLeft(List.scala:84)
>   at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1.apply(RuleExecutor.scala:59)
>   at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1.apply(RuleExecutor.scala:51)
>   at scala.collection.immutable.List.foreach(List.scala:318)
>   at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:51)
>   at org.apache.spark.sql.SQLContext$QueryExecution.analyzed$lzycompute(SQLContext.scala:922)
>   at org.apache.spark.sql.SQLContext$QueryExecution.analyzed(SQLContext.scala:922)
>   at org.apache.spark.sql.SQLContext$QueryExecution.assertAnalyzed(SQLContext.scala:920)
>   at org.apache.spark.sql.DataFrame.<init>(DataFrame.scala:131)
>   at org.apache.spark.sql.DataFrame$.apply(DataFrame.scala:51)
>   at org.apache.spark.sql.SQLContext.sql(SQLContext.scala:744)
>   at org.apache.zeppelin.spark.SparkSqlInterpreter.interpret(SparkSqlInterpreter.java:133)
>   at org.apache.zeppelin.interpreter.ClassloaderInterpreter.interpret(ClassloaderInterpreter.java:57)
>   at org.apache.zeppelin.interpreter.LazyOpenInterpreter.interpret(LazyOpenInterpreter.java:93)
>   at org.apache.zeppelin.interpreter.remote.RemoteInterpreterServer$InterpretJob.jobRun(RemoteInterpreterServer.java:276)
>   at org.apache.zeppelin.scheduler.Job.run(Job.java:170)
>   at org.apache.zeppelin.scheduler.FIFOScheduler$1.run(FIFOScheduler.java:118)
>   at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471)
>   at java.util.concurrent.FutureTask.run(FutureTask.java:262)
>   at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$201(ScheduledThreadPoolExecutor.java:178)
>   at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:292)
>   at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
>   at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
>   at java.lang.Thread.run(Thread.java:745)
>
> Took 0 seconds