phoenix git commit: PHOENIX-2196 phoenix-spark should automatically convert DataFrame field names (Randy Gelhausen)

2015-09-28 Thread jmahonin
Repository: phoenix
Updated Branches:
  refs/heads/4.5-HBase-1.1 9fdb8bcc7 -> 45835f04b


PHOENIX-2196 phoenix-spark should automatically convert DataFrame field names (Randy Gelhausen)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/45835f04
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/45835f04
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/45835f04

Branch: refs/heads/4.5-HBase-1.1
Commit: 45835f04b370a24d7ce10689a346a549ed5a9616
Parents: 9fdb8bc
Author: Josh Mahonin 
Authored: Sun Aug 23 14:18:55 2015 -0400
Committer: Josh Mahonin 
Committed: Mon Sep 28 12:39:27 2015 -0400

--
 .../apache/phoenix/spark/PhoenixSparkIT.scala   | 22 ++--
 .../phoenix/spark/DataFrameFunctions.scala  | 15 +++--
 2 files changed, 29 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/45835f04/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
--
diff --git a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
index e1c9df4..1a28b60 100644
--- a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
+++ b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
@@ -23,8 +23,8 @@ import org.apache.phoenix.query.BaseTest
 import org.apache.phoenix.schema.{TableNotFoundException, ColumnNotFoundException}
 import org.apache.phoenix.schema.types.PVarchar
 import org.apache.phoenix.util.{SchemaUtil, ColumnInfo}
-import org.apache.spark.sql.{SaveMode, execution, SQLContext}
-import org.apache.spark.sql.types.{LongType, DataType, StringType, StructField}
+import org.apache.spark.sql.{Row, SaveMode, execution, SQLContext}
+import org.apache.spark.sql.types._
 import org.apache.spark.{SparkConf, SparkContext}
 import org.joda.time.DateTime
 import org.scalatest._
@@ -448,4 +448,22 @@ class PhoenixSparkIT extends FunSuite with Matchers with BeforeAndAfterAll {
     count shouldEqual 1L
 
   }
+
+  test("Ensure DataFrame field normalization (PHOENIX-2196)") {
+    val rdd1 = sc
+      .parallelize(Seq((1L,1L,"One"),(2L,2L,"Two")))
+      .map(p => Row(p._1, p._2, p._3))
+
+    val sqlContext = new SQLContext(sc)
+
+    val schema = StructType(Seq(
+      StructField("id", LongType, nullable = false),
+      StructField("table1_id", LongType, nullable = true),
+      StructField("\"t2col1\"", StringType, nullable = true)
+    ))
+
+    val df = sqlContext.createDataFrame(rdd1, schema)
+
+    df.saveToPhoenix("TABLE2", zkUrl = Some(quorumAddress))
+  }
 }
\ No newline at end of file
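
A side note on how df.saveToPhoenix resolves in the test above: saveToPhoenix lives on DataFrameFunctions and reaches the DataFrame through an implicit conversion, which callers bring into scope with the phoenix-spark package import. A minimal, hypothetical usage sketch (the table name and zkUrl below are placeholders, not from the commit):

import org.apache.spark.sql.DataFrame
import org.apache.phoenix.spark._  // provides the implicit DataFrame -> DataFrameFunctions conversion

def save(df: DataFrame): Unit = {
  // After PHOENIX-2196, field names are normalized, so a field "id" matches column ID
  df.saveToPhoenix("TABLE2", zkUrl = Some("localhost:2181"))
}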

http://git-wip-us.apache.org/repos/asf/phoenix/blob/45835f04/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
--
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
index 5042eaa..9408210 100644
--- a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
@@ -17,6 +17,7 @@ import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.io.NullWritable
 import org.apache.phoenix.mapreduce.PhoenixOutputFormat
 import org.apache.phoenix.mapreduce.util.{ColumnInfoToStringEncoderDecoder, PhoenixConfigurationUtil}
+import org.apache.phoenix.util.SchemaUtil
 import org.apache.spark.Logging
 import org.apache.spark.sql.DataFrame
 import scala.collection.JavaConversions._
@@ -26,16 +27,18 @@ class DataFrameFunctions(data: DataFrame) extends Logging with Serializable {
   def saveToPhoenix(tableName: String, conf: Configuration = new Configuration,
                     zkUrl: Option[String] = None): Unit = {
 
+
+    // Retrieve the schema field names and normalize to Phoenix, need to do this outside of mapPartitions
+    val fieldArray = data.schema.fieldNames.map(x => SchemaUtil.normalizeIdentifier(x))
+
     // Create a configuration object to use for saving
-    @transient val outConfig = ConfigurationUtil.getOutputConfiguration(tableName, data.schema.fieldNames, zkUrl, Some(conf))
+    @transient val outConfig = ConfigurationUtil.getOutputConfiguration(tableName, fieldArray, zkUrl, Some(conf))
 
     // Retrieve the zookeeper URL
     val zkUrlFinal = ConfigurationUtil.getZookeeperURL(outConfig)
 
-    // Retrieve the schema field names, need to do this outside of mapPartitions
-    val fieldArray = data.schema.fieldNames
-    // Map the row objects into 
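
For readers skimming the diff: SchemaUtil.normalizeIdentifier, the Phoenix utility this patch introduces here, upper-cases an unquoted identifier and strips the quotes from a double-quoted one while preserving its case. A minimal, hypothetical sketch of the mapping the new fieldArray line performs, using the same field names as the integration test above:

import org.apache.phoenix.util.SchemaUtil

object NormalizeSketch extends App {
  // DataFrame-style field names, as in the PHOENIX-2196 test
  val fieldNames = Seq("id", "table1_id", "\"t2col1\"")

  // The same normalization saveToPhoenix now applies before building the output configuration
  val normalized = fieldNames.map(x => SchemaUtil.normalizeIdentifier(x))

  println(normalized.mkString(", "))  // prints: ID, TABLE1_ID, t2col1
}

With this in place, a DataFrame with lower-case field names can be saved to a Phoenix table whose columns are upper-case, while quoted names still target case-sensitive columns.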

phoenix git commit: PHOENIX-2287 phoenix-spark: Cast class exception (add support for Spark 1.5.0)

2015-09-28 Thread jmahonin
Repository: phoenix
Updated Branches:
  refs/heads/4.5-HBase-1.1 45835f04b -> 86423f7b8


PHOENIX-2287 phoenix-spark: Cast class exception (add support for Spark 1.5.0)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/86423f7b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/86423f7b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/86423f7b

Branch: refs/heads/4.5-HBase-1.1
Commit: 86423f7b887ebe50fc1d69e0155e454d6fb05bf5
Parents: 45835f0
Author: Josh Mahonin 
Authored: Mon Sep 28 13:13:23 2015 -0400
Committer: Josh Mahonin 
Committed: Mon Sep 28 13:20:43 2015 -0400

--
 phoenix-spark/pom.xml   |  2 +-
 phoenix-spark/src/it/resources/setup.sql|  4 +++-
 .../apache/phoenix/spark/PhoenixSparkIT.scala   |  7 ++
 .../org/apache/phoenix/spark/PhoenixRDD.scala   | 25 +++-
 .../apache/phoenix/spark/PhoenixRelation.scala  | 15 ++--
 5 files changed, 38 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/86423f7b/phoenix-spark/pom.xml
--
diff --git a/phoenix-spark/pom.xml b/phoenix-spark/pom.xml
index d5209ee..e6de36c 100644
--- a/phoenix-spark/pom.xml
+++ b/phoenix-spark/pom.xml
@@ -34,7 +34,7 @@
   <name>Phoenix - Spark</name>
 
   <properties>
-    <spark.version>1.4.0</spark.version>
+    <spark.version>1.5.0</spark.version>
     <scala.version>2.10.4</scala.version>
     <scala.binary.version>2.10</scala.binary.version>
     <top.dir>${project.basedir}/..</top.dir>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/86423f7b/phoenix-spark/src/it/resources/setup.sql
--
diff --git a/phoenix-spark/src/it/resources/setup.sql b/phoenix-spark/src/it/resources/setup.sql
index 154a996..db46a92 100644
--- a/phoenix-spark/src/it/resources/setup.sql
+++ b/phoenix-spark/src/it/resources/setup.sql
@@ -34,4 +34,6 @@ CREATE TABLE DATE_PREDICATE_TEST_TABLE (ID BIGINT NOT NULL, TIMESERIES_KEY TIMES
 UPSERT INTO DATE_PREDICATE_TEST_TABLE (ID, TIMESERIES_KEY) VALUES (1, CAST(CURRENT_TIME() AS TIMESTAMP))
 CREATE TABLE OUTPUT_TEST_TABLE (id BIGINT NOT NULL PRIMARY KEY, col1 VARCHAR, col2 INTEGER, col3 DATE)
 CREATE TABLE CUSTOM_ENTITY."z02"(id BIGINT NOT NULL PRIMARY KEY)
-UPSERT INTO CUSTOM_ENTITY."z02" (id) VALUES(1)
\ No newline at end of file
+UPSERT INTO CUSTOM_ENTITY."z02" (id) VALUES(1)
+CREATE TABLE TEST_DECIMAL (ID BIGINT NOT NULL PRIMARY KEY, COL1 DECIMAL)
+UPSERT INTO TEST_DECIMAL VALUES (1, 123.456789)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/86423f7b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
--
diff --git a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
index 1a28b60..f610d44 100644
--- a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
+++ b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
@@ -466,4 +466,11 @@ class PhoenixSparkIT extends FunSuite with Matchers with BeforeAndAfterAll {
 
     df.saveToPhoenix("TABLE2", zkUrl = Some(quorumAddress))
   }
+
+  // We can load the type, but it defaults to Spark's default (precision 38, scale 10)
+  ignore("Can load decimal types with accurate precision and scale (PHOENIX-2288)") {
+    val sqlContext = new SQLContext(sc)
+    val df = sqlContext.load("org.apache.phoenix.spark", Map("table" -> "TEST_DECIMAL", "zkUrl" -> quorumAddress))
+    assert(df.select("COL1").first().getDecimal(0) == BigDecimal("123.456789"))
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/86423f7b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
--
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
index 427fb24..e2d96cb 100644
--- a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
@@ -13,17 +13,18 @@
  */
 package org.apache.phoenix.spark
 
+import java.text.DecimalFormat
+
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.hbase.{HBaseConfiguration, HConstants}
 import org.apache.hadoop.io.NullWritable
 import org.apache.phoenix.mapreduce.PhoenixInputFormat
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil
 import org.apache.phoenix.schema.types._
-import org.apache.phoenix.util.ColumnInfo
+import org.apache.phoenix.util.{PhoenixRuntime, ColumnInfo}
 import org.apache.spark._
 import 
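
As a usage note on the Spark 1.5.0 bump (a hedged sketch, not part of the commit): a Phoenix table can be loaded through the DataFrame reader API as below. Per the ignored PHOENIX-2288 test above, a Phoenix DECIMAL column currently surfaces with Spark's default precision and scale (38, 10) rather than the declared ones. The SparkContext setup and zkUrl value are illustrative assumptions; TEST_DECIMAL comes from the setup.sql fixture.

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext

object LoadDecimalSketch extends App {
  val sc = new SparkContext(new SparkConf().setMaster("local").setAppName("phoenix-decimal"))
  val sqlContext = new SQLContext(sc)

  // Load the Phoenix relation; zkUrl must point at the HBase ZooKeeper quorum
  val df = sqlContext.read
    .format("org.apache.phoenix.spark")
    .options(Map("table" -> "TEST_DECIMAL", "zkUrl" -> "localhost:2181"))
    .load()

  df.printSchema()  // COL1 reports as decimal(38,10), Spark's default
  println(df.select("COL1").first().getDecimal(0))  // the stored 123.456789
}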

phoenix git commit: PHOENIX-2287 phoenix-spark: Cast class exception (add support for Spark 1.5.0)

2015-09-28 Thread jmahonin
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.0 7db4d1e53 -> 82b8b08ee


PHOENIX-2287 phoenix-spark: Cast class exception (add support for Spark 1.5.0)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/82b8b08e
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/82b8b08e
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/82b8b08e

Branch: refs/heads/4.x-HBase-1.0
Commit: 82b8b08eed5a436d476b79190c3f446b7a0836ce
Parents: 7db4d1e
Author: Josh Mahonin 
Authored: Mon Sep 28 13:13:23 2015 -0400
Committer: Josh Mahonin 
Committed: Mon Sep 28 13:20:06 2015 -0400

--
 phoenix-spark/pom.xml   |  2 +-
 phoenix-spark/src/it/resources/setup.sql|  4 +++-
 .../apache/phoenix/spark/PhoenixSparkIT.scala   |  7 ++
 .../org/apache/phoenix/spark/PhoenixRDD.scala   | 25 +++-
 .../apache/phoenix/spark/PhoenixRelation.scala  | 15 ++--
 5 files changed, 38 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/82b8b08e/phoenix-spark/pom.xml
--
diff --git a/phoenix-spark/pom.xml b/phoenix-spark/pom.xml
index 8f09288..79ab227 100644
--- a/phoenix-spark/pom.xml
+++ b/phoenix-spark/pom.xml
@@ -34,7 +34,7 @@
   <name>Phoenix - Spark</name>
 
   <properties>
-    <spark.version>1.4.0</spark.version>
+    <spark.version>1.5.0</spark.version>
     <scala.version>2.10.4</scala.version>
     <scala.binary.version>2.10</scala.binary.version>
     <top.dir>${project.basedir}/..</top.dir>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/82b8b08e/phoenix-spark/src/it/resources/setup.sql
--
diff --git a/phoenix-spark/src/it/resources/setup.sql b/phoenix-spark/src/it/resources/setup.sql
index 154a996..db46a92 100644
--- a/phoenix-spark/src/it/resources/setup.sql
+++ b/phoenix-spark/src/it/resources/setup.sql
@@ -34,4 +34,6 @@ CREATE TABLE DATE_PREDICATE_TEST_TABLE (ID BIGINT NOT NULL, TIMESERIES_KEY TIMES
 UPSERT INTO DATE_PREDICATE_TEST_TABLE (ID, TIMESERIES_KEY) VALUES (1, CAST(CURRENT_TIME() AS TIMESTAMP))
 CREATE TABLE OUTPUT_TEST_TABLE (id BIGINT NOT NULL PRIMARY KEY, col1 VARCHAR, col2 INTEGER, col3 DATE)
 CREATE TABLE CUSTOM_ENTITY."z02"(id BIGINT NOT NULL PRIMARY KEY)
-UPSERT INTO CUSTOM_ENTITY."z02" (id) VALUES(1)
\ No newline at end of file
+UPSERT INTO CUSTOM_ENTITY."z02" (id) VALUES(1)
+CREATE TABLE TEST_DECIMAL (ID BIGINT NOT NULL PRIMARY KEY, COL1 DECIMAL)
+UPSERT INTO TEST_DECIMAL VALUES (1, 123.456789)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/82b8b08e/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
--
diff --git a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
index 1a28b60..f610d44 100644
--- a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
+++ b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
@@ -466,4 +466,11 @@ class PhoenixSparkIT extends FunSuite with Matchers with BeforeAndAfterAll {
 
     df.saveToPhoenix("TABLE2", zkUrl = Some(quorumAddress))
   }
+
+  // We can load the type, but it defaults to Spark's default (precision 38, scale 10)
+  ignore("Can load decimal types with accurate precision and scale (PHOENIX-2288)") {
+    val sqlContext = new SQLContext(sc)
+    val df = sqlContext.load("org.apache.phoenix.spark", Map("table" -> "TEST_DECIMAL", "zkUrl" -> quorumAddress))
+    assert(df.select("COL1").first().getDecimal(0) == BigDecimal("123.456789"))
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/82b8b08e/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
--
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
index 427fb24..e2d96cb 100644
--- a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
@@ -13,17 +13,18 @@
  */
 package org.apache.phoenix.spark
 
+import java.text.DecimalFormat
+
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.hbase.{HBaseConfiguration, HConstants}
 import org.apache.hadoop.io.NullWritable
 import org.apache.phoenix.mapreduce.PhoenixInputFormat
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil
 import org.apache.phoenix.schema.types._
-import org.apache.phoenix.util.ColumnInfo
+import org.apache.phoenix.util.{PhoenixRuntime, ColumnInfo}
 import org.apache.spark._
 import 

Build failed in Jenkins: Phoenix-4.x-HBase-1.0 #172

2015-09-28 Thread Apache Jenkins Server
See 

Changes:

[jmahonin] PHOENIX-2287 phoenix-spark: Cast class exception (add support for Spark 1.5.0)

--
[...truncated 128956 lines...]
Downloaded: http://repo.maven.apache.org/maven2/org/codehaus/janino/janino/2.7.8/janino-2.7.8.jar (599 KB at 3345.9 KB/sec)
Downloaded: http://repo.maven.apache.org/maven2/net/jpountz/lz4/lz4/1.3.0/lz4-1.3.0.jar (232 KB at 185.8 KB/sec)
[INFO] 
[INFO] --- maven-clean-plugin:2.5:clean (default-clean) @ phoenix-spark ---
[INFO] Deleting 

[INFO] 
[INFO] --- maven-checkstyle-plugin:2.13:check (validate) @ phoenix-spark ---
[INFO] Starting audit...
Audit done.

[INFO] 
[INFO] --- maven-remote-resources-plugin:1.5:process (default) @ phoenix-spark ---
[INFO] 
[INFO] --- maven-resources-plugin:2.6:resources (default-resources) @ phoenix-spark ---
[INFO] Using 'UTF-8' encoding to copy filtered resources.
[INFO] skip non existing resourceDirectory 

[INFO] Copying 3 resources
[INFO] 
[INFO] --- scala-maven-plugin:3.2.0:add-source (scala-compile-first) @ phoenix-spark ---
[INFO] Add Source directory: 

[INFO] 
[INFO] --- scala-maven-plugin:3.2.0:compile (scala-compile-first) @ phoenix-spark ---
[WARNING]  Expected all dependencies to require Scala version: 2.10.4
[WARNING]  org.apache.phoenix:phoenix-spark:4.6.0-HBase-1.0-SNAPSHOT requires scala version: 2.10.4
[WARNING]  com.twitter:chill_2.10:0.5.0 requires scala version: 2.10.4
[WARNING]  com.typesafe.akka:akka-remote_2.10:2.3.11 requires scala version: 2.10.4
[WARNING]  com.typesafe.akka:akka-actor_2.10:2.3.11 requires scala version: 2.10.4
[WARNING]  com.typesafe.akka:akka-slf4j_2.10:2.3.11 requires scala version: 2.10.4
[WARNING]  org.apache.spark:spark-core_2.10:1.5.0 requires scala version: 2.10.4
[WARNING]  org.json4s:json4s-jackson_2.10:3.2.10 requires scala version: 2.10.0
[WARNING] Multiple versions of scala libraries detected!
[INFO] 
:-1:
 info: compiling
[INFO] Compiling 10 source files to 

 at 1443461402622
[WARNING] warning: Class org.joda.convert.FromString not found - continuing with a stub.
[WARNING] warning: Class org.joda.convert.ToString not found - continuing with a stub.
[WARNING] warning: Class org.joda.convert.ToString not found - continuing with a stub.
[WARNING] warning: Class org.joda.convert.FromString not found - continuing with a stub.
[WARNING] warning: Class org.joda.convert.ToString not found - continuing with a stub.
[WARNING] warning: Class org.joda.convert.FromString not found - continuing 

Apache-Phoenix | 4.5-HBase-0.98 | Build Successful

2015-09-28 Thread Apache Jenkins Server
4.5-HBase-0.98 branch build status Successful

Source repository https://git-wip-us.apache.org/repos/asf?p=phoenix.git;a=shortlog;h=refs/heads/4.5-HBase-0.98

Compiled Artifacts https://builds.apache.org/job/Phoenix-4.5-HBase-0.98/lastSuccessfulBuild/artifact/

Test Report https://builds.apache.org/job/Phoenix-4.5-HBase-0.98/lastCompletedBuild/testReport/

Changes
[jmahonin] PHOENIX-2196 phoenix-spark should automatically convert DataFrame field names (Randy Gelhausen)



Build times for last couple of runs. Latest build time is the right most. | Legend - blue: normal, red: test failure, gray: timeout


phoenix git commit: PHOENIX-2196 phoenix-spark should automatically convert DataFrame field names (Randy Gelhausen)

2015-09-28 Thread jmahonin
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.0 2b3dedde9 -> 7db4d1e53


PHOENIX-2196 phoenix-spark should automatically convert DataFrame field names (Randy Gelhausen)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7db4d1e5
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7db4d1e5
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7db4d1e5

Branch: refs/heads/4.x-HBase-1.0
Commit: 7db4d1e5318279bd7e3d207c208687ed3802632a
Parents: 2b3dedd
Author: Josh Mahonin 
Authored: Sun Aug 23 14:18:55 2015 -0400
Committer: Josh Mahonin 
Committed: Mon Sep 28 12:39:52 2015 -0400

--
 .../apache/phoenix/spark/PhoenixSparkIT.scala   | 22 ++--
 .../phoenix/spark/DataFrameFunctions.scala  | 15 +++--
 2 files changed, 29 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/7db4d1e5/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
--
diff --git a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
index e1c9df4..1a28b60 100644
--- a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
+++ b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
@@ -23,8 +23,8 @@ import org.apache.phoenix.query.BaseTest
 import org.apache.phoenix.schema.{TableNotFoundException, ColumnNotFoundException}
 import org.apache.phoenix.schema.types.PVarchar
 import org.apache.phoenix.util.{SchemaUtil, ColumnInfo}
-import org.apache.spark.sql.{SaveMode, execution, SQLContext}
-import org.apache.spark.sql.types.{LongType, DataType, StringType, StructField}
+import org.apache.spark.sql.{Row, SaveMode, execution, SQLContext}
+import org.apache.spark.sql.types._
 import org.apache.spark.{SparkConf, SparkContext}
 import org.joda.time.DateTime
 import org.scalatest._
@@ -448,4 +448,22 @@ class PhoenixSparkIT extends FunSuite with Matchers with BeforeAndAfterAll {
     count shouldEqual 1L
 
   }
+
+  test("Ensure DataFrame field normalization (PHOENIX-2196)") {
+    val rdd1 = sc
+      .parallelize(Seq((1L,1L,"One"),(2L,2L,"Two")))
+      .map(p => Row(p._1, p._2, p._3))
+
+    val sqlContext = new SQLContext(sc)
+
+    val schema = StructType(Seq(
+      StructField("id", LongType, nullable = false),
+      StructField("table1_id", LongType, nullable = true),
+      StructField("\"t2col1\"", StringType, nullable = true)
+    ))
+
+    val df = sqlContext.createDataFrame(rdd1, schema)
+
+    df.saveToPhoenix("TABLE2", zkUrl = Some(quorumAddress))
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7db4d1e5/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
--
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
index 5042eaa..9408210 100644
--- a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
@@ -17,6 +17,7 @@ import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.io.NullWritable
 import org.apache.phoenix.mapreduce.PhoenixOutputFormat
 import org.apache.phoenix.mapreduce.util.{ColumnInfoToStringEncoderDecoder, PhoenixConfigurationUtil}
+import org.apache.phoenix.util.SchemaUtil
 import org.apache.spark.Logging
 import org.apache.spark.sql.DataFrame
 import scala.collection.JavaConversions._
@@ -26,16 +27,18 @@ class DataFrameFunctions(data: DataFrame) extends Logging with Serializable {
   def saveToPhoenix(tableName: String, conf: Configuration = new Configuration,
                     zkUrl: Option[String] = None): Unit = {
 
+
+    // Retrieve the schema field names and normalize to Phoenix, need to do this outside of mapPartitions
+    val fieldArray = data.schema.fieldNames.map(x => SchemaUtil.normalizeIdentifier(x))
+
     // Create a configuration object to use for saving
-    @transient val outConfig = ConfigurationUtil.getOutputConfiguration(tableName, data.schema.fieldNames, zkUrl, Some(conf))
+    @transient val outConfig = ConfigurationUtil.getOutputConfiguration(tableName, fieldArray, zkUrl, Some(conf))
 
     // Retrieve the zookeeper URL
     val zkUrlFinal = ConfigurationUtil.getZookeeperURL(outConfig)
 
-    // Retrieve the schema field names, need to do this outside of mapPartitions
-    val fieldArray = data.schema.fieldNames
-    // Map the row objects into 

Apache-Phoenix | 4.5-HBase-1.0 | Build Successful

2015-09-28 Thread Apache Jenkins Server
4.5-HBase-1.0 branch build status Successful

Source repository https://git-wip-us.apache.org/repos/asf?p=phoenix.git;a=shortlog;h=refs/heads/4.5-HBase-1.0

Compiled Artifacts https://builds.apache.org/job/Phoenix-4.5-HBase-1.0/lastSuccessfulBuild/artifact/

Test Report https://builds.apache.org/job/Phoenix-4.5-HBase-1.0/lastCompletedBuild/testReport/

Changes
[jmahonin] PHOENIX-2196 phoenix-spark should automatically convert DataFrame field names (Randy Gelhausen)



Build times for last couple of runs. Latest build time is the right most. | Legend - blue: normal, red: test failure, gray: timeout


phoenix git commit: PHOENIX-2196 phoenix-spark should automatically convert DataFrame field names (Randy Gelhausen)

2015-09-28 Thread jmahonin
Repository: phoenix
Updated Branches:
  refs/heads/4.5-HBase-1.0 8e37ad182 -> 72707bc06


PHOENIX-2196 phoenix-spark should automatically convert DataFrame field names (Randy Gelhausen)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/72707bc0
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/72707bc0
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/72707bc0

Branch: refs/heads/4.5-HBase-1.0
Commit: 72707bc065000703076abff7b483a0eefd6056b5
Parents: 8e37ad1
Author: Josh Mahonin 
Authored: Sun Aug 23 14:18:55 2015 -0400
Committer: Josh Mahonin 
Committed: Mon Sep 28 12:36:50 2015 -0400

--
 .../apache/phoenix/spark/PhoenixSparkIT.scala   | 22 ++--
 .../phoenix/spark/DataFrameFunctions.scala  | 15 +++--
 2 files changed, 29 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/72707bc0/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
--
diff --git a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
index e1c9df4..1a28b60 100644
--- a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
+++ b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
@@ -23,8 +23,8 @@ import org.apache.phoenix.query.BaseTest
 import org.apache.phoenix.schema.{TableNotFoundException, ColumnNotFoundException}
 import org.apache.phoenix.schema.types.PVarchar
 import org.apache.phoenix.util.{SchemaUtil, ColumnInfo}
-import org.apache.spark.sql.{SaveMode, execution, SQLContext}
-import org.apache.spark.sql.types.{LongType, DataType, StringType, StructField}
+import org.apache.spark.sql.{Row, SaveMode, execution, SQLContext}
+import org.apache.spark.sql.types._
 import org.apache.spark.{SparkConf, SparkContext}
 import org.joda.time.DateTime
 import org.scalatest._
@@ -448,4 +448,22 @@ class PhoenixSparkIT extends FunSuite with Matchers with BeforeAndAfterAll {
     count shouldEqual 1L
 
   }
+
+  test("Ensure DataFrame field normalization (PHOENIX-2196)") {
+    val rdd1 = sc
+      .parallelize(Seq((1L,1L,"One"),(2L,2L,"Two")))
+      .map(p => Row(p._1, p._2, p._3))
+
+    val sqlContext = new SQLContext(sc)
+
+    val schema = StructType(Seq(
+      StructField("id", LongType, nullable = false),
+      StructField("table1_id", LongType, nullable = true),
+      StructField("\"t2col1\"", StringType, nullable = true)
+    ))
+
+    val df = sqlContext.createDataFrame(rdd1, schema)
+
+    df.saveToPhoenix("TABLE2", zkUrl = Some(quorumAddress))
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/72707bc0/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
--
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
index 5042eaa..9408210 100644
--- a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
@@ -17,6 +17,7 @@ import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.io.NullWritable
 import org.apache.phoenix.mapreduce.PhoenixOutputFormat
 import org.apache.phoenix.mapreduce.util.{ColumnInfoToStringEncoderDecoder, PhoenixConfigurationUtil}
+import org.apache.phoenix.util.SchemaUtil
 import org.apache.spark.Logging
 import org.apache.spark.sql.DataFrame
 import scala.collection.JavaConversions._
@@ -26,16 +27,18 @@ class DataFrameFunctions(data: DataFrame) extends Logging with Serializable {
   def saveToPhoenix(tableName: String, conf: Configuration = new Configuration,
                     zkUrl: Option[String] = None): Unit = {
 
+
+    // Retrieve the schema field names and normalize to Phoenix, need to do this outside of mapPartitions
+    val fieldArray = data.schema.fieldNames.map(x => SchemaUtil.normalizeIdentifier(x))
+
     // Create a configuration object to use for saving
-    @transient val outConfig = ConfigurationUtil.getOutputConfiguration(tableName, data.schema.fieldNames, zkUrl, Some(conf))
+    @transient val outConfig = ConfigurationUtil.getOutputConfiguration(tableName, fieldArray, zkUrl, Some(conf))
 
     // Retrieve the zookeeper URL
     val zkUrlFinal = ConfigurationUtil.getZookeeperURL(outConfig)
 
-    // Retrieve the schema field names, need to do this outside of mapPartitions
-    val fieldArray = data.schema.fieldNames
-    // Map the row objects into 

phoenix git commit: PHOENIX-2287 phoenix-spark: Cast class exception (add support for Spark 1.5.0)

2015-09-28 Thread jmahonin
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 c3907d14a -> 1ca6dabbb


PHOENIX-2287 phoenix-spark: Cast class exception (add support for Spark 1.5.0)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/1ca6dabb
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/1ca6dabb
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/1ca6dabb

Branch: refs/heads/4.x-HBase-0.98
Commit: 1ca6da020bfb8dcbfb1b20baf0f0107badf3
Parents: c3907d1
Author: Josh Mahonin 
Authored: Mon Sep 28 13:13:23 2015 -0400
Committer: Josh Mahonin 
Committed: Mon Sep 28 13:20:24 2015 -0400

--
 phoenix-spark/pom.xml   |  2 +-
 phoenix-spark/src/it/resources/setup.sql|  4 +++-
 .../apache/phoenix/spark/PhoenixSparkIT.scala   |  7 ++
 .../org/apache/phoenix/spark/PhoenixRDD.scala   | 25 +++-
 .../apache/phoenix/spark/PhoenixRelation.scala  | 15 ++--
 5 files changed, 38 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/1ca6dabb/phoenix-spark/pom.xml
--
diff --git a/phoenix-spark/pom.xml b/phoenix-spark/pom.xml
index 9c62573..326afa8 100644
--- a/phoenix-spark/pom.xml
+++ b/phoenix-spark/pom.xml
@@ -34,7 +34,7 @@
   <name>Phoenix - Spark</name>
 
   <properties>
-    <spark.version>1.4.0</spark.version>
+    <spark.version>1.5.0</spark.version>
     <scala.version>2.10.4</scala.version>
     <scala.binary.version>2.10</scala.binary.version>
     <top.dir>${project.basedir}/..</top.dir>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1ca6dabb/phoenix-spark/src/it/resources/setup.sql
--
diff --git a/phoenix-spark/src/it/resources/setup.sql b/phoenix-spark/src/it/resources/setup.sql
index 154a996..db46a92 100644
--- a/phoenix-spark/src/it/resources/setup.sql
+++ b/phoenix-spark/src/it/resources/setup.sql
@@ -34,4 +34,6 @@ CREATE TABLE DATE_PREDICATE_TEST_TABLE (ID BIGINT NOT NULL, TIMESERIES_KEY TIMES
 UPSERT INTO DATE_PREDICATE_TEST_TABLE (ID, TIMESERIES_KEY) VALUES (1, CAST(CURRENT_TIME() AS TIMESTAMP))
 CREATE TABLE OUTPUT_TEST_TABLE (id BIGINT NOT NULL PRIMARY KEY, col1 VARCHAR, col2 INTEGER, col3 DATE)
 CREATE TABLE CUSTOM_ENTITY."z02"(id BIGINT NOT NULL PRIMARY KEY)
-UPSERT INTO CUSTOM_ENTITY."z02" (id) VALUES(1)
\ No newline at end of file
+UPSERT INTO CUSTOM_ENTITY."z02" (id) VALUES(1)
+CREATE TABLE TEST_DECIMAL (ID BIGINT NOT NULL PRIMARY KEY, COL1 DECIMAL)
+UPSERT INTO TEST_DECIMAL VALUES (1, 123.456789)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1ca6dabb/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
--
diff --git a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
index a4e37e1..790624e 100644
--- a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
+++ b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
@@ -465,4 +465,11 @@ class PhoenixSparkIT extends FunSuite with Matchers with BeforeAndAfterAll {
 
     df.saveToPhoenix("TABLE2", zkUrl = Some(quorumAddress))
   }
+
+  // We can load the type, but it defaults to Spark's default (precision 38, scale 10)
+  ignore("Can load decimal types with accurate precision and scale (PHOENIX-2288)") {
+    val sqlContext = new SQLContext(sc)
+    val df = sqlContext.load("org.apache.phoenix.spark", Map("table" -> "TEST_DECIMAL", "zkUrl" -> quorumAddress))
+    assert(df.select("COL1").first().getDecimal(0) == BigDecimal("123.456789"))
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1ca6dabb/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
--
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
index 427fb24..e2d96cb 100644
--- a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
@@ -13,17 +13,18 @@
  */
 package org.apache.phoenix.spark
 
+import java.text.DecimalFormat
+
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.hbase.{HBaseConfiguration, HConstants}
 import org.apache.hadoop.io.NullWritable
 import org.apache.phoenix.mapreduce.PhoenixInputFormat
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil
 import org.apache.phoenix.schema.types._
-import org.apache.phoenix.util.ColumnInfo
+import org.apache.phoenix.util.{PhoenixRuntime, ColumnInfo}
 import org.apache.spark._
 import 

phoenix git commit: PHOENIX-2287 phoenix-spark: Cast class exception (add support for Spark 1.5.0)

2015-09-28 Thread jmahonin
Repository: phoenix
Updated Branches:
  refs/heads/master eb9fab429 -> a9bd64092


PHOENIX-2287 phoenix-spark: Cast class exception (add support for Spark 1.5.0)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a9bd6409
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a9bd6409
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a9bd6409

Branch: refs/heads/master
Commit: a9bd640929bbfa2e02ea5b1c903a01ea57fe151b
Parents: eb9fab4
Author: Josh Mahonin 
Authored: Mon Sep 28 13:13:23 2015 -0400
Committer: Josh Mahonin 
Committed: Mon Sep 28 13:19:08 2015 -0400

--
 phoenix-spark/pom.xml   |  2 +-
 phoenix-spark/src/it/resources/setup.sql|  4 +++-
 .../apache/phoenix/spark/PhoenixSparkIT.scala   |  7 ++
 .../org/apache/phoenix/spark/PhoenixRDD.scala   | 25 +++-
 .../apache/phoenix/spark/PhoenixRelation.scala  | 15 ++--
 5 files changed, 38 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/a9bd6409/phoenix-spark/pom.xml
--
diff --git a/phoenix-spark/pom.xml b/phoenix-spark/pom.xml
index 4287bf1..477c0c2 100644
--- a/phoenix-spark/pom.xml
+++ b/phoenix-spark/pom.xml
@@ -34,7 +34,7 @@
   <name>Phoenix - Spark</name>
 
   <properties>
-    <spark.version>1.4.0</spark.version>
+    <spark.version>1.5.0</spark.version>
     <scala.version>2.10.4</scala.version>
     <scala.binary.version>2.10</scala.binary.version>
     <top.dir>${project.basedir}/..</top.dir>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a9bd6409/phoenix-spark/src/it/resources/setup.sql
--
diff --git a/phoenix-spark/src/it/resources/setup.sql b/phoenix-spark/src/it/resources/setup.sql
index 154a996..db46a92 100644
--- a/phoenix-spark/src/it/resources/setup.sql
+++ b/phoenix-spark/src/it/resources/setup.sql
@@ -34,4 +34,6 @@ CREATE TABLE DATE_PREDICATE_TEST_TABLE (ID BIGINT NOT NULL, TIMESERIES_KEY TIMES
 UPSERT INTO DATE_PREDICATE_TEST_TABLE (ID, TIMESERIES_KEY) VALUES (1, CAST(CURRENT_TIME() AS TIMESTAMP))
 CREATE TABLE OUTPUT_TEST_TABLE (id BIGINT NOT NULL PRIMARY KEY, col1 VARCHAR, col2 INTEGER, col3 DATE)
 CREATE TABLE CUSTOM_ENTITY."z02"(id BIGINT NOT NULL PRIMARY KEY)
-UPSERT INTO CUSTOM_ENTITY."z02" (id) VALUES(1)
\ No newline at end of file
+UPSERT INTO CUSTOM_ENTITY."z02" (id) VALUES(1)
+CREATE TABLE TEST_DECIMAL (ID BIGINT NOT NULL PRIMARY KEY, COL1 DECIMAL)
+UPSERT INTO TEST_DECIMAL VALUES (1, 123.456789)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a9bd6409/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
--
diff --git a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
index 1a28b60..f610d44 100644
--- a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
+++ b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
@@ -466,4 +466,11 @@ class PhoenixSparkIT extends FunSuite with Matchers with BeforeAndAfterAll {
 
     df.saveToPhoenix("TABLE2", zkUrl = Some(quorumAddress))
   }
+
+  // We can load the type, but it defaults to Spark's default (precision 38, scale 10)
+  ignore("Can load decimal types with accurate precision and scale (PHOENIX-2288)") {
+    val sqlContext = new SQLContext(sc)
+    val df = sqlContext.load("org.apache.phoenix.spark", Map("table" -> "TEST_DECIMAL", "zkUrl" -> quorumAddress))
+    assert(df.select("COL1").first().getDecimal(0) == BigDecimal("123.456789"))
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a9bd6409/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
--
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
index 427fb24..e2d96cb 100644
--- a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
@@ -13,17 +13,18 @@
  */
 package org.apache.phoenix.spark
 
+import java.text.DecimalFormat
+
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.hbase.{HBaseConfiguration, HConstants}
 import org.apache.hadoop.io.NullWritable
 import org.apache.phoenix.mapreduce.PhoenixInputFormat
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil
 import org.apache.phoenix.schema.types._
-import org.apache.phoenix.util.ColumnInfo
+import org.apache.phoenix.util.{PhoenixRuntime, ColumnInfo}
 import org.apache.spark._
 import org.apache.spark.annotation.DeveloperApi
 

Apache-Phoenix | 4.5-HBase-1.0 | Build Successful

2015-09-28 Thread Apache Jenkins Server
4.5-HBase-1.0 branch build status Successful

Source repository https://git-wip-us.apache.org/repos/asf?p=phoenix.git;a=shortlog;h=refs/heads/4.5-HBase-1.0

Compiled Artifacts https://builds.apache.org/job/Phoenix-4.5-HBase-1.0/lastSuccessfulBuild/artifact/

Test Report https://builds.apache.org/job/Phoenix-4.5-HBase-1.0/lastCompletedBuild/testReport/

Changes
[jmahonin] PHOENIX-2287 phoenix-spark: Cast class exception (add support for Spark 1.5.0)



Build times for last couple of runs. Latest build time is the right most. | Legend - blue: normal, red: test failure, gray: timeout


Apache-Phoenix | Master | Build Successful

2015-09-28 Thread Apache Jenkins Server
Master branch build status Successful
Source repository https://git-wip-us.apache.org/repos/asf?p=phoenix.git;a=shortlog;h=refs/heads/master

Last Successful Compiled Artifacts https://builds.apache.org/job/Phoenix-master/lastSuccessfulBuild/artifact/

Last Complete Test Report https://builds.apache.org/job/Phoenix-master/lastCompletedBuild/testReport/

Changes
[jmahonin] PHOENIX-2196 phoenix-spark should automatically convert DataFrame field names (Randy Gelhausen)



Build times for last couple of runs. Latest build time is the right most. | Legend - blue: normal, red: test failure, gray: timeout


phoenix git commit: PHOENIX-2196 phoenix-spark should automatically convert DataFrame field names (Randy Gelhausen)

2015-09-28 Thread jmahonin
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 9af1327b1 -> c3907d14a


PHOENIX-2196 phoenix-spark should automatically convert DataFrame field names (Randy Gelhausen)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c3907d14
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c3907d14
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c3907d14

Branch: refs/heads/4.x-HBase-0.98
Commit: c3907d14acdcaa7c48d75cf068c77143fe5ff4f0
Parents: 9af1327
Author: Josh Mahonin 
Authored: Sun Aug 23 14:18:55 2015 -0400
Committer: Josh Mahonin 
Committed: Mon Sep 28 12:40:24 2015 -0400

--
 .../apache/phoenix/spark/PhoenixSparkIT.scala   | 22 ++--
 .../phoenix/spark/DataFrameFunctions.scala  | 15 +++--
 2 files changed, 29 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3907d14/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
--
diff --git a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
index 2889464..a4e37e1 100644
--- a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
+++ b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
@@ -23,8 +23,8 @@ import org.apache.phoenix.query.BaseTest
 import org.apache.phoenix.schema.{TableNotFoundException, ColumnNotFoundException}
 import org.apache.phoenix.schema.types.PVarchar
 import org.apache.phoenix.util.{SchemaUtil, ColumnInfo}
-import org.apache.spark.sql.{SaveMode, execution, SQLContext}
-import org.apache.spark.sql.types.{LongType, DataType, StringType, StructField}
+import org.apache.spark.sql.{Row, SaveMode, execution, SQLContext}
+import org.apache.spark.sql.types._
 import org.apache.spark.{SparkConf, SparkContext}
 import org.joda.time.DateTime
 import org.scalatest._
@@ -447,4 +447,22 @@ class PhoenixSparkIT extends FunSuite with Matchers with BeforeAndAfterAll {
     count shouldEqual 1L
 
   }
+
+  test("Ensure DataFrame field normalization (PHOENIX-2196)") {
+    val rdd1 = sc
+      .parallelize(Seq((1L,1L,"One"),(2L,2L,"Two")))
+      .map(p => Row(p._1, p._2, p._3))
+
+    val sqlContext = new SQLContext(sc)
+
+    val schema = StructType(Seq(
+      StructField("id", LongType, nullable = false),
+      StructField("table1_id", LongType, nullable = true),
+      StructField("\"t2col1\"", StringType, nullable = true)
+    ))
+
+    val df = sqlContext.createDataFrame(rdd1, schema)
+
+    df.saveToPhoenix("TABLE2", zkUrl = Some(quorumAddress))
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3907d14/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
--
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
index 5042eaa..9408210 100644
--- a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
@@ -17,6 +17,7 @@ import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.io.NullWritable
 import org.apache.phoenix.mapreduce.PhoenixOutputFormat
 import org.apache.phoenix.mapreduce.util.{ColumnInfoToStringEncoderDecoder, PhoenixConfigurationUtil}
+import org.apache.phoenix.util.SchemaUtil
 import org.apache.spark.Logging
 import org.apache.spark.sql.DataFrame
 import scala.collection.JavaConversions._
@@ -26,16 +27,18 @@ class DataFrameFunctions(data: DataFrame) extends Logging with Serializable {
   def saveToPhoenix(tableName: String, conf: Configuration = new Configuration,
                     zkUrl: Option[String] = None): Unit = {
 
+
+    // Retrieve the schema field names and normalize to Phoenix, need to do this outside of mapPartitions
+    val fieldArray = data.schema.fieldNames.map(x => SchemaUtil.normalizeIdentifier(x))
+
     // Create a configuration object to use for saving
-    @transient val outConfig = ConfigurationUtil.getOutputConfiguration(tableName, data.schema.fieldNames, zkUrl, Some(conf))
+    @transient val outConfig = ConfigurationUtil.getOutputConfiguration(tableName, fieldArray, zkUrl, Some(conf))
 
     // Retrieve the zookeeper URL
     val zkUrlFinal = ConfigurationUtil.getZookeeperURL(outConfig)
 
-    // Retrieve the schema field names, need to do this outside of mapPartitions
-    val fieldArray = data.schema.fieldNames
-    // Map the row objects into 

Build failed in Jenkins: Phoenix-4.x-HBase-1.0 #171

2015-09-28 Thread Apache Jenkins Server
See 

Changes:

[jmahonin] PHOENIX-2196 phoenix-spark should automatically convert DataFrame field names (Randy Gelhausen)

--
[...truncated 117756 lines...]
#
#  SIGSEGV (0xb) at pc=0x6614d52c, pid=1232, tid=1582463808
#
# JRE version: 7.0_25-b15
# Java VM: Java HotSpot(TM) Server VM (23.25-b01 mixed mode linux-x86 )
# Problematic frame:
# C  [libnet.so+0x352c]  _init+0x620
#
# Failed to write core dump. Core dumps have been disabled. To enable core dumping, try "ulimit -c unlimited" before starting Java again
#
# An error report file with more information is saved as:
# 

#
# A fatal error has been detected by the Java Runtime Environment:
#
#  SIGSEGV (0xb) at pc=0x6606d52c, pid=1234, tid=1578158912
#
# JRE version: 7.0_25-b15
# Java VM: Java HotSpot(TM) Server VM (23.25-b01 mixed mode linux-x86 )
# Problematic frame:
# C  [libnet.so+0x352c]  _init+0x620
#
# Failed to write core dump. Core dumps have been disabled. To enable core dumping, try "ulimit -c unlimited" before starting Java again
#
# An error report file with more information is saved as:
# 

#
# If you would like to submit a bug report, please visit:
#   http://bugreport.sun.com/bugreport/crash.jsp
# The crash happened outside the Java Virtual Machine in native code.
# See problematic frame for where to report the bug.
#
#
# If you would like to submit a bug report, please visit:
#   http://bugreport.sun.com/bugreport/crash.jsp
# The crash happened outside the Java Virtual Machine in native code.
# See problematic frame for where to report the bug.
#
Aborted
Aborted

Results :




Tests run: 0, Failures: 0, Errors: 0, Skipped: 0

[INFO] 
[INFO] --- maven-failsafe-plugin:2.18:integration-test (NeedTheirOwnClusterTests) @ phoenix-pherf ---
[INFO] Failsafe report directory: 

[INFO] parallel='none', perCoreThreadCount=true, threadCount=0, useUnlimitedThreads=false, threadCountSuites=0, threadCountClasses=0, threadCountMethods=0, parallelOptimized=true

---
 T E S T S
---

Results :




Tests run: 0, Failures: 0, Errors: 0, Skipped: 0

[INFO] 
[INFO] --- maven-failsafe-plugin:2.18:verify (ClientManagedTimeTests) @ phoenix-pherf ---
[INFO] Failsafe report directory: 

[INFO] 
[INFO] --- maven-failsafe-plugin:2.18:verify (HBaseManagedTimeTests) @ phoenix-pherf ---
[INFO] Failsafe report directory: 

[INFO] 
[INFO] --- maven-failsafe-plugin:2.18:verify (NeedTheirOwnClusterTests) @ phoenix-pherf ---
[INFO] Failsafe report directory: 

[INFO] 
[INFO] --- maven-install-plugin:2.5.1:install (default-install) @ phoenix-pherf ---
[INFO] Installing 

 to 
/home/jenkins/.m2/repository/org/apache/phoenix/phoenix-pherf/4.6.0-HBase-1.0-SNAPSHOT/phoenix-pherf-4.6.0-HBase-1.0-SNAPSHOT.jar
[INFO] Installing 
 
to 
/home/jenkins/.m2/repository/org/apache/phoenix/phoenix-pherf/4.6.0-HBase-1.0-SNAPSHOT/phoenix-pherf-4.6.0-HBase-1.0-SNAPSHOT.pom
[INFO] Installing 

 to 
/home/jenkins/.m2/repository/org/apache/phoenix/phoenix-pherf/4.6.0-HBase-1.0-SNAPSHOT/phoenix-pherf-4.6.0-HBase-1.0-SNAPSHOT-sources.jar
[INFO] Installing 

 to 
/home/jenkins/.m2/repository/org/apache/phoenix/phoenix-pherf/4.6.0-HBase-1.0-SNAPSHOT/phoenix-pherf-4.6.0-HBase-1.0-SNAPSHOT-tests.jar
[INFO] Installing 

 to 
/home/jenkins/.m2/repository/org/apache/phoenix/phoenix-pherf/4.6.0-HBase-1.0-SNAPSHOT/phoenix-pherf-4.6.0-HBase-1.0-SNAPSHOT-jar-with-dependencies.jar
[INFO] Installing 

 to 

Apache-Phoenix | 4.5-HBase-1.1 | Build Successful

2015-09-28 Thread Apache Jenkins Server
4.5-HBase-1.1 branch build status Successful

Source repository https://git-wip-us.apache.org/repos/asf?p=phoenix.git;a=shortlog;h=refs/heads/4.5-HBase-1.1

Compiled Artifacts https://builds.apache.org/job/Phoenix-4.5-HBase-1.1/lastSuccessfulBuild/artifact/

Test Report https://builds.apache.org/job/Phoenix-4.5-HBase-1.1/lastCompletedBuild/testReport/

Changes
[jmahonin] PHOENIX-2196 phoenix-spark should automatically convert DataFrame field names (Randy Gelhausen)



Build times for last couple of runs. Latest build time is the right most. | Legend - blue: normal, red: test failure, gray: timeout


Apache-Phoenix | 4.5-HBase-0.98 | Build Successful

2015-09-28 Thread Apache Jenkins Server
4.5-HBase-0.98 branch build status Successful

Source repository https://git-wip-us.apache.org/repos/asf?p=phoenix.git;a=shortlog;h=refs/heads/4.5-HBase-0.98

Compiled Artifacts https://builds.apache.org/job/Phoenix-4.5-HBase-0.98/lastSuccessfulBuild/artifact/

Test Report https://builds.apache.org/job/Phoenix-4.5-HBase-0.98/lastCompletedBuild/testReport/

Changes
[jmahonin] PHOENIX-2287 phoenix-spark: Cast class exception (add support for Spark 1.5.0)



Build times for last couple of runs. Latest build time is the right most. | Legend - blue: normal, red: test failure, gray: timeout


phoenix git commit: PHOENIX-2287 phoenix-spark: Cast class exception (add support for Spark 1.5.0)

2015-09-28 Thread jmahonin
Repository: phoenix
Updated Branches:
  refs/heads/4.5-HBase-1.0 72707bc06 -> 5bb0d4c3a


PHOENIX-2287 phoenix-spark: Cast class exception (add support for Spark 1.5.0)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/5bb0d4c3
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/5bb0d4c3
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/5bb0d4c3

Branch: refs/heads/4.5-HBase-1.0
Commit: 5bb0d4c3a0efddbfc0a4d3c97b1bc1aed9ee4d58
Parents: 72707bc
Author: Josh Mahonin 
Authored: Mon Sep 28 13:13:23 2015 -0400
Committer: Josh Mahonin 
Committed: Mon Sep 28 13:21:04 2015 -0400

--
 phoenix-spark/pom.xml   |  2 +-
 phoenix-spark/src/it/resources/setup.sql|  4 +++-
 .../apache/phoenix/spark/PhoenixSparkIT.scala   |  7 ++
 .../org/apache/phoenix/spark/PhoenixRDD.scala   | 25 +++-
 .../apache/phoenix/spark/PhoenixRelation.scala  | 15 ++--
 5 files changed, 38 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/5bb0d4c3/phoenix-spark/pom.xml
--
diff --git a/phoenix-spark/pom.xml b/phoenix-spark/pom.xml
index 79d7d35..f2ee611 100644
--- a/phoenix-spark/pom.xml
+++ b/phoenix-spark/pom.xml
@@ -34,7 +34,7 @@
   <name>Phoenix - Spark</name>
 
   <properties>
-    <spark.version>1.4.0</spark.version>
+    <spark.version>1.5.0</spark.version>
     <scala.version>2.10.4</scala.version>
     <scala.binary.version>2.10</scala.binary.version>
     <top.dir>${project.basedir}/..</top.dir>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5bb0d4c3/phoenix-spark/src/it/resources/setup.sql
--
diff --git a/phoenix-spark/src/it/resources/setup.sql b/phoenix-spark/src/it/resources/setup.sql
index 154a996..db46a92 100644
--- a/phoenix-spark/src/it/resources/setup.sql
+++ b/phoenix-spark/src/it/resources/setup.sql
@@ -34,4 +34,6 @@ CREATE TABLE DATE_PREDICATE_TEST_TABLE (ID BIGINT NOT NULL, TIMESERIES_KEY TIMES
 UPSERT INTO DATE_PREDICATE_TEST_TABLE (ID, TIMESERIES_KEY) VALUES (1, CAST(CURRENT_TIME() AS TIMESTAMP))
 CREATE TABLE OUTPUT_TEST_TABLE (id BIGINT NOT NULL PRIMARY KEY, col1 VARCHAR, col2 INTEGER, col3 DATE)
 CREATE TABLE CUSTOM_ENTITY."z02"(id BIGINT NOT NULL PRIMARY KEY)
-UPSERT INTO CUSTOM_ENTITY."z02" (id) VALUES(1)
\ No newline at end of file
+UPSERT INTO CUSTOM_ENTITY."z02" (id) VALUES(1)
+CREATE TABLE TEST_DECIMAL (ID BIGINT NOT NULL PRIMARY KEY, COL1 DECIMAL)
+UPSERT INTO TEST_DECIMAL VALUES (1, 123.456789)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5bb0d4c3/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
--
diff --git a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
index 1a28b60..f610d44 100644
--- a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
+++ b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
@@ -466,4 +466,11 @@ class PhoenixSparkIT extends FunSuite with Matchers with BeforeAndAfterAll {
 
     df.saveToPhoenix("TABLE2", zkUrl = Some(quorumAddress))
   }
+
+  // We can load the type, but it defaults to Spark's default (precision 38, scale 10)
+  ignore("Can load decimal types with accurate precision and scale (PHOENIX-2288)") {
+    val sqlContext = new SQLContext(sc)
+    val df = sqlContext.load("org.apache.phoenix.spark", Map("table" -> "TEST_DECIMAL", "zkUrl" -> quorumAddress))
+    assert(df.select("COL1").first().getDecimal(0) == BigDecimal("123.456789"))
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5bb0d4c3/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
--
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
index 427fb24..e2d96cb 100644
--- a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
@@ -13,17 +13,18 @@
  */
 package org.apache.phoenix.spark
 
+import java.text.DecimalFormat
+
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.hbase.{HBaseConfiguration, HConstants}
 import org.apache.hadoop.io.NullWritable
 import org.apache.phoenix.mapreduce.PhoenixInputFormat
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil
 import org.apache.phoenix.schema.types._
-import org.apache.phoenix.util.ColumnInfo
+import org.apache.phoenix.util.{PhoenixRuntime, ColumnInfo}
 import org.apache.spark._
 import 

phoenix git commit: PHOENIX-2287 phoenix-spark: Cast class exception (add support for Spark 1.5.0)

2015-09-28 Thread jmahonin
Repository: phoenix
Updated Branches:
  refs/heads/4.5-HBase-0.98 a89d048b9 -> b3053e98b


PHOENIX-2287 phoenix-spark: Cast class exception (add support for Spark 1.5.0)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b3053e98
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b3053e98
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b3053e98

Branch: refs/heads/4.5-HBase-0.98
Commit: b3053e98b36db0928d9d5aabfcfb30888ea1f589
Parents: a89d048
Author: Josh Mahonin 
Authored: Mon Sep 28 13:13:23 2015 -0400
Committer: Josh Mahonin 
Committed: Mon Sep 28 13:21:18 2015 -0400

--
 phoenix-spark/pom.xml   |  2 +-
 phoenix-spark/src/it/resources/setup.sql|  4 +++-
 .../apache/phoenix/spark/PhoenixSparkIT.scala   |  7 ++
 .../org/apache/phoenix/spark/PhoenixRDD.scala   | 25 +++-
 .../apache/phoenix/spark/PhoenixRelation.scala  | 15 ++--
 5 files changed, 38 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b3053e98/phoenix-spark/pom.xml
--
diff --git a/phoenix-spark/pom.xml b/phoenix-spark/pom.xml
index cb3790f..0ed2a61 100644
--- a/phoenix-spark/pom.xml
+++ b/phoenix-spark/pom.xml
@@ -34,7 +34,7 @@
   <name>Phoenix - Spark</name>
 
   <properties>
-    <spark.version>1.4.0</spark.version>
+    <spark.version>1.5.0</spark.version>
     <scala.version>2.10.4</scala.version>
     <scala.binary.version>2.10</scala.binary.version>
     <top.dir>${project.basedir}/..</top.dir>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b3053e98/phoenix-spark/src/it/resources/setup.sql
--
diff --git a/phoenix-spark/src/it/resources/setup.sql b/phoenix-spark/src/it/resources/setup.sql
index 154a996..db46a92 100644
--- a/phoenix-spark/src/it/resources/setup.sql
+++ b/phoenix-spark/src/it/resources/setup.sql
@@ -34,4 +34,6 @@ CREATE TABLE DATE_PREDICATE_TEST_TABLE (ID BIGINT NOT NULL, TIMESERIES_KEY TIMES
 UPSERT INTO DATE_PREDICATE_TEST_TABLE (ID, TIMESERIES_KEY) VALUES (1, CAST(CURRENT_TIME() AS TIMESTAMP))
 CREATE TABLE OUTPUT_TEST_TABLE (id BIGINT NOT NULL PRIMARY KEY, col1 VARCHAR, col2 INTEGER, col3 DATE)
 CREATE TABLE CUSTOM_ENTITY."z02"(id BIGINT NOT NULL PRIMARY KEY)
-UPSERT INTO CUSTOM_ENTITY."z02" (id) VALUES(1)
\ No newline at end of file
+UPSERT INTO CUSTOM_ENTITY."z02" (id) VALUES(1)
+CREATE TABLE TEST_DECIMAL (ID BIGINT NOT NULL PRIMARY KEY, COL1 DECIMAL)
+UPSERT INTO TEST_DECIMAL VALUES (1, 123.456789)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b3053e98/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
--
diff --git 
a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala 
b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
index a4e37e1..790624e 100644
--- a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
+++ b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
@@ -465,4 +465,11 @@ class PhoenixSparkIT extends FunSuite with Matchers with BeforeAndAfterAll {
 
     df.saveToPhoenix("TABLE2", zkUrl = Some(quorumAddress))
   }
+
+  // We can load the type, but it defaults to Spark's default (precision 38, scale 10)
+  ignore("Can load decimal types with accurate precision and scale (PHOENIX-2288)") {
+    val sqlContext = new SQLContext(sc)
+    val df = sqlContext.load("org.apache.phoenix.spark", Map("table" -> "TEST_DECIMAL", "zkUrl" -> quorumAddress))
+    assert(df.select("COL1").first().getDecimal(0) == BigDecimal("123.456789"))
+  }
 }
\ No newline at end of file
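
The ignored test above is the PHOENIX-2288 marker: the value itself round-trips, but the DataFrame schema carries Spark's default decimal precision/scale rather than the precision and scale declared on the Phoenix column. A quick way to see what was actually inferred, reusing the suite's sc and quorumAddress fixtures (a sketch, not part of the commit):

    import org.apache.spark.sql.SQLContext
    import org.apache.spark.sql.types.DecimalType

    val sqlContext = new SQLContext(sc)
    val df = sqlContext.load("org.apache.phoenix.spark",
      Map("table" -> "TEST_DECIMAL", "zkUrl" -> quorumAddress))

    // Until PHOENIX-2288 is addressed this prints the default precision and
    // scale, not whatever TEST_DECIMAL.COL1 was declared with.
    df.schema("COL1").dataType match {
      case d: DecimalType => println(s"precision=${d.precision}, scale=${d.scale}")
      case other          => println(s"unexpected type: $other")
    }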

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b3053e98/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
--
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
index 427fb24..e2d96cb 100644
--- a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
@@ -13,17 +13,18 @@
  */
 package org.apache.phoenix.spark
 
+import java.text.DecimalFormat
+
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.hbase.{HBaseConfiguration, HConstants}
 import org.apache.hadoop.io.NullWritable
 import org.apache.phoenix.mapreduce.PhoenixInputFormat
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil
 import org.apache.phoenix.schema.types._
-import org.apache.phoenix.util.ColumnInfo
+import org.apache.phoenix.util.{PhoenixRuntime, ColumnInfo}
 import org.apache.spark._
 import 

phoenix git commit: PHOENIX-2196 phoenix-spark should automatically convert DataFrame field names (Randy Gelhausen)

2015-09-28 Thread jmahonin
Repository: phoenix
Updated Branches:
  refs/heads/master 5ecd4967f -> eb9fab429


PHOENIX-2196 phoenix-spark should automatically convert DataFrame field names (Randy Gelhausen)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/eb9fab42
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/eb9fab42
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/eb9fab42

Branch: refs/heads/master
Commit: eb9fab42911b08f9e817b313f2250f4685ba13a2
Parents: 5ecd496
Author: Josh Mahonin 
Authored: Sun Aug 23 14:18:55 2015 -0400
Committer: Josh Mahonin 
Committed: Mon Sep 28 12:35:25 2015 -0400

--
 .../apache/phoenix/spark/PhoenixSparkIT.scala   | 22 ++--
 .../phoenix/spark/DataFrameFunctions.scala  | 15 +++--
 2 files changed, 29 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/eb9fab42/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
--
diff --git a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
index e1c9df4..1a28b60 100644
--- a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
+++ b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
@@ -23,8 +23,8 @@ import org.apache.phoenix.query.BaseTest
 import org.apache.phoenix.schema.{TableNotFoundException, ColumnNotFoundException}
 import org.apache.phoenix.schema.types.PVarchar
 import org.apache.phoenix.util.{SchemaUtil, ColumnInfo}
-import org.apache.spark.sql.{SaveMode, execution, SQLContext}
-import org.apache.spark.sql.types.{LongType, DataType, StringType, StructField}
+import org.apache.spark.sql.{Row, SaveMode, execution, SQLContext}
+import org.apache.spark.sql.types._
 import org.apache.spark.{SparkConf, SparkContext}
 import org.joda.time.DateTime
 import org.scalatest._
@@ -448,4 +448,22 @@ class PhoenixSparkIT extends FunSuite with Matchers with BeforeAndAfterAll {
     count shouldEqual 1L
 
   }
+
+  test("Ensure DataFrame field normalization (PHOENIX-2196)") {
+    val rdd1 = sc
+      .parallelize(Seq((1L,1L,"One"),(2L,2L,"Two")))
+      .map(p => Row(p._1, p._2, p._3))
+
+    val sqlContext = new SQLContext(sc)
+
+    val schema = StructType(Seq(
+      StructField("id", LongType, nullable = false),
+      StructField("table1_id", LongType, nullable = true),
+      StructField("\"t2col1\"", StringType, nullable = true)
+    ))
+
+    val df = sqlContext.createDataFrame(rdd1, schema)
+
+    df.saveToPhoenix("TABLE2", zkUrl = Some(quorumAddress))
+  }
 }
\ No newline at end of file
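
The schema in the test above mixes the two identifier styles on purpose: Phoenix treats unquoted SQL identifiers as case-insensitive and folds them to upper case, while double-quoted identifiers are case-sensitive. SchemaUtil.normalizeIdentifier applies exactly that rule to each DataFrame field name, which is how "id" and "table1_id" resolve against TABLE2's upper-case columns while "t2col1" keeps its lower case. A small sketch of the expected behavior (outputs are my assumption, worth verifying against your Phoenix version):

    import org.apache.phoenix.util.SchemaUtil

    object NormalizeSketch extends App {
      // Unquoted identifiers fold to upper case, matching Phoenix's
      // case-insensitive handling of unquoted SQL names.
      println(SchemaUtil.normalizeIdentifier("table1_id"))  // TABLE1_ID
      // Double-quoted identifiers lose the quotes but keep their case.
      println(SchemaUtil.normalizeIdentifier("\"t2col1\"")) // t2col1
    }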

http://git-wip-us.apache.org/repos/asf/phoenix/blob/eb9fab42/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
--
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
index 5042eaa..9408210 100644
--- a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
@@ -17,6 +17,7 @@ import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.io.NullWritable
 import org.apache.phoenix.mapreduce.PhoenixOutputFormat
 import org.apache.phoenix.mapreduce.util.{ColumnInfoToStringEncoderDecoder, PhoenixConfigurationUtil}
+import org.apache.phoenix.util.SchemaUtil
 import org.apache.spark.Logging
 import org.apache.spark.sql.DataFrame
 import scala.collection.JavaConversions._
@@ -26,16 +27,18 @@ class DataFrameFunctions(data: DataFrame) extends Logging with Serializable {
   def saveToPhoenix(tableName: String, conf: Configuration = new Configuration,
                     zkUrl: Option[String] = None): Unit = {
 
+
+    // Retrieve the schema field names and normalize to Phoenix, need to do this outside of mapPartitions
+    val fieldArray = data.schema.fieldNames.map(x => SchemaUtil.normalizeIdentifier(x))
+
     // Create a configuration object to use for saving
-    @transient val outConfig = ConfigurationUtil.getOutputConfiguration(tableName, data.schema.fieldNames, zkUrl, Some(conf))
+    @transient val outConfig = ConfigurationUtil.getOutputConfiguration(tableName, fieldArray, zkUrl, Some(conf))
 
     // Retrieve the zookeeper URL
     val zkUrlFinal = ConfigurationUtil.getZookeeperURL(outConfig)
 
-    // Retrieve the schema field names, need to do this outside of mapPartitions
-    val fieldArray = data.schema.fieldNames
-
     // Map the row objects into PhoenixRecordWritable
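
Note where the normalization runs: on the driver, before any mapPartitions call, just as the in-line comment says. Pulling the names out of data.schema inside the partition function would drag the whole DataFrame into the task closure; materializing a plain Array[String] first is what keeps the closure serializable. The pattern in isolation, with a hypothetical df (a sketch, not commit code):

    import org.apache.phoenix.util.SchemaUtil
    import org.apache.spark.sql.DataFrame

    // Compute the normalized names on the driver so only this Array[String],
    // not the DataFrame, is captured and serialized into executor closures.
    def normalizedFieldNames(df: DataFrame): Array[String] =
      df.schema.fieldNames.map(SchemaUtil.normalizeIdentifier)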

Build failed in Jenkins: Phoenix | Master #906

2015-09-28 Thread Apache Jenkins Server
See 

Changes:

[jmahonin] PHOENIX-2287 phoenix-spark: Cast class exception (add support for Spark 1.5.0)

--
[...truncated 119464 lines...]
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 22.376 sec - in org.apache.phoenix.end2end.CbrtFunctionEnd2EndIT
Running org.apache.phoenix.end2end.LikeExpressionIT
Tests run: 15, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 322.819 sec - in org.apache.phoenix.end2end.index.LocalMutableIndexIT
Running org.apache.phoenix.end2end.FirstValueFunctionIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 26.78 sec - in org.apache.phoenix.end2end.LikeExpressionIT
Running org.apache.phoenix.end2end.RoundFloorCeilFunctionsEnd2EndIT
Tests run: 7, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 16.355 sec - in org.apache.phoenix.end2end.FirstValueFunctionIT
Running org.apache.phoenix.end2end.CSVCommonsLoaderIT
Tests run: 31, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 48.833 sec - in org.apache.phoenix.end2end.RoundFloorCeilFunctionsEnd2EndIT
Running org.apache.phoenix.end2end.DynamicFamilyIT
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 24.971 sec - in org.apache.phoenix.end2end.DynamicFamilyIT
Tests run: 58, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 381.244 sec - in org.apache.phoenix.end2end.index.IndexExpressionIT
Running org.apache.phoenix.end2end.EvaluationOfORIT
Running org.apache.phoenix.end2end.AlterSessionIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 10.367 sec - in org.apache.phoenix.end2end.EvaluationOfORIT
Running org.apache.phoenix.end2end.ArrayConcatFunctionIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 15.073 sec - in org.apache.phoenix.end2end.AlterSessionIT
Running org.apache.phoenix.end2end.UpsertBigValuesIT
Tests run: 16, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 83.202 sec - in org.apache.phoenix.end2end.CSVCommonsLoaderIT
Running org.apache.phoenix.end2end.ToDateFunctionIT
Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 19.493 sec - in org.apache.phoenix.end2end.ToDateFunctionIT
Running org.apache.phoenix.end2end.SignFunctionEnd2EndIT
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 44.176 sec - in org.apache.phoenix.end2end.UpsertBigValuesIT
Running org.apache.phoenix.end2end.RegexpSplitFunctionIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 23.641 sec - in org.apache.phoenix.end2end.SignFunctionEnd2EndIT
Running org.apache.phoenix.end2end.salted.SaltedTableUpsertSelectIT
Tests run: 30, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 56.526 sec - in org.apache.phoenix.end2end.ArrayConcatFunctionIT
Running org.apache.phoenix.end2end.salted.SaltedTableVarLengthRowKeyIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 10.786 sec - in org.apache.phoenix.end2end.salted.SaltedTableVarLengthRowKeyIT
Running org.apache.phoenix.end2end.GetSetByteBitFunctionEnd2EndIT
Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 25.537 sec - in org.apache.phoenix.end2end.RegexpSplitFunctionIT
Running org.apache.phoenix.end2end.RegexpReplaceFunctionIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 9.592 sec - in org.apache.phoenix.end2end.GetSetByteBitFunctionEnd2EndIT
Running org.apache.phoenix.end2end.StatementHintsIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 10.769 sec - in org.apache.phoenix.end2end.RegexpReplaceFunctionIT
Running org.apache.phoenix.end2end.ReverseFunctionIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 10.776 sec - in org.apache.phoenix.end2end.StatementHintsIT
Running org.apache.phoenix.end2end.DecodeFunctionIT
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 42.694 sec - in org.apache.phoenix.end2end.salted.SaltedTableUpsertSelectIT
Running org.apache.phoenix.end2end.ReadOnlyIT
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 14.261 sec - in org.apache.phoenix.end2end.ReverseFunctionIT
Running org.apache.phoenix.end2end.UpsertSelectAutoCommitIT
Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 14.636 sec - in org.apache.phoenix.end2end.DecodeFunctionIT
Running org.apache.phoenix.end2end.OctetLengthFunctionEnd2EndIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 19.466 sec - in org.apache.phoenix.end2end.ReadOnlyIT
Running org.apache.phoenix.end2end.LpadFunctionIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 10.491 sec - in org.apache.phoenix.end2end.OctetLengthFunctionEnd2EndIT
Running org.apache.phoenix.end2end.SpooledTmpFileDeleteIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 12.237 sec - in org.apache.phoenix.end2end.SpooledTmpFileDeleteIT
Running org.apache.phoenix.end2end.CoalesceFunctionIT
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 47.626 sec - in