PHOENIX-2426 phoenix-spark: Support for TINYINT and SMALLINT
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/27a152eb Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/27a152eb Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/27a152eb Branch: refs/heads/txn Commit: 27a152eb03279cd30e6a633d5adbe06363c696a6 Parents: 7808697 Author: Josh Mahonin <[email protected]> Authored: Tue Nov 17 14:24:10 2015 -0500 Committer: Josh Mahonin <[email protected]> Committed: Tue Nov 17 14:25:10 2015 -0500 ---------------------------------------------------------------------- phoenix-spark/src/it/resources/setup.sql | 4 +++- .../it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala | 7 +++++++ .../src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala | 3 ++- 3 files changed, 12 insertions(+), 2 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/phoenix/blob/27a152eb/phoenix-spark/src/it/resources/setup.sql ---------------------------------------------------------------------- diff --git a/phoenix-spark/src/it/resources/setup.sql b/phoenix-spark/src/it/resources/setup.sql index d6dbe20..814d311 100644 --- a/phoenix-spark/src/it/resources/setup.sql +++ b/phoenix-spark/src/it/resources/setup.sql @@ -36,4 +36,6 @@ CREATE TABLE OUTPUT_TEST_TABLE (id BIGINT NOT NULL PRIMARY KEY, col1 VARCHAR, co CREATE TABLE CUSTOM_ENTITY."z02"(id BIGINT NOT NULL PRIMARY KEY) UPSERT INTO CUSTOM_ENTITY."z02" (id) VALUES(1) CREATE TABLE TEST_DECIMAL (ID BIGINT NOT NULL PRIMARY KEY, COL1 DECIMAL(9, 6)) -UPSERT INTO TEST_DECIMAL VALUES (1, 123.456789) \ No newline at end of file +UPSERT INTO TEST_DECIMAL VALUES (1, 123.456789) +CREATE TABLE TEST_SMALL_TINY (ID BIGINT NOT NULL PRIMARY KEY, COL1 SMALLINT, COL2 TINYINT) +UPSERT INTO TEST_SMALL_TINY VALUES (1, 32767, 127) http://git-wip-us.apache.org/repos/asf/phoenix/blob/27a152eb/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala 
---------------------------------------------------------------------- diff --git a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala index 31104ba..5ae17da 100644 --- a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala +++ b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala @@ -516,4 +516,11 @@ class PhoenixSparkIT extends FunSuite with Matchers with BeforeAndAfterAll { val df = sqlContext.load("org.apache.phoenix.spark", Map("table" -> "TEST_DECIMAL", "zkUrl" -> quorumAddress)) assert(df.select("COL1").first().getDecimal(0) == BigDecimal("123.456789").bigDecimal) } + + test("Can load small and tiny integer types (PHOENIX-2426)") { + val sqlContext = new SQLContext(sc) + val df = sqlContext.load("org.apache.phoenix.spark", Map("table" -> "TEST_SMALL_TINY", "zkUrl" -> quorumAddress)) + assert(df.select("COL1").first().getShort(0).toInt == 32767) + assert(df.select("COL2").first().getByte(0).toInt == 127) + } } http://git-wip-us.apache.org/repos/asf/phoenix/blob/27a152eb/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
+132,8 @@ class PhoenixRDD(sc: SparkContext, table: String, columns: Seq[String], case t if t.isInstanceOf[PVarchar] || t.isInstanceOf[PChar] => StringType case t if t.isInstanceOf[PLong] || t.isInstanceOf[PUnsignedLong] => LongType case t if t.isInstanceOf[PInteger] || t.isInstanceOf[PUnsignedInt] => IntegerType + case t if t.isInstanceOf[PSmallint] || t.isInstanceOf[PUnsignedSmallint] => ShortType + case t if t.isInstanceOf[PTinyint] || t.isInstanceOf[PUnsignedTinyint] => ByteType case t if t.isInstanceOf[PFloat] || t.isInstanceOf[PUnsignedFloat] => FloatType case t if t.isInstanceOf[PDouble] || t.isInstanceOf[PUnsignedDouble] => DoubleType // Use Spark system default precision for now (explicit to work with < 1.5)
