This is an automated email from the ASF dual-hosted git repository.
sarutak pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/master by this push:
new 555cd3846cac [SPARK-55645][SQL] Add serdeName to CatalogStorageFormat
555cd3846cac is described below
commit 555cd3846caca13bf92be601a04c29fb8e56e555
Author: David Tagatac <[email protected]>
AuthorDate: Fri Feb 27 14:39:57 2026 +0900
[SPARK-55645][SQL] Add serdeName to CatalogStorageFormat
### What changes were proposed in this pull request?
- Add `serdeName` to
`org.apache.spark.sql.catalyst.catalog.CatalogStorageFormat`.
- Include this field when responding to `DESCRIBE EXTENDED` queries.
- Handle this field when parsing table details from the Hive Metastore API
and when writing back to it.
### Why are the changes needed?
- This field is included in
[`SerDeInfo`](https://github.com/apache/hive/blob/5160d3af392248255f68e41e1e0557eae4d95273/metastore/if/hive_metastore.thrift#L260)
returned by the Hive Metastore API.
- Its omission in the internal representation of Hive tables makes it
cumbersome to consume this field.
Before this change:
```
private def hasExampleSerdeName(h: HiveTableRelation): Boolean = {
val key = (h.tableMeta.database, h.tableMeta.identifier.table)
serdeNameCache.computeIfAbsent(key, _ => {
val catalog = session.sharedState.externalCatalog.unwrapped
.asInstanceOf[HiveExternalCatalog]
catalog.client.getRawHiveTableOption(key._1, key._2).exists {
rawHiveTable =>
// Use reflection to access SerDeInfo.name across classloader boundaries,
// so that this works even when spark.sql.hive.metastore.jars is configured.
val rawTable = rawHiveTable.rawTable
val tTable =
rawTable.getClass.getMethod("getTTable").invoke(rawTable)
val sd = tTable.getClass.getMethod("getSd").invoke(tTable)
val serdeInfo = sd.getClass.getMethod("getSerdeInfo").invoke(sd)
val name = serdeInfo.getClass.getMethod("getName").invoke(serdeInfo)
name == ExampleSerdeInfoName
}
})
}
```
After this change:
```
private def hasExampleSerdeName(h: HiveTableRelation): Boolean = {
h.tableMeta.storage.serdeName.contains(ExampleSerdeInfoName)
}
```
### Does this PR introduce _any_ user-facing change?
Yes, developers can now access `CatalogStorageFormat.serdeName`,
representing the Hive Metastore API field `SerDeInfo.name`, when interacting
with Spark representations of Hive tables.
### How was this patch tested?
- Unit test added.
- `DESCRIBE EXTENDED` run via `spark-shell` returns "Serde Name" properly
for a Hive table with a Serde name:
```
scala> spark.sql("CREATE TABLE t (d1 DECIMAL(10,3), d2 STRING) STORED AS
TEXTFILE;").show()
++
||
++
++
scala> spark.sql("DESCRIBE EXTENDED t;").show()
+--------------------+--------------------+-------+
| col_name| data_type|comment|
+--------------------+--------------------+-------+
| d1| decimal(10,3)| NULL|
| d2| string| NULL|
| | | |
|# Detailed Table ...| | |
...
| Location|file:/local/home/...| |
| Serde Library|org.apache.hadoop...| |
...
+--------------------+--------------------+-------+
scala> import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.TableIdentifier
scala> val hiveTable =
spark.sessionState.catalog.getTableMetadata(TableIdentifier("t",
Some("default")))
val hiveTable: org.apache.spark.sql.catalyst.catalog.CatalogTable =
...
scala> val updated = hiveTable.copy(storage =
hiveTable.storage.copy(serdeName = Some("testSerdeName")))
val updated: org.apache.spark.sql.catalyst.catalog.CatalogTable =
...
scala> spark.sessionState.catalog.alterTable(updated)
scala> spark.sql("DESCRIBE EXTENDED t;").show()
+--------------------+--------------------+-------+
| col_name| data_type|comment|
+--------------------+--------------------+-------+
| d1| decimal(10,3)| NULL|
| d2| string| NULL|
| | | |
|# Detailed Table ...| | |
...
| Location|file:/local/home/...| |
| Serde Name| testSerdeName| |
| Serde Library|org.apache.hadoop...| |
...
+--------------------+--------------------+-------+
```
### Was this patch authored or co-authored using generative AI tooling?
No.
*This contribution is my original work, and I license the work to the Spark
project under the project’s open source license.*
Closes #54467 from tagatac/serde-name.
Authored-by: David Tagatac <[email protected]>
Signed-off-by: Kousuke Saruta <[email protected]>
---
.../org/apache/spark/sql/catalyst/catalog/interface.scala | 9 ++++++---
.../sql/catalyst/catalog/ExternalCatalogEventSuite.scala | 1 +
.../spark/sql/catalyst/catalog/ExternalCatalogSuite.scala | 7 ++++---
.../apache/spark/sql/AlwaysPersistedConfigsSuite.scala | 2 +-
.../org/apache/spark/sql/execution/command/DDLSuite.scala | 2 +-
.../scala/org/apache/spark/sql/sources/InsertSuite.scala | 2 ++
.../org/apache/spark/sql/hive/HiveMetastoreCatalog.scala | 3 +++
.../org/apache/spark/sql/hive/client/HiveClientImpl.scala | 4 ++++
.../apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala | 15 +++++++++++++++
.../apache/spark/sql/hive/HiveSchemaInferenceSuite.scala | 1 +
.../apache/spark/sql/hive/MetastoreDataSourcesSuite.scala | 1 +
.../apache/spark/sql/hive/client/HiveClientSuite.scala | 3 +++
.../sql/hive/client/HivePartitionFilteringSuite.scala | 1 +
.../apache/spark/sql/hive/execution/HiveDDLSuite.scala | 2 ++
14 files changed, 45 insertions(+), 8 deletions(-)
diff --git
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/interface.scala
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/interface.scala
index fcaea4709504..8ad01e54ae9b 100644
---
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/interface.scala
+++
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/interface.scala
@@ -144,6 +144,7 @@ case class CatalogStorageFormat(
locationUri: Option[URI],
inputFormat: Option[String],
outputFormat: Option[String],
+ serdeName: Option[String],
serde: Option[String],
compressed: Boolean,
properties: Map[String, String]) extends MetadataMapSupport {
@@ -158,6 +159,7 @@ case class CatalogStorageFormat(
val map = mutable.LinkedHashMap[String, JValue]()
locationUri.foreach(l => map += ("Location" ->
JString(CatalogUtils.URIToString(l))))
+ serdeName.foreach(s => map += ("Serde Name" -> JString(s)))
serde.foreach(s => map += ("Serde Library" -> JString(s)))
inputFormat.foreach(format => map += ("InputFormat" -> JString(format)))
outputFormat.foreach(format => map += ("OutputFormat" -> JString(format)))
@@ -178,8 +180,8 @@ case class CatalogStorageFormat(
object CatalogStorageFormat {
/** Empty storage format for default values and copies. */
- val empty = CatalogStorageFormat(locationUri = None, inputFormat = None,
- outputFormat = None, serde = None, compressed = false, properties =
Map.empty)
+ val empty = CatalogStorageFormat(locationUri = None, inputFormat = None,
outputFormat = None,
+ serdeName = None, serde = None, compressed = false, properties = Map.empty)
}
/**
@@ -614,10 +616,11 @@ case class CatalogTable(
inputFormat: Option[String] = storage.inputFormat,
outputFormat: Option[String] = storage.outputFormat,
compressed: Boolean = false,
+ serdeName: Option[String] = storage.serdeName,
serde: Option[String] = storage.serde,
properties: Map[String, String] = storage.properties): CatalogTable = {
copy(storage = CatalogStorageFormat(
- locationUri, inputFormat, outputFormat, serde, compressed, properties))
+ locationUri, inputFormat, outputFormat, serdeName, serde, compressed,
properties))
}
def toJsonLinkedHashMap: mutable.LinkedHashMap[String, JValue] = {
diff --git
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalogEventSuite.scala
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalogEventSuite.scala
index f332393e503f..12e9cd790ca6 100644
---
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalogEventSuite.scala
+++
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalogEventSuite.scala
@@ -247,6 +247,7 @@ class ExternalCatalogEventSuite extends SparkFunSuite {
locationUri = Some(tableUri),
inputFormat = Some("tableInputFormat"),
outputFormat = Some("tableOutputFormat"),
+ serdeName = None,
serde = None,
compressed = false,
properties = Map.empty)
diff --git
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalogSuite.scala
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalogSuite.scala
index f06128f23679..0a9e370e6b21 100644
---
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalogSuite.scala
+++
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalogSuite.scala
@@ -911,7 +911,7 @@ abstract class ExternalCatalogSuite extends SparkFunSuite {
tableType = CatalogTableType.EXTERNAL,
storage = CatalogStorageFormat(
Some(Utils.createTempDir().toURI),
- None, None, None, false, Map.empty),
+ None, None, None, None, false, Map.empty),
schema = new StructType().add("a", "int").add("b", "string"),
provider = Some(defaultProvider)
)
@@ -959,7 +959,7 @@ abstract class ExternalCatalogSuite extends SparkFunSuite {
Map("partCol1" -> "7", "partCol2" -> "8"),
CatalogStorageFormat(
Some(tempPath.toURI),
- None, None, None, false, Map.empty))
+ None, None, None, None, false, Map.empty))
catalog.createPartitions("db1", "tbl", Seq(partWithExistingDir),
ignoreIfExists = false)
tempPath.delete()
@@ -968,7 +968,7 @@ abstract class ExternalCatalogSuite extends SparkFunSuite {
Map("partCol1" -> "9", "partCol2" -> "10"),
CatalogStorageFormat(
Some(tempPath.toURI),
- None, None, None, false, Map.empty))
+ None, None, None, None, false, Map.empty))
catalog.createPartitions("db1", "tbl", Seq(partWithNonExistingDir),
ignoreIfExists = false)
assert(tempPath.exists())
}
@@ -1030,6 +1030,7 @@ abstract class CatalogTestUtils {
locationUri = None,
inputFormat = Some(tableInputFormat),
outputFormat = Some(tableOutputFormat),
+ serdeName = None,
serde = None,
compressed = false,
properties = Map.empty)
diff --git
a/sql/core/src/test/scala/org/apache/spark/sql/AlwaysPersistedConfigsSuite.scala
b/sql/core/src/test/scala/org/apache/spark/sql/AlwaysPersistedConfigsSuite.scala
index bfadf1eb1885..ac438320f967 100644
---
a/sql/core/src/test/scala/org/apache/spark/sql/AlwaysPersistedConfigsSuite.scala
+++
b/sql/core/src/test/scala/org/apache/spark/sql/AlwaysPersistedConfigsSuite.scala
@@ -158,7 +158,7 @@ class AlwaysPersistedConfigsSuite extends QueryTest with
SharedSparkSession {
val catalogTable = new CatalogTable(
identifier = TableIdentifier(testViewName),
tableType = CatalogTableType.VIEW,
- storage = CatalogStorageFormat(None, None, None, None, false, Map.empty),
+ storage = CatalogStorageFormat(None, None, None, None, None, false,
Map.empty),
schema = new StructType(),
properties = Map.empty[String, String]
)
diff --git
a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala
b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala
index 42410a6cca52..46cf79b60b25 100644
---
a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala
+++
b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala
@@ -317,7 +317,7 @@ trait DDLSuiteBase extends SQLTestUtils {
spec: TablePartitionSpec,
tableName: TableIdentifier): Unit = {
val part = CatalogTablePartition(
- spec, CatalogStorageFormat(None, None, None, None, false, Map()))
+ spec, CatalogStorageFormat(None, None, None, None, None, false, Map()))
catalog.createPartitions(tableName, Seq(part), ignoreIfExists = false)
}
diff --git
a/sql/core/src/test/scala/org/apache/spark/sql/sources/InsertSuite.scala
b/sql/core/src/test/scala/org/apache/spark/sql/sources/InsertSuite.scala
index d92e79645571..4cc8db226b43 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/sources/InsertSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/InsertSuite.scala
@@ -944,6 +944,7 @@ class InsertSuite extends DataSourceTest with
SharedSparkSession {
locationUri = None,
inputFormat = None,
outputFormat = None,
+ serdeName = None,
serde = None,
compressed = false,
properties = Map.empty),
@@ -972,6 +973,7 @@ class InsertSuite extends DataSourceTest with
SharedSparkSession {
locationUri = None,
inputFormat = None,
outputFormat = None,
+ serdeName = None,
serde = None,
compressed = false,
properties = Map.empty),
diff --git
a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
index b0e0cc5a926c..bfff91b6fc6b 100644
---
a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
+++
b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
@@ -177,6 +177,7 @@ private[hive] class HiveMetastoreCatalog(sparkSession:
SparkSession) extends Log
val options = storage.properties + (ParquetOptions.MERGE_SCHEMA ->
SQLConf.get.getConf(HiveUtils.CONVERT_METASTORE_PARQUET_WITH_SCHEMA_MERGING).toString)
storage.copy(
+ serdeName = None,
serde = None,
properties = options
)
@@ -184,11 +185,13 @@ private[hive] class HiveMetastoreCatalog(sparkSession:
SparkSession) extends Log
val options = storage.properties
if (SQLConf.get.getConf(SQLConf.ORC_IMPLEMENTATION) == "native") {
storage.copy(
+ serdeName = None,
serde = None,
properties = options
)
} else {
storage.copy(
+ serdeName = None,
serde = None,
properties = options
)
diff --git
a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala
b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala
index 4404a13c1266..482d12207b02 100644
---
a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala
+++
b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala
@@ -557,6 +557,7 @@ private[hive] class HiveClientImpl(
outputFormat = Option(h.getTTable.getSd.getOutputFormat).orElse {
Option(h.getStorageHandler).map(_.getOutputFormatClass.getName)
},
+ serdeName = Option(h.getTTable.getSd.getSerdeInfo.getName),
serde = Option(h.getSerializationLib),
compressed = h.getTTable.getSd.isCompressed,
properties = Option(h.getTTable.getSd.getSerdeInfo.getParameters)
@@ -1201,6 +1202,7 @@ private[hive] object HiveClientImpl extends Logging {
hiveTable.getTTable.getSd.setLocation(loc)}
table.storage.inputFormat.map(toInputFormat).foreach(hiveTable.setInputFormatClass)
table.storage.outputFormat.map(toOutputFormat).foreach(hiveTable.setOutputFormatClass)
+ table.storage.serdeName.foreach(hiveTable.getSd.getSerdeInfo.setName)
hiveTable.setSerializationLib(
table.storage.serde.getOrElse("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"))
table.storage.properties.foreach { case (k, v) =>
hiveTable.setSerdeParam(k, v) }
@@ -1251,6 +1253,7 @@ private[hive] object HiveClientImpl extends Logging {
p.storage.locationUri.map(CatalogUtils.URIToString(_)).foreach(storageDesc.setLocation)
p.storage.inputFormat.foreach(storageDesc.setInputFormat)
p.storage.outputFormat.foreach(storageDesc.setOutputFormat)
+ p.storage.serdeName.foreach(serdeInfo.setName)
p.storage.serde.foreach(serdeInfo.setSerializationLib)
serdeInfo.setParameters(p.storage.properties.asJava)
storageDesc.setSerdeInfo(serdeInfo)
@@ -1280,6 +1283,7 @@ private[hive] object HiveClientImpl extends Logging {
locationUri =
Option(CatalogUtils.stringToURI(apiPartition.getSd.getLocation)),
inputFormat = Option(apiPartition.getSd.getInputFormat),
outputFormat = Option(apiPartition.getSd.getOutputFormat),
+ serdeName = Option(apiPartition.getSd.getSerdeInfo.getName),
serde = Option(apiPartition.getSd.getSerdeInfo.getSerializationLib),
compressed = apiPartition.getSd.isCompressed,
properties = Option(apiPartition.getSd.getSerdeInfo.getParameters)
diff --git
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala
index a7d43ebbef07..671a705b5390 100644
---
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala
+++
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala
@@ -498,4 +498,19 @@ class DataSourceWithHiveMetastoreCatalogSuite
assert(spark.table("t").schema ===
CatalystSqlParser.parseTableSchema(schema))
}
}
+
+ test("SPARK-55645: Read/write Serde Name to/from an external table") {
+ withTable("t") {
+ sql("CREATE TABLE t (d1 DECIMAL(10,3), d2 STRING) STORED AS TEXTFILE")
+
+ val hiveTable =
+ sessionState.catalog.getTableMetadata(TableIdentifier("t",
Some("default")))
+ val updated =
+ hiveTable.copy(storage = hiveTable.storage.copy(serdeName =
Some("testSerdeName")))
+ sessionState.catalog.alterTable(updated)
+ val tableWithSerdeName =
+ sessionState.catalog.getTableMetadata(TableIdentifier("t",
Some("default")))
+ assert(tableWithSerdeName.storage.serdeName === Some("testSerdeName"))
+ }
+ }
}
diff --git
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSchemaInferenceSuite.scala
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSchemaInferenceSuite.scala
index 5e521565383c..089b131d3c27 100644
---
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSchemaInferenceSuite.scala
+++
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSchemaInferenceSuite.scala
@@ -107,6 +107,7 @@ class HiveSchemaInferenceSuite
locationUri = Option(dir.toURI),
inputFormat = serde.inputFormat,
outputFormat = serde.outputFormat,
+ serdeName = None,
serde = serde.serde,
compressed = false,
properties = Map("serialization.format" -> "1")),
diff --git
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
index 772db8dff615..b0afd8c2376d 100644
---
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
+++
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
@@ -756,6 +756,7 @@ class MetastoreDataSourcesSuite extends QueryTest
locationUri = None,
inputFormat = None,
outputFormat = None,
+ serdeName = None,
serde = None,
compressed = false,
properties = Map(
diff --git
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HiveClientSuite.scala
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HiveClientSuite.scala
index 7db9632c87b9..623410c4e69d 100644
---
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HiveClientSuite.scala
+++
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HiveClientSuite.scala
@@ -82,6 +82,7 @@ class HiveClientSuite(version: String) extends
HiveVersionSuite(version) {
locationUri = None,
inputFormat = Some(classOf[TextInputFormat].getName),
outputFormat = Some(classOf[HiveIgnoreKeyTextOutputFormat[_,
_]].getName),
+ serdeName = None,
serde = Some(classOf[LazySimpleSerDe].getName),
compressed = false,
properties = Map.empty
@@ -369,6 +370,7 @@ class HiveClientSuite(version: String) extends
HiveVersionSuite(version) {
locationUri = None,
inputFormat = None,
outputFormat = None,
+ serdeName = None,
serde = None,
compressed = false,
properties = Map.empty)
@@ -383,6 +385,7 @@ class HiveClientSuite(version: String) extends
HiveVersionSuite(version) {
locationUri = None,
inputFormat = Some(classOf[TextInputFormat].getName),
outputFormat = Some(classOf[HiveIgnoreKeyTextOutputFormat[_,
_]].getName),
+ serdeName = None,
serde = Some(classOf[LazySimpleSerDe].getName),
compressed = false,
properties = Map.empty
diff --git
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HivePartitionFilteringSuite.scala
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HivePartitionFilteringSuite.scala
index fae01d6cbc45..ddcd2aea06df 100644
---
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HivePartitionFilteringSuite.scala
+++
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HivePartitionFilteringSuite.scala
@@ -63,6 +63,7 @@ class HivePartitionFilteringSuite(version: String)
locationUri = None,
inputFormat = Some(classOf[TextInputFormat].getName),
outputFormat = Some(classOf[HiveIgnoreKeyTextOutputFormat[_, _]].getName),
+ serdeName = None,
serde = Some(classOf[LazySimpleSerDe].getName()),
compressed = false,
properties = Map.empty
diff --git
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala
index 43f15a12cad1..a73736fbde7a 100644
---
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala
+++
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala
@@ -84,6 +84,7 @@ class HiveDDLSuite
locationUri = Some(catalog.defaultTablePath(name)),
inputFormat = serde.get.inputFormat,
outputFormat = serde.get.outputFormat,
+ serdeName = None,
serde = serde.get.serde,
compressed = false,
properties = Map.empty)
@@ -92,6 +93,7 @@ class HiveDDLSuite
locationUri = Some(catalog.defaultTablePath(name)),
inputFormat =
Some("org.apache.hadoop.mapred.SequenceFileInputFormat"),
outputFormat =
Some("org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat"),
+ serdeName = None,
serde = Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"),
compressed = false,
properties = Map("serialization.format" -> "1"))
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]