Github user ravipesala commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/1418#discussion_r149997215
--- Diff:
integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/CarbonDropTableCommand.scala
---
@@ -43,24 +44,31 @@ case class CarbonDropTableCommand(
override def processSchema(sparkSession: SparkSession): Seq[Row] = {
val LOGGER: LogService =
LogServiceFactory.getLogService(this.getClass.getCanonicalName)
val dbName = GetDB.getDatabaseName(databaseNameOp, sparkSession)
- val identifier = TableIdentifier(tableName, Option(dbName))
val carbonTableIdentifier = new CarbonTableIdentifier(dbName,
tableName, "")
val locksToBeAcquired = List(LockUsage.METADATA_LOCK,
LockUsage.DROP_TABLE_LOCK)
val carbonEnv = CarbonEnv.getInstance(sparkSession)
val catalog = carbonEnv.carbonMetastore
- val tableIdentifier =
-
AbsoluteTableIdentifier.from(CarbonEnv.getInstance(sparkSession).storePath,
- dbName.toLowerCase, tableName.toLowerCase)
-
catalog.checkSchemasModifiedTimeAndReloadTables(tableIdentifier.getStorePath)
+ // get the absolute table identifier to drop the table.
+ val metadataCache = catalog.getTableFromMetadataCache(dbName,
tableName)
+ val absoluteTableIdentifier = metadataCache match {
+ case Some(tableMeta) =>
+ tableMeta.carbonTable.getAbsoluteTableIdentifier
+ case None =>
+ val storePath = GetDB.getDatabaseLocation(dbName, sparkSession,
--- End diff --
rename `storePath` to `dbPath` (lowerCamelCase) — the value returned by `GetDB.getDatabaseLocation` is the database location, not the store path, so the current name is misleading
---