This is an automated email from the ASF dual-hosted git repository.

akashrn5 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/carbondata.git


The following commit(s) were added to refs/heads/master by this push:
     new 9b04540  [CARBONDATA-4107] Added related MV tables Map to fact table and added lock while touchMDTFile
9b04540 is described below

commit 9b045402ca2c66d6a78b85ffe6f22cb18cccac20
Author: Indhumathi27 <[email protected]>
AuthorDate: Tue Jan 12 14:08:56 2021 +0530

    [CARBONDATA-4107] Added related MV tables Map to fact table and added lock while touchMDTFile
    
    Why is this PR needed?
    1. After the MV multi-tenancy support PR, the MV system folder was moved to the
    database level. Hence, during each operation (insert/load/IUD/show MV/query), we
    list all databases in the system, collect their MV schemas, and check whether any
    MV is mapped to the table. Collecting MV schemas from every database degrades
    query performance even when the table has no MV at all.

    2. When different JVM processes call the touchMDTFile method, file creation and
    deletion can happen at the same time, which may cause the operation to fail.
    
    What changes were proposed in this PR?
    1. Added a table property relatedMVTablesMap to the fact tables of an MV during
    MV creation. During any operation, check whether the table has an MV using this
    property and, if it does, collect schemas only from the related databases. In
    this way, we avoid collecting MV schemas for tables that have no MV (a sketch of
    the stored property format follows this list).

    2. Take a global-level lock on the system folder location before updating its
    last modified time (see the locking sketch after the NOTE below).
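
    For illustration, a minimal, self-contained sketch (assuming Gson on the
    classpath) of how the related-MV map could look once serialized into the fact
    table's properties and read back. The property key and the Gson round-trip
    mirror this patch; the class, database, table and MV names are hypothetical.

        import java.util.Arrays;
        import java.util.HashMap;
        import java.util.List;
        import java.util.Map;

        import com.google.gson.Gson;

        public class RelatedMVTablesMapExample {
          // same key as CarbonCommonConstants.RELATED_MV_TABLES_MAP in this patch
          private static final String RELATED_MV_TABLES_MAP = "relatedmvtablesmap";

          public static void main(String[] args) {
            // what a fact table's properties might hold after two MVs are created on it
            Map<String, List<String>> relatedMVTablesMap = new HashMap<>();
            relatedMVTablesMap.put("default", Arrays.asList("mv_sales_agg", "mv_sales_ts"));

            Map<String, String> tableProperties = new HashMap<>();
            tableProperties.put(RELATED_MV_TABLES_MAP, new Gson().toJson(relatedMVTablesMap));
            // prints: {"default":["mv_sales_agg","mv_sales_ts"]}
            System.out.println(tableProperties.get(RELATED_MV_TABLES_MAP));

            // read side, mirroring CarbonTable.getMVTablesMap(): a missing property means "no MV"
            String json = tableProperties.get(RELATED_MV_TABLES_MAP);
            Map<String, List<String>> parsed;
            if (json == null) {
              parsed = new HashMap<>();
            } else {
              parsed = new Gson().fromJson(json, Map.class);
            }
            System.out.println("has MV: " + !parsed.isEmpty());
          }
        }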
    
    NOTE: For compatibility scenarios, the refresh MV operation can be run to update
    these table properties.
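
    Likewise, a minimal sketch of the acquire-with-retries / unlock-in-finally
    pattern that touchMDTFile now follows, reusing the lock classes shown in this
    patch; the wrapper class and method here are hypothetical and the actual file
    work is elided.

        import org.apache.carbondata.core.locks.CarbonLockFactory;
        import org.apache.carbondata.core.locks.ICarbonLock;
        import org.apache.carbondata.core.locks.LockUsage;

        public class SystemDirLockSketch {
          static void touchUnderLock(String systemDirectory) {
            ICarbonLock systemDirLock = CarbonLockFactory.getSystemLevelCarbonLockObj(
                systemDirectory, LockUsage.MATERIALIZED_VIEW_STATUS_LOCK);
            boolean locked = false;
            try {
              locked = systemDirLock.lockWithRetries();
              if (locked) {
                // only the lock holder deletes/recreates the schema index file, so
                // concurrent JVM processes no longer race on the same file
              } else {
                // caller logs a warning and skips refreshing the last modified time
              }
            } finally {
              if (locked) {
                systemDirLock.unlock();
              }
            }
          }
        }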
    
    Does this PR introduce any user interface change?
    Yes.
    For compatibility scenarios, the refresh MV operation can be run to update these
    table properties.
    
    Is any new testcase added?
    No
    
    This closes #4076
---
 .../core/constants/CarbonCommonConstants.java      |  5 ++
 .../core/metadata/schema/table/CarbonTable.java    | 10 +++
 .../org/apache/carbondata/core/view/MVManager.java | 57 ++++++-------
 .../apache/carbondata/core/view/MVProvider.java    | 32 +++++--
 docs/mv-guide.md                                   |  6 ++
 .../org/apache/carbondata/view/MVHelper.scala      | 97 +++++++++++++++++++++-
 .../apache/carbondata/view/MVManagerInSpark.scala  |  3 +
 .../command/table/CarbonDropTableCommand.scala     |  3 +-
 .../command/view/CarbonCreateMVCommand.scala       |  3 +
 .../command/view/CarbonDropMVCommand.scala         |  9 +-
 .../command/view/CarbonRefreshMVCommand.scala      |  6 +-
 11 files changed, 190 insertions(+), 41 deletions(-)

diff --git a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
index 85d6b7c..41a51b8 100644
--- a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
+++ b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
@@ -1223,6 +1223,11 @@ public final class CarbonCommonConstants {
   public static final String CARBON_ENABLE_MV_DEFAULT = "true";
 
   /**
+   * Related mv table's map for a fact table
+   */
+  public static final String RELATED_MV_TABLES_MAP = "relatedmvtablesmap";
+
+  /**
    * ENABLE_QUERY_STATISTICS
    */
   @CarbonProperty
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
index 96084cc..2c22142 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
@@ -68,6 +68,7 @@ import org.apache.carbondata.core.util.path.CarbonTablePath;
 import static org.apache.carbondata.core.util.CarbonUtil.thriftColumnSchemaToWrapperColumnSchema;
 
 import com.google.common.collect.Lists;
+import com.google.gson.Gson;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Writable;
 import org.apache.log4j.Logger;
@@ -1246,6 +1247,15 @@ public class CarbonTable implements Serializable, Writable {
     return parentTableName;
   }
 
+  public Map<String, List<String>> getMVTablesMap() {
+    String relatedMVDB = this.getTableInfo().getFactTable().getTableProperties()
+        .get(CarbonCommonConstants.RELATED_MV_TABLES_MAP);
+    if (null == relatedMVDB) {
+      return new HashMap<>();
+    }
+    return new Gson().fromJson(relatedMVDB, Map.class);
+  }
+
   /**
    * It only gives the visible Indexes
    */
diff --git a/core/src/main/java/org/apache/carbondata/core/view/MVManager.java b/core/src/main/java/org/apache/carbondata/core/view/MVManager.java
index 284a672..3892657 100644
--- a/core/src/main/java/org/apache/carbondata/core/view/MVManager.java
+++ b/core/src/main/java/org/apache/carbondata/core/view/MVManager.java
@@ -62,17 +62,8 @@ public abstract class MVManager {
 
   public abstract String getDatabaseLocation(String databaseName);
 
-  public boolean hasSchemaOnTable(CarbonTable table) throws IOException {
-    List<MVSchema> schemas = getSchemas();
-    for (MVSchema schema : schemas) {
-      for (RelationIdentifier relatedTable : schema.getRelatedTables()) {
-        if (relatedTable.getDatabaseName().equalsIgnoreCase(table.getDatabaseName()) &&
-            relatedTable.getTableName().equalsIgnoreCase(table.getTableName())) {
-          return true;
-        }
-      }
-    }
-    return false;
+  public boolean hasSchemaOnTable(CarbonTable table) {
+    return !table.getMVTablesMap().isEmpty();
   }
 
   public boolean isMVInSyncWithParentTables(MVSchema mvSchema) throws IOException {
@@ -85,22 +76,7 @@ public abstract class MVManager {
    */
   public List<MVSchema> getSchemasOnTable(CarbonTable table)
       throws IOException {
-    List<MVSchema> schemasOnTable = new ArrayList<>();
-    List<MVSchema> schemas = getSchemas();
-    for (MVSchema schema : schemas) {
-      boolean isSchemaOnTable = false;
-      for (RelationIdentifier relatedTable : schema.getRelatedTables()) {
-        if (relatedTable.getDatabaseName().equalsIgnoreCase(table.getDatabaseName()) &&
-            relatedTable.getTableName().equalsIgnoreCase(table.getTableName())) {
-          isSchemaOnTable = true;
-          break;
-        }
-      }
-      if (isSchemaOnTable) {
-        schemasOnTable.add(schema);
-      }
-    }
-    return schemasOnTable;
+    return getSchemas(table.getMVTablesMap());
   }
 
   /**
@@ -133,6 +109,32 @@ public abstract class MVManager {
   }
 
   /**
+   * It gives all mv schemas from given databases in the store
+   */
+  public List<MVSchema> getSchemas(Map<String, List<String>> mvTablesMap) throws IOException {
+    List<MVSchema> schemas = new ArrayList<>();
+    for (Map.Entry<String, List<String>> databaseEntry : mvTablesMap.entrySet()) {
+      String database = databaseEntry.getKey();
+      List<String> mvTables = databaseEntry.getValue();
+      for (String mvTable : mvTables) {
+        try {
+          schemas.add(this.getSchema(database, mvTable));
+        } catch (IOException ex) {
+          LOGGER.error("Error while fetching MV schema " + mvTable + " from database: " + database);
+          throw ex;
+        } catch (Exception ex) {
+          LOGGER.error(
+              "Exception Occurred: Skipping MV schema " + mvTable + " from database: " + database);
+          if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug(ex.getMessage());
+          }
+        }
+      }
+    }
+    return schemas;
+  }
+
+  /**
    * It gives all mv schemas from store.
    */
   public List<MVSchema> getSchemas(String databaseName) throws IOException {
@@ -252,7 +254,6 @@ public abstract class MVManager {
       } catch (IOException ex) {
         throw ex;
       } catch (Exception ex) {
-        LOGGER.error("Exception Occurred: Skipping MV schemas from database: " + database);
         if (LOGGER.isDebugEnabled()) {
           LOGGER.debug(ex.getMessage());
         }
diff --git a/core/src/main/java/org/apache/carbondata/core/view/MVProvider.java b/core/src/main/java/org/apache/carbondata/core/view/MVProvider.java
index cf5f6a4..87da842 100644
--- a/core/src/main/java/org/apache/carbondata/core/view/MVProvider.java
+++ b/core/src/main/java/org/apache/carbondata/core/view/MVProvider.java
@@ -47,7 +47,6 @@ import org.apache.carbondata.core.fileoperations.AtomicFileOperationFactory;
 import org.apache.carbondata.core.fileoperations.AtomicFileOperations;
 import org.apache.carbondata.core.fileoperations.FileWriteOperation;
 import org.apache.carbondata.core.locks.CarbonLockFactory;
-import org.apache.carbondata.core.locks.CarbonLockUtil;
 import org.apache.carbondata.core.locks.ICarbonLock;
 import org.apache.carbondata.core.locks.LockUsage;
 import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
@@ -292,7 +291,7 @@ public class MVProvider {
       }
     } finally {
       if (locked) {
-        CarbonLockUtil.fileUnlock(carbonTableStatusLock, LockUsage.INDEX_STATUS_LOCK);
+        carbonTableStatusLock.unlock();
       }
     }
   }
@@ -569,14 +568,31 @@ public class MVProvider {
         FileFactory.createDirectoryAndSetPermission(this.systemDirectory,
             new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
       }
-      CarbonFile schemaIndexFile = FileFactory.getCarbonFile(this.schemaIndexFilePath);
-      if (schemaIndexFile.exists()) {
-        schemaIndexFile.delete();
+      // two or more JVM process can access this method to update last modified time at same
+      // time causing exception. So take a system level lock on system folder and update
+      // last modified time of schema index file
+      ICarbonLock systemDirLock = CarbonLockFactory
+          .getSystemLevelCarbonLockObj(this.systemDirectory,
+              LockUsage.MATERIALIZED_VIEW_STATUS_LOCK);
+      boolean locked = false;
+      try {
+        locked = systemDirLock.lockWithRetries();
+        if (locked) {
+          CarbonFile schemaIndexFile = FileFactory.getCarbonFile(this.schemaIndexFilePath);
+          if (schemaIndexFile.exists()) {
+            schemaIndexFile.delete();
+          }
+          schemaIndexFile.createNewFile(new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
+          this.lastModifiedTime = schemaIndexFile.getLastModifiedTime();
+        } else {
+          LOG.warn("Unable to get Lock to refresh schema index last modified time");
+        }
+      } finally {
+        if (locked) {
+          systemDirLock.unlock();
+        }
       }
-      schemaIndexFile.createNewFile(new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
-      this.lastModifiedTime = schemaIndexFile.getLastModifiedTime();
     }
-
   }
 
 }
diff --git a/docs/mv-guide.md b/docs/mv-guide.md
index f6478a9..8260b09 100644
--- a/docs/mv-guide.md
+++ b/docs/mv-guide.md
@@ -241,6 +241,12 @@ The current information includes:
 | Refresh Mode          | FULL / INCREMENTAL refresh to MV                     |
 | Refresh Trigger Mode  | ON_COMMIT / ON_MANUAL refresh to MV provided by user |
 | Properties            | Table properties of the materialized view            |
+
+**NOTE**: For materialized views created before the
+[CARBONDATA-4107](https://issues.apache.org/jira/browse/CARBONDATA-4107) fix, run the
+refresh MV command to add the MV name to the fact table's table properties and to enable it.
+If the refresh command is not executed, the MV and fact tables may not be in sync and
+queries will not use the MV for pruning.
   
 ## Time Series Support
 
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/view/MVHelper.scala b/integration/spark/src/main/scala/org/apache/carbondata/view/MVHelper.scala
index 4124b48..0047612 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/view/MVHelper.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/view/MVHelper.scala
@@ -24,17 +24,23 @@ import scala.collection.JavaConverters._
 import scala.collection.mutable
 import scala.collection.mutable.ArrayBuffer
 
-import org.apache.spark.sql.CarbonToSparkAdapter
+import com.google.gson.Gson
+import org.apache.spark.sql.{CarbonEnv, CarbonToSparkAdapter, SparkSession}
 import org.apache.spark.sql.catalyst.catalog.CatalogTable
 import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, AttributeReference, Expression, GetArrayItem, GetMapValue, GetStructField, NamedExpression, ScalaUDF, SortOrder}
 import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression
 import org.apache.spark.sql.catalyst.plans.logical.{Aggregate, LogicalPlan, Project}
 import org.apache.spark.sql.execution.command.Field
 import org.apache.spark.sql.execution.datasources.LogicalRelation
+import org.apache.spark.sql.hive.CarbonHiveIndexMetadataUtil
+import org.apache.spark.sql.index.CarbonIndexUtil
 import org.apache.spark.sql.types.DataType
 
 import org.apache.carbondata.common.exceptions.sql.MalformedMVCommandException
+import org.apache.carbondata.common.logging.LogServiceFactory
 import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.locks.{CarbonLockUtil, ICarbonLock, LockUsage}
+import org.apache.carbondata.core.view.MVSchema
 import org.apache.carbondata.mv.plans.modular.{GroupBy, ModularPlan, ModularRelation, Select}
 import org.apache.carbondata.spark.util.CommonUtil
 
@@ -43,6 +49,8 @@ import org.apache.carbondata.spark.util.CommonUtil
  */
 object MVHelper {
 
+  private val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
+
   def dropDummyFunction(plan: LogicalPlan): LogicalPlan = {
     plan transform {
       case Project(expressions, child) =>
@@ -372,6 +380,93 @@ object MVHelper {
     updatedName
   }
 
+  /**
+   * Add or modify the MV database and table name to fact table's related mv tables Map
+   * 1. During Create, add the database and mv name to the fact table's map
+   * 2. On drop mv, remove if fact table's map contains mv name
+   * 3. On refresh, add mv name to the fact table's map if it is not present.
+   * If already present, then just return
+   */
+  def addOrModifyMVTablesMap(session: SparkSession,
+      mvSchema: MVSchema,
+      isMVDrop: Boolean = false,
+      isLockAcquiredOnFactTable: String = null,
+      isRefreshMV: Boolean = false): Unit = {
+    val mvDatabaseName = mvSchema.getIdentifier.getDatabaseName
+    val mvName = mvSchema.getIdentifier.getTableName
+    mvSchema.getRelatedTables.asScala.foreach { parentTable =>
+      val parentDbName = parentTable.getDatabaseName
+      val parentTableName = parentTable.getTableName
+      var carbonLock: ICarbonLock = null
+      try {
+        val carbonTable = try {
+          CarbonEnv.getCarbonTable(Some(parentDbName), parentTableName)(session)
+        } catch {
+          case _: Exception =>
+            LOGGER.error(s"Error while getting carbon table for " +
+                         s"${ parentDbName + "." + parentTableName }")
+            null
+        }
+        if (null != carbonTable) {
+          // get the mv tables map from fact table
+          var relatedMVTablesMap = carbonTable.getMVTablesMap
+          if (isRefreshMV && relatedMVTablesMap.containsKey(mvDatabaseName) &&
+              relatedMVTablesMap.get(mvDatabaseName).contains(mvName)) {
+            // in case of refresh mv scenario, if mvName is already present in fact table
+            // properties, then do not update , just return
+            return
+          }
+          // get lock on fact table
+          if (null == isLockAcquiredOnFactTable) {
+            carbonLock = CarbonLockUtil.getLockObject(carbonTable.getAbsoluteTableIdentifier,
+              LockUsage.METADATA_LOCK)
+          } else if (null != isLockAcquiredOnFactTable &&
+                     !isLockAcquiredOnFactTable.equals(parentTableName)) {
+            carbonLock = CarbonLockUtil.getLockObject(carbonTable.getAbsoluteTableIdentifier,
+              LockUsage.METADATA_LOCK)
+          }
+          // get the mv tables map again from fact table after acquiring lock
+          relatedMVTablesMap = carbonTable.getMVTablesMap
+          var needFactTableUpdate = true
+          if (isMVDrop) {
+            //  If database don't have any MV, then remove the database from related tables
+            //  property and update table property of fact table
+            relatedMVTablesMap.get(mvDatabaseName).remove(mvName)
+            if (relatedMVTablesMap.get(mvDatabaseName).isEmpty) {
+              relatedMVTablesMap.remove(mvDatabaseName)
+            }
+          } else {
+            if (!relatedMVTablesMap.containsKey(mvDatabaseName)) {
+              val mvTables = new util.ArrayList[String]()
+              mvTables.add(mvName)
+              relatedMVTablesMap.put(mvDatabaseName, mvTables)
+            } else if (!relatedMVTablesMap.get(mvDatabaseName).contains(mvName)) {
+              relatedMVTablesMap.get(mvDatabaseName).add(mvName)
+            } else {
+              needFactTableUpdate = false
+            }
+          }
+          if (needFactTableUpdate) {
+            CarbonIndexUtil.addOrModifyTableProperty(carbonTable,
+              Map(CarbonCommonConstants.RELATED_MV_TABLES_MAP ->
+                  new Gson().toJson(relatedMVTablesMap)),
+              needLock = false)(session)
+            CarbonHiveIndexMetadataUtil.refreshTable(parentDbName, parentTableName, session)
+          }
+        }
+      } finally {
+        if (null != carbonLock) {
+          val unlock = carbonLock.unlock()
+          if (unlock) {
+            LOGGER.info("Table MetaData Unlocked Successfully")
+          } else {
+            LOGGER.error(s"Unable to unlock metadata lock for table $parentTableName")
+          }
+        }
+      }
+    }
+  }
+
 }
 
 
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/view/MVManagerInSpark.scala b/integration/spark/src/main/scala/org/apache/carbondata/view/MVManagerInSpark.scala
index 3300629..3f76247 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/view/MVManagerInSpark.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/view/MVManagerInSpark.scala
@@ -66,6 +66,9 @@ object MVManagerInSpark {
     }
     val viewManager = MVManagerInSpark.get(sparkSession)
     val viewSchemas = new util.ArrayList[MVSchema]()
+    if (!viewManager.hasSchemaOnTable(carbonTable)) {
+      return
+    }
     for (viewSchema <- viewManager.getSchemasOnTable(carbonTable).asScala) {
       if (viewSchema.isRefreshOnManual) {
         viewSchemas.add(viewSchema)
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDropTableCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDropTableCommand.scala
index 0289254..35ecb21 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDropTableCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDropTableCommand.scala
@@ -110,7 +110,8 @@ case class CarbonDropTableCommand(
               Option(schema.getIdentifier.getDatabaseName),
               schema.getIdentifier.getTableName,
               ifExistsSet = true,
-              forceDrop = true
+              forceDrop = true,
+              isLockAcquiredOnFactTable = carbonTable.getTableName
             )
         }
         viewDropCommands.foreach(_.processMetadata(sparkSession))
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/view/CarbonCreateMVCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/view/CarbonCreateMVCommand.scala
index 98b7179..66e0f8b 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/view/CarbonCreateMVCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/view/CarbonCreateMVCommand.scala
@@ -99,6 +99,9 @@ case class CarbonCreateMVCommand(
     val viewCatalog = MVManagerInSpark.getOrReloadMVCatalog(session)
     val schema = doCreate(session, identifier, viewManager, viewCatalog)
 
+    // Update the related mv tables property to mv fact tables
+    MVHelper.addOrModifyMVTablesMap(session, schema)
+
     try {
       viewCatalog.registerSchema(schema)
       if (schema.isRefreshOnManual) {
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/view/CarbonDropMVCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/view/CarbonDropMVCommand.scala
index b201db6..388ea0b 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/view/CarbonDropMVCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/view/CarbonDropMVCommand.scala
@@ -28,7 +28,7 @@ import org.apache.carbondata.common.logging.LogServiceFactory
 import org.apache.carbondata.core.datastore.impl.FileFactory
 import org.apache.carbondata.core.util.CarbonProperties
 import org.apache.carbondata.events.{OperationContext, OperationListenerBus}
-import org.apache.carbondata.view.{MVCatalogInSpark, MVManagerInSpark, UpdateMVPostExecutionEvent, UpdateMVPreExecutionEvent}
+import org.apache.carbondata.view.{MVCatalogInSpark, MVHelper, MVManagerInSpark, UpdateMVPostExecutionEvent, UpdateMVPreExecutionEvent}
 
 /**
  * Drop Materialized View Command implementation
@@ -38,7 +38,8 @@ case class CarbonDropMVCommand(
     databaseNameOption: Option[String],
     name: String,
     ifExistsSet: Boolean,
-    forceDrop: Boolean = false)
+    forceDrop: Boolean = false,
+    isLockAcquiredOnFactTable: String = null)
   extends AtomicRunnableCommand {
 
   private val logger = CarbonDropMVCommand.LOGGER
@@ -90,6 +91,10 @@ case class CarbonDropMVCommand(
           }
         }
 
+        // Update the related mv table's property to mv fact tables
+        MVHelper.addOrModifyMVTablesMap(session, schema, isMVDrop = true,
+          isLockAcquiredOnFactTable = isLockAcquiredOnFactTable)
+
         this.dropTableCommand = dropTableCommand
       } else {
         if (!ifExistsSet) {
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/view/CarbonRefreshMVCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/view/CarbonRefreshMVCommand.scala
index bec2925..ab43241 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/view/CarbonRefreshMVCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/view/CarbonRefreshMVCommand.scala
@@ -24,7 +24,7 @@ import org.apache.spark.sql.execution.command.DataCommand
 import org.apache.carbondata.common.exceptions.sql.{MalformedMVCommandException, NoSuchMVException}
 import org.apache.carbondata.core.view.MVStatus
 import org.apache.carbondata.events.{OperationContext, OperationListenerBus}
-import org.apache.carbondata.view.{MVManagerInSpark, MVRefresher, RefreshMVPostExecutionEvent, RefreshMVPreExecutionEvent}
+import org.apache.carbondata.view.{MVHelper, MVManagerInSpark, MVRefresher, RefreshMVPostExecutionEvent, RefreshMVPreExecutionEvent}
 
 /**
  * Refresh Materialized View Command implementation
@@ -46,6 +46,10 @@ case class CarbonRefreshMVCommand(
         throw new MalformedMVCommandException(
           s"Materialized view ${ databaseName }.${ mvName } does not exist")
     }
+
+    // refresh table property of parent table if needed
+    MVHelper.addOrModifyMVTablesMap(session, schema, isRefreshMV = true)
+
     val table = CarbonEnv.getCarbonTable(Option(databaseName), mvName)(session)
     setAuditTable(table)
 
