This is an automated email from the ASF dual-hosted git repository.

qiangcai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/carbondata.git


The following commit(s) were added to refs/heads/master by this push:
     new 83d71b4  [CARBONDATA-4015] Remove hard-coded parameters of the ICarbonLock.lockWithRetries method
83d71b4 is described below

commit 83d71b4077b23749efccee6f782104521be8c780
Author: Kejian-Li <likeji...@huawei.com>
AuthorDate: Tue Sep 29 21:58:55 2020 +0800

    [CARBONDATA-4015] Remove hard-coded parameters of the ICarbonLock.lockWithRetries method
    
    Why is this PR needed?
    The retryCount and retryInterval of the compaction lock and update lock are
    hard-coded as "3" when they try to acquire the lock. The default values of
    these two parameters are already "3", so hard-coding them is unnecessary;
    removing the "3"s also lets users configure the values in carbon.properties.
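    
    For illustration only (not part of this commit): with the hard-coded
    (3, 3) removed, the retry behaviour can be tuned once for the whole
    deployment, as in the sketch below. The property names
    carbon.lock.retries and carbon.lock.retry.timeout.sec are assumed from
    the CarbonData configuration documentation; verify them against your
    version.
    
        // Illustrative sketch: configure the lock retry policy globally
        // instead of pinning (3, 3) at every call site.
        // NOTE: the property names are assumptions, not part of this commit.
        import org.apache.carbondata.core.util.CarbonProperties
    
        val props = CarbonProperties.getInstance()
        props.addProperty("carbon.lock.retries", "5")            // number of attempts
        props.addProperty("carbon.lock.retry.timeout.sec", "10") // wait between attempts
    
        // The zero-argument lockWithRetries() then uses the configured values
        // (or the built-in defaults) instead of the hard-coded 3 and 3.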
    
    What changes were proposed in this PR?
    Removed the hard-coded arguments passed to lockWithRetries() for the
    compaction lock and update lock so that the configured/default retry
    values are used.
    Besides that, made some obvious but trivial cleanups (e.g. Boolean ->
    boolean return types).
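    
    As a rough usage sketch (a hypothetical helper, not code from this
    commit): the zero-argument lockWithRetries() combined with the usual
    acquire/release pattern seen in the commands touched here.
    
        // Hypothetical helper illustrating the acquire/release pattern with
        // the zero-argument overload; retry count and interval come from the
        // configuration (or defaults), not from the call site.
        import org.apache.carbondata.core.locks.{CarbonLockFactory, LockUsage}
        import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier
    
        def withUpdateLock[T](identifier: AbsoluteTableIdentifier)(body: => T): T = {
          val updateLock = CarbonLockFactory.getCarbonLockObj(identifier, LockUsage.UPDATE_LOCK)
          if (!updateLock.lockWithRetries()) {
            throw new RuntimeException("Unable to acquire the update lock")
          }
          try {
            body
          } finally {
            updateLock.unlock()
          }
        }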
    
    Does this PR introduce any user interface change?
    No
    
    Is any new testcase added?
    No
    
    This closes #3964
---
 .../carbondata/core/statusmanager/SegmentStatusManager.java       | 8 ++++----
 .../org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala    | 2 +-
 .../sql/execution/command/management/CarbonAddLoadCommand.scala   | 2 +-
 .../command/management/CarbonAlterTableCompactionCommand.scala    | 2 +-
 .../command/mutation/CarbonProjectForDeleteCommand.scala          | 4 ++--
 .../command/mutation/CarbonProjectForUpdateCommand.scala          | 4 ++--
 6 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentStatusManager.java b/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentStatusManager.java
index 970187a..6fc0754 100755
--- a/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentStatusManager.java
+++ b/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentStatusManager.java
@@ -876,7 +876,7 @@ public class SegmentStatusManager {
   /**
    * Return true if any load or insert overwrite is in progress for specified table
    */
-  public static Boolean isLoadInProgressInTable(CarbonTable carbonTable) {
+  public static boolean isLoadInProgressInTable(CarbonTable carbonTable) {
     if (carbonTable == null) {
       return false;
     }
@@ -903,7 +903,7 @@ public class SegmentStatusManager {
    * @param carbonTable
    * @return
    */
-  public static Boolean isCompactionInProgress(CarbonTable carbonTable) {
+  public static boolean isCompactionInProgress(CarbonTable carbonTable) {
     if (carbonTable == null) {
       return false;
     }
@@ -921,7 +921,7 @@ public class SegmentStatusManager {
   /**
    * Return true if insert overwrite is in progress for specified table
    */
-  public static Boolean isOverwriteInProgressInTable(CarbonTable carbonTable) {
+  public static boolean isOverwriteInProgressInTable(CarbonTable carbonTable) {
     if (carbonTable == null) {
       return false;
     }
@@ -945,7 +945,7 @@ public class SegmentStatusManager {
   /**
    * Return true if the specified `loadName` is in progress, by checking the load lock.
    */
-  public static Boolean isLoadInProgress(AbsoluteTableIdentifier absoluteTableIdentifier,
+  public static boolean isLoadInProgress(AbsoluteTableIdentifier absoluteTableIdentifier,
       String loadName) {
     ICarbonLock segmentLock = CarbonLockFactory.getCarbonLockObj(absoluteTableIdentifier,
         CarbonTablePath.addSegmentPrefix(loadName) + LockUsage.LOCK);
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
index 6d32dae..cb58953 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
@@ -974,7 +974,7 @@ object CarbonDataRDDFactory {
         val updateLock = CarbonLockFactory.getCarbonLockObj(carbonTable
           .getAbsoluteTableIdentifier, LockUsage.UPDATE_LOCK)
         try {
-          if (updateLock.lockWithRetries(3, 3)) {
+          if (updateLock.lockWithRetries()) {
             if (lock.lockWithRetries()) {
               LOGGER.info("Acquired the compaction lock.")
               startCompactionThreads(sqlContext,
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAddLoadCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAddLoadCommand.scala
index 80abb63..8434d9a 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAddLoadCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAddLoadCommand.scala
@@ -86,7 +86,7 @@ case class CarbonAddLoadCommand(
     }
     // if insert overwrite in progress, do not allow add segment
     if (SegmentStatusManager.isOverwriteInProgressInTable(carbonTable)) {
-      throw new ConcurrentOperationException(carbonTable, "insert overwrite", "delete segment")
+      throw new ConcurrentOperationException(carbonTable, "insert overwrite", "add segment")
     }
 
     val inputPath = options.getOrElse(
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAlterTableCompactionCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAlterTableCompactionCommand.scala
index c24c922..755d35b 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAlterTableCompactionCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAlterTableCompactionCommand.scala
@@ -321,7 +321,7 @@ case class CarbonAlterTableCompactionCommand(
         // COMPACTION_LOCK and UPDATE_LOCK are already locked when start to execute update sql,
         // so it don't need to require locks again when compactionType is IUD_UPDDEL_DELTA.
         if (CompactionType.IUD_UPDDEL_DELTA != compactionType) {
-          if (!updateLock.lockWithRetries(3, 3)) {
+          if (!updateLock.lockWithRetries()) {
             throw new ConcurrentOperationException(carbonTable, "update", "compaction")
           }
           if (!lock.lockWithRetries()) {
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/CarbonProjectForDeleteCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/CarbonProjectForDeleteCommand.scala
index cf81200..6c6d65a 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/CarbonProjectForDeleteCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/CarbonProjectForDeleteCommand.scala
@@ -96,10 +96,10 @@ private[sql] case class CarbonProjectForDeleteCommand(
     try {
       lockStatus = metadataLock.lockWithRetries()
       if (lockStatus) {
-        if (!compactionLock.lockWithRetries(3, 3)) {
+        if (!compactionLock.lockWithRetries()) {
           throw new ConcurrentOperationException(carbonTable, "compaction", "delete")
         }
-        if (!updateLock.lockWithRetries(3, 3)) {
+        if (!updateLock.lockWithRetries()) {
           throw new ConcurrentOperationException(carbonTable, "update/delete", "delete")
         }
         LOGGER.info("Successfully able to get the table metadata file lock")
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/CarbonProjectForUpdateCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/CarbonProjectForUpdateCommand.scala
index 16d90ef..50d1e21 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/CarbonProjectForUpdateCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/CarbonProjectForUpdateCommand.scala
@@ -128,8 +128,8 @@ private[sql] case class CarbonProjectForUpdateCommand(
       }
 
       val executionErrors = new ExecutionErrors(FailureCauses.NONE, "")
-      if (updateLock.lockWithRetries(3, 3)) {
-        if (compactionLock.lockWithRetries(3, 3)) {
+      if (updateLock.lockWithRetries()) {
+        if (compactionLock.lockWithRetries()) {
           // Get RDD.
           dataSet = if (isPersistEnabled) {
             Dataset.ofRows(sparkSession, plan).persist(StorageLevel.fromString(
