This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
     new dd420d47ac9 [fix](storage medium) Fix show partition storage medium 
not right whe… (#30160) (#31642)
dd420d47ac9 is described below

commit dd420d47ac96e62d3a8983864a236f20503a2146
Author: deardeng <[email protected]>
AuthorDate: Mon Mar 4 20:34:25 2024 +0800

    [fix](storage medium) Fix show partition storage medium not right whe… 
(#30160) (#31642)
---
 .../java/org/apache/doris/backup/RestoreJob.java   |   3 +-
 .../org/apache/doris/catalog/DataProperty.java     |   4 +
 .../java/org/apache/doris/catalog/OlapTable.java   |   3 +-
 .../apache/doris/datasource/InternalCatalog.java   | 168 ++++++++++-----------
 .../org/apache/doris/system/SystemInfoService.java |   9 +-
 .../apache/doris/catalog/ModifyBackendTest.java    |   2 +-
 .../doris/catalog/ReplicaAllocationTest.java       |   5 +-
 .../apache/doris/system/SystemInfoServiceTest.java |   4 +-
 .../test_partition_default_medium.groovy           | 109 +++++++++++++
 9 files changed, 208 insertions(+), 99 deletions(-)

diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java 
b/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java
index fcfea3b2b24..d4470a1029e 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java
@@ -1142,8 +1142,9 @@ public class RestoreJob extends AbstractJob {
 
                 // replicas
                 try {
-                    Map<Tag, List<Long>> beIds = Env.getCurrentSystemInfo()
+                    Pair<Map<Tag, List<Long>>, TStorageMedium> beIdsAndMedium 
= Env.getCurrentSystemInfo()
                             .selectBackendIdsForReplicaCreation(replicaAlloc, 
nextIndexs, null, false, false);
+                    Map<Tag, List<Long>> beIds = beIdsAndMedium.first;
                     for (Map.Entry<Tag, List<Long>> entry : beIds.entrySet()) {
                         for (Long beId : entry.getValue()) {
                             long newReplicaId = env.getNextId();
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/catalog/DataProperty.java 
b/fe/fe-core/src/main/java/org/apache/doris/catalog/DataProperty.java
index 731776384d0..2974d337167 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/DataProperty.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/DataProperty.java
@@ -118,6 +118,10 @@ public class DataProperty implements Writable, 
GsonPostProcessable {
         storageMediumSpecified = isSpecified;
     }
 
+    public void setStorageMedium(TStorageMedium medium) {
+        this.storageMedium = medium;
+    }
+
     public static DataProperty read(DataInput in) throws IOException {
         if (Env.getCurrentEnvJournalVersion() >= FeMetaVersion.VERSION_108) {
             String json = Text.readString(in);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java 
b/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java
index 6b0a218f302..dfb0d8bcd99 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java
@@ -637,9 +637,10 @@ public class OlapTable extends Table implements 
MTMVRelatedTableIf {
 
                     // replicas
                     try {
-                        Map<Tag, List<Long>> tag2beIds =
+                        Pair<Map<Tag, List<Long>>, TStorageMedium> 
tag2beIdsAndMedium =
                                 
Env.getCurrentSystemInfo().selectBackendIdsForReplicaCreation(
                                         replicaAlloc, nextIndexs, null, false, 
false);
+                        Map<Tag, List<Long>> tag2beIds = 
tag2beIdsAndMedium.first;
                         for (Map.Entry<Tag, List<Long>> entry3 : 
tag2beIds.entrySet()) {
                             for (Long beId : entry3.getValue()) {
                                 long newReplicaId = env.getNextId();
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java 
b/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java
index 99e5b15f6ee..d05f9cce48d 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java
@@ -1565,20 +1565,13 @@ public class InternalCatalog implements 
CatalogIf<Database> {
         }
         try {
             long partitionId = idGeneratorBuffer.getNextId();
-            Partition partition = createPartitionWithIndices(db.getId(), 
olapTable.getId(),
-                    olapTable.getName(), olapTable.getBaseIndexId(), 
partitionId, partitionName, indexIdToMeta,
-                    distributionInfo, dataProperty.getStorageMedium(), 
singlePartitionDesc.getReplicaAlloc(),
-                    singlePartitionDesc.getVersionInfo(), bfColumns, 
olapTable.getBfFpp(), tabletIdSet,
-                    olapTable.getCopiedIndexes(), 
singlePartitionDesc.isInMemory(), olapTable.getStorageFormat(),
-                    singlePartitionDesc.getTabletType(), 
olapTable.getCompressionType(), olapTable.getDataSortInfo(),
-                    olapTable.getEnableUniqueKeyMergeOnWrite(), storagePolicy, 
idGeneratorBuffer,
-                    olapTable.disableAutoCompaction(), 
olapTable.enableSingleReplicaCompaction(),
-                    olapTable.skipWriteIndexOnLoad(), 
olapTable.getCompactionPolicy(),
-                    olapTable.getTimeSeriesCompactionGoalSizeMbytes(),
-                    olapTable.getTimeSeriesCompactionFileCountThreshold(),
-                    olapTable.getTimeSeriesCompactionTimeThresholdSeconds(),
-                    olapTable.getTimeSeriesCompactionEmptyRowsetsThreshold(),
-                    olapTable.storeRowColumn(),
+            Partition partition = createPartitionWithIndices(db.getId(), 
olapTable,
+                    partitionId, partitionName, indexIdToMeta,
+                    distributionInfo, dataProperty, 
singlePartitionDesc.getReplicaAlloc(),
+                    singlePartitionDesc.getVersionInfo(), bfColumns, 
tabletIdSet,
+                    singlePartitionDesc.isInMemory(),
+                    singlePartitionDesc.getTabletType(),
+                    storagePolicy, idGeneratorBuffer,
                     binlogConfig, dataProperty.isStorageMediumSpecified(), 
null);
             // TODO cluster key ids
 
@@ -1827,33 +1820,33 @@ public class InternalCatalog implements 
CatalogIf<Database> {
         }
     }
 
-    private Partition createPartitionWithIndices(long dbId, long tableId, 
String tableName,
-            long baseIndexId, long partitionId, String partitionName, 
Map<Long, MaterializedIndexMeta> indexIdToMeta,
-            DistributionInfo distributionInfo, TStorageMedium storageMedium, 
ReplicaAllocation replicaAlloc,
-            Long versionInfo, Set<String> bfColumns, double bfFpp, Set<Long> 
tabletIdSet, List<Index> indexes,
-            boolean isInMemory, TStorageFormat storageFormat, TTabletType 
tabletType, TCompressionType compressionType,
-            DataSortInfo dataSortInfo, boolean enableUniqueKeyMergeOnWrite, 
String storagePolicy,
-            IdGeneratorBuffer idGeneratorBuffer, boolean disableAutoCompaction,
-            boolean enableSingleReplicaCompaction, boolean 
skipWriteIndexOnLoad,
-            String compactionPolicy, Long timeSeriesCompactionGoalSizeMbytes,
-            Long timeSeriesCompactionFileCountThreshold, Long 
timeSeriesCompactionTimeThresholdSeconds,
-            Long timeSeriesCompactionEmptyRowsetsThreshold,
-            boolean storeRowColumn, BinlogConfig binlogConfig,
-            boolean isStorageMediumSpecified, List<Integer> clusterKeyIndexes) 
throws DdlException {
+    protected Partition createPartitionWithIndices(long dbId, OlapTable tbl, 
long partitionId,
+                                                   String partitionName, 
Map<Long, MaterializedIndexMeta> indexIdToMeta,
+                                                   DistributionInfo 
distributionInfo, DataProperty dataProperty,
+                                                   ReplicaAllocation 
replicaAlloc,
+                                                   Long versionInfo, 
Set<String> bfColumns, Set<Long> tabletIdSet,
+                                                   boolean isInMemory,
+                                                   TTabletType tabletType,
+                                                   String storagePolicy,
+                                                   IdGeneratorBuffer 
idGeneratorBuffer,
+                                                   BinlogConfig binlogConfig,
+                                                   boolean 
isStorageMediumSpecified, List<Integer> clusterKeyIndexes)
+            throws DdlException {
+
         // create base index first.
-        Preconditions.checkArgument(baseIndexId != -1);
-        MaterializedIndex baseIndex = new MaterializedIndex(baseIndexId, 
IndexState.NORMAL);
+        Preconditions.checkArgument(tbl.getBaseIndexId() != -1);
+        MaterializedIndex baseIndex = new 
MaterializedIndex(tbl.getBaseIndexId(), IndexState.NORMAL);
 
         // create partition with base index
         Partition partition = new Partition(partitionId, partitionName, 
baseIndex, distributionInfo);
 
         // add to index map
         Map<Long, MaterializedIndex> indexMap = new HashMap<>();
-        indexMap.put(baseIndexId, baseIndex);
+        indexMap.put(tbl.getBaseIndexId(), baseIndex);
 
         // create rollup index if has
         for (long indexId : indexIdToMeta.keySet()) {
-            if (indexId == baseIndexId) {
+            if (indexId == tbl.getBaseIndexId()) {
                 continue;
             }
 
@@ -1869,6 +1862,7 @@ public class InternalCatalog implements 
CatalogIf<Database> {
         long version = partition.getVisibleVersion();
 
         short totalReplicaNum = replicaAlloc.getTotalReplicaNum();
+        TStorageMedium realStorageMedium = null;
         for (Map.Entry<Long, MaterializedIndex> entry : indexMap.entrySet()) {
             long indexId = entry.getKey();
             MaterializedIndex index = entry.getValue();
@@ -1876,9 +1870,16 @@ public class InternalCatalog implements 
CatalogIf<Database> {
 
             // create tablets
             int schemaHash = indexMeta.getSchemaHash();
-            TabletMeta tabletMeta = new TabletMeta(dbId, tableId, partitionId, 
indexId, schemaHash, storageMedium);
-            createTablets(index, ReplicaState.NORMAL, distributionInfo, 
version, replicaAlloc, tabletMeta,
-                    tabletIdSet, idGeneratorBuffer, isStorageMediumSpecified);
+            TabletMeta tabletMeta = new TabletMeta(dbId, tbl.getId(), 
partitionId, indexId,
+                    schemaHash, dataProperty.getStorageMedium());
+            realStorageMedium = createTablets(index, ReplicaState.NORMAL, 
distributionInfo, version, replicaAlloc,
+                tabletMeta, tabletIdSet, idGeneratorBuffer, 
dataProperty.isStorageMediumSpecified());
+            if (realStorageMedium != null && 
!realStorageMedium.equals(dataProperty.getStorageMedium())) {
+                dataProperty.setStorageMedium(realStorageMedium);
+                LOG.info("real medium not eq default "
+                        + "tableName={} tableId={} partitionName={} 
partitionId={} realMedium {}",
+                        tbl.getName(), tbl.getId(), partitionName, 
partitionId, realStorageMedium);
+            }
 
             boolean ok = false;
             String errMsg = null;
@@ -1897,17 +1898,20 @@ public class InternalCatalog implements 
CatalogIf<Database> {
                     long backendId = replica.getBackendId();
                     long replicaId = replica.getId();
                     countDownLatch.addMark(backendId, tabletId);
-                    CreateReplicaTask task = new CreateReplicaTask(backendId, 
dbId, tableId, partitionId, indexId,
+                    CreateReplicaTask task = new CreateReplicaTask(backendId, 
dbId, tbl.getId(), partitionId, indexId,
                             tabletId, replicaId, shortKeyColumnCount, 
schemaHash, version, keysType, storageType,
-                            storageMedium, schema, bfColumns, bfFpp, 
countDownLatch, indexes, isInMemory, tabletType,
-                            dataSortInfo, compressionType, 
enableUniqueKeyMergeOnWrite, storagePolicy,
-                            disableAutoCompaction, 
enableSingleReplicaCompaction, skipWriteIndexOnLoad,
-                            compactionPolicy, 
timeSeriesCompactionGoalSizeMbytes,
-                            timeSeriesCompactionFileCountThreshold, 
timeSeriesCompactionTimeThresholdSeconds,
-                            timeSeriesCompactionEmptyRowsetsThreshold,
-                            storeRowColumn, binlogConfig);
-
-                    task.setStorageFormat(storageFormat);
+                            realStorageMedium, schema, bfColumns, 
tbl.getBfFpp(), countDownLatch,
+                            tbl.getCopiedIndexes(), tbl.isInMemory(), 
tabletType,
+                            tbl.getDataSortInfo(), tbl.getCompressionType(),
+                            tbl.getEnableUniqueKeyMergeOnWrite(), 
storagePolicy, tbl.disableAutoCompaction(),
+                            tbl.enableSingleReplicaCompaction(), 
tbl.skipWriteIndexOnLoad(),
+                            tbl.getCompactionPolicy(), 
tbl.getTimeSeriesCompactionGoalSizeMbytes(),
+                            tbl.getTimeSeriesCompactionFileCountThreshold(),
+                            tbl.getTimeSeriesCompactionTimeThresholdSeconds(),
+                            tbl.getTimeSeriesCompactionEmptyRowsetsThreshold(),
+                            tbl.storeRowColumn(), binlogConfig);
+
+                    task.setStorageFormat(tbl.getStorageFormat());
                     task.setClusterKeyIndexes(clusterKeyIndexes);
                     batchTask.addTask(task);
                     // add to AgentTaskQueue for handling finish report.
@@ -1971,14 +1975,14 @@ public class InternalCatalog implements 
CatalogIf<Database> {
                 }
             }
 
-            if (index.getId() != baseIndexId) {
+            if (index.getId() != tbl.getBaseIndexId()) {
                 // add rollup index to partition
                 partition.createRollupIndex(index);
             }
         } // end for indexMap
 
         LOG.info("succeed in creating partition[{}-{}], table : [{}-{}]", 
partitionId, partitionName,
-                tableId, tableName);
+                tbl.getId(), tbl.getName());
 
         return partition;
     }
@@ -2566,20 +2570,14 @@ public class InternalCatalog implements 
CatalogIf<Database> {
                             "Database " + db.getFullName() + " create 
unpartitioned table " + tableName + " increasing "
                                     + totalReplicaNum + " of replica exceeds 
quota[" + db.getReplicaQuota() + "]");
                 }
-                Partition partition = createPartitionWithIndices(db.getId(), 
olapTable.getId(),
-                        olapTable.getName(), olapTable.getBaseIndexId(), 
partitionId, partitionName,
+                Partition partition = createPartitionWithIndices(db.getId(), 
olapTable, partitionId, partitionName,
                         olapTable.getIndexIdToMeta(), 
partitionDistributionInfo,
-                        
partitionInfo.getDataProperty(partitionId).getStorageMedium(),
-                        partitionInfo.getReplicaAllocation(partitionId), 
versionInfo, bfColumns, bfFpp, tabletIdSet,
-                        olapTable.getCopiedIndexes(), isInMemory, 
storageFormat, tabletType, compressionType,
-                        olapTable.getDataSortInfo(), 
olapTable.getEnableUniqueKeyMergeOnWrite(), storagePolicy,
-                        idGeneratorBuffer, olapTable.disableAutoCompaction(),
-                        olapTable.enableSingleReplicaCompaction(), 
skipWriteIndexOnLoad,
-                        olapTable.getCompactionPolicy(), 
olapTable.getTimeSeriesCompactionGoalSizeMbytes(),
-                        olapTable.getTimeSeriesCompactionFileCountThreshold(),
-                        
olapTable.getTimeSeriesCompactionTimeThresholdSeconds(),
-                        
olapTable.getTimeSeriesCompactionEmptyRowsetsThreshold(),
-                        storeRowColumn, binlogConfigForTask,
+                        partitionInfo.getDataProperty(partitionId),
+                        partitionInfo.getReplicaAllocation(partitionId), 
versionInfo, bfColumns, tabletIdSet,
+                        isInMemory, tabletType,
+                        storagePolicy,
+                        idGeneratorBuffer,
+                        binlogConfigForTask,
                         
partitionInfo.getDataProperty(partitionId).isStorageMediumSpecified(),
                         keysDesc.getClusterKeysColumnIds());
                 olapTable.addPartition(partition);
@@ -2665,20 +2663,15 @@ public class InternalCatalog implements 
CatalogIf<Database> {
                     
Env.getCurrentEnv().getPolicyMgr().checkStoragePolicyExist(partionStoragePolicy);
 
                     Partition partition = 
createPartitionWithIndices(db.getId(),
-                            olapTable.getId(), olapTable.getName(), 
olapTable.getBaseIndexId(), entry.getValue(),
+                            olapTable, entry.getValue(),
                             entry.getKey(), olapTable.getIndexIdToMeta(), 
partitionDistributionInfo,
-                            dataProperty.getStorageMedium(), 
partitionInfo.getReplicaAllocation(entry.getValue()),
-                            versionInfo, bfColumns, bfFpp, tabletIdSet, 
olapTable.getCopiedIndexes(), isInMemory,
-                            storageFormat, 
partitionInfo.getTabletType(entry.getValue()), compressionType,
-                            olapTable.getDataSortInfo(), 
olapTable.getEnableUniqueKeyMergeOnWrite(),
-                            partionStoragePolicy, idGeneratorBuffer, 
olapTable.disableAutoCompaction(),
-                            olapTable.enableSingleReplicaCompaction(), 
skipWriteIndexOnLoad,
-                            olapTable.getCompactionPolicy(), 
olapTable.getTimeSeriesCompactionGoalSizeMbytes(),
-                            
olapTable.getTimeSeriesCompactionFileCountThreshold(),
-                            
olapTable.getTimeSeriesCompactionTimeThresholdSeconds(),
-                            
olapTable.getTimeSeriesCompactionEmptyRowsetsThreshold(),
-                            storeRowColumn, binlogConfigForTask,
-                            dataProperty.isStorageMediumSpecified(), 
keysDesc.getClusterKeysColumnIds());
+                            dataProperty, 
partitionInfo.getReplicaAllocation(entry.getValue()),
+                            versionInfo, bfColumns, tabletIdSet, isInMemory,
+                            partitionInfo.getTabletType(entry.getValue()),
+                            partionStoragePolicy, idGeneratorBuffer,
+                            binlogConfigForTask,
+                            dataProperty.isStorageMediumSpecified(),
+                            keysDesc.getClusterKeysColumnIds());
                     olapTable.addPartition(partition);
                     
olapTable.getPartitionInfo().getDataProperty(partition.getId())
                             .setStoragePolicy(partionStoragePolicy);
@@ -2872,7 +2865,7 @@ public class InternalCatalog implements 
CatalogIf<Database> {
     }
 
     @VisibleForTesting
-    public void createTablets(MaterializedIndex index, ReplicaState 
replicaState,
+    public TStorageMedium createTablets(MaterializedIndex index, ReplicaState 
replicaState,
             DistributionInfo distributionInfo, long version, ReplicaAllocation 
replicaAlloc, TabletMeta tabletMeta,
             Set<Long> tabletIdSet, IdGeneratorBuffer idGeneratorBuffer, 
boolean isStorageMediumSpecified)
             throws DdlException {
@@ -2925,9 +2918,12 @@ public class InternalCatalog implements 
CatalogIf<Database> {
             if (chooseBackendsArbitrary) {
                 // This is the first colocate table in the group, or just a 
normal table,
                 // choose backends
-                chosenBackendIds = 
systemInfoService.selectBackendIdsForReplicaCreation(replicaAlloc, nextIndexs,
+                Pair<Map<Tag, List<Long>>, TStorageMedium> 
chosenBackendIdsAndMedium
+                        = systemInfoService.selectBackendIdsForReplicaCreation(
+                        replicaAlloc, nextIndexs,
                         storageMedium, isStorageMediumSpecified, false);
-
+                chosenBackendIds = chosenBackendIdsAndMedium.first;
+                storageMedium = chosenBackendIdsAndMedium.second;
                 for (Map.Entry<Tag, List<Long>> entry : 
chosenBackendIds.entrySet()) {
                     backendsPerBucketSeq.putIfAbsent(entry.getKey(), 
Lists.newArrayList());
                     
backendsPerBucketSeq.get(entry.getKey()).add(entry.getValue());
@@ -2959,6 +2955,7 @@ public class InternalCatalog implements 
CatalogIf<Database> {
             ColocatePersistInfo info = 
ColocatePersistInfo.createForBackendsPerBucketSeq(groupId, 
backendsPerBucketSeq);
             
Env.getCurrentEnv().getEditLog().logColocateBackendsPerBucketSeq(info);
         }
+        return storageMedium;
     }
 
     /*
@@ -3110,23 +3107,16 @@ public class InternalCatalog implements 
CatalogIf<Database> {
                 // which is the right behavior.
                 long oldPartitionId = entry.getValue();
                 long newPartitionId = idGeneratorBuffer.getNextId();
-                Partition newPartition = 
createPartitionWithIndices(db.getId(), copiedTbl.getId(),
-                        copiedTbl.getName(), copiedTbl.getBaseIndexId(), 
newPartitionId, entry.getKey(),
+                Partition newPartition = 
createPartitionWithIndices(db.getId(), copiedTbl,
+                        newPartitionId, entry.getKey(),
                         copiedTbl.getIndexIdToMeta(), 
partitionsDistributionInfo.get(oldPartitionId),
-                        
copiedTbl.getPartitionInfo().getDataProperty(oldPartitionId).getStorageMedium(),
+                        
copiedTbl.getPartitionInfo().getDataProperty(oldPartitionId),
                         
copiedTbl.getPartitionInfo().getReplicaAllocation(oldPartitionId), null /* 
version info */,
-                        copiedTbl.getCopiedBfColumns(), copiedTbl.getBfFpp(), 
tabletIdSet, copiedTbl.getCopiedIndexes(),
-                        copiedTbl.isInMemory(), copiedTbl.getStorageFormat(),
-                        
copiedTbl.getPartitionInfo().getTabletType(oldPartitionId), 
copiedTbl.getCompressionType(),
-                        copiedTbl.getDataSortInfo(), 
copiedTbl.getEnableUniqueKeyMergeOnWrite(),
+                        copiedTbl.getCopiedBfColumns(), tabletIdSet,
+                        copiedTbl.isInMemory(),
+                        
copiedTbl.getPartitionInfo().getTabletType(oldPartitionId),
                         
olapTable.getPartitionInfo().getDataProperty(oldPartitionId).getStoragePolicy(),
-                        idGeneratorBuffer, olapTable.disableAutoCompaction(),
-                        olapTable.enableSingleReplicaCompaction(), 
olapTable.skipWriteIndexOnLoad(),
-                        olapTable.getCompactionPolicy(), 
olapTable.getTimeSeriesCompactionGoalSizeMbytes(),
-                        olapTable.getTimeSeriesCompactionFileCountThreshold(),
-                        
olapTable.getTimeSeriesCompactionTimeThresholdSeconds(),
-                        
olapTable.getTimeSeriesCompactionEmptyRowsetsThreshold(),
-                        olapTable.storeRowColumn(), binlogConfig,
+                        idGeneratorBuffer, binlogConfig,
                         
copiedTbl.getPartitionInfo().getDataProperty(oldPartitionId).isStorageMediumSpecified(),
                         clusterKeyIdxes);
                 newPartitions.add(newPartition);
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/system/SystemInfoService.java 
b/fe/fe-core/src/main/java/org/apache/doris/system/SystemInfoService.java
index 1a132b5449c..0dc314240b3 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/system/SystemInfoService.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/system/SystemInfoService.java
@@ -485,7 +485,7 @@ public class SystemInfoService {
      * @return return the selected backend ids group by tag.
      * @throws DdlException
      */
-    public Map<Tag, List<Long>> selectBackendIdsForReplicaCreation(
+    public Pair<Map<Tag, List<Long>>, TStorageMedium> 
selectBackendIdsForReplicaCreation(
             ReplicaAllocation replicaAlloc, Map<Tag, Integer> nextIndexs,
             TStorageMedium storageMedium, boolean isStorageMediumSpecified,
             boolean isOnlyForCheck)
@@ -520,6 +520,7 @@ public class SystemInfoService {
                 List<Long> beIds = selectBackendIdsByPolicy(policy, 
entry.getValue());
                 // first time empty, retry with different storage medium
                 // if only for check, no need to retry different storage 
medium to get backend
+                TStorageMedium originalStorageMedium = storageMedium;
                 if (beIds.isEmpty() && storageMedium != null && 
!isStorageMediumSpecified && !isOnlyForCheck) {
                     storageMedium = (storageMedium == TStorageMedium.HDD) ? 
TStorageMedium.SSD : TStorageMedium.HDD;
                     builder.setStorageMedium(storageMedium);
@@ -534,10 +535,10 @@ public class SystemInfoService {
                 }
                 // after retry different storage medium, it's still empty
                 if (beIds.isEmpty()) {
-                    LOG.error("failed backend(s) for policy:" + policy);
+                    LOG.error("failed backend(s) for policy: {} real medium 
{}", policy, originalStorageMedium);
                     String errorReplication = "replication tag: " + 
entry.getKey()
                             + ", replication num: " + entry.getValue()
-                            + ", storage medium: " + storageMedium;
+                            + ", storage medium: " + originalStorageMedium;
                     failedEntries.add(errorReplication);
                 } else {
                     chosenBackendIds.put(entry.getKey(), beIds);
@@ -554,7 +555,7 @@ public class SystemInfoService {
         }
 
         Preconditions.checkState(totalReplicaNum == 
replicaAlloc.getTotalReplicaNum());
-        return chosenBackendIds;
+        return Pair.of(chosenBackendIds, storageMedium);
     }
 
     /**
diff --git 
a/fe/fe-core/src/test/java/org/apache/doris/catalog/ModifyBackendTest.java 
b/fe/fe-core/src/test/java/org/apache/doris/catalog/ModifyBackendTest.java
index 6d5380034d5..ca4a658c4d3 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/catalog/ModifyBackendTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/ModifyBackendTest.java
@@ -84,7 +84,7 @@ public class ModifyBackendTest {
         ExceptionChecker.expectThrowsWithMsg(DdlException.class,
                 "Failed to find enough backend, please check the replication 
num,replication tag and storage medium and avail capacity of backends.\n"
                         + "Create failed replications:\n"
-                        + "replication tag: {\"location\" : \"default\"}, 
replication num: 1, storage medium: SSD",
+                        + "replication tag: {\"location\" : \"default\"}, 
replication num: 1, storage medium: HDD",
                 () -> DdlExecutor.execute(Env.getCurrentEnv(), createStmt));
 
         createStr = "create table test.tbl1(\n" + "k1 int\n" + ") distributed 
by hash(k1)\n" + "buckets 3 properties(\n"
diff --git 
a/fe/fe-core/src/test/java/org/apache/doris/catalog/ReplicaAllocationTest.java 
b/fe/fe-core/src/test/java/org/apache/doris/catalog/ReplicaAllocationTest.java
index c53715cd817..971abe9b803 100644
--- 
a/fe/fe-core/src/test/java/org/apache/doris/catalog/ReplicaAllocationTest.java
+++ 
b/fe/fe-core/src/test/java/org/apache/doris/catalog/ReplicaAllocationTest.java
@@ -21,6 +21,7 @@ import org.apache.doris.common.AnalysisException;
 import org.apache.doris.common.DdlException;
 import org.apache.doris.common.ExceptionChecker;
 import org.apache.doris.common.FeConstants;
+import org.apache.doris.common.Pair;
 import org.apache.doris.common.util.PropertyAnalyzer;
 import org.apache.doris.meta.MetaContext;
 import org.apache.doris.resource.Tag;
@@ -56,8 +57,8 @@ public class ReplicaAllocationTest {
                         (TStorageMedium) any, false, true);
                 minTimes = 0;
                 result = new Delegate() {
-                    Map<Tag, List<Long>> selectBackendIdsForReplicaCreation() {
-                        return Maps.newHashMap();
+                    Pair<Map<Tag, List<Long>>, TStorageMedium> 
selectBackendIdsForReplicaCreation() {
+                        return Pair.of(Maps.newHashMap(), TStorageMedium.HDD);
                     }
                 };
             }
diff --git 
a/fe/fe-core/src/test/java/org/apache/doris/system/SystemInfoServiceTest.java 
b/fe/fe-core/src/test/java/org/apache/doris/system/SystemInfoServiceTest.java
index 22e12b37da3..e933c0df17c 100644
--- 
a/fe/fe-core/src/test/java/org/apache/doris/system/SystemInfoServiceTest.java
+++ 
b/fe/fe-core/src/test/java/org/apache/doris/system/SystemInfoServiceTest.java
@@ -22,6 +22,7 @@ import org.apache.doris.catalog.Env;
 import org.apache.doris.catalog.ReplicaAllocation;
 import org.apache.doris.common.AnalysisException;
 import org.apache.doris.common.FeMetaVersion;
+import org.apache.doris.common.Pair;
 import org.apache.doris.meta.MetaContext;
 import org.apache.doris.resource.Tag;
 import org.apache.doris.system.SystemInfoService.HostInfo;
@@ -403,8 +404,9 @@ public class SystemInfoServiceTest {
         // also check if the random selection logic can evenly distribute the 
replica.
         Map<Long, Integer> beCounterMap = Maps.newHashMap();
         for (int i = 0; i < 10000; ++i) {
-            Map<Tag, List<Long>> res = 
infoService.selectBackendIdsForReplicaCreation(replicaAlloc,
+            Pair<Map<Tag, List<Long>>, TStorageMedium> ret = 
infoService.selectBackendIdsForReplicaCreation(replicaAlloc,
                     Maps.newHashMap(), TStorageMedium.HDD, false, false);
+            Map<Tag, List<Long>> res = ret.first;
             Assert.assertEquals(3, res.get(Tag.DEFAULT_BACKEND_TAG).size());
             for (Long beId : res.get(Tag.DEFAULT_BACKEND_TAG)) {
                 beCounterMap.put(beId, beCounterMap.getOrDefault(beId, 0) + 1);
diff --git 
a/regression-test/suites/storage_medium_p0/test_partition_default_medium.groovy 
b/regression-test/suites/storage_medium_p0/test_partition_default_medium.groovy
new file mode 100644
index 00000000000..3543ce64ab1
--- /dev/null
+++ 
b/regression-test/suites/storage_medium_p0/test_partition_default_medium.groovy
@@ -0,0 +1,109 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+import org.apache.doris.regression.suite.ClusterOptions
+import org.apache.doris.regression.util.NodeType
+import org.apache.doris.regression.suite.SuiteCluster
+
+suite("test_partition_default_medium") {
+    def options = new ClusterOptions()
+    options.feConfigs += [
+        'default_storage_medium=HDD',
+    ]
+    options.beDisks = ['SSD=4']
+
+    def checkCreateTablePartitionDefaultMediumEq = {tbl, sum ->
+        sleep 1000
+
+        def partitions = sql_return_maparray "SHOW PARTITIONS FROM $tbl;"
+        def partitionsMedium = [:]
+        partitions.each {
+            def num = partitionsMedium.get(it.StorageMedium)
+            if (num != null) {
+                partitionsMedium.put(it.StorageMedium, ++num)
+            } else {
+                partitionsMedium.put(it.StorageMedium, 1)
+            }
+        }
+        log.info("table ${tbl} partition mediums $partitionsMedium")
+        def count = partitionsMedium.values().stream().distinct().count()
+        assertEquals(count, 1)
+        assertEquals(partitionsMedium.get("SSD"), sum.toInteger())
+    }
+
+    docker(options) {
+        def single_partition_tbl = "single_partition_tbl"
+        def multi_partition_tbl = "multi_partition_tbl"
+        def dynamic_partition_tbl = "dynamic_partition_tbl"
+        sql """drop table if exists $single_partition_tbl"""
+        sql """drop table if exists $multi_partition_tbl"""
+        sql """drop table if exists $dynamic_partition_tbl"""
+
+        sql """
+            CREATE TABLE ${single_partition_tbl}
+            (
+                k1 BIGINT,
+                k2 LARGEINT,
+                v1 VARCHAR(2048),
+                v2 SMALLINT DEFAULT "10"
+            )
+            UNIQUE KEY(k1, k2)
+            DISTRIBUTED BY HASH (k1, k2) BUCKETS 32;
+        """
+
+        checkCreateTablePartitionDefaultMediumEq(single_partition_tbl, 1)
+
+
+        sql """
+            CREATE TABLE $multi_partition_tbl
+            (
+                k1 DATE,
+                k2 DECIMAL(10, 2) DEFAULT "10.5",
+                k3 CHAR(10) COMMENT "string column",
+                k4 INT NOT NULL DEFAULT "1" COMMENT "int column"
+            )
+            DUPLICATE KEY(k1, k2)
+            COMMENT "my first table"
+            PARTITION BY RANGE(k1)
+            (
+                PARTITION p1 VALUES LESS THAN ("2020-02-01"),
+                PARTITION p2 VALUES LESS THAN ("2020-03-01"),
+                PARTITION p3 VALUES LESS THAN ("2020-04-01")
+            )
+            DISTRIBUTED BY HASH(k1) BUCKETS 32;
+        """
+        checkCreateTablePartitionDefaultMediumEq(multi_partition_tbl, 3)
+
+        sql """
+            CREATE TABLE $dynamic_partition_tbl
+            (
+                k1 DATE
+            )
+            PARTITION BY RANGE(k1) ()
+            DISTRIBUTED BY HASH(k1)
+            PROPERTIES
+            (
+                "dynamic_partition.enable" = "true",
+                "dynamic_partition.time_unit" = "DAY",
+                "dynamic_partition.start" = "-7",
+                "dynamic_partition.end" = "3",
+                "dynamic_partition.prefix" = "p",
+                "dynamic_partition.buckets" = "32"
+            );
+        """
+        checkCreateTablePartitionDefaultMediumEq(dynamic_partition_tbl, 4)
+    }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]


Reply via email to