wuchong commented on code in PR #2126:
URL: https://github.com/apache/fluss/pull/2126#discussion_r2637646467
##########
fluss-server/src/main/java/org/apache/fluss/server/coordinator/MetadataManager.java:
##########
@@ -709,14 +724,19 @@ public void createPartition(
int bucketCount =
partitionAssignment.getBucketAssignments().size();
// currently, every partition has the same bucket count
int totalBuckets = bucketCount * (partitionNumber + 1);
- if (totalBuckets > maxBucketNum) {
+ int maxBucketNumOfDb =
+ DatabaseLimitResolver.resolveMaxBucketForDb(
+ maxBucketNumOfCluster,
+ dynamicConfigManager.describeConfigs(),
+ tablePath.getDatabaseName());
+ if (totalBuckets > maxBucketNumOfDb) {
throw new TooManyBucketsException(
String.format(
- "Adding partition '%s' would result in %d
total buckets for table %s, exceeding the maximum of %d buckets.",
+ "Adding partition '%s' would result in %d
total buckets for table %s, exceeding the database-level maximum of %d
buckets.",
partition.getPartitionName(),
totalBuckets,
tablePath,
- maxBucketNum));
+ maxBucketNumOfDb));
Review Comment:
It seems we simply drop the support of `maxBucketNumOfCluster` / `maxBucketNum`. This is backward incompatible; we shouldn't remove them.
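For illustration, a minimal sketch of how both limits could coexist, where the database-level override only tightens the cluster-wide cap so the existing `max.bucket.num` behaviour is preserved (the config key format and fallback semantics here are assumptions read off this diff, not the confirmed `DatabaseLimitResolver` contract):

```java
import java.util.Map;

/**
 * Sketch only: resolves an effective bucket limit that keeps the cluster-wide cap
 * and lets a per-database setting tighten it, never lift it.
 */
final class BucketLimitSketch {

    /** Returns the stricter of the cluster cap and an optional database override. */
    static int effectiveMaxBuckets(
            int maxBucketNumOfCluster, Map<String, String> dynamicConfigs, String database) {
        // Key format assumed from the tests in this PR.
        String key = "database.limits." + database + ".max.bucket.num";
        String override = dynamicConfigs.get(key);
        if (override == null) {
            return maxBucketNumOfCluster;
        }
        // A database override can only tighten the cluster-wide limit.
        return Math.min(maxBucketNumOfCluster, Integer.parseInt(override));
    }

    static void checkTotalBuckets(int totalBuckets, int effectiveMax, String tablePath) {
        if (totalBuckets > effectiveMax) {
            // The real code path would throw TooManyBucketsException; kept generic here.
            throw new IllegalStateException(
                    String.format(
                            "Table %s would have %d total buckets, exceeding the limit of %d.",
                            tablePath, totalBuckets, effectiveMax));
        }
    }
}
```

With that resolution, the existing cluster-level check (and its error message) stays intact, and the database limit becomes an additional, stricter constraint rather than a replacement.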
##########
fluss-client/src/test/java/org/apache/fluss/client/admin/FlussAdminITCase.java:
##########
@@ -1403,7 +1403,169 @@ public void testBucketLimitForNonPartitionedTable() throws Exception {
assertThatThrownBy(() -> admin.createTable(tablePath, nonPartitionedTable, false).get())
.cause()
.isInstanceOf(TooManyBucketsException.class)
- .hasMessageContaining("exceeds the maximum limit");
+ .hasMessageContaining("exceeds the database-level maximum
limit");
+ }
+
+ /** Database-level bucket limit should be enforced for non-partitioned tables. */
+ @Test
+ void testDatabaseBucketLimitForNonPartitionedTable() throws Exception {
+ String dbName = "db_bucket_limit_np";
+ admin.createDatabase(dbName, DatabaseDescriptor.EMPTY, true).get();
+
+ admin.alterClusterConfigs(
+ Collections.singletonList(
+ new AlterConfig(
+ "database.limits." + dbName +
".max.bucket.num",
+ "12",
+ AlterConfigOpType.SET)))
+ .get();
+
+ TablePath tablePath = TablePath.of(dbName, "t_np_bucket_limit");
+ TableDescriptor tooManyBuckets =
+ TableDescriptor.builder()
+ .schema(
+ Schema.newBuilder()
+ .column("id", DataTypes.STRING())
+ .column("name", DataTypes.STRING())
+ .build())
+ .distributedBy(20, "id")
+ .build();
+
+ assertThatThrownBy(() -> admin.createTable(tablePath, tooManyBuckets, false).get())
+ .cause()
+ .isInstanceOf(TooManyBucketsException.class);
Review Comment:
Assert the message as well; it should make clear to the user that the given database has exceeded the allowed number of buckets.
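For example (a sketch; the quoted fragments assume the final wording of the exception message and reuse the `dbName` variable from this test):

```java
assertThatThrownBy(() -> admin.createTable(tablePath, tooManyBuckets, false).get())
        .cause()
        .isInstanceOf(TooManyBucketsException.class)
        // Both fragments are assumptions about the final error text.
        .hasMessageContaining("exceeds the database-level maximum")
        .hasMessageContaining(dbName);
```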
##########
fluss-client/src/test/java/org/apache/fluss/client/admin/FlussAdminITCase.java:
##########
@@ -1403,7 +1403,169 @@ public void testBucketLimitForNonPartitionedTable() throws Exception {
assertThatThrownBy(() -> admin.createTable(tablePath, nonPartitionedTable, false).get())
.cause()
.isInstanceOf(TooManyBucketsException.class)
- .hasMessageContaining("exceeds the maximum limit");
+ .hasMessageContaining("exceeds the database-level maximum
limit");
+ }
+
+ /** Database-level bucket limit should be enforced for non-partitioned tables. */
+ @Test
+ void testDatabaseBucketLimitForNonPartitionedTable() throws Exception {
+ String dbName = "db_bucket_limit_np";
+ admin.createDatabase(dbName, DatabaseDescriptor.EMPTY, true).get();
+
+ admin.alterClusterConfigs(
+ Collections.singletonList(
+ new AlterConfig(
+ "database.limits." + dbName +
".max.bucket.num",
+ "12",
+ AlterConfigOpType.SET)))
+ .get();
+
+ TablePath tablePath = TablePath.of(dbName, "t_np_bucket_limit");
+ TableDescriptor tooManyBuckets =
+ TableDescriptor.builder()
+ .schema(
+ Schema.newBuilder()
+ .column("id", DataTypes.STRING())
+ .column("name", DataTypes.STRING())
+ .build())
+ .distributedBy(20, "id")
+ .build();
+
+ assertThatThrownBy(() -> admin.createTable(tablePath, tooManyBuckets, false).get())
+ .cause()
+ .isInstanceOf(TooManyBucketsException.class);
+
+ TableDescriptor ok =
+ TableDescriptor.builder()
+ .schema(
+ Schema.newBuilder()
+ .column("id", DataTypes.STRING())
+ .column("name", DataTypes.STRING())
+ .build())
+ .distributedBy(12, "id")
+ .build();
+ admin.createTable(tablePath, ok, true).get();
+ }
+
+ /** Database-level bucket limit should cap total buckets for partitioned tables. */
+ @Test
+ void testDatabaseBucketLimitForPartitionedTable() throws Exception {
+ String dbName = "db_bucket_limit_part";
+ admin.createDatabase(dbName, DatabaseDescriptor.EMPTY, true).get();
+
+ admin.alterClusterConfigs(
+ Collections.singletonList(
+ new AlterConfig(
+ "database.limits." + dbName +
".max.bucket.num",
+ "25",
+ AlterConfigOpType.SET)))
+ .get();
+
+ TablePath tablePath = TablePath.of(dbName, "t_part_bucket_limit");
+ TableDescriptor td =
+ TableDescriptor.builder()
+ .schema(
+ Schema.newBuilder()
+ .column("id", DataTypes.STRING())
+ .column("age", DataTypes.STRING())
+ .build())
+ .distributedBy(10, "id")
+ .partitionedBy("age")
+ .build();
+ admin.createTable(tablePath, td, true).get();
+
+ admin.createPartition(tablePath, newPartitionSpec("age", "1"),
false).get();
+ admin.createPartition(tablePath, newPartitionSpec("age", "2"),
false).get();
+
+ assertThatThrownBy(
+ () ->
+ admin.createPartition(
+ tablePath, newPartitionSpec("age", "3"), false)
+ .get())
+ .cause()
+ .isInstanceOf(TooManyBucketsException.class);
Review Comment:
ditto
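For example, along these lines (again, the quoted message fragments are assumptions about the final wording):

```java
assertThatThrownBy(
                () -> admin.createPartition(tablePath, newPartitionSpec("age", "3"), false).get())
        .cause()
        .isInstanceOf(TooManyBucketsException.class)
        // The error should point the user at the database-level bucket limit.
        .hasMessageContaining("database-level maximum")
        .hasMessageContaining(dbName);
```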