This is an automated email from the ASF dual-hosted git repository.
honahx pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/polaris.git
The following commit(s) were added to refs/heads/main by this push:
new d55a18001 [Bug] Fix a bug that caused an error when setting `write.data.path` to be a subdirectory of the table location (#3371)
d55a18001 is described below
commit d55a180018e6281b14c17541e041bbfc4d4d4d5c
Author: Honah (Jonas) J. <[email protected]>
AuthorDate: Wed Jan 7 17:04:17 2026 -0600
[Bug] Fix a bug that caused an error when setting `write.data.path` to be a subdirectory of the table location (#3371)
Currently, updating a table's write.data.path to a subdirectory under the table
location fails the location overlap check. For example:
spark-sql> ALTER TABLE tb1 SET TBLPROPERTIES (
'write.data.path' = '<tableLocation>/alternative_data'
);
org.apache.iceberg.exceptions.ForbiddenException: Forbidden: Unable to
create table at location 's3://<table_location>' because it conflicts with
existing table or namespace at location 's3://<table_location>'
IcebergCatalog.validateNoLocationOverlap(...) constructs a virtual
PolarisEntity for the overlap check, but it did not set the entity name. When
fetching the table's siblings, the check therefore fails to filter out the table
itself and mistakenly concludes (via isChildOf) that the new write.data.path
conflicts with the table's own base location.
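
For illustration only, here is a minimal, self-contained sketch of that failing
filter step. The types, method names, and exception below are hypothetical
stand-ins and only approximate what validateNoLocationOverlap actually does:

    // Hypothetical sketch, not the real Polaris implementation.
    import java.util.List;

    class OverlapCheckSketch {
      // Stand-in for the sibling entities fetched for the parent namespace.
      record Sibling(String name, String baseLocation) {}

      // Simplified containment test; the real isChildOf handles schemes,
      // trailing slashes, and more.
      static boolean isChildOf(String candidate, String parent) {
        return candidate.startsWith(parent.endsWith("/") ? parent : parent + "/");
      }

      static void validateNoOverlap(
          String selfName, String newWriteDataPath, List<Sibling> siblings) {
        for (Sibling sibling : siblings) {
          // Intended behavior: skip the table being updated. When selfName is
          // null (the virtual entity's name was never set), this branch is never
          // taken, so the table's own base location is treated as a sibling.
          if (sibling.name().equals(selfName)) {
            continue;
          }
          if (isChildOf(newWriteDataPath, sibling.baseLocation())) {
            // The real check surfaces this as the ForbiddenException shown above.
            throw new IllegalStateException(
                "write.data.path overlaps sibling location: " + sibling.baseLocation());
          }
        }
      }
    }

Once the virtual entity carries the table's name (the one-line change in
IcebergCatalog.java below), the self-filter matches and a write.data.path under
the table's own location is no longer reported as an overlap.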
This PR fixes the issue by setting the name on the virtual PolarisEntity and
adds a unit test and an integration test.
---
.../it/test/PolarisSparkIntegrationTest.java | 23 +++++++++++
.../service/catalog/iceberg/IcebergCatalog.java | 1 +
.../iceberg/IcebergAllowedLocationTest.java | 47 ++++++++++++++++++++++
3 files changed, 71 insertions(+)
diff --git a/integration-tests/src/main/java/org/apache/polaris/service/it/test/PolarisSparkIntegrationTest.java b/integration-tests/src/main/java/org/apache/polaris/service/it/test/PolarisSparkIntegrationTest.java
index 2bdd10928..c12d59acb 100644
--- a/integration-tests/src/main/java/org/apache/polaris/service/it/test/PolarisSparkIntegrationTest.java
+++ b/integration-tests/src/main/java/org/apache/polaris/service/it/test/PolarisSparkIntegrationTest.java
@@ -147,6 +147,29 @@ public class PolarisSparkIntegrationTest extends PolarisSparkIntegrationTestBase
assertThat(recordCount).isEqualTo(3);
}
+ @Test
+ public void testSetWriteDataPathToSubdirectory() {
+ onSpark("CREATE NAMESPACE ns1");
+ onSpark("USE ns1");
+ onSpark("CREATE TABLE tb1 (col1 integer, col2 string)");
+
+ LoadTableResponse tableResponse = loadTable(catalogName, "ns1", "tb1");
+ String tableLocation = tableResponse.tableMetadata().location();
+ assertThat(tableLocation).isNotNull();
+
+ // Set a custom write data path to a subdirectory within the table location
+ String writeDataPath = tableLocation + "/alternative_data";
+ onSpark("ALTER TABLE tb1 SET TBLPROPERTIES ('write.data.path' = '" +
writeDataPath + "')");
+
+ tableResponse = loadTable(catalogName, "ns1", "tb1");
+ assertThat(tableResponse.tableMetadata().properties())
+ .containsEntry("write.data.path", writeDataPath);
+
+ onSpark("INSERT INTO tb1 VALUES (1, 'a'), (2, 'b'), (3, 'c')");
+ long recordCount = onSpark("SELECT * FROM tb1").count();
+ assertThat(recordCount).isEqualTo(3);
+ }
+
private LoadTableResponse loadTable(String catalog, String namespace, String table) {
try (Response response =
catalogApi
diff --git a/runtime/service/src/main/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalog.java b/runtime/service/src/main/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalog.java
index 745a9ed4f..bda38f009 100644
--- a/runtime/service/src/main/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalog.java
+++ b/runtime/service/src/main/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalog.java
@@ -992,6 +992,7 @@ public class IcebergCatalog extends BaseMetastoreViewCatalog
IcebergTableLikeEntity virtualEntity =
IcebergTableLikeEntity.of(
new PolarisEntity.Builder()
+ .setName(identifier.name())
.setType(PolarisEntityType.TABLE_LIKE)
.setSubType(PolarisEntitySubType.ICEBERG_TABLE)
.setParentId(lastNamespace.getId())
diff --git a/runtime/service/src/test/java/org/apache/polaris/service/catalog/iceberg/IcebergAllowedLocationTest.java b/runtime/service/src/test/java/org/apache/polaris/service/catalog/iceberg/IcebergAllowedLocationTest.java
index 09ae01739..825131494 100644
--- a/runtime/service/src/test/java/org/apache/polaris/service/catalog/iceberg/IcebergAllowedLocationTest.java
+++ b/runtime/service/src/test/java/org/apache/polaris/service/catalog/iceberg/IcebergAllowedLocationTest.java
@@ -304,6 +304,53 @@ public class IcebergAllowedLocationTest {
.build();
}
+ @Test
+ void testSetWriteDataPathToSubdirectoryUnderTableLocation(@TempDir Path tmpDir) {
+ var services = getTestServices();
+ var tableName = getTableName();
+ var tableId = TableIdentifier.of(namespace, tableName);
+
+ var catalogLocation = tmpDir.resolve(catalog).toAbsolutePath().toUri().toString();
+ var namespaceLocation = catalogLocation + "/" + namespace;
+
+ createCatalog(services, Map.of(), catalogLocation, List.of(catalogLocation));
+ createNamespace(services, namespaceLocation);
+
+ var createTableRequest =
+ CreateTableRequest.builder().withName(tableName).withSchema(SCHEMA).build();
+
+ var createResponse =
+ services
+ .restApi()
+ .createTable(
+ catalog,
+ namespace,
+ createTableRequest,
+ null,
+ services.realmContext(),
+ services.securityContext());
+ assertThat(createResponse.getStatus()).isEqualTo(Response.Status.OK.getStatusCode());
+ var tableLocation = namespaceLocation + "/" + tableName;
+
+ // Update the table to set write.data.path to a subdirectory under the table's location
+ String writeDataPath = tableLocation + "/alternative_data";
+ Map<String, String> updatedProperties = new HashMap<>();
+ updatedProperties.put("write.data.path", writeDataPath);
+
+ var updateRequest =
+ UpdateTableRequest.create(
+ tableId, List.of(), List.of(new MetadataUpdate.SetProperties(updatedProperties)));
+
+ var updateResponse =
+ services
+ .catalogAdapter()
+ .newHandlerWrapper(services.securityContext(), catalog)
+ .updateTable(tableId, updateRequest);
+
+ assertThat(updateResponse.tableMetadata().properties())
+ .containsEntry("write.data.path", writeDataPath);
+ }
+
private void createCatalog(
TestServices services,
Map<String, String> catalogConfig,