This is an automated email from the ASF dual-hosted git repository.

honahx pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/polaris.git


The following commit(s) were added to refs/heads/main by this push:
     new acdccae28 Remove credential flag from 
`StorageAccessProperty.CLIENT_REGION` (#2838)
acdccae28 is described below

commit acdccae288fd22d75ab5bad428ba50eb4c8368e0
Author: Dmitri Bourlatchkov <[email protected]>
AuthorDate: Tue Oct 21 00:39:46 2025 -0400

    Remove credential flag from `StorageAccessProperty.CLIENT_REGION` (#2838)
    
    `CLIENT_REGION` is not a credential value, which is in line with
    Iceberg's `VendedCredentialsProvider` code.
    
    Cf. https://github.com/apache/iceberg/pull/11389
---
 CHANGELOG.md                                       |  2 ++
 .../core/storage/StorageAccessProperty.java        | 26 +++++++++-------------
 .../aws/AwsCredentialsStorageIntegrationTest.java  |  7 +++---
 3 files changed, 16 insertions(+), 19 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9e4fc491e..ee808f8df 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -39,6 +39,8 @@ request adding CHANGELOG notes for breaking (!) changes and 
possibly other sections
 
 ### Changes
 
+- `client.region` is no longer considered a "credential" property (related to 
Iceberg REST Catalog API).
+
 ### Deprecations
 
 ### Fixes
diff --git 
a/polaris-core/src/main/java/org/apache/polaris/core/storage/StorageAccessProperty.java
 
b/polaris-core/src/main/java/org/apache/polaris/core/storage/StorageAccessProperty.java
index 7dc102dc5..31a92c4f4 100644
--- 
a/polaris-core/src/main/java/org/apache/polaris/core/storage/StorageAccessProperty.java
+++ 
b/polaris-core/src/main/java/org/apache/polaris/core/storage/StorageAccessProperty.java
@@ -29,9 +29,9 @@ import org.apache.iceberg.gcp.GCPProperties;
  * storage.
  */
 public enum StorageAccessProperty {
-  AWS_KEY_ID(String.class, "s3.access-key-id", "the aws access key id"),
-  AWS_SECRET_KEY(String.class, "s3.secret-access-key", "the aws access key 
secret"),
-  AWS_TOKEN(String.class, "s3.session-token", "the aws scoped access token"),
+  AWS_KEY_ID(String.class, "s3.access-key-id", "the aws access key id", true),
+  AWS_SECRET_KEY(String.class, "s3.secret-access-key", "the aws access key 
secret", true),
+  AWS_TOKEN(String.class, "s3.session-token", "the aws scoped access token", 
true),
   AWS_SESSION_TOKEN_EXPIRES_AT_MS(
       String.class,
       "s3.session-token-expires-at-ms",
@@ -42,7 +42,10 @@ public enum StorageAccessProperty {
   AWS_PATH_STYLE_ACCESS(
       Boolean.class, "s3.path-style-access", "whether to use S3 path style 
access", false),
   CLIENT_REGION(
-      String.class, "client.region", "region to configure client for making 
requests to AWS"),
+      String.class,
+      "client.region",
+      "region to configure client for making requests to AWS",
+      false),
   AWS_REFRESH_CREDENTIALS_ENDPOINT(
       String.class,
       AwsClientProperties.REFRESH_CREDENTIALS_ENDPOINT,
@@ -50,7 +53,7 @@ public enum StorageAccessProperty {
       false,
       false),
 
-  GCS_ACCESS_TOKEN(String.class, "gcs.oauth2.token", "the gcs scoped access 
token"),
+  GCS_ACCESS_TOKEN(String.class, "gcs.oauth2.token", "the gcs scoped access 
token", true),
   GCS_ACCESS_TOKEN_EXPIRES_AT(
       String.class,
       "gcs.oauth2.token-expires-at",
@@ -66,8 +69,8 @@ public enum StorageAccessProperty {
 
   // Currently not using ACCESS TOKEN as the ResolvingFileIO is using 
ADLSFileIO for azure case and
   // it expects for SAS
-  AZURE_ACCESS_TOKEN(String.class, "", "the azure scoped access token"),
-  AZURE_SAS_TOKEN(String.class, "adls.sas-token.", "an azure shared access 
signature token"),
+  AZURE_ACCESS_TOKEN(String.class, "", "the azure scoped access token", true),
+  AZURE_SAS_TOKEN(String.class, "adls.sas-token.", "an azure shared access 
signature token", true),
   AZURE_REFRESH_CREDENTIALS_ENDPOINT(
       String.class,
       AzureProperties.ADLS_REFRESH_CREDENTIALS_ENDPOINT,
@@ -93,15 +96,6 @@ public enum StorageAccessProperty {
   private final boolean isCredential;
   private final boolean isExpirationTimestamp;
 
-  /*
-  s3.access-key-id`: id for for credentials that provide access to the data in 
S3
-           - `s3.secret-access-key`: secret for credentials that provide 
access to data in S3
-           - `s3.session-token
-   */
-  StorageAccessProperty(Class valueType, String propertyName, String 
description) {
-    this(valueType, propertyName, description, true);
-  }
-
   StorageAccessProperty(
       Class valueType, String propertyName, String description, boolean 
isCredential) {
     this(valueType, propertyName, description, isCredential, false);
diff --git 
a/polaris-core/src/test/java/org/apache/polaris/service/storage/aws/AwsCredentialsStorageIntegrationTest.java
 
b/polaris-core/src/test/java/org/apache/polaris/service/storage/aws/AwsCredentialsStorageIntegrationTest.java
index 7b4b50dec..ac1ba85fd 100644
--- 
a/polaris-core/src/test/java/org/apache/polaris/service/storage/aws/AwsCredentialsStorageIntegrationTest.java
+++ 
b/polaris-core/src/test/java/org/apache/polaris/service/storage/aws/AwsCredentialsStorageIntegrationTest.java
@@ -594,13 +594,14 @@ class AwsCredentialsStorageIntegrationTest extends 
BaseStorageIntegrationTest {
                     Set.of(),
                     Optional.empty());
         assertThat(accessConfig.credentials())
-            .isNotEmpty()
-            
.containsEntry(StorageAccessProperty.CLIENT_REGION.getPropertyName(), 
clientRegion);
+            .containsEntry(StorageAccessProperty.AWS_TOKEN.getPropertyName(), 
"sess")
+            .containsEntry(StorageAccessProperty.AWS_KEY_ID.getPropertyName(), 
"accessKey")
+            
.containsEntry(StorageAccessProperty.AWS_SECRET_KEY.getPropertyName(), 
"secretKey")
+            
.doesNotContainKey(StorageAccessProperty.CLIENT_REGION.getPropertyName());
         break;
       default:
         throw new IllegalArgumentException("Unknown aws partition: " + 
awsPartition);
     }
-    ;
   }
 
   @ParameterizedTest

Reply via email to