yihua commented on code in PR #18295:
URL: https://github.com/apache/hudi/pull/18295#discussion_r3035610649
##########
hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieMetadataWriteUtils.java:
##########
@@ -362,6 +387,99 @@ public static HoodieWriteConfig createMetadataWriteConfig(
return metadataWriteConfig;
}
+ /**
+ * Build the lock config for the metadata table by extracting lock-specific
properties from the
+ * data table's write config. This avoids copying all properties (which
would overwrite MDT-specific
+ * settings like base path and auto-clean).
+ */
+ private static HoodieLockConfig buildMetadataLockConfig(HoodieWriteConfig
writeConfig) {
+ TypedProperties props = writeConfig.getProps();
+ HoodieLockConfig.Builder lockConfigBuilder = HoodieLockConfig.newBuilder()
+ .withClientNumRetries(Integer.parseInt(props.getString(
+ HoodieLockConfig.LOCK_ACQUIRE_CLIENT_NUM_RETRIES.key(),
+ HoodieLockConfig.LOCK_ACQUIRE_CLIENT_NUM_RETRIES.defaultValue())))
+ .withClientRetryWaitTimeInMillis(Long.parseLong(props.getString(
+
HoodieLockConfig.LOCK_ACQUIRE_CLIENT_RETRY_WAIT_TIME_IN_MILLIS.key(),
+
HoodieLockConfig.LOCK_ACQUIRE_CLIENT_RETRY_WAIT_TIME_IN_MILLIS.defaultValue())))
+ .withLockWaitTimeInMillis(Long.valueOf(props.getString(
+ HoodieLockConfig.LOCK_ACQUIRE_WAIT_TIMEOUT_MS.key(),
+
String.valueOf(HoodieLockConfig.LOCK_ACQUIRE_WAIT_TIMEOUT_MS.defaultValue()))))
+ .withNumRetries(Integer.parseInt(props.getString(
+ HoodieLockConfig.LOCK_ACQUIRE_NUM_RETRIES.key(),
+ HoodieLockConfig.LOCK_ACQUIRE_NUM_RETRIES.defaultValue())))
+ .withRetryWaitTimeInMillis(Long.parseLong(props.getString(
+ HoodieLockConfig.LOCK_ACQUIRE_RETRY_WAIT_TIME_IN_MILLIS.key(),
+
HoodieLockConfig.LOCK_ACQUIRE_RETRY_WAIT_TIME_IN_MILLIS.defaultValue())))
+ .withRetryMaxWaitTimeInMillis(Long.parseLong(props.getString(
+ HoodieLockConfig.LOCK_ACQUIRE_RETRY_MAX_WAIT_TIME_IN_MILLIS.key(),
+
HoodieLockConfig.LOCK_ACQUIRE_RETRY_MAX_WAIT_TIME_IN_MILLIS.defaultValue())))
+ .withHeartbeatIntervalInMillis(Long.valueOf(props.getString(
+ HoodieLockConfig.LOCK_HEARTBEAT_INTERVAL_MS.key(),
+
String.valueOf(HoodieLockConfig.LOCK_HEARTBEAT_INTERVAL_MS.defaultValue()))));
+
+ String lockProviderClass = writeConfig.getLockProviderClass();
+ if (lockProviderClass == null) {
Review Comment:
🤖 It does throw — `checkState(lockProviderClass != null, "Lock provider
class must be set...")` fires before `getLockConfigForBuiltInLockProvider` is
ever called, so null is an explicit error condition that results in
`IllegalStateException`.
##########
hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieMetadataWriteUtils.java:
##########
@@ -362,6 +387,99 @@ public static HoodieWriteConfig createMetadataWriteConfig(
return metadataWriteConfig;
}
+ /**
+ * Build the lock config for the metadata table by extracting lock-specific
properties from the
+ * data table's write config. This avoids copying all properties (which
would overwrite MDT-specific
+ * settings like base path and auto-clean).
+ */
+ private static HoodieLockConfig buildMetadataLockConfig(HoodieWriteConfig
writeConfig) {
+ TypedProperties props = writeConfig.getProps();
+ HoodieLockConfig.Builder lockConfigBuilder = HoodieLockConfig.newBuilder()
+ .withClientNumRetries(Integer.parseInt(props.getString(
+ HoodieLockConfig.LOCK_ACQUIRE_CLIENT_NUM_RETRIES.key(),
+ HoodieLockConfig.LOCK_ACQUIRE_CLIENT_NUM_RETRIES.defaultValue())))
+ .withClientRetryWaitTimeInMillis(Long.parseLong(props.getString(
+
HoodieLockConfig.LOCK_ACQUIRE_CLIENT_RETRY_WAIT_TIME_IN_MILLIS.key(),
+
HoodieLockConfig.LOCK_ACQUIRE_CLIENT_RETRY_WAIT_TIME_IN_MILLIS.defaultValue())))
+ .withLockWaitTimeInMillis(Long.valueOf(props.getString(
+ HoodieLockConfig.LOCK_ACQUIRE_WAIT_TIMEOUT_MS.key(),
+
String.valueOf(HoodieLockConfig.LOCK_ACQUIRE_WAIT_TIMEOUT_MS.defaultValue()))))
+ .withNumRetries(Integer.parseInt(props.getString(
+ HoodieLockConfig.LOCK_ACQUIRE_NUM_RETRIES.key(),
+ HoodieLockConfig.LOCK_ACQUIRE_NUM_RETRIES.defaultValue())))
+ .withRetryWaitTimeInMillis(Long.parseLong(props.getString(
+ HoodieLockConfig.LOCK_ACQUIRE_RETRY_WAIT_TIME_IN_MILLIS.key(),
+
HoodieLockConfig.LOCK_ACQUIRE_RETRY_WAIT_TIME_IN_MILLIS.defaultValue())))
+ .withRetryMaxWaitTimeInMillis(Long.parseLong(props.getString(
+ HoodieLockConfig.LOCK_ACQUIRE_RETRY_MAX_WAIT_TIME_IN_MILLIS.key(),
+
HoodieLockConfig.LOCK_ACQUIRE_RETRY_MAX_WAIT_TIME_IN_MILLIS.defaultValue())))
+ .withHeartbeatIntervalInMillis(Long.valueOf(props.getString(
+ HoodieLockConfig.LOCK_HEARTBEAT_INTERVAL_MS.key(),
+
String.valueOf(HoodieLockConfig.LOCK_HEARTBEAT_INTERVAL_MS.defaultValue()))));
+
+ String lockProviderClass = writeConfig.getLockProviderClass();
+ if (lockProviderClass == null) {
+ return lockConfigBuilder.build();
+ }
+
+ Properties providerProp = new Properties();
+ providerProp.setProperty(HoodieLockConfig.LOCK_PROVIDER_CLASS_NAME.key(),
lockProviderClass);
+ lockConfigBuilder.fromProperties(providerProp);
+
+ if (ZookeeperBasedLockProvider.class.getName().equals(lockProviderClass)) {
+ if (props.containsKey(HoodieLockConfig.ZK_CONNECT_URL.key())) {
+
lockConfigBuilder.withZkQuorum(props.getString(HoodieLockConfig.ZK_CONNECT_URL.key()));
+ }
+ if (props.containsKey(HoodieLockConfig.ZK_BASE_PATH.key())) {
+
lockConfigBuilder.withZkBasePath(props.getString(HoodieLockConfig.ZK_BASE_PATH.key()));
+ }
+ if (props.containsKey(HoodieLockConfig.ZK_LOCK_KEY.key())) {
+
lockConfigBuilder.withZkLockKey(props.getString(HoodieLockConfig.ZK_LOCK_KEY.key()));
+ }
+ if (props.containsKey(HoodieLockConfig.ZK_PORT.key())) {
+
lockConfigBuilder.withZkPort(props.getString(HoodieLockConfig.ZK_PORT.key()));
+ }
+ if (props.containsKey(HoodieLockConfig.ZK_SESSION_TIMEOUT_MS.key())) {
+ lockConfigBuilder.withZkSessionTimeoutInMs(
+
Long.valueOf(props.getString(HoodieLockConfig.ZK_SESSION_TIMEOUT_MS.key())));
+ }
+ if (props.containsKey(HoodieLockConfig.ZK_CONNECTION_TIMEOUT_MS.key())) {
+ lockConfigBuilder.withZkConnectionTimeoutInMs(
+
Long.valueOf(props.getString(HoodieLockConfig.ZK_CONNECTION_TIMEOUT_MS.key())));
+ }
+ } else if
(FileSystemBasedLockProvider.class.getName().equals(lockProviderClass)) {
+ if (props.containsKey(HoodieLockConfig.FILESYSTEM_LOCK_PATH.key())) {
+
lockConfigBuilder.withFileSystemLockPath(props.getString(HoodieLockConfig.FILESYSTEM_LOCK_PATH.key()));
+ }
+ if (props.containsKey(HoodieLockConfig.FILESYSTEM_LOCK_EXPIRE.key())) {
+ lockConfigBuilder.withFileSystemLockExpire(
+
Integer.parseInt(props.getString(HoodieLockConfig.FILESYSTEM_LOCK_EXPIRE.key())));
+ }
+ } else if (lockProviderClass.contains("HiveMetastoreBasedLockProvider")) {
+ if (props.containsKey(HoodieLockConfig.HIVE_DATABASE_NAME.key())) {
+
lockConfigBuilder.withHiveDatabaseName(props.getString(HoodieLockConfig.HIVE_DATABASE_NAME.key()));
+ }
+ if (props.containsKey(HoodieLockConfig.HIVE_TABLE_NAME.key())) {
+
lockConfigBuilder.withHiveTableName(props.getString(HoodieLockConfig.HIVE_TABLE_NAME.key()));
+ }
+ if (props.containsKey(HoodieLockConfig.HIVE_METASTORE_URI.key())) {
+
lockConfigBuilder.withHiveMetastoreURIs(props.getString(HoodieLockConfig.HIVE_METASTORE_URI.key()));
+ }
+ } else {
+ // For any custom lock provider, pass through all lock-prefixed
properties
+ // so provider-specific configs are preserved.
Review Comment:
🤖 Alternate keys are handled by the prefix-based approach —
`copyPropsWithPrefix` iterates all data table props and copies anything that
starts with the provider's dedicated prefix (e.g.
`hoodie.write.lock.dynamodb.`, `hoodie.write.lock.storage.`), so any
non-standard keys under that prefix are carried over automatically. The
per-provider dispatch is still needed to select the right prefix.
##########
hudi-common/src/main/java/org/apache/hudi/common/config/HoodieMetadataConfig.java:
##########
@@ -81,6 +85,31 @@ public final class HoodieMetadataConfig extends HoodieConfig
{
+ "in streaming manner rather than two disjoint writes. By default "
+ "streaming writes to metadata table is enabled for SPARK engine
for incremental operations and disabled for all other cases.");
+ public static final ConfigProperty<String> METADATA_WRITE_CONCURRENCY_MODE =
ConfigProperty
+ .key(METADATA_PREFIX + ".write.concurrency.mode")
+ .defaultValue(WriteConcurrencyMode.SINGLE_WRITER.name())
+ .markAdvanced()
+ .withDocumentation("Change this to OPTIMISTIC_CONCURRENCY_CONTROL when
MDT operations are being performed "
+ + "from an external concurrent writer (such as a table service
platform) so that appropriate locks are taken.");
+
+ public static final ConfigProperty<Boolean> TABLE_SERVICE_MANAGER_ENABLED =
ConfigProperty
+ .key(METADATA_PREFIX + ".table.service.manager.enabled")
+ .defaultValue(false)
+ .markAdvanced()
+ .withDocumentation("If true, delegate specified table service actions on
the metadata table to the table service manager "
+ + "instead of executing them inline. This prevents the current
writer from executing compaction/logcompaction "
Review Comment:
🤖 When `TABLE_SERVICE_MANAGER_ENABLED=true` but
`TABLE_SERVICE_MANAGER_ACTIONS` is left at its default of `""`,
`shouldDelegateToTableServiceManager` will always return `false` for every
action type — TSM delegation is silently a no-op. Could you add a validation
(either in the builder or in `createMetadataWriteConfig`) that rejects the
combination of `enabled=true` with an empty actions list? Without that guard, a
user who enables TSM but forgets to specify actions will see compaction
executed inline, with no indication that anything is misconfigured.
##########
hudi-common/src/test/java/org/apache/hudi/common/config/TestHoodieMetadataConfig.java:
##########
@@ -171,4 +174,80 @@ void testFailOnTableServiceFailures() {
assertEquals("hoodie.metadata.write.fail.on.table.service.failures",
HoodieMetadataConfig.FAIL_ON_TABLE_SERVICE_FAILURES.key());
}
+
Review Comment:
🤖 The PR description highlights a validation that rejects streaming writes
combined with multi-writer concurrency mode — could we add a test case for that
conflict here? It's the one new validation path that doesn't appear to be
covered, so a regression there would go undetected.
##########
hudi-common/src/main/java/org/apache/hudi/common/config/HoodieMetadataConfig.java:
##########
@@ -81,6 +82,28 @@ public final class HoodieMetadataConfig extends HoodieConfig
{
+ "in streaming manner rather than two disjoint writes. By default "
+ "streaming writes to metadata table is enabled for SPARK engine
for incremental operations and disabled for all other cases.");
+ public static final ConfigProperty<String> METADATA_WRITE_CONCURRENCY_MODE =
ConfigProperty
+ .key(METADATA_PREFIX + ".write.concurrency.mode")
Review Comment:
🤖 The PR does add `TestHoodieMetadataConfig.java` with 79 new lines covering
the new configs — looks like that's already been addressed.
##########
hudi-common/src/main/java/org/apache/hudi/common/config/HoodieMetadataConfig.java:
##########
@@ -1324,6 +1383,23 @@ private boolean
getDefaultSecondaryIndexEnable(EngineType engineType) {
throw new HoodieNotSupportedException("Unsupported engine " +
engineType);
}
}
Review Comment:
🤖 `validateTableServiceManagerActions` is only called inside the
programmatic builder (`withTableServiceManagerActions`). When configs are
loaded from a properties file — e.g.
`HoodieMetadataConfig.newBuilder().fromProperties(props).build()` — this path
is never exercised, so an invalid action like `"clustering"` would be accepted
silently and only fail later when `ActionType.valueOf()` is called inside
`isEnabledAndActionSupported`. Could you move the validation into `build()` (or
into a `validate()` call there) so it runs regardless of how the config is
populated?
##########
hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadataWriter.java:
##########
@@ -156,7 +158,7 @@
*
* @param <I> Type of input for the write client
*/
-public abstract class HoodieBackedTableMetadataWriter<I, O> implements
HoodieTableMetadataWriter<I, O> {
+public abstract class HoodieBackedTableMetadataWriter<I, O> implements
HoodieTableMetadataWriter<I, O>, RunsTableService {
Review Comment:
🤖 Having `HoodieBackedTableMetadataWriter` implement `RunsTableService`
solely to get access to `shouldDelegateToTableServiceManager` feels like it
conflates the metadata-writer and write-client roles. If `RunsTableService`
ever gains new abstract methods, every subclass of this abstract class would be
forced to implement them even if they're meaningless in a metadata-writer
context. Would it be cleaner to expose `shouldDelegateToTableServiceManager` as
a static utility (e.g. on `HoodieTableServiceManagerConfig` itself) and call it
directly, keeping the interface reserved for actual write clients?
##########
hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieMetadataWriteUtils.java:
##########
@@ -362,6 +387,99 @@ public static HoodieWriteConfig createMetadataWriteConfig(
return metadataWriteConfig;
}
+ /**
+ * Build the lock config for the metadata table by extracting lock-specific
properties from the
+ * data table's write config. This avoids copying all properties (which
would overwrite MDT-specific
+ * settings like base path and auto-clean).
+ */
+ private static HoodieLockConfig buildMetadataLockConfig(HoodieWriteConfig
writeConfig) {
+ TypedProperties props = writeConfig.getProps();
+ HoodieLockConfig.Builder lockConfigBuilder = HoodieLockConfig.newBuilder()
+ .withClientNumRetries(Integer.parseInt(props.getString(
+ HoodieLockConfig.LOCK_ACQUIRE_CLIENT_NUM_RETRIES.key(),
+ HoodieLockConfig.LOCK_ACQUIRE_CLIENT_NUM_RETRIES.defaultValue())))
+ .withClientRetryWaitTimeInMillis(Long.parseLong(props.getString(
+
HoodieLockConfig.LOCK_ACQUIRE_CLIENT_RETRY_WAIT_TIME_IN_MILLIS.key(),
+
HoodieLockConfig.LOCK_ACQUIRE_CLIENT_RETRY_WAIT_TIME_IN_MILLIS.defaultValue())))
+ .withLockWaitTimeInMillis(Long.valueOf(props.getString(
+ HoodieLockConfig.LOCK_ACQUIRE_WAIT_TIMEOUT_MS.key(),
+
String.valueOf(HoodieLockConfig.LOCK_ACQUIRE_WAIT_TIMEOUT_MS.defaultValue()))))
+ .withNumRetries(Integer.parseInt(props.getString(
+ HoodieLockConfig.LOCK_ACQUIRE_NUM_RETRIES.key(),
+ HoodieLockConfig.LOCK_ACQUIRE_NUM_RETRIES.defaultValue())))
+ .withRetryWaitTimeInMillis(Long.parseLong(props.getString(
+ HoodieLockConfig.LOCK_ACQUIRE_RETRY_WAIT_TIME_IN_MILLIS.key(),
+
HoodieLockConfig.LOCK_ACQUIRE_RETRY_WAIT_TIME_IN_MILLIS.defaultValue())))
+ .withRetryMaxWaitTimeInMillis(Long.parseLong(props.getString(
+ HoodieLockConfig.LOCK_ACQUIRE_RETRY_MAX_WAIT_TIME_IN_MILLIS.key(),
+
HoodieLockConfig.LOCK_ACQUIRE_RETRY_MAX_WAIT_TIME_IN_MILLIS.defaultValue())))
+ .withHeartbeatIntervalInMillis(Long.valueOf(props.getString(
+ HoodieLockConfig.LOCK_HEARTBEAT_INTERVAL_MS.key(),
+
String.valueOf(HoodieLockConfig.LOCK_HEARTBEAT_INTERVAL_MS.defaultValue()))));
+
+ String lockProviderClass = writeConfig.getLockProviderClass();
+ if (lockProviderClass == null) {
+ return lockConfigBuilder.build();
+ }
+
+ Properties providerProp = new Properties();
+ providerProp.setProperty(HoodieLockConfig.LOCK_PROVIDER_CLASS_NAME.key(),
lockProviderClass);
+ lockConfigBuilder.fromProperties(providerProp);
+
+ if (ZookeeperBasedLockProvider.class.getName().equals(lockProviderClass)) {
+ if (props.containsKey(HoodieLockConfig.ZK_CONNECT_URL.key())) {
+
lockConfigBuilder.withZkQuorum(props.getString(HoodieLockConfig.ZK_CONNECT_URL.key()));
+ }
+ if (props.containsKey(HoodieLockConfig.ZK_BASE_PATH.key())) {
+
lockConfigBuilder.withZkBasePath(props.getString(HoodieLockConfig.ZK_BASE_PATH.key()));
+ }
+ if (props.containsKey(HoodieLockConfig.ZK_LOCK_KEY.key())) {
+
lockConfigBuilder.withZkLockKey(props.getString(HoodieLockConfig.ZK_LOCK_KEY.key()));
+ }
+ if (props.containsKey(HoodieLockConfig.ZK_PORT.key())) {
+
lockConfigBuilder.withZkPort(props.getString(HoodieLockConfig.ZK_PORT.key()));
+ }
+ if (props.containsKey(HoodieLockConfig.ZK_SESSION_TIMEOUT_MS.key())) {
+ lockConfigBuilder.withZkSessionTimeoutInMs(
+
Long.valueOf(props.getString(HoodieLockConfig.ZK_SESSION_TIMEOUT_MS.key())));
+ }
+ if (props.containsKey(HoodieLockConfig.ZK_CONNECTION_TIMEOUT_MS.key())) {
+ lockConfigBuilder.withZkConnectionTimeoutInMs(
+
Long.valueOf(props.getString(HoodieLockConfig.ZK_CONNECTION_TIMEOUT_MS.key())));
+ }
+ } else if
(FileSystemBasedLockProvider.class.getName().equals(lockProviderClass)) {
+ if (props.containsKey(HoodieLockConfig.FILESYSTEM_LOCK_PATH.key())) {
+
lockConfigBuilder.withFileSystemLockPath(props.getString(HoodieLockConfig.FILESYSTEM_LOCK_PATH.key()));
+ }
+ if (props.containsKey(HoodieLockConfig.FILESYSTEM_LOCK_EXPIRE.key())) {
+ lockConfigBuilder.withFileSystemLockExpire(
+
Integer.parseInt(props.getString(HoodieLockConfig.FILESYSTEM_LOCK_EXPIRE.key())));
+ }
+ } else if (lockProviderClass.contains("HiveMetastoreBasedLockProvider")) {
+ if (props.containsKey(HoodieLockConfig.HIVE_DATABASE_NAME.key())) {
+
lockConfigBuilder.withHiveDatabaseName(props.getString(HoodieLockConfig.HIVE_DATABASE_NAME.key()));
+ }
+ if (props.containsKey(HoodieLockConfig.HIVE_TABLE_NAME.key())) {
+
lockConfigBuilder.withHiveTableName(props.getString(HoodieLockConfig.HIVE_TABLE_NAME.key()));
+ }
+ if (props.containsKey(HoodieLockConfig.HIVE_METASTORE_URI.key())) {
+
lockConfigBuilder.withHiveMetastoreURIs(props.getString(HoodieLockConfig.HIVE_METASTORE_URI.key()));
+ }
+ } else {
+ // For any custom lock provider, pass through all lock-prefixed
properties
+ // so provider-specific configs are preserved.
Review Comment:
🤖 Both are actually accounted for in the current implementation —
`DynamoDBBasedLockProvider` (and the implicit partition key variant) are
referenced via the string constants `DYNAMODB_BASED_LOCK_PROVIDER_CLASS` /
`DYNAMODB_BASED_IMPLICIT_PARTITION_KEY_LOCK_PROVIDER_CLASS` (since the class
lives in `hudi-aws` and can't be imported directly), and
`StorageBasedLockProvider` is handled via direct class reference.
##########
hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieLockConfig.java:
##########
@@ -249,6 +266,65 @@ public static HoodieLockConfig.Builder newBuilder() {
return new HoodieLockConfig.Builder();
}
+ /**
+ * Build a {@link HoodieLockConfig} by copying lock-related configs from the
given write config
+ * based on the configured lock provider. Only built-in lock providers are
supported.
+ *
+ * @param lockProviderClass the lock provider class name
+ * @param writeConfig the write config to copy lock properties from
+ * @return a new HoodieLockConfig with the relevant lock properties
+ * @throws HoodieException if the lock provider is not a built-in provider
+ */
+ public static HoodieLockConfig getLockConfigForBuiltInLockProvider(String
lockProviderClass, HoodieWriteConfig writeConfig) {
+ TypedProperties dataProps = writeConfig.getProps();
+ Properties lockProps = new Properties();
+ lockProps.put(LOCK_PROVIDER_CLASS_NAME.key(), lockProviderClass);
+
+ // Common lock configs used by all providers
+ lockProps.put(LOCK_ACQUIRE_RETRY_WAIT_TIME_IN_MILLIS.key(),
+
writeConfig.getStringOrDefault(LOCK_ACQUIRE_RETRY_WAIT_TIME_IN_MILLIS));
+ lockProps.put(LOCK_ACQUIRE_RETRY_MAX_WAIT_TIME_IN_MILLIS.key(),
+
writeConfig.getStringOrDefault(LOCK_ACQUIRE_RETRY_MAX_WAIT_TIME_IN_MILLIS));
+ lockProps.put(LOCK_ACQUIRE_CLIENT_RETRY_WAIT_TIME_IN_MILLIS.key(),
+
writeConfig.getStringOrDefault(LOCK_ACQUIRE_CLIENT_RETRY_WAIT_TIME_IN_MILLIS));
+ lockProps.put(LOCK_ACQUIRE_NUM_RETRIES.key(),
+ writeConfig.getStringOrDefault(LOCK_ACQUIRE_NUM_RETRIES));
+ lockProps.put(LOCK_ACQUIRE_CLIENT_NUM_RETRIES.key(),
+ writeConfig.getStringOrDefault(LOCK_ACQUIRE_CLIENT_NUM_RETRIES));
+ lockProps.put(LOCK_ACQUIRE_WAIT_TIMEOUT_MS.key(),
+
String.valueOf(writeConfig.getIntOrDefault(LOCK_ACQUIRE_WAIT_TIMEOUT_MS)));
+ lockProps.put(LOCK_HEARTBEAT_INTERVAL_MS.key(),
+
String.valueOf(writeConfig.getIntOrDefault(LOCK_HEARTBEAT_INTERVAL_MS)));
+
+ // Provider-specific configs
+ if
(FileSystemBasedLockProvider.class.getCanonicalName().equals(lockProviderClass))
{
+ copyPropsWithPrefix(dataProps, lockProps,
LockConfiguration.FILESYSTEM_BASED_LOCK_PROPERTY_PREFIX);
+ } else if
(ZookeeperBasedLockProvider.class.getCanonicalName().equals(lockProviderClass)
+ ||
ZookeeperBasedImplicitBasePathLockProvider.class.getCanonicalName().equals(lockProviderClass))
{
+ copyPropsWithPrefix(dataProps, lockProps,
LockConfiguration.ZOOKEEPER_BASED_LOCK_PROPERTY_PREFIX);
+ } else if
(HIVE_METASTORE_BASED_LOCK_PROVIDER_CLASS.equals(lockProviderClass)) {
Review Comment:
🤖 When the data table uses `ZookeeperBasedImplicitBasePathLockProvider`,
copying its config properties to the MDT write config means the MDT writer will
derive the ZK lock path from the **MDT base path** (since that's what
`writeConfig.getBasePath()` resolves to in the MDT write config), not the data
table base path. Is that intentional? If multiple MDT service runners all use
the MDT base path for the ZK path they'll coordinate correctly with each other,
but they'll use a different ZK path than what data table writers use — so
there's no shared lock between the two groups. For OCC that's fine, since
conflict resolution handles races post-hoc, but it's worth confirming that
this is the intended coordination model.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]