kerneltime commented on code in PR #6945:
URL: https://github.com/apache/ozone/pull/6945#discussion_r1697580318

##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/checksum/DNContainerOperationClient.java:
##########
@@ -0,0 +1,122 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.checksum;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.XceiverClientManager;
+import org.apache.hadoop.hdds.scm.XceiverClientSpi;
+import org.apache.hadoop.hdds.scm.client.ClientTrustManager;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
+import org.apache.hadoop.hdds.security.SecurityConfig;
+import org.apache.hadoop.hdds.security.symmetric.SecretKeySignerClient;
+import org.apache.hadoop.hdds.security.x509.certificate.client.CACertificateProvider;
+import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
+import org.apache.hadoop.hdds.utils.HAUtils;
+import org.apache.hadoop.ozone.OzoneSecurityUtil;
+import jakarta.annotation.Nonnull;
+import org.apache.hadoop.ozone.container.common.helpers.TokenHelper;
+import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
+import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+
+import static org.apache.hadoop.ozone.container.common.helpers.TokenHelper.encode;
+
+/**
+ * This class wraps necessary container-level rpc calls for container reconcilitaion.
+ * - GetContainerMerkleTree
+ */
+public class DNContainerOperationClient implements AutoCloseable {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DNContainerOperationClient.class);
+  private final TokenHelper tokenHelper;
+  private final XceiverClientManager xceiverClientManager;
+
+  public DNContainerOperationClient(ConfigurationSource conf,
+      CertificateClient certificateClient,
+      SecretKeySignerClient secretKeyClient) throws IOException {
+    this.tokenHelper = new TokenHelper(new SecurityConfig(conf), secretKeyClient);
+    this.xceiverClientManager = createClientManager(conf, certificateClient);
+  }
+
+  @Nonnull
+  private static XceiverClientManager createClientManager(
+      ConfigurationSource conf, CertificateClient certificateClient)
+      throws IOException {
+    ClientTrustManager trustManager = null;
+    if (OzoneSecurityUtil.isSecurityEnabled(conf)) {
+      CACertificateProvider localCaCerts =
+          () -> HAUtils.buildCAX509List(certificateClient, conf);
+      CACertificateProvider remoteCacerts =
+          () -> HAUtils.buildCAX509List(null, conf);
+      trustManager = new ClientTrustManager(remoteCacerts, localCaCerts);
+    }
+    DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class);
+    return new XceiverClientManager(conf,
+        new XceiverClientManager.XceiverClientManagerConfigBuilder()
+            .setMaxCacheSize(dnConf.getContainerClientCacheSize())
+            .setStaleThresholdMs(dnConf.getContainerClientCacheStaleThreshold())
+            .build(), trustManager);
+  }
+
+  public XceiverClientManager getXceiverClientManager() {
+    return xceiverClientManager;

Review Comment:
   This seems to be almost identical to `org.apache.hadoop.ozone.container.ec.reconstruction.ECContainerOperationClient#createClientManager`; maybe in a separate PR we can move the code into `XceiverClientManager` itself to create an instance. That would also make the configuration options used here available elsewhere.
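
   A rough sketch of what such a shared factory on `XceiverClientManager` might look like (the method name, its placement, and the parameterized cache settings are assumptions, not code from this PR):

   ```java
   // Hypothetical shared factory on XceiverClientManager, lifted from the
   // createClientManager logic duplicated in DNContainerOperationClient and
   // ECContainerOperationClient. Cache size and stale threshold are passed in
   // so each caller can keep its own configuration keys.
   public static XceiverClientManager createClientManager(
       ConfigurationSource conf, CertificateClient certificateClient,
       int maxCacheSize, long staleThresholdMs) throws IOException {
     ClientTrustManager trustManager = null;
     if (OzoneSecurityUtil.isSecurityEnabled(conf)) {
       // Same trust setup as in this PR: local CA list built from the
       // certificate client, remote CA list fetched without one.
       CACertificateProvider localCaCerts =
           () -> HAUtils.buildCAX509List(certificateClient, conf);
       CACertificateProvider remoteCaCerts =
           () -> HAUtils.buildCAX509List(null, conf);
       trustManager = new ClientTrustManager(remoteCaCerts, localCaCerts);
     }
     return new XceiverClientManager(conf,
         new XceiverClientManager.XceiverClientManagerConfigBuilder()
             .setMaxCacheSize(maxCacheSize)
             .setStaleThresholdMs(staleThresholdMs)
             .build(), trustManager);
   }
   ```
   The datanode side would then just call it with `dnConf.getContainerClientCacheSize()` and `dnConf.getContainerClientCacheStaleThreshold()`.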

##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/checksum/DNContainerOperationClient.java:
##########
@@ -0,0 +1,122 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.checksum;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.XceiverClientManager;
+import org.apache.hadoop.hdds.scm.XceiverClientSpi;
+import org.apache.hadoop.hdds.scm.client.ClientTrustManager;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
+import org.apache.hadoop.hdds.security.SecurityConfig;
+import org.apache.hadoop.hdds.security.symmetric.SecretKeySignerClient;
+import org.apache.hadoop.hdds.security.x509.certificate.client.CACertificateProvider;
+import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
+import org.apache.hadoop.hdds.utils.HAUtils;
+import org.apache.hadoop.ozone.OzoneSecurityUtil;
+import jakarta.annotation.Nonnull;
+import org.apache.hadoop.ozone.container.common.helpers.TokenHelper;
+import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
+import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+
+import static org.apache.hadoop.ozone.container.common.helpers.TokenHelper.encode;
+
+/**
+ * This class wraps necessary container-level rpc calls for container reconcilitaion.
+ * - GetContainerMerkleTree
+ */
+public class DNContainerOperationClient implements AutoCloseable {

Review Comment:
   ```suggestion
   public class DNContainerReconciliationOperationClient implements AutoCloseable {
   ```

##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/checksum/DNContainerOperationClient.java:
##########
@@ -0,0 +1,122 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.checksum;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.XceiverClientManager;
+import org.apache.hadoop.hdds.scm.XceiverClientSpi;
+import org.apache.hadoop.hdds.scm.client.ClientTrustManager;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
+import org.apache.hadoop.hdds.security.SecurityConfig;
+import org.apache.hadoop.hdds.security.symmetric.SecretKeySignerClient;
+import org.apache.hadoop.hdds.security.x509.certificate.client.CACertificateProvider;
+import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
+import org.apache.hadoop.hdds.utils.HAUtils;
+import org.apache.hadoop.ozone.OzoneSecurityUtil;
+import jakarta.annotation.Nonnull;
+import org.apache.hadoop.ozone.container.common.helpers.TokenHelper;
+import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
+import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+
+import static org.apache.hadoop.ozone.container.common.helpers.TokenHelper.encode;
+
+/**
+ * This class wraps necessary container-level rpc calls for container reconcilitaion.
+ * - GetContainerMerkleTree
+ */
+public class DNContainerOperationClient implements AutoCloseable {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DNContainerOperationClient.class);
+  private final TokenHelper tokenHelper;
+  private final XceiverClientManager xceiverClientManager;
+
+  public DNContainerOperationClient(ConfigurationSource conf,
+      CertificateClient certificateClient,
+      SecretKeySignerClient secretKeyClient) throws IOException {
+    this.tokenHelper = new TokenHelper(new SecurityConfig(conf), secretKeyClient);
+    this.xceiverClientManager = createClientManager(conf, certificateClient);
+  }
+
+  @Nonnull
+  private static XceiverClientManager createClientManager(
+      ConfigurationSource conf, CertificateClient certificateClient)
+      throws IOException {
+    ClientTrustManager trustManager = null;
+    if (OzoneSecurityUtil.isSecurityEnabled(conf)) {
+      CACertificateProvider localCaCerts =
+          () -> HAUtils.buildCAX509List(certificateClient, conf);
+      CACertificateProvider remoteCacerts =
+          () -> HAUtils.buildCAX509List(null, conf);
+      trustManager = new ClientTrustManager(remoteCacerts, localCaCerts);
+    }
+    DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class);
+    return new XceiverClientManager(conf,
+        new XceiverClientManager.XceiverClientManagerConfigBuilder()
+            .setMaxCacheSize(dnConf.getContainerClientCacheSize())
+            .setStaleThresholdMs(dnConf.getContainerClientCacheStaleThreshold())

Review Comment:
   What is the expected behavior for caching clients? Is the client per datanode? We do not expect much concurrency on the client, so should the stale threshold be longer?
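
   For reference, this is roughly how I picture a per-datanode client being obtained through the manager (a sketch based on the imports in this file; the actual helper in this PR may differ):

   ```java
   // Sketch: build a single-node standalone pipeline for the target datanode
   // and let XceiverClientManager hand out (and cache) a client for it.
   // The helper name is hypothetical.
   static XceiverClientSpi acquireClientFor(DatanodeDetails dn,
       XceiverClientManager manager) throws IOException {
     Pipeline pipeline = Pipeline.newBuilder()
         .setId(PipelineID.randomId())
         .setReplicationConfig(StandaloneReplicationConfig.getInstance(
             HddsProtos.ReplicationFactor.ONE))
         .setNodes(ImmutableList.of(dn))
         .setState(Pipeline.PipelineState.CLOSED)
         .build();
     return manager.acquireClient(pipeline);
   }
   ```
   If that is the pattern, the cache would presumably hold one client per datanode, and the stale threshold decides how long an idle one is kept around.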

##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/checksum/DNContainerOperationClient.java:
##########
@@ -0,0 +1,122 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.checksum;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.XceiverClientManager;
+import org.apache.hadoop.hdds.scm.XceiverClientSpi;
+import org.apache.hadoop.hdds.scm.client.ClientTrustManager;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
+import org.apache.hadoop.hdds.security.SecurityConfig;
+import org.apache.hadoop.hdds.security.symmetric.SecretKeySignerClient;
+import org.apache.hadoop.hdds.security.x509.certificate.client.CACertificateProvider;
+import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
+import org.apache.hadoop.hdds.utils.HAUtils;
+import org.apache.hadoop.ozone.OzoneSecurityUtil;
+import jakarta.annotation.Nonnull;
+import org.apache.hadoop.ozone.container.common.helpers.TokenHelper;
+import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
+import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+
+import static org.apache.hadoop.ozone.container.common.helpers.TokenHelper.encode;
+
+/**
+ * This class wraps necessary container-level rpc calls for container reconcilitaion.

Review Comment:
   ```suggestion
    * This class wraps necessary container-level rpc calls for container reconciliation.
   ```

##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java:
##########
@@ -111,6 +114,8 @@ public class DatanodeConfiguration extends ReconfigurableConfig {
   public static final Boolean OZONE_DATANODE_CHECK_EMPTY_CONTAINER_DIR_ON_DELETE_DEFAULT = false;
   public static final int CONTAINER_CHECKSUM_LOCK_STRIPES_DEFAULT = 127;
+  public static final int CONTAINER_CLIENT_CACHE_SIZE_DEFAULT = 100;
+  public static final int CONTAINER_CLIENT_CACHE_STALE_THRESHOLD_DEFAULT = 10000;

Review Comment:
   ```suggestion
   public static final int CONTAINER_CLIENT_CACHE_STALE_THRESHOLD_DEFAULT_MILLISECONDS = 10000;
   ```


--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
