This is an automated email from the ASF dual-hosted git repository. captainzmc pushed a commit to branch ozone-1.3 in repository https://gitbox.apache.org/repos/asf/ozone.git
commit 2911f933b9351c098629b441cb1c97f58f0d3fd2 Author: Istvan Fajth <[email protected]> AuthorDate: Wed Nov 9 04:33:49 2022 +0100 HDDS-7453. Check certificate expiration at service startup, renew if necessary. (#3930) --- .../org/apache/hadoop/hdds/HddsConfigKeys.java | 10 +++ .../hadoop/hdds/security/x509/SecurityConfig.java | 18 ++++++ .../apache/hadoop/ozone/common/StorageInfo.java | 4 ++ .../common/src/main/resources/ozone-default.xml | 8 +++ .../apache/hadoop/ozone/HddsDatanodeService.java | 5 ++ .../hadoop/ozone/TestHddsSecureDatanodeInit.java | 2 +- .../x509/certificate/client/CertificateClient.java | 3 +- .../client/CommonCertificateClient.java | 6 ++ .../client/DefaultCertificateClient.java | 60 ++++++++++++++++-- .../certificate/client/SCMCertificateClient.java | 3 + .../client/TestCertificateClientInit.java | 2 +- .../client/TestDefaultCertificateClient.java | 52 +++++++++++++++ .../hadoop/ozone/om/TestSecureOzoneManager.java | 2 +- .../java/org/apache/hadoop/ozone/om/OMStorage.java | 4 ++ .../org/apache/hadoop/ozone/om/OzoneManager.java | 73 +++++++++++----------- .../org/apache/hadoop/ozone/recon/ReconServer.java | 25 ++++---- .../hadoop/ozone/recon/scm/ReconStorageConfig.java | 4 ++ 17 files changed, 223 insertions(+), 58 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java index 5f8f6f5259..4c017e831c 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java @@ -184,6 +184,16 @@ public final class HddsConfigKeys { // Default Certificate duration to one year. public static final String HDDS_X509_DEFAULT_DURATION_DEFAULT = "P365D"; + /** + * Duration of the grace period within which a certificate should be + * renewed before the current one expires. + * Default is 28 days. 
+ */ + public static final String HDDS_X509_RENEW_GRACE_DURATION = + "hdds.x509.renew.grace.duration"; + + public static final String HDDS_X509_RENEW_GRACE_DURATION_DEFAULT = "P28D"; + /** * Do not instantiate. */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/SecurityConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/SecurityConfig.java index b02ce1bdeb..751711c5ce 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/SecurityConfig.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/SecurityConfig.java @@ -63,6 +63,8 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_FILE_NAME; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_FILE_NAME_DEFAULT; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_MAX_DURATION; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_MAX_DURATION_DEFAULT; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_RENEW_GRACE_DURATION; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_RENEW_GRACE_DURATION_DEFAULT; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_SIGNATURE_ALGO; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_SIGNATURE_ALGO_DEFAULT; import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; @@ -100,6 +102,7 @@ public class SecurityConfig { private final String certificateFileName; private final boolean grpcTlsEnabled; private final Duration defaultCertDuration; + private final Duration renewalGracePeriod; private final boolean isSecurityEnabled; private final String crlName; private boolean grpcTlsUseTestCert; @@ -164,6 +167,10 @@ public class SecurityConfig { this.configuration.get(HDDS_X509_DEFAULT_DURATION, HDDS_X509_DEFAULT_DURATION_DEFAULT); defaultCertDuration = Duration.parse(certDurationString); + String renewalGraceDurationString = this.configuration.get( + 
HDDS_X509_RENEW_GRACE_DURATION, + HDDS_X509_RENEW_GRACE_DURATION_DEFAULT); + renewalGracePeriod = Duration.parse(renewalGraceDurationString); if (maxCertDuration.compareTo(defaultCertDuration) < 0) { LOG.error("Certificate duration {} should not be greater than Maximum " + @@ -216,6 +223,17 @@ public class SecurityConfig { return defaultCertDuration; } + /** + * Duration of the grace period within which a certificate should be + * renewed before the current one expires. + * Default is 28 days. + * + * @return the value of hdds.x509.renew.grace.duration property + */ + public Duration getRenewalGracePeriod() { + return renewalGracePeriod; + } + /** * Returns the Standard Certificate file name. * diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java index 6ba438456e..f7d92e5d2b 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java @@ -154,6 +154,10 @@ public class StorageInfo { properties.setProperty(key, value); } + public void unsetProperty(String key) { + properties.remove(key); + } + public void setClusterId(String clusterId) { properties.setProperty(CLUSTER_ID, clusterId); } diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index a481ba5e36..0dae783e69 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -2077,6 +2077,14 @@ valid. 
The formats accepted are based on the ISO-8601 duration format PnDTnHnMn.nS</description> </property> + <property> + <name>hdds.x509.renew.grace.duration</name> + <value>P28D</value> + <tag>OZONE, HDDS, SECURITY</tag> + <description>Duration of the grace period within which a certificate should be + renewed before the current one expires. Default is 28 days. + </description> + </property> <property> <name>hdds.x509.dir.name</name> <value>certs</value> diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java index d36a178550..d775a426da 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java @@ -339,6 +339,11 @@ public class HddsDatanodeService extends GenericCli implements ServicePlugin { LOG.info("Initializing secure Datanode."); CertificateClient.InitResponse response = dnCertClient.init(); + if (response.equals(CertificateClient.InitResponse.REINIT)) { + LOG.info("Re-initialize certificate client."); + dnCertClient = new DNCertificateClient(new SecurityConfig(conf)); + response = dnCertClient.init(); + } LOG.info("Init response: {}", response); switch (response) { case SUCCESS: diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java index 2d0cb37141..39e36baae0 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java @@ -102,7 +102,7 @@ public class TestHddsSecureDatanodeInit { X509Certificate x509Certificate = null; x509Certificate =
KeyStoreTestUtil.generateCertificate( - "CN=Test", new KeyPair(publicKey, privateKey), 10, + "CN=Test", new KeyPair(publicKey, privateKey), 365, securityConfig.getSignatureAlgo()); certHolder = new X509CertificateHolder(x509Certificate.getEncoded()); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java index 396452fbb2..3357bb3089 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java @@ -203,7 +203,8 @@ public interface CertificateClient { SUCCESS, FAILURE, GETCERT, - RECOVER + RECOVER, + REINIT } /** diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CommonCertificateClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CommonCertificateClient.java index dbf634f915..bb122955a5 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CommonCertificateClient.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CommonCertificateClient.java @@ -25,6 +25,7 @@ import org.slf4j.Logger; import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.FAILURE; import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.GETCERT; import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.RECOVER; +import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.REINIT; import static 
org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.SUCCESS; /** @@ -102,6 +103,11 @@ public class CommonCertificateClient extends DefaultCertificateClient { } else { return FAILURE; } + case EXPIRED_CERT: + getLogger().info("Component certificate is about to expire. Initiating " + + "renewal."); + removeMaterial(); + return REINIT; default: log.error("Unexpected case: {} (private/public/cert)", Integer.toBinaryString(init.ordinal())); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java index e84826d481..a55db9a427 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java @@ -38,6 +38,7 @@ import java.security.SignatureException; import java.security.cert.CertStore; import java.security.cert.X509Certificate; import java.security.spec.InvalidKeySpecException; +import java.time.Instant; import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -47,6 +48,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; +import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol; import org.apache.hadoop.hdds.security.x509.crl.CRLInfo; @@ -63,8 +65,10 @@ import org.apache.commons.io.FilenameUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.lang3.math.NumberUtils; import org.apache.commons.validator.routines.DomainValidator; + import static
org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.FAILURE; import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.GETCERT; +import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.REINIT; import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.SUCCESS; import static org.apache.hadoop.hdds.security.x509.exceptions.CertificateException.ErrorCode.BOOTSTRAP_ERROR; import static org.apache.hadoop.hdds.security.x509.exceptions.CertificateException.ErrorCode.CERTIFICATE_ERROR; @@ -74,7 +78,6 @@ import static org.apache.hadoop.hdds.security.x509.exceptions.CertificateExcepti import static org.apache.hadoop.hdds.utils.HddsServerUtil.getScmSecurityClient; import static org.apache.hadoop.hdds.utils.HddsServerUtil.getScmSecurityClientWithMaxRetry; -import org.apache.ratis.util.FileUtils; import org.bouncycastle.cert.X509CertificateHolder; import org.slf4j.Logger; @@ -636,7 +639,10 @@ public abstract class DefaultCertificateClient implements CertificateClient { * 6. PUBLICKEY_PRIVATEKEY indicates private and public key were read * successfully from configured location but * Certificate. - * 7. All Keypair as well as certificate is present. + * 7. ALL Keypair as well as certificate is present. + * 8. EXPIRED_CERT The certificate is present, but either it has + * already expired, or is about to be expired within + * the grace period provided in the configuration. * * */ protected enum InitCase { @@ -647,7 +653,8 @@ public abstract class DefaultCertificateClient implements CertificateClient { PRIVATE_KEY, PRIVATEKEY_CERT, PUBLICKEY_PRIVATEKEY, - ALL + ALL, + EXPIRED_CERT } /** @@ -657,6 +664,9 @@ public abstract class DefaultCertificateClient implements CertificateClient { * 2. Generates and stores a keypair. * 3. Try to recover public key if private key and certificate is present * but public key is missing. 
+ * 4. Checks if the certificate is about to be expired or have already been + * expired, and if yes removes the key material and the certificate and + * asks for re-initialization in the result. * * Truth table: * +--------------+-----------------+--------------+----------------+ @@ -688,6 +698,13 @@ public abstract class DefaultCertificateClient implements CertificateClient { * 2. When keypair (public/private key) is available but certificate is * missing. * + * Returns REINIT in following case: + * If it would return SUCCESS, but the certificate expiration date is + * within the configured grace period or if the certificate is already + * expired. + * The grace period is configured by the hdds.x509.renew.grace.duration + * configuration property. + * */ @Override public synchronized InitResponse init() throws CertificateException { @@ -695,7 +712,6 @@ public abstract class DefaultCertificateClient implements CertificateClient { PrivateKey pvtKey = getPrivateKey(); PublicKey pubKey = getPublicKey(); X509Certificate certificate = getCertificate(); - if (pvtKey != null) { initCase = initCase | 1 << 2; } @@ -705,8 +721,21 @@ public abstract class DefaultCertificateClient implements CertificateClient { if (certificate != null) { initCase = initCase | 1; } + + boolean successCase = + initCase == InitCase.ALL.ordinal() || + initCase == InitCase.PRIVATEKEY_CERT.ordinal(); + boolean shouldRenew = + certificate != null && + Instant.now().plus(securityConfig.getRenewalGracePeriod()) + .isAfter(certificate.getNotAfter().toInstant()); + + if (successCase && shouldRenew) { + initCase = InitCase.EXPIRED_CERT.ordinal(); + } + getLogger().info("Certificate client init case: {}", initCase); - Preconditions.checkArgument(initCase < 8, "Not a " + + Preconditions.checkArgument(initCase < InitCase.values().length, "Not a " + "valid case."); InitCase init = InitCase.values()[initCase]; return handleCase(init); @@ -766,6 +795,11 @@ public abstract class DefaultCertificateClient 
implements CertificateClient { } else { return FAILURE; } + case EXPIRED_CERT: + getLogger().info("Component certificate is about to expire. Initiating " + + "renewal."); + removeMaterial(); + return REINIT; default: getLogger().error("Unexpected case: {} (private/public/cert)", Integer.toBinaryString(init.ordinal())); @@ -774,6 +808,20 @@ public abstract class DefaultCertificateClient implements CertificateClient { } } + protected void removeMaterial() throws CertificateException { + try { + FileUtils.deleteDirectory( + securityConfig.getKeyLocation(component).toFile()); + getLogger().info("Certificate renewal: key material is removed."); + FileUtils.deleteDirectory( + securityConfig.getCertificateLocation(component).toFile()); + getLogger().info("Certificate renewal: certificates are removed."); + } catch (IOException e) { + throw new CertificateException("Certificate renewal failed: remove key" + + " material failed.", e); + } + } + /** * Validate keypair and certificate. * */ @@ -988,7 +1036,7 @@ public abstract class DefaultCertificateClient implements CertificateClient { certName = ROOT_CA_CERT_PREFIX + certName; } - FileUtils.deleteFileQuietly(basePath.resolve(certName).toFile()); + FileUtils.deleteQuietly(basePath.resolve(certName).toFile()); // remove in memory certificateMap.remove(certId); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/SCMCertificateClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/SCMCertificateClient.java index d941d88e43..91acc1e767 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/SCMCertificateClient.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/SCMCertificateClient.java @@ -109,6 +109,9 @@ public class SCMCertificateClient extends DefaultCertificateClient { } else { return FAILURE; } + case EXPIRED_CERT: +
LOG.warn("SCM CA certificate is about to expire!"); + return SUCCESS; default: LOG.error("Unexpected case: {} (private/public/cert)", Integer.toBinaryString(init.ordinal())); diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestCertificateClientInit.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestCertificateClientInit.java index 47b02a9a2e..72730858fc 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestCertificateClientInit.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestCertificateClientInit.java @@ -212,6 +212,6 @@ public class TestCertificateClientInit { private X509Certificate getX509Certificate() throws Exception { return KeyStoreTestUtil.generateCertificate( - "CN=Test", keyPair, 10, securityConfig.getSignatureAlgo()); + "CN=Test", keyPair, 365, securityConfig.getSignatureAlgo()); } } \ No newline at end of file diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java index fbc5c34f30..394f4b0a3b 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java @@ -18,6 +18,7 @@ */ package org.apache.hadoop.hdds.security.x509.certificate.client; +import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse; import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException; import
org.apache.hadoop.hdds.security.x509.keys.KeyCodec; @@ -35,6 +36,9 @@ import java.security.PrivateKey; import java.security.PublicKey; import java.security.Signature; import java.security.cert.X509Certificate; +import java.time.Duration; +import java.util.Calendar; +import java.util.Date; import java.util.UUID; import org.apache.commons.io.FileUtils; @@ -48,18 +52,25 @@ import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.LambdaTestUtils; +import org.slf4j.Logger; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_METADATA_DIR_NAME; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES; import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.FAILURE; +import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.REINIT; import static org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec.getPEMEncodedString; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; /** * Test class for {@link DefaultCertificateClient}. 
@@ -474,4 +485,45 @@ public class TestDefaultCertificateClient { omClientLog.clearOutput(); } + @Test + public void testCertificateExpirationHandlingInInit() throws Exception { + String certId = "1L"; + String compName = "TEST"; + + Logger mockLogger = mock(Logger.class); + + SecurityConfig config = mock(SecurityConfig.class); + Path nonexistent = Paths.get("nonexistent"); + when(config.getCertificateLocation(anyString())).thenReturn(nonexistent); + when(config.getKeyLocation(anyString())).thenReturn(nonexistent); + when(config.getRenewalGracePeriod()).thenReturn(Duration.ofDays(28)); + + Calendar cal = Calendar.getInstance(); + cal.add(Calendar.DAY_OF_YEAR, 2); + Date expiration = cal.getTime(); + X509Certificate mockCert = mock(X509Certificate.class); + when(mockCert.getNotAfter()).thenReturn(expiration); + + DefaultCertificateClient client = + new DefaultCertificateClient(config, mockLogger, certId, compName) { + @Override + public PrivateKey getPrivateKey() { + return mock(PrivateKey.class); + } + + @Override + public PublicKey getPublicKey() { + return mock(PublicKey.class); + } + + @Override + public X509Certificate getCertificate() { + return mockCert; + } + }; + + InitResponse resp = client.init(); + verify(mockLogger, atLeastOnce()).info(anyString()); + assertEquals(resp, REINIT); + } } \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSecureOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSecureOzoneManager.java index 32cd7beb17..6347a4f965 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSecureOzoneManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSecureOzoneManager.java @@ -164,7 +164,7 @@ public class TestSecureOzoneManager { CertificateCodec certCodec = new CertificateCodec(securityConfig, COMPONENT); X509Certificate x509Certificate = 
KeyStoreTestUtil.generateCertificate( - "CN=Test", new KeyPair(publicKey, privateKey), 10, + "CN=Test", new KeyPair(publicKey, privateKey), 365, securityConfig.getSignatureAlgo()); certCodec.writeCertificate(new X509CertificateHolder( x509Certificate.getEncoded())); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java index 31c2aefd1e..0cde1047d5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java @@ -55,6 +55,10 @@ public class OMStorage extends Storage { getStorageInfo().setProperty(OM_CERT_SERIAL_ID, certSerialId); } + public void unsetOmCertSerialId() throws IOException { + getStorageInfo().unsetProperty(OM_CERT_SERIAL_ID); + } + public void setOmId(String omId) throws IOException { if (getState() == StorageState.INITIALIZED) { throw new IOException("OM is already initialized."); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index b5be6c02d7..dd9375e5bf 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -1228,52 +1228,44 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl loginOMUserIfSecurityEnabled(conf); OMStorage omStorage = new OMStorage(conf); StorageState state = omStorage.getState(); - if (state != StorageState.INITIALIZED) { - try { - ScmInfo scmInfo = getScmInfo(conf); - String clusterId = scmInfo.getClusterId(); - String scmId = scmInfo.getScmId(); - if (clusterId == null || clusterId.isEmpty()) { - throw new IOException("Invalid Cluster ID"); - } - if (scmId == null || scmId.isEmpty()) { - throw new 
IOException("Invalid SCM ID"); - } + String scmId = null; + try { + ScmInfo scmInfo = getScmInfo(conf); + scmId = scmInfo.getScmId(); + if (scmId == null || scmId.isEmpty()) { + throw new IOException("Invalid SCM ID"); + } + String clusterId = scmInfo.getClusterId(); + if (clusterId == null || clusterId.isEmpty()) { + throw new IOException("Invalid Cluster ID"); + } + + if (state != StorageState.INITIALIZED) { omStorage.setClusterId(clusterId); - if (OzoneSecurityUtil.isSecurityEnabled(conf)) { - initializeSecurity(conf, omStorage, scmId); - } omStorage.initialize(); System.out.println( "OM initialization succeeded.Current cluster id for sd=" + omStorage.getStorageDir() + ";cid=" + omStorage .getClusterID() + ";layoutVersion=" + omStorage .getLayoutVersion()); - - return true; - } catch (IOException ioe) { - LOG.error("Could not initialize OM version file", ioe); - return false; - } - } else { - if (OzoneSecurityUtil.isSecurityEnabled(conf) && - omStorage.getOmCertSerialId() == null) { - ScmInfo scmInfo = HAUtils.getScmInfo(conf); - String scmId = scmInfo.getScmId(); - if (scmId == null || scmId.isEmpty()) { - throw new IOException("Invalid SCM ID"); - } - LOG.info("OM storage is already initialized. Initializing security"); - initializeSecurity(conf, omStorage, scmId); - omStorage.persistCurrentState(); + } else { + System.out.println( + "OM already initialized.Reusing existing cluster id for sd=" + + omStorage.getStorageDir() + ";cid=" + omStorage + .getClusterID() + ";layoutVersion=" + omStorage + .getLayoutVersion()); } - System.out.println( - "OM already initialized.Reusing existing cluster id for sd=" - + omStorage.getStorageDir() + ";cid=" + omStorage - .getClusterID() + ";layoutVersion=" + omStorage - .getLayoutVersion()); - return true; + } catch (IOException ioe) { + LOG.error("Could not initialize OM version file", ioe); + return false; + } + + if (OzoneSecurityUtil.isSecurityEnabled(conf)) { + LOG.info("OM storage initialized. 
Initializing security"); + initializeSecurity(conf, omStorage, scmId); } + omStorage.persistCurrentState(); + return true; } /** @@ -1289,6 +1281,13 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl new OMCertificateClient(new SecurityConfig(conf), omStore.getOmCertSerialId()); CertificateClient.InitResponse response = certClient.init(); + if (response.equals(CertificateClient.InitResponse.REINIT)) { + LOG.info("Re-initialize certificate client."); + omStore.unsetOmCertSerialId(); + omStore.persistCurrentState(); + certClient = new OMCertificateClient(new SecurityConfig(conf)); + response = certClient.init(); + } LOG.info("Init response: {}", response); switch (response) { case SUCCESS: diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java index 5fb1d2b02b..55e5856f98 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java @@ -117,19 +117,14 @@ public class ReconServer extends GenericCli { loginReconUserIfSecurityEnabled(configuration); try { if (reconStorage.getState() != INITIALIZED) { - if (OzoneSecurityUtil.isSecurityEnabled(configuration)) { - initializeCertificateClient(configuration); - } reconStorage.initialize(); - } else { - if (OzoneSecurityUtil.isSecurityEnabled(configuration) && - reconStorage.getReconCertSerialId() == null) { - LOG.info("ReconStorageConfig is already initialized." + - "Initializing certificate."); - initializeCertificateClient(configuration); - reconStorage.persistCurrentState(); - } } + if (OzoneSecurityUtil.isSecurityEnabled(configuration)) { + LOG.info("ReconStorageConfig initialized." 
+ + "Initializing certificate."); + initializeCertificateClient(configuration); + } + reconStorage.persistCurrentState(); } catch (Exception e) { LOG.error("Error during initializing Recon certificate", e); } @@ -182,6 +177,14 @@ public class ReconServer extends GenericCli { reconStorage.getReconCertSerialId()); CertificateClient.InitResponse response = certClient.init(); + if (response.equals(CertificateClient.InitResponse.REINIT)) { + LOG.info("Re-initialize certificate client."); + reconStorage.unsetReconCertSerialId(); + reconStorage.persistCurrentState(); + certClient = new ReconCertificateClient(new SecurityConfig(configuration), + reconStorage.getReconCertSerialId()); + response = certClient.init(); + } LOG.info("Init response: {}", response); switch (response) { case SUCCESS: diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageConfig.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageConfig.java index 7a82542e6e..6e13408b41 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageConfig.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageConfig.java @@ -90,5 +90,9 @@ public class ReconStorageConfig extends SCMStorageConfig { return getStorageInfo().getProperty(RECON_CERT_SERIAL_ID); } + public void unsetReconCertSerialId() { + getStorageInfo().unsetProperty(RECON_CERT_SERIAL_ID); + } + } --------------------------------------------------------------------- To unsubscribe, e-mail: [email protected] For additional commands, e-mail: [email protected]
