This is an automated email from the ASF dual-hosted git repository.

chungen pushed a commit to branch ozone-2.1
in repository https://gitbox.apache.org/repos/asf/ozone.git
commit 01af91ced76f59934660e78913c526b20d88539e
Author: Devesh Kumar Singh <[email protected]>
AuthorDate: Fri Oct 17 20:53:21 2025 +0530

    HDDS-13808. Revert of HDDS-13792. (#9168)

    (cherry picked from commit 3daf9f89a9b51c3d14bd189bee19d9b18dd2d573)
---
 .../src/main/proto/OmStorageProtocol.proto | 4 -
 .../hadoop/ozone/recon/ReconControllerModule.java | 4 -
 .../ozone/recon/api/UtilizationEndpoint.java | 76 +++++-------
 .../scm/ReconStorageContainerManagerFacade.java | 15 ++-
 .../spi/ReconContainerSizeMetadataManager.java | 100 ---------------
 .../ReconContainerSizeMetadataManagerImpl.java | 117 ------------------
 .../ozone/recon/spi/impl/ReconDBDefinition.java | 11 +-
 .../ozone/recon/tasks/ContainerSizeCountKey.java | 83 -------------
 .../ozone/recon/tasks/ContainerSizeCountTask.java | 132 +++++++++++++-------
 .../ozone/recon/tasks/ReconTaskControllerImpl.java | 7 +-
 .../hadoop/ozone/recon/ReconTestInjector.java | 5 -
 .../hadoop/ozone/recon/api/TestEndpoints.java | 10 +-
 .../recon/tasks/TestContainerSizeCountTask.java | 137 ++++++++-------------
 .../ozone/recon/tasks/TestEventBufferOverflow.java | 25 ++--
 .../TestNSSummaryTaskControllerIntegration.java | 4 +-
 .../recon/tasks/TestReconTaskControllerImpl.java | 7 +-
 16 files changed, 209 insertions(+), 528 deletions(-)
diff --git a/hadoop-ozone/interface-storage/src/main/proto/OmStorageProtocol.proto b/hadoop-ozone/interface-storage/src/main/proto/OmStorageProtocol.proto index a2e20c7a22a..ee57233f6de 100644 --- a/hadoop-ozone/interface-storage/src/main/proto/OmStorageProtocol.proto +++ b/hadoop-ozone/interface-storage/src/main/proto/OmStorageProtocol.proto @@ -73,8 +73,4 @@ message FileSizeCountKeyProto { required string volume = 1; required string bucket = 2; required int64 fileSizeUpperBound = 3; -} - -message ContainerSizeCountKeyProto { - required int64 containerSizeUpperBound = 1; } \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java index e6361339314..3f7e99056e4 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java @@ -51,14 +51,12 @@ import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade; import org.apache.hadoop.ozone.recon.spi.OzoneManagerServiceProvider; import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager; -import org.apache.hadoop.ozone.recon.spi.ReconContainerSizeMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconFileMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconGlobalStatsManager; import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider; import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; import org.apache.hadoop.ozone.recon.spi.impl.ReconContainerMetadataManagerImpl; -import org.apache.hadoop.ozone.recon.spi.impl.ReconContainerSizeMetadataManagerImpl; import org.apache.hadoop.ozone.recon.spi.impl.ReconDBProvider; import org.apache.hadoop.ozone.recon.spi.impl.ReconFileMetadataManagerImpl; import org.apache.hadoop.ozone.recon.spi.impl.ReconGlobalStatsManagerImpl; @@ -111,8 +109,6 @@ protected void configure() { .to(ReconFileMetadataManagerImpl.class).in(Singleton.class); bind(ReconGlobalStatsManager.class)
.to(ReconGlobalStatsManagerImpl.class).in(Singleton.class); - bind(ReconContainerSizeMetadataManager.class) - .to(ReconContainerSizeMetadataManagerImpl.class).in(Singleton.class); bind(ReconNamespaceSummaryManager.class) .to(ReconNamespaceSummaryManagerImpl.class).in(Singleton.class); bind(OzoneManagerServiceProvider.class) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/UtilizationEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/UtilizationEndpoint.java index 7912f9ed6b4..5a19890d3dc 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/UtilizationEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/UtilizationEndpoint.java @@ -21,9 +21,12 @@ import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_CONTAINER_SIZE; import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_FILE_SIZE; import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_VOLUME; +import static org.apache.ozone.recon.schema.generated.tables.ContainerCountBySizeTable.CONTAINER_COUNT_BY_SIZE; import java.util.ArrayList; +import java.util.Collections; import java.util.List; +import java.util.stream.Collectors; import javax.inject.Inject; import javax.ws.rs.GET; import javax.ws.rs.Path; @@ -34,12 +37,14 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.Table.KeyValueIterator; import org.apache.hadoop.ozone.recon.ReconUtils; -import org.apache.hadoop.ozone.recon.spi.ReconContainerSizeMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconFileMetadataManager; -import org.apache.hadoop.ozone.recon.tasks.ContainerSizeCountKey; import org.apache.hadoop.ozone.recon.tasks.FileSizeCountKey; +import org.apache.ozone.recon.schema.UtilizationSchemaDefinition; +import org.apache.ozone.recon.schema.generated.tables.daos.ContainerCountBySizeDao; import org.apache.ozone.recon.schema.generated.tables.pojos.ContainerCountBySize; import org.apache.ozone.recon.schema.generated.tables.pojos.FileCountBySize; +import org.jooq.DSLContext; +import org.jooq.Record1; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -50,17 +55,19 @@ @Produces(MediaType.APPLICATION_JSON) public class UtilizationEndpoint { + private UtilizationSchemaDefinition utilizationSchemaDefinition; + private ContainerCountBySizeDao containerCountBySizeDao; private ReconFileMetadataManager reconFileMetadataManager; - private ReconContainerSizeMetadataManager reconContainerSizeMetadataManager; private static final Logger LOG = LoggerFactory .getLogger(UtilizationEndpoint.class); @Inject - public UtilizationEndpoint( - ReconFileMetadataManager reconFileMetadataManager, - ReconContainerSizeMetadataManager reconContainerSizeMetadataManager) { + public UtilizationEndpoint(ContainerCountBySizeDao containerCountBySizeDao, + UtilizationSchemaDefinition utilizationSchemaDefinition, + ReconFileMetadataManager reconFileMetadataManager) { + this.utilizationSchemaDefinition = utilizationSchemaDefinition; + this.containerCountBySizeDao = containerCountBySizeDao; this.reconFileMetadataManager = reconFileMetadataManager; - this.reconContainerSizeMetadataManager = reconContainerSizeMetadataManager; } /** @@ -129,7 +136,7 @@ public Response getFileCounts( } /** - * Return the container size counts from RocksDB. + * Return the container size counts from Recon DB. 
* * @return {@link Response} */ @@ -138,46 +145,31 @@ public Response getFileCounts( public Response getContainerCounts( @QueryParam(RECON_QUERY_CONTAINER_SIZE) long upperBound) { - List<ContainerCountBySize> resultSet = new ArrayList<>(); + DSLContext dslContext = utilizationSchemaDefinition.getDSLContext(); + Long containerSizeUpperBound = + ReconUtils.getContainerSizeUpperBound(upperBound); + List<ContainerCountBySize> resultSet; try { - Table<ContainerSizeCountKey, Long> containerCountTable = - reconContainerSizeMetadataManager.getContainerCountTable(); - - Long containerSizeUpperBound = - ReconUtils.getContainerSizeUpperBound(upperBound); - if (upperBound > 0) { - // Query for specific container size - ContainerSizeCountKey key = - new ContainerSizeCountKey(containerSizeUpperBound); - Long count = containerCountTable.get(key); - if (count != null && count > 0) { - ContainerCountBySize record = new ContainerCountBySize(); - record.setContainerSize(containerSizeUpperBound); - record.setCount(count); - resultSet.add(record); - } + // Get the current count from database and update + Record1<Long> recordToFind = + dslContext.newRecord( + CONTAINER_COUNT_BY_SIZE.CONTAINER_SIZE) + .value1(containerSizeUpperBound); + ContainerCountBySize record = + containerCountBySizeDao.findById(recordToFind.value1()); + resultSet = record != null ? + Collections.singletonList(record) : Collections.emptyList(); } else { - // Iterate through all records - try (KeyValueIterator<ContainerSizeCountKey, Long> iterator = - containerCountTable.iterator()) { - while (iterator.hasNext()) { - Table.KeyValue<ContainerSizeCountKey, Long> entry = iterator.next(); - ContainerSizeCountKey key = entry.getKey(); - Long count = entry.getValue(); - - if (count != null && count > 0) { - ContainerCountBySize record = new ContainerCountBySize(); - record.setContainerSize(key.getContainerSizeUpperBound()); - record.setCount(count); - resultSet.add(record); - } - } - } + // fetch all records having values greater than zero + resultSet = containerCountBySizeDao.findAll().stream() + .filter(record -> record.getCount() > 0) + .collect(Collectors.toList()); } return Response.ok(resultSet).build(); } catch (Exception e) { - LOG.error("Error retrieving container counts from RocksDB", e); + // Log the exception and return a server error response + LOG.error("Error retrieving container counts", e); return Response.status(Response.Status.INTERNAL_SERVER_ERROR).build(); } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java index d01964311d4..b6b7e3cf5b4 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java @@ -117,11 +117,12 @@ import org.apache.hadoop.ozone.recon.fsck.ReconSafeModeMgrTask; import org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager; import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager; -import org.apache.hadoop.ozone.recon.spi.ReconContainerSizeMetadataManager; import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider; import org.apache.hadoop.ozone.recon.tasks.ContainerSizeCountTask; import org.apache.hadoop.ozone.recon.tasks.ReconTaskConfig; import 
org.apache.hadoop.ozone.recon.tasks.updater.ReconTaskStatusUpdaterManager; +import org.apache.ozone.recon.schema.UtilizationSchemaDefinition; +import org.apache.ozone.recon.schema.generated.tables.daos.ContainerCountBySizeDao; import org.apache.ratis.util.ExitUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -164,6 +165,7 @@ public class ReconStorageContainerManagerFacade private ReconSafeModeManager safeModeManager; private ReconSafeModeMgrTask reconSafeModeMgrTask; private ContainerSizeCountTask containerSizeCountTask; + private ContainerCountBySizeDao containerCountBySizeDao; private AtomicBoolean isSyncDataFromSCMRunning; private final String threadNamePrefix; @@ -173,7 +175,8 @@ public class ReconStorageContainerManagerFacade @SuppressWarnings({"checkstyle:ParameterNumber", "checkstyle:MethodLength"}) public ReconStorageContainerManagerFacade(OzoneConfiguration conf, StorageContainerServiceProvider scmServiceProvider, - ReconContainerSizeMetadataManager reconContainerSizeMetadataManager, + ContainerCountBySizeDao containerCountBySizeDao, + UtilizationSchemaDefinition utilizationSchemaDefinition, ContainerHealthSchemaManager containerHealthSchemaManager, ReconContainerMetadataManager reconContainerMetadataManager, ReconUtils reconUtils, @@ -246,6 +249,7 @@ public ReconStorageContainerManagerFacade(OzoneConfiguration conf, scmhaManager, sequenceIdGen, pendingOps); this.scmServiceProvider = scmServiceProvider; this.isSyncDataFromSCMRunning = new AtomicBoolean(); + this.containerCountBySizeDao = containerCountBySizeDao; NodeReportHandler nodeReportHandler = new NodeReportHandler(nodeManager); @@ -266,7 +270,7 @@ public ReconStorageContainerManagerFacade(OzoneConfiguration conf, reconTaskConfig, reconContainerMetadataManager, conf, taskStatusUpdaterManager); this.containerSizeCountTask = new ContainerSizeCountTask(containerManager, - reconTaskConfig, reconContainerSizeMetadataManager, taskStatusUpdaterManager); + reconTaskConfig, containerCountBySizeDao, utilizationSchemaDefinition, taskStatusUpdaterManager); this.dataSource = dataSource; @@ -711,6 +715,11 @@ public ContainerHealthTask getContainerHealthTask() { return containerHealthTask; } + @VisibleForTesting + public ContainerCountBySizeDao getContainerCountBySizeDao() { + return containerCountBySizeDao; + } + public ReconContext getReconContext() { return reconContext; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconContainerSizeMetadataManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconContainerSizeMetadataManager.java deleted file mode 100644 index c4c58008334..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconContainerSizeMetadataManager.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.spi; - -import java.io.IOException; -import org.apache.hadoop.hdds.annotation.InterfaceStability; -import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.apache.hadoop.hdds.utils.db.DBStore; -import org.apache.hadoop.hdds.utils.db.RDBBatchOperation; -import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.ozone.recon.spi.impl.ReconDBProvider; -import org.apache.hadoop.ozone.recon.tasks.ContainerSizeCountKey; - -/** - * The Recon Container Size Metadata DB Service interface for container size counts. - */ [email protected] -public interface ReconContainerSizeMetadataManager { - - /** - * Returns staged DB container size metadata manager. - * - * @param stagedReconDbStore staged Recon DB store - * @return ReconContainerSizeMetadataManager - */ - ReconContainerSizeMetadataManager getStagedReconContainerSizeMetadataManager( - DBStore stagedReconDbStore); - - /** - * reinitialize the ReconContainerSizeMetadataManager. - * - * @param reconDBProvider recon DB provider to reinitialize with. - */ - void reinitialize(ReconDBProvider reconDBProvider); - - /** - * Store the container size count mapping into a batch. - * - * @param batch the batch operation we store into - * @param containerSizeCountKey the container size count key. - * @param count Count of containers with that size range. - */ - void batchStoreContainerSizeCount(BatchOperation batch, - ContainerSizeCountKey containerSizeCountKey, - Long count) throws IOException; - - /** - * Delete container size count mapping from a batch. - * - * @param batch the batch operation we add the deletion to - * @param containerSizeCountKey the container size count key to be deleted. - */ - void batchDeleteContainerSizeCount(BatchOperation batch, - ContainerSizeCountKey containerSizeCountKey) - throws IOException; - - /** - * Get the stored container size count for the given key. - * - * @param containerSizeCountKey the container size count key. - * @return count of containers with that size range. - */ - Long getContainerSizeCount(ContainerSizeCountKey containerSizeCountKey) - throws IOException; - - /** - * Get the entire containerCountTable. - * @return containerCountTable - */ - Table<ContainerSizeCountKey, Long> getContainerCountTable(); - - /** - * Commit a batch operation into the containerSizeMetadataDbStore. - * - * @param rdbBatchOperation batch operation we want to commit - */ - void commitBatchOperation(RDBBatchOperation rdbBatchOperation) - throws IOException; - - /** - * Clear all container size count data from the table. - * This method is used during reprocess operations. - */ - void clearContainerCountTable() throws IOException; -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerSizeMetadataManagerImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerSizeMetadataManagerImpl.java deleted file mode 100644 index 6d580cc92bd..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerSizeMetadataManagerImpl.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.spi.impl; - -import static org.apache.hadoop.ozone.recon.spi.impl.ReconDBDefinition.CONTAINER_COUNT_BY_SIZE; -import static org.apache.hadoop.ozone.recon.spi.impl.ReconDBProvider.truncateTable; - -import java.io.IOException; -import javax.inject.Inject; -import javax.inject.Singleton; -import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.apache.hadoop.hdds.utils.db.DBStore; -import org.apache.hadoop.hdds.utils.db.RDBBatchOperation; -import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.ozone.recon.spi.ReconContainerSizeMetadataManager; -import org.apache.hadoop.ozone.recon.tasks.ContainerSizeCountKey; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Implementation of the Recon Container Size Metadata DB Service. - */ -@Singleton -public class ReconContainerSizeMetadataManagerImpl - implements ReconContainerSizeMetadataManager { - - private static final Logger LOG = - LoggerFactory.getLogger(ReconContainerSizeMetadataManagerImpl.class); - - private Table<ContainerSizeCountKey, Long> containerCountTable; - private DBStore containerSizeMetadataDbStore; - - @Inject - public ReconContainerSizeMetadataManagerImpl(ReconDBProvider reconDBProvider) { - this(reconDBProvider.getDbStore()); - } - - private ReconContainerSizeMetadataManagerImpl(DBStore reconDBStore) { - containerSizeMetadataDbStore = reconDBStore; - initializeTables(); - } - - @Override - public ReconContainerSizeMetadataManager getStagedReconContainerSizeMetadataManager( - DBStore stagedReconDbStore) { - return new ReconContainerSizeMetadataManagerImpl(stagedReconDbStore); - } - - @Override - public void reinitialize(ReconDBProvider reconDBProvider) { - containerSizeMetadataDbStore = reconDBProvider.getDbStore(); - initializeTables(); - } - - /** - * Initialize the container size metadata DB tables. 
- */ - private void initializeTables() { - try { - this.containerCountTable = CONTAINER_COUNT_BY_SIZE.getTable(containerSizeMetadataDbStore); - } catch (IOException e) { - LOG.error("Unable to create Container Size Count table.", e); - } - } - - @Override - public void batchStoreContainerSizeCount(BatchOperation batch, - ContainerSizeCountKey containerSizeCountKey, - Long count) throws IOException { - containerCountTable.putWithBatch(batch, containerSizeCountKey, count); - } - - @Override - public void batchDeleteContainerSizeCount(BatchOperation batch, - ContainerSizeCountKey containerSizeCountKey) - throws IOException { - containerCountTable.deleteWithBatch(batch, containerSizeCountKey); - } - - @Override - public Long getContainerSizeCount(ContainerSizeCountKey containerSizeCountKey) - throws IOException { - return containerCountTable.get(containerSizeCountKey); - } - - @Override - public Table<ContainerSizeCountKey, Long> getContainerCountTable() { - return containerCountTable; - } - - @Override - public void commitBatchOperation(RDBBatchOperation rdbBatchOperation) - throws IOException { - containerSizeMetadataDbStore.commitBatchOperation(rdbBatchOperation); - } - - @Override - public void clearContainerCountTable() throws IOException { - truncateTable(containerCountTable); - LOG.info("Successfully cleared container count table"); - } -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconDBDefinition.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconDBDefinition.java index f06a5b7d90d..3fe489da79d 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconDBDefinition.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconDBDefinition.java @@ -29,7 +29,6 @@ import org.apache.hadoop.ozone.recon.api.types.NSSummary; import org.apache.hadoop.ozone.recon.codec.NSSummaryCodec; import org.apache.hadoop.ozone.recon.scm.ContainerReplicaHistoryList; -import org.apache.hadoop.ozone.recon.tasks.ContainerSizeCountKey; import org.apache.hadoop.ozone.recon.tasks.FileSizeCountKey; import org.apache.hadoop.ozone.recon.tasks.GlobalStatsValue; @@ -96,13 +95,6 @@ public class ReconDBDefinition extends DBDefinition.WithMap { StringCodec.get(), GlobalStatsValue.getCodec()); - public static final DBColumnFamilyDefinition<ContainerSizeCountKey, Long> - CONTAINER_COUNT_BY_SIZE = - new DBColumnFamilyDefinition<>( - "containerCountBySizeTable", - ContainerSizeCountKey.getCodec(), - LongCodec.get()); - private static final Map<String, DBColumnFamilyDefinition<?, ?>> COLUMN_FAMILIES = DBColumnFamilyDefinition.newUnmodifiableMap( CONTAINER_KEY, @@ -112,8 +104,7 @@ public class ReconDBDefinition extends DBDefinition.WithMap { REPLICA_HISTORY, REPLICA_HISTORY_V2, FILE_COUNT_BY_SIZE, - GLOBAL_STATS, - CONTAINER_COUNT_BY_SIZE); + GLOBAL_STATS); public ReconDBDefinition(String dbName) { super(COLUMN_FAMILIES); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerSizeCountKey.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerSizeCountKey.java deleted file mode 100644 index 8f840dba2e9..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerSizeCountKey.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.tasks; - -import org.apache.hadoop.hdds.utils.db.Codec; -import org.apache.hadoop.hdds.utils.db.DelegatedCodec; -import org.apache.hadoop.hdds.utils.db.Proto2Codec; -import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos.ContainerSizeCountKeyProto; - -/** - * Key class used for grouping container size counts in RocksDB storage. - * Represents a key of containerSizeUpperBound for CONTAINER_COUNT_BY_SIZE column family. - */ -public class ContainerSizeCountKey { - private static final Codec<ContainerSizeCountKey> CODEC = new DelegatedCodec<>( - Proto2Codec.get(ContainerSizeCountKeyProto.getDefaultInstance()), - ContainerSizeCountKey::fromProto, - ContainerSizeCountKey::toProto, - ContainerSizeCountKey.class); - - private final Long containerSizeUpperBound; - - public ContainerSizeCountKey(Long containerSizeUpperBound) { - this.containerSizeUpperBound = containerSizeUpperBound; - } - - public static Codec<ContainerSizeCountKey> getCodec() { - return CODEC; - } - - public Long getContainerSizeUpperBound() { - return containerSizeUpperBound; - } - - public ContainerSizeCountKeyProto toProto() { - return ContainerSizeCountKeyProto.newBuilder() - .setContainerSizeUpperBound(containerSizeUpperBound) - .build(); - } - - public static ContainerSizeCountKey fromProto(ContainerSizeCountKeyProto proto) { - return new ContainerSizeCountKey(proto.getContainerSizeUpperBound()); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - ContainerSizeCountKey that = (ContainerSizeCountKey) o; - return containerSizeUpperBound.equals(that.containerSizeUpperBound); - } - - @Override - public int hashCode() { - return containerSizeUpperBound.hashCode(); - } - - @Override - public String toString() { - return "ContainerSizeCountKey{" + - "containerSizeUpperBound=" + containerSizeUpperBound + - '}'; - } -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerSizeCountTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerSizeCountTask.java index 826df60208a..2a343af5d6e 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerSizeCountTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerSizeCountTask.java @@ -18,9 +18,10 @@ package org.apache.hadoop.ozone.recon.tasks; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.DELETED; +import static org.apache.ozone.recon.schema.generated.tables.ContainerCountBySizeTable.CONTAINER_COUNT_BY_SIZE; import com.google.common.annotations.VisibleForTesting; -import java.io.IOException; +import java.util.ArrayList; import java.util.HashMap; import java.util.List; 
import java.util.Map; @@ -29,12 +30,15 @@ import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerManager; -import org.apache.hadoop.hdds.utils.db.RDBBatchOperation; import org.apache.hadoop.ozone.recon.ReconUtils; import org.apache.hadoop.ozone.recon.scm.ReconScmTask; -import org.apache.hadoop.ozone.recon.spi.ReconContainerSizeMetadataManager; import org.apache.hadoop.ozone.recon.tasks.updater.ReconTaskStatusUpdater; import org.apache.hadoop.ozone.recon.tasks.updater.ReconTaskStatusUpdaterManager; +import org.apache.ozone.recon.schema.UtilizationSchemaDefinition; +import org.apache.ozone.recon.schema.generated.tables.daos.ContainerCountBySizeDao; +import org.apache.ozone.recon.schema.generated.tables.pojos.ContainerCountBySize; +import org.jooq.DSLContext; +import org.jooq.Record1; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -50,7 +54,8 @@ public class ContainerSizeCountTask extends ReconScmTask { private ContainerManager containerManager; private final long interval; - private ReconContainerSizeMetadataManager reconContainerSizeMetadataManager; + private ContainerCountBySizeDao containerCountBySizeDao; + private DSLContext dslContext; private HashMap<ContainerID, Long> processedContainers = new HashMap<>(); private ReadWriteLock lock = new ReentrantReadWriteLock(true); private final ReconTaskStatusUpdater taskStatusUpdater; @@ -58,11 +63,13 @@ public class ContainerSizeCountTask extends ReconScmTask { public ContainerSizeCountTask( ContainerManager containerManager, ReconTaskConfig reconTaskConfig, - ReconContainerSizeMetadataManager reconContainerSizeMetadataManager, + ContainerCountBySizeDao containerCountBySizeDao, + UtilizationSchemaDefinition utilizationSchemaDefinition, ReconTaskStatusUpdaterManager taskStatusUpdaterManager) { super(taskStatusUpdaterManager); this.containerManager = containerManager; - this.reconContainerSizeMetadataManager = reconContainerSizeMetadataManager; + this.containerCountBySizeDao = containerCountBySizeDao; + this.dslContext = utilizationSchemaDefinition.getDSLContext(); interval = reconTaskConfig.getContainerSizeCountTaskInterval().toMillis(); this.taskStatusUpdater = getTaskStatusUpdater(); } @@ -122,9 +129,8 @@ private void process(ContainerInfo container, protected void runTask() throws Exception { final List<ContainerInfo> containers = containerManager.getContainers(); if (processedContainers.isEmpty()) { - // Clear RocksDB table instead of truncating Derby - reconContainerSizeMetadataManager.clearContainerCountTable(); - LOG.debug("Cleared container count table in RocksDB"); + int execute = dslContext.truncate(CONTAINER_COUNT_BY_SIZE).execute(); + LOG.debug("Deleted {} records from {}", execute, CONTAINER_COUNT_BY_SIZE); } processContainers(containers); } @@ -195,14 +201,20 @@ public void processContainers(List<ContainerInfo> containers) { } /** - * Populate RocksDB with the counts of container sizes using batch operations. + * Populate DB with the counts of container sizes calculated + * using the dao. * <p> - * The writeCountsToDB function updates RocksDB with the count of - * container sizes. It uses batch operations for atomic writes. If the database - * has not been truncated, it reads the current count from RocksDB, adds the - * delta, and either updates the entry (if new count > 0) or deletes it - * (if new count = 0). If the database has been truncated, it only inserts - * entries with non-zero counts. 
+ * The writeCountsToDB function updates the database with the count of + * container sizes. It does this by creating two lists of records to be + * inserted or updated in the database. It iterates over the keys of the + * containerSizeCountMap and creates a new record for each key. It then + * checks whether the database has been truncated or not. If it has not been + * truncated, it attempts to find the current count for the container size + * in the database and either inserts a new record or updates the current + * record with the updated count. If the database has been truncated, + * it only inserts a new record if the count is non-zero. Finally, it + * uses the containerCountBySizeDao to insert the new records and update + * the existing records in the database. * * @param isDbTruncated that checks if the database has been truncated or not. * @param containerSizeCountMap stores counts of container sizes @@ -210,37 +222,36 @@ public void processContainers(List<ContainerInfo> containers) { private void writeCountsToDB(boolean isDbTruncated, Map<ContainerSizeCountKey, Long> containerSizeCountMap) { - try (RDBBatchOperation rdbBatchOperation = new RDBBatchOperation()) { - for (Map.Entry<ContainerSizeCountKey, Long> entry : - containerSizeCountMap.entrySet()) { - ContainerSizeCountKey key = entry.getKey(); - Long delta = entry.getValue(); - - if (!isDbTruncated) { - // Get current count from RocksDB - Long currentCount = reconContainerSizeMetadataManager - .getContainerSizeCount(key); - long newCount = (currentCount != null ? currentCount : 0L) + delta; - - if (newCount > 0) { - reconContainerSizeMetadataManager.batchStoreContainerSizeCount( - rdbBatchOperation, key, newCount); - } else if (newCount == 0 && currentCount != null) { - // Delete the entry if count reaches zero - reconContainerSizeMetadataManager.batchDeleteContainerSizeCount( - rdbBatchOperation, key); - } - } else if (delta > 0) { - // After truncate, just insert non-zero counts - reconContainerSizeMetadataManager.batchStoreContainerSizeCount( - rdbBatchOperation, key, delta); + List<ContainerCountBySize> insertToDb = new ArrayList<>(); + List<ContainerCountBySize> updateInDb = new ArrayList<>(); + + containerSizeCountMap.keySet().forEach((ContainerSizeCountKey key) -> { + ContainerCountBySize newRecord = new ContainerCountBySize(); + newRecord.setContainerSize(key.containerSizeUpperBound); + newRecord.setCount(containerSizeCountMap.get(key)); + if (!isDbTruncated) { + // Get the current count from database and update + Record1<Long> recordToFind = + dslContext.newRecord( + CONTAINER_COUNT_BY_SIZE.CONTAINER_SIZE) + .value1(key.containerSizeUpperBound); + ContainerCountBySize containerCountRecord = + containerCountBySizeDao.findById(recordToFind.value1()); + if (containerCountRecord == null && newRecord.getCount() > 0L) { + // insert new row only for non-zero counts. + insertToDb.add(newRecord); + } else if (containerCountRecord != null) { + newRecord.setCount(containerCountRecord.getCount() + + containerSizeCountMap.get(key)); + updateInDb.add(newRecord); } + } else if (newRecord.getCount() > 0) { + // insert new row only for non-zero counts. 
+ insertToDb.add(newRecord); } - reconContainerSizeMetadataManager.commitBatchOperation(rdbBatchOperation); - } catch (IOException e) { - LOG.error("Failed to write container size counts to RocksDB", e); - throw new RuntimeException(e); - } + }); + containerCountBySizeDao.insert(insertToDb); + containerCountBySizeDao.update(updateInDb); } /** @@ -342,4 +353,35 @@ private static ContainerSizeCountKey getContainerSizeCountKey( ReconUtils.getContainerSizeUpperBound(containerSize)); } + /** + * The ContainerSizeCountKey class is a simple key class that has a single + * field, containerSizeUpperBound, which is a Long representing the upper + * bound of the container size range. + */ + private static class ContainerSizeCountKey { + + private Long containerSizeUpperBound; + + ContainerSizeCountKey( + Long containerSizeUpperBound) { + this.containerSizeUpperBound = containerSizeUpperBound; + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof ContainerSizeCountKey) { + ContainerSizeCountTask.ContainerSizeCountKey + s = (ContainerSizeCountTask.ContainerSizeCountKey) obj; + return + containerSizeUpperBound.equals(s.containerSizeUpperBound); + } + return false; + } + + @Override + public int hashCode() { + return (containerSizeUpperBound).hashCode(); + } + } + } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskControllerImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskControllerImpl.java index 628404f7795..f9b3d40d118 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskControllerImpl.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskControllerImpl.java @@ -54,7 +54,6 @@ import org.apache.hadoop.ozone.recon.metrics.ReconTaskMetrics; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager; -import org.apache.hadoop.ozone.recon.spi.ReconContainerSizeMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconFileMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconGlobalStatsManager; import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; @@ -80,7 +79,6 @@ public class ReconTaskControllerImpl implements ReconTaskController { private final ReconNamespaceSummaryManager reconNamespaceSummaryManager; private final ReconGlobalStatsManager reconGlobalStatsManager; private final ReconFileMetadataManager reconFileMetadataManager; - private final ReconContainerSizeMetadataManager reconContainerSizeMetadataManager; private Map<String, ReconOmTask> reconOmTasks; private ExecutorService executorService; @@ -111,15 +109,13 @@ public ReconTaskControllerImpl(OzoneConfiguration configuration, ReconContainerMetadataManager reconContainerMetadataManager, ReconNamespaceSummaryManager reconNamespaceSummaryManager, ReconGlobalStatsManager reconGlobalStatsManager, - ReconFileMetadataManager reconFileMetadataManager, - ReconContainerSizeMetadataManager reconContainerSizeMetadataManager) { + ReconFileMetadataManager reconFileMetadataManager) { this.configuration = configuration; this.reconDBProvider = reconDBProvider; this.reconContainerMetadataManager = reconContainerMetadataManager; this.reconNamespaceSummaryManager = reconNamespaceSummaryManager; this.reconGlobalStatsManager = reconGlobalStatsManager; this.reconFileMetadataManager = reconFileMetadataManager; - this.reconContainerSizeMetadataManager = 
reconContainerSizeMetadataManager; reconOmTasks = new HashMap<>(); threadCount = configuration.getInt(OZONE_RECON_TASK_THREAD_COUNT_KEY, OZONE_RECON_TASK_THREAD_COUNT_DEFAULT); @@ -282,7 +278,6 @@ public synchronized boolean reInitializeTasks(ReconOMMetadataManager omMetadataM reconContainerMetadataManager.reinitialize(reconDBProvider); reconGlobalStatsManager.reinitialize(reconDBProvider); reconFileMetadataManager.reinitialize(reconDBProvider); - reconContainerSizeMetadataManager.reinitialize(reconDBProvider); recordAllTaskStatus(localReconOmTaskMap, 0, omMetadataManager.getLastSequenceNumberFromDB()); // Track reprocess success diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/ReconTestInjector.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/ReconTestInjector.java index d14bc87baa5..e93e4d71f52 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/ReconTestInjector.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/ReconTestInjector.java @@ -43,12 +43,10 @@ import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.spi.OzoneManagerServiceProvider; import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager; -import org.apache.hadoop.ozone.recon.spi.ReconContainerSizeMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconFileMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconGlobalStatsManager; import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; import org.apache.hadoop.ozone.recon.spi.impl.ReconContainerMetadataManagerImpl; -import org.apache.hadoop.ozone.recon.spi.impl.ReconContainerSizeMetadataManagerImpl; import org.apache.hadoop.ozone.recon.spi.impl.ReconDBProvider; import org.apache.hadoop.ozone.recon.spi.impl.ReconFileMetadataManagerImpl; import org.apache.hadoop.ozone.recon.spi.impl.ReconGlobalStatsManagerImpl; @@ -185,9 +183,6 @@ protected void configure() { bind(ReconGlobalStatsManager.class) .to(ReconGlobalStatsManagerImpl.class) .in(Singleton.class); - bind(ReconContainerSizeMetadataManager.class) - .to(ReconContainerSizeMetadataManagerImpl.class) - .in(Singleton.class); bind(ReconNamespaceSummaryManager.class) .to(ReconNamespaceSummaryManagerImpl.class) .in(Singleton.class); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java index f233c351887..d0a8fed15ed 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java @@ -139,6 +139,8 @@ import org.apache.hadoop.ozone.recon.tasks.OmTableInsightTask; import org.apache.hadoop.ozone.recon.tasks.ReconOmTask; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.ozone.recon.schema.UtilizationSchemaDefinition; +import org.apache.ozone.recon.schema.generated.tables.daos.ContainerCountBySizeDao; import org.apache.ozone.recon.schema.generated.tables.daos.GlobalStatsDao; import org.apache.ozone.recon.schema.generated.tables.pojos.ContainerCountBySize; import org.apache.ozone.recon.schema.generated.tables.pojos.FileCountBySize; @@ -292,10 +294,16 @@ private void initializeInjector() throws Exception { pipelineEndpoint = reconTestInjector.getInstance(PipelineEndpoint.class); volumeEndpoint = reconTestInjector.getInstance(VolumeEndpoint.class); bucketEndpoint = 
reconTestInjector.getInstance(BucketEndpoint.class); + ContainerCountBySizeDao containerCountBySizeDao = reconScm.getContainerCountBySizeDao(); GlobalStatsDao globalStatsDao = getDao(GlobalStatsDao.class); + UtilizationSchemaDefinition utilizationSchemaDefinition = + getSchemaDefinition(UtilizationSchemaDefinition.class); reconFileMetadataManager = reconTestInjector.getInstance(ReconFileMetadataManager.class); ReconGlobalStatsManager reconGlobalStatsManager = reconTestInjector.getInstance(ReconGlobalStatsManager.class); - utilizationEndpoint = reconTestInjector.getInstance(UtilizationEndpoint.class); + utilizationEndpoint = new UtilizationEndpoint( + containerCountBySizeDao, + utilizationSchemaDefinition, + reconFileMetadataManager); OzoneConfiguration configuration = reconTestInjector.getInstance(OzoneConfiguration.class); fileSizeCountTaskFSO = new FileSizeCountTaskFSO(reconFileMetadataManager, configuration); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java index f39db6270f2..de00a73af7c 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java @@ -22,80 +22,66 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.DELETED; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.QUASI_CLOSED; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager; +import static org.apache.ozone.recon.schema.generated.tables.ContainerCountBySizeTable.CONTAINER_COUNT_BY_SIZE; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.BDDMockito.given; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import java.io.IOException; -import java.nio.file.Files; import java.time.Duration; import java.util.ArrayList; import java.util.List; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerManager; -import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.ozone.recon.ReconTestInjector; -import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; -import org.apache.hadoop.ozone.recon.spi.ReconContainerSizeMetadataManager; +import org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest; import org.apache.hadoop.ozone.recon.tasks.updater.ReconTaskStatusUpdater; import org.apache.hadoop.ozone.recon.tasks.updater.ReconTaskStatusUpdaterManager; -import org.junit.jupiter.api.BeforeAll; +import org.apache.ozone.recon.schema.UtilizationSchemaDefinition; +import org.apache.ozone.recon.schema.generated.tables.daos.ContainerCountBySizeDao; +import org.apache.ozone.recon.schema.generated.tables.daos.ReconTaskStatusDao; +import org.jooq.DSLContext; +import org.jooq.Record1; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.io.TempDir; /** * Class to test the process method of ContainerSizeCountTask. 
*/ -public class TestContainerSizeCountTask { +public class TestContainerSizeCountTask extends AbstractReconSqlDBTest { - @TempDir - private static java.nio.file.Path temporaryFolder; - - private static ReconContainerSizeMetadataManager reconContainerSizeMetadataManager; + private ContainerCountBySizeDao containerCountBySizeDao; private ContainerSizeCountTask task; - private static Table<ContainerSizeCountKey, Long> containerCountTable; - - @BeforeAll - public static void setupOnce() throws Exception { - ReconOMMetadataManager reconOMMetadataManager = getTestReconOmMetadataManager( - initializeNewOmMetadataManager(Files.createDirectory( - temporaryFolder.resolve("JunitOmDBDir")).toFile()), - Files.createDirectory(temporaryFolder.resolve("NewDir")).toFile()); - ReconTestInjector reconTestInjector = new ReconTestInjector.Builder(temporaryFolder.toFile()) - .withReconSqlDb() - .withReconOm(reconOMMetadataManager) - .withContainerDB() - .build(); - reconContainerSizeMetadataManager = reconTestInjector.getInstance( - ReconContainerSizeMetadataManager.class); - containerCountTable = reconContainerSizeMetadataManager.getContainerCountTable(); + private DSLContext dslContext; + + public TestContainerSizeCountTask() { + super(); } @BeforeEach - public void setUp() throws Exception { + public void setUp() { + UtilizationSchemaDefinition utilizationSchemaDefinition = getSchemaDefinition(UtilizationSchemaDefinition.class); + dslContext = utilizationSchemaDefinition.getDSLContext(); + containerCountBySizeDao = getDao(ContainerCountBySizeDao.class); ReconTaskConfig reconTaskConfig = new ReconTaskConfig(); reconTaskConfig.setContainerSizeCountTaskInterval(Duration.ofSeconds(1)); ReconTaskStatusUpdaterManager reconTaskStatusUpdaterManager = mock(ReconTaskStatusUpdaterManager.class); - when(reconTaskStatusUpdaterManager.getTaskStatusUpdater(anyString())).thenReturn( - mock(ReconTaskStatusUpdater.class)); + when(reconTaskStatusUpdaterManager.getTaskStatusUpdater(anyString())).thenReturn(new ReconTaskStatusUpdater( + getDao(ReconTaskStatusDao.class), "mockedTask-" + System.currentTimeMillis())); ContainerManager containerManager = mock(ContainerManager.class); task = new ContainerSizeCountTask( containerManager, reconTaskConfig, - reconContainerSizeMetadataManager, + containerCountBySizeDao, + utilizationSchemaDefinition, reconTaskStatusUpdaterManager); - // Clear table before running each test - reconContainerSizeMetadataManager.clearContainerCountTable(); + // Truncate table before running each test + dslContext.truncate(CONTAINER_COUNT_BY_SIZE); } @Test - public void testProcess() throws IOException { + public void testProcess() { // mock a container with invalid used bytes ContainerInfo omContainerInfo0 = mock(ContainerInfo.class); given(omContainerInfo0.containerID()).willReturn(ContainerID.valueOf(0)); @@ -122,26 +108,23 @@ public void testProcess() throws IOException { task.processContainers(containers); // Verify 3 containers are in correct bins. 
- // Note: getEstimatedKeyCount() may be inaccurate in RocksDB, so we count actual entries - int firstCount = 0; - try (org.apache.hadoop.hdds.utils.db.Table.KeyValueIterator<ContainerSizeCountKey, Long> iterator = - containerCountTable.iterator()) { - while (iterator.hasNext()) { - iterator.next(); - firstCount++; - } - } - assertEquals(3, firstCount); + assertEquals(3, containerCountBySizeDao.count()); // container size upper bound for // 1500000000L (1.5GB) is 2147483648L = 2^31 = 2GB (next highest power of 2) - ContainerSizeCountKey key1 = new ContainerSizeCountKey(2147483648L); - assertEquals(1L, containerCountTable.get(key1).longValue()); - + Record1<Long> recordToFind = + dslContext.newRecord( + CONTAINER_COUNT_BY_SIZE.CONTAINER_SIZE) + .value1(2147483648L); + assertEquals(1L, + containerCountBySizeDao.findById(recordToFind.value1()).getCount() + .longValue()); // container size upper bound for // 2500000000L (2.5GB) is 4294967296L = 2^32 = 4GB (next highest power of 2) - ContainerSizeCountKey key2 = new ContainerSizeCountKey(4294967296L); - assertEquals(1L, containerCountTable.get(key2).longValue()); + recordToFind.value1(4294967296L); + assertEquals(1L, + containerCountBySizeDao.findById(recordToFind.value1()).getCount() + .longValue()); // Add a new container ContainerInfo omContainerInfo3 = mock(ContainerInfo.class); @@ -157,39 +140,36 @@ public void testProcess() throws IOException { task.processContainers(containers); // Total size groups added to the database - // After migration to RocksDB, entries with count=0 are deleted, so we expect 4 entries - // Note: getEstimatedKeyCount() may be inaccurate in RocksDB, so we count actual entries - int actualCount = 0; - try (org.apache.hadoop.hdds.utils.db.Table.KeyValueIterator<ContainerSizeCountKey, Long> iterator = - containerCountTable.iterator()) { - while (iterator.hasNext()) { - iterator.next(); - actualCount++; - } - } - assertEquals(4, actualCount); + assertEquals(5, containerCountBySizeDao.count()); // Check whether container size upper bound for // 50000L is 536870912L = 2^29 = 512MB (next highest power of 2) - ContainerSizeCountKey key3 = new ContainerSizeCountKey(536870912L); - assertEquals(1L, containerCountTable.get(key3).longValue()); + recordToFind.value1(536870912L); + assertEquals(1, containerCountBySizeDao + .findById(recordToFind.value1()) + .getCount() + .longValue()); // Check whether container size of 1000000000L has been successfully updated // The previous value upperbound was 4294967296L which is no longer there - ContainerSizeCountKey key4 = new ContainerSizeCountKey(4294967296L); - Long count = containerCountTable.get(key4); - assertEquals(0L, count == null ? 0L : count.longValue()); + recordToFind.value1(4294967296L); + assertEquals(0, containerCountBySizeDao + .findById(recordToFind.value1()) + .getCount() + .longValue()); // Remove the container having size 1.5GB and upperbound 2147483648L containers.remove(omContainerInfo1); task.processContainers(containers); - ContainerSizeCountKey key5 = new ContainerSizeCountKey(2147483648L); - Long count2 = containerCountTable.get(key5); - assertEquals(0L, count2 == null ? 
0L : count2.longValue()); + recordToFind.value1(2147483648L); + assertEquals(0, containerCountBySizeDao + .findById(recordToFind.value1()) + .getCount() + .longValue()); } @Test - public void testProcessDeletedAndNegativeSizedContainers() throws IOException { + public void testProcessDeletedAndNegativeSizedContainers() { // Create a list of containers, including one that is deleted ContainerInfo omContainerInfo1 = mock(ContainerInfo.class); given(omContainerInfo1.containerID()).willReturn(ContainerID.valueOf(1)); @@ -239,16 +219,7 @@ public void testProcessDeletedAndNegativeSizedContainers() throws IOException { task.processContainers(containers); // Verify that only the valid containers are counted - // Note: getEstimatedKeyCount() may be inaccurate in RocksDB, so we count actual entries - int count = 0; - try (org.apache.hadoop.hdds.utils.db.Table.KeyValueIterator<ContainerSizeCountKey, Long> iterator = - containerCountTable.iterator()) { - while (iterator.hasNext()) { - iterator.next(); - count++; - } - } - assertEquals(3, count); + assertEquals(3, containerCountBySizeDao.count()); } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestEventBufferOverflow.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestEventBufferOverflow.java index adfbbc67b6c..77fc6cdbb07 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestEventBufferOverflow.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestEventBufferOverflow.java @@ -46,7 +46,6 @@ import org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager; -import org.apache.hadoop.ozone.recon.spi.ReconContainerSizeMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconFileMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconGlobalStatsManager; import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; @@ -96,8 +95,7 @@ public void testBufferOverflowAndAsyncReinitialization() throws Exception { ReconTaskControllerImpl reconTaskController = new ReconTaskControllerImpl( ozoneConfiguration, new HashSet<>(), reconTaskStatusUpdaterManagerMock, reconDbProvider, reconContainerMgr, nsSummaryManager, - mock(ReconGlobalStatsManager.class), mock(ReconFileMetadataManager.class), mock( - ReconContainerSizeMetadataManager.class)); + mock(ReconGlobalStatsManager.class), mock(ReconFileMetadataManager.class)); // Register a mock task for reinitialization CountDownLatch reinitLatch = new CountDownLatch(1); @@ -180,8 +178,7 @@ public void testDirectReInitializationEventQueueing() throws Exception { config, new HashSet<>(), taskStatusUpdaterManager, reconDbProvider, mock(ReconContainerMetadataManager.class), mock(ReconNamespaceSummaryManager.class), - mock(ReconGlobalStatsManager.class), mock(ReconFileMetadataManager.class), mock( - ReconContainerSizeMetadataManager.class)); + mock(ReconGlobalStatsManager.class), mock(ReconFileMetadataManager.class)); // Set up properly mocked ReconOMMetadataManager with required dependencies ReconOMMetadataManager mockOMMetadataManager = mock(ReconOMMetadataManager.class); @@ -251,8 +248,7 @@ public void testCompleteBufferOverflowAndReInitializationCycle() throws Exceptio ReconTaskControllerImpl reconTaskController = new ReconTaskControllerImpl( config, new HashSet<>(), taskStatusUpdaterManager, reconDbProvider, reconContainerMgr, 
nsSummaryManager, - mock(ReconGlobalStatsManager.class), mock(ReconFileMetadataManager.class), mock( - ReconContainerSizeMetadataManager.class)); + mock(ReconGlobalStatsManager.class), mock(ReconFileMetadataManager.class)); // Set up properly mocked ReconOMMetadataManager with required dependencies ReconOMMetadataManager mockOMMetadataManager = mock(ReconOMMetadataManager.class); @@ -395,8 +391,7 @@ public void testCheckpointCreationFailureAndRetry() throws Exception { config, new HashSet<>(), taskStatusUpdaterManager, reconDbProvider, mock(ReconContainerMetadataManager.class), mock(ReconNamespaceSummaryManager.class), - mock(ReconGlobalStatsManager.class), mock(ReconFileMetadataManager.class), mock( - ReconContainerSizeMetadataManager.class)); + mock(ReconGlobalStatsManager.class), mock(ReconFileMetadataManager.class)); // Set up a mock OM metadata manager ReconOMMetadataManager mockOMMetadataManager = mock(ReconOMMetadataManager.class); @@ -458,8 +453,7 @@ public void testRetryMechanismWithFullSnapshotFallback() throws Exception { config, new HashSet<>(), taskStatusUpdaterManager, reconDbProvider, mock(ReconContainerMetadataManager.class), mock(ReconNamespaceSummaryManager.class), - mock(ReconGlobalStatsManager.class), mock(ReconFileMetadataManager.class), mock( - ReconContainerSizeMetadataManager.class)); + mock(ReconGlobalStatsManager.class), mock(ReconFileMetadataManager.class)); ReconOMMetadataManager mockOMMetadataManager = mock(ReconOMMetadataManager.class); reconTaskController.updateOMMetadataManager(mockOMMetadataManager); @@ -529,8 +523,7 @@ public void testSuccessfulCheckpointAfterFailures() throws Exception { config, new HashSet<>(), taskStatusUpdaterManager, reconDbProvider, mock(ReconContainerMetadataManager.class), mock(ReconNamespaceSummaryManager.class), - mock(ReconGlobalStatsManager.class), mock(ReconFileMetadataManager.class), mock( - ReconContainerSizeMetadataManager.class)); + mock(ReconGlobalStatsManager.class), mock(ReconFileMetadataManager.class)); // Set up properly mocked ReconOMMetadataManager with required dependencies ReconOMMetadataManager mockOMMetadataManager = mock(ReconOMMetadataManager.class); @@ -598,8 +591,7 @@ public void testResetEventBufferMethod() throws Exception { config, new HashSet<>(), taskStatusUpdaterManager, reconDbProvider, mock(ReconContainerMetadataManager.class), mock(ReconNamespaceSummaryManager.class), - mock(ReconGlobalStatsManager.class), mock(ReconFileMetadataManager.class), mock( - ReconContainerSizeMetadataManager.class)); + mock(ReconGlobalStatsManager.class), mock(ReconFileMetadataManager.class)); // Add some events to the buffer for (int i = 0; i < 5; i++) { @@ -637,8 +629,7 @@ public void testResetEventFlagsMethod() throws Exception { config, new HashSet<>(), taskStatusUpdaterManager, reconDbProvider, mock(ReconContainerMetadataManager.class), mock(ReconNamespaceSummaryManager.class), - mock(ReconGlobalStatsManager.class), mock(ReconFileMetadataManager.class), mock( - ReconContainerSizeMetadataManager.class)); + mock(ReconGlobalStatsManager.class), mock(ReconFileMetadataManager.class)); // Test resetting flags for each reason reconTaskController.resetEventFlags(); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskControllerIntegration.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskControllerIntegration.java index 1bd009eaba9..f5ad9462d12 100644 --- 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskControllerIntegration.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskControllerIntegration.java @@ -47,7 +47,6 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager; -import org.apache.hadoop.ozone.recon.spi.ReconContainerSizeMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconFileMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconGlobalStatsManager; import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; @@ -112,13 +111,12 @@ void setUp() throws Exception { ReconContainerMetadataManager reconContainerMgr = mock(ReconContainerMetadataManager.class); ReconGlobalStatsManager reconGlobalStatsManager = mock(ReconGlobalStatsManager.class); ReconFileMetadataManager reconFileMetadataManager = mock(ReconFileMetadataManager.class); - ReconContainerSizeMetadataManager reconContainerSizeMetadataManager = mock(ReconContainerSizeMetadataManager.class); ReconDBProvider reconDbProvider = mock(ReconDBProvider.class); when(reconDbProvider.getDbStore()).thenReturn(mock(DBStore.class)); when(reconDbProvider.getStagedReconDBProvider()).thenReturn(reconDbProvider); taskController = new ReconTaskControllerImpl(ozoneConfiguration, java.util.Collections.emptySet(), mockTaskStatusUpdaterManager, reconDbProvider, reconContainerMgr, mockNamespaceSummaryManager, - reconGlobalStatsManager, reconFileMetadataManager, reconContainerSizeMetadataManager); + reconGlobalStatsManager, reconFileMetadataManager); taskController.start(); // Initialize the executor service taskController.registerTask(nsSummaryTask); taskController.registerTask(mockOtherTask); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskControllerImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskControllerImpl.java index 412db266d9c..51763812172 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskControllerImpl.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskControllerImpl.java @@ -48,7 +48,6 @@ import org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager; -import org.apache.hadoop.ozone.recon.spi.ReconContainerSizeMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconFileMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconGlobalStatsManager; import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; @@ -90,10 +89,9 @@ public void setUp() throws IOException { ReconNamespaceSummaryManager nsSummaryManager = mock(ReconNamespaceSummaryManager.class); ReconGlobalStatsManager reconGlobalStatsManager = mock(ReconGlobalStatsManager.class); ReconFileMetadataManager reconFileMetadataManager = mock(ReconFileMetadataManager.class); - ReconContainerSizeMetadataManager reconContainerSizeMetadataManager = mock(ReconContainerSizeMetadataManager.class); reconTaskController = new ReconTaskControllerImpl(ozoneConfiguration, new HashSet<>(), reconTaskStatusUpdaterManagerMock, reconDbProvider, reconContainerMgr, nsSummaryManager, - reconGlobalStatsManager, reconFileMetadataManager, 
reconContainerSizeMetadataManager); + reconGlobalStatsManager, reconFileMetadataManager); reconTaskController.start(); } @@ -399,10 +397,9 @@ public void testDrainEventBufferAndCleanExistingCheckpoints() throws Exception { ReconNamespaceSummaryManager nsSummaryManager = mock(ReconNamespaceSummaryManager.class); ReconGlobalStatsManager reconGlobalStatsManager = mock(ReconGlobalStatsManager.class); ReconFileMetadataManager reconFileMetadataManager = mock(ReconFileMetadataManager.class); - ReconContainerSizeMetadataManager reconContainerSizeMetadataManager = mock(ReconContainerSizeMetadataManager.class); ReconTaskControllerImpl testController = new ReconTaskControllerImpl(ozoneConfiguration, new HashSet<>(), reconTaskStatusUpdaterManagerMock, reconDbProvider, reconContainerMgr, nsSummaryManager, - reconGlobalStatsManager, reconFileMetadataManager, reconContainerSizeMetadataManager); + reconGlobalStatsManager, reconFileMetadataManager); // Don't start async processing // Add some events to buffer first
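
Note on the test expectations in TestContainerSizeCountTask: they depend on ReconUtils.getContainerSizeUpperBound bucketing a container's used size to a power-of-two upper bound (50000 bytes -> 536870912 = 512 MB, 1.5 GB -> 2147483648 = 2 GB, 2.5 GB -> 4294967296 = 4 GB). That utility is not part of this patch, so the sketch below only illustrates the bucketing the test comments describe; the 512 MB floor and the boundary handling are assumptions inferred from the expected values, not the actual ReconUtils implementation.

// Sketch of the next-power-of-two bucketing described by the test comments above.
// The 512 MB minimum bucket is an assumption inferred from the expected value for
// a 50000-byte container; the real ReconUtils.getContainerSizeUpperBound may differ.
public final class ContainerSizeBucketingSketch {

  // 2^29 bytes = 512 MB, the smallest bucket the tests appear to expect.
  private static final long MIN_UPPER_BOUND = 512L * 1024 * 1024;

  static long upperBound(long containerSize) {
    long bound = MIN_UPPER_BOUND;
    while (bound < containerSize) {
      bound <<= 1; // move to the next power of two
    }
    return bound;
  }

  public static void main(String[] args) {
    System.out.println(upperBound(50_000L));        // 536870912  (512 MB floor)
    System.out.println(upperBound(1_500_000_000L)); // 2147483648 (2 GB)
    System.out.println(upperBound(2_500_000_000L)); // 4294967296 (4 GB)
  }
}

Bucketing to a bounded set of upper bounds is what keeps the CONTAINER_COUNT_BY_SIZE table (and, before this revert, the RocksDB column family) to a handful of rows regardless of how many containers exist.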
