ibessonov commented on a change in pull request #7984:
URL: https://github.com/apache/ignite/pull/7984#discussion_r530996335
##########
File path: modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsDefragmentationTest.java
##########
@@ -0,0 +1,539 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.persistence;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.FileVisitResult;
+import java.nio.file.FileVisitor;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardCopyOption;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.util.Collections;
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Function;
+import java.util.function.UnaryOperator;
+import java.util.stream.IntStream;
+import javax.cache.configuration.Factory;
+import javax.cache.expiry.Duration;
+import javax.cache.expiry.ExpiryPolicy;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteDataStreamer;
+import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
+import org.apache.ignite.cluster.ClusterState;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.DataRegionConfiguration;
+import org.apache.ignite.configuration.DataStorageConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.failure.FailureHandler;
+import org.apache.ignite.failure.StopNodeFailureHandler;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.maintenance.MaintenanceFileStore;
+import org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils;
+import org.apache.ignite.internal.processors.cache.persistence.file.FileIOFactory;
+import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager;
+import org.apache.ignite.internal.util.lang.IgniteThrowableConsumer;
+import org.apache.ignite.internal.util.typedef.G;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.maintenance.MaintenanceRegistry;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+import org.junit.Test;
+
+import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
+import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.defragmentationCompletionMarkerFile;
+import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.defragmentedIndexFile;
+import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.defragmentedPartFile;
+import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.defragmentedPartMappingFile;
+import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.maintenance.DefragmentationParameters.toStore;
+import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.DFLT_STORE_DIR;
+
+/** */
+public class IgnitePdsDefragmentationTest extends GridCommonAbstractTest {
+    /** */
+    public static final String CACHE_2_NAME = "cache2";
+
+    /** */
+    public static final int PARTS = 5;
+
+    /** */
+    public static final int ADDED_KEYS_COUNT = 150;
+
+    /** */
+    protected static final String GRP_NAME = "group";
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        super.beforeTest();
+
+        stopAllGrids(true);
+
+        cleanPersistenceDir();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        super.afterTest();
+
+        stopAllGrids(true);
+
+        cleanPersistenceDir();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected FailureHandler getFailureHandler(String igniteInstanceName) {
+        return new StopNodeFailureHandler();
+    }
+
+    /** */
+    protected static class PolicyFactory implements Factory<ExpiryPolicy> {
+        /** Serial version uid. */
+        private static final long serialVersionUID = 0L;
+
+        /** {@inheritDoc} */
+        @Override public ExpiryPolicy create() {
+            return new ExpiryPolicy() {
+                @Override public Duration getExpiryForCreation() {
+                    return new Duration(TimeUnit.MILLISECONDS, 13000);
+                }
+
+                /** {@inheritDoc} */
+                @Override public Duration getExpiryForAccess() {
+                    return new Duration(TimeUnit.MILLISECONDS, 13000);
+                }
+
+                /** {@inheritDoc} */
+                @Override public Duration getExpiryForUpdate() {
+                    return new Duration(TimeUnit.MILLISECONDS, 13000);
+                }
+            };
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName);
+
+        cfg.setConsistentId(igniteInstanceName);
+
+        DataStorageConfiguration dsCfg = new DataStorageConfiguration();
+        dsCfg.setWalSegmentSize(4 * 1024 * 1024);
+
+        dsCfg.setDefaultDataRegionConfiguration(
+            new DataRegionConfiguration()
+                .setInitialSize(100L * 1024 * 1024)
+                .setMaxSize(1024L * 1024 * 1024)
+                .setPersistenceEnabled(true)
+        );
+
+        cfg.setDataStorageConfiguration(dsCfg);
+
+        CacheConfiguration<?, ?> cache1Cfg = new CacheConfiguration<>(DEFAULT_CACHE_NAME)
+            .setAtomicityMode(TRANSACTIONAL)
+            .setGroupName(GRP_NAME)
+            .setAffinity(new RendezvousAffinityFunction(false, PARTS));
+
+        CacheConfiguration<?, ?> cache2Cfg = new CacheConfiguration<>(CACHE_2_NAME)
+            .setAtomicityMode(TRANSACTIONAL)
+            .setGroupName(GRP_NAME)
+            .setExpiryPolicyFactory(new PolicyFactory())
+            .setAffinity(new RendezvousAffinityFunction(false, PARTS));
+
+        cfg.setCacheConfiguration(cache1Cfg, cache2Cfg);
+
+        return cfg;
+    }
+
+    /**
+     * Basic test scenario. Does the following steps:
+     * - Start node;
+     * - Fill cache;
+     * - Remove part of data;
+     * - Stop node;
+     * - Start node in defragmentation mode;
+     * - Stop node;
+     * - Start node;
+     * - Check that partitions became smaller;
+     * - Check that cache is accessible and works just fine.
+     *
+     * @throws Exception If failed.
+     */
+    @Test
+    public void testEssentials() throws Exception {
+        IgniteEx ig = startGrid(0);
+
+        ig.cluster().state(ClusterState.ACTIVE);
+
+        fillCache(ig.cache(DEFAULT_CACHE_NAME));
+
+        forceCheckpoint(ig);
+
+        createMaintenanceRecord();
+
+        stopGrid(0);
+
+        File workDir = resolveCacheWorkDir(ig);
+
+        long[] oldPartLen = partitionSizes(workDir);
+
+        long oldIdxFileLen = new File(workDir, FilePageStoreManager.INDEX_FILE_NAME).length();
+
+        startGrid(0);
+
+        long[] newPartLen = partitionSizes(workDir);
+
+        for (int p = 0; p < PARTS; p++)
+            assertTrue(newPartLen[p] < oldPartLen[p]); //TODO Fails.
+
+        long newIdxFileLen = new File(workDir, FilePageStoreManager.INDEX_FILE_NAME).length();
+
+        assertTrue(newIdxFileLen <= oldIdxFileLen);
+
+        File completionMarkerFile = defragmentationCompletionMarkerFile(workDir);
+        assertTrue(completionMarkerFile.exists());
+
+        stopGrid(0);
+
+        IgniteEx ig0 = startGrid(0);
+
+        ig0.cluster().state(ClusterState.ACTIVE);
+
+        assertFalse(completionMarkerFile.exists());
+
+        validateCache(grid(0).cache(DEFAULT_CACHE_NAME));
+    }
+
+    /**
+     * @return Working directory for cache group {@link IgnitePdsDefragmentationTest#GRP_NAME}.
+     * @throws IgniteCheckedException If failed for some reason, for example if it's a file instead of a directory.
+     */
+    private File resolveCacheWorkDir(IgniteEx ig) throws IgniteCheckedException {
+        File dbWorkDir = U.resolveWorkDirectory(U.defaultWorkDirectory(), DFLT_STORE_DIR, false);
+
+        File nodeWorkDir = new File(dbWorkDir, U.maskForFileName(ig.name()));
+
+        return new File(nodeWorkDir, FilePageStoreManager.CACHE_GRP_DIR_PREFIX + GRP_NAME);
+    }
+
+    /**
+     * Force checkpoint and wait for it so all partitions will be in their final state after restart if no more data is
+     * uploaded.
+     *
+     * @param ig Ignite node.
+     * @throws IgniteCheckedException If checkpoint failed for some reason.
+     */
+    private void forceCheckpoint(IgniteEx ig) throws IgniteCheckedException {
+        ig.context().cache().context().database()
+            .forceCheckpoint("testDefrag")
+            .futureFor(CheckpointState.FINISHED)
+            .get();
+    }
+
+    /** */
+    protected void createMaintenanceRecord() throws IgniteCheckedException {
+        IgniteEx grid = grid(0);
+        MaintenanceRegistry mntcReg = grid.context().maintenanceRegistry();
+
+        mntcReg.registerMaintenanceTask(toStore(Collections.singletonList(groupIdForCache(grid, DEFAULT_CACHE_NAME))));
+    }
+
+    /**
+     * Returns an array that contains sizes of partition files in the given working directory. Assumes that partitions
+     * {@code 0} to {@code PARTS - 1} exist in that dir.
+     *
+     * @param workDir Working directory.
+     * @return The array.
+     */
+    protected long[] partitionSizes(File workDir) {
+        return IntStream.range(0, PARTS)
+            .mapToObj(p -> new File(workDir, String.format(FilePageStoreManager.PART_FILE_TEMPLATE, p)))
+            .mapToLong(File::length)
+            .toArray();
+    }
+
+    /**
+     * Checks that a plain node start after failed defragmentation will finish batch renaming.
+     *
+     * @throws Exception If failed.
+     */
+    @Test
+    public void testFailoverRestartWithoutDefragmentation() throws Exception {
+        testFailover(workDir -> {
+            try {
+                File mntcRecFile = new File(workDir.getParent(), MaintenanceFileStore.MAINTENANCE_FILE_NAME);
+
+                assertTrue(mntcRecFile.exists());
+
+                Files.delete(mntcRecFile.toPath());
+
+                startGrid(0);
+
+                validateLeftovers(workDir);
+            }
+            catch (Exception e) {
+                throw new IgniteCheckedException(e);
+            }
+            finally {
+                createMaintenanceRecord();
+
+                stopGrid(0);
+            }
+        });
+    }
+
+    /**
+     * Checks that second start in defragmentation mode will finish defragmentation if no completion marker was found.
+     *
+     * @throws Exception If failed.
+     */
+    @Test
+    public void testFailoverBasic() throws Exception {

Review comment:
       Done


----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]
