Mmuzaf commented on a change in pull request #8767:
URL: https://github.com/apache/ignite/pull/8767#discussion_r591770980



##########
File path: 
modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/snapshot/IgniteClusterSnapshotCheckTest.java
##########
@@ -315,4 +347,183 @@ public void testClusterSnapshotCheckCRCFail() throws 
Exception {
         Exception ex = res.exceptions().values().iterator().next();
         assertTrue(X.hasCause(ex, 
IgniteDataIntegrityViolationException.class));
     }
+
+    /**
+     * Verifies that an entry updated via {@code initialValue} during checkpoint mark
+     * (which bypasses the partition update counter) makes the snapshot check report
+     * exactly one conflict partition.
+     *
+     * @throws Exception If fails.
+     */
+    @Test
+    public void testClusterSnapshotCheckMissedHashes() throws Exception {
+        int entryCnt = 1;
+
+        // Single-partition affinity so the whole data set lands in partition 0.
+        CacheConfiguration<Integer, Value> ccfg = txCacheConfig(new CacheConfiguration<Integer, Value>(DEFAULT_CACHE_NAME))
+            .setAffinity(new RendezvousAffinityFunction(false, 1));
+
+        IgniteEx ignite = startGridsWithoutCache(2);
+
+        for (int idx = 0; idx < entryCnt; idx++)
+            ignite.getOrCreateCache(ccfg).put(idx, new Value(new byte[2000]));
+
+        forceCheckpoint(ignite);
+
+        GridCacheSharedContext<?, ?> cctx = ignite.context().cache().context();
+        GridCacheDatabaseSharedManager db = (GridCacheDatabaseSharedManager)cctx.database();
+
+        BinaryContext binCtx = ((CacheObjectBinaryProcessorImpl)ignite.context().cacheObjects()).binaryContext();
+
+        GridCacheAdapter<?, ?> cache = ignite.context().cache().internalCache(dfltCacheCfg.getName());
+        long ctrBefore = cache.context().offheap().lastUpdatedPartitionCounter(0);
+        AtomicBoolean injected = new AtomicBoolean();
+
+        db.addCheckpointListener(new CheckpointListener() {
+            @Override public void onMarkCheckpointBegin(Context ctx) throws IgniteCheckedException {
+                // Corrupt the partition only once, on the first checkpoint after registration.
+                if (!injected.compareAndSet(false, true))
+                    return;
+
+                GridIterator<CacheDataRow> iter = cache.context().offheap().partitionIterator(0);
+
+                assertTrue(iter.hasNext());
+
+                CacheDataRow row = iter.nextX();
+
+                AffinityTopologyVersion topVer = cctx.exchange().readyAffinityVersion();
+                GridCacheEntryEx entry = cache.entryEx(row.key(), topVer);
+
+                byte[] payload = new byte[2000];
+                new Random().nextBytes(payload);
+
+                try {
+                    BinaryObjectImpl binVal = toBinary(new Value(payload), binCtx.marshaller());
+
+                    // Replace the value with a newer version directly, bypassing the
+                    // regular update path and thus the partition update counter.
+                    boolean stored = entry.initialValue(
+                        binVal,
+                        new GridCacheVersion(row.version().topologyVersion(),
+                            row.version().nodeOrder(),
+                            row.version().order() + 1),
+                        null,
+                        null,
+                        TxState.NA,
+                        TxState.NA,
+                        TTL_ETERNAL,
+                        row.expireTime(),
+                        true,
+                        topVer,
+                        DR_NONE,
+                        false,
+                        null);
+
+                    assertTrue(stored);
+
+                    long ctrAfter = cache.context().offheap().lastUpdatedPartitionCounter(0);
+
+                    // initialValue must not advance the partition update counter.
+                    assertEquals(ctrAfter, ctrBefore);
+                }
+                catch (Exception e) {
+                    throw new IgniteCheckedException(e);
+                }
+            }
+
+            @Override public void onCheckpointBegin(Context ctx) throws IgniteCheckedException {
+                // No-op.
+            }
+
+            @Override public void beforeCheckpointBegin(Context ctx) throws IgniteCheckedException {
+                // No-op.
+            }
+        });
+
+        db.waitForCheckpoint("test-checkpoint");
+
+        ignite.snapshot().createSnapshot(SNAPSHOT_NAME).get();
+
+        Path part0 = U.searchFileRecursively(snp(ignite).snapshotLocalDir(SNAPSHOT_NAME).toPath(),
+            getPartitionFileName(0));
+
+        assertNotNull(part0);
+        assertTrue(part0.toString(), part0.toFile().exists());
+
+        IdleVerifyResultV2 res = snp(ignite).checkSnapshot(SNAPSHOT_NAME).get();
+
+        StringBuilder buf = new StringBuilder();
+        res.print(buf::append, true);
+
+        // The check must finish without exceptions, but report the mismatched partition.
+        assertTrue(F.isEmpty(res.exceptions()));
+        assertContains(log, buf.toString(), "The check procedure has finished, found 1 conflict partitions");
+    }
+
+    /**
+     * Checks that partition hashes calculated by the idle-verify task and by the
+     * snapshot verification task match for the same data, and that both tasks
+     * produce equal verification results.
+     *
+     * @throws Exception If fails.
+     */
+    @Test
+    public void testClusterSnapshotCompareHashes() throws Exception {
+        Random rnd = new Random();
+        CacheConfiguration<Integer, Value> ccfg = txCacheConfig(new CacheConfiguration<>(DEFAULT_CACHE_NAME));
+
+        // Values of random size to make per-partition hashes non-trivial.
+        IgniteEx ignite = startGridsWithCache(1, CACHE_KEYS_RANGE, k -> new Value(new byte[rnd.nextInt(32768)]), ccfg);
+
+        ignite.snapshot().createSnapshot(SNAPSHOT_NAME).get();
+
+        Map<PartitionKeyV2, List<PartitionHashRecordV2>> idleHashes = new HashMap<>();
+        Map<PartitionKeyV2, List<PartitionHashRecordV2>> snpHashes = new HashMap<>();
+
+        // Hashes of live cluster partitions collected by the idle-verify procedure.
+        IdleVerifyResultV2 idleVerifyRes = ignite.compute().execute(new TestVisorBackupPartitionsTask(idleHashes),
+            new VisorIdleVerifyTaskArg(new HashSet<>(Collections.singletonList(ccfg.getName())),
+            new HashSet<>(),
+            false,
+            CacheFilterEnum.USER,
+            true));
+
+        // Hashes of the same partitions read back from the snapshot files.
+        IdleVerifyResultV2 snpVerifyRes = ignite.compute().execute(new TestSnapshotPartitionsVerifyTask(snpHashes),
+            Collections.singletonMap(ignite.cluster().localNode(),
+                Collections.singletonList(snp(ignite).readSnapshotMetadata(SNAPSHOT_NAME,
+                    (String)ignite.configuration().getConsistentId()))));
+
+        assertEquals(idleHashes, snpHashes);
+        assertEquals(idleVerifyRes, snpVerifyRes);
+    }
+
+    /** */
+    private static class TestVisorBackupPartitionsTask extends 
VerifyBackupPartitionsTaskV2 {
+        Map<PartitionKeyV2, List<PartitionHashRecordV2>> hashes;
+
+        /**
+         * @param hashes Map of calculated partition hashes.
+         */
+        public TestVisorBackupPartitionsTask(Map<PartitionKeyV2, 
List<PartitionHashRecordV2>> hashes) {

Review comment:
       I doubt we need this here, since it would only increase the amount of changes.




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to