anton-vinogradov commented on a change in pull request #49: URL: https://github.com/apache/ignite-extensions/pull/49#discussion_r651844592
########## File path: modules/cdc-ext/modules/core/src/test/config/log4j-test.xml ########## @@ -0,0 +1,145 @@ +<?xml version="1.0" encoding="UTF-8"?> + +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +--> + +<!DOCTYPE log4j:configuration PUBLIC "-//APACHE//DTD LOG4J 1.2//EN" + "http://logging.apache.org/log4j/1.2/apidocs/org/apache/log4j/xml/doc-files/log4j.dtd"> +<!-- + Log4j configuration. +--> +<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/" debug="false"> + <!-- + Logs System.out messages to console. + --> + <appender name="CONSOLE" class="org.apache.log4j.ConsoleAppender"> + <!-- Log to STDOUT. --> + <param name="Target" value="System.out"/> + + <!-- Log from DEBUG and higher. --> + <param name="Threshold" value="DEBUG"/> + + <!-- The default pattern: Date Priority [Category] Message\n --> + <layout class="org.apache.log4j.PatternLayout"> + <param name="ConversionPattern" value="[%d{ISO8601}][%-5p][%t][%c{1}] %m%n"/> + </layout> + + <!-- Do not log beyond INFO level. --> + <filter class="org.apache.log4j.varia.LevelRangeFilter"> + <param name="levelMin" value="DEBUG"/> + <param name="levelMax" value="INFO"/> + </filter> + </appender> + + <!-- + Logs all System.err messages to console. + --> + <appender name="CONSOLE_ERR" class="org.apache.log4j.ConsoleAppender"> + <!-- Log to STDERR. --> + <param name="Target" value="System.err"/> + + <!-- Log from WARN and higher. --> + <param name="Threshold" value="WARN"/> + + <!-- The default pattern: Date Priority [Category] Message\n --> + <layout class="org.apache.log4j.PatternLayout"> + <param name="ConversionPattern" value="[%d{ISO8601}][%-5p][%t][%c{1}] %m%n"/> + </layout> + </appender> + + <!-- + Logs all output to specified file. + --> + <appender name="FILE" class="org.apache.log4j.RollingFileAppender"> + <param name="Threshold" value="DEBUG"/> + <param name="File" value="${IGNITE_HOME}/work/log/ignite.log"/> + <param name="Append" value="true"/> + <param name="MaxFileSize" value="10MB"/> + <param name="MaxBackupIndex" value="10"/> + <layout class="org.apache.log4j.PatternLayout"> + <param name="ConversionPattern" value="[%d{ISO8601}][%-5p][%t][%c{1}] %m%n"/> + </layout> + </appender> + + <!-- + Uncomment to enable Ignite query execution debugging. + --> + <!-- + <category name="org.apache.ignite.internal.processors.query"> + <level value="DEBUG"/> + </category> + --> + + <!-- + Uncomment to enable Exchange, Rebalance and Partitions workflow debugging. + --> + <!-- + <category name="org.apache.ignite.internal.processors.cache.distributed.dht.preloader"> + <level value="DEBUG"/> + </category> + + <category name="org.apache.ignite.internal.processors.cache.distributed.dht.topology"> + <level value="DEBUG"/> + </category> + --> + + <!-- + Uncomment to enable debugging of partition counters. 
+ --> + <!-- + <category name="org.apache.ignite.internal.processors.cache.PartitionTxUpdateCounterDebugWrapper"> + <level value="DEBUG"/> + </category> + --> + + <!-- + Uncomment to enable transactions debugging. + --> + <!-- + <category name="org.apache.ignite.cache.msg.tx.prepare"> + <level value="DEBUG"/> + </category> + + <category name="org.apache.ignite.cache.msg.tx.finish"> + <level value="DEBUG"/> Review comment: Could we get rid of commented sections? ########## File path: modules/cdc-ext/modules/core/src/test/config/log4j-test.xml ########## @@ -0,0 +1,145 @@ +<?xml version="1.0" encoding="UTF-8"?> + +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +--> + +<!DOCTYPE log4j:configuration PUBLIC "-//APACHE//DTD LOG4J 1.2//EN" Review comment: Could we simplify/refactor this to make it as small and simple as possible? ########## File path: modules/cdc-ext/modules/core/src/test/config/log4j-test.xml ########## @@ -0,0 +1,145 @@ +<?xml version="1.0" encoding="UTF-8"?> + +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +--> + +<!DOCTYPE log4j:configuration PUBLIC "-//APACHE//DTD LOG4J 1.2//EN" + "http://logging.apache.org/log4j/1.2/apidocs/org/apache/log4j/xml/doc-files/log4j.dtd"> +<!-- + Log4j configuration. +--> +<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/" debug="false"> + <!-- + Logs System.out messages to console. + --> + <appender name="CONSOLE" class="org.apache.log4j.ConsoleAppender"> + <!-- Log to STDOUT. --> + <param name="Target" value="System.out"/> + + <!-- Log from DEBUG and higher. --> + <param name="Threshold" value="DEBUG"/> + + <!-- The default pattern: Date Priority [Category] Message\n --> + <layout class="org.apache.log4j.PatternLayout"> + <param name="ConversionPattern" value="[%d{ISO8601}][%-5p][%t][%c{1}] %m%n"/> + </layout> + + <!-- Do not log beyond INFO level. --> + <filter class="org.apache.log4j.varia.LevelRangeFilter"> + <param name="levelMin" value="DEBUG"/> + <param name="levelMax" value="INFO"/> + </filter> + </appender> + + <!-- + Logs all System.err messages to console. 
+ --> + <appender name="CONSOLE_ERR" class="org.apache.log4j.ConsoleAppender"> + <!-- Log to STDERR. --> + <param name="Target" value="System.err"/> + + <!-- Log from WARN and higher. --> + <param name="Threshold" value="WARN"/> + + <!-- The default pattern: Date Priority [Category] Message\n --> + <layout class="org.apache.log4j.PatternLayout"> + <param name="ConversionPattern" value="[%d{ISO8601}][%-5p][%t][%c{1}] %m%n"/> + </layout> + </appender> + + <!-- + Logs all output to specified file. + --> + <appender name="FILE" class="org.apache.log4j.RollingFileAppender"> + <param name="Threshold" value="DEBUG"/> + <param name="File" value="${IGNITE_HOME}/work/log/ignite.log"/> + <param name="Append" value="true"/> + <param name="MaxFileSize" value="10MB"/> + <param name="MaxBackupIndex" value="10"/> + <layout class="org.apache.log4j.PatternLayout"> + <param name="ConversionPattern" value="[%d{ISO8601}][%-5p][%t][%c{1}] %m%n"/> + </layout> + </appender> + + <!-- + Uncomment to enable Ignite query execution debugging. + --> + <!-- + <category name="org.apache.ignite.internal.processors.query"> + <level value="DEBUG"/> + </category> + --> + + <!-- + Uncomment to enable Exchange, Rebalance and Partitions workflow debugging. + --> + <!-- + <category name="org.apache.ignite.internal.processors.cache.distributed.dht.preloader"> + <level value="DEBUG"/> + </category> + + <category name="org.apache.ignite.internal.processors.cache.distributed.dht.topology"> + <level value="DEBUG"/> + </category> + --> + + <!-- + Uncomment to enable debugging of partition counters. + --> + <!-- + <category name="org.apache.ignite.internal.processors.cache.PartitionTxUpdateCounterDebugWrapper"> + <level value="DEBUG"/> + </category> + --> + + <!-- + Uncomment to enable transactions debugging. + --> + <!-- + <category name="org.apache.ignite.cache.msg.tx.prepare"> + <level value="DEBUG"/> + </category> + + <category name="org.apache.ignite.cache.msg.tx.finish"> + <level value="DEBUG"/> + </category> + + <category name="org.apache.ignite.cache.msg.tx.recovery"> + <level value="DEBUG"/> + </category> + --> + + <!-- Disable all open source debugging. --> + <category name="org"> + <level value="INFO"/> + </category> + + <category name="org.eclipse.jetty"> + <level value="INFO"/> + </category> + + <!-- Default settings. --> + <root> + <!-- Print at info by default. --> + <level value="INFO"/> + + <!-- Append to file and console. --> + <appender-ref ref="FILE"/> + <appender-ref ref="CONSOLE"/> Review comment: Do we really need both? ########## File path: modules/cdc-ext/modules/core/src/test/config/log4j-test.xml ########## @@ -0,0 +1,145 @@ +<?xml version="1.0" encoding="UTF-8"?> + +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+--> + +<!DOCTYPE log4j:configuration PUBLIC "-//APACHE//DTD LOG4J 1.2//EN" + "http://logging.apache.org/log4j/1.2/apidocs/org/apache/log4j/xml/doc-files/log4j.dtd"> +<!-- + Log4j configuration. +--> +<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/" debug="false"> + <!-- + Logs System.out messages to console. + --> + <appender name="CONSOLE" class="org.apache.log4j.ConsoleAppender"> + <!-- Log to STDOUT. --> + <param name="Target" value="System.out"/> + + <!-- Log from DEBUG and higher. --> + <param name="Threshold" value="DEBUG"/> + + <!-- The default pattern: Date Priority [Category] Message\n --> + <layout class="org.apache.log4j.PatternLayout"> + <param name="ConversionPattern" value="[%d{ISO8601}][%-5p][%t][%c{1}] %m%n"/> + </layout> + + <!-- Do not log beyond INFO level. --> + <filter class="org.apache.log4j.varia.LevelRangeFilter"> + <param name="levelMin" value="DEBUG"/> + <param name="levelMax" value="INFO"/> + </filter> + </appender> + + <!-- + Logs all System.err messages to console. + --> + <appender name="CONSOLE_ERR" class="org.apache.log4j.ConsoleAppender"> + <!-- Log to STDERR. --> + <param name="Target" value="System.err"/> + + <!-- Log from WARN and higher. --> + <param name="Threshold" value="WARN"/> + + <!-- The default pattern: Date Priority [Category] Message\n --> + <layout class="org.apache.log4j.PatternLayout"> + <param name="ConversionPattern" value="[%d{ISO8601}][%-5p][%t][%c{1}] %m%n"/> + </layout> + </appender> + + <!-- + Logs all output to specified file. + --> + <appender name="FILE" class="org.apache.log4j.RollingFileAppender"> + <param name="Threshold" value="DEBUG"/> Review comment: Do we really need to log debug? ########## File path: modules/cdc-ext/src/main/java/org/apache/ignite/cdc/conflictresolve/CacheVersionConflictResolverImpl.java ########## @@ -0,0 +1,160 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.cdc.conflictresolve; + +import org.apache.ignite.IgniteLogger; +import org.apache.ignite.binary.BinaryObject; +import org.apache.ignite.internal.processors.cache.CacheObjectValueContext; +import org.apache.ignite.internal.processors.cache.version.CacheVersionConflictResolver; +import org.apache.ignite.internal.processors.cache.version.GridCacheVersionConflictContext; +import org.apache.ignite.internal.processors.cache.version.GridCacheVersionedEntryEx; +import org.apache.ignite.internal.util.typedef.internal.U; + +/** + * This class implements simple conflict resolution algorithm. + * Algorithm decides which version of the entry should be used "new" or "old". 
+ * The following steps performed: + * <ul> + * <li>If entry is freshly created then new version used - {@link GridCacheVersionedEntryEx#isStartVersion()}.</li> + * <li>If change made in this cluster then new version used - {@link GridCacheVersionedEntryEx#dataCenterId()}.</li> + * <li>If cluster of new entry equal to cluster of old entry + * then entry with the greater {@link GridCacheVersionedEntryEx#order()} used.</li> + * <li>If {@link #conflictResolveField} provided and field of new entry greater then new version used.</li> + * <li>If {@link #conflictResolveField} provided and field of old entry greater then old version used.</li> + * <li>Entry with the lower value of {@link GridCacheVersionedEntryEx#dataCenterId()} used.</li> + * </ul> + * + * Note, data center with lower value has greater priority e.g first (1) data center is main in case conflict can't be resolved + * automatically. + */ +public class CacheVersionConflictResolverImpl implements CacheVersionConflictResolver { + /** + * Cluster id. + * Note, cluster with lower value has greater priority e.g first (1) cluster is main in case conflict can't be resolved automatically. + */ + private final byte clusterId; + + /** + * Field for conflict resolve. + * Value of this field will be used to compare two entries in case of conflicting changes. + * values of this field must implement {@link Comparable} interface. + * <pre><i>Note, value of this field used to resolve conflict for external updates only.</i> + * + * @see CacheVersionConflictResolverImpl + */ + private final String conflictResolveField; + + /** Logger. */ + private final IgniteLogger log; + + /** If {@code true} then conflict resolving with the value field enabled. */ + private boolean conflictResolveFieldEnabled; + + /** + * @param clusterId Data center id. + * @param conflictResolveField Field to resolve conflicts. + * @param log Logger. + */ + public CacheVersionConflictResolverImpl(byte clusterId, String conflictResolveField, IgniteLogger log) { + this.clusterId = clusterId; + this.conflictResolveField = conflictResolveField; + this.log = log; + + conflictResolveFieldEnabled = conflictResolveField != null; + } + + /** {@inheritDoc} */ + @Override public <K, V> GridCacheVersionConflictContext<K, V> resolve( + CacheObjectValueContext ctx, + GridCacheVersionedEntryEx<K, V> oldEntry, + GridCacheVersionedEntryEx<K, V> newEntry, + boolean atomicVerComparator + ) { + GridCacheVersionConflictContext<K, V> res = new GridCacheVersionConflictContext<>(ctx, oldEntry, newEntry); + + if (isUseNew(ctx, oldEntry, newEntry)) + res.useNew(); + else { + log.warning("Skip update due to the conflict [key=" + newEntry.key() + ", fromCluster=" + newEntry.dataCenterId() + + ", toCluster=" + oldEntry.dataCenterId() + ']'); + + res.useOld(); + } + + return res; + } + + /** + * @param ctx Context. + * @param oldEntry Old entry. + * @param newEntry New entry. + * @param <K> Key type. + * @param <V> Key type. + * @return {@code True} is should use new entry. + */ + @SuppressWarnings({"unchecked", "rawtypes"}) + private <K, V> boolean isUseNew( + CacheObjectValueContext ctx, + GridCacheVersionedEntryEx<K, V> oldEntry, + GridCacheVersionedEntryEx<K, V> newEntry + ) { + if (newEntry.dataCenterId() == clusterId) // Update made on the local cluster always win. + return true; + + if (oldEntry.isStartVersion()) // New entry. + return true; + + if (oldEntry.dataCenterId() == newEntry.dataCenterId()) + return newEntry.version().compareTo(oldEntry.version()) > 0; // New version from the same cluster. 
+ + if (conflictResolveFieldEnabled) { + Object oldVal = oldEntry.value(ctx); + Object newVal = newEntry.value(ctx); + + if (oldVal != null && newVal != null) { + Comparable o; + Comparable n; + + try { + if (oldVal instanceof BinaryObject) { + o = ((BinaryObject)oldVal).field(conflictResolveField); + n = ((BinaryObject)newVal).field(conflictResolveField); + } + else { + o = U.field(oldVal, conflictResolveField); + n = U.field(newVal, conflictResolveField); + } + + if (o == null || n == null) + return o == null; + + return o.compareTo(n) < 0; + } + catch (Exception e) { + log.error("Error while resolving replication conflict with field '" + + conflictResolveField + "'.\nConflict resolve with the field disabled.", e); + + conflictResolveFieldEnabled = false; + } + } + } + + // Cluster with the lower ID have biggest priority(e.g. first cluster is main). + return newEntry.dataCenterId() < oldEntry.dataCenterId(); Review comment: Warn message with the key should be also generated here since the decision made by fuzzy logic. BTW, do we able to mention keys in logs in theory? How about PCI-DSS? ########## File path: modules/cdc-ext/src/test/java/org/apache/ignite/cdc/CacheConflictOperationsTest.java ########## @@ -0,0 +1,269 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.cdc; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.binary.BinaryObject; +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.cdc.kafka.CdcKafkaReplicationTest.Data; +import org.apache.ignite.cdc.conflictresolve.CacheVersionConflictResolverPluginProvider; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.processors.cache.CacheObject; +import org.apache.ignite.internal.processors.cache.CacheObjectImpl; +import org.apache.ignite.internal.processors.cache.IgniteInternalCache; +import org.apache.ignite.internal.processors.cache.KeyCacheObject; +import org.apache.ignite.internal.processors.cache.KeyCacheObjectImpl; +import org.apache.ignite.internal.processors.cache.dr.GridCacheDrInfo; +import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import static java.util.Collections.singletonMap; +import static org.apache.ignite.cache.CacheAtomicityMode.ATOMIC; +import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL; +import static org.apache.ignite.cdc.kafka.CdcKafkaReplicationTest.checkCRC; +import static org.apache.ignite.cdc.kafka.CdcKafkaReplicationTest.generateSingleData; + +/** + * Cache conflict operations test. + */ +@RunWith(Parameterized.class) +public class CacheConflictOperationsTest extends GridCommonAbstractTest { + /** Cache mode. */ + @Parameterized.Parameter + public CacheAtomicityMode cacheMode; + + /** Cluster id. */ + @Parameterized.Parameter(1) + public byte clusterId; + + /** @return Test parameters. */ + @Parameterized.Parameters(name = "cacheMode={0}, clusterId={1}") + public static Collection<?> parameters() { + return Arrays.asList(new Object[][] { + {ATOMIC, THIRD_CLUSTER}, + {TRANSACTIONAL, THIRD_CLUSTER}, + {ATOMIC, FIRST_CLUSTER}, + {TRANSACTIONAL, FIRST_CLUSTER}, + }); + } + + /** */ + private static IgniteCache<String, Data> cache; + + /** */ + private static IgniteInternalCache<BinaryObject, BinaryObject> cachex; + + /** */ + private static IgniteEx cli; + + /** Cluster have a greater priority that {@link #SECOND_CLUSTER}. */ + private static final byte FIRST_CLUSTER = 1; + + /** */ + private static final byte SECOND_CLUSTER = 2; + + /** Cluster have a lower priority that {@link #SECOND_CLUSTER}. 
*/ + private static final byte THIRD_CLUSTER = 3; + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + CacheVersionConflictResolverPluginProvider<?> pluginCfg = new CacheVersionConflictResolverPluginProvider<>(); + + pluginCfg.setClusterId(SECOND_CLUSTER); + pluginCfg.setCaches(new HashSet<>(Collections.singleton(DEFAULT_CACHE_NAME))); + pluginCfg.setConflictResolveField(conflictResolveField()); + + return super.getConfiguration(igniteInstanceName) + .setPluginProviders(pluginCfg); + } + + /** {@inheritDoc} */ + @Override protected void beforeTestsStarted() throws Exception { + startGrid(1); + cli = startClientGrid(2); + + cache = cli.createCache(new CacheConfiguration<String, Data>(DEFAULT_CACHE_NAME).setAtomicityMode(cacheMode)); + cachex = cli.cachex(DEFAULT_CACHE_NAME); + } + + /** Tests that regular cache operations works with the conflict resolver when there is no update conflicts. */ + @Test + public void testSimpleUpdates() { + String key = "UpdatesWithoutConflict"; + + put(key); + + put(key); + + remove(key); + } + + /** + * Tests that {@code IgniteInternalCache#*AllConflict} cache operations works with the conflict resolver + * when there is no update conflicts. + */ + @Test + public void testUpdatesFromOtherClusterWithoutConflict() throws Exception { + String key = "UpdateFromOtherClusterWithoutConflict"; + + putx(key(key, clusterId), clusterId, 1, true); + + putx(key(key, clusterId), clusterId, 2, true); + + removex(key(key, clusterId), clusterId, 3, true); + } + + + /** + * Tests that {@code IgniteInternalCache#*AllConflict} cache operations works with the conflict resolver + * when there is no update conflicts. + */ + @Test + public void testUpdatesReorderFromOtherCluster() throws Exception { + String key = "UpdateClusterUpdateReorder"; + + putx(key(key, clusterId), clusterId, 2, true); + + // Update with the equal or lower order should fail. + putx(key(key, clusterId), clusterId, 2, false); + putx(key(key, clusterId), clusterId, 1, false); + + // Remove with the equal or lower order should fail. + removex(key(key, clusterId), clusterId, 2, false); + removex(key(key, clusterId), clusterId, 1, false); + + // Remove with the higher order should succeed. + putx(key(key, clusterId), clusterId, 3, true); + } + + /** Tests cache operations for entry replicated from another cluster. */ + @Test + public void testUpdatesConflict() throws Exception { + String key = "UpdateThisClusterConflict0"; + + putx(key(key, clusterId), clusterId, 1, true); + + // Local remove for other cluster entry should succeed. + remove(key(key, clusterId)); + + // Conflict replicated update succeed only if cluster has a greater priority than this cluster. + putx(key(key, clusterId), clusterId, 2, clusterId == FIRST_CLUSTER); + + key = "UpdateThisDCConflict1"; + + putx(key(key, clusterId), clusterId, 3, true); + + // Local update for other cluster entry should succeed. + put(key(key, clusterId)); + + key = "UpdateThisDCConflict2"; + + put(key(key, clusterId)); + + // Conflict replicated remove succeed only if DC has a greater priority than this DC. + removex(key(key, clusterId), clusterId, 4, clusterId == FIRST_CLUSTER); + + key = "UpdateThisDCConflict3"; + + put(key(key, clusterId)); + + // Conflict replicated update succeed only if DC has a greater priority than this DC. 
+ putx(key(key, clusterId), clusterId, 5, clusterId == FIRST_CLUSTER || conflictResolveField() != null); + } + + /** */ + private void put(String key) { + Data newVal = generateSingleData(1); + + cache.put(key, newVal); + + assertEquals(newVal, cache.get(key)); + + checkCRC(cache.get(key), 1); + } + + /** */ + private void putx(String k, byte clusterId, long order, boolean expectSuccess) throws IgniteCheckedException { + Data oldVal = cache.get(k); + Data newVal = generateSingleData(1); + + KeyCacheObject key = new KeyCacheObjectImpl(k, null, cachex.context().affinity().partition(k)); + CacheObject val = new CacheObjectImpl(cli.binary().toBinary(newVal), null); + + cachex.putAllConflict(singletonMap(key, new GridCacheDrInfo(val, new GridCacheVersion(1, order, 1, clusterId)))); + + if (expectSuccess) { Review comment: seems you should just check the val and version equality to expected? ########## File path: modules/cdc-ext/src/test/java/org/apache/ignite/cdc/CacheConflictOperationsTest.java ########## @@ -0,0 +1,269 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.cdc; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.binary.BinaryObject; +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.cdc.kafka.CdcKafkaReplicationTest.Data; +import org.apache.ignite.cdc.conflictresolve.CacheVersionConflictResolverPluginProvider; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.processors.cache.CacheObject; +import org.apache.ignite.internal.processors.cache.CacheObjectImpl; +import org.apache.ignite.internal.processors.cache.IgniteInternalCache; +import org.apache.ignite.internal.processors.cache.KeyCacheObject; +import org.apache.ignite.internal.processors.cache.KeyCacheObjectImpl; +import org.apache.ignite.internal.processors.cache.dr.GridCacheDrInfo; +import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import static java.util.Collections.singletonMap; +import static org.apache.ignite.cache.CacheAtomicityMode.ATOMIC; +import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL; +import static org.apache.ignite.cdc.kafka.CdcKafkaReplicationTest.checkCRC; +import static org.apache.ignite.cdc.kafka.CdcKafkaReplicationTest.generateSingleData; + +/** + * Cache conflict operations test. + */ +@RunWith(Parameterized.class) +public class CacheConflictOperationsTest extends GridCommonAbstractTest { + /** Cache mode. */ + @Parameterized.Parameter + public CacheAtomicityMode cacheMode; + + /** Cluster id. */ + @Parameterized.Parameter(1) + public byte clusterId; + + /** @return Test parameters. */ + @Parameterized.Parameters(name = "cacheMode={0}, clusterId={1}") + public static Collection<?> parameters() { + return Arrays.asList(new Object[][] { + {ATOMIC, THIRD_CLUSTER}, + {TRANSACTIONAL, THIRD_CLUSTER}, + {ATOMIC, FIRST_CLUSTER}, + {TRANSACTIONAL, FIRST_CLUSTER}, Review comment: how about ``` loop ... loop ... ``` here and at others? ########## File path: modules/cdc-ext/modules/core/src/test/config/tests.properties ########## @@ -0,0 +1,153 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + Review comment: Could we simplify/refactor this to make it as small and simple as possible? 
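A minimal sketch of the `loop ... loop` suggestion above for `CacheConflictOperationsTest#parameters()`, assuming the same `CacheAtomicityMode` values and cluster-id constants as in the quoted diff (the rewrite itself is illustrative, not part of the PR):

```java
// Hypothetical rewrite of parameters(): build the {cacheMode, clusterId} matrix with
// nested loops instead of listing every combination by hand.
// Requires java.util.ArrayList, java.util.EnumSet and java.util.List in addition to
// the imports already present in the quoted test.
@Parameterized.Parameters(name = "cacheMode={0}, clusterId={1}")
public static Collection<Object[]> parameters() {
    List<Object[]> params = new ArrayList<>();

    for (CacheAtomicityMode mode : EnumSet.of(ATOMIC, TRANSACTIONAL)) {
        for (byte clusterId : new byte[] {FIRST_CLUSTER, THIRD_CLUSTER})
            params.add(new Object[] {mode, clusterId});
    }

    return params;
}
```

The same pattern could replace the hand-written matrices in the other parameterized tests the comment refers to.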
########## File path: modules/cdc-ext/src/main/java/org/apache/ignite/cdc/conflictresolve/CacheVersionConflictResolverImpl.java ########## @@ -0,0 +1,160 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.cdc.conflictresolve; + +import org.apache.ignite.IgniteLogger; +import org.apache.ignite.binary.BinaryObject; +import org.apache.ignite.internal.processors.cache.CacheObjectValueContext; +import org.apache.ignite.internal.processors.cache.version.CacheVersionConflictResolver; +import org.apache.ignite.internal.processors.cache.version.GridCacheVersionConflictContext; +import org.apache.ignite.internal.processors.cache.version.GridCacheVersionedEntryEx; +import org.apache.ignite.internal.util.typedef.internal.U; + +/** + * This class implements simple conflict resolution algorithm. + * Algorithm decides which version of the entry should be used "new" or "old". + * The following steps performed: + * <ul> + * <li>If entry is freshly created then new version used - {@link GridCacheVersionedEntryEx#isStartVersion()}.</li> + * <li>If change made in this cluster then new version used - {@link GridCacheVersionedEntryEx#dataCenterId()}.</li> + * <li>If cluster of new entry equal to cluster of old entry + * then entry with the greater {@link GridCacheVersionedEntryEx#order()} used.</li> + * <li>If {@link #conflictResolveField} provided and field of new entry greater then new version used.</li> + * <li>If {@link #conflictResolveField} provided and field of old entry greater then old version used.</li> + * <li>Entry with the lower value of {@link GridCacheVersionedEntryEx#dataCenterId()} used.</li> + * </ul> + * + * Note, data center with lower value has greater priority e.g first (1) data center is main in case conflict can't be resolved + * automatically. + */ +public class CacheVersionConflictResolverImpl implements CacheVersionConflictResolver { + /** + * Cluster id. + * Note, cluster with lower value has greater priority e.g first (1) cluster is main in case conflict can't be resolved automatically. + */ + private final byte clusterId; + + /** + * Field for conflict resolve. + * Value of this field will be used to compare two entries in case of conflicting changes. + * values of this field must implement {@link Comparable} interface. + * <pre><i>Note, value of this field used to resolve conflict for external updates only.</i> + * + * @see CacheVersionConflictResolverImpl + */ + private final String conflictResolveField; + + /** Logger. */ + private final IgniteLogger log; + + /** If {@code true} then conflict resolving with the value field enabled. */ + private boolean conflictResolveFieldEnabled; + + /** + * @param clusterId Data center id. + * @param conflictResolveField Field to resolve conflicts. 
+ * @param log Logger. + */ + public CacheVersionConflictResolverImpl(byte clusterId, String conflictResolveField, IgniteLogger log) { + this.clusterId = clusterId; + this.conflictResolveField = conflictResolveField; + this.log = log; + + conflictResolveFieldEnabled = conflictResolveField != null; + } + + /** {@inheritDoc} */ + @Override public <K, V> GridCacheVersionConflictContext<K, V> resolve( + CacheObjectValueContext ctx, + GridCacheVersionedEntryEx<K, V> oldEntry, + GridCacheVersionedEntryEx<K, V> newEntry, + boolean atomicVerComparator + ) { + GridCacheVersionConflictContext<K, V> res = new GridCacheVersionConflictContext<>(ctx, oldEntry, newEntry); + + if (isUseNew(ctx, oldEntry, newEntry)) + res.useNew(); + else { + log.warning("Skip update due to the conflict [key=" + newEntry.key() + ", fromCluster=" + newEntry.dataCenterId() + + ", toCluster=" + oldEntry.dataCenterId() + ']'); + + res.useOld(); + } + + return res; + } + + /** + * @param ctx Context. + * @param oldEntry Old entry. + * @param newEntry New entry. + * @param <K> Key type. + * @param <V> Key type. + * @return {@code True} is should use new entry. + */ + @SuppressWarnings({"unchecked", "rawtypes"}) + private <K, V> boolean isUseNew( + CacheObjectValueContext ctx, + GridCacheVersionedEntryEx<K, V> oldEntry, + GridCacheVersionedEntryEx<K, V> newEntry + ) { + if (newEntry.dataCenterId() == clusterId) // Update made on the local cluster always win. + return true; + + if (oldEntry.isStartVersion()) // New entry. + return true; + + if (oldEntry.dataCenterId() == newEntry.dataCenterId()) + return newEntry.version().compareTo(oldEntry.version()) > 0; // New version from the same cluster. + + if (conflictResolveFieldEnabled) { + Object oldVal = oldEntry.value(ctx); + Object newVal = newEntry.value(ctx); + + if (oldVal != null && newVal != null) { + Comparable o; + Comparable n; + + try { + if (oldVal instanceof BinaryObject) { + o = ((BinaryObject)oldVal).field(conflictResolveField); + n = ((BinaryObject)newVal).field(conflictResolveField); + } + else { + o = U.field(oldVal, conflictResolveField); + n = U.field(newVal, conflictResolveField); + } + + if (o == null || n == null) Review comment: Should this case produce at least a warning? ########## File path: modules/cdc-ext/src/test/java/org/apache/ignite/cdc/CacheConflictOperationsTest.java ########## @@ -0,0 +1,269 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.cdc; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.binary.BinaryObject; +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.cdc.kafka.CdcKafkaReplicationTest.Data; +import org.apache.ignite.cdc.conflictresolve.CacheVersionConflictResolverPluginProvider; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.processors.cache.CacheObject; +import org.apache.ignite.internal.processors.cache.CacheObjectImpl; +import org.apache.ignite.internal.processors.cache.IgniteInternalCache; +import org.apache.ignite.internal.processors.cache.KeyCacheObject; +import org.apache.ignite.internal.processors.cache.KeyCacheObjectImpl; +import org.apache.ignite.internal.processors.cache.dr.GridCacheDrInfo; +import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import static java.util.Collections.singletonMap; +import static org.apache.ignite.cache.CacheAtomicityMode.ATOMIC; +import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL; +import static org.apache.ignite.cdc.kafka.CdcKafkaReplicationTest.checkCRC; +import static org.apache.ignite.cdc.kafka.CdcKafkaReplicationTest.generateSingleData; + +/** + * Cache conflict operations test. + */ +@RunWith(Parameterized.class) +public class CacheConflictOperationsTest extends GridCommonAbstractTest { + /** Cache mode. */ + @Parameterized.Parameter + public CacheAtomicityMode cacheMode; + + /** Cluster id. */ + @Parameterized.Parameter(1) + public byte clusterId; + + /** @return Test parameters. */ + @Parameterized.Parameters(name = "cacheMode={0}, clusterId={1}") + public static Collection<?> parameters() { + return Arrays.asList(new Object[][] { + {ATOMIC, THIRD_CLUSTER}, + {TRANSACTIONAL, THIRD_CLUSTER}, + {ATOMIC, FIRST_CLUSTER}, + {TRANSACTIONAL, FIRST_CLUSTER}, + }); + } + + /** */ + private static IgniteCache<String, Data> cache; + + /** */ + private static IgniteInternalCache<BinaryObject, BinaryObject> cachex; + + /** */ + private static IgniteEx cli; + + /** Cluster have a greater priority that {@link #SECOND_CLUSTER}. */ + private static final byte FIRST_CLUSTER = 1; + + /** */ + private static final byte SECOND_CLUSTER = 2; + + /** Cluster have a lower priority that {@link #SECOND_CLUSTER}. 
*/ + private static final byte THIRD_CLUSTER = 3; + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + CacheVersionConflictResolverPluginProvider<?> pluginCfg = new CacheVersionConflictResolverPluginProvider<>(); + + pluginCfg.setClusterId(SECOND_CLUSTER); + pluginCfg.setCaches(new HashSet<>(Collections.singleton(DEFAULT_CACHE_NAME))); + pluginCfg.setConflictResolveField(conflictResolveField()); + + return super.getConfiguration(igniteInstanceName) + .setPluginProviders(pluginCfg); + } + + /** {@inheritDoc} */ + @Override protected void beforeTestsStarted() throws Exception { + startGrid(1); + cli = startClientGrid(2); + + cache = cli.createCache(new CacheConfiguration<String, Data>(DEFAULT_CACHE_NAME).setAtomicityMode(cacheMode)); + cachex = cli.cachex(DEFAULT_CACHE_NAME); + } + + /** Tests that regular cache operations works with the conflict resolver when there is no update conflicts. */ + @Test + public void testSimpleUpdates() { + String key = "UpdatesWithoutConflict"; + + put(key); + + put(key); + + remove(key); + } + + /** + * Tests that {@code IgniteInternalCache#*AllConflict} cache operations works with the conflict resolver + * when there is no update conflicts. + */ + @Test + public void testUpdatesFromOtherClusterWithoutConflict() throws Exception { + String key = "UpdateFromOtherClusterWithoutConflict"; + + putx(key(key, clusterId), clusterId, 1, true); + + putx(key(key, clusterId), clusterId, 2, true); + + removex(key(key, clusterId), clusterId, 3, true); + } + + + /** + * Tests that {@code IgniteInternalCache#*AllConflict} cache operations works with the conflict resolver + * when there is no update conflicts. + */ + @Test + public void testUpdatesReorderFromOtherCluster() throws Exception { + String key = "UpdateClusterUpdateReorder"; + + putx(key(key, clusterId), clusterId, 2, true); + + // Update with the equal or lower order should fail. + putx(key(key, clusterId), clusterId, 2, false); + putx(key(key, clusterId), clusterId, 1, false); + + // Remove with the equal or lower order should fail. + removex(key(key, clusterId), clusterId, 2, false); + removex(key(key, clusterId), clusterId, 1, false); + + // Remove with the higher order should succeed. + putx(key(key, clusterId), clusterId, 3, true); + } + + /** Tests cache operations for entry replicated from another cluster. */ + @Test + public void testUpdatesConflict() throws Exception { + String key = "UpdateThisClusterConflict0"; + + putx(key(key, clusterId), clusterId, 1, true); + + // Local remove for other cluster entry should succeed. + remove(key(key, clusterId)); + + // Conflict replicated update succeed only if cluster has a greater priority than this cluster. + putx(key(key, clusterId), clusterId, 2, clusterId == FIRST_CLUSTER); + + key = "UpdateThisDCConflict1"; + + putx(key(key, clusterId), clusterId, 3, true); + + // Local update for other cluster entry should succeed. + put(key(key, clusterId)); + + key = "UpdateThisDCConflict2"; + + put(key(key, clusterId)); + + // Conflict replicated remove succeed only if DC has a greater priority than this DC. + removex(key(key, clusterId), clusterId, 4, clusterId == FIRST_CLUSTER); + + key = "UpdateThisDCConflict3"; + + put(key(key, clusterId)); + + // Conflict replicated update succeed only if DC has a greater priority than this DC. 
+ putx(key(key, clusterId), clusterId, 5, clusterId == FIRST_CLUSTER || conflictResolveField() != null); + } + + /** */ + private void put(String key) { + Data newVal = generateSingleData(1); + + cache.put(key, newVal); + + assertEquals(newVal, cache.get(key)); + + checkCRC(cache.get(key), 1); + } + + /** */ + private void putx(String k, byte clusterId, long order, boolean expectSuccess) throws IgniteCheckedException { + Data oldVal = cache.get(k); + Data newVal = generateSingleData(1); + + KeyCacheObject key = new KeyCacheObjectImpl(k, null, cachex.context().affinity().partition(k)); + CacheObject val = new CacheObjectImpl(cli.binary().toBinary(newVal), null); + + cachex.putAllConflict(singletonMap(key, new GridCacheDrInfo(val, new GridCacheVersion(1, order, 1, clusterId)))); + + if (expectSuccess) { + assertTrue(cache.containsKey(k)); + + assertEquals(newVal, cache.get(k)); + + checkCRC(cache.get(k), newVal.getIter()); Review comment: looks overcomplicated, why not just equals? ########## File path: modules/cdc-ext/src/test/java/org/apache/ignite/cdc/CacheConflictOperationsTest.java ########## @@ -0,0 +1,269 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.cdc; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.binary.BinaryObject; +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.cdc.kafka.CdcKafkaReplicationTest.Data; +import org.apache.ignite.cdc.conflictresolve.CacheVersionConflictResolverPluginProvider; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.processors.cache.CacheObject; +import org.apache.ignite.internal.processors.cache.CacheObjectImpl; +import org.apache.ignite.internal.processors.cache.IgniteInternalCache; +import org.apache.ignite.internal.processors.cache.KeyCacheObject; +import org.apache.ignite.internal.processors.cache.KeyCacheObjectImpl; +import org.apache.ignite.internal.processors.cache.dr.GridCacheDrInfo; +import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import static java.util.Collections.singletonMap; +import static org.apache.ignite.cache.CacheAtomicityMode.ATOMIC; +import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL; +import static org.apache.ignite.cdc.kafka.CdcKafkaReplicationTest.checkCRC; +import static org.apache.ignite.cdc.kafka.CdcKafkaReplicationTest.generateSingleData; Review comment: Please get rid of cross-tests imports ########## File path: modules/cdc-ext/src/test/java/org/apache/ignite/cdc/CacheConflictOperationsTest.java ########## @@ -0,0 +1,269 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.cdc; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.binary.BinaryObject; +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.cdc.kafka.CdcKafkaReplicationTest.Data; +import org.apache.ignite.cdc.conflictresolve.CacheVersionConflictResolverPluginProvider; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.processors.cache.CacheObject; +import org.apache.ignite.internal.processors.cache.CacheObjectImpl; +import org.apache.ignite.internal.processors.cache.IgniteInternalCache; +import org.apache.ignite.internal.processors.cache.KeyCacheObject; +import org.apache.ignite.internal.processors.cache.KeyCacheObjectImpl; +import org.apache.ignite.internal.processors.cache.dr.GridCacheDrInfo; +import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import static java.util.Collections.singletonMap; +import static org.apache.ignite.cache.CacheAtomicityMode.ATOMIC; +import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL; +import static org.apache.ignite.cdc.kafka.CdcKafkaReplicationTest.checkCRC; +import static org.apache.ignite.cdc.kafka.CdcKafkaReplicationTest.generateSingleData; + +/** + * Cache conflict operations test. + */ +@RunWith(Parameterized.class) +public class CacheConflictOperationsTest extends GridCommonAbstractTest { + /** Cache mode. */ + @Parameterized.Parameter + public CacheAtomicityMode cacheMode; + + /** Cluster id. */ + @Parameterized.Parameter(1) + public byte clusterId; + + /** @return Test parameters. */ + @Parameterized.Parameters(name = "cacheMode={0}, clusterId={1}") + public static Collection<?> parameters() { + return Arrays.asList(new Object[][] { + {ATOMIC, THIRD_CLUSTER}, + {TRANSACTIONAL, THIRD_CLUSTER}, + {ATOMIC, FIRST_CLUSTER}, + {TRANSACTIONAL, FIRST_CLUSTER}, + }); + } + + /** */ + private static IgniteCache<String, Data> cache; + + /** */ + private static IgniteInternalCache<BinaryObject, BinaryObject> cachex; + + /** */ + private static IgniteEx cli; + + /** Cluster have a greater priority that {@link #SECOND_CLUSTER}. */ + private static final byte FIRST_CLUSTER = 1; + + /** */ + private static final byte SECOND_CLUSTER = 2; + + /** Cluster have a lower priority that {@link #SECOND_CLUSTER}. */ + private static final byte THIRD_CLUSTER = 3; Review comment: *_CLUSTER_ID ########## File path: modules/cdc-ext/src/test/java/org/apache/ignite/cdc/CacheConflictOperationsTest.java ########## @@ -0,0 +1,269 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.cdc; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.binary.BinaryObject; +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.cdc.kafka.CdcKafkaReplicationTest.Data; +import org.apache.ignite.cdc.conflictresolve.CacheVersionConflictResolverPluginProvider; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.processors.cache.CacheObject; +import org.apache.ignite.internal.processors.cache.CacheObjectImpl; +import org.apache.ignite.internal.processors.cache.IgniteInternalCache; +import org.apache.ignite.internal.processors.cache.KeyCacheObject; +import org.apache.ignite.internal.processors.cache.KeyCacheObjectImpl; +import org.apache.ignite.internal.processors.cache.dr.GridCacheDrInfo; +import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import static java.util.Collections.singletonMap; +import static org.apache.ignite.cache.CacheAtomicityMode.ATOMIC; +import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL; +import static org.apache.ignite.cdc.kafka.CdcKafkaReplicationTest.checkCRC; +import static org.apache.ignite.cdc.kafka.CdcKafkaReplicationTest.generateSingleData; + +/** + * Cache conflict operations test. + */ +@RunWith(Parameterized.class) +public class CacheConflictOperationsTest extends GridCommonAbstractTest { + /** Cache mode. */ + @Parameterized.Parameter + public CacheAtomicityMode cacheMode; + + /** Cluster id. */ + @Parameterized.Parameter(1) + public byte clusterId; + + /** @return Test parameters. */ + @Parameterized.Parameters(name = "cacheMode={0}, clusterId={1}") + public static Collection<?> parameters() { + return Arrays.asList(new Object[][] { + {ATOMIC, THIRD_CLUSTER}, + {TRANSACTIONAL, THIRD_CLUSTER}, + {ATOMIC, FIRST_CLUSTER}, + {TRANSACTIONAL, FIRST_CLUSTER}, + }); + } + + /** */ + private static IgniteCache<String, Data> cache; + + /** */ + private static IgniteInternalCache<BinaryObject, BinaryObject> cachex; + + /** */ + private static IgniteEx cli; Review comment: client -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: [email protected]
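Two of the comments above ask for a warning when `CacheVersionConflictResolverImpl#isUseNew` falls back to the "fuzzy" cluster-priority decision, or hits a null conflict-resolve field. A hedged, method-level sketch of what that logging could look like; the helper name and message wording are illustrative and not part of the PR, and logging the key mirrors the warning the quoted `resolve()` already emits:

```java
/**
 * Illustrative helper (hypothetical name): the final cluster-priority fallback of
 * isUseNew() logs the key before deciding, so "fuzzy" resolutions are visible in logs.
 */
private <K, V> boolean useNewByClusterPriority(
    GridCacheVersionedEntryEx<K, V> oldEntry,
    GridCacheVersionedEntryEx<K, V> newEntry
) {
    // Decision is made only by the static cluster priority, so record it for diagnostics.
    log.warning("Conflict resolved by cluster priority [key=" + newEntry.key() +
        ", fromCluster=" + newEntry.dataCenterId() + ", toCluster=" + oldEntry.dataCenterId() + ']');

    // Cluster with the lower id has the higher priority (e.g. the first cluster is the main one).
    return newEntry.dataCenterId() < oldEntry.dataCenterId();
}
```

A warning of the same shape could also be emitted in the `o == null || n == null` branch before returning, addressing the "should this case produce at least a warning" comment; whether keys may appear in logs at all (the PCI-DSS question) is left to the PR discussion.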
