This is an automated email from the ASF dual-hosted git repository.

tkalkirill pushed a commit to branch ignite-26849
in repository https://gitbox.apache.org/repos/asf/ignite-3.git
commit 833463d1b3c9cbb4c3f61ac463a7f1f797ee9d67
Author:     Kirill Tkalenko <[email protected]>
AuthorDate: Mon Nov 17 16:25:44 2025 +0300

    IGNITE-26849 wip
---
 .../internal/BaseTruncateRaftLogAbstractTest.java | 202 +++++++++++++++++++++
 .../ItTruncateRaftLogAndRebalanceTest.java        |  77 ++++++++
 .../ItTruncateRaftLogAndRestartNodesTest.java     | 190 +------------------
 3 files changed, 289 insertions(+), 180 deletions(-)

diff --git a/modules/runner/src/integrationTest/java/org/apache/ignite/internal/BaseTruncateRaftLogAbstractTest.java b/modules/runner/src/integrationTest/java/org/apache/ignite/internal/BaseTruncateRaftLogAbstractTest.java
new file mode 100644
index 00000000000..0e68f89a5bf
--- /dev/null
+++ b/modules/runner/src/integrationTest/java/org/apache/ignite/internal/BaseTruncateRaftLogAbstractTest.java
@@ -0,0 +1,202 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal;
+
+import static org.apache.ignite.internal.catalog.CatalogService.DEFAULT_STORAGE_PROFILE;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+
+import java.util.List;
+import java.util.stream.IntStream;
+import org.apache.ignite.internal.app.IgniteImpl;
+import org.apache.ignite.internal.binarytuple.BinaryTupleReader;
+import org.apache.ignite.internal.replicator.ReplicationGroupId;
+import org.apache.ignite.internal.schema.BinaryRow;
+import org.apache.ignite.internal.schema.Column;
+import org.apache.ignite.internal.schema.SchemaDescriptor;
+import org.apache.ignite.internal.tostring.IgniteToStringInclude;
+import org.apache.ignite.internal.tostring.S;
+import org.apache.ignite.raft.jraft.core.NodeImpl;
+
+/** Base class for raft log truncation integration tests, containing shared helper methods and classes. */
+class BaseTruncateRaftLogAbstractTest extends ClusterPerTestIntegrationTest {
+    static final String ZONE_NAME = "TEST_ZONE";
+
+    static final String TABLE_NAME = "TEST_TABLE";
+
+    void createZoneAndTablePerson(String zoneName, String tableName, int replicas, int partitions) {
+        executeSql(createZoneDdl(zoneName, replicas, partitions));
+        executeSql(createTablePersonDdl(zoneName, tableName));
+    }
+
+    void insertPeople(String tableName, Person... people) {
+        for (Person person : people) {
+            executeSql(insertPersonDml(tableName, person));
+        }
+    }
+
+    NodeImpl raftNodeImpl(int nodeIndex, ReplicationGroupId replicationGroupId) {
+        return raftNodeImpl(igniteImpl(nodeIndex), replicationGroupId);
+    }
+
+    NodeImpl raftNodeImpl(IgniteImpl ignite, ReplicationGroupId replicationGroupId) {
+        NodeImpl[] node = {null};
+
+        ignite.raftManager().forEach((raftNodeId, raftGroupService) -> {
+            if (replicationGroupId.equals(raftNodeId.groupId())) {
+                assertNull(
+                        node[0],
+                        String.format("NodeImpl already found: [node=%s, replicationGroupId=%s]", ignite.name(), replicationGroupId)
+                );
+
+                node[0] = (NodeImpl) raftGroupService.getRaftNode();
+            }
+        });
+
+        NodeImpl res = node[0];
+
+        assertNotNull(res, String.format("Can't find NodeImpl: [node=%s, replicationGroupId=%s]", ignite.name(), replicationGroupId));
+
+        return res;
+    }
+
+    private static String createTablePersonDdl(String zoneName, String tableName) {
+        return String.format(
+                "create table if not exists %s (%s bigint primary key, %s varchar, %s bigint) zone %s",
+                tableName,
+                Person.ID_COLUMN_NAME, Person.NAME_COLUMN_NAME, Person.SALARY_COLUMN_NAME,
+                zoneName
+        );
+    }
+
+    private static String createZoneDdl(String zoneName, int replicas, int partitions) {
+        return String.format(
+                "create zone %s with replicas=%s, partitions=%s, storage_profiles='%s'",
+                zoneName, replicas, partitions, DEFAULT_STORAGE_PROFILE
+        );
+    }
+
+    private static String insertPersonDml(String tableName, Person person) {
+        return String.format(
+                "insert into %s(%s, %s, %s) values(%s, '%s', %s)",
+                tableName,
+                Person.ID_COLUMN_NAME, Person.NAME_COLUMN_NAME, Person.SALARY_COLUMN_NAME,
+                person.id, person.name, person.salary
+        );
+    }
+
+    static Person[] generatePeople(int count) {
+        assertThat(count, greaterThanOrEqualTo(0));
+
+        return IntStream.range(0, count)
+                .mapToObj(i -> new Person(i, "name-" + i, i + 1_000))
+                .toArray(Person[]::new);
+    }
+
+    static Person[] toPeopleFromSqlRows(List<List<Object>> sqlResult) {
+        return sqlResult.stream()
+                .map(BaseTruncateRaftLogAbstractTest::toPersonFromSqlRow)
+                .toArray(Person[]::new);
+    }
+
+    private static Person toPersonFromSqlRow(List<Object> sqlRow) {
+        assertThat(sqlRow, hasSize(3));
+        assertThat(sqlRow.get(0), instanceOf(Long.class));
+        assertThat(sqlRow.get(1), instanceOf(String.class));
+        assertThat(sqlRow.get(2), instanceOf(Long.class));
+
+        return new Person((Long) sqlRow.get(0), (String) sqlRow.get(1), (Long) sqlRow.get(2));
+    }
+
+    static Person toPersonFromBinaryRow(SchemaDescriptor schemaDescriptor, BinaryRow binaryRow) {
+        var binaryTupleReader = new BinaryTupleReader(schemaDescriptor.length(), binaryRow.tupleSlice());
+
+        Column idColumn = findColumnByName(schemaDescriptor, Person.ID_COLUMN_NAME);
+        Column nameColumn = findColumnByName(schemaDescriptor, Person.NAME_COLUMN_NAME);
+        Column salaryColumn = findColumnByName(schemaDescriptor, Person.SALARY_COLUMN_NAME);
+
+        return new Person(
+                binaryTupleReader.longValue(idColumn.positionInRow()),
+                binaryTupleReader.stringValue(nameColumn.positionInRow()),
+                binaryTupleReader.longValue(salaryColumn.positionInRow())
+        );
+    }
+
+    private static Column findColumnByName(SchemaDescriptor schemaDescriptor, String columnName) {
+        return schemaDescriptor.columns().stream()
+                .filter(column -> columnName.equalsIgnoreCase(column.name()))
+                .findFirst()
+                .orElseThrow(() -> new AssertionError(
+                        String.format("Can't find column by name: [columnName=%s, schema=%s]", columnName, schemaDescriptor)
+                ));
+    }
+
+    static class Person {
+        static final String ID_COLUMN_NAME = "ID";
+
+        static final String NAME_COLUMN_NAME = "NAME";
+
+        static final String SALARY_COLUMN_NAME = "SALARY";
+
+        @IgniteToStringInclude
+        final long id;
+
+        @IgniteToStringInclude
+        final String name;
+
+        @IgniteToStringInclude
+        final long salary;
+
+        private Person(long id, String name, long salary) {
+            this.id = id;
+            this.name = name;
+            this.salary = salary;
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (this == o) {
+                return true;
+            }
+            if (o == null || getClass() != o.getClass()) {
+                return false;
+            }
+
+            Person p = (Person) o;
+
+            return id == p.id && salary == p.salary && name.equals(p.name);
+        }
+
+        @Override
+        public int hashCode() {
+            int result = (int) (id ^ (id >>> 32));
+            result = 31 * result + (int) (salary ^ (salary >>> 32));
+            result = 31 * result + name.hashCode();
+            return result;
+        }
+
+        @Override
+        public String toString() {
+            return S.toString(Person.class, this);
+        }
+    }
+}
diff --git a/modules/runner/src/integrationTest/java/org/apache/ignite/internal/ItTruncateRaftLogAndRebalanceTest.java b/modules/runner/src/integrationTest/java/org/apache/ignite/internal/ItTruncateRaftLogAndRebalanceTest.java
new file mode 100644
index 00000000000..a79f5f95e92
--- /dev/null
+++ b/modules/runner/src/integrationTest/java/org/apache/ignite/internal/ItTruncateRaftLogAndRebalanceTest.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal;
+
+import static org.awaitility.Awaitility.await;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+import com.typesafe.config.parser.ConfigDocumentFactory;
+import java.util.Arrays;
+import java.util.concurrent.TimeUnit;
+import org.apache.ignite.internal.replicator.ReplicationGroupId;
+import org.junit.jupiter.api.Test;
+
+/** Class for testing various raft log truncation and rebalancing scenarios. */
+public class ItTruncateRaftLogAndRebalanceTest extends BaseTruncateRaftLogAbstractTest {
+    private static final int RAFT_SNAPSHOT_INTERVAL_SECS = 5;
+
+    @Override
+    protected int initialNodes() {
+        return 3;
+    }
+
+    @Override
+    protected String getNodeBootstrapConfigTemplate() {
+        return ConfigDocumentFactory.parseString(super.getNodeBootstrapConfigTemplate())
+                .withValueText("ignite.system.properties.raftSnapshotIntervalSecs", Integer.toString(RAFT_SNAPSHOT_INTERVAL_SECS))
+                .render();
+    }
+
+    @Test
+    void test() throws Exception {
+        createZoneAndTablePerson(ZONE_NAME, TABLE_NAME, 3, 1);
+
+        ReplicationGroupId replicationGroupId = cluster.solePartitionId(ZONE_NAME, TABLE_NAME);
+
+        cluster.transferLeadershipTo(0, replicationGroupId);
+
+        insertPeopleAndAwaitTruncateRaftLogOnAllNodes(replicationGroupId);
+    }
+
+    private void insertPeopleAndAwaitTruncateRaftLogOnAllNodes(ReplicationGroupId replicationGroupId) {
+        long[] beforeInsertPeopleRaftFirstLogIndexes = collectRaftFirstLogIndexes(replicationGroupId);
+        assertEquals(initialNodes(), beforeInsertPeopleRaftFirstLogIndexes.length);
+
+        insertPeople(TABLE_NAME, generatePeople(1_000));
+
+        await().atMost(RAFT_SNAPSHOT_INTERVAL_SECS * 2, TimeUnit.SECONDS).until(() -> {
+            long[] raftFirstLogIndexes = collectRaftFirstLogIndexes(replicationGroupId);
+            assertEquals(initialNodes(), raftFirstLogIndexes.length);
+
+            return !Arrays.equals(beforeInsertPeopleRaftFirstLogIndexes, raftFirstLogIndexes);
+        });
+    }
+
+    private long[] collectRaftFirstLogIndexes(ReplicationGroupId replicationGroupId) {
+        return cluster.runningNodes()
+                .map(TestWrappers::unwrapIgniteImpl)
+                .map(igniteImpl -> raftNodeImpl(igniteImpl, replicationGroupId))
+                .mapToLong(raftNodeImpl -> raftNodeImpl.logStorage().getFirstLogIndex())
+                .toArray();
+    }
+}
diff --git a/modules/runner/src/integrationTest/java/org/apache/ignite/internal/ItTruncateRaftLogAndRestartNodesTest.java b/modules/runner/src/integrationTest/java/org/apache/ignite/internal/ItTruncateRaftLogAndRestartNodesTest.java
index 955b83a8c36..b01e040e812 100644
--- a/modules/runner/src/integrationTest/java/org/apache/ignite/internal/ItTruncateRaftLogAndRestartNodesTest.java
+++ b/modules/runner/src/integrationTest/java/org/apache/ignite/internal/ItTruncateRaftLogAndRestartNodesTest.java
@@ -19,23 +19,18 @@ package org.apache.ignite.internal;
 
 import static org.apache.ignite.internal.TestWrappers.unwrapIgniteImpl;
 import static org.apache.ignite.internal.TestWrappers.unwrapTableViewInternal;
-import static org.apache.ignite.internal.catalog.CatalogService.DEFAULT_STORAGE_PROFILE;
 import static org.apache.ignite.internal.testframework.flow.TestFlowUtils.subscribeToList;
 import static org.apache.ignite.internal.testframework.matchers.CompletableFutureMatcher.willCompleteSuccessfully;
 import static org.apache.ignite.internal.util.CompletableFutures.allOf;
 import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.Matchers.arrayWithSize;
 import static org.hamcrest.Matchers.greaterThan;
-import static org.hamcrest.Matchers.greaterThanOrEqualTo;
-import static org.hamcrest.Matchers.hasSize;
-import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.lessThan;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertNull;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
@@ -44,12 +39,9 @@ import java.util.concurrent.Flow.Publisher;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.function.Function;
-import java.util.stream.IntStream;
 import org.apache.ignite.internal.app.IgniteImpl;
-import org.apache.ignite.internal.binarytuple.BinaryTupleReader;
 import org.apache.ignite.internal.close.ManuallyCloseable;
 import org.apache.ignite.internal.logger.IgniteLogger;
-import org.apache.ignite.internal.logger.Loggers;
 import org.apache.ignite.internal.manager.ComponentContext;
 import org.apache.ignite.internal.network.InternalClusterNode;
 import org.apache.ignite.internal.raft.storage.LogStorageFactory;
@@ -57,7 +49,6 @@ import org.apache.ignite.internal.raft.storage.impl.IgniteJraftServiceFactory;
 import org.apache.ignite.internal.raft.util.SharedLogStorageFactoryUtils;
 import org.apache.ignite.internal.replicator.ReplicationGroupId;
 import org.apache.ignite.internal.schema.BinaryRow;
-import org.apache.ignite.internal.schema.Column;
 import org.apache.ignite.internal.schema.SchemaDescriptor;
 import org.apache.ignite.internal.storage.MvPartitionStorage;
 import org.apache.ignite.internal.table.OperationContext;
@@ -65,8 +56,6 @@ import org.apache.ignite.internal.table.TableViewInternal;
 import org.apache.ignite.internal.table.TxContext;
 import org.apache.ignite.internal.table.distributed.storage.InternalTableImpl;
 import org.apache.ignite.internal.testframework.IgniteTestUtils;
-import org.apache.ignite.internal.tostring.IgniteToStringInclude;
-import org.apache.ignite.internal.tostring.S;
 import org.apache.ignite.internal.tx.InternalTransaction;
 import org.apache.ignite.internal.util.IgniteUtils;
 import org.apache.ignite.raft.jraft.conf.ConfigurationManager;
@@ -85,13 +74,7 @@ import org.junit.jupiter.api.Test;
  * groups associated with tables.
  */
 // TODO: IGNITE-25501 Fix partition state after snapshot
-public class ItTruncateRaftLogAndRestartNodesTest extends ClusterPerTestIntegrationTest {
-    private static final IgniteLogger LOG = Loggers.forClass(ItTruncateRaftLogAndRestartNodesTest.class);
-
-    private static final String ZONE_NAME = "TEST_ZONE";
-
-    private static final String TABLE_NAME = "TEST_TABLE";
-
+public class ItTruncateRaftLogAndRestartNodesTest extends BaseTruncateRaftLogAbstractTest {
     @Override
     protected int initialNodes() {
         return 0;
@@ -164,38 +147,6 @@ public class ItTruncateRaftLogAndRestartNodesTest extends ClusterPerTestIntegrat
         }
     }
 
-    private void createZoneAndTablePerson(String zoneName, String tableName, int replicas, int partitions) {
-        executeSql(createZoneDdl(zoneName, replicas, partitions));
-        executeSql(createTablePersonDdl(zoneName, tableName));
-    }
-
-    private void insertPeople(String tableName, Person... people) {
-        for (Person person : people) {
-            executeSql(insertPersonDml(tableName, person));
-        }
-    }
-
-    private NodeImpl raftNodeImpl(int nodeIndex, ReplicationGroupId replicationGroupId) {
-        NodeImpl[] node = {null};
-
-        igniteImpl(nodeIndex).raftManager().forEach((raftNodeId, raftGroupService) -> {
-            if (replicationGroupId.equals(raftNodeId.groupId())) {
-                assertNull(
-                        node[0],
-                        String.format("NodeImpl already found: [nodeIndex=%s, replicationGroupId=%s]", nodeIndex, replicationGroupId)
-                );
-
-                node[0] = (NodeImpl) raftGroupService.getRaftNode();
-            }
-        });
-
-        NodeImpl res = node[0];
-
-        assertNotNull(res, String.format("Can't find NodeImpl: [nodeIndex=%s, replicationGroupId=%s]", nodeIndex, replicationGroupId));
-
-        return res;
-    }
-
     /**
      * Creates and prepares {@link TestLogStorageFactory} for {@link TestLogStorageFactory#createLogStorage} creation after the
      * corresponding node is stopped, so that there are no errors.
@@ -210,7 +161,7 @@
 
         NodeImpl nodeImpl = raftNodeImpl(nodeIndex, replicationGroupId);
 
-        return new TestLogStorageFactory(logStorageFactory, nodeImpl.getOptions(), nodeImpl.getRaftOptions());
+        return new TestLogStorageFactory(logStorageFactory, nodeImpl.getOptions(), nodeImpl.getRaftOptions(), log);
     }
 
     private void awaitMajority(ReplicationGroupId replicationGroupId) {
@@ -249,54 +200,6 @@ public class ItTruncateRaftLogAndRestartNodesTest extends ClusterPerTestIntegrat
         );
     }
 
-    private static String insertPersonDml(String tableName, Person person) {
-        return String.format(
-                "insert into %s(%s, %s, %s) values(%s, '%s', %s)",
-                tableName,
-                Person.ID_COLUMN_NAME, Person.NAME_COLUMN_NAME, Person.SALARY_COLUMN_NAME,
-                person.id, person.name, person.salary
-        );
-    }
-
-    private static String createTablePersonDdl(String zoneName, String tableName) {
-        return String.format(
-                "create table if not exists %s (%s bigint primary key, %s varchar, %s bigint) zone %s",
-                tableName,
-                Person.ID_COLUMN_NAME, Person.NAME_COLUMN_NAME, Person.SALARY_COLUMN_NAME,
-                zoneName
-        );
-    }
-
-    private static String createZoneDdl(String zoneName, int replicas, int partitions) {
-        return String.format(
-                "create zone %s with replicas=%s, partitions=%s, storage_profiles='%s'",
-                zoneName, replicas, partitions, DEFAULT_STORAGE_PROFILE
-        );
-    }
-
-    private static Person[] generatePeople(int count) {
-        assertThat(count, greaterThanOrEqualTo(0));
-
-        return IntStream.range(0, count)
-                .mapToObj(i -> new Person(i, "name-" + i, i + 1_000))
-                .toArray(Person[]::new);
-    }
-
-    private static Person[] toPeopleFromSqlRows(List<List<Object>> sqlResult) {
-        return sqlResult.stream()
-                .map(ItTruncateRaftLogAndRestartNodesTest::toPersonFromSqlRow)
-                .toArray(Person[]::new);
-    }
-
-    private static Person toPersonFromSqlRow(List<Object> sqlRow) {
-        assertThat(sqlRow, hasSize(3));
-        assertThat(sqlRow.get(0), instanceOf(Long.class));
-        assertThat(sqlRow.get(1), instanceOf(String.class));
-        assertThat(sqlRow.get(2), instanceOf(Long.class));
-
-        return new Person((Long) sqlRow.get(0), (String) sqlRow.get(1), (Long) sqlRow.get(2));
-    }
-
     private Person[] scanPeopleFromAllPartitions(int nodeIndex, String tableName) {
         IgniteImpl ignite = igniteImpl(nodeIndex);
 
@@ -327,10 +230,6 @@
         }
     }
 
-    private static Person[] half(Person... people) {
-        return Arrays.copyOfRange(people, 0, people.length / 2);
-    }
-
     private static Publisher<BinaryRow> scan(
             InternalTableImpl internalTableImpl,
             InternalTransaction roTx,
@@ -346,30 +245,7 @@
         );
     }
 
-    private static Person toPersonFromBinaryRow(SchemaDescriptor schemaDescriptor, BinaryRow binaryRow) {
-        var binaryTupleReader = new BinaryTupleReader(schemaDescriptor.length(), binaryRow.tupleSlice());
-
-        Column idColumn = findColumnByName(schemaDescriptor, Person.ID_COLUMN_NAME);
-        Column nameColumn = findColumnByName(schemaDescriptor, Person.NAME_COLUMN_NAME);
-        Column salaryColumn = findColumnByName(schemaDescriptor, Person.SALARY_COLUMN_NAME);
-
-        return new Person(
-                binaryTupleReader.longValue(idColumn.positionInRow()),
-                binaryTupleReader.stringValue(nameColumn.positionInRow()),
-                binaryTupleReader.longValue(salaryColumn.positionInRow())
-        );
-    }
-
-    private static Column findColumnByName(SchemaDescriptor schemaDescriptor, String columnName) {
-        return schemaDescriptor.columns().stream()
-                .filter(column -> columnName.equalsIgnoreCase(column.name()))
-                .findFirst()
-                .orElseThrow(() -> new AssertionError(
-                        String.format("Can't find column by name: [columnName=%s, schema=%s]", columnName, schemaDescriptor)
-                ));
-    }
-
-    private static void truncateRaftLogSuffixHalfOfChanges(LogStorage logStorage, long startRaftLogIndex) {
+    private void truncateRaftLogSuffixHalfOfChanges(LogStorage logStorage, long startRaftLogIndex) {
         long lastLogIndex = logStorage.getLastLogIndex();
         long lastIndexKept = lastLogIndex - (lastLogIndex - startRaftLogIndex) / 2;
 
@@ -381,7 +257,7 @@
                 )
         );
 
-        LOG.info(
+        log.info(
                 "Successfully truncated raft log suffix: [startRaftLogIndex={}, oldLastLogIndex={}, lastIndexKept={}, term={}]",
                 startRaftLogIndex, lastLogIndex, lastIndexKept, logStorage.getEntry(lastIndexKept).getId().getTerm()
         );
@@ -400,16 +276,20 @@
 
         private final RaftOptions raftOptions;
 
+        private final IgniteLogger log;
+
        private final AtomicBoolean closeGuard = new AtomicBoolean();

        private TestLogStorageFactory(
                LogStorageFactory logStorageFactory,
                NodeOptions nodeOptions,
-                RaftOptions raftOptions
+                RaftOptions raftOptions,
+                IgniteLogger log
        ) {
            this.logStorageFactory = logStorageFactory;
            this.nodeOptions = nodeOptions;
            this.raftOptions = raftOptions;
+            this.log = log;
        }

        /**
@@ -429,7 +309,7 @@
 
             storage.init(logStorageOptions);
 
-            LOG.info(
+            log.info(
                     "Successfully created LogStorage: [firstLogIndex={}, lastLogIndex={}, term={}]",
                     storage.getFirstLogIndex(), storage.getLastLogIndex(), storage.getEntry(storage.getLastLogIndex()).getId().getTerm()
             );
@@ -446,54 +326,4 @@
             assertThat(logStorageFactory.stopAsync(), willCompleteSuccessfully());
         }
     }
-
-    private static class Person {
-        static final String ID_COLUMN_NAME = "ID";
-
-        static final String NAME_COLUMN_NAME = "NAME";
-
-        static final String SALARY_COLUMN_NAME = "SALARY";
-
-        @IgniteToStringInclude
-        final long id;
-
-        @IgniteToStringInclude
-        final String name;
-
-        @IgniteToStringInclude
-        final long salary;
-
-        private Person(long id, String name, long salary) {
-            this.id = id;
-            this.name = name;
-            this.salary = salary;
-        }
-
-        @Override
-        public boolean equals(Object o) {
-            if (this == o) {
-                return true;
-            }
-            if (o == null || getClass() != o.getClass()) {
-                return false;
-            }
-
-            Person p = (Person) o;
-
-            return id == p.id && salary == p.salary && name.equals(p.name);
-        }
-
-        @Override
-        public int hashCode() {
-            int result = (int) (id ^ (id >>> 32));
-            result = 31 * result + (int) (salary ^ (salary >>> 32));
-            result = 31 * result + name.hashCode();
-            return result;
-        }
-
-        @Override
-        public String toString() {
-            return S.toString(Person.class, this);
-        }
-    }
 }
