jacek-lewandowski commented on code in PR #2534: URL: https://github.com/apache/cassandra/pull/2534#discussion_r1314657454
##########
test/distributed/org/apache/cassandra/distributed/test/accord/AccordMetricsTest.java:
##########
@@ -0,0 +1,258 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test.accord;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.function.Function;
+
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+import org.apache.cassandra.distributed.api.IMessageFilters;
+import org.apache.cassandra.exceptions.ReadTimeoutException;
+import org.apache.cassandra.exceptions.WriteTimeoutException;
+import org.apache.cassandra.metrics.AccordMetrics;
+import org.apache.cassandra.metrics.DefaultNameFactory;
+import org.apache.cassandra.net.Verb;
+import org.apache.cassandra.service.accord.AccordService;
+import org.apache.cassandra.service.accord.exceptions.ReadPreemptedException;
+import org.apache.cassandra.service.accord.exceptions.WritePreemptedException;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+
+public class AccordMetricsTest extends AccordTestBase
+{
+    private static final Logger logger = LoggerFactory.getLogger(AccordMetricsTest.class);
+
+    @Override
+    protected Logger logger()
+    {
+        return logger;
+    }
+
+    @BeforeClass
+    public static void setupClass() throws IOException
+    {
+        AccordTestBase.setupClass();
+        SHARED_CLUSTER.forEach(node -> node.runOnInstance(() -> AccordService.instance().createEpochFromConfigUnsafe()));
+        SHARED_CLUSTER.forEach(node -> node.runOnInstance(() -> AccordService.instance().setCacheSize(0)));
+        for (int i = 0; i < SHARED_CLUSTER.size(); i++) // initialize metrics
+            logger.trace(SHARED_CLUSTER.get(i + 1).callOnInstance(() -> AccordMetrics.readMetrics.toString() + AccordMetrics.writeMetrics.toString()));
+    }
+
+    String writeCql()
+    {
+        return "BEGIN TRANSACTION\n" +
+               " LET val = (SELECT v FROM " + currentTable + " WHERE k=? AND c=?);\n" +
+               " SELECT val.v;\n" +
+               " UPDATE " + currentTable + " SET v = v + 1 WHERE k=? AND c=?;\n" +
+               "COMMIT TRANSACTION";
+    }
+
+    String readCql()
+    {
+        return "BEGIN TRANSACTION\n" +
+               " LET val = (SELECT v FROM " + currentTable + " WHERE k=? AND c=?);\n" +
+               " SELECT val.v;\n" +
+               "COMMIT TRANSACTION";
+    }
+
+    Map<Integer, Map<String, Long>> countingMetrics0;
+
+    @Before
+    public void beforeTest()
+    {
+        SHARED_CLUSTER.filters().reset();
+        SHARED_CLUSTER.schemaChange("CREATE TABLE " + currentTable + " (k int, c int, v int, PRIMARY KEY (k, c))");
+        SHARED_CLUSTER.coordinator(1).execute("INSERT INTO " + currentTable + " (k, c, v) VALUES (0, 0, 0)", ConsistencyLevel.ALL);
+    }
+
+    @Test
+    public void testRegularMetrics() throws Exception
+    {
+        countingMetrics0 = getMetrics();
+        SHARED_CLUSTER.coordinator(1).executeWithResult(writeCql(), ConsistencyLevel.ALL, 0, 0, 0, 0);
+        assertCoordinatorMetrics(0, "rw", 1, 0, 0, 0, 0);
+        assertCoordinatorMetrics(1, "rw", 0, 0, 0, 0, 0);
+        assertReplicaMetrics(0, "rw", 1, 1, 1);
+        assertReplicaMetrics(1, "rw", 1, 1, 1);
+        assertZeroMetrics("ro");
+
+        countingMetrics0 = getMetrics();
+        SHARED_CLUSTER.coordinator(1).executeWithResult(readCql(), ConsistencyLevel.ALL, 0, 0);
+        assertCoordinatorMetrics(0, "ro", 1, 0, 0, 0, 0);
+        assertCoordinatorMetrics(1, "ro", 0, 0, 0, 0, 0);
+        assertReplicaMetrics(0, "ro", 1, 1, 0);
+        assertReplicaMetrics(1, "ro", 1, 1, 0);
+        assertZeroMetrics("rw");
+    }
+
+    @Test
+    public void testPreemptionMetrics()
+    {
+        IMessageFilters.Filter commitFilter = SHARED_CLUSTER.filters().outbound().verbs(Verb.ACCORD_COMMIT_REQ.id).from(1).to(2).drop();
+        commitFilter.on();
+
+        countingMetrics0 = getMetrics();
+        try
+        {
+            SHARED_CLUSTER.coordinator(1).executeWithResult(writeCql(), ConsistencyLevel.ALL, 0, 0, 0, 0);
+        }
+        catch (RuntimeException t)
+        {
+            assertThat(t.getCause().getClass().getName()).isEqualTo(WritePreemptedException.class.getName());
+        }
+
+        assertCoordinatorMetrics(0, "rw", 1, 0, 1, 0, 0);
+        assertCoordinatorMetrics(1, "rw", 0, 0, 0, 0, 1);
+        assertReplicaMetrics(0, "rw", 1, 1, 1);
+        assertReplicaMetrics(1, "rw", 1, 1, 1);
+
+        assertZeroMetrics("ro");
+
+        countingMetrics0 = getMetrics();
+        try
+        {
+            SHARED_CLUSTER.coordinator(1).executeWithResult(readCql(), ConsistencyLevel.ALL, 0, 0);
+        }
+        catch (RuntimeException t)
+        {
+            assertThat(t.getCause().getClass().getName()).isEqualTo(ReadPreemptedException.class.getName());
+        }
+
+        assertCoordinatorMetrics(0, "ro", 1, 0, 1, 0, 0);
+        assertCoordinatorMetrics(1, "ro", 0, 0, 0, 0, 1);
+        assertReplicaMetrics(0, "ro", 1, 1, 0);
+        assertReplicaMetrics(1, "ro", 1, 1, 0);
+
+        assertZeroMetrics("rw");
+    }
+
+    @Test
+    public void testTimeoutMetrics()
+    {
+        IMessageFilters.Filter preAcceptFilter = SHARED_CLUSTER.filters().outbound().verbs(Verb.ACCORD_PRE_ACCEPT_REQ.id).from(1).to(2).drop();
+        preAcceptFilter.on();
+
+        countingMetrics0 = getMetrics();
+        try
+        {
+            SHARED_CLUSTER.coordinator(1).executeWithResult(readCql(), ConsistencyLevel.ALL, 0, 0);
+        }
+        catch (RuntimeException t)
+        {
+            assertThat(t.getCause().getClass().getName()).isEqualTo(ReadTimeoutException.class.getName());
+        }
+
+        assertCoordinatorMetrics(0, "ro", 0, 0, 0, 1, 0);
+        assertCoordinatorMetrics(1, "ro", 0, 0, 0, 0, 0);
+        assertReplicaMetrics(0, "ro", 0, 0, 0);
+        assertReplicaMetrics(1, "ro", 0, 0, 0);
+
+        assertZeroMetrics("rw");
+
+        countingMetrics0 = getMetrics();
+        try
+        {
+            SHARED_CLUSTER.coordinator(1).executeWithResult(writeCql(), ConsistencyLevel.ALL, 0, 0, 0, 0);
+        }
+        catch (RuntimeException t)
+        {
+            assertThat(t.getCause().getClass().getName()).isEqualTo(WriteTimeoutException.class.getName());
+        }
+
+        assertCoordinatorMetrics(0, "rw", 0, 0, 0, 1, 0);
+        assertCoordinatorMetrics(1, "rw", 0, 0, 0, 0, 0);
+        assertReplicaMetrics(0, "rw", 0, 0, 0);
+        assertReplicaMetrics(1, "rw", 0, 0, 0);
+
+        assertZeroMetrics("ro");
+    }
+
+    private void assertZeroMetrics(String scope)
+    {
+        for (int i = 0; i < SHARED_CLUSTER.size(); i++)
+        {
+            assertCoordinatorMetrics(i, scope, 0, 0, 0, 0, 0);
+            assertReplicaMetrics(i, scope, 0, 0, 0);
+        }
+    }
+
+    private void assertCoordinatorMetrics(int node, String scope, long fastPathTrx, long slowPathTrx, long preemptions, long timeouts, long recoveries)
+    {
+        DefaultNameFactory nameFactory = new DefaultNameFactory("accord-coordinator", scope);
+        Map<String, Long> metrics = diff(countingMetrics0).get(node);
+        logger.info("Metrics for node {} / {}: {}", node, scope, metrics);
+        Function<String, Long> metric = n -> metrics.get(nameFactory.createMetricName(n).getMetricName());
+        assertThat(metric.apply("FastPathTrx")).isEqualTo(fastPathTrx);
+        assertThat(metric.apply("SlowPathTrx")).isEqualTo(slowPathTrx);
+        assertThat(metric.apply("PreemptedTrx")).isEqualTo(preemptions);
+        assertThat(metric.apply("TimedoutTrx")).isEqualTo(timeouts);
+        assertThat(metric.apply("RecoveryDelay")).isEqualTo(recoveries);
+        assertThat(metric.apply("RecoveryTime")).isEqualTo(recoveries);
+        assertThat(metric.apply("Dependencies")).isEqualTo(fastPathTrx + slowPathTrx);
+    }
+
+    private void assertReplicaMetrics(int node, String scope, long commits, long executions, long applications)
+    {
+        DefaultNameFactory nameFactory = new DefaultNameFactory("accord-replica", scope);
+        Map<String, Long> metrics = diff(countingMetrics0).get(node);
+        Function<String, Long> metric = n -> metrics.get(nameFactory.createMetricName(n).getMetricName());
+        assertThat(metric.apply("CommitLatency")).isEqualTo(commits);
+        assertThat(metric.apply("ExecuteLatency")).isEqualTo(executions);
+        assertThat(metric.apply("ApplyLatency")).isEqualTo(applications);
+        assertThat(metric.apply("ApplyDuration")).isEqualTo(applications);
+        assertThat(metric.apply("Dependencies")).isEqualTo(executions);

Review Comment:
   Do you mean just those strings or full mbean names?


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

