This is an automated email from the ASF dual-hosted git repository.

dcapwell pushed a commit to branch cassandra-3.11
in repository https://gitbox.apache.org/repos/asf/cassandra.git

commit dd4ca8e0ad809e99185704bb3ab13e746ba94dce
Merge: c61f390 4b0c081
Author: David Capwell <[email protected]>
AuthorDate: Tue Jul 28 18:00:37 2020 -0700

    Merge branch 'cassandra-3.0' into cassandra-3.11

 CHANGES.txt                                        |   1 +
 src/java/org/apache/cassandra/db/LegacyLayout.java |   2 +
 src/java/org/apache/cassandra/tools/NodeProbe.java |   2 +-
 .../org/apache/cassandra/utils/ByteBufferUtil.java |   2 +
 .../distributed/upgrade/MigrateDropColumns.java    | 113 +++++++++++++++++++++
 .../upgrade/MigrateDropColumns22To30To311Test.java |  11 ++
 .../upgrade/MigrateDropColumns22To311Test.java     |  11 ++
 .../upgrade/MigrateDropColumns30To311Test.java     |  11 ++
 8 files changed, 152 insertions(+), 1 deletion(-)

diff --cc CHANGES.txt
index 812e020,d22ba43..d6ff2e1
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -1,8 -1,7 +1,9 @@@
 -3.0.22:
 - * 3.x fails to start if commit log has range tombstones from a column which is also deleted (CASSANDRA-15970)
 +3.11.8
 + * Frozen RawTuple is not annotated with frozen in the toString method (CASSANDRA-15857)
 +Merged from 3.0:
   * Forbid altering UDTs used in partition keys (CASSANDRA-15933)
   * Fix empty/null json string representation (CASSANDRA-15896)
++ * 3.x fails to start if commit log has range tombstones from a column which is also deleted (CASSANDRA-15970)
  Merged from 2.2:
   * Fix CQL parsing of collections when the column type is reversed (CASSANDRA-15814)
  
diff --cc src/java/org/apache/cassandra/tools/NodeProbe.java
index 2c4e409,2425821..8e712db
--- a/src/java/org/apache/cassandra/tools/NodeProbe.java
+++ b/src/java/org/apache/cassandra/tools/NodeProbe.java
@@@ -277,17 -273,10 +277,17 @@@ public class NodeProbe implements AutoC
          return ssProxy.upgradeSSTables(keyspaceName, excludeCurrentVersion, jobs, tableNames);
      }
  
 +    public int garbageCollect(String tombstoneOption, int jobs, String keyspaceName, String... tableNames) throws IOException, ExecutionException, InterruptedException
 +    {
 +        return ssProxy.garbageCollect(tombstoneOption, jobs, keyspaceName, tableNames);
 +    }
 +
      private void checkJobs(PrintStream out, int jobs)
      {
 +        // TODO this should get the configured number of concurrent_compactors via JMX and not using DatabaseDescriptor
-         DatabaseDescriptor.toolInitialization();
++        DatabaseDescriptor.toolInitialization(false); // if running in dtest, this would fail if true (default)
          if (jobs > DatabaseDescriptor.getConcurrentCompactors())
 -            out.println(String.format("jobs (%d) is bigger than configured concurrent_compactors (%d), using at most %d threads", jobs, DatabaseDescriptor.getConcurrentCompactors(), DatabaseDescriptor.getConcurrentCompactors()));
 +            out.println(String.format("jobs (%d) is bigger than configured concurrent_compactors (%d) on this host, using at most %d threads", jobs, DatabaseDescriptor.getConcurrentCompactors(), DatabaseDescriptor.getConcurrentCompactors()));
      }
  
      public void forceKeyspaceCleanup(PrintStream out, int jobs, String keyspaceName, String... tableNames) throws IOException, ExecutionException, InterruptedException
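
The garbageCollect method added above simply forwards to the StorageServiceMBean proxy, so it is called the same way as the existing upgradeSSTables entry point. As a rough usage sketch, not part of this patch: the host/port, keyspace/table names, job count and "ROW" granularity below are illustrative assumptions (ROW is nodetool garbagecollect's default granularity):

    import java.io.IOException;
    import java.util.concurrent.ExecutionException;

    import org.apache.cassandra.tools.NodeProbe;

    public class GarbageCollectSketch
    {
        public static void main(String[] args) throws IOException, ExecutionException, InterruptedException
        {
            // NodeProbe is AutoCloseable, so try-with-resources tears down the JMX connection.
            // Host and port are placeholders, not mandated by this commit.
            try (NodeProbe probe = new NodeProbe("127.0.0.1", 7199))
            {
                // Forwards to StorageServiceMBean.garbageCollect, as wired up in this patch;
                // "ROW" granularity, 2 jobs and the keyspace/table names are example arguments only.
                int status = probe.garbageCollect("ROW", 2, "my_keyspace", "my_table");

                // 0 is the success status code for these SSTable-wide operations.
                if (status != 0)
                    System.err.println("garbage collection did not complete cleanly: " + status);
            }
        }
    }
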
diff --cc test/distributed/org/apache/cassandra/distributed/upgrade/MigrateDropColumns.java
index 0000000,0000000..c8c04d1
new file mode 100644
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MigrateDropColumns.java
@@@ -1,0 -1,0 +1,113 @@@
++package org.apache.cassandra.distributed.upgrade;
++
++import java.util.Arrays;
++import java.util.Collections;
++import java.util.Objects;
++
++import com.google.common.collect.ImmutableMap;
++import com.google.common.collect.ImmutableSet;
++import com.google.common.collect.Sets;
++import org.junit.Assert;
++import org.junit.Test;
++
++import org.apache.cassandra.db.marshal.CompositeType;
++import org.apache.cassandra.db.marshal.Int32Type;
++import org.apache.cassandra.db.marshal.MapType;
++import org.apache.cassandra.distributed.api.ConsistencyLevel;
++import org.apache.cassandra.distributed.api.Feature;
++import org.apache.cassandra.distributed.api.ICoordinator;
++import org.apache.cassandra.distributed.api.QueryResults;
++import org.apache.cassandra.distributed.api.SimpleQueryResult;
++import org.apache.cassandra.distributed.shared.AssertUtils;
++import org.apache.cassandra.distributed.shared.Versions;
++import org.apache.cassandra.distributed.test.ThriftClientUtils;
++import org.apache.cassandra.thrift.Deletion;
++import org.apache.cassandra.thrift.Mutation;
++import org.apache.cassandra.thrift.SlicePredicate;
++import org.apache.cassandra.thrift.SliceRange;
++import org.apache.cassandra.utils.ByteBufferUtil;
++
++public abstract class MigrateDropColumns extends UpgradeTestBase
++{
++    private static final MapType MAP_TYPE = MapType.getInstance(Int32Type.instance, Int32Type.instance, true);
++
++    private final Versions.Major initial;
++    private final Versions.Major[] upgrade;
++
++    protected MigrateDropColumns(Versions.Major initial, Versions.Major... upgrade)
++    {
++        this.initial = Objects.requireNonNull(initial, "initial");
++        this.upgrade = Objects.requireNonNull(upgrade, "upgrade");
++    }
++
++    @Test
++    public void dropColumns() throws Throwable
++    {
++        new TestCase()
++        .upgrade(initial, upgrade)
++        .withConfig(c -> c.with(Feature.NATIVE_PROTOCOL))
++        .setup(cluster -> {
++            cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl(pk int, 
tables map<int, int>, PRIMARY KEY (pk))"));
++
++            ICoordinator coordinator = cluster.coordinator(1);
++
++            // write a RT to pk=0
++            ThriftClientUtils.thriftClient(cluster.get(1), thrift -> {
++                thrift.set_keyspace(KEYSPACE);
++
++                Mutation mutation = new Mutation();
++                Deletion deletion = new Deletion();
++                SlicePredicate slice = new SlicePredicate();
++                SliceRange range = new SliceRange();
++                range.setStart(CompositeType.build(ByteBufferUtil.bytes("tables")));
++                range.setFinish(CompositeType.build(ByteBufferUtil.bytes("tables")));
++                slice.setSlice_range(range);
++                deletion.setPredicate(slice);
++                deletion.setTimestamp(System.currentTimeMillis());
++                mutation.setDeletion(deletion);
++
++                thrift.batch_mutate(Collections.singletonMap(ByteBufferUtil.bytes(0),
++                                                             Collections.singletonMap("tbl", Arrays.asList(mutation))),
++                                    org.apache.cassandra.thrift.ConsistencyLevel.ALL);
++            });
++
++            // write table to pk=1
++            // NOTE: because jvm-dtest doesn't support collections in the execute interface (see CASSANDRA-15969)
++            // need to encode to a ByteBuffer first
++            coordinator.execute(withKeyspace("INSERT INTO %s.tbl (pk, tables) VALUES (?, ?)"), ConsistencyLevel.ONE, 1, MAP_TYPE.decompose(ImmutableMap.of(1, 1)));
++
++            cluster.forEach(inst -> inst.flush(KEYSPACE));
++
++            cluster.schemaChange(withKeyspace("ALTER TABLE %s.tbl DROP 
tables"));
++        })
++        .runAfterClusterUpgrade(cluster -> {
++            ICoordinator coordinator = cluster.coordinator(1);
++            SimpleQueryResult qr = coordinator.executeWithResult("SELECT column_name " +
++                                                                 "FROM system_schema.dropped_columns " +
++                                                                 "WHERE keyspace_name=?" +
++                                                                 " AND table_name=?;",
++                                                                 ConsistencyLevel.ALL, KEYSPACE, "tbl");
++            Assert.assertEquals(ImmutableSet.of("tables"), Sets.newHashSet(qr.map(r -> r.getString("column_name"))));
++
++            assertRows(coordinator);
++
++            // upgradesstables, make sure everything is still working
++            cluster.forEach(n -> n.nodetoolResult("upgradesstables", KEYSPACE).asserts().success());
++
++            assertRows(coordinator);
++        })
++        .run();
++    }
++
++    private static void assertRows(ICoordinator coordinator)
++    {
++        // since only a RT was written to this row there is no liveness information, so the row will be skipped
++        AssertUtils.assertRows(
++        coordinator.executeWithResult(withKeyspace("SELECT * FROM %s.tbl WHERE pk=?"), ConsistencyLevel.ALL, 0),
++        QueryResults.empty());
++
++        AssertUtils.assertRows(
++        coordinator.executeWithResult(withKeyspace("SELECT * FROM %s.tbl WHERE pk=?"), ConsistencyLevel.ALL, 1),
++        QueryResults.builder().row(1).build());
++    }
++}
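
The NOTE about CASSANDRA-15969 above is why the INSERT binds MAP_TYPE.decompose(ImmutableMap.of(1, 1)) rather than a plain java.util.Map. A minimal standalone sketch of that encode/decode round trip (the class name and println are mine; MapType, Int32Type and decompose appear in the test above, and compose is AbstractType's standard inverse):

    import java.nio.ByteBuffer;
    import java.util.Map;

    import com.google.common.collect.ImmutableMap;

    import org.apache.cassandra.db.marshal.Int32Type;
    import org.apache.cassandra.db.marshal.MapType;

    public class MapEncodingSketch
    {
        public static void main(String[] args)
        {
            // Same type as MAP_TYPE in the test: map<int, int>, multi-cell (true).
            MapType<Integer, Integer> type = MapType.getInstance(Int32Type.instance, Int32Type.instance, true);

            // decompose serializes the Java map into the ByteBuffer that the jvm-dtest
            // execute() call can bind directly (the CASSANDRA-15969 workaround).
            ByteBuffer serialized = type.decompose(ImmutableMap.of(1, 1));

            // compose is the inverse; round-tripping should yield {1=1} again.
            Map<Integer, Integer> roundTripped = type.compose(serialized);
            System.out.println(roundTripped);
        }
    }
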
diff --cc test/distributed/org/apache/cassandra/distributed/upgrade/MigrateDropColumns22To30To311Test.java
index 0000000,0000000..e98b023
new file mode 100644
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MigrateDropColumns22To30To311Test.java
@@@ -1,0 -1,0 +1,11 @@@
++package org.apache.cassandra.distributed.upgrade;
++
++import org.apache.cassandra.distributed.shared.Versions;
++
++public class MigrateDropColumns22To30To311Test extends MigrateDropColumns
++{
++    public MigrateDropColumns22To30To311Test()
++    {
++        super(Versions.Major.v22, Versions.Major.v30, Versions.Major.v3X);
++    }
++}
diff --cc test/distributed/org/apache/cassandra/distributed/upgrade/MigrateDropColumns22To311Test.java
index 0000000,0000000..b1670bb
new file mode 100644
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MigrateDropColumns22To311Test.java
@@@ -1,0 -1,0 +1,11 @@@
++package org.apache.cassandra.distributed.upgrade;
++
++import org.apache.cassandra.distributed.shared.Versions;
++
++public class MigrateDropColumns22To311Test extends MigrateDropColumns
++{
++    public MigrateDropColumns22To311Test()
++    {
++        super(Versions.Major.v22, Versions.Major.v3X);
++    }
++}
diff --cc test/distributed/org/apache/cassandra/distributed/upgrade/MigrateDropColumns30To311Test.java
index 0000000,0000000..a0e0d1c
new file mode 100644
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MigrateDropColumns30To311Test.java
@@@ -1,0 -1,0 +1,11 @@@
++package org.apache.cassandra.distributed.upgrade;
++
++import org.apache.cassandra.distributed.shared.Versions;
++
++public class MigrateDropColumns30To311Test extends MigrateDropColumns
++{
++    public MigrateDropColumns30To311Test()
++    {
++        super(Versions.Major.v30, Versions.Major.v3X);
++    }
++}


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
