Merge branch 'cassandra-3.0' into cassandra-3.11

Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/adc32ac8
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/adc32ac8
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/adc32ac8

Branch: refs/heads/cassandra-3.11
Commit: adc32ac836e90b8c4503030feb76ce031998ad80
Parents: 95c8aef 0521f8d
Author: Alex Petrov <oleksandr.pet...@gmail.com>
Authored: Tue Dec 19 10:55:50 2017 +0100
Committer: Alex Petrov <oleksandr.pet...@gmail.com>
Committed: Tue Dec 19 10:55:50 2017 +0100

----------------------------------------------------------------------
 CHANGES.txt                                     |   1 +
 .../apache/cassandra/index/TargetParser.java    |  17 +--
 .../DropCompactStorageThriftTest.java           | 116 +++++++++++++++++++
 3 files changed, 127 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/adc32ac8/CHANGES.txt
----------------------------------------------------------------------
diff --cc CHANGES.txt
index 3f15c5d,7746c73..5a1b891
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -1,15 -1,5 +1,16 @@@
 -3.0.16
 +3.11.2
 + * Prevent continuous schema exchange between 3.0 and 3.11 nodes (CASSANDRA-14109)
 + * Fix imbalanced disks when replacing node with same address with JBOD (CASSANDRA-14084)
 + * Reload compaction strategies when disk boundaries are invalidated (CASSANDRA-13948)
 + * Remove OpenJDK log warning (CASSANDRA-13916)
 + * Prevent compaction strategies from looping indefinitely (CASSANDRA-14079)
 + * Cache disk boundaries (CASSANDRA-13215)
 + * Add asm jar to build.xml for maven builds (CASSANDRA-11193)
 + * Round buffer size to powers of 2 for the chunk cache (CASSANDRA-13897)
 + * Update jackson JSON jars (CASSANDRA-13949)
 + * Avoid locks when checking LCS fanout and if we should defrag (CASSANDRA-13930)
 +Merged from 3.0:
+  * Fix index target computation for dense composite tables with dropped compact storage (CASSANDRA-14104)
   * Improve commit log chain marker updating (CASSANDRA-14108)
   * Extra range tombstone bound creates double rows (CASSANDRA-14008)
   * Fix SStable ordering by max timestamp in SinglePartitionReadCommand (CASSANDRA-14010)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/adc32ac8/src/java/org/apache/cassandra/index/TargetParser.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/index/TargetParser.java
index 849ad16,0000000..96d03af
mode 100644,000000..100644
--- a/src/java/org/apache/cassandra/index/TargetParser.java
+++ b/src/java/org/apache/cassandra/index/TargetParser.java
@@@ -1,90 -1,0 +1,93 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.cassandra.index;
 +
 +import java.util.regex.Matcher;
 +import java.util.regex.Pattern;
 +
 +import org.apache.commons.lang3.StringUtils;
 +
 +import org.apache.cassandra.config.CFMetaData;
 +import org.apache.cassandra.config.ColumnDefinition;
 +import org.apache.cassandra.cql3.ColumnIdentifier;
 +import org.apache.cassandra.cql3.statements.IndexTarget;
 +import org.apache.cassandra.exceptions.ConfigurationException;
 +import org.apache.cassandra.schema.IndexMetadata;
 +import org.apache.cassandra.utils.Pair;
 +
 +public class TargetParser
 +{
 +    private static final Pattern TARGET_REGEX = Pattern.compile("^(keys|entries|values|full)\\((.+)\\)$");
 +    private static final Pattern TWO_QUOTES = Pattern.compile("\"\"");
 +    private static final String QUOTE = "\"";
 +
 +    public static Pair<ColumnDefinition, IndexTarget.Type> parse(CFMetaData cfm, IndexMetadata indexDef)
 +    {
 +        String target = indexDef.options.get("target");
 +        assert target != null : String.format("No target definition found for index %s", indexDef.name);
 +        Pair<ColumnDefinition, IndexTarget.Type> result = parse(cfm, target);
 +        if (result == null)
 +            throw new ConfigurationException(String.format("Unable to parse targets for index %s (%s)", indexDef.name, target));
 +        return result;
 +    }
 +
 +    public static Pair<ColumnDefinition, IndexTarget.Type> parse(CFMetaData cfm, String target)
 +    {
 +        // if the regex matches then the target is in the form "keys(foo)", "entries(bar)" etc
 +        // if not, then it must be a simple column name and implicitly its type is VALUES
 +        Matcher matcher = TARGET_REGEX.matcher(target);
 +        String columnName;
 +        IndexTarget.Type targetType;
 +        if (matcher.matches())
 +        {
 +            targetType = IndexTarget.Type.fromString(matcher.group(1));
 +            columnName = matcher.group(2);
 +        }
 +        else
 +        {
 +            columnName = target;
 +            targetType = IndexTarget.Type.VALUES;
 +        }
 +
 +        // in the case of a quoted column name the name in the target string
 +        // will be enclosed in quotes, which we need to unwrap. It may also
 +        // include quote characters internally, escaped like so:
 +        //      abc"def -> abc""def.
 +        // Because the target string is stored in a CQL compatible form, we
 +        // need to un-escape any such quotes to get the actual column name
 +        if (columnName.startsWith(QUOTE))
 +        {
 +            columnName = StringUtils.substring(StringUtils.substring(columnName, 1), 0, -1);
 +            columnName = TWO_QUOTES.matcher(columnName).replaceAll(QUOTE);
 +        }
 +
 +        // if it's not a CQL table, we can't assume that the column name is utf8, so
-         // in that case we have to do a linear scan of the cfm's columns to get the matching one
-         if (cfm.isCQLTable())
-             return Pair.create(cfm.getColumnDefinition(new ColumnIdentifier(columnName, true)), targetType);
-         else
-             for (ColumnDefinition column : cfm.allColumns())
-                 if (column.name.toString().equals(columnName))
-                     return Pair.create(column, targetType);
++        // in that case we have to do a linear scan of the cfm's columns to get the matching one.
++        // After dropping compact storage (see CASSANDRA-10857), we can't distinguish a formerly
++        // compact/thrift table from a CQL one, so we have to fall back to linear scan in both cases.
++        ColumnDefinition cd = cfm.getColumnDefinition(new ColumnIdentifier(columnName, true));
++        if (cd != null)
++            return Pair.create(cd, targetType);
++
++        for (ColumnDefinition column : cfm.allColumns())
++            if (column.name.toString().equals(columnName))
++                return Pair.create(column, targetType);
 +
 +        return null;
 +    }
 +}
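
The quote handling above follows CQL identifier escaping: a quoted column name arrives wrapped in double quotes, with any internal quote doubled (abc"def is stored as "abc""def"). For reference, a minimal, self-contained sketch of that target-string round trip outside the Cassandra codebase; the class and method names below are illustrative and not part of this commit:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class TargetStringSketch
{
    private static final Pattern TARGET_REGEX =
        Pattern.compile("^(keys|entries|values|full)\\((.+)\\)$");

    // Mirrors the unwrapping in TargetParser.parse: strip the outer quotes,
    // then collapse each doubled quote back into a single one.
    static String unquote(String name)
    {
        if (!name.startsWith("\""))
            return name;
        return name.substring(1, name.length() - 1).replace("\"\"", "\"");
    }

    public static void main(String[] args)
    {
        Matcher m = TARGET_REGEX.matcher("keys(\"abc\"\"def\")");
        if (m.matches())
        {
            System.out.println(m.group(1));          // keys
            System.out.println(unquote(m.group(2))); // abc"def
        }
        // A bare name has no wrapper to strip; TargetParser would treat it as type VALUES.
        System.out.println(unquote("plain_column")); // plain_column
    }
}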

http://git-wip-us.apache.org/repos/asf/cassandra/blob/adc32ac8/test/unit/org/apache/cassandra/cql3/validation/operations/DropCompactStorageThriftTest.java
----------------------------------------------------------------------
diff --cc test/unit/org/apache/cassandra/cql3/validation/operations/DropCompactStorageThriftTest.java
index dde3e7b,7d81018..973412a
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/DropCompactStorageThriftTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/DropCompactStorageThriftTest.java
@@@ -37,7 -41,9 +41,9 @@@ import org.apache.cassandra.db.marshal.
  import org.apache.cassandra.db.marshal.Int32Type;
  import org.apache.cassandra.db.marshal.MapType;
  import org.apache.cassandra.db.marshal.UTF8Type;
 -import org.apache.cassandra.index.internal.CassandraIndex;
++import org.apache.cassandra.index.TargetParser;
  import org.apache.cassandra.locator.SimpleStrategy;
+ import org.apache.cassandra.serializers.MarshalException;
  import org.apache.cassandra.thrift.Cassandra;
  import org.apache.cassandra.thrift.CfDef;
  import org.apache.cassandra.thrift.Column;
@@@ -491,6 -501,112 +501,112 @@@ public class DropCompactStorageThriftTe
                    row("key1", "ckey2", "sval2", ByteBufferUtil.bytes("val2")));
      }
  
+     @Test
+     public void denseCompositeWithIndexesTest() throws Throwable
+     {
+         final String KEYSPACE = "thrift_dense_composite_table_test_ks";
+         final String TABLE = "dense_composite_table";
+ 
+         ByteBuffer aCol = createDynamicCompositeKey(ByteBufferUtil.bytes("a"));
+         ByteBuffer bCol = createDynamicCompositeKey(ByteBufferUtil.bytes("b"));
+         ByteBuffer cCol = createDynamicCompositeKey(ByteBufferUtil.bytes("c"));
+ 
+         String compositeType = "DynamicCompositeType(a => BytesType, b => TimeUUIDType, c => UTF8Type)";
+ 
+         CfDef cfDef = new CfDef();
+         cfDef.setName(TABLE);
+         cfDef.setComparator_type(compositeType);
+         cfDef.setKeyspace(KEYSPACE);
+ 
+         cfDef.setColumn_metadata(
+         Arrays.asList(new ColumnDef(aCol, "BytesType").setIndex_type(IndexType.KEYS).setIndex_name(KEYSPACE + "_a"),
+                       new ColumnDef(bCol, "BytesType").setIndex_type(IndexType.KEYS).setIndex_name(KEYSPACE + "_b"),
+                       new ColumnDef(cCol, "BytesType").setIndex_type(IndexType.KEYS).setIndex_name(KEYSPACE + "_c")));
+ 
+ 
+         KsDef ksDef = new KsDef(KEYSPACE,
+                                 SimpleStrategy.class.getName(),
+                                 Collections.singletonList(cfDef));
+         ksDef.setStrategy_options(Collections.singletonMap("replication_factor", "1"));
+ 
+         Cassandra.Client client = getClient();
+         client.system_add_keyspace(ksDef);
+         client.set_keyspace(KEYSPACE);
+ 
+         CFMetaData cfm = Keyspace.open(KEYSPACE).getColumnFamilyStore(TABLE).metadata;
+         assertFalse(cfm.isCQLTable());
+ 
+         List<Pair<ColumnDefinition, IndexTarget.Type>> compactTableTargets = new ArrayList<>();
 -        compactTableTargets.add(CassandraIndex.parseTarget(cfm, "a"));
 -        compactTableTargets.add(CassandraIndex.parseTarget(cfm, "b"));
 -        compactTableTargets.add(CassandraIndex.parseTarget(cfm, "c"));
++        compactTableTargets.add(TargetParser.parse(cfm, "a"));
++        compactTableTargets.add(TargetParser.parse(cfm, "b"));
++        compactTableTargets.add(TargetParser.parse(cfm, "c"));
+ 
+         execute(String.format("ALTER TABLE %s.%s DROP COMPACT STORAGE", KEYSPACE, TABLE));
+         cfm = Keyspace.open(KEYSPACE).getColumnFamilyStore(TABLE).metadata;
+         assertTrue(cfm.isCQLTable());
+ 
+         List<Pair<ColumnDefinition, IndexTarget.Type>> cqlTableTargets = new ArrayList<>();
 -        cqlTableTargets.add(CassandraIndex.parseTarget(cfm, "a"));
 -        cqlTableTargets.add(CassandraIndex.parseTarget(cfm, "b"));
 -        cqlTableTargets.add(CassandraIndex.parseTarget(cfm, "c"));
++        cqlTableTargets.add(TargetParser.parse(cfm, "a"));
++        cqlTableTargets.add(TargetParser.parse(cfm, "b"));
++        cqlTableTargets.add(TargetParser.parse(cfm, "c"));
+ 
+         assertEquals(compactTableTargets, cqlTableTargets);
+     }
+ 
+     private static ByteBuffer createDynamicCompositeKey(Object... objects)
+     {
+         int length = 0;
+ 
+         for (Object object : objects)
+         {
+             length += 2 * Short.BYTES +  Byte.BYTES;
+             if (object instanceof String)
+                 length += ((String) object).length();
+             else if (object instanceof UUID)
+                 length += 2 * Long.BYTES;
+             else if (object instanceof ByteBuffer)
+                 length += ((ByteBuffer) object).remaining();
+             else
+                 throw new MarshalException(object.getClass().getName() + " is not recognized as a valid type for this composite");
+         }
+ 
+         ByteBuffer out = ByteBuffer.allocate(length);
+ 
+         for (Object object : objects)
+         {
+             if (object instanceof String)
+             {
+                 String cast = (String) object;
+ 
+                 out.putShort((short) (0x8000 | 's'));
+                 out.putShort((short) cast.length());
+                 out.put(cast.getBytes());
+                 out.put((byte) 0);
+             }
+             else if (object instanceof UUID)
+             {
+                 out.putShort((short) (0x8000 | 't'));
+                 out.putShort((short) 16);
+                 out.put(UUIDGen.decompose((UUID) object));
+                 out.put((byte) 0);
+             }
+             else if (object instanceof ByteBuffer)
+             {
+                 ByteBuffer bytes = ((ByteBuffer) object).duplicate();
+                 out.putShort((short) (0x8000 | 'b'));
+                 out.putShort((short) bytes.remaining());
+                 out.put(bytes);
+                 out.put((byte) 0);
+             }
+             else
+             {
+                 throw new MarshalException(object.getClass().getName() + " is not recognized as a valid type for this composite");
+             }
+         }
+ 
+         return out;
+     }
+ 
      private Column getColumnForInsert(ByteBuffer columnName, ByteBuffer value)
      {
          Column column = new Column();
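
The createDynamicCompositeKey helper in the test above hand-rolls the DynamicCompositeType wire format: for each component, a 2-byte header whose high bit (0x8000) marks an aliased comparator and whose low byte is the one-character alias, then a 2-byte length, the payload, and one end-of-component byte. A standalone sketch of that layout for a single BytesType ('b') component, with illustrative names that are not part of this commit:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class DynamicCompositeSketch
{
    // Encodes one BytesType component the same way the test helper does:
    // header (0x8000 | alias), unsigned short length, payload bytes, end-of-component byte.
    static ByteBuffer encodeBytesComponent(byte[] payload)
    {
        ByteBuffer out = ByteBuffer.allocate(2 + 2 + payload.length + 1);
        out.putShort((short) (0x8000 | 'b'));
        out.putShort((short) payload.length);
        out.put(payload);
        out.put((byte) 0);
        out.flip();
        return out;
    }

    public static void main(String[] args)
    {
        ByteBuffer key = encodeBytesComponent("a".getBytes(StandardCharsets.UTF_8));
        StringBuilder hex = new StringBuilder();
        while (key.hasRemaining())
            hex.append(String.format("%02x ", key.get()));
        System.out.println(hex); // 80 62 00 01 61 00 -> header | 'b', length 1, payload 'a', terminator
    }
}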

