David Capwell created CASSANDRA-20243:
-----------------------------------------

             Summary: ALLOW FILTERING returned data that doesn't match the 
single column condition.
                 Key: CASSANDRA-20243
                 URL: https://issues.apache.org/jira/browse/CASSANDRA-20243
             Project: Apache Cassandra
          Issue Type: Bug
          Components: Consistency/Coordination
            Reporter: David Capwell


This was found by CASSANDRA-20156

The following history returns extra data that doesn’t match the condition

{code}
        History:
                19: INSERT INTO ks1.tbl (pk0, ck0, ck1, v0, v2) VALUES 
('滲魶潁ꧏ', '㳩륓彫澳炬ᔕ?', 0x00000000000014008f00000000000000, 
'a559:fe1:d87b:25:c95e:9029:44e5:6ad2', 1.85573747E16 + 1.3498884E31) -- on 
node2
                21: INSERT INTO ks1.tbl (pk0, ck0, ck1, s0, s1, v0, v1, v2) 
VALUES ('滲魶潁ꧏ', '᱓흩窡' + '㯪?竲翐尞绸봧헑ꑖ', 0x0000000000004800a900000000000000, 
00000000-0000-4900-b600-000000000000, 00000000-0000-1900-ac00-000000000000, 
'170.247.202.199', 7538904746043055610 + 7400846939126123242, -5.0338443E-27) 
-- on node3
                22: SELECT * FROM ks1.tbl WHERE ck1 = 
0x0000000000004800a900000000000000 ALLOW FILTERING -- ck1 
'org.apache.cassandra.db.marshal.LexicalUUIDType' (reversed), on node1, fetch 
size 100
{code}

This fails with

{code}
Caused by: java.lang.AssertionError: Unexpected rows found:
pk0     | ck0          | ck1                                | s0                
                   | s1                                   | v0                  
                   | v1   | v2          
'滲魶潁ꧏ' | '㳩륓彫澳炬ᔕ?' | 0x00000000000014008f00000000000000 | 
00000000-0000-4900-b600-000000000000 | 00000000-0000-1900-ac00-000000000000 | 
'a559:fe1:d87b:25:c95e:9029:44e5:6ad2' | null | 1.3498884E31

Expected:
pk0     | ck0             | ck1                                | s0             
                      | s1                                   | v0               
 | v1                   | v2            
'滲魶潁ꧏ' | '᱓흩窡㯪?竲翐尞绸봧헑ꑖ' | 0x0000000000004800a900000000000000 | 
00000000-0000-4900-b600-000000000000 | 00000000-0000-1900-ac00-000000000000 | 
'170.247.202.199' | -3506992388540372764 | -5.0338443E-27
{code}

Took this and created a non-fuzz test to repro

{code}
public class RepoTest extends TestBaseImpl
{
    private static final Logger logger = 
LoggerFactory.getLogger(RepoTest.class);

    @Test
    public void test() throws IOException
    {
        boolean singleNode = false;
        try (Cluster cluster = Cluster.build(singleNode ? 1 : 3).start())
        {
            schemaChanges(cluster, "\t\tCREATE KEYSPACE IF NOT EXISTS ks1 WITH 
replication = {'class': 'SimpleStrategy', 'replication_factor': "+(singleNode ? 
1 : 3)+"};\n" +
                                   "\t\tCREATE TABLE ks1.tbl (\n" +
                                   "\t\t    pk0 text,\n" +
                                   "\t\t    ck0 text,\n" +
                                   "\t\t    ck1 
'org.apache.cassandra.db.marshal.LexicalUUIDType',\n" +
                                   "\t\t    s0 uuid static,\n" +
                                   "\t\t    s1 timeuuid static,\n" +
                                   "\t\t    v0 inet,\n" +
                                   "\t\t    v1 bigint,\n" +
                                   "\t\t    v2 float,\n" +
                                   "\t\t    PRIMARY KEY (pk0, ck0, ck1)\n" +
                                   "\t\t) WITH CLUSTERING ORDER BY (ck0 DESC, 
ck1 DESC)\n" +
                                   "\t\t    AND additional_write_policy = 
'99p'\n" +
                                   "\t\t    AND allow_auto_snapshot = true\n" +
                                   "\t\t    AND bloom_filter_fp_chance = 
0.01\n" +
                                   "\t\t    AND caching = {'keys': 'ALL', 
'rows_per_partition': 'NONE'}\n" +
                                   "\t\t    AND cdc = false\n" +
                                   "\t\t    AND comment = ''\n" +
                                   "\t\t    AND compaction = {'class': 
'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 
'max_threshold': '32', 'min_threshold': '4'}\n" +
                                   "\t\t    AND compression = 
{'chunk_length_in_kb': '16', 'class': 
'org.apache.cassandra.io.compress.LZ4Compressor'}\n" +
                                   "\t\t    AND memtable = 'default'\n" +
                                   "\t\t    AND crc_check_chance = 1.0\n" +
                                   "\t\t    AND fast_path = 'keyspace'\n" +
                                   "\t\t    AND default_time_to_live = 0\n" +
                                   "\t\t    AND extensions = {}\n" +
                                   "\t\t    AND gc_grace_seconds = 864000\n" +
                                   "\t\t    AND incremental_backups = true\n" +
                                   "\t\t    AND max_index_interval = 2048\n" +
                                   "\t\t    AND memtable_flush_period_in_ms = 
0\n" +
                                   "\t\t    AND min_index_interval = 128\n" +
                                   "\t\t    AND read_repair = 'BLOCKING'\n" +
                                   "\t\t    AND transactional_mode = 'off'\n" +
                                   "\t\t    AND transactional_migration_from = 
'none'\n" +
                                   "\t\t    AND speculative_retry = '99p';\n"
            );

            cluster.get(singleNode ? 1 : 2).executeInternal("INSERT INTO 
ks1.tbl (pk0, ck0, ck1, v0, v2) VALUES ('滲魶\uE2F7潁ꧏ', 
'㳩륓\uE799彫澳\uF38E炬\uEC9Dᔕ?', 0x00000000000014008f00000000000000, 
'a559:fe1:d87b:25:c95e:9029:44e5:6ad2', 1.85573747E16 + 1.3498884E31)");
            cluster.get(singleNode ? 1 : 3).executeInternal("INSERT INTO 
ks1.tbl (pk0, ck0, ck1, s0, s1, v0, v1, v2) VALUES ('滲魶\uE2F7潁ꧏ', '᱓흩窡' + 
'㯪?竲\uEE38翐尞绸봧헑ꑖ', 0x0000000000004800a900000000000000, 
00000000-0000-4900-b600-000000000000, 00000000-0000-1900-ac00-000000000000, 
'170.247.202.199', 7538904746043055610 + 7400846939126123242, -5.0338443E-27)");
            var qr = cluster.get(1).coordinator().executeWithResult("SELECT * 
FROM ks1.tbl WHERE ck1 = 0x0000000000004800a900000000000000 ALLOW FILTERING", 
ConsistencyLevel.ALL);
            AssertUtils.assertRows(qr, QueryResults.builder()
                                                   .row("滲魶\uE2F7潁ꧏ", // pk0
                                                        "᱓흩窡㯪?竲\uEE38翐尞绸봧헑ꑖ", 
// ck0
                                                        
UUID.fromString("00000000-0000-4800-a900-000000000000"), // ck1.  The CQL 
literal is "0x0000000000004800a900000000000000" as this is a LexicalUUIDType, 
but jvm-dtest converts this to a UUID in the resultset
                                                        
UUID.fromString("00000000-0000-4900-b600-000000000000"), // s0
                                                        
UUID.fromString("00000000-0000-1900-ac00-000000000000"), // s1
                                                        
InetAddress.getByName("170.247.202.199"), // v0
                                                        -3506992388540372764L, 
// v1
                                                        -5.0338443E-27F // v2
                                                   )
                                                   .build());
        }
    }

    private static void schemaChanges(Cluster cluster, String cql)
    {
        for (String change : 
Splitter.on(';').omitEmptyStrings().trimResults().splitToList(cql))
        {
            logger.warn("Schema change: {}", change);
            cluster.schemaChange(change);
        }
    }
}
{code}

This fails because the first write (which doesn’t match the condition) got 
returned… only the second write is expected.

I ran the test with single node and multi node, and this looks to only fail 
when on multi node



--
This message was sent by Atlassian Jira
(v8.20.10#820010)

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to