[ https://issues.apache.org/jira/browse/CASSANDRA-9269?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Moloud Shahbazi updated CASSANDRA-9269:
---------------------------------------
    Description: 
I wrote a large amount of data at once into several column families of keyspaceName on a 
single-node cluster, and have only read from this keyspace since. My issue is that the 
commit log is huge and its size never goes down:
$ du -sh data/*
7.8G    data/commitlog
7.0G    data/data
36M     data/saved_caches

When I try to flush using nodetool, running "./bin/nodetool flush" reports no error, 
but the size of the commit log does not change. The same thing happens even when I 
specify keyspaceName explicitly.
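
For reference, the sequence I ran was roughly the following (paths are relative to the 
Cassandra install directory; keyspaceName is a placeholder for the real keyspace name):

$ du -sh data/commitlog               # 7.8G before flushing
$ ./bin/nodetool flush                # all keyspaces: exits cleanly, no error
$ ./bin/nodetool flush keyspaceName   # this keyspace only: also exits cleanly
$ du -sh data/commitlog               # still 7.8G afterwards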

For further information, here is the description of my keyspace schema:

cqlsh> describe keyspace keyspaceName;

CREATE KEYSPACE keyspaceName WITH replication = {'class': 
'NetworkTopologyStrategy', 'datacenter1': '1'}  AND durable_writes = true;

CREATE TABLE keyspaceName.CF0 (
    concept_id text,
    chunk_id int,
    json_data text,
    PRIMARY KEY (concept_id, chunk_id)
) WITH CLUSTERING ORDER BY (chunk_id ASC)
    AND bloom_filter_fp_chance = 0.01
    AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}'
    AND comment = ''
    AND compaction = {'min_threshold': '4', 'class': 
'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 
'max_threshold': '32'}
    AND compression = {'sstable_compression': 
'org.apache.cassandra.io.compress.LZ4Compressor'}
    AND dclocal_read_repair_chance = 0.0
    AND default_time_to_live = 0
    AND gc_grace_seconds = 864000
    AND max_index_interval = 2048
    AND memtable_flush_period_in_ms = 0
    AND min_index_interval = 128
    AND read_repair_chance = 0.0
    AND speculative_retry = '99.0PERCENTILE';

CREATE TABLE keyspaceName.CF1 (
    item_id text,
    chunk_id int,
    reviews text,
    PRIMARY KEY (item_id, chunk_id)
) WITH CLUSTERING ORDER BY (chunk_id ASC)
    AND bloom_filter_fp_chance = 0.01
    AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}'
    AND comment = ''
    AND compaction = {'min_threshold': '4', 'class': 
'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 
'max_threshold': '32'}
    AND compression = {'sstable_compression': 
'org.apache.cassandra.io.compress.LZ4Compressor'}
    AND dclocal_read_repair_chance = 0.0
    AND default_time_to_live = 0
    AND gc_grace_seconds = 864000
    AND max_index_interval = 2048
    AND memtable_flush_period_in_ms = 0
    AND min_index_interval = 128
    AND read_repair_chance = 0.0
    AND speculative_retry = '99.0PERCENTILE';

CREATE TABLE keyspaceName.CF2 (
    review_id text PRIMARY KEY,
    conceptids text,
    score int
) WITH bloom_filter_fp_chance = 0.01
    AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}'
    AND comment = ''
    AND compaction = {'min_threshold': '4', 'class': 
'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 
'max_threshold': '32'}
    AND compression = {'sstable_compression': 
'org.apache.cassandra.io.compress.LZ4Compressor'}
    AND dclocal_read_repair_chance = 0.0
    AND default_time_to_live = 0
    AND gc_grace_seconds = 864000
    AND max_index_interval = 2048
    AND memtable_flush_period_in_ms = 0
    AND min_index_interval = 128
    AND read_repair_chance = 0.0
    AND speculative_retry = '99.0PERCENTILE';
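
If it helps with the diagnosis, these are the kinds of checks I would expect to show 
whether the memtables are actually empty and whether the commit log segments are still 
considered live (illustrative only; output not attached, and the drain step is only my 
understanding of a possible workaround, not something I have verified):

$ ./bin/nodetool cfstats keyspaceName | grep "Memtable data size"   # expected to be near 0 after a successful flush
$ ls -lh data/commitlog/                                            # individual segment files and their timestamps
$ ./bin/nodetool drain                                              # flushes all memtables and stops accepting writes;
                                                                    # restarting afterwards should allow old segments to be discarded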

  was:
I wrote a large amount of data at once into several column families of keyspaceName on a 
single-node cluster, and have only read from this keyspace since. My issue is that the 
commit log is huge and its size never goes down:
$ du -sh data/*
7.8G    data/commitlog
7.0G    data/data
36M     data/saved_caches

When I try to flush using nodetool, running "./bin/nodetool flush" reports no error, 
but the size of the commit log does not change. The same thing happens even when I 
specify keyspaceName explicitly.

For further information, here is the description of my keyspace schema:

cqlsh> describe keyspace keyspaceName;

CREATE KEYSPACE keyspaceName WITH replication = {'class': 
'NetworkTopologyStrategy', 'datacenter1': '1'}  AND durable_writes = true;

CREATE TABLE keyspaceName.nconcepts (
    concept_id text,
    chunk_id int,
    json_data text,
    PRIMARY KEY (concept_id, chunk_id)
) WITH CLUSTERING ORDER BY (chunk_id ASC)
    AND bloom_filter_fp_chance = 0.01
    AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}'
    AND comment = ''
    AND compaction = {'min_threshold': '4', 'class': 
'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 
'max_threshold': '32'}
    AND compression = {'sstable_compression': 
'org.apache.cassandra.io.compress.LZ4Compressor'}
    AND dclocal_read_repair_chance = 0.0
    AND default_time_to_live = 0
    AND gc_grace_seconds = 864000
    AND max_index_interval = 2048
    AND memtable_flush_period_in_ms = 0
    AND min_index_interval = 128
    AND read_repair_chance = 0.0
    AND speculative_retry = '99.0PERCENTILE';

CREATE TABLE keyspaceName.items (
    item_id text,
    chunk_id int,
    reviews text,
    PRIMARY KEY (item_id, chunk_id)
) WITH CLUSTERING ORDER BY (chunk_id ASC)
    AND bloom_filter_fp_chance = 0.01
    AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}'
    AND comment = ''
    AND compaction = {'min_threshold': '4', 'class': 
'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 
'max_threshold': '32'}
    AND compression = {'sstable_compression': 
'org.apache.cassandra.io.compress.LZ4Compressor'}
    AND dclocal_read_repair_chance = 0.0
    AND default_time_to_live = 0
    AND gc_grace_seconds = 864000
    AND max_index_interval = 2048
    AND memtable_flush_period_in_ms = 0
    AND min_index_interval = 128
    AND read_repair_chance = 0.0
    AND speculative_retry = '99.0PERCENTILE';

CREATE TABLE keyspaceName.reviews (
    review_id text PRIMARY KEY,
    conceptids text,
    score int
) WITH bloom_filter_fp_chance = 0.01
    AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}'
    AND comment = ''
    AND compaction = {'min_threshold': '4', 'class': 
'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 
'max_threshold': '32'}
    AND compression = {'sstable_compression': 
'org.apache.cassandra.io.compress.LZ4Compressor'}
    AND dclocal_read_repair_chance = 0.0
    AND default_time_to_live = 0
    AND gc_grace_seconds = 864000
    AND max_index_interval = 2048
    AND memtable_flush_period_in_ms = 0
    AND min_index_interval = 128
    AND read_repair_chance = 0.0
    AND speculative_retry = '99.0PERCENTILE';

CREATE TABLE keyspaceName.rconcepts (
    concept_id text,
    chunk_id int,
    json_data text,
    PRIMARY KEY (concept_id, chunk_id)
) WITH CLUSTERING ORDER BY (chunk_id ASC)
    AND bloom_filter_fp_chance = 0.01
    AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}'
    AND comment = ''
    AND compaction = {'min_threshold': '4', 'class': 
'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 
'max_threshold': '32'}
    AND compression = {'sstable_compression': 
'org.apache.cassandra.io.compress.LZ4Compressor'}
    AND dclocal_read_repair_chance = 0.0
    AND default_time_to_live = 0
    AND gc_grace_seconds = 864000
    AND max_index_interval = 2048
    AND memtable_flush_period_in_ms = 0
    AND min_index_interval = 128
    AND read_repair_chance = 0.0
    AND speculative_retry = '99.0PERCENTILE';

CREATE TABLE keyspaceName.pconcepts (
    concept_id text,
    chunk_id int,
    json_data text,
    PRIMARY KEY (concept_id, chunk_id)
) WITH CLUSTERING ORDER BY (chunk_id ASC)
    AND bloom_filter_fp_chance = 0.01
    AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}'
    AND comment = ''
    AND compaction = {'min_threshold': '4', 'class': 
'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 
'max_threshold': '32'}
    AND compression = {'sstable_compression': 
'org.apache.cassandra.io.compress.LZ4Compressor'}
    AND dclocal_read_repair_chance = 0.0
    AND default_time_to_live = 0
    AND gc_grace_seconds = 864000
    AND max_index_interval = 2048
    AND memtable_flush_period_in_ms = 0
    AND min_index_interval = 128
    AND read_repair_chance = 0.0
    AND speculative_retry = '99.0PERCENTILE';


> Huge commitlog not flushed.
> ---------------------------
>
>                 Key: CASSANDRA-9269
>                 URL: https://issues.apache.org/jira/browse/CASSANDRA-9269
>             Project: Cassandra
>          Issue Type: Bug
>         Environment: ubuntu
> cassandra-2.1.3
>            Reporter: Moloud Shahbazi
>             Fix For: 2.1.x
>
>



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)
