Merge branch 'cassandra-2.1' into trunk

Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/afb52aa9
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/afb52aa9
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/afb52aa9

Branch: refs/heads/trunk
Commit: afb52aa9a684dd9cfa86d131e86c83013833deee
Parents: 95638b6 4c7c5be
Author: Aleksey Yeschenko <alek...@apache.org>
Authored: Tue May 5 20:51:33 2015 +0300
Committer: Aleksey Yeschenko <alek...@apache.org>
Committed: Tue May 5 20:51:33 2015 +0300

----------------------------------------------------------------------
 CHANGES.txt                                     |  1 +
 .../cassandra/db/AbstractRangeCommand.java      | 23 ++++++++
 .../db/index/SecondaryIndexManager.java         | 48 +++++++++------
 .../db/index/SecondaryIndexSearcher.java        | 44 +++++++++++---
 .../db/index/composites/CompositesSearcher.java |  2 +-
 .../cassandra/db/index/keys/KeysSearcher.java   |  2 +-
 .../apache/cassandra/service/StorageProxy.java  | 62 ++++++++++++--------
 7 files changed, 130 insertions(+), 52 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/afb52aa9/CHANGES.txt
----------------------------------------------------------------------
diff --cc CHANGES.txt
index c0c209d,da14ca3..ea03ba0
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -1,103 -1,5 +1,104 @@@
 +3.0
 + * Disable memory mapping of hsperfdata file for JVM statistics (CASSANDRA-9242)
 + * Add pre-startup checks to detect potential incompatibilities (CASSANDRA-8049)
 + * Distinguish between null and unset in protocol v4 (CASSANDRA-7304)
 + * Add user/role permissions for user-defined functions (CASSANDRA-7557)
 + * Allow cassandra config to be updated to restart daemon without unloading classes (CASSANDRA-9046)
 + * Don't initialize compaction writer before checking if iter is empty (CASSANDRA-9117)
 + * Don't execute any functions at prepare-time (CASSANDRA-9037)
 + * Share file handles between all instances of a SegmentedFile (CASSANDRA-8893)
 + * Make it possible to major compact LCS (CASSANDRA-7272)
 + * Make FunctionExecutionException extend RequestExecutionException
 +   (CASSANDRA-9055)
 + * Add support for SELECT JSON, INSERT JSON syntax and new toJson(), fromJson()
 +   functions (CASSANDRA-7970)
 + * Optimise max purgeable timestamp calculation in compaction (CASSANDRA-8920)
 + * Constrain internode message buffer sizes, and improve IO class hierarchy (CASSANDRA-8670)
 + * New tool added to validate all sstables in a node (CASSANDRA-5791)
 + * Push notification when tracing completes for an operation (CASSANDRA-7807)
 + * Delay "node up" and "node added" notifications until native protocol server is started (CASSANDRA-8236)
 + * Compressed Commit Log (CASSANDRA-6809)
 + * Optimise IntervalTree (CASSANDRA-8988)
 + * Add a key-value payload for third party usage (CASSANDRA-8553, 9212)
 + * Bump metrics-reporter-config dependency for metrics 3.0 (CASSANDRA-8149)
 + * Partition intra-cluster message streams by size, not type (CASSANDRA-8789)
 + * Add WriteFailureException to native protocol, notify coordinator of
 +   write failures (CASSANDRA-8592)
 + * Convert SequentialWriter to nio (CASSANDRA-8709)
 + * Add role based access control (CASSANDRA-7653, 8650, 7216, 8760, 8849, 8761, 8850)
 + * Record client ip address in tracing sessions (CASSANDRA-8162)
 + * Indicate partition key columns in response metadata for prepared
 +   statements (CASSANDRA-7660)
 + * Merge UUIDType and TimeUUIDType parse logic (CASSANDRA-8759)
 + * Avoid memory allocation when searching index summary (CASSANDRA-8793)
 + * Optimise (Time)?UUIDType Comparisons (CASSANDRA-8730)
 + * Make CRC32Ex into a separate maven dependency (CASSANDRA-8836)
 + * Use preloaded jemalloc w/ Unsafe (CASSANDRA-8714)
 + * Avoid accessing partitioner through StorageProxy (CASSANDRA-8244, 8268)
 + * Upgrade Metrics library and remove deprecated metrics (CASSANDRA-5657)
 + * Serializing Row cache alternative, fully off heap (CASSANDRA-7438)
 + * Duplicate rows returned when in clause has repeated values (CASSANDRA-6707)
 + * Make CassandraException unchecked, extend RuntimeException (CASSANDRA-8560)
 + * Support direct buffer decompression for reads (CASSANDRA-8464)
 + * DirectByteBuffer compatible LZ4 methods (CASSANDRA-7039)
 + * Group sstables for anticompaction correctly (CASSANDRA-8578)
 + * Add ReadFailureException to native protocol, respond
 +   immediately when replicas encounter errors while handling
 +   a read request (CASSANDRA-7886)
 + * Switch CommitLogSegment from RandomAccessFile to nio (CASSANDRA-8308)
 + * Allow mixing token and partition key restrictions (CASSANDRA-7016)
 + * Support index key/value entries on map collections (CASSANDRA-8473)
 + * Modernize schema tables (CASSANDRA-8261)
 + * Support for user-defined aggregation functions (CASSANDRA-8053)
 + * Fix NPE in SelectStatement with empty IN values (CASSANDRA-8419)
 + * Refactor SelectStatement, return IN results in natural order instead
 +   of IN value list order and ignore duplicate values in partition key IN restrictions (CASSANDRA-7981)
 + * Support UDTs, tuples, and collections in user-defined
 +   functions (CASSANDRA-7563)
 + * Fix aggregate fn results on empty selection, result column name,
 +   and cqlsh parsing (CASSANDRA-8229)
 + * Mark sstables as repaired after full repair (CASSANDRA-7586)
 + * Extend Descriptor to include a format value and refactor reader/writer
 +   APIs (CASSANDRA-7443)
 + * Integrate JMH for microbenchmarks (CASSANDRA-8151)
 + * Keep sstable levels when bootstrapping (CASSANDRA-7460)
 + * Add Sigar library and perform basic OS settings check on startup (CASSANDRA-7838)
 + * Support for aggregation functions (CASSANDRA-4914)
 + * Remove cassandra-cli (CASSANDRA-7920)
 + * Accept dollar quoted strings in CQL (CASSANDRA-7769)
 + * Make assassinate a first class command (CASSANDRA-7935)
 + * Support IN clause on any partition key column (CASSANDRA-7855)
 + * Support IN clause on any clustering column (CASSANDRA-4762)
 + * Improve compaction logging (CASSANDRA-7818)
 + * Remove YamlFileNetworkTopologySnitch (CASSANDRA-7917)
 + * Do anticompaction in groups (CASSANDRA-6851)
 + * Support user-defined functions (CASSANDRA-7395, 7526, 7562, 7740, 7781, 7929,
 +   7924, 7812, 8063, 7813, 7708)
 + * Permit configurable timestamps with cassandra-stress (CASSANDRA-7416)
 + * Move sstable RandomAccessReader to nio2, which allows using the
 +   FILE_SHARE_DELETE flag on Windows (CASSANDRA-4050)
 + * Remove CQL2 (CASSANDRA-5918)
 + * Add Thrift get_multi_slice call (CASSANDRA-6757)
 + * Optimize fetching multiple cells by name (CASSANDRA-6933)
 + * Allow compilation in java 8 (CASSANDRA-7028)
 + * Make incremental repair default (CASSANDRA-7250)
 + * Enable code coverage thru JaCoCo (CASSANDRA-7226)
 + * Switch external naming of 'column families' to 'tables' (CASSANDRA-4369) 
 + * Shorten SSTable path (CASSANDRA-6962)
 + * Use unsafe mutations for most unit tests (CASSANDRA-6969)
 + * Fix race condition during calculation of pending ranges (CASSANDRA-7390)
 + * Fail on very large batch sizes (CASSANDRA-8011)
 + * Improve concurrency of repair (CASSANDRA-6455, 8208)
 + * Select optimal CRC32 implementation at runtime (CASSANDRA-8614)
 + * Evaluate MurmurHash of Token once per query (CASSANDRA-7096)
 + * Generalize progress reporting (CASSANDRA-8901)
 + * Resumable bootstrap streaming (CASSANDRA-8838, CASSANDRA-8942)
 + * Allow scrub for secondary index (CASSANDRA-5174)
 + * Save repair data to system table (CASSANDRA-5839)
 + * fix nodetool names that reference column families (CASSANDRA-8872)
 +
  2.1.6
+  * Add support for top-k custom 2i queries (CASSANDRA-8717)
   * Fix error when dropping table during compaction (CASSANDRA-9251)
   * cassandra-stress supports validation operations over user profiles (CASSANDRA-8773)
   * Add support for rate limiting log messages (CASSANDRA-9029)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/afb52aa9/src/java/org/apache/cassandra/db/index/SecondaryIndexManager.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/afb52aa9/src/java/org/apache/cassandra/db/index/SecondaryIndexSearcher.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/afb52aa9/src/java/org/apache/cassandra/db/index/composites/CompositesSearcher.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/afb52aa9/src/java/org/apache/cassandra/db/index/keys/KeysSearcher.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/afb52aa9/src/java/org/apache/cassandra/service/StorageProxy.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/service/StorageProxy.java
index f1ee35f,1536e46..78376a8
--- a/src/java/org/apache/cassandra/service/StorageProxy.java
+++ b/src/java/org/apache/cassandra/service/StorageProxy.java
@@@ -1684,18 -1621,39 +1684,42 @@@ public class StorageProxy implements St
              else
                  ranges = getRestrictedRanges(command.keyRange);
  
-             // our estimate of how many result rows there will be per-range
-             float resultRowsPerRange = estimateResultRowsPerRange(command, keyspace);
-             // underestimate how many rows we will get per-range in order to increase the likelihood that we'll
-             // fetch enough rows in the first round
-             resultRowsPerRange -= resultRowsPerRange * CONCURRENT_SUBREQUESTS_MARGIN;
-             int concurrencyFactor = resultRowsPerRange == 0.0
+             // determine the number of rows to be fetched and the concurrency factor
+             int rowsToBeFetched = command.limit();
+             int concurrencyFactor;
+             if (command.requiresScanningAllRanges())
+             {
+                 // all nodes must be queried
+                 rowsToBeFetched *= ranges.size();
+                 concurrencyFactor = ranges.size();
+                 logger.debug("Requested rows: {}, ranges.size(): {}; concurrent range requests: {}",
+                              command.limit(),
+                              ranges.size(),
+                              concurrencyFactor);
+                 Tracing.trace("Submitting range requests on {} ranges with a concurrency of {}",
 -                              new Object[]{ ranges.size(), concurrencyFactor});
++                              ranges.size(), concurrencyFactor);
+             }
+             else
+             {
+                 // our estimate of how many result rows there will be per-range
+                 float resultRowsPerRange = estimateResultRowsPerRange(command, keyspace);
+                 // underestimate how many rows we will get per-range in order to increase the likelihood that we'll
+                 // fetch enough rows in the first round
+                 resultRowsPerRange -= resultRowsPerRange * CONCURRENT_SUBREQUESTS_MARGIN;
+                 concurrencyFactor = resultRowsPerRange == 0.0
                                    ? 1
                                    : Math.max(1, Math.min(ranges.size(), (int) Math.ceil(command.limit() / resultRowsPerRange)));
-             logger.debug("Estimated result rows per range: {}; requested rows: {}, ranges.size(): {}; concurrent range requests: {}",
-                          resultRowsPerRange, command.limit(), ranges.size(), concurrencyFactor);
-             Tracing.trace("Submitting range requests on {} ranges with a concurrency of {} ({} rows per range expected)",
-                           ranges.size(), concurrencyFactor, resultRowsPerRange);
++
+                 logger.debug("Estimated result rows per range: {}; requested rows: {}, ranges.size(): {}; concurrent range requests: {}",
+                              resultRowsPerRange,
+                              command.limit(),
+                              ranges.size(),
+                              concurrencyFactor);
+                 Tracing.trace("Submitting range requests on {} ranges with a concurrency of {} ({} rows per range expected)",
 -                              new Object[]{ ranges.size(), concurrencyFactor, resultRowsPerRange});
++                              ranges.size(),
++                              concurrencyFactor,
++                              resultRowsPerRange);
+             }
  
              boolean haveSufficientRows = false;
              int i = 0;
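
A minimal, self-contained sketch of the concurrency-factor calculation shown in the hunk above, for tracing the arithmetic outside the diff context. The class name, method signature, and the 0.10 margin value are illustrative assumptions, not the actual StorageProxy API; in the real code the margin is the CONCURRENT_SUBREQUESTS_MARGIN constant.

    public final class ConcurrencyFactorSketch
    {
        // Assumed margin used to underestimate rows per range (actual value lives in StorageProxy).
        private static final float CONCURRENT_SUBREQUESTS_MARGIN = 0.10f;

        /**
         * @param limit                 rows requested by the query (command.limit())
         * @param rangeCount            number of token ranges to scan (ranges.size())
         * @param scanAllRanges         true when the searcher requires querying every range
         * @param estimatedRowsPerRange estimated result rows produced per range
         * @return number of ranges to query concurrently in the first round
         */
        public static int concurrencyFactor(int limit, int rangeCount, boolean scanAllRanges, float estimatedRowsPerRange)
        {
            if (scanAllRanges)
                return rangeCount; // every range must be queried, so fan out to all of them at once

            // Underestimate rows per range so the first round is more likely to fetch enough rows.
            float rowsPerRange = estimatedRowsPerRange - estimatedRowsPerRange * CONCURRENT_SUBREQUESTS_MARGIN;
            if (rowsPerRange == 0.0f)
                return 1;

            return Math.max(1, Math.min(rangeCount, (int) Math.ceil(limit / rowsPerRange)));
        }

        public static void main(String[] args)
        {
            // e.g. a LIMIT 100 query over 256 ranges with ~5 rows expected per range
            System.out.println(concurrencyFactor(100, 256, false, 5.0f)); // 23
            System.out.println(concurrencyFactor(100, 256, true, 5.0f));  // 256
        }
    }

With this split, a query that must consult every range (such as the top-k custom 2i queries added by CASSANDRA-8717) gets a concurrency factor equal to the range count, while ordinary range scans keep the estimate-driven behaviour.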
