Merge branch 'cassandra-3.11' into trunk

Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/6ad99802
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/6ad99802
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/6ad99802

Branch: refs/heads/trunk
Commit: 6ad99802f20504170c3a447114f5694cf51662a5
Parents: f3d1b45 35823fc
Author: Aleksey Yeshchenko <[email protected]>
Authored: Wed Apr 18 11:35:58 2018 +0100
Committer: Aleksey Yeshchenko <[email protected]>
Committed: Wed Apr 18 11:36:28 2018 +0100

----------------------------------------------------------------------
 CHANGES.txt                                     |  2 +
 NEWS.txt                                        |  3 +
 conf/cassandra.yaml                             |  2 +-
 doc/cql3/CQL.textile                            |  8 +--
 doc/source/architecture/dynamo.rst              |  8 +--
 doc/source/cql/ddl.rst                          | 15 +----
 doc/source/operating/compaction.rst             |  3 +-
 doc/source/operating/snitch.rst                 |  4 +-
 pylib/cqlshlib/cql3handling.py                  |  5 +-
 pylib/cqlshlib/test/test_cqlsh_completion.py    |  9 +--
 pylib/cqlshlib/test/test_cqlsh_output.py        |  2 -
 .../org/apache/cassandra/auth/AuthKeyspace.java |  1 -
 .../cql3/statements/TableAttributes.java        |  6 --
 .../apache/cassandra/db/ConsistencyLevel.java   | 47 ++-------------
 .../org/apache/cassandra/db/SystemKeyspace.java |  1 -
 .../org/apache/cassandra/db/TableCQLHelper.java |  2 -
 .../cassandra/metrics/ReadRepairMetrics.java    |  3 +
 .../repair/SystemDistributedKeyspace.java       |  1 -
 .../apache/cassandra/schema/SchemaKeyspace.java | 15 ++---
 .../apache/cassandra/schema/TableMetadata.java  | 18 +-----
 .../apache/cassandra/schema/TableParams.java    | 42 --------------
 .../service/reads/AbstractReadExecutor.java     | 57 ++----------------
 .../service/reads/AsyncRepairCallback.java      | 61 --------------------
 .../cassandra/service/reads/DataResolver.java   | 29 ++--------
 .../cassandra/service/reads/DigestResolver.java |  8 ---
 .../cassandra/service/reads/ReadCallback.java   |  9 +--
 .../service/reads/ReadRepairDecision.java       | 23 --------
 .../service/reads/ResponseResolver.java         |  6 --
 .../reads/repair/BlockingReadRepair.java        | 34 +----------
 .../service/reads/repair/NoopReadRepair.java    | 17 +-----
 .../service/reads/repair/ReadRepair.java        | 29 ++--------
 .../apache/cassandra/tracing/TraceKeyspace.java |  1 -
 .../validation/miscellaneous/OverflowTest.java  |  4 --
 .../apache/cassandra/db/TableCQLHelperTest.java |  4 --
 .../cassandra/schema/MigrationManagerTest.java  |  2 -
 .../reads/repair/TestableReadRepair.java        | 18 +-----
 36 files changed, 59 insertions(+), 440 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/6ad99802/CHANGES.txt
----------------------------------------------------------------------
diff --cc CHANGES.txt
index f2c9ffc,990c5db..76321dd
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -1,227 -1,3 +1,229 @@@
 +4.0
++ * Eliminate background repair and probabilistic read_repair_chance table options
++   (CASSANDRA-13910)
 + * Bind to correct local address in 4.0 streaming (CASSANDRA-14362)
 + * Use standard Amazon naming for datacenter and rack in Ec2Snitch 
(CASSANDRA-7839)
 + * Fix junit failure for SSTableReaderTest (CASSANDRA-14387)
 + * Abstract write path for pluggable storage (CASSANDRA-14118)
 + * nodetool describecluster should be more informative (CASSANDRA-13853)
 + * Compaction performance improvements (CASSANDRA-14261) 
 + * Refactor Pair usage to avoid boxing ints/longs (CASSANDRA-14260)
 + * Add options to nodetool tablestats to sort and limit output 
(CASSANDRA-13889)
 + * Rename internals to reflect CQL vocabulary (CASSANDRA-14354)
 + * Add support for hybrid MIN(), MAX() speculative retry policies
 +   (CASSANDRA-14293, CASSANDRA-14338, CASSANDRA-14352)
 + * Fix some regressions caused by 14058 (CASSANDRA-14353)
 + * Abstract repair for pluggable storage (CASSANDRA-14116)
 + * Add meaningful toString() impls (CASSANDRA-13653)
 + * Add sstableloader option to accept target keyspace name (CASSANDRA-13884)
 + * Move processing of EchoMessage response to gossip stage (CASSANDRA-13713)
 + * Add coordinator write metric per CF (CASSANDRA-14232)
 + * Correct and clarify SSLFactory.getSslContext method and call sites 
(CASSANDRA-14314)
 + * Handle static and partition deletion properly on 
ThrottledUnfilteredIterator (CASSANDRA-14315)
 + * NodeTool clientstats should show SSL Cipher (CASSANDRA-14322)
 + * Add ability to specify driver name and version (CASSANDRA-14275)
 + * Abstract streaming for pluggable storage (CASSANDRA-14115)
 + * Forced incremental repairs should promote sstables if they can 
(CASSANDRA-14294)
 + * Use Murmur3 for validation compactions (CASSANDRA-14002)
 + * Comma at the end of the seed list is interpreted as localhost 
(CASSANDRA-14285)
 + * Refactor read executor and response resolver, abstract read repair 
(CASSANDRA-14058)
 + * Add optional startup delay to wait until peers are ready (CASSANDRA-13993)
 + * Add a few options to nodetool verify (CASSANDRA-14201)
 + * CVE-2017-5929 Security vulnerability and redefine default log rotation 
policy (CASSANDRA-14183)
 + * Use JVM default SSL validation algorithm instead of custom default 
(CASSANDRA-13259)
 + * Better document in code InetAddressAndPort usage post 7544, incorporate 
port into UUIDGen node (CASSANDRA-14226)
 + * Fix sstablemetadata date string for minLocalDeletionTime (CASSANDRA-14132)
 + * Make it possible to change neverPurgeTombstones during runtime 
(CASSANDRA-14214)
 + * Remove GossipDigestSynVerbHandler#doSort() (CASSANDRA-14174)
 + * Add nodetool clientlist (CASSANDRA-13665)
 + * Revert ProtocolVersion changes from CASSANDRA-7544 (CASSANDRA-14211)
 + * Non-disruptive seed node list reload (CASSANDRA-14190)
 + * Nodetool tablehistograms to print statistics for all the tables 
(CASSANDRA-14185)
 + * Migrate dtests to use pytest and python3 (CASSANDRA-14134)
 + * Allow storage port to be configurable per node (CASSANDRA-7544)
 + * Make sub-range selection for non-frozen collections return null instead of 
empty (CASSANDRA-14182)
 + * BloomFilter serialization format should not change byte ordering 
(CASSANDRA-9067)
 + * Remove unused on-heap BloomFilter implementation (CASSANDRA-14152)
 + * Delete temp test files on exit (CASSANDRA-14153)
 + * Make PartitionUpdate and Mutation immutable (CASSANDRA-13867)
 + * Fix CommitLogReplayer exception for CDC data (CASSANDRA-14066)
 + * Fix cassandra-stress startup failure (CASSANDRA-14106)
 + * Remove initialDirectories from CFS (CASSANDRA-13928)
 + * Fix trivial log format error (CASSANDRA-14015)
 + * Allow sstabledump to do a json object per partition (CASSANDRA-13848)
 + * Add option to optimise merkle tree comparison across replicas 
(CASSANDRA-3200)
 + * Remove unused and deprecated methods from AbstractCompactionStrategy 
(CASSANDRA-14081)
 + * Fix Distribution.average in cassandra-stress (CASSANDRA-14090)
 + * Support a means of logging all queries as they were invoked 
(CASSANDRA-13983)
 + * Presize collections (CASSANDRA-13760)
 + * Add GroupCommitLogService (CASSANDRA-13530)
 + * Parallelize initial materialized view build (CASSANDRA-12245)
 + * Fix flaky SecondaryIndexManagerTest.assert[Not]MarkedAsBuilt 
(CASSANDRA-13965)
 + * Make LWTs send resultset metadata on every request (CASSANDRA-13992)
 + * Fix flaky indexWithFailedInitializationIsNotQueryableAfterPartialRebuild 
(CASSANDRA-13963)
 + * Introduce leaf-only iterator (CASSANDRA-9988)
 + * Upgrade Guava to 23.3 and Airline to 0.8 (CASSANDRA-13997)
 + * Allow only one concurrent call to StatusLogger (CASSANDRA-12182)
 + * Refactoring to specialised functional interfaces (CASSANDRA-13982)
 + * Speculative retry should allow more friendly params (CASSANDRA-13876)
 + * Throw exception if we send/receive repair messages to incompatible nodes 
(CASSANDRA-13944)
 + * Replace usages of MessageDigest with Guava's Hasher (CASSANDRA-13291)
 + * Add nodetool cmd to print hinted handoff window (CASSANDRA-13728)
 + * Fix some alerts raised by static analysis (CASSANDRA-13799)
 + * Checksum sstable metadata (CASSANDRA-13321, CASSANDRA-13593)
 + * Add result set metadata to prepared statement MD5 hash calculation 
(CASSANDRA-10786)
 + * Refactor GcCompactionTest to avoid boxing (CASSANDRA-13941)
 + * Expose recent histograms in JmxHistograms (CASSANDRA-13642)
 + * Fix buffer length comparison when decompressing in netty-based streaming 
(CASSANDRA-13899)
 + * Properly close StreamCompressionInputStream to release any ByteBuf 
(CASSANDRA-13906)
 + * Add SERIAL and LOCAL_SERIAL support for cassandra-stress (CASSANDRA-13925)
 + * LCS needlessly checks for L0 STCS candidates multiple times 
(CASSANDRA-12961)
 + * Correctly close netty channels when a stream session ends (CASSANDRA-13905)
 + * Update lz4 to 1.4.0 (CASSANDRA-13741)
 + * Optimize Paxos prepare and propose stage for local requests 
(CASSANDRA-13862)
 + * Throttle base partitions during MV repair streaming to prevent OOM 
(CASSANDRA-13299)
 + * Use compaction threshold for STCS in L0 (CASSANDRA-13861)
 + * Fix problem with min_compress_ratio: 1 and disallow ratio < 1 
(CASSANDRA-13703)
 + * Add extra information to SASI timeout exception (CASSANDRA-13677)
 + * Add incremental repair support for --hosts, --force, and subrange repair 
(CASSANDRA-13818)
 + * Rework CompactionStrategyManager.getScanners synchronization 
(CASSANDRA-13786)
 + * Add additional unit tests for batch behavior, TTLs, Timestamps 
(CASSANDRA-13846)
 + * Add keyspace and table name in schema validation exception 
(CASSANDRA-13845)
 + * Emit metrics whenever we hit tombstone failures and warn thresholds 
(CASSANDRA-13771)
 + * Make netty EventLoopGroups daemon threads (CASSANDRA-13837)
 + * Race condition when closing stream sessions (CASSANDRA-13852)
 + * NettyFactoryTest is failing in trunk on macOS (CASSANDRA-13831)
 + * Allow changing log levels via nodetool for related classes 
(CASSANDRA-12696)
 + * Add stress profile yaml with LWT (CASSANDRA-7960)
 + * Reduce memory copies and object creations when acting on ByteBufs 
(CASSANDRA-13789)
 + * Simplify mx4j configuration (CASSANDRA-13578)
 + * Fix trigger example on 4.0 (CASSANDRA-13796)
 + * Force minimum timeout value (CASSANDRA-9375)
 + * Use netty for streaming (CASSANDRA-12229)
 + * Use netty for internode messaging (CASSANDRA-8457)
 + * Add bytes repaired/unrepaired to nodetool tablestats (CASSANDRA-13774)
 + * Don't delete incremental repair sessions if they still have sstables 
(CASSANDRA-13758)
 + * Fix pending repair manager index out of bounds check (CASSANDRA-13769)
 + * Don't use RangeFetchMapCalculator when RF=1 (CASSANDRA-13576)
 + * Don't optimise trivial ranges in RangeFetchMapCalculator (CASSANDRA-13664)
 + * Use an ExecutorService for repair commands instead of new 
Thread(..).start() (CASSANDRA-13594)
 + * Fix race / ref leak in anticompaction (CASSANDRA-13688)
 + * Expose tasks queue length via JMX (CASSANDRA-12758)
 + * Fix race / ref leak in PendingRepairManager (CASSANDRA-13751)
 + * Enable ppc64le runtime as unsupported architecture (CASSANDRA-13615)
 + * Improve sstablemetadata output (CASSANDRA-11483)
 + * Support for migrating legacy users to roles has been dropped 
(CASSANDRA-13371)
 + * Introduce error metrics for repair (CASSANDRA-13387)
 + * Refactoring to primitive functional interfaces in AuthCache 
(CASSANDRA-13732)
 + * Update metrics to 3.1.5 (CASSANDRA-13648)
 + * batch_size_warn_threshold_in_kb can now be set at runtime (CASSANDRA-13699)
 + * Avoid always rebuilding secondary indexes at startup (CASSANDRA-13725)
 + * Upgrade JMH from 1.13 to 1.19 (CASSANDRA-13727)
 + * Upgrade SLF4J from 1.7.7 to 1.7.25 (CASSANDRA-12996)
 + * Default for start_native_transport now true if not set in config 
(CASSANDRA-13656)
 + * Don't add localhost to the graph when calculating where to stream from 
(CASSANDRA-13583)
 + * Make CDC availability more deterministic via hard-linking (CASSANDRA-12148)
 + * Allow skipping equality-restricted clustering columns in ORDER BY clause 
(CASSANDRA-10271)
 + * Use common nowInSec for validation compactions (CASSANDRA-13671)
 + * Improve handling of IR prepare failures (CASSANDRA-13672)
 + * Send IR coordinator messages synchronously (CASSANDRA-13673)
 + * Flush system.repair table before IR finalize promise (CASSANDRA-13660)
 + * Fix column filter creation for wildcard queries (CASSANDRA-13650)
 + * Add 'nodetool getbatchlogreplaythrottle' and 'nodetool 
setbatchlogreplaythrottle' (CASSANDRA-13614)
 + * fix race condition in PendingRepairManager (CASSANDRA-13659)
 + * Allow noop incremental repair state transitions (CASSANDRA-13658)
 + * Run repair with down replicas (CASSANDRA-10446)
 + * Added started & completed repair metrics (CASSANDRA-13598)
 + * Improve secondary index (re)build failure and concurrency handling 
(CASSANDRA-10130)
 + * Improve calculation of available disk space for compaction 
(CASSANDRA-13068)
 + * Change the accessibility of RowCacheSerializer for third party row cache 
plugins (CASSANDRA-13579)
 + * Allow sub-range repairs for a preview of repaired data (CASSANDRA-13570)
 + * NPE in IR cleanup when columnfamily has no sstables (CASSANDRA-13585)
 + * Fix Randomness of stress values (CASSANDRA-12744)
 + * Allow selecting Map values and Set elements (CASSANDRA-7396)
 + * Fast and garbage-free Streaming Histogram (CASSANDRA-13444)
 + * Update repairTime for keyspaces on completion (CASSANDRA-13539)
 + * Add configurable upper bound for validation executor threads 
(CASSANDRA-13521)
 + * Bring back maxHintTTL property (CASSANDRA-12982)
 + * Add testing guidelines (CASSANDRA-13497)
 + * Add more repair metrics (CASSANDRA-13531)
 + * RangeStreamer should be smarter when picking endpoints for streaming 
(CASSANDRA-4650)
 + * Avoid rewrapping an exception thrown for cache load functions 
(CASSANDRA-13367)
 + * Log time elapsed for each incremental repair phase (CASSANDRA-13498)
 + * Add multiple table operation support to cassandra-stress (CASSANDRA-8780)
 + * Fix incorrect cqlsh results when selecting same columns multiple times 
(CASSANDRA-13262)
 + * Fix WriteResponseHandlerTest is sensitive to test execution order 
(CASSANDRA-13421)
 + * Improve incremental repair logging (CASSANDRA-13468)
 + * Start compaction when incremental repair finishes (CASSANDRA-13454)
 + * Add repair streaming preview (CASSANDRA-13257)
 + * Cleanup isIncremental/repairedAt usage (CASSANDRA-13430)
 + * Change protocol to allow sending key space independent of query string 
(CASSANDRA-10145)
 + * Make gc_log and gc_warn settable at runtime (CASSANDRA-12661)
 + * Take number of files in L0 in account when estimating remaining compaction 
tasks (CASSANDRA-13354)
 + * Skip building views during base table streams on range movements 
(CASSANDRA-13065)
 + * Improve error messages for +/- operations on maps and tuples 
(CASSANDRA-13197)
 + * Remove deprecated repair JMX APIs (CASSANDRA-11530)
 + * Fix version check to enable streaming keep-alive (CASSANDRA-12929)
 + * Make it possible to monitor an ideal consistency level separate from 
actual consistency level (CASSANDRA-13289)
 + * Outbound TCP connections ignore internode authenticator (CASSANDRA-13324)
 + * Upgrade junit from 4.6 to 4.12 (CASSANDRA-13360)
 + * Cleanup ParentRepairSession after repairs (CASSANDRA-13359)
 + * Upgrade snappy-java to 1.1.2.6 (CASSANDRA-13336)
 + * Incremental repair not streaming correct sstables (CASSANDRA-13328)
 + * Upgrade the jna version to 4.3.0 (CASSANDRA-13300)
 + * Add the currentTimestamp, currentDate, currentTime and currentTimeUUID 
functions (CASSANDRA-13132)
 + * Remove config option index_interval (CASSANDRA-10671)
 + * Reduce lock contention for collection types and serializers 
(CASSANDRA-13271)
 + * Make it possible to override MessagingService.Verb ids (CASSANDRA-13283)
 + * Avoid synchronized on prepareForRepair in ActiveRepairService 
(CASSANDRA-9292)
 + * Adds the ability to use uncompressed chunks in compressed files 
(CASSANDRA-10520)
 + * Don't flush sstables when streaming for incremental repair 
(CASSANDRA-13226)
 + * Remove unused method (CASSANDRA-13227)
 + * Fix minor bugs related to #9143 (CASSANDRA-13217)
 + * Output warning if user increases RF (CASSANDRA-13079)
 + * Remove pre-3.0 streaming compatibility code for 4.0 (CASSANDRA-13081)
 + * Add support for + and - operations on dates (CASSANDRA-11936)
 + * Fix consistency of incrementally repaired data (CASSANDRA-9143)
 + * Increase commitlog version (CASSANDRA-13161)
 + * Make TableMetadata immutable, optimize Schema (CASSANDRA-9425)
 + * Refactor ColumnCondition (CASSANDRA-12981)
 + * Parallelize streaming of different keyspaces (CASSANDRA-4663)
 + * Improved compactions metrics (CASSANDRA-13015)
 + * Speed-up start-up sequence by avoiding un-needed flushes (CASSANDRA-13031)
 + * Use Caffeine (W-TinyLFU) for on-heap caches (CASSANDRA-10855)
 + * Thrift removal (CASSANDRA-11115)
 + * Remove pre-3.0 compatibility code for 4.0 (CASSANDRA-12716)
 + * Add column definition kind to dropped columns in schema (CASSANDRA-12705)
 + * Add (automate) Nodetool Documentation (CASSANDRA-12672)
 + * Update bundled cqlsh python driver to 3.7.0 (CASSANDRA-12736)
 + * Reject invalid replication settings when creating or altering a keyspace 
(CASSANDRA-12681)
 + * Clean up the SSTableReader#getScanner API wrt removal of RateLimiter 
(CASSANDRA-12422)
 + * Use new token allocation for non bootstrap case as well (CASSANDRA-13080)
 + * Avoid byte-array copy when key cache is disabled (CASSANDRA-13084)
 + * Require forceful decommission if number of nodes is less than replication 
factor (CASSANDRA-12510)
 + * Allow IN restrictions on column families with collections (CASSANDRA-12654)
 + * Log message size in trace message in OutboundTcpConnection 
(CASSANDRA-13028)
 + * Add timeUnit Days for cassandra-stress (CASSANDRA-13029)
 + * Add mutation size and batch metrics (CASSANDRA-12649)
 + * Add method to get size of endpoints to TokenMetadata (CASSANDRA-12999)
 + * Expose time spent waiting in thread pool queue (CASSANDRA-8398)
 + * Conditionally update index built status to avoid unnecessary flushes 
(CASSANDRA-12969)
 + * cqlsh auto completion: refactor definition of compaction strategy options 
(CASSANDRA-12946)
 + * Add support for arithmetic operators (CASSANDRA-11935)
 + * Add histogram for delay to deliver hints (CASSANDRA-13234)
 + * Fix cqlsh automatic protocol downgrade regression (CASSANDRA-13307)
 + * Changing `max_hint_window_in_ms` at runtime (CASSANDRA-11720)
 + * Trivial format error in StorageProxy (CASSANDRA-13551)
 + * Nodetool repair can hang forever if we lose the notification for the 
repair completing/failing (CASSANDRA-13480)
 + * Anticompaction can cause noisy log messages (CASSANDRA-13684)
 + * Switch to client init for sstabledump (CASSANDRA-13683)
 + * CQLSH: Don't pause when capturing data (CASSANDRA-13743)
 + * nodetool clearsnapshot requires --all to clear all snapshots 
(CASSANDRA-13391)
 + * Correctly count range tombstones in traces and tombstone thresholds 
(CASSANDRA-8527)
 + * cqlshrc.sample uses incorrect option for time formatting (CASSANDRA-14243)
 +
 +
  3.11.3
   * Allow existing nodes to use all peers in shadow round (CASSANDRA-13851)
   * Fix cqlsh to read connection.ssl cqlshrc option again (CASSANDRA-14299)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/6ad99802/NEWS.txt
----------------------------------------------------------------------
diff --cc NEWS.txt
index 3be52dc,3cfd9a3..e8e88a4
--- a/NEWS.txt
+++ b/NEWS.txt
@@@ -33,127 -42,6 +33,130 @@@ restore snapshots created with the prev
  'sstableloader' tool. You can upgrade the file format of your snapshots
  using the provided 'sstableupgrade' tool.
  
 +4.0
 +===
 +
 +New features
 +------------
 +   - An experimental option to compare all merkle trees together has been 
added - for example, in
 +     a 3 node cluster with 2 replicas identical and 1 out-of-date, with this 
option enabled, the
 +     out-of-date replica will only stream a single copy from an up-to-date replica. Enable it by adding
 +     "-os" to nodetool repair. See CASSANDRA-3200.
 +   - The currentTimestamp, currentDate, currentTime and currentTimeUUID 
functions have been added.
 +     See CASSANDRA-13132
 +   - Support for arithmetic operations between `timestamp`/`date` and 
`duration` has been added.
 +     See CASSANDRA-11936
 +   - Support for arithmetic operations on numbers has been added. See 
CASSANDRA-11935
 +   - Preview expected streaming required for a repair (nodetool repair 
--preview), and validate the
 +     consistency of repaired data between nodes (nodetool repair --validate). 
See CASSANDRA-13257
 +   - Support for selecting Map values and Set elements has been added for 
SELECT queries. See CASSANDRA-7396
 +   - Change-Data-Capture has been modified to make CommitLogSegments available
 +     immediately upon creation via hard-linking the files. This means that 
incomplete
 +     segments will be available in cdc_raw rather than fully flushed. See 
documentation
 +     and CASSANDRA-12148 for more detail.
 +   - The initial build of materialized views can be parallelized. The number 
of concurrent builder
 +     threads is specified by the property 
`cassandra.yaml:concurrent_materialized_view_builders`.
 +     This property can be modified at runtime through both JMX and the new 
`setconcurrentviewbuilders`
 +     and `getconcurrentviewbuilders` nodetool commands. See CASSANDRA-12245 
for more details.
 +   - There is now a binary full query log based on Chronicle Queue that can 
be controlled using
 +     nodetool enablefullquerylog, disablefullquerylog, and resetfullquerylog. 
The log
 +     contains all queries invoked, the approximate time they were invoked, any parameters necessary
 +     to bind wildcard values, and all query options. A human-readable version of the log can be
 +     dumped or tailed using the new bin/fqltool utility. The full query log 
is designed to be safe
 +     to use in production and limits utilization of heap memory and disk 
space with limits
 +     you can specify when enabling the log.
 +     See nodetool and fqltool help text for more information.
 +   - SSTableDump now supports the -l option to output each partition as its own JSON object.
 +     See CASSANDRA-13848 for more detail.
 +   - Metric for coordinator writes per table has been added. See 
CASSANDRA-14232
 +   - Nodetool cfstats now has options to sort by various metrics as well as 
limit results.
 +
 +Upgrading
 +---------
 +    - Cassandra 4.0 removed support for COMPACT STORAGE tables. All compact tables
 +      have to be migrated using the `ALTER ... DROP COMPACT STORAGE` statement in 3.0/3.11.
 +      Starting with 4.0, Cassandra will not start if flags indicate that a table is non-CQL.
 +      Syntax for creating compact tables is also deprecated.
 +    - Support for legacy auth tables in the system_auth keyspace (users,
 +      permissions, credentials) and the migration code has been removed. 
Migration
 +      of these legacy auth tables must have been completed before the upgrade 
to
 +      4.0 and the legacy tables must have been removed. See the 'Upgrading' 
section
 +      for version 2.2 for migration instructions.
 +    - Cassandra 4.0 removed support for the deprecated Thrift interface. 
Amongst
 +      other things, this implies the removal of all yaml options related to 
thrift
 +      ('start_rpc', rpc_port, ...).
 +    - Cassandra 4.0 removed support for any pre-3.0 format. This means you
 +      cannot upgrade from a 2.x version to 4.0 directly; you have to upgrade to
 +      a 3.0.x/3.x version first (and run upgradesstables). In particular, this
 +      means Cassandra 4.0 cannot load or read pre-3.0 sstables in any way: you
 +      will need to upgrade those sstables in 3.0.x/3.x first.
 +    - Upgrades from 3.0.x or 3.x are supported since 3.0.13 or 3.11.0; previous
 +      versions will cause issues during rolling upgrades (CASSANDRA-13274).
 +    - Cassandra will no longer allow invalid keyspace replication options, 
such
 +      as invalid datacenter names for NetworkTopologyStrategy. Operators MUST
 +      add new nodes to a datacenter before they can reference it in ALTER or
 +      CREATE keyspace replication policies. Existing keyspaces
 +      will continue to operate, but CREATE and ALTER will validate that all
 +      datacenters specified exist in the cluster.
 +    - Cassandra 4.0 fixes a problem with incremental repair which caused 
repaired
 +      data to be inconsistent between nodes. The fix changes the behavior of 
both
 +      full and incremental repairs. For full repairs, data is no longer marked
 +      repaired. For incremental repairs, anticompaction is run at the 
beginning
 +      of the repair, instead of at the end. If incremental repair was being 
used
 +      prior to upgrading, a full repair should be run after upgrading to 
resolve
 +      any inconsistencies.
 +    - Config option index_interval has been removed (it was deprecated since 
2.0)
 +    - Deprecated repair JMX APIs are removed.
 +    - The version of snappy-java has been upgraded to 1.1.2.6
 +    - The minimum value for internode message timeouts is 10ms. Previously, any
 +      positive value was allowed. See cassandra.yaml entries like
 +      read_request_timeout_in_ms for more details.
 +    - Cassandra 4.0 allows a single port to be used for both secure and insecure
 +      connections between cassandra nodes (CASSANDRA-10404). See the yaml for
 +      specific property changes, and see the security doc for full details.
 +    - Due to the parallelization of the initial build of materialized views,
 +      the per token range view building status is stored in the new table
 +      `system.view_builds_in_progress`. The old table 
`system.views_builds_in_progress`
 +      is no longer used and can be removed. See CASSANDRA-12245 for more 
details.
 +    - Config option commitlog_sync_batch_window_in_ms has been deprecated because its
 +      documentation was incorrect and the setting itself is nearly useless.
 +      Batch mode remains a valid commit log mode, however.
 +    - There is a new commit log mode, group, which is similar to batch mode
 +      but blocks for up to a configurable number of milliseconds between disk flushes.
 +    - nodetool clearsnapshot now requires the --all flag to remove all snapshots.
 +      The previous behavior deleted all snapshots by default.
 +    - Nodes are now identified by a combination of IP and storage port.
 +      Existing JMX APIs, nodetool, and system tables continue to work
 +      and accept/return just an IP, but there is a new
 +      version of each that works with the full unambiguous identifier.
 +      You should prefer these over the deprecated ambiguous versions that only
 +      work with an IP. This was done to support multiple instances per IP.
 +      Additionally we are moving to only using a single port for encrypted and
 +      unencrypted traffic and if you want multiple instances per IP you must
 +      first switch encrypted traffic to the storage port and not a separate
 +      encrypted port. If you want to use multiple instances per IP
 +      with SSL you will need to use StartTLS on storage_port and set
 +      outgoing_encrypted_port_source to gossip so that outbound connections
 +      know what port to connect to for each instance. Before changing
 +      storage port or native port at nodes you must first upgrade the entire 
cluster
 +      and clients to 4.0 so they can handle the port not being consistent 
across
 +      the cluster.
 +    - Names of AWS regions/availability zones have been cleaned up to more 
correctly
 +      match the Amazon names. There is now a new option in 
conf/cassandra-rackdc.properties
 +      that lets users enable the correct names for new clusters, or use the 
legacy
 +      names for existing clusters. See conf/cassandra-rackdc.properties for 
details.
++    - Background repair has been removed. dclocal_read_repair_chance and
++      read_repair_chance table options have been removed and are now rejected.
++      See CASSANDRA-13910 for details.
 +
 +Materialized Views
 +-------------------
 +   - Following a discussion regarding concerns about the design and safety of 
Materialized Views, the C* development
 +     community no longer recommends them for production use, and considers 
them experimental. Warning messages will
 +     now be logged when they are created. (See 
https://www.mail-archive.com/[email protected]/msg11511.html)
 +   - An 'enable_materialized_views' flag has been added to cassandra.yaml to 
allow operators to prevent creation of
 +     views.
 +
  3.11.3
  =====
  

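To illustrate the upgrade note above, a minimal CQL sketch of the before/after, assuming a hypothetical table ks.tbl (per the NEWS entry, the removed options are rejected on 4.0; speculative_retry, shown here with its 99PERCENTILE default, remains the per-table knob for contacting extra replicas):

    -- Accepted (though deprecated) on 3.0/3.11; rejected on 4.0 per CASSANDRA-13910:
    ALTER TABLE ks.tbl
        WITH read_repair_chance = 0.1 AND dclocal_read_repair_chance = 0.1;

    -- On 4.0, speculative retry is the remaining extra-replica read mechanism:
    ALTER TABLE ks.tbl WITH speculative_retry = '99PERCENTILE';
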
http://git-wip-us.apache.org/repos/asf/cassandra/blob/6ad99802/conf/cassandra.yaml
----------------------------------------------------------------------
diff --cc conf/cassandra.yaml
index 1be6feb,7d25bef..d466072
--- a/conf/cassandra.yaml
+++ b/conf/cassandra.yaml
@@@ -917,7 -954,7 +917,7 @@@ dynamic_snitch_update_interval_in_ms: 1
  # controls how often to reset all host scores, allowing a bad host to
  # possibly recover
  dynamic_snitch_reset_interval_in_ms: 600000
--# if set greater than zero and read_repair_chance is < 1.0, this will allow
++# if set greater than zero, this will allow
  # 'pinning' of replicas to hosts in order to increase cache capacity.
  # The badness threshold will control how much worse the pinned host has to be
  # before the dynamic snitch will prefer other replicas over it.  This is

http://git-wip-us.apache.org/repos/asf/cassandra/blob/6ad99802/doc/cql3/CQL.textile
----------------------------------------------------------------------
diff --cc doc/cql3/CQL.textile
index d87e72a,f32e30c..8d55baa
--- a/doc/cql3/CQL.textile
+++ b/doc/cql3/CQL.textile
@@@ -250,8 -250,8 +250,7 @@@ CREATE TABLE monkeySpecies 
      common_name text,
      population varint,
      average_size int
--) WITH comment='Important biological records'
--   AND read_repair_chance = 1.0;
++) WITH comment='Important biological records';
  
  CREATE TABLE timeline (
      userid uuid,
@@@ -334,8 -334,8 +333,6 @@@ Table creation supports the following o
  
  |_. option                    |_. kind   |_. default   |_. description|
  |@comment@                    | _simple_ | none        | A free-form, 
human-readable comment.|
--|@read_repair_chance@         | _simple_ | 0.1         | The probability with 
which to query extra nodes (e.g. more nodes than required by the consistency 
level) for the purpose of read repairs.|
--|@dclocal_read_repair_chance@ | _simple_ | 0           | The probability with 
which to query extra nodes (e.g. more nodes than required by the consistency 
level) belonging to the same data center than the read coordinator for the 
purpose of read repairs.|
  |@gc_grace_seconds@           | _simple_ | 864000      | Time to wait before 
garbage collecting tombstones (deletion markers).|
  |@bloom_filter_fp_chance@     | _simple_ | 0.00075     | The target 
probability of false positive of the sstable bloom filters. Said bloom filters 
will be sized to provide the provided probability (thus lowering this value 
impact the size of bloom filters in-memory and on-disk)|
  |@default_time_to_live@       | _simple_ | 0           | The default 
expiration time ("TTL") in seconds for a table.|
@@@ -411,8 -411,8 +408,7 @@@ ALTER TABLE addamsFamil
  ADD gravesite varchar;
  
  ALTER TABLE addamsFamily
--WITH comment = 'A most excellent and useful column family'
-- AND read_repair_chance = 0.2;
++WITH comment = 'A most excellent and useful column family';
  p. 
  The @ALTER@ statement is used to manipulate table definitions. It allows for 
adding new columns, dropping existing ones, or updating the table options. As 
with table creation, @ALTER COLUMNFAMILY@ is allowed as an alias for @ALTER 
TABLE@.
  

http://git-wip-us.apache.org/repos/asf/cassandra/blob/6ad99802/doc/source/architecture/dynamo.rst
----------------------------------------------------------------------
diff --cc doc/source/architecture/dynamo.rst
index a7dbb87,a7dbb87..365a695
--- a/doc/source/architecture/dynamo.rst
+++ b/doc/source/architecture/dynamo.rst
@@@ -117,12 -117,12 +117,8 @@@ Write operations are always sent to al
  controls how many responses the coordinator waits for before responding to 
the client.
  
  For read operations, the coordinator generally only issues read commands to 
enough replicas to satisfy the consistency
--level. There are a couple of exceptions to this:
--
--- Speculative retry may issue a redundant read request to an extra replica if 
the other replicas have not responded
--  within a specified time window.
--- Based on ``read_repair_chance`` and ``dclocal_read_repair_chance`` (part of 
a table's schema), read requests may be
--  randomly sent to all replicas in order to repair potentially inconsistent 
data.
++level, with one exception. Speculative retry may issue a redundant read 
request to an extra replica if the other replicas
++have not responded within a specified time window.
  
  Picking Consistency Levels
  ~~~~~~~~~~~~~~~~~~~~~~~~~~

http://git-wip-us.apache.org/repos/asf/cassandra/blob/6ad99802/doc/source/cql/ddl.rst
----------------------------------------------------------------------
diff --cc doc/source/cql/ddl.rst
index be5e2c6,3027775..c8cedcf
--- a/doc/source/cql/ddl.rst
+++ b/doc/source/cql/ddl.rst
@@@ -186,8 -186,8 +186,7 @@@ For instance:
          common_name text,
          population varint,
          average_size int
--    ) WITH comment='Important biological records'
--       AND read_repair_chance = 1.0;
++    ) WITH comment='Important biological records';
  
      CREATE TABLE timeline (
          userid uuid,
@@@ -453,17 -454,14 +452,8 @@@ A table supports the following options
  
+================================+==========+=============+===========================================================+
  | ``comment``                    | *simple* | none        | A free-form, 
human-readable comment.                      |
  
+--------------------------------+----------+-------------+-----------------------------------------------------------+
- | ``read_repair_chance``         | *simple* | 0           | The probability 
with which to query extra nodes (e.g.     |
 -| ``read_repair_chance``         | *simple* | 0.1         | The probability 
with which to query extra nodes (e.g.     |
--|                                |          |             | more nodes than 
required by the consistency level) for    |
--|                                |          |             | the purpose of 
read repairs.                              |
--+--------------------------------+----------+-------------+-----------------------------------------------------------+
- | ``dclocal_read_repair_chance`` | *simple* | 0.1         | The probability 
with which to query extra nodes (e.g.     |
 -| ``dclocal_read_repair_chance`` | *simple* | 0           | The probability 
with which to query extra nodes (e.g.     |
--|                                |          |             | more nodes than 
required by the consistency level)        |
--|                                |          |             | belonging to the 
same data center than the read           |
--|                                |          |             | coordinator for 
the purpose of read repairs.              |
- 
+--------------------------------+----------+-------------+-----------------------------------------------------------+
 +| ``speculative_retry``          | *simple* | 99PERCENTILE| :ref:`Speculative 
retry options                           |
 +|                                |          |             | 
<speculative-retry-options>`.                             |
  
+--------------------------------+----------+-------------+-----------------------------------------------------------+
  | ``gc_grace_seconds``           | *simple* | 864000      | Time to wait 
before garbage collecting tombstones         |
  |                                |          |             | (deletion 
markers).                                       |
@@@ -636,8 -590,8 +626,7 @@@ For instance:
      ALTER TABLE addamsFamily ADD gravesite varchar;
  
      ALTER TABLE addamsFamily
--           WITH comment = 'A most excellent and useful table'
--           AND read_repair_chance = 0.2;
++           WITH comment = 'A most excellent and useful table';
  
  The ``ALTER TABLE`` statement can:
  

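As a companion to the speculative_retry row added above, a hedged CQL sketch of setting the option, including the hybrid MIN()/MAX() form that CHANGES.txt attributes to CASSANDRA-14293/14338/14352 (table name and the 50ms threshold are hypothetical; the exact hybrid spelling follows those tickets):

    ALTER TABLE ks.tbl WITH speculative_retry = '99PERCENTILE';
    -- Hybrid policy: speculate at the smaller of the p99 latency and 50ms.
    ALTER TABLE ks.tbl WITH speculative_retry = 'MIN(99PERCENTILE,50ms)';
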
http://git-wip-us.apache.org/repos/asf/cassandra/blob/6ad99802/doc/source/operating/compaction.rst
----------------------------------------------------------------------
diff --cc doc/source/operating/compaction.rst
index cb6be45,0f39000..ace9aa9
--- a/doc/source/operating/compaction.rst
+++ b/doc/source/operating/compaction.rst
@@@ -432,8 -426,8 +432,7 @@@ order, with new data and old data in th
  
  While TWCS tries to minimize the impact of comingled data, users should 
attempt to avoid this behavior.  Specifically,
  users should avoid queries that explicitly set the timestamp via CQL ``USING 
TIMESTAMP``. Additionally, users should run
--frequent repairs (which streams data in such a way that it does not become 
comingled), and disable background read
--repair by setting the table's ``read_repair_chance`` and 
``dclocal_read_repair_chance`` to 0.
++frequent repairs (which stream data in such a way that it does not become comingled).
  
  Changing TimeWindowCompactionStrategy Options
  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

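For reference, a hypothetical CQL sketch of a TWCS table of the kind this guidance applies to (keyspace, columns, and window settings are illustrative only); writes let the server assign the timestamp rather than setting it via USING TIMESTAMP:

    CREATE TABLE ks.events (
        sensor_id uuid,
        ts timestamp,
        value double,
        PRIMARY KEY (sensor_id, ts)
    ) WITH compaction = {'class': 'TimeWindowCompactionStrategy',
                         'compaction_window_unit': 'DAYS',
                         'compaction_window_size': '1'};

    -- Server-assigned write time; no USING TIMESTAMP clause.
    INSERT INTO ks.events (sensor_id, ts, value)
    VALUES (uuid(), toTimestamp(now()), 42.0);
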
http://git-wip-us.apache.org/repos/asf/cassandra/blob/6ad99802/doc/source/operating/snitch.rst
----------------------------------------------------------------------
diff --cc doc/source/operating/snitch.rst
index 91bab29,faea0b3..5f6760a
--- a/doc/source/operating/snitch.rst
+++ b/doc/source/operating/snitch.rst
@@@ -35,8 -35,8 +35,8 @@@ configured with the following propertie
  - ``dynamic_snitch``: whether the dynamic snitch should be enabled or 
disabled.
  - ``dynamic_snitch_update_interval_in_ms``: controls how often to perform the 
more expensive part of host score
    calculation.
--- ``dynamic_snitch_reset_interval_in_ms``: if set greater than zero and 
read_repair_chance is < 1.0, this will allow
--  'pinning' of replicas to hosts in order to increase cache capacity.
++- ``dynamic_snitch_reset_interval_in_ms``: if set greater than zero, this 
will allow 'pinning' of replicas to hosts
++  in order to increase cache capacity.
  - ``dynamic_snitch_badness_threshold:``: The badness threshold will control 
how much worse the pinned host has to be
    before the dynamic snitch will prefer other replicas over it.  This is 
expressed as a double which represents a
    percentage.  Thus, a value of 0.2 means Cassandra would continue to prefer 
the static snitch values until the pinned

http://git-wip-us.apache.org/repos/asf/cassandra/blob/6ad99802/pylib/cqlshlib/cql3handling.py
----------------------------------------------------------------------
diff --cc pylib/cqlshlib/cql3handling.py
index ca14a36,ae5bc8a..314c431
--- a/pylib/cqlshlib/cql3handling.py
+++ b/pylib/cqlshlib/cql3handling.py
@@@ -44,11 -44,11 +44,9 @@@ class Cql3ParsingRuleSet(CqlParsingRule
      columnfamily_layout_options = (
          ('bloom_filter_fp_chance', None),
          ('comment', None),
--        ('dclocal_read_repair_chance', 'local_read_repair_chance'),
          ('gc_grace_seconds', None),
          ('min_index_interval', None),
          ('max_index_interval', None),
--        ('read_repair_chance', None),
          ('default_time_to_live', None),
          ('speculative_retry', None),
          ('memtable_flush_period_in_ms', None),
@@@ -503,8 -476,8 +501,7 @@@ def cf_prop_val_completer(ctxt, cass)
          return ["{'keys': '"]
      if any(this_opt == opt[0] for opt in CqlRuleSet.obsolete_cf_options):
          return ["'<obsolete_option>'"]
--    if this_opt in ('read_repair_chance', 'bloom_filter_fp_chance',
--                    'dclocal_read_repair_chance'):
++    if this_opt == 'bloom_filter_fp_chance':
          return [Hint('<float_between_0_and_1>')]
      if this_opt in ('min_compaction_threshold', 'max_compaction_threshold',
                      'gc_grace_seconds', 'min_index_interval', 
'max_index_interval'):

http://git-wip-us.apache.org/repos/asf/cassandra/blob/6ad99802/pylib/cqlshlib/test/test_cqlsh_completion.py
----------------------------------------------------------------------
diff --cc pylib/cqlshlib/test/test_cqlsh_completion.py
index 90eee4e,df4f7e8..fa9490d
--- a/pylib/cqlshlib/test/test_cqlsh_completion.py
+++ b/pylib/cqlshlib/test/test_cqlsh_completion.py
@@@ -589,21 -589,21 +589,19 @@@ class TestCqlshCompletion(CqlshCompleti
          self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) WITH 
',
                              choices=['bloom_filter_fp_chance', 'compaction',
                                       'compression',
--                                     'dclocal_read_repair_chance',
                                       'default_time_to_live', 
'gc_grace_seconds',
                                       'max_index_interval',
                                       'memtable_flush_period_in_ms',
--                                     'read_repair_chance', 'CLUSTERING',
++                                     'CLUSTERING',
                                       'COMPACT', 'caching', 'comment',
                                       'min_index_interval', 
'speculative_retry', 'cdc'])
          self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) WITH 
',
                              choices=['bloom_filter_fp_chance', 'compaction',
                                       'compression',
--                                     'dclocal_read_repair_chance',
                                       'default_time_to_live', 
'gc_grace_seconds',
                                       'max_index_interval',
                                       'memtable_flush_period_in_ms',
--                                     'read_repair_chance', 'CLUSTERING',
++                                     'CLUSTERING',
                                       'COMPACT', 'caching', 'comment',
                                       'min_index_interval', 
'speculative_retry', 'cdc'])
          self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) WITH 
bloom_filter_fp_chance ',
@@@ -647,11 -647,11 +645,10 @@@
                              + "{'class': 'SizeTieredCompactionStrategy'} AND 
",
                              choices=['bloom_filter_fp_chance', 'compaction',
                                       'compression',
--                                     'dclocal_read_repair_chance',
                                       'default_time_to_live', 
'gc_grace_seconds',
                                       'max_index_interval',
                                       'memtable_flush_period_in_ms',
--                                     'read_repair_chance', 'CLUSTERING',
++                                     'CLUSTERING',
                                       'COMPACT', 'caching', 'comment',
                                       'min_index_interval', 
'speculative_retry', 'cdc'])
          self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH 
compaction = "

http://git-wip-us.apache.org/repos/asf/cassandra/blob/6ad99802/pylib/cqlshlib/test/test_cqlsh_output.py
----------------------------------------------------------------------
diff --cc pylib/cqlshlib/test/test_cqlsh_output.py
index b7240f1,f57c734..2f0d9bf
--- a/pylib/cqlshlib/test/test_cqlsh_output.py
+++ b/pylib/cqlshlib/test/test_cqlsh_output.py
@@@ -617,13 -617,13 +617,11 @@@ class TestCqlshOutput(BaseTestCase)
                  AND compaction = {'class': 
'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 
'max_threshold': '32', 'min_threshold': '4'}
                  AND compression = {'chunk_length_in_kb': '64', 'class': 
'org.apache.cassandra.io.compress.LZ4Compressor'}
                  AND crc_check_chance = 1.0
--                AND dclocal_read_repair_chance = 0.1
                  AND default_time_to_live = 0
                  AND gc_grace_seconds = 864000
                  AND max_index_interval = 2048
                  AND memtable_flush_period_in_ms = 0
                  AND min_index_interval = 128
--                AND read_repair_chance = 0.0
                  AND speculative_retry = '99PERCENTILE';
  
          """ % quote_name(get_keyspace()))

http://git-wip-us.apache.org/repos/asf/cassandra/blob/6ad99802/src/java/org/apache/cassandra/auth/AuthKeyspace.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/auth/AuthKeyspace.java
index a7079dc,a5add9a..9a9dffe
--- a/src/java/org/apache/cassandra/auth/AuthKeyspace.java
+++ b/src/java/org/apache/cassandra/auth/AuthKeyspace.java
@@@ -42,51 -38,48 +42,50 @@@ public final class AuthKeyspac
  
      public static final long SUPERUSER_SETUP_DELAY = 
Long.getLong("cassandra.superuser_setup_delay_ms", 10000);
  
 -    private static final CFMetaData Roles =
 -        compile(ROLES,
 -                "role definitions",
 -                "CREATE TABLE %s ("
 -                + "role text,"
 -                + "is_superuser boolean,"
 -                + "can_login boolean,"
 -                + "salted_hash text,"
 -                + "member_of set<text>,"
 -                + "PRIMARY KEY(role))");
 +    private static final TableMetadata Roles =
 +        parse(ROLES,
 +              "role definitions",
 +              "CREATE TABLE %s ("
 +              + "role text,"
 +              + "is_superuser boolean,"
 +              + "can_login boolean,"
 +              + "salted_hash text,"
 +              + "member_of set<text>,"
 +              + "PRIMARY KEY(role))");
  
 -    private static final CFMetaData RoleMembers =
 -        compile(ROLE_MEMBERS,
 -                "role memberships lookup table",
 -                "CREATE TABLE %s ("
 -                + "role text,"
 -                + "member text,"
 -                + "PRIMARY KEY(role, member))");
 +    private static final TableMetadata RoleMembers =
 +        parse(ROLE_MEMBERS,
 +              "role memberships lookup table",
 +              "CREATE TABLE %s ("
 +              + "role text,"
 +              + "member text,"
 +              + "PRIMARY KEY(role, member))");
  
 -    private static final CFMetaData RolePermissions =
 -        compile(ROLE_PERMISSIONS,
 -                "permissions granted to db roles",
 -                "CREATE TABLE %s ("
 -                + "role text,"
 -                + "resource text,"
 -                + "permissions set<text>,"
 -                + "PRIMARY KEY(role, resource))");
 +    private static final TableMetadata RolePermissions =
 +        parse(ROLE_PERMISSIONS,
 +              "permissions granted to db roles",
 +              "CREATE TABLE %s ("
 +              + "role text,"
 +              + "resource text,"
 +              + "permissions set<text>,"
 +              + "PRIMARY KEY(role, resource))");
  
 -    private static final CFMetaData ResourceRoleIndex =
 -        compile(RESOURCE_ROLE_INDEX,
 -                "index of db roles with permissions granted on a resource",
 -                "CREATE TABLE %s ("
 -                + "resource text,"
 -                + "role text,"
 -                + "PRIMARY KEY(resource, role))");
 +    private static final TableMetadata ResourceRoleIndex =
 +        parse(RESOURCE_ROLE_INDEX,
 +              "index of db roles with permissions granted on a resource",
 +              "CREATE TABLE %s ("
 +              + "resource text,"
 +              + "role text,"
 +              + "PRIMARY KEY(resource, role))");
  
  
 -    private static CFMetaData compile(String name, String description, String 
schema)
 +    private static TableMetadata parse(String name, String description, 
String cql)
      {
 -        return CFMetaData.compile(String.format(schema, name), 
SchemaConstants.AUTH_KEYSPACE_NAME)
 -                         .comment(description)
 -                         .gcGraceSeconds((int) TimeUnit.DAYS.toSeconds(90));
 +        return CreateTableStatement.parse(format(cql, name), 
SchemaConstants.AUTH_KEYSPACE_NAME)
 +                                   
.id(TableId.forSystemTable(SchemaConstants.AUTH_KEYSPACE_NAME, name))
 +                                   .comment(description)
-                                    .dcLocalReadRepairChance(0.0)
 +                                   .gcGraceSeconds((int) 
TimeUnit.DAYS.toSeconds(90))
 +                                   .build();
      }
  
      public static KeyspaceMetadata metadata()

http://git-wip-us.apache.org/repos/asf/cassandra/blob/6ad99802/src/java/org/apache/cassandra/cql3/statements/TableAttributes.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/cql3/statements/TableAttributes.java
index ba7a001,04b9532..572362b
--- a/src/java/org/apache/cassandra/cql3/statements/TableAttributes.java
+++ b/src/java/org/apache/cassandra/cql3/statements/TableAttributes.java
@@@ -104,9 -107,19 +104,6 @@@ public final class TableAttributes exte
              
builder.compression(CompressionParams.fromMap(getMap(Option.COMPRESSION)));
          }
  
--        if (hasOption(Option.DCLOCAL_READ_REPAIR_CHANCE))
-             
builder.dcLocalReadRepairChance(getDouble(Option.DCLOCAL_READ_REPAIR_CHANCE));
 -        {
 -            double chance = getDouble(Option.DCLOCAL_READ_REPAIR_CHANCE);
 -
 -            if (chance != 0.0)
 -            {
 -                ClientWarn.instance.warn("dclocal_read_repair_chance table 
option has been deprecated and will be removed in version 4.0");
 -                maybeLogReadRepairChanceDeprecationWarning();
 -            }
 -
 -            builder.dcLocalReadRepairChance(chance);
 -        }
--
          if (hasOption(Option.DEFAULT_TIME_TO_LIVE))
              builder.defaultTimeToLive(getInt(Option.DEFAULT_TIME_TO_LIVE));
  
@@@ -122,11 -135,21 +119,8 @@@
          if (hasOption(Option.MIN_INDEX_INTERVAL))
              builder.minIndexInterval(getInt(Option.MIN_INDEX_INTERVAL));
  
--        if (hasOption(Option.READ_REPAIR_CHANCE))
-             builder.readRepairChance(getDouble(Option.READ_REPAIR_CHANCE));
 -        {
 -            double chance = getDouble(Option.READ_REPAIR_CHANCE);
 -
 -            if (chance != 0.0)
 -            {
 -                ClientWarn.instance.warn("read_repair_chance table option has 
been deprecated and will be removed in version 4.0");
 -                maybeLogReadRepairChanceDeprecationWarning();
 -            }
 -
 -            builder.readRepairChance(chance);
 -        }
--
          if (hasOption(Option.SPECULATIVE_RETRY))
 -            
builder.speculativeRetry(SpeculativeRetryParam.fromString(getString(Option.SPECULATIVE_RETRY)));
 +            
builder.speculativeRetry(SpeculativeRetryPolicy.fromString(getString(Option.SPECULATIVE_RETRY)));
  
          if (hasOption(Option.CRC_CHECK_CHANCE))
              builder.crcCheckChance(getDouble(Option.CRC_CHECK_CHANCE));

http://git-wip-us.apache.org/repos/asf/cassandra/blob/6ad99802/src/java/org/apache/cassandra/db/ConsistencyLevel.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/db/ConsistencyLevel.java
index 19f2e10,ab4243f..840c174
--- a/src/java/org/apache/cassandra/db/ConsistencyLevel.java
+++ b/src/java/org/apache/cassandra/db/ConsistencyLevel.java
@@@ -17,8 -17,9 +17,7 @@@
   */
  package org.apache.cassandra.db;
  
 -import java.net.InetAddress;
  import java.util.ArrayList;
--import java.util.Collections;
  import java.util.HashMap;
  import java.util.List;
  import java.util.Map;
@@@ -27,10 -28,9 +26,9 @@@ import com.google.common.collect.Iterab
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
 -import org.apache.cassandra.config.CFMetaData;
 +import org.apache.cassandra.locator.InetAddressAndPort;
 +import org.apache.cassandra.schema.TableMetadata;
  import org.apache.cassandra.config.DatabaseDescriptor;
- import org.apache.cassandra.service.reads.ReadRepairDecision;
 -import org.apache.cassandra.config.ReadRepairDecision;
  import org.apache.cassandra.exceptions.InvalidRequestException;
  import org.apache.cassandra.exceptions.UnavailableException;
  import org.apache.cassandra.locator.AbstractReplicationStrategy;
@@@ -175,20 -175,20 +173,15 @@@ public enum ConsistencyLeve
          return dcEndpoints;
      }
  
 -    public List<InetAddress> filterForQuery(Keyspace keyspace, 
List<InetAddress> liveEndpoints)
 -    {
 -        return filterForQuery(keyspace, liveEndpoints, 
ReadRepairDecision.NONE);
 -    }
 -
 -    public List<InetAddress> filterForQuery(Keyspace keyspace, List<InetAddress> liveEndpoints, ReadRepairDecision readRepair)
 +    public List<InetAddressAndPort> filterForQuery(Keyspace keyspace, List<InetAddressAndPort> liveEndpoints)
      {
-         return filterForQuery(keyspace, liveEndpoints, ReadRepairDecision.NONE);
-     }
- 
-     public List<InetAddressAndPort> filterForQuery(Keyspace keyspace, List<InetAddressAndPort> liveEndpoints, ReadRepairDecision readRepair)
-     {
          /*
           * If we are doing an each quorum query, we have to make sure that the endpoints we select
           * provide a quorum for each data center. If we are not using a NetworkTopologyStrategy,
           * we should fall through and grab a quorum in the replication strategy.
           */
          if (this == EACH_QUORUM && keyspace.getReplicationStrategy() instanceof NetworkTopologyStrategy)
--            return filterForEachQuorum(keyspace, liveEndpoints, readRepair);
++            return filterForEachQuorum(keyspace, liveEndpoints);
  
          /*
           * Endpoints are expected to be restricted to live replicas, sorted by snitch preference.
@@@ -197,43 -197,43 +190,16 @@@
           * the blockFor first ones).
           */
          if (isDCLocal)
--            Collections.sort(liveEndpoints, DatabaseDescriptor.getLocalComparator());
++            liveEndpoints.sort(DatabaseDescriptor.getLocalComparator());
  
--        switch (readRepair)
--        {
--            case NONE:
--                return liveEndpoints.subList(0, Math.min(liveEndpoints.size(), blockFor(keyspace)));
--            case GLOBAL:
--                return liveEndpoints;
--            case DC_LOCAL:
-                 List<InetAddressAndPort> local = new ArrayList<>();
-                 List<InetAddressAndPort> other = new ArrayList<>();
-                 for (InetAddressAndPort add : liveEndpoints)
 -                List<InetAddress> local = new ArrayList<InetAddress>();
 -                List<InetAddress> other = new ArrayList<InetAddress>();
 -                for (InetAddress add : liveEndpoints)
--                {
--                    if (isLocal(add))
--                        local.add(add);
--                    else
--                        other.add(add);
--                }
--                // check if blockfor more than we have localep's
--                int blockFor = blockFor(keyspace);
--                if (local.size() < blockFor)
--                    local.addAll(other.subList(0, Math.min(blockFor - local.size(), other.size())));
--                return local;
--            default:
--                throw new AssertionError();
--        }
++        return liveEndpoints.subList(0, Math.min(liveEndpoints.size(), blockFor(keyspace)));
      }
  
-     private List<InetAddressAndPort> filterForEachQuorum(Keyspace keyspace, List<InetAddressAndPort> liveEndpoints, ReadRepairDecision readRepair)
 -    private List<InetAddress> filterForEachQuorum(Keyspace keyspace, List<InetAddress> liveEndpoints, ReadRepairDecision readRepair)
++    private List<InetAddressAndPort> filterForEachQuorum(Keyspace keyspace, List<InetAddressAndPort> liveEndpoints)
      {
          NetworkTopologyStrategy strategy = (NetworkTopologyStrategy) keyspace.getReplicationStrategy();
  
--        // quickly drop out if read repair is GLOBAL, since we just use all of the live endpoints
--        if (readRepair == ReadRepairDecision.GLOBAL)
--            return liveEndpoints;
--
 -        Map<String, List<InetAddress>> dcsEndpoints = new HashMap<>();
 +        Map<String, List<InetAddressAndPort>> dcsEndpoints = new HashMap<>();
          for (String dc: strategy.getDatacenters())
              dcsEndpoints.put(dc, new ArrayList<>());

@@@ -243,14 -243,14 +209,11 @@@
              dcsEndpoints.get(dc).add(add);
          }
  
 -        List<InetAddress> waitSet = new ArrayList<>();
 -        for (Map.Entry<String, List<InetAddress>> dcEndpoints : dcsEndpoints.entrySet())
 +        List<InetAddressAndPort> waitSet = new ArrayList<>();
 +        for (Map.Entry<String, List<InetAddressAndPort>> dcEndpoints : dcsEndpoints.entrySet())
          {
 -            List<InetAddress> dcEndpoint = dcEndpoints.getValue();
 -            if (readRepair == ReadRepairDecision.DC_LOCAL && dcEndpoints.getKey().equals(DatabaseDescriptor.getLocalDataCenter()))
 -                waitSet.addAll(dcEndpoint);
 -            else
 -                waitSet.addAll(dcEndpoint.subList(0, Math.min(localQuorumFor(keyspace, dcEndpoints.getKey()), dcEndpoint.size())));
 +            List<InetAddressAndPort> dcEndpoint = dcEndpoints.getValue();
-             if (readRepair == ReadRepairDecision.DC_LOCAL && dcEndpoints.getKey().equals(DatabaseDescriptor.getLocalDataCenter()))
-                 waitSet.addAll(dcEndpoint);
-             else
-                 waitSet.addAll(dcEndpoint.subList(0, Math.min(localQuorumFor(keyspace, dcEndpoints.getKey()), dcEndpoint.size())));
++            waitSet.addAll(dcEndpoint.subList(0, Math.min(localQuorumFor(keyspace, dcEndpoints.getKey()), dcEndpoint.size())));
          }
  
          return waitSet;
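
With the ReadRepairDecision branches gone, endpoint filtering reduces to two
moves: truncate the snitch-sorted live endpoints to blockFor(keyspace), and,
for EACH_QUORUM on a NetworkTopologyStrategy, take a local quorum from each
datacenter. A minimal standalone sketch of the surviving logic; plain String
endpoints and hard-coded quorum sizes stand in for InetAddressAndPort,
blockFor() and localQuorumFor(), so nothing below is Cassandra's actual API:

    import java.util.*;

    public class EndpointFilterSketch
    {
        // Stand-in for blockFor(keyspace): replicas the consistency level must wait on.
        static final int BLOCK_FOR = 2;

        // Post-patch filterForQuery: just take the first blockFor live endpoints.
        static List<String> filterForQuery(List<String> liveEndpoints)
        {
            return liveEndpoints.subList(0, Math.min(liveEndpoints.size(), BLOCK_FOR));
        }

        // Post-patch filterForEachQuorum: a local quorum from every datacenter.
        static List<String> filterForEachQuorum(Map<String, List<String>> byDc, Map<String, Integer> quorumByDc)
        {
            List<String> waitSet = new ArrayList<>();
            for (Map.Entry<String, List<String>> dc : byDc.entrySet())
            {
                List<String> eps = dc.getValue();
                waitSet.addAll(eps.subList(0, Math.min(quorumByDc.get(dc.getKey()), eps.size())));
            }
            return waitSet;
        }

        public static void main(String[] args)
        {
            System.out.println(filterForQuery(Arrays.asList("a", "b", "c"))); // [a, b]

            Map<String, List<String>> byDc = new LinkedHashMap<>();
            byDc.put("dc1", Arrays.asList("a", "b", "c"));
            byDc.put("dc2", Arrays.asList("d", "e", "f"));
            Map<String, Integer> quorum = new LinkedHashMap<>();
            quorum.put("dc1", 2);
            quorum.put("dc2", 2);
            System.out.println(filterForEachQuorum(byDc, quorum)); // [a, b, d, e]
        }
    }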

http://git-wip-us.apache.org/repos/asf/cassandra/blob/6ad99802/src/java/org/apache/cassandra/db/SystemKeyspace.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/db/SystemKeyspace.java
index 91a1bff,ac51662..12ab952
--- a/src/java/org/apache/cassandra/db/SystemKeyspace.java
+++ b/src/java/org/apache/cassandra/db/SystemKeyspace.java
@@@ -307,73 -293,154 +307,72 @@@ public final class SystemKeyspac
                  + "prepared_id blob,"
                  + "logged_keyspace text,"
                  + "query_string text,"
 -                + "PRIMARY KEY ((prepared_id)))");
 -
 -    @Deprecated
 -    public static final CFMetaData LegacyHints =
 -        compile(LEGACY_HINTS,
 -                "*DEPRECATED* hints awaiting delivery",
 -                "CREATE TABLE %s ("
 -                + "target_id uuid,"
 -                + "hint_id timeuuid,"
 -                + "message_version int,"
 -                + "mutation blob,"
 -                + "PRIMARY KEY ((target_id), hint_id, message_version)) "
 -                + "WITH COMPACT STORAGE")
 -                .compaction(CompactionParams.scts(singletonMap("enabled", "false")))
 -                .gcGraceSeconds(0);
 -
 -    @Deprecated
 -    public static final CFMetaData LegacyBatchlog =
 -        compile(LEGACY_BATCHLOG,
 -                "*DEPRECATED* batchlog entries",
 -                "CREATE TABLE %s ("
 -                + "id uuid,"
 -                + "data blob,"
 -                + "version int,"
 -                + "written_at timestamp,"
 -                + "PRIMARY KEY ((id)))")
 -                .compaction(CompactionParams.scts(singletonMap("min_threshold", "2")))
 -                .gcGraceSeconds(0);
 -
 -    @Deprecated
 -    public static final CFMetaData LegacyKeyspaces =
 -        compile(LEGACY_KEYSPACES,
 -                "*DEPRECATED* keyspace definitions",
 -                "CREATE TABLE %s ("
 -                + "keyspace_name text,"
 -                + "durable_writes boolean,"
 -                + "strategy_class text,"
 -                + "strategy_options text,"
 -                + "PRIMARY KEY ((keyspace_name))) "
 -                + "WITH COMPACT STORAGE");
 +                + "PRIMARY KEY ((prepared_id)))")
 +                .build();
 +
 +    private static final TableMetadata Repairs =
 +        parse(REPAIRS,
 +          "repairs",
 +          "CREATE TABLE %s ("
 +          + "parent_id timeuuid, "
 +          + "started_at timestamp, "
 +          + "last_update timestamp, "
 +          + "repaired_at timestamp, "
 +          + "state int, "
 +          + "coordinator inet, "
 +          + "coordinator_port int,"
 +          + "participants set<inet>,"
 +          + "participants_wp set<text>,"
 +          + "ranges set<blob>, "
 +          + "cfids set<uuid>, "
 +          + "PRIMARY KEY (parent_id))").build();
  
      @Deprecated
 -    public static final CFMetaData LegacyColumnfamilies =
 -        compile(LEGACY_COLUMNFAMILIES,
 -                "*DEPRECATED* table definitions",
 -                "CREATE TABLE %s ("
 -                + "keyspace_name text,"
 -                + "columnfamily_name text,"
 -                + "bloom_filter_fp_chance double,"
 -                + "caching text,"
 -                + "cf_id uuid," // post-2.1 UUID cfid
 -                + "comment text,"
 -                + "compaction_strategy_class text,"
 -                + "compaction_strategy_options text,"
 -                + "comparator text,"
 -                + "compression_parameters text,"
 -                + "default_time_to_live int,"
 -                + "default_validator text,"
 -                + "dropped_columns map<text, bigint>,"
 -                + "gc_grace_seconds int,"
 -                + "is_dense boolean,"
 -                + "key_validator text,"
 -                + "local_read_repair_chance double,"
 -                + "max_compaction_threshold int,"
 -                + "max_index_interval int,"
 -                + "memtable_flush_period_in_ms int,"
 -                + "min_compaction_threshold int,"
 -                + "min_index_interval int,"
 -                + "read_repair_chance double,"
 -                + "speculative_retry text,"
 -                + "subcomparator text,"
 -                + "type text,"
 -                + "PRIMARY KEY ((keyspace_name), columnfamily_name))");
 +    private static final TableMetadata LegacyPeers =
 +        parse(LEGACY_PEERS,
 +            "information about known peers in the cluster",
 +            "CREATE TABLE %s ("
 +            + "peer inet,"
 +            + "data_center text,"
 +            + "host_id uuid,"
 +            + "preferred_ip inet,"
 +            + "rack text,"
 +            + "release_version text,"
 +            + "rpc_address inet,"
 +            + "schema_version uuid,"
 +            + "tokens set<varchar>,"
 +            + "PRIMARY KEY ((peer)))")
 +            .build();
  
      @Deprecated
 -    public static final CFMetaData LegacyColumns =
 -        compile(LEGACY_COLUMNS,
 -                "*DEPRECATED* column definitions",
 -                "CREATE TABLE %s ("
 -                + "keyspace_name text,"
 -                + "columnfamily_name text,"
 -                + "column_name text,"
 -                + "component_index int,"
 -                + "index_name text,"
 -                + "index_options text,"
 -                + "index_type text,"
 -                + "type text,"
 -                + "validator text,"
 -                + "PRIMARY KEY ((keyspace_name), columnfamily_name, 
column_name))");
 +    private static final TableMetadata LegacyPeerEvents =
 +        parse(LEGACY_PEER_EVENTS,
 +            "events related to peers",
 +            "CREATE TABLE %s ("
 +            + "peer inet,"
 +            + "hints_dropped map<uuid, int>,"
 +            + "PRIMARY KEY ((peer)))")
 +            .build();
  
      @Deprecated
 -    public static final CFMetaData LegacyTriggers =
 -        compile(LEGACY_TRIGGERS,
 -                "*DEPRECATED* trigger definitions",
 -                "CREATE TABLE %s ("
 -                + "keyspace_name text,"
 -                + "columnfamily_name text,"
 -                + "trigger_name text,"
 -                + "trigger_options map<text, text>,"
 -                + "PRIMARY KEY ((keyspace_name), columnfamily_name, 
trigger_name))");
 -
 -    @Deprecated
 -    public static final CFMetaData LegacyUsertypes =
 -        compile(LEGACY_USERTYPES,
 -                "*DEPRECATED* user defined type definitions",
 -                "CREATE TABLE %s ("
 -                + "keyspace_name text,"
 -                + "type_name text,"
 -                + "field_names list<text>,"
 -                + "field_types list<text>,"
 -                + "PRIMARY KEY ((keyspace_name), type_name))");
 -
 -    @Deprecated
 -    public static final CFMetaData LegacyFunctions =
 -        compile(LEGACY_FUNCTIONS,
 -                "*DEPRECATED* user defined function definitions",
 -                "CREATE TABLE %s ("
 -                + "keyspace_name text,"
 -                + "function_name text,"
 -                + "signature frozen<list<text>>,"
 -                + "argument_names list<text>,"
 -                + "argument_types list<text>,"
 -                + "body text,"
 -                + "language text,"
 -                + "return_type text,"
 -                + "called_on_null_input boolean,"
 -                + "PRIMARY KEY ((keyspace_name), function_name, signature))");
 -
 -    @Deprecated
 -    public static final CFMetaData LegacyAggregates =
 -        compile(LEGACY_AGGREGATES,
 -                "*DEPRECATED* user defined aggregate definitions",
 -                "CREATE TABLE %s ("
 -                + "keyspace_name text,"
 -                + "aggregate_name text,"
 -                + "signature frozen<list<text>>,"
 -                + "argument_types list<text>,"
 -                + "final_func text,"
 -                + "initcond blob,"
 -                + "return_type text,"
 -                + "state_func text,"
 -                + "state_type text,"
 -                + "PRIMARY KEY ((keyspace_name), aggregate_name, 
signature))");
 -
 -    private static CFMetaData compile(String name, String description, String 
schema)
 -    {
 -        return CFMetaData.compile(String.format(schema, name), 
SchemaConstants.SYSTEM_KEYSPACE_NAME)
 -                         .comment(description);
 +    private static final TableMetadata LegacyTransferredRanges =
 +        parse(LEGACY_TRANSFERRED_RANGES,
 +            "record of transferred ranges for streaming operation",
 +            "CREATE TABLE %s ("
 +            + "operation text,"
 +            + "peer inet,"
 +            + "keyspace_name text,"
 +            + "ranges set<blob>,"
 +            + "PRIMARY KEY ((operation, keyspace_name), peer))")
 +            .build();
 +
 +    private static TableMetadata.Builder parse(String table, String description, String cql)
 +    {
 +        return CreateTableStatement.parse(format(cql, table), SchemaConstants.SYSTEM_KEYSPACE_NAME)
 +                                   .id(TableId.forSystemTable(SchemaConstants.SYSTEM_KEYSPACE_NAME, table))
-                                    .dcLocalReadRepairChance(0.0)
 +                                   .gcGraceSeconds(0)
 +                                   .memtableFlushPeriod((int) TimeUnit.HOURS.toMillis(1))
 +                                   .comment(description);
      }
  
      public static KeyspaceMetadata metadata()
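
The CFMetaData.compile() helper gives way to a parse() that returns a
TableMetadata.Builder, letting per-table settings (fixed id, gc grace, flush
period) chain ahead of a single build(). A toy sketch of that builder shape;
the class and field names below are made up for illustration, not the real
TableMetadata.Builder:

    import java.util.concurrent.TimeUnit;

    public final class TableBuilderSketch
    {
        final String name;
        final String comment;
        final int gcGraceSeconds;
        final int memtableFlushPeriodMs;

        private TableBuilderSketch(Builder b)
        {
            this.name = b.name;
            this.comment = b.comment;
            this.gcGraceSeconds = b.gcGraceSeconds;
            this.memtableFlushPeriodMs = b.memtableFlushPeriodMs;
        }

        // Stand-in for CreateTableStatement.parse(...): returns a builder, not the table.
        static Builder parse(String name)
        {
            return new Builder(name);
        }

        static final class Builder
        {
            private final String name;
            private String comment = "";
            private int gcGraceSeconds;
            private int memtableFlushPeriodMs;

            private Builder(String name) { this.name = name; }

            Builder comment(String comment) { this.comment = comment; return this; }
            Builder gcGraceSeconds(int seconds) { this.gcGraceSeconds = seconds; return this; }
            Builder memtableFlushPeriod(int ms) { this.memtableFlushPeriodMs = ms; return this; }
            TableBuilderSketch build() { return new TableBuilderSketch(this); }
        }

        public static void main(String[] args)
        {
            // Mirrors the parse(...).gcGraceSeconds(0).memtableFlushPeriod(...).comment(...).build() chain above.
            TableBuilderSketch repairs = parse("repairs")
                                         .gcGraceSeconds(0)
                                         .memtableFlushPeriod((int) TimeUnit.HOURS.toMillis(1))
                                         .comment("repair progress")
                                         .build();
            System.out.println(repairs.name + ": flush every " + repairs.memtableFlushPeriodMs + " ms");
        }
    }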

http://git-wip-us.apache.org/repos/asf/cassandra/blob/6ad99802/src/java/org/apache/cassandra/db/TableCQLHelper.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/db/TableCQLHelper.java
index 862cf30,0000000..0e9977d
mode 100644,000000..100644
--- a/src/java/org/apache/cassandra/db/TableCQLHelper.java
+++ b/src/java/org/apache/cassandra/db/TableCQLHelper.java
@@@ -1,428 -1,0 +1,426 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.cassandra.db;
 +
 +import java.nio.ByteBuffer;
 +import java.util.*;
 +import java.util.concurrent.atomic.*;
 +import java.util.function.*;
 +
 +import com.google.common.annotations.VisibleForTesting;
 +import com.google.common.collect.Iterables;
 +
 +import org.apache.cassandra.cql3.statements.*;
 +import org.apache.cassandra.db.marshal.*;
 +import org.apache.cassandra.schema.*;
 +import org.apache.cassandra.utils.*;
 +
 +/**
 + * Helper methods to represent TableMetadata and related objects in CQL format
 + */
 +public class TableCQLHelper
 +{
 +    public static List<String> dumpReCreateStatements(TableMetadata metadata)
 +    {
 +        List<String> l = new ArrayList<>();
 +        // Types come first, as the table can't be created without them
 +        l.addAll(TableCQLHelper.getUserTypesAsCQL(metadata));
 +        // Record re-create schema statements
 +        l.add(TableCQLHelper.getTableMetadataAsCQL(metadata, true));
 +        // Dropped columns (and re-additions)
 +        l.addAll(TableCQLHelper.getDroppedColumnsAsCQL(metadata));
 +        // Indexes are applied last, since otherwise they may interfere with column drops / re-additions
 +        l.addAll(TableCQLHelper.getIndexesAsCQL(metadata));
 +        return l;
 +    }
 +
 +    private static List<ColumnMetadata> getClusteringColumns(TableMetadata metadata)
 +    {
 +        List<ColumnMetadata> cds = new ArrayList<>(metadata.clusteringColumns().size());
 +
 +        if (!metadata.isStaticCompactTable())
 +            for (ColumnMetadata cd : metadata.clusteringColumns())
 +                cds.add(cd);
 +
 +        return cds;
 +    }
 +
 +    private static List<ColumnMetadata> getPartitionColumns(TableMetadata metadata)
 +    {
 +        List<ColumnMetadata> cds = new ArrayList<>(metadata.regularAndStaticColumns().size());
 +
 +        for (ColumnMetadata cd : metadata.staticColumns())
 +            cds.add(cd);
 +
 +        if (metadata.isDense())
 +        {
 +            // remove an empty type
 +            for (ColumnMetadata cd : metadata.regularColumns())
 +                if (!cd.type.equals(EmptyType.instance))
 +                    cds.add(cd);
 +        }
 +        // "regular" columns are not exposed for static compact tables
 +        else if (!metadata.isStaticCompactTable())
 +        {
 +            for (ColumnMetadata cd : metadata.regularColumns())
 +                cds.add(cd);
 +        }
 +
 +        return cds;
 +    }
 +
 +    /**
 +     * Build a CQL String representation of Table Metadata
 +     */
 +    @VisibleForTesting
 +    public static String getTableMetadataAsCQL(TableMetadata metadata, boolean includeDroppedColumns)
 +    {
 +        StringBuilder sb = new StringBuilder();
 +        if (!isCqlCompatible(metadata))
 +        {
 +            sb.append(String.format("/*\nWarning: Table %s omitted because it 
has constructs not compatible with CQL (was created via legacy API).\n",
 +                                    metadata.toString()));
 +            sb.append("\nApproximate structure, for reference:");
 +            sb.append("\n(this should not be used to reproduce this 
schema)\n\n");
 +        }
 +
 +        sb.append("CREATE TABLE IF NOT EXISTS ");
 +        sb.append(metadata.toString()).append(" (");
 +
 +        List<ColumnMetadata> partitionKeyColumns = metadata.partitionKeyColumns();
 +        List<ColumnMetadata> clusteringColumns = getClusteringColumns(metadata);
 +        List<ColumnMetadata> partitionColumns = getPartitionColumns(metadata);
 +
 +        Consumer<StringBuilder> cdCommaAppender = commaAppender("\n\t");
 +        sb.append("\n\t");
 +        for (ColumnMetadata cfd: partitionKeyColumns)
 +        {
 +            cdCommaAppender.accept(sb);
 +            sb.append(toCQL(cfd));
 +            if (partitionKeyColumns.size() == 1 && clusteringColumns.size() == 0)
 +                sb.append(" PRIMARY KEY");
 +        }
 +
 +        for (ColumnMetadata cfd: clusteringColumns)
 +        {
 +            cdCommaAppender.accept(sb);
 +            sb.append(toCQL(cfd));
 +        }
 +
 +        for (ColumnMetadata cfd: partitionColumns)
 +        {
 +            cdCommaAppender.accept(sb);
 +            sb.append(toCQL(cfd, metadata.isStaticCompactTable()));
 +        }
 +
 +        if (includeDroppedColumns)
 +        {
 +            for (Map.Entry<ByteBuffer, DroppedColumn> entry: metadata.droppedColumns.entrySet())
 +            {
 +                if (metadata.getColumn(entry.getKey()) != null)
 +                    continue;
 +
 +                DroppedColumn droppedColumn = entry.getValue();
 +                cdCommaAppender.accept(sb);
 +                sb.append(droppedColumn.column.name.toCQLString());
 +                sb.append(' ');
 +                sb.append(droppedColumn.column.type.asCQL3Type().toString());
 +            }
 +        }
 +
 +        if (clusteringColumns.size() > 0 || partitionKeyColumns.size() > 1)
 +        {
 +            sb.append(",\n\tPRIMARY KEY (");
 +            if (partitionKeyColumns.size() > 1)
 +            {
 +                sb.append("(");
 +                Consumer<StringBuilder> pkCommaAppender = commaAppender(" ");
 +                for (ColumnMetadata cfd : partitionKeyColumns)
 +                {
 +                    pkCommaAppender.accept(sb);
 +                    sb.append(cfd.name.toCQLString());
 +                }
 +                sb.append(")");
 +            }
 +            else
 +            {
 +                sb.append(partitionKeyColumns.get(0).name.toCQLString());
 +            }
 +
 +            for (ColumnMetadata cfd : metadata.clusteringColumns())
 +                sb.append(", ").append(cfd.name.toCQLString());
 +
 +            sb.append(')');
 +        }
 +        sb.append(")\n\t");
 +        sb.append("WITH ");
 +
 +        sb.append("ID = ").append(metadata.id).append("\n\tAND ");
 +
 +        if (metadata.isCompactTable())
 +            sb.append("COMPACT STORAGE\n\tAND ");
 +
 +        if (clusteringColumns.size() > 0)
 +        {
 +            sb.append("CLUSTERING ORDER BY (");
 +
 +            Consumer<StringBuilder> cOrderCommaAppender = commaAppender(" ");
 +            for (ColumnMetadata cd : clusteringColumns)
 +            {
 +                cOrderCommaAppender.accept(sb);
 +                sb.append(cd.name.toCQLString()).append(' ').append(cd.clusteringOrder().toString());
 +            }
 +            sb.append(")\n\tAND ");
 +        }
 +
 +        sb.append(toCQL(metadata.params));
 +        sb.append(";");
 +
 +        if (!isCqlCompatible(metadata))
 +        {
 +            sb.append("\n*/");
 +        }
 +        return sb.toString();
 +    }
 +
 +    /**
 +     * Build a CQL String representation of User Types used in the given Table.
 +     *
 +     * Type order is ensured as types are built incrementally: from the innermost (most nested)
 +     * to the outermost.
 +     */
 +    @VisibleForTesting
 +    public static List<String> getUserTypesAsCQL(TableMetadata metadata)
 +    {
 +        List<AbstractType> types = new ArrayList<>();
 +        Set<AbstractType> typeSet = new HashSet<>();
 +        for (ColumnMetadata cd: Iterables.concat(metadata.partitionKeyColumns(), metadata.clusteringColumns(), metadata.regularAndStaticColumns()))
 +        {
 +            AbstractType type = cd.type;
 +            if (type.isUDT())
 +                resolveUserType((UserType) type, typeSet, types);
 +        }
 +
 +        List<String> typeStrings = new ArrayList<>(types.size());
 +        for (AbstractType type: types)
 +            typeStrings.add(toCQL((UserType) type));
 +        return typeStrings;
 +    }
 +
 +    /**
 +     * Build a CQL String representation of Dropped Columns in the given Table.
 +     *
 +     * If a column was dropped and later re-created, a matching `ADD` statement is appended as well.
 +     */
 +    @VisibleForTesting
 +    public static List<String> getDroppedColumnsAsCQL(TableMetadata metadata)
 +    {
 +        List<String> droppedColumns = new ArrayList<>();
 +
 +        for (Map.Entry<ByteBuffer, DroppedColumn> entry: metadata.droppedColumns.entrySet())
 +        {
 +            DroppedColumn column = entry.getValue();
 +            droppedColumns.add(toCQLDrop(metadata, column));
 +            if (metadata.getColumn(entry.getKey()) != null)
 +                droppedColumns.add(toCQLAdd(metadata, metadata.getColumn(entry.getKey())));
 +        }
 +
 +        return droppedColumns;
 +    }
 +
 +    /**
 +     * Build a CQL String representation of Indexes on columns in the given Table
 +     */
 +    @VisibleForTesting
 +    public static List<String> getIndexesAsCQL(TableMetadata metadata)
 +    {
 +        List<String> indexes = new ArrayList<>(metadata.indexes.size());
 +        for (IndexMetadata indexMetadata: metadata.indexes)
 +            indexes.add(toCQL(metadata, indexMetadata));
 +        return indexes;
 +    }
 +
 +    private static String toCQL(TableMetadata baseTable, IndexMetadata indexMetadata)
 +    {
 +        if (indexMetadata.isCustom())
 +        {
 +            Map<String, String> options = new HashMap<>();
 +            indexMetadata.options.forEach((k, v) -> {
 +                if (!k.equals(IndexTarget.TARGET_OPTION_NAME) && !k.equals(IndexTarget.CUSTOM_INDEX_OPTION_NAME))
 +                    options.put(k, v);
 +            });
 +
 +            return String.format("CREATE CUSTOM INDEX %s ON %s (%s) USING 
'%s'%s;",
 +                                 indexMetadata.toCQLString(),
 +                                 baseTable.toString(),
 +                                 
indexMetadata.options.get(IndexTarget.TARGET_OPTION_NAME),
 +                                 
indexMetadata.options.get(IndexTarget.CUSTOM_INDEX_OPTION_NAME),
 +                                 options.isEmpty() ? "" : " WITH OPTIONS " + 
toCQL(options));
 +        }
 +        else
 +        {
 +            return String.format("CREATE INDEX %s ON %s (%s);",
 +                                 indexMetadata.toCQLString(),
 +                                 baseTable.toString(),
 +                                 indexMetadata.options.get(IndexTarget.TARGET_OPTION_NAME));
 +        }
 +    }
 +    private static String toCQL(UserType userType)
 +    {
 +        StringBuilder sb = new StringBuilder();
 +        sb.append("CREATE TYPE ").append(userType.toCQLString()).append(" (");
 +
 +        Consumer<StringBuilder> commaAppender = commaAppender(" ");
 +        for (int i = 0; i < userType.size(); i++)
 +        {
 +            commaAppender.accept(sb);
 +            sb.append(String.format("%s %s",
 +                                    userType.fieldNameAsString(i),
 +                                    userType.fieldType(i).asCQL3Type()));
 +        }
 +        sb.append(");");
 +        return sb.toString();
 +    }
 +
 +    private static String toCQL(TableParams tableParams)
 +    {
 +        StringBuilder builder = new StringBuilder();
 +
 +        builder.append("bloom_filter_fp_chance = 
").append(tableParams.bloomFilterFpChance);
-         builder.append("\n\tAND dclocal_read_repair_chance = 
").append(tableParams.dcLocalReadRepairChance);
 +        builder.append("\n\tAND crc_check_chance = 
").append(tableParams.crcCheckChance);
 +        builder.append("\n\tAND default_time_to_live = 
").append(tableParams.defaultTimeToLive);
 +        builder.append("\n\tAND gc_grace_seconds = 
").append(tableParams.gcGraceSeconds);
 +        builder.append("\n\tAND min_index_interval = 
").append(tableParams.minIndexInterval);
 +        builder.append("\n\tAND max_index_interval = 
").append(tableParams.maxIndexInterval);
 +        builder.append("\n\tAND memtable_flush_period_in_ms = 
").append(tableParams.memtableFlushPeriodInMs);
-         builder.append("\n\tAND read_repair_chance = 
").append(tableParams.readRepairChance);
 +        builder.append("\n\tAND speculative_retry = 
'").append(tableParams.speculativeRetry).append("'");
 +        builder.append("\n\tAND comment = 
").append(singleQuote(tableParams.comment));
 +        builder.append("\n\tAND caching = 
").append(toCQL(tableParams.caching.asMap()));
 +        builder.append("\n\tAND compaction = 
").append(toCQL(tableParams.compaction.asMap()));
 +        builder.append("\n\tAND compression = 
").append(toCQL(tableParams.compression.asMap()));
 +        builder.append("\n\tAND cdc = ").append(tableParams.cdc);
 +
 +        builder.append("\n\tAND extensions = { ");
 +        for (Map.Entry<String, ByteBuffer> entry : tableParams.extensions.entrySet())
 +        {
 +            builder.append(singleQuote(entry.getKey()));
 +            builder.append(": ");
 +            builder.append("0x").append(ByteBufferUtil.bytesToHex(entry.getValue()));
 +        }
 +        builder.append(" }");
 +        return builder.toString();
 +    }
 +
 +    private static String toCQL(Map<?, ?> map)
 +    {
 +        StringBuilder builder = new StringBuilder("{ ");
 +
 +        boolean isFirst = true;
 +        for (Map.Entry entry: map.entrySet())
 +        {
 +            if (isFirst)
 +                isFirst = false;
 +            else
 +                builder.append(", ");
 +            builder.append(singleQuote(entry.getKey().toString()));
 +            builder.append(": ");
 +            builder.append(singleQuote(entry.getValue().toString()));
 +        }
 +
 +        builder.append(" }");
 +        return builder.toString();
 +    }
 +
 +    private static String toCQL(ColumnMetadata cd)
 +    {
 +        return toCQL(cd, false);
 +    }
 +
 +    private static String toCQL(ColumnMetadata cd, boolean isStaticCompactTable)
 +    {
 +        return String.format("%s %s%s",
 +                             cd.name.toCQLString(),
 +                             cd.type.asCQL3Type().toString(),
 +                             cd.isStatic() && !isStaticCompactTable ? " static" : "");
 +    }
 +
 +    private static String toCQLAdd(TableMetadata table, ColumnMetadata cd)
 +    {
 +        return String.format("ALTER TABLE %s ADD %s %s%s;",
 +                             table.toString(),
 +                             cd.name.toCQLString(),
 +                             cd.type.asCQL3Type().toString(),
 +                             cd.isStatic() ? " static" : "");
 +    }
 +
 +    private static String toCQLDrop(TableMetadata table, DroppedColumn droppedColumn)
 +    {
 +        return String.format("ALTER TABLE %s DROP %s USING TIMESTAMP %s;",
 +                             table.toString(),
 +                             droppedColumn.column.name.toCQLString(),
 +                             droppedColumn.droppedTime);
 +    }
 +
 +    private static void resolveUserType(UserType type, Set<AbstractType> typeSet, List<AbstractType> types)
 +    {
 +        for (AbstractType subType: type.fieldTypes())
 +            if (!typeSet.contains(subType) && subType.isUDT())
 +                resolveUserType((UserType) subType, typeSet, types);
 +
 +        if (!typeSet.contains(type))
 +        {
 +            typeSet.add(type);
 +            types.add(type);
 +        }
 +    }
 +
 +    private static String singleQuote(String s)
 +    {
 +        return String.format("'%s'", s.replaceAll("'", "''"));
 +    }
 +
 +    private static Consumer<StringBuilder> commaAppender(String afterComma)
 +    {
 +        AtomicBoolean isFirst = new AtomicBoolean(true);
 +        return new Consumer<StringBuilder>()
 +        {
 +            public void accept(StringBuilder stringBuilder)
 +            {
 +                if (!isFirst.getAndSet(false))
 +                    stringBuilder.append(',').append(afterComma);
 +            }
 +        };
 +    }
 +
 +    /**
 +     * Whether the given metadata is compatible with / representable in the CQL language
 +     */
 +    public static boolean isCqlCompatible(TableMetadata metaData)
 +    {
 +        if (metaData.isSuper())
 +            return false;
 +
 +        if (metaData.isCompactTable()
 +            && metaData.regularColumns().size() > 1
 +            && metaData.clusteringColumns().size() >= 1)
 +            return false;
 +
 +        return true;
 +    }
 +}
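
TableCQLHelper assembles its comma-separated lists with the stateful
commaAppender Consumer above rather than index bookkeeping: the first
accept() is a no-op, and every later accept() writes the separator first.
The same idiom in a runnable, lambda-based form:

    import java.util.Arrays;
    import java.util.concurrent.atomic.AtomicBoolean;
    import java.util.function.Consumer;

    public class CommaAppenderDemo
    {
        // First accept() does nothing; each subsequent accept() emits "," plus the suffix.
        static Consumer<StringBuilder> commaAppender(String afterComma)
        {
            AtomicBoolean isFirst = new AtomicBoolean(true);
            return sb -> {
                if (!isFirst.getAndSet(false))
                    sb.append(',').append(afterComma);
            };
        }

        public static void main(String[] args)
        {
            StringBuilder sb = new StringBuilder("(");
            Consumer<StringBuilder> separator = commaAppender(" ");
            for (String column : Arrays.asList("pk", "ck", "v"))
            {
                separator.accept(sb);
                sb.append(column);
            }
            System.out.println(sb.append(')')); // prints: (pk, ck, v)
        }
    }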

http://git-wip-us.apache.org/repos/asf/cassandra/blob/6ad99802/src/java/org/apache/cassandra/metrics/ReadRepairMetrics.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/metrics/ReadRepairMetrics.java
index 9ee1c60,9ee1c60..c79fe89
--- a/src/java/org/apache/cassandra/metrics/ReadRepairMetrics.java
+++ b/src/java/org/apache/cassandra/metrics/ReadRepairMetrics.java
@@@ -29,6 -29,6 +29,9 @@@ public class ReadRepairMetric
      private static final MetricNameFactory factory = new DefaultNameFactory("ReadRepair");
  
      public static final Meter repairedBlocking = Metrics.meter(factory.createMetricName("RepairedBlocking"));
++
++    @Deprecated
      public static final Meter repairedBackground = Metrics.meter(factory.createMetricName("RepairedBackground"));
++    @Deprecated
      public static final Meter attempted = Metrics.meter(factory.createMetricName("Attempted"));
  }
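
The two retired meters stay registered, only marked @Deprecated, so
dashboards and scripts keyed on the old metric names still resolve; the
meters simply stop moving. A sketch of that effect against the Dropwizard
metrics-core library that Cassandra's Metrics facade wraps (the registry and
metric names here are illustrative, and metrics-core must be on the
classpath):

    import com.codahale.metrics.Meter;
    import com.codahale.metrics.MetricRegistry;

    public class ReadRepairMeterSketch
    {
        static final MetricRegistry registry = new MetricRegistry();

        public static final Meter repairedBlocking = registry.meter("ReadRepair.RepairedBlocking");

        // Still registered so the name keeps resolving, but never marked after the patch.
        @Deprecated
        public static final Meter repairedBackground = registry.meter("ReadRepair.RepairedBackground");

        public static void main(String[] args)
        {
            repairedBlocking.mark();                            // blocking repairs still count
            System.out.println(repairedBlocking.getCount());    // 1
            System.out.println(repairedBackground.getCount());  // 0, and stays 0
        }
    }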

http://git-wip-us.apache.org/repos/asf/cassandra/blob/6ad99802/src/java/org/apache/cassandra/repair/SystemDistributedKeyspace.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/repair/SystemDistributedKeyspace.java
index b46ae5e,638cf38..19d83db
--- a/src/java/org/apache/cassandra/repair/SystemDistributedKeyspace.java
+++ b/src/java/org/apache/cassandra/repair/SystemDistributedKeyspace.java
@@@ -119,13 -114,11 +119,12 @@@ public final class SystemDistributedKey
                       + "status text,"
                       + "PRIMARY KEY ((keyspace_name, view_name), host_id))");
  
 -    private static CFMetaData compile(String name, String description, String schema)
 +    private static TableMetadata parse(String table, String description, String cql)
      {
 -        return CFMetaData.compile(String.format(schema, name), SchemaConstants.DISTRIBUTED_KEYSPACE_NAME)
 -                         .comment(description)
 -                         .gcGraceSeconds((int) TimeUnit.DAYS.toSeconds(10));
 +        return CreateTableStatement.parse(format(cql, table), SchemaConstants.DISTRIBUTED_KEYSPACE_NAME)
 +                                   .id(TableId.forSystemTable(SchemaConstants.DISTRIBUTED_KEYSPACE_NAME, table))
-                                    .dcLocalReadRepairChance(0.0)
 +                                   .comment(description)
 +                                   .build();
      }
  
      public static KeyspaceMetadata metadata()
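
As in SystemKeyspace above, each table now gets a deterministic id via
TableId.forSystemTable instead of a random one, so every node derives the
same id for the same system table without any schema agreement round. The
diff doesn't show forSystemTable's hashing, so the name-based UUID below
only illustrates the general technique; it is an assumption, not Cassandra's
implementation:

    import java.nio.charset.StandardCharsets;
    import java.util.UUID;

    public class SystemTableIdSketch
    {
        // Hypothetical: derive a stable id from "keyspace.table" so every node
        // computes the same UUID for the same system table.
        static UUID forSystemTable(String keyspace, String table)
        {
            String qualified = keyspace + "." + table;
            return UUID.nameUUIDFromBytes(qualified.getBytes(StandardCharsets.UTF_8));
        }

        public static void main(String[] args)
        {
            // Same input, same id, on every node and every run.
            System.out.println(forSystemTable("system_distributed", "repair_history"));
        }
    }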

