Merge branch 'cassandra-3.0' into cassandra-3.11

# Conflicts:
#       CHANGES.txt
#       src/java/org/apache/cassandra/config/DatabaseDescriptor.java


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/12841938
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/12841938
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/12841938

Branch: refs/heads/trunk
Commit: 12841938a0cd420d626749e91f5f696f26354b03
Parents: eb027a1 8fc9275
Author: Andrés de la Peña <a.penya.gar...@gmail.com>
Authored: Tue Sep 12 14:52:18 2017 +0100
Committer: Andrés de la Peña <a.penya.gar...@gmail.com>
Committed: Tue Sep 12 14:52:18 2017 +0100

----------------------------------------------------------------------
 CHANGES.txt                                          |  1 +
 conf/cassandra.yaml                                  |  5 +++--
 .../apache/cassandra/config/DatabaseDescriptor.java  | 15 +++++++++++++++
 src/java/org/apache/cassandra/utils/FBUtilities.java |  7 +++++--
 4 files changed, 24 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/12841938/CHANGES.txt
----------------------------------------------------------------------
diff --cc CHANGES.txt
index 099a869,b00e47c..752d9aa
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -1,17 -1,6 +1,18 @@@
 -3.0.15
 +3.11.1
 + * Add a compaction option to TWCS to ignore sstables overlapping checks (CASSANDRA-13418)
 + * BTree.Builder memory leak (CASSANDRA-13754)
 + * Revert CASSANDRA-10368 of supporting non-pk column filtering due to correctness issues (CASSANDRA-13798)
 + * Fix cassandra-stress hang issues when an error occurs during cluster connection (CASSANDRA-12938)
 + * Better bootstrap failure message when blocked by (potential) range movement (CASSANDRA-13744)
 + * "ignore" option is ignored in sstableloader (CASSANDRA-13721)
 + * Deadlock in AbstractCommitLogSegmentManager (CASSANDRA-13652)
 + * Duplicate the buffer before passing it to analyser in SASI operation (CASSANDRA-13512)
 + * Properly evict pstmts from prepared statements cache (CASSANDRA-13641)
 +Merged from 3.0:
+  * Improve config validation and documentation on overflow and NPE (CASSANDRA-13622)
 + * Fix pending view mutations handling and cleanup batchlog when there are local and remote paired mutations (CASSANDRA-13069)
   * Range deletes in a CAS batch are ignored (CASSANDRA-13655)
 + * Avoid assertion error when IndexSummary > 2G (CASSANDRA-12014)
   * Change repair midpoint logging for tiny ranges (CASSANDRA-13603)
   * Better handle corrupt final commitlog segment (CASSANDRA-11995)
   * StreamingHistogram is not thread safe (CASSANDRA-13756)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/12841938/conf/cassandra.yaml
----------------------------------------------------------------------
diff --cc conf/cassandra.yaml
index 4bb5840,d77d27a..e847e54
--- a/conf/cassandra.yaml
+++ b/conf/cassandra.yaml
@@@ -1105,96 -959,11 +1106,96 @@@ enable_scripted_user_defined_functions
  # setting.
  windows_timer_interval: 1
  
 +
 +# Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from
 +# a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by
 +# the "key_alias" is the only key that will be used for encrypt operations; previously used keys
 +# can still (and should!) be in the keystore and will be used on decrypt operations
 +# (to handle the case of key rotation).
 +#
 +# It is strongly recommended to download and install Java Cryptography Extension (JCE)
 +# Unlimited Strength Jurisdiction Policy Files for your version of the JDK.
 +# (current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html)
 +#
 +# Currently, only the following file types are supported for transparent data encryption, although
 +# more are coming in future cassandra releases: commitlog, hints
 +transparent_data_encryption_options:
 +    enabled: false
 +    chunk_length_kb: 64
 +    cipher: AES/CBC/PKCS5Padding
 +    key_alias: testing:1
 +    # CBC IV length for AES needs to be 16 bytes (which is also the default size)
 +    # iv_length: 16
 +    key_provider: 
 +      - class_name: org.apache.cassandra.security.JKSKeyProvider
 +        parameters: 
 +          - keystore: conf/.keystore
 +            keystore_password: cassandra
 +            store_type: JCEKS
 +            key_password: cassandra
 +
 +
 +#####################
 +# SAFETY THRESHOLDS #
 +#####################
 +
 +# When executing a scan, within or across a partition, we need to keep the
 +# tombstones seen in memory so we can return them to the coordinator, which
 +# will use them to make sure other replicas also know about the deleted rows.
 +# With workloads that generate a lot of tombstones, this can cause performance
 +# problems and even exhaust the server heap.
 +# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets)
 +# Adjust the thresholds here if you understand the dangers and want to
 +# scan more tombstones anyway.  These thresholds may also be adjusted at runtime
 +# using the StorageService mbean.
 +tombstone_warn_threshold: 1000
 +tombstone_failure_threshold: 100000
 +
 +# Log WARN on any multiple-partition batch size exceeding this value. 5kb per batch by default.
 +# Caution should be taken on increasing the size of this threshold as it can lead to node instability.
 +batch_size_warn_threshold_in_kb: 5
 +
 +# Fail any multiple-partition batch exceeding this value. 50kb (10x warn threshold) by default.
 +batch_size_fail_threshold_in_kb: 50
 +
 +# Log WARN on any batches not of type LOGGED that span across more partitions than this limit
 +unlogged_batch_across_partitions_warn_threshold: 10
 +
 +# Log a warning when compacting partitions larger than this value
 +compaction_large_partition_warning_threshold_mb: 100
 +
 +# GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level
 +# Adjust the threshold based on your application throughput requirement
 +# By default, Cassandra logs GC Pauses greater than 200 ms at INFO level
 +gc_warn_threshold_in_ms: 1000
 +
  # Maximum size of any value in SSTables. Safety measure to detect SSTable corruption
  # early. Any value size larger than this threshold will result in marking an SSTable
- # as corrupted.
+ # as corrupted. This should be positive and less than 2048.
  # max_value_size_in_mb: 256
  
 +# Back-pressure settings #
 +# If enabled, the coordinator will apply the back-pressure strategy specified below to each mutation
 +# sent to replicas, with the aim of reducing pressure on overloaded replicas.
 +back_pressure_enabled: false
 +# The back-pressure strategy applied.
 +# The default implementation, RateBasedBackPressure, takes three arguments:
 +# high ratio, factor, and flow type, and uses the ratio between incoming mutation responses and outgoing mutation requests.
 +# If below high ratio, outgoing mutations are rate limited according to the incoming rate decreased by the given factor;
 +# if above high ratio, the rate limiting is increased by the given factor;
 +# such factor is usually best configured between 1 and 10, use larger values for a faster recovery
 +# at the expense of potentially more dropped mutations;
 +# the rate limiting is applied according to the flow type: if FAST, it's rate limited at the speed of the fastest replica,
 +# if SLOW at the speed of the slowest one.
 +# New strategies can be added. Implementors need to implement org.apache.cassandra.net.BackpressureStrategy and
 +# provide a public constructor accepting a Map<String, Object>.
 +back_pressure_strategy:
 +    - class_name: org.apache.cassandra.net.RateBasedBackPressure
 +      parameters:
 +        - high_ratio: 0.90
 +          factor: 5
 +          flow: FAST
 +
  # Coalescing Strategies #
 +# Coalescing multiple messages turns out to significantly boost message processing throughput (think doubling or more).
 +# On bare metal, the floor for packet processing throughput is high enough that many applications won't notice, but in
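
Note on the new bound: the "positive and less than 2048" rule documented above, and enforced in the DatabaseDescriptor hunks below, guards against 32-bit int overflow when a megabyte-denominated setting is converted to bytes (2048 MB is exactly 2^31 bytes, per CASSANDRA-13622). A minimal, self-contained Java sketch of that rationale; the class and method names here are illustrative, not part of the patch:

    // Illustrative sketch: why *_in_mb settings are capped below 2048 when
    // their byte equivalents are held in a 32-bit int, as in this change.
    public class MbBoundsSketch
    {
        // Hypothetical helper mirroring the bounds checks added in DatabaseDescriptor.
        static int toBytesChecked(String name, int valueInMb)
        {
            if (valueInMb <= 0)
                throw new IllegalArgumentException(name + " must be positive, but was " + valueInMb);
            if (valueInMb >= 2048)
                throw new IllegalArgumentException(name + " must be smaller than 2048, but was " + valueInMb);
            // Safe: 2047 MB = 2,146,435,072 bytes, which still fits in an int.
            return valueInMb * 1024 * 1024;
        }

        public static void main(String[] args)
        {
            System.out.println(toBytesChecked("max_value_size_in_mb", 256)); // 268435456
            // Without the check, the conversion silently wraps around:
            System.out.println(2048 * 1024 * 1024); // prints -2147483648
        }
    }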

http://git-wip-us.apache.org/repos/asf/cassandra/blob/12841938/src/java/org/apache/cassandra/config/DatabaseDescriptor.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/config/DatabaseDescriptor.java
index ad43565,029db89..97c868e
--- a/src/java/org/apache/cassandra/config/DatabaseDescriptor.java
+++ b/src/java/org/apache/cassandra/config/DatabaseDescriptor.java
@@@ -429,54 -442,100 +429,57 @@@ public class DatabaseDescripto
  
          if (conf.native_transport_max_frame_size_in_mb <= 0)
             throw new ConfigurationException("native_transport_max_frame_size_in_mb must be positive, but was " + conf.native_transport_max_frame_size_in_mb, false);
+         else if (conf.native_transport_max_frame_size_in_mb >= 2048)
+             throw new ConfigurationException("native_transport_max_frame_size_in_mb must be smaller than 2048, but was "
+                     + conf.native_transport_max_frame_size_in_mb, false);
  
 -        // fail early instead of OOMing (see CASSANDRA-8116)
 -        if (ThriftServer.HSHA.equals(conf.rpc_server_type) && conf.rpc_max_threads == Integer.MAX_VALUE)
 -            throw new ConfigurationException("The hsha rpc_server_type is not compatible with an rpc_max_threads " +
 -                                             "setting of 'unlimited'.  Please see the comments in cassandra.yaml " +
 -                                             "for rpc_server_type and rpc_max_threads.",
 -                                             false);
 -        if (ThriftServer.HSHA.equals(conf.rpc_server_type) && conf.rpc_max_threads > (FBUtilities.getAvailableProcessors() * 2 + 1024))
 -            logger.warn("rpc_max_threads setting of {} may be too high for the hsha server and cause unnecessary thread contention, reducing performance", conf.rpc_max_threads);
 +        // if data dirs, commitlog dir, or saved caches dir are set in cassandra.yaml, use that.  Otherwise,
 +        // use -Dcassandra.storagedir (set in cassandra-env.sh) as the parent dir for data/, commitlog/, and saved_caches/
 +        if (conf.commitlog_directory == null)
 +        {
 +            conf.commitlog_directory = storagedirFor("commitlog");
 +        }
  
 -        /* end point snitch */
 -        if (conf.endpoint_snitch == null)
 +        if (conf.hints_directory == null)
          {
 -            throw new ConfigurationException("Missing endpoint_snitch 
directive", false);
 +            conf.hints_directory = storagedirFor("hints");
          }
 -        snitch = createEndpointSnitch(conf.endpoint_snitch);
 -        EndpointSnitchInfo.create();
  
 -        localDC = snitch.getDatacenter(FBUtilities.getBroadcastAddress());
 -        localComparator = new Comparator<InetAddress>()
 +        if (conf.cdc_raw_directory == null)
          {
 -            public int compare(InetAddress endpoint1, InetAddress endpoint2)
 -            {
 -                boolean local1 = localDC.equals(snitch.getDatacenter(endpoint1));
 -                boolean local2 = localDC.equals(snitch.getDatacenter(endpoint2));
 -                if (local1 && !local2)
 -                    return -1;
 -                if (local2 && !local1)
 -                    return 1;
 -                return 0;
 -            }
 -        };
 +            conf.cdc_raw_directory = storagedirFor("cdc_raw");
 +        }
  
 -        /* Request Scheduler setup */
 -        requestSchedulerOptions = conf.request_scheduler_options;
 -        if (conf.request_scheduler != null)
 +        if (conf.commitlog_total_space_in_mb == null)
          {
 +            int preferredSize = 8192;
 +            int minSize = 0;
              try
              {
 -                if (requestSchedulerOptions == null)
 -                {
 -                    requestSchedulerOptions = new RequestSchedulerOptions();
 -                }
 -                Class<?> cls = Class.forName(conf.request_scheduler);
 -                requestScheduler = (IRequestScheduler) cls.getConstructor(RequestSchedulerOptions.class).newInstance(requestSchedulerOptions);
 +                // use 1/4 of available space.  See discussion on #10013 and #10199
 +                minSize = Ints.saturatedCast((guessFileStore(conf.commitlog_directory).getTotalSpace() / 1048576) / 4);
              }
 -            catch (ClassNotFoundException e)
 +            catch (IOException e)
              {
 -                throw new ConfigurationException("Invalid Request Scheduler 
class " + conf.request_scheduler, false);
 +                logger.debug("Error checking disk space", e);
 +                throw new ConfigurationException(String.format("Unable to check disk space available to %s. Perhaps the Cassandra user does not have the necessary permissions",
 +                                                               conf.commitlog_directory), e);
              }
 -            catch (Exception e)
 +            if (minSize < preferredSize)
              {
 -                throw new ConfigurationException("Unable to instantiate 
request scheduler", e);
 +                logger.warn("Small commitlog volume detected at {}; setting 
commitlog_total_space_in_mb to {}.  You can override this in cassandra.yaml",
 +                            conf.commitlog_directory, minSize);
 +                conf.commitlog_total_space_in_mb = minSize;
 +            }
 +            else
 +            {
 +                conf.commitlog_total_space_in_mb = preferredSize;
              }
 -        }
 -        else
 -        {
 -            requestScheduler = new NoScheduler();
          }
  
 -        if (conf.request_scheduler_id == RequestSchedulerId.keyspace)
 -        {
 -            requestSchedulerId = conf.request_scheduler_id;
 -        }
 -        else
 -        {
 -            // Default to Keyspace
 -            requestSchedulerId = RequestSchedulerId.keyspace;
 -        }
 -
 -        // if data dirs, commitlog dir, or saved caches dir are set in cassandra.yaml, use that.  Otherwise,
 -        // use -Dcassandra.storagedir (set in cassandra-env.sh) as the parent dir for data/, commitlog/, and saved_caches/
 -        if (conf.commitlog_directory == null)
 -        {
 -            conf.commitlog_directory = System.getProperty("cassandra.storagedir", null);
 -            if (conf.commitlog_directory == null)
 -                throw new ConfigurationException("commitlog_directory is 
missing and -Dcassandra.storagedir is not set", false);
 -            conf.commitlog_directory += File.separator + "commitlog";
 -        }
 -
 -        if (conf.hints_directory == null)
 +        if (conf.cdc_total_space_in_mb == 0)
          {
 -            conf.hints_directory = System.getProperty("cassandra.storagedir", 
null);
 -            if (conf.hints_directory == null)
 -                throw new ConfigurationException("hints_directory is missing 
and -Dcassandra.storagedir is not set", false);
 -            conf.hints_directory += File.separator + "hints";
 -        }
 -
 -        if (conf.commitlog_total_space_in_mb == null)
 -        {
 -            int preferredSize = 8192;
 +            int preferredSize = 4096;
              int minSize = 0;
              try
              {
@@@ -641,332 -677,30 +646,342 @@@
          }
          catch (NumberFormatException e)
          {
 -            throw new ConfigurationException("counter_cache_size_in_mb option 
was set incorrectly to '"
 -                    + conf.counter_cache_size_in_mb + "', supported values 
are <integer> >= 0.", false);
 +            throw new ConfigurationException("counter_cache_size_in_mb option 
was set incorrectly to '"
 +                                             + conf.counter_cache_size_in_mb 
+ "', supported values are <integer> >= 0.", false);
 +        }
 +
 +        // if set to empty/"auto" then use 5% of Heap size
 +        indexSummaryCapacityInMB = (conf.index_summary_capacity_in_mb == null)
 +                                   ? Math.max(1, (int) (Runtime.getRuntime().totalMemory() * 0.05 / 1024 / 1024))
 +                                   : conf.index_summary_capacity_in_mb;
 +
 +        if (indexSummaryCapacityInMB < 0)
 +            throw new ConfigurationException("index_summary_capacity_in_mb 
option was set incorrectly to '"
 +                                             + 
conf.index_summary_capacity_in_mb + "', it should be a non-negative integer.", 
false);
 +
 +        if (conf.index_interval != null)
 +            logger.warn("index_interval has been deprecated and should be 
removed from cassandra.yaml");
 +
 +        if (conf.encryption_options != null)
 +        {
 +            logger.warn("Please rename encryption_options as 
server_encryption_options in the yaml");
 +            //operate under the assumption that server_encryption_options is 
not set in yaml rather than both
 +            conf.server_encryption_options = conf.encryption_options;
 +        }
 +
 +        if (conf.user_defined_function_fail_timeout < 0)
 +            throw new ConfigurationException("user_defined_function_fail_timeout must not be negative", false);
 +        if (conf.user_defined_function_warn_timeout < 0)
 +            throw new ConfigurationException("user_defined_function_warn_timeout must not be negative", false);
 +
 +        if (conf.user_defined_function_fail_timeout < conf.user_defined_function_warn_timeout)
 +            throw new ConfigurationException("user_defined_function_warn_timeout must be less than user_defined_function_fail_timeout", false);
 +
++        if (conf.commitlog_segment_size_in_mb <= 0)
++            throw new ConfigurationException("commitlog_segment_size_in_mb 
must be positive, but was "
++                    + conf.commitlog_segment_size_in_mb, false);
++        else if (conf.commitlog_segment_size_in_mb >= 2048)
++            throw new ConfigurationException("commitlog_segment_size_in_mb 
must be smaller than 2048, but was "
++                    + conf.commitlog_segment_size_in_mb, false);
++
 +        if (conf.max_mutation_size_in_kb == null)
 +            conf.max_mutation_size_in_kb = conf.commitlog_segment_size_in_mb * 1024 / 2;
 +        else if (conf.commitlog_segment_size_in_mb * 1024 < 2 * conf.max_mutation_size_in_kb)
 +            throw new ConfigurationException("commitlog_segment_size_in_mb must be at least twice the size of max_mutation_size_in_kb / 1024", false);
 +
 +        // native transport encryption options
 +        if (conf.native_transport_port_ssl != null
 +            && conf.native_transport_port_ssl != conf.native_transport_port
 +            && !conf.client_encryption_options.enabled)
 +        {
 +            throw new ConfigurationException("Encryption must be enabled in 
client_encryption_options for native_transport_port_ssl", false);
 +        }
 +
 +        if (conf.max_value_size_in_mb <= 0)
 +            throw new ConfigurationException("max_value_size_in_mb must be 
positive", false);
++        else if (conf.max_value_size_in_mb >= 2048)
++            throw new ConfigurationException("max_value_size_in_mb must be 
smaller than 2048, but was "
++                    + conf.max_value_size_in_mb, false);
 +
 +        switch (conf.disk_optimization_strategy)
 +        {
 +            case ssd:
 +                diskOptimizationStrategy = new SsdDiskOptimizationStrategy(conf.disk_optimization_page_cross_chance);
 +                break;
 +            case spinning:
 +                diskOptimizationStrategy = new SpinningDiskOptimizationStrategy();
 +                break;
 +        }
 +
 +        try
 +        {
 +            ParameterizedClass strategy = conf.back_pressure_strategy != null ? conf.back_pressure_strategy : RateBasedBackPressure.withDefaultParams();
 +            Class<?> clazz = Class.forName(strategy.class_name);
 +            if (!BackPressureStrategy.class.isAssignableFrom(clazz))
 +                throw new ConfigurationException(strategy + " is not an instance of " + BackPressureStrategy.class.getCanonicalName(), false);
 +
 +            Constructor<?> ctor = clazz.getConstructor(Map.class);
 +            BackPressureStrategy instance = (BackPressureStrategy) ctor.newInstance(strategy.parameters);
 +            logger.info("Back-pressure is {} with strategy {}.", backPressureEnabled() ? "enabled" : "disabled", conf.back_pressure_strategy);
 +            backPressureStrategy = instance;
 +        }
 +        catch (ConfigurationException ex)
 +        {
 +            throw ex;
 +        }
 +        catch (Exception ex)
 +        {
 +            throw new ConfigurationException("Error configuring back-pressure 
strategy: " + conf.back_pressure_strategy, ex);
 +        }
 +
 +        if (conf.otc_coalescing_enough_coalesced_messages > 128)
 +            throw new ConfigurationException("otc_coalescing_enough_coalesced_messages must be smaller than 128", false);
 +
 +        if (conf.otc_coalescing_enough_coalesced_messages <= 0)
 +            throw new ConfigurationException("otc_coalescing_enough_coalesced_messages must be positive", false);
 +    }
 +
 +    private static String storagedirFor(String type)
 +    {
 +        return storagedir(type + "_directory") + File.separator + type;
 +    }
 +
 +    private static String storagedir(String errMsgType)
 +    {
 +        String storagedir = System.getProperty(Config.PROPERTY_PREFIX + "storagedir", null);
 +        if (storagedir == null)
 +            throw new ConfigurationException(errMsgType + " is missing and -Dcassandra.storagedir is not set", false);
 +        return storagedir;
 +    }
 +
 +    public static void applyAddressConfig() throws ConfigurationException
 +    {
 +        applyAddressConfig(conf);
 +    }
 +
 +    public static void applyAddressConfig(Config config) throws ConfigurationException
 +    {
 +        listenAddress = null;
 +        rpcAddress = null;
 +        broadcastAddress = null;
 +        broadcastRpcAddress = null;
 +
 +        /* Local IP, hostname or interface to bind services to */
 +        if (config.listen_address != null && config.listen_interface != null)
 +        {
 +            throw new ConfigurationException("Set listen_address OR 
listen_interface, not both", false);
 +        }
 +        else if (config.listen_address != null)
 +        {
 +            try
 +            {
 +                listenAddress = InetAddress.getByName(config.listen_address);
 +            }
 +            catch (UnknownHostException e)
 +            {
 +                throw new ConfigurationException("Unknown listen_address '" + 
config.listen_address + "'", false);
 +            }
 +
 +            if (listenAddress.isAnyLocalAddress())
 +                throw new ConfigurationException("listen_address cannot be a 
wildcard address (" + config.listen_address + ")!", false);
 +        }
 +        else if (config.listen_interface != null)
 +        {
 +            listenAddress = getNetworkInterfaceAddress(config.listen_interface, "listen_interface", config.listen_interface_prefer_ipv6);
 +        }
 +
 +        /* Gossip Address to broadcast */
 +        if (config.broadcast_address != null)
 +        {
 +            try
 +            {
 +                broadcastAddress = InetAddress.getByName(config.broadcast_address);
 +            }
 +            catch (UnknownHostException e)
 +            {
 +                throw new ConfigurationException("Unknown broadcast_address 
'" + config.broadcast_address + "'", false);
 +            }
 +
 +            if (broadcastAddress.isAnyLocalAddress())
 +                throw new ConfigurationException("broadcast_address cannot be 
a wildcard address (" + config.broadcast_address + ")!", false);
 +        }
 +
 +        /* Local IP, hostname or interface to bind RPC server to */
 +        if (config.rpc_address != null && config.rpc_interface != null)
 +        {
 +            throw new ConfigurationException("Set rpc_address OR 
rpc_interface, not both", false);
 +        }
 +        else if (config.rpc_address != null)
 +        {
 +            try
 +            {
 +                rpcAddress = InetAddress.getByName(config.rpc_address);
 +            }
 +            catch (UnknownHostException e)
 +            {
 +                throw new ConfigurationException("Unknown host in rpc_address 
" + config.rpc_address, false);
 +            }
 +        }
 +        else if (config.rpc_interface != null)
 +        {
 +            rpcAddress = getNetworkInterfaceAddress(config.rpc_interface, "rpc_interface", config.rpc_interface_prefer_ipv6);
 +        }
 +        else
 +        {
 +            rpcAddress = FBUtilities.getLocalAddress();
 +        }
 +
 +        /* RPC address to broadcast */
 +        if (config.broadcast_rpc_address != null)
 +        {
 +            try
 +            {
 +                broadcastRpcAddress = InetAddress.getByName(config.broadcast_rpc_address);
 +            }
 +            catch (UnknownHostException e)
 +            {
 +                throw new ConfigurationException("Unknown 
broadcast_rpc_address '" + config.broadcast_rpc_address + "'", false);
 +            }
 +
 +            if (broadcastRpcAddress.isAnyLocalAddress())
 +                throw new ConfigurationException("broadcast_rpc_address 
cannot be a wildcard address (" + config.broadcast_rpc_address + ")!", false);
 +        }
 +        else
 +        {
 +            if (rpcAddress.isAnyLocalAddress())
 +                throw new ConfigurationException("If rpc_address is set to a 
wildcard address (" + config.rpc_address + "), then " +
 +                                                 "you must set 
broadcast_rpc_address to a value other than " + config.rpc_address, false);
 +        }
 +    }
 +
 +    public static void applyThriftHSHA()
 +    {
 +        // fail early instead of OOMing (see CASSANDRA-8116)
 +        if (ThriftServerType.HSHA.equals(conf.rpc_server_type) && conf.rpc_max_threads == Integer.MAX_VALUE)
 +            throw new ConfigurationException("The hsha rpc_server_type is not compatible with an rpc_max_threads " +
 +                                             "setting of 'unlimited'.  Please see the comments in cassandra.yaml " +
 +                                             "for rpc_server_type and rpc_max_threads.",
 +                                             false);
 +        if (ThriftServerType.HSHA.equals(conf.rpc_server_type) && conf.rpc_max_threads > (FBUtilities.getAvailableProcessors() * 2 + 1024))
 +            logger.warn("rpc_max_threads setting of {} may be too high for the hsha server and cause unnecessary thread contention, reducing performance", conf.rpc_max_threads);
 +    }
 +
 +    public static void applyEncryptionContext()
 +    {
 +        // always attempt to load the cipher factory, as we could be in the situation where the user has disabled encryption,
 +        // but has existing commitlogs and sstables on disk that are still encrypted (and still need to be read)
 +        encryptionContext = new EncryptionContext(conf.transparent_data_encryption_options);
 +    }
 +
 +    public static void applySeedProvider()
 +    {
 +        // load the seeds for node contact points
 +        if (conf.seed_provider == null)
 +        {
 +            throw new ConfigurationException("seeds configuration is missing; 
a minimum of one seed is required.", false);
 +        }
 +        try
 +        {
 +            Class<?> seedProviderClass = 
Class.forName(conf.seed_provider.class_name);
 +            seedProvider = 
(SeedProvider)seedProviderClass.getConstructor(Map.class).newInstance(conf.seed_provider.parameters);
 +        }
 +        // there are about 5 checked exceptions that could be thrown here.
 +        catch (Exception e)
 +        {
 +            throw new ConfigurationException(e.getMessage() + "\nFatal configuration error; unable to start server.  See log for stacktrace.", true);
 +        }
 +        if (seedProvider.getSeeds().size() == 0)
 +            throw new ConfigurationException("The seed provider lists no 
seeds.", false);
 +    }
 +
 +    public static void applyInitialTokens()
 +    {
 +        if (conf.initial_token != null)
 +        {
 +            Collection<String> tokens = tokensFromString(conf.initial_token);
 +            if (tokens.size() != conf.num_tokens)
 +                throw new ConfigurationException("The number of initial 
tokens (by initial_token) specified is different from num_tokens value", false);
 +
 +            for (String token : tokens)
 +                partitioner.getTokenFactory().validate(token);
 +        }
 +    }
 +
 +    // Maybe safe for clients + tools
 +    public static void applyRequestScheduler()
 +    {
 +        /* Request Scheduler setup */
 +        requestSchedulerOptions = conf.request_scheduler_options;
 +        if (conf.request_scheduler != null)
 +        {
 +            try
 +            {
 +                if (requestSchedulerOptions == null)
 +                {
 +                    requestSchedulerOptions = new RequestSchedulerOptions();
 +                }
 +                Class<?> cls = Class.forName(conf.request_scheduler);
 +                requestScheduler = (IRequestScheduler) cls.getConstructor(RequestSchedulerOptions.class).newInstance(requestSchedulerOptions);
 +            }
 +            catch (ClassNotFoundException e)
 +            {
 +                throw new ConfigurationException("Invalid Request Scheduler 
class " + conf.request_scheduler, false);
 +            }
 +            catch (Exception e)
 +            {
 +                throw new ConfigurationException("Unable to instantiate 
request scheduler", e);
 +            }
 +        }
 +        else
 +        {
 +            requestScheduler = new NoScheduler();
 +        }
 +
 +        if (conf.request_scheduler_id == RequestSchedulerId.keyspace)
 +        {
 +            requestSchedulerId = conf.request_scheduler_id;
 +        }
 +        else
 +        {
 +            // Default to Keyspace
 +            requestSchedulerId = RequestSchedulerId.keyspace;
          }
 +    }
  
 -        // if set to empty/"auto" then use 5% of Heap size
 -        indexSummaryCapacityInMB = (conf.index_summary_capacity_in_mb == null)
 -            ? Math.max(1, (int) (Runtime.getRuntime().totalMemory() * 0.05 / 1024 / 1024))
 -            : conf.index_summary_capacity_in_mb;
 -
 -        if (indexSummaryCapacityInMB < 0)
 -            throw new ConfigurationException("index_summary_capacity_in_mb 
option was set incorrectly to '"
 -                    + conf.index_summary_capacity_in_mb + "', it should be a 
non-negative integer.", false);
 -
 -        if(conf.encryption_options != null)
 +    // definitely not safe for tools + clients - implicitly instantiates StorageService
 +    public static void applySnitch()
 +    {
 +        /* end point snitch */
 +        if (conf.endpoint_snitch == null)
          {
 -            logger.warn("Please rename encryption_options as 
server_encryption_options in the yaml");
 -            //operate under the assumption that server_encryption_options is 
not set in yaml rather than both
 -            conf.server_encryption_options = conf.encryption_options;
 +            throw new ConfigurationException("Missing endpoint_snitch 
directive", false);
          }
 +        snitch = createEndpointSnitch(conf.dynamic_snitch, conf.endpoint_snitch);
 +        EndpointSnitchInfo.create();
  
 -        // load the seeds for node contact points
 -        if (conf.seed_provider == null)
 +        localDC = snitch.getDatacenter(FBUtilities.getBroadcastAddress());
 +        localComparator = new Comparator<InetAddress>()
          {
 -            throw new ConfigurationException("seeds configuration is missing; 
a minimum of one seed is required.", false);
 +            public int compare(InetAddress endpoint1, InetAddress endpoint2)
 +            {
 +                boolean local1 = localDC.equals(snitch.getDatacenter(endpoint1));
 +                boolean local2 = localDC.equals(snitch.getDatacenter(endpoint2));
 +                if (local1 && !local2)
 +                    return -1;
 +                if (local2 && !local1)
 +                    return 1;
 +                return 0;
 +            }
 +        };
 +    }
 +
 +    // definitely not safe for tools + clients - implicitly instantiates schema
 +    public static void applyPartitioner()
 +    {
 +        /* Hashing strategy */
 +        if (conf.partitioner == null)
 +        {
 +            throw new ConfigurationException("Missing directive: 
partitioner", false);
          }
          try
          {
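
A side note on the pluggable back-pressure loading shown above: the configured class is located by name and instantiated through a constructor that takes the yaml parameters map, matching the cassandra.yaml comment ("provide a public constructor accepting a Map<String, Object>"). A minimal sketch of a class satisfying that constructor contract; the class name and field are hypothetical, and a real strategy must also implement org.apache.cassandra.net.BackPressureStrategy, whose methods do not appear in this diff:

    import java.util.Map;

    // Hypothetical skeleton of a pluggable strategy. Only the constructor
    // contract used by the reflective loading above is shown:
    // Class.forName(class_name).getConstructor(Map.class).newInstance(parameters).
    public class ExampleBackPressureStrategy
    {
        private final double highRatio;

        public ExampleBackPressureStrategy(Map<String, Object> args)
        {
            // args carries the parameters block from cassandra.yaml,
            // e.g. {high_ratio=0.90, factor=5, flow=FAST}.
            Object ratio = args == null ? null : args.get("high_ratio");
            this.highRatio = ratio == null ? 0.90 : Double.parseDouble(String.valueOf(ratio));
        }

        public double highRatio()
        {
            return highRatio;
        }
    }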

http://git-wip-us.apache.org/repos/asf/cassandra/blob/12841938/src/java/org/apache/cassandra/utils/FBUtilities.java
----------------------------------------------------------------------

