This is an automated email from the ASF dual-hosted git repository.

edimitrova pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/cassandra.git


The following commit(s) were added to refs/heads/trunk by this push:
     new dac738d2eb Transfer config parameters to the new types; Fix corner case for permissions_update_interval, roles_update_interval, credentials_update_interval; Fix typo in Config annotation; Made Converters type safe and fixed a few cases where converters used the wrong type; 0 should be provided with unit to DataStorageSpec and DurationSpec; Fix null bug in DataStorageSpec and DurationSpec; patch by Ekaterina Dimitrova, David Capwell; reviewed by David Capwell and Caleb Rackli [...]
dac738d2eb is described below

commit dac738d2eba8629d4f482d7cbfd855d2c5b9df47
Author: Ekaterina Dimitrova <[email protected]>
AuthorDate: Tue Mar 22 19:56:52 2022 -0400

    Transfer config parameters to the new types; Fix corner case for permissions_update_interval, roles_update_interval, credentials_update_interval; Fix typo in Config annotation; Made Converters type safe and fixed a few cases where converters used the wrong type; 0 should be provided with unit to DataStorageSpec and DurationSpec; Fix null bug in DataStorageSpec and DurationSpec
    patch by Ekaterina Dimitrova, David Capwell; reviewed by David Capwell and Caleb Rackliffe for CASSANDRA-17431
    
    Co-authored-by: Ekaterina Dimitrova <[email protected]>
    Co-authored-by: David Capwell <[email protected]>
---
 CHANGES.txt                                        |   5 +
 NEWS.txt                                           |  11 +-
 doc/modules/cassandra/pages/new/configuration.adoc |  14 ++-
 src/java/org/apache/cassandra/auth/AuthConfig.java |   2 +-
 src/java/org/apache/cassandra/config/Config.java   |  38 ++++---
 .../org/apache/cassandra/config/Converters.java    | 106 +++++++++++-------
 .../org/apache/cassandra/config/DataRateSpec.java  |   2 +-
 .../apache/cassandra/config/DataStorageSpec.java   |  14 ---
 .../cassandra/config/DatabaseDescriptor.java       | 122 +++++++++++++++------
 .../org/apache/cassandra/config/DurationSpec.java  |  19 +---
 .../config/SmallestDurationMilliseconds.java       |  12 ++
 .../cassandra/config/YamlConfigurationLoader.java  |   7 +-
 .../apache/cassandra/db/virtual/SettingsTable.java |   2 +-
 .../cassandra/transport/ClientResourceLimits.java  |  10 +-
 test/conf/cassandra-old.yaml                       |   1 +
 .../distributed/test/PaxosRepairTest.java          |  22 ++--
 .../LoadOldYAMLBackwardCompatibilityTest.java      |   7 +-
 .../cassandra/config/ParseAndConvertUnitsTest.java |  55 ++++++----
 .../config/YamlConfigurationLoaderTest.java        | 112 +++++++++++++++++++
 .../tools/nodetool/SetAuthCacheConfigTest.java     |  35 ++++++
 .../tools/nodetool/SetGetColumnIndexSizeTest.java  |   2 +-
 .../transport/ClientResourceLimitsTest.java        |  18 +--
 22 files changed, 421 insertions(+), 195 deletions(-)

diff --git a/CHANGES.txt b/CHANGES.txt
index ed00f8bca7..414164181d 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,4 +1,9 @@
 4.1
+ * Migrate advanced config parameters to the new Config types (CASSANDRA-17431)
+ * Make null mean disabled and leave 0 as a valid value for permissions_update_interval, roles_update_interval, credentials_update_interval (CASSANDRA-17431)
+ * Fix typo in Config annotation (CASSANDRA-17431)
+ * Make Converters type safe and fix a few cases where converters used the wrong type (CASSANDRA-17431)
+ * Fix null bug in DataStorageSpec and DurationSpec and require a unit when providing a 0 value (CASSANDRA-17431)
  * Shutdown ScheduledExecutors as part of node drainage (CASSANDRA-17493)
  * Provide JMX endpoint to allow transient logging of blocking read repairs 
(CASSANDRA-17471)
  * Add guardrail for GROUP BY queries (CASSANDRA-17509)
diff --git a/NEWS.txt b/NEWS.txt
index 10a5e30356..992c291115 100644
--- a/NEWS.txt
+++ b/NEWS.txt
@@ -136,10 +136,15 @@ Upgrading
       will be removed in a future major release.
     - There is a new cassandra.yaml version 2. Units suffixes should be 
provided for all rates(B/s|MiB/s|KiB/s|MiB/s),
       memory (KiB|MiB|GiB|B) and duration(d|h|s|ms|us|µs|ns|m)
-      parameters. (CASSANDRA-15234)
+      parameters. A list of the changed parameters and details to consider during configuration setup can be
+      found at https://cassandra.apache.org/doc/latest/cassandra/new/configuration.html. (CASSANDRA-15234)
       Backward compatibility with the old cassandra.yaml file will be in place 
until at least the next major version.
-    - Many cassandra.yaml parameters' names have been changed. Full list can 
be found on ...... (ADD LINK LATER WHEN PAGE
-      IS CREATED) (CASSANDRA-15234)
+    - Many cassandra.yaml parameters' names have been changed. A full list, and details to consider during configuration setup
+      when installing/upgrading Cassandra, can be found at https://cassandra.apache.org/doc/latest/cassandra/new/configuration.html (CASSANDRA-15234)
+    - Negative values cannot be used for parameters of type data rate, duration and data storage in either the old or the new cassandra.yaml version.
+      The only exception is the old, pre-CASSANDRA-15234 cassandra.yaml: -1 and other negative values that were advertised as a way
+      to disable config parameters are still accepted there. With the new cassandra.yaml version those values are typically replaced by null,
+      as documented in the new cassandra.yaml and the docs.
     - Before you upgrade, if you are using 
`cassandra.auth_bcrypt_gensalt_log2_rounds` property,
       confirm it is set to value lower than 31 otherwise Cassandra will fail 
to start. See CASSANDRA-9384
       for further details. You also need to regenerate passwords for users for 
who the password
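
To make the upgrade note above concrete, here is a small, self-contained Java sketch (a hypothetical helper, not part of Cassandra) of how an old millisecond-based setting maps onto the new name/value format, including the old "-1 means disabled" convention becoming a null value:

    import java.util.AbstractMap;
    import java.util.LinkedHashMap;
    import java.util.Map;

    // Hypothetical helper, for illustration only: shows the upgrade semantics from
    // the NEWS entry above. Old *_in_ms names take a bare number of milliseconds;
    // the new names take a value with an explicit unit suffix, and the old
    // "-1 means disabled" convention maps to a null value under the new name.
    public class OldToNewConfigExample
    {
        static Map.Entry<String, String> translateMillisParam(String oldName, long oldValue)
        {
            String newName = oldName.replace("_in_ms", "");
            String newValue = (oldValue == -1) ? null : oldValue + "ms";
            return new AbstractMap.SimpleEntry<>(newName, newValue);
        }

        public static void main(String[] args)
        {
            Map<String, Long> oldConfig = new LinkedHashMap<>();
            oldConfig.put("permissions_update_interval_in_ms", 10000L);
            oldConfig.put("credentials_update_interval_in_ms", -1L);

            oldConfig.forEach((name, value) -> {
                Map.Entry<String, String> entry = translateMillisParam(name, value);
                // prints: permissions_update_interval: 10000ms
                //         credentials_update_interval: null
                System.out.println(entry.getKey() + ": " + entry.getValue());
            });
        }
    }
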
diff --git a/doc/modules/cassandra/pages/new/configuration.adoc 
b/doc/modules/cassandra/pages/new/configuration.adoc
index 91791925c2..e572778b58 100644
--- a/doc/modules/cassandra/pages/new/configuration.adoc
+++ b/doc/modules/cassandra/pages/new/configuration.adoc
@@ -41,8 +41,8 @@ Accepted units: seconds, minutes, hours, days.
 
 Why was this needed?
 Because we can run into precision issues. The full solution to the problem is 
to convert internally all parameters’ values
-to be manipulated with the smallest supported by Cassandra unit. A series of 
tickets to assess and migrate to the smallest unit
-our parameters (incrementally, post 
https://issues.apache.org/jira/browse/CASSANDRA-15234[CASSANDRA-15234]) will be 
opened soon.
+to be manipulated in the smallest unit supported by Cassandra. A series of tickets to assess and possibly migrate our parameters
+to the smallest unit (incrementally, post https://issues.apache.org/jira/browse/CASSANDRA-15234[CASSANDRA-15234]) will be opened in the future.
 
 
 [cols=",,",options="header",]
@@ -52,7 +52,6 @@ our parameters (incrementally, post 
https://issues.apache.org/jira/browse/CASSAN
 |permissions_update_interval_in_ms |permissions_update_interval |ms
 |roles_validity_in_ms |roles_validity |ms
 |roles_update_interval_in_ms |roles_update_interval |ms
-|roles_update_interval_in_ms |roles_update_interval |ms
 |credentials_validity_in_ms |credentials_validity |ms
 |credentials_update_interval_in_ms |credentials_update_interval |ms
 |max_hint_window_in_ms |max_hint_window |ms
@@ -131,9 +130,12 @@ our parameters (incrementally, post 
https://issues.apache.org/jira/browse/CASSAN
 |enable_drop_compact_storage |drop_compact_storage_enabled |-
 |enable_user_defined_functions_threads |user_defined_functions_threads_enabled 
|-
 |enable_legacy_ssl_storage_port |legacy_ssl_storage_port_enabled |-
+|user_defined_function_fail_timeout |user_defined_functions_fail_timeout |ms
+|user_defined_function_warn_timeout |user_defined_functions_warn_timeout |ms
+|cache_load_timeout_seconds |cache_load_timeout |s
 |===
 
-Another TO DO is to add JMX methods supporting the new format. However, we may 
abandon this if virtual tables support for
+Another TO DO is to add JMX methods supporting the new format. However, we may 
abandon this if virtual tables support
 configuration changes in the near future.
 
 *Notes for Cassandra Developers*:
@@ -155,7 +157,9 @@ If the parameter is of type duration, data rate or data 
storage, its value shoul
 - If for some reason you consider the smallest unit shouldn’t be the one that 
is supported as such in Cassandra, please,
 use the extended classes `SmallestDuration*`, `SmallestDataStorage*`.
 
-- New parameters should be added as non-negative numbers.
+- New parameters should be added as non-negative numbers. For parameters where you would have set -1 to disable in the past, you might
+want to consider a separate flag parameter or a null value. If you use a null value, please ensure that any default value
+introduced in the DatabaseDescriptor to handle it is also duplicated in any related setters.
 
 - Any time you add @Replaces with a name change, we need to add an entry in 
this 
https://github.com/riptano/ccm/blob/808b6ca13526785b0fddfe1ead2383c060c4b8b6/ccmlib/common.py#L62[Python
 dictionary in CCM] to support the same backward compatibility as SnakeYAML.
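
For readers following the developer notes above, a minimal sketch of what a renamed duration parameter looks like inside Config.java, using the @Replaces annotation and Smallest* types that appear elsewhere in this patch; the parameter name below is hypothetical:

    // Hypothetical field inside org.apache.cassandra.config.Config, shown only to
    // illustrate the guidance above. The new name carries a duration type whose
    // smallest accepted unit is seconds, and @Replaces keeps the old *_in_s name
    // (and its plain-number values) working via the SECONDS_DURATION converter.
    @Replaces(oldName = "example_timeout_in_s", converter = Converters.SECONDS_DURATION, deprecated = true)
    public volatile SmallestDurationSeconds example_timeout = new SmallestDurationSeconds("30s");
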
 
diff --git a/src/java/org/apache/cassandra/auth/AuthConfig.java 
b/src/java/org/apache/cassandra/auth/AuthConfig.java
index 28ee102fdb..c4b71eb152 100644
--- a/src/java/org/apache/cassandra/auth/AuthConfig.java
+++ b/src/java/org/apache/cassandra/auth/AuthConfig.java
@@ -56,7 +56,7 @@ public final class AuthConfig
         // work with PasswordAuthenticator, so log a message if some other 
authenticator
         // is in use and non-default values are detected
         if (!(authenticator instanceof PasswordAuthenticator)
-            && (conf.credentials_update_interval.toMillisecondsAsInt() != 0
+            && (conf.credentials_update_interval != null
                 || conf.credentials_validity.toMillisecondsAsInt() != 2000
                 || conf.credentials_cache_max_entries != 1000))
         {
diff --git a/src/java/org/apache/cassandra/config/Config.java 
b/src/java/org/apache/cassandra/config/Config.java
index 464c7992a0..eb416aa4cc 100644
--- a/src/java/org/apache/cassandra/config/Config.java
+++ b/src/java/org/apache/cassandra/config/Config.java
@@ -77,19 +77,19 @@ public class Config
     public volatile SmallestDurationMilliseconds permissions_validity = new 
SmallestDurationMilliseconds("2s");
     public volatile int permissions_cache_max_entries = 1000;
     @Replaces(oldName = "permissions_update_interval_in_ms", converter = 
Converters.MILLIS_CUSTOM_DURATION, deprecated = true)
-    public volatile SmallestDurationMilliseconds permissions_update_interval = 
new SmallestDurationMilliseconds("0ms");
+    public volatile SmallestDurationMilliseconds permissions_update_interval = 
null;
     public volatile boolean permissions_cache_active_update = false;
     @Replaces(oldName = "roles_validity_in_ms", converter = 
Converters.MILLIS_DURATION, deprecated = true)
     public volatile SmallestDurationMilliseconds roles_validity = new 
SmallestDurationMilliseconds("2s");
     public volatile int roles_cache_max_entries = 1000;
     @Replaces(oldName = "roles_update_interval_in_ms", converter = 
Converters.MILLIS_CUSTOM_DURATION, deprecated = true)
-    public volatile SmallestDurationMilliseconds roles_update_interval= new 
SmallestDurationMilliseconds("0ms");
+    public volatile SmallestDurationMilliseconds roles_update_interval = null;
     public volatile boolean roles_cache_active_update = false;
     @Replaces(oldName = "credentials_validity_in_ms", converter = 
Converters.MILLIS_DURATION, deprecated = true)
     public volatile SmallestDurationMilliseconds credentials_validity = new 
SmallestDurationMilliseconds("2s");
     public volatile int credentials_cache_max_entries = 1000;
     @Replaces(oldName = "credentials_update_interval_in_ms", converter = 
Converters.MILLIS_CUSTOM_DURATION, deprecated = true)
-    public volatile SmallestDurationMilliseconds credentials_update_interval= 
new SmallestDurationMilliseconds("0ms");
+    public volatile SmallestDurationMilliseconds credentials_update_interval = 
null;
     public volatile boolean credentials_cache_active_update = false;
 
     /* Hashing strategy Random or OPHF */
@@ -257,12 +257,16 @@ public class Config
     public volatile long native_transport_max_concurrent_connections_per_ip = 
-1L;
     public boolean native_transport_flush_in_batches_legacy = false;
     public volatile boolean native_transport_allow_older_protocols = true;
-    public volatile long 
native_transport_max_concurrent_requests_in_bytes_per_ip = -1L;
-    public volatile long native_transport_max_concurrent_requests_in_bytes = 
-1L;
+    // The two parameters below were fixed in 4.0+ to fall back to the default value when == -1 (old name and value format) or == null (new name and value format),
+    // not when <= 0 as in previous versions. Values < -1 throw a ConfigurationException.
+    @Replaces(oldName = 
"native_transport_max_concurrent_requests_in_bytes_per_ip", converter = 
Converters.BYTES_CUSTOM_DATASTORAGE, deprecated = true)
+    public volatile DataStorageSpec 
native_transport_max_request_data_in_flight_per_ip = null;
+    @Replaces(oldName = "native_transport_max_concurrent_requests_in_bytes", 
converter = Converters.BYTES_CUSTOM_DATASTORAGE, deprecated = true)
+    public volatile DataStorageSpec 
native_transport_max_request_data_in_flight = null;
     public volatile boolean native_transport_rate_limiting_enabled = false;
     public volatile int native_transport_max_requests_per_second = 1000000;
-    // not exposed in the yaml
-    public int native_transport_receive_queue_capacity_in_bytes = 1 << 20; // 
1MiB
+    @Replaces(oldName = "native_transport_receive_queue_capacity_in_bytes", 
converter = Converters.BYTES_DATASTORAGE, deprecated = true)
+    public DataStorageSpec native_transport_receive_queue_capacity = new 
DataStorageSpec("1MiB");
 
     @Deprecated
     public Integer native_transport_max_negotiable_protocol_version = null;
@@ -425,7 +429,7 @@ public class Config
 
     public SmallestDataStorageMebibytes paxos_cache_size = null;
 
-    @Replaces(oldName = "cache_load_timeout_seconds ", converter = 
Converters.SECONDS_DURATION, deprecated = true)
+    @Replaces(oldName = "cache_load_timeout_seconds", converter = 
Converters.SECONDS_DURATION, deprecated = true)
     public SmallestDurationSeconds cache_load_timeout = new 
SmallestDurationSeconds("30s");
 
     private static boolean isClientMode = false;
@@ -565,19 +569,17 @@ public class Config
      * Time in milliseconds after a warning will be emitted to the log and to 
the client that a UDF runs too long.
      * (Only valid, if user_defined_functions_threads_enabled==true)
      */
-    //TO DO: transfer below parameter to the new config framework 
(DurationSpec)
-    //Below parameter is in ms
-    public long user_defined_function_warn_timeout = 500;
+    @Replaces(oldName = "user_defined_function_warn_timeout", converter = 
Converters.MILLIS_DURATION, deprecated = true)
+    public SmallestDurationMilliseconds user_defined_functions_warn_timeout = 
new SmallestDurationMilliseconds("500ms");
     /**
      * Time in milliseconds after a fatal UDF run-time situation is detected 
and action according to
      * user_function_timeout_policy will take place.
      * (Only valid, if user_defined_functions_threads_enabled==true)
      */
-    //TO DO: transfer below parameter to the new config framework 
(DurationSpec)
-    //Below parameter is in ms
-    public long user_defined_function_fail_timeout = 1500;
+    @Replaces(oldName = "user_defined_function_fail_timeout", converter = 
Converters.MILLIS_DURATION, deprecated = true)
+    public SmallestDurationMilliseconds user_defined_functions_fail_timeout = 
new SmallestDurationMilliseconds("1500ms");
     /**
-     * Defines what to do when a UDF ran longer than 
user_defined_function_fail_timeout.
+     * Defines what to do when a UDF ran longer than 
user_defined_functions_fail_timeout.
      * Possible options are:
      * - 'die' - i.e. it is able to emit a warning to the client before the 
Cassandra Daemon will shut down.
      * - 'die_immediate' - shut down C* daemon immediately (effectively 
prevent the chance that the client will receive a warning).
@@ -660,11 +662,13 @@ public class Config
     public volatile boolean snapshot_on_repaired_data_mismatch = false;
 
     /**
-     * number of seconds to set nowInSec into the future when performing 
validation previews against repaired data
+     * Number of seconds to set nowInSec into the future when performing 
validation previews against repaired data
      * this (attempts) to prevent a race where validations on different 
machines are started on different sides of
      * a tombstone being compacted away
      */
-    public volatile int validation_preview_purge_head_start_in_sec = 60 * 60;
+
+    @Replaces(oldName = "validation_preview_purge_head_start_in_sec", 
converter = Converters.NEGATIVE_SECONDS_DURATION, deprecated = true)
+    public volatile SmallestDurationSeconds 
validation_preview_purge_head_start = new SmallestDurationSeconds("3600s");
 
     public boolean auth_cache_warming_enabled = false;
 
diff --git a/src/java/org/apache/cassandra/config/Converters.java 
b/src/java/org/apache/cassandra/config/Converters.java
index 447e73189f..ff072fee3c 100644
--- a/src/java/org/apache/cassandra/config/Converters.java
+++ b/src/java/org/apache/cassandra/config/Converters.java
@@ -36,64 +36,77 @@ public enum Converters
      * This converter is used when we change the name of a cassandra.yaml 
configuration parameter but we want to be
      * able to still use the old name too. No units involved.
      */
-    IDENTITY(null, o -> o, o-> o),
-    MILLIS_DURATION(Long.class,
-                    o -> SmallestDurationMilliseconds.inMilliseconds((Long) o),
-                    o -> ((SmallestDurationMilliseconds)o).toMilliseconds()),
-    MILLIS_DOUBLE_DURATION(Double.class,
-                           o ->  
SmallestDurationMilliseconds.inDoubleMilliseconds((Double) o),
-                           o -> 
((SmallestDurationMilliseconds)o).toMilliseconds()),
+    IDENTITY(null, null, o -> o, o-> o),
+    MILLIS_DURATION(Long.class, SmallestDurationMilliseconds.class,
+                    SmallestDurationMilliseconds::inMilliseconds,
+                    o -> o.toMilliseconds()),
+    MILLIS_DOUBLE_DURATION(Double.class, SmallestDurationMilliseconds.class,
+                           o -> Double.isNaN(o) ? 
SmallestDurationMilliseconds.inMilliseconds(0) : 
SmallestDurationMilliseconds.inDoubleMilliseconds(o),
+                           o -> (double) o.toMilliseconds()),
     /**
      * This converter is used to support backward compatibility for parameters 
where in the past -1 was used as a value
-     * Example: credentials_update_interval_in_ms = -1 and 
credentials_update_interval = null (quantity of 0ms) are equal.
+     * Example: credentials_update_interval_in_ms = -1 and 
credentials_update_interval = null are equal.
      */
-    MILLIS_CUSTOM_DURATION(Long.class,
-                           o -> (Long)o == -1 ? new 
SmallestDurationMilliseconds("0ms") : 
SmallestDurationMilliseconds.inMilliseconds((Long) o),
-                           o -> 
((SmallestDurationMilliseconds)o).toMilliseconds() == 0 ? -1 : 
((SmallestDurationMilliseconds)o).toMilliseconds()),
-    SECONDS_DURATION(Long.class,
-                     o -> SmallestDurationSeconds.inSeconds((Long) o),
-                     o -> ((SmallestDurationSeconds)o).toSeconds()),
+    MILLIS_CUSTOM_DURATION(Long.class, SmallestDurationMilliseconds.class,
+                           o -> o == -1 ? null : 
SmallestDurationMilliseconds.inMilliseconds(o),
+                           o -> o == null ? -1 : o.toMilliseconds()),
+    SECONDS_DURATION(Long.class, SmallestDurationSeconds.class,
+                     SmallestDurationSeconds::inSeconds,
+                     DurationSpec::toSeconds),
+    NEGATIVE_SECONDS_DURATION(Long.class, SmallestDurationSeconds.class,
+                              o -> o < 0 ? 
SmallestDurationSeconds.inSeconds(0) : SmallestDurationSeconds.inSeconds(o),
+                              DurationSpec::toSeconds),
     /**
      * This converter is used to support backward compatibility for Duration 
parameters where we added the opportunity
      * for the users to add a unit in the parameters' values but we didn't 
change the names. (key_cache_save_period,
      * row_cache_save_period, counter_cache_save_period)
      * Example: row_cache_save_period = 0 and row_cache_save_period = 0s 
(quantity of 0s) are equal.
      */
-    SECONDS_CUSTOM_DURATION(String.class,
-                            o -> 
SmallestDurationSeconds.inSecondsString((String) o),
-                            o -> ((SmallestDurationSeconds)o).toSeconds()),
-    MINUTES_DURATION(Long.class,
-                     o -> SmallestDurationMinutes.inMinutes((Long) o),
-                     o -> ((SmallestDurationMinutes)o).toMinutes()),
-    MEBIBYTES_DATA_STORAGE(Long.class,
-                          o -> SmallestDataStorageMebibytes.inMebibytes((Long) 
o),
-                          o -> 
((SmallestDataStorageMebibytes)o).toMebibytes()),
-    KIBIBYTES_DATASTORAGE(Long.class,
-                          o -> SmallestDataStorageKibibytes.inKibibytes((Long) 
o),
-                          o -> 
((SmallestDataStorageKibibytes)o).toKibibytes()),
-    BYTES_DATASTORAGE(Long.class,
-                      o -> DataStorageSpec.inBytes((Long) o),
-                      o -> ((DataStorageSpec)o).toBytes()),
-    MEBIBYTES_PER_SECOND_DATA_RATE(Long.class,
-                                   o -> 
DataRateSpec.inMebibytesPerSecond((Long) o),
-                                   o -> 
((DataRateSpec)o).toMebibytesPerSecondAsInt()),
+    SECONDS_CUSTOM_DURATION(String.class, SmallestDurationSeconds.class,
+                            SmallestDurationSeconds::inSecondsString,
+                            o -> Long.toString(o.toSeconds())),
+    MINUTES_DURATION(Long.class, SmallestDurationMinutes.class,
+                     SmallestDurationMinutes::inMinutes,
+                     DurationSpec::toMinutes),
+    MEBIBYTES_DATA_STORAGE(Long.class, SmallestDataStorageMebibytes.class,
+                          SmallestDataStorageMebibytes::inMebibytes,
+                          DataStorageSpec::toMebibytes),
+    KIBIBYTES_DATASTORAGE(Long.class, SmallestDataStorageKibibytes.class,
+                          SmallestDataStorageKibibytes::inKibibytes,
+                          DataStorageSpec::toKibibytes),
+    BYTES_DATASTORAGE(Long.class, DataStorageSpec.class,
+                      DataStorageSpec::inBytes,
+                      DataStorageSpec::toBytes),
+    /**
+     * This converter is used to support backward compatibility for parameters where in the past a negative number was used as a value.
+     * Example: native_transport_max_concurrent_requests_in_bytes_per_ip = -1 
and native_transport_max_request_data_in_flight_per_ip = null
+     * are equal. All negative numbers are printed as 0 in virtual tables.
+     */
+    BYTES_CUSTOM_DATASTORAGE(Long.class, DataStorageSpec.class,
+                             o -> o == -1 ? null : DataStorageSpec.inBytes(o),
+                             DataStorageSpec::toBytes),
+    MEBIBYTES_PER_SECOND_DATA_RATE(Long.class, DataRateSpec.class,
+                                   DataRateSpec::inMebibytesPerSecond,
+                                   o -> (long) o.toMebibytesPerSecondAsInt()),
     /**
      * This converter is a custom one to support backward compatibility for 
stream_throughput_outbound and
     * inter_dc_stream_throughput_outbound which were provided in megabits per second prior to CASSANDRA-15234.
      */
-    MEGABITS_TO_MEBIBYTES_PER_SECOND_DATA_RATE(Long.class,
-                                               o -> 
DataRateSpec.megabitsPerSecondInMebibytesPerSecond((Long)o),
-                                               o -> 
((DataRateSpec)o).toMegabitsPerSecondAsInt());
+    MEGABITS_TO_MEBIBYTES_PER_SECOND_DATA_RATE(Long.class, DataRateSpec.class,
+                                               
DataRateSpec::megabitsPerSecondInMebibytesPerSecond,
+                                               o -> (long) 
o.toMegabitsPerSecondAsInt());
 
-    private final Class<?> inputType;
+    private final Class<?> oldType;
+    private final Class<?> newType;
     private final Function<Object, Object> convert;
     private final Function<Object, Object> reverseConvert;
 
-    Converters(Class<?> inputType, Function<Object, Object> convert, 
Function<Object, Object> reverseConvert)
+    <Old, New> Converters(Class<Old> oldType, Class<New> newType, 
Function<Old, New> convert, Function<New, Old> reverseConvert)
     {
-        this.inputType = inputType;
-        this.convert = convert;
-        this.reverseConvert = reverseConvert;
+        this.oldType = oldType;
+        this.newType = newType;
+        this.convert = (Function<Object, Object>) convert;
+        this.reverseConvert = (Function<Object, Object>) reverseConvert;
     }
 
     /**
@@ -102,9 +115,18 @@ public enum Converters
      *
      * @return class type
      */
-    public Class<?> getInputType()
+    public Class<?> getOldType()
+    {
+        return oldType;
+    }
+
+    /**
+     * Expected return type from {@link #convert(Object)}, and input type to 
{@link #deconvert(Object)}
+     * @return type that {@link #convert(Object)} is expected to return
+     */
+    public Class<?> getNewType()
     {
-        return inputType;
+        return newType;
     }
 
     /**
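
The constructor above captures typed conversion functions and stores them behind untyped Function<Object, Object> fields. A standalone sketch of the same pattern (simplified types and names, not the actual Cassandra enum):

    import java.util.function.Function;

    // Standalone illustration of the typed-converter pattern: each constant
    // declares its old and new types plus the two conversion functions, which the
    // compiler checks at the declaration site, while the enum stores them
    // internally as Function<Object, Object>.
    enum ConverterSketch
    {
        // -1 (the old "disabled" marker) becomes null; everything else gains a unit suffix
        MILLIS_CUSTOM(Long.class, String.class,
                      o -> o == -1 ? null : o + "ms",
                      n -> n == null ? -1L : Long.parseLong(n.replace("ms", "")));

        private final Class<?> oldType;
        private final Class<?> newType;
        private final Function<Object, Object> convert;
        private final Function<Object, Object> reverseConvert;

        @SuppressWarnings("unchecked")
        <Old, New> ConverterSketch(Class<Old> oldType, Class<New> newType,
                                   Function<Old, New> convert, Function<New, Old> reverseConvert)
        {
            this.oldType = oldType;
            this.newType = newType;
            this.convert = (Function<Object, Object>) convert;
            this.reverseConvert = (Function<Object, Object>) reverseConvert;
        }

        Class<?> oldType() { return oldType; }
        Class<?> newType() { return newType; }
        Object convert(Object oldValue) { return convert.apply(oldValue); }
        Object deconvert(Object newValue) { return reverseConvert.apply(newValue); }
    }

In this sketch, ConverterSketch.MILLIS_CUSTOM.convert(-1L) yields null and deconvert(null) yields -1L, mirroring the MILLIS_CUSTOM_DURATION behaviour above.
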
diff --git a/src/java/org/apache/cassandra/config/DataRateSpec.java 
b/src/java/org/apache/cassandra/config/DataRateSpec.java
index 3512513f4f..dd45c25d8d 100644
--- a/src/java/org/apache/cassandra/config/DataRateSpec.java
+++ b/src/java/org/apache/cassandra/config/DataRateSpec.java
@@ -58,7 +58,7 @@ public final class DataRateSpec
     DataRateSpec(double quantity, DataRateUnit unit)
     {
         if (quantity < 0)
-            throw new ConfigurationException("Invalid bit rare: value must be 
non-negative");
+            throw new ConfigurationException("Invalid bit rate: value must be 
non-negative");
 
         if (quantity > Long.MAX_VALUE)
             throw new NumberFormatException("Invalid bit rate: value must be 
between 0 and Long.MAX_VALUE = 9223372036854775807");
diff --git a/src/java/org/apache/cassandra/config/DataStorageSpec.java 
b/src/java/org/apache/cassandra/config/DataStorageSpec.java
index 8b5a0e4be0..eeafe2ed8d 100644
--- a/src/java/org/apache/cassandra/config/DataStorageSpec.java
+++ b/src/java/org/apache/cassandra/config/DataStorageSpec.java
@@ -57,13 +57,6 @@ public class DataStorageSpec
 
     public DataStorageSpec(String value)
     {
-        if (value == null || value.equals("null"))
-        {
-            quantity = 0;
-            unit = MEBIBYTES; // the unit doesn't really matter as 0 is 0 in 
all units
-            return;
-        }
-
         //parse the string field value
         Matcher matcher = STORAGE_UNITS_PATTERN.matcher(value);
 
@@ -88,13 +81,6 @@ public class DataStorageSpec
 
     public DataStorageSpec (String value, DataStorageUnit minUnit)
     {
-        if (value == null || value.equals("null") || value.equals("0"))
-        {
-            quantity = 0;
-            unit = minUnit;
-            return;
-        }
-
         if (!MAP_UNITS_PER_MIN_UNIT.containsKey(minUnit))
             throw new ConfigurationException("Invalid smallest unit set for " 
+ value);
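
The removed branches above are what used to turn null, "null" and a bare "0" into a zero-valued spec. Below is a simplified, stand-alone sketch (not Cassandra's actual pattern or class) of the stricter parsing this patch moves to, where even zero must carry a unit:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    // Illustration only: every value, including zero, must carry an explicit unit
    // ("0MiB", "0B", ...), and null / "null" is no longer silently turned into a
    // zero quantity. The pattern and class are simplified stand-ins.
    public class StrictSizeParseSketch
    {
        private static final Pattern UNITS = Pattern.compile("(\\d+)(B|KiB|MiB|GiB)");

        static long toBytes(String value)
        {
            if (value == null)
                throw new IllegalArgumentException("null is handled by the caller, not parsed as 0");

            Matcher m = UNITS.matcher(value);
            if (!m.matches())
                throw new IllegalArgumentException("Invalid data storage: " + value + " (unit suffix is required, even for 0)");

            long quantity = Long.parseLong(m.group(1));
            switch (m.group(2))
            {
                case "B":   return quantity;
                case "KiB": return quantity * 1024L;
                case "MiB": return quantity * 1024L * 1024L;
                default:    return quantity * 1024L * 1024L * 1024L; // GiB
            }
        }

        public static void main(String[] args)
        {
            System.out.println(toBytes("0MiB")); // 0 -- accepted because the unit is explicit
            System.out.println(toBytes("1MiB")); // 1048576
            toBytes("0");                        // throws: bare numbers are rejected
        }
    }
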
 
diff --git a/src/java/org/apache/cassandra/config/DatabaseDescriptor.java 
b/src/java/org/apache/cassandra/config/DatabaseDescriptor.java
index 470d4f67c9..e1418fa55f 100644
--- a/src/java/org/apache/cassandra/config/DatabaseDescriptor.java
+++ b/src/java/org/apache/cassandra/config/DatabaseDescriptor.java
@@ -551,14 +551,14 @@ public class DatabaseDescriptor
             conf.hints_directory = storagedirFor("hints");
         }
 
-        if (conf.native_transport_max_concurrent_requests_in_bytes <= 0)
+        if (conf.native_transport_max_request_data_in_flight == null)
         {
-            conf.native_transport_max_concurrent_requests_in_bytes = 
Runtime.getRuntime().maxMemory() / 10;
+            conf.native_transport_max_request_data_in_flight = 
DataStorageSpec.inBytes(Runtime.getRuntime().maxMemory() / 10);
         }
 
-        if (conf.native_transport_max_concurrent_requests_in_bytes_per_ip <= 0)
+        if (conf.native_transport_max_request_data_in_flight_per_ip == null)
         {
-            conf.native_transport_max_concurrent_requests_in_bytes_per_ip = 
Runtime.getRuntime().maxMemory() / 40;
+            conf.native_transport_max_request_data_in_flight_per_ip = 
DataStorageSpec.inBytes(Runtime.getRuntime().maxMemory() / 40);
         }
         
         if (conf.native_transport_rate_limiting_enabled)
@@ -774,13 +774,8 @@ public class DatabaseDescriptor
             throw new ConfigurationException("index_summary_capacity option 
was set incorrectly to '"
                                              + 
conf.index_summary_capacity.toString() + "', it should be a non-negative 
integer.", false);
 
-        if (conf.user_defined_function_fail_timeout < 0)
-            throw new 
ConfigurationException("user_defined_function_fail_timeout must not be 
negative", false);
-        if (conf.user_defined_function_warn_timeout < 0)
-            throw new 
ConfigurationException("user_defined_function_warn_timeout must not be 
negative", false);
-
-        if (conf.user_defined_function_fail_timeout < 
conf.user_defined_function_warn_timeout)
-            throw new 
ConfigurationException("user_defined_function_warn_timeout must less than 
user_defined_function_fail_timeout", false);
+        if (conf.user_defined_functions_fail_timeout.toMilliseconds() < conf.user_defined_functions_warn_timeout.toMilliseconds())
+            throw new ConfigurationException("user_defined_functions_warn_timeout must be less than user_defined_functions_fail_timeout", false);
 
         if (!conf.allow_insecure_udfs && 
!conf.user_defined_functions_threads_enabled)
             throw new ConfigurationException("To be able to set 
enable_user_defined_functions_threads: false you need to set 
allow_insecure_udfs: true - this is an unsafe configuration and is not 
recommended.");
@@ -1355,14 +1350,26 @@ public class DatabaseDescriptor
 
     public static int getPermissionsUpdateInterval()
     {
-        return conf.permissions_update_interval.toMilliseconds() == 0
+        return conf.permissions_update_interval == null
              ? conf.permissions_validity.toMillisecondsAsInt()
              : conf.permissions_update_interval.toMillisecondsAsInt();
     }
 
     public static void setPermissionsUpdateInterval(int updateInterval)
     {
-        conf.permissions_update_interval = 
SmallestDurationMilliseconds.inMilliseconds(updateInterval);
+        if (updateInterval == -1)
+            conf.permissions_update_interval = null;
+        else
+        {
+            try
+            {
+                conf.permissions_update_interval = 
SmallestDurationMilliseconds.inMilliseconds(updateInterval);
+            }
+            catch (ConfigurationException e)
+            {
+                throw new 
IllegalArgumentException("permissions_update_interval should be >= -1");
+            }
+        }
     }
 
     public static int getPermissionsCacheMaxEntries()
@@ -1397,7 +1404,7 @@ public class DatabaseDescriptor
 
     public static int getRolesUpdateInterval()
     {
-        return conf.roles_update_interval.toMillisecondsAsInt() == 0
+        return conf.roles_update_interval == null
              ? conf.roles_validity.toMillisecondsAsInt()
              : conf.roles_update_interval.toMillisecondsAsInt();
     }
@@ -1414,7 +1421,19 @@ public class DatabaseDescriptor
 
     public static void setRolesUpdateInterval(int interval)
     {
-        conf.roles_update_interval = 
SmallestDurationMilliseconds.inMilliseconds(interval);
+        if (interval == -1)
+            conf.roles_update_interval = null;
+        else
+        {
+            try
+            {
+                conf.roles_update_interval = 
SmallestDurationMilliseconds.inMilliseconds(interval);
+            }
+            catch(ConfigurationException e)
+            {
+                throw new IllegalArgumentException("roles_update_interval 
should be >= -1");
+            }
+        }
     }
 
     public static int getRolesCacheMaxEntries()
@@ -1439,14 +1458,26 @@ public class DatabaseDescriptor
 
     public static int getCredentialsUpdateInterval()
     {
-        return conf.credentials_update_interval.toMillisecondsAsInt() == 0
+        return conf.credentials_update_interval == null
                ? conf.credentials_validity.toMillisecondsAsInt()
                : conf.credentials_update_interval.toMillisecondsAsInt();
     }
 
     public static void setCredentialsUpdateInterval(int updateInterval)
     {
-        conf.credentials_update_interval = 
SmallestDurationMilliseconds.inMilliseconds(updateInterval);
+        if (updateInterval == -1)
+            conf.credentials_update_interval = null;
+        else
+        {
+            try
+            {
+                conf.credentials_update_interval = 
SmallestDurationMilliseconds.inMilliseconds(updateInterval);
+            }
+            catch (ConfigurationException e)
+            {
+                throw new 
IllegalArgumentException("credentials_update_interval should be >= -1.");
+            }
+        }
     }
 
     public static int getCredentialsCacheMaxEntries()
@@ -2481,17 +2512,17 @@ public class DatabaseDescriptor
 
     public static int getNativeTransportReceiveQueueCapacityInBytes()
     {
-        return conf.native_transport_receive_queue_capacity_in_bytes;
+        return conf.native_transport_receive_queue_capacity.toBytesAsInt();
     }
 
     public static void setNativeTransportReceiveQueueCapacityInBytes(int 
queueSize)
     {
-        conf.native_transport_receive_queue_capacity_in_bytes = queueSize;
+        conf.native_transport_receive_queue_capacity = 
DataStorageSpec.inBytes(queueSize);
     }
 
-    public static long getNativeTransportMaxConcurrentRequestsInBytesPerIp()
+    public static long getNativeTransportMaxRequestDataInFlightPerIpInBytes()
     {
-        return conf.native_transport_max_concurrent_requests_in_bytes_per_ip;
+        return 
conf.native_transport_max_request_data_in_flight_per_ip.toBytes();
     }
 
     public static Config.PaxosVariant getPaxosVariant()
@@ -2624,19 +2655,40 @@ public class DatabaseDescriptor
         conf.paxos_auto_repair_threshold_mb = threshold;
     }
 
-    public static void 
setNativeTransportMaxConcurrentRequestsInBytesPerIp(long 
maxConcurrentRequestsInBytes)
+    public static void 
setNativeTransportMaxRequestDataInFlightPerIpInBytes(long 
maxRequestDataInFlightInBytes)
     {
-        conf.native_transport_max_concurrent_requests_in_bytes_per_ip = 
maxConcurrentRequestsInBytes;
+        if (maxRequestDataInFlightInBytes == -1)
+            maxRequestDataInFlightInBytes = Runtime.getRuntime().maxMemory() / 
40;
+
+        try
+        {
+            conf.native_transport_max_request_data_in_flight_per_ip = 
DataStorageSpec.inBytes(maxRequestDataInFlightInBytes);
+        }
+        catch (ConfigurationException e)
+        {
+            throw new IllegalArgumentException("native_transport_max_request_data_in_flight_per_ip must be -1 (which applies the default value) or >= 0");
+        }
+
     }
 
-    public static long getNativeTransportMaxConcurrentRequestsInBytes()
+    public static long getNativeTransportMaxRequestDataInFlightInBytes()
     {
-        return conf.native_transport_max_concurrent_requests_in_bytes;
+        return conf.native_transport_max_request_data_in_flight.toBytes();
     }
 
-    public static void setNativeTransportMaxConcurrentRequestsInBytes(long 
maxConcurrentRequestsInBytes)
+    public static void 
setNativeTransportConcurrentRequestDataInFlightInBytes(long 
maxRequestDataInFlightInBytes)
     {
-        conf.native_transport_max_concurrent_requests_in_bytes = 
maxConcurrentRequestsInBytes;
+        if (maxRequestDataInFlightInBytes == -1)
+            maxRequestDataInFlightInBytes = Runtime.getRuntime().maxMemory() / 
10;
+
+        try
+        {
+            conf.native_transport_max_request_data_in_flight = 
DataStorageSpec.inBytes(maxRequestDataInFlightInBytes);
+        }
+        catch (ConfigurationException e)
+        {
+            throw new IllegalArgumentException("native_transport_max_request_data_in_flight must be -1 (which applies the default value) or >= 0");
+        }
     }
 
     public static int getNativeTransportMaxRequestsPerSecond()
@@ -3322,12 +3374,12 @@ public class DatabaseDescriptor
 
     public static long getUserDefinedFunctionWarnTimeout()
     {
-        return conf.user_defined_function_warn_timeout;
+        return conf.user_defined_functions_warn_timeout.toMilliseconds();
     }
 
     public static void setUserDefinedFunctionWarnTimeout(long 
userDefinedFunctionWarnTimeout)
     {
-        conf.user_defined_function_warn_timeout = 
userDefinedFunctionWarnTimeout;
+        conf.user_defined_functions_warn_timeout = 
SmallestDurationMilliseconds.inMilliseconds(userDefinedFunctionWarnTimeout);
     }
 
     public static boolean allowInsecureUDFs()
@@ -3383,12 +3435,12 @@ public class DatabaseDescriptor
 
     public static long getUserDefinedFunctionFailTimeout()
     {
-        return conf.user_defined_function_fail_timeout;
+        return conf.user_defined_functions_fail_timeout.toMilliseconds();
     }
 
     public static void setUserDefinedFunctionFailTimeout(long 
userDefinedFunctionFailTimeout)
     {
-        conf.user_defined_function_fail_timeout = 
userDefinedFunctionFailTimeout;
+        conf.user_defined_functions_fail_timeout = 
SmallestDurationMilliseconds.inMilliseconds(userDefinedFunctionFailTimeout);
     }
 
     public static Config.UserFunctionTimeoutPolicy 
getUserFunctionTimeoutPolicy()
@@ -3656,10 +3708,9 @@ public class DatabaseDescriptor
         long valueInBytes = value.toBytes();
         if (valueInBytes < 0 || valueInBytes > Integer.MAX_VALUE)
         {
-            throw new ConfigurationException(String.format("%s must be 
positive value < %dB, but was %dB",
+            throw new ConfigurationException(String.format("%s must be 
positive value <= %dB, but was %dB",
                                                            name,
-                                                           value.getUnit()
-                                                                
.convert(Integer.MAX_VALUE, DataStorageSpec.DataStorageUnit.BYTES),
+                                                           Integer.MAX_VALUE,
                                                            valueInBytes),
                                              false);
         }
@@ -3667,8 +3718,7 @@ public class DatabaseDescriptor
 
     public static int getValidationPreviewPurgeHeadStartInSec()
     {
-        int seconds = conf.validation_preview_purge_head_start_in_sec;
-        return Math.max(seconds, 0);
+        return conf.validation_preview_purge_head_start.toSecondsAsInt();
     }
 
     public static boolean checkForDuplicateRowsDuringReads()
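
A stand-alone sketch (simplified names, not the real DatabaseDescriptor) of the -1 / null round trip the new setters above implement for the *_update_interval parameters:

    // Over JMX/nodetool the update intervals are still plain ints, where -1 means
    // "disabled"; when disabled, the getter falls back to the validity setting.
    // The fields below are simplified stand-ins for the Config fields.
    public class UpdateIntervalSketch
    {
        private static Long permissionsUpdateIntervalMillis = null; // null == disabled (new convention)
        private static long permissionsValidityMillis = 2000;       // default validity, per Config

        static void setPermissionsUpdateInterval(int updateInterval)
        {
            if (updateInterval == -1)
                permissionsUpdateIntervalMillis = null;          // old "-1 disables" convention
            else if (updateInterval < 0)
                throw new IllegalArgumentException("permissions_update_interval should be >= -1");
            else
                permissionsUpdateIntervalMillis = (long) updateInterval;
        }

        static int getPermissionsUpdateInterval()
        {
            // when disabled, callers still get a usable number: the validity interval
            return permissionsUpdateIntervalMillis == null
                   ? (int) permissionsValidityMillis
                   : permissionsUpdateIntervalMillis.intValue();
        }

        public static void main(String[] args)
        {
            setPermissionsUpdateInterval(-1);
            System.out.println(getPermissionsUpdateInterval()); // 2000 (falls back to validity)
            setPermissionsUpdateInterval(500);
            System.out.println(getPermissionsUpdateInterval()); // 500
        }
    }
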
diff --git a/src/java/org/apache/cassandra/config/DurationSpec.java 
b/src/java/org/apache/cassandra/config/DurationSpec.java
index a192e35abb..4b407b27a5 100644
--- a/src/java/org/apache/cassandra/config/DurationSpec.java
+++ b/src/java/org/apache/cassandra/config/DurationSpec.java
@@ -18,7 +18,6 @@
 package org.apache.cassandra.config;
 
 import java.util.Arrays;
-import java.util.Locale;
 import java.util.Objects;
 import java.util.concurrent.TimeUnit;
 import java.util.regex.Matcher;
@@ -64,13 +63,6 @@ public class DurationSpec
 
     public DurationSpec(String value)
     {
-        if (value == null || value.equals("null") || 
value.toLowerCase(Locale.ROOT).equals("nan") || value.equals("0"))
-        {
-            quantity = 0;
-            unit = MILLISECONDS;
-            return;
-        }
-
         //parse the string field value
         Matcher matcher = TIME_UNITS_PATTERN.matcher(value);
 
@@ -89,26 +81,19 @@ public class DurationSpec
     DurationSpec(long quantity, TimeUnit unit)
     {
         if (quantity < 0)
-            throw new ConfigurationException("Invalid duration: value must be 
positive");
+            throw new ConfigurationException("Invalid duration " + quantity + ": value must be non-negative");
 
         this.quantity = quantity;
         this.unit = unit;
     }
 
-    private DurationSpec(double quantity, TimeUnit unit)
+    DurationSpec(double quantity, TimeUnit unit)
     {
         this(Math.round(quantity), unit);
     }
 
     public DurationSpec(String value, TimeUnit minUnit)
     {
-        if (value == null || value.equals("null") || 
value.toLowerCase(Locale.ROOT).equals("nan"))
-        {
-            quantity = 0;
-            unit = minUnit;
-            return;
-        }
-
         if (!MAP_UNITS_PER_MIN_UNIT.containsKey(minUnit))
             throw new ConfigurationException("Invalid smallest unit set for " 
+ value);
 
diff --git 
a/src/java/org/apache/cassandra/config/SmallestDurationMilliseconds.java 
b/src/java/org/apache/cassandra/config/SmallestDurationMilliseconds.java
index 3fc564b01e..5a152f56d0 100644
--- a/src/java/org/apache/cassandra/config/SmallestDurationMilliseconds.java
+++ b/src/java/org/apache/cassandra/config/SmallestDurationMilliseconds.java
@@ -20,6 +20,8 @@ package org.apache.cassandra.config;
 
 import java.util.concurrent.TimeUnit;
 
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+
 /**
  * Wrapper class for Cassandra duration configuration parameters which are 
internally represented in Milliseconds. In order
  * not to lose precision while converting to smaller units (until we migrate 
those parameters to use internally the smallest
@@ -44,6 +46,11 @@ public final class SmallestDurationMilliseconds extends 
DurationSpec
         super(quantity, unit);
     }
 
+    private SmallestDurationMilliseconds(double quantity, TimeUnit unit)
+    {
+        super(quantity, unit);
+    }
+
     /**
      * Creates a {@code SmallestDurationMilliseconds} of the specified amount 
of milliseconds.
      *
@@ -54,4 +61,9 @@ public final class SmallestDurationMilliseconds extends 
DurationSpec
     {
         return new SmallestDurationMilliseconds(milliseconds, 
TimeUnit.MILLISECONDS);
     }
+
+    public static SmallestDurationMilliseconds inDoubleMilliseconds(double 
milliseconds)
+    {
+        return new SmallestDurationMilliseconds(milliseconds, MILLISECONDS);
+    }
 }
diff --git a/src/java/org/apache/cassandra/config/YamlConfigurationLoader.java 
b/src/java/org/apache/cassandra/config/YamlConfigurationLoader.java
index 18400545e0..92039fc8e7 100644
--- a/src/java/org/apache/cassandra/config/YamlConfigurationLoader.java
+++ b/src/java/org/apache/cassandra/config/YamlConfigurationLoader.java
@@ -42,8 +42,6 @@ import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 import com.google.common.io.ByteStreams;
 
-import org.apache.commons.lang3.SystemUtils;
-
 import org.apache.cassandra.io.util.File;
 
 import org.slf4j.Logger;
@@ -468,9 +466,12 @@ public class YamlConfigurationLoader implements 
ConfigurationLoader
 
         boolean deprecated = r.deprecated();
 
-        Class<?> oldType = r.converter().getInputType();
+        Class<?> oldType = r.converter().getOldType();
         if (oldType == null)
             oldType = newType;
+        Class<?> expectedNewType = r.converter().getNewType();
+        if (expectedNewType != null)
+            assert expectedNewType.equals(newType) : String.format("Converter 
is expected to return %s but %s#%s expects %s", expectedNewType, klass, 
newName, newType);
 
         replacements.add(new Replacement(klass, oldName, oldType, newName, 
r.converter(), deprecated));
     }
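
A compact stand-in for the sanity check the new assert adds: when a replacement is registered, the converter's declared output type has to match the declared type of the field the @Replaces annotation points at (names simplified, not the actual loader code):

    final class ReplacementTypeCheck
    {
        // Simplified stand-in for the assert added above: reject a replacement whose
        // converter produces a different type than the new field declares.
        static void check(Class<?> declaredFieldType, Class<?> converterNewType, Class<?> klass, String newName)
        {
            if (converterNewType != null && !converterNewType.equals(declaredFieldType))
                throw new AssertionError(String.format("Converter is expected to return %s but %s#%s expects %s",
                                                       converterNewType, klass, newName, declaredFieldType));
        }
    }
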
diff --git a/src/java/org/apache/cassandra/db/virtual/SettingsTable.java 
b/src/java/org/apache/cassandra/db/virtual/SettingsTable.java
index b82180dea2..ea427548a8 100644
--- a/src/java/org/apache/cassandra/db/virtual/SettingsTable.java
+++ b/src/java/org/apache/cassandra/db/virtual/SettingsTable.java
@@ -60,7 +60,7 @@ final class SettingsTable extends AbstractVirtualTable
 
     // CASSANDRA-15234 - a few configuration parameters kept their names but 
added unit to their value, only the
     // new value format is displayed for them
-    private final List<String> EXCLUDED_CONFIG = new ArrayList<String>()
+    private static final List<String> EXCLUDED_CONFIG = new ArrayList<String>()
     {
         {
             add("key_cache_save_period");
diff --git a/src/java/org/apache/cassandra/transport/ClientResourceLimits.java 
b/src/java/org/apache/cassandra/transport/ClientResourceLimits.java
index f9ed692841..e38cfdb812 100644
--- a/src/java/org/apache/cassandra/transport/ClientResourceLimits.java
+++ b/src/java/org/apache/cassandra/transport/ClientResourceLimits.java
@@ -61,12 +61,12 @@ public class ClientResourceLimits
 
     public static long getGlobalLimit()
     {
-        return 
DatabaseDescriptor.getNativeTransportMaxConcurrentRequestsInBytes();
+        return 
DatabaseDescriptor.getNativeTransportMaxRequestDataInFlightInBytes();
     }
 
     public static void setGlobalLimit(long newLimit)
     {
-        
DatabaseDescriptor.setNativeTransportMaxConcurrentRequestsInBytes(newLimit);
+        
DatabaseDescriptor.setNativeTransportConcurrentRequestDataInFlightInBytes(newLimit);
         long existingLimit = GLOBAL_LIMIT.setLimit(getGlobalLimit());
         logger.info("Changed native_max_transport_requests_in_bytes from {} to 
{}", existingLimit, newLimit);
     }
@@ -78,13 +78,13 @@ public class ClientResourceLimits
 
     public static long getEndpointLimit()
     {
-        return 
DatabaseDescriptor.getNativeTransportMaxConcurrentRequestsInBytesPerIp();
+        return 
DatabaseDescriptor.getNativeTransportMaxRequestDataInFlightPerIpInBytes();
     }
 
     public static void setEndpointLimit(long newLimit)
     {
-        long existingLimit = 
DatabaseDescriptor.getNativeTransportMaxConcurrentRequestsInBytesPerIp();
-        
DatabaseDescriptor.setNativeTransportMaxConcurrentRequestsInBytesPerIp(newLimit);
 // ensure new instances get the new limit
+        long existingLimit = 
DatabaseDescriptor.getNativeTransportMaxRequestDataInFlightPerIpInBytes();
+        
DatabaseDescriptor.setNativeTransportMaxRequestDataInFlightPerIpInBytes(newLimit);
 // ensure new instances get the new limit
         for (Allocator allocator : PER_ENDPOINT_ALLOCATORS.values())
             existingLimit = 
allocator.endpointAndGlobal.endpoint().setLimit(newLimit);
         logger.info("Changed native_max_transport_requests_in_bytes_per_ip 
from {} to {}", existingLimit, newLimit);
diff --git a/test/conf/cassandra-old.yaml b/test/conf/cassandra-old.yaml
index 27af143813..86983acc9e 100644
--- a/test/conf/cassandra-old.yaml
+++ b/test/conf/cassandra-old.yaml
@@ -55,3 +55,4 @@ file_cache_enabled: true
 internode_send_buff_size_in_bytes: 5
 internode_recv_buff_size_in_bytes: 5
 max_hint_window_in_ms: 10800000
+cache_load_timeout_seconds: 35
diff --git 
a/test/distributed/org/apache/cassandra/distributed/test/PaxosRepairTest.java 
b/test/distributed/org/apache/cassandra/distributed/test/PaxosRepairTest.java
index 846be53e05..207cb3797b 100644
--- 
a/test/distributed/org/apache/cassandra/distributed/test/PaxosRepairTest.java
+++ 
b/test/distributed/org/apache/cassandra/distributed/test/PaxosRepairTest.java
@@ -40,7 +40,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.config.*;
-import org.apache.cassandra.cql3.ColumnIdentifier;
 import org.apache.cassandra.cql3.QueryOptions;
 import org.apache.cassandra.cql3.QueryProcessor;
 import org.apache.cassandra.cql3.statements.SelectStatement;
@@ -48,7 +47,6 @@ import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.compaction.CompactionManager;
 import org.apache.cassandra.db.marshal.Int32Type;
 import org.apache.cassandra.db.partitions.PartitionIterator;
-import org.apache.cassandra.db.partitions.PartitionUpdate;
 import org.apache.cassandra.db.rows.*;
 import org.apache.cassandra.distributed.Cluster;
 import org.apache.cassandra.distributed.api.ConsistencyLevel;
@@ -57,7 +55,6 @@ import org.apache.cassandra.distributed.api.IInstance;
 import org.apache.cassandra.distributed.api.IInstanceConfig;
 import org.apache.cassandra.distributed.api.IInvokableInstance;
 import org.apache.cassandra.distributed.api.IMessageFilters;
-import org.apache.cassandra.exceptions.CasWriteTimeoutException;
 import org.apache.cassandra.gms.FailureDetector;
 import org.apache.cassandra.gms.Gossiper;
 import org.apache.cassandra.locator.InetAddressAndPort;
@@ -66,7 +63,6 @@ import org.apache.cassandra.gms.EndpointState;
 import org.apache.cassandra.gms.VersionedValue;
 import org.apache.cassandra.repair.RepairParallelism;
 import org.apache.cassandra.repair.messages.RepairOption;
-import org.apache.cassandra.schema.ColumnMetadata;
 import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.service.ActiveRepairService;
@@ -204,10 +200,10 @@ public class PaxosRepairTest extends TestBaseImpl
     private static final Consumer<IInstanceConfig> CONFIG_CONSUMER = cfg -> {
         cfg.with(Feature.NETWORK);
         cfg.with(Feature.GOSSIP);
-        cfg.set("paxos_purge_grace_period", "0");
+        cfg.set("paxos_purge_grace_period", "0s");
         cfg.set("paxos_state_purging", 
Config.PaxosStatePurging.repaired.toString());
         cfg.set("paxos_variant", "v2_without_linearizable_reads");
-        cfg.set("truncate_request_timeout_in_ms", 1000L);
+        cfg.set("truncate_request_timeout", "1000ms");
         cfg.set("partitioner", "ByteOrderedPartitioner");
         cfg.set("initial_token", 
ByteBufferUtil.bytesToHex(ByteBufferUtil.bytes(cfg.num() * 100)));
     };
@@ -302,9 +298,9 @@ public class PaxosRepairTest extends TestBaseImpl
         try (Cluster cluster = init(Cluster.build(3)
                                            .withConfig(cfg -> cfg
                                                               
.set("paxos_variant", "v2")
-                                                              
.set("paxos_purge_grace_period", 0)
+                                                              
.set("paxos_purge_grace_period", "0s")
                                                               
.set("paxos_state_purging", Config.PaxosStatePurging.repaired.toString())
-                                                              
.set("truncate_request_timeout_in_ms", 1000L))
+                                                              
.set("truncate_request_timeout", "1000ms"))
                                            .withoutVNodes()
                                            .start()))
         {
@@ -373,9 +369,9 @@ public class PaxosRepairTest extends TestBaseImpl
         try (Cluster cluster = init(Cluster.build(5)
                                            .withConfig(cfg -> cfg
                                                               
.set("paxos_variant", "v2")
-                                                              
.set("paxos_purge_grace_period", 0)
-                                                              
.set("paxos_cache_size", "0")
-                                                              
.set("truncate_request_timeout_in_ms", 1000L))
+                                                              
.set("paxos_purge_grace_period", "0s")
+                                                              
.set("paxos_cache_size", "0MiB")
+                                                              
.set("truncate_request_timeout", "1000ms"))
                                            .withoutVNodes()
                                            .start()))
         {
@@ -445,9 +441,9 @@ public class PaxosRepairTest extends TestBaseImpl
         try (Cluster cluster = init(Cluster.build(3)
                                            .withConfig(cfg -> cfg
                                                               
.set("paxos_variant", "v2")
-                                                              
.set("paxos_purge_grace_period", 0)
+                                                              
.set("paxos_purge_grace_period", "0s")
                                                               
.set("paxos_state_purging", Config.PaxosStatePurging.repaired.toString())
-                                                              
.set("truncate_request_timeout_in_ms", 1000L))
+                                                              
.set("truncate_request_timeout", "1000ms"))
                                            .withoutVNodes()
                                            .start())
         )
diff --git 
a/test/unit/org/apache/cassandra/config/LoadOldYAMLBackwardCompatibilityTest.java
 
b/test/unit/org/apache/cassandra/config/LoadOldYAMLBackwardCompatibilityTest.java
index 39d37ce496..997d98255c 100644
--- 
a/test/unit/org/apache/cassandra/config/LoadOldYAMLBackwardCompatibilityTest.java
+++ 
b/test/unit/org/apache/cassandra/config/LoadOldYAMLBackwardCompatibilityTest.java
@@ -110,11 +110,11 @@ public class LoadOldYAMLBackwardCompatibilityTest
         assertTrue(config.drop_compact_storage_enabled);
         assertTrue(config.user_defined_functions_threads_enabled);
         assertEquals(DurationSpec.inMilliseconds(2000), 
config.permissions_validity);
-        assertEquals(DurationSpec.inMilliseconds(0), 
config.permissions_update_interval);
+        assertNull(config.permissions_update_interval);
         assertEquals(DurationSpec.inMilliseconds(2000), config.roles_validity);
-        assertEquals(DurationSpec.inMilliseconds(0), 
config.roles_update_interval);
+        assertNull(config.roles_update_interval);
         assertEquals(DurationSpec.inMilliseconds(2000), 
config.credentials_validity);
-        assertEquals(DurationSpec.inMilliseconds(0), 
config.credentials_update_interval);
+        assertNull(config.credentials_update_interval);
         assertEquals(DurationSpec.inMinutes(60), 
config.index_summary_resize_interval);
 
         //parameters which names have not changed with CASSANDRA-15234
@@ -124,5 +124,6 @@ public class LoadOldYAMLBackwardCompatibilityTest
         assertEquals(DurationSpec.inSecondsString("0"), 
config.row_cache_save_period);
         assertEquals(DurationSpec.inSeconds(0), config.row_cache_save_period);
         assertEquals(DurationSpec.inHours(2), 
config.counter_cache_save_period);
+        assertEquals(DurationSpec.inSeconds(35), config.cache_load_timeout);
     }
 }
diff --git 
a/test/unit/org/apache/cassandra/config/ParseAndConvertUnitsTest.java 
b/test/unit/org/apache/cassandra/config/ParseAndConvertUnitsTest.java
index e760826a89..93895b1254 100644
--- a/test/unit/org/apache/cassandra/config/ParseAndConvertUnitsTest.java
+++ b/test/unit/org/apache/cassandra/config/ParseAndConvertUnitsTest.java
@@ -63,45 +63,52 @@ public class ParseAndConvertUnitsTest
         assertEquals(DurationSpec.inSeconds(86400), 
config.trace_type_query_ttl);
         assertEquals(DurationSpec.inSeconds(604800), 
config.trace_type_repair_ttl);
         assertEquals(DurationSpec.inMilliseconds(2000), 
config.permissions_validity);
-        assertEquals(DurationSpec.inMilliseconds(0), 
config.permissions_update_interval);
+        assertNull(config.permissions_update_interval);
         assertEquals(DurationSpec.inMilliseconds(2000), config.roles_validity);
-        assertEquals(DurationSpec.inMilliseconds(0), 
config.roles_update_interval);
+        assertNull(config.roles_update_interval);
         assertEquals(DurationSpec.inMilliseconds(2000), 
config.credentials_validity);
-        assertEquals(DurationSpec.inMilliseconds(0), 
config.credentials_update_interval);
+        assertNull(config.credentials_update_interval);
         assertEquals(DurationSpec.inMinutes(60), 
config.index_summary_resize_interval);
         assertEquals(DurationSpec.inHours(4), config.key_cache_save_period);
+        assertEquals(DurationSpec.inSeconds(30), config.cache_load_timeout);
+        assertEquals(DurationSpec.inMilliseconds(1500), 
config.user_defined_functions_fail_timeout);
+        assertEquals(DurationSpec.inMilliseconds(500), 
config.user_defined_functions_warn_timeout);
+        assertEquals(DurationSpec.inSeconds(3600), 
config.validation_preview_purge_head_start);
 
         //Confirm space parameters were successfully parsed with the default 
values in cassandra.yaml
         assertNull(config.memtable_heap_space);
         assertNull(config.memtable_offheap_space);
         assertNull(config.repair_session_space); //null everywhere so should 
be correct, let's check whether it will bomb
-        assertEquals(new DataStorageSpec("4194304B"), 
config.internode_application_send_queue_capacity);
-        assertEquals(new DataStorageSpec("134217728B"), 
config.internode_application_send_queue_reserve_endpoint_capacity);
-        assertEquals(new DataStorageSpec("536870912B"), 
config.internode_application_send_queue_reserve_global_capacity);
-        assertEquals(new DataStorageSpec("4194304B"), 
config.internode_application_receive_queue_capacity);
-        assertEquals(new DataStorageSpec("134217728B"), 
config.internode_application_receive_queue_reserve_endpoint_capacity);
-        assertEquals(new DataStorageSpec("536870912B"), 
config.internode_application_receive_queue_reserve_global_capacity);
-        assertEquals(new DataStorageSpec("16MiB"), 
config.native_transport_max_frame_size);
-        assertEquals(new DataStorageSpec("256MiB"), config.max_value_size);
-        assertEquals(new DataStorageSpec("4KiB"), config.column_index_size);
-        assertEquals(new DataStorageSpec("2KiB"), 
config.column_index_cache_size);
-        assertEquals(new DataStorageSpec("5KiB"), 
config.batch_size_warn_threshold);
-        assertEquals(new DataStorageSpec("50KiB"), 
config.batch_size_fail_threshold);
-        assertEquals(new DataStorageSpec("100MiB"), 
config.compaction_large_partition_warning_threshold);
+        assertEquals(DataStorageSpec.inBytes(4194304), 
config.internode_application_send_queue_capacity);
+        assertEquals(DataStorageSpec.inBytes(134217728), 
config.internode_application_send_queue_reserve_endpoint_capacity);
+        assertEquals(DataStorageSpec.inBytes(536870912), 
config.internode_application_send_queue_reserve_global_capacity);
+        assertEquals(DataStorageSpec.inBytes(4194304), 
config.internode_application_receive_queue_capacity);
+        assertEquals(DataStorageSpec.inBytes(134217728), 
config.internode_application_receive_queue_reserve_endpoint_capacity);
+        assertEquals(DataStorageSpec.inBytes(536870912), 
config.internode_application_receive_queue_reserve_global_capacity);
+        assertEquals(DataStorageSpec.inMebibytes(16), 
config.native_transport_max_frame_size);
+        assertEquals(DataStorageSpec.inMebibytes(256), config.max_value_size);
+        assertEquals(DataStorageSpec.inKibibytes(4), config.column_index_size);
+        assertEquals(DataStorageSpec.inKibibytes(2), 
config.column_index_cache_size);
+        assertEquals(DataStorageSpec.inKibibytes(5), 
config.batch_size_warn_threshold);
+        assertEquals(DataStorageSpec.inKibibytes(50), 
config.batch_size_fail_threshold);
+        assertEquals(DataStorageSpec.inMebibytes(100), 
config.compaction_large_partition_warning_threshold);
         assertNull(config.commitlog_total_space);
-        assertEquals(new DataStorageSpec("5MiB"), 
config.commitlog_segment_size);
+        assertEquals(DataStorageSpec.inMebibytes(5), 
config.commitlog_segment_size);
         assertNull(config.max_mutation_size); //not set explicitly in the 
default yaml, check the config; not set there too
-        assertEquals(new DataStorageSpec("0MiB"), config.cdc_total_space);
-        assertEquals(new DataStorageSpec("1024KiB"), 
config.hinted_handoff_throttle);
-        assertEquals(new DataStorageSpec("1024KiB"), 
config.batchlog_replay_throttle);
-        assertEquals(new DataStorageSpec("10240KiB"), 
config.trickle_fsync_interval);
-        assertEquals(new DataStorageSpec("50MiB"), 
config.sstable_preemptive_open_interval);
+        assertEquals(DataStorageSpec.inMebibytes(0), config.cdc_total_space);
+        assertEquals(DataStorageSpec.inKibibytes(1024), 
config.hinted_handoff_throttle);
+        assertEquals(DataStorageSpec.inKibibytes(1024), 
config.batchlog_replay_throttle);
+        assertEquals(DataStorageSpec.inKibibytes(10240), 
config.trickle_fsync_interval);
+        assertEquals(DataStorageSpec.inMebibytes(50), 
config.sstable_preemptive_open_interval);
         assertNull(config.counter_cache_size);
         assertNull(config.file_cache_size);
         assertNull(config.index_summary_capacity);
-        assertEquals(new DataStorageSpec("1MiB"), 
config.prepared_statements_cache_size);
+        assertEquals(DataStorageSpec.inMebibytes(1), 
config.prepared_statements_cache_size);
         assertNull(config.key_cache_size);
-        assertEquals(new DataStorageSpec("16MiB"), config.row_cache_size);
+        assertEquals(DataStorageSpec.inMebibytes(16), config.row_cache_size);
+        assertNull(config.native_transport_max_request_data_in_flight);
+        assertNull(config.native_transport_max_request_data_in_flight_per_ip);
+        assertEquals(DataStorageSpec.inMebibytes(1), 
config.native_transport_receive_queue_capacity);
 
         //Confirm rate parameters were successfully parsed with the default 
values in cassandra.yaml
         assertEquals(DataRateSpec.inMebibytesPerSecond(0), 
config.compaction_throughput);
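The storage assertions above switch from the string constructor to the unit-explicit factories (inBytes, inKibibytes, inMebibytes) while checking the same default values. A small equivalence sketch, illustrative only (both spellings appear elsewhere in this patch):

    // Illustrative only: the rewritten assertions describe the same quantities as before.
    assertEquals(new DataStorageSpec("4KiB"),     DataStorageSpec.inKibibytes(4));
    assertEquals(new DataStorageSpec("16MiB"),    DataStorageSpec.inMebibytes(16));
    assertEquals(new DataStorageSpec("4194304B"), DataStorageSpec.inBytes(4194304));
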
diff --git a/test/unit/org/apache/cassandra/config/YamlConfigurationLoaderTest.java b/test/unit/org/apache/cassandra/config/YamlConfigurationLoaderTest.java
index ae2bf5f53d..1fc13dd8d5 100644
--- a/test/unit/org/apache/cassandra/config/YamlConfigurationLoaderTest.java
+++ b/test/unit/org/apache/cassandra/config/YamlConfigurationLoaderTest.java
@@ -27,9 +27,11 @@ import java.util.Map;
 import com.google.common.collect.ImmutableMap;
 import org.junit.Test;
 
+import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.io.util.File;
 
 import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
 import static org.junit.Assert.assertEquals;
 
 
@@ -93,6 +95,7 @@ public class YamlConfigurationLoaderTest
                                  .put("client_encryption_options", 
encryptionOptions)
                                  .put("internode_socket_send_buffer_size", 
"5B")
                                  .put("internode_socket_receive_buffer_size", 
"5B")
+                                 .put("commitlog_sync_group_window_in_ms", 
"42")
                                  .build();
 
         Config config = YamlConfigurationLoader.fromMap(map, Config.class);
@@ -105,6 +108,20 @@ public class YamlConfigurationLoaderTest
         assertEquals(new DataStorageSpec("5B"), 
config.internode_socket_receive_buffer_size); // Check names backward 
compatibility (CASSANDRA-17141 and CASSANDRA-15234)
     }
 
+    @Test
+    public void typeChange()
+    {
+        Config old = 
YamlConfigurationLoader.fromMap(ImmutableMap.of("key_cache_save_period", 42,
+                                                                     
"row_cache_save_period", 42,
+                                                                     
"counter_cache_save_period", 42), Config.class);
+        Config latest = 
YamlConfigurationLoader.fromMap(ImmutableMap.of("key_cache_save_period", "42s",
+                                                                        
"row_cache_save_period", "42s",
+                                                                        
"counter_cache_save_period", "42s"), Config.class);
+        
assertThat(old.key_cache_save_period).isEqualTo(latest.key_cache_save_period).isEqualTo(SmallestDurationSeconds.inSeconds(42));
+        
assertThat(old.row_cache_save_period).isEqualTo(latest.row_cache_save_period).isEqualTo(SmallestDurationSeconds.inSeconds(42));
+        
assertThat(old.counter_cache_save_period).isEqualTo(latest.counter_cache_save_period).isEqualTo(SmallestDurationSeconds.inSeconds(42));
+    }
+
     @Test
     public void sharedErrorReportingExclusions()
     {
@@ -114,6 +131,101 @@ public class YamlConfigurationLoaderTest
         
assertThat(config.internode_error_reporting_exclusions).isEqualTo(expected);
     }
 
+    @Test
+    public void converters()
+    {
+        // MILLIS_DURATION
+        assertThat(from("permissions_validity_in_ms", 
"42").permissions_validity.toMilliseconds()).isEqualTo(42);
+        assertThatThrownBy(() -> from("permissions_validity", 
-2).permissions_validity.toMilliseconds())
+        .hasRootCauseInstanceOf(ConfigurationException.class)
+        .hasRootCauseMessage("Invalid duration: -2 Accepted 
units:[MILLISECONDS, SECONDS, MINUTES, HOURS, DAYS] where case matters and only 
non-negative values.");
+
+        // MILLIS_DOUBLE_DURATION
+        assertThat(from("commitlog_sync_group_window_in_ms", 
"42").commitlog_sync_group_window.toMilliseconds()).isEqualTo(42);
+        assertThat(from("commitlog_sync_group_window_in_ms", 
"0.2").commitlog_sync_group_window.toMilliseconds()).isEqualTo(0);
+        assertThat(from("commitlog_sync_group_window_in_ms", 
"42.5").commitlog_sync_group_window.toMilliseconds()).isEqualTo(43);
+        assertThat(from("commitlog_sync_group_window_in_ms", 
"NaN").commitlog_sync_group_window.toMilliseconds()).isEqualTo(0);
+        assertThatThrownBy(() -> from("commitlog_sync_group_window_in_ms", 
-2).commitlog_sync_group_window.toMilliseconds())
+        .hasRootCauseInstanceOf(ConfigurationException.class)
+        .hasRootCauseMessage("Invalid duration -2: value must be positive");
+
+        // MILLIS_CUSTOM_DURATION
+        assertThat(from("permissions_update_interval_in_ms", 
42).permissions_update_interval).isEqualTo(SmallestDurationMilliseconds.inMilliseconds(42));
+        assertThat(from("permissions_update_interval_in_ms", 
-1).permissions_update_interval).isNull();
+        assertThatThrownBy(() -> from("permissions_update_interval_in_ms", -2))
+        .hasRootCauseInstanceOf(ConfigurationException.class)
+        .hasRootCauseMessage("Invalid duration -2: value must be positive");
+
+        // SECONDS_DURATION
+        assertThat(from("streaming_keep_alive_period_in_secs", 
"42").streaming_keep_alive_period.toSeconds()).isEqualTo(42);
+        assertThatThrownBy(() -> from("streaming_keep_alive_period_in_secs", 
-2).streaming_keep_alive_period.toSeconds())
+        .hasRootCauseInstanceOf(ConfigurationException.class)
+        .hasRootCauseMessage("Invalid duration -2: value must be positive");
+
+        // NEGATIVE_SECONDS_DURATION
+        assertThat(from("validation_preview_purge_head_start_in_sec", 
-1).validation_preview_purge_head_start.toSeconds()).isEqualTo(0);
+        assertThat(from("validation_preview_purge_head_start_in_sec", 
0).validation_preview_purge_head_start.toSeconds()).isEqualTo(0);
+        assertThat(from("validation_preview_purge_head_start_in_sec", 
42).validation_preview_purge_head_start.toSeconds()).isEqualTo(42);
+
+        // SECONDS_CUSTOM_DURATION already tested in type change
+
+        // MINUTES_DURATION
+        assertThat(from("index_summary_resize_interval_in_minutes", 
"42").index_summary_resize_interval.toMinutes()).isEqualTo(42);
+        assertThatThrownBy(() -> 
from("index_summary_resize_interval_in_minutes", 
-2).index_summary_resize_interval.toMinutes())
+        .hasRootCauseInstanceOf(ConfigurationException.class)
+        .hasRootCauseMessage("Invalid duration -2: value must be positive");
+
+        // BYTES_CUSTOM_DATASTORAGE
+        
assertThat(from("native_transport_max_concurrent_requests_in_bytes_per_ip", 
-1).native_transport_max_request_data_in_flight_per_ip).isEqualTo(null);
+        
assertThat(from("native_transport_max_concurrent_requests_in_bytes_per_ip", 
0).native_transport_max_request_data_in_flight_per_ip.toBytes()).isEqualTo(0);
+        
assertThat(from("native_transport_max_concurrent_requests_in_bytes_per_ip", 
42).native_transport_max_request_data_in_flight_per_ip.toBytes()).isEqualTo(42);
+
+        // MEBIBYTES_DATA_STORAGE
+        assertThat(from("memtable_heap_space_in_mb", 
"42").memtable_heap_space.toMebibytes()).isEqualTo(42);
+        assertThatThrownBy(() -> from("memtable_heap_space_in_mb", 
-2).memtable_heap_space.toMebibytes())
+        .hasRootCauseInstanceOf(ConfigurationException.class)
+        .hasRootCauseMessage("Invalid data storage: value must be positive, 
but was -2");
+
+        // KIBIBYTES_DATASTORAGE
+        assertThat(from("column_index_size_in_kb", 
"42").column_index_size.toKibibytes()).isEqualTo(42);
+        assertThatThrownBy(() -> from("column_index_size_in_kb", 
-2).column_index_size.toMebibytes())
+        .hasRootCauseInstanceOf(ConfigurationException.class)
+        .hasRootCauseMessage("Invalid data storage: value must be positive, 
but was -2");
+
+        // BYTES_DATASTORAGE
+        assertThat(from("internode_max_message_size_in_bytes", 
"42").internode_max_message_size.toBytes()).isEqualTo(42);
+        assertThatThrownBy(() -> from("internode_max_message_size_in_bytes", 
-2).internode_max_message_size.toBytes())
+        .hasRootCauseInstanceOf(ConfigurationException.class)
+        .hasRootCauseMessage("Invalid data storage: value must be positive, 
but was -2");
+
+        // MEBIBYTES_PER_SECOND_DATA_RATE
+        assertThat(from("compaction_throughput_mb_per_sec", 
"42").compaction_throughput.toMebibytesPerSecondAsInt()).isEqualTo(42);
+        assertThatThrownBy(() -> from("compaction_throughput_mb_per_sec", 
-2).compaction_throughput.toMebibytesPerSecondAsInt())
+        .hasRootCauseInstanceOf(ConfigurationException.class)
+        .hasRootCauseMessage("Invalid bit rate: value must be non-negative");
+
+        // MEGABITS_TO_MEBIBYTES_PER_SECOND_DATA_RATE
+        assertThat(from("stream_throughput_outbound_megabits_per_sec", 
"42").stream_throughput_outbound.toMegabitsPerSecondAsInt()).isEqualTo(42);
+        assertThatThrownBy(() -> 
from("stream_throughput_outbound_megabits_per_sec", 
-2).stream_throughput_outbound.toMegabitsPerSecondAsInt())
+        .hasRootCauseInstanceOf(ConfigurationException.class)
+        .hasRootCauseMessage("Invalid bit rate: value must be non-negative");
+    }
+
+    private static Config from(Object... values)
+    {
+        assert values.length % 2 == 0 : "Map can only be created with an even 
number of inputs: given " + values.length;
+        ImmutableMap.Builder<String, Object> builder = ImmutableMap.builder();
+        for (int i = 0; i < values.length; i += 2)
+            builder.put((String) values[i], values[i + 1]);
+        return YamlConfigurationLoader.fromMap(builder.build(), Config.class);
+    }
+
     private static Config load(String path)
     {
         URL url = 
YamlConfigurationLoaderTest.class.getClassLoader().getResource(path);
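Each converter case in the new converters() test is one call to the from(...) helper defined above: build a map keyed by the legacy name, load it through YamlConfigurationLoader, then check either the converted value or the ConfigurationException raised for a negative input. Expanded by hand it looks roughly like this (illustrative only; it mirrors the column_index_size_in_kb case above):

    // Illustrative only: what one from(...) check expands to.
    Config c = YamlConfigurationLoader.fromMap(
        ImmutableMap.of("column_index_size_in_kb", "42"), Config.class);
    assertThat(c.column_index_size.toKibibytes()).isEqualTo(42);

    // Negative values are rejected while the map is loaded, not when the field is read.
    assertThatThrownBy(() -> YamlConfigurationLoader.fromMap(
        ImmutableMap.of("column_index_size_in_kb", -2), Config.class))
    .hasRootCauseInstanceOf(ConfigurationException.class);
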
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/SetAuthCacheConfigTest.java b/test/unit/org/apache/cassandra/tools/nodetool/SetAuthCacheConfigTest.java
index 0ed2534bf4..2c90486ee2 100644
--- a/test/unit/org/apache/cassandra/tools/nodetool/SetAuthCacheConfigTest.java
+++ b/test/unit/org/apache/cassandra/tools/nodetool/SetAuthCacheConfigTest.java
@@ -162,6 +162,21 @@ public class SetAuthCacheConfigTest extends CQLTester
         assertSetConfig(Roles.cache, RolesCacheMBean.CACHE_NAME);
     }
 
+    @Test
+    public void testSetConfigDisabled()
+    {
+        assertSetConfigDisabled(AuthenticatedUser.permissionsCache, 
PermissionsCacheMBean.CACHE_NAME);
+
+        PasswordAuthenticator passwordAuthenticator = (PasswordAuthenticator) 
DatabaseDescriptor.getAuthenticator();
+        assertSetConfigDisabled(passwordAuthenticator.getCredentialsCache(), 
PasswordAuthenticator.CredentialsCacheMBean.CACHE_NAME);
+
+        assertSetConfigDisabled(AuthorizationProxy.jmxPermissionsCache, 
AuthorizationProxy.JmxPermissionsCacheMBean.CACHE_NAME);
+
+        assertSetConfigDisabled(AuthenticatedUser.networkPermissionsCache, 
NetworkPermissionsCacheMBean.CACHE_NAME);
+
+        assertSetConfigDisabled(Roles.cache, RolesCacheMBean.CACHE_NAME);
+    }
+
     private void assertSetConfig(AuthCache<?, ?> authCache, String cacheName)
     {
         ToolRunner.ToolResult tool = 
ToolRunner.invokeNodetool("setauthcacheconfig",
@@ -182,6 +197,26 @@ public class SetAuthCacheConfigTest extends CQLTester
         assertThat(authCache.getActiveUpdate()).isFalse();
     }
 
+    private void assertSetConfigDisabled(AuthCache<?, ?> authCache, String 
cacheName)
+    {
+        ToolRunner.ToolResult tool = 
ToolRunner.invokeNodetool("setauthcacheconfig",
+                                                               "--cache-name", 
cacheName,
+                                                               
"--validity-period", "1",
+                                                               
"--update-interval", "-1",
+                                                               
"--max-entries", "3",
+                                                               
"--disable-active-update");
+        tool.assertOnCleanExit();
+        assertThat(tool.getStdout()).isEqualTo("Changed Validity Period to 
1\n" +
+                                               "Changed Update Interval to 
-1\n" +
+                                               "Changed Max Entries to 3\n" +
+                                               "Changed Active Update to 
false\n");
+        // -1 means disabled and means update_interval will be assigned the 
value of validity_period
+        assertThat(authCache.getValidity()).isEqualTo(1);
+        assertThat(authCache.getUpdateInterval()).isEqualTo(1);
+        assertThat(authCache.getMaxEntries()).isEqualTo(3);
+        assertThat(authCache.getActiveUpdate()).isFalse();
+    }
+
     private String wrapByDefaultNodetoolMessage(String s)
     {
         return "nodetool: " + s + "\nSee 'nodetool help' or 'nodetool help 
<command>'.\n";
diff --git a/test/unit/org/apache/cassandra/tools/nodetool/SetGetColumnIndexSizeTest.java b/test/unit/org/apache/cassandra/tools/nodetool/SetGetColumnIndexSizeTest.java
index 45736c992c..2d78eb5a36 100644
--- a/test/unit/org/apache/cassandra/tools/nodetool/SetGetColumnIndexSizeTest.java
+++ b/test/unit/org/apache/cassandra/tools/nodetool/SetGetColumnIndexSizeTest.java
@@ -72,7 +72,7 @@ public class SetGetColumnIndexSizeTest extends CQLTester
     @Test
     public void testInvalidValue()
     {
-        assertSetInvalidColumnIndexSize("2097152", "column_index_size must be 
positive value < 2097151B, but was 2147483648B", 2);
+        assertSetInvalidColumnIndexSize("2097152", "column_index_size must be 
positive value <= 2147483647B, but was 2147483648B", 2);
     }
 
     @Test
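The corrected message reflects the actual bound: column_index_size may not exceed Integer.MAX_VALUE bytes, and the 2097152 KiB used by the test is exactly one byte over it (illustrative arithmetic only):

    long bytes = 2097152L * 1024;                         // 2147483648
    assertThat(bytes).isEqualTo(Integer.MAX_VALUE + 1L);  // hence "<= 2147483647B, but was 2147483648B"
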
diff --git a/test/unit/org/apache/cassandra/transport/ClientResourceLimitsTest.java b/test/unit/org/apache/cassandra/transport/ClientResourceLimitsTest.java
index 1ffd9b49da..8e94997239 100644
--- a/test/unit/org/apache/cassandra/transport/ClientResourceLimitsTest.java
+++ b/test/unit/org/apache/cassandra/transport/ClientResourceLimitsTest.java
@@ -69,8 +69,8 @@ public class ClientResourceLimitsTest extends CQLTester
     public static void setUp()
     {
         DatabaseDescriptor.setNativeTransportReceiveQueueCapacityInBytes(1);
-        
DatabaseDescriptor.setNativeTransportMaxConcurrentRequestsInBytesPerIp(LOW_LIMIT);
-        
DatabaseDescriptor.setNativeTransportMaxConcurrentRequestsInBytes(LOW_LIMIT);
+        
DatabaseDescriptor.setNativeTransportMaxRequestDataInFlightPerIpInBytes(LOW_LIMIT);
+        
DatabaseDescriptor.setNativeTransportConcurrentRequestDataInFlightInBytes(LOW_LIMIT);
 
         requireNetwork();
     }
@@ -78,8 +78,8 @@ public class ClientResourceLimitsTest extends CQLTester
     @AfterClass
     public static void tearDown()
     {
-        
DatabaseDescriptor.setNativeTransportMaxConcurrentRequestsInBytesPerIp(3000000000L);
-        
DatabaseDescriptor.setNativeTransportMaxConcurrentRequestsInBytes(HIGH_LIMIT);
+        
DatabaseDescriptor.setNativeTransportMaxRequestDataInFlightPerIpInBytes(3000000000L);
+        
DatabaseDescriptor.setNativeTransportConcurrentRequestDataInFlightInBytes(HIGH_LIMIT);
     }
 
     @Before
@@ -382,7 +382,7 @@ public class ClientResourceLimitsTest extends CQLTester
             // change global limit, query will still fail because endpoint 
limit
             ClientResourceLimits.setGlobalLimit(HIGH_LIMIT);
             Assert.assertEquals("new global limit not returned by 
EndpointPayloadTrackers", HIGH_LIMIT, ClientResourceLimits.getGlobalLimit());
-            Assert.assertEquals("new global limit not returned by 
DatabaseDescriptor", HIGH_LIMIT, 
DatabaseDescriptor.getNativeTransportMaxConcurrentRequestsInBytes());
+            Assert.assertEquals("new global limit not returned by 
DatabaseDescriptor", HIGH_LIMIT, 
DatabaseDescriptor.getNativeTransportMaxRequestDataInFlightInBytes());
 
             try
             {
@@ -397,7 +397,7 @@ public class ClientResourceLimitsTest extends CQLTester
             // change endpoint limit, query will now succeed
             ClientResourceLimits.setEndpointLimit(HIGH_LIMIT);
             Assert.assertEquals("new endpoint limit not returned by 
EndpointPayloadTrackers", HIGH_LIMIT, ClientResourceLimits.getEndpointLimit());
-            Assert.assertEquals("new endpoint limit not returned by 
DatabaseDescriptor", HIGH_LIMIT, 
DatabaseDescriptor.getNativeTransportMaxConcurrentRequestsInBytesPerIp());
+            Assert.assertEquals("new endpoint limit not returned by 
DatabaseDescriptor", HIGH_LIMIT, 
DatabaseDescriptor.getNativeTransportMaxRequestDataInFlightPerIpInBytes());
             client.execute(queryMessage());
 
             // ensure new clients also see the new raised limits
@@ -408,7 +408,7 @@ public class ClientResourceLimitsTest extends CQLTester
             // lower the global limit and ensure the query fails again
             ClientResourceLimits.setGlobalLimit(LOW_LIMIT);
             Assert.assertEquals("new global limit not returned by 
EndpointPayloadTrackers", LOW_LIMIT, ClientResourceLimits.getGlobalLimit());
-            Assert.assertEquals("new global limit not returned by 
DatabaseDescriptor", LOW_LIMIT, 
DatabaseDescriptor.getNativeTransportMaxConcurrentRequestsInBytes());
+            Assert.assertEquals("new global limit not returned by 
DatabaseDescriptor", LOW_LIMIT, 
DatabaseDescriptor.getNativeTransportMaxRequestDataInFlightInBytes());
             try
             {
                 client.execute(queryMessage());
@@ -422,7 +422,7 @@ public class ClientResourceLimitsTest extends CQLTester
             // lower the endpoint limit and ensure existing clients also have 
requests that fail
             ClientResourceLimits.setEndpointLimit(60);
             Assert.assertEquals("new endpoint limit not returned by 
EndpointPayloadTrackers", 60, ClientResourceLimits.getEndpointLimit());
-            Assert.assertEquals("new endpoint limit not returned by 
DatabaseDescriptor", 60, 
DatabaseDescriptor.getNativeTransportMaxConcurrentRequestsInBytesPerIp());
+            Assert.assertEquals("new endpoint limit not returned by 
DatabaseDescriptor", 60, 
DatabaseDescriptor.getNativeTransportMaxRequestDataInFlightPerIpInBytes());
             try
             {
                 client.execute(smallMessage);
@@ -449,7 +449,7 @@ public class ClientResourceLimitsTest extends CQLTester
             // put the test state back
             ClientResourceLimits.setEndpointLimit(LOW_LIMIT);
             Assert.assertEquals("new endpoint limit not returned by 
EndpointPayloadTrackers", LOW_LIMIT, ClientResourceLimits.getEndpointLimit());
-            Assert.assertEquals("new endpoint limit not returned by 
DatabaseDescriptor", LOW_LIMIT, 
DatabaseDescriptor.getNativeTransportMaxConcurrentRequestsInBytesPerIp());
+            Assert.assertEquals("new endpoint limit not returned by 
DatabaseDescriptor", LOW_LIMIT, 
DatabaseDescriptor.getNativeTransportMaxRequestDataInFlightPerIpInBytes());
         }
         finally
         {


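Finally, the ClientResourceLimitsTest changes are mechanical renames: the MaxConcurrentRequestsInBytes accessors give way to the RequestDataInFlight names used throughout this patch. Side by side (illustrative only; the long return type is an assumption based on the 3000000000L default above):

    // Old: setNativeTransportMaxConcurrentRequestsInBytesPerIp / ...InBytes
    // New:
    DatabaseDescriptor.setNativeTransportMaxRequestDataInFlightPerIpInBytes(LOW_LIMIT);
    DatabaseDescriptor.setNativeTransportConcurrentRequestDataInFlightInBytes(LOW_LIMIT);
    long perIp  = DatabaseDescriptor.getNativeTransportMaxRequestDataInFlightPerIpInBytes();
    long global = DatabaseDescriptor.getNativeTransportMaxRequestDataInFlightInBytes();
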
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
