This is an automated email from the ASF dual-hosted git repository.

mck pushed a commit to branch cassandra-3.11
in repository https://gitbox.apache.org/repos/asf/cassandra.git

commit 9eab2633cc194af7a6b04977c0f148a65a735189
Merge: 8a5b127 0cd3b68
Author: Mick Semb Wever <[email protected]>
AuthorDate: Wed Mar 11 17:35:26 2020 +0100

    Merge branch 'cassandra-3.0' into cassandra-3.11

 .../org/apache/cassandra/utils/GuidGenerator.java  | 10 +++-------
 .../org/apache/cassandra/auth/jmx/JMXAuthTest.java |  1 +
 .../cassandra/concurrent/SEPExecutorTest.java      |  9 +++++++++
 .../validation/operations/ThriftCQLTester.java     | 10 +++-------
 .../cassandra/db/OldFormatDeserializerTest.java    |  8 ++++++++
 .../db/commitlog/CommitLogDescriptorTest.java      |  3 +++
 .../apache/cassandra/gms/ArrivalWindowTest.java    | 22 +++++++++++-----------
 .../org/apache/cassandra/gms/GossipDigestTest.java |  9 +++++++++
 .../cassandra/io/sstable/SSTableHeaderFixTest.java |  1 +
 .../io/sstable/format/ClientModeSSTableTest.java   |  3 ++-
 .../apache/cassandra/net/MessagingServiceTest.java |  3 ++-
 .../messages/RepairMessageSerializationsTest.java  |  9 +++++----
 .../schema/TupleTypesRepresentationTest.java       |  1 +
 .../apache/cassandra/security/SSLFactoryTest.java  | 11 +++++++++--
 .../cassandra/streaming/SessionInfoTest.java       |  8 ++++++++
 .../cassandra/tools/CompactionStressTest.java      |  7 +++++++
 .../org/apache/cassandra/tools/ToolsTester.java    |  5 +++++
 .../apache/cassandra/triggers/TriggersTest.java    | 13 ++++++-------
 .../org/apache/cassandra/utils/MerkleTreeTest.java |  7 +++++++
 .../apache/cassandra/utils/TopKSamplerTest.java    |  8 ++++++++
 20 files changed, 108 insertions(+), 40 deletions(-)

diff --cc test/unit/org/apache/cassandra/auth/jmx/JMXAuthTest.java
index 10c871b,0000000..08a89df
mode 100644,000000..100644
--- a/test/unit/org/apache/cassandra/auth/jmx/JMXAuthTest.java
+++ b/test/unit/org/apache/cassandra/auth/jmx/JMXAuthTest.java
@@@ -1,279 -1,0 +1,280 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.cassandra.auth.jmx;
 +
 +import java.lang.reflect.Field;
 +import java.nio.file.Paths;
 +import java.rmi.server.RMISocketFactory;
 +import java.util.HashMap;
 +import java.util.Map;
 +import javax.management.JMX;
 +import javax.management.MBeanServerConnection;
 +import javax.management.ObjectName;
 +import javax.management.remote.*;
 +import javax.security.auth.Subject;
 +import javax.security.auth.callback.CallbackHandler;
 +import javax.security.auth.login.LoginException;
 +import javax.security.auth.spi.LoginModule;
 +
 +import com.google.common.collect.ImmutableSet;
 +import org.junit.Before;
 +import org.junit.BeforeClass;
 +import org.junit.Test;
 +
 +import org.apache.cassandra.auth.*;
 +import org.apache.cassandra.config.DatabaseDescriptor;
 +import org.apache.cassandra.cql3.CQLTester;
 +import org.apache.cassandra.db.ColumnFamilyStoreMBean;
 +import org.apache.cassandra.utils.JMXServerUtils;
 +
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.fail;
 +
 +public class JMXAuthTest extends CQLTester
 +{
 +    private static JMXConnectorServer jmxServer;
 +    private static MBeanServerConnection connection;
 +    private RoleResource role;
 +    private String tableName;
 +    private JMXResource tableMBean;
 +
 +    @FunctionalInterface
 +    private interface MBeanAction
 +    {
 +        void execute();
 +    }
 +
 +    @BeforeClass
 +    public static void setupClass() throws Exception
 +    {
++        DatabaseDescriptor.daemonInitialization();
 +        setupAuthorizer();
 +        setupJMXServer();
 +    }
 +
 +    private static void setupAuthorizer()
 +    {
 +        try
 +        {
 +            IAuthorizer authorizer = new StubAuthorizer();
 +            Field authorizerField = 
DatabaseDescriptor.class.getDeclaredField("authorizer");
 +            authorizerField.setAccessible(true);
 +            authorizerField.set(null, authorizer);
 +            DatabaseDescriptor.setPermissionsValidity(0);
 +        }
 +        catch (IllegalAccessException | NoSuchFieldException e)
 +        {
 +            throw new RuntimeException(e);
 +        }
 +    }
 +
 +    private static void setupJMXServer() throws Exception
 +    {
 +        String config = 
Paths.get(ClassLoader.getSystemResource("auth/cassandra-test-jaas.conf").toURI()).toString();
 +        System.setProperty("com.sun.management.jmxremote.authenticate", 
"true");
 +        System.setProperty("java.security.auth.login.config", config);
 +        System.setProperty("cassandra.jmx.remote.login.config", "TestLogin");
 +        System.setProperty("cassandra.jmx.authorizer", 
NoSuperUserAuthorizationProxy.class.getName());
 +        jmxServer = JMXServerUtils.createJMXServer(9999, true);
 +        jmxServer.start();
 +
 +        JMXServiceURL jmxUrl = new 
JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:9999/jmxrmi");
 +        Map<String, Object> env = new HashMap<>();
 +        env.put("com.sun.jndi.rmi.factory.socket", 
RMISocketFactory.getDefaultSocketFactory());
 +        JMXConnector jmxc = JMXConnectorFactory.connect(jmxUrl, env);
 +        connection = jmxc.getMBeanServerConnection();
 +    }
 +
 +    @Before
 +    public void setup() throws Throwable
 +    {
 +        role = RoleResource.role("test_role");
 +        clearAllPermissions();
 +        tableName = createTable("CREATE TABLE %s (k int, v int, PRIMARY KEY 
(k))");
 +        tableMBean = 
JMXResource.mbean(String.format("org.apache.cassandra.db:type=Tables,keyspace=%s,table=%s",
 +                                                     KEYSPACE, tableName));
 +    }
 +
 +    @Test
 +    public void readAttribute() throws Throwable
 +    {
 +        ColumnFamilyStoreMBean proxy = JMX.newMBeanProxy(connection,
 +                                                         
ObjectName.getInstance(tableMBean.getObjectName()),
 +                                                         
ColumnFamilyStoreMBean.class);
 +
 +        // grant SELECT on a single specific Table mbean
 +        assertPermissionOnResource(Permission.SELECT, tableMBean, 
proxy::getTableName);
 +
 +        // grant SELECT on all Table mbeans in named keyspace
 +        clearAllPermissions();
 +        JMXResource allTablesInKeyspace = 
JMXResource.mbean(String.format("org.apache.cassandra.db:type=Tables,keyspace=%s,*",
 +                                                                          
KEYSPACE));
 +        assertPermissionOnResource(Permission.SELECT, allTablesInKeyspace, 
proxy::getTableName);
 +
 +        // grant SELECT on all Table mbeans
 +        clearAllPermissions();
 +        JMXResource allTables = 
JMXResource.mbean("org.apache.cassandra.db:type=Tables,*");
 +        assertPermissionOnResource(Permission.SELECT, allTables, 
proxy::getTableName);
 +
 +        // grant SELECT ON ALL MBEANS
 +        clearAllPermissions();
 +        assertPermissionOnResource(Permission.SELECT, JMXResource.root(), 
proxy::getTableName);
 +    }
 +
 +    @Test
 +    public void writeAttribute() throws Throwable
 +    {
 +        ColumnFamilyStoreMBean proxy = JMX.newMBeanProxy(connection,
 +                                                         
ObjectName.getInstance(tableMBean.getObjectName()),
 +                                                         
ColumnFamilyStoreMBean.class);
 +        MBeanAction action = () -> proxy.setMinimumCompactionThreshold(4);
 +
 +        // grant MODIFY on a single specific Table mbean
 +        assertPermissionOnResource(Permission.MODIFY, tableMBean, action);
 +
 +        // grant MODIFY on all Table mbeans in named keyspace
 +        clearAllPermissions();
 +        JMXResource allTablesInKeyspace = 
JMXResource.mbean(String.format("org.apache.cassandra.db:type=Tables,keyspace=%s,*",
 +                                                                          
KEYSPACE));
 +        assertPermissionOnResource(Permission.MODIFY, allTablesInKeyspace, 
action);
 +
 +        // grant MODIFY on all Table mbeans
 +        clearAllPermissions();
 +        JMXResource allTables = 
JMXResource.mbean("org.apache.cassandra.db:type=Tables,*");
 +        assertPermissionOnResource(Permission.MODIFY, allTables, action);
 +
 +        // grant MODIFY ON ALL MBEANS
 +        clearAllPermissions();
 +        assertPermissionOnResource(Permission.MODIFY, JMXResource.root(), 
action);
 +    }
 +
 +    @Test
 +    public void executeMethod() throws Throwable
 +    {
 +        ColumnFamilyStoreMBean proxy = JMX.newMBeanProxy(connection,
 +                                                         
ObjectName.getInstance(tableMBean.getObjectName()),
 +                                                         
ColumnFamilyStoreMBean.class);
 +
 +        // grant EXECUTE on a single specific Table mbean
 +        assertPermissionOnResource(Permission.EXECUTE, tableMBean, 
proxy::estimateKeys);
 +
 +        // grant EXECUTE on all Table mbeans in named keyspace
 +        clearAllPermissions();
 +        JMXResource allTablesInKeyspace = 
JMXResource.mbean(String.format("org.apache.cassandra.db:type=Tables,keyspace=%s,*",
 +                                                                          
KEYSPACE));
 +        assertPermissionOnResource(Permission.EXECUTE, allTablesInKeyspace, 
proxy::estimateKeys);
 +
 +        // grant EXECUTE on all Table mbeans
 +        clearAllPermissions();
 +        JMXResource allTables = 
JMXResource.mbean("org.apache.cassandra.db:type=Tables,*");
 +        assertPermissionOnResource(Permission.EXECUTE, allTables, 
proxy::estimateKeys);
 +
 +        // grant EXECUTE ON ALL MBEANS
 +        clearAllPermissions();
 +        assertPermissionOnResource(Permission.EXECUTE, JMXResource.root(), 
proxy::estimateKeys);
 +    }
 +
 +    private void assertPermissionOnResource(Permission permission,
 +                                            JMXResource resource,
 +                                            MBeanAction action)
 +    {
 +        assertUnauthorized(action);
 +        grantPermission(permission, resource, role);
 +        assertAuthorized(action);
 +    }
 +
 +    private void grantPermission(Permission permission, JMXResource resource, 
RoleResource role)
 +    {
 +        
DatabaseDescriptor.getAuthorizer().grant(AuthenticatedUser.SYSTEM_USER,
 +                                                 ImmutableSet.of(permission),
 +                                                 resource,
 +                                                 role);
 +    }
 +
 +    private void assertAuthorized(MBeanAction action)
 +    {
 +        action.execute();
 +    }
 +
 +    private void assertUnauthorized(MBeanAction action)
 +    {
 +        try
 +        {
 +            action.execute();
 +            fail("Expected an UnauthorizedException, but none was thrown");
 +        }
 +        catch (SecurityException e)
 +        {
 +            assertEquals("Access Denied", e.getLocalizedMessage());
 +        }
 +    }
 +
 +    private void clearAllPermissions()
 +    {
 +        ((StubAuthorizer) DatabaseDescriptor.getAuthorizer()).clear();
 +    }
 +
 +    public static class StubLoginModule implements LoginModule
 +    {
 +        private CassandraPrincipal principal;
 +        private Subject subject;
 +
 +        public StubLoginModule(){}
 +
 +        public void initialize(Subject subject, CallbackHandler 
callbackHandler, Map<String, ?> sharedState, Map<String, ?> options)
 +        {
 +            this.subject = subject;
 +            principal = new 
CassandraPrincipal((String)options.get("role_name"));
 +        }
 +
 +        public boolean login() throws LoginException
 +        {
 +            return true;
 +        }
 +
 +        public boolean commit() throws LoginException
 +        {
 +            if (!subject.getPrincipals().contains(principal))
 +                subject.getPrincipals().add(principal);
 +            return true;
 +        }
 +
 +        public boolean abort() throws LoginException
 +        {
 +            return true;
 +        }
 +
 +        public boolean logout() throws LoginException
 +        {
 +            return true;
 +        }
 +    }
 +
 +    // always answers false to isSuperUser and true to isAuthSetup complete - 
saves us having to initialize
 +    // a real IRoleManager and StorageService for the test
 +    public static class NoSuperUserAuthorizationProxy extends 
AuthorizationProxy
 +    {
 +        public NoSuperUserAuthorizationProxy()
 +        {
 +            super();
 +            this.isSuperuser = (role) -> false;
 +            this.isAuthSetupComplete = () -> true;
 +        }
 +    }
 +}
diff --cc test/unit/org/apache/cassandra/concurrent/SEPExecutorTest.java
index 8526dd0,8526dd0..9702e8f
--- a/test/unit/org/apache/cassandra/concurrent/SEPExecutorTest.java
+++ b/test/unit/org/apache/cassandra/concurrent/SEPExecutorTest.java
@@@ -25,12 -25,12 +25,21 @@@ import java.util.concurrent.ExecutorSer
  import java.util.concurrent.TimeUnit;
  
  import org.junit.Assert;
++import org.junit.BeforeClass;
  import org.junit.Test;
  
++import org.apache.cassandra.config.DatabaseDescriptor;
  import org.apache.cassandra.utils.FBUtilities;
  
++
  public class SEPExecutorTest
  {
++    @BeforeClass
++    public static void beforeClass()
++    {
++        DatabaseDescriptor.daemonInitialization();
++    }
++
      @Test
      public void shutdownTest() throws Throwable
      {
diff --cc test/unit/org/apache/cassandra/db/OldFormatDeserializerTest.java
index a9549e1,886b191..cb37229
--- a/test/unit/org/apache/cassandra/db/OldFormatDeserializerTest.java
+++ b/test/unit/org/apache/cassandra/db/OldFormatDeserializerTest.java
@@@ -20,9 -20,9 +20,11 @@@ package org.apache.cassandra.db
  
  import java.util.function.Supplier;
  
++import org.junit.BeforeClass;
  import org.junit.Test;
  
  import org.apache.cassandra.config.CFMetaData;
++import org.apache.cassandra.config.DatabaseDescriptor;
  import 
org.apache.cassandra.db.UnfilteredDeserializer.OldFormatDeserializer.UnfilteredIterator;
  import org.apache.cassandra.db.marshal.Int32Type;
  import org.apache.cassandra.db.rows.RangeTombstoneMarker;
@@@ -37,6 -37,6 +39,12 @@@ import static org.junit.Assert.*
  
  public class OldFormatDeserializerTest
  {
++    @BeforeClass
++    public static void beforeClass()
++    {
++        DatabaseDescriptor.daemonInitialization();
++    }
++
      @Test
      public void testRangeTombstones() throws Exception
      {
diff --cc 
test/unit/org/apache/cassandra/db/commitlog/CommitLogDescriptorTest.java
index fdedafd,898c19f..a6bbdb6
--- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogDescriptorTest.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogDescriptorTest.java
@@@ -25,51 -23,21 +25,54 @@@ import java.util.HashMap
  import java.util.Map;
  
  import com.google.common.collect.ImmutableMap;
 -
 +import org.junit.Assert;
 +import org.junit.Before;
  import org.junit.Test;
  
++import org.apache.cassandra.config.DatabaseDescriptor;
  import org.apache.cassandra.config.ParameterizedClass;
 +import org.apache.cassandra.config.TransparentDataEncryptionOptions;
  import org.apache.cassandra.exceptions.ConfigurationException;
 -import org.apache.cassandra.io.util.DataInputBuffer;
 +import org.apache.cassandra.io.compress.LZ4Compressor;
 +import org.apache.cassandra.io.util.FileDataInput;
 +import org.apache.cassandra.io.util.FileSegmentInputStream;
  import org.apache.cassandra.net.MessagingService;
 -
 -import static org.junit.Assert.assertEquals;
 -import static org.junit.Assert.assertFalse;
 -import static org.junit.Assert.assertTrue;
 -import static org.junit.Assert.fail;
 +import org.apache.cassandra.security.EncryptionContext;
 +import org.apache.cassandra.security.EncryptionContextGenerator;
  
  public class CommitLogDescriptorTest
  {
 +    private static final byte[] iv = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 
12, 13, 14, 15};
 +
 +    ParameterizedClass compression;
 +    TransparentDataEncryptionOptions enabledTdeOptions;
 +
 +    // Context with enabledTdeOptions enabled
 +    EncryptionContext enabledEncryption;
 +
 +    // Context with enabledTdeOptions disabled, with the assumption that 
enabledTdeOptions was never previously enabled
 +    EncryptionContext neverEnabledEncryption;
 +
 +    // Context with enabledTdeOptions disabled, with the assumption that 
enabledTdeOptions was previously enabled, but now disabled
 +    // due to operator changing the yaml.
 +    EncryptionContext previouslyEnabledEncryption;
 +
 +    @Before
 +    public void setup()
 +    {
 +        Map<String,String> params = new HashMap<>();
 +        compression = new ParameterizedClass(LZ4Compressor.class.getName(), 
params);
 +
 +        enabledTdeOptions = 
EncryptionContextGenerator.createEncryptionOptions();
 +        enabledEncryption = new EncryptionContext(enabledTdeOptions, iv, 
false);
 +        
 +        neverEnabledEncryption = 
EncryptionContextGenerator.createDisabledContext();
 +        TransparentDataEncryptionOptions disabledTdeOptions = new 
TransparentDataEncryptionOptions(false, enabledTdeOptions.cipher, 
enabledTdeOptions.key_alias, enabledTdeOptions.key_provider);
 +        previouslyEnabledEncryption = new 
EncryptionContext(disabledTdeOptions);
++        
++        DatabaseDescriptor.daemonInitialization();
 +    }
 +
      @Test
      public void testVersions()
      {
diff --cc test/unit/org/apache/cassandra/gms/ArrivalWindowTest.java
index 511511b,459956b..a539ca8
--- a/test/unit/org/apache/cassandra/gms/ArrivalWindowTest.java
+++ b/test/unit/org/apache/cassandra/gms/ArrivalWindowTest.java
@@@ -25,12 -25,12 +25,20 @@@ import static org.junit.Assert.*
  
  import org.junit.Test;
  
- import java.lang.RuntimeException;
  import java.net.InetAddress;
- import java.net.UnknownHostException;
+ 
++import org.apache.cassandra.config.DatabaseDescriptor;
+ import org.apache.cassandra.utils.FBUtilities;
++import org.junit.BeforeClass;
  
  public class ArrivalWindowTest
  {
++    @BeforeClass
++    public static void beforeClass()
++    {
++        DatabaseDescriptor.daemonInitialization();
++    }
++
      @Test
      public void testWithNanoTime()
      {
diff --cc test/unit/org/apache/cassandra/gms/GossipDigestTest.java
index 3191b03,3191b03..36e3b27
--- a/test/unit/org/apache/cassandra/gms/GossipDigestTest.java
+++ b/test/unit/org/apache/cassandra/gms/GossipDigestTest.java
@@@ -28,11 -28,11 +28,20 @@@ import org.apache.cassandra.io.util.Dat
  
  import java.net.InetAddress;
  
++import org.apache.cassandra.config.DatabaseDescriptor;
  import org.apache.cassandra.net.MessagingService;
++
++import org.junit.BeforeClass;
  import org.junit.Test;
  
  public class GossipDigestTest
  {
++    @BeforeClass
++    public static void beforeClass()
++    {
++        DatabaseDescriptor.daemonInitialization();
++    }
++
      @Test
      public void test() throws IOException
      {
diff --cc test/unit/org/apache/cassandra/io/sstable/SSTableHeaderFixTest.java
index 8821872,0000000..c2eadc4
mode 100644,000000..100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableHeaderFixTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableHeaderFixTest.java
@@@ -1,962 -1,0 +1,963 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.cassandra.io.sstable;
 +
 +import java.io.File;
 +import java.nio.ByteBuffer;
 +import java.util.ArrayList;
 +import java.util.Arrays;
 +import java.util.Collections;
 +import java.util.HashSet;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Set;
 +import java.util.UUID;
 +import java.util.function.Function;
 +import java.util.regex.Matcher;
 +import java.util.regex.Pattern;
 +import java.util.stream.Collectors;
 +import java.util.stream.IntStream;
 +import java.util.stream.Stream;
 +
 +import com.google.common.collect.Sets;
 +import org.junit.After;
 +import org.junit.Before;
 +import org.junit.Test;
 +
 +import org.apache.cassandra.config.CFMetaData;
 +import org.apache.cassandra.config.ColumnDefinition;
 +import org.apache.cassandra.config.DatabaseDescriptor;
 +import org.apache.cassandra.cql3.FieldIdentifier;
 +import org.apache.cassandra.cql3.statements.IndexTarget;
 +import org.apache.cassandra.db.SerializationHeader;
 +import org.apache.cassandra.db.marshal.AbstractCompositeType;
 +import org.apache.cassandra.db.marshal.AbstractType;
 +import org.apache.cassandra.db.marshal.CollectionType;
 +import org.apache.cassandra.db.marshal.CompositeType;
 +import org.apache.cassandra.db.marshal.FloatType;
 +import org.apache.cassandra.db.marshal.FrozenType;
 +import org.apache.cassandra.db.marshal.Int32Type;
 +import org.apache.cassandra.db.marshal.ListType;
 +import org.apache.cassandra.db.marshal.MapType;
 +import org.apache.cassandra.db.marshal.SetType;
 +import org.apache.cassandra.db.marshal.TupleType;
 +import org.apache.cassandra.db.marshal.UTF8Type;
 +import org.apache.cassandra.db.marshal.UserType;
 +import org.apache.cassandra.db.rows.EncodingStats;
 +import org.apache.cassandra.dht.Murmur3Partitioner;
 +import org.apache.cassandra.io.sstable.format.SSTableFormat;
 +import org.apache.cassandra.io.sstable.format.Version;
 +import org.apache.cassandra.io.sstable.format.big.BigFormat;
 +import org.apache.cassandra.io.sstable.metadata.MetadataType;
 +import org.apache.cassandra.io.util.FileUtils;
 +import org.apache.cassandra.io.util.SequentialWriter;
 +import org.apache.cassandra.schema.IndexMetadata;
 +import org.apache.cassandra.schema.SchemaKeyspace;
 +import org.apache.cassandra.utils.ByteBufferUtil;
 +import org.apache.cassandra.utils.FBUtilities;
 +
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertFalse;
 +import static org.junit.Assert.assertTrue;
 +import static org.junit.Assert.fail;
 +
 +/**
 + * Test the functionality of {@link SSTableHeaderFix}.
 + * It writes an 'big-m' version sstable(s) and executes against these.
 + */
 +public class SSTableHeaderFixTest
 +{
 +    static
 +    {
 +        DatabaseDescriptor.toolInitialization();
++        DatabaseDescriptor.applyAddressConfig();
 +    }
 +
 +    private File temporaryFolder;
 +
 +    @Before
 +    public void setup()
 +    {
 +        File f = FileUtils.createTempFile("SSTableUDTFixTest", "");
 +        f.delete();
 +        f.mkdirs();
 +        temporaryFolder = f;
 +    }
 +
 +    @After
 +    public void teardown()
 +    {
 +        FileUtils.deleteRecursive(temporaryFolder);
 +    }
 +
 +    private static final AbstractType<?> udtPK = makeUDT("udt_pk");
 +    private static final AbstractType<?> udtCK = makeUDT("udt_ck");
 +    private static final AbstractType<?> udtStatic = makeUDT("udt_static");
 +    private static final AbstractType<?> udtRegular = makeUDT("udt_regular");
 +    private static final AbstractType<?> udtInner = makeUDT("udt_inner");
 +    private static final AbstractType<?> udtNested = new UserType("ks",
 +                                                                  
ByteBufferUtil.bytes("udt_nested"),
 +                                                                  
Arrays.asList(new FieldIdentifier(ByteBufferUtil.bytes("a_field")),
 +                                                                              
  new FieldIdentifier(ByteBufferUtil.bytes("a_udt"))),
 +                                                                  
Arrays.asList(UTF8Type.instance,
 +                                                                              
  udtInner),
 +                                                                  true);
 +    private static final AbstractType<?> tupleInTuple = 
makeTuple(makeTuple());
 +    private static final AbstractType<?> udtInTuple = makeTuple(udtInner);
 +    private static final AbstractType<?> tupleInComposite = 
CompositeType.getInstance(UTF8Type.instance, makeTuple());
 +    private static final AbstractType<?> udtInComposite = 
CompositeType.getInstance(UTF8Type.instance, udtInner);
 +    private static final AbstractType<?> udtInList = 
ListType.getInstance(udtInner, true);
 +    private static final AbstractType<?> udtInSet = 
SetType.getInstance(udtInner, true);
 +    private static final AbstractType<?> udtInMap = 
MapType.getInstance(UTF8Type.instance, udtInner, true);
 +    private static final AbstractType<?> udtInFrozenList = 
ListType.getInstance(udtInner, false);
 +    private static final AbstractType<?> udtInFrozenSet = 
SetType.getInstance(udtInner, false);
 +    private static final AbstractType<?> udtInFrozenMap = 
MapType.getInstance(UTF8Type.instance, udtInner, false);
 +
 +    private static AbstractType<?> makeUDT2(String udtName, boolean multiCell)
 +    {
 +        return new UserType("ks",
 +                            ByteBufferUtil.bytes(udtName),
 +                            Arrays.asList(new 
FieldIdentifier(ByteBufferUtil.bytes("a_field")),
 +                                          new 
FieldIdentifier(ByteBufferUtil.bytes("a_udt"))),
 +                            Arrays.asList(UTF8Type.instance,
 +                                          udtInner),
 +                            multiCell);
 +    }
 +
 +    private static AbstractType<?> makeUDT(String udtName)
 +    {
 +        return new UserType("ks",
 +                            ByteBufferUtil.bytes(udtName),
 +                            Collections.singletonList(new 
FieldIdentifier(ByteBufferUtil.bytes("a_field"))),
 +                            Collections.singletonList(UTF8Type.instance),
 +                            true);
 +    }
 +
 +    private static TupleType makeTuple()
 +    {
 +        return makeTuple(Int32Type.instance);
 +    }
 +
 +    private static TupleType makeTuple(AbstractType<?> second)
 +    {
 +        return new TupleType(Arrays.asList(UTF8Type.instance,
 +                                           second));
 +    }
 +
 +    private static TupleType makeTupleSimple()
 +    {
 +        // TODO this should create a non-frozen tuple type for the sake of 
handling a dropped, non-frozen UDT
 +        return new TupleType(Collections.singletonList(UTF8Type.instance));
 +    }
 +
 +    private static final Version version = 
BigFormat.instance.getVersion("mc");
 +
 +    private CFMetaData tableMetadata;
 +    private final Set<String> updatedColumns = new HashSet<>();
 +
 +    private ColumnDefinition getColDef(String n)
 +    {
 +        return tableMetadata.getColumnDefinition(ByteBufferUtil.bytes(n));
 +    }
 +
 +    /**
 +     * Very basic test whether {@link SSTableHeaderFix} detect a type 
mismatch (regular_c 'int' vs 'float').
 +     */
 +    @Test
 +    public void verifyTypeMismatchTest() throws Exception
 +    {
 +        File dir = temporaryFolder;
 +        File sstable = generateFakeSSTable(dir, 1);
 +
 +        SerializationHeader.Component header = readHeader(sstable);
 +        assertFrozenUdt(header, false, true);
 +
 +        ColumnDefinition cd = getColDef("regular_c");
 +        tableMetadata.removeColumnDefinition(cd);
 +        tableMetadata.addColumnDefinition(ColumnDefinition.regularDef("ks", 
"cf", "regular_c", FloatType.instance));
 +
 +        SSTableHeaderFix headerFix = builder().withPath(sstable.toPath())
 +                                              .build();
 +        headerFix.execute();
 +
 +        assertTrue(headerFix.hasError());
 +        assertTrue(headerFix.hasChanges());
 +
 +        // must not have re-written the stats-component
 +        header = readHeader(sstable);
 +        assertFrozenUdt(header, false, true);
 +    }
 +
 +    @Test
 +    public void verifyTypeMatchTest() throws Exception
 +    {
 +        File dir = temporaryFolder;
 +
 +        List<ColumnDefinition> cols = new ArrayList<>();
 +        cols.add(ColumnDefinition.partitionKeyDef("ks", "cf", "pk", udtPK, 
0));
 +        cols.add(ColumnDefinition.clusteringDef("ks", "cf", "ck", udtCK, 0));
 +        commonColumns(cols);
 +        File sstable = buildFakeSSTable(dir, 1, cols, false);
 +
 +        SerializationHeader.Component header = readHeader(sstable);
 +        assertFrozenUdt(header, false, true);
 +
 +        SSTableHeaderFix headerFix = builder().withPath(sstable.toPath())
 +                                              .build();
 +        headerFix.execute();
 +
 +        assertTrue(updatedColumns.isEmpty());
 +        assertFalse(headerFix.hasError());
 +        assertFalse(headerFix.hasChanges());
 +
 +        // must not have re-written the stats-component
 +        header = readHeader(sstable);
 +        assertFrozenUdt(header, false, true);
 +    }
 +
 +    /**
 +     * Simulates the case when an sstable contains a column not present in 
the schema, which can just be ignored.
 +     */
 +    @Test
 +    public void verifyWithUnknownColumnTest() throws Exception
 +    {
 +        File dir = temporaryFolder;
 +        List<ColumnDefinition> cols = new ArrayList<>();
 +        cols.add(ColumnDefinition.partitionKeyDef("ks", "cf", "pk", udtPK, 
0));
 +        cols.add(ColumnDefinition.clusteringDef("ks", "cf", "ck", udtCK, 0));
 +        commonColumns(cols);
 +        cols.add(ColumnDefinition.regularDef("ks", "cf", "solr_query", 
UTF8Type.instance));
 +        File sstable = buildFakeSSTable(dir, 1, cols, true);
 +
 +        SerializationHeader.Component header = readHeader(sstable);
 +        assertFrozenUdt(header, false, true);
 +
 +        ColumnDefinition cd = getColDef("solr_query");
 +        tableMetadata.removeColumnDefinition(cd);
 +
 +        SSTableHeaderFix headerFix = builder().withPath(sstable.toPath())
 +                                              .build();
 +        headerFix.execute();
 +
 +        assertFalse(headerFix.hasError());
 +        assertTrue(headerFix.hasChanges());
 +
 +        // must not have re-written the stats-component
 +        header = readHeader(sstable);
 +        assertFrozenUdt(header, true, true);
 +    }
 +
 +    /**
 +     * Simulates the case when an sstable contains a column not present in 
the table but as a target for an index.
 +     * It can just be ignored.
 +     */
 +    @Test
 +    public void verifyWithIndexedUnknownColumnTest() throws Exception
 +    {
 +        File dir = temporaryFolder;
 +        List<ColumnDefinition> cols = new ArrayList<>();
 +        cols.add(ColumnDefinition.partitionKeyDef("ks", "cf", "pk", udtPK, 
0));
 +        cols.add(ColumnDefinition.clusteringDef("ks", "cf", "ck", udtCK, 0));
 +        commonColumns(cols);
 +        cols.add(ColumnDefinition.regularDef("ks", "cf", "solr_query", 
UTF8Type.instance));
 +        File sstable = buildFakeSSTable(dir, 1, cols, true);
 +
 +        SerializationHeader.Component header = readHeader(sstable);
 +        assertFrozenUdt(header, false, true);
 +
 +        ColumnDefinition cd = getColDef("solr_query");
 +        
tableMetadata.indexes(tableMetadata.getIndexes().with(IndexMetadata.fromSchemaMetadata("some
 search index", IndexMetadata.Kind.CUSTOM, 
Collections.singletonMap(IndexTarget.TARGET_OPTION_NAME, "solr_query"))));
 +        tableMetadata.removeColumnDefinition(cd);
 +
 +        SSTableHeaderFix headerFix = builder().withPath(sstable.toPath())
 +                                              .build();
 +        headerFix.execute();
 +
 +        assertFalse(headerFix.hasError());
 +        assertTrue(headerFix.hasChanges());
 +
 +        // must not have re-written the stats-component
 +        header = readHeader(sstable);
 +        assertFrozenUdt(header, true, true);
 +    }
 +
 +    @Test
 +    public void complexTypeMatchTest() throws Exception
 +    {
 +        File dir = temporaryFolder;
 +
 +        List<ColumnDefinition> cols = new ArrayList<>();
 +        cols.add(ColumnDefinition.partitionKeyDef("ks", "cf", "pk", udtPK, 
0));
 +        cols.add(ColumnDefinition.clusteringDef("ks", "cf", "ck", udtCK, 0));
 +        commonColumns(cols);
 +        cols.add(ColumnDefinition.regularDef("ks", "cf", "tuple_in_tuple", 
tupleInTuple));
 +        cols.add(ColumnDefinition.regularDef("ks", "cf", "udt_nested", 
udtNested));
 +        cols.add(ColumnDefinition.regularDef("ks", "cf", "udt_in_tuple", 
udtInTuple));
 +        cols.add(ColumnDefinition.regularDef("ks", "cf", 
"tuple_in_composite", tupleInComposite));
 +        cols.add(ColumnDefinition.regularDef("ks", "cf", "udt_in_composite", 
udtInComposite));
 +        cols.add(ColumnDefinition.regularDef("ks", "cf", "udt_in_list", 
udtInList));
 +        cols.add(ColumnDefinition.regularDef("ks", "cf", "udt_in_set", 
udtInSet));
 +        cols.add(ColumnDefinition.regularDef("ks", "cf", "udt_in_map", 
udtInMap));
 +        cols.add(ColumnDefinition.regularDef("ks", "cf", 
"udt_in_frozen_list", udtInFrozenList));
 +        cols.add(ColumnDefinition.regularDef("ks", "cf", "udt_in_frozen_set", 
udtInFrozenSet));
 +        cols.add(ColumnDefinition.regularDef("ks", "cf", "udt_in_frozen_map", 
udtInFrozenMap));
 +        File sstable = buildFakeSSTable(dir, 1, cols, true);
 +
 +        SerializationHeader.Component header = readHeader(sstable);
 +        assertFrozenUdt(header, false, true);
 +
 +        SSTableHeaderFix headerFix = builder().withPath(sstable.toPath())
 +                                              .build();
 +        headerFix.execute();
 +
 +        assertFalse(headerFix.hasError());
 +        assertTrue(headerFix.hasChanges());
 +        assertEquals(Sets.newHashSet("pk", "ck", "regular_b", "static_b",
 +                                     "udt_nested", "udt_in_composite", 
"udt_in_list", "udt_in_set", "udt_in_map"), updatedColumns);
 +
 +        // must not have re-written the stats-component
 +        header = readHeader(sstable);
 +        assertFrozenUdt(header, true, true);
 +    }
 +
 +    @Test
 +    public void complexTypeDroppedColumnsMatchTest() throws Exception
 +    {
 +        File dir = temporaryFolder;
 +
 +        List<ColumnDefinition> cols = new ArrayList<>();
 +        cols.add(ColumnDefinition.partitionKeyDef("ks", "cf", "pk", udtPK, 
0));
 +        cols.add(ColumnDefinition.clusteringDef("ks", "cf", "ck", udtCK, 0));
 +        commonColumns(cols);
 +        cols.add(ColumnDefinition.regularDef("ks", "cf", "tuple_in_tuple", 
tupleInTuple));
 +        cols.add(ColumnDefinition.regularDef("ks", "cf", "udt_nested", 
udtNested));
 +        cols.add(ColumnDefinition.regularDef("ks", "cf", "udt_in_tuple", 
udtInTuple));
 +        cols.add(ColumnDefinition.regularDef("ks", "cf", 
"tuple_in_composite", tupleInComposite));
 +        cols.add(ColumnDefinition.regularDef("ks", "cf", "udt_in_composite", 
udtInComposite));
 +        cols.add(ColumnDefinition.regularDef("ks", "cf", "udt_in_list", 
udtInList));
 +        cols.add(ColumnDefinition.regularDef("ks", "cf", "udt_in_set", 
udtInSet));
 +        cols.add(ColumnDefinition.regularDef("ks", "cf", "udt_in_map", 
udtInMap));
 +        cols.add(ColumnDefinition.regularDef("ks", "cf", 
"udt_in_frozen_list", udtInFrozenList));
 +        cols.add(ColumnDefinition.regularDef("ks", "cf", "udt_in_frozen_set", 
udtInFrozenSet));
 +        cols.add(ColumnDefinition.regularDef("ks", "cf", "udt_in_frozen_map", 
udtInFrozenMap));
 +        File sstable = buildFakeSSTable(dir, 1, cols, true);
 +
 +        for (String col : new String[]{"tuple_in_tuple", "udt_nested", 
"udt_in_tuple",
 +                                       "tuple_in_composite", 
"udt_in_composite",
 +                                       "udt_in_list", "udt_in_set", 
"udt_in_map",
 +                                       "udt_in_frozen_list", 
"udt_in_frozen_set", "udt_in_frozen_map"})
 +        {
 +            ColumnDefinition cd = getColDef(col);
 +            tableMetadata.removeColumnDefinition(cd);
 +            AbstractType<?> dropType = 
SchemaKeyspace.expandUserTypes(cd.type);
 +            tableMetadata.recordColumnDrop(new ColumnDefinition(cd.ksName, 
cd.cfName, cd.name, dropType, cd.position(), cd.kind), 
FBUtilities.timestampMicros());
 +        }
 +
 +        SerializationHeader.Component header = readHeader(sstable);
 +        assertFrozenUdt(header, false, true);
 +
 +        SSTableHeaderFix headerFix = builder().withPath(sstable.toPath())
 +                                              .build();
 +        headerFix.execute();
 +
 +        assertFalse(headerFix.hasError());
 +        assertTrue(headerFix.hasChanges());
 +        assertEquals(Sets.newHashSet("pk", "ck", "regular_b", "static_b", 
"udt_nested"), updatedColumns);
 +
 +        // must not have re-written the stats-component
 +        header = readHeader(sstable);
 +        // do not check the inner types, as the inner types were not fixed in 
the serialization-header (test thing)
 +        assertFrozenUdt(header, true, false);
 +    }
 +
 +    @Test
 +    public void variousDroppedUserTypes() throws Exception
 +    {
 +        File dir = temporaryFolder;
 +
 +        List<ColumnDefinition> cols = new ArrayList<>();
 +        cols.add(ColumnDefinition.partitionKeyDef("ks", "cf", "pk", udtPK, 
0));
 +        cols.add(ColumnDefinition.clusteringDef("ks", "cf", "ck", udtCK, 0));
 +
 +        ColSpec[] colSpecs = new ColSpec[]
 +                {
 +                        // 'frozen<udt>' / live
 +                        new ColSpec("frozen_udt_as_frozen_udt_live",
 +                                    makeUDT2("frozen_udt_as_frozen_udt_live", 
false),
 +                                    makeUDT2("frozen_udt_as_frozen_udt_live", 
false),
 +                                    false,
 +                                    false),
 +                        // 'frozen<udt>' / live / as 'udt'
 +                        new ColSpec("frozen_udt_as_unfrozen_udt_live",
 +                                    
makeUDT2("frozen_udt_as_unfrozen_udt_live", false),
 +                                    
makeUDT2("frozen_udt_as_unfrozen_udt_live", true),
 +                                    false,
 +                                    true),
 +                        // 'frozen<udt>' / dropped
 +                        new ColSpec("frozen_udt_as_frozen_udt_dropped",
 +                                    
SchemaKeyspace.expandUserTypes(makeUDT2("frozen_udt_as_frozen_udt_dropped", 
true).freezeNestedMulticellTypes().freeze()),
 +                                    
makeUDT2("frozen_udt_as_frozen_udt_dropped", false),
 +                                    
makeUDT2("frozen_udt_as_frozen_udt_dropped", false),
 +                                    true,
 +                                    false),
 +                        // 'frozen<udt>' / dropped / as 'udt'
 +                        new ColSpec("frozen_udt_as_unfrozen_udt_dropped",
 +                                    
SchemaKeyspace.expandUserTypes(makeUDT2("frozen_udt_as_unfrozen_udt_dropped", 
true).freezeNestedMulticellTypes().freeze()),
 +                                    
makeUDT2("frozen_udt_as_unfrozen_udt_dropped", true),
 +                                    
makeUDT2("frozen_udt_as_unfrozen_udt_dropped", false),
 +                                    true,
 +                                    true),
 +                        // 'udt' / live
 +                        new ColSpec("unfrozen_udt_as_unfrozen_udt_live",
 +                                    
makeUDT2("unfrozen_udt_as_unfrozen_udt_live", true),
 +                                    
makeUDT2("unfrozen_udt_as_unfrozen_udt_live", true),
 +                                    false,
 +                                    false),
 +                        // 'udt' / dropped
 +// TODO unable to test dropping a non-frozen UDT, as that requires an 
unfrozen tuple as well
 +//                        new ColSpec("unfrozen_udt_as_unfrozen_udt_dropped",
 +//                                    
SchemaKeyspace.expandUserTypes(makeUDT2("unfrozen_udt_as_unfrozen_udt_dropped", 
true).freezeNestedMulticellTypes()),
 +//                                    
makeUDT2("unfrozen_udt_as_unfrozen_udt_dropped", true),
 +//                                    
makeUDT2("unfrozen_udt_as_unfrozen_udt_dropped", true),
 +//                                    true,
 +//                                    false),
 +                        // 'frozen<tuple>' as 'TupleType(multiCell=false' 
(there is nothing like 'FrozenType(TupleType(')
 +                        new ColSpec("frozen_tuple_as_frozen_tuple_live",
 +                                    makeTupleSimple(),
 +                                    makeTupleSimple(),
 +                                    false,
 +                                    false),
 +                        // 'frozen<tuple>' as 'TupleType(multiCell=false' 
(there is nothing like 'FrozenType(TupleType(')
 +                        new ColSpec("frozen_tuple_as_frozen_tuple_dropped",
 +                                    makeTupleSimple(),
 +                                    makeTupleSimple(),
 +                                    true,
 +                                    false)
 +                };
 +
 +        Arrays.stream(colSpecs).forEach(c -> 
cols.add(ColumnDefinition.regularDef("ks", "cf", c.name,
 +                                                                              
// use the initial column type for the serialization header header.
 +                                                                              
c.preFix)));
 +
 +        Map<String, ColSpec> colSpecMap = 
Arrays.stream(colSpecs).collect(Collectors.toMap(c -> c.name, c -> c));
 +        File sstable = buildFakeSSTable(dir, 1, cols, (s) -> s.map(c -> {
 +            ColSpec cs = colSpecMap.get(c.name.toString());
 +            if (cs == null)
 +                return c;
 +            // update the column type in the schema to the "correct" one.
 +            return new ColumnDefinition(c.ksName, c.cfName, c.name, 
cs.schema, c.position(), c.kind);
 +        }));
 +
 +        Arrays.stream(colSpecs)
 +              .filter(c -> c.dropped)
 +              .forEach(c -> {
 +                  ColumnDefinition cd = getColDef(c.name);
 +                  tableMetadata.removeColumnDefinition(cd);
 +                  tableMetadata.recordColumnDrop(cd, 
FBUtilities.timestampMicros());
 +              });
 +
 +        SerializationHeader.Component header = readHeader(sstable);
 +        for (ColSpec colSpec : colSpecs)
 +        {
 +            AbstractType<?> hdrType = 
header.getRegularColumns().get(ByteBufferUtil.bytes(colSpec.name));
 +            assertEquals(colSpec.name, colSpec.preFix, hdrType);
 +            assertEquals(colSpec.name, colSpec.preFix.isMultiCell(), 
hdrType.isMultiCell());
 +        }
 +
 +        SSTableHeaderFix headerFix = builder().withPath(sstable.toPath())
 +                                              .build();
 +        headerFix.execute();
 +
 +        assertFalse(headerFix.hasError());
 +        assertTrue(headerFix.hasChanges());
 +        // Verify that all columns to fix are in the updatedColumns set 
(paranoid, yet)
 +        Arrays.stream(colSpecs)
 +              .filter(c -> c.mustFix)
 +              .forEach(c -> assertTrue("expect " + c.name + " to be updated, 
but was not (" + updatedColumns + ")", updatedColumns.contains(c.name)));
 +        // Verify that the number of updated columns maches the expected 
number of columns to fix
 +        assertEquals(Arrays.stream(colSpecs).filter(c -> c.mustFix).count(), 
updatedColumns.size());
 +
 +        header = readHeader(sstable);
 +        for (ColSpec colSpec : colSpecs)
 +        {
 +            AbstractType<?> hdrType = 
header.getRegularColumns().get(ByteBufferUtil.bytes(colSpec.name));
 +            assertEquals(colSpec.name, colSpec.expect, hdrType);
 +            assertEquals(colSpec.name, colSpec.expect.isMultiCell(), 
hdrType.isMultiCell());
 +        }
 +    }
 +
 +    static class ColSpec
 +    {
 +        final String name;
 +        final AbstractType<?> schema;
 +        final AbstractType<?> preFix;
 +        final AbstractType<?> expect;
 +        final boolean dropped;
 +        final boolean mustFix;
 +
 +        ColSpec(String name, AbstractType<?> schema, AbstractType<?> preFix, 
boolean dropped, boolean mustFix)
 +        {
 +            this(name, schema, preFix, schema, dropped, mustFix);
 +        }
 +
 +        ColSpec(String name, AbstractType<?> schema, AbstractType<?> preFix, 
AbstractType<?> expect, boolean dropped, boolean mustFix)
 +        {
 +            this.name = name;
 +            this.schema = schema;
 +            this.preFix = preFix;
 +            this.expect = expect;
 +            this.dropped = dropped;
 +            this.mustFix = mustFix;
 +        }
 +    }
 +
 +    @Test
 +    public void verifyTypeMatchCompositeKeyTest() throws Exception
 +    {
 +        File dir = temporaryFolder;
 +
 +        List<ColumnDefinition> cols = new ArrayList<>();
 +        cols.add(ColumnDefinition.partitionKeyDef("ks", "cf", "pk1", 
UTF8Type.instance, 0));
 +        cols.add(ColumnDefinition.partitionKeyDef("ks", "cf", "pk2", udtPK, 
1));
 +        cols.add(ColumnDefinition.clusteringDef("ks", "cf", "ck", udtCK, 0));
 +        commonColumns(cols);
 +        File sstable = buildFakeSSTable(dir, 1, cols, false);
 +
 +        SerializationHeader.Component header = readHeader(sstable);
 +        assertFrozenUdt(header, false, true);
 +
 +        SSTableHeaderFix headerFix = builder().withPath(sstable.toPath())
 +                                              .build();
 +        headerFix.execute();
 +
 +        assertFalse(headerFix.hasError());
 +        assertFalse(headerFix.hasChanges());
 +        assertTrue(updatedColumns.isEmpty());
 +
 +        // must not have re-written the stats-component
 +        header = readHeader(sstable);
 +        assertFrozenUdt(header, false, true);
 +    }
 +
 +    @Test
 +    public void compositePartitionKey() throws Exception
 +    {
 +        List<ColumnDefinition> cols = new ArrayList<>();
 +        cols.add(ColumnDefinition.partitionKeyDef("ks", "cf", "pk1", 
UTF8Type.instance, 0));
 +        cols.add(ColumnDefinition.partitionKeyDef("ks", "cf", "pk2", udtPK, 
1));
 +        cols.add(ColumnDefinition.clusteringDef("ks", "cf", "ck", udtCK, 0));
 +        commonColumns(cols);
 +
 +        File dir = temporaryFolder;
 +        File sstable = buildFakeSSTable(dir, 1, cols, true);
 +
 +        SerializationHeader.Component header = readHeader(sstable);
 +        assertTrue(header.getKeyType() instanceof CompositeType);
 +        CompositeType keyType = (CompositeType) header.getKeyType();
 +        assertEquals(Arrays.asList(UTF8Type.instance, udtPK), 
keyType.getComponents());
 +
 +        SSTableHeaderFix headerFix = builder().withPath(sstable.toPath())
 +                                              .build();
 +        headerFix.execute();
 +
 +        assertFalse(headerFix.hasError());
 +        assertTrue(headerFix.hasChanges());
 +        assertEquals(Sets.newHashSet("pk2", "ck", "regular_b", "static_b"), 
updatedColumns);
 +
 +        header = readHeader(sstable);
 +        assertTrue(header.getKeyType() instanceof CompositeType);
 +        keyType = (CompositeType) header.getKeyType();
 +        assertEquals(Arrays.asList(UTF8Type.instance, udtPK.freeze()), 
keyType.getComponents());
 +    }
 +
 +    @Test
 +    public void compositeClusteringKey() throws Exception
 +    {
 +        List<ColumnDefinition> cols = new ArrayList<>();
 +        cols.add(ColumnDefinition.partitionKeyDef("ks", "cf", "pk", udtPK, 
0));
 +        cols.add(ColumnDefinition.clusteringDef("ks", "cf", "ck1", 
Int32Type.instance, 0));
 +        cols.add(ColumnDefinition.clusteringDef("ks", "cf", "ck2", udtCK, 1));
 +        commonColumns(cols);
 +
 +        File dir = temporaryFolder;
 +        File sstable = buildFakeSSTable(dir, 1, cols, true);
 +
 +        SerializationHeader.Component header = readHeader(sstable);
 +        assertEquals(Arrays.asList(Int32Type.instance, udtCK), 
header.getClusteringTypes());
 +
 +        SSTableHeaderFix headerFix = builder().withPath(sstable.toPath())
 +                                              .build();
 +        headerFix.execute();
 +
 +        assertFalse(headerFix.hasError());
 +        assertTrue(headerFix.hasChanges());
 +        assertEquals(Sets.newHashSet("pk", "ck2", "regular_b", "static_b"), 
updatedColumns);
 +
 +        header = readHeader(sstable);
 +        assertEquals(Arrays.asList(Int32Type.instance, udtCK.freeze()), 
header.getClusteringTypes());
 +    }
 +
 +    /**
 +     * Check whether {@link SSTableHeaderFix} can operate on a single file.
 +     */
 +    @Test
 +    public void singleFileUDTFixTest() throws Exception
 +    {
 +        File dir = temporaryFolder;
 +        File sstable = generateFakeSSTable(dir, 1);
 +
 +        SerializationHeader.Component header = readHeader(sstable);
 +        assertFrozenUdt(header, false, true);
 +
 +        SSTableHeaderFix headerFix = builder().withPath(sstable.toPath())
 +                                              .build();
 +        headerFix.execute();
 +
 +        assertTrue(headerFix.hasChanges());
 +        assertFalse(headerFix.hasError());
 +
 +        header = readHeader(sstable);
 +        assertFrozenUdt(header, true, true);
 +    }
 +
 +    /**
 +     * Check whether {@link SSTableHeaderFix} can operate on a file in a 
directory.
 +     */
 +    @Test
 +    public void singleDirectoryUDTFixTest() throws Exception
 +    {
 +        File dir = temporaryFolder;
 +        List<File> sstables = IntStream.range(1, 11)
 +                                       .mapToObj(g -> 
generateFakeSSTable(dir, g))
 +                                       .collect(Collectors.toList());
 +
 +        for (File sstable : sstables)
 +        {
 +            SerializationHeader.Component header = readHeader(sstable);
 +            assertFrozenUdt(header, false, true);
 +        }
 +
 +        SSTableHeaderFix headerFix = builder().withPath(dir.toPath())
 +                                              .build();
 +        headerFix.execute();
 +
 +        assertTrue(headerFix.hasChanges());
 +        assertFalse(headerFix.hasError());
 +
 +        for (File sstable : sstables)
 +        {
 +            SerializationHeader.Component header = readHeader(sstable);
 +            assertFrozenUdt(header, true, true);
 +        }
 +    }
 +
 +    /**
 +     * Check whether {@link SSTableHeaderFix} can operate multiple, single 
files.
 +     */
 +    @Test
 +    public void multipleFilesUDTFixTest() throws Exception
 +    {
 +        File dir = temporaryFolder;
 +        List<File> sstables = IntStream.range(1, 11)
 +                                       .mapToObj(g -> 
generateFakeSSTable(dir, g))
 +                                       .collect(Collectors.toList());
 +
 +        for (File sstable : sstables)
 +        {
 +            SerializationHeader.Component header = readHeader(sstable);
 +            assertFrozenUdt(header, false, true);
 +        }
 +
 +        SSTableHeaderFix.Builder builder = builder();
 +        sstables.stream().map(File::toPath).forEach(builder::withPath);
 +        SSTableHeaderFix headerFix = builder.build();
 +        headerFix.execute();
 +
 +        assertTrue(headerFix.hasChanges());
 +        assertFalse(headerFix.hasError());
 +
 +        for (File sstable : sstables)
 +        {
 +            SerializationHeader.Component header = readHeader(sstable);
 +            assertFrozenUdt(header, true, true);
 +        }
 +    }
 +
 +    /**
 +     * Check whether {@link SSTableHeaderFix} can operate multiple files in a 
directory.
 +     */
 +    @Test
 +    public void multipleFilesInDirectoryUDTFixTest() throws Exception
 +    {
 +        File dir = temporaryFolder;
 +        List<File> sstables = IntStream.range(1, 11)
 +                                       .mapToObj(g -> 
generateFakeSSTable(dir, g))
 +                                       .collect(Collectors.toList());
 +
 +        for (File sstable : sstables)
 +        {
 +            SerializationHeader.Component header = readHeader(sstable);
 +            assertFrozenUdt(header, false, true);
 +        }
 +
 +        SSTableHeaderFix headerFix = builder().withPath(dir.toPath())
 +                                              .build();
 +        headerFix.execute();
 +
 +        assertTrue(headerFix.hasChanges());
 +        assertFalse(headerFix.hasError());
 +
 +        for (File sstable : sstables)
 +        {
 +            SerializationHeader.Component header = readHeader(sstable);
 +            assertFrozenUdt(header, true, true);
 +        }
 +    }
 +
 +    private static final Pattern p = Pattern.compile(".* Column '([^']+)' 
needs to be updated from type .*");
 +
 +    private SSTableHeaderFix.Builder builder()
 +    {
 +        updatedColumns.clear();
 +        return SSTableHeaderFix.builder()
 +                               .schemaCallback(() -> (desc) -> tableMetadata)
 +                               .info(ln -> {
 +                                   System.out.println("INFO: " + ln);
 +                                   Matcher m = p.matcher(ln);
 +                                   if (m.matches())
 +                                       updatedColumns.add(m.group(1));
 +                               })
 +                               .warn(ln -> System.out.println("WARN: " + ln))
 +                               .error(ln -> System.out.println("ERROR: " + 
ln));
 +    }
 +
 +    private File generateFakeSSTable(File dir, int generation)
 +    {
 +        List<ColumnDefinition> cols = new ArrayList<>();
 +        cols.add(ColumnDefinition.partitionKeyDef("ks", "cf", "pk", udtPK, 
0));
 +        cols.add(ColumnDefinition.clusteringDef("ks", "cf", "ck", udtCK, 0));
 +        commonColumns(cols);
 +        return buildFakeSSTable(dir, generation, cols, true);
 +    }
 +
 +    private void commonColumns(List<ColumnDefinition> cols)
 +    {
 +        cols.add(ColumnDefinition.regularDef("ks", "cf", "regular_a", 
UTF8Type.instance));
 +        cols.add(ColumnDefinition.regularDef("ks", "cf", "regular_b", 
udtRegular));
 +        cols.add(ColumnDefinition.regularDef("ks", "cf", "regular_c", 
Int32Type.instance));
 +        cols.add(ColumnDefinition.staticDef("ks", "cf", "static_a", 
UTF8Type.instance));
 +        cols.add(ColumnDefinition.staticDef("ks", "cf", "static_b", 
udtStatic));
 +        cols.add(ColumnDefinition.staticDef("ks", "cf", "static_c", 
Int32Type.instance));
 +    }
 +
 +    private File buildFakeSSTable(File dir, int generation, 
List<ColumnDefinition> cols, boolean freezeInSchema)
 +    {
 +        return buildFakeSSTable(dir, generation, cols, freezeInSchema
 +                                                       ? s -> s.map(c -> new 
ColumnDefinition(c.ksName, c.cfName, c.name, freezeUdt(c.type), c.position(), 
c.kind))
 +                                                       : s -> s);
 +    }
 +
 +    private File buildFakeSSTable(File dir, int generation, 
List<ColumnDefinition> cols, Function<Stream<ColumnDefinition>, 
Stream<ColumnDefinition>> freezer)
 +    {
 +        CFMetaData headerMetadata = CFMetaData.create("ks", "cf",
 +                                                      UUID.randomUUID(),
 +                                                      false, true, false, 
false, false,
 +                                                      cols,
 +                                                      new 
Murmur3Partitioner());
 +
 +        List<ColumnDefinition> schemaCols = 
freezer.apply(cols.stream()).collect(Collectors.toList());
 +        tableMetadata = CFMetaData.create("ks", "cf",
 +                                          UUID.randomUUID(),
 +                                          false, true, false, false, false,
 +                                          schemaCols,
 +                                          new Murmur3Partitioner());
 +
 +        try
 +        {
 +            Descriptor desc = new Descriptor(version, dir, "ks", "cf", 
generation, SSTableFormat.Type.BIG, Component.DATA);
 +
 +            // Just create the component files - we don't really need those.
 +            for (Component component : requiredComponents)
 +                assertTrue(new 
File(desc.filenameFor(component)).createNewFile());
 +
 +            AbstractType<?> partitionKey = headerMetadata.getKeyValidator();
 +            List<AbstractType<?>> clusteringKey = 
headerMetadata.clusteringColumns()
 +                                                                .stream()
 +                                                                .map(cd -> 
cd.type)
 +                                                                
.collect(Collectors.toList());
 +            Map<ByteBuffer, AbstractType<?>> staticColumns = 
headerMetadata.allColumns()
 +                                                                           
.stream()
 +                                                                           
.filter(cd -> cd.kind == ColumnDefinition.Kind.STATIC)
 +                                                                           
.collect(Collectors.toMap(cd -> cd.name.bytes, cd -> cd.type, (a, b) -> a));
 +            Map<ByteBuffer, AbstractType<?>> regularColumns = 
headerMetadata.allColumns()
 +                                                                            
.stream()
 +                                                                            
.filter(cd -> cd.kind == ColumnDefinition.Kind.REGULAR)
 +                                                                            
.collect(Collectors.toMap(cd -> cd.name.bytes, cd -> cd.type, (a, b) -> a));
 +
 +            File statsFile = new File(desc.filenameFor(Component.STATS));
 +            SerializationHeader.Component header = 
SerializationHeader.Component.buildComponentForTools(partitionKey,
 +                                                                              
                          clusteringKey,
 +                                                                              
                          staticColumns,
 +                                                                              
                          regularColumns,
 +                                                                              
                          EncodingStats.NO_STATS);
 +
 +            try (SequentialWriter out = new SequentialWriter(statsFile))
 +            {
 +                
desc.getMetadataSerializer().serialize(Collections.singletonMap(MetadataType.HEADER,
 header), out, version);
 +                out.finish();
 +            }
 +
 +            return new File(desc.filenameFor(Component.DATA));
 +        }
 +        catch (Exception e)
 +        {
 +            throw new RuntimeException(e);
 +        }
 +    }
 +
 +    private AbstractType<?> freezeUdt(AbstractType<?> type)
 +    {
 +        if (type instanceof CollectionType)
 +        {
 +            if (type.getClass() == ListType.class)
 +            {
 +                ListType<?> cHeader = (ListType<?>) type;
 +                return 
ListType.getInstance(freezeUdt(cHeader.getElementsType()), 
cHeader.isMultiCell());
 +            }
 +            else if (type.getClass() == SetType.class)
 +            {
 +                SetType<?> cHeader = (SetType<?>) type;
 +                return 
SetType.getInstance(freezeUdt(cHeader.getElementsType()), 
cHeader.isMultiCell());
 +            }
 +            else if (type.getClass() == MapType.class)
 +            {
 +                MapType<?, ?> cHeader = (MapType<?, ?>) type;
 +                return MapType.getInstance(freezeUdt(cHeader.getKeysType()), 
freezeUdt(cHeader.getValuesType()), cHeader.isMultiCell());
 +            }
 +        }
 +        else if (type instanceof AbstractCompositeType)
 +        {
 +            if (type.getClass() == CompositeType.class)
 +            {
 +                CompositeType cHeader = (CompositeType) type;
 +                return 
CompositeType.getInstance(cHeader.types.stream().map(this::freezeUdt).collect(Collectors.toList()));
 +            }
 +        }
 +        else if (type instanceof TupleType)
 +        {
 +            if (type.getClass() == UserType.class)
 +            {
 +                UserType cHeader = (UserType) type;
 +                cHeader = cHeader.freeze();
 +                return new UserType(cHeader.keyspace, cHeader.name, 
cHeader.fieldNames(),
 +                                    
cHeader.allTypes().stream().map(this::freezeUdt).collect(Collectors.toList()),
 +                                    cHeader.isMultiCell());
 +            }
 +        }
 +        return type;
 +    }
 +
 +    private void assertFrozenUdt(SerializationHeader.Component header, 
boolean frozen, boolean checkInner)
 +    {
 +        AbstractType<?> keyType = header.getKeyType();
 +        if (keyType instanceof CompositeType)
 +        {
 +            for (AbstractType<?> component : ((CompositeType) keyType).types)
 +                assertFrozenUdt("partition-key-component", component, frozen, 
checkInner);
 +        }
 +        assertFrozenUdt("partition-key", keyType, frozen, checkInner);
 +
 +        for (AbstractType<?> type : header.getClusteringTypes())
 +            assertFrozenUdt("clustering-part", type, frozen, checkInner);
 +        for (Map.Entry<ByteBuffer, AbstractType<?>> col : 
header.getStaticColumns().entrySet())
 +            assertFrozenUdt(UTF8Type.instance.compose(col.getKey()), 
col.getValue(), frozen, checkInner);
 +        for (Map.Entry<ByteBuffer, AbstractType<?>> col : 
header.getRegularColumns().entrySet())
 +            assertFrozenUdt(UTF8Type.instance.compose(col.getKey()), 
col.getValue(), frozen, checkInner);
 +    }
 +
 +    private void assertFrozenUdt(String name, AbstractType<?> type, boolean 
frozen, boolean checkInner)
 +    {
 +        if (type instanceof CompositeType)
 +        {
 +            if (checkInner)
 +                for (AbstractType<?> component : ((CompositeType) type).types)
 +                    assertFrozenUdt(name, component, frozen, true);
 +        }
 +        else if (type instanceof CollectionType)
 +        {
 +            if (checkInner)
 +            {
 +                if (type instanceof MapType)
 +                {
 +                    MapType map = (MapType) type;
 +                    // only descend for non-frozen types (checking frozen in 
frozen is just stupid)
 +                    if (map.isMultiCell())
 +                    {
 +                        assertFrozenUdt(name + "<map-key>", 
map.getKeysType(), frozen, true);
 +                        assertFrozenUdt(name + "<map-value>", 
map.getValuesType(), frozen, true);
 +                    }
 +                }
 +                else if (type instanceof SetType)
 +                {
 +                    SetType set = (SetType) type;
 +                    // only descend for non-frozen types (checking frozen in 
frozen is just stupid)
 +                    if (set.isMultiCell())
 +                        assertFrozenUdt(name + "<set>", 
set.getElementsType(), frozen, true);
 +                }
 +                else if (type instanceof ListType)
 +                {
 +                    ListType list = (ListType) type;
 +                    // only descend for non-frozen types (checking frozen in 
frozen is just stupid)
 +                    if (list.isMultiCell())
 +                        assertFrozenUdt(name + "<list>", 
list.getElementsType(), frozen, true);
 +                }
 +            }
 +        }
 +        else if (type instanceof TupleType)
 +        {
 +            if (checkInner)
 +            {
 +                TupleType tuple = (TupleType) type;
 +                // only descend for non-frozen types (checking frozen in 
frozen is just stupid)
 +                if (tuple.isMultiCell())
 +                    for (AbstractType<?> component : tuple.allTypes())
 +                        assertFrozenUdt(name + "<tuple>", component, frozen, 
true);
 +            }
 +        }
 +
 +        if (type instanceof UserType)
 +        {
 +            String typeString = type.toString();
 +            assertEquals(name + ": " + typeString, frozen, 
!type.isMultiCell());
 +            if (typeString.startsWith(UserType.class.getName() + '('))
 +                if (frozen)
 +                    fail(name + ": " + typeString);
 +            if (typeString.startsWith(FrozenType.class.getName() + '(' + 
UserType.class.getName() + '('))
 +                if (!frozen)
 +                    fail(name + ": " + typeString);
 +        }
 +    }
 +
 +    private SerializationHeader.Component readHeader(File sstable) throws 
Exception
 +    {
 +        Descriptor desc = Descriptor.fromFilename(sstable.getParentFile(), 
sstable.getName()).left;
 +        return (SerializationHeader.Component) 
desc.getMetadataSerializer().deserialize(desc, MetadataType.HEADER);
 +    }
 +
 +    private static final Component[] requiredComponents = new Component[]{ 
Component.DATA, Component.FILTER, Component.PRIMARY_INDEX, Component.TOC };
 +}
diff --cc 
test/unit/org/apache/cassandra/io/sstable/format/ClientModeSSTableTest.java
index d86a44f,48a8af5..90522d6
--- 
a/test/unit/org/apache/cassandra/io/sstable/format/ClientModeSSTableTest.java
+++ 
b/test/unit/org/apache/cassandra/io/sstable/format/ClientModeSSTableTest.java
@@@ -53,7 -55,7 +53,8 @@@ public class ClientModeSSTableTes
      @BeforeClass
      public static void defineSchema() throws ConfigurationException
      {
-         DatabaseDescriptor.toolInitialization();
 -        Config.setClientMode(true);
++        DatabaseDescriptor.clientInitialization();
++        DatabaseDescriptor.applyAddressConfig();
  
          metadata = CFMetaData.Builder.createDense(KSNAME, CFNAME, false, 
false)
                                                  .addPartitionKey("key", 
BytesType.instance)
diff --cc test/unit/org/apache/cassandra/net/MessagingServiceTest.java
index e7d90c4,3be1990..82630b4
--- a/test/unit/org/apache/cassandra/net/MessagingServiceTest.java
+++ b/test/unit/org/apache/cassandra/net/MessagingServiceTest.java
@@@ -20,35 -20,12 +20,36 @@@
   */
  package org.apache.cassandra.net;
  
 +import java.io.ByteArrayInputStream;
 +import java.io.ByteArrayOutputStream;
 +import java.io.IOException;
 +import java.net.InetAddress;
 +import java.net.UnknownHostException;
 +import java.util.Arrays;
 +import java.util.Collections;
  import java.util.List;
 +import java.util.Map;
 +import java.util.Set;
 +import java.util.concurrent.ConcurrentHashMap;
 +import java.util.concurrent.TimeUnit;
 +import java.util.regex.*;
 +import java.util.regex.Matcher;
  
 +import com.google.common.collect.Iterables;
 +import com.codahale.metrics.Timer;
 +
 +import org.apache.cassandra.config.DatabaseDescriptor;
 +import org.apache.cassandra.db.monitoring.ApproximateTime;
 +import org.apache.cassandra.io.util.DataInputPlus.DataInputStreamPlus;
 +import org.apache.cassandra.io.util.DataOutputStreamPlus;
 +import org.apache.cassandra.io.util.WrappedDataOutputStreamPlus;
++import org.apache.cassandra.utils.FBUtilities;
 +import org.caffinitas.ohc.histo.EstimatedHistogram;
  import org.junit.Before;
 +import org.junit.BeforeClass;
  import org.junit.Test;
  
 -import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.*;
  
  public class MessagingServiceTest
  {
@@@ -98,257 -55,11 +99,257 @@@
          assertEquals(0, logs.size());
  
          for (int i = 0; i < 2500; i++)
 -            messagingService.incrementDroppedMessages(verb, i % 2 == 0);
 +            messagingService.incrementDroppedMessages(verb, i, i % 2 == 0);
  
          logs = messagingService.getDroppedMessagesLogs();
 -        assertEquals("READ messages were dropped in last 5000 ms: 1250 for 
internal timeout and 1250 for cross node timeout", logs.get(0));
 -        assertEquals(7500, 
(int)messagingService.getDroppedMessages().get(verb.toString()));
 +        assertEquals(1, logs.size());
 +        matcher = regexp.matcher(logs.get(0));
 +        assertTrue(matcher.find());
 +        assertEquals(1250, Integer.parseInt(matcher.group(1)));
 +        assertEquals(1250, Integer.parseInt(matcher.group(2)));
 +        assertTrue(Integer.parseInt(matcher.group(3)) > 0);
 +        assertTrue(Integer.parseInt(matcher.group(4)) > 0);
 +        assertEquals(7500, (int) 
messagingService.getDroppedMessages().get(verb.toString()));
      }
  
 +    @Test
 +    public void testDCLatency() throws Exception
 +    {
 +        int latency = 100;
 +        ConcurrentHashMap<String, Timer> dcLatency = 
MessagingService.instance().metrics.dcLatency;
 +        dcLatency.clear();
 +
 +        long now = ApproximateTime.currentTimeMillis();
 +        long sentAt = now - latency;
 +        assertNull(dcLatency.get("datacenter1"));
 +        addDCLatency(sentAt, now);
 +        assertNotNull(dcLatency.get("datacenter1"));
 +        assertEquals(1, dcLatency.get("datacenter1").getCount());
 +        long expectedBucket = 
bucketOffsets[Math.abs(Arrays.binarySearch(bucketOffsets, 
TimeUnit.MILLISECONDS.toNanos(latency))) - 1];
 +        assertEquals(expectedBucket, 
dcLatency.get("datacenter1").getSnapshot().getMax());
 +    }
 +
 +    @Test
 +    public void testNegativeDCLatency() throws Exception
 +    {
 +        // if clocks are off should just not track anything
 +        int latency = -100;
 +
 +        ConcurrentHashMap<String, Timer> dcLatency = 
MessagingService.instance().metrics.dcLatency;
 +        dcLatency.clear();
 +
 +        long now = ApproximateTime.currentTimeMillis();
 +        long sentAt = now - latency;
 +
 +        assertNull(dcLatency.get("datacenter1"));
 +        addDCLatency(sentAt, now);
 +        assertNull(dcLatency.get("datacenter1"));
 +    }
 +
 +    @Test
 +    public void 
testUpdatesBackPressureOnSendWhenEnabledAndWithSupportedCallback() throws 
UnknownHostException
 +    {
 +        MockBackPressureStrategy.MockBackPressureState backPressureState = 
(MockBackPressureStrategy.MockBackPressureState) 
messagingService.getConnectionPool(InetAddress.getByName("127.0.0.2")).getBackPressureState();
 +        IAsyncCallback bpCallback = new BackPressureCallback();
 +        IAsyncCallback noCallback = new NoBackPressureCallback();
 +        MessageOut<?> ignored = null;
 +
 +        DatabaseDescriptor.setBackPressureEnabled(true);
 +        
messagingService.updateBackPressureOnSend(InetAddress.getByName("127.0.0.2"), 
noCallback, ignored);
 +        assertFalse(backPressureState.onSend);
 +
 +        DatabaseDescriptor.setBackPressureEnabled(false);
 +        
messagingService.updateBackPressureOnSend(InetAddress.getByName("127.0.0.2"), 
bpCallback, ignored);
 +        assertFalse(backPressureState.onSend);
 +
 +        DatabaseDescriptor.setBackPressureEnabled(true);
 +        
messagingService.updateBackPressureOnSend(InetAddress.getByName("127.0.0.2"), 
bpCallback, ignored);
 +        assertTrue(backPressureState.onSend);
 +    }
 +
 +    @Test
 +    public void 
testUpdatesBackPressureOnReceiveWhenEnabledAndWithSupportedCallback() throws 
UnknownHostException
 +    {
 +        MockBackPressureStrategy.MockBackPressureState backPressureState = 
(MockBackPressureStrategy.MockBackPressureState) 
messagingService.getConnectionPool(InetAddress.getByName("127.0.0.2")).getBackPressureState();
 +        IAsyncCallback bpCallback = new BackPressureCallback();
 +        IAsyncCallback noCallback = new NoBackPressureCallback();
 +        boolean timeout = false;
 +
 +        DatabaseDescriptor.setBackPressureEnabled(true);
 +        
messagingService.updateBackPressureOnReceive(InetAddress.getByName("127.0.0.2"),
 noCallback, timeout);
 +        assertFalse(backPressureState.onReceive);
 +        assertFalse(backPressureState.onTimeout);
 +
 +        DatabaseDescriptor.setBackPressureEnabled(false);
 +        
messagingService.updateBackPressureOnReceive(InetAddress.getByName("127.0.0.2"),
 bpCallback, timeout);
 +        assertFalse(backPressureState.onReceive);
 +        assertFalse(backPressureState.onTimeout);
 +
 +        DatabaseDescriptor.setBackPressureEnabled(true);
 +        
messagingService.updateBackPressureOnReceive(InetAddress.getByName("127.0.0.2"),
 bpCallback, timeout);
 +        assertTrue(backPressureState.onReceive);
 +        assertFalse(backPressureState.onTimeout);
 +    }
 +
 +    @Test
 +    public void 
testUpdatesBackPressureOnTimeoutWhenEnabledAndWithSupportedCallback() throws 
UnknownHostException
 +    {
 +        MockBackPressureStrategy.MockBackPressureState backPressureState = 
(MockBackPressureStrategy.MockBackPressureState) 
messagingService.getConnectionPool(InetAddress.getByName("127.0.0.2")).getBackPressureState();
 +        IAsyncCallback bpCallback = new BackPressureCallback();
 +        IAsyncCallback noCallback = new NoBackPressureCallback();
 +        boolean timeout = true;
 +
 +        DatabaseDescriptor.setBackPressureEnabled(true);
 +        
messagingService.updateBackPressureOnReceive(InetAddress.getByName("127.0.0.2"),
 noCallback, timeout);
 +        assertFalse(backPressureState.onReceive);
 +        assertFalse(backPressureState.onTimeout);
 +
 +        DatabaseDescriptor.setBackPressureEnabled(false);
 +        
messagingService.updateBackPressureOnReceive(InetAddress.getByName("127.0.0.2"),
 bpCallback, timeout);
 +        assertFalse(backPressureState.onReceive);
 +        assertFalse(backPressureState.onTimeout);
 +
 +        DatabaseDescriptor.setBackPressureEnabled(true);
 +        
messagingService.updateBackPressureOnReceive(InetAddress.getByName("127.0.0.2"),
 bpCallback, timeout);
 +        assertFalse(backPressureState.onReceive);
 +        assertTrue(backPressureState.onTimeout);
 +    }
 +
 +    @Test
 +    public void testAppliesBackPressureWhenEnabled() throws 
UnknownHostException
 +    {
 +        DatabaseDescriptor.setBackPressureEnabled(false);
 +        
messagingService.applyBackPressure(Arrays.asList(InetAddress.getByName("127.0.0.2")),
 ONE_SECOND);
 +        assertFalse(MockBackPressureStrategy.applied);
 +
 +        DatabaseDescriptor.setBackPressureEnabled(true);
 +        
messagingService.applyBackPressure(Arrays.asList(InetAddress.getByName("127.0.0.2")),
 ONE_SECOND);
 +        assertTrue(MockBackPressureStrategy.applied);
 +    }
 +
 +    @Test
 +    public void testDoesntApplyBackPressureToBroadcastAddress() throws 
UnknownHostException
 +    {
 +        DatabaseDescriptor.setBackPressureEnabled(true);
 +        
messagingService.applyBackPressure(Arrays.asList(InetAddress.getByName("127.0.0.1")),
 ONE_SECOND);
 +        assertFalse(MockBackPressureStrategy.applied);
 +    }
 +
 +    private static void addDCLatency(long sentAt, long nowTime) throws 
IOException
 +    {
 +        ByteArrayOutputStream baos = new ByteArrayOutputStream();
 +        try (DataOutputStreamPlus out = new WrappedDataOutputStreamPlus(baos))
 +        {
 +            out.writeInt((int) sentAt);
 +        }
 +        DataInputStreamPlus in = new DataInputStreamPlus(new 
ByteArrayInputStream(baos.toByteArray()));
-         MessageIn.readConstructionTime(InetAddress.getLocalHost(), in, 
nowTime);
++        MessageIn.readConstructionTime(FBUtilities.getLocalAddress(), in, 
nowTime);
 +    }
 +
 +    public static class MockBackPressureStrategy implements 
BackPressureStrategy<MockBackPressureStrategy.MockBackPressureState>
 +    {
 +        public static volatile boolean applied = false;
 +
 +        public MockBackPressureStrategy(Map<String, Object> args)
 +        {
 +        }
 +
 +        @Override
 +        public void apply(Set<MockBackPressureState> states, long timeout, 
TimeUnit unit)
 +        {
 +            if (!Iterables.isEmpty(states))
 +                applied = true;
 +        }
 +
 +        @Override
 +        public MockBackPressureState newState(InetAddress host)
 +        {
 +            return new MockBackPressureState(host);
 +        }
 +
 +        public static class MockBackPressureState implements BackPressureState
 +        {
 +            private final InetAddress host;
 +            public volatile boolean onSend = false;
 +            public volatile boolean onReceive = false;
 +            public volatile boolean onTimeout = false;
 +
 +            private MockBackPressureState(InetAddress host)
 +            {
 +                this.host = host;
 +            }
 +
 +            @Override
 +            public void onMessageSent(MessageOut<?> message)
 +            {
 +                onSend = true;
 +            }
 +
 +            @Override
 +            public void onResponseReceived()
 +            {
 +                onReceive = true;
 +            }
 +
 +            @Override
 +            public void onResponseTimeout()
 +            {
 +                onTimeout = true;
 +            }
 +
 +            @Override
 +            public double getBackPressureRateLimit()
 +            {
 +                throw new UnsupportedOperationException("Not supported yet.");
 +            }
 +
 +            @Override
 +            public InetAddress getHost()
 +            {
 +                return host;
 +            }
 +        }
 +    }
 +
 +    private static class BackPressureCallback implements IAsyncCallback
 +    {
 +        @Override
 +        public boolean supportsBackPressure()
 +        {
 +            return true;
 +        }
 +
 +        @Override
 +        public boolean isLatencyForSnitch()
 +        {
 +            return false;
 +        }
 +
 +        @Override
 +        public void response(MessageIn msg)
 +        {
 +            throw new UnsupportedOperationException("Not supported.");
 +        }
 +    }
 +
 +    private static class NoBackPressureCallback implements IAsyncCallback
 +    {
 +        @Override
 +        public boolean supportsBackPressure()
 +        {
 +            return false;
 +        }
 +
 +        @Override
 +        public boolean isLatencyForSnitch()
 +        {
 +            return false;
 +        }
 +
 +        @Override
 +        public void response(MessageIn msg)
 +        {
 +            throw new UnsupportedOperationException("Not supported.");
 +        }
 +    }
  }
diff --cc 
test/unit/org/apache/cassandra/repair/messages/RepairMessageSerializationsTest.java
index 028d899,5dbed3f..f2dc8c7
--- 
a/test/unit/org/apache/cassandra/repair/messages/RepairMessageSerializationsTest.java
+++ 
b/test/unit/org/apache/cassandra/repair/messages/RepairMessageSerializationsTest.java
@@@ -49,15 -49,14 +49,16 @@@ import org.apache.cassandra.utils.Merkl
  
  public class RepairMessageSerializationsTest
  {
--    private static final int PROTOCOL_VERSION = 
MessagingService.current_version;
      private static final int GC_BEFORE = 1000000;
  
++    private static int protocolVersion;
      private static IPartitioner originalPartitioner;
  
      @BeforeClass
      public static void before()
      {
 +        DatabaseDescriptor.daemonInitialization();
++        protocolVersion = MessagingService.current_version;
          originalPartitioner = 
StorageService.instance.setPartitionerUnsafe(Murmur3Partitioner.instance);
      }
  
@@@ -94,16 -93,16 +95,16 @@@
  
      private <T extends RepairMessage> T serializeRoundTrip(T msg, 
IVersionedSerializer<T> serializer) throws IOException
      {
--        long size = serializer.serializedSize(msg, PROTOCOL_VERSION);
++        long size = serializer.serializedSize(msg, protocolVersion);
  
          ByteBuffer buf = ByteBuffer.allocate((int)size);
          DataOutputPlus out = new DataOutputBufferFixed(buf);
--        serializer.serialize(msg, out, PROTOCOL_VERSION);
++        serializer.serialize(msg, out, protocolVersion);
          Assert.assertEquals(size, buf.position());
  
          buf.flip();
          DataInputPlus in = new DataInputBuffer(buf, false);
--        T deserialized = serializer.deserialize(in, PROTOCOL_VERSION);
++        T deserialized = serializer.deserialize(in, protocolVersion);
          Assert.assertEquals(msg, deserialized);
          Assert.assertEquals(msg.hashCode(), deserialized.hashCode());
          return deserialized;
diff --cc 
test/unit/org/apache/cassandra/schema/TupleTypesRepresentationTest.java
index 7c7cacb,0000000..36cba3c
mode 100644,000000..100644
--- a/test/unit/org/apache/cassandra/schema/TupleTypesRepresentationTest.java
+++ b/test/unit/org/apache/cassandra/schema/TupleTypesRepresentationTest.java
@@@ -1,403 -1,0 +1,404 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.cassandra.schema;
 +
 +import java.io.PrintWriter;
 +import java.io.StringWriter;
 +import java.nio.ByteBuffer;
 +import java.util.Arrays;
 +
 +import org.junit.Ignore;
 +import org.junit.Test;
 +
 +import org.apache.cassandra.config.DatabaseDescriptor;
 +import org.apache.cassandra.cql3.CQL3Type;
 +import org.apache.cassandra.cql3.CQLFragmentParser;
 +import org.apache.cassandra.cql3.CqlParser;
 +import org.apache.cassandra.cql3.FieldIdentifier;
 +import org.apache.cassandra.db.marshal.AbstractType;
 +import org.apache.cassandra.db.marshal.TypeParser;
 +import org.apache.cassandra.db.marshal.UTF8Type;
 +import org.apache.cassandra.db.marshal.UserType;
 +import org.apache.cassandra.utils.ByteBufferUtil;
 +
 +import static org.junit.Assert.assertEquals;
 +
 +/**
 + * Verifies that the string representations of {@link AbstractType} and 
{@link CQL3Type} are as expected and compatible.
 + *
 + * C* 3.0 is known to <em>not</em> enclose a frozen UDT in a "frozen bracket" 
in the {@link AbstractType}.
 + * The string representation of a frozen UDT using the {@link CQL3Type} type 
hierarchy is correct in C* 3.0.
 + */
 +public class TupleTypesRepresentationTest
 +{
 +    static
 +    {
 +        DatabaseDescriptor.toolInitialization();
++        DatabaseDescriptor.applyAddressConfig();
 +    }
 +
 +    private static final String keyspace = "ks";
 +    private static final String mcUdtName = "mc_udt";
 +    private static final ByteBuffer mcUdtNameBytes = 
ByteBufferUtil.bytes(mcUdtName);
 +    private static final String iUdtName = "i_udt";
 +    private static final ByteBuffer iUdtNameBytes = 
ByteBufferUtil.bytes(iUdtName);
 +    private static final String fUdtName = "f_udt";
 +    private static final ByteBuffer fUdtNameBytes = 
ByteBufferUtil.bytes(fUdtName);
 +    private static final String udtField1 = "a";
 +    private static final String udtField2 = "b";
 +    private static final AbstractType<?> udtType1 = UTF8Type.instance;
 +    private static final AbstractType<?> udtType2 = UTF8Type.instance;
 +
 +    private static final Types types = Types.builder()
 +                                            .add(new UserType(keyspace,
 +                                                              mcUdtNameBytes,
 +                                                              
Arrays.asList(new FieldIdentifier(ByteBufferUtil.bytes(udtField1)),
 +                                                                            
new FieldIdentifier(ByteBufferUtil.bytes(udtField2))),
 +                                                              
Arrays.asList(udtType1,
 +                                                                            
udtType2),
 +                                                              true))
 +                                            .add(new UserType(keyspace,
 +                                                              iUdtNameBytes,
 +                                                              
Arrays.asList(new FieldIdentifier(ByteBufferUtil.bytes(udtField1)),
 +                                                                            
new FieldIdentifier(ByteBufferUtil.bytes(udtField2))),
 +                                                              
Arrays.asList(udtType1,
 +                                                                            
udtType2),
 +                                                              true))
 +                                            .add(new UserType(keyspace,
 +                                                              fUdtNameBytes,
 +                                                              
Arrays.asList(new FieldIdentifier(ByteBufferUtil.bytes(udtField1)),
 +                                                                            
new FieldIdentifier(ByteBufferUtil.bytes(udtField2))),
 +                                                              
Arrays.asList(udtType1,
 +                                                                            
udtType2),
 +                                                              true))
 +                                            .build();
 +
 +    static class TypeDef
 +    {
 +        final String typeString;
 +        final String cqlTypeString;
 +        final String droppedCqlTypeString;
 +        final boolean multiCell;
 +        final String cqlValue;
 +
 +        final AbstractType<?> type;
 +        final CQL3Type cqlType;
 +
 +        final AbstractType<?> droppedType;
 +        final CQL3Type droppedCqlType;
 +
 +        TypeDef(String typeString, String cqlTypeString, String 
droppedCqlTypeString, boolean multiCell, String cqlValue)
 +        {
 +            this.typeString = typeString;
 +            this.cqlTypeString = cqlTypeString;
 +            this.droppedCqlTypeString = droppedCqlTypeString;
 +            this.multiCell = multiCell;
 +            this.cqlValue = cqlValue;
 +
 +            cqlType = CQLFragmentParser.parseAny(CqlParser::comparatorType, 
cqlTypeString, "non-dropped type")
 +                                       .prepare(keyspace, types);
 +            type = cqlType.getType();
 +
 +            droppedCqlType = 
CQLFragmentParser.parseAny(CqlParser::comparatorType, droppedCqlTypeString, 
"dropped type")
 +                                              .prepare(keyspace, types);
 +            // NOTE: TupleType is *always* parsed as frozen, but never 
toString()'d with the surrounding FrozenType
 +            droppedType = droppedCqlType.getType();
 +        }
 +
 +        @Override
 +        public String toString()
 +        {
 +            return "TypeDef{\n" +
 +                   "typeString='" + typeString + "'\n" +
 +                   ", type=" + type + '\n' +
 +                   ", cqlTypeString='" + cqlTypeString + "'\n" +
 +                   ", cqlType=" + cqlType + '\n' +
 +                   ", droppedType=" + droppedType + '\n' +
 +                   ", droppedCqlTypeString='" + droppedCqlTypeString + "'\n" +
 +                   ", droppedCqlType=" + droppedCqlType + '\n' +
 +                   '}';
 +        }
 +    }
 +
 +    private static final TypeDef text = new TypeDef(
 +            "org.apache.cassandra.db.marshal.UTF8Type",
 +            "text",
 +            "text",
 +            false,
 +            "'foobar'");
 +
 +    private static final TypeDef tuple_text__text_ = new TypeDef(
 +            
"org.apache.cassandra.db.marshal.TupleType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type)",
 +            "tuple<text, text>",
 +            "frozen<tuple<text, text>>",
 +            false,
 +            "('foo','bar')");
 +
 +    // Currently, dropped non-frozen-UDT columns are recorded as 
frozen<tuple<...>>, which is technically wrong
 +    //private static final TypeDef mc_udt = new TypeDef(
 +    //        
"org.apache.cassandra.db.marshal.UserType(ks,6d635f756474,61:org.apache.cassandra.db.marshal.UTF8Type,62:org.apache.cassandra.db.marshal.UTF8Type)",
 +    //        "mc_udt",
 +    //        "tuple<text, text>",
 +    //        true,
 +    //        "{a:'foo',b:'bar'}");
 +
 +    private static final TypeDef frozen_f_udt_ = new TypeDef(
 +            
"org.apache.cassandra.db.marshal.FrozenType(org.apache.cassandra.db.marshal.UserType(ks,665f756474,61:org.apache.cassandra.db.marshal.UTF8Type,62:org.apache.cassandra.db.marshal.UTF8Type))",
 +            "frozen<f_udt>",
 +            "frozen<tuple<text, text>>",
 +            false,
 +            "{a:'foo',b:'bar'}");
 +
 +    private static final TypeDef list_text_ = new TypeDef(
 +            
"org.apache.cassandra.db.marshal.ListType(org.apache.cassandra.db.marshal.UTF8Type)",
 +            "list<text>",
 +            "list<text>",
 +            true,
 +            "['foobar']");
 +
 +    private static final TypeDef frozen_list_text__ = new TypeDef(
 +            
"org.apache.cassandra.db.marshal.FrozenType(org.apache.cassandra.db.marshal.ListType(org.apache.cassandra.db.marshal.UTF8Type))",
 +            "frozen<list<text>>",
 +            "frozen<list<text>>",
 +            true,
 +            "['foobar']");
 +
 +    private static final TypeDef set_text_ = new TypeDef(
 +            
"org.apache.cassandra.db.marshal.SetType(org.apache.cassandra.db.marshal.UTF8Type)",
 +            "set<text>",
 +            "set<text>",
 +            true,
 +            "{'foobar'}");
 +
 +    private static final TypeDef frozen_set_text__ = new TypeDef(
 +            
"org.apache.cassandra.db.marshal.FrozenType(org.apache.cassandra.db.marshal.SetType(org.apache.cassandra.db.marshal.UTF8Type))",
 +            "frozen<set<text>>",
 +            "frozen<set<text>>",
 +            true,
 +            "{'foobar'}");
 +
 +    private static final TypeDef map_text__text_ = new TypeDef(
 +            
"org.apache.cassandra.db.marshal.MapType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type)",
 +            "map<text, text>",
 +            "map<text, text>",
 +            true,
 +            "{'foo':'bar'}");
 +
 +    private static final TypeDef frozen_map_text__text__ = new TypeDef(
 +            
"org.apache.cassandra.db.marshal.FrozenType(org.apache.cassandra.db.marshal.MapType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type))",
 +            "frozen<map<text, text>>",
 +            "frozen<map<text, text>>",
 +            true,
 +            "{'foo':'bar'}");
 +
 +    private static final TypeDef list_frozen_tuple_text__text___ = new 
TypeDef(
 +            // in consequence, this should be:
 +            // 
"org.apache.cassandra.db.marshal.ListType(org.apache.cassandra.db.marshal.FrozenType(org.apache.cassandra.db.marshal.TupleType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type)))",
 +            
"org.apache.cassandra.db.marshal.ListType(org.apache.cassandra.db.marshal.TupleType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type))",
 +            "list<frozen<tuple<text, text>>>",
 +            "list<frozen<tuple<text, text>>>",
 +            true,
 +            "[('foo','bar')]");
 +
 +    private static final TypeDef frozen_list_tuple_text__text___ = new 
TypeDef(
 +            
"org.apache.cassandra.db.marshal.FrozenType(org.apache.cassandra.db.marshal.ListType(org.apache.cassandra.db.marshal.TupleType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type)))",
 +            "frozen<list<frozen<tuple<text, text>>>>",
 +            "frozen<list<frozen<tuple<text, text>>>>",
 +            true,
 +            "[('foo','bar')]");
 +
 +    private static final TypeDef set_frozen_tuple_text__text___ = new TypeDef(
 +            // in consequence, this should be:
 +            // 
"org.apache.cassandra.db.marshal.SetType(org.apache.cassandra.db.marshal.FrozenType(org.apache.cassandra.db.marshal.TupleType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type)))",
 +            
"org.apache.cassandra.db.marshal.SetType(org.apache.cassandra.db.marshal.TupleType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type))",
 +            "set<frozen<tuple<text, text>>>",
 +            "set<frozen<tuple<text, text>>>",
 +            true,
 +            "{('foo','bar')}");
 +
 +    private static final TypeDef frozen_set_tuple_text__text___ = new TypeDef(
 +            
"org.apache.cassandra.db.marshal.FrozenType(org.apache.cassandra.db.marshal.SetType(org.apache.cassandra.db.marshal.TupleType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type)))",
 +            "frozen<set<frozen<tuple<text, text>>>>",
 +            "frozen<set<frozen<tuple<text, text>>>>",
 +            true,
 +            "{('foo','bar')}");
 +
 +    private static final TypeDef map_text__frozen_tuple_text__text___ = new 
TypeDef(
 +            // in consequence, this should be:
 +            // 
"org.apache.cassandra.db.marshal.MapType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.FrozenType(org.apache.cassandra.db.marshal.TupleType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type)))",
 +            
"org.apache.cassandra.db.marshal.MapType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.TupleType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type))",
 +            "map<text, frozen<tuple<text, text>>>",
 +            "map<text, frozen<tuple<text, text>>>",
 +            true,
 +            "{'foobar':('foo','bar')}");
 +
 +    private static final TypeDef frozen_map_text__tuple_text__text___ = new 
TypeDef(
 +            
"org.apache.cassandra.db.marshal.FrozenType(org.apache.cassandra.db.marshal.MapType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.TupleType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UTF8Type)))",
 +            "frozen<map<text, frozen<tuple<text, text>>>>",
 +            "frozen<map<text, frozen<tuple<text, text>>>>",
 +            true,
 +            "{'foobar':('foo','bar')}");
 +
 +    private static final TypeDef list_frozen_i_udt__ = new TypeDef(
 +            
"org.apache.cassandra.db.marshal.ListType(org.apache.cassandra.db.marshal.FrozenType(org.apache.cassandra.db.marshal.UserType(ks,695f756474,61:org.apache.cassandra.db.marshal.UTF8Type,62:org.apache.cassandra.db.marshal.UTF8Type)))",
 +            "list<frozen<i_udt>>",
 +            "list<frozen<tuple<text, text>>>",
 +            true,
 +            "[{a:'foo',b:'bar'}]");
 +
 +    private static final TypeDef frozen_list_i_udt__ = new TypeDef(
 +            
"org.apache.cassandra.db.marshal.FrozenType(org.apache.cassandra.db.marshal.ListType(org.apache.cassandra.db.marshal.UserType(ks,695f756474,61:org.apache.cassandra.db.marshal.UTF8Type,62:org.apache.cassandra.db.marshal.UTF8Type)))",
 +            "frozen<list<frozen<i_udt>>>",
 +            "frozen<list<frozen<tuple<text, text>>>>",
 +            true,
 +            "[{a:'foo',b:'bar'}]");
 +
 +    private static final TypeDef set_frozen_i_udt__ = new TypeDef(
 +            
"org.apache.cassandra.db.marshal.SetType(org.apache.cassandra.db.marshal.FrozenType(org.apache.cassandra.db.marshal.UserType(ks,695f756474,61:org.apache.cassandra.db.marshal.UTF8Type,62:org.apache.cassandra.db.marshal.UTF8Type)))",
 +            "set<frozen<i_udt>>",
 +            "set<frozen<tuple<text, text>>>",
 +            true,
 +            "{{a:'foo',b:'bar'}}");
 +
 +    private static final TypeDef frozen_set_i_udt__ = new TypeDef(
 +            
"org.apache.cassandra.db.marshal.FrozenType(org.apache.cassandra.db.marshal.SetType(org.apache.cassandra.db.marshal.UserType(ks,695f756474,61:org.apache.cassandra.db.marshal.UTF8Type,62:org.apache.cassandra.db.marshal.UTF8Type)))",
 +            "frozen<set<frozen<i_udt>>>",
 +            "frozen<set<frozen<tuple<text, text>>>>",
 +            true,
 +            "{{a:'foo',b:'bar'}}");
 +
 +    private static final TypeDef map_text__frozen_i_udt__ = new TypeDef(
 +            
"org.apache.cassandra.db.marshal.MapType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.FrozenType(org.apache.cassandra.db.marshal.UserType(ks,695f756474,61:org.apache.cassandra.db.marshal.UTF8Type,62:org.apache.cassandra.db.marshal.UTF8Type)))",
 +            "map<text, frozen<i_udt>>",
 +            "map<text, frozen<tuple<text, text>>>",
 +            true,
 +            "{'foobar':{a:'foo',b:'bar'}}");
 +
 +    private static final TypeDef frozen_map_text__i_udt__ = new TypeDef(
 +            
"org.apache.cassandra.db.marshal.FrozenType(org.apache.cassandra.db.marshal.MapType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.UserType(ks,695f756474,61:org.apache.cassandra.db.marshal.UTF8Type,62:org.apache.cassandra.db.marshal.UTF8Type)))",
 +            "frozen<map<text, frozen<i_udt>>>",
 +            "frozen<map<text, frozen<tuple<text, text>>>>",
 +            true,
 +            "{'foobar':{a:'foo',b:'bar'}}");
 +
 +    private static final TypeDef[] allTypes = {
 +            text,
 +            tuple_text__text_,
 +            frozen_f_udt_,
 +            list_text_,
 +            frozen_list_text__,
 +            set_text_,
 +            frozen_set_text__,
 +            map_text__text_,
 +            frozen_map_text__text__,
 +            list_frozen_tuple_text__text___,
 +            frozen_list_tuple_text__text___,
 +            set_frozen_tuple_text__text___,
 +            frozen_set_tuple_text__text___,
 +            map_text__frozen_tuple_text__text___,
 +            frozen_map_text__tuple_text__text___,
 +            list_frozen_i_udt__,
 +            frozen_list_i_udt__,
 +            set_frozen_i_udt__,
 +            frozen_set_i_udt__,
 +            map_text__frozen_i_udt__,
 +            frozen_map_text__i_udt__,
 +            };
 +
 +    @Ignore("Only used to ")
 +    @Test
 +    public void generateCqlStatements() throws InterruptedException
 +    {
 +        StringWriter sw = new StringWriter();
 +        PrintWriter pw = new PrintWriter(sw);
 +
 +        pw.println("DROP TABLE sstableheaderfixtest;");
 +        pw.println();
 +        pw.println("CREATE TYPE i_udt (a text, b text);");
 +        pw.println("CREATE TYPE f_udt (a text, b text);");
 +        pw.println("CREATE TYPE mc_udt (a text, b text);");
 +        pw.println();
 +        pw.println("CREATE TABLE sstableheaderfixtest (");
 +        pw.print("  id int PRIMARY KEY");
 +        for (TypeDef typeDef : allTypes)
 +        {
 +            String cname = typeDef.cqlTypeString.replaceAll("[, <>]", "_");
 +            pw.printf(",%n  %s %s", cname, typeDef.cqlTypeString);
 +        }
 +        pw.println(");");
 +        pw.println();
 +
 +        pw.printf("INSERT INTO sstableheaderfixtest%n  (id");
 +        for (TypeDef typeDef : allTypes)
 +        {
 +            String cname = typeDef.cqlTypeString.replaceAll("[, <>]", "_");
 +            pw.printf(",%n    %s", cname);
 +        }
 +        pw.printf(")%n  VALUES%n  (1");
 +        for (TypeDef typeDef : allTypes)
 +        {
 +            pw.printf(",%n    %s", typeDef.cqlValue);
 +        }
 +        pw.println(");");
 +
 +        pw.println();
 +        pw.println();
 +        pw.println("-- Run tools/bin/sstablemetadata data/data/<keyspace>/<table>/*-Data.db to show the sstable");
 +        pw.println("-- serialization-header (types not shown in the C* 3.0 variant of the sstablemetadata tool)");
 +
 +        sw.flush();
 +
 +        System.out.println(sw.toString());
 +
 +        Thread.sleep(1000);
 +    }
 +
 +    @Test
 +    public void verifyTypes()
 +    {
 +        AssertionError master = null;
 +        for (TypeDef typeDef : allTypes)
 +        {
 +            try
 +            {
 +                assertEquals(typeDef.toString() + "\n typeString vs type\n", typeDef.typeString, typeDef.type.toString());
 +                assertEquals(typeDef.toString() + "\n typeString vs cqlType.getType()\n", typeDef.typeString, typeDef.cqlType.getType().toString());
 +                AbstractType<?> expanded = SchemaKeyspace.expandUserTypes(typeDef.type);
 +                CQL3Type expandedCQL = expanded.asCQL3Type();
 +                // Note: cannot include this commented-out assertion, because the parsed CQL3Type instance for
 +                // 'frozen<list<tuple<text, text>>>' returns 'frozen<list<frozen<tuple<text, text>>>>' via its CQL3Type.toString()
 +                // implementation.
 +                assertEquals(typeDef.toString() + "\n droppedCqlType\n", typeDef.droppedCqlType, expandedCQL);
 +                assertEquals(typeDef.toString() + "\n droppedCqlTypeString\n", typeDef.droppedCqlTypeString, expandedCQL.toString());
 +                assertEquals(typeDef.toString() + "\n multiCell\n", typeDef.type.isMultiCell(), typeDef.droppedType.isMultiCell());
 +
 +                AbstractType<?> parsedType = TypeParser.parse(typeDef.typeString);
 +                assertEquals(typeDef.toString(), typeDef.typeString, parsedType.toString());
 +            }
 +            catch (AssertionError ae)
 +            {
 +                if (master == null)
 +                    master = ae;
 +                else
 +                    master.addSuppressed(ae);
 +            }
 +        }
 +        if (master != null)
 +            throw master;
 +    }
 +}
diff --cc test/unit/org/apache/cassandra/security/SSLFactoryTest.java
index b3510bd,ca18073..ec0c810
--- a/test/unit/org/apache/cassandra/security/SSLFactoryTest.java
+++ b/test/unit/org/apache/cassandra/security/SSLFactoryTest.java
@@@ -29,12 -28,13 +28,20 @@@ import com.google.common.base.Predicate
  import com.google.common.collect.Iterables;
  import com.google.common.collect.Lists;
  
++import org.apache.cassandra.config.DatabaseDescriptor;
  import org.apache.cassandra.config.EncryptionOptions;
  import org.apache.cassandra.config.EncryptionOptions.ServerEncryptionOptions;
+ import org.apache.cassandra.utils.FBUtilities;
++import org.junit.BeforeClass;
  import org.junit.Test;
  
  public class SSLFactoryTest
  {
++    @BeforeClass
++    public static void beforeClass()
++    {
++        DatabaseDescriptor.daemonInitialization();
++    }
  
      @Test
      public void testFilterCipherSuites()
diff --cc test/unit/org/apache/cassandra/streaming/SessionInfoTest.java
index 80ef29d,80ef29d..37a6e41
--- a/test/unit/org/apache/cassandra/streaming/SessionInfoTest.java
+++ b/test/unit/org/apache/cassandra/streaming/SessionInfoTest.java
@@@ -25,10 -25,10 +25,18 @@@ import java.util.UUID
  
  import org.junit.Test;
  
++import org.apache.cassandra.config.DatabaseDescriptor;
  import org.apache.cassandra.utils.FBUtilities;
++import org.junit.BeforeClass;
  
  public class SessionInfoTest
  {
++    @BeforeClass
++    public static void beforeClass()
++    {
++        DatabaseDescriptor.daemonInitialization();
++    }
++
      /**
       * Test if total numbers are collect
       */
diff --cc test/unit/org/apache/cassandra/tools/CompactionStressTest.java
index c8b0b97,0000000..cdf8ac1
mode 100644,000000..100644
--- a/test/unit/org/apache/cassandra/tools/CompactionStressTest.java
+++ b/test/unit/org/apache/cassandra/tools/CompactionStressTest.java
@@@ -1,60 -1,0 +1,67 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.cassandra.tools;
 +
 +import java.io.File;
 +
++import org.junit.BeforeClass;
 +import org.junit.Test;
 +import org.junit.runner.RunWith;
 +
 +import org.apache.cassandra.OrderedJUnit4ClassRunner;
 +
 +@RunWith(OrderedJUnit4ClassRunner.class)
 +public class CompactionStressTest extends ToolsTester
 +{
++    @BeforeClass
++    public static void setupTester()
++    {
++        // override ToolsTester.setupTester() to avoid `DatabaseDescriptor.toolInitialization()`
++    }
++
 +    @Test
 +    public void testNoArgs()
 +    {
 +        runTool(0, "org.apache.cassandra.stress.CompactionStress");
 +    }
 +
 +    @Test
 +    public void testWriteAndCompact()
 +    {
 +        ClassLoader classLoader = getClass().getClassLoader();
 +        File file = new File(classLoader.getResource("blogpost.yaml").getFile());
 +        String profileFile = file.getAbsolutePath();
 +
 +        runTool(0,
 +                "org.apache.cassandra.stress.CompactionStress",
 +                "write",
 +                "-d", "build/test/cassandra",
 +                "-g", "0",
 +                "-p", profileFile,
 +                "-t", "4");
 +
 +        runTool(0,
 +                "org.apache.cassandra.stress.CompactionStress",
 +                "compact",
 +                "-d", "build/test/cassandra",
 +                "-p", profileFile,
 +                "-t", "4");
 +    }
 +
 +}
diff --cc test/unit/org/apache/cassandra/tools/ToolsTester.java
index ead4e31,0000000..a690ca7
mode 100644,000000..100644
--- a/test/unit/org/apache/cassandra/tools/ToolsTester.java
+++ b/test/unit/org/apache/cassandra/tools/ToolsTester.java
@@@ -1,293 -1,0 +1,298 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.cassandra.tools;
 +
 +import java.io.File;
 +import java.io.IOException;
 +import java.lang.management.ManagementFactory;
 +import java.lang.management.ThreadInfo;
 +import java.lang.management.ThreadMXBean;
 +import java.lang.reflect.InvocationTargetException;
 +import java.lang.reflect.Method;
 +import java.security.Permission;
 +import java.util.Arrays;
 +import java.util.Collections;
 +import java.util.List;
 +import java.util.Set;
 +import java.util.regex.Pattern;
 +import java.util.stream.Collectors;
 +
++import org.apache.cassandra.config.DatabaseDescriptor;
++
 +import org.apache.commons.io.FileUtils;
 +import org.junit.BeforeClass;
 +
 +import org.slf4j.LoggerFactory;
 +
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertFalse;
 +import static org.junit.Assert.assertTrue;
 +import static org.junit.Assert.fail;
 +
 +/**
 + * Base unit test class for standalone tools
 + */
 +public abstract class ToolsTester
 +{
 +    private static List<ThreadInfo> initialThreads;
 +
 +    static final String[] EXPECTED_THREADS_WITH_SCHEMA = {
 +    "PerDiskMemtableFlushWriter_0:[1-9]",
 +    "MemtablePostFlush:[1-9]",
 +    "MemtableFlushWriter:[1-9]",
 +    "MemtableReclaimMemory:[1-9]",
 +    };
 +    static final String[] OPTIONAL_THREADS_WITH_SCHEMA = {
 +    "ScheduledTasks:[1-9]",
 +    "OptionalTasks:[1-9]",
 +    "Reference-Reaper:[1-9]",
 +    "LocalPool-Cleaner:[1-9]",
 +    "CacheCleanupExecutor:[1-9]",
 +    "CompactionExecutor:[1-9]",
 +    "ValidationExecutor:[1-9]",
 +    "NonPeriodicTasks:[1-9]",
 +    "Sampler:[1-9]",
 +    "SecondaryIndexManagement:[1-9]",
 +    "Strong-Reference-Leak-Detector:[1-9]",
 +    "Background_Reporter:[1-9]",
 +    "EXPIRING-MAP-REAPER:[1-9]",
 +    };
 +
 +    public void assertNoUnexpectedThreadsStarted(String[] expectedThreadNames, String[] optionalThreadNames)
 +    {
 +        ThreadMXBean threads = ManagementFactory.getThreadMXBean();
 +
 +        Set<String> initial = initialThreads
 +                              .stream()
 +                              .map(ThreadInfo::getThreadName)
 +                              .collect(Collectors.toSet());
 +
 +        Set<String> current = Arrays.stream(threads.getThreadInfo(threads.getAllThreadIds()))
 +                                    .map(ThreadInfo::getThreadName)
 +                                    .collect(Collectors.toSet());
 +
 +        List<Pattern> expected = expectedThreadNames != null
 +                                 ? Arrays.stream(expectedThreadNames).map(Pattern::compile).collect(Collectors.toList())
 +                                 : Collections.emptyList();
 +
 +        List<Pattern> optional = optionalThreadNames != null
 +                                 ? Arrays.stream(optionalThreadNames).map(Pattern::compile).collect(Collectors.toList())
 +                                 : Collections.emptyList();
 +
 +        current.removeAll(initial);
 +
 +        List<Pattern> notPresent = expected.stream()
 +                                           .filter(threadNamePattern -> !current.stream().anyMatch(threadName -> threadNamePattern.matcher(threadName).matches()))
 +                                           .collect(Collectors.toList());
 +
 +        Set<String> remain = current.stream()
 +                                    .filter(threadName -> expected.stream().anyMatch(pattern -> pattern.matcher(threadName).matches()))
 +                                    .filter(threadName -> optional.stream().anyMatch(pattern -> pattern.matcher(threadName).matches()))
 +                                    .collect(Collectors.toSet());
 +
 +        if (!current.isEmpty())
 +            System.err.println("Unexpected thread names: " + remain);
 +        if (!notPresent.isEmpty())
 +            System.err.println("Mandatory thread missing: " + notPresent);
 +
 +        assertTrue("Wrong thread status", remain.isEmpty() && notPresent.isEmpty());
 +    }
 +
 +    public void assertSchemaNotLoaded()
 +    {
 +        assertClassNotLoaded("org.apache.cassandra.config.Schema");
 +    }
 +
 +    public void assertSchemaLoaded()
 +    {
 +        assertClassLoaded("org.apache.cassandra.config.Schema");
 +    }
 +
 +    public void assertKeyspaceNotLoaded()
 +    {
 +        assertClassNotLoaded("org.apache.cassandra.db.Keyspace");
 +    }
 +
 +    public void assertKeyspaceLoaded()
 +    {
 +        assertClassLoaded("org.apache.cassandra.db.Keyspace");
 +    }
 +
 +    public void assertServerNotLoaded()
 +    {
 +        assertClassNotLoaded("org.apache.cassandra.transport.Server");
 +    }
 +
 +    public void assertSystemKSNotLoaded()
 +    {
 +        assertClassNotLoaded("org.apache.cassandra.db.SystemKeyspace");
 +    }
 +
 +    public void assertCLSMNotLoaded()
 +    {
 +        assertClassNotLoaded("org.apache.cassandra.db.commitlog.CommitLogSegmentManager");
 +    }
 +
 +    public void assertClassLoaded(String clazz)
 +    {
 +        assertClassLoadedStatus(clazz, true);
 +    }
 +
 +    public void assertClassNotLoaded(String clazz)
 +    {
 +        assertClassLoadedStatus(clazz, false);
 +    }
 +
 +    private void assertClassLoadedStatus(String clazz, boolean expected)
 +    {
 +        for (ClassLoader cl = Thread.currentThread().getContextClassLoader(); cl != null; cl = cl.getParent())
 +        {
 +            try
 +            {
 +                Method mFindLoadedClass = ClassLoader.class.getDeclaredMethod("findLoadedClass", String.class);
 +                mFindLoadedClass.setAccessible(true);
 +                boolean loaded = mFindLoadedClass.invoke(cl, clazz) != null;
 +
 +                if (expected)
 +                {
 +                    if (loaded)
 +                        return;
 +                }
 +                else
 +                    assertFalse(clazz + " has been loaded", loaded);
 +            }
 +            catch (Exception e)
 +            {
 +                throw new RuntimeException(e);
 +            }
 +        }
 +
 +        if (expected)
 +            fail(clazz + " has not been loaded");
 +    }
 +
 +    public void runTool(int expectedExitCode, String clazz, String... args)
 +    {
 +        try
 +        {
 +            // install security manager to get informed about the exit-code
 +            System.setSecurityManager(new SecurityManager()
 +            {
 +                public void checkExit(int status)
 +                {
 +                    throw new SystemExitException(status);
 +                }
 +
 +                public void checkPermission(Permission perm)
 +                {
 +                }
 +
 +                public void checkPermission(Permission perm, Object context)
 +                {
 +                }
 +            });
 +
 +            try
 +            {
 +                Class.forName(clazz).getDeclaredMethod("main", String[].class).invoke(null, (Object) args);
 +            }
 +            catch (InvocationTargetException e)
 +            {
 +                Throwable cause = e.getCause();
 +                if (cause instanceof Error)
 +                    throw (Error) cause;
 +                if (cause instanceof RuntimeException)
 +                    throw (RuntimeException) cause;
 +                throw e;
 +            }
 +
 +            assertEquals("Unexpected exit code", expectedExitCode, 0);
 +        }
 +        catch (SystemExitException e)
 +        {
 +            assertEquals("Unexpected exit code", expectedExitCode, e.status);
 +        }
 +        catch (InvocationTargetException e)
 +        {
 +            throw new RuntimeException(e.getTargetException());
 +        }
 +        catch (Exception e)
 +        {
 +            throw new RuntimeException(e);
 +        }
 +        finally
 +        {
 +            // uninstall security manager
 +            System.setSecurityManager(null);
 +        }
 +    }
 +
 +    @BeforeClass
 +    public static void setupTester()
 +    {
 +        System.setProperty("cassandra.partitioner", "org.apache.cassandra.dht.Murmur3Partitioner");
 +
 +        // may start an async appender
 +        LoggerFactory.getLogger(ToolsTester.class);
 +
 +        ThreadMXBean threads = ManagementFactory.getThreadMXBean();
 +        initialThreads = Arrays.asList(threads.getThreadInfo(threads.getAllThreadIds()));
++
++        DatabaseDescriptor.toolInitialization();
++        DatabaseDescriptor.applyAddressConfig();
 +    }
 +
 +    public static class SystemExitException extends Error
 +    {
 +        public final int status;
 +
 +        public SystemExitException(int status)
 +        {
 +            this.status = status;
 +        }
 +    }
 +
 +    public static String findOneSSTable(String ks, String cf) throws IOException
 +    {
 +        File cfDir = sstableDir(ks, cf);
 +        File[] sstableFiles = cfDir.listFiles((file) -> file.isFile() && file.getName().endsWith("-Data.db"));
 +        return sstableFiles[0].getAbsolutePath();
 +    }
 +
 +    public static String sstableDirName(String ks, String cf) throws IOException
 +    {
 +        return sstableDir(ks, cf).getAbsolutePath();
 +    }
 +
 +    public static File sstableDir(String ks, String cf) throws IOException
 +    {
 +        File dataDir = copySSTables();
 +        File ksDir = new File(dataDir, ks);
 +        File[] cfDirs = ksDir.listFiles((dir, name) -> cf.equals(name) || name.startsWith(cf + '-'));
 +        return cfDirs[0];
 +    }
 +
 +    public static File copySSTables() throws IOException
 +    {
 +        File dataDir = new File("build/test/cassandra/data");
 +        File srcDir = new File("test/data/legacy-sstables/ma");
 +        FileUtils.copyDirectory(new File(srcDir, "legacy_tables"), new File(dataDir, "legacy_sstables"));
 +        return dataDir;
 +    }
 +}
diff --cc test/unit/org/apache/cassandra/triggers/TriggersTest.java
index 165a7fe,70e040b..5f2a553
--- a/test/unit/org/apache/cassandra/triggers/TriggersTest.java
+++ b/test/unit/org/apache/cassandra/triggers/TriggersTest.java
@@@ -17,7 -17,7 +17,6 @@@
   */
  package org.apache.cassandra.triggers;
  
- import java.net.InetAddress;
 -import java.nio.ByteBuffer;
  import java.util.Collection;
  import java.util.Collections;
  
diff --cc test/unit/org/apache/cassandra/utils/MerkleTreeTest.java
index 64aea24,64aea24..fb517d7
--- a/test/unit/org/apache/cassandra/utils/MerkleTreeTest.java
+++ b/test/unit/org/apache/cassandra/utils/MerkleTreeTest.java
@@@ -24,6 -24,6 +24,7 @@@ import java.util.*
  import com.google.common.collect.Lists;
  
  import org.junit.Before;
++import org.junit.BeforeClass;
  import org.junit.Test;
  import org.apache.cassandra.config.DatabaseDescriptor;
  import org.apache.cassandra.dht.IPartitioner;
@@@ -61,6 -61,6 +62,12 @@@ public class MerkleTreeTes
          return new Range<>(partitioner.getMinimumToken(), partitioner.getMinimumToken());
      }
  
++    @BeforeClass
++    public static void beforeClass()
++    {
++        DatabaseDescriptor.daemonInitialization();
++    }
++
      @Before
      public void clear()
      {
diff --cc test/unit/org/apache/cassandra/utils/TopKSamplerTest.java
index 42aef0c,bb6e3a8..d1fdf47
--- a/test/unit/org/apache/cassandra/utils/TopKSamplerTest.java
+++ b/test/unit/org/apache/cassandra/utils/TopKSamplerTest.java
@@@ -29,17 -29,16 +29,25 @@@ import java.util.concurrent.atomic.Atom
  
  import com.google.common.collect.Maps;
  import com.google.common.util.concurrent.Uninterruptibles;
++import org.junit.BeforeClass;
  import org.junit.Test;
  
  import com.clearspring.analytics.hash.MurmurHash;
  import com.clearspring.analytics.stream.Counter;
  import junit.framework.Assert;
 +import org.apache.cassandra.concurrent.NamedThreadFactory;
++import org.apache.cassandra.config.DatabaseDescriptor;
  import org.apache.cassandra.utils.TopKSampler.SamplerResult;
  
  public class TopKSamplerTest
  {
  
++    @BeforeClass
++    public static void beforeClass()
++    {
++        DatabaseDescriptor.daemonInitialization();
++    }
++
      @Test
      public void testSamplerSingleInsertionsEqualMulti() throws TimeoutException
      {


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to