http://git-wip-us.apache.org/repos/asf/accumulo/blob/a4afd1bf/test/src/main/java/org/apache/accumulo/test/functional/ConfigurableMacBase.java ---------------------------------------------------------------------- diff --cc test/src/main/java/org/apache/accumulo/test/functional/ConfigurableMacBase.java index 71777bf,0000000..cce906f mode 100644,000000..100644 --- a/test/src/main/java/org/apache/accumulo/test/functional/ConfigurableMacBase.java +++ b/test/src/main/java/org/apache/accumulo/test/functional/ConfigurableMacBase.java @@@ -1,188 -1,0 +1,188 @@@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.accumulo.test.functional; + +import static org.junit.Assert.assertTrue; + +import java.io.BufferedOutputStream; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.util.Map; + +import org.apache.accumulo.core.client.AccumuloException; +import org.apache.accumulo.core.client.AccumuloSecurityException; +import org.apache.accumulo.core.client.ClientConfiguration; +import org.apache.accumulo.core.client.Connector; +import org.apache.accumulo.core.client.Instance; +import org.apache.accumulo.core.client.ZooKeeperInstance; +import org.apache.accumulo.core.client.security.tokens.PasswordToken; +import org.apache.accumulo.core.conf.Property; +import org.apache.accumulo.core.util.MonitorUtil; +import org.apache.accumulo.harness.AccumuloClusterHarness; +import org.apache.accumulo.harness.AccumuloITBase; +import org.apache.accumulo.minicluster.MiniAccumuloCluster; +import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl; +import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl; +import org.apache.accumulo.minicluster.impl.ZooKeeperBindException; - import org.apache.accumulo.test.categories.MiniClusterOnlyTest; ++import org.apache.accumulo.test.categories.MiniClusterOnlyTests; +import org.apache.accumulo.test.util.CertUtils; +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.zookeeper.KeeperException; +import org.junit.After; +import org.junit.Before; +import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * General Integration-Test base class that provides access to a {@link MiniAccumuloCluster} for testing. Tests using these typically do very disruptive things + * to the instance, and require specific configuration. Most tests don't need this level of control and should extend {@link AccumuloClusterHarness} instead. 
+ */ - @Category(MiniClusterOnlyTest.class) ++@Category(MiniClusterOnlyTests.class) +public class ConfigurableMacBase extends AccumuloITBase { + public static final Logger log = LoggerFactory.getLogger(ConfigurableMacBase.class); + + protected MiniAccumuloClusterImpl cluster; + + protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {} + + protected void beforeClusterStart(MiniAccumuloConfigImpl cfg) throws Exception {} + + protected static final String ROOT_PASSWORD = "testRootPassword1"; + + public static void configureForEnvironment(MiniAccumuloConfigImpl cfg, Class<?> testClass, File folder) { + if ("true".equals(System.getProperty("org.apache.accumulo.test.functional.useSslForIT"))) { + configureForSsl(cfg, folder); + } + if ("true".equals(System.getProperty("org.apache.accumulo.test.functional.useCredProviderForIT"))) { + cfg.setUseCredentialProvider(true); + } + } + + protected static void configureForSsl(MiniAccumuloConfigImpl cfg, File sslDir) { + Map<String,String> siteConfig = cfg.getSiteConfig(); + if ("true".equals(siteConfig.get(Property.INSTANCE_RPC_SSL_ENABLED.getKey()))) { + // already enabled; don't mess with it + return; + } + + // create parent directories, and ensure sslDir is empty + assertTrue(sslDir.mkdirs() || sslDir.isDirectory()); + FileUtils.deleteQuietly(sslDir); + assertTrue(sslDir.mkdir()); + + File rootKeystoreFile = new File(sslDir, "root-" + cfg.getInstanceName() + ".jks"); + File localKeystoreFile = new File(sslDir, "local-" + cfg.getInstanceName() + ".jks"); + File publicTruststoreFile = new File(sslDir, "public-" + cfg.getInstanceName() + ".jks"); + final String rootKeystorePassword = "root_keystore_password", truststorePassword = "truststore_password"; + try { + new CertUtils(Property.RPC_SSL_KEYSTORE_TYPE.getDefaultValue(), "o=Apache Accumulo,cn=MiniAccumuloCluster", "RSA", 2048, "sha1WithRSAEncryption") + .createAll(rootKeystoreFile, localKeystoreFile, publicTruststoreFile, cfg.getInstanceName(), rootKeystorePassword, cfg.getRootPassword(), + truststorePassword); + } catch (Exception e) { + throw new RuntimeException("error creating MAC keystore", e); + } + + siteConfig.put(Property.INSTANCE_RPC_SSL_ENABLED.getKey(), "true"); + siteConfig.put(Property.RPC_SSL_KEYSTORE_PATH.getKey(), localKeystoreFile.getAbsolutePath()); + siteConfig.put(Property.RPC_SSL_KEYSTORE_PASSWORD.getKey(), cfg.getRootPassword()); + siteConfig.put(Property.RPC_SSL_TRUSTSTORE_PATH.getKey(), publicTruststoreFile.getAbsolutePath()); + siteConfig.put(Property.RPC_SSL_TRUSTSTORE_PASSWORD.getKey(), truststorePassword); + cfg.setSiteConfig(siteConfig); + } + + @Before + public void setUp() throws Exception { + createMiniAccumulo(); + Exception lastException = null; + for (int i = 0; i < 3; i++) { + try { + cluster.start(); + return; + } catch (ZooKeeperBindException e) { + lastException = e; + log.warn("Failed to start MiniAccumuloCluster, presumably due to ZooKeeper issues", lastException); + Thread.sleep(3000); + createMiniAccumulo(); + } + } + throw new RuntimeException("Failed to start MiniAccumuloCluster after three attempts", lastException); + } + + private void createMiniAccumulo() throws Exception { + // createTestDir will give us an empty directory; we don't need to clean it up ourselves + File baseDir = createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName()); + MiniAccumuloConfigImpl cfg = new MiniAccumuloConfigImpl(baseDir, ROOT_PASSWORD); + String nativePathInDevTree = NativeMapIT.nativeMapLocation().getAbsolutePath(); +
String nativePathInMapReduce = new File(System.getProperty("user.dir")).toString(); + cfg.setNativeLibPaths(nativePathInDevTree, nativePathInMapReduce); + cfg.setProperty(Property.GC_FILE_ARCHIVE, Boolean.TRUE.toString()); + Configuration coreSite = new Configuration(false); + configure(cfg, coreSite); + cfg.setProperty(Property.TSERV_NATIVEMAP_ENABLED, Boolean.TRUE.toString()); + configureForEnvironment(cfg, getClass(), getSslDir(baseDir)); + cluster = new MiniAccumuloClusterImpl(cfg); + if (coreSite.size() > 0) { + File csFile = new File(cluster.getConfig().getConfDir(), "core-site.xml"); + if (csFile.exists()) { + coreSite.addResource(new Path(csFile.getAbsolutePath())); + } + File tmp = new File(csFile.getAbsolutePath() + ".tmp"); + OutputStream out = new BufferedOutputStream(new FileOutputStream(tmp)); + coreSite.writeXml(out); + out.close(); + assertTrue(tmp.renameTo(csFile)); + } + beforeClusterStart(cfg); + } + + @After + public void tearDown() throws Exception { + if (cluster != null) + try { + cluster.stop(); + } catch (Exception e) { + // ignored + } + } + + protected MiniAccumuloClusterImpl getCluster() { + return cluster; + } + + protected Connector getConnector() throws AccumuloException, AccumuloSecurityException { + return getCluster().getConnector("root", new PasswordToken(ROOT_PASSWORD)); + } + + protected Process exec(Class<?> clazz, String... args) throws IOException { + return getCluster().exec(clazz, args); + } + + protected String getMonitor() throws KeeperException, InterruptedException { + Instance instance = new ZooKeeperInstance(getCluster().getClientConfig()); + return MonitorUtil.getLocation(instance); + } + + protected ClientConfiguration getClientConfig() throws Exception { + return new ClientConfiguration(getCluster().getConfig().getClientConfFile()); + } + +}
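ConfigurableMacBase above is the hook point for per-test MAC tuning: subclasses override configure(...) (and optionally beforeClusterStart(...)), both of which createMiniAccumulo() invokes before cluster.start() runs. A minimal sketch of a consumer, assuming the hypothetical class name ExampleConfigurableIT and table name "example" (neither appears in this commit):

package org.apache.accumulo.test.functional;

import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
import org.apache.hadoop.conf.Configuration;
import org.junit.Test;

public class ExampleConfigurableIT extends ConfigurableMacBase {
  @Override
  protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
    // Invoked from createMiniAccumulo() before cluster.start(), so these
    // settings are baked into the MiniAccumuloCluster instance.
    cfg.setNumTservers(1);
    cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "15s");
  }

  @Test
  public void createsTable() throws Exception {
    // getConnector() authenticates as "root" using the base class's ROOT_PASSWORD.
    getConnector().tableOperations().create("example");
  }
}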
http://git-wip-us.apache.org/repos/asf/accumulo/blob/a4afd1bf/test/src/main/java/org/apache/accumulo/test/functional/ExamplesIT.java ---------------------------------------------------------------------- diff --cc test/src/main/java/org/apache/accumulo/test/functional/ExamplesIT.java index 279e517,0000000..a69f4a5 mode 100644,000000..100644 --- a/test/src/main/java/org/apache/accumulo/test/functional/ExamplesIT.java +++ b/test/src/main/java/org/apache/accumulo/test/functional/ExamplesIT.java @@@ -1,669 -1,0 +1,673 @@@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.accumulo.test.functional; + +import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly; +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeTrue; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Map.Entry; +import java.util.concurrent.TimeUnit; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import org.apache.accumulo.cluster.standalone.StandaloneAccumuloCluster; +import org.apache.accumulo.cluster.standalone.StandaloneClusterControl; +import org.apache.accumulo.core.cli.BatchWriterOpts; +import org.apache.accumulo.core.client.BatchScanner; +import org.apache.accumulo.core.client.BatchWriter; +import org.apache.accumulo.core.client.BatchWriterConfig; +import org.apache.accumulo.core.client.Connector; +import org.apache.accumulo.core.client.IteratorSetting; +import org.apache.accumulo.core.client.MutationsRejectedException; +import org.apache.accumulo.core.client.security.tokens.AuthenticationToken; +import org.apache.accumulo.core.client.security.tokens.KerberosToken; +import org.apache.accumulo.core.client.security.tokens.PasswordToken; +import org.apache.accumulo.core.conf.Property; +import org.apache.accumulo.core.data.Key; +import org.apache.accumulo.core.data.Mutation; +import org.apache.accumulo.core.data.Value; +import org.apache.accumulo.core.iterators.user.AgeOffFilter; +import org.apache.accumulo.core.iterators.user.SummingCombiner; +import org.apache.accumulo.core.security.Authorizations; +import org.apache.accumulo.examples.simple.client.Flush; +import org.apache.accumulo.examples.simple.client.RandomBatchScanner; +import org.apache.accumulo.examples.simple.client.RandomBatchWriter; +import org.apache.accumulo.examples.simple.client.ReadWriteExample; +import org.apache.accumulo.examples.simple.client.RowOperations; +import 
org.apache.accumulo.examples.simple.client.SequentialBatchWriter; +import org.apache.accumulo.examples.simple.client.TraceDumpExample; +import org.apache.accumulo.examples.simple.client.TracingExample; +import org.apache.accumulo.examples.simple.combiner.StatsCombiner; +import org.apache.accumulo.examples.simple.constraints.MaxMutationSize; +import org.apache.accumulo.examples.simple.dirlist.Ingest; +import org.apache.accumulo.examples.simple.dirlist.QueryUtil; +import org.apache.accumulo.examples.simple.helloworld.InsertWithBatchWriter; +import org.apache.accumulo.examples.simple.helloworld.ReadData; +import org.apache.accumulo.examples.simple.isolation.InterferenceTest; +import org.apache.accumulo.examples.simple.mapreduce.RegexExample; +import org.apache.accumulo.examples.simple.mapreduce.RowHash; +import org.apache.accumulo.examples.simple.mapreduce.TableToFile; +import org.apache.accumulo.examples.simple.mapreduce.TeraSortIngest; +import org.apache.accumulo.examples.simple.mapreduce.WordCount; +import org.apache.accumulo.examples.simple.mapreduce.bulk.BulkIngestExample; +import org.apache.accumulo.examples.simple.mapreduce.bulk.GenerateTestData; +import org.apache.accumulo.examples.simple.mapreduce.bulk.SetupTable; +import org.apache.accumulo.examples.simple.mapreduce.bulk.VerifyIngest; +import org.apache.accumulo.examples.simple.shard.ContinuousQuery; +import org.apache.accumulo.examples.simple.shard.Index; +import org.apache.accumulo.examples.simple.shard.Query; +import org.apache.accumulo.examples.simple.shard.Reverse; +import org.apache.accumulo.harness.AccumuloClusterHarness; +import org.apache.accumulo.minicluster.MemoryUnit; +import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl; +import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl.LogWriter; +import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl; +import org.apache.accumulo.start.Main; +import org.apache.accumulo.test.TestIngest; ++import org.apache.accumulo.test.categories.StandaloneCapableClusterTests; ++import org.apache.accumulo.test.categories.SunnyDayTests; +import org.apache.accumulo.tracer.TraceServer; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.util.Tool; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; ++import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Iterators; + ++@Category({StandaloneCapableClusterTests.class, SunnyDayTests.class}) +public class ExamplesIT extends AccumuloClusterHarness { + private static final Logger log = LoggerFactory.getLogger(ExamplesIT.class); + private static final BatchWriterOpts bwOpts = new BatchWriterOpts(); + private static final BatchWriterConfig bwc = new BatchWriterConfig(); + private static final String visibility = "A|B"; + private static final String auths = "A,B"; + + Connector c; + String instance; + String keepers; + String user; + String passwd; + String keytab; + BatchWriter bw; + IteratorSetting is; + String dir; + FileSystem fs; + Authorizations origAuths; + boolean saslEnabled; + + @Override + public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopConf) { + // 128MB * 3 + cfg.setDefaultMemory(cfg.getDefaultMemory() * 3, MemoryUnit.BYTE); + } + + @Before + public void getClusterInfo() throws Exception { + c = 
getConnector(); + user = getAdminPrincipal(); + AuthenticationToken token = getAdminToken(); + if (token instanceof KerberosToken) { + keytab = getAdminUser().getKeytab().getAbsolutePath(); + saslEnabled = true; + } else if (token instanceof PasswordToken) { + passwd = new String(((PasswordToken) getAdminToken()).getPassword(), UTF_8); + saslEnabled = false; + } else { + Assert.fail("Unknown token type: " + token); + } + fs = getCluster().getFileSystem(); + instance = c.getInstance().getInstanceName(); + keepers = c.getInstance().getZooKeepers(); + dir = new Path(cluster.getTemporaryPath(), getClass().getName()).toString(); + + origAuths = c.securityOperations().getUserAuthorizations(user); + c.securityOperations().changeUserAuthorizations(user, new Authorizations(auths.split(","))); + } + + @After + public void resetAuths() throws Exception { + if (null != origAuths) { + getConnector().securityOperations().changeUserAuthorizations(getAdminPrincipal(), origAuths); + } + } + + @Override + public int defaultTimeoutSeconds() { + return 6 * 60; + } + + @Test + public void testTrace() throws Exception { + Process trace = null; + if (ClusterType.MINI == getClusterType()) { + MiniAccumuloClusterImpl impl = (MiniAccumuloClusterImpl) cluster; + trace = impl.exec(TraceServer.class); + while (!c.tableOperations().exists("trace")) + sleepUninterruptibly(500, TimeUnit.MILLISECONDS); + } + String[] args; + if (saslEnabled) { + args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-C", "-D", "-c"}; + } else { + args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-C", "-D", "-c"}; + } + Entry<Integer,String> pair = cluster.getClusterControl().execWithStdout(TracingExample.class, args); + Assert.assertEquals("Expected return code of zero. 
STDOUT=" + pair.getValue(), 0, pair.getKey().intValue()); + String result = pair.getValue(); + Pattern pattern = Pattern.compile("TraceID: ([0-9a-f]+)"); + Matcher matcher = pattern.matcher(result); + int count = 0; + while (matcher.find()) { + if (saslEnabled) { + args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--traceid", matcher.group(1)}; + } else { + args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--traceid", matcher.group(1)}; + } + pair = cluster.getClusterControl().execWithStdout(TraceDumpExample.class, args); + count++; + } + assertTrue(count > 0); + assertTrue("Output did not contain myApp@myHost", pair.getValue().contains("myApp@myHost")); + if (ClusterType.MINI == getClusterType() && null != trace) { + trace.destroy(); + } + } + + @Test + public void testClasspath() throws Exception { + Entry<Integer,String> entry = getCluster().getClusterControl().execWithStdout(Main.class, new String[] {"classpath"}); + assertEquals(0, entry.getKey().intValue()); + String result = entry.getValue(); + int level1 = result.indexOf("Level 1"); + int level2 = result.indexOf("Level 2"); + int level3 = result.indexOf("Level 3"); + int level4 = result.indexOf("Level 4"); + assertTrue("Level 1 classloader not present.", level1 >= 0); + assertTrue("Level 2 classloader not present.", level2 > 0); + assertTrue("Level 3 classloader not present.", level3 > 0); + assertTrue("Level 4 classloader not present.", level4 > 0); + assertTrue(level1 < level2); + assertTrue(level2 < level3); + assertTrue(level3 < level4); + } + + @Test + public void testDirList() throws Exception { + String[] names = getUniqueNames(3); + String dirTable = names[0], indexTable = names[1], dataTable = names[2]; + String[] args; + String dirListDirectory; + switch (getClusterType()) { + case MINI: + dirListDirectory = ((MiniAccumuloClusterImpl) getCluster()).getConfig().getDir().getAbsolutePath(); + break; + case STANDALONE: + dirListDirectory = ((StandaloneAccumuloCluster) getCluster()).getAccumuloHome(); + break; + default: + throw new RuntimeException("Unknown cluster type"); + } + assumeTrue(new File(dirListDirectory).exists()); + // Index a directory listing on /tmp. If this is running against a standalone cluster, we can't guarantee Accumulo source will be there. + if (saslEnabled) { + args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--dirTable", dirTable, "--indexTable", indexTable, "--dataTable", + dataTable, "--vis", visibility, "--chunkSize", Integer.toString(10000), dirListDirectory}; + } else { + args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--dirTable", dirTable, "--indexTable", indexTable, "--dataTable", + dataTable, "--vis", visibility, "--chunkSize", Integer.toString(10000), dirListDirectory}; + } + Entry<Integer,String> entry = getClusterControl().execWithStdout(Ingest.class, args); + assertEquals("Got non-zero return code. 
Stdout=" + entry.getValue(), 0, entry.getKey().intValue()); + + String expectedFile; + switch (getClusterType()) { + case MINI: + // Should be present in a minicluster dir + expectedFile = "accumulo-site.xml"; + break; + case STANDALONE: + // Should be in place on standalone installs (not having to follow symlinks) + expectedFile = "LICENSE"; + break; + default: + throw new RuntimeException("Unknown cluster type"); + } + if (saslEnabled) { + args = new String[] {"-i", instance, "-z", keepers, "--keytab", keytab, "-u", user, "-t", indexTable, "--auths", auths, "--search", "--path", + expectedFile}; + } else { + args = new String[] {"-i", instance, "-z", keepers, "-p", passwd, "-u", user, "-t", indexTable, "--auths", auths, "--search", "--path", expectedFile}; + } + entry = getClusterControl().execWithStdout(QueryUtil.class, args); + if (ClusterType.MINI == getClusterType()) { + MiniAccumuloClusterImpl impl = (MiniAccumuloClusterImpl) cluster; + for (LogWriter writer : impl.getLogWriters()) { + writer.flush(); + } + } + + log.info("result " + entry.getValue()); + assertEquals(0, entry.getKey().intValue()); + assertTrue(entry.getValue().contains(expectedFile)); + } + + @Test + public void testAgeoffFilter() throws Exception { + String tableName = getUniqueNames(1)[0]; + c.tableOperations().create(tableName); + is = new IteratorSetting(10, AgeOffFilter.class); + AgeOffFilter.setTTL(is, 1000L); + c.tableOperations().attachIterator(tableName, is); + sleepUninterruptibly(500, TimeUnit.MILLISECONDS); // let zookeeper updates propagate. + bw = c.createBatchWriter(tableName, bwc); + Mutation m = new Mutation("foo"); + m.put("a", "b", "c"); + bw.addMutation(m); + bw.close(); + sleepUninterruptibly(1, TimeUnit.SECONDS); + assertEquals(0, Iterators.size(c.createScanner(tableName, Authorizations.EMPTY).iterator())); + } + + @Test + public void testStatsCombiner() throws Exception { + String table = getUniqueNames(1)[0]; + c.tableOperations().create(table); + is = new IteratorSetting(10, StatsCombiner.class); + StatsCombiner.setCombineAllColumns(is, true); + + c.tableOperations().attachIterator(table, is); + bw = c.createBatchWriter(table, bwc); + // Write two mutations otherwise the NativeMap would dedupe them into a single update + Mutation m = new Mutation("foo"); + m.put("a", "b", "1"); + bw.addMutation(m); + m = new Mutation("foo"); + m.put("a", "b", "3"); + bw.addMutation(m); + bw.flush(); + + Iterator<Entry<Key,Value>> iter = c.createScanner(table, Authorizations.EMPTY).iterator(); + assertTrue("Iterator had no results", iter.hasNext()); + Entry<Key,Value> e = iter.next(); + assertEquals("Results ", "1,3,4,2", e.getValue().toString()); + assertFalse("Iterator had additional results", iter.hasNext()); + + m = new Mutation("foo"); + m.put("a", "b", "0,20,20,2"); + bw.addMutation(m); + bw.close(); + + iter = c.createScanner(table, Authorizations.EMPTY).iterator(); + assertTrue("Iterator had no results", iter.hasNext()); + e = iter.next(); + assertEquals("Results ", "0,20,24,4", e.getValue().toString()); + assertFalse("Iterator had additional results", iter.hasNext()); + } + + @Test + public void testBloomFilters() throws Exception { + String tableName = getUniqueNames(1)[0]; + c.tableOperations().create(tableName); + c.tableOperations().setProperty(tableName, Property.TABLE_BLOOM_ENABLED.getKey(), "true"); + String[] args; + if (saslEnabled) { + args = new String[] {"--seed", "7", "-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--num", "100000", "--min", "0", "--max", + "1000000000", 
"--size", "50", "--batchMemory", "2M", "--batchLatency", "60s", "--batchThreads", "3", "-t", tableName}; + } else { + args = new String[] {"--seed", "7", "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--num", "100000", "--min", "0", "--max", "1000000000", + "--size", "50", "--batchMemory", "2M", "--batchLatency", "60s", "--batchThreads", "3", "-t", tableName}; + } + goodExec(RandomBatchWriter.class, args); + c.tableOperations().flush(tableName, null, null, true); + long diff = 0, diff2 = 0; + // try the speed test a couple times in case the system is loaded with other tests + for (int i = 0; i < 2; i++) { + long now = System.currentTimeMillis(); + if (saslEnabled) { + args = new String[] {"--seed", "7", "-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--num", "10000", "--min", "0", "--max", + "1000000000", "--size", "50", "--scanThreads", "4", "-t", tableName}; + } else { + args = new String[] {"--seed", "7", "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--num", "10000", "--min", "0", "--max", "1000000000", + "--size", "50", "--scanThreads", "4", "-t", tableName}; + } + goodExec(RandomBatchScanner.class, args); + diff = System.currentTimeMillis() - now; + now = System.currentTimeMillis(); + if (saslEnabled) { + args = new String[] {"--seed", "8", "-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--num", "10000", "--min", "0", "--max", + "1000000000", "--size", "50", "--scanThreads", "4", "-t", tableName}; + } else { + args = new String[] {"--seed", "8", "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--num", "10000", "--min", "0", "--max", "1000000000", + "--size", "50", "--scanThreads", "4", "-t", tableName}; + } + int retCode = getClusterControl().exec(RandomBatchScanner.class, args); + assertEquals(1, retCode); + diff2 = System.currentTimeMillis() - now; + if (diff2 < diff) + break; + } + assertTrue(diff2 < diff); + } + + @Test + public void testShardedIndex() throws Exception { + File src = new File(System.getProperty("user.dir") + "/src"); + assumeTrue(src.exists()); + String[] names = getUniqueNames(3); + final String shard = names[0], index = names[1]; + c.tableOperations().create(shard); + c.tableOperations().create(index); + bw = c.createBatchWriter(shard, bwc); + Index.index(30, src, "\\W+", bw); + bw.close(); + BatchScanner bs = c.createBatchScanner(shard, Authorizations.EMPTY, 4); + List<String> found = Query.query(bs, Arrays.asList("foo", "bar"), null); + bs.close(); + // should find ourselves + boolean thisFile = false; + for (String file : found) { + if (file.endsWith("/ExamplesIT.java")) + thisFile = true; + } + assertTrue(thisFile); + + String[] args; + if (saslEnabled) { + args = new String[] {"-i", instance, "-z", keepers, "--shardTable", shard, "--doc2Term", index, "-u", user, "--keytab", keytab}; + } else { + args = new String[] {"-i", instance, "-z", keepers, "--shardTable", shard, "--doc2Term", index, "-u", getAdminPrincipal(), "-p", passwd}; + } + // create a reverse index + goodExec(Reverse.class, args); + + if (saslEnabled) { + args = new String[] {"-i", instance, "-z", keepers, "--shardTable", shard, "--doc2Term", index, "-u", user, "--keytab", keytab, "--terms", "5", + "--count", "1000"}; + } else { + args = new String[] {"-i", instance, "-z", keepers, "--shardTable", shard, "--doc2Term", index, "-u", user, "-p", passwd, "--terms", "5", "--count", + "1000"}; + } + // run some queries + goodExec(ContinuousQuery.class, args); + } + + @Test + public void testMaxMutationConstraint() throws Exception { + 
String tableName = getUniqueNames(1)[0]; + c.tableOperations().create(tableName); + c.tableOperations().addConstraint(tableName, MaxMutationSize.class.getName()); + TestIngest.Opts opts = new TestIngest.Opts(); + opts.rows = 1; + opts.cols = 1000; + opts.setTableName(tableName); + if (saslEnabled) { + opts.updateKerberosCredentials(cluster.getClientConfig()); + } else { + opts.setPrincipal(getAdminPrincipal()); + } + try { + TestIngest.ingest(c, opts, bwOpts); + } catch (MutationsRejectedException ex) { + assertEquals(1, ex.getConstraintViolationSummaries().size()); + } + } + + @Test + public void testBulkIngest() throws Exception { + // TODO Figure out a way to run M/R with Kerberos + assumeTrue(getAdminToken() instanceof PasswordToken); + String tableName = getUniqueNames(1)[0]; + FileSystem fs = getFileSystem(); + Path p = new Path(dir, "tmp"); + if (fs.exists(p)) { + fs.delete(p, true); + } + goodExec(GenerateTestData.class, "--start-row", "0", "--count", "10000", "--output", dir + "/tmp/input/data"); + + List<String> commonArgs = new ArrayList<>(Arrays.asList(new String[] {"-i", instance, "-z", keepers, "-u", user, "--table", tableName})); + if (saslEnabled) { + commonArgs.add("--keytab"); + commonArgs.add(keytab); + } else { + commonArgs.add("-p"); + commonArgs.add(passwd); + } + + List<String> args = new ArrayList<>(commonArgs); + goodExec(SetupTable.class, args.toArray(new String[0])); + + args = new ArrayList<>(commonArgs); + args.addAll(Arrays.asList(new String[] {"--inputDir", dir + "/tmp/input", "--workDir", dir + "/tmp"})); + goodExec(BulkIngestExample.class, args.toArray(new String[0])); + + args = new ArrayList<>(commonArgs); + args.addAll(Arrays.asList(new String[] {"--start-row", "0", "--count", "10000"})); + goodExec(VerifyIngest.class, args.toArray(new String[0])); + } + + @Test + public void testTeraSortAndRead() throws Exception { + // TODO Figure out a way to run M/R with Kerberos + assumeTrue(getAdminToken() instanceof PasswordToken); + String tableName = getUniqueNames(1)[0]; + String[] args; + if (saslEnabled) { + args = new String[] {"--count", (1000 * 1000) + "", "-nk", "10", "-xk", "10", "-nv", "10", "-xv", "10", "-t", tableName, "-i", instance, "-z", keepers, + "-u", user, "--keytab", keytab, "--splits", "4"}; + } else { + args = new String[] {"--count", (1000 * 1000) + "", "-nk", "10", "-xk", "10", "-nv", "10", "-xv", "10", "-t", tableName, "-i", instance, "-z", keepers, + "-u", user, "-p", passwd, "--splits", "4"}; + } + goodExec(TeraSortIngest.class, args); + Path output = new Path(dir, "tmp/nines"); + if (fs.exists(output)) { + fs.delete(output, true); + } + if (saslEnabled) { + args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", tableName, "--rowRegex", ".*999.*", "--output", + output.toString()}; + } else { + args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", tableName, "--rowRegex", ".*999.*", "--output", output.toString()}; + } + goodExec(RegexExample.class, args); + if (saslEnabled) { + args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", tableName, "--column", "c:"}; + } else { + args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", tableName, "--column", "c:"}; + } + goodExec(RowHash.class, args); + output = new Path(dir, "tmp/tableFile"); + if (fs.exists(output)) { + fs.delete(output, true); + } + if (saslEnabled) { + args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", 
tableName, "--output", output.toString()}; + } else { + args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", tableName, "--output", output.toString()}; + } + goodExec(TableToFile.class, args); + } + + @Test + public void testWordCount() throws Exception { + // TODO Figure out a way to run M/R with Kerberos + assumeTrue(getAdminToken() instanceof PasswordToken); + String tableName = getUniqueNames(1)[0]; + c.tableOperations().create(tableName); + is = new IteratorSetting(10, SummingCombiner.class); + SummingCombiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column(new Text("count")))); + SummingCombiner.setEncodingType(is, SummingCombiner.Type.STRING); + c.tableOperations().attachIterator(tableName, is); + Path readme = new Path(new Path(System.getProperty("user.dir")).getParent(), "README.md"); + if (!new File(readme.toString()).exists()) { + log.info("Not running test: README.md does not exist)"); + return; + } + fs.copyFromLocalFile(readme, new Path(dir + "/tmp/wc/README.md")); + String[] args; + if (saslEnabled) { + args = new String[] {"-i", instance, "-u", user, "--keytab", keytab, "-z", keepers, "--input", dir + "/tmp/wc", "-t", tableName}; + } else { + args = new String[] {"-i", instance, "-u", user, "-p", passwd, "-z", keepers, "--input", dir + "/tmp/wc", "-t", tableName}; + } + goodExec(WordCount.class, args); + } + + @Test + public void testInsertWithBatchWriterAndReadData() throws Exception { + String tableName = getUniqueNames(1)[0]; + String[] args; + if (saslEnabled) { + args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", tableName}; + } else { + args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", tableName}; + } + goodExec(InsertWithBatchWriter.class, args); + goodExec(ReadData.class, args); + } + + @Test + public void testIsolatedScansWithInterference() throws Exception { + String[] args; + if (saslEnabled) { + args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", getUniqueNames(1)[0], "--iterations", "100000", "--isolated"}; + } else { + args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", getUniqueNames(1)[0], "--iterations", "100000", "--isolated"}; + } + goodExec(InterferenceTest.class, args); + } + + @Test + public void testScansWithInterference() throws Exception { + String[] args; + if (saslEnabled) { + args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", getUniqueNames(1)[0], "--iterations", "100000"}; + } else { + args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", getUniqueNames(1)[0], "--iterations", "100000"}; + } + goodExec(InterferenceTest.class, args); + } + + @Test + public void testRowOperations() throws Exception { + String[] args; + if (saslEnabled) { + args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab}; + } else { + args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd}; + } + goodExec(RowOperations.class, args); + } + + @Test + public void testBatchWriter() throws Exception { + String tableName = getUniqueNames(1)[0]; + c.tableOperations().create(tableName); + String[] args; + if (saslEnabled) { + args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", tableName, "--start", "0", "--num", "100000", "--size", "50", + "--batchMemory", "10000000", "--batchLatency", "1000", "--batchThreads", "4", "--vis", visibility}; 
+ } else { + args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", tableName, "--start", "0", "--num", "100000", "--size", "50", + "--batchMemory", "10000000", "--batchLatency", "1000", "--batchThreads", "4", "--vis", visibility}; + } + goodExec(SequentialBatchWriter.class, args); + + } + + @Test + public void testReadWriteAndDelete() throws Exception { + String tableName = getUniqueNames(1)[0]; + String[] args; + if (saslEnabled) { + args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--auths", auths, "--table", tableName, "--createtable", "-c", + "--debug"}; + } else { + args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--auths", auths, "--table", tableName, "--createtable", "-c", "--debug"}; + } + goodExec(ReadWriteExample.class, args); + if (saslEnabled) { + args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--auths", auths, "--table", tableName, "-d", "--debug"}; + } else { + args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--auths", auths, "--table", tableName, "-d", "--debug"}; + } + goodExec(ReadWriteExample.class, args); + + } + + @Test + public void testRandomBatchesAndFlush() throws Exception { + String tableName = getUniqueNames(1)[0]; + c.tableOperations().create(tableName); + String[] args; + if (saslEnabled) { + args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--table", tableName, "--num", "100000", "--min", "0", "--max", + "100000", "--size", "100", "--batchMemory", "1000000", "--batchLatency", "1000", "--batchThreads", "4", "--vis", visibility}; + } else { + args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--table", tableName, "--num", "100000", "--min", "0", "--max", "100000", + "--size", "100", "--batchMemory", "1000000", "--batchLatency", "1000", "--batchThreads", "4", "--vis", visibility}; + } + goodExec(RandomBatchWriter.class, args); + + if (saslEnabled) { + args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--table", tableName, "--num", "10000", "--min", "0", "--max", + "100000", "--size", "100", "--scanThreads", "4", "--auths", auths}; + } else { + args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--table", tableName, "--num", "10000", "--min", "0", "--max", "100000", + "--size", "100", "--scanThreads", "4", "--auths", auths}; + } + goodExec(RandomBatchScanner.class, args); + + if (saslEnabled) { + args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--table", tableName}; + } else { + args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--table", tableName}; + } + goodExec(Flush.class, args); + } + + private void goodExec(Class<?> theClass, String... args) throws InterruptedException, IOException { + Entry<Integer,String> pair; + if (Tool.class.isAssignableFrom(theClass) && ClusterType.STANDALONE == getClusterType()) { + StandaloneClusterControl control = (StandaloneClusterControl) getClusterControl(); + pair = control.execMapreduceWithStdout(theClass, args); + } else { + // We're already slurping stdout into memory (not redirecting to file). Might as well add it to error message. 
+ pair = getClusterControl().execWithStdout(theClass, args); + } + Assert.assertEquals("stdout=" + pair.getValue(), 0, pair.getKey().intValue()); + } +} http://git-wip-us.apache.org/repos/asf/accumulo/blob/a4afd1bf/test/src/main/java/org/apache/accumulo/test/functional/KerberosIT.java ---------------------------------------------------------------------- diff --cc test/src/main/java/org/apache/accumulo/test/functional/KerberosIT.java index 1bdc71a,0000000..d00e5a0 mode 100644,000000..100644 --- a/test/src/main/java/org/apache/accumulo/test/functional/KerberosIT.java +++ b/test/src/main/java/org/apache/accumulo/test/functional/KerberosIT.java @@@ -1,659 -1,0 +1,659 @@@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.accumulo.test.functional; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.lang.reflect.UndeclaredThrowableException; +import java.security.PrivilegedExceptionAction; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import java.util.Map; +import java.util.Map.Entry; +import java.util.concurrent.TimeUnit; + +import org.apache.accumulo.cluster.ClusterUser; +import org.apache.accumulo.core.client.AccumuloException; +import org.apache.accumulo.core.client.AccumuloSecurityException; +import org.apache.accumulo.core.client.BatchScanner; +import org.apache.accumulo.core.client.BatchWriter; +import org.apache.accumulo.core.client.BatchWriterConfig; +import org.apache.accumulo.core.client.Connector; +import org.apache.accumulo.core.client.Scanner; +import org.apache.accumulo.core.client.TableExistsException; +import org.apache.accumulo.core.client.TableNotFoundException; +import org.apache.accumulo.core.client.admin.CompactionConfig; +import org.apache.accumulo.core.client.admin.DelegationTokenConfig; +import org.apache.accumulo.core.client.impl.AuthenticationTokenIdentifier; +import org.apache.accumulo.core.client.impl.DelegationTokenImpl; +import org.apache.accumulo.core.client.security.tokens.AuthenticationToken; +import org.apache.accumulo.core.client.security.tokens.KerberosToken; +import org.apache.accumulo.core.client.security.tokens.PasswordToken; +import org.apache.accumulo.core.conf.Property; +import org.apache.accumulo.core.data.Key; +import org.apache.accumulo.core.data.Mutation; +import org.apache.accumulo.core.data.Range; +import org.apache.accumulo.core.data.Value; +import org.apache.accumulo.core.metadata.MetadataTable; +import org.apache.accumulo.core.metadata.RootTable; +import org.apache.accumulo.core.security.Authorizations; 
+import org.apache.accumulo.core.security.ColumnVisibility; +import org.apache.accumulo.core.security.SystemPermission; +import org.apache.accumulo.core.security.TablePermission; +import org.apache.accumulo.harness.AccumuloITBase; +import org.apache.accumulo.harness.MiniClusterConfigurationCallback; +import org.apache.accumulo.harness.MiniClusterHarness; +import org.apache.accumulo.harness.TestingKdc; +import org.apache.accumulo.minicluster.ServerType; +import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl; +import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl; - import org.apache.accumulo.test.categories.MiniClusterOnlyTest; ++import org.apache.accumulo.test.categories.MiniClusterOnlyTests; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.minikdc.MiniKdc; +import org.apache.hadoop.security.UserGroupInformation; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Iterables; +import com.google.common.collect.Sets; + +/** + * MAC test which uses {@link MiniKdc} to simulate a secure environment. Can be used as a sanity check for Kerberos/SASL testing. + */ - @Category(MiniClusterOnlyTest.class) ++@Category(MiniClusterOnlyTests.class) +public class KerberosIT extends AccumuloITBase { + private static final Logger log = LoggerFactory.getLogger(KerberosIT.class); + + private static TestingKdc kdc; + private static String krbEnabledForITs = null; + private static ClusterUser rootUser; + + @BeforeClass + public static void startKdc() throws Exception { + kdc = new TestingKdc(); + kdc.start(); + krbEnabledForITs = System.getProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION); + if (null == krbEnabledForITs || !Boolean.parseBoolean(krbEnabledForITs)) { + System.setProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION, "true"); + } + rootUser = kdc.getRootUser(); + } + + @AfterClass + public static void stopKdc() throws Exception { + if (null != kdc) { + kdc.stop(); + } + if (null != krbEnabledForITs) { + System.setProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION, krbEnabledForITs); + } + UserGroupInformation.setConfiguration(new Configuration(false)); + } + + @Override + public int defaultTimeoutSeconds() { + return 60 * 5; + } + + private MiniAccumuloClusterImpl mac; + + @Before + public void startMac() throws Exception { + MiniClusterHarness harness = new MiniClusterHarness(); + mac = harness.create(this, new PasswordToken("unused"), kdc, new MiniClusterConfigurationCallback() { + + @Override + public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration coreSite) { + Map<String,String> site = cfg.getSiteConfig(); + site.put(Property.INSTANCE_ZK_TIMEOUT.getKey(), "15s"); + cfg.setSiteConfig(site); + } + + }); + + mac.getConfig().setNumTservers(1); + mac.start(); + // Enable Kerberos auth + Configuration conf = new Configuration(false); + conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos"); + UserGroupInformation.setConfiguration(conf); + } + + @After + public void stopMac() throws Exception { + if (null != mac) { + mac.stop(); + } + } + + @Test + public void testAdminUser() throws Exception { + // Login as the client (provided to `accumulo init` as the "root" user) + UserGroupInformation ugi =
UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath()); + ugi.doAs(new PrivilegedExceptionAction<Void>() { + @Override + public Void run() throws Exception { + final Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken()); + + // The "root" user should have all system permissions + for (SystemPermission perm : SystemPermission.values()) { + assertTrue("Expected user to have permission: " + perm, conn.securityOperations().hasSystemPermission(conn.whoami(), perm)); + } + + // and the ability to modify the root and metadata tables + for (String table : Arrays.asList(RootTable.NAME, MetadataTable.NAME)) { + assertTrue(conn.securityOperations().hasTablePermission(conn.whoami(), table, TablePermission.ALTER_TABLE)); + } + return null; + } + }); + } + + @Test + public void testNewUser() throws Exception { + String newUser = testName.getMethodName(); + final File newUserKeytab = new File(kdc.getKeytabDir(), newUser + ".keytab"); + if (newUserKeytab.exists() && !newUserKeytab.delete()) { + log.warn("Unable to delete {}", newUserKeytab); + } + + // Create a new user + kdc.createPrincipal(newUserKeytab, newUser); + + final String newQualifiedUser = kdc.qualifyUser(newUser); + final HashSet<String> users = Sets.newHashSet(rootUser.getPrincipal()); + + // Login as the "root" user + UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath()); + log.info("Logged in as {}", rootUser.getPrincipal()); + + ugi.doAs(new PrivilegedExceptionAction<Void>() { + @Override + public Void run() throws Exception { + Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken()); + log.info("Created connector as {}", rootUser.getPrincipal()); + assertEquals(rootUser.getPrincipal(), conn.whoami()); + + // Make sure the system user doesn't exist -- this will force some RPC to happen server-side + createTableWithDataAndCompact(conn); + + assertEquals(users, conn.securityOperations().listLocalUsers()); + + return null; + } + }); + // Switch to a new user + ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(newQualifiedUser, newUserKeytab.getAbsolutePath()); + log.info("Logged in as {}", newQualifiedUser); + ugi.doAs(new PrivilegedExceptionAction<Void>() { + @Override + public Void run() throws Exception { + Connector conn = mac.getConnector(newQualifiedUser, new KerberosToken()); + log.info("Created connector as {}", newQualifiedUser); + assertEquals(newQualifiedUser, conn.whoami()); + + // The new user should have no system permissions + for (SystemPermission perm : SystemPermission.values()) { + assertFalse(conn.securityOperations().hasSystemPermission(newQualifiedUser, perm)); + } + + users.add(newQualifiedUser); + + // Same users as before, plus the new user we just created + assertEquals(users, conn.securityOperations().listLocalUsers()); + return null; + } + + }); + } + + @Test + public void testUserPrivilegesThroughGrant() throws Exception { + String user1 = testName.getMethodName(); + final File user1Keytab = new File(kdc.getKeytabDir(), user1 + ".keytab"); + if (user1Keytab.exists() && !user1Keytab.delete()) { + log.warn("Unable to delete {}", user1Keytab); + } + + // Create some new users + kdc.createPrincipal(user1Keytab, user1); + + final String qualifiedUser1 = kdc.qualifyUser(user1); + + // Log in as user1 + UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(user1, 
user1Keytab.getAbsolutePath()); + log.info("Logged in as {}", user1); + ugi.doAs(new PrivilegedExceptionAction<Void>() { + @Override + public Void run() throws Exception { + // Indirectly creates this user when we use it + Connector conn = mac.getConnector(qualifiedUser1, new KerberosToken()); + log.info("Created connector as {}", qualifiedUser1); + + // The new user should have no system permissions + for (SystemPermission perm : SystemPermission.values()) { + assertFalse(conn.securityOperations().hasSystemPermission(qualifiedUser1, perm)); + } + + return null; + } + }); + + ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath()); + ugi.doAs(new PrivilegedExceptionAction<Void>() { + @Override + public Void run() throws Exception { + Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken()); + conn.securityOperations().grantSystemPermission(qualifiedUser1, SystemPermission.CREATE_TABLE); + return null; + } + }); + + // Switch back to the original user + ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(user1, user1Keytab.getAbsolutePath()); + ugi.doAs(new PrivilegedExceptionAction<Void>() { + @Override + public Void run() throws Exception { + Connector conn = mac.getConnector(qualifiedUser1, new KerberosToken()); + + // Shouldn't throw an exception since we granted the create table permission + final String table = testName.getMethodName() + "_user_table"; + conn.tableOperations().create(table); + + // Make sure we can actually use the table we made + BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig()); + Mutation m = new Mutation("a"); + m.put("b", "c", "d"); + bw.addMutation(m); + bw.close(); + + conn.tableOperations().compact(table, new CompactionConfig().setWait(true).setFlush(true)); + return null; + } + }); + } + + @Test + public void testUserPrivilegesForTable() throws Exception { + String user1 = testName.getMethodName(); + final File user1Keytab = new File(kdc.getKeytabDir(), user1 + ".keytab"); + if (user1Keytab.exists() && !user1Keytab.delete()) { + log.warn("Unable to delete {}", user1Keytab); + } + + // Create some new users -- cannot contain realm + kdc.createPrincipal(user1Keytab, user1); + + final String qualifiedUser1 = kdc.qualifyUser(user1); + + // Log in as user1 + UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(qualifiedUser1, user1Keytab.getAbsolutePath()); + log.info("Logged in as {}", user1); + ugi.doAs(new PrivilegedExceptionAction<Void>() { + @Override + public Void run() throws Exception { + // Indirectly creates this user when we use it + Connector conn = mac.getConnector(qualifiedUser1, new KerberosToken()); + log.info("Created connector as {}", qualifiedUser1); + + // The new user should have no system permissions + for (SystemPermission perm : SystemPermission.values()) { + assertFalse(conn.securityOperations().hasSystemPermission(qualifiedUser1, perm)); + } + return null; + } + + }); + + final String table = testName.getMethodName() + "_user_table"; + final String viz = "viz"; + + ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath()); + + ugi.doAs(new PrivilegedExceptionAction<Void>() { + @Override + public Void run() throws Exception { + Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken()); + conn.tableOperations().create(table); + // Give our unprivileged user permission on the table we made for them + 
conn.securityOperations().grantTablePermission(qualifiedUser1, table, TablePermission.READ); + conn.securityOperations().grantTablePermission(qualifiedUser1, table, TablePermission.WRITE); + conn.securityOperations().grantTablePermission(qualifiedUser1, table, TablePermission.ALTER_TABLE); + conn.securityOperations().grantTablePermission(qualifiedUser1, table, TablePermission.DROP_TABLE); + conn.securityOperations().changeUserAuthorizations(qualifiedUser1, new Authorizations(viz)); + return null; + } + }); + + // Switch back to the original user + ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(qualifiedUser1, user1Keytab.getAbsolutePath()); + ugi.doAs(new PrivilegedExceptionAction<Void>() { + @Override + public Void run() throws Exception { + Connector conn = mac.getConnector(qualifiedUser1, new KerberosToken()); + + // Make sure we can actually use the table we made + + // Write data + final long ts = 1000l; + BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig()); + Mutation m = new Mutation("a"); + m.put("b", "c", new ColumnVisibility(viz.getBytes()), ts, "d"); + bw.addMutation(m); + bw.close(); + + // Compact + conn.tableOperations().compact(table, new CompactionConfig().setWait(true).setFlush(true)); + + // Alter + conn.tableOperations().setProperty(table, Property.TABLE_BLOOM_ENABLED.getKey(), "true"); + + // Read (and proper authorizations) + Scanner s = conn.createScanner(table, new Authorizations(viz)); + Iterator<Entry<Key,Value>> iter = s.iterator(); + assertTrue("No results from iterator", iter.hasNext()); + Entry<Key,Value> entry = iter.next(); + assertEquals(new Key("a", "b", "c", viz, ts), entry.getKey()); + assertEquals(new Value("d".getBytes()), entry.getValue()); + assertFalse("Had more results from iterator", iter.hasNext()); + return null; + } + }); + } + + @Test + public void testDelegationToken() throws Exception { + final String tableName = getUniqueNames(1)[0]; + + // Login as the "root" user + UserGroupInformation root = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath()); + log.info("Logged in as {}", rootUser.getPrincipal()); + + final int numRows = 100, numColumns = 10; + + // As the "root" user, open up the connection and get a delegation token + final AuthenticationToken delegationToken = root.doAs(new PrivilegedExceptionAction<AuthenticationToken>() { + @Override + public AuthenticationToken run() throws Exception { + Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken()); + log.info("Created connector as {}", rootUser.getPrincipal()); + assertEquals(rootUser.getPrincipal(), conn.whoami()); + + conn.tableOperations().create(tableName); + BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig()); + for (int r = 0; r < numRows; r++) { + Mutation m = new Mutation(Integer.toString(r)); + for (int c = 0; c < numColumns; c++) { + String col = Integer.toString(c); + m.put(col, col, col); + } + bw.addMutation(m); + } + bw.close(); + + return conn.securityOperations().getDelegationToken(new DelegationTokenConfig()); + } + }); + + // The above login with keytab doesn't have a way to logout, so make a fake user that won't have krb credentials + UserGroupInformation userWithoutPrivs = UserGroupInformation.createUserForTesting("fake_user", new String[0]); + int recordsSeen = userWithoutPrivs.doAs(new PrivilegedExceptionAction<Integer>() { + @Override + public Integer run() throws Exception { + Connector conn = 
mac.getConnector(rootUser.getPrincipal(), delegationToken); + + BatchScanner bs = conn.createBatchScanner(tableName, Authorizations.EMPTY, 2); + bs.setRanges(Collections.singleton(new Range())); + int recordsSeen = Iterables.size(bs); + bs.close(); + return recordsSeen; + } + }); + + assertEquals(numRows * numColumns, recordsSeen); + } + + @Test + public void testDelegationTokenAsDifferentUser() throws Exception { + // Login as the "root" user + UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath()); + log.info("Logged in as {}", rootUser.getPrincipal()); + + final AuthenticationToken delegationToken; + try { + delegationToken = ugi.doAs(new PrivilegedExceptionAction<AuthenticationToken>() { + @Override + public AuthenticationToken run() throws Exception { + // As the "root" user, open up the connection and get a delegation token + Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken()); + log.info("Created connector as {}", rootUser.getPrincipal()); + assertEquals(rootUser.getPrincipal(), conn.whoami()); + return conn.securityOperations().getDelegationToken(new DelegationTokenConfig()); + } + }); + } catch (UndeclaredThrowableException ex) { + throw ex; + } + + // make a fake user that won't have krb credentials + UserGroupInformation userWithoutPrivs = UserGroupInformation.createUserForTesting("fake_user", new String[0]); + try { + // Use the delegation token to try to log in as a different user + userWithoutPrivs.doAs(new PrivilegedExceptionAction<Void>() { + @Override + public Void run() throws Exception { + mac.getConnector("some_other_user", delegationToken); + return null; + } + }); + fail("Using a delegation token as a different user should throw an exception"); + } catch (UndeclaredThrowableException e) { + Throwable cause = e.getCause(); + assertNotNull(cause); + // We should get an AccumuloSecurityException from trying to use a delegation token for the wrong user + assertTrue("Expected cause to be AccumuloSecurityException, but was " + cause.getClass(), cause instanceof AccumuloSecurityException); + } + } + + @Test + public void testGetDelegationTokenDenied() throws Exception { + String newUser = testName.getMethodName(); + final File newUserKeytab = new File(kdc.getKeytabDir(), newUser + ".keytab"); + if (newUserKeytab.exists() && !newUserKeytab.delete()) { + log.warn("Unable to delete {}", newUserKeytab); + } + + // Create a new user + kdc.createPrincipal(newUserKeytab, newUser); + + final String qualifiedNewUser = kdc.qualifyUser(newUser); + + // Login as a normal user + UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(qualifiedNewUser, newUserKeytab.getAbsolutePath()); + try { + ugi.doAs(new PrivilegedExceptionAction<Void>() { + @Override + public Void run() throws Exception { + // As the "root" user, open up the connection and get a delegation token + Connector conn = mac.getConnector(qualifiedNewUser, new KerberosToken()); + log.info("Created connector as {}", qualifiedNewUser); + assertEquals(qualifiedNewUser, conn.whoami()); + + conn.securityOperations().getDelegationToken(new DelegationTokenConfig()); + return null; + } + }); + } catch (UndeclaredThrowableException ex) { + assertTrue(ex.getCause() instanceof AccumuloSecurityException); + } + } + + @Test + public void testRestartedMasterReusesSecretKey() throws Exception { + // Login as the "root" user + UserGroupInformation root = 
+    // Login as the "root" user
+    UserGroupInformation root = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
+    log.info("Logged in as {}", rootUser.getPrincipal());
+
+    // As the "root" user, open up the connection and get a delegation token
+    final AuthenticationToken delegationToken1 = root.doAs(new PrivilegedExceptionAction<AuthenticationToken>() {
+      @Override
+      public AuthenticationToken run() throws Exception {
+        Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
+        log.info("Created connector as {}", rootUser.getPrincipal());
+        assertEquals(rootUser.getPrincipal(), conn.whoami());
+
+        AuthenticationToken token = conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
+
+        assertTrue("Could not get tables with delegation token", mac.getConnector(rootUser.getPrincipal(), token).tableOperations().list().size() > 0);
+
+        return token;
+      }
+    });
+
+    log.info("Stopping master");
+    mac.getClusterControl().stop(ServerType.MASTER);
+    Thread.sleep(5000);
+    log.info("Restarting master");
+    mac.getClusterControl().start(ServerType.MASTER);
+
+    // Make sure our original token is still good
+    root.doAs(new PrivilegedExceptionAction<Void>() {
+      @Override
+      public Void run() throws Exception {
+        Connector conn = mac.getConnector(rootUser.getPrincipal(), delegationToken1);
+
+        assertTrue("Could not get tables with delegation token", conn.tableOperations().list().size() > 0);
+
+        return null;
+      }
+    });
+
+    // Get a new token, so we can compare the keyId on the second to the first
+    final AuthenticationToken delegationToken2 = root.doAs(new PrivilegedExceptionAction<AuthenticationToken>() {
+      @Override
+      public AuthenticationToken run() throws Exception {
+        Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
+        log.info("Created connector as {}", rootUser.getPrincipal());
+        assertEquals(rootUser.getPrincipal(), conn.whoami());
+
+        AuthenticationToken token = conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
+
+        assertTrue("Could not get tables with delegation token", mac.getConnector(rootUser.getPrincipal(), token).tableOperations().list().size() > 0);
+
+        return token;
+      }
+    });
+
+    // A restarted master should reuse the same secret key after a restart if the secret key hasn't expired (1 day by default)
+    DelegationTokenImpl dt1 = (DelegationTokenImpl) delegationToken1;
+    DelegationTokenImpl dt2 = (DelegationTokenImpl) delegationToken2;
+    assertEquals(dt1.getIdentifier().getKeyId(), dt2.getIdentifier().getKeyId());
+  }
+
+  @Test(expected = AccumuloException.class)
+  public void testDelegationTokenWithInvalidLifetime() throws Throwable {
+    // Login as the "root" user
+    UserGroupInformation root = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
+    log.info("Logged in as {}", rootUser.getPrincipal());
+
+    // As the "root" user, request a delegation token with an invalid lifetime
+    try {
+      root.doAs(new PrivilegedExceptionAction<AuthenticationToken>() {
+        @Override
+        public AuthenticationToken run() throws Exception {
+          Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
+          log.info("Created connector as {}", rootUser.getPrincipal());
+          assertEquals(rootUser.getPrincipal(), conn.whoami());
+
+          // Should fail: the requested lifetime exceeds the server's configured maximum
+          return conn.securityOperations().getDelegationToken(new DelegationTokenConfig().setTokenLifetime(Long.MAX_VALUE, TimeUnit.MILLISECONDS));
+        }
+      });
+    } catch (UndeclaredThrowableException e) {
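+      // UGI.doAs wraps checked exceptions like AccumuloException in an UndeclaredThrowableException; unwrap it so @Test(expected) sees the real cause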
+      Throwable cause = e.getCause();
+      if (null != cause) {
+        throw cause;
+      } else {
+        throw e;
+      }
+    }
+  }
+
+  @Test
+  public void testDelegationTokenWithReducedLifetime() throws Throwable {
+    // Login as the "root" user
+    UserGroupInformation root = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
+    log.info("Logged in as {}", rootUser.getPrincipal());
+
+    // As the "root" user, request a delegation token with a five-minute lifetime
+    final AuthenticationToken dt = root.doAs(new PrivilegedExceptionAction<AuthenticationToken>() {
+      @Override
+      public AuthenticationToken run() throws Exception {
+        Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
+        log.info("Created connector as {}", rootUser.getPrincipal());
+        assertEquals(rootUser.getPrincipal(), conn.whoami());
+
+        return conn.securityOperations().getDelegationToken(new DelegationTokenConfig().setTokenLifetime(5, TimeUnit.MINUTES));
+      }
+    });
+
+    AuthenticationTokenIdentifier identifier = ((DelegationTokenImpl) dt).getIdentifier();
+    assertTrue("Expected identifier to expire in no more than 5 minutes: " + identifier,
+        identifier.getExpirationDate() - identifier.getIssueDate() <= (5 * 60 * 1000));
+  }
+
+  @Test(expected = AccumuloSecurityException.class)
+  public void testRootUserHasIrrevocablePermissions() throws Exception {
+    // Login as the client (provided to `accumulo init` as the "root" user)
+    UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
+
+    final Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
+
+    // The server-side implementation should prevent the revocation of the 'root' user's system permissions
+    // because once they're gone, it's possible that they could never be restored.
+    conn.securityOperations().revokeSystemPermission(rootUser.getPrincipal(), SystemPermission.GRANT);
+  }
+
+  /**
+   * Creates a table, adds a record to it, and then compacts the table. A simple way to make sure that the system user exists (since the master does an RPC to
+   * the tserver which will create the system user if it doesn't already exist).
+   */
+  private void createTableWithDataAndCompact(Connector conn) throws TableNotFoundException, AccumuloSecurityException, AccumuloException, TableExistsException {
+    final String table = testName.getMethodName() + "_table";
+    conn.tableOperations().create(table);
+    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
+    Mutation m = new Mutation("a");
+    m.put("b", "c", "d");
+    bw.addMutation(m);
+    bw.close();
+    conn.tableOperations().compact(table, new CompactionConfig().setFlush(true).setWait(true));
+  }
+}
