http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/NativeMapIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/NativeMapIT.java b/test/src/test/java/org/apache/accumulo/test/functional/NativeMapIT.java
deleted file mode 100644
index 9175379..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/NativeMapIT.java
+++ /dev/null
@@ -1,613 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.Iterator;
-import java.util.Map.Entry;
-import java.util.NoSuchElementException;
-import java.util.Random;
-import java.util.TreeMap;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
-import org.apache.accumulo.core.util.Pair;
-import org.apache.accumulo.tserver.NativeMap;
-import org.apache.hadoop.io.Text;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-public class NativeMapIT {
-
-  private Key nk(int r) {
-    return new Key(new Text(String.format("r%09d", r)));
-  }
-
-  private Key nk(int r, int cf, int cq, int cv, int ts, boolean deleted) {
-    Key k = new Key(new Text(String.format("r%09d", r)), new Text(String.format("cf%09d", cf)), new Text(String.format("cq%09d", cq)), new Text(String.format(
-        "cv%09d", cv)), ts);
-
-    k.setDeleted(deleted);
-
-    return k;
-  }
-
-  private Value nv(int v) {
-    return new Value(String.format("r%09d", v).getBytes(UTF_8));
-  }
-
-  public static File nativeMapLocation() {
-    File projectDir = new File(System.getProperty("user.dir")).getParentFile();
-    File nativeMapDir = new File(projectDir, "server/native/target/accumulo-native-" + Constants.VERSION + "/accumulo-native-" + Constants.VERSION);
-    return nativeMapDir;
-  }
-
-  @BeforeClass
-  public static void setUp() {
-    NativeMap.loadNativeLib(Collections.singletonList(nativeMapLocation()));
-  }
-
-  private void verifyIterator(int start, int end, int valueOffset, Iterator<Entry<Key,Value>> iter) {
-    for (int i = start; i <= end; i++) {
-      assertTrue(iter.hasNext());
-      Entry<Key,Value> entry = iter.next();
-      assertEquals(nk(i), entry.getKey());
-      assertEquals(nv(i + valueOffset), entry.getValue());
-    }
-
-    assertFalse(iter.hasNext());
-  }
-
-  private void insertAndVerify(NativeMap nm, int start, int end, int valueOffset) {
-    for (int i = start; i <= end; i++) {
-      nm.put(nk(i), nv(i + valueOffset));
-    }
-
-    for (int i = start; i <= end; i++) {
-      Value v = nm.get(nk(i));
-      assertNotNull(v);
-      assertEquals(nv(i + valueOffset), v);
-
-      Iterator<Entry<Key,Value>> iter2 = nm.iterator(nk(i));
-      assertTrue(iter2.hasNext());
-      Entry<Key,Value> entry = iter2.next();
-      assertEquals(nk(i), entry.getKey());
-      assertEquals(nv(i + valueOffset), entry.getValue());
-    }
-
-    assertNull(nm.get(nk(start - 1)));
-
-    assertNull(nm.get(nk(end + 1)));
-
-    Iterator<Entry<Key,Value>> iter = nm.iterator();
-    verifyIterator(start, end, valueOffset, iter);
-
-    for (int i = start; i <= end; i++) {
-      iter = nm.iterator(nk(i));
-      verifyIterator(i, end, valueOffset, iter);
-
-      // lookup nonexistent key that falls after existing key
-      iter = nm.iterator(nk(i, 1, 1, 1, 1, false));
-      verifyIterator(i + 1, end, valueOffset, iter);
-    }
-
-    assertEquals(end - start + 1, nm.size());
-  }
-
-  private void insertAndVerifyExhaustive(NativeMap nm, int num, int run) {
-    for (int i = 0; i < num; i++) {
-      for (int j = 0; j < num; j++) {
-        for (int k = 0; k < num; k++) {
-          for (int l = 0; l < num; l++) {
-            for (int ts = 0; ts < num; ts++) {
-              Key key = nk(i, j, k, l, ts, true);
-              Value value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + true + "_" + run).getBytes(UTF_8));
-
-              nm.put(key, value);
-
-              key = nk(i, j, k, l, ts, false);
-              value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + false + "_" + run).getBytes(UTF_8));
-
-              nm.put(key, value);
-            }
-          }
-        }
-      }
-    }
-
-    Iterator<Entry<Key,Value>> iter = nm.iterator();
-
-    for (int i = 0; i < num; i++) {
-      for (int j = 0; j < num; j++) {
-        for (int k = 0; k < num; k++) {
-          for (int l = 0; l < num; l++) {
-            for (int ts = num - 1; ts >= 0; ts--) {
-              Key key = nk(i, j, k, l, ts, true);
-              Value value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + true + "_" + run).getBytes(UTF_8));
-
-              assertTrue(iter.hasNext());
-              Entry<Key,Value> entry = iter.next();
-              assertEquals(key, entry.getKey());
-              assertEquals(value, entry.getValue());
-
-              key = nk(i, j, k, l, ts, false);
-              value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + false + "_" + run).getBytes(UTF_8));
-
-              assertTrue(iter.hasNext());
-              entry = iter.next();
-              assertEquals(key, entry.getKey());
-              assertEquals(value, entry.getValue());
-            }
-          }
-        }
-      }
-    }
-
-    assertFalse(iter.hasNext());
-
-    for (int i = 0; i < num; i++) {
-      for (int j = 0; j < num; j++) {
-        for (int k = 0; k < num; k++) {
-          for (int l = 0; l < num; l++) {
-            for (int ts = 0; ts < num; ts++) {
-              Key key = nk(i, j, k, l, ts, true);
-              Value value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + true + "_" + run).getBytes(UTF_8));
-
-              assertEquals(value, nm.get(key));
-
-              Iterator<Entry<Key,Value>> iter2 = nm.iterator(key);
-              assertTrue(iter2.hasNext());
-              Entry<Key,Value> entry = iter2.next();
-              assertEquals(key, entry.getKey());
-              assertEquals(value, entry.getValue());
-
-              key = nk(i, j, k, l, ts, false);
-              value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + false + "_" + run).getBytes(UTF_8));
-
-              assertEquals(value, nm.get(key));
-
-              Iterator<Entry<Key,Value>> iter3 = nm.iterator(key);
-              assertTrue(iter3.hasNext());
-              Entry<Key,Value> entry2 = iter3.next();
-              assertEquals(key, entry2.getKey());
-              assertEquals(value, entry2.getValue());
-            }
-          }
-        }
-      }
-    }
-
-    assertEquals(num * num * num * num * num * 2, nm.size());
-  }
-
-  @Test
-  public void test1() {
-    NativeMap nm = new NativeMap();
-    Iterator<Entry<Key,Value>> iter = nm.iterator();
-    assertFalse(iter.hasNext());
-    nm.delete();
-  }
-
-  @Test
-  public void test2() {
-    NativeMap nm = new NativeMap();
-
-    insertAndVerify(nm, 1, 10, 0);
-    insertAndVerify(nm, 1, 10, 1);
-    insertAndVerify(nm, 1, 10, 2);
-
-    nm.delete();
-  }
-
-  @Test
-  public void test4() {
-    NativeMap nm = new NativeMap();
-
-    insertAndVerifyExhaustive(nm, 3, 0);
-    insertAndVerifyExhaustive(nm, 3, 1);
-
-    nm.delete();
-  }
-
-  @Test
-  public void test5() {
-    NativeMap nm = new NativeMap();
-
-    insertAndVerify(nm, 1, 10, 0);
-
-    Iterator<Entry<Key,Value>> iter = nm.iterator();
-    iter.next();
-
-    nm.delete();
-
-    try {
-      nm.put(nk(1), nv(1));
-      assertTrue(false);
-    } catch (IllegalStateException e) {
-
-    }
-
-    try {
-      nm.get(nk(1));
-      assertTrue(false);
-    } catch (IllegalStateException e) {
-
-    }
-
-    try {
-      nm.iterator();
-      assertTrue(false);
-    } catch (IllegalStateException e) {
-
-    }
-
-    try {
-      nm.iterator(nk(1));
-      assertTrue(false);
-    } catch (IllegalStateException e) {
-
-    }
-
-    try {
-      nm.size();
-      assertTrue(false);
-    } catch (IllegalStateException e) {
-
-    }
-
-    try {
-      iter.next();
-      assertTrue(false);
-    } catch (IllegalStateException e) {
-
-    }
-
-  }
-
-  @Test
-  public void test7() {
-    NativeMap nm = new NativeMap();
-
-    insertAndVerify(nm, 1, 10, 0);
-
-    nm.delete();
-
-    try {
-      nm.delete();
-      assertTrue(false);
-    } catch (IllegalStateException e) {
-
-    }
-  }
-
-  @Test
-  public void test8() {
-    // test verifies that native map sorts keys sharing some common prefix properly
-
-    NativeMap nm = new NativeMap();
-
-    TreeMap<Key,Value> tm = new TreeMap<Key,Value>();
-
-    tm.put(new Key(new Text("fo")), new Value(new byte[] {'0'}));
-    tm.put(new Key(new Text("foo")), new Value(new byte[] {'1'}));
-    tm.put(new Key(new Text("foo1")), new Value(new byte[] {'2'}));
-    tm.put(new Key(new Text("foo2")), new Value(new byte[] {'3'}));
-
-    for (Entry<Key,Value> entry : tm.entrySet()) {
-      nm.put(entry.getKey(), entry.getValue());
-    }
-
-    Iterator<Entry<Key,Value>> iter = nm.iterator();
-
-    for (Entry<Key,Value> entry : tm.entrySet()) {
-      assertTrue(iter.hasNext());
-      Entry<Key,Value> entry2 = iter.next();
-
-      assertEquals(entry.getKey(), entry2.getKey());
-      assertEquals(entry.getValue(), entry2.getValue());
-    }
-
-    assertFalse(iter.hasNext());
-
-    nm.delete();
-  }
-
-  @Test
-  public void test9() {
-    NativeMap nm = new NativeMap();
-
-    Iterator<Entry<Key,Value>> iter = nm.iterator();
-
-    try {
-      iter.next();
-      assertTrue(false);
-    } catch (NoSuchElementException e) {
-
-    }
-
-    insertAndVerify(nm, 1, 1, 0);
-
-    iter = nm.iterator();
-    iter.next();
-
-    try {
-      iter.next();
-      assertTrue(false);
-    } catch (NoSuchElementException e) {
-
-    }
-
-    nm.delete();
-  }
-
-  @Test
-  public void test10() {
-    int start = 1;
-    int end = 10000;
-
-    NativeMap nm = new NativeMap();
-    for (int i = start; i <= end; i++) {
-      nm.put(nk(i), nv(i));
-    }
-
-    long mem1 = nm.getMemoryUsed();
-
-    for (int i = start; i <= end; i++) {
-      nm.put(nk(i), nv(i));
-    }
-
-    long mem2 = nm.getMemoryUsed();
-
-    if (mem1 != mem2) {
-      throw new RuntimeException("Memory changed after inserting duplicate 
data " + mem1 + " " + mem2);
-    }
-
-    for (int i = start; i <= end; i++) {
-      nm.put(nk(i), nv(i));
-    }
-
-    long mem3 = nm.getMemoryUsed();
-
-    if (mem1 != mem3) {
-      throw new RuntimeException("Memory changed after inserting duplicate 
data " + mem1 + " " + mem3);
-    }
-
-    byte bigrow[] = new byte[1000000];
-    byte bigvalue[] = new byte[bigrow.length];
-
-    for (int i = 0; i < bigrow.length; i++) {
-      bigrow[i] = (byte) (0xff & (i % 256));
-      bigvalue[i] = bigrow[i];
-    }
-
-    nm.put(new Key(new Text(bigrow)), new Value(bigvalue));
-
-    long mem4 = nm.getMemoryUsed();
-
-    Value val = nm.get(new Key(new Text(bigrow)));
-    if (val == null || !val.equals(new Value(bigvalue))) {
-      throw new RuntimeException("Did not get expected big value");
-    }
-
-    nm.put(new Key(new Text(bigrow)), new Value(bigvalue));
-
-    long mem5 = nm.getMemoryUsed();
-
-    if (mem4 != mem5) {
-      throw new RuntimeException("Memory changed after inserting duplicate 
data " + mem4 + " " + mem5);
-    }
-
-    val = nm.get(new Key(new Text(bigrow)));
-    if (val == null || !val.equals(new Value(bigvalue))) {
-      throw new RuntimeException("Did not get expected big value");
-    }
-
-    nm.delete();
-  }
-
-  // random length random field
-  private static byte[] rlrf(Random r, int maxLen) {
-    int len = r.nextInt(maxLen);
-
-    byte f[] = new byte[len];
-    r.nextBytes(f);
-
-    return f;
-  }
-
-  @Test
-  public void test11() {
-    NativeMap nm = new NativeMap();
-
-    // insert things with varying field sizes and value sizes
-
-    // generate random data
-    Random r = new Random(75);
-
-    ArrayList<Pair<Key,Value>> testData = new ArrayList<Pair<Key,Value>>();
-
-    for (int i = 0; i < 100000; i++) {
-
-      Key k = new Key(rlrf(r, 97), rlrf(r, 13), rlrf(r, 31), rlrf(r, 11), (r.nextLong() & 0x7fffffffffffffffl), false, false);
-      Value v = new Value(rlrf(r, 511));
-
-      testData.add(new Pair<Key,Value>(k, v));
-    }
-
-    // insert unsorted data
-    for (Pair<Key,Value> pair : testData) {
-      nm.put(pair.getFirst(), pair.getSecond());
-    }
-
-    for (int i = 0; i < 2; i++) {
-
-      // sort data
-      Collections.sort(testData, new Comparator<Pair<Key,Value>>() {
-        @Override
-        public int compare(Pair<Key,Value> o1, Pair<Key,Value> o2) {
-          return o1.getFirst().compareTo(o2.getFirst());
-        }
-      });
-
-      // verify
-      Iterator<Entry<Key,Value>> iter1 = nm.iterator();
-      Iterator<Pair<Key,Value>> iter2 = testData.iterator();
-
-      while (iter1.hasNext() && iter2.hasNext()) {
-        Entry<Key,Value> e = iter1.next();
-        Pair<Key,Value> p = iter2.next();
-
-        if (!e.getKey().equals(p.getFirst()))
-          throw new RuntimeException("Keys not equal");
-
-        if (!e.getValue().equals(p.getSecond()))
-          throw new RuntimeException("Values not equal");
-      }
-
-      if (iter1.hasNext())
-        throw new RuntimeException("Not all of native map consumed");
-
-      if (iter2.hasNext())
-        throw new RuntimeException("Not all of test data consumed");
-
-      System.out.println("test 11 nm mem " + nm.getMemoryUsed());
-
-      // insert data again w/ different value
-      Collections.shuffle(testData, r);
-      // insert unsorted data
-      for (Pair<Key,Value> pair : testData) {
-        pair.getSecond().set(rlrf(r, 511));
-        nm.put(pair.getFirst(), pair.getSecond());
-      }
-    }
-
-    nm.delete();
-  }
-
-  @Test
-  public void testBinary() {
-    NativeMap nm = new NativeMap();
-
-    byte emptyBytes[] = new byte[0];
-
-    for (int i = 0; i < 256; i++) {
-      for (int j = 0; j < 256; j++) {
-        byte row[] = new byte[] {'r', (byte) (0xff & i), (byte) (0xff & j)};
-        byte data[] = new byte[] {'v', (byte) (0xff & i), (byte) (0xff & j)};
-
-        Key k = new Key(row, emptyBytes, emptyBytes, emptyBytes, 1);
-        Value v = new Value(data);
-
-        nm.put(k, v);
-      }
-    }
-
-    Iterator<Entry<Key,Value>> iter = nm.iterator();
-    for (int i = 0; i < 256; i++) {
-      for (int j = 0; j < 256; j++) {
-        byte row[] = new byte[] {'r', (byte) (0xff & i), (byte) (0xff & j)};
-        byte data[] = new byte[] {'v', (byte) (0xff & i), (byte) (0xff & j)};
-
-        Key k = new Key(row, emptyBytes, emptyBytes, emptyBytes, 1);
-        Value v = new Value(data);
-
-        assertTrue(iter.hasNext());
-        Entry<Key,Value> entry = iter.next();
-
-        assertEquals(k, entry.getKey());
-        assertEquals(v, entry.getValue());
-
-      }
-    }
-
-    assertFalse(iter.hasNext());
-
-    for (int i = 0; i < 256; i++) {
-      for (int j = 0; j < 256; j++) {
-        byte row[] = new byte[] {'r', (byte) (0xff & i), (byte) (0xff & j)};
-        byte data[] = new byte[] {'v', (byte) (0xff & i), (byte) (0xff & j)};
-
-        Key k = new Key(row, emptyBytes, emptyBytes, emptyBytes, 1);
-        Value v = new Value(data);
-
-        Value v2 = nm.get(k);
-
-        assertEquals(v, v2);
-      }
-    }
-
-    nm.delete();
-  }
-
-  @Test
-  public void testEmpty() {
-    NativeMap nm = new NativeMap();
-
-    assertTrue(nm.size() == 0);
-    assertTrue(nm.getMemoryUsed() == 0);
-
-    nm.delete();
-  }
-
-  @Test
-  public void testConcurrentIter() throws IOException {
-    NativeMap nm = new NativeMap();
-
-    nm.put(nk(0), nv(0));
-    nm.put(nk(1), nv(1));
-    nm.put(nk(3), nv(3));
-
-    SortedKeyValueIterator<Key,Value> iter = nm.skvIterator();
-
-    // modify map after iter created
-    nm.put(nk(2), nv(2));
-
-    assertTrue(iter.hasTop());
-    assertEquals(iter.getTopKey(), nk(0));
-    iter.next();
-
-    assertTrue(iter.hasTop());
-    assertEquals(iter.getTopKey(), nk(1));
-    iter.next();
-
-    assertTrue(iter.hasTop());
-    assertEquals(iter.getTopKey(), nk(2));
-    iter.next();
-
-    assertTrue(iter.hasTop());
-    assertEquals(iter.getTopKey(), nk(3));
-    iter.next();
-
-    assertFalse(iter.hasTop());
-
-    nm.delete();
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/PermissionsIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/PermissionsIT.java b/test/src/test/java/org/apache/accumulo/test/functional/PermissionsIT.java
deleted file mode 100644
index 8700891..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/PermissionsIT.java
+++ /dev/null
@@ -1,707 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-
-import org.apache.accumulo.cluster.ClusterUser;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.security.SecurityErrorCode;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.SystemPermission;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-// This test verifies the default permissions so a clean instance must be used. A shared instance might
-// not be representative of a fresh installation.
-public class PermissionsIT extends AccumuloClusterHarness {
-  private static final Logger log = LoggerFactory.getLogger(PermissionsIT.class);
-
-  @Override
-  public int defaultTimeoutSeconds() {
-    return 60;
-  }
-
-  @Before
-  public void limitToMini() throws Exception {
-    Assume.assumeTrue(ClusterType.MINI == getClusterType());
-    Connector c = getConnector();
-    Set<String> users = c.securityOperations().listLocalUsers();
-    ClusterUser user = getUser(0);
-    if (users.contains(user.getPrincipal())) {
-      c.securityOperations().dropLocalUser(user.getPrincipal());
-    }
-  }
-
-  private void loginAs(ClusterUser user) throws IOException {
-    // Force a re-login as the provided user
-    user.getToken();
-  }
-
-  @Test
-  public void systemPermissionsTest() throws Exception {
-    ClusterUser testUser = getUser(0), rootUser = getAdminUser();
-
-    // verify that the test is being run by root
-    Connector c = getConnector();
-    verifyHasOnlyTheseSystemPermissions(c, c.whoami(), SystemPermission.values());
-
-    // create the test user
-    String principal = testUser.getPrincipal();
-    AuthenticationToken token = testUser.getToken();
-    PasswordToken passwordToken = null;
-    if (token instanceof PasswordToken) {
-      passwordToken = (PasswordToken) token;
-    }
-    loginAs(rootUser);
-    c.securityOperations().createLocalUser(principal, passwordToken);
-    loginAs(testUser);
-    Connector test_user_conn = c.getInstance().getConnector(principal, token);
-    loginAs(rootUser);
-    verifyHasNoSystemPermissions(c, principal, SystemPermission.values());
-
-    // test each permission
-    for (SystemPermission perm : SystemPermission.values()) {
-      log.debug("Verifying the " + perm + " permission");
-
-      // test permission before and after granting it
-      String tableNamePrefix = getUniqueNames(1)[0];
-      testMissingSystemPermission(tableNamePrefix, c, rootUser, test_user_conn, testUser, perm);
-      loginAs(rootUser);
-      c.securityOperations().grantSystemPermission(principal, perm);
-      verifyHasOnlyTheseSystemPermissions(c, principal, perm);
-      testGrantedSystemPermission(tableNamePrefix, c, rootUser, test_user_conn, testUser, perm);
-      loginAs(rootUser);
-      c.securityOperations().revokeSystemPermission(principal, perm);
-      verifyHasNoSystemPermissions(c, principal, perm);
-    }
-  }
-
-  static Map<String,String> map(Iterable<Entry<String,String>> i) {
-    Map<String,String> result = new HashMap<String,String>();
-    for (Entry<String,String> e : i) {
-      result.put(e.getKey(), e.getValue());
-    }
-    return result;
-  }
-
-  private void testMissingSystemPermission(String tableNamePrefix, Connector root_conn, ClusterUser rootUser, Connector test_user_conn, ClusterUser testUser,
-      SystemPermission perm) throws Exception {
-    String tableName, user, password = "password", namespace;
-    boolean passwordBased = testUser.getPassword() != null;
-    log.debug("Confirming that the lack of the " + perm + " permission 
properly restricts the user");
-
-    // test permission prior to granting it
-    switch (perm) {
-      case CREATE_TABLE:
-        tableName = tableNamePrefix + "__CREATE_TABLE_WITHOUT_PERM_TEST__";
-        try {
-          loginAs(testUser);
-          test_user_conn.tableOperations().create(tableName);
-          throw new IllegalStateException("Should NOT be able to create a 
table");
-        } catch (AccumuloSecurityException e) {
-          loginAs(rootUser);
-          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || root_conn.tableOperations().list().contains(tableName))
-            throw e;
-        }
-        break;
-      case DROP_TABLE:
-        tableName = tableNamePrefix + "__DROP_TABLE_WITHOUT_PERM_TEST__";
-        loginAs(rootUser);
-        root_conn.tableOperations().create(tableName);
-        try {
-          loginAs(testUser);
-          test_user_conn.tableOperations().delete(tableName);
-          throw new IllegalStateException("Should NOT be able to delete a 
table");
-        } catch (AccumuloSecurityException e) {
-          loginAs(rootUser);
-          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || !root_conn.tableOperations().list().contains(tableName))
-            throw e;
-        }
-        break;
-      case ALTER_TABLE:
-        tableName = tableNamePrefix + "__ALTER_TABLE_WITHOUT_PERM_TEST__";
-        loginAs(rootUser);
-        root_conn.tableOperations().create(tableName);
-        try {
-          loginAs(testUser);
-          test_user_conn.tableOperations().setProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
-          throw new IllegalStateException("Should NOT be able to set a table 
property");
-        } catch (AccumuloSecurityException e) {
-          loginAs(rootUser);
-          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
-              || map(root_conn.tableOperations().getProperties(tableName)).get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
-            throw e;
-        }
-        loginAs(rootUser);
-        root_conn.tableOperations().setProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
-        try {
-          loginAs(testUser);
-          test_user_conn.tableOperations().removeProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey());
-          throw new IllegalStateException("Should NOT be able to remove a 
table property");
-        } catch (AccumuloSecurityException e) {
-          loginAs(rootUser);
-          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
-              || !map(root_conn.tableOperations().getProperties(tableName)).get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
-            throw e;
-        }
-        String table2 = tableName + "2";
-        try {
-          loginAs(testUser);
-          test_user_conn.tableOperations().rename(tableName, table2);
-          throw new IllegalStateException("Should NOT be able to rename a 
table");
-        } catch (AccumuloSecurityException e) {
-          loginAs(rootUser);
-          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || !root_conn.tableOperations().list().contains(tableName)
-              || root_conn.tableOperations().list().contains(table2))
-            throw e;
-        }
-        break;
-      case CREATE_USER:
-        user = "__CREATE_USER_WITHOUT_PERM_TEST__";
-        try {
-          loginAs(testUser);
-          test_user_conn.securityOperations().createLocalUser(user, (passwordBased ? new PasswordToken(password) : null));
-          throw new IllegalStateException("Should NOT be able to create a 
user");
-        } catch (AccumuloSecurityException e) {
-          AuthenticationToken userToken = testUser.getToken();
-          loginAs(rootUser);
-          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
-              || (userToken instanceof PasswordToken && root_conn.securityOperations().authenticateUser(user, userToken)))
-            throw e;
-        }
-        break;
-      case DROP_USER:
-        user = "__DROP_USER_WITHOUT_PERM_TEST__";
-        loginAs(rootUser);
-        root_conn.securityOperations().createLocalUser(user, (passwordBased ? new PasswordToken(password) : null));
-        try {
-          loginAs(testUser);
-          test_user_conn.securityOperations().dropLocalUser(user);
-          throw new IllegalStateException("Should NOT be able to delete a 
user");
-        } catch (AccumuloSecurityException e) {
-          loginAs(rootUser);
-          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || !root_conn.securityOperations().listLocalUsers().contains(user)) {
-            log.info("Failed to authenticate as " + user);
-            throw e;
-          }
-        }
-        break;
-      case ALTER_USER:
-        user = "__ALTER_USER_WITHOUT_PERM_TEST__";
-        loginAs(rootUser);
-        root_conn.securityOperations().createLocalUser(user, (passwordBased ? new PasswordToken(password) : null));
-        try {
-          loginAs(testUser);
-          test_user_conn.securityOperations().changeUserAuthorizations(user, new Authorizations("A", "B"));
-          throw new IllegalStateException("Should NOT be able to alter a 
user");
-        } catch (AccumuloSecurityException e) {
-          loginAs(rootUser);
-          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || !root_conn.securityOperations().getUserAuthorizations(user).isEmpty())
-            throw e;
-        }
-        break;
-      case SYSTEM:
-        // test for system permission would go here
-        break;
-      case CREATE_NAMESPACE:
-        namespace = "__CREATE_NAMESPACE_WITHOUT_PERM_TEST__";
-        try {
-          loginAs(testUser);
-          test_user_conn.namespaceOperations().create(namespace);
-          throw new IllegalStateException("Should NOT be able to create a 
namespace");
-        } catch (AccumuloSecurityException e) {
-          loginAs(rootUser);
-          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || root_conn.namespaceOperations().list().contains(namespace))
-            throw e;
-        }
-        break;
-      case DROP_NAMESPACE:
-        namespace = "__DROP_NAMESPACE_WITHOUT_PERM_TEST__";
-        loginAs(rootUser);
-        root_conn.namespaceOperations().create(namespace);
-        try {
-          loginAs(testUser);
-          test_user_conn.namespaceOperations().delete(namespace);
-          throw new IllegalStateException("Should NOT be able to delete a 
namespace");
-        } catch (AccumuloSecurityException e) {
-          loginAs(rootUser);
-          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || !root_conn.namespaceOperations().list().contains(namespace))
-            throw e;
-        }
-        break;
-      case ALTER_NAMESPACE:
-        namespace = "__ALTER_NAMESPACE_WITHOUT_PERM_TEST__";
-        loginAs(rootUser);
-        root_conn.namespaceOperations().create(namespace);
-        try {
-          loginAs(testUser);
-          test_user_conn.namespaceOperations().setProperty(namespace, Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
-          throw new IllegalStateException("Should NOT be able to set a 
namespace property");
-        } catch (AccumuloSecurityException e) {
-          loginAs(rootUser);
-          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
-              || map(root_conn.namespaceOperations().getProperties(namespace)).get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
-            throw e;
-        }
-        loginAs(rootUser);
-        root_conn.namespaceOperations().setProperty(namespace, Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
-        try {
-          loginAs(testUser);
-          test_user_conn.namespaceOperations().removeProperty(namespace, Property.TABLE_BLOOM_ERRORRATE.getKey());
-          throw new IllegalStateException("Should NOT be able to remove a 
namespace property");
-        } catch (AccumuloSecurityException e) {
-          loginAs(rootUser);
-          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
-              || !map(root_conn.namespaceOperations().getProperties(namespace)).get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
-            throw e;
-        }
-        String namespace2 = namespace + "2";
-        try {
-          loginAs(testUser);
-          test_user_conn.namespaceOperations().rename(namespace, namespace2);
-          throw new IllegalStateException("Should NOT be able to rename a 
namespace");
-        } catch (AccumuloSecurityException e) {
-          loginAs(rootUser);
-          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || !root_conn.namespaceOperations().list().contains(namespace)
-              || root_conn.namespaceOperations().list().contains(namespace2))
-            throw e;
-        }
-        break;
-      case OBTAIN_DELEGATION_TOKEN:
-        ClientConfiguration clientConf = cluster.getClientConfig();
-        if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
-          // TODO Try to obtain a delegation token without the permission
-        }
-        break;
-      case GRANT:
-        loginAs(testUser);
-        try {
-          test_user_conn.securityOperations().grantSystemPermission(testUser.getPrincipal(), SystemPermission.GRANT);
-          throw new IllegalStateException("Should NOT be able to grant 
System.GRANT to yourself");
-        } catch (AccumuloSecurityException e) {
-          // Expected
-          loginAs(rootUser);
-          assertFalse(root_conn.securityOperations().hasSystemPermission(testUser.getPrincipal(), SystemPermission.GRANT));
-        }
-        break;
-      default:
-        throw new IllegalArgumentException("Unrecognized System Permission: " 
+ perm);
-    }
-  }
-
-  private void testGrantedSystemPermission(String tableNamePrefix, Connector root_conn, ClusterUser rootUser, Connector test_user_conn, ClusterUser testUser,
-      SystemPermission perm) throws Exception {
-    String tableName, user, password = "password", namespace;
-    boolean passwordBased = testUser.getPassword() != null;
-    log.debug("Confirming that the presence of the " + perm + " permission 
properly permits the user");
-
-    // test permission after granting it
-    switch (perm) {
-      case CREATE_TABLE:
-        tableName = tableNamePrefix + "__CREATE_TABLE_WITH_PERM_TEST__";
-        loginAs(testUser);
-        test_user_conn.tableOperations().create(tableName);
-        loginAs(rootUser);
-        if (!root_conn.tableOperations().list().contains(tableName))
-          throw new IllegalStateException("Should be able to create a table");
-        break;
-      case DROP_TABLE:
-        tableName = tableNamePrefix + "__DROP_TABLE_WITH_PERM_TEST__";
-        loginAs(rootUser);
-        root_conn.tableOperations().create(tableName);
-        loginAs(testUser);
-        test_user_conn.tableOperations().delete(tableName);
-        loginAs(rootUser);
-        if (root_conn.tableOperations().list().contains(tableName))
-          throw new IllegalStateException("Should be able to delete a table");
-        break;
-      case ALTER_TABLE:
-        tableName = tableNamePrefix + "__ALTER_TABLE_WITH_PERM_TEST__";
-        String table2 = tableName + "2";
-        loginAs(rootUser);
-        root_conn.tableOperations().create(tableName);
-        loginAs(testUser);
-        test_user_conn.tableOperations().setProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
-        loginAs(rootUser);
-        Map<String,String> properties = map(root_conn.tableOperations().getProperties(tableName));
-        if (!properties.get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
-          throw new IllegalStateException("Should be able to set a table 
property");
-        loginAs(testUser);
-        test_user_conn.tableOperations().removeProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey());
-        loginAs(rootUser);
-        properties = map(root_conn.tableOperations().getProperties(tableName));
-        if (properties.get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
-          throw new IllegalStateException("Should be able to remove a table 
property");
-        loginAs(testUser);
-        test_user_conn.tableOperations().rename(tableName, table2);
-        loginAs(rootUser);
-        if (root_conn.tableOperations().list().contains(tableName) || !root_conn.tableOperations().list().contains(table2))
-          throw new IllegalStateException("Should be able to rename a table");
-        break;
-      case CREATE_USER:
-        user = "__CREATE_USER_WITH_PERM_TEST__";
-        loginAs(testUser);
-        test_user_conn.securityOperations().createLocalUser(user, (passwordBased ? new PasswordToken(password) : null));
-        loginAs(rootUser);
-        if (passwordBased && !root_conn.securityOperations().authenticateUser(user, new PasswordToken(password)))
-          throw new IllegalStateException("Should be able to create a user");
-        break;
-      case DROP_USER:
-        user = "__DROP_USER_WITH_PERM_TEST__";
-        loginAs(rootUser);
-        root_conn.securityOperations().createLocalUser(user, (passwordBased ? new PasswordToken(password) : null));
-        loginAs(testUser);
-        test_user_conn.securityOperations().dropLocalUser(user);
-        loginAs(rootUser);
-        if (passwordBased && root_conn.securityOperations().authenticateUser(user, new PasswordToken(password)))
-          throw new IllegalStateException("Should be able to delete a user");
-        break;
-      case ALTER_USER:
-        user = "__ALTER_USER_WITH_PERM_TEST__";
-        loginAs(rootUser);
-        root_conn.securityOperations().createLocalUser(user, (passwordBased ? new PasswordToken(password) : null));
-        loginAs(testUser);
-        test_user_conn.securityOperations().changeUserAuthorizations(user, new Authorizations("A", "B"));
-        loginAs(rootUser);
-        if (root_conn.securityOperations().getUserAuthorizations(user).isEmpty())
-          throw new IllegalStateException("Should be able to alter a user");
-        break;
-      case SYSTEM:
-        // test for system permission would go here
-        break;
-      case CREATE_NAMESPACE:
-        namespace = "__CREATE_NAMESPACE_WITH_PERM_TEST__";
-        loginAs(testUser);
-        test_user_conn.namespaceOperations().create(namespace);
-        loginAs(rootUser);
-        if (!root_conn.namespaceOperations().list().contains(namespace))
-          throw new IllegalStateException("Should be able to create a 
namespace");
-        break;
-      case DROP_NAMESPACE:
-        namespace = "__DROP_NAMESPACE_WITH_PERM_TEST__";
-        loginAs(rootUser);
-        root_conn.namespaceOperations().create(namespace);
-        loginAs(testUser);
-        test_user_conn.namespaceOperations().delete(namespace);
-        loginAs(rootUser);
-        if (root_conn.namespaceOperations().list().contains(namespace))
-          throw new IllegalStateException("Should be able to delete a 
namespace");
-        break;
-      case ALTER_NAMESPACE:
-        namespace = "__ALTER_NAMESPACE_WITH_PERM_TEST__";
-        String namespace2 = namespace + "2";
-        loginAs(rootUser);
-        root_conn.namespaceOperations().create(namespace);
-        loginAs(testUser);
-        test_user_conn.namespaceOperations().setProperty(namespace, Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
-        loginAs(rootUser);
-        Map<String,String> propies = map(root_conn.namespaceOperations().getProperties(namespace));
-        if (!propies.get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
-          throw new IllegalStateException("Should be able to set a table 
property");
-        loginAs(testUser);
-        test_user_conn.namespaceOperations().removeProperty(namespace, Property.TABLE_BLOOM_ERRORRATE.getKey());
-        loginAs(rootUser);
-        propies = map(root_conn.namespaceOperations().getProperties(namespace));
-        if (propies.get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
-          throw new IllegalStateException("Should be able to remove a table 
property");
-        loginAs(testUser);
-        test_user_conn.namespaceOperations().rename(namespace, namespace2);
-        loginAs(rootUser);
-        if (root_conn.namespaceOperations().list().contains(namespace) || !root_conn.namespaceOperations().list().contains(namespace2))
-          throw new IllegalStateException("Should be able to rename a namespace");
-        break;
-      case OBTAIN_DELEGATION_TOKEN:
-        ClientConfiguration clientConf = cluster.getClientConfig();
-        if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
-          // TODO Try to obtain a delegation token with the permission
-        }
-        break;
-      case GRANT:
-        loginAs(rootUser);
-        root_conn.securityOperations().grantSystemPermission(testUser.getPrincipal(), SystemPermission.GRANT);
-        loginAs(testUser);
-        test_user_conn.securityOperations().grantSystemPermission(testUser.getPrincipal(), SystemPermission.CREATE_TABLE);
-        loginAs(rootUser);
-        assertTrue("Test user should have CREATE_TABLE",
-            root_conn.securityOperations().hasSystemPermission(testUser.getPrincipal(), SystemPermission.CREATE_TABLE));
-        assertTrue("Test user should have GRANT", 
root_conn.securityOperations().hasSystemPermission(testUser.getPrincipal(), 
SystemPermission.GRANT));
-        root_conn.securityOperations().revokeSystemPermission(testUser.getPrincipal(), SystemPermission.CREATE_TABLE);
-        break;
-      default:
-        throw new IllegalArgumentException("Unrecognized System Permission: " 
+ perm);
-    }
-  }
-
-  private void verifyHasOnlyTheseSystemPermissions(Connector root_conn, String user, SystemPermission... perms) throws AccumuloException,
-      AccumuloSecurityException {
-    List<SystemPermission> permList = Arrays.asList(perms);
-    for (SystemPermission p : SystemPermission.values()) {
-      if (permList.contains(p)) {
-        // should have these
-        if (!root_conn.securityOperations().hasSystemPermission(user, p))
-          throw new IllegalStateException(user + " SHOULD have system 
permission " + p);
-      } else {
-        // should not have these
-        if (root_conn.securityOperations().hasSystemPermission(user, p))
-          throw new IllegalStateException(user + " SHOULD NOT have system 
permission " + p);
-      }
-    }
-  }
-
-  private void verifyHasNoSystemPermissions(Connector root_conn, String user, SystemPermission... perms) throws AccumuloException, AccumuloSecurityException {
-    for (SystemPermission p : perms)
-      if (root_conn.securityOperations().hasSystemPermission(user, p))
-        throw new IllegalStateException(user + " SHOULD NOT have system 
permission " + p);
-  }
-
-  @Test
-  public void tablePermissionTest() throws Exception {
-    // create the test user
-    ClusterUser testUser = getUser(0), rootUser = getAdminUser();
-
-    String principal = testUser.getPrincipal();
-    AuthenticationToken token = testUser.getToken();
-    PasswordToken passwordToken = null;
-    if (token instanceof PasswordToken) {
-      passwordToken = (PasswordToken) token;
-    }
-    loginAs(rootUser);
-    Connector c = getConnector();
-    c.securityOperations().createLocalUser(principal, passwordToken);
-    loginAs(testUser);
-    Connector test_user_conn = c.getInstance().getConnector(principal, token);
-
-    // check for read-only access to metadata table
-    loginAs(rootUser);
-    verifyHasOnlyTheseTablePermissions(c, c.whoami(), MetadataTable.NAME, TablePermission.READ, TablePermission.ALTER_TABLE);
-    verifyHasOnlyTheseTablePermissions(c, principal, MetadataTable.NAME, TablePermission.READ);
-    String tableName = getUniqueNames(1)[0] + "__TABLE_PERMISSION_TEST__";
-
-    // test each permission
-    for (TablePermission perm : TablePermission.values()) {
-      log.debug("Verifying the " + perm + " permission");
-
-      // test permission before and after granting it
-      createTestTable(c, principal, tableName);
-      loginAs(testUser);
-      testMissingTablePermission(test_user_conn, testUser, perm, tableName);
-      loginAs(rootUser);
-      c.securityOperations().grantTablePermission(principal, tableName, perm);
-      verifyHasOnlyTheseTablePermissions(c, principal, tableName, perm);
-      loginAs(testUser);
-      testGrantedTablePermission(test_user_conn, testUser, perm, tableName);
-
-      loginAs(rootUser);
-      createTestTable(c, principal, tableName);
-      c.securityOperations().revokeTablePermission(principal, tableName, perm);
-      verifyHasNoTablePermissions(c, principal, tableName, perm);
-    }
-  }
-
-  private void createTestTable(Connector c, String testUser, String tableName) throws Exception, MutationsRejectedException {
-    if (!c.tableOperations().exists(tableName)) {
-      // create the test table
-      c.tableOperations().create(tableName);
-      // put in some initial data
-      BatchWriter writer = c.createBatchWriter(tableName, new BatchWriterConfig());
-      Mutation m = new Mutation(new Text("row"));
-      m.put(new Text("cf"), new Text("cq"), new Value("val".getBytes()));
-      writer.addMutation(m);
-      writer.close();
-
-      // verify proper permissions for creator and test user
-      verifyHasOnlyTheseTablePermissions(c, c.whoami(), tableName, TablePermission.values());
-      verifyHasNoTablePermissions(c, testUser, tableName, TablePermission.values());
-
-    }
-  }
-
-  private void testMissingTablePermission(Connector test_user_conn, ClusterUser testUser, TablePermission perm, String tableName) throws Exception {
-    Scanner scanner;
-    BatchWriter writer;
-    Mutation m;
-    log.debug("Confirming that the lack of the " + perm + " permission 
properly restricts the user");
-
-    // test permission prior to granting it
-    switch (perm) {
-      case READ:
-        try {
-          scanner = test_user_conn.createScanner(tableName, Authorizations.EMPTY);
-          int i = 0;
-          for (Entry<Key,Value> entry : scanner)
-            i += 1 + entry.getKey().getRowData().length();
-          if (i != 0)
-            throw new IllegalStateException("Should NOT be able to read from 
the table");
-        } catch (RuntimeException e) {
-          AccumuloSecurityException se = (AccumuloSecurityException) e.getCause();
-          if (se.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
-            throw se;
-        }
-        break;
-      case WRITE:
-        try {
-          writer = test_user_conn.createBatchWriter(tableName, new BatchWriterConfig());
-          m = new Mutation(new Text("row"));
-          m.put(new Text("a"), new Text("b"), new Value("c".getBytes()));
-          writer.addMutation(m);
-          try {
-            writer.close();
-          } catch (MutationsRejectedException e1) {
-            if (e1.getSecurityErrorCodes().size() > 0)
-              throw new AccumuloSecurityException(test_user_conn.whoami(), org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode.PERMISSION_DENIED, e1);
-          }
-          throw new IllegalStateException("Should NOT be able to write to a 
table");
-        } catch (AccumuloSecurityException e) {
-          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
-            throw e;
-        }
-        break;
-      case BULK_IMPORT:
-        // test for bulk import permission would go here
-        break;
-      case ALTER_TABLE:
-        Map<String,Set<Text>> groups = new HashMap<String,Set<Text>>();
-        groups.put("tgroup", new HashSet<Text>(Arrays.asList(new Text("t1"), 
new Text("t2"))));
-        try {
-          test_user_conn.tableOperations().setLocalityGroups(tableName, groups);
-          throw new IllegalStateException("User should not be able to set 
locality groups");
-        } catch (AccumuloSecurityException e) {
-          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
-            throw e;
-        }
-        break;
-      case DROP_TABLE:
-        try {
-          test_user_conn.tableOperations().delete(tableName);
-          throw new IllegalStateException("User should not be able delete the 
table");
-        } catch (AccumuloSecurityException e) {
-          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
-            throw e;
-        }
-        break;
-      case GRANT:
-        try {
-          test_user_conn.securityOperations().grantTablePermission(getAdminPrincipal(), tableName, TablePermission.GRANT);
-          throw new IllegalStateException("User should not be able grant 
permissions");
-        } catch (AccumuloSecurityException e) {
-          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
-            throw e;
-        }
-        break;
-      default:
-        throw new IllegalArgumentException("Unrecognized table Permission: " + 
perm);
-    }
-  }
-
-  private void testGrantedTablePermission(Connector test_user_conn, ClusterUser normalUser, TablePermission perm, String tableName) throws AccumuloException,
-      TableExistsException, AccumuloSecurityException, TableNotFoundException, MutationsRejectedException {
-    Scanner scanner;
-    BatchWriter writer;
-    Mutation m;
-    log.debug("Confirming that the presence of the " + perm + " permission 
properly permits the user");
-
-    // test permission after granting it
-    switch (perm) {
-      case READ:
-        scanner = test_user_conn.createScanner(tableName, Authorizations.EMPTY);
-        Iterator<Entry<Key,Value>> iter = scanner.iterator();
-        while (iter.hasNext())
-          iter.next();
-        break;
-      case WRITE:
-        writer = test_user_conn.createBatchWriter(tableName, new BatchWriterConfig());
-        m = new Mutation(new Text("row"));
-        m.put(new Text("a"), new Text("b"), new Value("c".getBytes()));
-        writer.addMutation(m);
-        writer.close();
-        break;
-      case BULK_IMPORT:
-        // test for bulk import permission would go here
-        break;
-      case ALTER_TABLE:
-        Map<String,Set<Text>> groups = new HashMap<String,Set<Text>>();
-        groups.put("tgroup", new HashSet<Text>(Arrays.asList(new Text("t1"), 
new Text("t2"))));
-        break;
-      case DROP_TABLE:
-        test_user_conn.tableOperations().delete(tableName);
-        break;
-      case GRANT:
-        test_user_conn.securityOperations().grantTablePermission(getAdminPrincipal(), tableName, TablePermission.GRANT);
-        break;
-      default:
-        throw new IllegalArgumentException("Unrecognized table Permission: " + 
perm);
-    }
-  }
-
-  private void verifyHasOnlyTheseTablePermissions(Connector root_conn, String user, String table, TablePermission... perms) throws AccumuloException,
-      AccumuloSecurityException {
-    List<TablePermission> permList = Arrays.asList(perms);
-    for (TablePermission p : TablePermission.values()) {
-      if (permList.contains(p)) {
-        // should have these
-        if (!root_conn.securityOperations().hasTablePermission(user, table, p))
-          throw new IllegalStateException(user + " SHOULD have table 
permission " + p + " for table " + table);
-      } else {
-        // should not have these
-        if (root_conn.securityOperations().hasTablePermission(user, table, p))
-          throw new IllegalStateException(user + " SHOULD NOT have table 
permission " + p + " for table " + table);
-      }
-    }
-  }
-
-  private void verifyHasNoTablePermissions(Connector root_conn, String user, String table, TablePermission... perms) throws AccumuloException,
-      AccumuloSecurityException {
-    for (TablePermission p : perms)
-      if (root_conn.securityOperations().hasTablePermission(user, table, p))
-        throw new IllegalStateException(user + " SHOULD NOT have table 
permission " + p + " for table " + table);
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java
deleted file mode 100644
index 4ef2958..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java
+++ /dev/null
@@ -1,456 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.accumulo.cluster.ClusterControl;
-import org.apache.accumulo.cluster.standalone.StandaloneAccumuloCluster;
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
-import org.apache.accumulo.core.client.admin.TableOperations;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.client.security.tokens.KerberosToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.file.rfile.PrintInfo;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.MonitorUtil;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
-import org.apache.accumulo.fate.zookeeper.ZooCache;
-import org.apache.accumulo.fate.zookeeper.ZooLock;
-import org.apache.accumulo.fate.zookeeper.ZooReader;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.test.TestMultiTableIngest;
-import org.apache.accumulo.test.VerifyIngest;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Charsets;
-import com.google.common.collect.Iterators;
-
-public class ReadWriteIT extends AccumuloClusterHarness {
-  @Override
-  public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
-  }
-
-  private static final Logger log = LoggerFactory.getLogger(ReadWriteIT.class);
-
-  static final int ROWS = 200000;
-  static final int COLS = 1;
-  static final String COLF = "colf";
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 6 * 60;
-  }
-
-  @Test(expected = RuntimeException.class)
-  public void invalidInstanceName() throws Exception {
-    final Connector conn = getConnector();
-    new ZooKeeperInstance("fake_instance_name", conn.getInstance().getZooKeepers());
-  }
-
-  @Test
-  public void sunnyDay() throws Exception {
-    // Start accumulo, create a table, insert some data, verify we can read it out.
-    // Shutdown cleanly.
-    log.debug("Starting Monitor");
-    cluster.getClusterControl().startAllServers(ServerType.MONITOR);
-    Connector connector = getConnector();
-    String tableName = getUniqueNames(1)[0];
-    ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS, COLS, 50, 0, tableName);
-    verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS, COLS, 50, 0, tableName);
-    String monitorLocation = null;
-    while (null == monitorLocation) {
-      monitorLocation = MonitorUtil.getLocation(getConnector().getInstance());
-      if (null == monitorLocation) {
-        log.debug("Could not fetch monitor HTTP address from zookeeper");
-        Thread.sleep(2000);
-      }
-    }
-    URL url = new URL("http://" + monitorLocation);
-    log.debug("Fetching web page " + url);
-    String result = FunctionalTestUtils.readAll(url.openStream());
-    assertTrue(result.length() > 100);
-    log.debug("Stopping accumulo cluster");
-    ClusterControl control = cluster.getClusterControl();
-    control.adminStopAll();
-    ZooReader zreader = new ZooReader(connector.getInstance().getZooKeepers(), connector.getInstance().getZooKeepersSessionTimeOut());
-    ZooCache zcache = new ZooCache(zreader, null);
-    byte[] masterLockData;
-    do {
-      masterLockData = ZooLock.getLockData(zcache, ZooUtil.getRoot(connector.getInstance()) + Constants.ZMASTER_LOCK, null);
-      if (null != masterLockData) {
-        log.info("Master lock is still held");
-        Thread.sleep(1000);
-      }
-    } while (null != masterLockData);
-
-    control.stopAllServers(ServerType.GARBAGE_COLLECTOR);
-    control.stopAllServers(ServerType.MONITOR);
-    control.stopAllServers(ServerType.TRACER);
-    log.debug("success!");
-    // Restarting everything
-    cluster.start();
-  }
-
-  public static void ingest(Connector connector, ClientConfiguration clientConfig, String principal, int rows, int cols, int width, int offset, String tableName)
-      throws Exception {
-    ingest(connector, clientConfig, principal, rows, cols, width, offset, COLF, tableName);
-  }
-
-  public static void ingest(Connector connector, ClientConfiguration clientConfig, String principal, int rows, int cols, int width, int offset, String colf,
-      String tableName) throws Exception {
-    TestIngest.Opts opts = new TestIngest.Opts();
-    opts.rows = rows;
-    opts.cols = cols;
-    opts.dataSize = width;
-    opts.startRow = offset;
-    opts.columnFamily = colf;
-    opts.createTable = true;
-    opts.setTableName(tableName);
-    if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
-      opts.updateKerberosCredentials(clientConfig);
-    } else {
-      opts.setPrincipal(principal);
-    }
-
-    TestIngest.ingest(connector, opts, new BatchWriterOpts());
-  }
-
-  public static void verify(Connector connector, ClientConfiguration clientConfig, String principal, int rows, int cols, int width, int offset, String tableName)
-      throws Exception {
-    verify(connector, clientConfig, principal, rows, cols, width, offset, COLF, tableName);
-  }
-
-  private static void verify(Connector connector, ClientConfiguration clientConfig, String principal, int rows, int cols, int width, int offset, String colf,
-      String tableName) throws Exception {
-    ScannerOpts scannerOpts = new ScannerOpts();
-    VerifyIngest.Opts opts = new VerifyIngest.Opts();
-    opts.rows = rows;
-    opts.cols = cols;
-    opts.dataSize = width;
-    opts.startRow = offset;
-    opts.columnFamily = colf;
-    opts.setTableName(tableName);
-    if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
-      opts.updateKerberosCredentials(clientConfig);
-    } else {
-      opts.setPrincipal(principal);
-    }
-
-    VerifyIngest.verifyIngest(connector, opts, scannerOpts);
-  }
-
-  public static String[] args(String... args) {
-    return args;
-  }
-
-  @Test
-  public void multiTableTest() throws Exception {
-    // Write to multiple tables
-    final String instance = cluster.getInstanceName();
-    final String keepers = cluster.getZooKeepers();
-    final ClusterControl control = cluster.getClusterControl();
-    final String prefix = getClass().getSimpleName() + "_" + testName.getMethodName();
-    ExecutorService svc = Executors.newFixedThreadPool(2);
-    Future<Integer> p1 = svc.submit(new Callable<Integer>() {
-      @Override
-      public Integer call() {
-        try {
-          ClientConfiguration clientConf = cluster.getClientConfig();
-          // Invocation is different for SASL. We're only logged in via this process's memory (not via some credentials cache on disk).
-          // Need to pass along the keytab because of that.
-          if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
-            String principal = getAdminPrincipal();
-            AuthenticationToken token = getAdminToken();
-            assertTrue("Expected KerberosToken, but was " + token.getClass(), token instanceof KerberosToken);
-            KerberosToken kt = (KerberosToken) token;
-            assertNotNull("Expected keytab in token", kt.getKeytab());
-            return control.exec(
-                TestMultiTableIngest.class,
-                args("--count", Integer.toString(ROWS), "-i", instance, "-z", 
keepers, "--tablePrefix", prefix, "--keytab", kt.getKeytab().getAbsolutePath(),
-                    "-u", principal));
-          }
-
-          return control.exec(
-              TestMultiTableIngest.class,
-              args("--count", Integer.toString(ROWS), "-u", 
getAdminPrincipal(), "-i", instance, "-z", keepers, "-p", new String(
-                  ((PasswordToken) getAdminToken()).getPassword(), 
Charsets.UTF_8), "--tablePrefix", prefix));
-        } catch (IOException e) {
-          log.error("Error running MultiTableIngest", e);
-          return -1;
-        }
-      }
-    });
-    Future<Integer> p2 = svc.submit(new Callable<Integer>() {
-      @Override
-      public Integer call() {
-        try {
-          ClientConfiguration clientConf = cluster.getClientConfig();
-          // Invocation is different for SASL. We're only logged in via this process's memory (not via some credentials cache on disk).
-          // Need to pass along the keytab because of that.
-          if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
-            String principal = getAdminPrincipal();
-            AuthenticationToken token = getAdminToken();
-            assertTrue("Expected KerberosToken, but was " + token.getClass(), token instanceof KerberosToken);
-            KerberosToken kt = (KerberosToken) token;
-            assertNotNull("Expected keytab in token", kt.getKeytab());
-            return control.exec(
-                TestMultiTableIngest.class,
-                args("--count", Integer.toString(ROWS), "--readonly", "-i", 
instance, "-z", keepers, "--tablePrefix", prefix, "--keytab", kt.getKeytab()
-                    .getAbsolutePath(), "-u", principal));
-          }
-
-          return control.exec(
-              TestMultiTableIngest.class,
-              args("--count", Integer.toString(ROWS), "--readonly", "-u", 
getAdminPrincipal(), "-i", instance, "-z", keepers, "-p", new String(
-                  ((PasswordToken) getAdminToken()).getPassword(), 
Charsets.UTF_8), "--tablePrefix", prefix));
-        } catch (IOException e) {
-          log.error("Error running MultiTableIngest", e);
-          return -1;
-        }
-      }
-    });
-    svc.shutdown();
-    while (!svc.isTerminated()) {
-      svc.awaitTermination(15, TimeUnit.SECONDS);
-    }
-    assertEquals(0, p1.get().intValue());
-    assertEquals(0, p2.get().intValue());
-  }
-
-  @Test
-  public void largeTest() throws Exception {
-    // write a few large values
-    Connector connector = getConnector();
-    String table = getUniqueNames(1)[0];
-    ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), 2, 1, 500000, 0, table);
-    verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), 2, 1, 500000, 0, table);
-  }
-
-  @Test
-  public void interleaved() throws Exception {
-    // read and write concurrently
-    final Connector connector = getConnector();
-    final String tableName = getUniqueNames(1)[0];
-    interleaveTest(connector, tableName);
-  }
-
-  static void interleaveTest(final Connector connector, final String tableName) throws Exception {
-    final AtomicBoolean fail = new AtomicBoolean(false);
-    final int CHUNKSIZE = ROWS / 10;
-    ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), CHUNKSIZE, 1, 50, 0, tableName);
-    int i;
-    for (i = 0; i < ROWS; i += CHUNKSIZE) {
-      final int start = i;
-      Thread verify = new Thread() {
-        @Override
-        public void run() {
-          try {
-            verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), CHUNKSIZE, 1, 50, start, tableName);
-          } catch (Exception ex) {
-            fail.set(true);
-          }
-        }
-      };
-      verify.start();
-      ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), CHUNKSIZE, 1, 50, i + CHUNKSIZE, tableName);
-      verify.join();
-      assertFalse(fail.get());
-    }
-    verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), CHUNKSIZE, 1, 50, i, tableName);
-  }
-
-  public static Text t(String s) {
-    return new Text(s);
-  }
-
-  public static Mutation m(String row, String cf, String cq, String value) {
-    Mutation m = new Mutation(t(row));
-    m.put(t(cf), t(cq), new Value(value.getBytes(Charsets.UTF_8)));
-    return m;
-  }
-
-  @Test
-  public void localityGroupPerf() throws Exception {
-    // verify that locality groups can make look-ups faster
-    final Connector connector = getConnector();
-    final String tableName = getUniqueNames(1)[0];
-    connector.tableOperations().create(tableName);
-    connector.tableOperations().setProperty(tableName, "table.group.g1", "colf");
-    connector.tableOperations().setProperty(tableName, "table.groups.enabled", "g1");
-    ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), 2000, 1, 50, 0, tableName);
-    connector.tableOperations().compact(tableName, null, null, true, true);
-    BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
-    bw.addMutation(m("zzzzzzzzzzz", "colf2", "cq", "value"));
-    bw.close();
-    long now = System.currentTimeMillis();
-    Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY);
-    scanner.fetchColumnFamily(new Text("colf"));
-    Iterators.size(scanner.iterator());
-    long diff = System.currentTimeMillis() - now;
-    now = System.currentTimeMillis();
-    scanner = connector.createScanner(tableName, Authorizations.EMPTY);
-    scanner.fetchColumnFamily(new Text("colf2"));
-    Iterators.size(scanner.iterator());
-    long diff2 = System.currentTimeMillis() - now;
-    assertTrue(diff2 < diff);
-  }
-
-  @Test
-  public void sunnyLG() throws Exception {
-    // create a locality group, write to it and ensure it exists in the RFiles that result
-    final Connector connector = getConnector();
-    final String tableName = getUniqueNames(1)[0];
-    connector.tableOperations().create(tableName);
-    Map<String,Set<Text>> groups = new TreeMap<String,Set<Text>>();
-    groups.put("g1", Collections.singleton(t("colf")));
-    connector.tableOperations().setLocalityGroups(tableName, groups);
-    ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), 2000, 1, 50, 0, tableName);
-    verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), 2000, 1, 50, 0, tableName);
-    connector.tableOperations().flush(tableName, null, null, true);
-    BatchScanner bscanner = connector.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 1);
-    String tableId = connector.tableOperations().tableIdMap().get(tableName);
-    bscanner.setRanges(Collections.singletonList(new Range(new Text(tableId + ";"), new Text(tableId + "<"))));
-    bscanner.fetchColumnFamily(DataFileColumnFamily.NAME);
-    boolean foundFile = false;
-    for (Entry<Key,Value> entry : bscanner) {
-      foundFile = true;
-      ByteArrayOutputStream baos = new ByteArrayOutputStream();
-      PrintStream newOut = new PrintStream(baos);
-      PrintStream oldOut = System.out;
-      try {
-        System.setOut(newOut);
-        List<String> args = new ArrayList<>();
-        args.add(entry.getKey().getColumnQualifier().toString());
-        if (ClusterType.STANDALONE == getClusterType() && cluster.getClientConfig().getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
-          args.add("--config");
-          StandaloneAccumuloCluster sac = (StandaloneAccumuloCluster) cluster;
-          String hadoopConfDir = sac.getHadoopConfDir();
-          args.add(new Path(hadoopConfDir, "core-site.xml").toString());
-          args.add(new Path(hadoopConfDir, "hdfs-site.xml").toString());
-        }
-        log.info("Invoking PrintInfo with " + args);
-        PrintInfo.main(args.toArray(new String[args.size()]));
-        newOut.flush();
-        String stdout = baos.toString();
-        assertTrue(stdout.contains("Locality group         : g1"));
-        assertTrue(stdout.contains("families      : [colf]"));
-      } finally {
-        newOut.close();
-        System.setOut(oldOut);
-      }
-    }
-    bscanner.close();
-    assertTrue(foundFile);
-  }
-
-  @Test
-  public void localityGroupChange() throws Exception {
-    // Make changes to locality groups and ensure nothing is lost
-    final Connector connector = getConnector();
-    String table = getUniqueNames(1)[0];
-    TableOperations to = connector.tableOperations();
-    to.create(table);
-    String[] config = new String[] {"lg1:colf", null, "lg1:colf,xyz", "lg1:colf,xyz;lg2:c1,c2"};
-    int i = 0;
-    for (String cfg : config) {
-      to.setLocalityGroups(table, getGroups(cfg));
-      ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS * (i + 1), 1, 50, ROWS * i, table);
-      to.flush(table, null, null, true);
-      verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), 0, 1, 50, ROWS * (i + 1), table);
-      i++;
-    }
-    to.delete(table);
-    to.create(table);
-    config = new String[] {"lg1:colf", null, "lg1:colf,xyz", "lg1:colf;lg2:colf",};
-    i = 1;
-    for (String cfg : config) {
-      ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS * i, 1, 50, 0, table);
-      ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS * i, 1, 50, 0, "xyz", table);
-      to.setLocalityGroups(table, getGroups(cfg));
-      to.flush(table, null, null, true);
-      verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS * i, 1, 50, 0, table);
-      verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS * i, 1, 50, 0, "xyz", table);
-      i++;
-    }
-  }
-
-  private Map<String,Set<Text>> getGroups(String cfg) {
-    Map<String,Set<Text>> groups = new TreeMap<String,Set<Text>>();
-    if (cfg != null) {
-      for (String group : cfg.split(";")) {
-        String[] parts = group.split(":");
-        Set<Text> cols = new HashSet<Text>();
-        for (String col : parts[1].split(",")) {
-          cols.add(t(col));
-        }
-        groups.put(parts[0], cols);
-      }
-    }
-    return groups;
-  }
-
-}
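
A note for readers tracing the locality-group tests above: sunnyLG and localityGroupChange both reduce to the same client-side calls. Below is a minimal sketch of that pattern, assuming an existing Connector conn and an already-created table; the class and method names here are hypothetical, not part of the deleted test.

import java.util.Collections;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;

import org.apache.accumulo.core.client.Connector;
import org.apache.hadoop.io.Text;

public class LocalityGroupSketch {
  // Assign column family "colf" to locality group "g1", then flush so the
  // grouping is reflected in the RFiles on disk (what sunnyLG asserts on
  // via PrintInfo).
  public static void assignGroup(Connector conn, String table) throws Exception {
    Map<String,Set<Text>> groups = new TreeMap<String,Set<Text>>();
    groups.put("g1", Collections.singleton(new Text("colf")));
    conn.tableOperations().setLocalityGroups(table, groups);
    conn.tableOperations().flush(table, null, null, true);
  }
}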

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/RecoveryWithEmptyRFileIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/RecoveryWithEmptyRFileIT.java b/test/src/test/java/org/apache/accumulo/test/functional/RecoveryWithEmptyRFileIT.java
deleted file mode 100644
index 0408aa0..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/RecoveryWithEmptyRFileIT.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.file.rfile.CreateEmpty;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * XXX As a part of verifying lossy recovery via inserting an empty rfile, this test deletes test table tablets. This requires write access to the backing
- * files of the test Accumulo mini cluster.
- *
- * This test should read the file location from the test harness, and that file should be on the local filesystem. If you want to take a paranoid approach,
- * just make sure the test user doesn't have write access to the HDFS files of any colocated live Accumulo instance or any important local filesystem files.
- */
-public class RecoveryWithEmptyRFileIT extends ConfigurableMacBase {
-  private static final Logger log = LoggerFactory.getLogger(RecoveryWithEmptyRFileIT.class);
-
-  static final int ROWS = 200000;
-  static final int COLS = 1;
-  static final String COLF = "colf";
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 2 * 60;
-  }
-
-  @Override
-  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    cfg.useMiniDFS(true);
-  }
-
-  @Test
-  public void replaceMissingRFile() throws Exception {
-    log.info("Ingest some data, verify it was stored properly, replace an underlying rfile with an empty one and verify we can scan.");
-    Connector connector = getConnector();
-    String tableName = getUniqueNames(1)[0];
-    ReadWriteIT.ingest(connector, cluster.getClientConfig(), "root", ROWS, COLS, 50, 0, tableName);
-    ReadWriteIT.verify(connector, cluster.getClientConfig(), "root", ROWS, COLS, 50, 0, tableName);
-
-    connector.tableOperations().flush(tableName, null, null, true);
-    connector.tableOperations().offline(tableName, true);
-
-    log.debug("Replacing rfile(s) with empty");
-    Scanner meta = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-    String tableId = connector.tableOperations().tableIdMap().get(tableName);
-    meta.setRange(new Range(new Text(tableId + ";"), new Text(tableId + "<")));
-    meta.fetchColumnFamily(DataFileColumnFamily.NAME);
-    boolean foundFile = false;
-    for (Entry<Key,Value> entry : meta) {
-      foundFile = true;
-      Path rfile = new Path(entry.getKey().getColumnQualifier().toString());
-      log.debug("Removing rfile '" + rfile + "'");
-      cluster.getFileSystem().delete(rfile, false);
-      Process info = cluster.exec(CreateEmpty.class, rfile.toString());
-      assertEquals(0, info.waitFor());
-    }
-    meta.close();
-    assertTrue(foundFile);
-
-    log.trace("invalidate cached file handles by issuing a compaction");
-    connector.tableOperations().online(tableName, true);
-    connector.tableOperations().compact(tableName, null, null, false, true);
-
-    log.debug("make sure we can still scan");
-    Scanner scan = connector.createScanner(tableName, Authorizations.EMPTY);
-    scan.setRange(new Range());
-    long cells = 0L;
-    for (Entry<Key,Value> entry : scan) {
-      if (entry != null)
-        cells++;
-    }
-    scan.close();
-    assertEquals(0L, cells);
-  }
-
-}
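
The recovery trick in replaceMissingRFile hinges on CreateEmpty, which writes a structurally valid, zero-entry rfile at the paths it is given; the test passes rfile.toString() to cluster.exec for exactly this. A minimal standalone sketch of the same step follows, with a hypothetical output path.

import org.apache.accumulo.core.file.rfile.CreateEmpty;

public class CreateEmptySketch {
  public static void main(String[] args) throws Exception {
    // Writes an empty rfile at the given destination; the real test does this
    // for each data file reference it removed from the metadata table.
    CreateEmpty.main(new String[] {"/tmp/replacement.rf"});
  }
}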

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/RegexGroupBalanceIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/RegexGroupBalanceIT.java b/test/src/test/java/org/apache/accumulo/test/functional/RegexGroupBalanceIT.java
deleted file mode 100644
index a8c5bca..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/RegexGroupBalanceIT.java
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.accumulo.test.functional;
-
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.server.master.balancer.RegexGroupBalancer;
-import org.apache.accumulo.server.master.state.TServerInstance;
-import org.apache.commons.lang3.mutable.MutableInt;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-import com.google.common.collect.HashBasedTable;
-import com.google.common.collect.Table;
-
-public class RegexGroupBalanceIT extends ConfigurableMacBase {
-
-  @Override
-  public void beforeClusterStart(MiniAccumuloConfigImpl cfg) throws Exception {
-    cfg.setNumTservers(4);
-  }
-
-  @Test(timeout = 120000)
-  public void testBalancing() throws Exception {
-    Connector conn = getConnector();
-    String tablename = getUniqueNames(1)[0];
-    conn.tableOperations().create(tablename);
-
-    SortedSet<Text> splits = new TreeSet<>();
-    splits.add(new Text("01a"));
-    splits.add(new Text("01m"));
-    splits.add(new Text("01z"));
-
-    splits.add(new Text("02a"));
-    splits.add(new Text("02f"));
-    splits.add(new Text("02r"));
-    splits.add(new Text("02z"));
-
-    splits.add(new Text("03a"));
-    splits.add(new Text("03f"));
-    splits.add(new Text("03m"));
-    splits.add(new Text("03r"));
-
-    conn.tableOperations().setProperty(tablename, RegexGroupBalancer.REGEX_PROPERTY, "(\\d\\d).*");
-    conn.tableOperations().setProperty(tablename, RegexGroupBalancer.DEFAUT_GROUP_PROPERTY, "03");
-    conn.tableOperations().setProperty(tablename, RegexGroupBalancer.WAIT_TIME_PROPERTY, "50ms");
-    conn.tableOperations().setProperty(tablename, Property.TABLE_LOAD_BALANCER.getKey(), RegexGroupBalancer.class.getName());
-
-    conn.tableOperations().addSplits(tablename, splits);
-
-    while (true) {
-      Thread.sleep(250);
-
-      Table<String,String,MutableInt> groupLocationCounts = getCounts(conn, tablename);
-
-      boolean allGood = true;
-      allGood &= checkGroup(groupLocationCounts, "01", 1, 1, 3);
-      allGood &= checkGroup(groupLocationCounts, "02", 1, 1, 4);
-      allGood &= checkGroup(groupLocationCounts, "03", 1, 2, 4);
-      allGood &= checkTabletsPerTserver(groupLocationCounts, 3, 3, 4);
-
-      if (allGood) {
-        break;
-      }
-    }
-
-    splits.clear();
-    splits.add(new Text("01b"));
-    splits.add(new Text("01f"));
-    splits.add(new Text("01l"));
-    splits.add(new Text("01r"));
-    conn.tableOperations().addSplits(tablename, splits);
-
-    while (true) {
-      Thread.sleep(250);
-
-      Table<String,String,MutableInt> groupLocationCounts = getCounts(conn, tablename);
-
-      boolean allGood = true;
-      allGood &= checkGroup(groupLocationCounts, "01", 1, 2, 4);
-      allGood &= checkGroup(groupLocationCounts, "02", 1, 1, 4);
-      allGood &= checkGroup(groupLocationCounts, "03", 1, 2, 4);
-      allGood &= checkTabletsPerTserver(groupLocationCounts, 4, 4, 4);
-
-      if (allGood) {
-        break;
-      }
-    }
-
-    // merge group 01 down to one tablet
-    conn.tableOperations().merge(tablename, null, new Text("01z"));
-
-    while (true) {
-      Thread.sleep(250);
-
-      Table<String,String,MutableInt> groupLocationCounts = getCounts(conn, tablename);
-
-      boolean allGood = true;
-      allGood &= checkGroup(groupLocationCounts, "01", 1, 1, 1);
-      allGood &= checkGroup(groupLocationCounts, "02", 1, 1, 4);
-      allGood &= checkGroup(groupLocationCounts, "03", 1, 2, 4);
-      allGood &= checkTabletsPerTserver(groupLocationCounts, 2, 3, 4);
-
-      if (allGood) {
-        break;
-      }
-    }
-  }
-
-  private boolean checkTabletsPerTserver(Table<String,String,MutableInt> groupLocationCounts, int minTabletsPerTserver, int maxTabletsPerTserver,
-      int totalTservers) {
-    // check that each tserver has between min and max tablets
-    for (Map<String,MutableInt> groups : groupLocationCounts.columnMap().values()) {
-      int sum = 0;
-      for (MutableInt mi : groups.values()) {
-        sum += mi.intValue();
-      }
-
-      if (sum < minTabletsPerTserver || sum > maxTabletsPerTserver) {
-        return false;
-      }
-    }
-
-    return groupLocationCounts.columnKeySet().size() == totalTservers;
-  }
-
-  private boolean checkGroup(Table<String,String,MutableInt> groupLocationCounts, String group, int min, int max, int tservers) {
-    Collection<MutableInt> counts = groupLocationCounts.row(group).values();
-    if (counts.size() == 0) {
-      return min == 0 && max == 0 && tservers == 0;
-    }
-    return min == Collections.min(counts).intValue() && max == Collections.max(counts).intValue() && counts.size() == tservers;
-  }
-
-  private Table<String,String,MutableInt> getCounts(Connector conn, String tablename) throws TableNotFoundException {
-    Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-    s.fetchColumnFamily(MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME);
-    String tableId = conn.tableOperations().tableIdMap().get(tablename);
-    s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
-
-    Table<String,String,MutableInt> groupLocationCounts = HashBasedTable.create();
-
-    for (Entry<Key,Value> entry : s) {
-      String group = entry.getKey().getRow().toString();
-      if (group.endsWith("<")) {
-        group = "03";
-      } else {
-        group = group.substring(tableId.length() + 1).substring(0, 2);
-      }
-      String loc = new TServerInstance(entry.getValue(), entry.getKey().getColumnQualifier()).toString();
-
-      MutableInt count = groupLocationCounts.get(group, loc);
-      if (count == null) {
-        count = new MutableInt(0);
-        groupLocationCounts.put(group, loc, count);
-      }
-
-      count.increment();
-    }
-    return groupLocationCounts;
-  }
-}
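
For reference, the balancer configuration exercised by testBalancing is entirely per-table properties. Below is a minimal sketch of the same setup, assuming an existing Connector conn; the class and method names are hypothetical. Tablets whose end rows match the regex are grouped by the first capture group, unmatched tablets fall into the default group "03", and the balancer re-evaluates every 50ms.

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.server.master.balancer.RegexGroupBalancer;

public class RegexBalancerSketch {
  public static void enable(Connector conn, String table) throws Exception {
    // Group tablets by the first two digits of their end row.
    conn.tableOperations().setProperty(table, RegexGroupBalancer.REGEX_PROPERTY, "(\\d\\d).*");
    // Tablets that match nothing (e.g. the default tablet) go to group "03".
    conn.tableOperations().setProperty(table, RegexGroupBalancer.DEFAUT_GROUP_PROPERTY, "03");
    // How often the balancer re-checks the distribution.
    conn.tableOperations().setProperty(table, RegexGroupBalancer.WAIT_TIME_PROPERTY, "50ms");
    // Install the balancer itself for this table.
    conn.tableOperations().setProperty(table, Property.TABLE_LOAD_BALANCER.getKey(),
        RegexGroupBalancer.class.getName());
  }
}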
