HBASE-14678 Experiment: Temporarily disable balancer and a few others to see if they are the root of crashed/timed-out JVMs


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/93023f54
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/93023f54
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/93023f54

Branch: refs/heads/hbase-12439
Commit: 93023f544b673ccc99fc0e327f2eca8964128097
Parents: 35660b4
Author: stack <st...@apache.org>
Authored: Thu Oct 22 12:31:04 2015 -0700
Committer: stack <st...@apache.org>
Committed: Thu Oct 22 12:31:04 2015 -0700

----------------------------------------------------------------------
 .../hbase/TestPartialResultsFromClientSide.java | 832 -------------------
 .../balancer/TestStochasticLoadBalancer.java    | 532 ------------
 .../balancer/TestStochasticLoadBalancer2.java   |  90 --
 .../TestMasterFailoverWithProcedures.java       | 514 ------------
 .../hbase/client/TestReplicationShell.java      |  37 -
 .../apache/hadoop/hbase/client/TestShell.java   |  39 -
 6 files changed, 2044 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/93023f54/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java
deleted file mode 100644
index 3794e59..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java
+++ /dev/null
@@ -1,832 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.client.ClientScanner;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
-import org.apache.hadoop.hbase.filter.ColumnRangeFilter;
-import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
-import org.apache.hadoop.hbase.filter.FirstKeyValueMatchingQualifiersFilter;
-import org.apache.hadoop.hbase.filter.RandomRowFilter;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-/**
- * These tests are focused on testing how partial results appear to a client. Partial results are
- * {@link Result}s that contain only a portion of a row's complete list of cells. Partial results
- * are formed when the server breaches its maximum result size when trying to service a client's RPC
- * request. It is the responsibility of the scanner on the client side to recognize when partial
- * results have been returned and to take action to form the complete results.
- * <p>
- * Unless the flag {@link Scan#setAllowPartialResults(boolean)} has been set to true, the caller of
- * {@link ResultScanner#next()} should never see partial results.
- */
-@Category(MediumTests.class)
-public class TestPartialResultsFromClientSide {
-  private static final Log LOG = LogFactory.getLog(TestPartialResultsFromClientSide.class);
-
-  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-
-  private static Table TABLE = null;
-
-  /**
-   * Table configuration
-   */
-  private static TableName TABLE_NAME = TableName.valueOf("testTable");
-
-  private static int NUM_ROWS = 5;
-  private static byte[] ROW = Bytes.toBytes("testRow");
-  private static byte[][] ROWS = HTestConst.makeNAscii(ROW, NUM_ROWS);
-
-  // Should keep this value below 10 to keep generation of expected kv's simple. If above 10 then
-  // table/row/cf1/... will be followed by table/row/cf10/... instead of table/row/cf2/... which
-  // breaks the simple generation of expected kv's
-  private static int NUM_FAMILIES = 10;
-  private static byte[] FAMILY = Bytes.toBytes("testFamily");
-  private static byte[][] FAMILIES = HTestConst.makeNAscii(FAMILY, NUM_FAMILIES);
-
-  private static int NUM_QUALIFIERS = 10;
-  private static byte[] QUALIFIER = Bytes.toBytes("testQualifier");
-  private static byte[][] QUALIFIERS = HTestConst.makeNAscii(QUALIFIER, NUM_QUALIFIERS);
-
-  private static int VALUE_SIZE = 1024;
-  private static byte[] VALUE = Bytes.createMaxByteArray(VALUE_SIZE);
-
-  private static int NUM_COLS = NUM_FAMILIES * NUM_QUALIFIERS;
-
-  // Approximation of how large the heap size of cells in our table. Should be accessed through
-  // getCellHeapSize().
-  private static long CELL_HEAP_SIZE = -1;
-
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
-    TEST_UTIL.startMiniCluster(3);
-    TABLE = createTestTable(TABLE_NAME, ROWS, FAMILIES, QUALIFIERS, VALUE);
-  }
-
-  static Table createTestTable(TableName name, byte[][] rows, byte[][] families,
-      byte[][] qualifiers, byte[] cellValue) throws IOException {
-    Table ht = TEST_UTIL.createTable(name, families);
-    List<Put> puts = createPuts(rows, families, qualifiers, cellValue);
-    ht.put(puts);
-
-    return ht;
-  }
-
-  @AfterClass
-  public static void tearDownAfterClass() throws Exception {
-    TEST_UTIL.shutdownMiniCluster();
-  }
-
-  /**
-   * Ensure that the expected key values appear in a result returned from a scanner that is
-   * combining partial results into complete results
-   * @throws Exception
-   */
-  @Test
-  public void testExpectedValuesOfPartialResults() throws Exception {
-    testExpectedValuesOfPartialResults(false);
-    testExpectedValuesOfPartialResults(true);
-  }
-
-  public void testExpectedValuesOfPartialResults(boolean reversed) throws Exception {
-    Scan partialScan = new Scan();
-    partialScan.setMaxVersions();
-    // Max result size of 1 ensures that each RPC request will return a single cell. The scanner
-    // will need to reconstruct the results into a complete result before returning to the caller
-    partialScan.setMaxResultSize(1);
-    partialScan.setReversed(reversed);
-    ResultScanner partialScanner = TABLE.getScanner(partialScan);
-
-    final int startRow = reversed ? ROWS.length - 1 : 0;
-    final int endRow = reversed ? -1 : ROWS.length;
-    final int loopDelta = reversed ? -1 : 1;
-    String message;
-
-    for (int row = startRow; row != endRow; row = row + loopDelta) {
-      message = "Ensuring the expected keyValues are present for row " + row;
-      List<Cell> expectedKeyValues = createKeyValuesForRow(ROWS[row], FAMILIES, QUALIFIERS, VALUE);
-      Result result = partialScanner.next();
-      assertFalse(result.isPartial());
-      verifyResult(result, expectedKeyValues, message);
-    }
-
-    partialScanner.close();
-  }
-
-  /**
-   * Ensure that we only see Results marked as partial when the allowPartial flag is set
-   * @throws Exception
-   */
-  @Test
-  public void testAllowPartialResults() throws Exception {
-    Scan scan = new Scan();
-    scan.setAllowPartialResults(true);
-    scan.setMaxResultSize(1);
-    ResultScanner scanner = TABLE.getScanner(scan);
-    Result result = scanner.next();
-
-    assertTrue(result != null);
-    assertTrue(result.isPartial());
-    assertTrue(result.rawCells() != null);
-    assertTrue(result.rawCells().length == 1);
-
-    scanner.close();
-
-    scan.setAllowPartialResults(false);
-    scanner = TABLE.getScanner(scan);
-    result = scanner.next();
-
-    assertTrue(result != null);
-    assertTrue(!result.isPartial());
-    assertTrue(result.rawCells() != null);
-    assertTrue(result.rawCells().length == NUM_COLS);
-
-    scanner.close();
-  }
-
-  /**
-   * Ensure that the results returned from a scanner that retrieves all results in a single RPC call
-   * matches the results that are returned from a scanner that must incrementally combine partial
-   * results into complete results. A variety of scan configurations can be tested
-   * @throws Exception
-   */
-  @Test
-  public void testEquivalenceOfScanResults() throws Exception {
-    Scan oneShotScan = new Scan();
-    oneShotScan.setMaxResultSize(Long.MAX_VALUE);
-
-    Scan partialScan = new Scan(oneShotScan);
-    partialScan.setMaxResultSize(1);
-
-    testEquivalenceOfScanResults(TABLE, oneShotScan, partialScan);
-  }
-
-  public void testEquivalenceOfScanResults(Table table, Scan scan1, Scan scan2) throws Exception {
-    ResultScanner scanner1 = table.getScanner(scan1);
-    ResultScanner scanner2 = table.getScanner(scan2);
-
-    Result r1 = null;
-    Result r2 = null;
-    int count = 0;
-
-    while ((r1 = scanner1.next()) != null) {
-      r2 = scanner2.next();
-
-      assertTrue(r2 != null);
-      compareResults(r1, r2, "Comparing result #" + count);
-      count++;
-    }
-
-    r2 = scanner2.next();
-    assertTrue("r2: " + r2 + " Should be null", r2 == null);
-
-    scanner1.close();
-    scanner2.close();
-  }
-
-  /**
-   * Order of cells in partial results matches the ordering of cells from complete results
-   * @throws Exception
-   */
-  @Test
-  public void testOrderingOfCellsInPartialResults() throws Exception {
-    Scan scan = new Scan();
-
-    for (int col = 1; col <= NUM_COLS; col++) {
-      scan.setMaxResultSize(getResultSizeForNumberOfCells(col));
-      testOrderingOfCellsInPartialResults(scan);
-
-      // Test again with a reversed scanner
-      scan.setReversed(true);
-      testOrderingOfCellsInPartialResults(scan);
-    }
-  }
-
-  public void testOrderingOfCellsInPartialResults(final Scan basePartialScan) throws Exception {
-    // Scan that retrieves results in pieces (partials). By setting allowPartialResults to be true
-    // the results will NOT be reconstructed and instead the caller will see the partial results
-    // returned by the server
-    Scan partialScan = new Scan(basePartialScan);
-    partialScan.setAllowPartialResults(true);
-    ResultScanner partialScanner = TABLE.getScanner(partialScan);
-
-    // Scan that retrieves all table results in single RPC request
-    Scan oneShotScan = new Scan(basePartialScan);
-    oneShotScan.setMaxResultSize(Long.MAX_VALUE);
-    oneShotScan.setCaching(ROWS.length);
-    ResultScanner oneShotScanner = TABLE.getScanner(oneShotScan);
-
-    Result oneShotResult = oneShotScanner.next();
-    Result partialResult = null;
-    int iterationCount = 0;
-
-    while (oneShotResult != null && oneShotResult.rawCells() != null) {
-      List<Cell> aggregatePartialCells = new ArrayList<Cell>();
-      do {
-        partialResult = partialScanner.next();
-        assertTrue("Partial Result is null. iteration: " + iterationCount, partialResult != null);
-        assertTrue("Partial cells are null. iteration: " + iterationCount,
-          partialResult.rawCells() != null);
-
-        for (Cell c : partialResult.rawCells()) {
-          aggregatePartialCells.add(c);
-        }
-      } while (partialResult.isPartial());
-
-      assertTrue("Number of cells differs. iteration: " + iterationCount,
-        oneShotResult.rawCells().length == aggregatePartialCells.size());
-      final Cell[] oneShotCells = oneShotResult.rawCells();
-      for (int cell = 0; cell < oneShotCells.length; cell++) {
-        Cell oneShotCell = oneShotCells[cell];
-        Cell partialCell = aggregatePartialCells.get(cell);
-
-        assertTrue("One shot cell was null", oneShotCell != null);
-        assertTrue("Partial cell was null", partialCell != null);
-        assertTrue("Cell differs. oneShotCell:" + oneShotCell + " partialCell:" + partialCell,
-          oneShotCell.equals(partialCell));
-      }
-
-      oneShotResult = oneShotScanner.next();
-      iterationCount++;
-    }
-
-    assertTrue(partialScanner.next() == null);
-
-    partialScanner.close();
-    oneShotScanner.close();
-  }
-
-  /**
-   * Setting the max result size allows us to control how many cells we expect to see on each call
-   * to next on the scanner. Test a variety of different sizes for correctness
-   * @throws Exception
-   */
-  @Test
-  public void testExpectedNumberOfCellsPerPartialResult() throws Exception {
-    Scan scan = new Scan();
-    testExpectedNumberOfCellsPerPartialResult(scan);
-
-    scan.setReversed(true);
-    testExpectedNumberOfCellsPerPartialResult(scan);
-  }
-
-  public void testExpectedNumberOfCellsPerPartialResult(Scan baseScan) throws Exception {
-    for (int expectedCells = 1; expectedCells <= NUM_COLS; expectedCells++) {
-      testExpectedNumberOfCellsPerPartialResult(baseScan, expectedCells);
-    }
-  }
-
-  public void testExpectedNumberOfCellsPerPartialResult(Scan baseScan, int expectedNumberOfCells)
-      throws Exception {
-
-    if (LOG.isInfoEnabled()) LOG.info("groupSize:" + expectedNumberOfCells);
-
-    // Use the cellHeapSize to set maxResultSize such that we know how many cells to expect back
-    // from the call. The returned results should NOT exceed expectedNumberOfCells but may be less
-    // than it in cases where expectedNumberOfCells is not an exact multiple of the number of
-    // columns in the table.
-    Scan scan = new Scan(baseScan);
-    scan.setAllowPartialResults(true);
-    scan.setMaxResultSize(getResultSizeForNumberOfCells(expectedNumberOfCells));
-
-    ResultScanner scanner = TABLE.getScanner(scan);
-    Result result = null;
-    byte[] prevRow = null;
-    while ((result = scanner.next()) != null) {
-      assertTrue(result.rawCells() != null);
-
-      // Cases when cell count won't equal expectedNumberOfCells:
-      // 1. Returned result is the final result needed to form the complete result for that row
-      // 2. It is the first result we have seen for that row and thus may have been fetched as
-      // the last group of cells that fit inside the maxResultSize
-      assertTrue(
-        "Result's cell count differed from expected number. result: " + result,
-        result.rawCells().length == expectedNumberOfCells || !result.isPartial()
-          || !Bytes.equals(prevRow, result.getRow()));
-      prevRow = result.getRow();
-    }
-
-    scanner.close();
-  }
-
-  /**
-   * @return The approximate heap size of a cell in the test table. All cells should have
-   *         approximately the same heap size, so the value is cached to avoid repeating the
-   *         calculation
-   * @throws Exception
-   */
-  private long getCellHeapSize() throws Exception {
-    if (CELL_HEAP_SIZE == -1) {
-      // Do a partial scan that will return a single result with a single cell
-      Scan scan = new Scan();
-      scan.setMaxResultSize(1);
-      scan.setAllowPartialResults(true);
-      ResultScanner scanner = TABLE.getScanner(scan);
-
-      Result result = scanner.next();
-
-      assertTrue(result != null);
-      assertTrue(result.rawCells() != null);
-      assertTrue(result.rawCells().length == 1);
-
-      CELL_HEAP_SIZE = CellUtil.estimatedHeapSizeOf(result.rawCells()[0]);
-      if (LOG.isInfoEnabled()) LOG.info("Cell heap size: " + CELL_HEAP_SIZE);
-      scanner.close();
-    }
-
-    return CELL_HEAP_SIZE;
-  }
-
-  /**
-   * @param numberOfCells
-   * @return the result size that should be used in {@link Scan#setMaxResultSize(long)} if you want
-   *         the server to return exactly numberOfCells cells
-   * @throws Exception
-   */
-  private long getResultSizeForNumberOfCells(int numberOfCells) throws Exception {
-    return getCellHeapSize() * numberOfCells;
-  }
-
-  /**
-   * Test various combinations of batching and partial results for correctness
-   */
-  @Test
-  public void testPartialResultsAndBatch() throws Exception {
-    for (int batch = 1; batch <= NUM_COLS / 4; batch++) {
-      for (int cellsPerPartial = 1; cellsPerPartial <= NUM_COLS / 4; cellsPerPartial++) {
-        testPartialResultsAndBatch(batch, cellsPerPartial);
-      }
-    }
-  }
-
-  public void testPartialResultsAndBatch(final int batch, final int cellsPerPartialResult)
-      throws Exception {
-    if (LOG.isInfoEnabled()) {
-      LOG.info("batch: " + batch + " cellsPerPartialResult: " + cellsPerPartialResult);
-    }
-
-    Scan scan = new Scan();
-    scan.setMaxResultSize(getResultSizeForNumberOfCells(cellsPerPartialResult));
-    scan.setBatch(batch);
-    ResultScanner scanner = TABLE.getScanner(scan);
-    Result result = scanner.next();
-    int repCount = 0;
-
-    while ((result = scanner.next()) != null) {
-      assertTrue(result.rawCells() != null);
-
-      if (result.isPartial()) {
-        final String error =
-            "Cells:" + result.rawCells().length + " Batch size:" + batch
-                + " cellsPerPartialResult:" + cellsPerPartialResult + " rep:" + repCount;
-        assertTrue(error, result.rawCells().length <= Math.min(batch, cellsPerPartialResult));
-      } else {
-        assertTrue(result.rawCells().length <= batch);
-      }
-      repCount++;
-    }
-
-    scanner.close();
-  }
-
-  /**
-   * Test the method {@link Result#createCompleteResult(List, Result)}
-   * @throws Exception
-   */
-  @Test
-  public void testPartialResultsReassembly() throws Exception {
-    Scan scan = new Scan();
-    testPartialResultsReassembly(scan);
-    scan.setReversed(true);
-    testPartialResultsReassembly(scan);
-  }
-
-  public void testPartialResultsReassembly(Scan scanBase) throws Exception {
-    Scan partialScan = new Scan(scanBase);
-    partialScan.setMaxResultSize(1);
-    partialScan.setAllowPartialResults(true);
-    ResultScanner partialScanner = TABLE.getScanner(partialScan);
-
-    Scan oneShotScan = new Scan(scanBase);
-    oneShotScan.setMaxResultSize(Long.MAX_VALUE);
-    ResultScanner oneShotScanner = TABLE.getScanner(oneShotScan);
-
-    ArrayList<Result> partials = new ArrayList<>();
-    for (int i = 0; i < NUM_ROWS; i++) {
-      Result partialResult = null;
-      Result completeResult = null;
-      Result oneShotResult = null;
-      partials.clear();
-
-      do {
-        partialResult = partialScanner.next();
-        partials.add(partialResult);
-      } while (partialResult != null && partialResult.isPartial());
-
-      completeResult = Result.createCompleteResult(partials);
-      oneShotResult = oneShotScanner.next();
-
-      compareResults(completeResult, oneShotResult, null);
-    }
-
-    assertTrue(oneShotScanner.next() == null);
-    assertTrue(partialScanner.next() == null);
-
-    oneShotScanner.close();
-    partialScanner.close();
-  }
-
-  /**
-   * When reconstructing the complete result from its partials we ensure that the row of each
-   * partial result is the same. If one of the rows differs, an exception is thrown.
-   */
-  @Test
-  public void testExceptionThrownOnMismatchedPartialResults() throws IOException {
-    assertTrue(NUM_ROWS >= 2);
-
-    ArrayList<Result> partials = new ArrayList<>();
-    Scan scan = new Scan();
-    scan.setMaxResultSize(Long.MAX_VALUE);
-    ResultScanner scanner = TABLE.getScanner(scan);
-    Result r1 = scanner.next();
-    partials.add(r1);
-    Result r2 = scanner.next();
-    partials.add(r2);
-
-    assertFalse(Bytes.equals(r1.getRow(), r2.getRow()));
-
-    try {
-      Result.createCompleteResult(partials);
-      fail("r1 and r2 are from different rows. It should not be possible to combine them into"
-          + " a single result");
-    } catch (IOException e) {
-    }
-
-    scanner.close();
-  }
-
-  /**
-   * When a scan has a filter where {@link org.apache.hadoop.hbase.filter.Filter#hasFilterRow()} is
-   * true, the scanner should not return partial results. The scanner cannot return partial results
-   * because the entire row needs to be read for the include/exclude decision to be made
-   */
-  @Test
-  public void testNoPartialResultsWhenRowFilterPresent() throws Exception {
-    Scan scan = new Scan();
-    scan.setMaxResultSize(1);
-    scan.setAllowPartialResults(true);
-    // If a filter hasFilter() is true then partial results should not be returned else filter
-    // application server side would break.
-    scan.setFilter(new RandomRowFilter(1.0f));
-    ResultScanner scanner = TABLE.getScanner(scan);
-
-    Result r = null;
-    while ((r = scanner.next()) != null) {
-      assertFalse(r.isPartial());
-    }
-
-    scanner.close();
-  }
-
-  /**
-   * Examine the interaction between the maxResultSize and caching. If the caching limit is reached
-   * before the maxResultSize limit, we should not see partial results. On the other hand, if the
-   * maxResultSize limit is reached before the caching limit, it is likely that partial results will
-   * be seen.
-   * @throws Exception
-   */
-  @Test
-  public void testPartialResultsAndCaching() throws Exception {
-    for (int caching = 1; caching <= NUM_ROWS; caching++) {
-      for (int maxResultRows = 0; maxResultRows <= NUM_ROWS; maxResultRows++) {
-        testPartialResultsAndCaching(maxResultRows, caching);
-      }
-    }
-  }
-
-  /**
-   * @param resultSizeRowLimit The row limit that will be enforced through maxResultSize
-   * @param cachingRowLimit The row limit that will be enforced through caching
-   * @throws Exception
-   */
-  public void testPartialResultsAndCaching(int resultSizeRowLimit, int cachingRowLimit)
-      throws Exception {
-    Scan scan = new Scan();
-    scan.setAllowPartialResults(true);
-
-    // The number of cells specified in the call to getResultSizeForNumberOfCells is offset to
-    // ensure that the result size we specify is not an exact multiple of the number of cells
-    // in a row. This ensures that partial results will be returned when the result size limit
-    // is reached before the caching limit.
-    int cellOffset = NUM_COLS / 3;
-    long maxResultSize = getResultSizeForNumberOfCells(resultSizeRowLimit * NUM_COLS + cellOffset);
-    scan.setMaxResultSize(maxResultSize);
-    scan.setCaching(cachingRowLimit);
-
-    ResultScanner scanner = TABLE.getScanner(scan);
-    ClientScanner clientScanner = (ClientScanner) scanner;
-    Result r = null;
-
-    // Approximate the number of rows we expect will fit into the specified max rsult size. If this
-    // approximation is less than caching, then we expect that the max result size limit will be
-    // hit before the caching limit and thus partial results may be seen
-    boolean expectToSeePartialResults = resultSizeRowLimit < cachingRowLimit;
-    while ((r = clientScanner.next()) != null) {
-      assertTrue(!r.isPartial() || expectToSeePartialResults);
-    }
-
-    scanner.close();
-  }
-
-  /**
-   * Small scans should not return partial results because it would prevent small scans from
-   * retrieving all of the necessary results in a single RPC request which is what makese small
-   * scans useful. Thus, ensure that even when {@link Scan#getAllowPartialResults()} is true, small
-   * scans do not return partial results
-   * @throws Exception
-   */
-  @Test
-  public void testSmallScansDoNotAllowPartials() throws Exception {
-    Scan scan = new Scan();
-    testSmallScansDoNotAllowPartials(scan);
-    scan.setReversed(true);
-    testSmallScansDoNotAllowPartials(scan);
-  }
-
-  public void testSmallScansDoNotAllowPartials(Scan baseScan) throws Exception {
-    Scan scan = new Scan(baseScan);
-    scan.setAllowPartialResults(true);
-    scan.setSmall(true);
-    scan.setMaxResultSize(1);
-
-    ResultScanner scanner = TABLE.getScanner(scan);
-    Result r = null;
-
-    while ((r = scanner.next()) != null) {
-      assertFalse(r.isPartial());
-    }
-
-    scanner.close();
-  }
-
-  /**
-   * Make puts to put the input value into each combination of row, family, and qualifier
-   * @param rows
-   * @param families
-   * @param qualifiers
-   * @param value
-   * @return
-   * @throws IOException
-   */
-  static ArrayList<Put> createPuts(byte[][] rows, byte[][] families, byte[][] qualifiers,
-      byte[] value) throws IOException {
-    Put put;
-    ArrayList<Put> puts = new ArrayList<>();
-
-    for (int row = 0; row < rows.length; row++) {
-      put = new Put(rows[row]);
-      for (int fam = 0; fam < families.length; fam++) {
-        for (int qual = 0; qual < qualifiers.length; qual++) {
-          KeyValue kv = new KeyValue(rows[row], families[fam], qualifiers[qual], qual, value);
-          put.add(kv);
-        }
-      }
-      puts.add(put);
-    }
-
-    return puts;
-  }
-
-  /**
-   * Make key values to represent each possible combination of family and qualifier in the specified
-   * row.
-   * @param row
-   * @param families
-   * @param qualifiers
-   * @param value
-   * @return
-   */
-  static ArrayList<Cell> createKeyValuesForRow(byte[] row, byte[][] families, byte[][] qualifiers,
-      byte[] value) {
-    ArrayList<Cell> outList = new ArrayList<>();
-    for (int fam = 0; fam < families.length; fam++) {
-      for (int qual = 0; qual < qualifiers.length; qual++) {
-        outList.add(new KeyValue(row, families[fam], qualifiers[qual], qual, value));
-      }
-    }
-    return outList;
-  }
-
-  /**
-   * Verifies that result contains all the key values within expKvList. Fails the test otherwise
-   * @param result
-   * @param expKvList
-   * @param msg
-   */
-  static void verifyResult(Result result, List<Cell> expKvList, String msg) {
-    if (LOG.isInfoEnabled()) {
-      LOG.info(msg);
-      LOG.info("Expected count: " + expKvList.size());
-      LOG.info("Actual count: " + result.size());
-    }
-
-    if (expKvList.size() == 0) return;
-
-    int i = 0;
-    for (Cell kv : result.rawCells()) {
-      if (i >= expKvList.size()) {
-        break; // we will check the size later
-      }
-
-      Cell kvExp = expKvList.get(i++);
-      assertTrue("Not equal. get kv: " + kv.toString() + " exp kv: " + kvExp.toString(),
-        kvExp.equals(kv));
-    }
-
-    assertEquals(expKvList.size(), result.size());
-  }
-
-  /**
-   * Compares two results and fails the test if the results are different
-   * @param r1
-   * @param r2
-   * @param message
-   */
-  static void compareResults(Result r1, Result r2, final String message) {
-    if (LOG.isInfoEnabled()) {
-      if (message != null) LOG.info(message);
-      LOG.info("r1: " + r1);
-      LOG.info("r2: " + r2);
-    }
-
-    final String failureMessage = "Results r1:" + r1 + " \nr2:" + r2 + " are not equivalent";
-    if (r1 == null && r2 == null) fail(failureMessage);
-    else if (r1 == null || r2 == null) fail(failureMessage);
-
-    try {
-      Result.compareResults(r1, r2);
-    } catch (Exception e) {
-      fail(failureMessage);
-    }
-  }
-
-  @Test
-  public void testReadPointAndPartialResults() throws Exception {
-    TableName testName = TableName.valueOf("testReadPointAndPartialResults");
-    int numRows = 5;
-    int numFamilies = 5;
-    int numQualifiers = 5;
-    byte[][] rows = HTestConst.makeNAscii(Bytes.toBytes("testRow"), numRows);
-    byte[][] families = HTestConst.makeNAscii(Bytes.toBytes("testFamily"), numFamilies);
-    byte[][] qualifiers = HTestConst.makeNAscii(Bytes.toBytes("testQualifier"), numQualifiers);
-    byte[] value = Bytes.createMaxByteArray(100);
-
-    Table tmpTable = createTestTable(testName, rows, families, qualifiers, value);
-
-    Scan scan = new Scan();
-    scan.setMaxResultSize(1);
-    scan.setAllowPartialResults(true);
-
-    // Open scanner before deletes
-    ResultScanner scanner = tmpTable.getScanner(scan);
-
-    Delete delete1 = new Delete(rows[0]);
-    delete1.addColumn(families[0], qualifiers[0], 0);
-    tmpTable.delete(delete1);
-
-    Delete delete2 = new Delete(rows[1]);
-    delete2.addColumn(families[1], qualifiers[1], 1);
-    tmpTable.delete(delete2);
-
-    // Should see all cells because scanner was opened prior to deletes
-    int scannerCount = countCellsFromScanner(scanner);
-    int expectedCount = numRows * numFamilies * numQualifiers;
-    assertTrue("scannerCount: " + scannerCount + " expectedCount: " + expectedCount,
-      scannerCount == expectedCount);
-
-    // Minus 2 for the two cells that were deleted
-    scanner = tmpTable.getScanner(scan);
-    scannerCount = countCellsFromScanner(scanner);
-    expectedCount = numRows * numFamilies * numQualifiers - 2;
-    assertTrue("scannerCount: " + scannerCount + " expectedCount: " + expectedCount,
-      scannerCount == expectedCount);
-
-    scanner = tmpTable.getScanner(scan);
-    // Put in 2 new rows. The timestamps differ from the deleted rows
-    Put put1 = new Put(rows[0]);
-    put1.add(new KeyValue(rows[0], families[0], qualifiers[0], 1, value));
-    tmpTable.put(put1);
-
-    Put put2 = new Put(rows[1]);
-    put2.add(new KeyValue(rows[1], families[1], qualifiers[1], 2, value));
-    tmpTable.put(put2);
-
-    // Scanner opened prior to puts. Cell count shouldn't have changed
-    scannerCount = countCellsFromScanner(scanner);
-    expectedCount = numRows * numFamilies * numQualifiers - 2;
-    assertTrue("scannerCount: " + scannerCount + " expectedCount: " + expectedCount,
-      scannerCount == expectedCount);
-
-    // Now the scanner should see the cells that were added by puts
-    scanner = tmpTable.getScanner(scan);
-    scannerCount = countCellsFromScanner(scanner);
-    expectedCount = numRows * numFamilies * numQualifiers;
-    assertTrue("scannerCount: " + scannerCount + " expectedCount: " + expectedCount,
-      scannerCount == expectedCount);
-
-    TEST_UTIL.deleteTable(testName);
-  }
-
-  /**
-   * Exhausts the scanner by calling next repetitively. Once completely exhausted, close scanner and
-   * return total cell count
-   * @param scanner
-   * @return
-   * @throws Exception
-   */
-  private int countCellsFromScanner(ResultScanner scanner) throws Exception {
-    Result result = null;
-    int numCells = 0;
-    while ((result = scanner.next()) != null) {
-      numCells += result.rawCells().length;
-    }
-
-    scanner.close();
-    return numCells;
-  }
-
-  /**
-   * Test partial Result re-assembly in the presence of different filters. The Results from the
-   * partial scanner should match the Results returned from a scanner that receives all of the
-   * results in one RPC to the server. The partial scanner is tested with a variety of different
-   * result sizes (all of which are less than the size necessary to fetch an entire row)
-   * @throws Exception
-   */
-  @Test
-  public void testPartialResultsWithColumnFilter() throws Exception {
-    testPartialResultsWithColumnFilter(new FirstKeyOnlyFilter());
-    testPartialResultsWithColumnFilter(new ColumnPrefixFilter(Bytes.toBytes("testQualifier5")));
-    testPartialResultsWithColumnFilter(new ColumnRangeFilter(Bytes.toBytes("testQualifer1"), true,
-        Bytes.toBytes("testQualifier7"), true));
-
-    Set<byte[]> qualifiers = new LinkedHashSet<>();
-    qualifiers.add(Bytes.toBytes("testQualifier5"));
-    testPartialResultsWithColumnFilter(new FirstKeyValueMatchingQualifiersFilter(qualifiers));
-  }
-
-  public void testPartialResultsWithColumnFilter(Filter filter) throws Exception {
-    assertTrue(!filter.hasFilterRow());
-
-    Scan partialScan = new Scan();
-    partialScan.setFilter(filter);
-
-    Scan oneshotScan = new Scan();
-    oneshotScan.setFilter(filter);
-    oneshotScan.setMaxResultSize(Long.MAX_VALUE);
-
-    for (int i = 1; i <= NUM_COLS; i++) {
-      partialScan.setMaxResultSize(getResultSizeForNumberOfCells(i));
-      testEquivalenceOfScanResults(TABLE, partialScan, oneshotScan);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/93023f54/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java
deleted file mode 100644
index 7abbeb4..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java
+++ /dev/null
@@ -1,532 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master.balancer;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Queue;
-import java.util.TreeMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.RegionLoad;
-import org.apache.hadoop.hbase.ServerLoad;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-import org.apache.hadoop.hbase.master.RackManager;
-import org.apache.hadoop.hbase.master.RegionPlan;
-import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster;
-import org.apache.hadoop.hbase.testclassification.FlakeyTests;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category({FlakeyTests.class, MediumTests.class})
-public class TestStochasticLoadBalancer extends BalancerTestBase {
-  public static final String REGION_KEY = "testRegion";
-  private static final Log LOG = LogFactory.getLog(TestStochasticLoadBalancer.class);
-
-  @Test
-  public void testKeepRegionLoad() throws Exception {
-
-    ServerName sn = ServerName.valueOf("test:8080", 100);
-    int numClusterStatusToAdd = 20000;
-    for (int i = 0; i < numClusterStatusToAdd; i++) {
-      ServerLoad sl = mock(ServerLoad.class);
-
-      RegionLoad rl = mock(RegionLoad.class);
-      when(rl.getStores()).thenReturn(i);
-
-      Map<byte[], RegionLoad> regionLoadMap =
-          new TreeMap<byte[], RegionLoad>(Bytes.BYTES_COMPARATOR);
-      regionLoadMap.put(Bytes.toBytes(REGION_KEY), rl);
-      when(sl.getRegionsLoad()).thenReturn(regionLoadMap);
-
-      ClusterStatus clusterStatus = mock(ClusterStatus.class);
-      when(clusterStatus.getServers()).thenReturn(Arrays.asList(sn));
-      when(clusterStatus.getLoad(sn)).thenReturn(sl);
-
-      loadBalancer.setClusterStatus(clusterStatus);
-    }
-    assertTrue(loadBalancer.loads.get(REGION_KEY) != null);
-    assertTrue(loadBalancer.loads.get(REGION_KEY).size() == 15);
-
-    Queue<RegionLoad> loads = loadBalancer.loads.get(REGION_KEY);
-    int i = 0;
-    while(loads.size() > 0) {
-      RegionLoad rl = loads.remove();
-      assertEquals(i + (numClusterStatusToAdd - 15), rl.getStores());
-      i ++;
-    }
-  }
-
-  /**
-   * Test the load balancing algorithm.
-   *
-   * Invariant is that all servers should be hosting either floor(average) or
-   * ceiling(average)
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testBalanceCluster() throws Exception {
-
-    for (int[] mockCluster : clusterStateMocks) {
-      Map<ServerName, List<HRegionInfo>> servers = mockClusterServers(mockCluster);
-      List<ServerAndLoad> list = convertToList(servers);
-      LOG.info("Mock Cluster : " + printMock(list) + " " + printStats(list));
-      List<RegionPlan> plans = loadBalancer.balanceCluster(servers);
-      List<ServerAndLoad> balancedCluster = reconcile(list, plans, servers);
-      LOG.info("Mock Balance : " + printMock(balancedCluster));
-      assertClusterAsBalanced(balancedCluster);
-      List<RegionPlan> secondPlans =  loadBalancer.balanceCluster(servers);
-      assertNull(secondPlans);
-      for (Map.Entry<ServerName, List<HRegionInfo>> entry : servers.entrySet()) {
-        returnRegions(entry.getValue());
-        returnServer(entry.getKey());
-      }
-    }
-
-  }
-
-  @Test
-  public void testMoveCost() throws Exception {
-    Configuration conf = HBaseConfiguration.create();
-    StochasticLoadBalancer.CostFunction
-        costFunction = new StochasticLoadBalancer.MoveCostFunction(conf);
-    for (int[] mockCluster : clusterStateMocks) {
-      BaseLoadBalancer.Cluster cluster = mockCluster(mockCluster);
-      costFunction.init(cluster);
-      double cost = costFunction.cost();
-      assertEquals(0.0f, cost, 0.001);
-
-      // cluster region number is smaller than maxMoves=600
-      cluster.setNumRegions(200);
-      cluster.setNumMovedRegions(10);
-      cost = costFunction.cost();
-      assertEquals(0.05f, cost, 0.001);
-      cluster.setNumMovedRegions(100);
-      cost = costFunction.cost();
-      assertEquals(0.5f, cost, 0.001);
-      cluster.setNumMovedRegions(200);
-      cost = costFunction.cost();
-      assertEquals(1.0f, cost, 0.001);
-
-
-      // cluster region number is bigger than maxMoves=2500
-      cluster.setNumRegions(10000);
-      cluster.setNumMovedRegions(250);
-      cost = costFunction.cost();
-      assertEquals(0.1f, cost, 0.001);
-      cluster.setNumMovedRegions(1250);
-      cost = costFunction.cost();
-      assertEquals(0.5f, cost, 0.001);
-      cluster.setNumMovedRegions(2500);
-      cost = costFunction.cost();
-      assertEquals(1.0f, cost, 0.01);
-    }
-  }
-
-  @Test
-  public void testSkewCost() {
-    Configuration conf = HBaseConfiguration.create();
-    StochasticLoadBalancer.CostFunction
-        costFunction = new StochasticLoadBalancer.RegionCountSkewCostFunction(conf);
-    for (int[] mockCluster : clusterStateMocks) {
-      costFunction.init(mockCluster(mockCluster));
-      double cost = costFunction.cost();
-      assertTrue(cost >= 0);
-      assertTrue(cost <= 1.01);
-    }
-
-    costFunction.init(mockCluster(new int[]{0, 0, 0, 0, 1}));
-    assertEquals(0,costFunction.cost(), 0.01);
-    costFunction.init(mockCluster(new int[]{0, 0, 0, 1, 1}));
-    assertEquals(0, costFunction.cost(), 0.01);
-    costFunction.init(mockCluster(new int[]{0, 0, 1, 1, 1}));
-    assertEquals(0, costFunction.cost(), 0.01);
-    costFunction.init(mockCluster(new int[]{0, 1, 1, 1, 1}));
-    assertEquals(0, costFunction.cost(), 0.01);
-    costFunction.init(mockCluster(new int[]{1, 1, 1, 1, 1}));
-    assertEquals(0, costFunction.cost(), 0.01);
-    costFunction.init(mockCluster(new int[]{10000, 0, 0, 0, 0}));
-    assertEquals(1, costFunction.cost(), 0.01);
-  }
-
-  @Test
-  public void testTableSkewCost() {
-    Configuration conf = HBaseConfiguration.create();
-    StochasticLoadBalancer.CostFunction
-        costFunction = new StochasticLoadBalancer.TableSkewCostFunction(conf);
-    for (int[] mockCluster : clusterStateMocks) {
-      BaseLoadBalancer.Cluster cluster = mockCluster(mockCluster);
-      costFunction.init(cluster);
-      double cost = costFunction.cost();
-      assertTrue(cost >= 0);
-      assertTrue(cost <= 1.01);
-    }
-  }
-
-  @Test
-  public void testCostFromArray() {
-    Configuration conf = HBaseConfiguration.create();
-    StochasticLoadBalancer.CostFromRegionLoadFunction
-        costFunction = new StochasticLoadBalancer.MemstoreSizeCostFunction(conf);
-    costFunction.init(mockCluster(new int[]{0, 0, 0, 0, 1}));
-
-    double[] statOne = new double[100];
-    for (int i =0; i < 100; i++) {
-      statOne[i] = 10;
-    }
-    assertEquals(0, costFunction.costFromArray(statOne), 0.01);
-
-    double[] statTwo= new double[101];
-    for (int i =0; i < 100; i++) {
-      statTwo[i] = 0;
-    }
-    statTwo[100] = 100;
-    assertEquals(1, costFunction.costFromArray(statTwo), 0.01);
-
-    double[] statThree = new double[200];
-    for (int i =0; i < 100; i++) {
-      statThree[i] = (0);
-      statThree[i+100] = 100;
-    }
-    assertEquals(0.5, costFunction.costFromArray(statThree), 0.01);
-  }
-
-  @Test(timeout =  60000)
-  public void testLosingRs() throws Exception {
-    int numNodes = 3;
-    int numRegions = 20;
-    int numRegionsPerServer = 3; //all servers except one
-    int replication = 1;
-    int numTables = 2;
-
-    Map<ServerName, List<HRegionInfo>> serverMap =
-        createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables);
-    List<ServerAndLoad> list = convertToList(serverMap);
-
-
-    List<RegionPlan> plans = loadBalancer.balanceCluster(serverMap);
-    assertNotNull(plans);
-
-    // Apply the plan to the mock cluster.
-    List<ServerAndLoad> balancedCluster = reconcile(list, plans, serverMap);
-
-    assertClusterAsBalanced(balancedCluster);
-
-    ServerName sn = serverMap.keySet().toArray(new ServerName[serverMap.size()])[0];
-
-    ServerName deadSn = ServerName.valueOf(sn.getHostname(), sn.getPort(), sn.getStartcode() - 100);
-
-    serverMap.put(deadSn, new ArrayList<HRegionInfo>(0));
-
-    plans = loadBalancer.balanceCluster(serverMap);
-    assertNull(plans);
-  }
-
-  @Test
-  public void testReplicaCost() {
-    Configuration conf = HBaseConfiguration.create();
-    StochasticLoadBalancer.CostFunction
-        costFunction = new StochasticLoadBalancer.RegionReplicaHostCostFunction(conf);
-    for (int[] mockCluster : clusterStateMocks) {
-      BaseLoadBalancer.Cluster cluster = mockCluster(mockCluster);
-      costFunction.init(cluster);
-      double cost = costFunction.cost();
-      assertTrue(cost >= 0);
-      assertTrue(cost <= 1.01);
-    }
-  }
-
-  @Test
-  public void testReplicaCostForReplicas() {
-    Configuration conf = HBaseConfiguration.create();
-    StochasticLoadBalancer.CostFunction
-        costFunction = new StochasticLoadBalancer.RegionReplicaHostCostFunction(conf);
-
-    int [] servers = new int[] {3,3,3,3,3};
-    TreeMap<ServerName, List<HRegionInfo>> clusterState = mockClusterServers(servers);
-
-    BaseLoadBalancer.Cluster cluster;
-
-    cluster = new BaseLoadBalancer.Cluster(clusterState, null, null, null);
-    costFunction.init(cluster);
-    double costWithoutReplicas = costFunction.cost();
-    assertEquals(0, costWithoutReplicas, 0);
-
-    // replicate the region from first server to the last server
-    HRegionInfo replica1 = RegionReplicaUtil.getRegionInfoForReplica(
-      clusterState.firstEntry().getValue().get(0),1);
-    clusterState.lastEntry().getValue().add(replica1);
-
-    cluster = new BaseLoadBalancer.Cluster(clusterState, null, null, null);
-    costFunction.init(cluster);
-    double costWith1ReplicaDifferentServer = costFunction.cost();
-
-    assertEquals(0, costWith1ReplicaDifferentServer, 0);
-
-    // add a third replica to the last server
-    HRegionInfo replica2 = RegionReplicaUtil.getRegionInfoForReplica(replica1, 2);
-    clusterState.lastEntry().getValue().add(replica2);
-
-    cluster = new BaseLoadBalancer.Cluster(clusterState, null, null, null);
-    costFunction.init(cluster);
-    double costWith1ReplicaSameServer = costFunction.cost();
-
-    assertTrue(costWith1ReplicaDifferentServer < costWith1ReplicaSameServer);
-
-    // test with replication = 4 for following:
-
-    HRegionInfo replica3;
-    Iterator<Entry<ServerName, List<HRegionInfo>>> it;
-    Entry<ServerName, List<HRegionInfo>> entry;
-
-    clusterState = mockClusterServers(servers);
-    it = clusterState.entrySet().iterator();
-    entry = it.next(); //first server
-    HRegionInfo hri = entry.getValue().get(0);
-    replica1 = RegionReplicaUtil.getRegionInfoForReplica(hri, 1);
-    replica2 = RegionReplicaUtil.getRegionInfoForReplica(hri, 2);
-    replica3 = RegionReplicaUtil.getRegionInfoForReplica(hri, 3);
-    entry.getValue().add(replica1);
-    entry.getValue().add(replica2);
-    it.next().getValue().add(replica3); //2nd server
-
-    cluster = new BaseLoadBalancer.Cluster(clusterState, null, null, null);
-    costFunction.init(cluster);
-    double costWith3ReplicasSameServer = costFunction.cost();
-
-    clusterState = mockClusterServers(servers);
-    hri = clusterState.firstEntry().getValue().get(0);
-    replica1 = RegionReplicaUtil.getRegionInfoForReplica(hri, 1);
-    replica2 = RegionReplicaUtil.getRegionInfoForReplica(hri, 2);
-    replica3 = RegionReplicaUtil.getRegionInfoForReplica(hri, 3);
-
-    clusterState.firstEntry().getValue().add(replica1);
-    clusterState.lastEntry().getValue().add(replica2);
-    clusterState.lastEntry().getValue().add(replica3);
-
-    cluster = new BaseLoadBalancer.Cluster(clusterState, null, null, null);
-    costFunction.init(cluster);
-    double costWith2ReplicasOnTwoServers = costFunction.cost();
-
-    assertTrue(costWith2ReplicasOnTwoServers < costWith3ReplicasSameServer);
-  }
-
-  @Test
-  public void testNeedsBalanceForColocatedReplicas() {
-    // check for the case where there are two hosts and with one rack, and where
-    // both the replicas are hosted on the same server
-    List<HRegionInfo> regions = randomRegions(1);
-    ServerName s1 = ServerName.valueOf("host1", 1000, 11111);
-    ServerName s2 = ServerName.valueOf("host11", 1000, 11111);
-    Map<ServerName, List<HRegionInfo>> map = new HashMap<ServerName, List<HRegionInfo>>();
-    map.put(s1, regions);
-    regions.add(RegionReplicaUtil.getRegionInfoForReplica(regions.get(0), 1));
-    // until the step above s1 holds two replicas of a region
-    regions = randomRegions(1);
-    map.put(s2, regions);
-    assertTrue(loadBalancer.needsBalance(new Cluster(map, null, null, null)));
-    // check for the case where there are two hosts on the same rack and there are two racks
-    // and both the replicas are on the same rack
-    map.clear();
-    regions = randomRegions(1);
-    List<HRegionInfo> regionsOnS2 = new ArrayList<HRegionInfo>(1);
-    regionsOnS2.add(RegionReplicaUtil.getRegionInfoForReplica(regions.get(0), 1));
-    map.put(s1, regions);
-    map.put(s2, regionsOnS2);
-    // add another server so that the cluster has some host on another rack
-    map.put(ServerName.valueOf("host2", 1000, 11111), randomRegions(1));
-    assertTrue(loadBalancer.needsBalance(new Cluster(map, null, null,
-        new ForTestRackManagerOne())));
-  }
-
-  @Test (timeout = 60000)
-  public void testSmallCluster() {
-    int numNodes = 10;
-    int numRegions = 1000;
-    int numRegionsPerServer = 40; //all servers except one
-    int replication = 1;
-    int numTables = 10;
-    testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, true);
-  }
-
-  @Test (timeout = 60000)
-  public void testSmallCluster2() {
-    int numNodes = 20;
-    int numRegions = 2000;
-    int numRegionsPerServer = 40; //all servers except one
-    int replication = 1;
-    int numTables = 10;
-    testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, true);
-  }
-
-  @Test (timeout = 60000)
-  public void testSmallCluster3() {
-    int numNodes = 20;
-    int numRegions = 2000;
-    int numRegionsPerServer = 1; // all servers except one
-    int replication = 1;
-    int numTables = 10;
-    /* fails because of max moves */
-    testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, false, false);
-  }
-
-  @Test (timeout = 800000)
-  public void testMidCluster() {
-    int numNodes = 100;
-    int numRegions = 10000;
-    int numRegionsPerServer = 60; // all servers except one
-    int replication = 1;
-    int numTables = 40;
-    testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, true);
-  }
-
-  @Test (timeout = 800000)
-  public void testMidCluster2() {
-    int numNodes = 200;
-    int numRegions = 100000;
-    int numRegionsPerServer = 40; // all servers except one
-    int replication = 1;
-    int numTables = 400;
-    testWithCluster(numNodes,
-        numRegions,
-        numRegionsPerServer,
-        replication,
-        numTables,
-        false, /* num large num regions means may not always get to best balance with one run */
-        false);
-  }
-
-
-  @Test (timeout = 800000)
-  public void testMidCluster3() {
-    int numNodes = 100;
-    int numRegions = 2000;
-    int numRegionsPerServer = 9; // all servers except one
-    int replication = 1;
-    int numTables = 110;
-    testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, true);
-    // TODO(eclark): Make sure that the tables are well distributed.
-  }
-
-  @Test
-  public void testLargeCluster() {
-    int numNodes = 1000;
-    int numRegions = 100000; //100 regions per RS
-    int numRegionsPerServer = 80; //all servers except one
-    int numTables = 100;
-    int replication = 1;
-    testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, true);
-  }
-
-  @Test (timeout = 800000)
-  public void testRegionReplicasOnSmallCluster() {
-    int numNodes = 10;
-    int numRegions = 1000;
-    int replication = 3; // 3 replicas per region
-    int numRegionsPerServer = 80; //all regions are mostly balanced
-    int numTables = 10;
-    testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, true);
-  }
-
-  @Test (timeout = 800000)
-  public void testRegionReplicationOnMidClusterSameHosts() {
-    conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 2000000L);
-    conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 90 * 1000); // 90 sec
-    conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f);
-    loadBalancer.setConf(conf);
-    int numHosts = 100;
-    int numRegions = 100 * 100;
-    int replication = 3; // 3 replicas per region
-    int numRegionsPerServer = 5;
-    int numTables = 10;
-    Map<ServerName, List<HRegionInfo>> serverMap =
-        createServerMap(numHosts, numRegions, numRegionsPerServer, replication, numTables);
-    int numNodesPerHost = 4;
-
-    // create a new map with 4 RS per host.
-    Map<ServerName, List<HRegionInfo>> newServerMap = new TreeMap<ServerName, List<HRegionInfo>>(serverMap);
-    for (Map.Entry<ServerName, List<HRegionInfo>> entry : serverMap.entrySet()) {
-      for (int i=1; i < numNodesPerHost; i++) {
-        ServerName s1 = entry.getKey();
-        ServerName s2 = ServerName.valueOf(s1.getHostname(), s1.getPort() + i, 1); // create an RS for the same host
-        newServerMap.put(s2, new ArrayList<HRegionInfo>());
-      }
-    }
-
-    testWithCluster(newServerMap, null, true, true);
-  }
-
-  private static class ForTestRackManager extends RackManager {
-    int numRacks;
-    public ForTestRackManager(int numRacks) {
-      this.numRacks = numRacks;
-    }
-    @Override
-    public String getRack(ServerName server) {
-      return "rack_" + (server.hashCode() % numRacks);
-    }
-  }
-
-  private static class ForTestRackManagerOne extends RackManager {
-  @Override
-    public String getRack(ServerName server) {
-      return server.getHostname().endsWith("1") ? "rack1" : "rack2";
-    }
-  }
-
-  @Test (timeout = 800000)
-  public void testRegionReplicationOnMidClusterWithRacks() {
-    conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 10000000L);
-    conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f);
-    conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 120 * 1000); // 120 sec
-    loadBalancer.setConf(conf);
-    int numNodes = 30;
-    int numRegions = numNodes * 30;
-    int replication = 3; // 3 replicas per region
-    int numRegionsPerServer = 28;
-    int numTables = 10;
-    int numRacks = 4; // all replicas should be on a different rack
-    Map<ServerName, List<HRegionInfo>> serverMap =
-        createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables);
-    RackManager rm = new ForTestRackManager(numRacks);
-
-    testWithCluster(serverMap, rm, false, true);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/93023f54/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer2.java
deleted file mode 100644
index 5008ac5..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer2.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master.balancer;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.testclassification.FlakeyTests;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category({FlakeyTests.class, MediumTests.class})
-public class TestStochasticLoadBalancer2 extends BalancerTestBase {
-  private static final Log LOG = LogFactory.getLog(TestStochasticLoadBalancer2.class);
-
-  @Test (timeout = 800000)
-  public void testRegionReplicasOnMidCluster() {
-    conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f);
-    conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 2000000L);
-    conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 90 * 1000); // 90 sec
-    conf.setFloat("hbase.master.balancer.stochastic.localityCost", 0);
-    TestStochasticLoadBalancer.loadBalancer.setConf(conf);
-    int numNodes = 200;
-    int numRegions = 40 * 200;
-    int replication = 3; // 3 replicas per region
-    int numRegionsPerServer = 30; //all regions are mostly balanced
-    int numTables = 10;
-    testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, true);
-  }
-
-  @Test (timeout = 800000)
-  public void testRegionReplicasOnLargeCluster() {
-    conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f);
-    conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 2000000L);
-    conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 90 * 
1000); // 90 sec
-    conf.setFloat("hbase.master.balancer.stochastic.localityCost", 0);
-    loadBalancer.setConf(conf);
-    int numNodes = 1000;
-    int numRegions = 20 * numNodes; // 20 * replication regions per RS
-    int numRegionsPerServer = 19; // all servers except one
-    int numTables = 100;
-    int replication = 3;
-    testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, true);
-  }
-
-  @Test (timeout = 800000)
-  public void testRegionReplicasOnMidClusterHighReplication() {
-    conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 4000000L);
-    conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 120 * 
1000); // 120 sec
-    conf.setFloat("hbase.master.balancer.stochastic.localityCost", 0);
-    conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f);
-    loadBalancer.setConf(conf);
-    int numNodes = 80;
-    int numRegions = 6 * numNodes;
-    int replication = 80; // 80 replicas per region, one for each server
-    int numRegionsPerServer = 5;
-    int numTables = 10;
-    testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, false, true);
-  }
-
-  @Test (timeout = 800000)
-  public void testRegionReplicationOnMidClusterReplicationGreaterThanNumNodes() {
-    conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 2000000L);
-    conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 120 * 1000); // 120 sec
-    conf.setFloat("hbase.master.balancer.stochastic.localityCost", 0);
-    conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f);
-    loadBalancer.setConf(conf);
-    int numNodes = 40;
-    int numRegions = 6 * 50;
-    int replication = 50; // 50 replicas per region, more than numNodes
-    int numRegionsPerServer = 6;
-    int numTables = 10;
-    testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, false);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/93023f54/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
deleted file mode 100644
index c8d3a62..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
+++ /dev/null
@@ -1,514 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.master.procedure;
-
-import java.io.IOException;
-import java.util.concurrent.CountDownLatch;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.MiniHBaseCluster;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.master.HMaster;
-import org.apache.hadoop.hbase.procedure2.Procedure;
-import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
-import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.TestProcedure;
-import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
-import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableState;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableState;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableState;
-import org.apache.hadoop.hbase.testclassification.MasterTests;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.ModifyRegionUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.mockito.Mockito;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-@Category({MasterTests.class, LargeTests.class})
-public class TestMasterFailoverWithProcedures {
-  private static final Log LOG = LogFactory.getLog(TestMasterFailoverWithProcedures.class);
-
-  protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
-
-  private static void setupConf(Configuration conf) {
-    // don't waste time retrying with the roll, the test is already slow enough.
-    conf.setInt("hbase.procedure.store.wal.max.retries.before.roll", 1);
-    conf.setInt("hbase.procedure.store.wal.wait.before.roll", 0);
-    conf.setInt("hbase.procedure.store.wal.max.roll.retries", 1);
-    conf.setInt("hbase.procedure.store.wal.sync.failure.roll.max", 1);
-  }
-
-  @Before
-  public void setup() throws Exception {
-    setupConf(UTIL.getConfiguration());
-    UTIL.startMiniCluster(2, 1);
-
-    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
-    ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExec, false);
-    ProcedureTestingUtility.setKillBeforeStoreUpdate(procExec, false);
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    try {
-      UTIL.shutdownMiniCluster();
-    } catch (Exception e) {
-      LOG.warn("failure shutting down cluster", e);
-    }
-  }
-
-  @Test(timeout=60000)
-  public void testWalRecoverLease() throws Exception {
-    final ProcedureStore masterStore = getMasterProcedureExecutor().getStore();
-    assertTrue("expected WALStore for this test", masterStore instanceof 
WALProcedureStore);
-
-    HMaster firstMaster = UTIL.getHBaseCluster().getMaster();
-    // Abort Latch for the master store
-    final CountDownLatch masterStoreAbort = new CountDownLatch(1);
-    masterStore.registerListener(new ProcedureStore.ProcedureStoreListener() {
-      @Override
-      public void postSync() {}
-
-      @Override
-      public void abortProcess() {
-        LOG.debug("Abort store of Master");
-        masterStoreAbort.countDown();
-      }
-    });
-
-    // startup a fake master the new WAL store will take the lease
-    // and the active master should abort.
-    HMaster backupMaster3 = Mockito.mock(HMaster.class);
-    Mockito.doReturn(firstMaster.getConfiguration()).when(backupMaster3).getConfiguration();
-    Mockito.doReturn(true).when(backupMaster3).isActiveMaster();
-    final WALProcedureStore backupStore3 = new WALProcedureStore(firstMaster.getConfiguration(),
-        firstMaster.getMasterFileSystem().getFileSystem(),
-        ((WALProcedureStore)masterStore).getLogDir(),
-        new MasterProcedureEnv.WALStoreLeaseRecovery(backupMaster3));
-    // Abort Latch for the test store
-    final CountDownLatch backupStore3Abort = new CountDownLatch(1);
-    backupStore3.registerListener(new ProcedureStore.ProcedureStoreListener() {
-      @Override
-      public void postSync() {}
-
-      @Override
-      public void abortProcess() {
-        LOG.debug("Abort store of backupMaster3");
-        backupStore3Abort.countDown();
-        backupStore3.stop(true);
-      }
-    });
-    backupStore3.start(1);
-    backupStore3.recoverLease();
-
-    // Try to trigger a command on the master (WAL lease expired on the active one)
-    HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(TableName.valueOf("mtb"), "f");
-    HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, null);
-    LOG.debug("submit proc");
-    try {
-      getMasterProcedureExecutor().submitProcedure(
-      new CreateTableProcedure(getMasterProcedureExecutor().getEnvironment(), htd, regions));
-      fail("expected RuntimeException 'sync aborted'");
-    } catch (RuntimeException e) {
-      LOG.info("got " + e.getMessage());
-    }
-    LOG.debug("wait master store abort");
-    masterStoreAbort.await();
-
-    // Now the real backup master should start up
-    LOG.debug("wait backup master to startup");
-    waitBackupMaster(UTIL, firstMaster);
-    assertEquals(true, firstMaster.isStopped());
-
-    // wait the store in here to abort (the test will fail due to timeout if it doesn't)
-    LOG.debug("wait the store to abort");
-    backupStore3.getStoreTracker().setDeleted(1, false);
-    try {
-      backupStore3.delete(1);
-      fail("expected RuntimeException 'sync aborted'");
-    } catch (RuntimeException e) {
-      LOG.info("got " + e.getMessage());
-    }
-    backupStore3Abort.await();
-  }
-
-  /**
-   * Tests proper fencing in case the current WAL store is fenced
-   */
-  @Test
-  public void testWALfencingWithoutWALRolling() throws IOException {
-    testWALfencing(false);
-  }
-
-  /**
-   * Tests proper fencing in case the current WAL store does not receive writes until after the
-   * new WAL does a couple of WAL rolls.
-   */
-  @Test
-  public void testWALfencingWithWALRolling() throws IOException {
-    testWALfencing(true);
-  }
-
-  public void testWALfencing(boolean walRolls) throws IOException {
-    final ProcedureStore procStore = getMasterProcedureExecutor().getStore();
-    assertTrue("expected WALStore for this test", procStore instanceof 
WALProcedureStore);
-
-    HMaster firstMaster = UTIL.getHBaseCluster().getMaster();
-
-    // cause WAL rolling after a delete in WAL:
-    firstMaster.getConfiguration().setLong("hbase.procedure.store.wal.roll.threshold", 1);
-
-    HMaster backupMaster3 = Mockito.mock(HMaster.class);
-    Mockito.doReturn(firstMaster.getConfiguration()).when(backupMaster3).getConfiguration();
-    Mockito.doReturn(true).when(backupMaster3).isActiveMaster();
-    final WALProcedureStore procStore2 = new WALProcedureStore(firstMaster.getConfiguration(),
-        firstMaster.getMasterFileSystem().getFileSystem(),
-        ((WALProcedureStore)procStore).getLogDir(),
-        new MasterProcedureEnv.WALStoreLeaseRecovery(backupMaster3));
-
-    // start a second store which should fence the first one out
-    LOG.info("Starting new WALProcedureStore");
-    procStore2.start(1);
-    procStore2.recoverLease();
-
-    // before writing back to the WAL store, optionally do a couple of WAL rolls (which causes
-    // to delete the old WAL files).
-    if (walRolls) {
-      LOG.info("Inserting into second WALProcedureStore, causing WAL rolls");
-      for (int i = 0; i < 512; i++) {
-        // insert something to the second store then delete it, causing a WAL roll(s)
-        Procedure proc2 = new TestProcedure(i);
-        procStore2.insert(proc2, null);
-        procStore2.delete(proc2.getProcId()); // delete the procedure so that the WAL is removed later
-      }
-    }
-
-    // Now, insert something to the first store, should fail.
-    // If the store does a WAL roll and continue with another logId without checking higher logIds
-    // it will incorrectly succeed.
-    LOG.info("Inserting into first WALProcedureStore");
-    try {
-      procStore.insert(new TestProcedure(11), null);
-      fail("Inserting into Procedure Store should have failed");
-    } catch (Exception ex) {
-      LOG.info("Received expected exception", ex);
-    }
-  }
-
-  // ==========================================================================
-  //  Test Create Table
-  // ==========================================================================
-  @Test(timeout=60000)
-  public void testCreateWithFailover() throws Exception {
-    // TODO: Should we try every step? (master failover takes long time)
-    // It is already covered by TestCreateTableProcedure
-    // but without the master restart, only the executor/store is restarted.
-    // Without Master restart we may not find bug in the procedure code
-    // like missing "wait" for resources to be available (e.g. RS)
-    testCreateWithFailoverAtStep(CreateTableState.CREATE_TABLE_ASSIGN_REGIONS.ordinal());
-  }
-
-  private void testCreateWithFailoverAtStep(final int step) throws Exception {
-    final TableName tableName = TableName.valueOf("testCreateWithFailoverAtStep" + step);
-
-    // create the table
-    ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
-    ProcedureTestingUtility.setKillBeforeStoreUpdate(procExec, true);
-    ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExec, true);
-
-    // Start the Create procedure && kill the executor
-    byte[][] splitKeys = null;
-    HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2");
-    HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
-    long procId = procExec.submitProcedure(
-      new CreateTableProcedure(procExec.getEnvironment(), htd, regions));
-    testRecoveryAndDoubleExecution(UTIL, procId, step, CreateTableState.values());
-
-    MasterProcedureTestingUtility.validateTableCreation(
-      UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
-  }
-
-  // ==========================================================================
-  //  Test Delete Table
-  // ==========================================================================
-  @Test(timeout=60000)
-  public void testDeleteWithFailover() throws Exception {
-    // TODO: Should we try every step? (master failover takes long time)
-    // It is already covered by TestDeleteTableProcedure
-    // but without the master restart, only the executor/store is restarted.
-    // Without Master restart we may not find bug in the procedure code
-    // like missing "wait" for resources to be available (e.g. RS)
-    testDeleteWithFailoverAtStep(DeleteTableState.DELETE_TABLE_UNASSIGN_REGIONS.ordinal());
-  }
-
-  private void testDeleteWithFailoverAtStep(final int step) throws Exception {
-    final TableName tableName = TableName.valueOf("testDeleteWithFailoverAtStep" + step);
-
-    // create the table
-    byte[][] splitKeys = null;
-    HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
-      getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2");
-    Path tableDir = FSUtils.getTableDir(getRootDir(), tableName);
-    MasterProcedureTestingUtility.validateTableCreation(
-      UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
-    UTIL.getHBaseAdmin().disableTable(tableName);
-
-    ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
-    ProcedureTestingUtility.setKillBeforeStoreUpdate(procExec, true);
-    ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExec, true);
-
-    // Start the Delete procedure && kill the executor
-    long procId = procExec.submitProcedure(
-      new DeleteTableProcedure(procExec.getEnvironment(), tableName));
-    testRecoveryAndDoubleExecution(UTIL, procId, step, DeleteTableState.values());
-
-    MasterProcedureTestingUtility.validateTableDeletion(
-      UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
-  }
-
-  // ==========================================================================
-  //  Test Truncate Table
-  // ==========================================================================
-  @Test(timeout=90000)
-  public void testTruncateWithFailover() throws Exception {
-    // TODO: Should we try every step? (master failover takes long time)
-    // It is already covered by TestTruncateTableProcedure
-    // but without the master restart, only the executor/store is restarted.
-    // Without Master restart we may not find bug in the procedure code
-    // like missing "wait" for resources to be available (e.g. RS)
-    testTruncateWithFailoverAtStep(true, TruncateTableState.TRUNCATE_TABLE_ADD_TO_META.ordinal());
-  }
-
-  private void testTruncateWithFailoverAtStep(final boolean preserveSplits, final int step)
-      throws Exception {
-    final TableName tableName = TableName.valueOf("testTruncateWithFailoverAtStep" + step);
-
-    // create the table
-    final String[] families = new String[] { "f1", "f2" };
-    final byte[][] splitKeys = new byte[][] {
-      Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
-    };
-    HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
-      getMasterProcedureExecutor(), tableName, splitKeys, families);
-    // load and verify that there are rows in the table
-    MasterProcedureTestingUtility.loadData(
-      UTIL.getConnection(), tableName, 100, splitKeys, families);
-    assertEquals(100, UTIL.countRows(tableName));
-    // disable the table
-    UTIL.getHBaseAdmin().disableTable(tableName);
-
-    ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
-    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
-
-    // Start the Truncate procedure && kill the executor
-    long procId = procExec.submitProcedure(
-      new TruncateTableProcedure(procExec.getEnvironment(), tableName, preserveSplits));
-    testRecoveryAndDoubleExecution(UTIL, procId, step, TruncateTableState.values());
-
-    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false);
-    UTIL.waitUntilAllRegionsAssigned(tableName);
-
-    // validate the table regions and layout
-    if (preserveSplits) {
-      assertEquals(1 + splitKeys.length, UTIL.getHBaseAdmin().getTableRegions(tableName).size());
-    } else {
-      regions = UTIL.getHBaseAdmin().getTableRegions(tableName).toArray(new HRegionInfo[1]);
-      assertEquals(1, regions.length);
-    }
-    MasterProcedureTestingUtility.validateTableCreation(
-      UTIL.getHBaseCluster().getMaster(), tableName, regions, families);
-
-    // verify that there are no rows in the table
-    assertEquals(0, UTIL.countRows(tableName));
-
-    // verify that the table is read/writable
-    MasterProcedureTestingUtility.loadData(
-      UTIL.getConnection(), tableName, 50, splitKeys, families);
-    assertEquals(50, UTIL.countRows(tableName));
-  }
-
-  // ==========================================================================
-  //  Test Disable Table
-  // ==========================================================================
-  @Test(timeout=60000)
-  public void testDisableTableWithFailover() throws Exception {
-    // TODO: Should we try every step? (master failover takes long time)
-    // It is already covered by TestDisableTableProcedure
-    // but without the master restart, only the executor/store is restarted.
-    // Without Master restart we may not find bug in the procedure code
-    // like missing "wait" for resources to be available (e.g. RS)
-    testDisableTableWithFailoverAtStep(
-      DisableTableState.DISABLE_TABLE_MARK_REGIONS_OFFLINE.ordinal());
-  }
-
-  private void testDisableTableWithFailoverAtStep(final int step) throws Exception {
-    final TableName tableName = TableName.valueOf("testDisableTableWithFailoverAtStep" + step);
-
-    // create the table
-    final byte[][] splitKeys = new byte[][] {
-      Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
-    };
-    MasterProcedureTestingUtility.createTable(
-      getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2");
-
-    ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
-    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
-
-    // Start the Delete procedure && kill the executor
-    long procId = procExec.submitProcedure(
-      new DisableTableProcedure(procExec.getEnvironment(), tableName, false));
-    testRecoveryAndDoubleExecution(UTIL, procId, step, DisableTableState.values());
-
-    MasterProcedureTestingUtility.validateTableIsDisabled(
-      UTIL.getHBaseCluster().getMaster(), tableName);
-  }
-
-  // ==========================================================================
-  //  Test Enable Table
-  // ==========================================================================
-  @Test(timeout=60000)
-  public void testEnableTableWithFailover() throws Exception {
-    // TODO: Should we try every step? (master failover takes long time)
-    // It is already covered by TestEnableTableProcedure
-    // but without the master restart, only the executor/store is restarted.
-    // Without Master restart we may not find bug in the procedure code
-    // like missing "wait" for resources to be available (e.g. RS)
-    testEnableTableWithFailoverAtStep(
-      EnableTableState.ENABLE_TABLE_MARK_REGIONS_ONLINE.ordinal());
-  }
-
-  private void testEnableTableWithFailoverAtStep(final int step) throws Exception {
-    final TableName tableName = TableName.valueOf("testEnableTableWithFailoverAtStep" + step);
-
-    // create the table
-    final byte[][] splitKeys = new byte[][] {
-      Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
-    };
-    MasterProcedureTestingUtility.createTable(
-      getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2");
-    UTIL.getHBaseAdmin().disableTable(tableName);
-
-    ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
-    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
-
-    // Start the Delete procedure && kill the executor
-    long procId = procExec.submitProcedure(
-      new EnableTableProcedure(procExec.getEnvironment(), tableName, false));
-    testRecoveryAndDoubleExecution(UTIL, procId, step, EnableTableState.values());
-
-    MasterProcedureTestingUtility.validateTableIsEnabled(
-      UTIL.getHBaseCluster().getMaster(), tableName);
-  }
-
-  // ==========================================================================
-  //  Test Helpers
-  // ==========================================================================
-  public static <TState> void testRecoveryAndDoubleExecution(final HBaseTestingUtility testUtil,
-      final long procId, final int lastStepBeforeFailover, TState[] states) throws Exception {
-    ProcedureExecutor<MasterProcedureEnv> procExec =
-      testUtil.getHBaseCluster().getMaster().getMasterProcedureExecutor();
-    ProcedureTestingUtility.waitProcedure(procExec, procId);
-
-    for (int i = 0; i < lastStepBeforeFailover; ++i) {
-      LOG.info("Restart "+ i +" exec state: " + states[i]);
-      ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId);
-      ProcedureTestingUtility.restart(procExec);
-      ProcedureTestingUtility.waitProcedure(procExec, procId);
-    }
-    ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId);
-
-    LOG.info("Trigger master failover");
-    masterFailover(testUtil);
-
-    procExec = testUtil.getHBaseCluster().getMaster().getMasterProcedureExecutor();
-    ProcedureTestingUtility.waitProcedure(procExec, procId);
-    ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
-  }
-
-  // ==========================================================================
-  //  Master failover utils
-  // ==========================================================================
-  public static void masterFailover(final HBaseTestingUtility testUtil)
-      throws Exception {
-    MiniHBaseCluster cluster = testUtil.getMiniHBaseCluster();
-
-    // Kill the master
-    HMaster oldMaster = cluster.getMaster();
-    cluster.killMaster(cluster.getMaster().getServerName());
-
-    // Wait the secondary
-    waitBackupMaster(testUtil, oldMaster);
-  }
-
-  public static void waitBackupMaster(final HBaseTestingUtility testUtil,
-      final HMaster oldMaster) throws Exception {
-    MiniHBaseCluster cluster = testUtil.getMiniHBaseCluster();
-
-    HMaster newMaster = cluster.getMaster();
-    while (newMaster == null || newMaster == oldMaster) {
-      Thread.sleep(250);
-      newMaster = cluster.getMaster();
-    }
-
-    while (!(newMaster.isActiveMaster() && newMaster.isInitialized())) {
-      Thread.sleep(250);
-    }
-  }
-
-  // ==========================================================================
-  //  Helpers
-  // ==========================================================================
-  private MasterProcedureEnv getMasterProcedureEnv() {
-    return getMasterProcedureExecutor().getEnvironment();
-  }
-
-  private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
-    return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
-  }
-
-  private FileSystem getFileSystem() {
-    return UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
-  }
-
-  private Path getRootDir() {
-    return UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
-  }
-
-  private Path getTempDir() {
-    return UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getTempDir();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/93023f54/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestReplicationShell.java
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestReplicationShell.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestReplicationShell.java
deleted file mode 100644
index 8379576..0000000
--- a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestReplicationShell.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.client;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.testclassification.ClientTests;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.jruby.embed.PathType;
-import org.junit.Test;
-import org.junit.Ignore;
-import org.junit.experimental.categories.Category;
-
-@Category({ ClientTests.class, LargeTests.class })
-public class TestReplicationShell extends AbstractTestShell {
-  @Ignore ("Disabled because hangs on occasion.. about 10% of the time") @Test
-  public void testRunShellTests() throws IOException {
-    System.setProperty("shell.test.include", "replication_admin_test.rb");
-    // Start all ruby tests
-    jruby.runScriptlet(PathType.ABSOLUTE, "src/test/ruby/tests_runner.rb");
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/93023f54/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShell.java
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShell.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShell.java
deleted file mode 100644
index 976ba45..0000000
--- a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShell.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.client;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.testclassification.ClientTests;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.jruby.embed.PathType;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category({ ClientTests.class, LargeTests.class })
-public class TestShell extends AbstractTestShell {
-
-  @Test
-  public void testRunShellTests() throws IOException {
-    System.setProperty("shell.test.exclude", "replication_admin_test.rb");
-    // Start all ruby tests
-    jruby.runScriptlet(PathType.ABSOLUTE, "src/test/ruby/tests_runner.rb");
-  }
-
-}
