mbautin updated the revision "[jira] [HBASE-4686] [89-fb] Fix per-store metrics aggregation".
Reviewers: Liyin, JIRA

  Addressing Liyin's comment.

REVISION DETAIL
  https://reviews.facebook.net/D87

AFFECTED FILES
  src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
  src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
  src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java
  src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
  src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
  src/test/java/org/apache/hadoop/hbase/regionserver/metrics/TestSchemaMetrics.java
Index: src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -133,7 +133,7 @@
  * regionName is a unique identifier for this HRegion. (startKey, endKey]
  * defines the keyspace for this HRegion.
  */
-public class HRegion implements HeapSize { // , Writable{
+public class HRegion implements HeapSize {
   public static final Log LOG = LogFactory.getLog(HRegion.class);
   static final String SPLITDIR = "splits";
   static final String MERGEDIR = "merges";
@@ -183,8 +183,6 @@
   final Path regiondir;
   KeyValue.KVComparator comparator;
 
-  private Pair<Long,Long> lastCompactInfo = null;
-
   /*
    * Data structure of write state flags used coordinating flushes,
    * compactions and closes.
@@ -3506,7 +3504,7 @@
 
   public static final long FIXED_OVERHEAD = ClassSize.align(
       (4 * Bytes.SIZEOF_LONG) + ClassSize.ARRAY +
-      (24 * ClassSize.REFERENCE) + ClassSize.OBJECT + Bytes.SIZEOF_INT);
+      (23 * ClassSize.REFERENCE) + ClassSize.OBJECT + Bytes.SIZEOF_INT);
 
   public static final long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD +
       ClassSize.OBJECT + (2 * ClassSize.ATOMIC_BOOLEAN) +
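
The FIXED_OVERHEAD change above keeps HRegion's heap-size estimate in step with the field removal: deleting the Pair<Long,Long> lastCompactInfo field drops one object reference, so the reference multiplier goes from 24 to 23. A minimal, self-contained sketch of this accounting, using assumed stand-in values for the real ClassSize and Bytes constants (64-bit JVM, no compressed oops):

    public class HeapSizeExample {
      // Hypothetical stand-ins for org.apache.hadoop.hbase.util.ClassSize
      // and Bytes constants; the real values are JVM-dependent.
      static final int REFERENCE = 8;    // one object reference
      static final int OBJECT = 16;      // object header
      static final int ARRAY = 24;       // array header
      static final int SIZEOF_LONG = 8;
      static final int SIZEOF_INT = 4;

      // Round up to the 8-byte boundary the JVM aligns objects to.
      static long align(long size) {
        return (size + 7) & ~7L;
      }

      public static void main(String[] args) {
        // Before the patch: 24 references, including lastCompactInfo.
        long before = align(4 * SIZEOF_LONG + ARRAY + 24 * REFERENCE
            + OBJECT + SIZEOF_INT);
        // After the patch: 23 references, lastCompactInfo removed.
        long after = align(4 * SIZEOF_LONG + ARRAY + 23 * REFERENCE
            + OBJECT + SIZEOF_INT);
        System.out.println("FIXED_OVERHEAD: " + before + " -> " + after);
      }
    }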
Index: src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -105,7 +105,6 @@
 import org.apache.hadoop.hbase.regionserver.metrics.RegionServerDynamicMetrics;
 import org.apache.hadoop.hbase.regionserver.metrics.RegionServerMetrics;
 import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaConfigured;
 import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.StoreMetricType;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.replication.regionserver.Replication;
@@ -126,7 +125,6 @@
 import org.apache.zookeeper.WatchedEvent;
 import org.apache.zookeeper.Watcher;
 import org.apache.zookeeper.Watcher.Event.EventType;
-import org.apache.zookeeper.Watcher.Event.KeeperState;
 
 /**
  * HRegionServer makes a set of HRegions available to clients.  It checks in with
@@ -221,6 +219,8 @@
   private final LinkedList<byte[]> reservedSpace = new LinkedList<byte []>();
 
   private RegionServerMetrics metrics;
+
+  @SuppressWarnings("unused")
   private RegionServerDynamicMetrics dynamicMetrics;
 
   // Compactions
@@ -260,6 +260,7 @@
   private final HServerAddress address;
 
   // The main region server thread.
+  @SuppressWarnings("unused")
   private Thread regionServerThread;
 
   private final String machineName;
@@ -405,7 +406,6 @@
   @Override
   public void process(WatchedEvent event) {
     EventType type = event.getType();
-    KeeperState state = event.getState();
 
     // Ignore events if we're shutting down.
     if (this.stopRequested.get()) {
@@ -1079,13 +1079,6 @@
     long totalStaticIndexSize = 0;
     long totalStaticBloomSize = 0;
 
-    long tmpfiles;
-    long tmpindex;
-    long tmpfilesize;
-    long tmpbloomsize;
-    long tmpstaticsize;
-    SchemaMetrics schemaMetrics;
-
     // Note that this is a map of Doubles instead of Longs. This is because we
     // do effective integer division, which would perhaps truncate more than it
     // should because we do it only on one part of our sum at a time. Rather
@@ -1091,7 +1084,8 @@
     // should because we do it only on one part of our sum at a time. Rather
     // than dividing at the end, where it is difficult to know the proper
     // factor, everything is exact then truncated.
-    Map<String, MutableDouble> tempVals = new HashMap<String, MutableDouble>();
+    final Map<String, MutableDouble> tempVals =
+        new HashMap<String, MutableDouble>();
 
     synchronized (this.onlineRegions) {
       for (Map.Entry<Integer, HRegion> e: this.onlineRegions.entrySet()) {
@@ -1100,41 +1094,51 @@
         synchronized (r.stores) {
           stores += r.stores.size();
           for(Map.Entry<byte [], Store> ee: r.stores.entrySet()) {
-            Store store = ee.getValue();
-            tmpfiles = store.getStorefilesCount();
-            tmpindex = store.getStorefilesIndexSize();
-            tmpfilesize = store.getStorefilesSize();
-            tmpbloomsize = store.getTotalStaticBloomSize();
-            tmpstaticsize = store.getTotalStaticIndexSize();
-
-            schemaMetrics = store.getSchemaMetrics();
-            schemaMetrics.updateStoreMetric(
-                StoreMetricType.STORE_FILE_COUNT,
-                tmpfiles);
-
-            schemaMetrics.updateStoreMetric(
-                StoreMetricType.STORE_FILE_INDEX_SIZE,
-                (long) (tmpindex / (1024.0 * 1024)));
-
-            schemaMetrics.updateStoreMetric(
-                StoreMetricType.STORE_FILE_SIZE_MB,
-                (long) (tmpfilesize / (1024.0 * 1024)));
-
-            schemaMetrics.updateStoreMetric(
-                StoreMetricType.STATIC_BLOOM_SIZE_KB,
-                (long)(tmpbloomsize / 1024.0));
-
-            schemaMetrics.updateStoreMetric(StoreMetricType.MEMSTORE_SIZE_MB,
-                (long)(store.getMemStoreSize() / (1024.0 * 1024)));
-
-            schemaMetrics.updateStoreMetric(
-                StoreMetricType.STATIC_INDEX_SIZE_KB,
-                (long)(tmpstaticsize / 1024.0));
-
-            storefiles += tmpfiles;
-            storefileIndexSize += tmpindex;
-            totalStaticIndexSize += tmpstaticsize;
-            totalStaticBloomSize += tmpbloomsize;
+            final Store store = ee.getValue();
+            final SchemaMetrics schemaMetrics = store.getSchemaMetrics();
+
+            {
+              long tmpStorefiles = store.getStorefilesCount();
+              schemaMetrics.accumulateStoreMetric(tempVals,
+                  StoreMetricType.STORE_FILE_COUNT, tmpStorefiles);
+              storefiles += tmpStorefiles;
+            }
+
+            {
+              long tmpStorefileIndexSize = store.getStorefilesIndexSize();
+              schemaMetrics.accumulateStoreMetric(tempVals,
+                  StoreMetricType.STORE_FILE_INDEX_SIZE,
+                  (long) (tmpStorefileIndexSize / (1024.0 * 1024)));
+              storefileIndexSize += tmpStorefileIndexSize;
+            }
+
+            {
+              long tmpStorefilesSize = store.getStorefilesSize();
+              schemaMetrics.accumulateStoreMetric(tempVals,
+                  StoreMetricType.STORE_FILE_SIZE_MB,
+                  (long) (tmpStorefilesSize / (1024.0 * 1024)));
+            }
+
+            {
+              long tmpStaticBloomSize = store.getTotalStaticBloomSize();
+              schemaMetrics.accumulateStoreMetric(tempVals,
+                  StoreMetricType.STATIC_BLOOM_SIZE_KB,
+                  (long) (tmpStaticBloomSize / 1024.0));
+              totalStaticBloomSize += tmpStaticBloomSize;
+            }
+
+            {
+              long tmpStaticIndexSize = store.getTotalStaticIndexSize();
+              schemaMetrics.accumulateStoreMetric(tempVals,
+                  StoreMetricType.STATIC_INDEX_SIZE_KB,
+                  (long) (tmpStaticIndexSize / 1024.0));
+              totalStaticIndexSize += tmpStaticIndexSize;
+            }
+
+            schemaMetrics.accumulateStoreMetric(tempVals,
+                StoreMetricType.MEMSTORE_SIZE_MB,
+                (long) (store.getMemStoreSize() / (1024.0 * 1024)));
           }
         }
       }
@@ -1139,9 +1143,11 @@
         }
       }
     }
+
     for (Entry<String, MutableDouble> e : tempVals.entrySet()) {
       HRegion.setNumericMetric(e.getKey(), e.getValue().longValue());
     }
+
     this.metrics.stores.set(stores);
     this.metrics.storefiles.set(storefiles);
     this.metrics.memstoreSizeMB.set((int)(memstoreSize/(1024*1024)));
Index: src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java
+++ src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java
@@ -32,11 +32,14 @@
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import org.apache.commons.lang.mutable.MutableDouble;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 
 /**
@@ -94,11 +97,11 @@
   private static final Log LOG = LogFactory.getLog(SchemaMetrics.class);
 
   public static enum BlockMetricType {
-    // Metric configuration: compactionAware | timeVarying
-    READ_TIME("Read",                   true,  true),
-    READ_COUNT("BlockReadCnt",          true,  false),
-    CACHE_HIT("BlockReadCacheHitCnt",   true,  false),
-    CACHE_MISS("BlockReadCacheMissCnt", true,  false),
+    // Metric configuration: compactionAware, timeVarying
+    READ_TIME("Read",                   true, true),
+    READ_COUNT("BlockReadCnt",          true, false),
+    CACHE_HIT("BlockReadCacheHitCnt",   true, false),
+    CACHE_MISS("BlockReadCacheMissCnt", true, false),
 
     CACHE_SIZE("blockCacheSize",        false, false),
     CACHED("blockCacheNumCached",       false, false),
@@ -376,13 +379,30 @@
   }
 
   /**
-   * Update the store metric to a certain value.
-   * @param storeMetricType the store metric to update
-   * @param value the value to update the metric to
+   * Used to accumulate store metrics across multiple regions in a region
+   * server.  These metrics are not "persistent", i.e. we keep overwriting them
+   * on every update instead of incrementing, so we need to accumulate them in
+   * a temporary map before pushing them to the global metric collection.
+   * @param tmpMap a temporary map for accumulating store metrics
+   * @param storeMetricType the store metric type to increment
+   * @param val the value to add to the metric
    */
-  public void updateStoreMetric(StoreMetricType storeMetricType, long value) {
-    HRegion.setNumericMetric(storeMetricNames[storeMetricType.ordinal()],
-        value);
+  public void accumulateStoreMetric(final Map<String, MutableDouble> tmpMap,
+      StoreMetricType storeMetricType, double val) {
+    final String key = getStoreMetricName(storeMetricType);
+    if (tmpMap.get(key) != null) {
+      tmpMap.get(key).add(val);
+    } else {
+      tmpMap.put(key, new MutableDouble(val));
+    }
+
+    if (this != ALL_SCHEMA_METRICS) {
+      ALL_SCHEMA_METRICS.accumulateStoreMetric(tmpMap, storeMetricType, val);
+    }
+  }
+
+  public String getStoreMetricName(StoreMetricType storeMetricType) {
+    return storeMetricNames[storeMetricType.ordinal()];
   }
 
   /**
@@ -504,17 +524,33 @@
 
   // Methods used in testing
 
-  private static final String WORD_AND_DOT_RE_STR = "[^.]+\\.";
+  private static final String regexEscape(String s) {
+    return s.replace(".", "\\.");
+  }
+
+  /**
+   * Assume that table names used in tests don't contain dots, except for the
+   * META table.
+   */
+  private static final String WORD_AND_DOT_RE_STR = "([^.]+|" +
+      regexEscape(Bytes.toString(HConstants.META_TABLE_NAME)) +
+      ")\\.";
+
+  /** "tab.<table_name>." */
   private static final String TABLE_NAME_RE_STR =
-      "\\b" + TABLE_PREFIX.replace(".", "\\.") + WORD_AND_DOT_RE_STR;
+      "\\b" + regexEscape(TABLE_PREFIX) + WORD_AND_DOT_RE_STR;
+
+  /** "cf.<cf_name>." */
   private static final String CF_NAME_RE_STR =
-      "\\b" + CF_PREFIX.replace(".", "\\.") + WORD_AND_DOT_RE_STR;
-  private static final Pattern CF_NAME_RE = Pattern.compile(
-      CF_NAME_RE_STR);
+      "\\b" + regexEscape(CF_PREFIX) + WORD_AND_DOT_RE_STR;
+  private static final Pattern CF_NAME_RE = Pattern.compile(CF_NAME_RE_STR);
+
+  /** "tab.<table_name>.cf.<cf_name>." */
   private static final Pattern TABLE_AND_CF_NAME_RE = Pattern.compile(
       TABLE_NAME_RE_STR + CF_NAME_RE_STR);
+
   private static final Pattern BLOCK_CATEGORY_RE = Pattern.compile(
-      "\\b" + BLOCK_TYPE_PREFIX.replace(".", "\\.") + "[^.]+\\." +
+      "\\b" + regexEscape(BLOCK_TYPE_PREFIX) + "[^.]+\\." +
       // Also remove the special-case block type marker for meta blocks
       "|" + META_BLOCK_CATEGORY_STR + "(?=" +
       BlockMetricType.BLOCK_METRIC_TYPE_RE + ")");
@@ -539,7 +575,7 @@
       for (boolean isCompaction : BOOL_VALUES) {
         for (BlockMetricType metricType : BlockMetricType.values()) {
           int i = getBlockMetricIndex(blockCategory, isCompaction, metricType);
-          System.err.println("blockCategory=" + blockCategory + ", "
+          LOG.debug("blockCategory=" + blockCategory + ", "
               + "metricType=" + metricType + ", isCompaction=" + isCompaction
               + ", metricName=" + blockMetricNames[i]);
         }
@@ -642,18 +678,23 @@
         final long oldValue = getLong(oldMetrics, metricName);
         final long newValue = getLong(newMetrics, metricName);
         final long delta = newValue - oldValue;
-        if (oldValue != newValue) {
-          // Debug output for the unit test
-          System.err.println("Metric=" + metricName + ", delta=" + delta);
-        }
 
-        if (cfm != ALL_SCHEMA_METRICS) {
-          // Re-calculate values of metrics with no column family (or CF/table)
-          // specified based on all metrics with CF (or CF/table) specified.
-          final String aggregateMetricName =
-              cfTableMetricRE.matcher(metricName).replaceAll("");
-          putLong(allCfDeltas, aggregateMetricName,
-              getLong(allCfDeltas, aggregateMetricName) + delta);
+        // Re-calculate values of metrics with no column family (or CF/table)
+        // specified based on all metrics with CF (or CF/table) specified.
+        if (delta != 0) {
+          if (cfm != ALL_SCHEMA_METRICS) {
+            final String aggregateMetricName =
+                cfTableMetricRE.matcher(metricName).replaceAll("");
+            if (!aggregateMetricName.equals(metricName)) {
+              LOG.debug("Counting " + delta + " units of " + metricName
+                  + " towards " + aggregateMetricName);
+
+              putLong(allCfDeltas, aggregateMetricName,
+                  getLong(allCfDeltas, aggregateMetricName) + delta);
+            }
+          } else {
+            LOG.debug("Metric=" + metricName + ", delta=" + delta);
+          }
         }
 
         Matcher matcher = BLOCK_CATEGORY_RE.matcher(metricName);
@@ -660,6 +701,7 @@
         if (matcher.find()) {
            // Only process per-block-category metrics
           String metricNoBlockCategory = matcher.replaceAll("");
+
           putLong(allBlockCategoryDeltas, metricNoBlockCategory,
               getLong(allBlockCategoryDeltas, metricNoBlockCategory) + delta);
         }
@@ -674,7 +716,7 @@
         if (errors.length() > 0)
           errors.append("\n");
         errors.append("The all-CF metric " + key + " changed by "
-            + actual + " but the aggregation of per-column-family metrics "
+            + actual + " but the aggregation of per-CF/table metrics "
             + "yields " + expected);
       }
     }
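
The regex rework above supports validateMetricChanges: per-CF metric names carry "tab.<table>." and "cf.<cf>." qualifiers, and stripping them yields the all-CF aggregate name whose delta must equal the sum of the per-CF deltas. A minimal sketch of that stripping, with a simplified pattern standing in for TABLE_NAME_RE_STR + CF_NAME_RE_STR:

    import java.util.regex.Pattern;

    public class MetricNameStripping {
      // Simplified stand-in for TABLE_NAME_RE_STR + CF_NAME_RE_STR: a
      // "tab.<word>." segment followed by a "cf.<word>." segment.
      private static final Pattern TABLE_AND_CF_NAME_RE =
          Pattern.compile("\\btab\\.[^.]+\\.cf\\.[^.]+\\.");

      public static void main(String[] args) {
        String perCfMetric = "tab.myTable.cf.cf1.storeFileCount";
        // Stripping the qualifiers yields the all-CF aggregate name that
        // the summed per-CF deltas are checked against.
        String aggregate =
            TABLE_AND_CF_NAME_RE.matcher(perCfMetric).replaceAll("");
        System.out.println(aggregate);  // prints "storeFileCount"
      }
    }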
Index: src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -33,6 +33,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.NavigableSet;
+import java.util.Random;
 import java.util.UUID;
 
 import org.apache.commons.logging.Log;
@@ -63,14 +64,14 @@
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWrapper;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.mapred.MiniMRCluster;
-import org.apache.zookeeper.ZooKeeper;
-import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.zookeeper.ZooKeeper;
 
 import com.google.common.base.Preconditions;
 
@@ -111,6 +112,13 @@
       { Compression.Algorithm.GZ }
     });
 
+  /** This is for unit tests parameterized with a single boolean. */
+  public static final List<Object[]> BOOLEAN_PARAMETERIZED =
+      Arrays.asList(new Object[][] {
+          { new Boolean(false) },
+          { new Boolean(true) }
+      });
+
   /** Compression algorithms to use in testing */
   public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS =
       new Compression.Algorithm[] {
@@ -407,6 +415,7 @@
       }
     }
     LOG.info("Minicluster is down");
+    clusterTestBuildDir = null;
   }
 
   /**
@@ -1136,4 +1145,82 @@
     }
   }
 
+  /** Creates a random table with the given parameters */
+  public HTable createRandomTable(String tableName,
+      final Collection<String> families,
+      final int maxVersions,
+      final int numColsPerRow,
+      final int numFlushes,
+      final int numRegions,
+      final int numRowsPerFlush)
+      throws IOException, InterruptedException {
+
+    LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
+        " regions, " + numFlushes + " storefiles per region, " +
+        numRowsPerFlush + " rows per flush, maxVersions=" +  maxVersions +
+        "\n");
+
+    final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
+    final int numCF = families.size();
+    final byte[][] cfBytes = new byte[numCF][];
+    final byte[] tableNameBytes = Bytes.toBytes(tableName);
+
+    {
+      int cfIndex = 0;
+      for (String cf : families) {
+        cfBytes[cfIndex++] = Bytes.toBytes(cf);
+      }
+    }
+
+    final int actualStartKey = 0;
+    final int actualEndKey = Integer.MAX_VALUE;
+    final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
+    final int splitStartKey = actualStartKey + keysPerRegion;
+    final int splitEndKey = actualEndKey - keysPerRegion;
+    final String keyFormat = "%08x";
+    final HTable table = createTable(tableNameBytes, cfBytes,
+        maxVersions,
+        Bytes.toBytes(String.format(keyFormat, splitStartKey)),
+        Bytes.toBytes(String.format(keyFormat, splitEndKey)),
+        numRegions);
+    hbaseCluster.flushcache(HConstants.META_TABLE_NAME);
+
+    for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
+      for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
+        final byte[] row = Bytes.toBytes(String.format(keyFormat,
+            actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));
+
+        Put put = new Put(row);
+        Delete del = new Delete(row);
+        for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
+          final byte[] cf = cfBytes[rand.nextInt(numCF)];
+          final long ts = rand.nextInt();
+          final byte[] qual = Bytes.toBytes("col" + iCol);
+          if (rand.nextBoolean()) {
+            final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
+                "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
+                ts + "_random_" + rand.nextLong());
+            put.add(cf, qual, ts, value);
+          } else if (rand.nextDouble() < 0.8) {
+            del.deleteColumn(cf, qual, ts);
+          } else {
+            del.deleteColumns(cf, qual, ts);
+          }
+        }
+
+        if (!put.isEmpty()) {
+          table.put(put);
+        }
+
+        if (!del.isEmpty()) {
+          table.delete(del);
+        }
+      }
+      LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
+      table.flushCommits();
+      hbaseCluster.flushcache(tableNameBytes);
+    }
+
+    return table;
+  }
 }
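
createRandomTable pre-splits the new table by cutting the key space [0, Integer.MAX_VALUE) into numRegions equal slices and passing the first and last interior split points to createTable, which is expected to split the range evenly between them. A self-contained sketch of that arithmetic for the numRegions = 4 case the new test uses:

    public class SplitKeyExample {
      public static void main(String[] args) {
        final int numRegions = 4;  // as in the new test
        final int actualStartKey = 0;
        final int actualEndKey = Integer.MAX_VALUE;
        final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
        final String keyFormat = "%08x";

        // First and last interior split points handed to createTable.
        System.out.println(
            String.format(keyFormat, actualStartKey + keysPerRegion));
        System.out.println(
            String.format(keyFormat, actualEndKey - keysPerRegion));
      }
    }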
Index: src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
===================================================================
--- /dev/null
+++ src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2011 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
+import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.
+    StoreMetricType;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Test metrics incremented on region server operations.
+ */
+public class TestRegionServerMetrics {
+
+  private static final Log LOG =
+      LogFactory.getLog(TestRegionServerMetrics.class.getName());
+
+  private final static String TABLE_NAME =
+      TestRegionServerMetrics.class.getSimpleName() + "Table";
+  private String[] FAMILIES = new String[] { "cf1", "cf2", "anotherCF" };
+  private static final int MAX_VERSIONS = 1;
+  private static final int NUM_COLS_PER_ROW = 15;
+  private static final int NUM_FLUSHES = 3;
+  private static final int NUM_REGIONS = 4;
+
+  private static final SchemaMetrics ALL_METRICS =
+      SchemaMetrics.ALL_SCHEMA_METRICS;
+
+  private static final HBaseTestingUtility TEST_UTIL =
+      new HBaseTestingUtility();
+
+  private Map<String, Long> startingMetrics;
+
+  private final int META_AND_ROOT = 2;
+
+  @Before
+  public void setUp() throws Exception {
+    SchemaMetrics.setUseTableNameInTest(true);
+    startingMetrics = SchemaMetrics.getMetricsSnapshot();
+    TEST_UTIL.startMiniCluster();
+  }
+
+  @After
+  public void tearDown() throws IOException {
+    TEST_UTIL.shutdownMiniCluster();
+    SchemaMetrics.validateMetricChanges(startingMetrics);
+  }
+
+  private void assertStoreMetricEquals(long expected,
+      SchemaMetrics schemaMetrics, StoreMetricType storeMetricType) {
+    final String storeMetricName =
+        schemaMetrics.getStoreMetricName(storeMetricType);
+    Long startValue = startingMetrics.get(storeMetricName);
+    assertEquals("Invalid value for store metric " + storeMetricName
+        + " (type " + storeMetricType + ")", expected,
+        HRegion.getNumericMetric(storeMetricName)
+            - (startValue != null ? startValue : 0));
+  }
+
+  @Test
+  public void testMultipleRegions() throws IOException, InterruptedException {
+
+    TEST_UTIL.createRandomTable(
+        TABLE_NAME,
+        Arrays.asList(FAMILIES),
+        MAX_VERSIONS, NUM_COLS_PER_ROW, NUM_FLUSHES, NUM_REGIONS, 1000);
+
+    final HRegionServer rs =
+        TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
+
+    assertEquals(NUM_REGIONS + META_AND_ROOT, rs.getOnlineRegions().size());
+
+    rs.doMetrics();
+    for (HRegion r : rs.getOnlineRegions()) {
+      for (Map.Entry<byte[], Store> storeEntry : r.getStores().entrySet()) {
+        LOG.info("For region " + r.getRegionNameAsString() + ", CF " +
+            Bytes.toStringBinary(storeEntry.getKey()) + " found store files " +
+            ": " + storeEntry.getValue().getStorefiles());
+      }
+    }
+
+    assertStoreMetricEquals(NUM_FLUSHES * NUM_REGIONS * FAMILIES.length
+        + META_AND_ROOT, ALL_METRICS, StoreMetricType.STORE_FILE_COUNT);
+
+    for (String cf : FAMILIES) {
+      SchemaMetrics schemaMetrics = SchemaMetrics.getInstance(TABLE_NAME, cf);
+      assertStoreMetricEquals(NUM_FLUSHES * NUM_REGIONS,
+          schemaMetrics, StoreMetricType.STORE_FILE_COUNT);
+    }
+  }
+
+}
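
The assertions in testMultipleRegions assume each flush produces exactly one store file per (region, column family) pair, with META and ROOT contributing the extra META_AND_ROOT store files. A worked check of the expected values under that assumption:

    public class ExpectedStoreFiles {
      public static void main(String[] args) {
        int numFlushes = 3, numRegions = 4, numFamilies = 3, metaAndRoot = 2;
        // All-schema aggregate: 3 * 4 * 3 + 2 = 38 store files.
        System.out.println(numFlushes * numRegions * numFamilies + metaAndRoot);
        // Per-CF expectation: 3 * 4 = 12 store files per column family.
        System.out.println(numFlushes * numRegions);
      }
    }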
Index: src/test/java/org/apache/hadoop/hbase/regionserver/metrics/TestSchemaMetrics.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/regionserver/metrics/TestSchemaMetrics.java
+++ src/test/java/org/apache/hadoop/hbase/regionserver/metrics/TestSchemaMetrics.java
@@ -20,7 +20,6 @@
 
 package org.apache.hadoop.hbase.regionserver.metrics;
 
-import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
@@ -26,6 +25,7 @@
 import java.util.Map;
 import java.util.Random;
 
+import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.io.hfile.BlockType;
 import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
 import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
@@ -55,10 +55,7 @@
 
   @Parameters
   public static Collection<Object[]> parameters() {
-    List<Object[]> params = new ArrayList<Object[]>();
-    params.add(new Object[] { new Boolean(false) });
-    params.add(new Object[] { new Boolean(true) });
-    return params;
+    return HBaseTestingUtility.BOOLEAN_PARAMETERIZED;
   }
 
   public TestSchemaMetrics(boolean useTableName) {
