HIVE-17307 Change the metastore to not use the metrics code in hive/common.  This closes #235.  (Alan Gates, reviewed by Vihang Karajgaonkar)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a6b9cd52
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a6b9cd52
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a6b9cd52

Branch: refs/heads/hive-14535
Commit: a6b9cd527b095fc77e4adc839206455bd937dd07
Parents: 77c28af
Author: Alan Gates <ga...@hortonworks.com>
Authored: Wed Aug 30 12:55:54 2017 -0700
Committer: Alan Gates <ga...@hortonworks.com>
Committed: Wed Aug 30 12:55:54 2017 -0700

----------------------------------------------------------------------
 .../common/metrics/common/MetricsConstant.java  |  14 --
 .../hive/metastore/TestMetaStoreMetrics.java    |  78 +++---
 .../hive/metastore/HMSMetricsListener.java      |  47 ++--
 .../hadoop/hive/metastore/HiveMetaStore.java    | 124 +++++-----
 .../hadoop/hive/metastore/ObjectStore.java      |  24 +-
 .../hive/metastore/RetryingHMSHandler.java      |   4 +-
 .../hadoop/hive/metastore/txn/TxnHandler.java   |  22 +-
 .../hadoop/hive/metastore/TestObjectStore.java  |  25 +-
 standalone-metastore/pom.xml                    |  20 ++
 .../hadoop/hive/metastore/ThreadPool.java       |  63 +++++
 .../hive/metastore/metrics/JsonReporter.java    | 166 +++++++++++++
 .../hive/metastore/metrics/JvmPauseMonitor.java | 222 +++++++++++++++++
 .../hadoop/hive/metastore/metrics/Metrics.java  | 243 +++++++++++++++++++
 .../metastore/metrics/MetricsConstants.java     |  46 ++++
 .../hive/metastore/metrics/PerfLogger.java      | 194 +++++++++++++++
 .../hive/metastore/metrics/TestMetrics.java     | 200 +++++++++++++++
 16 files changed, 1312 insertions(+), 180 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/a6b9cd52/common/src/java/org/apache/hadoop/hive/common/metrics/common/MetricsConstant.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/metrics/common/MetricsConstant.java b/common/src/java/org/apache/hadoop/hive/common/metrics/common/MetricsConstant.java
index b2e78c3..5d28e2d 100644
--- a/common/src/java/org/apache/hadoop/hive/common/metrics/common/MetricsConstant.java
+++ b/common/src/java/org/apache/hadoop/hive/common/metrics/common/MetricsConstant.java
@@ -47,20 +47,6 @@ public class MetricsConstant {
   public static final String SQL_OPERATION_PREFIX = "hs2_sql_operation_";
   public static final String COMPLETED_SQL_OPERATION_PREFIX = "hs2_completed_sql_operation_";
 
-  public static final String INIT_TOTAL_DATABASES = "init_total_count_dbs";
-  public static final String INIT_TOTAL_TABLES = "init_total_count_tables";
-  public static final String INIT_TOTAL_PARTITIONS = "init_total_count_partitions";
-
-  public static final String CREATE_TOTAL_DATABASES = "create_total_count_dbs";
-  public static final String CREATE_TOTAL_TABLES = "create_total_count_tables";
-  public static final String CREATE_TOTAL_PARTITIONS = "create_total_count_partitions";
-
-  public static final String DELETE_TOTAL_DATABASES = "delete_total_count_dbs";
-  public static final String DELETE_TOTAL_TABLES = "delete_total_count_tables";
-  public static final String DELETE_TOTAL_PARTITIONS = "delete_total_count_partitions";
-
-  public static final String DIRECTSQL_ERRORS = "directsql_errors";
-
   // The number of Hive operations that are waiting to enter the compile block
   public static final String WAITING_COMPILE_OPS = "waiting_compile_ops";
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a6b9cd52/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java
index 9b6cab3..e1cb1f3 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java
@@ -18,14 +18,13 @@
 package org.apache.hadoop.hive.metastore;
 
 import org.apache.hadoop.hive.cli.CliSessionState;
-import org.apache.hadoop.hive.common.metrics.MetricsTestUtils;
-import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;
-import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
-import org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.metrics.Metrics;
+import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
 import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
 import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.session.SessionState;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -38,7 +37,6 @@ public class TestMetaStoreMetrics {
 
   private static HiveConf hiveConf;
   private static Driver driver;
-  private static CodahaleMetrics metrics;
 
   @BeforeClass
   public static void before() throws Exception {
@@ -53,10 +51,6 @@ public class TestMetaStoreMetrics {
        .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
            "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
 
-    MetricsFactory.close();
-    MetricsFactory.init(hiveConf);
-    metrics = (CodahaleMetrics) MetricsFactory.getInstance();
-
     //Increments one HMS connection
     MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge(), hiveConf);
 
@@ -69,23 +63,19 @@ public class TestMetaStoreMetrics {
   @Test
   public void testMethodCounts() throws Exception {
     driver.run("show databases");
-    String json = metrics.dumpJson();
 
     //one call by init, one called here.
-    MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.TIMER, "api_get_all_databases", 2);
+    Assert.assertEquals(2, Metrics.getRegistry().getTimers().get("api_get_all_databases").getCount());
   }
 
   @Test
   public void testMetaDataCounts() throws Exception {
-    CodahaleMetrics metrics = (CodahaleMetrics) MetricsFactory.getInstance();
-    String json = metrics.dumpJson();
-
-    int initDbCount = (new Integer((MetricsTestUtils.getJsonNode(json, MetricsTestUtils.GAUGE,
-        MetricsConstant.INIT_TOTAL_DATABASES)).asText())).intValue();
-    int initTblCount = (new Integer((MetricsTestUtils.getJsonNode(json, MetricsTestUtils.GAUGE,
-        MetricsConstant.INIT_TOTAL_TABLES)).asText())).intValue();
-    int initPartCount = (new Integer((MetricsTestUtils.getJsonNode(json, MetricsTestUtils.GAUGE,
-        MetricsConstant.INIT_TOTAL_PARTITIONS)).asText())).intValue();
+    int initDbCount =
+        (Integer)Metrics.getRegistry().getGauges().get(MetricsConstants.TOTAL_DATABASES).getValue();
+    int initTblCount =
+        (Integer)Metrics.getRegistry().getGauges().get(MetricsConstants.TOTAL_TABLES).getValue();
+    int initPartCount =
+        (Integer)Metrics.getRegistry().getGauges().get(MetricsConstants.TOTAL_PARTITIONS).getValue();
 
     //1 databases created
     driver.run("create database testdb1");
@@ -123,25 +113,22 @@ public class TestMetaStoreMetrics {
     driver.run("use default");
     driver.run("drop database tempdb cascade");
 
-    json = metrics.dumpJson();
-    MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.COUNTER, MetricsConstant.CREATE_TOTAL_DATABASES, 2);
-    MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.COUNTER, MetricsConstant.CREATE_TOTAL_TABLES, 7);
-    MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.COUNTER, MetricsConstant.CREATE_TOTAL_PARTITIONS, 9);
+    Assert.assertEquals(2, Metrics.getRegistry().getCounters().get(MetricsConstants.CREATE_TOTAL_DATABASES).getCount());
+    Assert.assertEquals(7, Metrics.getRegistry().getCounters().get(MetricsConstants.CREATE_TOTAL_TABLES).getCount());
+    Assert.assertEquals(9, Metrics.getRegistry().getCounters().get(MetricsConstants.CREATE_TOTAL_PARTITIONS).getCount());
 
-    MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.COUNTER, MetricsConstant.DELETE_TOTAL_DATABASES, 1);
-    MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.COUNTER, MetricsConstant.DELETE_TOTAL_TABLES, 3);
-    MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.COUNTER, MetricsConstant.DELETE_TOTAL_PARTITIONS, 3);
+    Assert.assertEquals(1, Metrics.getRegistry().getCounters().get(MetricsConstants.DELETE_TOTAL_DATABASES).getCount());
+    Assert.assertEquals(3, Metrics.getRegistry().getCounters().get(MetricsConstants.DELETE_TOTAL_TABLES).getCount());
+    Assert.assertEquals(3, Metrics.getRegistry().getCounters().get(MetricsConstants.DELETE_TOTAL_PARTITIONS).getCount());
 
     //to test initial metadata count metrics.
-    hiveConf.setVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL, ObjectStore.class.getName());
-    HiveMetaStore.HMSHandler baseHandler = new HiveMetaStore.HMSHandler("test", hiveConf, false);
-    baseHandler.init();
-    baseHandler.updateMetrics();
-
-    json = metrics.dumpJson();
-    MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.GAUGE, MetricsConstant.INIT_TOTAL_DATABASES, initDbCount + 1);
-    MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.GAUGE, MetricsConstant.INIT_TOTAL_TABLES, initTblCount + 4);
-    MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.GAUGE, MetricsConstant.INIT_TOTAL_PARTITIONS, initPartCount + 6);
+    Assert.assertEquals(initDbCount + 1,
+        Metrics.getRegistry().getGauges().get(MetricsConstants.TOTAL_DATABASES).getValue());
+    Assert.assertEquals(initTblCount + 4,
+        Metrics.getRegistry().getGauges().get(MetricsConstants.TOTAL_TABLES).getValue());
+    Assert.assertEquals(initPartCount + 6,
+        Metrics.getRegistry().getGauges().get(MetricsConstants.TOTAL_PARTITIONS).getValue());
+
   }
 
 
@@ -149,25 +136,26 @@ public class TestMetaStoreMetrics {
   public void testConnections() throws Exception {
 
     //initial state is one connection
-    String json = metrics.dumpJson();
-    int initialCount = (new Integer((MetricsTestUtils.getJsonNode(json, MetricsTestUtils.COUNTER,
-                                       MetricsConstant.OPEN_CONNECTIONS)).asText())).intValue();
+    int initialCount =
+        (Integer)Metrics.getRegistry().getGauges().get(MetricsConstants.OPEN_CONNECTIONS).getValue();
 
     //create two connections
     HiveMetaStoreClient msc = new HiveMetaStoreClient(hiveConf);
     HiveMetaStoreClient msc2 = new HiveMetaStoreClient(hiveConf);
 
-    json = metrics.dumpJson();
-    MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.COUNTER, MetricsConstant.OPEN_CONNECTIONS, initialCount + 2);
+    Assert.assertEquals(initialCount + 2,
+        Metrics.getRegistry().getGauges().get(MetricsConstants.OPEN_CONNECTIONS).getValue());
 
     //close one connection, verify still two left
     msc.close();
-    json = metrics.dumpJson();
-    MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.COUNTER, MetricsConstant.OPEN_CONNECTIONS, initialCount + 1);
+    Thread.sleep(500);  // TODO Evil!  Need to figure out a way to remove this sleep.
+    Assert.assertEquals(initialCount + 1,
+        Metrics.getRegistry().getGauges().get(MetricsConstants.OPEN_CONNECTIONS).getValue());
 
     //close one connection, verify still one left
     msc2.close();
-    json = metrics.dumpJson();
-    MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.COUNTER, MetricsConstant.OPEN_CONNECTIONS, initialCount);
+    Thread.sleep(500);  // TODO Evil!  Need to figure out a way to remove this sleep.
+    Assert.assertEquals(initialCount,
+        Metrics.getRegistry().getGauges().get(MetricsConstants.OPEN_CONNECTIONS).getValue());
   }
 }
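
The rewritten assertions read straight from the shared Dropwizard registry instead of parsing a JSON dump. As an illustrative sketch (not part of this commit; it assumes Metrics.initialize() has already run somewhere in the process):

    import com.codahale.metrics.MetricRegistry;
    import org.apache.hadoop.hive.metastore.metrics.Metrics;

    public class RegistryLookupSketch {
      public static void main(String[] args) {
        // getRegistry() returns null when metrics were never initialized.
        MetricRegistry registry = Metrics.getRegistry();
        if (registry != null) {
          // Timers, counters and gauges are all keyed by plain string names.
          long calls = registry.getTimers().get("api_get_all_databases").getCount();
          System.out.println("api_get_all_databases invocations: " + calls);
        }
      }
    }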

http://git-wip-us.apache.org/repos/asf/hive/blob/a6b9cd52/metastore/src/java/org/apache/hadoop/hive/metastore/HMSMetricsListener.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HMSMetricsListener.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HMSMetricsListener.java
index 98288a0..a2ad06f 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HMSMetricsListener.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HMSMetricsListener.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -17,9 +17,9 @@
  */
 package org.apache.hadoop.hive.metastore;
 
+import com.codahale.metrics.Counter;
+import com.codahale.metrics.MetricRegistry;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.common.metrics.common.Metrics;
-import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
@@ -27,6 +27,8 @@ import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
 import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
 import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.DropTableEvent;
+import org.apache.hadoop.hive.metastore.metrics.Metrics;
+import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -35,47 +37,54 @@ import org.slf4j.LoggerFactory;
  */
 public class HMSMetricsListener extends MetaStoreEventListener {
 
-  public static final Logger LOGGER = LoggerFactory.getLogger(HMSMetricsListener.class);
-  private Metrics metrics;
+  private static final Logger LOGGER = LoggerFactory.getLogger(HMSMetricsListener.class);
+
+  private Counter createdDatabases, deletedDatabases, createdTables, deletedTables, createdParts,
+      deletedParts;
 
-  public HMSMetricsListener(Configuration config, Metrics metrics) {
+  public HMSMetricsListener(Configuration config) {
     super(config);
-    this.metrics = metrics;
+    createdDatabases = Metrics.getOrCreateCounter(MetricsConstants.CREATE_TOTAL_DATABASES);
+    deletedDatabases = Metrics.getOrCreateCounter(MetricsConstants.DELETE_TOTAL_DATABASES);
+    createdTables = Metrics.getOrCreateCounter(MetricsConstants.CREATE_TOTAL_TABLES);
+    deletedTables = Metrics.getOrCreateCounter(MetricsConstants.DELETE_TOTAL_TABLES);
+    createdParts = Metrics.getOrCreateCounter(MetricsConstants.CREATE_TOTAL_PARTITIONS);
+    deletedParts = Metrics.getOrCreateCounter(MetricsConstants.DELETE_TOTAL_PARTITIONS);
   }
 
   @Override
   public void onCreateDatabase(CreateDatabaseEvent dbEvent) throws MetaException {
-    incrementCounterInternal(MetricsConstant.CREATE_TOTAL_DATABASES);
+    HiveMetaStore.HMSHandler.databaseCount.incrementAndGet();
+    createdDatabases.inc();
   }
 
   @Override
   public void onDropDatabase(DropDatabaseEvent dbEvent) throws MetaException {
-    incrementCounterInternal(MetricsConstant.DELETE_TOTAL_DATABASES);
+    HiveMetaStore.HMSHandler.databaseCount.decrementAndGet();
+    deletedDatabases.inc();
   }
 
   @Override
   public void onCreateTable(CreateTableEvent tableEvent) throws MetaException {
-    incrementCounterInternal(MetricsConstant.CREATE_TOTAL_TABLES);
+    HiveMetaStore.HMSHandler.tableCount.incrementAndGet();
+    createdTables.inc();
   }
 
   @Override
   public void onDropTable(DropTableEvent tableEvent) throws MetaException {
-    incrementCounterInternal(MetricsConstant.DELETE_TOTAL_TABLES);
+    HiveMetaStore.HMSHandler.tableCount.decrementAndGet();
+    deletedTables.inc();
   }
 
   @Override
   public void onDropPartition(DropPartitionEvent partitionEvent) throws MetaException {
-    incrementCounterInternal(MetricsConstant.DELETE_TOTAL_PARTITIONS);
+    HiveMetaStore.HMSHandler.partCount.decrementAndGet();
+    deletedParts.inc();
   }
 
   @Override
   public void onAddPartition(AddPartitionEvent partitionEvent) throws MetaException {
-    incrementCounterInternal(MetricsConstant.CREATE_TOTAL_PARTITIONS);
-  }
-
-  private void incrementCounterInternal(String name) {
-    if (metrics != null) {
-      metrics.incrementCounter(name);
-    }
+    HiveMetaStore.HMSHandler.partCount.incrementAndGet();
+    createdParts.inc();
   }
 }
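
The listener now resolves its Dropwizard counters once, in the constructor, and calls inc() per event. In isolation the pattern looks like the following sketch (not part of this commit; the counter name is hypothetical, and the null check covers the metrics-disabled case):

    import com.codahale.metrics.Counter;
    import org.apache.hadoop.hive.metastore.metrics.Metrics;

    public class CounterSketch {
      // Resolve once; getOrCreateCounter returns null when metrics are disabled.
      private final Counter widgetsCreated =
          Metrics.getOrCreateCounter("create_total_count_widgets");

      public void onCreateWidget() {
        if (widgetsCreated != null) {
          widgetsCreated.inc();
        }
      }
    }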

http://git-wip-us.apache.org/repos/asf/hive/blob/a6b9cd52/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index df01b25..55def04 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -1,5 +1,4 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
+/** * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
  * regarding copyright ownership.  The ASF licenses this file
@@ -32,7 +31,6 @@ import java.util.AbstractMap;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
-import java.util.Formatter;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -52,6 +50,7 @@ import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
@@ -59,6 +58,7 @@ import java.util.regex.Pattern;
 
 import javax.jdo.JDOException;
 
+import com.codahale.metrics.Counter;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableListMultimap;
 import com.google.common.collect.Lists;
@@ -69,7 +69,6 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.FileUtils;
-import org.apache.hadoop.hive.common.JvmPauseMonitor;
 import org.apache.hadoop.hive.common.LogUtils;
 import org.apache.hadoop.hive.common.LogUtils.LogInitializationException;
 import org.apache.hadoop.hive.common.StatsSetupConst;
@@ -77,10 +76,6 @@ import org.apache.hadoop.hive.common.auth.HiveAuthUtils;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience;
 import org.apache.hadoop.hive.common.classification.InterfaceStability;
 import org.apache.hadoop.hive.common.cli.CommonCliOptions;
-import org.apache.hadoop.hive.common.metrics.common.Metrics;
-import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;
-import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
-import org.apache.hadoop.hive.common.metrics.common.MetricsVariable;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.io.HdfsUtils;
@@ -127,6 +122,10 @@ import org.apache.hadoop.hive.metastore.events.PreReadDatabaseEvent;
 import org.apache.hadoop.hive.metastore.events.PreReadTableEvent;
 import org.apache.hadoop.hive.metastore.filemeta.OrcFileMetadataHandler;
 import org.apache.hadoop.hive.metastore.messaging.EventMessage.EventType;
+import org.apache.hadoop.hive.metastore.metrics.JvmPauseMonitor;
+import org.apache.hadoop.hive.metastore.metrics.Metrics;
+import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
+import org.apache.hadoop.hive.metastore.metrics.PerfLogger;
 import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
 import org.apache.hadoop.hive.metastore.security.MetastoreDelegationTokenManager;
@@ -244,8 +243,9 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     private FileMetadataManager fileMetadataManager;
     private PartitionExpressionProxy expressionProxy;
 
-    //For Metrics
-    private int initDatabaseCount, initTableCount, initPartCount;
+    // Variables for metrics
+    // Package visible so that HMSMetricsListener can see them.
+    static AtomicInteger databaseCount, tableCount, partCount;
 
     private Warehouse wh; // hdfs warehouse
     private static final ThreadLocal<RawStore> threadLocalMS =
@@ -263,6 +263,14 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       }
     };
 
+    private static final ThreadLocal<Map<String, com.codahale.metrics.Timer.Context>> timerContexts =
+        new ThreadLocal<Map<String, com.codahale.metrics.Timer.Context>>() {
+      @Override
+      protected Map<String, com.codahale.metrics.Timer.Context> initialValue() {
+        return new HashMap<>();
+      }
+    };
+
     public static RawStore getRawStore() {
       return threadLocalMS.get();
     }
@@ -487,41 +495,14 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         }
       }
 
-      //Start Metrics for Embedded mode
+      //Start Metrics
       if (hiveConf.getBoolVar(ConfVars.METASTORE_METRICS)) {
-        try {
-          MetricsFactory.init(hiveConf);
-        } catch (Exception e) {
-          // log exception, but ignore inability to start
-          LOG.error("error in Metrics init: " + e.getClass().getName() + " "
-              + e.getMessage(), e);
-        }
-      }
-
-      Metrics metrics = MetricsFactory.getInstance();
-      if (metrics != null && hiveConf.getBoolVar(ConfVars.METASTORE_INIT_METADATA_COUNT_ENABLED)) {
         LOG.info("Begin calculating metadata count metrics.");
+        Metrics.initialize(hiveConf);
+        databaseCount = Metrics.getOrCreateGauge(MetricsConstants.TOTAL_DATABASES);
+        tableCount = Metrics.getOrCreateGauge(MetricsConstants.TOTAL_TABLES);
+        partCount = Metrics.getOrCreateGauge(MetricsConstants.TOTAL_PARTITIONS);
         updateMetrics();
-        LOG.info("Finished metadata count metrics: " + initDatabaseCount + " databases, " + initTableCount +
-          " tables, " + initPartCount + " partitions.");
-        metrics.addGauge(MetricsConstant.INIT_TOTAL_DATABASES, new MetricsVariable() {
-          @Override
-          public Object getValue() {
-            return initDatabaseCount;
-          }
-        });
-        metrics.addGauge(MetricsConstant.INIT_TOTAL_TABLES, new MetricsVariable() {
-          @Override
-          public Object getValue() {
-            return initTableCount;
-          }
-        });
-        metrics.addGauge(MetricsConstant.INIT_TOTAL_PARTITIONS, new MetricsVariable() {
-          @Override
-          public Object getValue() {
-            return initPartCount;
-          }
-        });
       }
 
       preListeners = MetaStoreUtils.getMetaStoreListeners(MetaStorePreEventListener.class,
@@ -534,8 +515,8 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       listeners.add(new AcidEventListener(hiveConf));
       transactionalListeners = MetaStoreUtils.getMetaStoreListeners(TransactionalMetaStoreEventListener.class,hiveConf,
               hiveConf.getVar(ConfVars.METASTORE_TRANSACTIONAL_EVENT_LISTENERS));
-      if (metrics != null) {
-        listeners.add(new HMSMetricsListener(hiveConf, metrics));
+      if (Metrics.getRegistry() != null) {
+        listeners.add(new HMSMetricsListener(hiveConf));
       }
 
       endFunctionListeners = MetaStoreUtils.getMetaStoreListeners(
@@ -851,9 +832,14 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       incrementCounter(function);
      logInfo((getThreadLocalIpAddress() == null ? "" : "source:" + getThreadLocalIpAddress() + " ") +
           function + extraLogInfo);
-      if (MetricsFactory.getInstance() != null) {
-        MetricsFactory.getInstance().startStoredScope(MetricsConstant.API_PREFIX + function);
-      }
+      com.codahale.metrics.Timer timer =
+          Metrics.getOrCreateTimer(MetricsConstants.API_PREFIX + function);
+      if (timer != null) {
+        // Timer will be null if we aren't using the metrics
+        timerContexts.get().put(function, timer.time());
+      }
+      Counter counter = Metrics.getOrCreateCounter(MetricsConstants.ACTIVE_CALLS + function);
+      if (counter != null) counter.inc();
       return function;
     }
 
@@ -890,9 +876,12 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     }
 
     private void endFunction(String function, MetaStoreEndFunctionContext context) {
-      if (MetricsFactory.getInstance() != null) {
-        MetricsFactory.getInstance().endStoredScope(MetricsConstant.API_PREFIX + function);
+      com.codahale.metrics.Timer.Context timerContext = timerContexts.get().remove(function);
+      if (timerContext != null) {
+        timerContext.close();
       }
+      Counter counter = Metrics.getOrCreateCounter(MetricsConstants.ACTIVE_CALLS + function);
+      if (counter != null) counter.dec();
 
       for (MetaStoreEndFunctionListener listener : endFunctionListeners) {
         listener.onEndFunction(function, context);
@@ -907,6 +896,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     @Override
     public void shutdown() {
       cleanupRawStore();
+      PerfLogger.getPerfLogger(false).cleanupPerfLogMetrics();
     }
 
     @Override
@@ -7254,9 +7244,11 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
     @VisibleForTesting
     public void updateMetrics() throws MetaException {
-      initTableCount = getMS().getTableCount();
-      initPartCount = getMS().getPartitionCount();
-      initDatabaseCount = getMS().getDatabaseCount();
+      if (databaseCount != null) {
+        tableCount.set(getMS().getTableCount());
+        partCount.set(getMS().getPartitionCount());
+        databaseCount.set(getMS().getDatabaseCount());
+      }
     }
 
     @Override
@@ -7538,7 +7530,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           }
           if (conf.getBoolVar(ConfVars.METASTORE_METRICS)) {
             try {
-              MetricsFactory.close();
+              Metrics.shutdown();
             } catch (Exception e) {
              LOG.error("error in Metrics deinit: " + e.getClass().getName() + " "
                 + e.getMessage(), e);
@@ -7550,7 +7542,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       //Start Metrics for Standalone (Remote) Mode
       if (conf.getBoolVar(ConfVars.METASTORE_METRICS)) {
         try {
-          MetricsFactory.init(conf);
+          Metrics.initialize(conf);
         } catch (Exception e) {
           // log exception, but ignore inability to start
           LOG.error("error in Metrics init: " + e.getClass().getName() + " "
@@ -7572,6 +7564,8 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     }
   }
 
+  private static AtomicInteger openConnections;
+
   /**
    * Start Metastore based on a passed {@link HadoopThriftAuthBridge}
    *
@@ -7700,6 +7694,10 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         serverSocket = new TServerSocketKeepAlive(serverSocket);
       }
 
+      // Metrics will have already been initialized if we're using them since HMSHandler
+      // initializes them.
+      openConnections = Metrics.getOrCreateGauge(MetricsConstants.OPEN_CONNECTIONS);
+
       TThreadPoolServer.Args args = new TThreadPoolServer.Args(serverSocket)
           .processor(processor)
           .transportFactory(transFactory)
@@ -7716,27 +7714,13 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
         @Override
         public ServerContext createContext(TProtocol tProtocol, TProtocol tProtocol1) {
-          try {
-            Metrics metrics = MetricsFactory.getInstance();
-            if (metrics != null) {
-              metrics.incrementCounter(MetricsConstant.OPEN_CONNECTIONS);
-            }
-          } catch (Exception e) {
-            LOG.warn("Error Reporting Metastore open connection to Metrics system", e);
-          }
+          openConnections.incrementAndGet();
           return null;
         }
 
         @Override
         public void deleteContext(ServerContext serverContext, TProtocol tProtocol, TProtocol tProtocol1) {
-          try {
-            Metrics metrics = MetricsFactory.getInstance();
-            if (metrics != null) {
-              metrics.decrementCounter(MetricsConstant.OPEN_CONNECTIONS);
-            }
-          } catch (Exception e) {
-            LOG.warn("Error Reporting Metastore close connection to Metrics system", e);
-          }
+          openConnections.decrementAndGet();
           // If the IMetaStoreClient#close was called, HMSHandler#shutdown would have already
           // cleaned up thread local RawStore. Otherwise, do it now.
           cleanupRawStore();
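
startFunction/endFunction bracket each Thrift call with a Dropwizard Timer.Context kept in a thread-local map because the two ends live in different methods. When both ends sit in one scope, the same API collapses to try-with-resources, since Timer.Context is Closeable. A sketch (not part of this commit; the timer name and doWork() are hypothetical):

    import com.codahale.metrics.Timer;
    import org.apache.hadoop.hive.metastore.metrics.Metrics;

    public class TimerSketch {
      public static void timedWork() {
        Timer timer = Metrics.getOrCreateTimer("api_example_call");
        if (timer == null) {   // metrics disabled
          doWork();
          return;
        }
        try (Timer.Context ignored = timer.time()) {
          doWork();            // close() records the elapsed time
        }
      }

      private static void doWork() { /* the operation being measured */ }
    }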

http://git-wip-us.apache.org/repos/asf/hive/blob/a6b9cd52/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 0a80241..efbdb8f 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -61,6 +61,8 @@ import javax.jdo.datastore.DataStoreCache;
 import javax.jdo.identity.IntIdentity;
 import javax.sql.DataSource;
 
+import com.codahale.metrics.Counter;
+import com.codahale.metrics.MetricRegistry;
 import org.apache.commons.lang.ArrayUtils;
 import org.apache.commons.lang.exception.ExceptionUtils;
 import org.apache.hadoop.conf.Configurable;
@@ -71,9 +73,6 @@ import org.apache.hadoop.hive.common.ObjectPair;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience;
 import org.apache.hadoop.hive.common.classification.InterfaceStability;
-import org.apache.hadoop.hive.common.metrics.common.Metrics;
-import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;
-import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.MetaStoreDirectSql.SqlFilterForPushdown;
@@ -126,6 +125,8 @@ import org.apache.hadoop.hive.metastore.api.UnknownTableException;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.metastore.datasource.DataSourceProvider;
 import org.apache.hadoop.hive.metastore.datasource.DataSourceProviderFactory;
+import org.apache.hadoop.hive.metastore.metrics.Metrics;
+import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
 import org.apache.hadoop.hive.metastore.model.MColumnDescriptor;
 import org.apache.hadoop.hive.metastore.model.MConstraint;
 import org.apache.hadoop.hive.metastore.model.MDBPrivilege;
@@ -245,6 +246,7 @@ public class ObjectStore implements RawStore, Configurable {
   private Transaction currentTransaction = null;
   private TXN_STATUS transactionStatus = TXN_STATUS.NO_STATE;
   private Pattern partitionValidationPattern;
+  private Counter directSqlErrors;
 
   /**
   * A Autocloseable wrapper around Query class to pass the Query object to the caller and let the caller release
@@ -325,6 +327,13 @@ public class ObjectStore implements RawStore, Configurable {
         partitionValidationPattern = null;
       }
 
+      // Note, if metrics have not been initialized this will return null, which means we aren't
+      // using metrics.  Thus we should always check whether this is non-null before using.
+      MetricRegistry registry = Metrics.getRegistry();
+      if (registry != null) {
+        directSqlErrors = Metrics.getOrCreateCounter(MetricsConstants.DIRECTSQL_ERRORS);
+      }
+      }
+
       if (!isInitialized) {
         throw new RuntimeException(
         "Unable to create persistence manager. Check dss.log for details");
@@ -2845,13 +2854,8 @@ public class ObjectStore implements RawStore, Configurable {
         start = doTrace ? System.nanoTime() : 0;
       }
 
-      Metrics metrics = MetricsFactory.getInstance();
-      if (metrics != null) {
-        try {
-          metrics.incrementCounter(MetricsConstant.DIRECTSQL_ERRORS);
-        } catch (Exception e) {
-          LOG.warn("Error reporting Direct SQL errors to metrics system", e);
-        }
+      if (directSqlErrors != null) {
+        directSqlErrors.inc();
       }
 
       doUseDirectSql = false;

http://git-wip-us.apache.org/repos/asf/hive/blob/a6b9cd52/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java
index a08c5bd..affb38f 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java
@@ -26,6 +26,7 @@ import java.lang.reflect.UndeclaredThrowableException;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.lang.exception.ExceptionUtils;
+import org.apache.hadoop.hive.metastore.metrics.PerfLogger;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -34,7 +35,6 @@ import org.apache.hadoop.hive.common.classification.InterfaceStability;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.ql.log.PerfLogger;
 import org.datanucleus.exceptions.NucleusException;
 
 @InterfaceAudience.Private
@@ -101,7 +101,7 @@ public class RetryingHMSHandler implements InvocationHandler {
     int retryCount = -1;
     int threadId = HiveMetaStore.HMSHandler.get();
     boolean error = true;
-    PerfLogger perfLogger = PerfLogger.getPerfLogger(origConf, false);
+    PerfLogger perfLogger = PerfLogger.getPerfLogger(false);
     perfLogger.PerfLogBegin(CLASS_NAME, method.getName());
     try {
       Result result = invokeInternal(proxy, method, args);

http://git-wip-us.apache.org/repos/asf/hive/blob/a6b9cd52/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index 5bfc029..1887c05 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -33,6 +33,8 @@ import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.datasource.BoneCPDataSourceProvider;
 import org.apache.hadoop.hive.metastore.datasource.DataSourceProvider;
 import org.apache.hadoop.hive.metastore.datasource.HikariCPDataSourceProvider;
+import org.apache.hadoop.hive.metastore.metrics.Metrics;
+import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.commons.dbcp.PoolingDataSource;
@@ -54,6 +56,7 @@ import java.util.*;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.locks.ReentrantLock;
 import java.util.regex.Pattern;
 
@@ -188,8 +191,6 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
 
   // Maximum number of open transactions that's allowed
   private static volatile int maxOpenTxns = 0;
-  // Current number of open txns
-  private static volatile long numOpenTxns = 0;
   // Whether number of open transactions reaches the threshold
   private static volatile boolean tooManyOpenTxns = false;
   // The AcidHouseKeeperService for counting open transactions
@@ -211,6 +212,9 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
   private long retryInterval;
   private int retryLimit;
   private int retryNum;
+  // Current number of open txns
+  private AtomicInteger numOpenTxns;
+
   /**
    * Derby specific concurrency control
    */
@@ -276,6 +280,8 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
       }
     }
 
+    numOpenTxns = Metrics.getOrCreateGauge(MetricsConstants.NUM_OPEN_TXNS);
+
     timeout = HiveConf.getTimeVar(conf, HiveConf.ConfVars.HIVE_TXN_TIMEOUT, TimeUnit.MILLISECONDS);
     buildJumpTable();
     retryInterval = HiveConf.getTimeVar(conf, HiveConf.ConfVars.HMSHANDLERINTERVAL,
@@ -460,11 +466,11 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
       }
     }
 
-    if (!tooManyOpenTxns && numOpenTxns >= maxOpenTxns) {
+    if (!tooManyOpenTxns && numOpenTxns.get() >= maxOpenTxns) {
       tooManyOpenTxns = true;
     }
     if (tooManyOpenTxns) {
-      if (numOpenTxns < maxOpenTxns * 0.9) {
+      if (numOpenTxns.get() < maxOpenTxns * 0.9) {
         tooManyOpenTxns = false;
       } else {
         LOG.warn("Maximum allowed number of open transactions (" + maxOpenTxns + ") has been " +
@@ -3159,7 +3165,13 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
           LOG.error("Transaction database not properly configured, " +
               "can't find txn_state from TXNS.");
         } else {
-          numOpenTxns = rs.getLong(1);
+          Long numOpen = rs.getLong(1);
+          if (numOpen > Integer.MAX_VALUE) {
+            LOG.error("Open transaction count above " + Integer.MAX_VALUE +
+                ", can't count that high!");
+          } else {
+            numOpenTxns.set(numOpen.intValue());
+          }
         }
       } catch (SQLException e) {
         LOG.debug("Going to rollback");
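
numOpenTxns is now an AtomicInteger handed out by Metrics.getOrCreateGauge: the registry reads it lazily, so TxnHandler only has to set() it. The general shape of that gauge pattern, as a sketch (not part of this commit; the gauge name is hypothetical, and null-guarding for the metrics-disabled case is omitted):

    import java.util.concurrent.atomic.AtomicInteger;
    import org.apache.hadoop.hive.metastore.metrics.Metrics;

    public class GaugeSketch {
      // The returned AtomicInteger is registered as a gauge; updating it is all
      // that is needed for reporters to see the new value.
      private final AtomicInteger queueDepth =
          Metrics.getOrCreateGauge("example_queue_depth");

      public void onEnqueue() { queueDepth.incrementAndGet(); }
      public void onDequeue() { queueDepth.decrementAndGet(); }
    }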

http://git-wip-us.apache.org/repos/asf/hive/blob/a6b9cd52/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java b/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java
index b28ea73..08228af 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java
@@ -23,11 +23,7 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 
-import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;
-import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
-import org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics;
-import org.apache.hadoop.hive.common.metrics.metrics2.MetricsReporting;
-import org.apache.hadoop.hive.common.metrics.MetricsTestUtils;
+import com.codahale.metrics.Counter;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 import org.apache.hadoop.hive.metastore.api.Database;
@@ -51,6 +47,8 @@ import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.messaging.EventMessage;
+import org.apache.hadoop.hive.metastore.metrics.Metrics;
+import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
 import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
@@ -364,11 +362,12 @@ public class TestObjectStore {
   public void testDirectSqlErrorMetrics() throws Exception {
     HiveConf conf = new HiveConf();
     conf.setBoolVar(HiveConf.ConfVars.HIVE_SERVER2_METRICS_ENABLED, true);
-    conf.setVar(HiveConf.ConfVars.HIVE_METRICS_REPORTER, MetricsReporting.JSON_FILE.name()
-        + "," + MetricsReporting.JMX.name());
+    Metrics.initialize(conf);
 
-    MetricsFactory.init(conf);
-    CodahaleMetrics metrics = (CodahaleMetrics) MetricsFactory.getInstance();
+    // re-call setup so that we get an object store with the metrics initialized
+    setUp();
+    Counter directSqlErrors =
+        Metrics.getRegistry().getCounters().get(MetricsConstants.DIRECTSQL_ERRORS);
 
     objectStore.new GetDbHelper("foo", null, true, true) {
       @Override
@@ -383,9 +382,7 @@ public class TestObjectStore {
       }
     }.run(false);
 
-    String json = metrics.dumpJson();
-    MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.COUNTER,
-        MetricsConstant.DIRECTSQL_ERRORS, "");
+    Assert.assertEquals(0, directSqlErrors.getCount());
 
     objectStore.new GetDbHelper("foo", null, true, true) {
       @Override
@@ -400,9 +397,7 @@ public class TestObjectStore {
       }
     }.run(false);
 
-    json = metrics.dumpJson();
-    MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.COUNTER,
-        MetricsConstant.DIRECTSQL_ERRORS, 1);
+    Assert.assertEquals(1, directSqlErrors.getCount());
   }
 
   public static void dropAllStoreObjects(RawStore store) throws MetaException, InvalidObjectException, InvalidInputException {

http://git-wip-us.apache.org/repos/asf/hive/blob/a6b9cd52/standalone-metastore/pom.xml
----------------------------------------------------------------------
diff --git a/standalone-metastore/pom.xml b/standalone-metastore/pom.xml
index 2e0c51e..e38ef37 100644
--- a/standalone-metastore/pom.xml
+++ b/standalone-metastore/pom.xml
@@ -33,11 +33,31 @@
 
   <dependencies>
     <dependency>
+      <groupId>com.github.joshelser</groupId>
+      <artifactId>dropwizard-metrics-hadoop-metrics2-reporter</artifactId>
+      <version>${dropwizard-metrics-hadoop-metrics2-reporter.version}</version>
+    </dependency>
+    <dependency>
       <groupId>com.google.guava</groupId>
       <artifactId>guava</artifactId>
       <version>${guava.version}</version>
     </dependency>
     <dependency>
+      <groupId>io.dropwizard.metrics</groupId>
+      <artifactId>metrics-core</artifactId>
+      <version>${dropwizard.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>io.dropwizard.metrics</groupId>
+      <artifactId>metrics-jvm</artifactId>
+      <version>${dropwizard.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>io.dropwizard.metrics</groupId>
+      <artifactId>metrics-json</artifactId>
+      <version>${dropwizard.version}</version>
+    </dependency>
+    <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
       <version>${hadoop.version}</version>

http://git-wip-us.apache.org/repos/asf/hive/blob/a6b9cd52/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ThreadPool.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ThreadPool.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ThreadPool.java
new file mode 100644
index 0000000..d0fcd25
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ThreadPool.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+
+/**
+ * Utility singleton class to manage all the threads.
+ */
+public class ThreadPool {
+
+  static final private Logger LOG = LoggerFactory.getLogger(ThreadPool.class);
+  private static ThreadPool self = null;
+  private static ScheduledExecutorService pool;
+
+  public static synchronized ThreadPool initialize(Configuration conf) {
+    if (self == null) {
+      self = new ThreadPool(conf);
+      LOG.debug("ThreadPool initialized");
+    }
+    return self;
+  }
+
+  private ThreadPool(Configuration conf) {
+    pool = Executors.newScheduledThreadPool(MetastoreConf.getIntVar(conf,
+        MetastoreConf.ConfVars.THREAD_POOL_SIZE));
+  }
+
+  public static ScheduledExecutorService getPool() {
+    if (self == null) {
+      throw new RuntimeException("ThreadPool accessed before initialized");
+    }
+    return pool;
+  }
+
+  public static synchronized void shutdown() {
+    if (self != null) {
+      pool.shutdown();
+      self = null;
+    }
+  }
+}
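
ThreadPool is a process-wide ScheduledExecutorService sized by MetastoreConf.ConfVars.THREAD_POOL_SIZE. Typical use, as a sketch (not part of this commit):

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.ThreadPool;

    public class ThreadPoolSketch {
      public static void main(String[] args) {
        ThreadPool.initialize(new Configuration());  // first call wins; later calls are no-ops
        ThreadPool.getPool().scheduleAtFixedRate(
            () -> System.out.println("periodic maintenance"), 0, 60, TimeUnit.SECONDS);
        // ... and on service shutdown:
        ThreadPool.shutdown();
      }
    }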

http://git-wip-us.apache.org/repos/asf/hive/blob/a6b9cd52/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/metrics/JsonReporter.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/metrics/JsonReporter.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/metrics/JsonReporter.java
new file mode 100644
index 0000000..b804cda
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/metrics/JsonReporter.java
@@ -0,0 +1,166 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.metrics;
+
+import com.codahale.metrics.Counter;
+import com.codahale.metrics.Gauge;
+import com.codahale.metrics.Histogram;
+import com.codahale.metrics.Meter;
+import com.codahale.metrics.MetricFilter;
+import com.codahale.metrics.MetricRegistry;
+import com.codahale.metrics.ScheduledReporter;
+import com.codahale.metrics.Timer;
+import com.codahale.metrics.json.MetricsModule;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.ObjectWriter;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedWriter;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.net.URI;
+import java.util.SortedMap;
+import java.util.concurrent.TimeUnit;
+
+public class JsonReporter extends ScheduledReporter {
+  private static final Logger LOG = LoggerFactory.getLogger(JsonReporter.class);
+
+  private final Configuration conf;
+  private final MetricRegistry registry;
+  private ObjectWriter jsonWriter;
+  private Path path;
+  private Path tmpPath;
+  private FileSystem fs;
+
+  private JsonReporter(MetricRegistry registry, String name, MetricFilter filter,
+                       TimeUnit rateUnit, TimeUnit durationUnit, Configuration conf) {
+    super(registry, name, filter, rateUnit, durationUnit);
+    this.conf = conf;
+    this.registry = registry;
+  }
+
+  @Override
+  public void start(long period, TimeUnit unit) {
+    jsonWriter = new ObjectMapper().registerModule(new MetricsModule(TimeUnit.MILLISECONDS,
+        TimeUnit.MILLISECONDS, false)).writerWithDefaultPrettyPrinter();
+    String pathString = MetastoreConf.getVar(conf, MetastoreConf.ConfVars.METRICS_JSON_FILE_LOCATION);
+    path = new Path(pathString);
+
+    tmpPath = new Path(pathString + ".tmp");
+    URI tmpPathURI = tmpPath.toUri();
+    try {
+      if (tmpPathURI.getScheme() == null && tmpPathURI.getAuthority() == null) {
+        //default local
+        fs = FileSystem.getLocal(conf);
+      } else {
+        fs = FileSystem.get(tmpPathURI, conf);
+      }
+    }
+    catch (IOException e) {
+      LOG.error("Unable to access filesystem for path " + tmpPath + ". Aborting reporting", e);
+      return;
+    }
+    super.start(period, unit);
+  }
+
+  @Override
+  public void report(SortedMap<String, Gauge> sortedMap, SortedMap<String, Counter> sortedMap1,
+                     SortedMap<String, Histogram> sortedMap2, SortedMap<String, Meter> sortedMap3,
+                     SortedMap<String, Timer> sortedMap4) {
+
+    String json;
+    try {
+      json = jsonWriter.writeValueAsString(registry);
+    } catch (JsonProcessingException e) {
+      LOG.error("Unable to convert json to string ", e);
+      return;
+    }
+
+    BufferedWriter bw = null;
+    try {
+      fs.delete(tmpPath, true);
+      bw = new BufferedWriter(new OutputStreamWriter(fs.create(tmpPath, true)));
+      bw.write(json);
+      fs.setPermission(tmpPath, FsPermission.createImmutable((short) 0644));
+    } catch (IOException e) {
+      LOG.error("Unable to write to temp file " + tmpPath, e);
+      return;
+    } finally {
+      if (bw != null) {
+        try {
+          bw.close();
+        } catch (IOException e) {
+          // Not much we can do
+          LOG.error("Caught error closing json metric reporter file", e);
+        }
+      }
+    }
+
+    try {
+      fs.rename(tmpPath, path);
+      fs.setPermission(path, FsPermission.createImmutable((short) 0644));
+    } catch (IOException e) {
+      LOG.error("Unable to rename temp file " + tmpPath + " to " + path, e);
+    }
+  }
+
+  public static Builder forRegistry(MetricRegistry registry, Configuration conf) {
+    return new Builder(registry, conf);
+  }
+
+  public static class Builder {
+    private final MetricRegistry registry;
+    private final Configuration conf;
+    private TimeUnit rate = TimeUnit.SECONDS;
+    private TimeUnit duration = TimeUnit.MILLISECONDS;
+    private MetricFilter filter = MetricFilter.ALL;
+
+    private Builder(MetricRegistry registry, Configuration conf) {
+      this.registry = registry;
+      this.conf = conf;
+    }
+
+    public Builder convertRatesTo(TimeUnit rate) {
+      this.rate = rate;
+      return this;
+    }
+
+    public Builder convertDurationsTo(TimeUnit duration) {
+      this.duration = duration;
+      return this;
+    }
+
+    public Builder filter(MetricFilter filter) {
+      this.filter = filter;
+      return this;
+    }
+
+    public JsonReporter build() {
+      return new JsonReporter(registry, "json-reporter", filter, rate, duration, conf);
+    }
+
+  }
+
+}
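
JsonReporter is a standard ScheduledReporter, so it wires up with the usual Dropwizard builder idiom; Metrics.initialize presumably does the equivalent internally when the JSON reporter is configured. A hand-wired sketch (not part of this commit; assumes Metrics.initialize accepts a plain Configuration carrying METRICS_JSON_FILE_LOCATION):

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.metrics.JsonReporter;
    import org.apache.hadoop.hive.metastore.metrics.Metrics;

    public class JsonReporterSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();  // must supply METRICS_JSON_FILE_LOCATION
        Metrics.initialize(conf);
        JsonReporter reporter = JsonReporter.forRegistry(Metrics.getRegistry(), conf)
            .convertRatesTo(TimeUnit.SECONDS)
            .convertDurationsTo(TimeUnit.MILLISECONDS)
            .build();
        reporter.start(60, TimeUnit.SECONDS);  // rewrite the JSON file once a minute
      }
    }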

http://git-wip-us.apache.org/repos/asf/hive/blob/a6b9cd52/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/metrics/JvmPauseMonitor.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/metrics/JvmPauseMonitor.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/metrics/JvmPauseMonitor.java
new file mode 100644
index 0000000..93414fc
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/metrics/JvmPauseMonitor.java
@@ -0,0 +1,222 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.metrics;
+
+import com.codahale.metrics.Counter;
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Stopwatch;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.Daemon;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.lang.management.GarbageCollectorMXBean;
+import java.lang.management.ManagementFactory;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Based on the JvmPauseMonitor from Hadoop.
+ */
+public class JvmPauseMonitor {
+  private static final Logger LOG = LoggerFactory.getLogger(
+    JvmPauseMonitor.class);
+
+  /** The target sleep time */
+  private static final long SLEEP_INTERVAL_MS = 500;
+
+  /** log WARN if we detect a pause longer than this threshold */
+  private final long warnThresholdMs;
+  private static final String WARN_THRESHOLD_KEY =
+    "jvm.pause.warn-threshold.ms";
+  private static final long WARN_THRESHOLD_DEFAULT = 10000;
+
+  /** log INFO if we detect a pause longer than this threshold */
+  private final long infoThresholdMs;
+  private static final String INFO_THRESHOLD_KEY =
+    "jvm.pause.info-threshold.ms";
+  private static final long INFO_THRESHOLD_DEFAULT = 1000;
+
+  private long numGcWarnThresholdExceeded = 0;
+  private long numGcInfoThresholdExceeded = 0;
+  private long totalGcExtraSleepTime = 0;
+
+  private Thread monitorThread;
+  private volatile boolean shouldRun = true;
+
+  public JvmPauseMonitor(Configuration conf) {
+    this.warnThresholdMs = conf.getLong(WARN_THRESHOLD_KEY, WARN_THRESHOLD_DEFAULT);
+    this.infoThresholdMs = conf.getLong(INFO_THRESHOLD_KEY, INFO_THRESHOLD_DEFAULT);
+  }
+
+  public void start() {
+    Preconditions.checkState(monitorThread == null,
+      "JvmPauseMonitor thread is Already started");
+    monitorThread = new Daemon(new Monitor());
+    monitorThread.start();
+  }
+
+  public void stop() {
+    shouldRun = false;
+    if (isStarted()) {
+      monitorThread.interrupt();
+      try {
+        monitorThread.join();
+      } catch (InterruptedException e) {
+        Thread.currentThread().interrupt();
+      }
+    }
+  }
+
+  public boolean isStarted() {
+    return monitorThread != null;
+  }
+
+  public long getNumGcWarnThreadholdExceeded() {
+    return numGcWarnThresholdExceeded;
+  }
+
+  public long getNumGcInfoThresholdExceeded() {
+    return numGcInfoThresholdExceeded;
+  }
+
+  public long getTotalGcExtraSleepTime() {
+    return totalGcExtraSleepTime;
+  }
+
+  private String formatMessage(long extraSleepTime,
+    Map<String, GcTimes> gcTimesAfterSleep,
+    Map<String, GcTimes> gcTimesBeforeSleep) {
+
+    Set<String> gcBeanNames = Sets.intersection(
+      gcTimesAfterSleep.keySet(),
+      gcTimesBeforeSleep.keySet());
+    List<String> gcDiffs = Lists.newArrayList();
+    for (String name : gcBeanNames) {
+      GcTimes diff = gcTimesAfterSleep.get(name).subtract(
+        gcTimesBeforeSleep.get(name));
+      if (diff.gcCount != 0) {
+        gcDiffs.add("GC pool '" + name + "' had collection(s): " +
+          diff.toString());
+      }
+    }
+
+    String ret = "Detected pause in JVM or host machine (eg GC): " +
+      "pause of approximately " + extraSleepTime + "ms\n";
+    if (gcDiffs.isEmpty()) {
+      ret += "No GCs detected";
+    } else {
+      ret += Joiner.on("\n").join(gcDiffs);
+    }
+    return ret;
+  }
+
+  private Map<String, GcTimes> getGcTimes() {
+    Map<String, GcTimes> map = Maps.newHashMap();
+    List<GarbageCollectorMXBean> gcBeans =
+      ManagementFactory.getGarbageCollectorMXBeans();
+    for (GarbageCollectorMXBean gcBean : gcBeans) {
+      map.put(gcBean.getName(), new GcTimes(gcBean));
+    }
+    return map;
+  }
+
+  private static class GcTimes {
+    private GcTimes(GarbageCollectorMXBean gcBean) {
+      gcCount = gcBean.getCollectionCount();
+      gcTimeMillis = gcBean.getCollectionTime();
+    }
+
+    private GcTimes(long count, long time) {
+      this.gcCount = count;
+      this.gcTimeMillis = time;
+    }
+
+    private GcTimes subtract(GcTimes other) {
+      return new GcTimes(this.gcCount - other.gcCount,
+        this.gcTimeMillis - other.gcTimeMillis);
+    }
+
+    @Override
+    public String toString() {
+      return "count=" + gcCount + " time=" + gcTimeMillis + "ms";
+    }
+
+    private final long gcCount;
+    private final long gcTimeMillis;
+  }
+
+  private class Monitor implements Runnable {
+    @Override
+    public void run() {
+      Stopwatch sw = new Stopwatch();
+      Map<String, GcTimes> gcTimesBeforeSleep = getGcTimes();
+      Counter jvmPauseWarnCnt = Metrics.getOrCreateCounter(MetricsConstants.JVM_PAUSE_WARN);
+      Counter jvmPauseInfoCnt = Metrics.getOrCreateCounter(MetricsConstants.JVM_PAUSE_INFO);
+      Counter jvmExtraSleepCnt = Metrics.getOrCreateCounter(MetricsConstants.JVM_EXTRA_SLEEP);
+      while (shouldRun) {
+        sw.reset().start();
+        try {
+          Thread.sleep(SLEEP_INTERVAL_MS);
+        } catch (InterruptedException ie) {
+          return;
+        }
+        long extraSleepTime = sw.elapsed(TimeUnit.MILLISECONDS) - SLEEP_INTERVAL_MS;
+        Map<String, GcTimes> gcTimesAfterSleep = getGcTimes();
+
+        if (extraSleepTime > warnThresholdMs) {
+          ++numGcWarnThresholdExceeded;
+          LOG.warn(formatMessage(
+            extraSleepTime, gcTimesAfterSleep, gcTimesBeforeSleep));
+          if (jvmPauseWarnCnt != null) jvmPauseWarnCnt.inc();
+        } else if (extraSleepTime > infoThresholdMs) {
+          ++numGcInfoThresholdExceeded;
+          LOG.info(formatMessage(
+            extraSleepTime, gcTimesAfterSleep, gcTimesBeforeSleep));
+          if (jvmPauseInfoCnt != null) jvmPauseInfoCnt.inc();
+        }
+        if (jvmExtraSleepCnt != null) jvmExtraSleepCnt.inc(extraSleepTime);
+        totalGcExtraSleepTime += extraSleepTime;
+        gcTimesBeforeSleep = gcTimesAfterSleep;
+      }
+    }
+  }
+
+  /**
+   * Simple 'main' to facilitate manual testing of the pause monitor.
+   *
+   * This main function just leaks memory into a list. Running this class
+   * with a 1GB heap will very quickly go into "GC hell" and result in
+   * log messages about the GC pauses.
+   */
+  public static void main(String[] args) throws Exception {
+    new JvmPauseMonitor(new Configuration()).start();
+    List<String> list = Lists.newArrayList();
+    int i = 0;
+    while (true) {
+      list.add(String.valueOf(i++));
+    }
+  }
+}
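
For reviewers who want to exercise the monitor outside of metastore startup, here is a
minimal wiring sketch (illustrative only, not part of this patch; the class name and the
sleep stand in for real service code):

    // Hypothetical harness for JvmPauseMonitor.  Metrics need not be
    // initialized; the pause counters are simply skipped when null.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.metrics.JvmPauseMonitor;

    public class PauseMonitorSketch {
      public static void main(String[] args) throws Exception {
        JvmPauseMonitor pauseMonitor = new JvmPauseMonitor(new Configuration());
        pauseMonitor.start();                  // spawns the daemon Monitor thread
        try {
          Thread.sleep(60_000L);               // stand-in for real service work
        } finally {
          pauseMonitor.stop();                 // interrupts and joins the monitor thread
          System.out.println("Total extra sleep time: "
              + pauseMonitor.getTotalGcExtraSleepTime() + "ms");
        }
      }
    }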

http://git-wip-us.apache.org/repos/asf/hive/blob/a6b9cd52/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/metrics/Metrics.java
----------------------------------------------------------------------
diff --git 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/metrics/Metrics.java
 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/metrics/Metrics.java
new file mode 100644
index 0000000..c0b1ebc
--- /dev/null
+++ 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/metrics/Metrics.java
@@ -0,0 +1,243 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.metrics;
+
+import com.codahale.metrics.ConsoleReporter;
+import com.codahale.metrics.Counter;
+import com.codahale.metrics.Gauge;
+import com.codahale.metrics.JmxReporter;
+import com.codahale.metrics.MetricRegistry;
+import com.codahale.metrics.Reporter;
+import com.codahale.metrics.ScheduledReporter;
+import com.codahale.metrics.Timer;
+import com.codahale.metrics.jvm.BufferPoolMetricSet;
+import com.codahale.metrics.jvm.ClassLoadingGaugeSet;
+import com.codahale.metrics.jvm.GarbageCollectorMetricSet;
+import com.codahale.metrics.jvm.MemoryUsageGaugeSet;
+import com.codahale.metrics.jvm.ThreadStatesGaugeSet;
+import com.github.joshelser.dropwizard.metrics.hadoop.HadoopMetrics2Reporter;
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.lang.management.ManagementFactory;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+public class Metrics {
+  private static final Logger LOGGER = LoggerFactory.getLogger(Metrics.class);
+
+  private static Metrics self;
+  private static final AtomicInteger singletonAtomicInteger = new AtomicInteger();
+
+  private final MetricRegistry registry;
+  private List<Reporter> reporters;
+  private List<ScheduledReporter> scheduledReporters;
+  private Map<String, AtomicInteger> gaugeAtomics;
+  private boolean hadoopMetricsStarted;
+
+  public static synchronized Metrics initialize(Configuration conf) {
+    if (self == null) {
+      self = new Metrics(conf);
+    }
+    return self;
+  }
+
+  public static MetricRegistry getRegistry() {
+    if (self == null) return null;
+    return self.registry;
+  }
+
+  public static void shutdown() {
+    if (self != null) {
+      for (ScheduledReporter reporter : self.scheduledReporters) {
+        reporter.stop();
+        reporter.close();
+      }
+      if (self.hadoopMetricsStarted) DefaultMetricsSystem.shutdown();
+      self = null;
+    }
+  }
+
+  /**
+   * Get an existing counter or create a new one if the requested one does not yet exist.
+   * Creation is synchronized to assure that only one instance of the counter is created.
+   * @param name name of the counter
+   * @return new Counter, or existing one if it already exists.  This will return null if the
+   * metrics have not been initialized.
+   */
+  public static Counter getOrCreateCounter(String name) {
+    if (self == null) return null;
+    Map<String, Counter> counters = self.registry.getCounters();
+    Counter counter = counters.get(name);
+    if (counter != null) return counter;
+    // Looks like it doesn't exist.  Lock so that two threads don't create it at once.
+    synchronized (Metrics.class) {
+      // Recheck to make sure someone didn't create it while we waited.
+      counters = self.registry.getCounters();
+      counter = counters.get(name);
+      if (counter != null) return counter;
+      return self.registry.counter(name);
+    }
+  }
+
+  /**
+   * Get an existing timer or create a new one if the requested one does not yet exist.
+   * Creation is synchronized to assure that only one instance of the timer is created.
+   * @param name timer name
+   * @return new Timer, or existing one if it already exists, null if the metrics have not been
+   * initialized.
+   */
+  public static Timer getOrCreateTimer(String name) {
+    if (self == null) return null;
+    Map<String, Timer> timers = self.registry.getTimers();
+    Timer timer = timers.get(name);
+    if (timer != null) return timer;
+    synchronized (Metrics.class) {
+      timers = self.registry.getTimers();
+      timer = timers.get(name);
+      if (timer != null) return timer;
+      return self.registry.timer(name);
+    }
+  }
+
+  /**
+   * Get the AtomicInteger behind an existing gauge, or create a new gauge if it does not
+   * already exist.
+   * @param name Name of the gauge.  This should come from MetricsConstants.
+   * @return AtomicInteger underlying this gauge.
+   */
+  public static AtomicInteger getOrCreateGauge(String name) {
+    // We return a garbage value if metrics haven't been initialized so that callers don't
+    // have to keep checking if the resulting value is null.
+    if (self == null) return singletonAtomicInteger;
+    AtomicInteger ai = self.gaugeAtomics.get(name);
+    if (ai != null) return ai;
+    synchronized (Metrics.class) {
+      ai = self.gaugeAtomics.get(name);
+      if (ai != null) return ai;
+      ai = new AtomicInteger();
+      final AtomicInteger forGauge = ai;
+      self.gaugeAtomics.put(name, ai);
+      self.registry.register(name, new Gauge<Integer>() {
+        @Override
+        public Integer getValue() {
+          return forGauge.get();
+        }
+      });
+      return ai;
+    }
+  }
+
+  @VisibleForTesting
+  static List<Reporter> getReporters() {
+    return self.reporters;
+  }
+
+  private Metrics(Configuration conf) {
+    registry = new MetricRegistry();
+
+    registry.registerAll(new GarbageCollectorMetricSet());
+    registry.registerAll(new BufferPoolMetricSet(ManagementFactory.getPlatformMBeanServer()));
+    registry.registerAll(new MemoryUsageGaugeSet());
+    registry.registerAll(new ThreadStatesGaugeSet());
+    registry.registerAll(new ClassLoadingGaugeSet());
+
+    /*
+     * This is a little complicated.  First we look for our own config values in this
+     * configuration.  If those aren't set we use the Hive ones.  But Hive also has multiple
+     * ways to do this, so we need to look in both of theirs as well.  We can't use theirs
+     * directly because they wrap the codahale reporters in their own and we do not.
+     */
+    // Check our config value first.  I'm explicitly avoiding getting the default value for now,
+    // as I don't want our default to override a Hive-set value.
+    String reportersToStart = conf.get(MetastoreConf.ConfVars.METRICS_REPORTERS.varname);
+    if (reportersToStart == null) {
+      // Now look in the current Hive config value.  Again, avoiding getting defaults.
+      reportersToStart =
+          conf.get(MetastoreConf.ConfVars.HIVE_CODAHALE_METRICS_REPORTER_CLASSES.hiveName);
+      if (reportersToStart == null) {
+        // Last chance, look in the old Hive config value.  Still avoiding defaults.
+        reportersToStart =
+            conf.get(MetastoreConf.ConfVars.HIVE_METRICS_REPORTER.hiveName);
+        if (reportersToStart == null) {
+          // Alright fine, we'll use our defaults.
+          reportersToStart = MetastoreConf.getVar(conf, MetastoreConf.ConfVars.METRICS_REPORTERS);
+        }
+      }
+    }
+
+    reporters = new ArrayList<>();
+    scheduledReporters = new ArrayList<>();
+    if (reportersToStart != null && reportersToStart.length() > 0) {
+      String[] reporterNames = reportersToStart.toLowerCase().split(",");
+      for (String reporterName : reporterNames) {
+        if (reporterName.equals("console") || 
reporterName.endsWith("consolemetricsreporter")) {
+          ConsoleReporter reporter = ConsoleReporter.forRegistry(registry)
+              .convertRatesTo(TimeUnit.SECONDS)
+              .convertDurationsTo(TimeUnit.MILLISECONDS)
+              .build();
+          reporter.start(15, TimeUnit.SECONDS);
+          reporters.add(reporter);
+          scheduledReporters.add(reporter);
+        } else if (reporterName.equals("jmx") || 
reporterName.endsWith("jmxmetricsreporter")) {
+          JmxReporter reporter = JmxReporter.forRegistry(registry)
+              .convertRatesTo(TimeUnit.SECONDS)
+              .convertDurationsTo(TimeUnit.MILLISECONDS)
+              .build();
+          reporter.start();
+          reporters.add(reporter);
+        } else if (reporterName.startsWith("json") || 
reporterName.endsWith("jsonfilemetricsreporter")) {
+          // We have to initialize the thread pool before we start this one, 
as it uses it
+          JsonReporter reporter = JsonReporter.forRegistry(registry, conf)
+              .convertRatesTo(TimeUnit.SECONDS)
+              .convertDurationsTo(TimeUnit.MILLISECONDS)
+              .build();
+          reporter.start(MetastoreConf.getTimeVar(conf,
+              MetastoreConf.ConfVars.METRICS_JSON_FILE_INTERVAL, TimeUnit.SECONDS), TimeUnit.SECONDS);
+          reporters.add(reporter);
+          scheduledReporters.add(reporter);
+        } else if (reporterName.startsWith("hadoop") || 
reporterName.endsWith("metrics2reporter")) {
+          HadoopMetrics2Reporter reporter = 
HadoopMetrics2Reporter.forRegistry(registry)
+              .convertRatesTo(TimeUnit.SECONDS)
+              .convertDurationsTo(TimeUnit.MILLISECONDS)
+              .build(DefaultMetricsSystem.initialize("metastore"), 
"metastore", "Runtime metadata" +
+                  " catalog", "general-metadata");
+          reporter.start(1, TimeUnit.MINUTES);
+          reporters.add(reporter);
+          scheduledReporters.add(reporter);
+          hadoopMetricsStarted = true;
+        } else {
+          throw new RuntimeException("Unknown metric type " + reporterName);
+        }
+      }
+    } else {
+      LOGGER.warn("No metrics reporters configured.");
+    }
+
+    // Create map for tracking gauges
+    gaugeAtomics = new HashMap<>();
+  }
+}
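
A short sketch of the intended call pattern for the new facade (illustrative only, not part
of this patch; the "get_table" timer name is hypothetical):

    import com.codahale.metrics.Counter;
    import com.codahale.metrics.Timer;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.metrics.Metrics;
    import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;

    import java.util.concurrent.atomic.AtomicInteger;

    public class MetricsUsageSketch {
      public static void main(String[] args) {
        Metrics.initialize(new Configuration()); // idempotent; starts configured reporters

        // Counters and timers return null if Metrics was never initialized,
        // hence the null checks that appear throughout this patch.
        Counter tables = Metrics.getOrCreateCounter(MetricsConstants.CREATE_TOTAL_TABLES);
        if (tables != null) tables.inc();

        Timer timer = Metrics.getOrCreateTimer(MetricsConstants.API_PREFIX + "get_table");
        if (timer != null) {
          try (Timer.Context ignored = timer.time()) {
            // ... timed work ...
          }
        }

        // Gauges never return null; an unregistered AtomicInteger is handed
        // back instead, so callers can skip the null check.
        AtomicInteger conns = Metrics.getOrCreateGauge(MetricsConstants.OPEN_CONNECTIONS);
        conns.incrementAndGet();

        Metrics.shutdown();                      // stops and closes scheduled reporters
      }
    }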

http://git-wip-us.apache.org/repos/asf/hive/blob/a6b9cd52/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/metrics/MetricsConstants.java
----------------------------------------------------------------------
diff --git 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/metrics/MetricsConstants.java
 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/metrics/MetricsConstants.java
new file mode 100644
index 0000000..3b188f8
--- /dev/null
+++ 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/metrics/MetricsConstants.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.metrics;
+
+public class MetricsConstants {
+  public static final String ACTIVE_CALLS = "active_calls_";
+  public static final String API_PREFIX = "api_";
+
+  public static final String CREATE_TOTAL_DATABASES = "create_total_count_dbs";
+  public static final String CREATE_TOTAL_TABLES = "create_total_count_tables";
+  public static final String CREATE_TOTAL_PARTITIONS = "create_total_count_partitions";
+
+  public static final String DELETE_TOTAL_DATABASES = "delete_total_count_dbs";
+  public static final String DELETE_TOTAL_TABLES = "delete_total_count_tables";
+  public static final String DELETE_TOTAL_PARTITIONS = "delete_total_count_partitions";
+
+  public static final String DIRECTSQL_ERRORS = "directsql_errors";
+
+  public static final String JVM_PAUSE_INFO = "jvm.pause.info-threshold";
+  public static final String JVM_PAUSE_WARN = "jvm.pause.warn-threshold";
+  public static final String JVM_EXTRA_SLEEP = "jvm.pause.extraSleepTime";
+
+  public static final String NUM_OPEN_TXNS = "num_open_transactions";
+  public static final String NUM_TIMED_OUT_TXNS = "num_timed_out_transactions";
+
+  public static final String OPEN_CONNECTIONS = "open_connections";
+
+  public static final String TOTAL_DATABASES = "total_count_dbs";
+  public static final String TOTAL_TABLES = "total_count_tables";
+  public static final String TOTAL_PARTITIONS = "total_count_partitions";
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/a6b9cd52/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/metrics/PerfLogger.java
----------------------------------------------------------------------
diff --git 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/metrics/PerfLogger.java
 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/metrics/PerfLogger.java
new file mode 100644
index 0000000..a2def26
--- /dev/null
+++ 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/metrics/PerfLogger.java
@@ -0,0 +1,194 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.metrics;
+
+import com.codahale.metrics.Timer;
+import com.google.common.collect.ImmutableMap;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * PerfLogger.
+ *
+ * Can be used to measure and log the time spent by a piece of code.
+ */
+public class PerfLogger {
+  protected final Map<String, Long> startTimes = new HashMap<>();
+  protected final Map<String, Long> endTimes = new HashMap<>();
+
+  private static final Logger LOG = LoggerFactory.getLogger(PerfLogger.class.getName());
+  protected static final ThreadLocal<PerfLogger> perfLogger = new ThreadLocal<>();
+
+
+  private PerfLogger() {
+    // Use getPerfLogger to get an instance of PerfLogger
+  }
+
+  /**
+   * Get the singleton PerfLogger instance.
+   * @param resetPerfLogger if false, get the current PerfLogger, or create a new one if a
+   *                        current one does not exist.  If true, a new instance of PerfLogger
+   *                        will be returned rather than the existing one.  Note that the
+   *                        existing PerfLogger is not shut down and any object which already
+   *                        has a reference to it may continue to use it.  But all future calls
+   *                        to this method with this set to false will get the new PerfLogger
+   *                        instance.
+   * @return a PerfLogger
+   */
+  public static PerfLogger getPerfLogger(boolean resetPerfLogger) {
+    PerfLogger result = perfLogger.get();
+    if (resetPerfLogger || result == null) {
+      result = new PerfLogger();
+      perfLogger.set(result);
+    }
+    return result;
+  }
+
+  public static void setPerfLogger(PerfLogger resetPerfLogger) {
+    perfLogger.set(resetPerfLogger);
+  }
+
+  /**
+   * Call this function when you start to measure time spent by a piece of code.
+   * @param callerName name of the caller, to be included in the log message.
+   * @param method method or ID that identifies this perf log element.
+   */
+  public void PerfLogBegin(String callerName, String method) {
+    long startTime = System.currentTimeMillis();
+    startTimes.put(method, startTime);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("<PERFLOG method=" + method + " from=" + callerName + ">");
+    }
+    beginMetrics(method);
+  }
+  /**
+   * Call this function in correspondence with PerfLogBegin to mark the end of the measurement.
+   * @param callerName name of the caller, to be included in the log message.
+   * @param method method or ID that identifies this perf log element.
+   * @return duration in milliseconds: the difference between now and startTime, or -1 if
+   * startTime is null.
+   */
+  public long PerfLogEnd(String callerName, String method) {
+    return PerfLogEnd(callerName, method, null);
+  }
+
+  /**
+   * Call this function in correspondence with PerfLogBegin to mark the end of the measurement.
+   * @param callerName name of the caller, to be included in the log message.
+   * @param method method or ID that identifies this perf log element.
+   * @param additionalInfo extra information to append to the log message; may be null.
+   * @return duration in milliseconds: the difference between now and startTime, or -1 if
+   * startTime is null.
+   */
+  public long PerfLogEnd(String callerName, String method, String additionalInfo) {
+    Long startTime = startTimes.get(method);
+    long endTime = System.currentTimeMillis();
+    endTimes.put(method, endTime);
+    long duration = startTime == null ? -1 : endTime - startTime.longValue();
+
+    if (LOG.isDebugEnabled()) {
+      StringBuilder sb = new StringBuilder("</PERFLOG method=").append(method);
+      if (startTime != null) {
+        sb.append(" start=").append(startTime);
+      }
+      sb.append(" end=").append(endTime);
+      if (startTime != null) {
+        sb.append(" duration=").append(duration);
+      }
+      sb.append(" from=").append(callerName);
+      if (additionalInfo != null) {
+        sb.append(" ").append(additionalInfo);
+      }
+      sb.append(">");
+      LOG.debug(sb.toString());
+    }
+    endMetrics(method);
+    return duration;
+  }
+
+  public Long getStartTime(String method) {
+    long startTime = 0L;
+
+    if (startTimes.containsKey(method)) {
+      startTime = startTimes.get(method);
+    }
+    return startTime;
+  }
+
+  public Long getEndTime(String method) {
+    long endTime = 0L;
+
+    if (endTimes.containsKey(method)) {
+      endTime = endTimes.get(method);
+    }
+    return endTime;
+  }
+
+  public boolean startTimeHasMethod(String method) {
+    return startTimes.containsKey(method);
+  }
+
+  public boolean endTimeHasMethod(String method) {
+    return endTimes.containsKey(method);
+  }
+
+  public Long getDuration(String method) {
+    long duration = 0;
+    if (startTimes.containsKey(method) && endTimes.containsKey(method)) {
+      duration = endTimes.get(method) - startTimes.get(method);
+    }
+    return duration;
+  }
+
+
+  public ImmutableMap<String, Long> getStartTimes() {
+    return ImmutableMap.copyOf(startTimes);
+  }
+
+  public ImmutableMap<String, Long> getEndTimes() {
+    return ImmutableMap.copyOf(endTimes);
+  }
+
+  // Methods for metrics integration.  Each thread-local PerfLogger will open/close scope
+  // during each perf-log method.
+  protected transient Map<String, Timer.Context> timerContexts = new HashMap<>();
+
+  private void beginMetrics(String method) {
+    Timer timer = Metrics.getOrCreateTimer(MetricsConstants.API_PREFIX + method);
+    if (timer != null) {
+      timerContexts.put(method, timer.time());
+    }
+  }
+
+  private void endMetrics(String method) {
+    Timer.Context context = timerContexts.remove(method);
+    if (context != null) {
+      context.close();
+    }
+  }
+
+  /**
+   * Cleans up any dangling perfLog metric call scopes.
+   */
+  public void cleanupPerfLogMetrics() {
+    for (Timer.Context context : timerContexts.values()) {
+      context.close();
+    }
+    timerContexts.clear();
+  }
+}
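
A minimal sketch of the intended PerfLogger call pattern (illustrative only, not part of
this patch; the caller and method names are hypothetical):

    import org.apache.hadoop.hive.metastore.metrics.PerfLogger;

    public class PerfLoggerSketch {
      public static void handleCall() {
        PerfLogger perf = PerfLogger.getPerfLogger(false); // reuse this thread's instance
        perf.PerfLogBegin("PerfLoggerSketch", "get_table");
        try {
          // ... work being measured ...
        } finally {
          // Logs a </PERFLOG ...> line at DEBUG and closes the codahale timer scope.
          perf.PerfLogEnd("PerfLoggerSketch", "get_table");
        }
      }
    }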
