http://git-wip-us.apache.org/repos/asf/phoenix/blob/b7f46c10/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
index e24591d..b3bec6e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.regionserver.ScanType;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.hbase.index.builder.IndexBuildManager;
 import org.apache.phoenix.hbase.index.builder.IndexBuilder;
@@ -64,6 +65,11 @@ import org.apache.phoenix.hbase.index.write.IndexWriter;
 import org.apache.phoenix.hbase.index.write.recovery.PerRegionIndexWriteCache;
 import org.apache.phoenix.hbase.index.write.recovery.StoreFailuresInCachePolicy;
 import org.apache.phoenix.hbase.index.write.recovery.TrackingParallelWriterIndexCommitter;
+import org.apache.phoenix.trace.TracingCompat;
+import org.apache.phoenix.trace.util.NullSpan;
+import org.apache.phoenix.trace.util.Tracing;
+import org.cloudera.htrace.Span;
+import org.cloudera.htrace.Trace;
 
 import com.google.common.collect.Multimap;
 
@@ -134,10 +140,18 @@ public class Indexer extends BaseRegionObserver {
     private static final int INDEX_WAL_COMPRESSION_MINIMUM_SUPPORTED_VERSION = VersionUtil
             .encodeVersion("0.94.9");
 
+    /**
+     * Raw configuration, for tracing. Coprocessors generally will get a subset configuration
+     * (if they are on a per-table basis), so we need the raw one from the server, so we can
+     * get the actual configuration keys.
+     */
+    private Configuration rawConf;
+
   @Override
   public void start(CoprocessorEnvironment e) throws IOException {
       try {
         final RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e;
+            this.rawConf = env.getRegionServerServices().getConfiguration();
         String serverName = env.getRegionServerServices().getServerName().getServerName();
         if (env.getConfiguration().getBoolean(CHECK_VERSION_CONF_KEY, true)) {
           // make sure the right version <-> combinations are allowed.
@@ -312,12 +326,24 @@ public class Indexer extends BaseRegionObserver {
     // don't worry which one we get
     WALEdit edit = miniBatchOp.getWalEdit(0);
 
+        // get the current span, or just use a null-span to avoid a bunch of if statements
+        Span current = Trace.startSpan("Starting to build index updates").getSpan();
+        if (current == null) {
+            current = NullSpan.INSTANCE;
+        }
+
     // get the index updates for all elements in this batch
     Collection<Pair<Mutation, byte[]>> indexUpdates =
         this.builder.getIndexUpdate(miniBatchOp, mutations.values());
 
+        current.addTimelineAnnotation("Built index updates, doing preStep");
+        TracingCompat.addAnnotation(current, "index update count", indexUpdates.size());
+
     // write them, either to WAL or the index tables
     doPre(indexUpdates, edit, durability);
+
+        // close the span
+        current.stop();
   }
 
   private class MultiMutation extends Mutation {
@@ -458,16 +484,24 @@ public class Indexer extends BaseRegionObserver {
       return;
     }
 
+        // get the current span, or just use a null-span to avoid a bunch of if statements
+        Span current = Trace.startSpan("Completing index writes").getSpan();
+        if (current == null) {
+            current = NullSpan.INSTANCE;
+        }
+
     // there is a little bit of excess here- we iterate all the non-indexed kvs for this check first
     // and then do it again later when getting out the index updates. This should be pretty minor
     // though, compared to the rest of the runtime
     IndexedKeyValue ikv = getFirstIndexedKeyValue(edit);
+
     /*
      * early exit - we have nothing to write, so we don't need to do anything else. NOTE: we don't
      * release the WAL Rolling lock (INDEX_UPDATE_LOCK) since we never take it in doPre if there are
      * no index updates.
      */
     if (ikv == null) {
+            current.stop();
       return;
     }
 
@@ -483,6 +517,7 @@ public class Indexer extends BaseRegionObserver {
       // references originally - therefore, we just pass in a null factory here and use the ones
       // already specified on each reference
       try {
+                current.addTimelineAnnotation("Actually doing index update for first time");
           writer.writeAndKillYourselfOnFailure(indexUpdates);
       } finally {
         // With a custom kill policy, we may throw instead of kill the server.
@@ -492,6 +527,10 @@ public class Indexer extends BaseRegionObserver {
         // mark the batch as having been written. In the single-update case, this never gets
         // checked again, but in the batch case, we will check it again (see above).
         ikv.markBatchFinished();
+
+                // finish the span
+                current.stop();
       }
     }
   }
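
The start-span/null-span sequence above appears twice in this file. A minimal
standalone sketch of the idiom, using only the htrace and Phoenix types imported
above (the method and span names are illustrative, not from this patch):

    import org.apache.phoenix.trace.util.NullSpan;
    import org.cloudera.htrace.Span;
    import org.cloudera.htrace.Trace;

    public class NullSpanIdiom {
        // Substituting the no-op NullSpan when sampling returned no span lets the
        // rest of the method call addTimelineAnnotation()/stop() unconditionally.
        void tracedWork() {
            Span current = Trace.startSpan("some work").getSpan();
            if (current == null) {
                current = NullSpan.INSTANCE;
            }
            try {
                current.addTimelineAnnotation("did the work");
            } finally {
                current.stop();
            }
        }
    }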

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b7f46c10/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
index 92fc54a..2af5896 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
@@ -40,6 +40,7 @@ import org.apache.phoenix.query.*;
 import org.apache.phoenix.schema.*;
 import org.apache.phoenix.schema.PTable.IndexType;
 import org.apache.phoenix.schema.PTable.ViewType;
+import org.apache.phoenix.trace.util.Tracing;
 import org.apache.phoenix.util.*;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -250,7 +251,7 @@ public class ParallelIterators extends ExplainTable implements ResultIterators {
                     // Delay the swapping of start/stop row until now so we don't muck with the intersect logic
                     ScanUtil.swapStartStopRowIfReversed(splitScan);
                     Future<PeekingResultIterator> future =
-                        executor.submit(new JobCallable<PeekingResultIterator>() {
+                        executor.submit(Tracing.wrap(new JobCallable<PeekingResultIterator>() {
 
                         @Override
                         public PeekingResultIterator call() throws Exception {
@@ -274,7 +275,7 @@ public class ParallelIterators extends ExplainTable implements ResultIterators {
                         public Object getJobId() {
                             return ParallelIterators.this;
                         }
-                    });
+                    }, "Parallel scanner for table: " + tableRef.getTable().getName().getString()));
                     futures.add(new Pair<byte[],Future<PeekingResultIterator>>(split.getLowerRange(),future));
                 }
             }
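
The body of Tracing.wrap(...) is not included in this excerpt (the Tracing.java
diff below is truncated before it), so the following is an assumed sketch of what
such a wrapper plausibly does, based on the TraceCallable import in that file -
not the committed implementation:

    import java.util.concurrent.Callable;
    import org.cloudera.htrace.Trace;
    import org.cloudera.htrace.wrappers.TraceCallable;

    public class TracingWrapSketch {
        // If a trace is active, hand the executor a TraceCallable that re-attaches
        // the current span on the worker thread; otherwise pass the task through.
        public static <V> Callable<V> wrap(Callable<V> task, String description) {
            if (Trace.isTracing()) {
                return new TraceCallable<V>(Trace.currentSpan(), task, description);
            }
            return task;
        }
    }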

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b7f46c10/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
index 609c2ba..d6dd5f7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
@@ -51,7 +51,10 @@ import java.util.concurrent.Executor;
 
 import javax.annotation.Nullable;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.phoenix.call.CallRunner;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
 import org.apache.phoenix.execute.MutationState;
@@ -72,6 +75,8 @@ import org.apache.phoenix.schema.PMetaData.Pruner;
 import org.apache.phoenix.schema.PName;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.trace.TracingCompat;
+import org.apache.phoenix.trace.util.Tracing;
 import org.apache.phoenix.util.DateUtil;
 import org.apache.phoenix.util.JDBCUtil;
 import org.apache.phoenix.util.NumberUtil;
@@ -80,6 +85,8 @@ import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SQLCloseable;
 import org.apache.phoenix.util.SQLCloseables;
+import org.cloudera.htrace.Sampler;
+import org.cloudera.htrace.Trace;
 
 import com.google.common.base.Objects;
 import com.google.common.base.Strings;
@@ -101,6 +108,8 @@ import com.google.common.collect.Maps;
  * @since 0.1
  */
 public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jdbc7Shim.Connection, MetaDataMutated  {
+    private static final Log LOG = LogFactory.getLog(PhoenixConnection.class);
+
     private final String url;
     private final ConnectionQueryServices services;
     private final Properties info;
@@ -115,7 +124,19 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
     private final String datePattern;
     
     private boolean isClosed = false;
+    private Sampler<?> sampler;
     
+    static {
+        // add the phoenix span receiver so we can log the traces. We have a single trace
+        // source for the whole JVM
+        try {
+            Trace.addReceiver(TracingCompat.newTraceMetricSource());
+        } catch (RuntimeException e) {
+            LOG.error("Tracing outputs will not be written to any metrics sink! No "
+                    + "TraceMetricsSink found on the classpath", e);
+        }
+    }
+
     private static Properties newPropsWithSCN(long scn, Properties props) {
         props = new Properties(props);
         props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(scn));
@@ -125,15 +146,18 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
     public PhoenixConnection(PhoenixConnection connection) throws SQLException {
         this(connection.getQueryServices(), connection.getURL(), connection.getClientInfo(), connection.getMetaDataCache());
         this.isAutoCommit = connection.isAutoCommit;
+        this.sampler = connection.sampler;
     }
     
     public PhoenixConnection(PhoenixConnection connection, long scn) throws SQLException {
         this(connection.getQueryServices(), connection, scn);
+        this.sampler = connection.sampler;
     }
     
     public PhoenixConnection(ConnectionQueryServices services, PhoenixConnection connection, long scn) throws SQLException {
         this(services, connection.getURL(), newPropsWithSCN(scn,connection.getClientInfo()), connection.getMetaDataCache());
         this.isAutoCommit = connection.isAutoCommit;
+        this.sampler = connection.sampler;
     }
     
     public PhoenixConnection(ConnectionQueryServices services, String url, Properties info, PMetaData metaData) throws SQLException {
@@ -199,6 +223,13 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
         });
         this.mutationState = new MutationState(maxSize, this);
         this.services.addConnection(this);
+
+        // set up tracing, if it's enabled
+        this.sampler = Tracing.getConfiguredSampler(this);
+    }
+
+    public Sampler<?> getSampler() {
+        return this.sampler;
     }
 
     public int executeStatements(Reader reader, List<Object> binds, PrintStream out) throws IOException, SQLException {
@@ -359,7 +390,12 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
 
     @Override
     public void commit() throws SQLException {
-        mutationState.commit();
+        CallRunner.run(new CallRunner.CallableThrowable<Void, SQLException>() {
+            public Void call() throws SQLException {
+                mutationState.commit();
+                return null;
+            }
+        }, Tracing.withTracing(this, "committing mutations"));
     }
 
     @Override
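
CallRunner and CallWrapper (org.apache.phoenix.call) are referenced here and in
PhoenixStatement.java below, but their sources are not part of this excerpt. From
the call sites, a plausible shape is a runner that enters each wrapper around the
call - an assumed sketch, not the committed implementation:

    // Assumed sketch of the org.apache.phoenix.call API, inferred from call sites.
    public class CallRunnerSketch {
        public interface CallWrapper {
            void before();
            void after();
        }

        public interface CallableThrowable<V, E extends Exception> {
            V call() throws E;
        }

        // Enter each wrapper (tracing span, context classloader, ...), run the
        // call, then unwind the wrappers in reverse order.
        public static <V, E extends Exception> V run(CallableThrowable<V, E> call,
                CallWrapper... wrappers) throws E {
            for (CallWrapper w : wrappers) {
                w.before();
            }
            try {
                return call.call();
            } finally {
                for (int i = wrappers.length - 1; i >= 0; i--) {
                    wrappers[i].after();
                }
            }
        }
    }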

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b7f46c10/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
index f5b8367..42acc60 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
@@ -31,9 +31,9 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.Callable;
 
 import org.apache.hadoop.hbase.util.Pair;
+import org.apache.phoenix.call.CallRunner;
 import org.apache.phoenix.compile.ColumnProjector;
 import org.apache.phoenix.compile.ColumnResolver;
 import org.apache.phoenix.compile.CreateIndexCompiler;
@@ -110,6 +110,7 @@ import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.TableRef;
 import org.apache.phoenix.schema.tuple.SingleKeyValueTuple;
 import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.trace.util.Tracing;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.KeyValueUtil;
 import org.apache.phoenix.util.PhoenixContextExecutor;
@@ -186,7 +187,7 @@ public class PhoenixStatement implements Statement, SQLCloseable, org.apache.pho
         return new PhoenixResultSet(iterator, projector, this);
     }
     
-    protected boolean execute(CompilableStatement stmt) throws SQLException {
+    protected boolean execute(final CompilableStatement stmt) throws SQLException {
         if (stmt.getOperation().isMutation()) {
             executeMutation(stmt);
             return false;
@@ -202,13 +203,16 @@ public class PhoenixStatement implements Statement, SQLCloseable, org.apache.pho
     
     protected PhoenixResultSet executeQuery(final CompilableStatement stmt) throws SQLException {
         try {
-            return PhoenixContextExecutor.call(new Callable<PhoenixResultSet>() {
+            return CallRunner.run(
+                new CallRunner.CallableThrowable<PhoenixResultSet, SQLException>() {
                 @Override
-                public PhoenixResultSet call() throws Exception {
+                    public PhoenixResultSet call() throws SQLException {
                     try {
                         QueryPlan plan = stmt.compilePlan(PhoenixStatement.this, Sequence.ValueOp.RESERVE_SEQUENCE);
                         plan = connection.getQueryServices().getOptimizer().optimize(
                                 PhoenixStatement.this, plan);
+                         // this will create its own trace internally, so we don't wrap this
+                         // whole thing in tracing
                         PhoenixResultSet rs = newResultSet(plan.iterator(), plan.getProjector());
                         resultSets.add(rs);
                         setLastQueryPlan(plan);
@@ -225,7 +229,7 @@ public class PhoenixStatement implements Statement, SQLCloseable, org.apache.pho
                         throw e;
                     }
                 }
-            });
+                }, PhoenixContextExecutor.inContext());
         } catch (Exception e) {
             Throwables.propagateIfInstanceOf(e, SQLException.class);
             throw Throwables.propagate(e);
@@ -234,11 +238,11 @@ public class PhoenixStatement implements Statement, SQLCloseable, org.apache.pho
     
     protected int executeMutation(final CompilableStatement stmt) throws SQLException {
         try {
-            return PhoenixContextExecutor.call(
-                    new Callable<Integer>() {
+            return CallRunner
+                    .run(
+                        new CallRunner.CallableThrowable<Integer, SQLException>() {
                         @Override
-                        public Integer call() throws Exception {
-
+                            public Integer call() throws SQLException {
                             // Note that the upsert select statements will need to commit any open transaction here,
                             // since they'd update data directly from coprocessors, and should thus operate on
                             // the latest state
@@ -266,7 +270,8 @@ public class PhoenixStatement implements Statement, SQLCloseable, org.apache.pho
                                 throw e;
                             }
                         }
-                    });
+                    }, PhoenixContextExecutor.inContext(),
+                        Tracing.withTracing(connection, this.toString()));
         } catch (Exception e) {
             Throwables.propagateIfInstanceOf(e, SQLException.class);
             throw Throwables.propagate(e);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b7f46c10/phoenix-core/src/main/java/org/apache/phoenix/trace/PhoenixTableMetricsWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/PhoenixTableMetricsWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/PhoenixTableMetricsWriter.java
new file mode 100644
index 0000000..b6d3c67
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/PhoenixTableMetricsWriter.java
@@ -0,0 +1,255 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.trace;
+
+import static org.apache.phoenix.metrics.MetricInfo.ANNOTATION;
+import static org.apache.phoenix.metrics.MetricInfo.TAG;
+import static org.apache.phoenix.metrics.MetricInfo.DESCRIPTION;
+import static org.apache.phoenix.metrics.MetricInfo.END;
+import static org.apache.phoenix.metrics.MetricInfo.HOSTNAME;
+import static org.apache.phoenix.metrics.MetricInfo.PARENT;
+import static org.apache.phoenix.metrics.MetricInfo.SPAN;
+import static org.apache.phoenix.metrics.MetricInfo.START;
+import static org.apache.phoenix.metrics.MetricInfo.TRACE;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.phoenix.metrics.MetricInfo;
+import org.apache.phoenix.metrics.MetricsWriter;
+import org.apache.phoenix.metrics.PhoenixAbstractMetric;
+import org.apache.phoenix.metrics.PhoenixMetricTag;
+import org.apache.phoenix.metrics.PhoenixMetricsRecord;
+import org.apache.phoenix.util.QueryUtil;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
+
+/**
+ * Sink that writes phoenix metrics to a phoenix table
+ * <p>
+ * Each metric record should only correspond to a single completed span. Each span is only
+ * updated in the phoenix table <i>once</i>.
+ */
+public class PhoenixTableMetricsWriter implements MetricsWriter {
+
+    private static final String VARIABLE_VALUE = "?";
+
+    public static final Log LOG = LogFactory.getLog(PhoenixTableMetricsWriter.class);
+
+    private static final Joiner COLUMN_JOIN = Joiner.on(".");
+    static final String TAG_FAMILY = "tags";
+    /** Count of the number of tags we are storing for this row */
+    static final String TAG_COUNT = COLUMN_JOIN.join(TAG_FAMILY, "count");
+
+    static final String ANNOTATION_FAMILY = "annotations";
+    static final String ANNOTATION_COUNT = COLUMN_JOIN.join(ANNOTATION_FAMILY, "count");
+
+    /** Join strings on a comma */
+    private static final Joiner COMMAS = Joiner.on(',');
+
+    private Connection conn;
+
+    private String table;
+
+    @Override
+    public void initialize() {
+        try {
+            // create the phoenix connection
+            Configuration conf = HBaseConfiguration.create();
+            Connection conn = QueryUtil.getConnection(conf);
+            // enable bulk loading when we have enough data
+            conn.setAutoCommit(true);
+
+            String tableName =
+                    conf.get(TracingCompat.TARGET_TABLE_CONF_KEY,
+                        TracingCompat.DEFAULT_STATS_TABLE_NAME);
+
+            initializeInternal(conn, tableName);
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    private void initializeInternal(Connection conn, String tableName) throws SQLException {
+        this.conn = conn;
+
+        // ensure that the target table already exists
+        createTable(conn, tableName);
+    }
+
+    /**
+     * Used for <b>TESTING ONLY</b>
+     * <p>
+     * Initialize the connection and set up the table to use the
+     * {@link TracingCompat#DEFAULT_STATS_TABLE_NAME}
+     * @param conn to store for upserts and to create the table (if necessary)
+     * @throws SQLException if any phoenix operation fails
+     */
+    @VisibleForTesting
+    public void initForTesting(Connection conn) throws SQLException {
+        initializeInternal(conn, TracingCompat.DEFAULT_STATS_TABLE_NAME);
+    }
+
+    /**
+     * Create a stats table with the given name. Stores the name for use later when creating
+     * upsert statements.
+     * @param conn connection to use when creating the table
+     * @param table name of the table to create
+     * @throws SQLException if any phoenix operations fails
+     */
+    private void createTable(Connection conn, String table) throws SQLException {
+        // only primary-key columns can be marked non-null
+        String ddl =
+                "create table if not exists " + table + "( " + 
+                        TRACE.columnName + " bigint not null, " +
+                        PARENT.columnName + " bigint not null, " +
+                        SPAN.columnName + " bigint not null, " +
+                        DESCRIPTION.columnName + " varchar, " +
+                        START.columnName + " bigint, " +
+                        END.columnName + " bigint, " +
+                        HOSTNAME.columnName + " varchar, " +
+                        TAG_COUNT + " smallint, " +
+                        ANNOTATION_COUNT + " smallint" +
+                        "  CONSTRAINT pk PRIMARY KEY (" + TRACE.columnName + ", "
+                            + PARENT.columnName + ", " + SPAN.columnName + "))\n";
+        PreparedStatement stmt = conn.prepareStatement(ddl);
+        stmt.execute();
+        this.table = table;
+    }
+
+    @Override
+    public void flush() {
+        try {
+            this.conn.commit();
+            this.conn.rollback();
+        } catch (SQLException e) {
+            LOG.error("Failed to commit changes to table", e);
+        }
+    }
+
+    /**
+     * Add a new metric record to be written.
+     * @param record
+     */
+    @Override
+    public void addMetrics(PhoenixMetricsRecord record) {
+        // it's not a tracing record, we are done. This could also be handled by filters, but
+        // safer to do it here, in case it gets misconfigured
+        if (!record.name().startsWith(TracingCompat.METRIC_SOURCE_KEY)) {
+            return;
+        }
+        String stmt = "UPSERT INTO " + table + " (";
+        // drop it into the queue of things that should be written
+        List<String> keys = new ArrayList<String>();
+        List<Object> values = new ArrayList<Object>();
+        // we need to keep variable values in a separate set since they may have spaces, which
+        // causes the parser to barf. Instead, we need to add them after the statement is prepared
+        List<String> variableValues = new ArrayList<String>(record.tags().size());
+        keys.add(TRACE.columnName);
+        values.add(Long.parseLong(record.name().substring(TracingCompat.METRIC_SOURCE_KEY.length())));
+
+        keys.add(DESCRIPTION.columnName);
+        values.add(VARIABLE_VALUE);
+        variableValues.add(record.description());
+
+        // add each of the metrics
+        for (PhoenixAbstractMetric metric : record.metrics()) {
+            // name of the metric is also the column name to which we write
+            keys.add(MetricInfo.getColumnName(metric.getName()));
+            values.add((Long) metric.value());
+        }
+
+        // get the tags out so we can set them later (otherwise, need to be a single value)
+        int annotationCount = 0;
+        int tagCount = 0;
+        for (PhoenixMetricTag tag : record.tags()) {
+            if (tag.name().equals(ANNOTATION.traceName)) {
+                addDynamicEntry(keys, values, variableValues, ANNOTATION_FAMILY, tag, ANNOTATION,
+                    annotationCount);
+                annotationCount++;
+            } else if (tag.name().equals(TAG.traceName)) {
+                addDynamicEntry(keys, values, variableValues, TAG_FAMILY, tag, TAG, tagCount);
+                tagCount++;
+            } else if (tag.name().equals(HOSTNAME.traceName)) {
+                keys.add(HOSTNAME.columnName);
+                values.add(VARIABLE_VALUE);
+                variableValues.add(tag.value());
+            } else if (tag.name().equals("Context")) {
+                // ignored
+            } else {
+                LOG.error("Got an unexpected tag: " + tag);
+            }
+        }
+
+        // add the tag count, now that we know it
+        keys.add(TAG_COUNT);
+        // ignore the hostname in the tags, if we know it
+        values.add(tagCount);
+
+        keys.add(ANNOTATION_COUNT);
+        values.add(annotationCount);
+
+        // compile the statement together
+        stmt += COMMAS.join(keys);
+        stmt += ") VALUES (" + COMMAS.join(values) + ")";
+
+        if (LOG.isTraceEnabled()) {
+            LOG.trace("Logging metrics to phoenix table via: " + stmt);
+            LOG.trace("With tags: " + variableValues);
+        }
+        try {
+            PreparedStatement ps = conn.prepareStatement(stmt);
+            // add everything that wouldn't/may not parse
+            int index = 1;
+            for (String tag : variableValues) {
+                ps.setString(index++, tag);
+            }
+            ps.execute();
+        } catch (SQLException e) {
+            LOG.error("Could not write metric: \n" + record + " to prepared statement:\n" + stmt, e);
+        }
+    }
+
+    public static String getDynamicColumnName(String family, String column, int count) {
+        return COLUMN_JOIN.join(family, column) + count;
+    }
+
+    private void addDynamicEntry(List<String> keys, List<Object> values,
+            List<String> variableValues, String family, PhoenixMetricTag tag,
+            MetricInfo metric, int count) {
+        // <family><.dynColumn><count> <VARCHAR>
+        keys.add(getDynamicColumnName(family, metric.columnName, count) + " VARCHAR");
+
+        // build the annotation value
+        String val = tag.description() + " - " + tag.value();
+        values.add(VARIABLE_VALUE);
+        variableValues.add(val);
+    }
+
+    public void clearForTesting() throws SQLException {
+        this.conn.rollback();
+    }
+}
\ No newline at end of file
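
For orientation, the UPSERT assembled by addMetrics() above ends up with roughly
the following shape for a span carrying one tag and one annotation. The table and
column names here are placeholders - the real ones come from MetricInfo and
TracingCompat, which are not part of this diff - and the ids are made up:

    // Illustrative only: hypothetical table/column names, made-up ids.
    // Tag and annotation values are bound as parameters afterwards because they
    // may contain spaces that the statement parser would reject inline.
    String exampleUpsert =
        "UPSERT INTO TRACING_STATS (trace_id, description, start_time, end_time, "
            + "hostname, tags.t0 VARCHAR, annotations.a0 VARCHAR, tags.count, "
            + "annotations.count) VALUES (1234, ?, 10, 20, ?, ?, ?, 1, 1)";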

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b7f46c10/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
new file mode 100644
index 0000000..4760329
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
@@ -0,0 +1,375 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.trace;
+
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.phoenix.metrics.MetricInfo;
+import org.apache.phoenix.trace.PhoenixTableMetricsWriter;
+import org.cloudera.htrace.Span;
+
+import com.google.common.base.Joiner;
+import com.google.common.primitives.Longs;
+
+/**
+ * Read the traces written to phoenix tables by the {@link PhoenixTableMetricsWriter}.
+ */
+public class TraceReader {
+
+    private static final Log LOG = LogFactory.getLog(TraceReader.class);
+    private static final int DEFAULT_PAGE_SIZE = 100;
+    private static final String PAGE_SIZE_CONF_KEY = "phoenix.trace.read.pagesize";
+    private final Joiner comma = Joiner.on(',');
+    private String knownColumns;
+    {
+        // the order here dictates the order we pull out the values below. For now, just keep them
+        // in sync - so we can be efficient pulling them off the results.
+        knownColumns =
+                comma.join(MetricInfo.TRACE.columnName, MetricInfo.PARENT.columnName,
+                    MetricInfo.SPAN.columnName, MetricInfo.DESCRIPTION.columnName,
+                    MetricInfo.START.columnName, MetricInfo.END.columnName,
+                    MetricInfo.HOSTNAME.columnName, PhoenixTableMetricsWriter.TAG_COUNT,
+                    PhoenixTableMetricsWriter.ANNOTATION_COUNT);
+    }
+
+    private Connection conn;
+    private String table;
+    private int pageSize;
+
+    public TraceReader(Connection conn, String statsTableName) throws SQLException {
+        this.conn = conn;
+        this.table = statsTableName;
+        String ps = conn.getClientInfo(PAGE_SIZE_CONF_KEY);
+        this.pageSize = ps == null ? DEFAULT_PAGE_SIZE : Integer.parseInt(ps);
+    }
+
+    public TraceReader(Connection conn) throws SQLException {
+        this(conn, TracingCompat.DEFAULT_STATS_TABLE_NAME);
+    }
+
+    /**
+     * Read all the currently stored traces.
+     * <p>
+     * <b>Be Careful!</b> This could cause an OOME if there are a lot of traces.
+     * @param limit max number of traces to return. If -1, returns all known traces.
+     * @return the found traces
+     * @throws SQLException
+     */
+    public Collection<TraceHolder> readAll(int limit) throws SQLException {
+        Set<TraceHolder> traces = new HashSet<TraceHolder>();
+        // read all the known columns from the table, sorting first by trace column (so the same
+        // trace goes together), and then by start time (so parent spans always appear before
+        // child spans)
+        String query =
+                "SELECT " + knownColumns + " FROM " + table
+                        + " ORDER BY " + MetricInfo.TRACE.columnName + " DESC, "
+                        + MetricInfo.START.columnName + " ASC" + " LIMIT " + pageSize;
+        int resultCount = 0;
+        ResultSet results = conn.prepareStatement(query).executeQuery();
+        TraceHolder trace = null;
+        // the spans that are not the root span, but haven't seen their parent yet
+        List<SpanInfo> orphans = null;
+        while (results.next()) {
+            int index = 1;
+            long traceid = results.getLong(index++);
+            long parent = results.getLong(index++);
+            long span = results.getLong(index++);
+            String desc = results.getString(index++);
+            long start = results.getLong(index++);
+            long end = results.getLong(index++);
+            String host = results.getString(index++);
+            int tagCount = results.getInt(index++);
+            int annotationCount = results.getInt(index++);
+            // we have a new trace
+            if (trace == null || traceid != trace.traceid) {
+                // only increment if we are on a new trace, to ensure we get at least one
+                if (trace != null) {
+                    resultCount++;
+                }
+                // we are beyond the limit, so we stop
+                if (limit >= 0 && resultCount >= limit) {
+                    break;
+                }
+                trace = new TraceHolder();
+                // add the orphans, so we can track them later
+                orphans = new ArrayList<SpanInfo>();
+                trace.orphans = orphans;
+                trace.traceid = traceid;
+                traces.add(trace);
+            }
+
+            // search the spans to determine if we have a known parent
+            SpanInfo parentSpan = null;
+            if (parent != Span.ROOT_SPAN_ID) {
+                // find the parent
+                for (SpanInfo p : trace.spans) {
+                    if (p.id == parent) {
+                        parentSpan = p;
+                        break;
+                    }
+                }
+            }
+            SpanInfo spanInfo =
+                    new SpanInfo(parentSpan, parent, span, desc, start, end, host, tagCount,
+                            annotationCount);
+            // search the orphans to see if this is the parent id
+
+            for (int i = 0; i < orphans.size(); i++) {
+                SpanInfo orphan = orphans.get(i);
+                // we found the parent for the orphan
+                if (orphan.parentId == span) {
+                    // update the bi-directional relationship
+                    orphan.parent = spanInfo;
+                    spanInfo.children.add(orphan);
+                    // it's no longer an orphan
+                    LOG.trace("Found parent for span: " + span);
+                    orphans.remove(i--);
+                }
+            }
+
+            if (parentSpan != null) {
+                // add this as a child to the parent span
+                parentSpan.children.add(spanInfo);
+            } else if (parent != Span.ROOT_SPAN_ID) {
+                // add the span to the orphan pile to check for the remaining spans we see
+                LOG.info("No parent span found for span: " + span + " (root span id: "
+                        + Span.ROOT_SPAN_ID + ")");
+                orphans.add(spanInfo);
+            }
+
+            // add the span to the full known list
+            trace.spans.add(spanInfo);
+
+            // go back and find the tags for the row
+            spanInfo.tags.addAll(getTags(traceid, parent, span, tagCount));
+
+            spanInfo.annotations.addAll(getAnnotations(traceid, parent, span, annotationCount));
+        }
+
+        // make sure we clean up after ourselves
+        results.close();
+
+        return traces;
+    }
+
+    private Collection<? extends String> getTags(long traceid, long parent, long span, int count)
+            throws SQLException {
+        return getDynamicCountColumns(traceid, parent, span, count,
+            PhoenixTableMetricsWriter.TAG_FAMILY, MetricInfo.TAG.columnName);
+    }
+
+    private Collection<? extends String> getAnnotations(long traceid, long parent, long span,
+            int count) throws SQLException {
+        return getDynamicCountColumns(traceid, parent, span, count,
+            PhoenixTableMetricsWriter.ANNOTATION_FAMILY, MetricInfo.ANNOTATION.columnName);
+    }
+
+    private Collection<? extends String> getDynamicCountColumns(long traceid, long parent,
+            long span, int count, String family, String columnName) throws SQLException {
+        if (count == 0) {
+            return Collections.emptyList();
+        }
+
+        // build the column strings, family.column<index>
+        String[] parts = new String[count];
+        for (int i = 0; i < count; i++) {
+            parts[i] = PhoenixTableMetricsWriter.getDynamicColumnName(family, columnName, i);
+        }
+        // join the columns together
+        String columns = comma.join(parts);
+
+        // redo them and add " VARCHAR" to the end, so we can specify the columns
+        for (int i = 0; i < count; i++) {
+            parts[i] = parts[i] + " VARCHAR";
+        }
+
+        String dynamicColumns = comma.join(parts);
+        String request =
+                "SELECT " + columns + " from " + table + "(" + dynamicColumns + ") WHERE "
+                        + MetricInfo.TRACE.columnName + "=" + traceid + " AND "
+                        + MetricInfo.PARENT.columnName + "=" + parent + " AND "
+                        + MetricInfo.SPAN.columnName + "=" + span;
+        LOG.trace("Requesting columns with: " + request);
+        ResultSet results = conn.createStatement().executeQuery(request);
+        List<String> cols = new ArrayList<String>();
+        while (results.next()) {
+            for (int index = 1; index <= count; index++) {
+                cols.add(results.getString(index));
+            }
+        }
+        if (cols.size() < count) {
+            LOG.error("Missing tags! Expected " + count + ", but only got " + cols.size()
+                    + " tags from request " + request);
+        }
+        return cols;
+    }
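
The read side mirrors the writer: the dynamic tag/annotation columns have to be
re-declared inline in the SELECT so Phoenix knows their types. With count=2 the
request built above has roughly this shape (placeholder table/column names again;
e.g. getDynamicColumnName("tags", "t", 0) would yield "tags.t0"):

    // Illustrative only - not a statement from this commit.
    String exampleSelect =
        "SELECT tags.t0,tags.t1 from TRACING_STATS(tags.t0 VARCHAR,tags.t1 VARCHAR)"
            + " WHERE trace_id=1234 AND parent_id=0 AND span_id=42";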
+
+    /**
+     * Holds information about a trace
+     */
+    public static class TraceHolder {
+        public List<SpanInfo> orphans;
+        public long traceid;
+        public TreeSet<SpanInfo> spans = new TreeSet<SpanInfo>();
+
+        @Override
+        public int hashCode() {
+            return new Long(traceid).hashCode();
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (o instanceof TraceHolder) {
+                return traceid == ((TraceHolder) o).traceid;
+            }
+            return false;
+        }
+
+        @Override
+        public String toString() {
+            StringBuilder sb = new StringBuilder("Trace: " + traceid + "\n");
+            // get the first span, which is always going to be the root span
+            SpanInfo root = spans.iterator().next();
+            if (root.parent != null) {
+                sb.append("Root span not present! Just printing found spans\n");
+                for (SpanInfo span : spans) {
+                    sb.append(span.toString() + "\n");
+                }
+            } else {
+                // print the tree of spans
+                List<SpanInfo> toPrint = new ArrayList<SpanInfo>();
+                toPrint.add(root);
+                while (!toPrint.isEmpty()) {
+                    SpanInfo span = toPrint.remove(0);
+                    sb.append(span.toString() + "\n");
+                    toPrint.addAll(span.children);
+                }
+            }
+            if (orphans.size() > 0) {
+                sb.append("Found orphan spans:\n" + orphans);
+            }
+            return sb.toString();
+        }
+    }
+
+    public static class SpanInfo implements Comparable<SpanInfo> {
+        public SpanInfo parent;
+        public List<SpanInfo> children = new ArrayList<SpanInfo>();
+        public String description;
+        public long id;
+        public long start;
+        public long end;
+        public String hostname;
+        public int tagCount;
+        public List<String> tags = new ArrayList<String>();
+        public int annotationCount;
+        public List<String> annotations = new ArrayList<String>();
+        private long parentId;
+
+        public SpanInfo(SpanInfo parent, long parentid, long span, String desc, long start,
+                long end, String host, int tagCount, int annotationCount) {
+            this.parent = parent;
+            this.parentId = parentid;
+            this.id = span;
+            this.description = desc;
+            this.start = start;
+            this.end = end;
+            this.hostname = host;
+            this.tagCount = tagCount;
+            this.annotationCount = annotationCount;
+        }
+
+        @Override
+        public int hashCode() {
+            return new Long(id).hashCode();
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (o instanceof SpanInfo) {
+                return id == ((SpanInfo) o).id;
+            }
+            return false;
+        }
+
+        /**
+         * Do the same sorting that we would get from reading the table with a {@link TraceReader},
+         * specifically, by trace and then by start/end. However, these are only ever stored in a
+         * single trace, so we can just sort on start/end times.
+         */
+        @Override
+        public int compareTo(SpanInfo o) {
+            // root span always comes first
+            if (this.parentId == Span.ROOT_SPAN_ID) {
+                return -1;
+            } else if (o.parentId == Span.ROOT_SPAN_ID) {
+                return 1;
+            }
+
+            int compare = Longs.compare(start, o.start);
+            if (compare == 0) {
+                compare = Longs.compare(end, o.end);
+                if (compare == 0) {
+                    return Longs.compare(id, o.id);
+                }
+            }
+            return compare;
+        }
+
+        @Override
+        public String toString() {
+            StringBuilder sb = new StringBuilder("Span: " + id + "\n");
+            sb.append("\tdescription=" + description);
+            sb.append("\n");
+            sb.append("\tparent="
+                    + (parent == null ? (parentId == Span.ROOT_SPAN_ID ? "ROOT" : "[orphan - id: "
+                            + parentId + "]") : parent.id));
+            sb.append("\n");
+            sb.append("\tstart,end=" + start + "," + end);
+            sb.append("\n");
+            sb.append("\telapsed=" + (end - start));
+            sb.append("\n");
+            sb.append("\thostname=" + hostname);
+            sb.append("\n");
+            sb.append("\ttags=(" + tagCount + ") " + tags);
+            sb.append("\n");
+            sb.append("\tannotations=(" + annotationCount + ") " + annotations);
+            sb.append("\n");
+            sb.append("\tchildren=");
+            for (SpanInfo child : children) {
+                sb.append(child.id + ", ");
+            }
+            sb.append("\n");
+            return sb.toString();
+        }
+
+        public long getParentIdForTesting() {
+            return parentId;
+        }
+    }
+}
\ No newline at end of file
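
A short usage sketch for the reader defined above; the JDBC URL is a placeholder,
while readAll() and the tree-printing TraceHolder.toString() are from this file:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.Collection;

    import org.apache.phoenix.trace.TraceReader;
    import org.apache.phoenix.trace.TraceReader.TraceHolder;

    public class TraceReaderExample {
        public static void main(String[] args) throws Exception {
            // placeholder URL - point it at a real cluster
            Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
            TraceReader reader = new TraceReader(conn); // default stats table
            // spans within each trace sort parent-first (see SpanInfo.compareTo)
            Collection<TraceHolder> traces = reader.readAll(10);
            for (TraceHolder trace : traces) {
                System.out.println(trace); // pretty-prints the span tree
            }
            conn.close();
        }
    }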

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b7f46c10/phoenix-core/src/main/java/org/apache/phoenix/trace/TracingIterator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/TracingIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/TracingIterator.java
new file mode 100644
index 0000000..0b12c43
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/TracingIterator.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.trace;
+
+import java.sql.SQLException;
+
+import org.apache.phoenix.iterate.DelegateResultIterator;
+import org.apache.phoenix.iterate.ResultIterator;
+import org.apache.phoenix.schema.tuple.Tuple;
+import org.cloudera.htrace.TraceScope;
+
+/**
+ * A simple iterator that closes the trace scope when the iterator is closed.
+ */
+public class TracingIterator extends DelegateResultIterator {
+
+    private TraceScope scope;
+    private boolean started;
+
+    /**
+     * @param scope a scope with a non-null span
+     * @param iterator delegate
+     */
+    public TracingIterator(TraceScope scope, ResultIterator iterator) {
+        super(iterator);
+        this.scope = scope;
+    }
+
+    @Override
+    public void close() throws SQLException {
+        scope.close();
+        super.close();
+    }
+
+    @Override
+    public Tuple next() throws SQLException {
+        if (!started) {
+            scope.getSpan().addTimelineAnnotation("First request completed");
+            started = true;
+        }
+        return super.next();
+    }
+}
\ No newline at end of file
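
How the iterator gets wired in is not shown in this excerpt; a sketch of the
presumed hookup (the helper name is illustrative). The guard matters because
TracingIterator assumes the scope carries a non-null span:

    import org.apache.phoenix.iterate.ResultIterator;
    import org.apache.phoenix.trace.TracingIterator;
    import org.cloudera.htrace.TraceScope;

    public class TracingIteratorSketch {
        // Only wrap when the request was actually sampled.
        static ResultIterator maybeWrap(TraceScope scope, ResultIterator delegate) {
            if (scope.getSpan() == null) {
                return delegate;
            }
            return new TracingIterator(scope, delegate);
        }
    }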

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b7f46c10/phoenix-core/src/main/java/org/apache/phoenix/trace/util/ConfigurationAdapter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/util/ConfigurationAdapter.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/util/ConfigurationAdapter.java
new file mode 100644
index 0000000..a96be7d
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/util/ConfigurationAdapter.java
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.trace.util;
+
+import org.apache.hadoop.conf.Configuration;
+
+import org.apache.phoenix.jdbc.PhoenixConnection;
+
+/**
+ * Helper class to wrap access to configured properties.
+ */
+abstract class ConfigurationAdapter {
+
+  public abstract String get(String key);
+
+  public static class ConnectionConfigurationAdapter extends ConfigurationAdapter {
+    private PhoenixConnection conn;
+
+    public ConnectionConfigurationAdapter(PhoenixConnection connection) {
+      this.conn = connection;
+    }
+
+    @Override
+    public String get(String key) {
+      return conn.getClientInfo(key);
+    }
+  }
+
+  public static class HadoopConfigConfigurationAdapter extends ConfigurationAdapter {
+    private Configuration conf;
+
+    public HadoopConfigConfigurationAdapter(Configuration conf) {
+      this.conf = conf;
+    }
+
+    @Override
+    public String get(String key) {
+      return conf.get(key);
+    }
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b7f46c10/phoenix-core/src/main/java/org/apache/phoenix/trace/util/NullSpan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/util/NullSpan.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/util/NullSpan.java
new file mode 100644
index 0000000..3799fdb
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/util/NullSpan.java
@@ -0,0 +1,112 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.trace.util;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import org.cloudera.htrace.Span;
+import org.cloudera.htrace.TimelineAnnotation;
+
+/**
+ * Fake {@link Span} that doesn't save any state, in place of <tt>null</tt> return values, to
+ * avoid <tt>null</tt> checks.
+ */
+public class NullSpan implements Span {
+
+  public static Span INSTANCE = new NullSpan();
+
+  /**
+   * Private constructor to limit garbage
+   */
+  private NullSpan() {
+  }
+
+  @Override
+  public void stop() {
+  }
+
+  @Override
+  public long getStartTimeMillis() {
+    return 0;
+  }
+
+  @Override
+  public long getStopTimeMillis() {
+    return 0;
+  }
+
+  @Override
+  public long getAccumulatedMillis() {
+    return 0;
+  }
+
+  @Override
+  public boolean isRunning() {
+    return false;
+  }
+
+  @Override
+  public String getDescription() {
+    return "NullSpan";
+  }
+
+  @Override
+  public long getSpanId() {
+    return 0;
+  }
+
+  @Override
+  public long getTraceId() {
+    return 0;
+  }
+
+  @Override
+  public Span child(String description) {
+    return INSTANCE;
+  }
+
+  @Override
+  public long getParentId() {
+    return 0;
+  }
+
+  @Override
+  public void addKVAnnotation(byte[] key, byte[] value) {
+  }
+
+  @Override
+  public void addTimelineAnnotation(String msg) {
+  }
+
+  @Override
+  public Map<byte[], byte[]> getKVAnnotations() {
+    return Collections.emptyMap();
+  }
+
+  @Override
+  public List<TimelineAnnotation> getTimelineAnnotations() {
+    return Collections.emptyList();
+  }
+
+  @Override
+  public String getProcessId() {
+    return null;
+  }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b7f46c10/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
new file mode 100644
index 0000000..5913cfb
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
@@ -0,0 +1,282 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.trace.util;
+
+import java.util.Properties;
+import java.util.concurrent.Callable;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.OperationWithAttributes;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.call.CallRunner;
+import org.apache.phoenix.call.CallWrapper;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.cloudera.htrace.Sampler;
+import org.cloudera.htrace.Span;
+import org.cloudera.htrace.Trace;
+import org.cloudera.htrace.TraceInfo;
+import org.cloudera.htrace.TraceScope;
+import org.cloudera.htrace.Tracer;
+import org.cloudera.htrace.impl.ProbabilitySampler;
+import org.cloudera.htrace.wrappers.TraceCallable;
+import org.cloudera.htrace.wrappers.TraceRunnable;
+
+import com.google.common.base.Function;
+
+/**
+ * Helper class to manage using the {@link Tracer} within Phoenix
+ */
+public class Tracing {
+
+    private static final Log LOG = LogFactory.getLog(Tracing.class);
+
+    private static final String SEPARATOR = ".";
+    // Constants for tracing across the wire
+    public static final String TRACE_ID_ATTRIBUTE_KEY = "phoenix.trace.traceid";
+    public static final String SPAN_ID_ATTRIBUTE_KEY = "phoenix.trace.spanid";
+    private static final String START_SPAN_MESSAGE = "Span received on server. Starting child";
+
+    // Constants for passing into the metrics system
+    public static final String TRACE_METRIC_PREFIX = "phoenix.trace.instance";
+    // Constants for configuring tracing
+    public static final String TRACING_LEVEL_KEY = "org.apache.phoenix.trace.frequency";
+    protected static final String PROBABILITY_THRESHOLD_KEY =
+            "com.salesforce.phoenix.trace.probability.threshold";
+
+    /**
+     * We always trace on the server, assuming the client has requested tracing on the request
+     */
+    public static Sampler<?> SERVER_TRACE_LEVEL = Sampler.ALWAYS;
+
+    /**
+     * Manage the types of frequencies that we support. By default, we never turn on tracing.
+     */
+    public static enum Frequency {
+        NEVER("never", CREATE_NEVER), // default
+        ALWAYS("always", CREATE_ALWAYS), PROBABILITY("probability", CREATE_PROBABILITY);
+
+        String key;
+        Function<ConfigurationAdapter, Sampler<?>> builder;
+
+        private Frequency(String key, Function<ConfigurationAdapter, Sampler<?>> builder) {
+            this.key = key;
+            this.builder = builder;
+        }
+
+        public String getKey() {
+            return key;
+        }
+
+        static Frequency getSampler(String key) {
+            for (Frequency type : Frequency.values()) {
+                if (type.key.equals(key)) {
+                    return type;
+                }
+            }
+            return NEVER;
+        }
+    }
+
+    private static Function<ConfigurationAdapter, Sampler<?>> CREATE_ALWAYS =
+            new Function<ConfigurationAdapter, Sampler<?>>() {
+                @Override
+                public Sampler<?> apply(ConfigurationAdapter arg0) {
+                    return Sampler.ALWAYS;
+                }
+            };
+
+    private static Function<ConfigurationAdapter, Sampler<?>> CREATE_NEVER =
+            new Function<ConfigurationAdapter, Sampler<?>>() {
+                @Override
+                public Sampler<?> apply(ConfigurationAdapter arg0) {
+                    return Sampler.NEVER;
+                }
+            };
+
+    private static Function<ConfigurationAdapter, Sampler<?>> CREATE_PROBABILITY =
+            new Function<ConfigurationAdapter, Sampler<?>>() {
+                @Override
+                public Sampler<?> apply(ConfigurationAdapter conn) {
+                    // get the connection properties for the probability information
+                    double threshold = Double.parseDouble(conn.get(PROBABILITY_THRESHOLD_KEY));
+                    return new ProbabilitySampler(threshold);
+                }
+            };
+
+    public static Sampler<?> getConfiguredSampler(PhoenixConnection connection) {
+        String tracelevel = connection.getClientInfo(TRACING_LEVEL_KEY);
+        return getSampler(tracelevel, new ConfigurationAdapter.ConnectionConfigurationAdapter(
+                connection));
+    }
+
+    public static Sampler<?> getConfiguredSampler(Configuration conf) {
+        String tracelevel = conf.get(TRACING_LEVEL_KEY);
+        return getSampler(tracelevel, new ConfigurationAdapter.HadoopConfigConfigurationAdapter(
+                conf));
+    }
+
+    private static Sampler<?> getSampler(String traceLevel, ConfigurationAdapter conf) {
+        return Frequency.getSampler(traceLevel).builder.apply(conf);
+    }
+
+    public static void setSampling(Properties props, Frequency freq) {
+        props.setProperty(TRACING_LEVEL_KEY, freq.key);
+    }
+
+    /**
+     * Start a span with the currently configured sampling frequency. Creates a new 'current' span
+     * on this thread - the previous 'current' span will be replaced with this newly created span.
+     * <p>
+     * Hands back the direct span as you shouldn't be detaching the span - use {@link TraceRunnable}
+     * instead to detach a span from this operation.
+     * @param connection from which to read parameters
+     * @param string description of the span to start
+     * @return the underlying span.
+     */
+    public static TraceScope startNewSpan(PhoenixConnection connection, String string) {
+        Sampler<?> sampler = connection.getSampler();
+        TraceScope scope = Trace.startSpan(string, sampler);
+        return scope;
+    }
+
+    public static String getSpanName(Span span) {
+        return Tracing.TRACE_METRIC_PREFIX + span.getTraceId() + SEPARATOR + span.getParentId()
+                + SEPARATOR + span.getSpanId();
+    }
+
+    /**
+     * Check to see if tracing is currently enabled. The trace for this thread is returned, if we
+     * are already tracing. Otherwise, checks to see if the mutation has tracing enabled, and if
+     * so, starts a new span with the {@link Mutation}'s specified span as its parent.
+     * <p>
+     * This should only be run on the server-side as we base tracing on if we are currently tracing
+     * (started higher in the call-stack) or if the {@link Mutation} has the tracing attributes
+     * defined. As such, we would expect to continue the trace on the server-side based on the
+     * original sampling parameters.
+     * @param scan {@link Mutation} to check
+     * @param conf {@link Configuration} to read for the current sampler
+     * @param description description of the child span to start
+     * @return the child {@link Span}, or a {@link NullSpan} if tracing is not enabled
+     */
+    public static Span childOnServer(OperationWithAttributes scan, Configuration conf,
+            String description) {
+        // check to see if we are currently tracing. Generally, this will only work when we go to
+        // 0.96. CPs should always be setting up and tearing down their own tracing
+        Span current = Trace.currentSpan();
+        if (current == null) {
+            // it's not tracing yet, but maybe it should be.
+            current = enable(scan, conf, description);
+        } else {
+            current = Trace.startSpan(description, current).getSpan();
+        }
+        return current;
+    }
+
+    /**
+     * Check to see if this mutation has tracing enabled, and if so, get a new span with the
+     * {@link Mutation}'s specified span as its parent.
+     * @param map mutation to check
+     * @param conf {@link Configuration} to check for the {@link Sampler} configuration, if we are
+     *            tracing
+     * @param description on the child to start
+     * @return a child span of the mutation, or a {@link NullSpan} if tracing is not enabled.
+     */
+    @SuppressWarnings("unchecked")
+    private static Span enable(OperationWithAttributes map, Configuration conf, String description) {
+        byte[] traceid = map.getAttribute(TRACE_ID_ATTRIBUTE_KEY);
+        if (traceid == null) {
+            return NullSpan.INSTANCE;
+        }
+        byte[] spanid = map.getAttribute(SPAN_ID_ATTRIBUTE_KEY);
+        if (spanid == null) {
+            LOG.error("TraceID set to " + Bytes.toLong(traceid) + ", but span 
id was not set!");
+            return NullSpan.INSTANCE;
+        }
+        Sampler<?> sampler = SERVER_TRACE_LEVEL;
+        TraceInfo parent = new TraceInfo(Bytes.toLong(traceid), Bytes.toLong(spanid));
+        return Trace.startSpan(START_SPAN_MESSAGE + ": " + description,
+            (Sampler<TraceInfo>) sampler, parent).getSpan();
+    }
+
+    public static Span child(Span s, String d) {
+        if (s == null) {
+            return NullSpan.INSTANCE;
+        }
+        return s.child(d);
+    }
+
+    /**
+     * Wrap the callable in a TraceCallable, if tracing.
+     * @param callable to call
+     * @param description description of the operation being run. If <tt>null</tt> uses the current
+     *            thread name
+     * @return The callable provided, wrapped if tracing, 'callable' if not.
+     */
+    public static <V> Callable<V> wrap(Callable<V> callable, String description) {
+        if (Trace.isTracing()) {
+            return new TraceCallable<V>(Trace.currentSpan(), callable, description);
+        }
+        return callable;
+    }
+
+    /**
+     * Helper to automatically start and complete tracing on the given method, used in conjunction
+     * with {@link CallRunner#run}.
+     * <p>
+     * This will always attempt to start a new span (which will always start, unless the
+     * {@link Sampler} says it shouldn't be traced). If you are just looking for flexible tracing
+     * that only turns on if the current thread/query is already tracing, use
+     * {@link #wrap(Callable, String)} or {@link Trace#wrap(Callable)}.
+     * <p>
+     * Ensures that the trace is closed, even if there is an exception from the
+     * {@link org.apache.phoenix.call.CallRunner.CallableThrowable}.
+     * <p>
+     * Generally, this should wrap a long-running operation.
+     * @param conn connection from which to determine if we are tracing, ala
+     *            {@link #startNewSpan(PhoenixConnection, String)}
+     * @param desc description of the operation being run
+     * @return a {@link CallWrapper} that starts the span in its before() and closes it in after()
+     */
+    public static CallWrapper withTracing(PhoenixConnection conn, String desc) {
+        return new TracingWrapper(conn, desc);
+    }
+
+    private static class TracingWrapper implements CallWrapper {
+        private TraceScope scope;
+        private final PhoenixConnection conn;
+        private final String desc;
+
+        public TracingWrapper(PhoenixConnection conn, String desc){
+            this.conn = conn;
+            this.desc = desc;
+        }
+
+        @Override
+        public void before() {
+            scope = Tracing.startNewSpan(conn, "Executing " + desc);
+        }
+
+        @Override
+        public void after() {
+            scope.close();
+        }
+    }
+}
\ No newline at end of file
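
For reference, a minimal sketch (not part of this patch) of how a client could opt into tracing
through the new helper. It assumes the Phoenix JDBC driver is on the classpath and registered;
"localhost" stands in for a real ZooKeeper quorum:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.Properties;

    import org.apache.phoenix.trace.util.Tracing;

    public class TracingClientSketch {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            // request tracing for every operation on this connection
            Tracing.setSampling(props, Tracing.Frequency.ALWAYS);
            Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props);
            try {
                // statements run here are sampled per the configured frequency
            } finally {
                conn.close();
            }
        }
    }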

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b7f46c10/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixContextExecutor.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixContextExecutor.java b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixContextExecutor.java
index a725c2c..9106a5d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixContextExecutor.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixContextExecutor.java
@@ -19,6 +19,8 @@ package org.apache.phoenix.util;
 
 import java.util.concurrent.Callable;
 
+import org.apache.phoenix.call.CallWrapper;
+
 import com.google.common.base.Throwables;
 
 /**
@@ -34,6 +36,27 @@ import com.google.common.base.Throwables;
  */
 public class PhoenixContextExecutor {
 
+    private static class CurrentContextWrapper implements CallWrapper {
+        private ClassLoader saveCcl;
+
+        @Override
+        public void before() {
+            saveCcl = Thread.currentThread().getContextClassLoader();
+            Thread.currentThread().setContextClassLoader(
+                PhoenixContextExecutor.class.getClassLoader());
+        }
+
+        @Override
+        public void after() {
+            Thread.currentThread().setContextClassLoader(saveCcl);
+        }
+    }
+
+    public static CallWrapper inContext() {
+        return new CurrentContextWrapper();
+    }
+
     /**
      * Execute an operation (synchronously) using the context classloader used to load this class,
      * instead of the currently-set context classloader of the current thread. This allows loading
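
A minimal sketch of the CallWrapper contract the new inContext() hook exposes; the class name here
is illustrative only:

    import org.apache.phoenix.call.CallWrapper;
    import org.apache.phoenix.util.PhoenixContextExecutor;

    public class InContextSketch {
        public static void main(String[] args) {
            CallWrapper context = PhoenixContextExecutor.inContext();
            context.before(); // swap in the classloader that loaded PhoenixContextExecutor
            try {
                // work that should resolve classes through Phoenix's own classloader
                System.out.println(Thread.currentThread().getContextClassLoader());
            } finally {
                context.after(); // always restore the caller's context classloader
            }
        }
    }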

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b7f46c10/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
index ebd8465..055c2d8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
@@ -20,13 +20,22 @@ package org.apache.phoenix.util;
 
 import static org.apache.phoenix.util.SchemaUtil.getEscapedFullColumnName;
 
+import java.sql.Connection;
 import java.sql.DatabaseMetaData;
+import java.sql.DriverManager;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.util.List;
 
 import javax.annotation.Nullable;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.zookeeper.ZKConfig;
+import org.apache.phoenix.jdbc.PhoenixDriver;
+
 import com.google.common.base.Function;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
@@ -35,6 +44,8 @@ import com.google.common.collect.Lists;
 
 public final class QueryUtil {
 
+    private static final Log LOG = LogFactory.getLog(QueryUtil.class);
+
     /**
      *  Column family name index within ResultSet resulting from {@link DatabaseMetaData#getColumns(String, String, String, String)}
      */
@@ -152,6 +163,12 @@ public final class QueryUtil {
         return PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + server;
     }
 
+    public static String getUrl(String server, long port) {
+        String serverUrl = getUrl(server);
+        return serverUrl + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + port
+                + PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR;
+    }
+
     public static String getExplainPlan(ResultSet rs) throws SQLException {
         StringBuilder buf = new StringBuilder();
         while (rs.next()) {
@@ -163,4 +180,16 @@ public final class QueryUtil {
         }
         return buf.toString();
     }
+    public static Connection getConnection(Configuration conf) throws ClassNotFoundException,
+            SQLException {
+        // read the hbase properties from the configuration
+        String server = ZKConfig.getZKQuorumServersString(conf);
+        // make sure we load the phoenix driver
+        Class.forName(PhoenixDriver.class.getName());
+        int port =
+            conf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT, HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT);
+        String jdbcUrl = getUrl(server, port);
+        LOG.info("Creating connection with the jdbc url:" + jdbcUrl);
+        return DriverManager.getConnection(jdbcUrl);
+    }
 }
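
A minimal sketch of getConnection(Configuration) in use, assuming an hbase-site.xml on the
classpath supplies the ZooKeeper quorum and client port:

    import java.sql.Connection;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.phoenix.util.QueryUtil;

    public class ConnectionFromConfSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // builds jdbc:phoenix:<quorum>:<port>; and connects through the Phoenix driver
            Connection conn = QueryUtil.getConnection(conf);
            try {
                System.out.println("connected: " + !conn.isClosed());
            } finally {
                conn.close();
            }
        }
    }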

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b7f46c10/phoenix-core/src/test/resources/hadoop-metrics2.properties
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/resources/hadoop-metrics2.properties b/phoenix-core/src/test/resources/hadoop-metrics2.properties
new file mode 100644
index 0000000..ebbbe84
--- /dev/null
+++ b/phoenix-core/src/test/resources/hadoop-metrics2.properties
@@ -0,0 +1,25 @@
+#There are two options with file names:
+#  1. hadoop-metrics2-[prefix].properties
+#  2. hadoop-metrics2.properties
+# Either will be loaded by the metrics system (but not both).
+#
+# NOTE: The metrics system is only initialized once per JVM (but does ref-counting, so we can't
+# shutdown and restart), so we only load the first prefix that we find. Generally, this will be
+# phoenix (unless someone else registers first, but for many clients, there should only be one).
+#
+# Usually, you would use hadoop-metrics2-phoenix.properties, but we use the generic
+# hadoop-metrics2.properties to ensure these are loaded regardless of where we are running,
+# assuming there isn't another config on the classpath.
+
+#
+# When specifying sinks, the syntax to use is:
+#    [prefix].[source|sink].[instance].[options]
+# See the javadoc of package-info.java in org.apache.hadoop.metrics2 for details
+
+
+# Don't attempt to start jmx mbeans for all sources.
+#  For right now, all metrics are exported to a phoenix table
+*.source.start_mbeans=false
+
+# Frequency, in seconds, of sampling from the sources
+*.period=10
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b7f46c10/phoenix-core/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/resources/log4j.properties b/phoenix-core/src/test/resources/log4j.properties
index 6b1ce50..8e54793 100644
--- a/phoenix-core/src/test/resources/log4j.properties
+++ b/phoenix-core/src/test/resources/log4j.properties
@@ -52,12 +52,12 @@ log4j.appender.DRFA.layout.ConversionPattern=%d %-5p [%t] %C{2}(%L): %m%n
 log4j.appender.console=org.apache.log4j.ConsoleAppender
 log4j.appender.console.target=System.err
 log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d %-5p [%t] %C{2}(%L): %m%n
+log4j.appender.console.layout.ConversionPattern=%d %-5p [%t] %C(%L): %m%n
 
 # Custom Logging levels
 
 #log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-
+log4j.logger.org.mortbay.log=WARN
 log4j.logger.org.apache.hadoop=WARN
 log4j.logger.org.apache.zookeeper=ERROR
 log4j.logger.org.apache.hadoop.hbase=DEBUG

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b7f46c10/phoenix-hadoop-compat/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-hadoop-compat/pom.xml b/phoenix-hadoop-compat/pom.xml
index a51f571..c2f810a 100644
--- a/phoenix-hadoop-compat/pom.xml
+++ b/phoenix-hadoop-compat/pom.xml
@@ -39,7 +39,7 @@
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-jar-plugin</artifactId>
-        <version>2.4</version>
+        <version>2.4</version><!--$NO-MVN-MAN-VER$-->
         <executions>
           <execution>
             <phase>prepare-package
@@ -59,4 +59,31 @@
       </plugin>
     </plugins>
   </build>
-</project>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.cloudera.htrace</groupId>
+      <artifactId>htrace-core</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-hadoop-compat</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>log4j</groupId>
+      <artifactId>log4j</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-common</artifactId>
+    </dependency>
+  </dependencies>
+</project>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b7f46c10/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/MetricInfo.java
----------------------------------------------------------------------
diff --git a/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/MetricInfo.java b/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/MetricInfo.java
new file mode 100644
index 0000000..e6ad976
--- /dev/null
+++ b/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/MetricInfo.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.metrics;
+
+/**
+ * Metrics and their conversion from the trace name to the name we store in the stats table
+ */
+public enum MetricInfo {
+
+    TRACE("", "trace_id"),
+    SPAN("span_id", "span_id"),
+    PARENT("parent_id", "parent_id"),
+    START("start_time", "start_time"),
+    END("end_time", "end_time"),
+    TAG("phoenix.tag", "t"),
+    ANNOTATION("phoenix.annotation", "a"),
+    HOSTNAME("Hostname", "hostname"),
+    DESCRIPTION("", "description");
+
+    public final String traceName;
+    public final String columnName;
+
+    private MetricInfo(String traceName, String columnName) {
+        this.traceName = traceName;
+        this.columnName = columnName;
+    }
+
+    public static String getColumnName(String traceName) {
+        for (MetricInfo info : MetricInfo.values()) {
+            if (info.traceName.equals(traceName)) {
+                return info.columnName;
+            }
+        }
+        throw new IllegalArgumentException("Unknown tracename: " + traceName);
+    }
+}
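
A small sketch of the name mapping this enum provides, using only the constants defined above:

    import org.apache.phoenix.metrics.MetricInfo;

    public class MetricInfoSketch {
        public static void main(String[] args) {
            // translate the htrace-facing names into stats-table column names
            System.out.println(MetricInfo.getColumnName("phoenix.tag"));        // "t"
            System.out.println(MetricInfo.getColumnName("phoenix.annotation")); // "a"
            System.out.println(MetricInfo.getColumnName("Hostname"));           // "hostname"
        }
    }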

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b7f46c10/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/Metrics.java
----------------------------------------------------------------------
diff --git a/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/Metrics.java b/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/Metrics.java
new file mode 100644
index 0000000..e0667ab
--- /dev/null
+++ b/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/Metrics.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.metrics;
+
+import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+
+public class Metrics {
+
+  private static volatile MetricsManager manager;
+
+  /**
+   * @return the first {@link MetricsManager} found on the classpath. Always returns the same object
+   */
+  public static MetricsManager getManager(){
+    if(manager == null){
+      synchronized(Metrics.class){
+        if(manager == null){
+          manager = CompatibilitySingletonFactory.getInstance(MetricsManager.class);
+        }
+      }
+    }
+    return manager;
+  }
+}
\ No newline at end of file
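
A minimal sketch of bootstrapping the manager. The "phoenix" prefix is an assumption consistent
with the hadoop-metrics2.properties notes above, and getManager() will fail if no compat
implementation is on the classpath:

    import org.apache.phoenix.metrics.Metrics;
    import org.apache.phoenix.metrics.MetricsManager;

    public class MetricsBootstrapSketch {
        public static void main(String[] args) {
            // resolves whichever hadoop1/hadoop2 compat implementation is on the classpath
            MetricsManager manager = Metrics.getManager();
            manager.initialize("phoenix");
            // sources and sinks would be attached via registerSource(...) / register(...)
            manager.shutdown();
        }
    }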

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b7f46c10/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/MetricsManager.java
----------------------------------------------------------------------
diff --git a/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/MetricsManager.java b/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/MetricsManager.java
new file mode 100644
index 0000000..13c9435
--- /dev/null
+++ b/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/MetricsManager.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.metrics;
+
+/**
+ * Metrics management system. Backed by the underlying hadoop metrics, depending on the project on
+ * the classpath.
+ * <p>
+ * The underlying types passed to methods must match the expected metrics type - this will vary for
+ * the underlying metrics systems (hadoop1 vs hadoop2), but can't be specified at this layer because
+ * we must be compatible with both systems.
+ */
+public interface MetricsManager {
+
+    /**
+     * @param metricsSystemName the metrics prefix to initialize, if it hasn't already been
+     *            initialized. Not assumed to be thread-safe, unless otherwise noted in the
+     *            implementation.
+     */
+    public abstract void initialize(String metricsSystemName);
+
+    /**
+     * Register a metrics sink
+     * @param <T> the type of the sink
+     * @param sink to register
+     * @param name of the sink. Must be unique.
+     * @param desc the description of the sink
+     * @return the sink
+     */
+    public abstract <T> T register(String name, String desc, T sink);
+
+    /**
+     * Register a metrics source.
+     * @param name name of the source - must be unique
+     * @param description description of the source
+     * @param source to register.
+     * @param <T> the type of the source
+     * @return the source
+     */
+    public abstract <T> T registerSource(String name, String description, T source);
+
+    public void shutdown();
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b7f46c10/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/MetricsWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/MetricsWriter.java b/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/MetricsWriter.java
new file mode 100644
index 0000000..0e8b9fe
--- /dev/null
+++ b/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/MetricsWriter.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.metrics;
+
+
+/**
+ * Generic writer for a phoenix metric
+ */
+public interface MetricsWriter {
+
+    public void initialize();
+
+    public void flush();
+
+    public void addMetrics(PhoenixMetricsRecord record);
+}
\ No newline at end of file
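
A hypothetical implementation of this interface (illustrative only, not part of this patch) that
just logs each record; a real writer would persist the record to the Phoenix stats table:

    package org.apache.phoenix.metrics; // hypothetical placement, same package as the interface

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    public class LoggingMetricsWriter implements MetricsWriter {

        private static final Log LOG = LogFactory.getLog(LoggingMetricsWriter.class);

        @Override
        public void initialize() {
            // no setup needed for logging
        }

        @Override
        public void addMetrics(PhoenixMetricsRecord record) {
            LOG.info("metrics record: " + record.name() + " - " + record.description());
        }

        @Override
        public void flush() {
            // nothing buffered, so nothing to flush
        }
    }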

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b7f46c10/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/PhoenixAbstractMetric.java
----------------------------------------------------------------------
diff --git a/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/PhoenixAbstractMetric.java b/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/PhoenixAbstractMetric.java
new file mode 100644
index 0000000..27ae6b8
--- /dev/null
+++ b/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/PhoenixAbstractMetric.java
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.metrics;
+
+
+public interface PhoenixAbstractMetric {
+
+    public String getName();
+
+    /**
+     * Get the value of the metric
+     * @return the value of the metric
+     */
+    public Number value();
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b7f46c10/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/PhoenixMetricTag.java
----------------------------------------------------------------------
diff --git a/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/PhoenixMetricTag.java b/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/PhoenixMetricTag.java
new file mode 100644
index 0000000..123cc1c
--- /dev/null
+++ b/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/PhoenixMetricTag.java
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.metrics;
+
+public interface PhoenixMetricTag {
+
+    public String name();
+
+    public String description();
+
+    public String value();
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b7f46c10/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/PhoenixMetricsRecord.java
----------------------------------------------------------------------
diff --git a/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/PhoenixMetricsRecord.java b/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/PhoenixMetricsRecord.java
new file mode 100644
index 0000000..68f7c46
--- /dev/null
+++ b/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/PhoenixMetricsRecord.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.metrics;
+
+import java.util.Collection;
+
+
+/**
+ * A single record of metrics and associated tags, as handed to a {@link MetricsWriter}
+ */
+public interface PhoenixMetricsRecord {
+
+    public String name();
+
+    public String description();
+
+    public Iterable<PhoenixAbstractMetric> metrics();
+
+    public Collection<PhoenixMetricTag> tags();
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b7f46c10/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/trace/PhoenixSpanReceiver.java
----------------------------------------------------------------------
diff --git a/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/trace/PhoenixSpanReceiver.java b/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/trace/PhoenixSpanReceiver.java
new file mode 100644
index 0000000..7e4e09c
--- /dev/null
+++ b/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/trace/PhoenixSpanReceiver.java
@@ -0,0 +1,26 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.trace;
+
+import org.cloudera.htrace.SpanReceiver;
+
+/**
+ * Marker interface for phoenix specific receivers.
+ */
+public interface PhoenixSpanReceiver extends SpanReceiver {
+}
\ No newline at end of file
