http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/db/internal_stats.cc
----------------------------------------------------------------------
diff --git a/thirdparty/rocksdb/db/internal_stats.cc 
b/thirdparty/rocksdb/db/internal_stats.cc
new file mode 100644
index 0000000..54723ea
--- /dev/null
+++ b/thirdparty/rocksdb/db/internal_stats.cc
@@ -0,0 +1,1174 @@
+//  This source code is licensed under both the GPLv2 (found in the
+//  COPYING file in the root directory) and Apache 2.0 License
+//  (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "db/internal_stats.h"
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+
+#include <inttypes.h>
+#include <string>
+#include <algorithm>
+#include <utility>
+#include <vector>
+#include "db/column_family.h"
+
+#include "db/db_impl.h"
+#include "util/string_util.h"
+
+namespace rocksdb {
+
+#ifndef ROCKSDB_LITE
+
// Maps each per-level statistic to its machine-readable property name and the
// human-readable column header used when dumping compaction stats.
// COMPACTED_FILES has no column of its own; it is merged into the "Files"
// column by the formatting code below.
const std::map<LevelStatType, LevelStat> InternalStats::compaction_level_stats =
    {
        {LevelStatType::NUM_FILES, LevelStat{"NumFiles", "Files"}},
        {LevelStatType::COMPACTED_FILES,
         LevelStat{"CompactedFiles", "CompactedFiles"}},
        {LevelStatType::SIZE_BYTES, LevelStat{"SizeBytes", "Size"}},
        {LevelStatType::SCORE, LevelStat{"Score", "Score"}},
        {LevelStatType::READ_GB, LevelStat{"ReadGB", "Read(GB)"}},
        {LevelStatType::RN_GB, LevelStat{"RnGB", "Rn(GB)"}},
        {LevelStatType::RNP1_GB, LevelStat{"Rnp1GB", "Rnp1(GB)"}},
        {LevelStatType::WRITE_GB, LevelStat{"WriteGB", "Write(GB)"}},
        {LevelStatType::W_NEW_GB, LevelStat{"WnewGB", "Wnew(GB)"}},
        {LevelStatType::MOVED_GB, LevelStat{"MovedGB", "Moved(GB)"}},
        {LevelStatType::WRITE_AMP, LevelStat{"WriteAmp", "W-Amp"}},
        {LevelStatType::READ_MBPS, LevelStat{"ReadMBps", "Rd(MB/s)"}},
        {LevelStatType::WRITE_MBPS, LevelStat{"WriteMBps", "Wr(MB/s)"}},
        {LevelStatType::COMP_SEC, LevelStat{"CompSec", "Comp(sec)"}},
        {LevelStatType::COMP_COUNT, LevelStat{"CompCount", "Comp(cnt)"}},
        {LevelStatType::AVG_SEC, LevelStat{"AvgSec", "Avg(sec)"}},
        {LevelStatType::KEY_IN, LevelStat{"KeyIn", "KeyIn"}},
        {LevelStatType::KEY_DROP, LevelStat{"KeyDrop", "KeyDrop"}},
};
+
+namespace {
// Unit-conversion constants for formatting byte counts and durations.
const double kMB = 1048576.0;
const double kGB = kMB * 1024;
const double kMicrosInSec = 1000000.0;
+
+void PrintLevelStatsHeader(char* buf, size_t len, const std::string& cf_name) {
+  int written_size =
+      snprintf(buf, len, "\n** Compaction Stats [%s] **\n", cf_name.c_str());
+  auto hdr = [](LevelStatType t) {
+    return InternalStats::compaction_level_stats.at(t).header_name.c_str();
+  };
+  int line_size = snprintf(
+      buf + written_size, len - written_size,
+      "Level    %s   %s     %s %s  %s %s %s %s %s %s %s %s %s %s %s %s %s\n",
+      // Note that we skip COMPACTED_FILES and merge it with Files column
+      hdr(LevelStatType::NUM_FILES), hdr(LevelStatType::SIZE_BYTES),
+      hdr(LevelStatType::SCORE), hdr(LevelStatType::READ_GB),
+      hdr(LevelStatType::RN_GB), hdr(LevelStatType::RNP1_GB),
+      hdr(LevelStatType::WRITE_GB), hdr(LevelStatType::W_NEW_GB),
+      hdr(LevelStatType::MOVED_GB), hdr(LevelStatType::WRITE_AMP),
+      hdr(LevelStatType::READ_MBPS), hdr(LevelStatType::WRITE_MBPS),
+      hdr(LevelStatType::COMP_SEC), hdr(LevelStatType::COMP_COUNT),
+      hdr(LevelStatType::AVG_SEC), hdr(LevelStatType::KEY_IN),
+      hdr(LevelStatType::KEY_DROP));
+
+  written_size += line_size;
+  snprintf(buf + written_size, len - written_size, "%s\n",
+           std::string(line_size, '-').c_str());
+}
+
+void PrepareLevelStats(std::map<LevelStatType, double>* level_stats,
+                       int num_files, int being_compacted,
+                       double total_file_size, double score, double w_amp,
+                       const InternalStats::CompactionStats& stats) {
+  uint64_t bytes_read =
+      stats.bytes_read_non_output_levels + stats.bytes_read_output_level;
+  int64_t bytes_new =
+      stats.bytes_written - stats.bytes_read_output_level;
+  double elapsed = (stats.micros + 1) / kMicrosInSec;
+
+  (*level_stats)[LevelStatType::NUM_FILES] = num_files;
+  (*level_stats)[LevelStatType::COMPACTED_FILES] = being_compacted;
+  (*level_stats)[LevelStatType::SIZE_BYTES] = total_file_size;
+  (*level_stats)[LevelStatType::SCORE] = score;
+  (*level_stats)[LevelStatType::READ_GB] = bytes_read / kGB;
+  (*level_stats)[LevelStatType::RN_GB] =
+      stats.bytes_read_non_output_levels / kGB;
+  (*level_stats)[LevelStatType::RNP1_GB] = stats.bytes_read_output_level / kGB;
+  (*level_stats)[LevelStatType::WRITE_GB] = stats.bytes_written / kGB;
+  (*level_stats)[LevelStatType::W_NEW_GB] = bytes_new / kGB;
+  (*level_stats)[LevelStatType::MOVED_GB] = stats.bytes_moved / kGB;
+  (*level_stats)[LevelStatType::WRITE_AMP] = w_amp;
+  (*level_stats)[LevelStatType::READ_MBPS] = bytes_read / kMB / elapsed;
+  (*level_stats)[LevelStatType::WRITE_MBPS] =
+      stats.bytes_written / kMB / elapsed;
+  (*level_stats)[LevelStatType::COMP_SEC] = stats.micros / kMicrosInSec;
+  (*level_stats)[LevelStatType::COMP_COUNT] = stats.count;
+  (*level_stats)[LevelStatType::AVG_SEC] =
+      stats.count == 0 ? 0 : stats.micros / kMicrosInSec / stats.count;
+  (*level_stats)[LevelStatType::KEY_IN] =
+      static_cast<double>(stats.num_input_records);
+  (*level_stats)[LevelStatType::KEY_DROP] =
+      static_cast<double>(stats.num_dropped_records);
+}
+
// Formats one row of the compaction stats table into `buf` from a prepared
// stat map (see PrepareLevelStats). Column widths here must line up with the
// header printed by PrintLevelStatsHeader. Byte sizes and key counts are
// rendered with human-readable helpers; everything else is printed directly.
void PrintLevelStats(char* buf, size_t len, const std::string& name,
                     const std::map<LevelStatType, double>& stat_value) {
  snprintf(buf, len,
           "%4s "      /*  Level */
           "%6d/%-3d " /*  Files */
           "%8s "      /*  Size */
           "%5.1f "    /*  Score */
           "%8.1f "    /*  Read(GB) */
           "%7.1f "    /*  Rn(GB) */
           "%8.1f "    /*  Rnp1(GB) */
           "%9.1f "    /*  Write(GB) */
           "%8.1f "    /*  Wnew(GB) */
           "%9.1f "    /*  Moved(GB) */
           "%5.1f "    /*  W-Amp */
           "%8.1f "    /*  Rd(MB/s) */
           "%8.1f "    /*  Wr(MB/s) */
           "%9.0f "    /*  Comp(sec) */
           "%9d "      /*  Comp(cnt) */
           "%8.3f "    /*  Avg(sec) */
           "%7s "      /*  KeyIn */
           "%6s\n",    /*  KeyDrop */
           name.c_str(),
           static_cast<int>(stat_value.at(LevelStatType::NUM_FILES)),
           static_cast<int>(stat_value.at(LevelStatType::COMPACTED_FILES)),
           BytesToHumanString(
               static_cast<uint64_t>(stat_value.at(LevelStatType::SIZE_BYTES)))
               .c_str(),
           stat_value.at(LevelStatType::SCORE),
           stat_value.at(LevelStatType::READ_GB),
           stat_value.at(LevelStatType::RN_GB),
           stat_value.at(LevelStatType::RNP1_GB),
           stat_value.at(LevelStatType::WRITE_GB),
           stat_value.at(LevelStatType::W_NEW_GB),
           stat_value.at(LevelStatType::MOVED_GB),
           stat_value.at(LevelStatType::WRITE_AMP),
           stat_value.at(LevelStatType::READ_MBPS),
           stat_value.at(LevelStatType::WRITE_MBPS),
           stat_value.at(LevelStatType::COMP_SEC),
           static_cast<int>(stat_value.at(LevelStatType::COMP_COUNT)),
           stat_value.at(LevelStatType::AVG_SEC),
           NumberToHumanString(
               static_cast<std::int64_t>(stat_value.at(LevelStatType::KEY_IN)))
               .c_str(),
           NumberToHumanString(static_cast<std::int64_t>(
                                   stat_value.at(LevelStatType::KEY_DROP)))
               .c_str());
}
+
+void PrintLevelStats(char* buf, size_t len, const std::string& name,
+                     int num_files, int being_compacted, double 
total_file_size,
+                     double score, double w_amp,
+                     const InternalStats::CompactionStats& stats) {
+  std::map<LevelStatType, double> level_stats;
+  PrepareLevelStats(&level_stats, num_files, being_compacted, total_file_size,
+                    score, w_amp, stats);
+  PrintLevelStats(buf, len, name, level_stats);
+}
+
+// Assumes that trailing numbers represent an optional argument. This requires
+// property names to not end with numbers.
+std::pair<Slice, Slice> GetPropertyNameAndArg(const Slice& property) {
+  Slice name = property, arg = property;
+  size_t sfx_len = 0;
+  while (sfx_len < property.size() &&
+         isdigit(property[property.size() - sfx_len - 1])) {
+    ++sfx_len;
+  }
+  name.remove_suffix(sfx_len);
+  arg.remove_prefix(property.size() - sfx_len);
+  return {name, arg};
+}
+}  // anonymous namespace
+
// Unqualified suffixes of the user-visible property names. The public names
// exposed through DB::Properties below are formed by prepending
// rocksdb_prefix to each of these.
static const std::string rocksdb_prefix = "rocksdb.";

static const std::string num_files_at_level_prefix = "num-files-at-level";
static const std::string compression_ratio_at_level_prefix =
    "compression-ratio-at-level";
static const std::string allstats = "stats";
static const std::string sstables = "sstables";
static const std::string cfstats = "cfstats";
static const std::string cfstats_no_file_histogram =
    "cfstats-no-file-histogram";
static const std::string cf_file_histogram = "cf-file-histogram";
static const std::string dbstats = "dbstats";
static const std::string levelstats = "levelstats";
static const std::string num_immutable_mem_table = "num-immutable-mem-table";
static const std::string num_immutable_mem_table_flushed =
    "num-immutable-mem-table-flushed";
static const std::string mem_table_flush_pending = "mem-table-flush-pending";
static const std::string compaction_pending = "compaction-pending";
static const std::string background_errors = "background-errors";
static const std::string cur_size_active_mem_table =
                          "cur-size-active-mem-table";
static const std::string cur_size_all_mem_tables = "cur-size-all-mem-tables";
static const std::string size_all_mem_tables = "size-all-mem-tables";
static const std::string num_entries_active_mem_table =
                          "num-entries-active-mem-table";
static const std::string num_entries_imm_mem_tables =
                          "num-entries-imm-mem-tables";
static const std::string num_deletes_active_mem_table =
                          "num-deletes-active-mem-table";
static const std::string num_deletes_imm_mem_tables =
                          "num-deletes-imm-mem-tables";
static const std::string estimate_num_keys = "estimate-num-keys";
static const std::string estimate_table_readers_mem =
                          "estimate-table-readers-mem";
static const std::string is_file_deletions_enabled =
                          "is-file-deletions-enabled";
static const std::string num_snapshots = "num-snapshots";
static const std::string oldest_snapshot_time = "oldest-snapshot-time";
static const std::string num_live_versions = "num-live-versions";
static const std::string current_version_number =
    "current-super-version-number";
static const std::string estimate_live_data_size = "estimate-live-data-size";
static const std::string min_log_number_to_keep = "min-log-number-to-keep";
static const std::string base_level = "base-level";
static const std::string total_sst_files_size = "total-sst-files-size";
static const std::string estimate_pending_comp_bytes =
    "estimate-pending-compaction-bytes";
static const std::string aggregated_table_properties =
    "aggregated-table-properties";
static const std::string aggregated_table_properties_at_level =
    aggregated_table_properties + "-at-level";
static const std::string num_running_compactions = "num-running-compactions";
static const std::string num_running_flushes = "num-running-flushes";
static const std::string actual_delayed_write_rate =
    "actual-delayed-write-rate";
static const std::string is_write_stopped = "is-write-stopped";
+
// Definitions of the fully qualified property names declared on
// DB::Properties: each is the "rocksdb." prefix joined with one of the
// suffix strings above.
const std::string DB::Properties::kNumFilesAtLevelPrefix =
                      rocksdb_prefix + num_files_at_level_prefix;
const std::string DB::Properties::kCompressionRatioAtLevelPrefix =
                      rocksdb_prefix + compression_ratio_at_level_prefix;
const std::string DB::Properties::kStats = rocksdb_prefix + allstats;
const std::string DB::Properties::kSSTables = rocksdb_prefix + sstables;
const std::string DB::Properties::kCFStats = rocksdb_prefix + cfstats;
const std::string DB::Properties::kCFStatsNoFileHistogram =
    rocksdb_prefix + cfstats_no_file_histogram;
const std::string DB::Properties::kCFFileHistogram =
    rocksdb_prefix + cf_file_histogram;
const std::string DB::Properties::kDBStats = rocksdb_prefix + dbstats;
const std::string DB::Properties::kLevelStats = rocksdb_prefix + levelstats;
const std::string DB::Properties::kNumImmutableMemTable =
                      rocksdb_prefix + num_immutable_mem_table;
const std::string DB::Properties::kNumImmutableMemTableFlushed =
    rocksdb_prefix + num_immutable_mem_table_flushed;
const std::string DB::Properties::kMemTableFlushPending =
                      rocksdb_prefix + mem_table_flush_pending;
const std::string DB::Properties::kCompactionPending =
                      rocksdb_prefix + compaction_pending;
const std::string DB::Properties::kNumRunningCompactions =
    rocksdb_prefix + num_running_compactions;
const std::string DB::Properties::kNumRunningFlushes =
    rocksdb_prefix + num_running_flushes;
const std::string DB::Properties::kBackgroundErrors =
                      rocksdb_prefix + background_errors;
const std::string DB::Properties::kCurSizeActiveMemTable =
                      rocksdb_prefix + cur_size_active_mem_table;
const std::string DB::Properties::kCurSizeAllMemTables =
    rocksdb_prefix + cur_size_all_mem_tables;
const std::string DB::Properties::kSizeAllMemTables =
    rocksdb_prefix + size_all_mem_tables;
const std::string DB::Properties::kNumEntriesActiveMemTable =
                      rocksdb_prefix + num_entries_active_mem_table;
const std::string DB::Properties::kNumEntriesImmMemTables =
                      rocksdb_prefix + num_entries_imm_mem_tables;
const std::string DB::Properties::kNumDeletesActiveMemTable =
                      rocksdb_prefix + num_deletes_active_mem_table;
const std::string DB::Properties::kNumDeletesImmMemTables =
                      rocksdb_prefix + num_deletes_imm_mem_tables;
const std::string DB::Properties::kEstimateNumKeys =
                      rocksdb_prefix + estimate_num_keys;
const std::string DB::Properties::kEstimateTableReadersMem =
                      rocksdb_prefix + estimate_table_readers_mem;
const std::string DB::Properties::kIsFileDeletionsEnabled =
                      rocksdb_prefix + is_file_deletions_enabled;
const std::string DB::Properties::kNumSnapshots =
                      rocksdb_prefix + num_snapshots;
const std::string DB::Properties::kOldestSnapshotTime =
                      rocksdb_prefix + oldest_snapshot_time;
const std::string DB::Properties::kNumLiveVersions =
                      rocksdb_prefix + num_live_versions;
const std::string DB::Properties::kCurrentSuperVersionNumber =
    rocksdb_prefix + current_version_number;
const std::string DB::Properties::kEstimateLiveDataSize =
                      rocksdb_prefix + estimate_live_data_size;
const std::string DB::Properties::kMinLogNumberToKeep =
    rocksdb_prefix + min_log_number_to_keep;
const std::string DB::Properties::kTotalSstFilesSize =
                      rocksdb_prefix + total_sst_files_size;
const std::string DB::Properties::kBaseLevel = rocksdb_prefix + base_level;
const std::string DB::Properties::kEstimatePendingCompactionBytes =
    rocksdb_prefix + estimate_pending_comp_bytes;
const std::string DB::Properties::kAggregatedTableProperties =
    rocksdb_prefix + aggregated_table_properties;
const std::string DB::Properties::kAggregatedTablePropertiesAtLevel =
    rocksdb_prefix + aggregated_table_properties_at_level;
const std::string DB::Properties::kActualDelayedWriteRate =
    rocksdb_prefix + actual_delayed_write_rate;
const std::string DB::Properties::kIsWriteStopped =
    rocksdb_prefix + is_write_stopped;
+
// Registry mapping each property name to its DBPropertyInfo. Based on the
// entries here, each initializer supplies: a flag that is true for the
// properties dispatched via GetIntPropertyOutOfMutex() (see the asserts in
// the Get*Property methods below), then the string handler, the integer
// handler, and the map handler — exactly one of the three handlers is
// non-null per entry.
const std::unordered_map<std::string, DBPropertyInfo>
    InternalStats::ppt_name_to_info = {
        {DB::Properties::kNumFilesAtLevelPrefix,
         {false, &InternalStats::HandleNumFilesAtLevel, nullptr, nullptr}},
        {DB::Properties::kCompressionRatioAtLevelPrefix,
         {false, &InternalStats::HandleCompressionRatioAtLevelPrefix, nullptr,
          nullptr}},
        {DB::Properties::kLevelStats,
         {false, &InternalStats::HandleLevelStats, nullptr, nullptr}},
        {DB::Properties::kStats,
         {false, &InternalStats::HandleStats, nullptr, nullptr}},
        {DB::Properties::kCFStats,
         {false, &InternalStats::HandleCFStats, nullptr,
          &InternalStats::HandleCFMapStats}},
        {DB::Properties::kCFStatsNoFileHistogram,
         {false, &InternalStats::HandleCFStatsNoFileHistogram, nullptr,
          nullptr}},
        {DB::Properties::kCFFileHistogram,
         {false, &InternalStats::HandleCFFileHistogram, nullptr, nullptr}},
        {DB::Properties::kDBStats,
         {false, &InternalStats::HandleDBStats, nullptr, nullptr}},
        {DB::Properties::kSSTables,
         {false, &InternalStats::HandleSsTables, nullptr, nullptr}},
        {DB::Properties::kAggregatedTableProperties,
         {false, &InternalStats::HandleAggregatedTableProperties, nullptr,
          nullptr}},
        {DB::Properties::kAggregatedTablePropertiesAtLevel,
         {false, &InternalStats::HandleAggregatedTablePropertiesAtLevel,
          nullptr, nullptr}},
        {DB::Properties::kNumImmutableMemTable,
         {false, nullptr, &InternalStats::HandleNumImmutableMemTable,
          nullptr}},
        {DB::Properties::kNumImmutableMemTableFlushed,
         {false, nullptr, &InternalStats::HandleNumImmutableMemTableFlushed,
          nullptr}},
        {DB::Properties::kMemTableFlushPending,
         {false, nullptr, &InternalStats::HandleMemTableFlushPending,
          nullptr}},
        {DB::Properties::kCompactionPending,
         {false, nullptr, &InternalStats::HandleCompactionPending, nullptr}},
        {DB::Properties::kBackgroundErrors,
         {false, nullptr, &InternalStats::HandleBackgroundErrors, nullptr}},
        {DB::Properties::kCurSizeActiveMemTable,
         {false, nullptr, &InternalStats::HandleCurSizeActiveMemTable,
          nullptr}},
        {DB::Properties::kCurSizeAllMemTables,
         {false, nullptr, &InternalStats::HandleCurSizeAllMemTables, nullptr}},
        {DB::Properties::kSizeAllMemTables,
         {false, nullptr, &InternalStats::HandleSizeAllMemTables, nullptr}},
        {DB::Properties::kNumEntriesActiveMemTable,
         {false, nullptr, &InternalStats::HandleNumEntriesActiveMemTable,
          nullptr}},
        {DB::Properties::kNumEntriesImmMemTables,
         {false, nullptr, &InternalStats::HandleNumEntriesImmMemTables,
          nullptr}},
        {DB::Properties::kNumDeletesActiveMemTable,
         {false, nullptr, &InternalStats::HandleNumDeletesActiveMemTable,
          nullptr}},
        {DB::Properties::kNumDeletesImmMemTables,
         {false, nullptr, &InternalStats::HandleNumDeletesImmMemTables,
          nullptr}},
        {DB::Properties::kEstimateNumKeys,
         {false, nullptr, &InternalStats::HandleEstimateNumKeys, nullptr}},
        {DB::Properties::kEstimateTableReadersMem,
         {true, nullptr, &InternalStats::HandleEstimateTableReadersMem,
          nullptr}},
        {DB::Properties::kIsFileDeletionsEnabled,
         {false, nullptr, &InternalStats::HandleIsFileDeletionsEnabled,
          nullptr}},
        {DB::Properties::kNumSnapshots,
         {false, nullptr, &InternalStats::HandleNumSnapshots, nullptr}},
        {DB::Properties::kOldestSnapshotTime,
         {false, nullptr, &InternalStats::HandleOldestSnapshotTime, nullptr}},
        {DB::Properties::kNumLiveVersions,
         {false, nullptr, &InternalStats::HandleNumLiveVersions, nullptr}},
        {DB::Properties::kCurrentSuperVersionNumber,
         {false, nullptr, &InternalStats::HandleCurrentSuperVersionNumber,
          nullptr}},
        {DB::Properties::kEstimateLiveDataSize,
         {true, nullptr, &InternalStats::HandleEstimateLiveDataSize, nullptr}},
        {DB::Properties::kMinLogNumberToKeep,
         {false, nullptr, &InternalStats::HandleMinLogNumberToKeep, nullptr}},
        {DB::Properties::kBaseLevel,
         {false, nullptr, &InternalStats::HandleBaseLevel, nullptr}},
        {DB::Properties::kTotalSstFilesSize,
         {false, nullptr, &InternalStats::HandleTotalSstFilesSize, nullptr}},
        {DB::Properties::kEstimatePendingCompactionBytes,
         {false, nullptr, &InternalStats::HandleEstimatePendingCompactionBytes,
          nullptr}},
        {DB::Properties::kNumRunningFlushes,
         {false, nullptr, &InternalStats::HandleNumRunningFlushes, nullptr}},
        {DB::Properties::kNumRunningCompactions,
         {false, nullptr, &InternalStats::HandleNumRunningCompactions,
          nullptr}},
        {DB::Properties::kActualDelayedWriteRate,
         {false, nullptr, &InternalStats::HandleActualDelayedWriteRate,
          nullptr}},
        {DB::Properties::kIsWriteStopped,
         {false, nullptr, &InternalStats::HandleIsWriteStopped, nullptr}},
};
+
+const DBPropertyInfo* GetPropertyInfo(const Slice& property) {
+  std::string ppt_name = GetPropertyNameAndArg(property).first.ToString();
+  auto ppt_info_iter = InternalStats::ppt_name_to_info.find(ppt_name);
+  if (ppt_info_iter == InternalStats::ppt_name_to_info.end()) {
+    return nullptr;
+  }
+  return &ppt_info_iter->second;
+}
+
// Dispatches a string-valued property request to its registered
// handle_string member, forwarding the trailing numeric argument (if any)
// parsed from the property name.
bool InternalStats::GetStringProperty(const DBPropertyInfo& property_info,
                                      const Slice& property,
                                      std::string* value) {
  assert(value != nullptr);
  assert(property_info.handle_string != nullptr);
  Slice arg = GetPropertyNameAndArg(property).second;
  return (this->*(property_info.handle_string))(value, arg);
}

// Dispatches a map-valued property request to its registered handle_map
// member.
bool InternalStats::GetMapProperty(const DBPropertyInfo& property_info,
                                   const Slice& property,
                                   std::map<std::string, double>* value) {
  assert(value != nullptr);
  assert(property_info.handle_map != nullptr);
  return (this->*(property_info.handle_map))(value);
}

// Dispatches an integer-valued property that is computed while holding the
// DB mutex (asserted below); no Version is passed in this mode.
bool InternalStats::GetIntProperty(const DBPropertyInfo& property_info,
                                   uint64_t* value, DBImpl* db) {
  assert(value != nullptr);
  assert(property_info.handle_int != nullptr &&
         !property_info.need_out_of_mutex);
  db->mutex_.AssertHeld();
  return (this->*(property_info.handle_int))(value, db, nullptr /* version */);
}

// Dispatches an integer-valued property flagged need_out_of_mutex: the
// handler operates on the supplied Version rather than the live DB state.
bool InternalStats::GetIntPropertyOutOfMutex(
    const DBPropertyInfo& property_info, Version* version, uint64_t* value) {
  assert(value != nullptr);
  assert(property_info.handle_int != nullptr &&
         property_info.need_out_of_mutex);
  return (this->*(property_info.handle_int))(value, nullptr /* db */, version);
}
+
+bool InternalStats::HandleNumFilesAtLevel(std::string* value, Slice suffix) {
+  uint64_t level;
+  const auto* vstorage = cfd_->current()->storage_info();
+  bool ok = ConsumeDecimalNumber(&suffix, &level) && suffix.empty();
+  if (!ok || static_cast<int>(level) >= number_levels_) {
+    return false;
+  } else {
+    char buf[100];
+    snprintf(buf, sizeof(buf), "%d",
+             vstorage->NumLevelFiles(static_cast<int>(level)));
+    *value = buf;
+    return true;
+  }
+}
+
// Handles "rocksdb.compression-ratio-at-level<N>": writes the estimated
// compression ratio of level N into `value`. Returns false if the suffix is
// not a valid in-range level. Note the bounds check is done in uint64_t
// space, so oversized level arguments cannot wrap to a negative int.
bool InternalStats::HandleCompressionRatioAtLevelPrefix(std::string* value,
                                                        Slice suffix) {
  uint64_t level;
  const auto* vstorage = cfd_->current()->storage_info();
  bool ok = ConsumeDecimalNumber(&suffix, &level) && suffix.empty();
  if (!ok || level >= static_cast<uint64_t>(number_levels_)) {
    return false;
  }
  *value = ToString(
      vstorage->GetEstimatedCompressionRatioAtLevel(static_cast<int>(level)));
  return true;
}
+
+bool InternalStats::HandleLevelStats(std::string* value, Slice suffix) {
+  char buf[1000];
+  const auto* vstorage = cfd_->current()->storage_info();
+  snprintf(buf, sizeof(buf),
+           "Level Files Size(MB)\n"
+           "--------------------\n");
+  value->append(buf);
+
+  for (int level = 0; level < number_levels_; level++) {
+    snprintf(buf, sizeof(buf), "%3d %8d %8.0f\n", level,
+             vstorage->NumLevelFiles(level),
+             vstorage->NumLevelBytes(level) / kMB);
+    value->append(buf);
+  }
+  return true;
+}
+
+bool InternalStats::HandleStats(std::string* value, Slice suffix) {
+  if (!HandleCFStats(value, suffix)) {
+    return false;
+  }
+  if (!HandleDBStats(value, suffix)) {
+    return false;
+  }
+  return true;
+}
+
// Map-valued variant of the CF stats: fills `cf_stats` via DumpCFMapStats.
bool InternalStats::HandleCFMapStats(std::map<std::string, double>* cf_stats) {
  DumpCFMapStats(cf_stats);
  return true;
}

// Handles "rocksdb.cfstats": full column-family stats dump.
bool InternalStats::HandleCFStats(std::string* value, Slice suffix) {
  DumpCFStats(value);
  return true;
}

// Handles "rocksdb.cfstats-no-file-histogram": CF stats without the
// per-file read-latency histograms.
bool InternalStats::HandleCFStatsNoFileHistogram(std::string* value,
                                                 Slice suffix) {
  DumpCFStatsNoFileHistogram(value);
  return true;
}

// Handles "rocksdb.cf-file-histogram": only the per-file histograms.
bool InternalStats::HandleCFFileHistogram(std::string* value, Slice suffix) {
  DumpCFFileHistogram(value);
  return true;
}

// Handles "rocksdb.dbstats": DB-wide stats dump.
bool InternalStats::HandleDBStats(std::string* value, Slice suffix) {
  DumpDBStats(value);
  return true;
}

// Handles "rocksdb.sstables": a debug listing of the current version's
// SST files.
bool InternalStats::HandleSsTables(std::string* value, Slice suffix) {
  auto* current = cfd_->current();
  *value = current->DebugString(true, true);
  return true;
}
+
// Handles "rocksdb.aggregated-table-properties": table properties summed
// over all levels. Returns false if aggregation fails.
bool InternalStats::HandleAggregatedTableProperties(std::string* value,
                                                    Slice suffix) {
  std::shared_ptr<const TableProperties> tp;
  auto s = cfd_->current()->GetAggregatedTableProperties(&tp);
  if (!s.ok()) {
    return false;
  }
  *value = tp->ToString();
  return true;
}
+
+bool InternalStats::HandleAggregatedTablePropertiesAtLevel(std::string* value,
+                                                           Slice suffix) {
+  uint64_t level;
+  bool ok = ConsumeDecimalNumber(&suffix, &level) && suffix.empty();
+  if (!ok || static_cast<int>(level) >= number_levels_) {
+    return false;
+  }
+  std::shared_ptr<const TableProperties> tp;
+  auto s = cfd_->current()->GetAggregatedTableProperties(
+      &tp, static_cast<int>(level));
+  if (!s.ok()) {
+    return false;
+  }
+  *value = tp->ToString();
+  return true;
+}
+
// Number of immutable memtables not yet flushed.
bool InternalStats::HandleNumImmutableMemTable(uint64_t* value, DBImpl* db,
                                               Version* version) {
  *value = cfd_->imm()->NumNotFlushed();
  return true;
}

// Number of immutable memtables already flushed (but still referenced).
bool InternalStats::HandleNumImmutableMemTableFlushed(uint64_t* value,
                                                      DBImpl* db,
                                                      Version* version) {
  *value = cfd_->imm()->NumFlushed();
  return true;
}

bool InternalStats::HandleMemTableFlushPending(uint64_t* value, DBImpl* db,
                                               Version* version) {
  // Return number of mem tables that are ready to flush (made immutable)
  *value = (cfd_->imm()->IsFlushPending() ? 1 : 0);
  return true;
}

// Number of flushes currently executing in the background.
bool InternalStats::HandleNumRunningFlushes(uint64_t* value, DBImpl* db,
                                            Version* version) {
  *value = db->num_running_flushes();
  return true;
}

bool InternalStats::HandleCompactionPending(uint64_t* value, DBImpl* db,
                                            Version* version) {
  // 1 if the system already determines at least one compaction is needed.
  // 0 otherwise,
  const auto* vstorage = cfd_->current()->storage_info();
  *value = (cfd_->compaction_picker()->NeedsCompaction(vstorage) ? 1 : 0);
  return true;
}

// Number of compactions currently executing in the background.
bool InternalStats::HandleNumRunningCompactions(uint64_t* value, DBImpl* db,
                                                Version* version) {
  *value = db->num_running_compactions_;
  return true;
}

bool InternalStats::HandleBackgroundErrors(uint64_t* value, DBImpl* db,
                                           Version* version) {
  // Accumulated number of errors in background flushes or compactions.
  *value = GetBackgroundErrorCount();
  return true;
}
+
bool InternalStats::HandleCurSizeActiveMemTable(uint64_t* value, DBImpl* db,
                                                Version* version) {
  // Current size of the active memtable
  *value = cfd_->mem()->ApproximateMemoryUsage();
  return true;
}

bool InternalStats::HandleCurSizeAllMemTables(uint64_t* value, DBImpl* db,
                                              Version* version) {
  // Current size of the active memtable + immutable memtables
  *value = cfd_->mem()->ApproximateMemoryUsage() +
           cfd_->imm()->ApproximateUnflushedMemTablesMemoryUsage();
  return true;
}

// Size of the active memtable plus ALL immutable memtables, including ones
// already flushed but still held in memory.
bool InternalStats::HandleSizeAllMemTables(uint64_t* value, DBImpl* db,
                                           Version* version) {
  *value = cfd_->mem()->ApproximateMemoryUsage() +
           cfd_->imm()->ApproximateMemoryUsage();
  return true;
}

bool InternalStats::HandleNumEntriesActiveMemTable(uint64_t* value, DBImpl* db,
                                                   Version* version) {
  // Current number of entries in the active memtable
  *value = cfd_->mem()->num_entries();
  return true;
}

bool InternalStats::HandleNumEntriesImmMemTables(uint64_t* value, DBImpl* db,
                                                 Version* version) {
  // Current number of entries in the immutable memtables
  *value = cfd_->imm()->current()->GetTotalNumEntries();
  return true;
}

bool InternalStats::HandleNumDeletesActiveMemTable(uint64_t* value, DBImpl* db,
                                                   Version* version) {
  // Current number of delete entries in the active memtable
  *value = cfd_->mem()->num_deletes();
  return true;
}

bool InternalStats::HandleNumDeletesImmMemTables(uint64_t* value, DBImpl* db,
                                                 Version* version) {
  // Current number of delete entries in the immutable memtables
  *value = cfd_->imm()->current()->GetTotalNumDeletes();
  return true;
}
+
// Estimates the number of live keys: entries in memtables plus estimated
// active keys in SST files, minus twice the number of deletes (each delete
// is itself counted as an entry and is presumed to cancel one existing key).
// Clamped at zero to avoid unsigned underflow when deletes dominate.
bool InternalStats::HandleEstimateNumKeys(uint64_t* value, DBImpl* db,
                                          Version* version) {
  // Estimate number of entries in the column family:
  // Use estimated entries in tables + total entries in memtables.
  const auto* vstorage = cfd_->current()->storage_info();
  uint64_t estimate_keys = cfd_->mem()->num_entries() +
                           cfd_->imm()->current()->GetTotalNumEntries() +
                           vstorage->GetEstimatedActiveKeys();
  uint64_t estimate_deletes =
      cfd_->mem()->num_deletes() + cfd_->imm()->current()->GetTotalNumDeletes();
  *value = estimate_keys > estimate_deletes * 2
               ? estimate_keys - (estimate_deletes * 2)
               : 0;
  return true;
}
+
+bool InternalStats::HandleNumSnapshots(uint64_t* value, DBImpl* db,
+                                       Version* version) {
+  *value = db->snapshots().count();
+  return true;
+}
+
// Property handler: reports the creation time of the oldest live snapshot.
bool InternalStats::HandleOldestSnapshotTime(uint64_t* value, DBImpl* db,
                                             Version* version) {
  *value = static_cast<uint64_t>(db->snapshots().GetOldestSnapshotTime());
  return true;
}
+
// Property handler: reports how many Version objects are still referenced for
// this column family (e.g. pinned by iterators).
bool InternalStats::HandleNumLiveVersions(uint64_t* value, DBImpl* db,
                                          Version* version) {
  *value = cfd_->GetNumLiveVersions();
  return true;
}
+
// Property handler: reports the current super-version sequence number of the
// column family (bumped whenever the super version changes).
bool InternalStats::HandleCurrentSuperVersionNumber(uint64_t* value, DBImpl* db,
                                                    Version* version) {
  *value = cfd_->GetSuperVersionNumber();
  return true;
}
+
// Property handler: reports whether obsolete-file deletion is currently
// enabled on the DB (non-zero means enabled).
bool InternalStats::HandleIsFileDeletionsEnabled(uint64_t* value, DBImpl* db,
                                                 Version* version) {
  *value = db->IsFileDeletionsEnabled();
  return true;
}
+
// Property handler: reports the base level (the level L0 data compacts into)
// from the current version's storage info.
bool InternalStats::HandleBaseLevel(uint64_t* value, DBImpl* db,
                                    Version* version) {
  const auto* vstorage = cfd_->current()->storage_info();
  *value = vstorage->base_level();
  return true;
}
+
// Property handler: reports the total size of all SST files belonging to this
// column family.
bool InternalStats::HandleTotalSstFilesSize(uint64_t* value, DBImpl* db,
                                            Version* version) {
  *value = cfd_->GetTotalSstFilesSize();
  return true;
}
+
// Property handler: reports the estimated number of bytes compaction still
// needs to rewrite to bring the LSM tree back into shape.
bool InternalStats::HandleEstimatePendingCompactionBytes(uint64_t* value,
                                                         DBImpl* db,
                                                         Version* version) {
  const auto* vstorage = cfd_->current()->storage_info();
  *value = vstorage->estimated_compaction_needed_bytes();
  return true;
}
+
// Property handler: reports memory used by table readers for the given
// version. `version` may be null (property retrieved without a pinned
// version), in which case 0 is reported.
bool InternalStats::HandleEstimateTableReadersMem(uint64_t* value, DBImpl* db,
                                                  Version* version) {
  *value = (version == nullptr) ? 0 : version->GetMemoryUsageByTableReaders();
  return true;
}
+
// Property handler: reports an estimate of the amount of live (non-obsolete)
// data in the column family.
bool InternalStats::HandleEstimateLiveDataSize(uint64_t* value, DBImpl* db,
                                               Version* version) {
  const auto* vstorage = cfd_->current()->storage_info();
  *value = vstorage->EstimateLiveDataSize();
  return true;
}
+
// Property handler: reports the minimum WAL number that must still be kept
// (older logs are safe to delete).
bool InternalStats::HandleMinLogNumberToKeep(uint64_t* value, DBImpl* db,
                                             Version* version) {
  *value = db->MinLogNumberToKeep();
  return true;
}
+
+bool InternalStats::HandleActualDelayedWriteRate(uint64_t* value, DBImpl* db,
+                                                 Version* version) {
+  const WriteController& wc = db->write_controller();
+  if (!wc.NeedsDelay()) {
+    *value = 0;
+  } else {
+    *value = wc.delayed_write_rate();
+  }
+  return true;
+}
+
// Property handler: reports 1 if the write controller has fully stopped
// writes, 0 otherwise.
bool InternalStats::HandleIsWriteStopped(uint64_t* value, DBImpl* db,
                                         Version* version) {
  *value = db->write_controller().IsStopped() ? 1 : 0;
  return true;
}
+
+void InternalStats::DumpDBStats(std::string* value) {
+  char buf[1000];
+  // DB-level stats, only available from default column family
+  double seconds_up = (env_->NowMicros() - started_at_ + 1) / kMicrosInSec;
+  double interval_seconds_up = seconds_up - db_stats_snapshot_.seconds_up;
+  snprintf(buf, sizeof(buf),
+           "\n** DB Stats **\nUptime(secs): %.1f total, %.1f interval\n",
+           seconds_up, interval_seconds_up);
+  value->append(buf);
+  // Cumulative
+  uint64_t user_bytes_written = GetDBStats(InternalStats::BYTES_WRITTEN);
+  uint64_t num_keys_written = GetDBStats(InternalStats::NUMBER_KEYS_WRITTEN);
+  uint64_t write_other = GetDBStats(InternalStats::WRITE_DONE_BY_OTHER);
+  uint64_t write_self = GetDBStats(InternalStats::WRITE_DONE_BY_SELF);
+  uint64_t wal_bytes = GetDBStats(InternalStats::WAL_FILE_BYTES);
+  uint64_t wal_synced = GetDBStats(InternalStats::WAL_FILE_SYNCED);
+  uint64_t write_with_wal = GetDBStats(InternalStats::WRITE_WITH_WAL);
+  uint64_t write_stall_micros = GetDBStats(InternalStats::WRITE_STALL_MICROS);
+
+  const int kHumanMicrosLen = 32;
+  char human_micros[kHumanMicrosLen];
+
+  // Data
+  // writes: total number of write requests.
+  // keys: total number of key updates issued by all the write requests
+  // commit groups: number of group commits issued to the DB. Each group can
+  //                contain one or more writes.
+  // so writes/keys is the average number of put in multi-put or put
+  // writes/groups is the average group commit size.
+  //
+  // The format is the same for interval stats.
+  snprintf(buf, sizeof(buf),
+           "Cumulative writes: %s writes, %s keys, %s commit groups, "
+           "%.1f writes per commit group, ingest: %.2f GB, %.2f MB/s\n",
+           NumberToHumanString(write_other + write_self).c_str(),
+           NumberToHumanString(num_keys_written).c_str(),
+           NumberToHumanString(write_self).c_str(),
+           (write_other + write_self) / static_cast<double>(write_self + 1),
+           user_bytes_written / kGB, user_bytes_written / kMB / seconds_up);
+  value->append(buf);
+  // WAL
+  snprintf(buf, sizeof(buf),
+           "Cumulative WAL: %s writes, %s syncs, "
+           "%.2f writes per sync, written: %.2f GB, %.2f MB/s\n",
+           NumberToHumanString(write_with_wal).c_str(),
+           NumberToHumanString(wal_synced).c_str(),
+           write_with_wal / static_cast<double>(wal_synced + 1),
+           wal_bytes / kGB, wal_bytes / kMB / seconds_up);
+  value->append(buf);
+  // Stall
+  AppendHumanMicros(write_stall_micros, human_micros, kHumanMicrosLen, true);
+  snprintf(buf, sizeof(buf),
+           "Cumulative stall: %s, %.1f percent\n",
+           human_micros,
+           // 10000 = divide by 1M to get secs, then multiply by 100 for pct
+           write_stall_micros / 10000.0 / std::max(seconds_up, 0.001));
+  value->append(buf);
+
+  // Interval
+  uint64_t interval_write_other = write_other - db_stats_snapshot_.write_other;
+  uint64_t interval_write_self = write_self - db_stats_snapshot_.write_self;
+  uint64_t interval_num_keys_written =
+      num_keys_written - db_stats_snapshot_.num_keys_written;
+  snprintf(buf, sizeof(buf),
+           "Interval writes: %s writes, %s keys, %s commit groups, "
+           "%.1f writes per commit group, ingest: %.2f MB, %.2f MB/s\n",
+           NumberToHumanString(
+               interval_write_other + interval_write_self).c_str(),
+           NumberToHumanString(interval_num_keys_written).c_str(),
+           NumberToHumanString(interval_write_self).c_str(),
+           static_cast<double>(interval_write_other + interval_write_self) /
+               (interval_write_self + 1),
+           (user_bytes_written - db_stats_snapshot_.ingest_bytes) / kMB,
+           (user_bytes_written - db_stats_snapshot_.ingest_bytes) / kMB /
+               std::max(interval_seconds_up, 0.001)),
+  value->append(buf);
+
+  uint64_t interval_write_with_wal =
+      write_with_wal - db_stats_snapshot_.write_with_wal;
+  uint64_t interval_wal_synced = wal_synced - db_stats_snapshot_.wal_synced;
+  uint64_t interval_wal_bytes = wal_bytes - db_stats_snapshot_.wal_bytes;
+
+  snprintf(buf, sizeof(buf),
+           "Interval WAL: %s writes, %s syncs, "
+           "%.2f writes per sync, written: %.2f MB, %.2f MB/s\n",
+           NumberToHumanString(interval_write_with_wal).c_str(),
+           NumberToHumanString(interval_wal_synced).c_str(),
+           interval_write_with_wal /
+              static_cast<double>(interval_wal_synced + 1),
+           interval_wal_bytes / kGB,
+           interval_wal_bytes / kMB / std::max(interval_seconds_up, 0.001));
+  value->append(buf);
+
+  // Stall
+  AppendHumanMicros(
+      write_stall_micros - db_stats_snapshot_.write_stall_micros,
+      human_micros, kHumanMicrosLen, true);
+  snprintf(buf, sizeof(buf),
+           "Interval stall: %s, %.1f percent\n",
+           human_micros,
+           // 10000 = divide by 1M to get secs, then multiply by 100 for pct
+           (write_stall_micros - db_stats_snapshot_.write_stall_micros) /
+               10000.0 / std::max(interval_seconds_up, 0.001));
+  value->append(buf);
+
+  db_stats_snapshot_.seconds_up = seconds_up;
+  db_stats_snapshot_.ingest_bytes = user_bytes_written;
+  db_stats_snapshot_.write_other = write_other;
+  db_stats_snapshot_.write_self = write_self;
+  db_stats_snapshot_.num_keys_written = num_keys_written;
+  db_stats_snapshot_.wal_bytes = wal_bytes;
+  db_stats_snapshot_.wal_synced = wal_synced;
+  db_stats_snapshot_.write_with_wal = write_with_wal;
+  db_stats_snapshot_.write_stall_micros = write_stall_micros;
+}
+
+/**
+ * Dump Compaction Level stats to a map of stat name to value in double.
+ * The level in stat name is represented with a prefix "Lx" where "x"
+ * is the level number. A special level "Sum" represents the sum of a stat
+ * for all levels.
+ */
+void InternalStats::DumpCFMapStats(std::map<std::string, double>* cf_stats) {
+  CompactionStats compaction_stats_sum(0);
+  std::map<int, std::map<LevelStatType, double>> levels_stats;
+  DumpCFMapStats(&levels_stats, &compaction_stats_sum);
+  for (auto const& level_ent : levels_stats) {
+    auto level_str =
+        level_ent.first == -1 ? "Sum" : "L" + ToString(level_ent.first);
+    for (auto const& stat_ent : level_ent.second) {
+      auto stat_type = stat_ent.first;
+      auto key_str =
+          level_str + "." +
+          InternalStats::compaction_level_stats.at(stat_type).property_name;
+      (*cf_stats)[key_str] = stat_ent.second;
+    }
+  }
+}
+
// Fills *levels_stats with one LevelStatType->value map per populated level
// and accumulates the per-level compaction stats into *compaction_stats_sum.
// The summary row is stored under key -1 ("Sum").
void InternalStats::DumpCFMapStats(
    std::map<int, std::map<LevelStatType, double>>* levels_stats,
    CompactionStats* compaction_stats_sum) {
  const VersionStorageInfo* vstorage = cfd_->current()->storage_info();

  // FIFO compaction keeps everything in one level, so only level 0 carries a
  // compaction score; otherwise all levels but the last are scored.
  int num_levels_to_check =
      (cfd_->ioptions()->compaction_style != kCompactionStyleFIFO)
          ? vstorage->num_levels() - 1
          : 1;

  // Compaction scores are sorted based on its value. Restore them to the
  // level order
  std::vector<double> compaction_score(number_levels_, 0);
  for (int i = 0; i < num_levels_to_check; ++i) {
    compaction_score[vstorage->CompactionScoreLevel(i)] =
        vstorage->CompactionScore(i);
  }
  // Count # of files being compacted for each level
  std::vector<int> files_being_compacted(number_levels_, 0);
  for (int level = 0; level < number_levels_; ++level) {
    for (auto* f : vstorage->LevelFiles(level)) {
      if (f->being_compacted) {
        ++files_being_compacted[level];
      }
    }
  }

  int total_files = 0;
  int total_files_being_compacted = 0;
  double total_file_size = 0;
  uint64_t flush_ingest = cf_stats_value_[BYTES_FLUSHED];
  uint64_t add_file_ingest = cf_stats_value_[BYTES_INGESTED_ADD_FILE];
  uint64_t curr_ingest = flush_ingest + add_file_ingest;
  for (int level = 0; level < number_levels_; level++) {
    int files = vstorage->NumLevelFiles(level);
    total_files += files;
    total_files_being_compacted += files_being_compacted[level];
    // Only emit a row for levels that have files or have seen compaction work.
    if (comp_stats_[level].micros > 0 || files > 0) {
      compaction_stats_sum->Add(comp_stats_[level]);
      total_file_size += vstorage->NumLevelBytes(level);
      uint64_t input_bytes;
      if (level == 0) {
        // L0's input is what was ingested (flush + AddFile), not compaction
        // reads.
        input_bytes = curr_ingest;
      } else {
        input_bytes = comp_stats_[level].bytes_read_non_output_levels;
      }
      double w_amp =
          (input_bytes == 0)
              ? 0.0
              : static_cast<double>(comp_stats_[level].bytes_written) /
                    input_bytes;
      std::map<LevelStatType, double> level_stats;
      PrepareLevelStats(&level_stats, files, files_being_compacted[level],
                        static_cast<double>(vstorage->NumLevelBytes(level)),
                        compaction_score[level], w_amp, comp_stats_[level]);
      (*levels_stats)[level] = level_stats;
    }
  }
  // Cumulative summary ("+ 1" avoids division by zero before any ingest)
  double w_amp = compaction_stats_sum->bytes_written /
                 static_cast<double>(curr_ingest + 1);
  // Stats summary across levels
  std::map<LevelStatType, double> sum_stats;
  PrepareLevelStats(&sum_stats, total_files, total_files_being_compacted,
                    total_file_size, 0, w_amp, *compaction_stats_sum);
  (*levels_stats)[-1] = sum_stats;  //  -1 is for the Sum level
}
+
// Appends the full per-column-family stats section to *value: the level table
// plus the per-level file read latency histograms.
void InternalStats::DumpCFStats(std::string* value) {
  DumpCFStatsNoFileHistogram(value);
  DumpCFFileHistogram(value);
}
+
+void InternalStats::DumpCFStatsNoFileHistogram(std::string* value) {
+  char buf[2000];
+  // Per-ColumnFamily stats
+  PrintLevelStatsHeader(buf, sizeof(buf), cfd_->GetName());
+  value->append(buf);
+
+  // Print stats for each level
+  std::map<int, std::map<LevelStatType, double>> levels_stats;
+  CompactionStats compaction_stats_sum(0);
+  DumpCFMapStats(&levels_stats, &compaction_stats_sum);
+  for (int l = 0; l < number_levels_; ++l) {
+    if (levels_stats.find(l) != levels_stats.end()) {
+      PrintLevelStats(buf, sizeof(buf), "L" + ToString(l), levels_stats[l]);
+      value->append(buf);
+    }
+  }
+
+  // Print sum of level stats
+  PrintLevelStats(buf, sizeof(buf), "Sum", levels_stats[-1]);
+  value->append(buf);
+
+  uint64_t flush_ingest = cf_stats_value_[BYTES_FLUSHED];
+  uint64_t add_file_ingest = cf_stats_value_[BYTES_INGESTED_ADD_FILE];
+  uint64_t ingest_files_addfile = cf_stats_value_[INGESTED_NUM_FILES_TOTAL];
+  uint64_t ingest_l0_files_addfile =
+      cf_stats_value_[INGESTED_LEVEL0_NUM_FILES_TOTAL];
+  uint64_t ingest_keys_addfile = cf_stats_value_[INGESTED_NUM_KEYS_TOTAL];
+  // Cumulative summary
+  uint64_t total_stall_count =
+      cf_stats_count_[LEVEL0_SLOWDOWN_TOTAL] +
+      cf_stats_count_[LEVEL0_NUM_FILES_TOTAL] +
+      cf_stats_count_[SOFT_PENDING_COMPACTION_BYTES_LIMIT] +
+      cf_stats_count_[HARD_PENDING_COMPACTION_BYTES_LIMIT] +
+      cf_stats_count_[MEMTABLE_COMPACTION] + 
cf_stats_count_[MEMTABLE_SLOWDOWN];
+  // Interval summary
+  uint64_t interval_flush_ingest =
+      flush_ingest - cf_stats_snapshot_.ingest_bytes_flush;
+  uint64_t interval_add_file_inget =
+      add_file_ingest - cf_stats_snapshot_.ingest_bytes_addfile;
+  uint64_t interval_ingest =
+      interval_flush_ingest + interval_add_file_inget + 1;
+  CompactionStats interval_stats(compaction_stats_sum);
+  interval_stats.Subtract(cf_stats_snapshot_.comp_stats);
+  double w_amp =
+      interval_stats.bytes_written / static_cast<double>(interval_ingest);
+  PrintLevelStats(buf, sizeof(buf), "Int", 0, 0, 0, 0, w_amp, interval_stats);
+  value->append(buf);
+
+  double seconds_up = (env_->NowMicros() - started_at_ + 1) / kMicrosInSec;
+  double interval_seconds_up = seconds_up - cf_stats_snapshot_.seconds_up;
+  snprintf(buf, sizeof(buf), "Uptime(secs): %.1f total, %.1f interval\n",
+           seconds_up, interval_seconds_up);
+  value->append(buf);
+  snprintf(buf, sizeof(buf), "Flush(GB): cumulative %.3f, interval %.3f\n",
+           flush_ingest / kGB, interval_flush_ingest / kGB);
+  value->append(buf);
+  snprintf(buf, sizeof(buf), "AddFile(GB): cumulative %.3f, interval %.3f\n",
+           add_file_ingest / kGB, interval_add_file_inget / kGB);
+  value->append(buf);
+
+  uint64_t interval_ingest_files_addfile =
+      ingest_files_addfile - cf_stats_snapshot_.ingest_files_addfile;
+  snprintf(buf, sizeof(buf), "AddFile(Total Files): cumulative %" PRIu64
+                             ", interval %" PRIu64 "\n",
+           ingest_files_addfile, interval_ingest_files_addfile);
+  value->append(buf);
+
+  uint64_t interval_ingest_l0_files_addfile =
+      ingest_l0_files_addfile - cf_stats_snapshot_.ingest_l0_files_addfile;
+  snprintf(buf, sizeof(buf),
+           "AddFile(L0 Files): cumulative %" PRIu64 ", interval %" PRIu64 "\n",
+           ingest_l0_files_addfile, interval_ingest_l0_files_addfile);
+  value->append(buf);
+
+  uint64_t interval_ingest_keys_addfile =
+      ingest_keys_addfile - cf_stats_snapshot_.ingest_keys_addfile;
+  snprintf(buf, sizeof(buf),
+           "AddFile(Keys): cumulative %" PRIu64 ", interval %" PRIu64 "\n",
+           ingest_keys_addfile, interval_ingest_keys_addfile);
+  value->append(buf);
+
+  // Compact
+  uint64_t compact_bytes_read = 0;
+  uint64_t compact_bytes_write = 0;
+  uint64_t compact_micros = 0;
+  for (int level = 0; level < number_levels_; level++) {
+    compact_bytes_read += comp_stats_[level].bytes_read_output_level +
+                          comp_stats_[level].bytes_read_non_output_levels;
+    compact_bytes_write += comp_stats_[level].bytes_written;
+    compact_micros += comp_stats_[level].micros;
+  }
+
+  snprintf(buf, sizeof(buf),
+           "Cumulative compaction: %.2f GB write, %.2f MB/s write, "
+           "%.2f GB read, %.2f MB/s read, %.1f seconds\n",
+           compact_bytes_write / kGB, compact_bytes_write / kMB / seconds_up,
+           compact_bytes_read / kGB, compact_bytes_read / kMB / seconds_up,
+           compact_micros / kMicrosInSec);
+  value->append(buf);
+
+  // Compaction interval
+  uint64_t interval_compact_bytes_write =
+      compact_bytes_write - cf_stats_snapshot_.compact_bytes_write;
+  uint64_t interval_compact_bytes_read =
+      compact_bytes_read - cf_stats_snapshot_.compact_bytes_read;
+  uint64_t interval_compact_micros =
+      compact_micros - cf_stats_snapshot_.compact_micros;
+
+  snprintf(
+      buf, sizeof(buf),
+      "Interval compaction: %.2f GB write, %.2f MB/s write, "
+      "%.2f GB read, %.2f MB/s read, %.1f seconds\n",
+      interval_compact_bytes_write / kGB,
+      interval_compact_bytes_write / kMB / std::max(interval_seconds_up, 
0.001),
+      interval_compact_bytes_read / kGB,
+      interval_compact_bytes_read / kMB / std::max(interval_seconds_up, 0.001),
+      interval_compact_micros / kMicrosInSec);
+  value->append(buf);
+  cf_stats_snapshot_.compact_bytes_write = compact_bytes_write;
+  cf_stats_snapshot_.compact_bytes_read = compact_bytes_read;
+  cf_stats_snapshot_.compact_micros = compact_micros;
+
+  snprintf(buf, sizeof(buf), "Stalls(count): %" PRIu64
+                             " level0_slowdown, "
+                             "%" PRIu64
+                             " level0_slowdown_with_compaction, "
+                             "%" PRIu64
+                             " level0_numfiles, "
+                             "%" PRIu64
+                             " level0_numfiles_with_compaction, "
+                             "%" PRIu64
+                             " stop for pending_compaction_bytes, "
+                             "%" PRIu64
+                             " slowdown for pending_compaction_bytes, "
+                             "%" PRIu64
+                             " memtable_compaction, "
+                             "%" PRIu64
+                             " memtable_slowdown, "
+                             "interval %" PRIu64 " total count\n",
+           cf_stats_count_[LEVEL0_SLOWDOWN_TOTAL],
+           cf_stats_count_[LEVEL0_SLOWDOWN_WITH_COMPACTION],
+           cf_stats_count_[LEVEL0_NUM_FILES_TOTAL],
+           cf_stats_count_[LEVEL0_NUM_FILES_WITH_COMPACTION],
+           cf_stats_count_[HARD_PENDING_COMPACTION_BYTES_LIMIT],
+           cf_stats_count_[SOFT_PENDING_COMPACTION_BYTES_LIMIT],
+           cf_stats_count_[MEMTABLE_COMPACTION],
+           cf_stats_count_[MEMTABLE_SLOWDOWN],
+           total_stall_count - cf_stats_snapshot_.stall_count);
+  value->append(buf);
+
+  cf_stats_snapshot_.seconds_up = seconds_up;
+  cf_stats_snapshot_.ingest_bytes_flush = flush_ingest;
+  cf_stats_snapshot_.ingest_bytes_addfile = add_file_ingest;
+  cf_stats_snapshot_.ingest_files_addfile = ingest_files_addfile;
+  cf_stats_snapshot_.ingest_l0_files_addfile = ingest_l0_files_addfile;
+  cf_stats_snapshot_.ingest_keys_addfile = ingest_keys_addfile;
+  cf_stats_snapshot_.comp_stats = compaction_stats_sum;
+  cf_stats_snapshot_.stall_count = total_stall_count;
+}
+
// Appends the per-level file read latency histograms (micros) for this column
// family to *value. Levels with no recorded reads are skipped.
void InternalStats::DumpCFFileHistogram(std::string* value) {
  char buf[2000];
  snprintf(buf, sizeof(buf),
           "\n** File Read Latency Histogram By Level [%s] **\n",
           cfd_->GetName().c_str());
  value->append(buf);

  for (int level = 0; level < number_levels_; level++) {
    if (!file_read_latency_[level].Empty()) {
      // Histogram ToString() output can be long; use a larger buffer.
      char buf2[5000];
      snprintf(buf2, sizeof(buf2),
               "** Level %d read latency histogram (micros):\n%s\n", level,
               file_read_latency_[level].ToString().c_str());
      value->append(buf2);
    }
  }
}
+
+#else
+
// ROCKSDB_LITE stub: property lookup is not supported, so every property name
// resolves to "unknown".
const DBPropertyInfo* GetPropertyInfo(const Slice& property) { return nullptr; }
+
+#endif  // !ROCKSDB_LITE
+
+}  // namespace rocksdb

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/db/internal_stats.h
----------------------------------------------------------------------
diff --git a/thirdparty/rocksdb/db/internal_stats.h 
b/thirdparty/rocksdb/db/internal_stats.h
new file mode 100644
index 0000000..1dd393f
--- /dev/null
+++ b/thirdparty/rocksdb/db/internal_stats.h
@@ -0,0 +1,588 @@
+//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+//  This source code is licensed under both the GPLv2 (found in the
+//  COPYING file in the root directory) and Apache 2.0 License
+//  (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+
+#pragma once
+#include <map>
+#include <string>
+#include <vector>
+
+#include "db/version_set.h"
+
+class ColumnFamilyData;
+
+namespace rocksdb {
+
+class MemTableList;
+class DBImpl;
+
// Config for retrieving a property's value. Exactly one of the handler
// members below is non-null for a given property.
struct DBPropertyInfo {
  // True if the property can/should be read without holding the DB mutex.
  bool need_out_of_mutex;

  // gcc had an internal error for initializing union of pointer-to-member-
  // functions. Workaround is to populate exactly one of the following function
  // pointers with a non-nullptr value.

  // @param value Value-result argument for storing the property's string value
  // @param suffix Argument portion of the property. For example, suffix would
  //      be "5" for the property "rocksdb.num-files-at-level5". So far, only
  //      certain string properties take an argument.
  bool (InternalStats::*handle_string)(std::string* value, Slice suffix);

  // @param value Value-result argument for storing the property's uint64 value
  // @param db Many of the int properties rely on DBImpl methods.
  // @param version Version is needed in case the property is retrieved without
  //      holding db mutex, which is only supported for int properties.
  bool (InternalStats::*handle_int)(uint64_t* value, DBImpl* db,
                                    Version* version);
  // Handler for properties whose value is a map of stat name to double.
  bool (InternalStats::*handle_map)(
      std::map<std::string, double>* compaction_stats);
};
+
+extern const DBPropertyInfo* GetPropertyInfo(const Slice& property);
+
+#ifndef ROCKSDB_LITE
+#undef SCORE
// Identifiers for the per-level compaction statistics columns; the order
// matches the columns printed in the compaction stats table.
enum class LevelStatType {
  INVALID = 0,
  NUM_FILES,
  COMPACTED_FILES,
  SIZE_BYTES,
  SCORE,
  READ_GB,
  RN_GB,
  RNP1_GB,
  WRITE_GB,
  W_NEW_GB,
  MOVED_GB,
  WRITE_AMP,
  READ_MBPS,
  WRITE_MBPS,
  COMP_SEC,
  COMP_COUNT,
  AVG_SEC,
  KEY_IN,
  KEY_DROP,
  TOTAL  // total number of types
};
+
// Names for a single per-level statistic in the two places it is exposed.
struct LevelStat {
  // This is what will be "L?.property_name" in the flat map returned to the
  // user
  std::string property_name;
  // This will be what we will print in the header in the cli
  std::string header_name;
};
+
+class InternalStats {
+ public:
+  static const std::map<LevelStatType, LevelStat> compaction_level_stats;
+
  // Indexes into cf_stats_value_ / cf_stats_count_. The entries before
  // WRITE_STALLS_ENUM_MAX are write-stall condition counters; the rest track
  // ingestion (flush and AddFile) totals.
  enum InternalCFStatsType {
    LEVEL0_SLOWDOWN_TOTAL,
    LEVEL0_SLOWDOWN_WITH_COMPACTION,
    MEMTABLE_COMPACTION,
    MEMTABLE_SLOWDOWN,
    LEVEL0_NUM_FILES_TOTAL,
    LEVEL0_NUM_FILES_WITH_COMPACTION,
    SOFT_PENDING_COMPACTION_BYTES_LIMIT,
    HARD_PENDING_COMPACTION_BYTES_LIMIT,
    WRITE_STALLS_ENUM_MAX,
    BYTES_FLUSHED,
    BYTES_INGESTED_ADD_FILE,
    INGESTED_NUM_FILES_TOTAL,
    INGESTED_LEVEL0_NUM_FILES_TOTAL,
    INGESTED_NUM_KEYS_TOTAL,
    INTERNAL_CF_STATS_ENUM_MAX,
  };
+
  // Indexes into db_stats_: DB-wide write and WAL counters.
  enum InternalDBStatsType {
    WAL_FILE_BYTES,
    WAL_FILE_SYNCED,
    BYTES_WRITTEN,
    NUMBER_KEYS_WRITTEN,
    WRITE_DONE_BY_OTHER,
    WRITE_DONE_BY_SELF,
    WRITE_WITH_WAL,
    WRITE_STALL_MICROS,
    INTERNAL_DB_STATS_ENUM_MAX,
  };
+
  // @param num_levels Number of LSM levels; sizes the per-level stat vectors.
  // @param env Used for the clock (NowMicros) that anchors uptime/intervals.
  // @param cfd The column family these stats describe (not owned).
  InternalStats(int num_levels, Env* env, ColumnFamilyData* cfd)
      : db_stats_{},
        cf_stats_value_{},
        cf_stats_count_{},
        comp_stats_(num_levels),
        file_read_latency_(num_levels),
        bg_error_count_(0),
        number_levels_(num_levels),
        env_(env),
        cfd_(cfd),
        started_at_(env->NowMicros()) {}
+
+  // Per level compaction stats.  comp_stats_[level] stores the stats for
+  // compactions that produced data for the specified "level".
+  struct CompactionStats {
+    uint64_t micros;
+
+    // The number of bytes read from all non-output levels
+    uint64_t bytes_read_non_output_levels;
+
+    // The number of bytes read from the compaction output level.
+    uint64_t bytes_read_output_level;
+
+    // Total number of bytes written during compaction
+    uint64_t bytes_written;
+
+    // Total number of bytes moved to the output level
+    uint64_t bytes_moved;
+
+    // The number of compaction input files in all non-output levels.
+    int num_input_files_in_non_output_levels;
+
+    // The number of compaction input files in the output level.
+    int num_input_files_in_output_level;
+
+    // The number of compaction output files.
+    int num_output_files;
+
+    // Total incoming entries during compaction between levels N and N+1
+    uint64_t num_input_records;
+
+    // Accumulated diff number of entries
+    // (num input entries - num output entires) for compaction  levels N and 
N+1
+    uint64_t num_dropped_records;
+
+    // Number of compactions done
+    int count;
+
+    explicit CompactionStats(int _count = 0)
+        : micros(0),
+          bytes_read_non_output_levels(0),
+          bytes_read_output_level(0),
+          bytes_written(0),
+          bytes_moved(0),
+          num_input_files_in_non_output_levels(0),
+          num_input_files_in_output_level(0),
+          num_output_files(0),
+          num_input_records(0),
+          num_dropped_records(0),
+          count(_count) {}
+
+    explicit CompactionStats(const CompactionStats& c)
+        : micros(c.micros),
+          bytes_read_non_output_levels(c.bytes_read_non_output_levels),
+          bytes_read_output_level(c.bytes_read_output_level),
+          bytes_written(c.bytes_written),
+          bytes_moved(c.bytes_moved),
+          num_input_files_in_non_output_levels(
+              c.num_input_files_in_non_output_levels),
+          num_input_files_in_output_level(
+              c.num_input_files_in_output_level),
+          num_output_files(c.num_output_files),
+          num_input_records(c.num_input_records),
+          num_dropped_records(c.num_dropped_records),
+          count(c.count) {}
+
+    void Clear() {
+      this->micros = 0;
+      this->bytes_read_non_output_levels = 0;
+      this->bytes_read_output_level = 0;
+      this->bytes_written = 0;
+      this->bytes_moved = 0;
+      this->num_input_files_in_non_output_levels = 0;
+      this->num_input_files_in_output_level = 0;
+      this->num_output_files = 0;
+      this->num_input_records = 0;
+      this->num_dropped_records = 0;
+      this->count = 0;
+    }
+
+    void Add(const CompactionStats& c) {
+      this->micros += c.micros;
+      this->bytes_read_non_output_levels += c.bytes_read_non_output_levels;
+      this->bytes_read_output_level += c.bytes_read_output_level;
+      this->bytes_written += c.bytes_written;
+      this->bytes_moved += c.bytes_moved;
+      this->num_input_files_in_non_output_levels +=
+          c.num_input_files_in_non_output_levels;
+      this->num_input_files_in_output_level +=
+          c.num_input_files_in_output_level;
+      this->num_output_files += c.num_output_files;
+      this->num_input_records += c.num_input_records;
+      this->num_dropped_records += c.num_dropped_records;
+      this->count += c.count;
+    }
+
+    void Subtract(const CompactionStats& c) {
+      this->micros -= c.micros;
+      this->bytes_read_non_output_levels -= c.bytes_read_non_output_levels;
+      this->bytes_read_output_level -= c.bytes_read_output_level;
+      this->bytes_written -= c.bytes_written;
+      this->bytes_moved -= c.bytes_moved;
+      this->num_input_files_in_non_output_levels -=
+          c.num_input_files_in_non_output_levels;
+      this->num_input_files_in_output_level -=
+          c.num_input_files_in_output_level;
+      this->num_output_files -= c.num_output_files;
+      this->num_input_records -= c.num_input_records;
+      this->num_dropped_records -= c.num_dropped_records;
+      this->count -= c.count;
+    }
+  };
+
  // Reset all accumulated statistics (DB-level, CF-level, per-level
  // compaction stats, histograms, and both interval snapshots) and restart
  // the uptime clock.
  void Clear() {
    for (int i = 0; i < INTERNAL_DB_STATS_ENUM_MAX; i++) {
      db_stats_[i].store(0);
    }
    for (int i = 0; i < INTERNAL_CF_STATS_ENUM_MAX; i++) {
      cf_stats_count_[i] = 0;
      cf_stats_value_[i] = 0;
    }
    for (auto& comp_stat : comp_stats_) {
      comp_stat.Clear();
    }
    for (auto& h : file_read_latency_) {
      h.Clear();
    }
    cf_stats_snapshot_.Clear();
    db_stats_snapshot_.Clear();
    bg_error_count_ = 0;
    started_at_ = env_->NowMicros();
  }
+
  // Accumulate the stats of a finished compaction into the given output level.
  void AddCompactionStats(int level, const CompactionStats& stats) {
    comp_stats_[level].Add(stats);
  }
+
  // Record bytes that were trivially moved (not rewritten) into `level`.
  void IncBytesMoved(int level, uint64_t amount) {
    comp_stats_[level].bytes_moved += amount;
  }
+
  // Add `value` to a column-family counter and bump its event count.
  void AddCFStats(InternalCFStatsType type, uint64_t value) {
    cf_stats_value_[type] += value;
    ++cf_stats_count_[type];
  }
+
  // Add `value` to a DB-level counter. With concurrent=true the update is an
  // atomic fetch_add; otherwise it is a relaxed load+store read-modify-write,
  // which is NOT atomic — presumably the caller holds the DB mutex in that
  // case (TODO confirm).
  void AddDBStats(InternalDBStatsType type, uint64_t value,
                  bool concurrent = false) {
    auto& v = db_stats_[type];
    if (concurrent) {
      v.fetch_add(value, std::memory_order_relaxed);
    } else {
      v.store(v.load(std::memory_order_relaxed) + value,
              std::memory_order_relaxed);
    }
  }
+
  // Read a DB-level counter (relaxed atomic load).
  uint64_t GetDBStats(InternalDBStatsType type) {
    return db_stats_[type].load(std::memory_order_relaxed);
  }
+
  // Return the file-read latency histogram for `level` (owned by this object).
  HistogramImpl* GetFileReadHist(int level) {
    return &file_read_latency_[level];
  }
+
  // Number of background errors seen since creation (or the last Clear()).
  uint64_t GetBackgroundErrorCount() const { return bg_error_count_; }
+
  // Increment the background error count and return the new value.
  uint64_t BumpAndGetBackgroundErrorCount() { return ++bg_error_count_; }
+
+  bool GetStringProperty(const DBPropertyInfo& property_info,
+                         const Slice& property, std::string* value);
+
+  bool GetMapProperty(const DBPropertyInfo& property_info,
+                      const Slice& property,
+                      std::map<std::string, double>* value);
+
+  bool GetIntProperty(const DBPropertyInfo& property_info, uint64_t* value,
+                      DBImpl* db);
+
+  bool GetIntPropertyOutOfMutex(const DBPropertyInfo& property_info,
+                                Version* version, uint64_t* value);
+
+  // Store a mapping from the user-facing DB::Properties string to our
+  // DBPropertyInfo struct used internally for retrieving properties.
+  static const std::unordered_map<std::string, DBPropertyInfo> 
ppt_name_to_info;
+
+ private:
+  void DumpDBStats(std::string* value);
+  void DumpCFMapStats(std::map<std::string, double>* cf_stats);
+  void DumpCFMapStats(
+      std::map<int, std::map<LevelStatType, double>>* level_stats,
+      CompactionStats* compaction_stats_sum);
+  void DumpCFStats(std::string* value);
+  void DumpCFStatsNoFileHistogram(std::string* value);
+  void DumpCFFileHistogram(std::string* value);
+
+  // Per-DB stats
+  std::atomic<uint64_t> db_stats_[INTERNAL_DB_STATS_ENUM_MAX];
+  // Per-ColumnFamily stats
+  uint64_t cf_stats_value_[INTERNAL_CF_STATS_ENUM_MAX];
+  uint64_t cf_stats_count_[INTERNAL_CF_STATS_ENUM_MAX];
+  // Per-ColumnFamily/level compaction stats
+  std::vector<CompactionStats> comp_stats_;
+  std::vector<HistogramImpl> file_read_latency_;
+
+  // Used to compute per-interval statistics
+  struct CFStatsSnapshot {
+    // ColumnFamily-level stats
+    CompactionStats comp_stats;
+    uint64_t ingest_bytes_flush;      // Bytes written to L0 (Flush)
+    uint64_t stall_count;             // Stall count
+    // Stats from compaction jobs - bytes written, bytes read, duration.
+    uint64_t compact_bytes_write;
+    uint64_t compact_bytes_read;
+    uint64_t compact_micros;
+    // NOTE(review): presumably DB uptime (seconds) at snapshot time -- confirm
+    // against the Dump*Stats implementations in internal_stats.cc.
+    double seconds_up;
+
+    // AddFile specific stats
+    uint64_t ingest_bytes_addfile;     // Total Bytes ingested
+    uint64_t ingest_files_addfile;     // Total number of files ingested
+    uint64_t ingest_l0_files_addfile;  // Total number of files ingested to L0
+    uint64_t ingest_keys_addfile;      // Total number of keys ingested
+
+    // Zero-initializes every counter.
+    CFStatsSnapshot()
+        : comp_stats(0),
+          ingest_bytes_flush(0),
+          stall_count(0),
+          compact_bytes_write(0),
+          compact_bytes_read(0),
+          compact_micros(0),
+          seconds_up(0),
+          ingest_bytes_addfile(0),
+          ingest_files_addfile(0),
+          ingest_l0_files_addfile(0),
+          ingest_keys_addfile(0) {}
+
+    // Resets all fields back to their zero-initialized state.
+    void Clear() {
+      comp_stats.Clear();
+      ingest_bytes_flush = 0;
+      stall_count = 0;
+      compact_bytes_write = 0;
+      compact_bytes_read = 0;
+      compact_micros = 0;
+      seconds_up = 0;
+      ingest_bytes_addfile = 0;
+      ingest_files_addfile = 0;
+      ingest_l0_files_addfile = 0;
+      ingest_keys_addfile = 0;
+    }
+  } cf_stats_snapshot_;
+
+  struct DBStatsSnapshot {
+    // DB-level stats
+    uint64_t ingest_bytes;            // Bytes written by user
+    uint64_t wal_bytes;               // Bytes written to WAL
+    uint64_t wal_synced;              // Number of times WAL is synced
+    uint64_t write_with_wal;          // Number of writes that request WAL
+    // These count the number of writes processed by the calling thread or
+    // another thread.
+    uint64_t write_other;
+    uint64_t write_self;
+    // Total number of keys written. write_self and write_other measure the
+    // number of write requests written; each write request can contain
+    // updates to multiple keys. num_keys_written is the total number of keys
+    // updated by all those writes.
+    uint64_t num_keys_written;
+    // Total time writes delayed by stalls.
+    uint64_t write_stall_micros;
+    // NOTE(review): presumably DB uptime (seconds) at snapshot time -- confirm
+    // against the Dump*Stats implementations in internal_stats.cc.
+    double seconds_up;
+
+    // Zero-initializes every counter.
+    DBStatsSnapshot()
+        : ingest_bytes(0),
+          wal_bytes(0),
+          wal_synced(0),
+          write_with_wal(0),
+          write_other(0),
+          write_self(0),
+          num_keys_written(0),
+          write_stall_micros(0),
+          seconds_up(0) {}
+
+    // Resets all fields back to their zero-initialized state.
+    void Clear() {
+      ingest_bytes = 0;
+      wal_bytes = 0;
+      wal_synced = 0;
+      write_with_wal = 0;
+      write_other = 0;
+      write_self = 0;
+      num_keys_written = 0;
+      write_stall_micros = 0;
+      seconds_up = 0;
+    }
+  } db_stats_snapshot_;
+
+  // Handler functions for getting property values. They use "value" as a 
value-
+  // result argument, and return true upon successfully setting "value".
+  bool HandleNumFilesAtLevel(std::string* value, Slice suffix);
+  bool HandleCompressionRatioAtLevelPrefix(std::string* value, Slice suffix);
+  bool HandleLevelStats(std::string* value, Slice suffix);
+  bool HandleStats(std::string* value, Slice suffix);
+  bool HandleCFMapStats(std::map<std::string, double>* compaction_stats);
+  bool HandleCFStats(std::string* value, Slice suffix);
+  bool HandleCFStatsNoFileHistogram(std::string* value, Slice suffix);
+  bool HandleCFFileHistogram(std::string* value, Slice suffix);
+  bool HandleDBStats(std::string* value, Slice suffix);
+  bool HandleSsTables(std::string* value, Slice suffix);
+  bool HandleAggregatedTableProperties(std::string* value, Slice suffix);
+  bool HandleAggregatedTablePropertiesAtLevel(std::string* value, Slice 
suffix);
+  bool HandleNumImmutableMemTable(uint64_t* value, DBImpl* db,
+                                  Version* version);
+  bool HandleNumImmutableMemTableFlushed(uint64_t* value, DBImpl* db,
+                                         Version* version);
+  bool HandleMemTableFlushPending(uint64_t* value, DBImpl* db,
+                                  Version* version);
+  bool HandleNumRunningFlushes(uint64_t* value, DBImpl* db, Version* version);
+  bool HandleCompactionPending(uint64_t* value, DBImpl* db, Version* version);
+  bool HandleNumRunningCompactions(uint64_t* value, DBImpl* db,
+                                   Version* version);
+  bool HandleBackgroundErrors(uint64_t* value, DBImpl* db, Version* version);
+  bool HandleCurSizeActiveMemTable(uint64_t* value, DBImpl* db,
+                                   Version* version);
+  bool HandleCurSizeAllMemTables(uint64_t* value, DBImpl* db, Version* 
version);
+  bool HandleSizeAllMemTables(uint64_t* value, DBImpl* db, Version* version);
+  bool HandleNumEntriesActiveMemTable(uint64_t* value, DBImpl* db,
+                                      Version* version);
+  bool HandleNumEntriesImmMemTables(uint64_t* value, DBImpl* db,
+                                    Version* version);
+  bool HandleNumDeletesActiveMemTable(uint64_t* value, DBImpl* db,
+                                      Version* version);
+  bool HandleNumDeletesImmMemTables(uint64_t* value, DBImpl* db,
+                                    Version* version);
+  bool HandleEstimateNumKeys(uint64_t* value, DBImpl* db, Version* version);
+  bool HandleNumSnapshots(uint64_t* value, DBImpl* db, Version* version);
+  bool HandleOldestSnapshotTime(uint64_t* value, DBImpl* db, Version* version);
+  bool HandleNumLiveVersions(uint64_t* value, DBImpl* db, Version* version);
+  bool HandleCurrentSuperVersionNumber(uint64_t* value, DBImpl* db,
+                                       Version* version);
+  bool HandleIsFileDeletionsEnabled(uint64_t* value, DBImpl* db,
+                                    Version* version);
+  bool HandleBaseLevel(uint64_t* value, DBImpl* db, Version* version);
+  bool HandleTotalSstFilesSize(uint64_t* value, DBImpl* db, Version* version);
+  bool HandleEstimatePendingCompactionBytes(uint64_t* value, DBImpl* db,
+                                            Version* version);
+  bool HandleEstimateTableReadersMem(uint64_t* value, DBImpl* db,
+                                     Version* version);
+  bool HandleEstimateLiveDataSize(uint64_t* value, DBImpl* db,
+                                  Version* version);
+  bool HandleMinLogNumberToKeep(uint64_t* value, DBImpl* db, Version* version);
+  bool HandleActualDelayedWriteRate(uint64_t* value, DBImpl* db,
+                                    Version* version);
+  bool HandleIsWriteStopped(uint64_t* value, DBImpl* db, Version* version);
+
+  // Total number of background errors encountered. Every time a flush task
+  // or compaction task fails, this counter is incremented. The failure can
+  // be caused by any possible reason, including file system errors, out of
+  // resources, or input file corruption. Failing when retrying the same flush
+  // or compaction will cause the counter to increase too.
+  uint64_t bg_error_count_;
+
+  const int number_levels_;
+  Env* env_;
+  ColumnFamilyData* cfd_;
+  uint64_t started_at_;
+};
+
+#else
+
+// ROCKSDB_LITE build: a no-op stand-in for InternalStats. All mutators
+// discard their arguments and all getters return neutral values (0, nullptr,
+// false), so callers compile unchanged while stats collection is disabled.
+class InternalStats {
+ public:
+  // NOTE(review): these enumerators must stay in sync with the non-lite
+  // definition of InternalStats -- verify when either enum changes.
+  enum InternalCFStatsType {
+    LEVEL0_SLOWDOWN_TOTAL,
+    LEVEL0_SLOWDOWN_WITH_COMPACTION,
+    MEMTABLE_COMPACTION,
+    MEMTABLE_SLOWDOWN,
+    LEVEL0_NUM_FILES_TOTAL,
+    LEVEL0_NUM_FILES_WITH_COMPACTION,
+    SOFT_PENDING_COMPACTION_BYTES_LIMIT,
+    HARD_PENDING_COMPACTION_BYTES_LIMIT,
+    WRITE_STALLS_ENUM_MAX,
+    BYTES_FLUSHED,
+    BYTES_INGESTED_ADD_FILE,
+    INGESTED_NUM_FILES_TOTAL,
+    INGESTED_LEVEL0_NUM_FILES_TOTAL,
+    INGESTED_NUM_KEYS_TOTAL,
+    INTERNAL_CF_STATS_ENUM_MAX,
+  };
+
+  enum InternalDBStatsType {
+    WAL_FILE_BYTES,
+    WAL_FILE_SYNCED,
+    BYTES_WRITTEN,
+    NUMBER_KEYS_WRITTEN,
+    WRITE_DONE_BY_OTHER,
+    WRITE_DONE_BY_SELF,
+    WRITE_WITH_WAL,
+    WRITE_STALL_MICROS,
+    INTERNAL_DB_STATS_ENUM_MAX,
+  };
+
+  // Stub constructor: all arguments are intentionally ignored.
+  InternalStats(int num_levels, Env* env, ColumnFamilyData* cfd) {}
+
+  // Stub CompactionStats. NOTE(review): the constructors deliberately leave
+  // every member uninitialized -- presumably nothing in the lite build reads
+  // them; confirm before relying on these fields.
+  struct CompactionStats {
+    uint64_t micros;
+    uint64_t bytes_read_non_output_levels;
+    uint64_t bytes_read_output_level;
+    uint64_t bytes_written;
+    uint64_t bytes_moved;
+    int num_input_files_in_non_output_levels;
+    int num_input_files_in_output_level;
+    int num_output_files;
+    uint64_t num_input_records;
+    uint64_t num_dropped_records;
+    int count;
+
+    explicit CompactionStats(int _count = 0) {}
+
+    explicit CompactionStats(const CompactionStats& c) {}
+
+    void Add(const CompactionStats& c) {}
+
+    void Subtract(const CompactionStats& c) {}
+  };
+
+  // All stat mutators below are no-ops in the lite build.
+  void AddCompactionStats(int level, const CompactionStats& stats) {}
+
+  void IncBytesMoved(int level, uint64_t amount) {}
+
+  void AddCFStats(InternalCFStatsType type, uint64_t value) {}
+
+  void AddDBStats(InternalDBStatsType type, uint64_t value,
+                  bool concurrent = false) {}
+
+  // All getters below return neutral values in the lite build.
+  HistogramImpl* GetFileReadHist(int level) { return nullptr; }
+
+  uint64_t GetBackgroundErrorCount() const { return 0; }
+
+  uint64_t BumpAndGetBackgroundErrorCount() { return 0; }
+
+  bool GetStringProperty(const DBPropertyInfo& property_info,
+                         const Slice& property, std::string* value) {
+    return false;
+  }
+
+  bool GetMapProperty(const DBPropertyInfo& property_info,
+                      const Slice& property,
+                      std::map<std::string, double>* value) {
+    return false;
+  }
+
+  bool GetIntProperty(const DBPropertyInfo& property_info, uint64_t* value,
+                      DBImpl* db) const {
+    return false;
+  }
+
+  bool GetIntPropertyOutOfMutex(const DBPropertyInfo& property_info,
+                                Version* version, uint64_t* value) const {
+    return false;
+  }
+};
+#endif  // !ROCKSDB_LITE
+
+}  // namespace rocksdb

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/db/job_context.h
----------------------------------------------------------------------
diff --git a/thirdparty/rocksdb/db/job_context.h b/thirdparty/rocksdb/db/job_context.h
new file mode 100644
index 0000000..950a3a6
--- /dev/null
+++ b/thirdparty/rocksdb/db/job_context.h
@@ -0,0 +1,129 @@
+//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+//  This source code is licensed under both the GPLv2 (found in the
+//  COPYING file in the root directory) and Apache 2.0 License
+//  (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#pragma once
+
+#include <string>
+#include <vector>
+
+#include "db/log_writer.h"
+
+namespace rocksdb {
+
+class MemTable;
+
+// Per-job bookkeeping for background work (flush/compaction/file purge):
+// collects files and in-memory objects that became obsolete during the job so
+// they can be deleted/freed later via Clean(), outside the DB mutex.
+struct JobContext {
+  // Returns true if this context holds anything that still needs to be
+  // deleted or freed (candidate/sst/log/manifest files, memtables,
+  // superversions, log writers, or an unused new_superversion).
+  inline bool HaveSomethingToDelete() const {
+    return full_scan_candidate_files.size() || sst_delete_files.size() ||
+           log_delete_files.size() || manifest_delete_files.size() ||
+           new_superversion != nullptr || superversions_to_free.size() > 0 ||
+           memtables_to_free.size() > 0 || logs_to_free.size() > 0;
+  }
+
+  // Structure to store information for candidate files to delete.
+  struct CandidateFileInfo {
+    std::string file_name;
+    uint32_t path_id;
+    CandidateFileInfo(std::string name, uint32_t path)
+        : file_name(std::move(name)), path_id(path) {}
+    bool operator==(const CandidateFileInfo& other) const {
+      return file_name == other.file_name && path_id == other.path_id;
+    }
+  };
+
+  // Unique job id
+  int job_id;
+
+  // a list of all files that we'll consider deleting
+  // (every once in a while this is filled up with all files
+  // in the DB directory)
+  // (filled only if we're doing full scan)
+  std::vector<CandidateFileInfo> full_scan_candidate_files;
+
+  // the list of all live sst files that cannot be deleted
+  std::vector<FileDescriptor> sst_live;
+
+  // a list of sst files that we need to delete
+  std::vector<FileMetaData*> sst_delete_files;
+
+  // a list of log files that we need to delete
+  std::vector<uint64_t> log_delete_files;
+
+  // a list of log files that we need to preserve during full purge since they
+  // will be reused later
+  std::vector<uint64_t> log_recycle_files;
+
+  // a list of manifest files that we need to delete
+  std::vector<std::string> manifest_delete_files;
+
+  // a list of memtables to be freed
+  autovector<MemTable*> memtables_to_free;
+
+  // superversions that became obsolete during the job and must be freed
+  autovector<SuperVersion*> superversions_to_free;
+
+  // log writers that became obsolete during the job and must be freed
+  autovector<log::Writer*> logs_to_free;
+
+  SuperVersion* new_superversion;  // if nullptr no new superversion
+
+  // the current manifest_file_number, log_number and prev_log_number
+  // that corresponds to the set of files in 'live'.
+  uint64_t manifest_file_number;
+  uint64_t pending_manifest_file_number;
+  uint64_t log_number;
+  uint64_t prev_log_number;
+
+  uint64_t min_pending_output = 0;
+  uint64_t prev_total_log_size = 0;
+  size_t num_alive_log_files = 0;
+  uint64_t size_log_to_delete = 0;
+
+  // Creates a context for job `_job_id`. If `create_superversion` is true,
+  // pre-allocates a SuperVersion so the job can install it without allocating
+  // under the DB mutex.
+  explicit JobContext(int _job_id, bool create_superversion = false) {
+    job_id = _job_id;
+    manifest_file_number = 0;
+    pending_manifest_file_number = 0;
+    log_number = 0;
+    prev_log_number = 0;
+    new_superversion = create_superversion ? new SuperVersion() : nullptr;
+  }
+
+  // For a non-empty JobContext, Clean() has to be called at least once before
+  // destruction (see the asserts in ~JobContext()). Should be called with the
+  // DB mutex unlocked. The destructor doesn't call Clean() to avoid
+  // accidentally doing the potentially slow Clean() with the DB mutex locked.
+  void Clean() {
+    // free pending memtables
+    for (auto m : memtables_to_free) {
+      delete m;
+    }
+    // free superversions
+    for (auto s : superversions_to_free) {
+      delete s;
+    }
+    for (auto l : logs_to_free) {
+      delete l;
+    }
+    // if new_superversion was not used, it will be non-nullptr and needs
+    // to be freed here
+    delete new_superversion;
+
+    memtables_to_free.clear();
+    superversions_to_free.clear();
+    logs_to_free.clear();
+    new_superversion = nullptr;
+  }
+
+  // Asserts that Clean() already ran (or nothing was ever accumulated) so the
+  // context does not silently leak memtables/superversions/log writers.
+  ~JobContext() {
+    assert(memtables_to_free.size() == 0);
+    assert(superversions_to_free.size() == 0);
+    assert(new_superversion == nullptr);
+    assert(logs_to_free.size() == 0);
+  }
+};
+
+}  // namespace rocksdb

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/db/log_format.h
----------------------------------------------------------------------
diff --git a/thirdparty/rocksdb/db/log_format.h b/thirdparty/rocksdb/db/log_format.h
new file mode 100644
index 0000000..be22201
--- /dev/null
+++ b/thirdparty/rocksdb/db/log_format.h
@@ -0,0 +1,45 @@
+//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+//  This source code is licensed under both the GPLv2 (found in the
+//  COPYING file in the root directory) and Apache 2.0 License
+//  (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+// Log format information shared by reader and writer.
+// See ../doc/log_format.txt for more detail.
+
+#pragma once
+namespace rocksdb {
+namespace log {
+
+enum RecordType {
+  // Zero is reserved for preallocated files
+  kZeroType = 0,
+  // A record entirely contained within one block
+  kFullType = 1,
+
+  // For fragments
+  kFirstType = 2,
+  kMiddleType = 3,
+  kLastType = 4,
+
+  // For recycled log files
+  kRecyclableFullType = 5,
+  kRecyclableFirstType = 6,
+  kRecyclableMiddleType = 7,
+  kRecyclableLastType = 8,
+};
+// Highest assigned RecordType value.
+static const int kMaxRecordType = kRecyclableLastType;
+
+// Log files are organized in fixed-size 32KB blocks.
+static const unsigned int kBlockSize = 32768;
+
+// Header is checksum (4 bytes), length (2 bytes), type (1 byte)
+static const int kHeaderSize = 4 + 2 + 1;
+
+// Recyclable header is checksum (4 bytes), type (1 byte), log number
+// (4 bytes), length (2 bytes).
+static const int kRecyclableHeaderSize = 4 + 1 + 4 + 2;
+
+}  // namespace log
+}  // namespace rocksdb

Reply via email to