http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/d3a13a49/thirdparty/rocksdb/util/arena_test.cc
----------------------------------------------------------------------
diff --git a/thirdparty/rocksdb/util/arena_test.cc b/thirdparty/rocksdb/util/arena_test.cc
deleted file mode 100644
index 53777a2..0000000
--- a/thirdparty/rocksdb/util/arena_test.cc
+++ /dev/null
@@ -1,203 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "util/arena.h"
-#include "util/random.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-
-namespace {
-const size_t kHugePageSize = 2 * 1024 * 1024;
-}  // namespace
-class ArenaTest : public testing::Test {};
-
-TEST_F(ArenaTest, Empty) { Arena arena0; }
-
-namespace {
-bool CheckMemoryAllocated(size_t allocated, size_t expected) {
-  // The value returned by Arena::MemoryAllocatedBytes() may be greater than
-  // the requested memory. We choose a somewhat arbitrary upper bound of
-  // max_expected = expected * 1.1 to detect critical overallocation.
-  size_t max_expected = expected + expected / 10;
-  return allocated >= expected && allocated <= max_expected;
-}
-
-void MemoryAllocatedBytesTest(size_t huge_page_size) {
-  const int N = 17;
-  size_t req_sz;  // requested size
-  size_t bsz = 32 * 1024;  // block size
-  size_t expected_memory_allocated;
-
-  Arena arena(bsz, nullptr, huge_page_size);
-
-  // requested size > quarter of a block:
-  //   allocate requested size separately
-  req_sz = 12 * 1024;
-  for (int i = 0; i < N; i++) {
-    arena.Allocate(req_sz);
-  }
-  expected_memory_allocated = req_sz * N + Arena::kInlineSize;
-  ASSERT_PRED2(CheckMemoryAllocated, arena.MemoryAllocatedBytes(),
-               expected_memory_allocated);
-
-  arena.Allocate(Arena::kInlineSize - 1);
-
-  // requested size < quarter of a block:
-  //   allocate a block with the default size, then try to use the unused part
-  //   of the block. So one new block will be allocated for the first
-  //   Allocate(99) call. All the remaining calls won't lead to new allocations.
-  req_sz = 99;
-  for (int i = 0; i < N; i++) {
-    arena.Allocate(req_sz);
-  }
-  if (huge_page_size) {
-    ASSERT_TRUE(
-        CheckMemoryAllocated(arena.MemoryAllocatedBytes(),
-                             expected_memory_allocated + bsz) ||
-        CheckMemoryAllocated(arena.MemoryAllocatedBytes(),
-                             expected_memory_allocated + huge_page_size));
-  } else {
-    expected_memory_allocated += bsz;
-    ASSERT_PRED2(CheckMemoryAllocated, arena.MemoryAllocatedBytes(),
-                 expected_memory_allocated);
-  }
-
-  // requested size > size of a block:
-  //   allocate requested size separately
-  expected_memory_allocated = arena.MemoryAllocatedBytes();
-  req_sz = 8 * 1024 * 1024;
-  for (int i = 0; i < N; i++) {
-    arena.Allocate(req_sz);
-  }
-  expected_memory_allocated += req_sz * N;
-  ASSERT_PRED2(CheckMemoryAllocated, arena.MemoryAllocatedBytes(),
-               expected_memory_allocated);
-}
-
-// Make sure we don't count allocated-but-unused memory space in
-// Arena::ApproximateMemoryUsage()
-static void ApproximateMemoryUsageTest(size_t huge_page_size) {
-  const size_t kBlockSize = 4096;
-  const size_t kEntrySize = kBlockSize / 8;
-  const size_t kZero = 0;
-  Arena arena(kBlockSize, nullptr, huge_page_size);
-  ASSERT_EQ(kZero, arena.ApproximateMemoryUsage());
-
-  // allocate inline bytes
-  EXPECT_TRUE(arena.IsInInlineBlock());
-  arena.AllocateAligned(8);
-  EXPECT_TRUE(arena.IsInInlineBlock());
-  arena.AllocateAligned(Arena::kInlineSize / 2 - 16);
-  EXPECT_TRUE(arena.IsInInlineBlock());
-  arena.AllocateAligned(Arena::kInlineSize / 2);
-  EXPECT_TRUE(arena.IsInInlineBlock());
-  ASSERT_EQ(arena.ApproximateMemoryUsage(), Arena::kInlineSize - 8);
-  ASSERT_PRED2(CheckMemoryAllocated, arena.MemoryAllocatedBytes(),
-               Arena::kInlineSize);
-
-  auto num_blocks = kBlockSize / kEntrySize;
-
-  // first allocation
-  arena.AllocateAligned(kEntrySize);
-  EXPECT_FALSE(arena.IsInInlineBlock());
-  auto mem_usage = arena.MemoryAllocatedBytes();
-  if (huge_page_size) {
-    ASSERT_TRUE(
-        CheckMemoryAllocated(mem_usage, kBlockSize + Arena::kInlineSize) ||
-        CheckMemoryAllocated(mem_usage, huge_page_size + Arena::kInlineSize));
-  } else {
-    ASSERT_PRED2(CheckMemoryAllocated, mem_usage,
-                 kBlockSize + Arena::kInlineSize);
-  }
-  auto usage = arena.ApproximateMemoryUsage();
-  ASSERT_LT(usage, mem_usage);
-  for (size_t i = 1; i < num_blocks; ++i) {
-    arena.AllocateAligned(kEntrySize);
-    ASSERT_EQ(mem_usage, arena.MemoryAllocatedBytes());
-    ASSERT_EQ(arena.ApproximateMemoryUsage(), usage + kEntrySize);
-    EXPECT_FALSE(arena.IsInInlineBlock());
-    usage = arena.ApproximateMemoryUsage();
-  }
-  if (huge_page_size) {
-    ASSERT_TRUE(usage > mem_usage ||
-                usage + huge_page_size - kBlockSize == mem_usage);
-  } else {
-    ASSERT_GT(usage, mem_usage);
-  }
-}
-
-static void SimpleTest(size_t huge_page_size) {
-  std::vector<std::pair<size_t, char*>> allocated;
-  Arena arena(Arena::kMinBlockSize, nullptr, huge_page_size);
-  const int N = 100000;
-  size_t bytes = 0;
-  Random rnd(301);
-  for (int i = 0; i < N; i++) {
-    size_t s;
-    if (i % (N / 10) == 0) {
-      s = i;
-    } else {
-      s = rnd.OneIn(4000)
-              ? rnd.Uniform(6000)
-              : (rnd.OneIn(10) ? rnd.Uniform(100) : rnd.Uniform(20));
-    }
-    if (s == 0) {
-      // Our arena disallows size 0 allocations.
-      s = 1;
-    }
-    char* r;
-    if (rnd.OneIn(10)) {
-      r = arena.AllocateAligned(s);
-    } else {
-      r = arena.Allocate(s);
-    }
-
-    for (unsigned int b = 0; b < s; b++) {
-      // Fill the "i"th allocation with a known bit pattern
-      r[b] = i % 256;
-    }
-    bytes += s;
-    allocated.push_back(std::make_pair(s, r));
-    ASSERT_GE(arena.ApproximateMemoryUsage(), bytes);
-    if (i > N / 10) {
-      ASSERT_LE(arena.ApproximateMemoryUsage(), bytes * 1.10);
-    }
-  }
-  for (unsigned int i = 0; i < allocated.size(); i++) {
-    size_t num_bytes = allocated[i].first;
-    const char* p = allocated[i].second;
-    for (unsigned int b = 0; b < num_bytes; b++) {
-      // Check the "i"th allocation for the known bit pattern
-      ASSERT_EQ(int(p[b]) & 0xff, (int)(i % 256));
-    }
-  }
-}
-}  // namespace
-
-TEST_F(ArenaTest, MemoryAllocatedBytes) {
-  MemoryAllocatedBytesTest(0);
-  MemoryAllocatedBytesTest(kHugePageSize);
-}
-
-TEST_F(ArenaTest, ApproximateMemoryUsage) {
-  ApproximateMemoryUsageTest(0);
-  ApproximateMemoryUsageTest(kHugePageSize);
-}
-
-TEST_F(ArenaTest, Simple) {
-  SimpleTest(0);
-  SimpleTest(kHugePageSize);
-}
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
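
The removed arena_test.cc exercised the gap between the bytes an arena reserves (MemoryAllocatedBytes) and the bytes callers actually consume (ApproximateMemoryUsage). A minimal sketch of that idea, assuming nothing about RocksDB's real Arena beyond the block-based strategy the test comments describe (ToyArena and every member below are illustrative names, not the real API):

#include <cstddef>
#include <memory>
#include <vector>

// Toy bump allocator: large requests get a dedicated block, small requests
// carve space out of the current default-sized block.
class ToyArena {
 public:
  explicit ToyArena(size_t block_size) : block_size_(block_size) {}

  char* Allocate(size_t bytes) {
    if (bytes > block_size_ / 4) {
      used_ += bytes;
      return NewBlock(bytes);           // oversized: dedicated block
    }
    if (bytes > remaining_) {
      cursor_ = NewBlock(block_size_);  // current block exhausted
      remaining_ = block_size_;
    }
    char* out = cursor_;
    cursor_ += bytes;
    remaining_ -= bytes;
    used_ += bytes;
    return out;
  }

  // Whole blocks reserved from the system.
  size_t MemoryAllocatedBytes() const { return allocated_; }
  // Bytes actually handed out; excludes each block's unused tail, which is
  // the distinction the deleted ApproximateMemoryUsageTest checked.
  size_t ApproximateMemoryUsage() const { return used_; }

 private:
  char* NewBlock(size_t bytes) {
    blocks_.emplace_back(new char[bytes]);
    allocated_ += bytes;
    return blocks_.back().get();
  }

  size_t block_size_;
  size_t allocated_ = 0;
  size_t used_ = 0;
  size_t remaining_ = 0;
  char* cursor_ = nullptr;
  std::vector<std::unique_ptr<char[]>> blocks_;
};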

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/d3a13a49/thirdparty/rocksdb/util/auto_roll_logger_test.cc
----------------------------------------------------------------------
diff --git a/thirdparty/rocksdb/util/auto_roll_logger_test.cc b/thirdparty/rocksdb/util/auto_roll_logger_test.cc
deleted file mode 100644
index 9b39748..0000000
--- a/thirdparty/rocksdb/util/auto_roll_logger_test.cc
+++ /dev/null
@@ -1,490 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-
-#ifndef ROCKSDB_LITE
-
-#include "util/auto_roll_logger.h"
-#include <errno.h>
-#include <sys/stat.h>
-#include <algorithm>
-#include <cmath>
-#include <fstream>
-#include <iostream>
-#include <iterator>
-#include <string>
-#include <thread>
-#include <vector>
-#include "port/port.h"
-#include "rocksdb/db.h"
-#include "util/logging.h"
-#include "util/sync_point.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-namespace {
-class NoSleepEnv : public EnvWrapper {
- public:
-  NoSleepEnv(Env* base) : EnvWrapper(base) {}
-  virtual void SleepForMicroseconds(int micros) override {
-    fake_time_ += static_cast<uint64_t>(micros);
-  }
-
-  virtual uint64_t NowNanos() override { return fake_time_ * 1000; }
-
-  virtual uint64_t NowMicros() override { return fake_time_; }
-
- private:
-  uint64_t fake_time_ = 6666666666;
-};
-}  // namespace
-
-class AutoRollLoggerTest : public testing::Test {
- public:
-  static void InitTestDb() {
-#ifdef OS_WIN
-    // Replace all slashes in the path so windows CompSpec does not
-    // become confused
-    std::string testDir(kTestDir);
-    std::replace_if(testDir.begin(), testDir.end(),
-                    [](char ch) { return ch == '/'; }, '\\');
-    std::string deleteCmd = "if exist " + testDir + " rd /s /q " + testDir;
-#else
-    std::string deleteCmd = "rm -rf " + kTestDir;
-#endif
-    ASSERT_TRUE(system(deleteCmd.c_str()) == 0);
-    Env::Default()->CreateDir(kTestDir);
-  }
-
-  void RollLogFileBySizeTest(AutoRollLogger* logger, size_t log_max_size,
-                             const std::string& log_message);
-  void RollLogFileByTimeTest(Env*, AutoRollLogger* logger, size_t time,
-                             const std::string& log_message);
-
-  static const std::string kSampleMessage;
-  static const std::string kTestDir;
-  static const std::string kLogFile;
-  static Env* default_env;
-};
-
-const std::string AutoRollLoggerTest::kSampleMessage(
-    "this is the message to be written to the log file!!");
-const std::string AutoRollLoggerTest::kTestDir(test::TmpDir() + "/db_log_test");
-const std::string AutoRollLoggerTest::kLogFile(test::TmpDir() +
-                                               "/db_log_test/LOG");
-Env* AutoRollLoggerTest::default_env = Env::Default();
-
-// In this test we only want to Log some simple log message with
-// no format. LogMessage() provides such a simple interface and
-// avoids the [format-security] warning which occurs when you
-// call ROCKS_LOG_INFO(logger, log_message) directly.
-namespace {
-void LogMessage(Logger* logger, const char* message) {
-  ROCKS_LOG_INFO(logger, "%s", message);
-}
-
-void LogMessage(const InfoLogLevel log_level, Logger* logger,
-                const char* message) {
-  Log(log_level, logger, "%s", message);
-}
-}  // namespace
-
-void AutoRollLoggerTest::RollLogFileBySizeTest(AutoRollLogger* logger,
-                                               size_t log_max_size,
-                                               const std::string& log_message) {
-  logger->SetInfoLogLevel(InfoLogLevel::INFO_LEVEL);
-  // measure the size of each message, which is supposed
-  // to be equal to or greater than log_message.size()
-  LogMessage(logger, log_message.c_str());
-  size_t message_size = logger->GetLogFileSize();
-  size_t current_log_size = message_size;
-
-  // Test the cases when the log file will not be rolled.
-  while (current_log_size + message_size < log_max_size) {
-    LogMessage(logger, log_message.c_str());
-    current_log_size += message_size;
-    ASSERT_EQ(current_log_size, logger->GetLogFileSize());
-  }
-
-  // Now the log file will be rolled
-  LogMessage(logger, log_message.c_str());
-  // Since rotation is checked before actual logging, we need to
-  // trigger the rotation by logging another message.
-  LogMessage(logger, log_message.c_str());
-
-  ASSERT_TRUE(message_size == logger->GetLogFileSize());
-}
-
-void AutoRollLoggerTest::RollLogFileByTimeTest(Env* env, AutoRollLogger* logger,
-                                               size_t time,
-                                               const std::string& log_message) {
-  uint64_t expected_ctime;
-  uint64_t actual_ctime;
-
-  uint64_t total_log_size;
-  EXPECT_OK(env->GetFileSize(kLogFile, &total_log_size));
-  expected_ctime = logger->TEST_ctime();
-  logger->SetCallNowMicrosEveryNRecords(0);
-
-  // -- Write to the log several times; this is supposed
-  // to finish before 'time' elapses.
-  for (int i = 0; i < 10; ++i) {
-    env->SleepForMicroseconds(50000);
-    LogMessage(logger, log_message.c_str());
-    EXPECT_OK(logger->GetStatus());
-    // Make sure we always write to the same log file (by
-    // checking the create time);
-
-    actual_ctime = logger->TEST_ctime();
-
-    // Also make sure the log size is increasing.
-    EXPECT_EQ(expected_ctime, actual_ctime);
-    EXPECT_GT(logger->GetLogFileSize(), total_log_size);
-    total_log_size = logger->GetLogFileSize();
-  }
-
-  // -- Make the log file expire
-  env->SleepForMicroseconds(static_cast<int>(time * 1000000));
-  LogMessage(logger, log_message.c_str());
-
-  // At this time, the new log file should be created.
-  actual_ctime = logger->TEST_ctime();
-  EXPECT_LT(expected_ctime, actual_ctime);
-  EXPECT_LT(logger->GetLogFileSize(), total_log_size);
-}
-
-TEST_F(AutoRollLoggerTest, RollLogFileBySize) {
-    InitTestDb();
-    size_t log_max_size = 1024 * 5;
-
-    AutoRollLogger logger(Env::Default(), kTestDir, "", log_max_size, 0);
-
-    RollLogFileBySizeTest(&logger, log_max_size,
-                          kSampleMessage + ":RollLogFileBySize");
-}
-
-TEST_F(AutoRollLoggerTest, RollLogFileByTime) {
-  NoSleepEnv nse(Env::Default());
-
-  size_t time = 2;
-  size_t log_size = 1024 * 5;
-
-  InitTestDb();
-  // -- Test the existence of file during the server restart.
-  ASSERT_EQ(Status::NotFound(), default_env->FileExists(kLogFile));
-  AutoRollLogger logger(&nse, kTestDir, "", log_size, time);
-  ASSERT_OK(default_env->FileExists(kLogFile));
-
-  RollLogFileByTimeTest(&nse, &logger, time,
-                        kSampleMessage + ":RollLogFileByTime");
-}
-
-TEST_F(AutoRollLoggerTest, OpenLogFilesMultipleTimesWithOptionLog_max_size) {
-  // If only the 'log_max_size' option is specified, then every time
-  // rocksdb is restarted, a new empty log file will be created.
-  InitTestDb();
-  // WORKAROUND:
-  // avoid the compiler's complaint about "comparison between signed
-  // and unsigned integer expressions" because the literal 0 is
-  // treated as "signed".
-  size_t kZero = 0;
-  size_t log_size = 1024;
-
-  AutoRollLogger* logger = new AutoRollLogger(
-    Env::Default(), kTestDir, "", log_size, 0);
-
-  LogMessage(logger, kSampleMessage.c_str());
-  ASSERT_GT(logger->GetLogFileSize(), kZero);
-  delete logger;
-
-  // Reopen the logger; a new empty log file will be created.
-  logger = new AutoRollLogger(
-    Env::Default(), kTestDir, "", log_size, 0);
-  ASSERT_EQ(logger->GetLogFileSize(), kZero);
-  delete logger;
-}
-
-TEST_F(AutoRollLoggerTest, CompositeRollByTimeAndSizeLogger) {
-  size_t time = 2, log_max_size = 1024 * 5;
-
-  InitTestDb();
-
-  NoSleepEnv nse(Env::Default());
-  AutoRollLogger logger(&nse, kTestDir, "", log_max_size, time);
-
-  // Test the ability to roll by size
-  RollLogFileBySizeTest(&logger, log_max_size,
-                        kSampleMessage + ":CompositeRollByTimeAndSizeLogger");
-
-  // Test the ability to roll by Time
-  RollLogFileByTimeTest(&nse, &logger, time,
-                        kSampleMessage + ":CompositeRollByTimeAndSizeLogger");
-}
-
-#ifndef OS_WIN
-// TODO: does not build for Windows because of PosixLogger use below. Need to
-// port
-TEST_F(AutoRollLoggerTest, CreateLoggerFromOptions) {
-  DBOptions options;
-  NoSleepEnv nse(Env::Default());
-  shared_ptr<Logger> logger;
-
-  // Normal logger
-  ASSERT_OK(CreateLoggerFromOptions(kTestDir, options, &logger));
-  ASSERT_TRUE(dynamic_cast<PosixLogger*>(logger.get()));
-
-  // Only roll by size
-  InitTestDb();
-  options.max_log_file_size = 1024;
-  ASSERT_OK(CreateLoggerFromOptions(kTestDir, options, &logger));
-  AutoRollLogger* auto_roll_logger =
-    dynamic_cast<AutoRollLogger*>(logger.get());
-  ASSERT_TRUE(auto_roll_logger);
-  RollLogFileBySizeTest(
-      auto_roll_logger, options.max_log_file_size,
-      kSampleMessage + ":CreateLoggerFromOptions - size");
-
-  // Only roll by Time
-  options.env = &nse;
-  InitTestDb();
-  options.max_log_file_size = 0;
-  options.log_file_time_to_roll = 2;
-  ASSERT_OK(CreateLoggerFromOptions(kTestDir, options, &logger));
-  auto_roll_logger =
-    dynamic_cast<AutoRollLogger*>(logger.get());
-  RollLogFileByTimeTest(&nse, auto_roll_logger, options.log_file_time_to_roll,
-                        kSampleMessage + ":CreateLoggerFromOptions - time");
-
-  // roll by both Time and size
-  InitTestDb();
-  options.max_log_file_size = 1024 * 5;
-  options.log_file_time_to_roll = 2;
-  ASSERT_OK(CreateLoggerFromOptions(kTestDir, options, &logger));
-  auto_roll_logger =
-    dynamic_cast<AutoRollLogger*>(logger.get());
-  RollLogFileBySizeTest(auto_roll_logger, options.max_log_file_size,
-                        kSampleMessage + ":CreateLoggerFromOptions - both");
-  RollLogFileByTimeTest(&nse, auto_roll_logger, options.log_file_time_to_roll,
-                        kSampleMessage + ":CreateLoggerFromOptions - both");
-}
-
-TEST_F(AutoRollLoggerTest, LogFlushWhileRolling) {
-  DBOptions options;
-  shared_ptr<Logger> logger;
-
-  InitTestDb();
-  options.max_log_file_size = 1024 * 5;
-  ASSERT_OK(CreateLoggerFromOptions(kTestDir, options, &logger));
-  AutoRollLogger* auto_roll_logger =
-      dynamic_cast<AutoRollLogger*>(logger.get());
-  ASSERT_TRUE(auto_roll_logger);
-  rocksdb::port::Thread flush_thread;
-
-  // Notes:
-  // (1) Need to pin the old logger before beginning the roll, as rolling grabs
-  //     the mutex, which would prevent us from accessing the old logger. This
-  //     also marks flush_thread with AutoRollLogger::Flush:PinnedLogger.
-  // (2) Need to reset logger during PosixLogger::Flush() to exercise a race
-  //     condition case, which is executing the flush with the pinned (old)
-  //     logger after auto-roll logger has cut over to a new logger.
-  // (3) PosixLogger::Flush() happens in both threads but its SyncPoints only
-  //     are enabled in flush_thread (the one pinning the old logger).
-  rocksdb::SyncPoint::GetInstance()->LoadDependencyAndMarkers(
-      {{"AutoRollLogger::Flush:PinnedLogger",
-        "AutoRollLoggerTest::LogFlushWhileRolling:PreRollAndPostThreadInit"},
-       {"PosixLogger::Flush:Begin1",
-        "AutoRollLogger::ResetLogger:BeforeNewLogger"},
-       {"AutoRollLogger::ResetLogger:AfterNewLogger",
-        "PosixLogger::Flush:Begin2"}},
-      {{"AutoRollLogger::Flush:PinnedLogger", "PosixLogger::Flush:Begin1"},
-       {"AutoRollLogger::Flush:PinnedLogger", "PosixLogger::Flush:Begin2"}});
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  flush_thread = port::Thread ([&]() { auto_roll_logger->Flush(); });
-  TEST_SYNC_POINT(
-      "AutoRollLoggerTest::LogFlushWhileRolling:PreRollAndPostThreadInit");
-  RollLogFileBySizeTest(auto_roll_logger, options.max_log_file_size,
-                        kSampleMessage + ":LogFlushWhileRolling");
-  flush_thread.join();
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-#endif  // OS_WIN
-
-TEST_F(AutoRollLoggerTest, InfoLogLevel) {
-  InitTestDb();
-
-  size_t log_size = 8192;
-  size_t log_lines = 0;
-  // an extra scope to force the AutoRollLogger to flush the log file when it
-  // goes out of scope.
-  {
-    AutoRollLogger logger(Env::Default(), kTestDir, "", log_size, 0);
-    for (int log_level = InfoLogLevel::HEADER_LEVEL;
-         log_level >= InfoLogLevel::DEBUG_LEVEL; log_level--) {
-      logger.SetInfoLogLevel((InfoLogLevel)log_level);
-      for (int log_type = InfoLogLevel::DEBUG_LEVEL;
-           log_type <= InfoLogLevel::HEADER_LEVEL; log_type++) {
-        // log messages with log level smaller than log_level will not be
-        // logged.
-        LogMessage((InfoLogLevel)log_type, &logger, kSampleMessage.c_str());
-      }
-      log_lines += InfoLogLevel::HEADER_LEVEL - log_level + 1;
-    }
-    for (int log_level = InfoLogLevel::HEADER_LEVEL;
-         log_level >= InfoLogLevel::DEBUG_LEVEL; log_level--) {
-      logger.SetInfoLogLevel((InfoLogLevel)log_level);
-
-      // again, messages with level smaller than log_level will not be logged.
-      ROCKS_LOG_HEADER(&logger, "%s", kSampleMessage.c_str());
-      ROCKS_LOG_DEBUG(&logger, "%s", kSampleMessage.c_str());
-      ROCKS_LOG_INFO(&logger, "%s", kSampleMessage.c_str());
-      ROCKS_LOG_WARN(&logger, "%s", kSampleMessage.c_str());
-      ROCKS_LOG_ERROR(&logger, "%s", kSampleMessage.c_str());
-      ROCKS_LOG_FATAL(&logger, "%s", kSampleMessage.c_str());
-      log_lines += InfoLogLevel::HEADER_LEVEL - log_level + 1;
-    }
-  }
-  std::ifstream inFile(AutoRollLoggerTest::kLogFile.c_str());
-  size_t lines = std::count(std::istreambuf_iterator<char>(inFile),
-                         std::istreambuf_iterator<char>(), '\n');
-  ASSERT_EQ(log_lines, lines);
-  inFile.close();
-}
-
-// Test the logger Header function for rolled-over logs.
-// We expect the new logs created by a roll-over to carry the specified headers.
-static std::vector<std::string> GetOldFileNames(const std::string& path) {
-  std::vector<std::string> ret;
-
-  const std::string dirname = path.substr(/*start=*/0, path.find_last_of("/"));
-  const std::string fname = path.substr(path.find_last_of("/") + 1);
-
-  std::vector<std::string> children;
-  Env::Default()->GetChildren(dirname, &children);
-
-  // We know that the old log files are named [path]<something>
-  // Return all entities that match the pattern
-  for (auto& child : children) {
-    if (fname != child && child.find(fname) == 0) {
-      ret.push_back(dirname + "/" + child);
-    }
-  }
-
-  return ret;
-}
-
-// Return the number of lines where a given pattern was found in the file
-static size_t GetLinesCount(const std::string& fname,
-                            const std::string& pattern) {
-  std::stringstream ssbuf;
-  std::string line;
-  size_t count = 0;
-
-  std::ifstream inFile(fname.c_str());
-  ssbuf << inFile.rdbuf();
-
-  while (getline(ssbuf, line)) {
-    if (line.find(pattern) != std::string::npos) {
-      count++;
-    }
-  }
-
-  return count;
-}
-
-TEST_F(AutoRollLoggerTest, LogHeaderTest) {
-  static const size_t MAX_HEADERS = 10;
-  static const size_t LOG_MAX_SIZE = 1024 * 5;
-  static const std::string HEADER_STR = "Log header line";
-
-  // test_num == 0 -> standard call to Header()
-  // test_num == 1 -> call to Log() with InfoLogLevel::HEADER_LEVEL
-  for (int test_num = 0; test_num < 2; test_num++) {
-
-    InitTestDb();
-
-    AutoRollLogger logger(Env::Default(), kTestDir, /*db_log_dir=*/ "",
-                          LOG_MAX_SIZE, /*log_file_time_to_roll=*/ 0);
-
-    if (test_num == 0) {
-      // Log some headers explicitly using Header()
-      for (size_t i = 0; i < MAX_HEADERS; i++) {
-        Header(&logger, "%s %d", HEADER_STR.c_str(), i);
-      }
-    } else if (test_num == 1) {
-      // HEADER_LEVEL should make this behave like calling Header()
-      for (size_t i = 0; i < MAX_HEADERS; i++) {
-        ROCKS_LOG_HEADER(&logger, "%s %d", HEADER_STR.c_str(), i);
-      }
-    }
-
-    const std::string newfname = logger.TEST_log_fname();
-
-    // Log enough data to cause a roll over
-    int i = 0;
-    for (size_t iter = 0; iter < 2; iter++) {
-      while (logger.GetLogFileSize() < LOG_MAX_SIZE) {
-        Info(&logger, (kSampleMessage + ":LogHeaderTest line %d").c_str(), i);
-        ++i;
-      }
-
-      Info(&logger, "Rollover");
-    }
-
-    // Flush the log for the latest file
-    LogFlush(&logger);
-
-    const auto oldfiles = GetOldFileNames(newfname);
-
-    ASSERT_EQ(oldfiles.size(), (size_t) 2);
-
-    for (auto& oldfname : oldfiles) {
-      // verify that the files rolled over
-      ASSERT_NE(oldfname, newfname);
-      // verify that the old log contains all the header logs
-      ASSERT_EQ(GetLinesCount(oldfname, HEADER_STR), MAX_HEADERS);
-    }
-  }
-}
-
-TEST_F(AutoRollLoggerTest, LogFileExistence) {
-  rocksdb::DB* db;
-  rocksdb::Options options;
-#ifdef OS_WIN
-  // Replace all slashes in the path so windows CompSpec does not
-  // become confused
-  std::string testDir(kTestDir);
-  std::replace_if(testDir.begin(), testDir.end(),
-    [](char ch) { return ch == '/'; }, '\\');
-  std::string deleteCmd = "if exist " + testDir + " rd /s /q " + testDir;
-#else
-  std::string deleteCmd = "rm -rf " + kTestDir;
-#endif
-  ASSERT_EQ(system(deleteCmd.c_str()), 0);
-  options.max_log_file_size = 100 * 1024 * 1024;
-  options.create_if_missing = true;
-  ASSERT_OK(rocksdb::DB::Open(options, kTestDir, &db));
-  ASSERT_OK(default_env->FileExists(kLogFile));
-  delete db;
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr,
-          "SKIPPED as AutoRollLogger is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // !ROCKSDB_LITE
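
The roll-by-size behaviour that RollLogFileBySizeTest relied on (the size check happens before the write, so the roll fires on the message after the limit is crossed) can be sketched with a toy logger. Nothing below is RocksDB's AutoRollLogger API; it is only an illustration of the mechanism under that assumption:

#include <cstdio>
#include <ctime>
#include <string>

// Toy size-based rolling logger (illustrative only).
class ToyRollingLogger {
 public:
  ToyRollingLogger(std::string path, size_t max_size)
      : path_(std::move(path)), max_size_(max_size) { Open(); }
  ~ToyRollingLogger() { if (file_) std::fclose(file_); }

  void Log(const std::string& msg) {
    // Check-then-write: the file may overshoot max_size_ by one message and
    // the next call performs the roll.
    if (size_ >= max_size_) Roll();
    std::fprintf(file_, "%s\n", msg.c_str());
    size_ += msg.size() + 1;
  }

  size_t GetLogFileSize() const { return size_; }

 private:
  void Open() {
    file_ = std::fopen(path_.c_str(), "w");
    size_ = 0;
  }
  void Roll() {
    // Archive the current file under a timestamped name, then start fresh.
    std::fclose(file_);
    std::string archived = path_ + "." + std::to_string(std::time(nullptr));
    std::rename(path_.c_str(), archived.c_str());
    Open();
  }

  std::string path_;
  size_t max_size_;
  std::FILE* file_ = nullptr;
  size_t size_ = 0;
};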

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/d3a13a49/thirdparty/rocksdb/util/autovector_test.cc
----------------------------------------------------------------------
diff --git a/thirdparty/rocksdb/util/autovector_test.cc b/thirdparty/rocksdb/util/autovector_test.cc
deleted file mode 100644
index 2d7bcea..0000000
--- a/thirdparty/rocksdb/util/autovector_test.cc
+++ /dev/null
@@ -1,327 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <atomic>
-#include <iostream>
-#include <string>
-#include <utility>
-
-#include "rocksdb/env.h"
-#include "util/autovector.h"
-#include "util/string_util.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-using std::cout;
-using std::endl;
-
-namespace rocksdb {
-
-class AutoVectorTest : public testing::Test {};
-const unsigned long kSize = 8;
-
-namespace {
-template <class T>
-void AssertAutoVectorOnlyInStack(autovector<T, kSize>* vec, bool result) {
-#ifndef ROCKSDB_LITE
-  ASSERT_EQ(vec->only_in_stack(), result);
-#endif  // !ROCKSDB_LITE
-}
-}  // namespace
-
-TEST_F(AutoVectorTest, PushBackAndPopBack) {
-  autovector<size_t, kSize> vec;
-  ASSERT_TRUE(vec.empty());
-  ASSERT_EQ(0ul, vec.size());
-
-  for (size_t i = 0; i < 1000 * kSize; ++i) {
-    vec.push_back(i);
-    ASSERT_TRUE(!vec.empty());
-    if (i < kSize) {
-      AssertAutoVectorOnlyInStack(&vec, true);
-    } else {
-      AssertAutoVectorOnlyInStack(&vec, false);
-    }
-    ASSERT_EQ(i + 1, vec.size());
-    ASSERT_EQ(i, vec[i]);
-    ASSERT_EQ(i, vec.at(i));
-  }
-
-  size_t size = vec.size();
-  while (size != 0) {
-    vec.pop_back();
-    // will always be on the heap
-    AssertAutoVectorOnlyInStack(&vec, false);
-    ASSERT_EQ(--size, vec.size());
-  }
-
-  ASSERT_TRUE(vec.empty());
-}
-
-TEST_F(AutoVectorTest, EmplaceBack) {
-  typedef std::pair<size_t, std::string> ValType;
-  autovector<ValType, kSize> vec;
-
-  for (size_t i = 0; i < 1000 * kSize; ++i) {
-    vec.emplace_back(i, ToString(i + 123));
-    ASSERT_TRUE(!vec.empty());
-    if (i < kSize) {
-      AssertAutoVectorOnlyInStack(&vec, true);
-    } else {
-      AssertAutoVectorOnlyInStack(&vec, false);
-    }
-
-    ASSERT_EQ(i + 1, vec.size());
-    ASSERT_EQ(i, vec[i].first);
-    ASSERT_EQ(ToString(i + 123), vec[i].second);
-  }
-
-  vec.clear();
-  ASSERT_TRUE(vec.empty());
-  AssertAutoVectorOnlyInStack(&vec, false);
-}
-
-TEST_F(AutoVectorTest, Resize) {
-  autovector<size_t, kSize> vec;
-
-  vec.resize(kSize);
-  AssertAutoVectorOnlyInStack(&vec, true);
-  for (size_t i = 0; i < kSize; ++i) {
-    vec[i] = i;
-  }
-
-  vec.resize(kSize * 2);
-  AssertAutoVectorOnlyInStack(&vec, false);
-  for (size_t i = 0; i < kSize; ++i) {
-    ASSERT_EQ(vec[i], i);
-  }
-  for (size_t i = 0; i < kSize; ++i) {
-    vec[i + kSize] = i;
-  }
-
-  vec.resize(1);
-  ASSERT_EQ(1U, vec.size());
-}
-
-namespace {
-void AssertEqual(
-    const autovector<size_t, kSize>& a, const autovector<size_t, kSize>& b) {
-  ASSERT_EQ(a.size(), b.size());
-  ASSERT_EQ(a.empty(), b.empty());
-#ifndef ROCKSDB_LITE
-  ASSERT_EQ(a.only_in_stack(), b.only_in_stack());
-#endif  // !ROCKSDB_LITE
-  for (size_t i = 0; i < a.size(); ++i) {
-    ASSERT_EQ(a[i], b[i]);
-  }
-}
-}  // namespace
-
-TEST_F(AutoVectorTest, CopyAndAssignment) {
-  // Test both heap-allocated and stack-allocated cases.
-  for (auto size : { kSize / 2, kSize * 1000 }) {
-    autovector<size_t, kSize> vec;
-    for (size_t i = 0; i < size; ++i) {
-      vec.push_back(i);
-    }
-
-    {
-      autovector<size_t, kSize> other;
-      other = vec;
-      AssertEqual(other, vec);
-    }
-
-    {
-      autovector<size_t, kSize> other(vec);
-      AssertEqual(other, vec);
-    }
-  }
-}
-
-TEST_F(AutoVectorTest, Iterators) {
-  autovector<std::string, kSize> vec;
-  for (size_t i = 0; i < kSize * 1000; ++i) {
-    vec.push_back(ToString(i));
-  }
-
-  // basic operator test
-  ASSERT_EQ(vec.front(), *vec.begin());
-  ASSERT_EQ(vec.back(), *(vec.end() - 1));
-  ASSERT_TRUE(vec.begin() < vec.end());
-
-  // non-const iterator
-  size_t index = 0;
-  for (const auto& item : vec) {
-    ASSERT_EQ(vec[index++], item);
-  }
-
-  index = vec.size() - 1;
-  for (auto pos = vec.rbegin(); pos != vec.rend(); ++pos) {
-    ASSERT_EQ(vec[index--], *pos);
-  }
-
-  // const iterator
-  const auto& cvec = vec;
-  index = 0;
-  for (const auto& item : cvec) {
-    ASSERT_EQ(cvec[index++], item);
-  }
-
-  index = vec.size() - 1;
-  for (auto pos = cvec.rbegin(); pos != cvec.rend(); ++pos) {
-    ASSERT_EQ(cvec[index--], *pos);
-  }
-
-  // forward and backward
-  auto pos = vec.begin();
-  while (pos != vec.end()) {
-    auto old_val = *pos;
-    auto old = pos++;
-    // HACK: make sure -> works
-    ASSERT_TRUE(!old->empty());
-    ASSERT_EQ(old_val, *old);
-    ASSERT_TRUE(pos == vec.end() || old_val != *pos);
-  }
-
-  pos = vec.begin();
-  for (size_t i = 0; i < vec.size(); i += 2) {
-    // Cannot use ASSERT_EQ since that macro depends on iostream serialization
-    ASSERT_TRUE(pos + 2 - 2 == pos);
-    pos += 2;
-    ASSERT_TRUE(pos >= vec.begin());
-    ASSERT_TRUE(pos <= vec.end());
-
-    size_t diff = static_cast<size_t>(pos - vec.begin());
-    ASSERT_EQ(i + 2, diff);
-  }
-}
-
-namespace {
-std::vector<std::string> GetTestKeys(size_t size) {
-  std::vector<std::string> keys;
-  keys.resize(size);
-
-  int index = 0;
-  for (auto& key : keys) {
-    key = "item-" + rocksdb::ToString(index++);
-  }
-  return keys;
-}
-}  // namespace
-
-template <class TVector>
-void BenchmarkVectorCreationAndInsertion(
-    std::string name, size_t ops, size_t item_size,
-    const std::vector<typename TVector::value_type>& items) {
-  auto env = Env::Default();
-
-  int index = 0;
-  auto start_time = env->NowNanos();
-  auto ops_remaining = ops;
-  while(ops_remaining--) {
-    TVector v;
-    for (size_t i = 0; i < item_size; ++i) {
-      v.push_back(items[index++]);
-    }
-  }
-  auto elapsed = env->NowNanos() - start_time;
-  cout << "created " << ops << " " << name << " instances:\n\t"
-       << "each was inserted with " << item_size << " elements\n\t"
-       << "total time elapsed: " << elapsed << " (ns)" << endl;
-}
-
-template <class TVector>
-size_t BenchmarkSequenceAccess(std::string name, size_t ops, size_t elem_size) {
-  TVector v;
-  for (const auto& item : GetTestKeys(elem_size)) {
-    v.push_back(item);
-  }
-  auto env = Env::Default();
-
-  auto ops_remaining = ops;
-  auto start_time = env->NowNanos();
-  size_t total = 0;
-  while (ops_remaining--) {
-    auto end = v.end();
-    for (auto pos = v.begin(); pos != end; ++pos) {
-      total += pos->size();
-    }
-  }
-  auto elapsed = env->NowNanos() - start_time;
-  cout << "performed " << ops << " sequence access against " << name << "\n\t"
-       << "size: " << elem_size << "\n\t"
-       << "total time elapsed: " << elapsed << " (ns)" << endl;
-  // HACK: prevent the compiler from optimizing total away
-  return total;
-}
-
-// This test case only compares the performance of std::vector<std::string>
-// and autovector<std::string>. We chose string for comparison because in most
-// of our use cases we used std::vector<std::string>.
-TEST_F(AutoVectorTest, PerfBench) {
-  // We run the same operations kOps times in order to get a fairer result.
-  size_t kOps = 100000;
-
-  // Creation and insertion test
-  // Test the case when there is:
-  //  * no element inserted: the internal array of std::vector may not really
-  //    get initialized.
-  //  * one element inserted: the internal array of std::vector must have
-  //    been initialized.
-  //  * kSize elements inserted. This shows the most time we'll spend if we
-  //    keep everything in stack.
-  //  * 2 * kSize elements inserted. The internal vector of
-  //    autovector must have been initialized.
-  cout << "=====================================================" << endl;
-  cout << "Creation and Insertion Test (value type: std::string)" << endl;
-  cout << "=====================================================" << endl;
-
-  // pre-generated unique keys
-  auto string_keys = GetTestKeys(kOps * 2 * kSize);
-  for (auto insertions : { 0ul, 1ul, kSize / 2, kSize, 2 * kSize }) {
-    BenchmarkVectorCreationAndInsertion<std::vector<std::string>>(
-        "std::vector<std::string>", kOps, insertions, string_keys);
-    BenchmarkVectorCreationAndInsertion<autovector<std::string, kSize>>(
-        "autovector<std::string>", kOps, insertions, string_keys);
-    cout << "-----------------------------------" << endl;
-  }
-
-  cout << "=====================================================" << endl;
-  cout << "Creation and Insertion Test (value type: uint64_t)" << endl;
-  cout << "=====================================================" << endl;
-
-  // pre-generated unique keys
-  std::vector<uint64_t> int_keys(kOps * 2 * kSize);
-  for (size_t i = 0; i < kOps * 2 * kSize; ++i) {
-    int_keys[i] = i;
-  }
-  for (auto insertions : { 0ul, 1ul, kSize / 2, kSize, 2 * kSize }) {
-    BenchmarkVectorCreationAndInsertion<std::vector<uint64_t>>(
-        "std::vector<uint64_t>", kOps, insertions, int_keys);
-    BenchmarkVectorCreationAndInsertion<autovector<uint64_t, kSize>>(
-      "autovector<uint64_t>", kOps, insertions, int_keys
-    );
-    cout << "-----------------------------------" << endl;
-  }
-
-  // Sequence Access Test
-  cout << "=====================================================" << endl;
-  cout << "Sequence Access Test" << endl;
-  cout << "=====================================================" << endl;
-  for (auto elem_size : { kSize / 2, kSize, 2 * kSize }) {
-    BenchmarkSequenceAccess<std::vector<std::string>>("std::vector", kOps,
-                                                      elem_size);
-    BenchmarkSequenceAccess<autovector<std::string, kSize>>("autovector", kOps,
-                                                            elem_size);
-    cout << "-----------------------------------" << endl;
-  }
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
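
autovector's defining property, probed above via only_in_stack(), is small-buffer optimization: the first kSize elements live inside the object and later elements spill to the heap. A stripped-down sketch of that layout (not the real autovector; names and behaviour below are simplified assumptions):

#include <cstddef>
#include <vector>

// Toy small-buffer vector: inline storage for the first kInline elements,
// std::vector overflow beyond that.
template <class T, size_t kInline = 8>
class ToySmallVector {
 public:
  void push_back(const T& v) {
    if (overflow_.empty() && inline_count_ < kInline) {
      inline_[inline_count_++] = v;       // still "only in stack"
    } else {
      overflow_.push_back(v);             // spilled to the heap
    }
  }

  size_t size() const { return inline_count_ + overflow_.size(); }
  bool only_in_stack() const { return overflow_.empty(); }

  T& operator[](size_t i) {
    return i < inline_count_ ? inline_[i] : overflow_[i - inline_count_];
  }

 private:
  T inline_[kInline]{};
  size_t inline_count_ = 0;
  std::vector<T> overflow_;
};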

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/d3a13a49/thirdparty/rocksdb/util/bloom_test.cc
----------------------------------------------------------------------
diff --git a/thirdparty/rocksdb/util/bloom_test.cc b/thirdparty/rocksdb/util/bloom_test.cc
deleted file mode 100644
index 9c32341..0000000
--- a/thirdparty/rocksdb/util/bloom_test.cc
+++ /dev/null
@@ -1,321 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2012 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef GFLAGS
-#include <cstdio>
-int main() {
-  fprintf(stderr, "Please install gflags to run this test... Skipping...\n");
-  return 0;
-}
-#else
-
-#include <gflags/gflags.h>
-#include <vector>
-
-#include "rocksdb/filter_policy.h"
-#include "table/full_filter_bits_builder.h"
-#include "util/arena.h"
-#include "util/logging.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-using GFLAGS::ParseCommandLineFlags;
-
-DEFINE_int32(bits_per_key, 10, "");
-
-namespace rocksdb {
-
-static const int kVerbose = 1;
-
-static Slice Key(int i, char* buffer) {
-  std::string s;
-  PutFixed32(&s, static_cast<uint32_t>(i));
-  memcpy(buffer, s.c_str(), sizeof(i));
-  return Slice(buffer, sizeof(i));
-}
-
-static int NextLength(int length) {
-  if (length < 10) {
-    length += 1;
-  } else if (length < 100) {
-    length += 10;
-  } else if (length < 1000) {
-    length += 100;
-  } else {
-    length += 1000;
-  }
-  return length;
-}
-
-class BloomTest : public testing::Test {
- private:
-  const FilterPolicy* policy_;
-  std::string filter_;
-  std::vector<std::string> keys_;
-
- public:
-  BloomTest() : policy_(
-      NewBloomFilterPolicy(FLAGS_bits_per_key)) {}
-
-  ~BloomTest() {
-    delete policy_;
-  }
-
-  void Reset() {
-    keys_.clear();
-    filter_.clear();
-  }
-
-  void Add(const Slice& s) {
-    keys_.push_back(s.ToString());
-  }
-
-  void Build() {
-    std::vector<Slice> key_slices;
-    for (size_t i = 0; i < keys_.size(); i++) {
-      key_slices.push_back(Slice(keys_[i]));
-    }
-    filter_.clear();
-    policy_->CreateFilter(&key_slices[0], static_cast<int>(key_slices.size()),
-                          &filter_);
-    keys_.clear();
-    if (kVerbose >= 2) DumpFilter();
-  }
-
-  size_t FilterSize() const {
-    return filter_.size();
-  }
-
-  void DumpFilter() {
-    fprintf(stderr, "F(");
-    for (size_t i = 0; i+1 < filter_.size(); i++) {
-      const unsigned int c = static_cast<unsigned int>(filter_[i]);
-      for (int j = 0; j < 8; j++) {
-        fprintf(stderr, "%c", (c & (1 <<j)) ? '1' : '.');
-      }
-    }
-    fprintf(stderr, ")\n");
-  }
-
-  bool Matches(const Slice& s) {
-    if (!keys_.empty()) {
-      Build();
-    }
-    return policy_->KeyMayMatch(s, filter_);
-  }
-
-  double FalsePositiveRate() {
-    char buffer[sizeof(int)];
-    int result = 0;
-    for (int i = 0; i < 10000; i++) {
-      if (Matches(Key(i + 1000000000, buffer))) {
-        result++;
-      }
-    }
-    return result / 10000.0;
-  }
-};
-
-TEST_F(BloomTest, EmptyFilter) {
-  ASSERT_TRUE(! Matches("hello"));
-  ASSERT_TRUE(! Matches("world"));
-}
-
-TEST_F(BloomTest, Small) {
-  Add("hello");
-  Add("world");
-  ASSERT_TRUE(Matches("hello"));
-  ASSERT_TRUE(Matches("world"));
-  ASSERT_TRUE(! Matches("x"));
-  ASSERT_TRUE(! Matches("foo"));
-}
-
-TEST_F(BloomTest, VaryingLengths) {
-  char buffer[sizeof(int)];
-
-  // Count number of filters that significantly exceed the false positive rate
-  int mediocre_filters = 0;
-  int good_filters = 0;
-
-  for (int length = 1; length <= 10000; length = NextLength(length)) {
-    Reset();
-    for (int i = 0; i < length; i++) {
-      Add(Key(i, buffer));
-    }
-    Build();
-
-    ASSERT_LE(FilterSize(), (size_t)((length * 10 / 8) + 40)) << length;
-
-    // All added keys must match
-    for (int i = 0; i < length; i++) {
-      ASSERT_TRUE(Matches(Key(i, buffer)))
-          << "Length " << length << "; key " << i;
-    }
-
-    // Check false positive rate
-    double rate = FalsePositiveRate();
-    if (kVerbose >= 1) {
-      fprintf(stderr, "False positives: %5.2f%% @ length = %6d ; bytes = 
%6d\n",
-              rate*100.0, length, static_cast<int>(FilterSize()));
-    }
-    ASSERT_LE(rate, 0.02);   // Must not be over 2%
-    if (rate > 0.0125) mediocre_filters++;  // Allowed, but not too often
-    else good_filters++;
-  }
-  if (kVerbose >= 1) {
-    fprintf(stderr, "Filters: %d good, %d mediocre\n",
-            good_filters, mediocre_filters);
-  }
-  ASSERT_LE(mediocre_filters, good_filters/5);
-}
-
-// Different bits-per-byte
-
-class FullBloomTest : public testing::Test {
- private:
-  const FilterPolicy* policy_;
-  std::unique_ptr<FilterBitsBuilder> bits_builder_;
-  std::unique_ptr<FilterBitsReader> bits_reader_;
-  std::unique_ptr<const char[]> buf_;
-  size_t filter_size_;
-
- public:
-  FullBloomTest() :
-      policy_(NewBloomFilterPolicy(FLAGS_bits_per_key, false)),
-      filter_size_(0) {
-    Reset();
-  }
-
-  ~FullBloomTest() {
-    delete policy_;
-  }
-
-  FullFilterBitsBuilder* GetFullFilterBitsBuilder() {
-    return dynamic_cast<FullFilterBitsBuilder*>(bits_builder_.get());
-  }
-
-  void Reset() {
-    bits_builder_.reset(policy_->GetFilterBitsBuilder());
-    bits_reader_.reset(nullptr);
-    buf_.reset(nullptr);
-    filter_size_ = 0;
-  }
-
-  void Add(const Slice& s) {
-    bits_builder_->AddKey(s);
-  }
-
-  void Build() {
-    Slice filter = bits_builder_->Finish(&buf_);
-    bits_reader_.reset(policy_->GetFilterBitsReader(filter));
-    filter_size_ = filter.size();
-  }
-
-  size_t FilterSize() const {
-    return filter_size_;
-  }
-
-  bool Matches(const Slice& s) {
-    if (bits_reader_ == nullptr) {
-      Build();
-    }
-    return bits_reader_->MayMatch(s);
-  }
-
-  double FalsePositiveRate() {
-    char buffer[sizeof(int)];
-    int result = 0;
-    for (int i = 0; i < 10000; i++) {
-      if (Matches(Key(i + 1000000000, buffer))) {
-        result++;
-      }
-    }
-    return result / 10000.0;
-  }
-};
-
-TEST_F(FullBloomTest, FilterSize) {
-  uint32_t dont_care1, dont_care2;
-  auto full_bits_builder = GetFullFilterBitsBuilder();
-  for (int n = 1; n < 100; n++) {
-    auto space = full_bits_builder->CalculateSpace(n, &dont_care1, &dont_care2);
-    auto n2 = full_bits_builder->CalculateNumEntry(space);
-    ASSERT_GE(n2, n);
-    auto space2 =
-        full_bits_builder->CalculateSpace(n2, &dont_care1, &dont_care2);
-    ASSERT_EQ(space, space2);
-  }
-}
-
-TEST_F(FullBloomTest, FullEmptyFilter) {
-  // An empty filter does not match anything, at this level
-  ASSERT_TRUE(!Matches("hello"));
-  ASSERT_TRUE(!Matches("world"));
-}
-
-TEST_F(FullBloomTest, FullSmall) {
-  Add("hello");
-  Add("world");
-  ASSERT_TRUE(Matches("hello"));
-  ASSERT_TRUE(Matches("world"));
-  ASSERT_TRUE(!Matches("x"));
-  ASSERT_TRUE(!Matches("foo"));
-}
-
-TEST_F(FullBloomTest, FullVaryingLengths) {
-  char buffer[sizeof(int)];
-
-  // Count number of filters that significantly exceed the false positive rate
-  int mediocre_filters = 0;
-  int good_filters = 0;
-
-  for (int length = 1; length <= 10000; length = NextLength(length)) {
-    Reset();
-    for (int i = 0; i < length; i++) {
-      Add(Key(i, buffer));
-    }
-    Build();
-
-    ASSERT_LE(FilterSize(), (size_t)((length * 10 / 8) + 128 + 5)) << length;
-
-    // All added keys must match
-    for (int i = 0; i < length; i++) {
-      ASSERT_TRUE(Matches(Key(i, buffer)))
-          << "Length " << length << "; key " << i;
-    }
-
-    // Check false positive rate
-    double rate = FalsePositiveRate();
-    if (kVerbose >= 1) {
-      fprintf(stderr, "False positives: %5.2f%% @ length = %6d ; bytes = 
%6d\n",
-              rate*100.0, length, static_cast<int>(FilterSize()));
-    }
-    ASSERT_LE(rate, 0.02);   // Must not be over 2%
-    if (rate > 0.0125)
-      mediocre_filters++;  // Allowed, but not too often
-    else
-      good_filters++;
-  }
-  if (kVerbose >= 1) {
-    fprintf(stderr, "Filters: %d good, %d mediocre\n",
-            good_filters, mediocre_filters);
-  }
-  ASSERT_LE(mediocre_filters, good_filters/5);
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  ParseCommandLineFlags(&argc, &argv, true);
-
-  return RUN_ALL_TESTS();
-}
-
-#endif  // GFLAGS
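
The property measured above is the classic Bloom-filter trade-off: added keys always match, and with roughly 10 bits per key the false-positive rate for absent keys sits near 1%, which is why the test allows up to 2%. A compact sketch of such a filter using double hashing (illustrative only; RocksDB's FullFilterBitsBuilder differs in bit layout and hash functions):

#include <algorithm>
#include <cstdint>
#include <functional>
#include <string>
#include <vector>

// Toy Bloom filter with k probes derived from two base hashes.
class ToyBloom {
 public:
  ToyBloom(size_t num_keys, int bits_per_key)
      : bits_(std::max<size_t>(64, num_keys * bits_per_key)),
        num_probes_(std::max(1, static_cast<int>(bits_per_key * 0.69))) {}

  void Add(const std::string& key) {
    uint64_t h1, h2;
    Hashes(key, &h1, &h2);
    for (int i = 0; i < num_probes_; ++i) {
      bits_[(h1 + i * h2) % bits_.size()] = true;
    }
  }

  bool MayMatch(const std::string& key) const {
    uint64_t h1, h2;
    Hashes(key, &h1, &h2);
    for (int i = 0; i < num_probes_; ++i) {
      if (!bits_[(h1 + i * h2) % bits_.size()]) return false;  // definitely absent
    }
    return true;  // present, or a false positive
  }

 private:
  static void Hashes(const std::string& key, uint64_t* h1, uint64_t* h2) {
    *h1 = std::hash<std::string>{}(key);
    *h2 = (*h1 >> 17) | (*h1 << 47);  // cheap second hash for double hashing
  }

  std::vector<bool> bits_;
  int num_probes_;
};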

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/d3a13a49/thirdparty/rocksdb/util/coding_test.cc
----------------------------------------------------------------------
diff --git a/thirdparty/rocksdb/util/coding_test.cc b/thirdparty/rocksdb/util/coding_test.cc
deleted file mode 100644
index 49fb73d..0000000
--- a/thirdparty/rocksdb/util/coding_test.cc
+++ /dev/null
@@ -1,204 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "util/coding.h"
-
-#include "util/testharness.h"
-
-namespace rocksdb {
-
-class Coding { };
-
-TEST(Coding, Fixed32) {
-  std::string s;
-  for (uint32_t v = 0; v < 100000; v++) {
-    PutFixed32(&s, v);
-  }
-
-  const char* p = s.data();
-  for (uint32_t v = 0; v < 100000; v++) {
-    uint32_t actual = DecodeFixed32(p);
-    ASSERT_EQ(v, actual);
-    p += sizeof(uint32_t);
-  }
-}
-
-TEST(Coding, Fixed64) {
-  std::string s;
-  for (int power = 0; power <= 63; power++) {
-    uint64_t v = static_cast<uint64_t>(1) << power;
-    PutFixed64(&s, v - 1);
-    PutFixed64(&s, v + 0);
-    PutFixed64(&s, v + 1);
-  }
-
-  const char* p = s.data();
-  for (int power = 0; power <= 63; power++) {
-    uint64_t v = static_cast<uint64_t>(1) << power;
-    uint64_t actual = 0;
-    actual = DecodeFixed64(p);
-    ASSERT_EQ(v-1, actual);
-    p += sizeof(uint64_t);
-
-    actual = DecodeFixed64(p);
-    ASSERT_EQ(v+0, actual);
-    p += sizeof(uint64_t);
-
-    actual = DecodeFixed64(p);
-    ASSERT_EQ(v+1, actual);
-    p += sizeof(uint64_t);
-  }
-}
-
-// Test that encoding routines generate little-endian encodings
-TEST(Coding, EncodingOutput) {
-  std::string dst;
-  PutFixed32(&dst, 0x04030201);
-  ASSERT_EQ(4U, dst.size());
-  ASSERT_EQ(0x01, static_cast<int>(dst[0]));
-  ASSERT_EQ(0x02, static_cast<int>(dst[1]));
-  ASSERT_EQ(0x03, static_cast<int>(dst[2]));
-  ASSERT_EQ(0x04, static_cast<int>(dst[3]));
-
-  dst.clear();
-  PutFixed64(&dst, 0x0807060504030201ull);
-  ASSERT_EQ(8U, dst.size());
-  ASSERT_EQ(0x01, static_cast<int>(dst[0]));
-  ASSERT_EQ(0x02, static_cast<int>(dst[1]));
-  ASSERT_EQ(0x03, static_cast<int>(dst[2]));
-  ASSERT_EQ(0x04, static_cast<int>(dst[3]));
-  ASSERT_EQ(0x05, static_cast<int>(dst[4]));
-  ASSERT_EQ(0x06, static_cast<int>(dst[5]));
-  ASSERT_EQ(0x07, static_cast<int>(dst[6]));
-  ASSERT_EQ(0x08, static_cast<int>(dst[7]));
-}
-
-TEST(Coding, Varint32) {
-  std::string s;
-  for (uint32_t i = 0; i < (32 * 32); i++) {
-    uint32_t v = (i / 32) << (i % 32);
-    PutVarint32(&s, v);
-  }
-
-  const char* p = s.data();
-  const char* limit = p + s.size();
-  for (uint32_t i = 0; i < (32 * 32); i++) {
-    uint32_t expected = (i / 32) << (i % 32);
-    uint32_t actual = 0;
-    const char* start = p;
-    p = GetVarint32Ptr(p, limit, &actual);
-    ASSERT_TRUE(p != nullptr);
-    ASSERT_EQ(expected, actual);
-    ASSERT_EQ(VarintLength(actual), p - start);
-  }
-  ASSERT_EQ(p, s.data() + s.size());
-}
-
-TEST(Coding, Varint64) {
-  // Construct the list of values to check
-  std::vector<uint64_t> values;
-  // Some special values
-  values.push_back(0);
-  values.push_back(100);
-  values.push_back(~static_cast<uint64_t>(0));
-  values.push_back(~static_cast<uint64_t>(0) - 1);
-  for (uint32_t k = 0; k < 64; k++) {
-    // Test values near powers of two
-    const uint64_t power = 1ull << k;
-    values.push_back(power);
-    values.push_back(power-1);
-    values.push_back(power+1);
-  };
-
-  std::string s;
-  for (unsigned int i = 0; i < values.size(); i++) {
-    PutVarint64(&s, values[i]);
-  }
-
-  const char* p = s.data();
-  const char* limit = p + s.size();
-  for (unsigned int i = 0; i < values.size(); i++) {
-    ASSERT_TRUE(p < limit);
-    uint64_t actual = 0;
-    const char* start = p;
-    p = GetVarint64Ptr(p, limit, &actual);
-    ASSERT_TRUE(p != nullptr);
-    ASSERT_EQ(values[i], actual);
-    ASSERT_EQ(VarintLength(actual), p - start);
-  }
-  ASSERT_EQ(p, limit);
-
-}
-
-TEST(Coding, Varint32Overflow) {
-  uint32_t result;
-  std::string input("\x81\x82\x83\x84\x85\x11");
-  ASSERT_TRUE(GetVarint32Ptr(input.data(), input.data() + input.size(), &result)
-              == nullptr);
-}
-
-TEST(Coding, Varint32Truncation) {
-  uint32_t large_value = (1u << 31) + 100;
-  std::string s;
-  PutVarint32(&s, large_value);
-  uint32_t result;
-  for (unsigned int len = 0; len < s.size() - 1; len++) {
-    ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + len, &result) == nullptr);
-  }
-  ASSERT_TRUE(
-      GetVarint32Ptr(s.data(), s.data() + s.size(), &result) != nullptr);
-  ASSERT_EQ(large_value, result);
-}
-
-TEST(Coding, Varint64Overflow) {
-  uint64_t result;
-  std::string input("\x81\x82\x83\x84\x85\x81\x82\x83\x84\x85\x11");
-  ASSERT_TRUE(GetVarint64Ptr(input.data(), input.data() + input.size(), &result)
-              == nullptr);
-}
-
-TEST(Coding, Varint64Truncation) {
-  uint64_t large_value = (1ull << 63) + 100ull;
-  std::string s;
-  PutVarint64(&s, large_value);
-  uint64_t result;
-  for (unsigned int len = 0; len < s.size() - 1; len++) {
-    ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + len, &result) == nullptr);
-  }
-  ASSERT_TRUE(
-      GetVarint64Ptr(s.data(), s.data() + s.size(), &result) != nullptr);
-  ASSERT_EQ(large_value, result);
-}
-
-TEST(Coding, Strings) {
-  std::string s;
-  PutLengthPrefixedSlice(&s, Slice(""));
-  PutLengthPrefixedSlice(&s, Slice("foo"));
-  PutLengthPrefixedSlice(&s, Slice("bar"));
-  PutLengthPrefixedSlice(&s, Slice(std::string(200, 'x')));
-
-  Slice input(s);
-  Slice v;
-  ASSERT_TRUE(GetLengthPrefixedSlice(&input, &v));
-  ASSERT_EQ("", v.ToString());
-  ASSERT_TRUE(GetLengthPrefixedSlice(&input, &v));
-  ASSERT_EQ("foo", v.ToString());
-  ASSERT_TRUE(GetLengthPrefixedSlice(&input, &v));
-  ASSERT_EQ("bar", v.ToString());
-  ASSERT_TRUE(GetLengthPrefixedSlice(&input, &v));
-  ASSERT_EQ(std::string(200, 'x'), v.ToString());
-  ASSERT_EQ("", input.ToString());
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
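
The varint cases above all follow from one encoding rule: seven payload bits per byte, least-significant group first, with the high bit marking continuation. A condensed sketch of encode/decode under those rules (the real util/coding.h adds fast paths and fixed-width helpers, so treat this as an outline rather than the shipped implementation):

#include <cstdint>
#include <string>

// Append v to *dst, 7 bits per byte, high bit set on all but the last byte.
void PutVarint32(std::string* dst, uint32_t v) {
  while (v >= 0x80) {
    dst->push_back(static_cast<char>((v & 0x7f) | 0x80));
    v >>= 7;
  }
  dst->push_back(static_cast<char>(v & 0x7f));
}

// Decode one varint32 from [p, limit). Returns the position past the value,
// or nullptr on truncation or a value longer than five bytes -- the same
// contract the Varint32Truncation/Varint32Overflow tests verified.
const char* GetVarint32Ptr(const char* p, const char* limit, uint32_t* value) {
  uint32_t result = 0;
  for (uint32_t shift = 0; shift <= 28 && p < limit; shift += 7) {
    uint32_t byte = static_cast<unsigned char>(*p++);
    if (byte & 0x80) {
      result |= (byte & 0x7f) << shift;   // continuation: keep accumulating
    } else {
      *value = result | (byte << shift);  // final byte
      return p;
    }
  }
  return nullptr;
}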

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/d3a13a49/thirdparty/rocksdb/util/crc32c_test.cc
----------------------------------------------------------------------
diff --git a/thirdparty/rocksdb/util/crc32c_test.cc b/thirdparty/rocksdb/util/crc32c_test.cc
deleted file mode 100644
index 306194e..0000000
--- a/thirdparty/rocksdb/util/crc32c_test.cc
+++ /dev/null
@@ -1,78 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "util/crc32c.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-namespace crc32c {
-
-class CRC { };
-
-TEST(CRC, StandardResults) {
-  // From rfc3720 section B.4.
-  char buf[32];
-
-  memset(buf, 0, sizeof(buf));
-  ASSERT_EQ(0x8a9136aaU, Value(buf, sizeof(buf)));
-
-  memset(buf, 0xff, sizeof(buf));
-  ASSERT_EQ(0x62a8ab43U, Value(buf, sizeof(buf)));
-
-  for (int i = 0; i < 32; i++) {
-    buf[i] = i;
-  }
-  ASSERT_EQ(0x46dd794eU, Value(buf, sizeof(buf)));
-
-  for (int i = 0; i < 32; i++) {
-    buf[i] = 31 - i;
-  }
-  ASSERT_EQ(0x113fdb5cU, Value(buf, sizeof(buf)));
-
-  unsigned char data[48] = {
-    0x01, 0xc0, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00,
-    0x14, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x04, 0x00,
-    0x00, 0x00, 0x00, 0x14,
-    0x00, 0x00, 0x00, 0x18,
-    0x28, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00,
-    0x02, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00,
-  };
-  ASSERT_EQ(0xd9963a56, Value(reinterpret_cast<char*>(data), sizeof(data)));
-}
-
-TEST(CRC, Values) {
-  ASSERT_NE(Value("a", 1), Value("foo", 3));
-}
-
-TEST(CRC, Extend) {
-  ASSERT_EQ(Value("hello world", 11),
-            Extend(Value("hello ", 6), "world", 5));
-}
-
-TEST(CRC, Mask) {
-  uint32_t crc = Value("foo", 3);
-  ASSERT_NE(crc, Mask(crc));
-  ASSERT_NE(crc, Mask(Mask(crc)));
-  ASSERT_EQ(crc, Unmask(Mask(crc)));
-  ASSERT_EQ(crc, Unmask(Unmask(Mask(Mask(crc)))));
-}
-
-}  // namespace crc32c
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
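
The Mask/Unmask pair tested above exists because computing a CRC over data that already embeds CRCs weakens the check; stored CRCs are therefore rotated and offset by a constant before being written. A sketch of that transformation in the LevelDB/RocksDB style (the rotation amount and delta below follow the convention used there, but verify against util/crc32c.h before relying on the exact values):

#include <cstdint>

static const uint32_t kMaskDelta = 0xa282ead8u;

// Rotate right by 15 bits and add a delta before storing a CRC.
inline uint32_t Mask(uint32_t crc) {
  return ((crc >> 15) | (crc << 17)) + kMaskDelta;
}

// Invert Mask(): subtract the delta, then rotate left by 15 bits.
inline uint32_t Unmask(uint32_t masked) {
  uint32_t rot = masked - kMaskDelta;
  return (rot >> 17) | (rot << 15);
}

// Properties the deleted CRC.Mask test asserted: Unmask(Mask(c)) == c, and
// Mask(c) differs from c for typical inputs.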

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/d3a13a49/thirdparty/rocksdb/util/delete_scheduler_test.cc
----------------------------------------------------------------------
diff --git a/thirdparty/rocksdb/util/delete_scheduler_test.cc b/thirdparty/rocksdb/util/delete_scheduler_test.cc
deleted file mode 100644
index 208bdd7..0000000
--- a/thirdparty/rocksdb/util/delete_scheduler_test.cc
+++ /dev/null
@@ -1,563 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <inttypes.h>
-#include <atomic>
-#include <thread>
-#include <vector>
-
-#include "rocksdb/env.h"
-#include "rocksdb/options.h"
-#include "util/delete_scheduler.h"
-#include "util/sst_file_manager_impl.h"
-#include "util/string_util.h"
-#include "util/sync_point.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-#ifndef ROCKSDB_LITE
-
-namespace rocksdb {
-
-class DeleteSchedulerTest : public testing::Test {
- public:
-  DeleteSchedulerTest() : env_(Env::Default()) {
-    dummy_files_dir_ = test::TmpDir(env_) + "/delete_scheduler_dummy_data_dir";
-    DestroyAndCreateDir(dummy_files_dir_);
-    trash_dir_ = test::TmpDir(env_) + "/delete_scheduler_trash";
-    DestroyAndCreateDir(trash_dir_);
-  }
-
-  ~DeleteSchedulerTest() {
-    rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-    rocksdb::SyncPoint::GetInstance()->LoadDependency({});
-    rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
-    test::DestroyDir(env_, dummy_files_dir_);
-  }
-
-  void DestroyAndCreateDir(const std::string& dir) {
-    ASSERT_OK(test::DestroyDir(env_, dir));
-    EXPECT_OK(env_->CreateDir(dir));
-  }
-
-  int CountFilesInDir(const std::string& dir) {
-    std::vector<std::string> files_in_dir;
-    EXPECT_OK(env_->GetChildren(dir, &files_in_dir));
-    // Ignore "." and ".."
-    return static_cast<int>(files_in_dir.size()) - 2;
-  }
-
-  std::string NewDummyFile(const std::string& file_name, uint64_t size = 1024) {
-    std::string file_path = dummy_files_dir_ + "/" + file_name;
-    std::unique_ptr<WritableFile> f;
-    env_->NewWritableFile(file_path, &f, EnvOptions());
-    std::string data(size, 'A');
-    EXPECT_OK(f->Append(data));
-    EXPECT_OK(f->Close());
-    sst_file_mgr_->OnAddFile(file_path);
-    return file_path;
-  }
-
-  void NewDeleteScheduler() {
-    ASSERT_OK(env_->CreateDirIfMissing(trash_dir_));
-    sst_file_mgr_.reset(
-        new SstFileManagerImpl(env_, nullptr, trash_dir_, rate_bytes_per_sec_));
-    delete_scheduler_ = sst_file_mgr_->delete_scheduler();
-    // Tests in this file are for the DeleteScheduler component and don't create
-    // any DBs, so we need to set this value to 100% (instead of the default 25%)
-    delete_scheduler_->TEST_SetMaxTrashDBRatio(1.1);
-  }
-
-  Env* env_;
-  std::string dummy_files_dir_;
-  std::string trash_dir_;
-  int64_t rate_bytes_per_sec_;
-  DeleteScheduler* delete_scheduler_;
-  std::unique_ptr<SstFileManagerImpl> sst_file_mgr_;
-};
-
-// Test the basic functionality of DeleteScheduler (Rate Limiting).
-// 1- Create 100 dummy files
-// 2- Delete the 100 dummy files using DeleteScheduler
-// --- Hold DeleteScheduler::BackgroundEmptyTrash ---
-// 3- Wait for DeleteScheduler to delete all files in trash
-// 4- Verify that BackgroundEmptyTrash used the correct penalties for the files
-// 5- Make sure that all created files were completely deleted
-TEST_F(DeleteSchedulerTest, BasicRateLimiting) {
-  rocksdb::SyncPoint::GetInstance()->LoadDependency({
-      {"DeleteSchedulerTest::BasicRateLimiting:1",
-       "DeleteScheduler::BackgroundEmptyTrash"},
-  });
-
-  std::vector<uint64_t> penalties;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DeleteScheduler::BackgroundEmptyTrash:Wait",
-      [&](void* arg) { penalties.push_back(*(static_cast<uint64_t*>(arg))); });
-
-  int num_files = 100;  // 100 files
-  uint64_t file_size = 1024;  // every file is 1 kb
-  std::vector<uint64_t> delete_kbs_per_sec = {512, 200, 100, 50, 25};
-
-  for (size_t t = 0; t < delete_kbs_per_sec.size(); t++) {
-    penalties.clear();
-    rocksdb::SyncPoint::GetInstance()->ClearTrace();
-    rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-    DestroyAndCreateDir(dummy_files_dir_);
-    rate_bytes_per_sec_ = delete_kbs_per_sec[t] * 1024;
-    NewDeleteScheduler();
-
-    // Create 100 dummy files, every file is 1 Kb
-    std::vector<std::string> generated_files;
-    for (int i = 0; i < num_files; i++) {
-      std::string file_name = "file" + ToString(i) + ".data";
-      generated_files.push_back(NewDummyFile(file_name, file_size));
-    }
-
-    // Delete dummy files and measure time spent to empty trash
-    for (int i = 0; i < num_files; i++) {
-      ASSERT_OK(delete_scheduler_->DeleteFile(generated_files[i]));
-    }
-    ASSERT_EQ(CountFilesInDir(dummy_files_dir_), 0);
-
-    uint64_t delete_start_time = env_->NowMicros();
-    TEST_SYNC_POINT("DeleteSchedulerTest::BasicRateLimiting:1");
-    delete_scheduler_->WaitForEmptyTrash();
-    uint64_t time_spent_deleting = env_->NowMicros() - delete_start_time;
-
-    auto bg_errors = delete_scheduler_->GetBackgroundErrors();
-    ASSERT_EQ(bg_errors.size(), 0);
-
-    uint64_t total_files_size = 0;
-    uint64_t expected_penlty = 0;
-    ASSERT_EQ(penalties.size(), num_files);
-    for (int i = 0; i < num_files; i++) {
-      total_files_size += file_size;
-      expected_penlty = ((total_files_size * 1000000) / rate_bytes_per_sec_);
-      ASSERT_EQ(expected_penlty, penalties[i]);
-    }
-    ASSERT_GT(time_spent_deleting, expected_penlty * 0.9);
-
-    ASSERT_EQ(CountFilesInDir(trash_dir_), 0);
-    rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  }
-}
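For reference, the penalty asserted above is pure arithmetic: after the i-th 1 KB file reaches the trash, the background thread should have been told to wait (cumulative bytes * 1,000,000) / rate_bytes_per_sec microseconds in total. A minimal standalone sketch of that calculation (not RocksDB code; the 512 KB/sec rate is just the first entry of delete_kbs_per_sec above):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t file_size = 1024;                 // 1 KB per dummy file
      const uint64_t rate_bytes_per_sec = 512 * 1024;  // 512 KB/sec case
      uint64_t total_files_size = 0;
      for (int i = 0; i < 100; i++) {
        total_files_size += file_size;
        const uint64_t expected_penalty_us =
            (total_files_size * 1000000) / rate_bytes_per_sec;
        if (i == 0 || i == 99) {
          std::printf("after file %3d: cumulative wait = %llu us\n", i + 1,
                      static_cast<unsigned long long>(expected_penalty_us));
        }
      }
      return 0;
    }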
-
-// Same as the BasicRateLimiting test but delete files in multiple threads.
-// 1- Create 100 dummy files
-// 2- Delete the 100 dummy files using DeleteScheduler using 10 threads
-// --- Hold DeleteScheduler::BackgroundEmptyTrash ---
-// 3- Wait for DeleteScheduler to delete all files in queue
-// 4- Verify that BackgroundEmptyTrash used the correct penalties for the files
-// 5- Make sure that all created files were completely deleted
-TEST_F(DeleteSchedulerTest, RateLimitingMultiThreaded) {
-  rocksdb::SyncPoint::GetInstance()->LoadDependency({
-      {"DeleteSchedulerTest::RateLimitingMultiThreaded:1",
-       "DeleteScheduler::BackgroundEmptyTrash"},
-  });
-
-  std::vector<uint64_t> penalties;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DeleteScheduler::BackgroundEmptyTrash:Wait",
-      [&](void* arg) { penalties.push_back(*(static_cast<uint64_t*>(arg))); });
-
-  int thread_cnt = 10;
-  int num_files = 10;  // 10 files per thread
-  uint64_t file_size = 1024;  // every file is 1 kb
-
-  std::vector<uint64_t> delete_kbs_per_sec = {512, 200, 100, 50, 25};
-  for (size_t t = 0; t < delete_kbs_per_sec.size(); t++) {
-    penalties.clear();
-    rocksdb::SyncPoint::GetInstance()->ClearTrace();
-    rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-    DestroyAndCreateDir(dummy_files_dir_);
-    rate_bytes_per_sec_ = delete_kbs_per_sec[t] * 1024;
-    NewDeleteScheduler();
-
-    // Create 100 dummy files, every file is 1 Kb
-    std::vector<std::string> generated_files;
-    for (int i = 0; i < num_files * thread_cnt; i++) {
-      std::string file_name = "file" + ToString(i) + ".data";
-      generated_files.push_back(NewDummyFile(file_name, file_size));
-    }
-
-    // Delete dummy files using 10 threads and measure time spent to empty trash
-    std::atomic<int> thread_num(0);
-    std::vector<port::Thread> threads;
-    std::function<void()> delete_thread = [&]() {
-      int idx = thread_num.fetch_add(1);
-      int range_start = idx * num_files;
-      int range_end = range_start + num_files;
-      for (int j = range_start; j < range_end; j++) {
-        ASSERT_OK(delete_scheduler_->DeleteFile(generated_files[j]));
-      }
-    };
-
-    for (int i = 0; i < thread_cnt; i++) {
-      threads.emplace_back(delete_thread);
-    }
-
-    for (size_t i = 0; i < threads.size(); i++) {
-      threads[i].join();
-    }
-
-    uint64_t delete_start_time = env_->NowMicros();
-    TEST_SYNC_POINT("DeleteSchedulerTest::RateLimitingMultiThreaded:1");
-    delete_scheduler_->WaitForEmptyTrash();
-    uint64_t time_spent_deleting = env_->NowMicros() - delete_start_time;
-
-    auto bg_errors = delete_scheduler_->GetBackgroundErrors();
-    ASSERT_EQ(bg_errors.size(), 0);
-
-    uint64_t total_files_size = 0;
-    uint64_t expected_penlty = 0;
-    ASSERT_EQ(penalties.size(), num_files * thread_cnt);
-    for (int i = 0; i < num_files * thread_cnt; i++) {
-      total_files_size += file_size;
-      expected_penlty = ((total_files_size * 1000000) / rate_bytes_per_sec_);
-      ASSERT_EQ(expected_penlty, penalties[i]);
-    }
-    ASSERT_GT(time_spent_deleting, expected_penlty * 0.9);
-
-    ASSERT_EQ(CountFilesInDir(dummy_files_dir_), 0);
-    ASSERT_EQ(CountFilesInDir(trash_dir_), 0);
-    rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  }
-}
-
-// Disable rate limiting by setting rate_bytes_per_sec_ to 0 and make sure
-// that when DeleteScheduler deletes a file it deletes it immediately and
-// doesn't move it to trash
-TEST_F(DeleteSchedulerTest, DisableRateLimiting) {
-  int bg_delete_file = 0;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DeleteScheduler::DeleteTrashFile:DeleteFile",
-      [&](void* arg) { bg_delete_file++; });
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  rate_bytes_per_sec_ = 0;
-  NewDeleteScheduler();
-
-  for (int i = 0; i < 10; i++) {
-    // Every file we delete will be deleted immediately
-    std::string dummy_file = NewDummyFile("dummy.data");
-    ASSERT_OK(delete_scheduler_->DeleteFile(dummy_file));
-    ASSERT_TRUE(env_->FileExists(dummy_file).IsNotFound());
-    ASSERT_EQ(CountFilesInDir(dummy_files_dir_), 0);
-    ASSERT_EQ(CountFilesInDir(trash_dir_), 0);
-  }
-
-  ASSERT_EQ(bg_delete_file, 0);
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-// Testing that moving files to trash with the same name is not a problem
-// 1- Create 10 files with the same name "conflict.data"
-// 2- Delete the 10 files using DeleteScheduler
-// 3- Make sure that trash directory contain 10 files ("conflict.data" x 10)
-// --- Hold DeleteScheduler::BackgroundEmptyTrash ---
-// 4- Make sure that files are deleted from trash
-TEST_F(DeleteSchedulerTest, ConflictNames) {
-  rocksdb::SyncPoint::GetInstance()->LoadDependency({
-      {"DeleteSchedulerTest::ConflictNames:1",
-       "DeleteScheduler::BackgroundEmptyTrash"},
-  });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  rate_bytes_per_sec_ = 1024 * 1024;  // 1 Mb/sec
-  NewDeleteScheduler();
-
-  // Create "conflict.data" and move it to trash 10 times
-  for (int i = 0; i < 10; i++) {
-    std::string dummy_file = NewDummyFile("conflict.data");
-    ASSERT_OK(delete_scheduler_->DeleteFile(dummy_file));
-  }
-  ASSERT_EQ(CountFilesInDir(dummy_files_dir_), 0);
-  // 10 files ("conflict.data" x 10) in trash
-  ASSERT_EQ(CountFilesInDir(trash_dir_), 10);
-
-  // Hold BackgroundEmptyTrash
-  TEST_SYNC_POINT("DeleteSchedulerTest::ConflictNames:1");
-  delete_scheduler_->WaitForEmptyTrash();
-  ASSERT_EQ(CountFilesInDir(trash_dir_), 0);
-
-  auto bg_errors = delete_scheduler_->GetBackgroundErrors();
-  ASSERT_EQ(bg_errors.size(), 0);
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-// 1- Create 10 dummy files
-// 2- Delete the 10 files using DeleteScheduler (move them to trash)
-// 3- Delete the 10 files directly (using env_->DeleteFile)
-// --- Hold DeleteScheduler::BackgroundEmptyTrash ---
-// 4- Make sure that DeleteScheduler failed to delete the 10 files and
-//    reported 10 background errors
-TEST_F(DeleteSchedulerTest, BackgroundError) {
-  rocksdb::SyncPoint::GetInstance()->LoadDependency({
-      {"DeleteSchedulerTest::BackgroundError:1",
-       "DeleteScheduler::BackgroundEmptyTrash"},
-  });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  rate_bytes_per_sec_ = 1024 * 1024;  // 1 Mb/sec
-  NewDeleteScheduler();
-
-  // Generate 10 dummy files and move them to trash
-  for (int i = 0; i < 10; i++) {
-    std::string file_name = "data_" + ToString(i) + ".data";
-    ASSERT_OK(delete_scheduler_->DeleteFile(NewDummyFile(file_name)));
-  }
-  ASSERT_EQ(CountFilesInDir(dummy_files_dir_), 0);
-  ASSERT_EQ(CountFilesInDir(trash_dir_), 10);
-
-  // Delete 10 files from trash, this will cause background errors in
-  // BackgroundEmptyTrash since we already deleted the files it was
-  // going to delete
-  for (int i = 0; i < 10; i++) {
-    std::string file_name = "data_" + ToString(i) + ".data";
-    ASSERT_OK(env_->DeleteFile(trash_dir_ + "/" + file_name));
-  }
-
-  // Hold BackgroundEmptyTrash
-  TEST_SYNC_POINT("DeleteSchedulerTest::BackgroundError:1");
-  delete_scheduler_->WaitForEmptyTrash();
-  auto bg_errors = delete_scheduler_->GetBackgroundErrors();
-  ASSERT_EQ(bg_errors.size(), 10);
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-// 1- Create 10 dummy files
-// 2- Delete 10 dummy files using DeleteScheduler
-// 3- Wait for DeleteScheduler to delete all files in queue
-// 4- Make sure all files in trash directory were deleted
-// 5- Repeat previous steps 5 times
-TEST_F(DeleteSchedulerTest, StartBGEmptyTrashMultipleTimes) {
-  int bg_delete_file = 0;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DeleteScheduler::DeleteTrashFile:DeleteFile",
-      [&](void* arg) { bg_delete_file++; });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  rate_bytes_per_sec_ = 1024 * 1024;  // 1 MB / sec
-  NewDeleteScheduler();
-
-  // Move files to trash, wait for empty trash, start again
-  for (int run = 1; run <= 5; run++) {
-    // Generate 10 dummy files and move them to trash
-    for (int i = 0; i < 10; i++) {
-      std::string file_name = "data_" + ToString(i) + ".data";
-      ASSERT_OK(delete_scheduler_->DeleteFile(NewDummyFile(file_name)));
-    }
-    ASSERT_EQ(CountFilesInDir(dummy_files_dir_), 0);
-    delete_scheduler_->WaitForEmptyTrash();
-    ASSERT_EQ(bg_delete_file, 10 * run);
-    ASSERT_EQ(CountFilesInDir(trash_dir_), 0);
-
-    auto bg_errors = delete_scheduler_->GetBackgroundErrors();
-    ASSERT_EQ(bg_errors.size(), 0);
-  }
-
-  ASSERT_EQ(bg_delete_file, 50);
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-// 1- Create a DeleteScheduler with very slow rate limit (1 Byte / sec)
-// 2- Delete 100 files using DeleteScheduler
-// 3- Delete the DeleteScheduler (call the destructor while queue is not empty)
-// 4- Make sure that not all files were deleted from trash and that
-//    DeleteScheduler background thread did not delete all files
-TEST_F(DeleteSchedulerTest, DestructorWithNonEmptyQueue) {
-  int bg_delete_file = 0;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DeleteScheduler::DeleteTrashFile:DeleteFile",
-      [&](void* arg) { bg_delete_file++; });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  rate_bytes_per_sec_ = 1;  // 1 Byte / sec
-  NewDeleteScheduler();
-
-  for (int i = 0; i < 100; i++) {
-    std::string file_name = "data_" + ToString(i) + ".data";
-    ASSERT_OK(delete_scheduler_->DeleteFile(NewDummyFile(file_name)));
-  }
-
-  // Deleting 100 files at 1 byte/sec will need >28 hours, so we destroy the
-  // DeleteScheduler while the delete queue is not yet empty
-  sst_file_mgr_.reset();
-
-  ASSERT_LT(bg_delete_file, 100);
-  ASSERT_GT(CountFilesInDir(trash_dir_), 0);
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
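The ">28 hours" figure in the comment above is just 100 files of 1 KB divided by the 1 byte/sec rate; a quick standalone check of the arithmetic (not RocksDB code):

    #include <cstdio>

    int main() {
      const double total_bytes = 100 * 1024.0;  // 100 dummy files of 1 KB each
      const double rate_bytes_per_sec = 1.0;    // the test's trash-deletion rate
      std::printf("%.1f hours to drain the trash\n",
                  total_bytes / rate_bytes_per_sec / 3600.0);
      // Prints 28.4 hours, so the destructor runs long before the queue empties.
      return 0;
    }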
-
-// 1- Delete the trash directory
-// 2- Delete 10 files using DeleteScheduler
-// 3- Make sure that the 10 files were deleted immediately since DeleteScheduler
-//    failed to move them to trash directory
-TEST_F(DeleteSchedulerTest, MoveToTrashError) {
-  int bg_delete_file = 0;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DeleteScheduler::DeleteTrashFile:DeleteFile",
-      [&](void* arg) { bg_delete_file++; });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  rate_bytes_per_sec_ = 1024;  // 1 Kb / sec
-  NewDeleteScheduler();
-
-  // We will delete the trash directory, which means that DeleteScheduler won't
-  // be able to move files to trash and will delete them immediately.
-  ASSERT_OK(test::DestroyDir(env_, trash_dir_));
-  for (int i = 0; i < 10; i++) {
-    std::string file_name = "data_" + ToString(i) + ".data";
-    ASSERT_OK(delete_scheduler_->DeleteFile(NewDummyFile(file_name)));
-  }
-
-  ASSERT_EQ(CountFilesInDir(dummy_files_dir_), 0);
-  ASSERT_EQ(bg_delete_file, 0);
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-TEST_F(DeleteSchedulerTest, DISABLED_DynamicRateLimiting1) {
-  std::vector<uint64_t> penalties;
-  int bg_delete_file = 0;
-  int fg_delete_file = 0;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DeleteScheduler::DeleteTrashFile:DeleteFile",
-      [&](void* arg) { bg_delete_file++; });
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DeleteScheduler::DeleteFile",
-      [&](void* arg) { fg_delete_file++; });
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DeleteScheduler::BackgroundEmptyTrash:Wait",
-      [&](void* arg) { penalties.push_back(*(static_cast<int*>(arg))); });
-
-  rocksdb::SyncPoint::GetInstance()->LoadDependency({
-      {"DeleteSchedulerTest::DynamicRateLimiting1:1",
-       "DeleteScheduler::BackgroundEmptyTrash"},
-  });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  rate_bytes_per_sec_ = 0;  // Disable rate limiting initially
-  NewDeleteScheduler();
-
-
-  int num_files = 10;  // 10 files
-  uint64_t file_size = 1024;  // every file is 1 kb
-
-  std::vector<int64_t> delete_kbs_per_sec = {512, 200, 0, 100, 50, -2, 25};
-  for (size_t t = 0; t < delete_kbs_per_sec.size(); t++) {
-    penalties.clear();
-    bg_delete_file = 0;
-    fg_delete_file = 0;
-    rocksdb::SyncPoint::GetInstance()->ClearTrace();
-    rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-    DestroyAndCreateDir(dummy_files_dir_);
-    rate_bytes_per_sec_ = delete_kbs_per_sec[t] * 1024;
-    delete_scheduler_->SetRateBytesPerSecond(rate_bytes_per_sec_);
-
-    // Create 10 dummy files, every file is 1 KB
-    std::vector<std::string> generated_files;
-    for (int i = 0; i < num_files; i++) {
-      std::string file_name = "file" + ToString(i) + ".data";
-      generated_files.push_back(NewDummyFile(file_name, file_size));
-    }
-
-    // Delete dummy files and measure time spent to empty trash
-    for (int i = 0; i < num_files; i++) {
-      ASSERT_OK(delete_scheduler_->DeleteFile(generated_files[i]));
-    }
-    ASSERT_EQ(CountFilesInDir(dummy_files_dir_), 0);
-
-    if (rate_bytes_per_sec_ > 0) {
-      uint64_t delete_start_time = env_->NowMicros();
-      TEST_SYNC_POINT("DeleteSchedulerTest::DynamicRateLimiting1:1");
-      delete_scheduler_->WaitForEmptyTrash();
-      uint64_t time_spent_deleting = env_->NowMicros() - delete_start_time;
-
-      auto bg_errors = delete_scheduler_->GetBackgroundErrors();
-      ASSERT_EQ(bg_errors.size(), 0);
-
-      uint64_t total_files_size = 0;
-      uint64_t expected_penlty = 0;
-      ASSERT_EQ(penalties.size(), num_files);
-      for (int i = 0; i < num_files; i++) {
-        total_files_size += file_size;
-        expected_penlty = ((total_files_size * 1000000) / rate_bytes_per_sec_);
-        ASSERT_EQ(expected_penlty, penalties[i]);
-      }
-      ASSERT_GT(time_spent_deleting, expected_penlty * 0.9);
-      ASSERT_EQ(bg_delete_file, num_files);
-      ASSERT_EQ(fg_delete_file, 0);
-    } else {
-      ASSERT_EQ(penalties.size(), 0);
-      ASSERT_EQ(bg_delete_file, 0);
-      ASSERT_EQ(fg_delete_file, num_files);
-    }
-
-    ASSERT_EQ(CountFilesInDir(trash_dir_), 0);
-    rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  }
-}
-
-TEST_F(DeleteSchedulerTest, ImmediateDeleteOn25PercDBSize) {
-  int bg_delete_file = 0;
-  int fg_delete_file = 0;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DeleteScheduler::DeleteTrashFile:DeleteFile",
-      [&](void* arg) { bg_delete_file++; });
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DeleteScheduler::DeleteFile", [&](void* arg) { fg_delete_file++; });
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  int num_files = 100;  // 100 files
-  uint64_t file_size = 1024 * 10;  // 10 KB as a file size
-  rate_bytes_per_sec_ = 1;  // 1 byte per sec (very slow trash delete)
-
-  NewDeleteScheduler();
-  delete_scheduler_->TEST_SetMaxTrashDBRatio(0.25);
-
-  std::vector<std::string> generated_files;
-  for (int i = 0; i < num_files; i++) {
-    std::string file_name = "file" + ToString(i) + ".data";
-    generated_files.push_back(NewDummyFile(file_name, file_size));
-  }
-
-  for (std::string& file_name : generated_files) {
-    delete_scheduler_->DeleteFile(file_name);
-  }
-
-  // When we end up with 26 files in trash we will start
-  // deleting new files immediately
-  ASSERT_EQ(fg_delete_file, 74);
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
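The 74 asserted above follows from the 0.25 trash/DB ratio: with 100 tracked files of 10 KB each, 26 files is the first trash size that exceeds a quarter of the 1,000 KB total, so the remaining 74 deletions are expected to happen in the foreground. A small arithmetic sketch of that threshold (an illustration of the test's expectation, not DeleteScheduler internals):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t file_size = 10 * 1024;         // 10 KB per dummy file
      const uint64_t total_size = 100 * file_size;  // 100 files tracked
      for (int files_in_trash = 1; files_in_trash <= 100; ++files_in_trash) {
        if (files_in_trash * file_size > total_size / 4) {
          std::printf("trash exceeds 25%% at %d files -> %d foreground deletes\n",
                      files_in_trash, 100 - files_in_trash);
          break;  // prints "26 files -> 74", matching fg_delete_file == 74
        }
      }
      return 0;
    }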
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-int main(int argc, char** argv) {
-  printf("DeleteScheduler is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-#endif  // ROCKSDB_LITE

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/d3a13a49/thirdparty/rocksdb/util/dynamic_bloom_test.cc
----------------------------------------------------------------------
diff --git a/thirdparty/rocksdb/util/dynamic_bloom_test.cc 
b/thirdparty/rocksdb/util/dynamic_bloom_test.cc
deleted file mode 100644
index f50036b..0000000
--- a/thirdparty/rocksdb/util/dynamic_bloom_test.cc
+++ /dev/null
@@ -1,340 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef GFLAGS
-#include <cstdio>
-int main() {
-  fprintf(stderr, "Please install gflags to run this test... Skipping...\n");
-  return 0;
-}
-#else
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <gflags/gflags.h>
-#include <inttypes.h>
-#include <algorithm>
-#include <atomic>
-#include <functional>
-#include <memory>
-#include <thread>
-#include <vector>
-
-#include "dynamic_bloom.h"
-#include "port/port.h"
-#include "util/arena.h"
-#include "util/logging.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-#include "util/stop_watch.h"
-
-using GFLAGS::ParseCommandLineFlags;
-
-DEFINE_int32(bits_per_key, 10, "");
-DEFINE_int32(num_probes, 6, "");
-DEFINE_bool(enable_perf, false, "");
-
-namespace rocksdb {
-
-static Slice Key(uint64_t i, char* buffer) {
-  memcpy(buffer, &i, sizeof(i));
-  return Slice(buffer, sizeof(i));
-}
-
-class DynamicBloomTest : public testing::Test {};
-
-TEST_F(DynamicBloomTest, EmptyFilter) {
-  Arena arena;
-  DynamicBloom bloom1(&arena, 100, 0, 2);
-  ASSERT_TRUE(!bloom1.MayContain("hello"));
-  ASSERT_TRUE(!bloom1.MayContain("world"));
-
-  DynamicBloom bloom2(&arena, CACHE_LINE_SIZE * 8 * 2 - 1, 1, 2);
-  ASSERT_TRUE(!bloom2.MayContain("hello"));
-  ASSERT_TRUE(!bloom2.MayContain("world"));
-}
-
-TEST_F(DynamicBloomTest, Small) {
-  Arena arena;
-  DynamicBloom bloom1(&arena, 100, 0, 2);
-  bloom1.Add("hello");
-  bloom1.Add("world");
-  ASSERT_TRUE(bloom1.MayContain("hello"));
-  ASSERT_TRUE(bloom1.MayContain("world"));
-  ASSERT_TRUE(!bloom1.MayContain("x"));
-  ASSERT_TRUE(!bloom1.MayContain("foo"));
-
-  DynamicBloom bloom2(&arena, CACHE_LINE_SIZE * 8 * 2 - 1, 1, 2);
-  bloom2.Add("hello");
-  bloom2.Add("world");
-  ASSERT_TRUE(bloom2.MayContain("hello"));
-  ASSERT_TRUE(bloom2.MayContain("world"));
-  ASSERT_TRUE(!bloom2.MayContain("x"));
-  ASSERT_TRUE(!bloom2.MayContain("foo"));
-}
-
-TEST_F(DynamicBloomTest, SmallConcurrentAdd) {
-  Arena arena;
-  DynamicBloom bloom1(&arena, 100, 0, 2);
-  bloom1.AddConcurrently("hello");
-  bloom1.AddConcurrently("world");
-  ASSERT_TRUE(bloom1.MayContain("hello"));
-  ASSERT_TRUE(bloom1.MayContain("world"));
-  ASSERT_TRUE(!bloom1.MayContain("x"));
-  ASSERT_TRUE(!bloom1.MayContain("foo"));
-
-  DynamicBloom bloom2(&arena, CACHE_LINE_SIZE * 8 * 2 - 1, 1, 2);
-  bloom2.AddConcurrently("hello");
-  bloom2.AddConcurrently("world");
-  ASSERT_TRUE(bloom2.MayContain("hello"));
-  ASSERT_TRUE(bloom2.MayContain("world"));
-  ASSERT_TRUE(!bloom2.MayContain("x"));
-  ASSERT_TRUE(!bloom2.MayContain("foo"));
-}
-
-static uint32_t NextNum(uint32_t num) {
-  if (num < 10) {
-    num += 1;
-  } else if (num < 100) {
-    num += 10;
-  } else if (num < 1000) {
-    num += 100;
-  } else {
-    num += 1000;
-  }
-  return num;
-}
-
-TEST_F(DynamicBloomTest, VaryingLengths) {
-  char buffer[sizeof(uint64_t)];
-
-  // Count number of filters that significantly exceed the false positive rate
-  int mediocre_filters = 0;
-  int good_filters = 0;
-  uint32_t num_probes = static_cast<uint32_t>(FLAGS_num_probes);
-
-  fprintf(stderr, "bits_per_key: %d  num_probes: %d\n", FLAGS_bits_per_key,
-          num_probes);
-
-  for (uint32_t enable_locality = 0; enable_locality < 2; ++enable_locality) {
-    for (uint32_t num = 1; num <= 10000; num = NextNum(num)) {
-      uint32_t bloom_bits = 0;
-      Arena arena;
-      if (enable_locality == 0) {
-        bloom_bits = std::max(num * FLAGS_bits_per_key, 64U);
-      } else {
-        bloom_bits = std::max(num * FLAGS_bits_per_key,
-                              enable_locality * CACHE_LINE_SIZE * 8);
-      }
-      DynamicBloom bloom(&arena, bloom_bits, enable_locality, num_probes);
-      for (uint64_t i = 0; i < num; i++) {
-        bloom.Add(Key(i, buffer));
-        ASSERT_TRUE(bloom.MayContain(Key(i, buffer)));
-      }
-
-      // All added keys must match
-      for (uint64_t i = 0; i < num; i++) {
-        ASSERT_TRUE(bloom.MayContain(Key(i, buffer))) << "Num " << num
-                                                      << "; key " << i;
-      }
-
-      // Check false positive rate
-
-      int result = 0;
-      for (uint64_t i = 0; i < 10000; i++) {
-        if (bloom.MayContain(Key(i + 1000000000, buffer))) {
-          result++;
-        }
-      }
-      double rate = result / 10000.0;
-
-      fprintf(stderr,
-              "False positives: %5.2f%% @ num = %6u, bloom_bits = %6u, "
-              "enable locality?%u\n",
-              rate * 100.0, num, bloom_bits, enable_locality);
-
-      if (rate > 0.0125)
-        mediocre_filters++;  // Allowed, but not too often
-      else
-        good_filters++;
-    }
-
-    fprintf(stderr, "Filters: %d good, %d mediocre\n", good_filters,
-            mediocre_filters);
-    ASSERT_LE(mediocre_filters, good_filters / 5);
-  }
-}
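The 1.25% "mediocre" cutoff used above can be sanity-checked against the standard Bloom filter estimate p ~ (1 - e^(-k/b))^k for b bits per key and k probes; with the defaults of 10 and 6 this comes out to roughly 0.84%. A standalone sketch of that calculation (not RocksDB code):

    #include <cmath>
    #include <cstdio>

    int main() {
      const double bits_per_key = 10.0;  // FLAGS_bits_per_key default
      const double num_probes = 6.0;     // FLAGS_num_probes default
      const double fp_rate =
          std::pow(1.0 - std::exp(-num_probes / bits_per_key), num_probes);
      std::printf("expected false positive rate ~ %.2f%%\n", fp_rate * 100.0);
      // ~0.84%, comfortably under the 1.25% bound the test only tolerates rarely.
      return 0;
    }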
-
-TEST_F(DynamicBloomTest, perf) {
-  StopWatchNano timer(Env::Default());
-  uint32_t num_probes = static_cast<uint32_t>(FLAGS_num_probes);
-
-  if (!FLAGS_enable_perf) {
-    return;
-  }
-
-  for (uint32_t m = 1; m <= 8; ++m) {
-    Arena arena;
-    const uint32_t num_keys = m * 8 * 1024 * 1024;
-    fprintf(stderr, "testing %" PRIu32 "M keys\n", m * 8);
-
-    DynamicBloom std_bloom(&arena, num_keys * 10, 0, num_probes);
-
-    timer.Start();
-    for (uint64_t i = 1; i <= num_keys; ++i) {
-      std_bloom.Add(Slice(reinterpret_cast<const char*>(&i), 8));
-    }
-
-    uint64_t elapsed = timer.ElapsedNanos();
-    fprintf(stderr, "standard bloom, avg add latency %" PRIu64 "\n",
-            elapsed / num_keys);
-
-    uint32_t count = 0;
-    timer.Start();
-    for (uint64_t i = 1; i <= num_keys; ++i) {
-      if (std_bloom.MayContain(Slice(reinterpret_cast<const char*>(&i), 8))) {
-        ++count;
-      }
-    }
-    ASSERT_EQ(count, num_keys);
-    elapsed = timer.ElapsedNanos();
-    assert(count > 0);
-    fprintf(stderr, "standard bloom, avg query latency %" PRIu64 "\n",
-            elapsed / count);
-
-    // Locality enabled version
-    DynamicBloom blocked_bloom(&arena, num_keys * 10, 1, num_probes);
-
-    timer.Start();
-    for (uint64_t i = 1; i <= num_keys; ++i) {
-      blocked_bloom.Add(Slice(reinterpret_cast<const char*>(&i), 8));
-    }
-
-    elapsed = timer.ElapsedNanos();
-    fprintf(stderr,
-            "blocked bloom(enable locality), avg add latency %" PRIu64 "\n",
-            elapsed / num_keys);
-
-    count = 0;
-    timer.Start();
-    for (uint64_t i = 1; i <= num_keys; ++i) {
-      if (blocked_bloom.MayContain(
-              Slice(reinterpret_cast<const char*>(&i), 8))) {
-        ++count;
-      }
-    }
-
-    elapsed = timer.ElapsedNanos();
-    assert(count > 0);
-    fprintf(stderr,
-            "blocked bloom(enable locality), avg query latency %" PRIu64 "\n",
-            elapsed / count);
-    ASSERT_TRUE(count == num_keys);
-  }
-}
-
-TEST_F(DynamicBloomTest, concurrent_with_perf) {
-  StopWatchNano timer(Env::Default());
-  uint32_t num_probes = static_cast<uint32_t>(FLAGS_num_probes);
-
-  uint32_t m_limit = FLAGS_enable_perf ? 8 : 1;
-  uint32_t locality_limit = FLAGS_enable_perf ? 1 : 0;
-
-  uint32_t num_threads = 4;
-  std::vector<port::Thread> threads;
-
-  for (uint32_t m = 1; m <= m_limit; ++m) {
-    for (uint32_t locality = 0; locality <= locality_limit; ++locality) {
-      Arena arena;
-      const uint32_t num_keys = m * 8 * 1024 * 1024;
-      fprintf(stderr, "testing %" PRIu32 "M keys with %" PRIu32 " locality\n",
-              m * 8, locality);
-
-      DynamicBloom std_bloom(&arena, num_keys * 10, locality, num_probes);
-
-      timer.Start();
-
-      std::function<void(size_t)> adder = [&](size_t t) {
-        for (uint64_t i = 1 + t; i <= num_keys; i += num_threads) {
-          std_bloom.AddConcurrently(
-              Slice(reinterpret_cast<const char*>(&i), 8));
-        }
-      };
-      for (size_t t = 0; t < num_threads; ++t) {
-        threads.emplace_back(adder, t);
-      }
-      while (threads.size() > 0) {
-        threads.back().join();
-        threads.pop_back();
-      }
-
-      uint64_t elapsed = timer.ElapsedNanos();
-      fprintf(stderr, "standard bloom, avg parallel add latency %" PRIu64
-                      " nanos/key\n",
-              elapsed / num_keys);
-
-      timer.Start();
-
-      std::function<void(size_t)> hitter = [&](size_t t) {
-        for (uint64_t i = 1 + t; i <= num_keys; i += num_threads) {
-          bool f =
-              std_bloom.MayContain(Slice(reinterpret_cast<const char*>(&i), 8));
-          ASSERT_TRUE(f);
-        }
-      };
-      for (size_t t = 0; t < num_threads; ++t) {
-        threads.emplace_back(hitter, t);
-      }
-      while (threads.size() > 0) {
-        threads.back().join();
-        threads.pop_back();
-      }
-
-      elapsed = timer.ElapsedNanos();
-      fprintf(stderr, "standard bloom, avg parallel hit latency %" PRIu64
-                      " nanos/key\n",
-              elapsed / num_keys);
-
-      timer.Start();
-
-      std::atomic<uint32_t> false_positives(0);
-      std::function<void(size_t)> misser = [&](size_t t) {
-        for (uint64_t i = num_keys + 1 + t; i <= 2 * num_keys;
-             i += num_threads) {
-          bool f =
-              std_bloom.MayContain(Slice(reinterpret_cast<const char*>(&i), 8));
-          if (f) {
-            ++false_positives;
-          }
-        }
-      };
-      for (size_t t = 0; t < num_threads; ++t) {
-        threads.emplace_back(misser, t);
-      }
-      while (threads.size() > 0) {
-        threads.back().join();
-        threads.pop_back();
-      }
-
-      elapsed = timer.ElapsedNanos();
-      fprintf(stderr, "standard bloom, avg parallel miss latency %" PRIu64
-                      " nanos/key, %f%% false positive rate\n",
-              elapsed / num_keys, false_positives.load() * 100.0 / num_keys);
-    }
-  }
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  ParseCommandLineFlags(&argc, &argv, true);
-
-  return RUN_ALL_TESTS();
-}
-
-#endif  // GFLAGS

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/d3a13a49/thirdparty/rocksdb/util/event_logger_test.cc
----------------------------------------------------------------------
diff --git a/thirdparty/rocksdb/util/event_logger_test.cc 
b/thirdparty/rocksdb/util/event_logger_test.cc
deleted file mode 100644
index 13b6394..0000000
--- a/thirdparty/rocksdb/util/event_logger_test.cc
+++ /dev/null
@@ -1,43 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <string>
-
-#include "util/event_logger.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-
-class EventLoggerTest : public testing::Test {};
-
-class StringLogger : public Logger {
- public:
-  using Logger::Logv;
-  virtual void Logv(const char* format, va_list ap) override {
-    vsnprintf(buffer_, sizeof(buffer_), format, ap);
-  }
-  char* buffer() { return buffer_; }
-
- private:
-  char buffer_[1000];
-};
-
-TEST_F(EventLoggerTest, SimpleTest) {
-  StringLogger logger;
-  EventLogger event_logger(&logger);
-  event_logger.Log() << "id" << 5 << "event"
-                     << "just_testing";
-  std::string output(logger.buffer());
-  ASSERT_TRUE(output.find("\"event\": \"just_testing\"") != std::string::npos);
-  ASSERT_TRUE(output.find("\"id\": 5") != std::string::npos);
-  ASSERT_TRUE(output.find("\"time_micros\"") != std::string::npos);
-}
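The assertions above only check substrings of the single line StringLogger captured. A standalone sketch of the same checks against an illustrative line (the prefix and field order shown here are an assumption, not captured EventLogger output):

    #include <cassert>
    #include <string>

    int main() {
      // Hypothetical example of the JSON-style payload EventLogger emits.
      const std::string output =
          "EVENT_LOG_v1 {\"time_micros\": 1510000000000000, "
          "\"id\": 5, \"event\": \"just_testing\"}";
      assert(output.find("\"event\": \"just_testing\"") != std::string::npos);
      assert(output.find("\"id\": 5") != std::string::npos);
      assert(output.find("\"time_micros\"") != std::string::npos);
      return 0;
    }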
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
