http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/d3a13a49/thirdparty/rocksdb/utilities/document/document_db_test.cc
----------------------------------------------------------------------
diff --git a/thirdparty/rocksdb/utilities/document/document_db_test.cc b/thirdparty/rocksdb/utilities/document/document_db_test.cc
deleted file mode 100644
index e8f4138..0000000
--- a/thirdparty/rocksdb/utilities/document/document_db_test.cc
+++ /dev/null
@@ -1,336 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include <algorithm>
-
-#include "rocksdb/utilities/json_document.h"
-#include "rocksdb/utilities/document_db.h"
-
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-namespace rocksdb {
-
-class DocumentDBTest : public testing::Test {
- public:
-  DocumentDBTest() {
-    dbname_ = test::TmpDir() + "/document_db_test";
-    DestroyDB(dbname_, Options());
-  }
-  ~DocumentDBTest() {
-    delete db_;
-    DestroyDB(dbname_, Options());
-  }
-
-  void AssertCursorIDs(Cursor* cursor, std::vector<int64_t> expected) {
-    std::vector<int64_t> got;
-    while (cursor->Valid()) {
-      ASSERT_TRUE(cursor->Valid());
-      ASSERT_TRUE(cursor->document().Contains("_id"));
-      got.push_back(cursor->document()["_id"].GetInt64());
-      cursor->Next();
-    }
-    std::sort(expected.begin(), expected.end());
-    std::sort(got.begin(), got.end());
-    ASSERT_TRUE(got == expected);
-  }
-
-  // converts ' to ", so that we don't have to escape " all over the place
-  std::string ConvertQuotes(const std::string& input) {
-    std::string output;
-    for (auto x : input) {
-      if (x == '\'') {
-        output.push_back('\"');
-      } else {
-        output.push_back(x);
-      }
-    }
-    return output;
-  }
-
-  void CreateIndexes(std::vector<DocumentDB::IndexDescriptor> indexes) {
-    for (auto i : indexes) {
-      ASSERT_OK(db_->CreateIndex(WriteOptions(), i));
-    }
-  }
-
-  JSONDocument* Parse(const std::string& doc) {
-    return JSONDocument::ParseJSON(ConvertQuotes(doc).c_str());
-  }
-
-  std::string dbname_;
-  DocumentDB* db_;
-};
-
-TEST_F(DocumentDBTest, SimpleQueryTest) {
-  DocumentDBOptions options;
-  DocumentDB::IndexDescriptor index;
-  index.description = Parse("{\"name\": 1}");
-  index.name = "name_index";
-
-  ASSERT_OK(DocumentDB::Open(options, dbname_, {}, &db_));
-  CreateIndexes({index});
-  delete db_;
-  // now there is index present
-  ASSERT_OK(DocumentDB::Open(options, dbname_, {index}, &db_));
-  delete index.description;
-
-  std::vector<std::string> json_objects = {
-      "{\"_id\": 1, \"name\": \"One\"}",   "{\"_id\": 2, \"name\": \"Two\"}",
-      "{\"_id\": 3, \"name\": \"Three\"}", "{\"_id\": 4, \"name\": \"Four\"}"};
-
-  for (auto& json : json_objects) {
-    std::unique_ptr<JSONDocument> document(Parse(json));
-    ASSERT_TRUE(document.get() != nullptr);
-    ASSERT_OK(db_->Insert(WriteOptions(), *document));
-  }
-
-  // inserting a document with existing primary key should return failure
-  {
-    std::unique_ptr<JSONDocument> document(Parse(json_objects[0]));
-    ASSERT_TRUE(document.get() != nullptr);
-    Status s = db_->Insert(WriteOptions(), *document);
-    ASSERT_TRUE(s.IsInvalidArgument());
-  }
-
-  // find equal to "Two"
-  {
-    std::unique_ptr<JSONDocument> query(
-        Parse("[{'$filter': {'name': 'Two', '$index': 'name_index'}}]"));
-    std::unique_ptr<Cursor> cursor(db_->Query(ReadOptions(), *query));
-    AssertCursorIDs(cursor.get(), {2});
-  }
-
-  // find less than "Three"
-  {
-    std::unique_ptr<JSONDocument> query(Parse(
-        "[{'$filter': {'name': {'$lt': 'Three'}, '$index': "
-        "'name_index'}}]"));
-    std::unique_ptr<Cursor> cursor(db_->Query(ReadOptions(), *query));
-
-    AssertCursorIDs(cursor.get(), {1, 4});
-  }
-
-  // find less than "Three" without index
-  {
-    std::unique_ptr<JSONDocument> query(
-        Parse("[{'$filter': {'name': {'$lt': 'Three'} }}]"));
-    std::unique_ptr<Cursor> cursor(db_->Query(ReadOptions(), *query));
-    AssertCursorIDs(cursor.get(), {1, 4});
-  }
-
-  // remove less or equal to "Three"
-  {
-    std::unique_ptr<JSONDocument> query(
-        Parse("{'name': {'$lte': 'Three'}, '$index': 'name_index'}"));
-    ASSERT_OK(db_->Remove(ReadOptions(), WriteOptions(), *query));
-  }
-
-  // find all -- only "Two" left, everything else should be deleted
-  {
-    std::unique_ptr<JSONDocument> query(Parse("[]"));
-    std::unique_ptr<Cursor> cursor(db_->Query(ReadOptions(), *query));
-    AssertCursorIDs(cursor.get(), {2});
-  }
-}
-
-TEST_F(DocumentDBTest, ComplexQueryTest) {
-  DocumentDBOptions options;
-  DocumentDB::IndexDescriptor priority_index;
-  priority_index.description = Parse("{'priority': 1}");
-  priority_index.name = "priority";
-  DocumentDB::IndexDescriptor job_name_index;
-  job_name_index.description = Parse("{'job_name': 1}");
-  job_name_index.name = "job_name";
-  DocumentDB::IndexDescriptor progress_index;
-  progress_index.description = Parse("{'progress': 1}");
-  progress_index.name = "progress";
-
-  ASSERT_OK(DocumentDB::Open(options, dbname_, {}, &db_));
-  CreateIndexes({priority_index, progress_index});
-  delete priority_index.description;
-  delete progress_index.description;
-
-  std::vector<std::string> json_objects = {
-      "{'_id': 1, 'job_name': 'play', 'priority': 10, 'progress': 14.2}",
-      "{'_id': 2, 'job_name': 'white', 'priority': 2, 'progress': 45.1}",
-      "{'_id': 3, 'job_name': 'straw', 'priority': 5, 'progress': 83.2}",
-      "{'_id': 4, 'job_name': 'temporary', 'priority': 3, 'progress': 14.9}",
-      "{'_id': 5, 'job_name': 'white', 'priority': 4, 'progress': 44.2}",
-      "{'_id': 6, 'job_name': 'tea', 'priority': 1, 'progress': 12.4}",
-      "{'_id': 7, 'job_name': 'delete', 'priority': 2, 'progress': 77.54}",
-      "{'_id': 8, 'job_name': 'rock', 'priority': 3, 'progress': 93.24}",
-      "{'_id': 9, 'job_name': 'steady', 'priority': 3, 'progress': 9.1}",
-      "{'_id': 10, 'job_name': 'white', 'priority': 1, 'progress': 61.4}",
-      "{'_id': 11, 'job_name': 'who', 'priority': 4, 'progress': 39.41}",
-      "{'_id': 12, 'job_name': 'who', 'priority': -1, 'progress': 39.42}",
-      "{'_id': 13, 'job_name': 'who', 'priority': -2, 'progress': 39.42}", };
-
-  // add index on the fly!
-  CreateIndexes({job_name_index});
-  delete job_name_index.description;
-
-  for (auto& json : json_objects) {
-    std::unique_ptr<JSONDocument> document(Parse(json));
-    ASSERT_TRUE(document != nullptr);
-    ASSERT_OK(db_->Insert(WriteOptions(), *document));
-  }
-
-  // 2 < priority < 4 AND progress > 10.0, index priority
-  {
-    std::unique_ptr<JSONDocument> query(Parse(
-        "[{'$filter': {'priority': {'$lt': 4, '$gt': 2}, 'progress': {'$gt': "
-        "10.0}, '$index': 'priority'}}]"));
-    std::unique_ptr<Cursor> cursor(db_->Query(ReadOptions(), *query));
-    AssertCursorIDs(cursor.get(), {4, 8});
-  }
-
-  // -1 <= priority <= 1, index priority
-  {
-    std::unique_ptr<JSONDocument> query(Parse(
-        "[{'$filter': {'priority': {'$lte': 1, '$gte': -1},"
-        " '$index': 'priority'}}]"));
-    std::unique_ptr<Cursor> cursor(db_->Query(ReadOptions(), *query));
-    AssertCursorIDs(cursor.get(), {6, 10, 12});
-  }
-
-  // 2 < priority < 4 AND progress > 10.0, index progress
-  {
-    std::unique_ptr<JSONDocument> query(Parse(
-        "[{'$filter': {'priority': {'$lt': 4, '$gt': 2}, 'progress': {'$gt': "
-        "10.0}, '$index': 'progress'}}]"));
-    std::unique_ptr<Cursor> cursor(db_->Query(ReadOptions(), *query));
-    AssertCursorIDs(cursor.get(), {4, 8});
-  }
-
-  // job_name == 'white' AND priority >= 2, index job_name
-  {
-    std::unique_ptr<JSONDocument> query(Parse(
-        "[{'$filter': {'job_name': 'white', 'priority': {'$gte': "
-        "2}, '$index': 'job_name'}}]"));
-    std::unique_ptr<Cursor> cursor(db_->Query(ReadOptions(), *query));
-    AssertCursorIDs(cursor.get(), {2, 5});
-  }
-
-  // 35.0 <= progress < 65.5, index progress
-  {
-    std::unique_ptr<JSONDocument> query(Parse(
-        "[{'$filter': {'progress': {'$gt': 5.0, '$gte': 35.0, '$lt': 65.5}, "
-        "'$index': 'progress'}}]"));
-    std::unique_ptr<Cursor> cursor(db_->Query(ReadOptions(), *query));
-    AssertCursorIDs(cursor.get(), {2, 5, 10, 11, 12, 13});
-  }
-
-  // 2 < priority <= 4, index priority
-  {
-    std::unique_ptr<JSONDocument> query(Parse(
-        "[{'$filter': {'priority': {'$gt': 2, '$lt': 8, '$lte': 4}, "
-        "'$index': 'priority'}}]"));
-    std::unique_ptr<Cursor> cursor(db_->Query(ReadOptions(), *query));
-    AssertCursorIDs(cursor.get(), {4, 5, 8, 9, 11});
-  }
-
-  // Delete all whose progress is bigger than 50%
-  {
-    std::unique_ptr<JSONDocument> query(
-        Parse("{'progress': {'$gt': 50.0}, '$index': 'progress'}"));
-    ASSERT_OK(db_->Remove(ReadOptions(), WriteOptions(), *query));
-  }
-
-  // 2 < priority < 6, index priority
-  {
-    std::unique_ptr<JSONDocument> query(Parse(
-        "[{'$filter': {'priority': {'$gt': 2, '$lt': 6}, "
-        "'$index': 'priority'}}]"));
-    std::unique_ptr<Cursor> cursor(db_->Query(ReadOptions(), *query));
-    AssertCursorIDs(cursor.get(), {4, 5, 9, 11});
-  }
-
-  // update set priority to 10 where job_name is 'white'
-  {
-    std::unique_ptr<JSONDocument> query(Parse("{'job_name': 'white'}"));
-    std::unique_ptr<JSONDocument> update(Parse("{'$set': {'priority': 10}}"));
-    ASSERT_OK(db_->Update(ReadOptions(), WriteOptions(), *query, *update));
-  }
-
-  // update twice: set priority to 15 where job_name is 'white'
-  {
-    std::unique_ptr<JSONDocument> query(Parse("{'job_name': 'white'}"));
-    std::unique_ptr<JSONDocument> update(Parse("{'$set': {'priority': 10},"
-                                               "'$set': {'priority': 15}}"));
-    ASSERT_OK(db_->Update(ReadOptions(), WriteOptions(), *query, *update));
-  }
-
-  // update twice: set priority to 15 and
-  // progress to 40 where job_name is 'white'
-  {
-    std::unique_ptr<JSONDocument> query(Parse("{'job_name': 'white'}"));
-    std::unique_ptr<JSONDocument> update(
-        Parse("{'$set': {'priority': 10, 'progress': 35},"
-              "'$set': {'priority': 15, 'progress': 40}}"));
-    ASSERT_OK(db_->Update(ReadOptions(), WriteOptions(), *query, *update));
-  }
-
-  // priority < 0
-  {
-    std::unique_ptr<JSONDocument> query(
-        Parse("[{'$filter': {'priority': {'$lt': 0}, '$index': 'priority'}}]"));
-    std::unique_ptr<Cursor> cursor(db_->Query(ReadOptions(), *query));
-    ASSERT_OK(cursor->status());
-    AssertCursorIDs(cursor.get(), {12, 13});
-  }
-
-  // -2 < priority < 0
-  {
-    std::unique_ptr<JSONDocument> query(
-        Parse("[{'$filter': {'priority': {'$gt': -2, '$lt': 0},"
-        " '$index': 'priority'}}]"));
-    std::unique_ptr<Cursor> cursor(db_->Query(ReadOptions(), *query));
-    ASSERT_OK(cursor->status());
-    AssertCursorIDs(cursor.get(), {12});
-  }
-
-  // -2 <= priority < 0
-  {
-    std::unique_ptr<JSONDocument> query(
-        Parse("[{'$filter': {'priority': {'$gte': -2, '$lt': 0},"
-        " '$index': 'priority'}}]"));
-    std::unique_ptr<Cursor> cursor(db_->Query(ReadOptions(), *query));
-    ASSERT_OK(cursor->status());
-    AssertCursorIDs(cursor.get(), {12, 13});
-  }
-
-  // 4 < priority
-  {
-    std::unique_ptr<JSONDocument> query(
-        Parse("[{'$filter': {'priority': {'$gt': 4}, '$index': 'priority'}}]"));
-    std::unique_ptr<Cursor> cursor(db_->Query(ReadOptions(), *query));
-    ASSERT_OK(cursor->status());
-    AssertCursorIDs(cursor.get(), {1, 2, 5});
-  }
-
-  Status s = db_->DropIndex("doesnt-exist");
-  ASSERT_TRUE(!s.ok());
-  ASSERT_OK(db_->DropIndex("priority"));
-}
-
-}  //  namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr, "SKIPPED as DocumentDB is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // !ROCKSDB_LITE

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/d3a13a49/thirdparty/rocksdb/utilities/document/json_document_test.cc
----------------------------------------------------------------------
diff --git a/thirdparty/rocksdb/utilities/document/json_document_test.cc b/thirdparty/rocksdb/utilities/document/json_document_test.cc
deleted file mode 100644
index c7bfb39..0000000
--- a/thirdparty/rocksdb/utilities/document/json_document_test.cc
+++ /dev/null
@@ -1,341 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include <map>
-#include <set>
-#include <string>
-
-#include "rocksdb/utilities/json_document.h"
-
-#include "util/testutil.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-namespace {
-void AssertField(const JSONDocument& json, const std::string& field) {
-  ASSERT_TRUE(json.Contains(field));
-  ASSERT_TRUE(json[field].IsNull());
-}
-
-void AssertField(const JSONDocument& json, const std::string& field,
-                 const std::string& expected) {
-  ASSERT_TRUE(json.Contains(field));
-  ASSERT_TRUE(json[field].IsString());
-  ASSERT_EQ(expected, json[field].GetString());
-}
-
-void AssertField(const JSONDocument& json, const std::string& field,
-                 int64_t expected) {
-  ASSERT_TRUE(json.Contains(field));
-  ASSERT_TRUE(json[field].IsInt64());
-  ASSERT_EQ(expected, json[field].GetInt64());
-}
-
-void AssertField(const JSONDocument& json, const std::string& field,
-                 bool expected) {
-  ASSERT_TRUE(json.Contains(field));
-  ASSERT_TRUE(json[field].IsBool());
-  ASSERT_EQ(expected, json[field].GetBool());
-}
-
-void AssertField(const JSONDocument& json, const std::string& field,
-                 double expected) {
-  ASSERT_TRUE(json.Contains(field));
-  ASSERT_TRUE(json[field].IsDouble());
-  ASSERT_DOUBLE_EQ(expected, json[field].GetDouble());
-}
-}  // namespace
-
-class JSONDocumentTest : public testing::Test {
- public:
-  JSONDocumentTest()
-  : rnd_(101)
-  {}
-
-  void AssertSampleJSON(const JSONDocument& json) {
-    AssertField(json, "title", std::string("json"));
-    AssertField(json, "type", std::string("object"));
-    // properties
-    ASSERT_TRUE(json.Contains("properties"));
-    ASSERT_TRUE(json["properties"].Contains("flags"));
-    ASSERT_TRUE(json["properties"]["flags"].IsArray());
-    ASSERT_EQ(3u, json["properties"]["flags"].Count());
-    ASSERT_TRUE(json["properties"]["flags"][0].IsInt64());
-    ASSERT_EQ(10, json["properties"]["flags"][0].GetInt64());
-    ASSERT_TRUE(json["properties"]["flags"][1].IsString());
-    ASSERT_EQ("parse", json["properties"]["flags"][1].GetString());
-    ASSERT_TRUE(json["properties"]["flags"][2].IsObject());
-    AssertField(json["properties"]["flags"][2], "tag", std::string("no"));
-    AssertField(json["properties"]["flags"][2], std::string("status"));
-    AssertField(json["properties"], "age", 110.5e-4);
-    AssertField(json["properties"], "depth", static_cast<int64_t>(-10));
-    // test iteration
-    std::set<std::string> expected({"flags", "age", "depth"});
-    for (auto item : json["properties"].Items()) {
-      auto iter = expected.find(item.first);
-      ASSERT_TRUE(iter != expected.end());
-      expected.erase(iter);
-    }
-    ASSERT_EQ(0U, expected.size());
-    ASSERT_TRUE(json.Contains("latlong"));
-    ASSERT_TRUE(json["latlong"].IsArray());
-    ASSERT_EQ(2u, json["latlong"].Count());
-    ASSERT_TRUE(json["latlong"][0].IsDouble());
-    ASSERT_EQ(53.25, json["latlong"][0].GetDouble());
-    ASSERT_TRUE(json["latlong"][1].IsDouble());
-    ASSERT_EQ(43.75, json["latlong"][1].GetDouble());
-    AssertField(json, "enabled", true);
-  }
-
-  const std::string kSampleJSON =
-      "{ \"title\" : \"json\", \"type\" : \"object\", \"properties\" : { "
-      "\"flags\": [10, \"parse\", {\"tag\": \"no\", \"status\": null}], "
-      "\"age\": 110.5e-4, \"depth\": -10 }, \"latlong\": [53.25, 43.75], "
-      "\"enabled\": true }";
-
-  const std::string kSampleJSONDifferent =
-      "{ \"title\" : \"json\", \"type\" : \"object\", \"properties\" : { "
-      "\"flags\": [10, \"parse\", {\"tag\": \"no\", \"status\": 2}], "
-      "\"age\": 110.5e-4, \"depth\": -10 }, \"latlong\": [53.25, 43.75], "
-      "\"enabled\": true }";
-
-  Random rnd_;
-};
-
-TEST_F(JSONDocumentTest, MakeNullTest) {
-  JSONDocument x;
-  ASSERT_TRUE(x.IsNull());
-  ASSERT_TRUE(x.IsOwner());
-  ASSERT_TRUE(!x.IsBool());
-}
-
-TEST_F(JSONDocumentTest, MakeBoolTest) {
-  {
-    JSONDocument x(true);
-    ASSERT_TRUE(x.IsOwner());
-    ASSERT_TRUE(x.IsBool());
-    ASSERT_TRUE(!x.IsInt64());
-    ASSERT_EQ(x.GetBool(), true);
-  }
-
-  {
-    JSONDocument x(false);
-    ASSERT_TRUE(x.IsOwner());
-    ASSERT_TRUE(x.IsBool());
-    ASSERT_TRUE(!x.IsInt64());
-    ASSERT_EQ(x.GetBool(), false);
-  }
-}
-
-TEST_F(JSONDocumentTest, MakeInt64Test) {
-  JSONDocument x(static_cast<int64_t>(16));
-  ASSERT_TRUE(x.IsInt64());
-  ASSERT_TRUE(x.IsInt64());
-  ASSERT_TRUE(!x.IsBool());
-  ASSERT_TRUE(x.IsOwner());
-  ASSERT_EQ(x.GetInt64(), 16);
-}
-
-TEST_F(JSONDocumentTest, MakeStringTest) {
-  JSONDocument x("string");
-  ASSERT_TRUE(x.IsOwner());
-  ASSERT_TRUE(x.IsString());
-  ASSERT_TRUE(!x.IsBool());
-  ASSERT_EQ(x.GetString(), "string");
-}
-
-TEST_F(JSONDocumentTest, MakeDoubleTest) {
-  JSONDocument x(5.6);
-  ASSERT_TRUE(x.IsOwner());
-  ASSERT_TRUE(x.IsDouble());
-  ASSERT_TRUE(!x.IsBool());
-  ASSERT_EQ(x.GetDouble(), 5.6);
-}
-
-TEST_F(JSONDocumentTest, MakeByTypeTest) {
-  {
-    JSONDocument x(JSONDocument::kNull);
-    ASSERT_TRUE(x.IsNull());
-  }
-  {
-    JSONDocument x(JSONDocument::kBool);
-    ASSERT_TRUE(x.IsBool());
-  }
-  {
-    JSONDocument x(JSONDocument::kString);
-    ASSERT_TRUE(x.IsString());
-  }
-  {
-    JSONDocument x(JSONDocument::kInt64);
-    ASSERT_TRUE(x.IsInt64());
-  }
-  {
-    JSONDocument x(JSONDocument::kDouble);
-    ASSERT_TRUE(x.IsDouble());
-  }
-  {
-    JSONDocument x(JSONDocument::kObject);
-    ASSERT_TRUE(x.IsObject());
-  }
-  {
-    JSONDocument x(JSONDocument::kArray);
-    ASSERT_TRUE(x.IsArray());
-  }
-}
-
-TEST_F(JSONDocumentTest, Parsing) {
-  std::unique_ptr<JSONDocument> parsed_json(
-          JSONDocument::ParseJSON(kSampleJSON.c_str()));
-  ASSERT_TRUE(parsed_json->IsOwner());
-  ASSERT_TRUE(parsed_json != nullptr);
-  AssertSampleJSON(*parsed_json);
-
-  // test deep copying
-  JSONDocument copied_json_document(*parsed_json);
-  AssertSampleJSON(copied_json_document);
-  ASSERT_TRUE(copied_json_document == *parsed_json);
-
-  std::unique_ptr<JSONDocument> parsed_different_sample(
-      JSONDocument::ParseJSON(kSampleJSONDifferent.c_str()));
-  ASSERT_TRUE(parsed_different_sample != nullptr);
-  ASSERT_TRUE(!(*parsed_different_sample == copied_json_document));
-
-  // parse error
-  const std::string kFaultyJSON =
-      kSampleJSON.substr(0, kSampleJSON.size() - 10);
-  ASSERT_TRUE(JSONDocument::ParseJSON(kFaultyJSON.c_str()) == nullptr);
-}
-
-TEST_F(JSONDocumentTest, Serialization) {
-  std::unique_ptr<JSONDocument> parsed_json(
-            JSONDocument::ParseJSON(kSampleJSON.c_str()));
-  ASSERT_TRUE(parsed_json != nullptr);
-  ASSERT_TRUE(parsed_json->IsOwner());
-  std::string serialized;
-  parsed_json->Serialize(&serialized);
-
-  std::unique_ptr<JSONDocument> deserialized_json(
-            JSONDocument::Deserialize(Slice(serialized)));
-  ASSERT_TRUE(deserialized_json != nullptr);
-  AssertSampleJSON(*deserialized_json);
-
-  // deserialization failure
-  ASSERT_TRUE(JSONDocument::Deserialize(
-                  Slice(serialized.data(), serialized.size() - 10)) == nullptr);
-}
-
-TEST_F(JSONDocumentTest, OperatorEqualsTest) {
-  // kNull
-  ASSERT_TRUE(JSONDocument() == JSONDocument());
-
-  // kBool
-  ASSERT_TRUE(JSONDocument(false) != JSONDocument());
-  ASSERT_TRUE(JSONDocument(false) == JSONDocument(false));
-  ASSERT_TRUE(JSONDocument(true) == JSONDocument(true));
-  ASSERT_TRUE(JSONDocument(false) != JSONDocument(true));
-
-  // kString
-  ASSERT_TRUE(JSONDocument("test") != JSONDocument());
-  ASSERT_TRUE(JSONDocument("test") == JSONDocument("test"));
-
-  // kInt64
-  ASSERT_TRUE(JSONDocument(static_cast<int64_t>(15)) != JSONDocument());
-  ASSERT_TRUE(JSONDocument(static_cast<int64_t>(15)) !=
-              JSONDocument(static_cast<int64_t>(14)));
-  ASSERT_TRUE(JSONDocument(static_cast<int64_t>(15)) ==
-              JSONDocument(static_cast<int64_t>(15)));
-
-  unique_ptr<JSONDocument> arrayWithInt8Doc(JSONDocument::ParseJSON("[8]"));
-  ASSERT_TRUE(arrayWithInt8Doc != nullptr);
-  ASSERT_TRUE(arrayWithInt8Doc->IsArray());
-  ASSERT_TRUE((*arrayWithInt8Doc)[0].IsInt64());
-  ASSERT_TRUE((*arrayWithInt8Doc)[0] == JSONDocument(static_cast<int64_t>(8)));
-
-  unique_ptr<JSONDocument> arrayWithInt16Doc(JSONDocument::ParseJSON("[512]"));
-  ASSERT_TRUE(arrayWithInt16Doc != nullptr);
-  ASSERT_TRUE(arrayWithInt16Doc->IsArray());
-  ASSERT_TRUE((*arrayWithInt16Doc)[0].IsInt64());
-  ASSERT_TRUE((*arrayWithInt16Doc)[0] ==
-              JSONDocument(static_cast<int64_t>(512)));
-
-  unique_ptr<JSONDocument> arrayWithInt32Doc(
-    JSONDocument::ParseJSON("[1000000]"));
-  ASSERT_TRUE(arrayWithInt32Doc != nullptr);
-  ASSERT_TRUE(arrayWithInt32Doc->IsArray());
-  ASSERT_TRUE((*arrayWithInt32Doc)[0].IsInt64());
-  ASSERT_TRUE((*arrayWithInt32Doc)[0] ==
-               JSONDocument(static_cast<int64_t>(1000000)));
-
-  // kDouble
-  ASSERT_TRUE(JSONDocument(15.) != JSONDocument());
-  ASSERT_TRUE(JSONDocument(15.) != JSONDocument(14.));
-  ASSERT_TRUE(JSONDocument(15.) == JSONDocument(15.));
-}
-
-TEST_F(JSONDocumentTest, JSONDocumentBuilderTest) {
-  unique_ptr<JSONDocument> parsedArray(
-    JSONDocument::ParseJSON("[1, [123, \"a\", \"b\"], {\"b\":\"c\"}]"));
-  ASSERT_TRUE(parsedArray != nullptr);
-
-  JSONDocumentBuilder builder;
-  ASSERT_TRUE(builder.WriteStartArray());
-  ASSERT_TRUE(builder.WriteJSONDocument(1));
-
-  ASSERT_TRUE(builder.WriteStartArray());
-    ASSERT_TRUE(builder.WriteJSONDocument(123));
-    ASSERT_TRUE(builder.WriteJSONDocument("a"));
-    ASSERT_TRUE(builder.WriteJSONDocument("b"));
-  ASSERT_TRUE(builder.WriteEndArray());
-
-  ASSERT_TRUE(builder.WriteStartObject());
-    ASSERT_TRUE(builder.WriteKeyValue("b", "c"));
-  ASSERT_TRUE(builder.WriteEndObject());
-
-  ASSERT_TRUE(builder.WriteEndArray());
-
-  ASSERT_TRUE(*parsedArray == builder.GetJSONDocument());
-}
-
-TEST_F(JSONDocumentTest, OwnershipTest) {
-  std::unique_ptr<JSONDocument> parsed(
-          JSONDocument::ParseJSON(kSampleJSON.c_str()));
-  ASSERT_TRUE(parsed != nullptr);
-  ASSERT_TRUE(parsed->IsOwner());
-
-  // Copy constructor from owner -> owner
-  JSONDocument copy_constructor(*parsed);
-  ASSERT_TRUE(copy_constructor.IsOwner());
-
-  // Copy constructor from non-owner -> non-owner
-  JSONDocument non_owner((*parsed)["properties"]);
-  ASSERT_TRUE(!non_owner.IsOwner());
-
-  // Move constructor from owner -> owner
-  JSONDocument moved_from_owner(std::move(copy_constructor));
-  ASSERT_TRUE(moved_from_owner.IsOwner());
-
-  // Move constructor from non-owner -> non-owner
-  JSONDocument moved_from_non_owner(std::move(non_owner));
-  ASSERT_TRUE(!moved_from_non_owner.IsOwner());
-}
-
-}  //  namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr, "SKIPPED as JSONDocument is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // !ROCKSDB_LITE

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/d3a13a49/thirdparty/rocksdb/utilities/env_librados_test.cc
----------------------------------------------------------------------
diff --git a/thirdparty/rocksdb/utilities/env_librados_test.cc b/thirdparty/rocksdb/utilities/env_librados_test.cc
deleted file mode 100644
index 7d9b252..0000000
--- a/thirdparty/rocksdb/utilities/env_librados_test.cc
+++ /dev/null
@@ -1,1146 +0,0 @@
-//  Copyright (c) 2016, Red Hat, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include "rocksdb/utilities/env_librados.h"
-#include <rados/librados.hpp>
-#include "env/mock_env.h"
-#include "util/testharness.h"
-
-#include "rocksdb/db.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/options.h"
-#include "util/random.h"
-#include <chrono>
-#include <ostream>
-#include "rocksdb/utilities/transaction_db.h"
-
-class Timer {
-  typedef std::chrono::high_resolution_clock high_resolution_clock;
-  typedef std::chrono::milliseconds milliseconds;
-public:
-  explicit Timer(bool run = false)
-  {
-    if (run)
-      Reset();
-  }
-  void Reset()
-  {
-    _start = high_resolution_clock::now();
-  }
-  milliseconds Elapsed() const
-  {
-    return std::chrono::duration_cast<milliseconds>(high_resolution_clock::now() - _start);
-  }
-  template <typename T, typename Traits>
-  friend std::basic_ostream<T, Traits>& operator<<(std::basic_ostream<T, Traits>& out, const Timer& timer)
-  {
-    return out << timer.Elapsed().count();
-  }
-private:
-  high_resolution_clock::time_point _start;
-};
-
-namespace rocksdb {
-
-class EnvLibradosTest : public testing::Test {
-public:
-  // we will use all of these below
-  const std::string db_name = "env_librados_test_db";
-  const std::string db_pool = db_name + "_pool";
-  const char *keyring = "admin";
-  const char *config = "../ceph/src/ceph.conf";
-
-  EnvLibrados* env_;
-  const EnvOptions soptions_;
-
-  EnvLibradosTest()
-    : env_(new EnvLibrados(db_name, config, db_pool)) {
-  }
-  ~EnvLibradosTest() {
-    delete env_;
-    librados::Rados rados;
-    int ret = 0;
-    do {
-      ret = rados.init("admin"); // just use the client.admin keyring
-      if (ret < 0) { // let's handle any error that might have come back
-        std::cerr << "couldn't initialize rados! error " << ret << std::endl;
-        ret = EXIT_FAILURE;
-        break;
-      }
-
-      ret = rados.conf_read_file(config);
-      if (ret < 0) {
-        // This could fail if the config file is malformed, but it'd be hard.
-        std::cerr << "failed to parse config file " << config
-                  << "! error" << ret << std::endl;
-        ret = EXIT_FAILURE;
-        break;
-      }
-
-      /*
-       * next, we actually connect to the cluster
-       */
-
-      ret = rados.connect();
-      if (ret < 0) {
-        std::cerr << "couldn't connect to cluster! error " << ret << std::endl;
-        ret = EXIT_FAILURE;
-        break;
-      }
-
-      /*
-       * And now we're done, so let's remove our pool and then
-       * shut down the connection gracefully.
-       */
-      int delete_ret = rados.pool_delete(db_pool.c_str());
-      if (delete_ret < 0) {
-        // be careful not to
-        std::cerr << "We failed to delete our test pool!" << db_pool << delete_ret << std::endl;
-        ret = EXIT_FAILURE;
-      }
-    } while (0);
-  }
-};
-
-TEST_F(EnvLibradosTest, Basics) {
-  uint64_t file_size;
-  unique_ptr<WritableFile> writable_file;
-  std::vector<std::string> children;
-
-  ASSERT_OK(env_->CreateDir("/dir"));
-  // Check that the directory is empty.
-  ASSERT_EQ(Status::NotFound(), env_->FileExists("/dir/non_existent"));
-  ASSERT_TRUE(!env_->GetFileSize("/dir/non_existent", &file_size).ok());
-  ASSERT_OK(env_->GetChildren("/dir", &children));
-  ASSERT_EQ(0U, children.size());
-
-  // Create a file.
-  ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file, soptions_));
-  writable_file.reset();
-
-  // Check that the file exists.
-  ASSERT_OK(env_->FileExists("/dir/f"));
-  ASSERT_OK(env_->GetFileSize("/dir/f", &file_size));
-  ASSERT_EQ(0U, file_size);
-  ASSERT_OK(env_->GetChildren("/dir", &children));
-  ASSERT_EQ(1U, children.size());
-  ASSERT_EQ("f", children[0]);
-
-  // Write to the file.
-  ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file, soptions_));
-  ASSERT_OK(writable_file->Append("abc"));
-  writable_file.reset();
-
-
-  // Check for expected size.
-  ASSERT_OK(env_->GetFileSize("/dir/f", &file_size));
-  ASSERT_EQ(3U, file_size);
-
-
-  // Check that renaming works.
-  ASSERT_TRUE(!env_->RenameFile("/dir/non_existent", "/dir/g").ok());
-  ASSERT_OK(env_->RenameFile("/dir/f", "/dir/g"));
-  ASSERT_EQ(Status::NotFound(), env_->FileExists("/dir/f"));
-  ASSERT_OK(env_->FileExists("/dir/g"));
-  ASSERT_OK(env_->GetFileSize("/dir/g", &file_size));
-  ASSERT_EQ(3U, file_size);
-
-  // Check that opening non-existent file fails.
-  unique_ptr<SequentialFile> seq_file;
-  unique_ptr<RandomAccessFile> rand_file;
-  ASSERT_TRUE(
-    !env_->NewSequentialFile("/dir/non_existent", &seq_file, soptions_).ok());
-  ASSERT_TRUE(!seq_file);
-  ASSERT_TRUE(!env_->NewRandomAccessFile("/dir/non_existent", &rand_file,
-                                         soptions_).ok());
-  ASSERT_TRUE(!rand_file);
-
-  // Check that deleting works.
-  ASSERT_TRUE(!env_->DeleteFile("/dir/non_existent").ok());
-  ASSERT_OK(env_->DeleteFile("/dir/g"));
-  ASSERT_EQ(Status::NotFound(), env_->FileExists("/dir/g"));
-  ASSERT_OK(env_->GetChildren("/dir", &children));
-  ASSERT_EQ(0U, children.size());
-  ASSERT_OK(env_->DeleteDir("/dir"));
-}
-
-TEST_F(EnvLibradosTest, ReadWrite) {
-  unique_ptr<WritableFile> writable_file;
-  unique_ptr<SequentialFile> seq_file;
-  unique_ptr<RandomAccessFile> rand_file;
-  Slice result;
-  char scratch[100];
-
-  ASSERT_OK(env_->CreateDir("/dir"));
-
-  ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file, soptions_));
-  ASSERT_OK(writable_file->Append("hello "));
-  ASSERT_OK(writable_file->Append("world"));
-  writable_file.reset();
-
-  // Read sequentially.
-  ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file, soptions_));
-  ASSERT_OK(seq_file->Read(5, &result, scratch));  // Read "hello".
-  ASSERT_EQ(0, result.compare("hello"));
-  ASSERT_OK(seq_file->Skip(1));
-  ASSERT_OK(seq_file->Read(1000, &result, scratch));  // Read "world".
-  ASSERT_EQ(0, result.compare("world"));
-  ASSERT_OK(seq_file->Read(1000, &result, scratch));  // Try reading past EOF.
-  ASSERT_EQ(0U, result.size());
-  ASSERT_OK(seq_file->Skip(100));  // Try to skip past end of file.
-  ASSERT_OK(seq_file->Read(1000, &result, scratch));
-  ASSERT_EQ(0U, result.size());
-
-  // Random reads.
-  ASSERT_OK(env_->NewRandomAccessFile("/dir/f", &rand_file, soptions_));
-  ASSERT_OK(rand_file->Read(6, 5, &result, scratch));  // Read "world".
-  ASSERT_EQ(0, result.compare("world"));
-  ASSERT_OK(rand_file->Read(0, 5, &result, scratch));  // Read "hello".
-  ASSERT_EQ(0, result.compare("hello"));
-  ASSERT_OK(rand_file->Read(10, 100, &result, scratch));  // Read "d".
-  ASSERT_EQ(0, result.compare("d"));
-
-  // Too high offset.
-  ASSERT_OK(rand_file->Read(1000, 5, &result, scratch));
-}
-
-TEST_F(EnvLibradosTest, Locks) {
-  FileLock* lock = nullptr;
-  unique_ptr<WritableFile> writable_file;
-
-  ASSERT_OK(env_->CreateDir("/dir"));
-
-  ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file, soptions_));
-
-  // These are no-ops, but we test they return success.
-  ASSERT_OK(env_->LockFile("some file", &lock));
-  ASSERT_OK(env_->UnlockFile(lock));
-
-  ASSERT_OK(env_->LockFile("/dir/f", &lock));
-  ASSERT_OK(env_->UnlockFile(lock));
-}
-
-TEST_F(EnvLibradosTest, Misc) {
-  std::string test_dir;
-  ASSERT_OK(env_->GetTestDirectory(&test_dir));
-  ASSERT_TRUE(!test_dir.empty());
-
-  unique_ptr<WritableFile> writable_file;
-  ASSERT_TRUE(!env_->NewWritableFile("/a/b", &writable_file, soptions_).ok());
-
-  ASSERT_OK(env_->NewWritableFile("/a", &writable_file, soptions_));
-  // These are no-ops, but we test they return success.
-  ASSERT_OK(writable_file->Sync());
-  ASSERT_OK(writable_file->Flush());
-  ASSERT_OK(writable_file->Close());
-  writable_file.reset();
-}
-
-TEST_F(EnvLibradosTest, LargeWrite) {
-  const size_t kWriteSize = 300 * 1024;
-  char* scratch = new char[kWriteSize * 2];
-
-  std::string write_data;
-  for (size_t i = 0; i < kWriteSize; ++i) {
-    write_data.append(1, 'h');
-  }
-
-  unique_ptr<WritableFile> writable_file;
-  ASSERT_OK(env_->CreateDir("/dir"));
-  ASSERT_OK(env_->NewWritableFile("/dir/g", &writable_file, soptions_));
-  ASSERT_OK(writable_file->Append("foo"));
-  ASSERT_OK(writable_file->Append(write_data));
-  writable_file.reset();
-
-  unique_ptr<SequentialFile> seq_file;
-  Slice result;
-  ASSERT_OK(env_->NewSequentialFile("/dir/g", &seq_file, soptions_));
-  ASSERT_OK(seq_file->Read(3, &result, scratch));  // Read "foo".
-  ASSERT_EQ(0, result.compare("foo"));
-
-  size_t read = 0;
-  std::string read_data;
-  while (read < kWriteSize) {
-    ASSERT_OK(seq_file->Read(kWriteSize - read, &result, scratch));
-    read_data.append(result.data(), result.size());
-    read += result.size();
-  }
-  ASSERT_TRUE(write_data == read_data);
-  delete[] scratch;
-}
-
-TEST_F(EnvLibradosTest, FrequentlySmallWrite) {
-  const size_t kWriteSize = 1 << 10;
-  char* scratch = new char[kWriteSize * 2];
-
-  std::string write_data;
-  for (size_t i = 0; i < kWriteSize; ++i) {
-    write_data.append(1, 'h');
-  }
-
-  unique_ptr<WritableFile> writable_file;
-  ASSERT_OK(env_->CreateDir("/dir"));
-  ASSERT_OK(env_->NewWritableFile("/dir/g", &writable_file, soptions_));
-  ASSERT_OK(writable_file->Append("foo"));
-
-  for (size_t i = 0; i < kWriteSize; ++i) {
-    ASSERT_OK(writable_file->Append("h"));
-  }
-  writable_file.reset();
-
-  unique_ptr<SequentialFile> seq_file;
-  Slice result;
-  ASSERT_OK(env_->NewSequentialFile("/dir/g", &seq_file, soptions_));
-  ASSERT_OK(seq_file->Read(3, &result, scratch));  // Read "foo".
-  ASSERT_EQ(0, result.compare("foo"));
-
-  size_t read = 0;
-  std::string read_data;
-  while (read < kWriteSize) {
-    ASSERT_OK(seq_file->Read(kWriteSize - read, &result, scratch));
-    read_data.append(result.data(), result.size());
-    read += result.size();
-  }
-  ASSERT_TRUE(write_data == read_data);
-  delete[] scratch;
-}
-
-TEST_F(EnvLibradosTest, Truncate) {
-  const size_t kWriteSize = 300 * 1024;
-  const size_t truncSize = 1024;
-  std::string write_data;
-  for (size_t i = 0; i < kWriteSize; ++i) {
-    write_data.append(1, 'h');
-  }
-
-  unique_ptr<WritableFile> writable_file;
-  ASSERT_OK(env_->CreateDir("/dir"));
-  ASSERT_OK(env_->NewWritableFile("/dir/g", &writable_file, soptions_));
-  ASSERT_OK(writable_file->Append(write_data));
-  ASSERT_EQ(writable_file->GetFileSize(), kWriteSize);
-  ASSERT_OK(writable_file->Truncate(truncSize));
-  ASSERT_EQ(writable_file->GetFileSize(), truncSize);
-  writable_file.reset();
-}
-
-TEST_F(EnvLibradosTest, DBBasics) {
-  std::string kDBPath = "/tmp/DBBasics";
-  DB* db;
-  Options options;
-  // Optimize RocksDB. This is the easiest way to get RocksDB to perform well
-  options.IncreaseParallelism();
-  options.OptimizeLevelStyleCompaction();
-  // create the DB if it's not already present
-  options.create_if_missing = true;
-  options.env = env_;
-
-  // open DB
-  Status s = DB::Open(options, kDBPath, &db);
-  assert(s.ok());
-
-  // Put key-value
-  s = db->Put(WriteOptions(), "key1", "value");
-  assert(s.ok());
-  std::string value;
-  // get value
-  s = db->Get(ReadOptions(), "key1", &value);
-  assert(s.ok());
-  assert(value == "value");
-
-  // atomically apply a set of updates
-  {
-    WriteBatch batch;
-    batch.Delete("key1");
-    batch.Put("key2", value);
-    s = db->Write(WriteOptions(), &batch);
-  }
-
-  s = db->Get(ReadOptions(), "key1", &value);
-  assert(s.IsNotFound());
-
-  db->Get(ReadOptions(), "key2", &value);
-  assert(value == "value");
-
-  delete db;
-}
-
-TEST_F(EnvLibradosTest, DBLoadKeysInRandomOrder) {
-  char key[20] = {0}, value[20] = {0};
-  int max_loop = 1 << 10;
-  Timer timer(false);
-  std::cout << "Test size : loop(" << max_loop << ")" << std::endl;
-  /**********************************
-            use default env
-  ***********************************/
-  std::string kDBPath1 = "/tmp/DBLoadKeysInRandomOrder1";
-  DB* db1;
-  Options options1;
-  // Optimize Rocksdb. This is the easiest way to get RocksDB to perform well
-  options1.IncreaseParallelism();
-  options1.OptimizeLevelStyleCompaction();
-  // create the DB if it's not already present
-  options1.create_if_missing = true;
-
-  // open DB
-  Status s1 = DB::Open(options1, kDBPath1, &db1);
-  assert(s1.ok());
-
-  rocksdb::Random64 r1(time(nullptr));
-
-  timer.Reset();
-  for (int i = 0; i < max_loop; ++i) {
-    snprintf(key,
-             20,
-             "%16lx",
-             (unsigned long)r1.Uniform(std::numeric_limits<uint64_t>::max()));
-    snprintf(value,
-             20,
-             "%16lx",
-             (unsigned long)r1.Uniform(std::numeric_limits<uint64_t>::max()));
-    // Put key-value
-    s1 = db1->Put(WriteOptions(), key, value);
-    assert(s1.ok());
-  }
-  std::cout << "Time by default : " << timer << "ms" << std::endl;
-  delete db1;
-
-  /**********************************
-            use librados env
-  ***********************************/
-  std::string kDBPath2 = "/tmp/DBLoadKeysInRandomOrder2";
-  DB* db2;
-  Options options2;
-  // Optimize RocksDB. This is the easiest way to get RocksDB to perform well
-  options2.IncreaseParallelism();
-  options2.OptimizeLevelStyleCompaction();
-  // create the DB if it's not already present
-  options2.create_if_missing = true;
-  options2.env = env_;
-
-  // open DB
-  Status s2 = DB::Open(options2, kDBPath2, &db2);
-  assert(s2.ok());
-
-  rocksdb::Random64 r2(time(nullptr));
-
-  timer.Reset();
-  for (int i = 0; i < max_loop; ++i) {
-    snprintf(key,
-             20,
-             "%16lx",
-             (unsigned long)r2.Uniform(std::numeric_limits<uint64_t>::max()));
-    snprintf(value,
-             20,
-             "%16lx",
-             (unsigned long)r2.Uniform(std::numeric_limits<uint64_t>::max()));
-    // Put key-value
-    s2 = db2->Put(WriteOptions(), key, value);
-    assert(s2.ok());
-  }
-  std::cout << "Time by librados : " << timer << "ms" << std::endl;
-  delete db2;
-}
-
-TEST_F(EnvLibradosTest, DBBulkLoadKeysInRandomOrder) {
-  char key[20] = {0}, value[20] = {0};
-  int max_loop = 1 << 6;
-  int bulk_size = 1 << 15;
-  Timer timer(false);
-  std::cout << "Test size : loop(" << max_loop << "); bulk_size(" << bulk_size 
<< ")" << std::endl;
-  /**********************************
-            use default env
-  ***********************************/
-  std::string kDBPath1 = "/tmp/DBBulkLoadKeysInRandomOrder1";
-  DB* db1;
-  Options options1;
-  // Optimize Rocksdb. This is the easiest way to get RocksDB to perform well
-  options1.IncreaseParallelism();
-  options1.OptimizeLevelStyleCompaction();
-  // create the DB if it's not already present
-  options1.create_if_missing = true;
-
-  // open DB
-  Status s1 = DB::Open(options1, kDBPath1, &db1);
-  assert(s1.ok());
-
-  rocksdb::Random64 r1(time(nullptr));
-
-  timer.Reset();
-  for (int i = 0; i < max_loop; ++i) {
-    WriteBatch batch;
-    for (int j = 0; j < bulk_size; ++j) {
-      snprintf(key,
-               20,
-               "%16lx",
-               (unsigned 
long)r1.Uniform(std::numeric_limits<uint64_t>::max()));
-      snprintf(value,
-               20,
-               "%16lx",
-               (unsigned 
long)r1.Uniform(std::numeric_limits<uint64_t>::max()));
-      batch.Put(key, value);
-    }
-    s1 = db1->Write(WriteOptions(), &batch);
-    assert(s1.ok());
-  }
-  std::cout << "Time by default : " << timer << "ms" << std::endl;
-  delete db1;
-
-  /**********************************
-            use librados env
-  ***********************************/
-  std::string kDBPath2 = "/tmp/DBBulkLoadKeysInRandomOrder2";
-  DB* db2;
-  Options options2;
-  // Optimize RocksDB. This is the easiest way to get RocksDB to perform well
-  options2.IncreaseParallelism();
-  options2.OptimizeLevelStyleCompaction();
-  // create the DB if it's not already present
-  options2.create_if_missing = true;
-  options2.env = env_;
-
-  // open DB
-  Status s2 = DB::Open(options2, kDBPath2, &db2);
-  assert(s2.ok());
-
-  rocksdb::Random64 r2(time(nullptr));
-
-  timer.Reset();
-  for (int i = 0; i < max_loop; ++i) {
-    WriteBatch batch;
-    for (int j = 0; j < bulk_size; ++j) {
-      snprintf(key,
-               20,
-               "%16lx",
-               (unsigned 
long)r2.Uniform(std::numeric_limits<uint64_t>::max()));
-      snprintf(value,
-               20,
-               "%16lx",
-               (unsigned 
long)r2.Uniform(std::numeric_limits<uint64_t>::max()));
-      batch.Put(key, value);
-    }
-    s2 = db2->Write(WriteOptions(), &batch);
-    assert(s2.ok());
-  }
-  std::cout << "Time by librados : " << timer << "ms" << std::endl;
-  delete db2;
-}
-
-TEST_F(EnvLibradosTest, DBBulkLoadKeysInSequentialOrder) {
-  char key[20] = {0}, value[20] = {0};
-  int max_loop = 1 << 6;
-  int bulk_size = 1 << 15;
-  Timer timer(false);
-  std::cout << "Test size : loop(" << max_loop << "); bulk_size(" << bulk_size 
<< ")" << std::endl;
-  /**********************************
-            use default env
-  ***********************************/
-  std::string kDBPath1 = "/tmp/DBBulkLoadKeysInSequentialOrder1";
-  DB* db1;
-  Options options1;
-  // Optimize Rocksdb. This is the easiest way to get RocksDB to perform well
-  options1.IncreaseParallelism();
-  options1.OptimizeLevelStyleCompaction();
-  // create the DB if it's not already present
-  options1.create_if_missing = true;
-
-  // open DB
-  Status s1 = DB::Open(options1, kDBPath1, &db1);
-  assert(s1.ok());
-
-  rocksdb::Random64 r1(time(nullptr));
-
-  timer.Reset();
-  for (int i = 0; i < max_loop; ++i) {
-    WriteBatch batch;
-    for (int j = 0; j < bulk_size; ++j) {
-      snprintf(key,
-               20,
-               "%019lld",
-               (long long)(i * bulk_size + j));
-      snprintf(value,
-               20,
-               "%16lx",
-               (unsigned 
long)r1.Uniform(std::numeric_limits<uint64_t>::max()));
-      batch.Put(key, value);
-    }
-    s1 = db1->Write(WriteOptions(), &batch);
-    assert(s1.ok());
-  }
-  std::cout << "Time by default : " << timer << "ms" << std::endl;
-  delete db1;
-
-  /**********************************
-            use librados env
-  ***********************************/
-  std::string kDBPath2 = "/tmp/DBBulkLoadKeysInSequentialOrder2";
-  DB* db2;
-  Options options2;
-  // Optimize RocksDB. This is the easiest way to get RocksDB to perform well
-  options2.IncreaseParallelism();
-  options2.OptimizeLevelStyleCompaction();
-  // create the DB if it's not already present
-  options2.create_if_missing = true;
-  options2.env = env_;
-
-  // open DB
-  Status s2 = DB::Open(options2, kDBPath2, &db2);
-  assert(s2.ok());
-
-  rocksdb::Random64 r2(time(nullptr));
-
-  timer.Reset();
-  for (int i = 0; i < max_loop; ++i) {
-    WriteBatch batch;
-    for (int j = 0; j < bulk_size; ++j) {
-      snprintf(key,
-               20,
-               "%16lx",
-               (unsigned 
long)r2.Uniform(std::numeric_limits<uint64_t>::max()));
-      snprintf(value,
-               20,
-               "%16lx",
-               (unsigned 
long)r2.Uniform(std::numeric_limits<uint64_t>::max()));
-      batch.Put(key, value);
-    }
-    s2 = db2->Write(WriteOptions(), &batch);
-    assert(s2.ok());
-  }
-  std::cout << "Time by librados : " << timer << "ms" << std::endl;
-  delete db2;
-}
-
-TEST_F(EnvLibradosTest, DBRandomRead) {
-  char key[20] = {0}, value[20] = {0};
-  int max_loop = 1 << 6;
-  int bulk_size = 1 << 10;
-  int read_loop = 1 << 20;
-  Timer timer(false);
-  std::cout << "Test size : keys_num(" << max_loop << ", " << bulk_size << "); 
read_loop(" << read_loop << ")" << std::endl;
-  /**********************************
-            use default env
-  ***********************************/
-  std::string kDBPath1 = "/tmp/DBRandomRead1";
-  DB* db1;
-  Options options1;
-  // Optimize Rocksdb. This is the easiest way to get RocksDB to perform well
-  options1.IncreaseParallelism();
-  options1.OptimizeLevelStyleCompaction();
-  // create the DB if it's not already present
-  options1.create_if_missing = true;
-
-  // open DB
-  Status s1 = DB::Open(options1, kDBPath1, &db1);
-  assert(s1.ok());
-
-  rocksdb::Random64 r1(time(nullptr));
-
-
-  for (int i = 0; i < max_loop; ++i) {
-    WriteBatch batch;
-    for (int j = 0; j < bulk_size; ++j) {
-      snprintf(key,
-               20,
-               "%019lld",
-               (long long)(i * bulk_size + j));
-      snprintf(value,
-               20,
-               "%16lx",
-               (unsigned 
long)r1.Uniform(std::numeric_limits<uint64_t>::max()));
-      batch.Put(key, value);
-    }
-    s1 = db1->Write(WriteOptions(), &batch);
-    assert(s1.ok());
-  }
-  timer.Reset();
-  int base1 = 0, offset1 = 0;
-  for (int i = 0; i < read_loop; ++i) {
-    base1 = r1.Uniform(max_loop);
-    offset1 = r1.Uniform(bulk_size);
-    std::string value1;
-    snprintf(key,
-             20,
-             "%019lld",
-             (long long)(base1 * bulk_size + offset1));
-    s1 = db1->Get(ReadOptions(), key, &value1);
-    assert(s1.ok());
-  }
-  std::cout << "Time by default : " << timer << "ms" << std::endl;
-  delete db1;
-
-  /**********************************
-            use librados env
-  ***********************************/
-  std::string kDBPath2 = "/tmp/DBRandomRead2";
-  DB* db2;
-  Options options2;
-  // Optimize RocksDB. This is the easiest way to get RocksDB to perform well
-  options2.IncreaseParallelism();
-  options2.OptimizeLevelStyleCompaction();
-  // create the DB if it's not already present
-  options2.create_if_missing = true;
-  options2.env = env_;
-
-  // open DB
-  Status s2 = DB::Open(options2, kDBPath2, &db2);
-  assert(s2.ok());
-
-  rocksdb::Random64 r2(time(nullptr));
-
-  for (int i = 0; i < max_loop; ++i) {
-    WriteBatch batch;
-    for (int j = 0; j < bulk_size; ++j) {
-      snprintf(key,
-               20,
-               "%019lld",
-               (long long)(i * bulk_size + j));
-      snprintf(value,
-               20,
-               "%16lx",
-               (unsigned 
long)r2.Uniform(std::numeric_limits<uint64_t>::max()));
-      batch.Put(key, value);
-    }
-    s2 = db2->Write(WriteOptions(), &batch);
-    assert(s2.ok());
-  }
-
-  timer.Reset();
-  int base2 = 0, offset2 = 0;
-  for (int i = 0; i < read_loop; ++i) {
-    base2 = r2.Uniform(max_loop);
-    offset2 = r2.Uniform(bulk_size);
-    std::string value2;
-    snprintf(key,
-             20,
-             "%019lld",
-             (long long)(base2 * bulk_size + offset2));
-    s2 = db2->Get(ReadOptions(), key, &value2);
-    if (!s2.ok()) {
-      std::cout << s2.ToString() << std::endl;
-    }
-    assert(s2.ok());
-  }
-  std::cout << "Time by librados : " << timer << "ms" << std::endl;
-  delete db2;
-}
-
-class EnvLibradosMutipoolTest : public testing::Test {
-public:
-  // we will use all of these below
-  const std::string client_name = "client.admin";
-  const std::string cluster_name = "ceph";
-  const uint64_t flags = 0;
-  const std::string db_name = "env_librados_test_db";
-  const std::string db_pool = db_name + "_pool";
-  const std::string wal_dir = "/wal";
-  const std::string wal_pool = db_name + "_wal_pool";
-  const size_t write_buffer_size = 1 << 20;
-  const char *keyring = "admin";
-  const char *config = "../ceph/src/ceph.conf";
-
-  EnvLibrados* env_;
-  const EnvOptions soptions_;
-
-  EnvLibradosMutipoolTest() {
-    env_ = new EnvLibrados(client_name, cluster_name, flags, db_name, config, 
db_pool, wal_dir, wal_pool, write_buffer_size);
-  }
-  ~EnvLibradosMutipoolTest() {
-    delete env_;
-    librados::Rados rados;
-    int ret = 0;
-    do {
-      ret = rados.init("admin"); // just use the client.admin keyring
-      if (ret < 0) { // let's handle any error that might have come back
-        std::cerr << "couldn't initialize rados! error " << ret << std::endl;
-        ret = EXIT_FAILURE;
-        break;
-      }
-
-      ret = rados.conf_read_file(config);
-      if (ret < 0) {
-        // This could fail if the config file is malformed, but it'd be hard.
-        std::cerr << "failed to parse config file " << config
-                  << "! error" << ret << std::endl;
-        ret = EXIT_FAILURE;
-        break;
-      }
-
-      /*
-       * next, we actually connect to the cluster
-       */
-
-      ret = rados.connect();
-      if (ret < 0) {
-        std::cerr << "couldn't connect to cluster! error " << ret << std::endl;
-        ret = EXIT_FAILURE;
-        break;
-      }
-
-      /*
-       * And now we're done, so let's remove our pool and then
-       * shut down the connection gracefully.
-       */
-      int delete_ret = rados.pool_delete(db_pool.c_str());
-      if (delete_ret < 0) {
-        // be careful not to
-        std::cerr << "We failed to delete our test pool!" << db_pool << 
delete_ret << std::endl;
-        ret = EXIT_FAILURE;
-      }
-      delete_ret = rados.pool_delete(wal_pool.c_str());
-      if (delete_ret < 0) {
-        // be careful not to
-        std::cerr << "We failed to delete our test pool!" << wal_pool << 
delete_ret << std::endl;
-        ret = EXIT_FAILURE;
-      }
-    } while (0);
-  }
-};
-
-TEST_F(EnvLibradosMutipoolTest, Basics) {
-  uint64_t file_size;
-  unique_ptr<WritableFile> writable_file;
-  std::vector<std::string> children;
-  std::vector<std::string> v = {"/tmp/dir1", "/tmp/dir2", "/tmp/dir3", 
"/tmp/dir4", "dir"};
-
-  for (size_t i = 0; i < v.size(); ++i) {
-    std::string dir = v[i];
-    std::string dir_non_existent = dir + "/non_existent";
-    std::string dir_f = dir + "/f";
-    std::string dir_g = dir + "/g";
-
-    ASSERT_OK(env_->CreateDir(dir.c_str()));
-    // Check that the directory is empty.
-    ASSERT_EQ(Status::NotFound(), env_->FileExists(dir_non_existent.c_str()));
-    ASSERT_TRUE(!env_->GetFileSize(dir_non_existent.c_str(), &file_size).ok());
-    ASSERT_OK(env_->GetChildren(dir.c_str(), &children));
-    ASSERT_EQ(0U, children.size());
-
-    // Create a file.
-    ASSERT_OK(env_->NewWritableFile(dir_f.c_str(), &writable_file, soptions_));
-    writable_file.reset();
-
-    // Check that the file exists.
-    ASSERT_OK(env_->FileExists(dir_f.c_str()));
-    ASSERT_OK(env_->GetFileSize(dir_f.c_str(), &file_size));
-    ASSERT_EQ(0U, file_size);
-    ASSERT_OK(env_->GetChildren(dir.c_str(), &children));
-    ASSERT_EQ(1U, children.size());
-    ASSERT_EQ("f", children[0]);
-
-    // Write to the file.
-    ASSERT_OK(env_->NewWritableFile(dir_f.c_str(), &writable_file, soptions_));
-    ASSERT_OK(writable_file->Append("abc"));
-    writable_file.reset();
-
-
-    // Check for expected size.
-    ASSERT_OK(env_->GetFileSize(dir_f.c_str(), &file_size));
-    ASSERT_EQ(3U, file_size);
-
-
-    // Check that renaming works.
-    ASSERT_TRUE(!env_->RenameFile(dir_non_existent.c_str(), 
dir_g.c_str()).ok());
-    ASSERT_OK(env_->RenameFile(dir_f.c_str(), dir_g.c_str()));
-    ASSERT_EQ(Status::NotFound(), env_->FileExists(dir_f.c_str()));
-    ASSERT_OK(env_->FileExists(dir_g.c_str()));
-    ASSERT_OK(env_->GetFileSize(dir_g.c_str(), &file_size));
-    ASSERT_EQ(3U, file_size);
-
-    // Check that opening non-existent file fails.
-    unique_ptr<SequentialFile> seq_file;
-    unique_ptr<RandomAccessFile> rand_file;
-    ASSERT_TRUE(
-      !env_->NewSequentialFile(dir_non_existent.c_str(), &seq_file, 
soptions_).ok());
-    ASSERT_TRUE(!seq_file);
-    ASSERT_TRUE(!env_->NewRandomAccessFile(dir_non_existent.c_str(), 
&rand_file,
-                                           soptions_).ok());
-    ASSERT_TRUE(!rand_file);
-
-    // Check that deleting works.
-    ASSERT_TRUE(!env_->DeleteFile(dir_non_existent.c_str()).ok());
-    ASSERT_OK(env_->DeleteFile(dir_g.c_str()));
-    ASSERT_EQ(Status::NotFound(), env_->FileExists(dir_g.c_str()));
-    ASSERT_OK(env_->GetChildren(dir.c_str(), &children));
-    ASSERT_EQ(0U, children.size());
-    ASSERT_OK(env_->DeleteDir(dir.c_str()));
-  }
-}
-
-TEST_F(EnvLibradosMutipoolTest, DBBasics) {
-  std::string kDBPath = "/tmp/DBBasics";
-  std::string walPath = "/tmp/wal";
-  DB* db;
-  Options options;
-  // Optimize RocksDB. This is the easiest way to get RocksDB to perform well
-  options.IncreaseParallelism();
-  options.OptimizeLevelStyleCompaction();
-  // create the DB if it's not already present
-  options.create_if_missing = true;
-  options.env = env_;
-  options.wal_dir = walPath;
-
-  // open DB
-  Status s = DB::Open(options, kDBPath, &db);
-  assert(s.ok());
-
-  // Put key-value
-  s = db->Put(WriteOptions(), "key1", "value");
-  assert(s.ok());
-  std::string value;
-  // get value
-  s = db->Get(ReadOptions(), "key1", &value);
-  assert(s.ok());
-  assert(value == "value");
-
-  // atomically apply a set of updates
-  {
-    WriteBatch batch;
-    batch.Delete("key1");
-    batch.Put("key2", value);
-    s = db->Write(WriteOptions(), &batch);
-  }
-
-  s = db->Get(ReadOptions(), "key1", &value);
-  assert(s.IsNotFound());
-
-  db->Get(ReadOptions(), "key2", &value);
-  assert(value == "value");
-
-  delete db;
-}
-
-TEST_F(EnvLibradosMutipoolTest, DBBulkLoadKeysInRandomOrder) {
-  char key[20] = {0}, value[20] = {0};
-  int max_loop = 1 << 6;
-  int bulk_size = 1 << 15;
-  Timer timer(false);
-  std::cout << "Test size : loop(" << max_loop << "); bulk_size(" << bulk_size 
<< ")" << std::endl;
-  /**********************************
-            use default env
-  ***********************************/
-  std::string kDBPath1 = "/tmp/DBBulkLoadKeysInRandomOrder1";
-  std::string walPath = "/tmp/wal";
-  DB* db1;
-  Options options1;
-  // Optimize Rocksdb. This is the easiest way to get RocksDB to perform well
-  options1.IncreaseParallelism();
-  options1.OptimizeLevelStyleCompaction();
-  // create the DB if it's not already present
-  options1.create_if_missing = true;
-
-  // open DB
-  Status s1 = DB::Open(options1, kDBPath1, &db1);
-  assert(s1.ok());
-
-  rocksdb::Random64 r1(time(nullptr));
-
-  timer.Reset();
-  for (int i = 0; i < max_loop; ++i) {
-    WriteBatch batch;
-    for (int j = 0; j < bulk_size; ++j) {
-      snprintf(key,
-               20,
-               "%16lx",
-               (unsigned 
long)r1.Uniform(std::numeric_limits<uint64_t>::max()));
-      snprintf(value,
-               20,
-               "%16lx",
-               (unsigned 
long)r1.Uniform(std::numeric_limits<uint64_t>::max()));
-      batch.Put(key, value);
-    }
-    s1 = db1->Write(WriteOptions(), &batch);
-    assert(s1.ok());
-  }
-  std::cout << "Time by default : " << timer << "ms" << std::endl;
-  delete db1;
-
-  /**********************************
-            use librados env
-  ***********************************/
-  std::string kDBPath2 = "/tmp/DBBulkLoadKeysInRandomOrder2";
-  DB* db2;
-  Options options2;
-  // Optimize RocksDB. This is the easiest way to get RocksDB to perform well
-  options2.IncreaseParallelism();
-  options2.OptimizeLevelStyleCompaction();
-  // create the DB if it's not already present
-  options2.create_if_missing = true;
-  options2.env = env_;
-  options2.wal_dir = walPath;
-
-  // open DB
-  Status s2 = DB::Open(options2, kDBPath2, &db2);
-  if (!s2.ok()) {
-    std::cerr << s2.ToString() << std::endl;
-  }
-  assert(s2.ok());
-
-  rocksdb::Random64 r2(time(nullptr));
-
-  timer.Reset();
-  for (int i = 0; i < max_loop; ++i) {
-    WriteBatch batch;
-    for (int j = 0; j < bulk_size; ++j) {
-      snprintf(key,
-               20,
-               "%16lx",
-               (unsigned 
long)r2.Uniform(std::numeric_limits<uint64_t>::max()));
-      snprintf(value,
-               20,
-               "%16lx",
-               (unsigned 
long)r2.Uniform(std::numeric_limits<uint64_t>::max()));
-      batch.Put(key, value);
-    }
-    s2 = db2->Write(WriteOptions(), &batch);
-    assert(s2.ok());
-  }
-  std::cout << "Time by librados : " << timer << "ms" << std::endl;
-  delete db2;
-}
-
-TEST_F(EnvLibradosMutipoolTest, DBTransactionDB) {
-  std::string kDBPath = "/tmp/DBTransactionDB";
-  // open DB
-  Options options;
-  TransactionDBOptions txn_db_options;
-  options.create_if_missing = true;
-  options.env = env_;
-  TransactionDB* txn_db;
-
-  Status s = TransactionDB::Open(options, txn_db_options, kDBPath, &txn_db);
-  assert(s.ok());
-
-  WriteOptions write_options;
-  ReadOptions read_options;
-  TransactionOptions txn_options;
-  std::string value;
-
-  ////////////////////////////////////////////////////////
-  //
-  // Simple OptimisticTransaction Example ("Read Committed")
-  //
-  ////////////////////////////////////////////////////////
-
-  // Start a transaction
-  Transaction* txn = txn_db->BeginTransaction(write_options);
-  assert(txn);
-
-  // Read a key in this transaction
-  s = txn->Get(read_options, "abc", &value);
-  assert(s.IsNotFound());
-
-  // Write a key in this transaction
-  s = txn->Put("abc", "def");
-  assert(s.ok());
-
-  // Read a key OUTSIDE this transaction. Does not affect txn.
-  s = txn_db->Get(read_options, "abc", &value);
-
-  // Write a key OUTSIDE of this transaction.
-  // Does not affect txn since this is an unrelated key.  If we wrote key 'abc'
-  // here, the transaction would fail to commit.
-  s = txn_db->Put(write_options, "xyz", "zzz");
-
-  // Commit transaction
-  s = txn->Commit();
-  assert(s.ok());
-  delete txn;
-
-  ////////////////////////////////////////////////////////
-  //
-  // "Repeatable Read" (Snapshot Isolation) Example
-  //   -- Using a single Snapshot
-  //
-  ////////////////////////////////////////////////////////
-
-  // Set a snapshot at start of transaction by setting set_snapshot=true
-  txn_options.set_snapshot = true;
-  txn = txn_db->BeginTransaction(write_options, txn_options);
-
-  const Snapshot* snapshot = txn->GetSnapshot();
-
-  // Write a key OUTSIDE of transaction
-  s = txn_db->Put(write_options, "abc", "xyz");
-  assert(s.ok());
-
-  // Attempt to read a key using the snapshot.  This will fail since
-  // the previous write outside this txn conflicts with this read.
-  read_options.snapshot = snapshot;
-  s = txn->GetForUpdate(read_options, "abc", &value);
-  assert(s.IsBusy());
-
-  txn->Rollback();
-
-  delete txn;
-  // Clear snapshot from read options since it is no longer valid
-  read_options.snapshot = nullptr;
-  snapshot = nullptr;
-
-  ////////////////////////////////////////////////////////
-  //
-  // "Read Committed" (Monotonic Atomic Views) Example
-  //   --Using multiple Snapshots
-  //
-  ////////////////////////////////////////////////////////
-
-  // In this example, we set the snapshot multiple times.  This is probably
-  // only necessary if you have very strict isolation requirements to
-  // implement.
-
-  // Set a snapshot at start of transaction
-  txn_options.set_snapshot = true;
-  txn = txn_db->BeginTransaction(write_options, txn_options);
-
-  // Do some reads and writes to key "x"
-  read_options.snapshot = txn_db->GetSnapshot();
-  s = txn->Get(read_options, "x", &value);
-  txn->Put("x", "x");
-
-  // Do a write outside of the transaction to key "y"
-  s = txn_db->Put(write_options, "y", "y");
-
-  // Set a new snapshot in the transaction
-  txn->SetSnapshot();
-  txn->SetSavePoint();
-  read_options.snapshot = txn_db->GetSnapshot();
-
-  // Do some reads and writes to key "y"
-  // Since the snapshot was advanced, the write done outside of the
-  // transaction does not conflict.
-  s = txn->GetForUpdate(read_options, "y", &value);
-  txn->Put("y", "y");
-
-  // Decide we want to revert the last write from this transaction.
-  txn->RollbackToSavePoint();
-
-  // Commit.
-  s = txn->Commit();
-  assert(s.ok());
-  delete txn;
-  // Clear snapshot from read options since it is no longer valid
-  read_options.snapshot = nullptr;
-
-  // Cleanup
-  delete txn_db;
-  DestroyDB(kDBPath, options);
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr, "SKIPPED as EnvMirror is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // !ROCKSDB_LITE

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/d3a13a49/thirdparty/rocksdb/utilities/env_mirror_test.cc
----------------------------------------------------------------------
diff --git a/thirdparty/rocksdb/utilities/env_mirror_test.cc 
b/thirdparty/rocksdb/utilities/env_mirror_test.cc
deleted file mode 100644
index 2bf8ec8..0000000
--- a/thirdparty/rocksdb/utilities/env_mirror_test.cc
+++ /dev/null
@@ -1,222 +0,0 @@
-//  Copyright (c) 2015, Red Hat, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include "rocksdb/utilities/env_mirror.h"
-#include "env/mock_env.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-
-class EnvMirrorTest : public testing::Test {
- public:
-  Env* default_;
-  MockEnv* a_, *b_;
-  EnvMirror* env_;
-  const EnvOptions soptions_;
-
-  EnvMirrorTest()
-      : default_(Env::Default()),
-        a_(new MockEnv(default_)),
-        b_(new MockEnv(default_)),
-        env_(new EnvMirror(a_, b_)) {}
-  ~EnvMirrorTest() {
-    delete env_;
-    delete a_;
-    delete b_;
-  }
-};
-
-TEST_F(EnvMirrorTest, Basics) {
-  uint64_t file_size;
-  unique_ptr<WritableFile> writable_file;
-  std::vector<std::string> children;
-
-  ASSERT_OK(env_->CreateDir("/dir"));
-
-  // Check that the directory is empty.
-  ASSERT_EQ(Status::NotFound(), env_->FileExists("/dir/non_existent"));
-  ASSERT_TRUE(!env_->GetFileSize("/dir/non_existent", &file_size).ok());
-  ASSERT_OK(env_->GetChildren("/dir", &children));
-  ASSERT_EQ(0U, children.size());
-
-  // Create a file.
-  ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file, soptions_));
-  writable_file.reset();
-
-  // Check that the file exists.
-  ASSERT_OK(env_->FileExists("/dir/f"));
-  ASSERT_OK(a_->FileExists("/dir/f"));
-  ASSERT_OK(b_->FileExists("/dir/f"));
-  ASSERT_OK(env_->GetFileSize("/dir/f", &file_size));
-  ASSERT_EQ(0U, file_size);
-  ASSERT_OK(env_->GetChildren("/dir", &children));
-  ASSERT_EQ(1U, children.size());
-  ASSERT_EQ("f", children[0]);
-  ASSERT_OK(a_->GetChildren("/dir", &children));
-  ASSERT_EQ(1U, children.size());
-  ASSERT_EQ("f", children[0]);
-  ASSERT_OK(b_->GetChildren("/dir", &children));
-  ASSERT_EQ(1U, children.size());
-  ASSERT_EQ("f", children[0]);
-
-  // Write to the file.
-  ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file, soptions_));
-  ASSERT_OK(writable_file->Append("abc"));
-  writable_file.reset();
-
-  // Check for expected size.
-  ASSERT_OK(env_->GetFileSize("/dir/f", &file_size));
-  ASSERT_EQ(3U, file_size);
-  ASSERT_OK(a_->GetFileSize("/dir/f", &file_size));
-  ASSERT_EQ(3U, file_size);
-  ASSERT_OK(b_->GetFileSize("/dir/f", &file_size));
-  ASSERT_EQ(3U, file_size);
-
-  // Check that renaming works.
-  ASSERT_TRUE(!env_->RenameFile("/dir/non_existent", "/dir/g").ok());
-  ASSERT_OK(env_->RenameFile("/dir/f", "/dir/g"));
-  ASSERT_EQ(Status::NotFound(), env_->FileExists("/dir/f"));
-  ASSERT_OK(env_->FileExists("/dir/g"));
-  ASSERT_OK(env_->GetFileSize("/dir/g", &file_size));
-  ASSERT_EQ(3U, file_size);
-  ASSERT_OK(a_->FileExists("/dir/g"));
-  ASSERT_OK(a_->GetFileSize("/dir/g", &file_size));
-  ASSERT_EQ(3U, file_size);
-  ASSERT_OK(b_->FileExists("/dir/g"));
-  ASSERT_OK(b_->GetFileSize("/dir/g", &file_size));
-  ASSERT_EQ(3U, file_size);
-
-  // Check that opening non-existent file fails.
-  unique_ptr<SequentialFile> seq_file;
-  unique_ptr<RandomAccessFile> rand_file;
-  ASSERT_TRUE(
-      !env_->NewSequentialFile("/dir/non_existent", &seq_file, soptions_).ok());
-  ASSERT_TRUE(!seq_file);
-  ASSERT_TRUE(!env_->NewRandomAccessFile("/dir/non_existent", &rand_file,
-                                         soptions_).ok());
-  ASSERT_TRUE(!rand_file);
-
-  // Check that deleting works.
-  ASSERT_TRUE(!env_->DeleteFile("/dir/non_existent").ok());
-  ASSERT_OK(env_->DeleteFile("/dir/g"));
-  ASSERT_EQ(Status::NotFound(), env_->FileExists("/dir/g"));
-  ASSERT_OK(env_->GetChildren("/dir", &children));
-  ASSERT_EQ(0U, children.size());
-  ASSERT_OK(env_->DeleteDir("/dir"));
-}
-
-TEST_F(EnvMirrorTest, ReadWrite) {
-  unique_ptr<WritableFile> writable_file;
-  unique_ptr<SequentialFile> seq_file;
-  unique_ptr<RandomAccessFile> rand_file;
-  Slice result;
-  char scratch[100];
-
-  ASSERT_OK(env_->CreateDir("/dir"));
-
-  ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file, soptions_));
-  ASSERT_OK(writable_file->Append("hello "));
-  ASSERT_OK(writable_file->Append("world"));
-  writable_file.reset();
-
-  // Read sequentially.
-  ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file, soptions_));
-  ASSERT_OK(seq_file->Read(5, &result, scratch));  // Read "hello".
-  ASSERT_EQ(0, result.compare("hello"));
-  ASSERT_OK(seq_file->Skip(1));
-  ASSERT_OK(seq_file->Read(1000, &result, scratch));  // Read "world".
-  ASSERT_EQ(0, result.compare("world"));
-  ASSERT_OK(seq_file->Read(1000, &result, scratch));  // Try reading past EOF.
-  ASSERT_EQ(0U, result.size());
-  ASSERT_OK(seq_file->Skip(100));  // Try to skip past end of file.
-  ASSERT_OK(seq_file->Read(1000, &result, scratch));
-  ASSERT_EQ(0U, result.size());
-
-  // Random reads.
-  ASSERT_OK(env_->NewRandomAccessFile("/dir/f", &rand_file, soptions_));
-  ASSERT_OK(rand_file->Read(6, 5, &result, scratch));  // Read "world".
-  ASSERT_EQ(0, result.compare("world"));
-  ASSERT_OK(rand_file->Read(0, 5, &result, scratch));  // Read "hello".
-  ASSERT_EQ(0, result.compare("hello"));
-  ASSERT_OK(rand_file->Read(10, 100, &result, scratch));  // Read "d".
-  ASSERT_EQ(0, result.compare("d"));
-
-  // Too high offset.
-  ASSERT_TRUE(!rand_file->Read(1000, 5, &result, scratch).ok());
-}
-
-TEST_F(EnvMirrorTest, Locks) {
-  FileLock* lock;
-
-  // These are no-ops, but we test they return success.
-  ASSERT_OK(env_->LockFile("some file", &lock));
-  ASSERT_OK(env_->UnlockFile(lock));
-}
-
-TEST_F(EnvMirrorTest, Misc) {
-  std::string test_dir;
-  ASSERT_OK(env_->GetTestDirectory(&test_dir));
-  ASSERT_TRUE(!test_dir.empty());
-
-  unique_ptr<WritableFile> writable_file;
-  ASSERT_OK(env_->NewWritableFile("/a/b", &writable_file, soptions_));
-
-  // These are no-ops, but we test they return success.
-  ASSERT_OK(writable_file->Sync());
-  ASSERT_OK(writable_file->Flush());
-  ASSERT_OK(writable_file->Close());
-  writable_file.reset();
-}
-
-TEST_F(EnvMirrorTest, LargeWrite) {
-  const size_t kWriteSize = 300 * 1024;
-  char* scratch = new char[kWriteSize * 2];
-
-  std::string write_data;
-  for (size_t i = 0; i < kWriteSize; ++i) {
-    write_data.append(1, static_cast<char>(i));
-  }
-
-  unique_ptr<WritableFile> writable_file;
-  ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file, soptions_));
-  ASSERT_OK(writable_file->Append("foo"));
-  ASSERT_OK(writable_file->Append(write_data));
-  writable_file.reset();
-
-  unique_ptr<SequentialFile> seq_file;
-  Slice result;
-  ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file, soptions_));
-  ASSERT_OK(seq_file->Read(3, &result, scratch));  // Read "foo".
-  ASSERT_EQ(0, result.compare("foo"));
-
-  size_t read = 0;
-  std::string read_data;
-  while (read < kWriteSize) {
-    ASSERT_OK(seq_file->Read(kWriteSize - read, &result, scratch));
-    read_data.append(result.data(), result.size());
-    read += result.size();
-  }
-  ASSERT_TRUE(write_data == read_data);
-  delete[] scratch;
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr, "SKIPPED as EnvMirror is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // !ROCKSDB_LITE

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/d3a13a49/thirdparty/rocksdb/utilities/env_timed_test.cc
----------------------------------------------------------------------
diff --git a/thirdparty/rocksdb/utilities/env_timed_test.cc b/thirdparty/rocksdb/utilities/env_timed_test.cc
deleted file mode 100644
index 41d05e1..0000000
--- a/thirdparty/rocksdb/utilities/env_timed_test.cc
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright (c) 2017-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include "rocksdb/env.h"
-#include "rocksdb/perf_context.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-
-class TimedEnvTest : public testing::Test {
-};
-
-TEST_F(TimedEnvTest, BasicTest) {
-  SetPerfLevel(PerfLevel::kEnableTime);
-  ASSERT_EQ(0, get_perf_context()->env_new_writable_file_nanos);
-
-  std::unique_ptr<Env> mem_env(NewMemEnv(Env::Default()));
-  std::unique_ptr<Env> timed_env(NewTimedEnv(mem_env.get()));
-  std::unique_ptr<WritableFile> writable_file;
-  timed_env->NewWritableFile("f", &writable_file, EnvOptions());
-
-  ASSERT_GT(get_perf_context()->env_new_writable_file_nanos, 0);
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else  // ROCKSDB_LITE
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr, "SKIPPED as TimedEnv is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // ROCKSDB_LITE

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/d3a13a49/thirdparty/rocksdb/utilities/geodb/geodb_test.cc
----------------------------------------------------------------------
diff --git a/thirdparty/rocksdb/utilities/geodb/geodb_test.cc b/thirdparty/rocksdb/utilities/geodb/geodb_test.cc
deleted file mode 100644
index dcdb982..0000000
--- a/thirdparty/rocksdb/utilities/geodb/geodb_test.cc
+++ /dev/null
@@ -1,140 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#ifndef ROCKSDB_LITE
-#include "utilities/geodb/geodb_impl.h"
-
-#include <cctype>
-#include "util/testharness.h"
-
-namespace rocksdb {
-
-class GeoDBTest : public testing::Test {
- public:
-  static const std::string kDefaultDbName;
-  static Options options;
-  DB* db;
-  GeoDB* geodb;
-
-  GeoDBTest() {
-    GeoDBOptions geodb_options;
-    EXPECT_OK(DestroyDB(kDefaultDbName, options));
-    options.create_if_missing = true;
-    Status status = DB::Open(options, kDefaultDbName, &db);
-    geodb =  new GeoDBImpl(db, geodb_options);
-  }
-
-  ~GeoDBTest() {
-    delete geodb;
-  }
-
-  GeoDB* getdb() {
-    return geodb;
-  }
-};
-
-const std::string GeoDBTest::kDefaultDbName = test::TmpDir() + "/geodb_test";
-Options GeoDBTest::options = Options();
-
-// Insert, Get and Remove
-TEST_F(GeoDBTest, SimpleTest) {
-  GeoPosition pos1(100, 101);
-  std::string id1("id1");
-  std::string value1("value1");
-
-  // insert first object into database
-  GeoObject obj1(pos1, id1, value1);
-  Status status = getdb()->Insert(obj1);
-  ASSERT_TRUE(status.ok());
-
-  // insert second object into database
-  GeoPosition pos2(200, 201);
-  std::string id2("id2");
-  std::string value2 = "value2";
-  GeoObject obj2(pos2, id2, value2);
-  status = getdb()->Insert(obj2);
-  ASSERT_TRUE(status.ok());
-
-  // retrieve first object using position
-  std::string value;
-  status = getdb()->GetByPosition(pos1, Slice(id1), &value);
-  ASSERT_TRUE(status.ok());
-  ASSERT_EQ(value, value1);
-
-  // retrieve first object using id
-  GeoObject obj;
-  status = getdb()->GetById(Slice(id1), &obj);
-  ASSERT_TRUE(status.ok());
-  ASSERT_EQ(obj.position.latitude, 100);
-  ASSERT_EQ(obj.position.longitude, 101);
-  ASSERT_EQ(obj.id.compare(id1), 0);
-  ASSERT_EQ(obj.value, value1);
-
-  // delete first object
-  status = getdb()->Remove(Slice(id1));
-  ASSERT_TRUE(status.ok());
-  status = getdb()->GetByPosition(pos1, Slice(id1), &value);
-  ASSERT_TRUE(status.IsNotFound());
-  status = getdb()->GetById(id1, &obj);
-  ASSERT_TRUE(status.IsNotFound());
-
-  // check that we can still find second object
-  status = getdb()->GetByPosition(pos2, id2, &value);
-  ASSERT_TRUE(status.ok());
-  ASSERT_EQ(value, value2);
-  status = getdb()->GetById(id2, &obj);
-  ASSERT_TRUE(status.ok());
-}
-
-// Search.
-// Verify distances via http://www.stevemorse.org/nearest/distance.php
-TEST_F(GeoDBTest, Search) {
-  GeoPosition pos1(45, 45);
-  std::string id1("mid1");
-  std::string value1 = "midvalue1";
-
-  // insert object at 45 degree latitude
-  GeoObject obj1(pos1, id1, value1);
-  Status status = getdb()->Insert(obj1);
-  ASSERT_TRUE(status.ok());
-
-  // search all objects centered at 46 degree latitude with
-  // a radius of 200 kilometers. We should find the one object that
-  // we inserted earlier.
-  GeoIterator* iter1 = getdb()->SearchRadial(GeoPosition(46, 46), 200000);
-  ASSERT_TRUE(status.ok());
-  ASSERT_EQ(iter1->geo_object().value, "midvalue1");
-  uint32_t size = 0;
-  while (iter1->Valid()) {
-    size++;
-    iter1->Next();
-  }
-  ASSERT_EQ(size, 1U);
-  delete iter1;
-
-  // search all objects centered at 46 degree latitude with
-  // a radius of 2 kilometers. There should be none.
-  GeoIterator* iter2 = getdb()->SearchRadial(GeoPosition(46, 46), 2);
-  ASSERT_TRUE(status.ok());
-  ASSERT_FALSE(iter2->Valid());
-  delete iter2;
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char* argv[]) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-#else
-
-#include <stdio.h>
-
-int main() {
-  fprintf(stderr, "SKIPPED\n");
-  return 0;
-}
-
-#endif  // !ROCKSDB_LITE

Reply via email to