[nifi-minifi-cpp] branch main updated: MINIFICPP-1288 - Remove FlowController::reload() as it is both flawed and unused

2020-07-30 Thread jeremydyer
This is an automated email from the ASF dual-hosted git repository.

jeremydyer pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/nifi-minifi-cpp.git


The following commit(s) were added to refs/heads/main by this push:
 new 97947e0  MINIFICPP-1288 - Remove FlowController::reload() as it is 
both flawed and unused
97947e0 is described below

commit 97947e01eb9ccb0aa5965561b30de9529ad72ce1
Author: Adam Hunyadi 
AuthorDate: Wed Jul 29 10:19:51 2020 +0200

MINIFICPP-1288 - Remove FlowController::reload() as it is both flawed and 
unused
---
 libminifi/include/FlowController.h |  2 --
 libminifi/src/FlowController.cpp   | 19 ---
 libminifi/test/unit/ProvenanceTestHelper.h |  4 
 3 files changed, 25 deletions(-)

diff --git a/libminifi/include/FlowController.h 
b/libminifi/include/FlowController.h
index 86e504c..af06f57 100644
--- a/libminifi/include/FlowController.h
+++ b/libminifi/include/FlowController.h
@@ -134,8 +134,6 @@ class FlowController : public 
core::controller::ControllerServiceProvider, publi
   virtual void waitUnload(uint64_t timeToWaitMs);
   // Unload the current flow xml, clean the root process group and all its 
children
   virtual void unload();
-  // Load new xml
-  virtual void reload(std::string yamlFile);
   // update property value
   void updatePropertyValue(std::string processorName, std::string 
propertyName, std::string propertyValue) {
 if (root_ != nullptr)
diff --git a/libminifi/src/FlowController.cpp b/libminifi/src/FlowController.cpp
index da1c676..cc67fdd 100644
--- a/libminifi/src/FlowController.cpp
+++ b/libminifi/src/FlowController.cpp
@@ -382,25 +382,6 @@ void FlowController::load(const 
std::shared_ptr , bool
   }
 }
 
-void FlowController::reload(std::string yamlFile) {
-  std::lock_guard flow_lock(mutex_);
-  logger_->log_info("Starting to reload Flow Controller with yaml %s", 
yamlFile);
-  stop(true);
-  unload();
-  std::string oldYamlFile = this->configuration_filename_;
-  this->configuration_filename_ = yamlFile;
-  load();
-  start();
-  if (this->root_ != nullptr) {
-this->configuration_filename_ = oldYamlFile;
-logger_->log_info("Rollback Flow Controller to YAML %s", oldYamlFile);
-stop(true);
-unload();
-load();
-start();
-  }
-}
-
 void FlowController::loadFlowRepo() {
   if (this->flow_file_repo_ != nullptr) {
 logger_->log_debug("Getting connection map");
diff --git a/libminifi/test/unit/ProvenanceTestHelper.h 
b/libminifi/test/unit/ProvenanceTestHelper.h
index 7f92bc7..cce7bf6 100644
--- a/libminifi/test/unit/ProvenanceTestHelper.h
+++ b/libminifi/test/unit/ProvenanceTestHelper.h
@@ -270,10 +270,6 @@ class TestFlowController : public minifi::FlowController {
 stop(true);
   }
 
-  void reload(std::string file) override {
-
-  }
-
   bool isRunning() override {
 return true;
   }



[nifi] branch master updated: NIFI-7299 Add basic OAuth2 token provider service that can fetch access tokens when supplied with appropriate credentials.

2020-06-04 Thread jeremydyer
This is an automated email from the ASF dual-hosted git repository.

jeremydyer pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/nifi.git


The following commit(s) were added to refs/heads/master by this push:
 new 915617d  NIFI-7299 Add basic OAuth2 token provider service that can 
fetch access tokens when supplied with appropriate credentials.
915617d is described below

commit 915617dbe76d6842bb971e193e7399c8eda76121
Author: Mike Thomsen 
AuthorDate: Sat Mar 28 10:57:11 2020 -0400

NIFI-7299 Add basic OAuth2 token provider service that can fetch access 
tokens when supplied with appropriate credentials.

Added skeleton of oauth2 provider.
Added copy of our code.
Refactored a few things.
Updated apis to better match flow descriptions.
Updated poms and other artifacts.
Updated copyright notice.
Updated LICENSE.

This closes #4173

Signed-off-by: Jeremy Dyer 
---
 nifi-assembly/pom.xml  |   6 +
 .../nifi-oauth2-provider-api/pom.xml   |  40 
 .../java/org/apache/nifi/oauth2/AccessToken.java   |  69 +++
 .../oauth2/AccessTokenAcquisitionException.java|  28 +++
 .../apache/nifi/oauth2/OAuth2TokenProvider.java|  58 ++
 .../nifi-oauth2-provider-nar/pom.xml   |  47 +
 .../src/main/resources/META-INF/LICENSE| 202 +
 .../src/main/resources/META-INF/NOTICE |  42 +
 .../nifi-oauth2-provider-service/pom.xml   |  98 ++
 .../nifi/oauth2/OAuth2TokenProviderImpl.java   | 155 
 .../src/main/java/org/apache/nifi/oauth2/Util.java | 139 ++
 .../org.apache.nifi.controller.ControllerService   |  15 ++
 .../nifi/oauth2/OAuth2TokenProviderImplTest.java   | 194 
 .../nifi-oauth2-provider-bundle/pom.xml|  48 +
 .../nifi-standard-services-api-nar/pom.xml |   6 +
 nifi-nar-bundles/nifi-standard-services/pom.xml|   7 +-
 16 files changed, 1152 insertions(+), 2 deletions(-)

diff --git a/nifi-assembly/pom.xml b/nifi-assembly/pom.xml
index 7f96c87..4912c86 100644
--- a/nifi-assembly/pom.xml
+++ b/nifi-assembly/pom.xml
@@ -429,6 +429,12 @@ language governing permissions and limitations under the 
License. -->
 
 
 org.apache.nifi
+nifi-oauth2-provider-nar
+1.12.0-SNAPSHOT
+nar
+
+
+org.apache.nifi
 nifi-azure-nar
 1.12.0-SNAPSHOT
 nar
diff --git 
a/nifi-nar-bundles/nifi-standard-services/nifi-oauth2-provider-api/pom.xml 
b/nifi-nar-bundles/nifi-standard-services/nifi-oauth2-provider-api/pom.xml
new file mode 100644
index 000..d7efc35
--- /dev/null
+++ b/nifi-nar-bundles/nifi-standard-services/nifi-oauth2-provider-api/pom.xml
@@ -0,0 +1,40 @@
+
+
+http://maven.apache.org/POM/4.0.0; 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance; 
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
https://maven.apache.org/xsd/maven-4.0.0.xsd;>
+4.0.0
+
+
+org.apache.nifi
+nifi-standard-services
+1.12.0-SNAPSHOT
+
+
+nifi-oauth2-provider-api
+jar
+
+
+
+org.apache.nifi
+nifi-api
+provided
+
+
+org.apache.nifi
+nifi-ssl-context-service-api
+compile
+
+
+
diff --git 
a/nifi-nar-bundles/nifi-standard-services/nifi-oauth2-provider-api/src/main/java/org/apache/nifi/oauth2/AccessToken.java
 
b/nifi-nar-bundles/nifi-standard-services/nifi-oauth2-provider-api/src/main/java/org/apache/nifi/oauth2/AccessToken.java
new file mode 100644
index 000..2e261dd
--- /dev/null
+++ 
b/nifi-nar-bundles/nifi-standard-services/nifi-oauth2-provider-api/src/main/java/org/apache/nifi/oauth2/AccessToken.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.nifi.oauth2;
+
+public class AccessToken {
+private String accessToken;
+private String refreshToken;
+private String tokenType;
+private Integer expires;
+private String 

[nifi-minifi-cpp] 04/04: MINIFICPP-762: Fix windows issues

2019-03-14 Thread jeremydyer
This is an automated email from the ASF dual-hosted git repository.

jeremydyer pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/nifi-minifi-cpp.git

commit db5a636a708409946c2f9cc300a04ff32840c337
Author: Marc Parisi 
AuthorDate: Thu Mar 14 07:52:11 2019 -0400

MINIFICPP-762: Fix windows issues
---
 CMakeLists.txt | 9 -
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index e79a30f..2d3a5ae 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -249,7 +249,7 @@ endif (OPENSSL_FOUND)
 else()
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/ssl")
 endif()
-if(NOT USE_SYSTEM_ZLIB)
+if(WIN32 OR NOT USE_SYSTEM_ZLIB)
   message("Using bundled zlib")
 
   if (WIN32)
@@ -276,10 +276,13 @@ if(NOT USE_SYSTEM_ZLIB)
   set_target_properties(z PROPERTIES IMPORTED_LOCATION 
"${CMAKE_CURRENT_BINARY_DIR}/${BYPRODUCT}")
   add_dependencies(z zlib-external)
   set(ZLIB_FOUND "YES" CACHE STRING "" FORCE)
+  set(ZLIB_INCLUDE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/thirdparty/zlib/include" 
CACHE STRING "" FORCE)
   set(ZLIB_INCLUDE_DIRS "${CMAKE_CURRENT_SOURCE_DIR}/thirdparty/zlib/include" 
CACHE STRING "" FORCE)
   set(ZLIB_LIBRARIES "${CMAKE_CURRENT_BINARY_DIR}/${BYPRODUCT}" CACHE STRING 
"" FORCE)
   set(ZLIB_LIBRARY "${CMAKE_CURRENT_BINARY_DIR}/${BYPRODUCT}" CACHE STRING "" 
FORCE)
   set(ZLIB_LIBRARY "${CMAKE_CURRENT_BINARY_DIR}/${BYPRODUCT}" CACHE STRING "" 
FORCE)
+  set(ZLIB_LIBRARY_RELEASE "${CMAKE_CURRENT_BINARY_DIR}/${BYPRODUCT}" CACHE 
STRING "" FORCE)
+  set(ZLIB_LIBRARY_DEBUG "${CMAKE_CURRENT_BINARY_DIR}/${BYPRODUCT}" CACHE 
STRING "" FORCE)
   message("ZLIBV LIBR is ${ZLIB_LIBRARIES}")
 else()
   find_package (ZLIB REQUIRED)
@@ -457,6 +460,10 @@ if(NOT USE_SYSTEM_OPENSSL OR USE_SYSTEM_OPENSSL STREQUAL 
"OFF")
   add_dependencies(minifi libressl-portable)
 endif()
 
+if (WIN32 OR NOT USE_SYSTEM_ZLIB)
+   add_dependencies(minifi zlib-external)
+endif(WIN32 OR NOT USE_SYSTEM_ZLIB)
+
  EXTENSIONS
 option(DISABLE_CURL "Disables libCurl Properties." OFF)
 if ((DISABLE_CURL STREQUAL "OFF" OR NOT DISABLE_CURL) AND NOT DISABLE_CIVET)



[nifi-minifi-cpp] 02/04: MINIFICPP-762-Update: send make docker to dev null

2019-03-14 Thread jeremydyer
This is an automated email from the ASF dual-hosted git repository.

jeremydyer pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/nifi-minifi-cpp.git

commit 1956d876d4e7fb8d3645d42a166555d3f2832afd
Author: Marc Parisi 
AuthorDate: Wed Mar 13 20:19:46 2019 -0400

MINIFICPP-762-Update: send make docker to dev null
---
 .travis.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.travis.yml b/.travis.yml
index 063ffbc..3191061 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -59,7 +59,7 @@ matrix:
   services:
 - docker
   script:
-- ./bootstrap.sh -e -t && cd build  && make docker
+- ./bootstrap.sh -e -t && cd build  && make docker > /dev/null
 - os: linux
   dist: trusty
   sudo: required



[nifi-minifi-cpp] branch master updated (41d2604 -> db5a636)

2019-03-14 Thread jeremydyer
This is an automated email from the ASF dual-hosted git repository.

jeremydyer pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/nifi-minifi-cpp.git.


from 41d2604  MINIFICPP-762: Add convenience installs for static openssl 
libs
 new 7b2137c  MINIFICPP-762: Update static linkings. Download and build 
dependencies that we can't guarantee on platforms
 new 1956d87  MINIFICPP-762-Update: send make docker to dev null
 new 662d2ce  remove docker target
 new db5a636  MINIFICPP-762: Fix windows issues

The 4 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .travis.yml|   4 +-
 CMakeLists.txt | 164 ++---
 README.md  |   2 +
 bootstrap.sh   |  10 ++
 bstrp_functions.sh |  19 ++-
 centos.sh  |   4 +-
 cmake/BuildTests.cmake |   5 +-
 cmake/ssl/FindOpenSSL.cmake|  28 
 controller/CMakeLists.txt  |   7 +-
 docker/Dockerfile  |   2 +-
 extensions/coap/tests/CMakeLists.txt   |   8 +-
 extensions/expression-language/CMakeLists.txt  |   1 +
 extensions/http-curl/CMakeLists.txt|  14 +-
 extensions/http-curl/tests/CMakeLists.txt  |  12 +-
 extensions/libarchive/CMakeLists.txt   |  18 +--
 extensions/mqtt/CMakeLists.txt |   7 +-
 extensions/pcap/CMakeLists.txt |   9 +-
 libminifi/CMakeLists.txt   |   4 +-
 libminifi/src/processors/GetTCP.cpp|   4 +
 libminifi/test/archive-tests/CMakeLists.txt|   8 +-
 libminifi/test/civetweb-tests/CMakeLists.txt   |   4 +-
 libminifi/test/script-tests/CMakeLists.txt |  12 +-
 main/CMakeLists.txt|  33 +++--
 nanofi/CMakeLists.txt  |   4 +-
 rheldistro.sh  |   2 +-
 thirdparty/date/CMakeLists.txt |   6 +-
 .../libarchive-3.3.2/libarchive/CMakeLists.txt |  20 +--
 27 files changed, 253 insertions(+), 158 deletions(-)



[nifi-minifi-cpp] 01/04: MINIFICPP-762: Update static linkings. Download and build dependencies that we can't guarantee on platforms

2019-03-14 Thread jeremydyer
This is an automated email from the ASF dual-hosted git repository.

jeremydyer pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/nifi-minifi-cpp.git

commit 7b2137c0f25ef68f2e9f7c612f8b30464c166583
Author: Marc Parisi 
AuthorDate: Mon Mar 11 12:21:49 2019 -0400

MINIFICPP-762: Update static linkings. Download and build
dependencies that we can't guarantee on platforms
---
 .travis.yml|   5 +
 CMakeLists.txt | 157 +
 README.md  |   2 +
 bootstrap.sh   |  10 ++
 bstrp_functions.sh |  19 ++-
 centos.sh  |   4 +-
 cmake/BuildTests.cmake |   5 +-
 cmake/ssl/FindOpenSSL.cmake|  28 
 controller/CMakeLists.txt  |   7 +-
 docker/Dockerfile  |   2 +-
 extensions/coap/tests/CMakeLists.txt   |   8 +-
 extensions/expression-language/CMakeLists.txt  |   1 +
 extensions/http-curl/CMakeLists.txt|  14 +-
 extensions/http-curl/tests/CMakeLists.txt  |  12 +-
 extensions/libarchive/CMakeLists.txt   |  18 +--
 extensions/mqtt/CMakeLists.txt |   7 +-
 extensions/pcap/CMakeLists.txt |   9 +-
 libminifi/CMakeLists.txt   |   4 +-
 libminifi/src/processors/GetTCP.cpp|   4 +
 libminifi/test/archive-tests/CMakeLists.txt|   8 +-
 libminifi/test/civetweb-tests/CMakeLists.txt   |   4 +-
 libminifi/test/script-tests/CMakeLists.txt |  12 +-
 main/CMakeLists.txt|  33 +++--
 nanofi/CMakeLists.txt  |   4 +-
 rheldistro.sh  |   2 +-
 thirdparty/date/CMakeLists.txt |   6 +-
 .../libarchive-3.3.2/libarchive/CMakeLists.txt |  20 +--
 27 files changed, 250 insertions(+), 155 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 6e6c08d..063ffbc 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -60,6 +60,11 @@ matrix:
 - docker
   script:
 - ./bootstrap.sh -e -t && cd build  && make docker
+- os: linux
+  dist: trusty
+  sudo: required
+  script:
+- ./bootstrap.sh -e -t && cd build  && cmake -DUSE_SHARED_LIBS= .. && 
make -j2 VERBOSE=1 && sudo make test ARGS="-j2 --output-on-failure"
 - os: osx
   osx_image: xcode8.3
   env:
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 058b992..e79a30f 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -23,31 +23,39 @@ set(PROJECT_NAME "nifi-minifi-cpp")
 set(PROJECT_VERSION_MAJOR 0)
 set(PROJECT_VERSION_MINOR 6)
 set(PROJECT_VERSION_PATCH 0)
+
+include(CMakeDependentOption)
+include(CheckIncludeFile)
+include(FeatureSummary)
+include(ExternalProject)
+
 option(SKIP_TESTS "Skips building all tests." OFF)
+
 option(PORTABLE "Instructs the compiler to remove architecture specific 
optimizations" ON)
-option(USE_SYSTEM_OPENSSL "Instructs the build system to search for and use an 
SSL library available in the host system" ON)
+
+option(USE_SHARED_LIBS "Builds using shared libraries" ON)
+
+option(ENABLE_PYTHON "Instructs the build system to enable building shared 
objects for the python lib" OFF)
+
+cmake_dependent_option(STATIC_BUILD "Attempts to statically link as many 
dependencies as possible." ON "NOT ENABLE_PYTHON; NOT USE_SHARED_LIBS" OFF)  
+
+cmake_dependent_option(USE_SYSTEM_OPENSSL "Instructs the build system to 
search for and use an SSL library available in the host system" ON "NOT 
STATIC_BUILD" OFF)
+
 option(OPENSSL_OFF "Disables OpenSSL" OFF)
 option(ENABLE_OPS "Enable Operations/zlib Tools" ON)
-option(USE_SYSTEM_UUID "Instructs the build system to search for and use an 
UUID library available in the host system" OFF)
+option(USE_SYSTEM_UUID "Instructs the build system to search for and use a 
UUID library available in the host system" OFF)
+
 option(ENABLE_JNI "Instructs the build system to enable the JNI extension" OFF)
-option(USE_SYSTEM_CURL "Instructs the build system to search for and use a 
cURL library available in the host system" ON)
-option(BUILD_SHARED_LIBS "Build yaml cpp shared lib" OFF)
-if (WIN32)
-option(USE_SYSTEM_ZLIB "Instructs the build system to search for and use a 
zlib library available in the host system" OFF)
-else()
-option(USE_SYSTEM_ZLIB "Instructs the build system to search for and use a 
zlib library available in the host system" ON)
-endif()
-option(ENABLE_PYTHON "Instructs the build system to

[nifi-minifi-cpp] 03/04: remove docker target

2019-03-14 Thread jeremydyer
This is an automated email from the ASF dual-hosted git repository.

jeremydyer pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/nifi-minifi-cpp.git

commit 662d2ce96345cb471e6bbd941e13bf3dc80e53f6
Author: Marc Parisi 
AuthorDate: Wed Mar 13 21:00:12 2019 -0400

remove docker target
---
 .travis.yml | 7 ---
 1 file changed, 7 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 3191061..69dff4e 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -56,13 +56,6 @@ matrix:
 - os: linux
   dist: trusty
   sudo: required
-  services:
-- docker
-  script:
-- ./bootstrap.sh -e -t && cd build  && make docker > /dev/null
-- os: linux
-  dist: trusty
-  sudo: required
   script:
 - ./bootstrap.sh -e -t && cd build  && cmake -DUSE_SHARED_LIBS= .. && 
make -j2 VERBOSE=1 && sudo make test ARGS="-j2 --output-on-failure"
 - os: osx



[nifi-minifi-cpp] branch master updated: MINIFICPP-730 support dynamic properties for PublishKafka

2019-02-08 Thread jeremydyer
This is an automated email from the ASF dual-hosted git repository.

jeremydyer pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/nifi-minifi-cpp.git


The following commit(s) were added to refs/heads/master by this push:
 new 5ad6154  MINIFICPP-730 support dynamic properties for PublishKafka
5ad6154 is described below

commit 5ad6154607ce6a6cda2d97e402d1e767fc0c6971
Author: Jeremy Dyer 
AuthorDate: Fri Feb 8 09:39:50 2019 -0500

MINIFICPP-730 support dynamic properties for PublishKafka

Signed-off-by: Jeremy Dyer 

This closes #481
---
 extensions/librdkafka/PublishKafka.h | 5 +
 1 file changed, 5 insertions(+)

diff --git a/extensions/librdkafka/PublishKafka.h 
b/extensions/librdkafka/PublishKafka.h
index 8160dbd..46915b9 100644
--- a/extensions/librdkafka/PublishKafka.h
+++ b/extensions/librdkafka/PublishKafka.h
@@ -175,6 +175,11 @@ public:
   };
 
 public:
+
+virtual bool supportsDynamicProperties() {
+  return true;
+};
+
   /**
* Function that's executed when the processor is scheduled.
* @param context process context.



[3/3] nifi-registry git commit: [NIFIREG-196] update client deps

2018-08-19 Thread jeremydyer
[NIFIREG-196] update client deps

Signed-off-by: Jeremy Dyer 

This closes #138


Project: http://git-wip-us.apache.org/repos/asf/nifi-registry/repo
Commit: http://git-wip-us.apache.org/repos/asf/nifi-registry/commit/b2e8ef34
Tree: http://git-wip-us.apache.org/repos/asf/nifi-registry/tree/b2e8ef34
Diff: http://git-wip-us.apache.org/repos/asf/nifi-registry/diff/b2e8ef34

Branch: refs/heads/master
Commit: b2e8ef340643a597c098437da86e9e3555004e10
Parents: b11eb53
Author: Scott Aslan 
Authored: Fri Aug 17 13:52:37 2018 -0400
Committer: Jeremy Dyer 
Committed: Sun Aug 19 15:15:15 2018 -0400

--
 .../src/main/frontend/Gruntfile.js  |1 +
 .../src/main/frontend/package-lock.json | 5653 ++
 .../src/main/frontend/package.json  |   39 +-
 3 files changed, 3308 insertions(+), 2385 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/nifi-registry/blob/b2e8ef34/nifi-registry-web-ui/src/main/frontend/Gruntfile.js
--
diff --git a/nifi-registry-web-ui/src/main/frontend/Gruntfile.js 
b/nifi-registry-web-ui/src/main/frontend/Gruntfile.js
index 2f71770..8b1fee1 100644
--- a/nifi-registry-web-ui/src/main/frontend/Gruntfile.js
+++ b/nifi-registry-web-ui/src/main/frontend/Gruntfile.js
@@ -22,6 +22,7 @@ module.exports = function (grunt) {
 grunt.initConfig({
 sass: {
 options: {
+implementation: require('node-sass'),
 outputStyle: 'compressed',
 sourceMap: true
 },



[1/3] nifi-registry git commit: [NIFIREG-196] update client deps [Forced Update!]

2018-08-19 Thread jeremydyer
Repository: nifi-registry
Updated Branches:
  refs/heads/master 225f5482d -> b2e8ef340 (forced update)


http://git-wip-us.apache.org/repos/asf/nifi-registry/blob/b2e8ef34/nifi-registry-web-ui/src/main/frontend/package.json
--
diff --git a/nifi-registry-web-ui/src/main/frontend/package.json 
b/nifi-registry-web-ui/src/main/frontend/package.json
index 85ef8c7..5129eb4 100644
--- a/nifi-registry-web-ui/src/main/frontend/package.json
+++ b/nifi-registry-web-ui/src/main/frontend/package.json
@@ -1,4 +1,18 @@
 {
+  "//": "Licensed to the Apache Software Foundation (ASF) under one or more",
+  "//": "contributor license agreements.  See the NOTICE file distributed 
with",
+  "//": "this work for additional information regarding copyright ownership.",
+  "//": "The ASF licenses this file to You under the Apache License, Version 
2.0",
+  "//": "(the \"License\"); you may not use this file except in compliance 
with",
+  "//": "the License.  You may obtain a copy of the License at",
+  "//": "",
+  "//": "http://www.apache.org/licenses/LICENSE-2.0;,
+  "//": "",
+  "//": "Unless required by applicable law or agreed to in writing, software",
+  "//": "distributed under the License is distributed on an \"AS IS\" BASIS,",
+  "//": "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 
implied.",
+  "//": "See the License for the specific language governing permissions and",
+  "//": "limitations under the License.",
   "name": "nifi-registry",
   "version": "0.0.1",
   "description": "",
@@ -45,23 +59,22 @@
 "roboto-fontface": "0.9.0"
   },
   "devDependencies": {
-"canonical-path": "0.0.2",
-"grunt": "0.4.5",
-"grunt-cli": "1.2.0",
+"grunt": "1.0.3",
+"grunt-cli": "1.3.0",
 "grunt-contrib-compress": "1.4.3",
-"grunt-sass": "2.0.0",
+"grunt-sass": "3.0.1",
+"node-sass": "4.9.3",
 "grunt-systemjs-builder": "1.0.0",
-"jasmine-core": "2.8.0",
-"karma": "1.7.1",
+"jasmine-core": "3.2.1",
+"karma": "3.0.0",
 "karma-chrome-launcher": "2.2.0",
 "karma-cli": "1.0.1",
-"karma-coverage": "1.1.1",
-"karma-jasmine": "1.1.1",
-"karma-jasmine-html-reporter": "0.2.2",
-"karma-spec-reporter": "0.0.31",
-"load-grunt-tasks": "3.5.2",
-"lodash": "4.16.2",
-"protractor": "4.0.14"
+"karma-coverage": "1.1.2",
+"karma-jasmine": "1.1.2",
+"karma-jasmine-html-reporter": "1.3.0",
+"karma-spec-reporter": "0.0.32",
+"load-grunt-tasks": "4.0.0",
+"protractor": "5.4.0"
   },
   "bundleDependencies": [],
   "private": true



[2/3] nifi-registry git commit: [NIFIREG-196] update client deps

2018-08-19 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-registry/blob/b2e8ef34/nifi-registry-web-ui/src/main/frontend/package-lock.json
--
diff --git a/nifi-registry-web-ui/src/main/frontend/package-lock.json 
b/nifi-registry-web-ui/src/main/frontend/package-lock.json
index 2da9214..6a85fd7 100644
--- a/nifi-registry-web-ui/src/main/frontend/package-lock.json
+++ b/nifi-registry-web-ui/src/main/frontend/package-lock.json
@@ -1,4 +1,18 @@
 {
+  "//": "Licensed to the Apache Software Foundation (ASF) under one or more",
+  "//": "contributor license agreements.  See the NOTICE file distributed 
with",
+  "//": "this work for additional information regarding copyright ownership.",
+  "//": "The ASF licenses this file to You under the Apache License, Version 
2.0",
+  "//": "(the \"License\"); you may not use this file except in compliance 
with",
+  "//": "the License.  You may obtain a copy of the License at",
+  "//": "",
+  "//": "http://www.apache.org/licenses/LICENSE-2.0;,
+  "//": "",
+  "//": "Unless required by applicable law or agreed to in writing, software",
+  "//": "distributed under the License is distributed on an \"AS IS\" BASIS,",
+  "//": "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 
implied.",
+  "//": "See the License for the specific language governing permissions and",
+  "//": "limitations under the License.",
   "name": "nifi-registry",
   "version": "0.0.1",
   "lockfileVersion": 1,
@@ -113,12 +127,6 @@
   "resolved": "https://registry.npmjs.org/@nifi-fds/core/-/core-0.1.0.tgz;,
   "integrity": 
"sha512-aUoXOjhgQBZSxmzpBeYupIGvtw+J7Os+6n3q9JS22vcr7uVh588UMb1STKH1nVPRtDgkiU9ooGuXp9hkSCnC0w=="
 },
-"@types/jasmine": {
-  "version": "2.8.6",
-  "resolved": 
"https://registry.npmjs.org/@types/jasmine/-/jasmine-2.8.6.tgz;,
-  "integrity": 
"sha512-clg9raJTY0EOo5pVZKX3ZlMjlYzVU73L71q5OV1jhE2Uezb7oF94jh4CvwrW6wInquQAdhOxJz5VDF2TLUGmmA==",
-  "dev": true
-},
 "@types/node": {
   "version": "6.0.106",
   "resolved": "https://registry.npmjs.org/@types/node/-/node-6.0.106.tgz;,
@@ -131,36 +139,40 @@
   "integrity": "sha1-vShOV8hPEyXacCur/IKlMoGQwMU=",
   "dev": true
 },
+"@types/selenium-webdriver": {
+  "version": "3.0.10",
+  "resolved": 
"https://registry.npmjs.org/@types/selenium-webdriver/-/selenium-webdriver-3.0.10.tgz;,
+  "integrity": 
"sha512-ikB0JHv6vCR1KYUQAzTO4gi/lXLElT4Tx+6De2pc/OZwizE9LRNiTa+U8TBFKBD/nntPnr/MPSHSnOTybjhqNA==",
+  "dev": true
+},
 "abbrev": {
   "version": "1.1.1",
   "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz;,
   "integrity": "sha1-+PLIh60Qv2f2NPAFtph/7TF5qsg=",
   "dev": true
 },
+"accepts": {
+  "version": "1.3.5",
+  "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.5.tgz;,
+  "integrity": "sha1-63d99gEXI6OxTopywIBcjoZ0a9I=",
+  "dev": true,
+  "requires": {
+"mime-types": "2.1.18",
+"negotiator": "0.6.1"
+  }
+},
+"adm-zip": {
+  "version": "0.4.11",
+  "resolved": "https://registry.npmjs.org/adm-zip/-/adm-zip-0.4.11.tgz;,
+  "integrity": 
"sha512-L8vcjDTCOIJk7wFvmlEUN7AsSb8T+2JrdP7KINBjzr24TJ5Mwj590sLu3BC7zNZowvJWa/JtPmD8eJCzdtDWjA==",
+  "dev": true
+},
 "after": {
   "version": "0.8.2",
   "resolved": "https://registry.npmjs.org/after/-/after-0.8.2.tgz;,
   "integrity": "sha1-/ts5T58OAqqXaOcCvaI7UF+ufh8=",
   "dev": true
 },
-"agent-base": {
-  "version": "2.1.1",
-  "resolved": 
"https://registry.npmjs.org/agent-base/-/agent-base-2.1.1.tgz;,
-  "integrity": "sha1-1t4Q1a9hMtW9aSQn1G/FOFOQlMc=",
-  "dev": true,
-  "requires": {
-"extend": "3.0.1",
-"semver": "5.0.3"
-  },
-  "dependencies": {
-"semver": {
-  "version": "5.0.3",
-  "resolved": "https://registry.npmjs.org/semver/-/semver-5.0.3.tgz;,
-  "integrity": "sha1-d0Zt5YnNXTyV8TiqeLxWmjy10no=",
-  "dev": true
-}
-  }
-},
 "ajv": {
   "version": "5.5.2",
   "resolved": "https://registry.npmjs.org/ajv/-/ajv-5.5.2.tgz;,
@@ -215,16 +227,6 @@
   "integrity": "sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4=",
   "dev": true
 },
-"anymatch": {
-  "version": "1.3.2",
-  "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-1.3.2.tgz;,
-  "integrity": "sha1-VT3Lj5HjyImEXf26NMd3IbkLnXo=",
-  "dev": true,
-  "requires": {
-"micromatch": "2.3.11",
-"normalize-path": "2.1.1"
-  }
-},
 "aproba": {
   "version": "1.2.0",
   "resolved": "https://registry.npmjs.org/aproba/-/aproba-1.2.0.tgz;,
@@ -241,7 +243,7 @@
 "async": "2.6.0",
 "buffer-crc32": "0.2.13",
 "glob": "7.1.2",
-"lodash": "4.16.2",
+"lodash": "4.17.10",
 "readable-stream": "2.3.6",
 "tar-stream": "1.5.5",
 

[1/3] nifi-registry git commit: [NIFIREG-196] update client deps

2018-08-19 Thread jeremydyer
Repository: nifi-registry
Updated Branches:
  refs/heads/master b11eb533f -> 225f5482d


http://git-wip-us.apache.org/repos/asf/nifi-registry/blob/225f5482/nifi-registry-web-ui/src/main/frontend/package.json
--
diff --git a/nifi-registry-web-ui/src/main/frontend/package.json 
b/nifi-registry-web-ui/src/main/frontend/package.json
index 85ef8c7..5129eb4 100644
--- a/nifi-registry-web-ui/src/main/frontend/package.json
+++ b/nifi-registry-web-ui/src/main/frontend/package.json
@@ -1,4 +1,18 @@
 {
+  "//": "Licensed to the Apache Software Foundation (ASF) under one or more",
+  "//": "contributor license agreements.  See the NOTICE file distributed 
with",
+  "//": "this work for additional information regarding copyright ownership.",
+  "//": "The ASF licenses this file to You under the Apache License, Version 
2.0",
+  "//": "(the \"License\"); you may not use this file except in compliance 
with",
+  "//": "the License.  You may obtain a copy of the License at",
+  "//": "",
+  "//": "http://www.apache.org/licenses/LICENSE-2.0;,
+  "//": "",
+  "//": "Unless required by applicable law or agreed to in writing, software",
+  "//": "distributed under the License is distributed on an \"AS IS\" BASIS,",
+  "//": "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 
implied.",
+  "//": "See the License for the specific language governing permissions and",
+  "//": "limitations under the License.",
   "name": "nifi-registry",
   "version": "0.0.1",
   "description": "",
@@ -45,23 +59,22 @@
 "roboto-fontface": "0.9.0"
   },
   "devDependencies": {
-"canonical-path": "0.0.2",
-"grunt": "0.4.5",
-"grunt-cli": "1.2.0",
+"grunt": "1.0.3",
+"grunt-cli": "1.3.0",
 "grunt-contrib-compress": "1.4.3",
-"grunt-sass": "2.0.0",
+"grunt-sass": "3.0.1",
+"node-sass": "4.9.3",
 "grunt-systemjs-builder": "1.0.0",
-"jasmine-core": "2.8.0",
-"karma": "1.7.1",
+"jasmine-core": "3.2.1",
+"karma": "3.0.0",
 "karma-chrome-launcher": "2.2.0",
 "karma-cli": "1.0.1",
-"karma-coverage": "1.1.1",
-"karma-jasmine": "1.1.1",
-"karma-jasmine-html-reporter": "0.2.2",
-"karma-spec-reporter": "0.0.31",
-"load-grunt-tasks": "3.5.2",
-"lodash": "4.16.2",
-"protractor": "4.0.14"
+"karma-coverage": "1.1.2",
+"karma-jasmine": "1.1.2",
+"karma-jasmine-html-reporter": "1.3.0",
+"karma-spec-reporter": "0.0.32",
+"load-grunt-tasks": "4.0.0",
+"protractor": "5.4.0"
   },
   "bundleDependencies": [],
   "private": true



[2/3] nifi-registry git commit: [NIFIREG-196] update client deps

2018-08-19 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-registry/blob/225f5482/nifi-registry-web-ui/src/main/frontend/package-lock.json
--
diff --git a/nifi-registry-web-ui/src/main/frontend/package-lock.json 
b/nifi-registry-web-ui/src/main/frontend/package-lock.json
index 2da9214..6a85fd7 100644
--- a/nifi-registry-web-ui/src/main/frontend/package-lock.json
+++ b/nifi-registry-web-ui/src/main/frontend/package-lock.json
@@ -1,4 +1,18 @@
 {
+  "//": "Licensed to the Apache Software Foundation (ASF) under one or more",
+  "//": "contributor license agreements.  See the NOTICE file distributed 
with",
+  "//": "this work for additional information regarding copyright ownership.",
+  "//": "The ASF licenses this file to You under the Apache License, Version 
2.0",
+  "//": "(the \"License\"); you may not use this file except in compliance 
with",
+  "//": "the License.  You may obtain a copy of the License at",
+  "//": "",
+  "//": "http://www.apache.org/licenses/LICENSE-2.0;,
+  "//": "",
+  "//": "Unless required by applicable law or agreed to in writing, software",
+  "//": "distributed under the License is distributed on an \"AS IS\" BASIS,",
+  "//": "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 
implied.",
+  "//": "See the License for the specific language governing permissions and",
+  "//": "limitations under the License.",
   "name": "nifi-registry",
   "version": "0.0.1",
   "lockfileVersion": 1,
@@ -113,12 +127,6 @@
   "resolved": "https://registry.npmjs.org/@nifi-fds/core/-/core-0.1.0.tgz;,
   "integrity": 
"sha512-aUoXOjhgQBZSxmzpBeYupIGvtw+J7Os+6n3q9JS22vcr7uVh588UMb1STKH1nVPRtDgkiU9ooGuXp9hkSCnC0w=="
 },
-"@types/jasmine": {
-  "version": "2.8.6",
-  "resolved": 
"https://registry.npmjs.org/@types/jasmine/-/jasmine-2.8.6.tgz;,
-  "integrity": 
"sha512-clg9raJTY0EOo5pVZKX3ZlMjlYzVU73L71q5OV1jhE2Uezb7oF94jh4CvwrW6wInquQAdhOxJz5VDF2TLUGmmA==",
-  "dev": true
-},
 "@types/node": {
   "version": "6.0.106",
   "resolved": "https://registry.npmjs.org/@types/node/-/node-6.0.106.tgz;,
@@ -131,36 +139,40 @@
   "integrity": "sha1-vShOV8hPEyXacCur/IKlMoGQwMU=",
   "dev": true
 },
+"@types/selenium-webdriver": {
+  "version": "3.0.10",
+  "resolved": 
"https://registry.npmjs.org/@types/selenium-webdriver/-/selenium-webdriver-3.0.10.tgz;,
+  "integrity": 
"sha512-ikB0JHv6vCR1KYUQAzTO4gi/lXLElT4Tx+6De2pc/OZwizE9LRNiTa+U8TBFKBD/nntPnr/MPSHSnOTybjhqNA==",
+  "dev": true
+},
 "abbrev": {
   "version": "1.1.1",
   "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz;,
   "integrity": "sha1-+PLIh60Qv2f2NPAFtph/7TF5qsg=",
   "dev": true
 },
+"accepts": {
+  "version": "1.3.5",
+  "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.5.tgz;,
+  "integrity": "sha1-63d99gEXI6OxTopywIBcjoZ0a9I=",
+  "dev": true,
+  "requires": {
+"mime-types": "2.1.18",
+"negotiator": "0.6.1"
+  }
+},
+"adm-zip": {
+  "version": "0.4.11",
+  "resolved": "https://registry.npmjs.org/adm-zip/-/adm-zip-0.4.11.tgz;,
+  "integrity": 
"sha512-L8vcjDTCOIJk7wFvmlEUN7AsSb8T+2JrdP7KINBjzr24TJ5Mwj590sLu3BC7zNZowvJWa/JtPmD8eJCzdtDWjA==",
+  "dev": true
+},
 "after": {
   "version": "0.8.2",
   "resolved": "https://registry.npmjs.org/after/-/after-0.8.2.tgz;,
   "integrity": "sha1-/ts5T58OAqqXaOcCvaI7UF+ufh8=",
   "dev": true
 },
-"agent-base": {
-  "version": "2.1.1",
-  "resolved": 
"https://registry.npmjs.org/agent-base/-/agent-base-2.1.1.tgz;,
-  "integrity": "sha1-1t4Q1a9hMtW9aSQn1G/FOFOQlMc=",
-  "dev": true,
-  "requires": {
-"extend": "3.0.1",
-"semver": "5.0.3"
-  },
-  "dependencies": {
-"semver": {
-  "version": "5.0.3",
-  "resolved": "https://registry.npmjs.org/semver/-/semver-5.0.3.tgz;,
-  "integrity": "sha1-d0Zt5YnNXTyV8TiqeLxWmjy10no=",
-  "dev": true
-}
-  }
-},
 "ajv": {
   "version": "5.5.2",
   "resolved": "https://registry.npmjs.org/ajv/-/ajv-5.5.2.tgz;,
@@ -215,16 +227,6 @@
   "integrity": "sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4=",
   "dev": true
 },
-"anymatch": {
-  "version": "1.3.2",
-  "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-1.3.2.tgz;,
-  "integrity": "sha1-VT3Lj5HjyImEXf26NMd3IbkLnXo=",
-  "dev": true,
-  "requires": {
-"micromatch": "2.3.11",
-"normalize-path": "2.1.1"
-  }
-},
 "aproba": {
   "version": "1.2.0",
   "resolved": "https://registry.npmjs.org/aproba/-/aproba-1.2.0.tgz;,
@@ -241,7 +243,7 @@
 "async": "2.6.0",
 "buffer-crc32": "0.2.13",
 "glob": "7.1.2",
-"lodash": "4.16.2",
+"lodash": "4.17.10",
 "readable-stream": "2.3.6",
 "tar-stream": "1.5.5",
 

[3/3] nifi-registry git commit: [NIFIREG-196] update client deps

2018-08-19 Thread jeremydyer
[NIFIREG-196] update client deps


Project: http://git-wip-us.apache.org/repos/asf/nifi-registry/repo
Commit: http://git-wip-us.apache.org/repos/asf/nifi-registry/commit/225f5482
Tree: http://git-wip-us.apache.org/repos/asf/nifi-registry/tree/225f5482
Diff: http://git-wip-us.apache.org/repos/asf/nifi-registry/diff/225f5482

Branch: refs/heads/master
Commit: 225f5482d865ab46c311aa5f7e785d0e46b95ce5
Parents: b11eb53
Author: Scott Aslan 
Authored: Fri Aug 17 13:52:37 2018 -0400
Committer: Scott Aslan 
Committed: Fri Aug 17 13:52:37 2018 -0400

--
 .../src/main/frontend/Gruntfile.js  |1 +
 .../src/main/frontend/package-lock.json | 5653 ++
 .../src/main/frontend/package.json  |   39 +-
 3 files changed, 3308 insertions(+), 2385 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/nifi-registry/blob/225f5482/nifi-registry-web-ui/src/main/frontend/Gruntfile.js
--
diff --git a/nifi-registry-web-ui/src/main/frontend/Gruntfile.js 
b/nifi-registry-web-ui/src/main/frontend/Gruntfile.js
index 2f71770..8b1fee1 100644
--- a/nifi-registry-web-ui/src/main/frontend/Gruntfile.js
+++ b/nifi-registry-web-ui/src/main/frontend/Gruntfile.js
@@ -22,6 +22,7 @@ module.exports = function (grunt) {
 grunt.initConfig({
 sass: {
 options: {
+implementation: require('node-sass'),
 outputStyle: 'compressed',
 sourceMap: true
 },



nifi-site git commit: Moved Jeremy Dyer and Kevin Doran from Committer to PMC list

2018-08-08 Thread jeremydyer
Repository: nifi-site
Updated Branches:
  refs/heads/master 8909f9fcf -> ab6f30da3


Moved Jeremy Dyer and Kevin Doran from Committer to PMC list


Project: http://git-wip-us.apache.org/repos/asf/nifi-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/nifi-site/commit/ab6f30da
Tree: http://git-wip-us.apache.org/repos/asf/nifi-site/tree/ab6f30da
Diff: http://git-wip-us.apache.org/repos/asf/nifi-site/diff/ab6f30da

Branch: refs/heads/master
Commit: ab6f30da397ac8fa2ce7a9733b2c7ccd89c6245a
Parents: 8909f9f
Author: Jeremy Dyer 
Authored: Wed Aug 8 20:23:28 2018 -0400
Committer: Jeremy Dyer 
Committed: Wed Aug 8 20:27:18 2018 -0400

--
 src/pages/html/people.hbs | 20 ++--
 1 file changed, 10 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/nifi-site/blob/ab6f30da/src/pages/html/people.hbs
--
diff --git a/src/pages/html/people.hbs b/src/pages/html/people.hbs
index c014ab5..2959a09 100644
--- a/src/pages/html/people.hbs
+++ b/src/pages/html/people.hbs
@@ -164,6 +164,16 @@ title: Apache NiFi Team
 Marc Parisi
 
 
+
+    jeremydyer
+Jeremy Dyer
+
+
+
+kdoran
+Kevin Doran
+
+
 
 
 
@@ -200,11 +210,6 @@ title: Apache NiFi Team
 
 
 
-    jeremydyer
-Jeremy Dyer
-
-
-
 jfrazee
 Joey Frazee
 
@@ -240,11 +245,6 @@ title: Apache NiFi Team
 
 
 
-kdoran
-Kevin Doran
-
-
-
 zenfenan
 Sivaprasanna Sethuraman
 



svn commit: r1837682 - in /nifi/site/trunk: ./ minifi/

2018-08-08 Thread jeremydyer
Author: jeremydyer
Date: Thu Aug  9 00:29:36 2018
New Revision: 1837682

URL: http://svn.apache.org/viewvc?rev=1837682&view=rev
Log:
Updating with Aldrin's minifi documentation changes

Modified:
nifi/site/trunk/developer-guide.html
nifi/site/trunk/docs.html
nifi/site/trunk/download.html
nifi/site/trunk/faq.html
nifi/site/trunk/fds-release-guide.html
nifi/site/trunk/fds.html
nifi/site/trunk/index.html
nifi/site/trunk/licensing-guide.html
nifi/site/trunk/mailing_lists.html
nifi/site/trunk/minifi/download.html
nifi/site/trunk/minifi/faq.html
nifi/site/trunk/minifi/getting-started.html
nifi/site/trunk/minifi/index.html
nifi/site/trunk/minifi/minifi-java-agent-quick-start.html
nifi/site/trunk/minifi/minifi-toolkit.html
nifi/site/trunk/minifi/system-admin-guide.html
nifi/site/trunk/people.html
nifi/site/trunk/powered-by-nifi.html
nifi/site/trunk/quickstart.html
nifi/site/trunk/registry.html
nifi/site/trunk/release-guide.html
nifi/site/trunk/security.html
nifi/site/trunk/videos.html

Modified: nifi/site/trunk/developer-guide.html
URL: 
http://svn.apache.org/viewvc/nifi/site/trunk/developer-guide.html?rev=1837682&r1=1837681&r2=1837682&view=diff
==
--- nifi/site/trunk/developer-guide.html (original)
+++ nifi/site/trunk/developer-guide.html Thu Aug  9 00:29:36 2018
@@ -75,6 +75,7 @@
 
 Quickstart
 Release 
Guide
+GPG Guide
 FDS 
Release Guide
 Licensing 
Guide
 Developer 
Guide
@@ -117,7 +118,7 @@
 https://www.apache.org/events/current-event-234x60.png; style="margin:0px 
10px" />
 
 
-Copyright  2017 The Apache Software Foundation, 
Licensed under the Copyright  2018 The Apache Software Foundation, 
Licensed under the <a href="https://www.apache.org/licenses/LICENSE-2.0">Apache License, Version
2.0</a>. Apache, the
 Apache feather logo, NiFi, Apache NiFi and the project 
logo are trademarks of The Apache Software
 Foundation.

Modified: nifi/site/trunk/docs.html
URL: 
http://svn.apache.org/viewvc/nifi/site/trunk/docs.html?rev=1837682=1837681=1837682=diff
==
--- nifi/site/trunk/docs.html (original)
+++ nifi/site/trunk/docs.html Thu Aug  9 00:29:36 2018
@@ -75,6 +75,7 @@
 
 Quickstart
 Release 
Guide
+GPG Guide
 FDS 
Release Guide
 Licensing 
Guide
 Developer 
Guide
@@ -117,7 +118,7 @@
 https://www.apache.org/events/current-event-234x60.png; style="margin:0px 
10px" />
 
 
-Copyright  2017 The Apache Software Foundation, 
Licensed under the Copyright  2018 The Apache Software Foundation, 
Licensed under the https://www.apache.org/licenses/LICENSE-2.0;>Apache License, Version 
2.0.Apache, the
 Apache feather logo, NiFi, Apache NiFi and the project 
logo are trademarks of The Apache Software
 Foundation.

Modified: nifi/site/trunk/download.html
URL: 
http://svn.apache.org/viewvc/nifi/site/trunk/download.html?rev=1837682=1837681=1837682=diff
==
--- nifi/site/trunk/download.html (original)
+++ nifi/site/trunk/download.html Thu Aug  9 00:29:36 2018
@@ -75,6 +75,7 @@
 
 Quickstart
 Release 
Guide
+GPG Guide
 FDS 
Release Guide
 Licensing 
Guide
 Developer 
Guide
@@ -126,49 +127,52 @@
 
 Releases
 
-1.7.0
+1.7.1 
 
+Released July 16, 2018
 
 Sources:
 
-https://www.apache.org/dyn/closer.lua?path=/nifi/1.7.0/nifi-1.7.0-source-release.zip;>nifi-1.7.0-source-release.zip
 ( https://www.apache.org/dist/nifi/1.7.0/nifi-1.7.0-source-release.zip.asc;>asc,
 https://www.apache.org/dist/nifi/1.7.0/nifi-1.7.0-source-release.zip.sha1;>sha1,
 https://www.apache.org/dist/nifi/1.7.0/nifi-1.7.0-source-release.zip.sha256;>sha256,
 https://www.apache.org/dist/nifi/1.7.0/nifi-1.7.0-source-release.zip.sha512;>sha512
 )
+https:/

svn commit: r1837681 - in /nifi/site/trunk: ./ minifi/

2018-08-08 Thread jeremydyer
Author: jeremydyer
Date: Thu Aug  9 00:25:06 2018
New Revision: 1837681

URL: http://svn.apache.org/viewvc?rev=1837681&view=rev
Log:
Moved Jeremy Dyer and Kevin Doran from committer to PMC list

Modified:
nifi/site/trunk/developer-guide.html
nifi/site/trunk/docs.html
nifi/site/trunk/download.html
nifi/site/trunk/faq.html
nifi/site/trunk/fds-release-guide.html
nifi/site/trunk/fds.html
nifi/site/trunk/index.html
nifi/site/trunk/licensing-guide.html
nifi/site/trunk/mailing_lists.html
nifi/site/trunk/minifi/download.html
nifi/site/trunk/minifi/faq.html
nifi/site/trunk/minifi/getting-started.html
nifi/site/trunk/minifi/index.html
nifi/site/trunk/minifi/minifi-java-agent-quick-start.html
nifi/site/trunk/minifi/minifi-toolkit.html
nifi/site/trunk/minifi/system-admin-guide.html
nifi/site/trunk/people.html
nifi/site/trunk/powered-by-nifi.html
nifi/site/trunk/quickstart.html
nifi/site/trunk/registry.html
nifi/site/trunk/release-guide.html
nifi/site/trunk/security.html
nifi/site/trunk/videos.html

Modified: nifi/site/trunk/developer-guide.html
URL: 
http://svn.apache.org/viewvc/nifi/site/trunk/developer-guide.html?rev=1837681=1837680=1837681=diff
==
--- nifi/site/trunk/developer-guide.html (original)
+++ nifi/site/trunk/developer-guide.html Thu Aug  9 00:25:06 2018
@@ -75,7 +75,6 @@
 
 Quickstart
 Release 
Guide
-GPG Guide
 FDS 
Release Guide
 Licensing 
Guide
 Developer 
Guide
@@ -118,7 +117,7 @@
 https://www.apache.org/events/current-event-234x60.png; style="margin:0px 
10px" />
 
 
-Copyright  2018 The Apache Software Foundation, 
Licensed under the Copyright  2017 The Apache Software Foundation, 
Licensed under the https://www.apache.org/licenses/LICENSE-2.0;>Apache License, Version 
2.0.Apache, the
 Apache feather logo, NiFi, Apache NiFi and the project 
logo are trademarks of The Apache Software
 Foundation.

Modified: nifi/site/trunk/docs.html
URL: 
http://svn.apache.org/viewvc/nifi/site/trunk/docs.html?rev=1837681=1837680=1837681=diff
==
--- nifi/site/trunk/docs.html (original)
+++ nifi/site/trunk/docs.html Thu Aug  9 00:25:06 2018
@@ -75,7 +75,6 @@
 
 Quickstart
 Release 
Guide
-GPG Guide
 FDS 
Release Guide
 Licensing 
Guide
 Developer 
Guide
@@ -118,7 +117,7 @@
 https://www.apache.org/events/current-event-234x60.png; style="margin:0px 
10px" />
 
 
-Copyright  2018 The Apache Software Foundation, 
Licensed under the Copyright  2017 The Apache Software Foundation, 
Licensed under the https://www.apache.org/licenses/LICENSE-2.0;>Apache License, Version 
2.0.Apache, the
 Apache feather logo, NiFi, Apache NiFi and the project 
logo are trademarks of The Apache Software
 Foundation.

Modified: nifi/site/trunk/download.html
URL: 
http://svn.apache.org/viewvc/nifi/site/trunk/download.html?rev=1837681=1837680=1837681=diff
==
--- nifi/site/trunk/download.html (original)
+++ nifi/site/trunk/download.html Thu Aug  9 00:25:06 2018
@@ -75,7 +75,6 @@
 
 Quickstart
 Release 
Guide
-GPG Guide
 FDS 
Release Guide
 Licensing 
Guide
 Developer 
Guide
@@ -127,52 +126,49 @@
 
 Releases
 
-1.7.1 
+1.7.0
 
-Released July 16, 2018
 
 Sources:
 
-https://www.apache.org/dyn/closer.lua?path=/nifi/1.7.1/nifi-1.7.1-source-release.zip;>nifi-1.7.1-source-release.zip
 [52 MB] ( https://www.apache.org/dist/nifi/1.7.1/nifi-1.7.1-source-release.zip.asc;>asc,
 https://www.apache.org/dist/nifi/1.7.1/nifi-1.7.1-source-release.zip.sha1;>sha1,
 https://www.apache.org/dist/nifi/1.7.1/nifi-1.7.1-source-release.zip.sha256;>sha256,
 https://www.apache.org/dist/nifi/1.7.1/nifi-1.7.1-source-release.zip.sha512;>sha512
 )
+  

nifi git commit: NIFI-5480: Use FlowController's maps of components in order to look up component by ID rather than iterating recursively through all Process Groups to find the component

2018-08-01 Thread jeremydyer
Repository: nifi
Updated Branches:
  refs/heads/master b4894c557 -> 4cca9bef7


NIFI-5480: Use FlowController's maps of components in order to look up 
component by ID rather than iterating recursively through all Process Groups to 
find the component

This closes #2932

Signed-off-by: Jeremy Dyer 


Project: http://git-wip-us.apache.org/repos/asf/nifi/repo
Commit: http://git-wip-us.apache.org/repos/asf/nifi/commit/4cca9bef
Tree: http://git-wip-us.apache.org/repos/asf/nifi/tree/4cca9bef
Diff: http://git-wip-us.apache.org/repos/asf/nifi/diff/4cca9bef

Branch: refs/heads/master
Commit: 4cca9bef7c9bcad912cb7df6f3976715d09a6daf
Parents: b4894c5
Author: Mark Payne 
Authored: Wed Aug 1 17:49:51 2018 -0400
Committer: Jeremy Dyer 
Committed: Wed Aug 1 19:33:56 2018 -0400

--
 .../org/apache/nifi/groups/ProcessGroup.java|   8 --
 .../apache/nifi/controller/FlowController.java  |  33 -
 .../reporting/StandardReportingContext.java |   4 +-
 .../nifi/groups/StandardProcessGroup.java   | 134 ++-
 .../service/mock/MockProcessGroup.java  |   5 -
 .../StandardAuthorizableLookup.java |   4 +-
 .../nifi/web/controller/ControllerFacade.java   |   6 +-
 .../web/dao/impl/StandardProcessGroupDAO.java   |  58 ++--
 8 files changed, 130 insertions(+), 122 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/nifi/blob/4cca9bef/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core-api/src/main/java/org/apache/nifi/groups/ProcessGroup.java
--
diff --git 
a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core-api/src/main/java/org/apache/nifi/groups/ProcessGroup.java
 
b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core-api/src/main/java/org/apache/nifi/groups/ProcessGroup.java
index e9c4d87..01451df 100644
--- 
a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core-api/src/main/java/org/apache/nifi/groups/ProcessGroup.java
+++ 
b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core-api/src/main/java/org/apache/nifi/groups/ProcessGroup.java
@@ -771,14 +771,6 @@ public interface ProcessGroup extends 
ComponentAuthorizable, Positionable, Versi
 void remove(final Snippet snippet);
 
 /**
- * @param identifier of connectable
- * @return the Connectable with the given ID, if it exists; otherwise
- * returns null. This performs a recursive search of all ProcessGroups'
- * input ports, output ports, funnels, processors
- */
-Connectable findLocalConnectable(String identifier);
-
-/**
  * @param identifier of remote group port
  * @return the RemoteGroupPort with the given ID, if it exists; otherwise
  * returns null.

http://git-wip-us.apache.org/repos/asf/nifi/blob/4cca9bef/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/FlowController.java
--
diff --git 
a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/FlowController.java
 
b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/FlowController.java
index e7623e5..b83749c 100644
--- 
a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/FlowController.java
+++ 
b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/FlowController.java
@@ -2697,6 +2697,35 @@ public class FlowController implements EventAccess, 
ControllerServiceProvider, R
 allProcessors.remove(identifier);
 }
 
+public Connectable findLocalConnectable(final String id) {
+final ProcessorNode procNode = getProcessorNode(id);
+if (procNode != null) {
+return procNode;
+}
+
+final Port inPort = getInputPort(id);
+if (inPort != null) {
+return inPort;
+}
+
+final Port outPort = getOutputPort(id);
+if (outPort != null) {
+return outPort;
+}
+
+final Funnel funnel = getFunnel(id);
+if (funnel != null) {
+return funnel;
+}
+
+final RemoteGroupPort remoteGroupPort = 
getRootGroup().findRemoteGroupPort(id);
+if (remoteGroupPort != null) {
+return remoteGroupPort;
+}
+
+return null;
+}
+
 public ProcessorNode getProcessorNode(final String id) {
 return allProcessors.get(id);
 }
@@ -4933,7 +4962,7 @@ public class FlowController implements EventAccess, 
ControllerServiceProvider, R
 authorizable = new 

nifi-minifi git commit: MINIFI-458-RC2prepare for next development iteration

2018-07-10 Thread jeremydyer
Repository: nifi-minifi
Updated Branches:
  refs/heads/master 58b8c598c -> 16af0f48b


MINIFI-458-RC2prepare for next development iteration


Project: http://git-wip-us.apache.org/repos/asf/nifi-minifi/repo
Commit: http://git-wip-us.apache.org/repos/asf/nifi-minifi/commit/16af0f48
Tree: http://git-wip-us.apache.org/repos/asf/nifi-minifi/tree/16af0f48
Diff: http://git-wip-us.apache.org/repos/asf/nifi-minifi/diff/16af0f48

Branch: refs/heads/master
Commit: 16af0f48bd7fc1d8fe445776eb2f50a64f1d96c0
Parents: 58b8c59
Author: Jeremy Dyer 
Authored: Thu Jun 28 12:30:50 2018 -0400
Committer: Jeremy Dyer 
Committed: Thu Jun 28 12:30:50 2018 -0400

--
 minifi-api/pom.xml  |  2 +-
 minifi-assembly/pom.xml |  4 +-
 minifi-bootstrap/pom.xml|  2 +-
 minifi-c2/minifi-c2-api/pom.xml |  2 +-
 minifi-c2/minifi-c2-assembly/pom.xml|  2 +-
 .../minifi-c2-cache-filesystem/pom.xml  |  2 +-
 .../minifi-c2-cache/minifi-c2-cache-s3/pom.xml  |  2 +-
 minifi-c2/minifi-c2-cache/pom.xml   |  2 +-
 minifi-c2/minifi-c2-docker/pom.xml  |  4 +-
 minifi-c2/minifi-c2-integration-tests/pom.xml   |  2 +-
 minifi-c2/minifi-c2-jetty/pom.xml   |  2 +-
 .../minifi-c2-provider-cache/pom.xml|  2 +-
 .../minifi-c2-provider-delegating/pom.xml   |  2 +-
 .../minifi-c2-provider-nifi-rest/pom.xml|  2 +-
 .../minifi-c2-provider-util/pom.xml |  2 +-
 minifi-c2/minifi-c2-provider/pom.xml|  2 +-
 minifi-c2/minifi-c2-service/pom.xml |  2 +-
 minifi-c2/pom.xml   |  2 +-
 minifi-commons/minifi-commons-schema/pom.xml|  2 +-
 minifi-commons/minifi-utils/pom.xml |  2 +-
 minifi-commons/pom.xml  |  2 +-
 minifi-docker/pom.xml   |  4 +-
 minifi-docs/pom.xml |  2 +-
 minifi-integration-tests/pom.xml|  2 +-
 .../minifi-framework-nar/pom.xml|  2 +-
 .../minifi-framework-core/pom.xml   |  2 +-
 .../minifi-framework/minifi-nar-utils/pom.xml   |  2 +-
 .../minifi-framework/minifi-resources/pom.xml   |  2 +-
 .../minifi-framework/minifi-runtime/pom.xml |  2 +-
 .../minifi-framework/pom.xml|  2 +-
 .../minifi-framework-bundle/pom.xml |  2 +-
 .../minifi-provenance-reporting-nar/pom.xml |  2 +-
 .../minifi-provenance-reporting-bundle/pom.xml  |  2 +-
 .../pom.xml |  2 +-
 .../minifi-provenance-repositories/pom.xml  |  2 +-
 .../minifi-provenance-repository-nar/pom.xml|  2 +-
 .../minifi-provenance-repository-bundle/pom.xml |  2 +-
 .../minifi-ssl-context-service-nar/pom.xml  |  2 +-
 minifi-nar-bundles/minifi-standard-nar/pom.xml  |  2 +-
 .../minifi-update-attribute-nar/pom.xml |  2 +-
 minifi-nar-bundles/pom.xml  |  2 +-
 minifi-toolkit/minifi-toolkit-assembly/pom.xml  |  2 +-
 .../minifi-toolkit-configuration/pom.xml|  2 +-
 minifi-toolkit/pom.xml  |  2 +-
 pom.xml | 42 ++--
 45 files changed, 68 insertions(+), 68 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/nifi-minifi/blob/16af0f48/minifi-api/pom.xml
--
diff --git a/minifi-api/pom.xml b/minifi-api/pom.xml
index c0080ce..6dcf082 100644
--- a/minifi-api/pom.xml
+++ b/minifi-api/pom.xml
@@ -20,7 +20,7 @@ limitations under the License.
 
 minifi
 org.apache.nifi.minifi
-0.5.0
+0.6.0-SNAPSHOT
 
 minifi-api
 jar

http://git-wip-us.apache.org/repos/asf/nifi-minifi/blob/16af0f48/minifi-assembly/pom.xml
--
diff --git a/minifi-assembly/pom.xml b/minifi-assembly/pom.xml
index 1054ec1..f44c0d4 100644
--- a/minifi-assembly/pom.xml
+++ b/minifi-assembly/pom.xml
@@ -20,7 +20,7 @@ limitations under the License.
 
 minifi
 org.apache.nifi.minifi
-0.5.0
+0.6.0-SNAPSHOT
 
 minifi-assembly
 pom
@@ -185,7 +185,7 @@ limitations under the License.
 org.apache.nifi.minifi
 minifi-provenance-reporting-nar
 nar
-0.5.0
+0.6.0-SNAPSHOT
 
 
 

http://git-wip-us.apache.org/repos/asf/nifi-minifi/blob/16af0f48/minifi-bootstrap/pom.xml
--
diff --git a/minifi-bootstrap/pom.xml b/minifi-bootstrap/pom.xml
index 253d68f..baddf27 100644
--- a/minifi-bootstrap/pom.xml
+++ b/minifi-bootstrap/pom.xml
@@ -20,7 +20,7 @@ limitations under the License.
 
 minifi
 

nifi-minifi git commit: NIFI-458-RC2prepare release minifi-0.5.0-RC2

2018-07-10 Thread jeremydyer
Repository: nifi-minifi
Updated Branches:
  refs/heads/master 05f516d3d -> 58b8c598c


NIFI-458-RC2prepare release minifi-0.5.0-RC2


Project: http://git-wip-us.apache.org/repos/asf/nifi-minifi/repo
Commit: http://git-wip-us.apache.org/repos/asf/nifi-minifi/commit/58b8c598
Tree: http://git-wip-us.apache.org/repos/asf/nifi-minifi/tree/58b8c598
Diff: http://git-wip-us.apache.org/repos/asf/nifi-minifi/diff/58b8c598

Branch: refs/heads/master
Commit: 58b8c598c0866c8f1200164ab14f3df0d632d522
Parents: 05f516d
Author: Jeremy Dyer 
Authored: Thu Jun 28 12:28:30 2018 -0400
Committer: Jeremy Dyer 
Committed: Thu Jun 28 12:28:30 2018 -0400

--
 minifi-api/pom.xml  |  2 +-
 minifi-assembly/pom.xml |  4 +-
 minifi-bootstrap/pom.xml|  2 +-
 minifi-c2/minifi-c2-api/pom.xml |  2 +-
 minifi-c2/minifi-c2-assembly/pom.xml|  2 +-
 .../minifi-c2-cache-filesystem/pom.xml  |  2 +-
 .../minifi-c2-cache/minifi-c2-cache-s3/pom.xml  |  2 +-
 minifi-c2/minifi-c2-cache/pom.xml   |  2 +-
 minifi-c2/minifi-c2-docker/pom.xml  |  4 +-
 minifi-c2/minifi-c2-integration-tests/pom.xml   |  2 +-
 minifi-c2/minifi-c2-jetty/pom.xml   |  2 +-
 .../minifi-c2-provider-cache/pom.xml|  2 +-
 .../minifi-c2-provider-delegating/pom.xml   |  2 +-
 .../minifi-c2-provider-nifi-rest/pom.xml|  2 +-
 .../minifi-c2-provider-util/pom.xml |  2 +-
 minifi-c2/minifi-c2-provider/pom.xml|  2 +-
 minifi-c2/minifi-c2-service/pom.xml |  2 +-
 minifi-c2/pom.xml   |  2 +-
 minifi-commons/minifi-commons-schema/pom.xml|  2 +-
 minifi-commons/minifi-utils/pom.xml |  2 +-
 minifi-commons/pom.xml  |  2 +-
 minifi-docker/pom.xml   |  4 +-
 minifi-docs/pom.xml |  2 +-
 minifi-integration-tests/pom.xml|  2 +-
 .../minifi-framework-nar/pom.xml|  2 +-
 .../minifi-framework-core/pom.xml   |  2 +-
 .../minifi-framework/minifi-nar-utils/pom.xml   |  2 +-
 .../minifi-framework/minifi-resources/pom.xml   |  2 +-
 .../minifi-framework/minifi-runtime/pom.xml |  2 +-
 .../minifi-framework/pom.xml|  2 +-
 .../minifi-framework-bundle/pom.xml |  2 +-
 .../minifi-provenance-reporting-nar/pom.xml |  2 +-
 .../minifi-provenance-reporting-bundle/pom.xml  |  2 +-
 .../pom.xml |  2 +-
 .../minifi-provenance-repositories/pom.xml  |  2 +-
 .../minifi-provenance-repository-nar/pom.xml|  2 +-
 .../minifi-provenance-repository-bundle/pom.xml |  2 +-
 .../minifi-ssl-context-service-nar/pom.xml  |  2 +-
 minifi-nar-bundles/minifi-standard-nar/pom.xml  |  2 +-
 .../minifi-update-attribute-nar/pom.xml |  2 +-
 minifi-nar-bundles/pom.xml  |  2 +-
 minifi-toolkit/minifi-toolkit-assembly/pom.xml  |  2 +-
 .../minifi-toolkit-configuration/pom.xml|  2 +-
 minifi-toolkit/pom.xml  |  2 +-
 pom.xml | 42 ++--
 45 files changed, 68 insertions(+), 68 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/nifi-minifi/blob/58b8c598/minifi-api/pom.xml
--
diff --git a/minifi-api/pom.xml b/minifi-api/pom.xml
index 54b26d3..c0080ce 100644
--- a/minifi-api/pom.xml
+++ b/minifi-api/pom.xml
@@ -20,7 +20,7 @@ limitations under the License.
 
 minifi
 org.apache.nifi.minifi
-0.5.0-SNAPSHOT
+0.5.0
 
 minifi-api
 jar

http://git-wip-us.apache.org/repos/asf/nifi-minifi/blob/58b8c598/minifi-assembly/pom.xml
--
diff --git a/minifi-assembly/pom.xml b/minifi-assembly/pom.xml
index 3d651a1..1054ec1 100644
--- a/minifi-assembly/pom.xml
+++ b/minifi-assembly/pom.xml
@@ -20,7 +20,7 @@ limitations under the License.
 
 minifi
 org.apache.nifi.minifi
-0.5.0-SNAPSHOT
+0.5.0
 
 minifi-assembly
 pom
@@ -185,7 +185,7 @@ limitations under the License.
 org.apache.nifi.minifi
 minifi-provenance-reporting-nar
 nar
-0.5.0-SNAPSHOT
+0.5.0
 
 
 

http://git-wip-us.apache.org/repos/asf/nifi-minifi/blob/58b8c598/minifi-bootstrap/pom.xml
--
diff --git a/minifi-bootstrap/pom.xml b/minifi-bootstrap/pom.xml
index 3c6f5dc..253d68f 100644
--- a/minifi-bootstrap/pom.xml
+++ b/minifi-bootstrap/pom.xml
@@ -20,7 +20,7 @@ limitations under the License.
 
 minifi
 

svn commit: r1835331 - in /nifi/site/trunk: ./ minifi/

2018-07-07 Thread jeremydyer
Author: jeremydyer
Date: Sat Jul  7 18:58:37 2018
New Revision: 1835331

URL: http://svn.apache.org/viewvc?rev=1835331&view=rev
Log:
Added MiNiFi 0.5.0 release

Modified:
nifi/site/trunk/developer-guide.html
nifi/site/trunk/docs.html
nifi/site/trunk/download.html
nifi/site/trunk/faq.html
nifi/site/trunk/fds-release-guide.html
nifi/site/trunk/fds.html
nifi/site/trunk/index.html
nifi/site/trunk/licensing-guide.html
nifi/site/trunk/mailing_lists.html
nifi/site/trunk/minifi/download.html
nifi/site/trunk/minifi/faq.html
nifi/site/trunk/minifi/getting-started.html
nifi/site/trunk/minifi/index.html
nifi/site/trunk/minifi/minifi-java-agent-quick-start.html
nifi/site/trunk/minifi/minifi-toolkit.html
nifi/site/trunk/minifi/system-admin-guide.html
nifi/site/trunk/people.html
nifi/site/trunk/powered-by-nifi.html
nifi/site/trunk/quickstart.html
nifi/site/trunk/registry.html
nifi/site/trunk/release-guide.html
nifi/site/trunk/security.html
nifi/site/trunk/videos.html

Modified: nifi/site/trunk/developer-guide.html
URL: 
http://svn.apache.org/viewvc/nifi/site/trunk/developer-guide.html?rev=1835331=1835330=1835331=diff
==
--- nifi/site/trunk/developer-guide.html (original)
+++ nifi/site/trunk/developer-guide.html Sat Jul  7 18:58:37 2018
@@ -117,7 +117,7 @@
 https://www.apache.org/events/current-event-234x60.png; style="margin:0px 
10px" />
 
 
-Copyright  2018 The Apache Software Foundation, 
Licensed under the Copyright  2017 The Apache Software Foundation, 
Licensed under the https://www.apache.org/licenses/LICENSE-2.0;>Apache License, Version 
2.0.Apache, the
 Apache feather logo, NiFi, Apache NiFi and the project 
logo are trademarks of The Apache Software
 Foundation.

Modified: nifi/site/trunk/docs.html
URL: 
http://svn.apache.org/viewvc/nifi/site/trunk/docs.html?rev=1835331=1835330=1835331=diff
==
--- nifi/site/trunk/docs.html (original)
+++ nifi/site/trunk/docs.html Sat Jul  7 18:58:37 2018
@@ -117,7 +117,7 @@
 https://www.apache.org/events/current-event-234x60.png; style="margin:0px 
10px" />
 
 
-Copyright  2018 The Apache Software Foundation, 
Licensed under the Copyright  2017 The Apache Software Foundation, 
Licensed under the https://www.apache.org/licenses/LICENSE-2.0;>Apache License, Version 
2.0.Apache, the
 Apache feather logo, NiFi, Apache NiFi and the project 
logo are trademarks of The Apache Software
 Foundation.

Modified: nifi/site/trunk/download.html
URL: 
http://svn.apache.org/viewvc/nifi/site/trunk/download.html?rev=1835331=1835330=1835331=diff
==
--- nifi/site/trunk/download.html (original)
+++ nifi/site/trunk/download.html Sat Jul  7 18:58:37 2018
@@ -131,20 +131,20 @@
 
 Sources:
 
-https://www.apache.org/dyn/closer.lua?path=/nifi/1.7.0/nifi-1.7.0-source-release.zip;>nifi-1.7.0-source-release.zip
 [52 MB] ( https://www.apache.org/dist/nifi/1.7.0/nifi-1.7.0-source-release.zip.asc;>asc,
 https://www.apache.org/dist/nifi/1.7.0/nifi-1.7.0-source-release.zip.sha1;>sha1,
 https://www.apache.org/dist/nifi/1.7.0/nifi-1.7.0-source-release.zip.sha256;>sha256,
 https://www.apache.org/dist/nifi/1.7.0/nifi-1.7.0-source-release.zip.sha512;>sha512
 )
+https://www.apache.org/dyn/closer.lua?path=/nifi/1.7.0/nifi-1.7.0-source-release.zip;>nifi-1.7.0-source-release.zip
 ( https://www.apache.org/dist/nifi/1.7.0/nifi-1.7.0-source-release.zip.asc;>asc,
 https://www.apache.org/dist/nifi/1.7.0/nifi-1.7.0-source-release.zip.sha1;>sha1,
 https://www.apache.org/dist/nifi/1.7.0/nifi-1.7.0-source-release.zip.sha256;>sha256,
 https://www.apache.org/dist/nifi/1.7.0/nifi-1.7.0-source-release.zip.sha512;>sha512
 )
 
 
 
 Binaries
 
-https://www.apache.org/dyn/closer.lua?path=/nifi/1.7.0/nifi-1.7.0-bin.tar.gz;>nifi-1.7.0-bin.tar.gz
 [1.2 GB] ( https://www.apache.org/dist/nifi/1.7.0/nifi-1.7.0-bin.tar.gz.asc;>asc,
 https://www.apache.org/dist/nifi/1.7.0/nifi-1.7.0-bin.tar.gz.sha1;>sha1,
 https://www.apache.org/dist/nifi/1.7.0/nifi-1.7.0-bin.tar.gz.sha256;>sha256,
 https://www.apache.org/dist/nifi/1.7.0/nifi-1.7.0-bin.tar.gz.sha512;>sha512
 )
+https://www.apache.org/dyn/closer.lua?path=/nifi/1.7.0/nifi-1.7.0-bin.tar.g

[1/2] nifi-site git commit: Added Apache NiFi MiNiFi 0.5.0 to the downloads page

2018-07-07 Thread jeremydyer
Repository: nifi-site
Updated Branches:
  refs/heads/master d65e893f7 -> 3e8e4d4f1


Added Apache NiFi MiNiFi 0.5.0 to the downloads page


Project: http://git-wip-us.apache.org/repos/asf/nifi-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/nifi-site/commit/9cde2210
Tree: http://git-wip-us.apache.org/repos/asf/nifi-site/tree/9cde2210
Diff: http://git-wip-us.apache.org/repos/asf/nifi-site/diff/9cde2210

Branch: refs/heads/master
Commit: 9cde221022e2223252fcd5fc9d820bb1fb8a5ae5
Parents: d65e893
Author: Jeremy Dyer 
Authored: Sat Jul 7 14:32:11 2018 -0400
Committer: Jeremy Dyer 
Committed: Sat Jul 7 14:35:46 2018 -0400

--
 src/pages/html/minifi/download.hbs | 33 +
 1 file changed, 33 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/nifi-site/blob/9cde2210/src/pages/html/minifi/download.hbs
--
diff --git a/src/pages/html/minifi/download.hbs 
b/src/pages/html/minifi/download.hbs
index 79a916b..f9d51b3 100644
--- a/src/pages/html/minifi/download.hbs
+++ b/src/pages/html/minifi/download.hbs
@@ -24,6 +24,39 @@ title: Apache NiFi - MiNiFi Downloads
 Releases
 MiNiFi (Java)
 
+  0.5.0
+  
+  
+  Sources:
+  
+  https://www.apache.org/dyn/closer.lua?path=/nifi/minifi/0.5.0/minifi-0.5.0-source-release.zip;>minifi-0.5.0-source-release.zip
+( https://www.apache.org/dist/nifi/minifi/0.5.0/minifi-0.5.0-source-release.zip.asc;>asc,
+  https://www.apache.org/dist/nifi/minifi/0.5.0/minifi-0.5.0-source-release.zip.md5;>md5,
+  https://www.apache.org/dist/nifi/minifi/0.5.0/minifi-0.5.0-source-release.zip.sha1;>sha1,
+  https://www.apache.org/dist/nifi/minifi/0.5.0/minifi-0.5.0-source-release.zip.sha256;>sha256
 )
+  
+  
+  
+  
+  Binaries
+  
+  https://www.apache.org/dyn/closer.lua?path=/nifi/minifi/0.5.0/minifi-0.5.0-bin.tar.gz;>minifi-0.5.0-bin.tar.gz
+( https://www.apache.org/dist/nifi/minifi/0.5.0/minifi-0.5.0-bin.tar.gz.asc;>asc,
+  https://www.apache.org/dist/nifi/minifi/0.5.0/minifi-0.5.0-bin.tar.gz.md5;>md5,
+  https://www.apache.org/dist/nifi/minifi/0.5.0/minifi-0.5.0-bin.tar.gz.sha1;>sha1,
+  https://www.apache.org/dist/nifi/minifi/0.5.0/minifi-0.5.0-bin.tar.gz.sha256;>sha256
 )
+  
+  https://www.apache.org/dyn/closer.lua?path=/nifi/minifi/0.5.0/minifi-0.5.0-bin.zip;>minifi-0.5.0-bin.zip
+( https://www.apache.org/dist/nifi/minifi/0.5.0/minifi-0.5.0-bin.zip.asc;>asc,
+  https://www.apache.org/dist/nifi/minifi/0.5.0/minifi-0.5.0-bin.zip.md5;>md5,
+  https://www.apache.org/dist/nifi/minifi/0.5.0/minifi-0.5.0-bin.zip.sha1;>sha1,
+  https://www.apache.org/dist/nifi/minifi/0.5.0/minifi-0.5.0-bin.zip.sha256;>sha256
 )
+  
+  
+
+  https://cwiki.apache.org/confluence/display/MINIFI/Release+Notes#ReleaseNotes-Version0.5.0;>Release
 Notes
+  
+  
   0.4.0
   
   



[2/2] nifi-site git commit: Added MiNiFi 0.5.0 toolkit and C2 binaries

2018-07-07 Thread jeremydyer
Added MiNiFi 0.5.0 toolkit and C2 binaries


Project: http://git-wip-us.apache.org/repos/asf/nifi-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/nifi-site/commit/3e8e4d4f
Tree: http://git-wip-us.apache.org/repos/asf/nifi-site/tree/3e8e4d4f
Diff: http://git-wip-us.apache.org/repos/asf/nifi-site/diff/3e8e4d4f

Branch: refs/heads/master
Commit: 3e8e4d4f105f17de1455929343bb1a38eba5324e
Parents: 9cde221
Author: Jeremy Dyer 
Authored: Sat Jul 7 14:43:26 2018 -0400
Committer: Jeremy Dyer 
Committed: Sat Jul 7 14:43:26 2018 -0400

--
 src/pages/html/minifi/download.hbs | 37 -
 1 file changed, 32 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/nifi-site/blob/3e8e4d4f/src/pages/html/minifi/download.hbs
--
diff --git a/src/pages/html/minifi/download.hbs 
b/src/pages/html/minifi/download.hbs
index f9d51b3..b37b093 100644
--- a/src/pages/html/minifi/download.hbs
+++ b/src/pages/html/minifi/download.hbs
@@ -31,7 +31,6 @@ title: Apache NiFi - MiNiFi Downloads
   
   https://www.apache.org/dyn/closer.lua?path=/nifi/minifi/0.5.0/minifi-0.5.0-source-release.zip;>minifi-0.5.0-source-release.zip
 ( https://www.apache.org/dist/nifi/minifi/0.5.0/minifi-0.5.0-source-release.zip.asc;>asc,
-  https://www.apache.org/dist/nifi/minifi/0.5.0/minifi-0.5.0-source-release.zip.md5;>md5,
   https://www.apache.org/dist/nifi/minifi/0.5.0/minifi-0.5.0-source-release.zip.sha1;>sha1,
   https://www.apache.org/dist/nifi/minifi/0.5.0/minifi-0.5.0-source-release.zip.sha256;>sha256
 )
   
@@ -42,13 +41,11 @@ title: Apache NiFi - MiNiFi Downloads
   
   https://www.apache.org/dyn/closer.lua?path=/nifi/minifi/0.5.0/minifi-0.5.0-bin.tar.gz;>minifi-0.5.0-bin.tar.gz
 ( https://www.apache.org/dist/nifi/minifi/0.5.0/minifi-0.5.0-bin.tar.gz.asc;>asc,
-  https://www.apache.org/dist/nifi/minifi/0.5.0/minifi-0.5.0-bin.tar.gz.md5;>md5,
   https://www.apache.org/dist/nifi/minifi/0.5.0/minifi-0.5.0-bin.tar.gz.sha1;>sha1,
   https://www.apache.org/dist/nifi/minifi/0.5.0/minifi-0.5.0-bin.tar.gz.sha256;>sha256
 )
   
   https://www.apache.org/dyn/closer.lua?path=/nifi/minifi/0.5.0/minifi-0.5.0-bin.zip;>minifi-0.5.0-bin.zip
 ( https://www.apache.org/dist/nifi/minifi/0.5.0/minifi-0.5.0-bin.zip.asc;>asc,
-  https://www.apache.org/dist/nifi/minifi/0.5.0/minifi-0.5.0-bin.zip.md5;>md5,
   https://www.apache.org/dist/nifi/minifi/0.5.0/minifi-0.5.0-bin.zip.sha1;>sha1,
   https://www.apache.org/dist/nifi/minifi/0.5.0/minifi-0.5.0-bin.zip.sha256;>sha256
 )
   
@@ -218,18 +215,31 @@ title: Apache NiFi - MiNiFi Downloads
 
 MiNiFi Toolkit Binaries
 
+  0.5.0 - Compatible with MiNiFi Java & C++ 0.5.0+
+
+  
+https://www.apache.org/dyn/closer.lua?path=/nifi/minifi/0.5.0/minifi-toolkit-0.5.0-bin.tar.gz;>minifi-toolkit-0.5.0-bin.tar.gz
+( https://www.apache.org/dist/nifi/minifi/0.5.0/minifi-toolkit-0.5.0-bin.tar.gz.asc;>asc,
+  https://www.apache.org/dist/nifi/minifi/0.5.0/minifi-toolkit-0.5.0-bin.tar.gz.sha1;>sha1,
+  https://www.apache.org/dist/nifi/minifi/0.5.0/minifi-toolkit-0.5.0-bin.tar.gz.sha256;>sha256
 )
+  
+  https://www.apache.org/dyn/closer.lua?path=/nifi/minifi/0.5.0/minifi-toolkit-0.5.0-bin.zip;>minifi-toolkit-0.5.0-bin.zip
+( https://www.apache.org/dist/nifi/minifi/0.5.0/minifi-toolkit-0.5.0-bin.zip.asc;>asc,
+  https://www.apache.org/dist/nifi/minifi/0.5.0/minifi-toolkit-0.5.0-bin.zip.sha1;>sha1,
+  https://www.apache.org/dist/nifi/minifi/0.5.0/minifi-toolkit-0.5.0-bin.zip.sha256;>sha256
 )
+  
+
+  
   0.4.0 - Compatible with MiNiFi Java & C++ 0.4.0+
 
   
 https://www.apache.org/dyn/closer.lua?path=/nifi/minifi/0.4.0/minifi-toolkit-0.4.0-bin.tar.gz;>minifi-toolkit-0.4.0-bin.tar.gz
 ( https://www.apache.org/dist/nifi/minifi/0.4.0/minifi-toolkit-0.4.0-bin.tar.gz.asc;>asc,
-  https://www.apache.org/dist/nifi/minifi/0.4.0/minifi-toolkit-0.4.0-bin.tar.gz.md5;>md5,
   https://www.apache.org/dist/nifi/minifi/0.4.0/minifi-toolkit-0.4.0-bin.tar.gz.sha1;>sha1,
   

svn commit: r27871 - /dev/nifi/nifi-minifi/0.5.0/

2018-07-02 Thread jeremydyer
Author: jeremydyer
Date: Mon Jul  2 17:26:42 2018
New Revision: 27871

Log:
Apache NiFi MiNiFi 0.5.0 RC2 binary artifacts, hashes, and signatures

Added:
dev/nifi/nifi-minifi/0.5.0/
  - copied from r27790, dev/nifi/nifi-minifi/0.5.0/
dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-bin.tar.gz   (with props)
dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-bin.tar.gz.asc
dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-bin.tar.gz.sha1
dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-bin.tar.gz.sha256
dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-bin.zip   (with props)
dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-bin.zip.asc
dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-bin.zip.sha1
dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-bin.zip.sha256

Added: dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-bin.tar.gz
==
Binary file - no diff available.

Propchange: dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-bin.tar.gz
--
svn:mime-type = application/octet-stream

Added: dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-bin.tar.gz.asc
==
--- dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-bin.tar.gz.asc (added)
+++ dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-bin.tar.gz.asc Mon Jul  2 17:26:42 
2018
@@ -0,0 +1,16 @@
+-BEGIN PGP SIGNATURE-
+
+iQIzBAABCgAdFiEEUKpgrV1YMRGHsL61xuVQ2mspWtUFAls6PtoACgkQxuVQ2msp
+WtVRuxAAilJolMcahvFDWRqcZ/KaQaqHj7J9MjZx1YlLfM4UMame0bve4GqtabOp
+r9IwodXxjJ0dgyoEzfklCfVihpP0zfOOaCYSzcsbYh5z6+oFWvefBRQKPIEvN2UO
+UJwGQsN3Ord7XCKStH6Xmuwc2MZ7uyZWue2N7kIO8fWLUxkieNnaj53WfczO9EUq
+3LPb17G+WrdofWgAJ1KTPwxya5pJSbCXeTmQzbHtXNX837oCjLgxetMsyE7mJGJZ
+JEQl4aLxY1eJwRa7VedZJbCSYXm9vN+f0McXhIAhdGrBQsRBtSCpiMSEvcw9YeRX
+rW3qm0FGR+NafSf31N9sG9wjHOYL7odKLs8nvrrxwfTBBMrEsApfBULW9jpw9tJz
+O8l60mV/LLJfBM4TCq/Wgg5DA1uAPsReRpfPMc7jM/ucSWgDvFCea0t3b7eoug+u
+IuDxJFz0ZF4t5SZUUVWahfJHLNfwmFrqzfMnzz0bkdd09j8Iakr7gmyuIoTxoR+w
+S2r5tfTOIPybYj6dMK5IBDeCnwkfScCrdCAMwTDhDwyW90xpRlBSHvdoNEkKKrEz
+u8OY4jo5nplumiAWmwemXmp5GaQmrf9AMfmbw25wBBpCycALuHwgT6X0lF+jTbXp
+ZNaDD7feyERjo1B0Wa8yGCtEQEitC3tVtHG8oQEdJ4eA7xIHg2E=
+=49Ur
+-END PGP SIGNATURE-

Added: dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-bin.tar.gz.sha1
==
--- dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-bin.tar.gz.sha1 (added)
+++ dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-bin.tar.gz.sha1 Mon Jul  2 17:26:42 
2018
@@ -0,0 +1 @@
+4cabfbce604eeffe67ae11c90f51ce44f196e271

Added: dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-bin.tar.gz.sha256
==
--- dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-bin.tar.gz.sha256 (added)
+++ dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-bin.tar.gz.sha256 Mon Jul  2 
17:26:42 2018
@@ -0,0 +1 @@
+e4df2bc2388fe4ea45084ba0651a5457d0f1542b1f2984c3c7b5fb695f110641

Added: dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-bin.zip
==
Binary file - no diff available.

Propchange: dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-bin.zip
--
svn:mime-type = application/octet-stream

Added: dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-bin.zip.asc
==
--- dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-bin.zip.asc (added)
+++ dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-bin.zip.asc Mon Jul  2 17:26:42 2018
@@ -0,0 +1,16 @@
+-BEGIN PGP SIGNATURE-
+
+iQIzBAABCgAdFiEEUKpgrV1YMRGHsL61xuVQ2mspWtUFAls6PvIACgkQxuVQ2msp
+WtWq0w/9Eq7sSpgEESSEatrQEJ7YuC1pWvsg+c2ZSR4AcaoEE2sMjHaP9gMHFQMf
+vSf8UuqPwyy2E2AkZI9Box9ZbrFafUIH8IUoioxUWvjcPXBqORgWFIEwvuIl4FDj
+YC58BBfgpCeViFR9kTtqCkNhHsBjYf2G789BCB+xiAOXdiu0wrNyzJ9TmrKYjPeq
+2uZPOFGHwyfds87C199+Dt24DEqr7KqYka5GgwA6kEhurdp6zV48RpXNetExn7xo
+TIr2DDntIOQJ4VCySlCjzmBGOXbtFzeL+ACsbP9b7s9ufDIBKCrTb5y5n1Bey084
+paT6SYO1eNcG0MB3dPuJTM5W1vWfVbSAA0I4mMW61BJo9nI4hq8uqGnSVqIYUlw+
+J0MkwO676GQQ7gHyHO8y4P2zNotMthzkpmSbRxy1srh0fpvYt3tgCiZ4vioN9VhD
+Z9GU/XZTGd8DiUKu+qZky5SqHvRRazeN8gMAjcWsOyxQkoMHOwxEOEewO4u7gUxh
+aw2rAYd9IgNhmg1UFnpOWQFdl+8CcbitMuNPqI5WtaaiXFkgQu9hfQJIJlMcaVb3
+yBv6Enl94Q4CmCoZAscMtZNzTzDxLIr8rytaOAkzsSku5ZRvee4PBNEFNpKiN5oC
+ta5kHfbfmxJAxjEQ9nvbADNNGteFyvhZmIm0zsnfsJsC9GNVmeA=
+=uHzC
+-END PGP SIGNATURE-

Added: dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-bin.zip.sha1
==
--- dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-bin.zip.sha1 (added)
+++ dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-bin.zip.sha1 Mon Jul  2 17:26:42 
2018
@@ -0,0 +1 @@
+6faa8c2ee5638d2cc7d54605ed1a1e8fc8dba1ce

Added: dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-bin.zip.sha256
==
--- dev/nifi/nifi

[nifi-minifi] Git Push Summary

2018-07-02 Thread jeremydyer
Repository: nifi-minifi
Updated Tags:  refs/tags/rel/minifi-0.5.0 [created] a1f91c7cd


[2/2] nifi-minifi git commit: MINIFI-458-RC2prepare for next development iteration

2018-06-29 Thread jeremydyer
MINIFI-458-RC2prepare for next development iteration


Project: http://git-wip-us.apache.org/repos/asf/nifi-minifi/repo
Commit: http://git-wip-us.apache.org/repos/asf/nifi-minifi/commit/16af0f48
Tree: http://git-wip-us.apache.org/repos/asf/nifi-minifi/tree/16af0f48
Diff: http://git-wip-us.apache.org/repos/asf/nifi-minifi/diff/16af0f48

Branch: refs/heads/MINIFI-458-RC2
Commit: 16af0f48bd7fc1d8fe445776eb2f50a64f1d96c0
Parents: 58b8c59
Author: Jeremy Dyer 
Authored: Thu Jun 28 12:30:50 2018 -0400
Committer: Jeremy Dyer 
Committed: Thu Jun 28 12:30:50 2018 -0400

--
 minifi-api/pom.xml  |  2 +-
 minifi-assembly/pom.xml |  4 +-
 minifi-bootstrap/pom.xml|  2 +-
 minifi-c2/minifi-c2-api/pom.xml |  2 +-
 minifi-c2/minifi-c2-assembly/pom.xml|  2 +-
 .../minifi-c2-cache-filesystem/pom.xml  |  2 +-
 .../minifi-c2-cache/minifi-c2-cache-s3/pom.xml  |  2 +-
 minifi-c2/minifi-c2-cache/pom.xml   |  2 +-
 minifi-c2/minifi-c2-docker/pom.xml  |  4 +-
 minifi-c2/minifi-c2-integration-tests/pom.xml   |  2 +-
 minifi-c2/minifi-c2-jetty/pom.xml   |  2 +-
 .../minifi-c2-provider-cache/pom.xml|  2 +-
 .../minifi-c2-provider-delegating/pom.xml   |  2 +-
 .../minifi-c2-provider-nifi-rest/pom.xml|  2 +-
 .../minifi-c2-provider-util/pom.xml |  2 +-
 minifi-c2/minifi-c2-provider/pom.xml|  2 +-
 minifi-c2/minifi-c2-service/pom.xml |  2 +-
 minifi-c2/pom.xml   |  2 +-
 minifi-commons/minifi-commons-schema/pom.xml|  2 +-
 minifi-commons/minifi-utils/pom.xml |  2 +-
 minifi-commons/pom.xml  |  2 +-
 minifi-docker/pom.xml   |  4 +-
 minifi-docs/pom.xml |  2 +-
 minifi-integration-tests/pom.xml|  2 +-
 .../minifi-framework-nar/pom.xml|  2 +-
 .../minifi-framework-core/pom.xml   |  2 +-
 .../minifi-framework/minifi-nar-utils/pom.xml   |  2 +-
 .../minifi-framework/minifi-resources/pom.xml   |  2 +-
 .../minifi-framework/minifi-runtime/pom.xml |  2 +-
 .../minifi-framework/pom.xml|  2 +-
 .../minifi-framework-bundle/pom.xml |  2 +-
 .../minifi-provenance-reporting-nar/pom.xml |  2 +-
 .../minifi-provenance-reporting-bundle/pom.xml  |  2 +-
 .../pom.xml |  2 +-
 .../minifi-provenance-repositories/pom.xml  |  2 +-
 .../minifi-provenance-repository-nar/pom.xml|  2 +-
 .../minifi-provenance-repository-bundle/pom.xml |  2 +-
 .../minifi-ssl-context-service-nar/pom.xml  |  2 +-
 minifi-nar-bundles/minifi-standard-nar/pom.xml  |  2 +-
 .../minifi-update-attribute-nar/pom.xml |  2 +-
 minifi-nar-bundles/pom.xml  |  2 +-
 minifi-toolkit/minifi-toolkit-assembly/pom.xml  |  2 +-
 .../minifi-toolkit-configuration/pom.xml|  2 +-
 minifi-toolkit/pom.xml  |  2 +-
 pom.xml | 42 ++--
 45 files changed, 68 insertions(+), 68 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/nifi-minifi/blob/16af0f48/minifi-api/pom.xml
--
diff --git a/minifi-api/pom.xml b/minifi-api/pom.xml
index c0080ce..6dcf082 100644
--- a/minifi-api/pom.xml
+++ b/minifi-api/pom.xml
@@ -20,7 +20,7 @@ limitations under the License.
 
 minifi
 org.apache.nifi.minifi
-0.5.0
+0.6.0-SNAPSHOT
 
 minifi-api
 jar

http://git-wip-us.apache.org/repos/asf/nifi-minifi/blob/16af0f48/minifi-assembly/pom.xml
--
diff --git a/minifi-assembly/pom.xml b/minifi-assembly/pom.xml
index 1054ec1..f44c0d4 100644
--- a/minifi-assembly/pom.xml
+++ b/minifi-assembly/pom.xml
@@ -20,7 +20,7 @@ limitations under the License.
 
 minifi
 org.apache.nifi.minifi
-0.5.0
+0.6.0-SNAPSHOT
 
 minifi-assembly
 pom
@@ -185,7 +185,7 @@ limitations under the License.
 org.apache.nifi.minifi
 minifi-provenance-reporting-nar
 nar
-0.5.0
+0.6.0-SNAPSHOT
 
 
 

http://git-wip-us.apache.org/repos/asf/nifi-minifi/blob/16af0f48/minifi-bootstrap/pom.xml
--
diff --git a/minifi-bootstrap/pom.xml b/minifi-bootstrap/pom.xml
index 253d68f..baddf27 100644
--- a/minifi-bootstrap/pom.xml
+++ b/minifi-bootstrap/pom.xml
@@ -20,7 +20,7 @@ limitations under the License.
 
 minifi
 org.apache.nifi.minifi
-0.5.0
+0.6.0-SNAPSHOT
 
 

[1/2] nifi-minifi git commit: NIFI-458-RC2prepare release minifi-0.5.0-RC2

2018-06-29 Thread jeremydyer
Repository: nifi-minifi
Updated Branches:
  refs/heads/MINIFI-458-RC2 05f516d3d -> 16af0f48b


NIFI-458-RC2prepare release minifi-0.5.0-RC2


Project: http://git-wip-us.apache.org/repos/asf/nifi-minifi/repo
Commit: http://git-wip-us.apache.org/repos/asf/nifi-minifi/commit/58b8c598
Tree: http://git-wip-us.apache.org/repos/asf/nifi-minifi/tree/58b8c598
Diff: http://git-wip-us.apache.org/repos/asf/nifi-minifi/diff/58b8c598

Branch: refs/heads/MINIFI-458-RC2
Commit: 58b8c598c0866c8f1200164ab14f3df0d632d522
Parents: 05f516d
Author: Jeremy Dyer 
Authored: Thu Jun 28 12:28:30 2018 -0400
Committer: Jeremy Dyer 
Committed: Thu Jun 28 12:28:30 2018 -0400

--
 minifi-api/pom.xml  |  2 +-
 minifi-assembly/pom.xml |  4 +-
 minifi-bootstrap/pom.xml|  2 +-
 minifi-c2/minifi-c2-api/pom.xml |  2 +-
 minifi-c2/minifi-c2-assembly/pom.xml|  2 +-
 .../minifi-c2-cache-filesystem/pom.xml  |  2 +-
 .../minifi-c2-cache/minifi-c2-cache-s3/pom.xml  |  2 +-
 minifi-c2/minifi-c2-cache/pom.xml   |  2 +-
 minifi-c2/minifi-c2-docker/pom.xml  |  4 +-
 minifi-c2/minifi-c2-integration-tests/pom.xml   |  2 +-
 minifi-c2/minifi-c2-jetty/pom.xml   |  2 +-
 .../minifi-c2-provider-cache/pom.xml|  2 +-
 .../minifi-c2-provider-delegating/pom.xml   |  2 +-
 .../minifi-c2-provider-nifi-rest/pom.xml|  2 +-
 .../minifi-c2-provider-util/pom.xml |  2 +-
 minifi-c2/minifi-c2-provider/pom.xml|  2 +-
 minifi-c2/minifi-c2-service/pom.xml |  2 +-
 minifi-c2/pom.xml   |  2 +-
 minifi-commons/minifi-commons-schema/pom.xml|  2 +-
 minifi-commons/minifi-utils/pom.xml |  2 +-
 minifi-commons/pom.xml  |  2 +-
 minifi-docker/pom.xml   |  4 +-
 minifi-docs/pom.xml |  2 +-
 minifi-integration-tests/pom.xml|  2 +-
 .../minifi-framework-nar/pom.xml|  2 +-
 .../minifi-framework-core/pom.xml   |  2 +-
 .../minifi-framework/minifi-nar-utils/pom.xml   |  2 +-
 .../minifi-framework/minifi-resources/pom.xml   |  2 +-
 .../minifi-framework/minifi-runtime/pom.xml |  2 +-
 .../minifi-framework/pom.xml|  2 +-
 .../minifi-framework-bundle/pom.xml |  2 +-
 .../minifi-provenance-reporting-nar/pom.xml |  2 +-
 .../minifi-provenance-reporting-bundle/pom.xml  |  2 +-
 .../pom.xml |  2 +-
 .../minifi-provenance-repositories/pom.xml  |  2 +-
 .../minifi-provenance-repository-nar/pom.xml|  2 +-
 .../minifi-provenance-repository-bundle/pom.xml |  2 +-
 .../minifi-ssl-context-service-nar/pom.xml  |  2 +-
 minifi-nar-bundles/minifi-standard-nar/pom.xml  |  2 +-
 .../minifi-update-attribute-nar/pom.xml |  2 +-
 minifi-nar-bundles/pom.xml  |  2 +-
 minifi-toolkit/minifi-toolkit-assembly/pom.xml  |  2 +-
 .../minifi-toolkit-configuration/pom.xml|  2 +-
 minifi-toolkit/pom.xml  |  2 +-
 pom.xml | 42 ++--
 45 files changed, 68 insertions(+), 68 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/nifi-minifi/blob/58b8c598/minifi-api/pom.xml
--
diff --git a/minifi-api/pom.xml b/minifi-api/pom.xml
index 54b26d3..c0080ce 100644
--- a/minifi-api/pom.xml
+++ b/minifi-api/pom.xml
@@ -20,7 +20,7 @@ limitations under the License.
 
 minifi
 org.apache.nifi.minifi
-0.5.0-SNAPSHOT
+0.5.0
 
 minifi-api
 jar

http://git-wip-us.apache.org/repos/asf/nifi-minifi/blob/58b8c598/minifi-assembly/pom.xml
--
diff --git a/minifi-assembly/pom.xml b/minifi-assembly/pom.xml
index 3d651a1..1054ec1 100644
--- a/minifi-assembly/pom.xml
+++ b/minifi-assembly/pom.xml
@@ -20,7 +20,7 @@ limitations under the License.
 
 minifi
 org.apache.nifi.minifi
-0.5.0-SNAPSHOT
+0.5.0
 
 minifi-assembly
 pom
@@ -185,7 +185,7 @@ limitations under the License.
 org.apache.nifi.minifi
 minifi-provenance-reporting-nar
 nar
-0.5.0-SNAPSHOT
+0.5.0
 
 
 

http://git-wip-us.apache.org/repos/asf/nifi-minifi/blob/58b8c598/minifi-bootstrap/pom.xml
--
diff --git a/minifi-bootstrap/pom.xml b/minifi-bootstrap/pom.xml
index 3c6f5dc..253d68f 100644
--- a/minifi-bootstrap/pom.xml
+++ b/minifi-bootstrap/pom.xml
@@ -20,7 +20,7 @@ limitations under the License.
 
 minifi

svn commit: r27790 - in /dev/nifi/nifi-minifi/0.5.0: ./ minifi-0.5.0-source-release.zip minifi-0.5.0-source-release.zip.asc minifi-0.5.0-source-release.zip.sha1 minifi-0.5.0-source-release.zip.sha256

2018-06-28 Thread jeremydyer
Author: jeremydyer
Date: Thu Jun 28 17:35:23 2018
New Revision: 27790

Log:
MiNiFi 0.5.0 RC2 artifacts

Added:
dev/nifi/nifi-minifi/0.5.0/
dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-source-release.zip   (with props)
dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-source-release.zip.asc
dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-source-release.zip.sha1
dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-source-release.zip.sha256

Added: dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-source-release.zip
==
Binary file - no diff available.

Propchange: dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-source-release.zip
--
svn:mime-type = application/octet-stream

Added: dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-source-release.zip.asc
==
--- dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-source-release.zip.asc (added)
+++ dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-source-release.zip.asc Thu Jun 28 
17:35:23 2018
@@ -0,0 +1,17 @@
+-BEGIN PGP SIGNATURE-
+
+iQJKBAABCgA0FiEEUKpgrV1YMRGHsL61xuVQ2mspWtUFAls1DU4WHGplcmVteWR5
+ZXJAYXBhY2hlLm9yZwAKCRDG5VDaayla1U6mD/4+x5x8y21DlQ3fSm/YtPj+8kNG
+ZcWI8SjyVkmmBFsX1fWjubyepScwPiU3BKVXYw1jpCVqFwMO2MBvJ6sSvy1p+p5H
+B3uI6pfNyBM7a/mDPG6FctwJ7nXmWvTBQbSqdku9CEH3uzRSKrLZqcARF+9sFJpK
+Ka88sYDKle09iXdT5aQrw4yt2ao9iVWqV4wJfHXycTtUQMJ4IrFxPvvjHtm/YJ/e
+jEvlK2V2Ov4lcugai6s8V2cTG0TXF0zip4mmjdPPWGzMBwN/4osy65EWDeaVrz1F
+S4biomsMF/u0bCI3Asv5se7gEoHlFP7BwR957sphLkiOJ68UCSHtWkdHTs9gCwpf
+Vc5xCxYo0WICmKoinRtb4bl06DUoOddSyWfwY0AD5/xOJAi+XnujjOI15EyNbfVj
+e4+/OB3+ovUrB1RcbKcw+/mABjsLd9ATTW68yKqnuqN/MsmOXclX547ZVF08Nda3
+HK574r2O5KJth9BrMwGC2R54DChbGanBpmX0MzLsVCpZc1DPZkZjJYCVZW/F4EkL
+SwTrd/l0T/iu7anCJP0+RI0gWpU14REKaesF+6HUqDzOxopb9IjdmwBNQebUZKbt
+lAj7kSlwZHHbmVwEP9UpJkTbsdtZhR1CopZkrM8is+x2ItF6UMJNovXggtuxKguJ
+bKRX3Tea2iRgxXdJBg==
+=NMXg
+-END PGP SIGNATURE-

Added: dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-source-release.zip.sha1
==
--- dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-source-release.zip.sha1 (added)
+++ dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-source-release.zip.sha1 Thu Jun 28 
17:35:23 2018
@@ -0,0 +1 @@
+be624db030aedd5c249e58c4c57d55ca917cd6ea

Added: dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-source-release.zip.sha256
==
--- dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-source-release.zip.sha256 (added)
+++ dev/nifi/nifi-minifi/0.5.0/minifi-0.5.0-source-release.zip.sha256 Thu Jun 
28 17:35:23 2018
@@ -0,0 +1 @@
+1f96ca7d8c2f52f9c15dad532065163f14cb01a2edbc783d4faa33656ff5ab88




[nifi-minifi] Git Push Summary

2018-06-28 Thread jeremydyer
Repository: nifi-minifi
Updated Tags:  refs/tags/minifi-0.5.0-RC2 [created] 05f516d3d


[nifi-minifi] Git Push Summary

2018-06-28 Thread jeremydyer
Repository: nifi-minifi
Updated Branches:
  refs/heads/MINIFI-458-RC2 [created] 05f516d3d


[2/2] nifi-minifi git commit: MINIFI-460 Prefer version property to hardcoded value for NiFi dep.

2018-06-28 Thread jeremydyer
MINIFI-460 Prefer version property to hardcoded value for NiFi dep.

This closes #132

Signed-off-by: Jeremy Dyer 


Project: http://git-wip-us.apache.org/repos/asf/nifi-minifi/repo
Commit: http://git-wip-us.apache.org/repos/asf/nifi-minifi/commit/05f516d3
Tree: http://git-wip-us.apache.org/repos/asf/nifi-minifi/tree/05f516d3
Diff: http://git-wip-us.apache.org/repos/asf/nifi-minifi/diff/05f516d3

Branch: refs/heads/master
Commit: 05f516d3da33a0547ccfad342414504cdacd4a68
Parents: 0dd5494
Author: Aldrin Piri 
Authored: Thu Jun 28 11:43:04 2018 -0400
Committer: Jeremy Dyer 
Committed: Thu Jun 28 12:17:27 2018 -0400

--
 .../minifi-provenance-reporting-nar/pom.xml  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/nifi-minifi/blob/05f516d3/minifi-nar-bundles/minifi-provenance-reporting-bundle/minifi-provenance-reporting-nar/pom.xml
--
diff --git 
a/minifi-nar-bundles/minifi-provenance-reporting-bundle/minifi-provenance-reporting-nar/pom.xml
 
b/minifi-nar-bundles/minifi-provenance-reporting-bundle/minifi-provenance-reporting-nar/pom.xml
index d2d038a..62b2046 100644
--- 
a/minifi-nar-bundles/minifi-provenance-reporting-bundle/minifi-provenance-reporting-nar/pom.xml
+++ 
b/minifi-nar-bundles/minifi-provenance-reporting-bundle/minifi-provenance-reporting-nar/pom.xml
@@ -60,7 +60,7 @@
 
 org.apache.nifi
 nifi-avro-record-utils
-1.7.0
+${org.apache.nifi.version}
 
 
 
@@ -106,4 +106,4 @@
 
 
 
-
\ No newline at end of file
+



[1/2] nifi-minifi git commit: MINIFI-460 Providing needed depenencies for the MiNiFi Provenance Reporting NAR to support record functionality introduced in NiFi reporting tasks.

2018-06-28 Thread jeremydyer
Repository: nifi-minifi
Updated Branches:
  refs/heads/master 7bac01ee1 -> 05f516d3d


MINIFI-460 Providing needed depenencies for the MiNiFi Provenance Reporting NAR 
to support record functionality introduced in NiFi reporting tasks.


Project: http://git-wip-us.apache.org/repos/asf/nifi-minifi/repo
Commit: http://git-wip-us.apache.org/repos/asf/nifi-minifi/commit/0dd5494e
Tree: http://git-wip-us.apache.org/repos/asf/nifi-minifi/tree/0dd5494e
Diff: http://git-wip-us.apache.org/repos/asf/nifi-minifi/diff/0dd5494e

Branch: refs/heads/master
Commit: 0dd5494e6971d5ee41948e372bd76fd80d0bb7d7
Parents: 7bac01e
Author: Aldrin Piri 
Authored: Thu Jun 28 11:28:07 2018 -0400
Committer: Aldrin Piri 
Committed: Thu Jun 28 11:28:07 2018 -0400

--
 .../minifi-provenance-reporting-nar/pom.xml | 26 
 1 file changed, 26 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/nifi-minifi/blob/0dd5494e/minifi-nar-bundles/minifi-provenance-reporting-bundle/minifi-provenance-reporting-nar/pom.xml
--
diff --git 
a/minifi-nar-bundles/minifi-provenance-reporting-bundle/minifi-provenance-reporting-nar/pom.xml
 
b/minifi-nar-bundles/minifi-provenance-reporting-bundle/minifi-provenance-reporting-nar/pom.xml
index 6a75b1b..d2d038a 100644
--- 
a/minifi-nar-bundles/minifi-provenance-reporting-bundle/minifi-provenance-reporting-nar/pom.xml
+++ 
b/minifi-nar-bundles/minifi-provenance-reporting-bundle/minifi-provenance-reporting-nar/pom.xml
@@ -36,6 +36,32 @@
 nifi-standard-services-api-nar
 nar
 
+
+org.apache.nifi
+nifi-record-serialization-service-api
+${org.apache.nifi.version}
+
+
+org.apache.nifi
+nifi-schema-registry-service-api
+${org.apache.nifi.version}
+
+
+org.apache.nifi
+nifi-record
+${org.apache.nifi.version}
+
+
+org.apache.avro
+avro
+1.8.1
+compile
+
+
+org.apache.nifi
+nifi-avro-record-utils
+1.7.0
+
 
 
 



[nifi-minifi] Git Push Summary

2018-06-26 Thread jeremydyer
Repository: nifi-minifi
Updated Tags:  refs/tags/nifi-minifi-0.5.0-RC1 [created] f5c15eb6e


[nifi-minifi] Git Push Summary

2018-06-26 Thread jeremydyer
Repository: nifi-minifi
Updated Branches:
  refs/heads/MINIFI-458-RC1 [created] 7bac01ee1


nifi-minifi git commit: MINIFI-450 Handling closing of HTTP response in PullHttpChangeIngestor.

2018-06-26 Thread jeremydyer
Repository: nifi-minifi
Updated Branches:
  refs/heads/master ec6227246 -> 7bac01ee1


MINIFI-450 Handling closing of HTTP response in PullHttpChangeIngestor.

This closes #129

Signed-off-by: Jeremy Dyer 


Project: http://git-wip-us.apache.org/repos/asf/nifi-minifi/repo
Commit: http://git-wip-us.apache.org/repos/asf/nifi-minifi/commit/7bac01ee
Tree: http://git-wip-us.apache.org/repos/asf/nifi-minifi/tree/7bac01ee
Diff: http://git-wip-us.apache.org/repos/asf/nifi-minifi/diff/7bac01ee

Branch: refs/heads/master
Commit: 7bac01ee1758da618974851af99c6dd72a5c4689
Parents: ec62272
Author: Aldrin Piri 
Authored: Mon Jun 4 14:18:15 2018 -0400
Committer: Jeremy Dyer 
Committed: Tue Jun 26 12:39:46 2018 -0400

--
 .../ingestors/PullHttpChangeIngestor.java   | 60 +---
 1 file changed, 27 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/nifi-minifi/blob/7bac01ee/minifi-bootstrap/src/main/java/org/apache/nifi/minifi/bootstrap/configuration/ingestors/PullHttpChangeIngestor.java
--
diff --git 
a/minifi-bootstrap/src/main/java/org/apache/nifi/minifi/bootstrap/configuration/ingestors/PullHttpChangeIngestor.java
 
b/minifi-bootstrap/src/main/java/org/apache/nifi/minifi/bootstrap/configuration/ingestors/PullHttpChangeIngestor.java
index f7add36..f363dcd 100644
--- 
a/minifi-bootstrap/src/main/java/org/apache/nifi/minifi/bootstrap/configuration/ingestors/PullHttpChangeIngestor.java
+++ 
b/minifi-bootstrap/src/main/java/org/apache/nifi/minifi/bootstrap/configuration/ingestors/PullHttpChangeIngestor.java
@@ -17,7 +17,6 @@
 
 package org.apache.nifi.minifi.bootstrap.configuration.ingestors;
 
-import okhttp3.Call;
 import okhttp3.Credentials;
 import okhttp3.HttpUrl;
 import okhttp3.OkHttpClient;
@@ -44,7 +43,6 @@ import javax.net.ssl.SSLSocketFactory;
 import javax.net.ssl.TrustManager;
 import javax.net.ssl.TrustManagerFactory;
 import javax.net.ssl.X509TrustManager;
-
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.IOException;
@@ -148,7 +146,7 @@ public class PullHttpChangeIngestor extends 
AbstractPullChangeIngestor {
 queryReference.set(query);
 
 final String useEtagString = (String) 
properties.getOrDefault(USE_ETAG_KEY, "false");
-if ("true".equalsIgnoreCase(useEtagString) || 
"false".equalsIgnoreCase(useEtagString)){
+if ("true".equalsIgnoreCase(useEtagString) || 
"false".equalsIgnoreCase(useEtagString)) {
 useEtag = Boolean.parseBoolean(useEtagString);
 } else {
 throw new IllegalArgumentException("Property, " + USE_ETAG_KEY + 
", to specify whether to use the ETag header, must either be a value boolean 
value (\"true\" or \"false\") or left to " +
@@ -156,7 +154,7 @@ public class PullHttpChangeIngestor extends 
AbstractPullChangeIngestor {
 }
 
 final String overrideSecurityProperties = (String) 
properties.getOrDefault(OVERRIDE_SECURITY, "false");
-if ("true".equalsIgnoreCase(overrideSecurityProperties) || 
"false".equalsIgnoreCase(overrideSecurityProperties)){
+if ("true".equalsIgnoreCase(overrideSecurityProperties) || 
"false".equalsIgnoreCase(overrideSecurityProperties)) {
 overrideSecurity = 
Boolean.parseBoolean(overrideSecurityProperties);
 } else {
 throw new IllegalArgumentException("Property, " + 
OVERRIDE_SECURITY + ", to specify whether to override security properties must 
either be a value boolean value (\"true\" or \"false\")" +
@@ -222,36 +220,31 @@ public class PullHttpChangeIngestor extends 
AbstractPullChangeIngestor {
 
 @Override
 public void run() {
-try {
-logger.debug("Attempting to pull new config");
-HttpUrl.Builder builder = new HttpUrl.Builder()
-.host(hostReference.get())
-.port(portReference.get())
-.encodedPath(pathReference.get());
-String query = queryReference.get();
-if (!StringUtil.isNullOrEmpty(query)) {
-builder = builder.encodedQuery(query);
-}
-final HttpUrl url = builder
-.scheme(connectionScheme)
-.build();
-
-
-final Request.Builder requestBuilder = new Request.Builder()
-.get()
-.url(url);
-
-if (useEtag) {
-requestBuilder.addHeader("If-None-Match", lastEtag);
-}
+logger.debug("Attempting to pull new config");
+HttpUrl.Builder builder = new HttpUrl.Builder()
+.host(hostReference.get())
+.port(portReference.get())
+.encodedPath(pathReference.get());
+final String query = queryReference.get();
+if 

[nifi-minifi-cpp] Git Push Summary

2018-06-06 Thread jeremydyer
Repository: nifi-minifi-cpp
Updated Tags:  refs/tags/rel/minifi-cpp-0.5.0 [created] 7d8442a73


nifi-site git commit: MINIFICPP-519: Release Apache NiFi MiNiFi CPP 0.5.0

2018-06-06 Thread jeremydyer
Repository: nifi-site
Updated Branches:
  refs/heads/master 8bd32db00 -> 6cf39fd95


MINIFICPP-519: Release Apache NiFi MiNiFi CPP 0.5.0


Project: http://git-wip-us.apache.org/repos/asf/nifi-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/nifi-site/commit/6cf39fd9
Tree: http://git-wip-us.apache.org/repos/asf/nifi-site/tree/6cf39fd9
Diff: http://git-wip-us.apache.org/repos/asf/nifi-site/diff/6cf39fd9

Branch: refs/heads/master
Commit: 6cf39fd95313869b292bbf8b873587586756b581
Parents: 8bd32db
Author: Jeremy Dyer 
Authored: Wed Jun 6 12:47:48 2018 -0400
Committer: Jeremy Dyer 
Committed: Wed Jun 6 12:47:48 2018 -0400

--
 src/pages/html/minifi/download.hbs | 86 -
 1 file changed, 41 insertions(+), 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/nifi-site/blob/6cf39fd9/src/pages/html/minifi/download.hbs
--
diff --git a/src/pages/html/minifi/download.hbs 
b/src/pages/html/minifi/download.hbs
index e8c2546..79a916b 100644
--- a/src/pages/html/minifi/download.hbs
+++ b/src/pages/html/minifi/download.hbs
@@ -95,6 +95,47 @@ title: Apache NiFi - MiNiFi Downloads
 
 MiNiFi C++
 
+  cpp-0.5.0
+  
+  
+  Sources:
+  
+  https://www.apache.org/dyn/closer.lua?path=/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-source.tar.gz;>nifi-minifi-cpp-0.5.0-source.tar.gz
+( https://www.apache.org/dist/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-source.tar.gz.asc;>asc,
+  https://www.apache.org/dist/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-source.tar.gz.sha1;>sha1,
+  https://www.apache.org/dist/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-source.tar.gz.sha256;>sha256
 )
+  
+  
+  
+  
+  Binaries
+  
+  Linux - RHEL Based Distributions - x86_64
+  https://www.apache.org/dyn/closer.lua?path=/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-linux-rhel.tar.gz;>nifi-minifi-cpp-0.5.0-bin-linux-rhel.tar.gz
+( https://www.apache.org/dist/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-linux-rhel.tar.gz.asc;>asc,
+  https://www.apache.org/dist/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-linux-rhel.tar.gz.sha1;>sha1,
+  https://www.apache.org/dist/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-linux-rhel.tar.gz.sha256;>sha256
 )
+  
+  Linux - Debian Based Distributions - x86_64
+  https://www.apache.org/dyn/closer.lua?path=/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-linux-debian.tar.gz;>nifi-minifi-cpp-0.5.0-bin-linux-debian.tar.gz
+( https://www.apache.org/dist/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-linux-debian.tar.gz.asc;>asc,
+  https://www.apache.org/dist/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-linux-debian.tar.gz.sha1;>sha1,
+  https://www.apache.org/dist/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-linux-debian.tar.gz.sha256;>sha256
 )
+  
+  Darwin (OS X)
+  https://www.apache.org/dyn/closer.lua?path=/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-darwin.tar.gz;>nifi-minifi-cpp-0.5.0-bin-darwin.tar.gz
+( https://www.apache.org/dist/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-darwin.tar.gz.asc;>asc,
+  https://www.apache.org/dist/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-darwin.tar.gz.sha1;>sha1,
+  https://www.apache.org/dist/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-darwin.tar.gz.sha256;>sha256
 )
+  
+  
+  
+
+  https://cwiki.apache.org/confluence/display/MINIFI/Release+Notes#ReleaseNotes-Versioncpp-0.5.0;>Release
 Notes
+  
+  
+
+
   cpp-0.4.0
   
   
@@ -142,51 +183,6 @@ title: Apache NiFi - MiNiFi Downloads
   
   
 
-
-  cpp-0.3.0
-  
-  
-  Sources:
-  
-  https://www.apache.org/dyn/closer.lua?path=/nifi/nifi-minifi-cpp/0.3.0/nifi-minifi-cpp-0.3.0-source.tar.gz;>nifi-minifi-cpp-0.3.0-source.tar.gz
-   

svn commit: r1833051 - /nifi/site/trunk/minifi/download.html

2018-06-06 Thread jeremydyer
Author: jeremydyer
Date: Wed Jun  6 16:18:29 2018
New Revision: 1833051

URL: http://svn.apache.org/viewvc?rev=1833051=rev
Log:
MINIFICPP-519 updated download links for Apache NiFi MiNiFi CPP 0.5.0

Modified:
nifi/site/trunk/minifi/download.html

Modified: nifi/site/trunk/minifi/download.html
URL: 
http://svn.apache.org/viewvc/nifi/site/trunk/minifi/download.html?rev=1833051=1833050=1833051=diff
==
--- nifi/site/trunk/minifi/download.html (original)
+++ nifi/site/trunk/minifi/download.html Wed Jun  6 16:18:29 2018
@@ -194,6 +194,47 @@
 
 MiNiFi C++
 
+  cpp-0.5.0
+  
+  
+  Sources:
+  
+  https://www.apache.org/dyn/closer.lua?path=/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-source.tar.gz;>nifi-minifi-cpp-0.5.0-source.tar.gz
+( https://www.apache.org/dist/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-source.tar.gz.asc;>asc,
+  https://www.apache.org/dist/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-source.tar.gz.sha1;>sha1,
+  https://www.apache.org/dist/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-source.tar.gz.sha256;>sha256
 )
+  
+  
+  
+  
+  Binaries
+  
+  Linux - RHEL Based Distributions - x86_64
+  https://www.apache.org/dyn/closer.lua?path=/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-linux-rhel.tar.gz;>nifi-minifi-cpp-0.5.0-bin-linux-rhel.tar.gz
+( https://www.apache.org/dist/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-linux-rhel.tar.gz.asc;>asc,
+  https://www.apache.org/dist/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-linux-rhel.tar.gz.sha1;>sha1,
+  https://www.apache.org/dist/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-linux-rhel.tar.gz.sha256;>sha256
 )
+  
+  Linux - Debian Based Distributions - x86_64
+  https://www.apache.org/dyn/closer.lua?path=/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-linux-debian.tar.gz;>nifi-minifi-cpp-0.5.0-bin-linux-debian.tar.gz
+( https://www.apache.org/dist/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-linux-debian.tar.gz.asc;>asc,
+  https://www.apache.org/dist/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-linux-debian.tar.gz.sha1;>sha1,
+  https://www.apache.org/dist/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-linux-debian.tar.gz.sha256;>sha256
 )
+  
+  Darwin (OS X)
+  https://www.apache.org/dyn/closer.lua?path=/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-darwin.tar.gz;>nifi-minifi-cpp-0.5.0-bin-darwin.tar.gz
+( https://www.apache.org/dist/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-darwin.tar.gz.asc;>asc,
+  https://www.apache.org/dist/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-darwin.tar.gz.sha1;>sha1,
+  https://www.apache.org/dist/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-darwin.tar.gz.sha256;>sha256
 )
+  
+  
+  
+
+  https://cwiki.apache.org/confluence/display/MINIFI/Release+Notes#ReleaseNotes-Versioncpp-0.5.0;>Release
 Notes
+  
+  
+
+
   cpp-0.4.0
   
   
@@ -241,51 +282,6 @@
   
   
 
-
-  cpp-0.3.0
-  
-  
-  Sources:
-  
-  https://www.apache.org/dyn/closer.lua?path=/nifi/nifi-minifi-cpp/0.3.0/nifi-minifi-cpp-0.3.0-source.tar.gz;>nifi-minifi-cpp-0.3.0-source.tar.gz
-( https://www.apache.org/dist/nifi/nifi-minifi-cpp/0.3.0/nifi-minifi-cpp-0.3.0-source.tar.gz.asc;>asc,
-  https://www.apache.org/dist/nifi/nifi-minifi-cpp/0.3.0/nifi-minifi-cpp-0.3.0-source.tar.gz.md5;>md5,
-  https://www.apache.org/dist/nifi/nifi-minifi-cpp/0.3.0/nifi-minifi-cpp-0.3.0-source.tar.gz.sha1;>sha1,
-  https://www.apache.org/dist/nifi/nifi-minifi-cpp/0.3.0/nifi-minifi-cpp-0.3.0-source.tar.gz.sha256;>sha256
 )
-  
-  
-  
-  
-  Binaries
-  
- 

svn commit: r27204 - in /dev/nifi/nifi-minifi-cpp/0.5.0: nifi-minifi-cpp-0.5.0-bin-darwin.tar.gz.md5 nifi-minifi-cpp-0.5.0-source.tar.gz.md5

2018-05-31 Thread jeremydyer
Author: jeremydyer
Date: Fri Jun  1 01:25:57 2018
New Revision: 27204

Log:
updated bad md5 file

Modified:
dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-darwin.tar.gz.md5
dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-source.tar.gz.md5

Modified: 
dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-darwin.tar.gz.md5
==
--- dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-darwin.tar.gz.md5 
(original)
+++ dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-darwin.tar.gz.md5 
Fri Jun  1 01:25:57 2018
@@ -1 +1 @@
-MD5
+e498f93eefd1d3ddb97e4383be25f8b3

Modified: dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-source.tar.gz.md5
==
--- dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-source.tar.gz.md5 
(original)
+++ dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-source.tar.gz.md5 Fri 
Jun  1 01:25:57 2018
@@ -1 +1 @@
-MD5
+9ec230b9ac3004981000276015860c52




svn commit: r27203 - /dev/nifi/KEYS

2018-05-31 Thread jeremydyer
Author: jeremydyer
Date: Fri Jun  1 00:24:43 2018
New Revision: 27203

Log:
Added key to KEYS file for Jeremy Dyer

Modified:
dev/nifi/KEYS

Modified: dev/nifi/KEYS
==
--- dev/nifi/KEYS (original)
+++ dev/nifi/KEYS Fri Jun  1 00:24:43 2018
@@ -1205,3 +1205,62 @@ P+fxbdKgI+aX98GEf1M/nOxE1e+JM72QvYYmIzsP
 wlD9UcFgm7oY2wqz8LwAyhQoSv0xtV9r4kc29iZfTMzurydNilsNU6B5orXr
 =dVDp
 -END PGP PUBLIC KEY BLOCK-
+pub   rsa4096 2018-05-30 [SC]
+  50AA60AD5D58311187B0BEB5C6E550DA6B295AD5
+uid   [ultimate] Jeremy Dyer (CODE SIGNING KEY) 
+sig 3C6E550DA6B295AD5 2018-05-30  Jeremy Dyer (CODE SIGNING KEY) 

+sub   rsa4096 2018-05-30 [E]
+sig  C6E550DA6B295AD5 2018-05-30  Jeremy Dyer (CODE SIGNING KEY) 

+
+-BEGIN PGP PUBLIC KEY BLOCK-
+
+mQINBFsPAk8BEACRAvRODT9U5sCFs/xQskdll6REVnhwTtwqMSKi7iG0doUwmN5o
+ScirN1R8kFER9uGhC6J6S1ReqHMfdPO1Ul1GTGN9LMNBVzxPr6esq7Ut6ZzPuc5F
+nmtHIsly36pNXy/MJExMZj0WgiFgE8CoWljfIiiURvHAxA3dT0VD5a1TajWoIqmP
+AOmXqalQX9tf1SIrFJ8dWj15MvsX854revc6qK1gr1yA/kabhRPngRTlxcP54oyE
+hHr9twpVqKrGYSZ8WL0yqm2ZLKt4jihuRPd3Ys7g6erwcl+dUVBCaXskdAwcSMWo
+5E11KqGyyP3Wukp+2P6Y7LTsD1zuBdcC4q/oinQKJrOyytagMBNQbRgTfyanje3H
+4fcuxmeKbQo0tIGOq5YLMLSZNZmTVFrqfABrK+AMohN4Mucoibmi0yFxWfEJACXP
+8CtwH9Oxv1v0KRbWwEfcWEJSCYDoORZCawK89ABMpc/ConRsJcAyCAdDXqWCYOVG
+u0jcjgCfQkWlMD3fMeW98obSkQ+0bDu+WGXAt6Z/oaoyHodcWcN4wuKExCwMAaoA
+IdpDjHtHySgQ+kd1G6apNgSv7m5J7gx9dgUUe4lJI+gG9hb4Bt/3y5ypEnEt8Pj/
+zd2PI8FzPHVUC5UI6RybxN755OQA76JzchrADwQNFGXLpCCVje3SWfK5bwARAQAB
+tDZKZXJlbXkgRHllciAoQ09ERSBTSUdOSU5HIEtFWSkgPGplcmVteWR5ZXJAYXBh
+Y2hlLm9yZz6JAk4EEwEIADgWIQRQqmCtXVgxEYewvrXG5VDaayla1QUCWw8CTwIb
+AwULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRDG5VDaayla1UldD/41koIk215a
+wg/l3JivW5PC7/HN89a4PoFAp+IXpocQi8zud67HEu2v6gM8MgDWUF+/+0qOO/GY
+GpWoFpGEtivRAJoSkX0VYuAemLdOB7sEbjwHSBpRQz1jaKUzMjgBVmUbcu6/ROQL
+FVpj3z/wm+/UD3nMpF/WKRk/05uAfyW1qVE4l9M3U81jhWcARVPPIRmLoJ64d92W
+RdJGDF1wXBwO6FPPvNJLK01jdcNJrh3YEtHdtL7NB4fdn6SR8Thdz8ua0PTK/68f
+/8URkUX/F9Q8+iSZKQkPRx59f5gNnAUhZhu433Z95SnHMoRJrew/jdE2NR6+nIc/
+aDvK4d67JL7zZ79HI4LNtkWObsARL27eJ2TTIQq8hHAEwxjeLEdxSaDTDdhiOkYs
+2g7Xd+NYwd4Z1TFE3awsH1ooWso/I91j7GX3uqlfMH5FCN0XRJvxNAXxgbokjs53
+xAdI+Wh0jnpfG+jWYEZcWrUMAbE0XYVUEKww8VMrRHnFRmr2euJ7dqKU0n9pMABt
+avYR9yY+3KhQ6AZNwFWHzeIB7E/swuzqTY9VvgOKScPmqXo/VCuOx5QPv3Q6Qq5o
+GYaHq+ipxwzBy1Beik6JBX4QCByIzEhQfEGjmov60o7XxW4OB+1++lH1ToZa+tal
+6CuFDdvgSG5hm5+QzEwMm0uOqajQxkDHaLkCDQRbDwJPARAA2CHyu/DFGKtXQ0Uh
+HGyQD4ZjlR/qqx97J4fOwthRCV6zWho6ztJ2YwPACd50DbqQVPhMZXipNZ20hivn
+4/eUiFoMXJD9T54G4MWFhzDVE9GaH50hDGDcWb4CmDekoAFPGMrfK7MkHUOcZgJq
+5lVsDwmXCmm4UOXX9S4Tldb790J7AwRk8b/ysy5h4t/5jNctJeOPmFT/TIuxlHQt
+L0mryHhWkIVCgCWueimJGqYR9Ouc7o5djq//Xt2ON2HZyhIpHN152ouNHBT1b6La
+zOL2Y8RDe2iVAsyg9SF7Vh1AWMGgTWwVxEQOMuf/q6YUJvk4SYlnMVXJONda6bG4
+qlsvkRxrAAeWM55ZDuHbLRs/zR9mnZXvvZJ5AyvShm0wtfAjjTWd7qBf4gX3TB0Y
+3LkMOkR5HSlD+xJW6WhmB4udqzdwv1hMI3WIX5iQdwXex+KYXNxTSsuLgCKVd5wo
+BC1gkmPBPkDuhhQg68cAFptDUao5lCXPLfCRs69Ao7wxD5JuLXPlyUiDmsP8pXuU
+viArCI4biwms/ooMJaB53rxiSuLxf1GHMN69hJ/6gvkzgwblQ7aYdalfgjsj+mJA
+VYwnlapH+vXKuM2gkFXRK0d9js6x/xrSNYJCbPY06GbsklzcZnW77s9/TQWoi08a
+auEkS1ptZQ9mLjtpV+zVHyn29ckAEQEAAYkCNgQYAQgAIBYhBFCqYK1dWDERh7C+
+tcblUNprKVrVBQJbDwJPAhsMAAoJEMblUNprKVrVJpIP/2mSNmH5gW5mm1Efpp85
+0jqTvEUhMta5qUBzJSrdqpSJmVWK59PUUkfD4gIlDbcwBHEikR8RfvMZD2iubwtF
+LEBaXaDAvV62elotKMQbNlS4G0j0vH2wQzzLWyrZZ9yvYQ4Z7BBdPHQFex8O4mST
+OZhBhUZUz567goB2gkAxLyzcWSIOkT/LYKM7Hwvfl3mfFq4HeDy9B0TD1DxUdwGC
+twA9L0oesNduhjeQjzrhqUexZN5Hq5YrczbFFoUXzCiI31x/Bo621Vg+t8xfYPgO
+KuBYm2m0rMCOopUw6A6bEoc97BFsQmd/9Ck3iO2glK8b2e1Og9dJbCxv/tFnyjJU
+dBym6rRWCD/do8zDCUN5ubrwnhuXFoy2rRiCg3cSX2G2361hAXwsC/jHzYkd847h
+1uCd80EB8cEXSaWxRp5TMd3lfcKqswcV1S1ceyhdRuQ1aqMdLC8KVDt5oF1fJgwo
+U5fBzgzoHowSb6GY+buf5h2BFPswIBY3j5Qsv8ph7EcFEd+j3uah1fMZMWqtZEPM
+y8gVDvB5hp0vezRp/ql7W56AsVi2DExcGB6fekxmDwyGjq5hV6QnaE3hdlOaT6JG
+5BugXPuDKJm67KxyVEOQgGqMXnj2d8j76t6mydZVuM9Tc6Wr3AkJMAF5k2bUMzkJ
++o6lD0oc0cjNpxCbOkGK01pQ
+=+4cC
+-END PGP PUBLIC KEY BLOCK-




[nifi-minifi-cpp] Git Push Summary

2018-05-31 Thread jeremydyer
Repository: nifi-minifi-cpp
Updated Tags:  refs/tags/minifi-cpp-0.5.0-RC1 [created] 5f3b3973e


svn commit: r27202 - /dev/nifi/nifi-minifi-cpp/0.5.0/

2018-05-31 Thread jeremydyer
Author: jeremydyer
Date: Thu May 31 23:19:54 2018
New Revision: 27202

Log:
Apache NiFi MiNiFi 0.5.0 RC1

Added:
dev/nifi/nifi-minifi-cpp/0.5.0/
dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-darwin.tar.gz   
(with props)
dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-darwin.tar.gz.asc  
 (with props)
dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-darwin.tar.gz.md5
dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-darwin.tar.gz.sha1

dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-darwin.tar.gz.sha256

dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-linux-debian.tar.gz   
(with props)

dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-linux-debian.tar.gz.asc
   (with props)

dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-linux-debian.tar.gz.md5

dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-linux-debian.tar.gz.sha1

dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-linux-debian.tar.gz.sha256
dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-linux-rhel.tar.gz  
 (with props)

dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-linux-rhel.tar.gz.asc  
 (with props)

dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-linux-rhel.tar.gz.md5

dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-linux-rhel.tar.gz.sha1

dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-linux-rhel.tar.gz.sha256
dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-source.tar.gz   (with 
props)
dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-source.tar.gz.asc   
(with props)
dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-source.tar.gz.md5
dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-source.tar.gz.sha1
dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-source.tar.gz.sha256

Added: dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-darwin.tar.gz
==
Binary file - no diff available.

Propchange: 
dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-darwin.tar.gz
--
svn:mime-type = application/x-gzip

Added: 
dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-darwin.tar.gz.asc
==
Binary file - no diff available.

Propchange: 
dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-darwin.tar.gz.asc
--
svn:mime-type = application/pgp-signature

Added: 
dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-darwin.tar.gz.md5
==
--- dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-darwin.tar.gz.md5 
(added)
+++ dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-darwin.tar.gz.md5 
Thu May 31 23:19:54 2018
@@ -0,0 +1 @@
+MD5

Added: 
dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-darwin.tar.gz.sha1
==
--- dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-darwin.tar.gz.sha1 
(added)
+++ dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-darwin.tar.gz.sha1 
Thu May 31 23:19:54 2018
@@ -0,0 +1 @@
+7c560a5ae18cef1cc69e5267c80edf7a127f46b3

Added: 
dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-darwin.tar.gz.sha256
==
--- 
dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-darwin.tar.gz.sha256 
(added)
+++ 
dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-darwin.tar.gz.sha256 
Thu May 31 23:19:54 2018
@@ -0,0 +1 @@
+f3500e69483ef26374b797df84ed531deeea6d474b12af361b9bd05b6ef8d31a

Added: 
dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-linux-debian.tar.gz
==
Binary file - no diff available.

Propchange: 
dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-linux-debian.tar.gz
--
svn:mime-type = application/x-gzip

Added: 
dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-linux-debian.tar.gz.asc
==
Binary file - no diff available.

Propchange: 
dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-linux-debian.tar.gz.asc
--
svn:mime-type = application/pgp-signature

Added: 
dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin-linux-debian.tar.gz.md5
==
--- 
dev/nifi/nifi-minifi-cpp/0.5.0/nifi-minifi-cpp-0.5.0-bin

[nifi-minifi-cpp] Git Push Summary

2018-05-31 Thread jeremydyer
Repository: nifi-minifi-cpp
Updated Branches:
  refs/heads/MINIFICPP-519-RC1-0.5.0 [created] 5f3b3973e


nifi git commit: Corrected Docker tag back to standard format Modified Docker startup scripts to accept Environment variables if available, or use the usual defaults if not. Updated readme to reflect

2018-01-31 Thread jeremydyer
Repository: nifi
Updated Branches:
  refs/heads/master dc67bd2fd -> e74c67f77


Corrected Docker tag back to standard format
Modified Docker startup scripts to accept Environment variables if available, 
or use the usual defaults if not.
Updated readme to reflect changes in allowing environment variables.
Updated readme to correctly reflect the default ports that NiFi will run on.

This closes #2439

Signed-off-by: Jeremy Dyer 


Project: http://git-wip-us.apache.org/repos/asf/nifi/repo
Commit: http://git-wip-us.apache.org/repos/asf/nifi/commit/e74c67f7
Tree: http://git-wip-us.apache.org/repos/asf/nifi/tree/e74c67f7
Diff: http://git-wip-us.apache.org/repos/asf/nifi/diff/e74c67f7

Branch: refs/heads/master
Commit: e74c67f77965b8c0cd1bea4d1f1924b2dcebfa07
Parents: dc67bd2
Author: dchaffey 
Authored: Sun Jan 28 18:23:58 2018 +
Committer: Jeremy Dyer 
Committed: Wed Jan 31 15:50:42 2018 -0500

--
 nifi-docker/dockerhub/DockerImage.txt |  2 +-
 nifi-docker/dockerhub/README.md   | 23 +--
 nifi-docker/dockerhub/sh/secure.sh|  4 ++--
 nifi-docker/dockerhub/sh/start.sh |  8 
 4 files changed, 24 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/nifi/blob/e74c67f7/nifi-docker/dockerhub/DockerImage.txt
--
diff --git a/nifi-docker/dockerhub/DockerImage.txt 
b/nifi-docker/dockerhub/DockerImage.txt
index 01bc5e6..ea34860 100644
--- a/nifi-docker/dockerhub/DockerImage.txt
+++ b/nifi-docker/dockerhub/DockerImage.txt
@@ -13,4 +13,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-apachenifi:1.6.0
+apache/nifi:1.6.0

http://git-wip-us.apache.org/repos/asf/nifi/blob/e74c67f7/nifi-docker/dockerhub/README.md
--
diff --git a/nifi-docker/dockerhub/README.md b/nifi-docker/dockerhub/README.md
index d87765e..4d9a559 100644
--- a/nifi-docker/dockerhub/README.md
+++ b/nifi-docker/dockerhub/README.md
@@ -46,12 +46,22 @@ The configuration scripts are suitable for at least 1.4.0+.
 The minimum to run a NiFi instance is as follows:
 
 docker run --name nifi \
-  -p 18080:8080 \
+  -p 8080:8080 \
   -d \
   apache/nifi:latest
   
-This will provide a running instance, exposing the instance UI to the host 
system on at port 18080,
-viewable at `http://localhost:18080/nifi`.
+This will provide a running instance, exposing the instance UI to the host 
system on at port 8080,
+viewable at `http://localhost:8080/nifi`.
+
+You can also pass in environment variables to change the NiFi communication 
ports and hostname using the Docker '-e' switch as follows:
+
+docker run --name nifi \
+  -p 9090:9090 \
+  -d \
+  -e NIFI_WEB_HTTP_PORT='9090'
+  apache/nifi:latest
+  
+For a list of the environment variables recognised in this build, look into 
the .sh/secure.sh and .sh/start.sh scripts
 
 ### Standalone Instance, Two-Way SSL
 In this configuration, the user will need to provide certificates and the 
associated configuration information.
@@ -62,7 +72,7 @@ Finally, this command makes use of a volume to provide 
certificates on the host
 
 docker run --name nifi \
   -v /User/dreynolds/certs/localhost:/opt/certs \
-  -p 18443:8443 \
+  -p 8443:8443 \
   -e AUTH=tls \
   -e KEYSTORE_PATH=/opt/certs/keystore.jks \
   -e KEYSTORE_TYPE=JKS \
@@ -86,7 +96,7 @@ volume to provide certificates on the host system to the 
container instance.
 
 docker run --name nifi \
   -v /User/dreynolds/certs/localhost:/opt/certs \
-  -p 18443:8443 \
+  -p 8443:8443 \
   -e AUTH=tls \
   -e KEYSTORE_PATH=/opt/certs/keystore.jks \
   -e KEYSTORE_TYPE=JKS \
@@ -115,7 +125,7 @@ volume to provide certificates on the host system to the 
container instance.
 -e LDAP_TLS_TRUSTSTORE_TYPE: ''
 
 ## Configuration Information
-The following ports are specified by the Docker container for NiFi operation 
within the container and 
+The following ports are specified by default in Docker for NiFi operation 
within the container and 
 can be published to the host.
 
 | Function | Property  | Port  |
@@ -123,3 +133,4 @@ can be published to the host.
 | HTTP Port| nifi.web.http.port| 8080  |
 | HTTPS Port   | nifi.web.https.port   | 8443  |
 | Remote Input Socket Port | nifi.remote.input.socket.port | 1 |
+

http://git-wip-us.apache.org/repos/asf/nifi/blob/e74c67f7/nifi-docker/dockerhub/sh/secure.sh
--
diff --git a/nifi-docker/dockerhub/sh/secure.sh 

nifi git commit: NIFI-4747 - Removed directory existence check in GetHDFS

2018-01-31 Thread jeremydyer
Repository: nifi
Updated Branches:
  refs/heads/master 6e7dfb993 -> dc67bd2fd


NIFI-4747 - Removed directory existence check in GetHDFS

This closes #2391

Signed-off-by: Jeremy Dyer 


Project: http://git-wip-us.apache.org/repos/asf/nifi/repo
Commit: http://git-wip-us.apache.org/repos/asf/nifi/commit/dc67bd2f
Tree: http://git-wip-us.apache.org/repos/asf/nifi/tree/dc67bd2f
Diff: http://git-wip-us.apache.org/repos/asf/nifi/diff/dc67bd2f

Branch: refs/heads/master
Commit: dc67bd2fdd762e48075dfa5edbe1a427025c6576
Parents: 6e7dfb9
Author: Pierre Villard 
Authored: Tue Jan 9 17:45:51 2018 +0100
Committer: Jeremy Dyer 
Committed: Wed Jan 31 12:01:17 2018 -0500

--
 .../apache/nifi/processors/hadoop/GetHDFS.java  | 21 ++--
 .../nifi/processors/hadoop/GetHDFSTest.java | 11 ++
 2 files changed, 21 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/nifi/blob/dc67bd2f/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/java/org/apache/nifi/processors/hadoop/GetHDFS.java
--
diff --git 
a/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/java/org/apache/nifi/processors/hadoop/GetHDFS.java
 
b/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/java/org/apache/nifi/processors/hadoop/GetHDFS.java
index 1aefc75..27b7375 100644
--- 
a/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/java/org/apache/nifi/processors/hadoop/GetHDFS.java
+++ 
b/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/java/org/apache/nifi/processors/hadoop/GetHDFS.java
@@ -238,11 +238,6 @@ public class GetHDFS extends AbstractHadoopProcessor {
 abstractOnScheduled(context);
 // copy configuration values to pass them around cleanly
 processorConfig = new ProcessorConfiguration(context);
-final FileSystem fs = getFileSystem();
-final Path dir = new 
Path(context.getProperty(DIRECTORY).evaluateAttributeExpressions().getValue());
-if (!fs.exists(dir)) {
-throw new IOException("PropertyDescriptor " + DIRECTORY + " has 
invalid value " + dir + ". The directory does not exist.");
-}
 
 // forget the state of the queue in case HDFS contents changed while 
this processor was turned off
 queueLock.lock();
@@ -422,8 +417,16 @@ public class GetHDFS extends AbstractHadoopProcessor {
 if (System.currentTimeMillis() >= nextPollTime && 
listingLock.tryLock()) {
 try {
 final FileSystem hdfs = getFileSystem();
-// get listing
-listing = selectFiles(hdfs, new 
Path(context.getProperty(DIRECTORY).evaluateAttributeExpressions().getValue()), 
null);
+final Path directoryPath = new 
Path(context.getProperty(DIRECTORY).evaluateAttributeExpressions().getValue());
+
+if (!hdfs.exists(directoryPath)) {
+context.yield();
+getLogger().warn("The directory {} does not exist.", new 
Object[]{directoryPath});
+} else {
+// get listing
+listing = selectFiles(hdfs, directoryPath, null);
+}
+
 lastPollTime.set(System.currentTimeMillis());
 } finally {
 listingLock.unlock();
@@ -447,10 +450,6 @@ public class GetHDFS extends AbstractHadoopProcessor {
 filesVisited = new HashSet<>();
 }
 
-if (!hdfs.exists(dir)) {
-throw new IOException("Selection directory " + dir.toString() + " 
doesn't appear to exist!");
-}
-
 final Set files = new HashSet<>();
 
 FileStatus[] fileStatuses = 
getUserGroupInformation().doAs((PrivilegedExceptionAction) () -> 
hdfs.listStatus(dir));

http://git-wip-us.apache.org/repos/asf/nifi/blob/dc67bd2f/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/test/java/org/apache/nifi/processors/hadoop/GetHDFSTest.java
--
diff --git 
a/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/test/java/org/apache/nifi/processors/hadoop/GetHDFSTest.java
 
b/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/test/java/org/apache/nifi/processors/hadoop/GetHDFSTest.java
index 40666d9..d3837a8 100644
--- 
a/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/test/java/org/apache/nifi/processors/hadoop/GetHDFSTest.java
+++ 
b/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/test/java/org/apache/nifi/processors/hadoop/GetHDFSTest.java
@@ -143,6 +143,17 @@ public class GetHDFSTest {
 }
 
 @Test
+public void testDirectoryDoesNotExist() {
+

nifi git commit: NIFI-4823 Made pretty printing configurable in GetMongo.

2018-01-31 Thread jeremydyer
Repository: nifi
Updated Branches:
  refs/heads/master d8cfb8e6c -> 6e7dfb993


NIFI-4823 Made pretty printing configurable in GetMongo.

This closes #2441

Signed-off-by: Jeremy Dyer 


Project: http://git-wip-us.apache.org/repos/asf/nifi/repo
Commit: http://git-wip-us.apache.org/repos/asf/nifi/commit/6e7dfb99
Tree: http://git-wip-us.apache.org/repos/asf/nifi/tree/6e7dfb99
Diff: http://git-wip-us.apache.org/repos/asf/nifi/diff/6e7dfb99

Branch: refs/heads/master
Commit: 6e7dfb9935622a5215115df93503ef4bfba75949
Parents: d8cfb8e
Author: Mike Thomsen 
Authored: Mon Jan 29 06:44:14 2018 -0500
Committer: Jeremy Dyer 
Committed: Wed Jan 31 11:34:31 2018 -0500

--
 .../nifi/processors/mongodb/GetMongo.java   | 46 ++--
 .../nifi/processors/mongodb/GetMongoTest.java   | 21 +
 2 files changed, 53 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/nifi/blob/6e7dfb99/nifi-nar-bundles/nifi-mongodb-bundle/nifi-mongodb-processors/src/main/java/org/apache/nifi/processors/mongodb/GetMongo.java
--
diff --git 
a/nifi-nar-bundles/nifi-mongodb-bundle/nifi-mongodb-processors/src/main/java/org/apache/nifi/processors/mongodb/GetMongo.java
 
b/nifi-nar-bundles/nifi-mongodb-bundle/nifi-mongodb-processors/src/main/java/org/apache/nifi/processors/mongodb/GetMongo.java
index 8988245..5fa4550 100644
--- 
a/nifi-nar-bundles/nifi-mongodb-bundle/nifi-mongodb-processors/src/main/java/org/apache/nifi/processors/mongodb/GetMongo.java
+++ 
b/nifi-nar-bundles/nifi-mongodb-bundle/nifi-mongodb-processors/src/main/java/org/apache/nifi/processors/mongodb/GetMongo.java
@@ -19,6 +19,7 @@
 package org.apache.nifi.processors.mongodb;
 
 import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.ObjectWriter;
 import com.mongodb.client.FindIterable;
 import com.mongodb.client.MongoCollection;
 import com.mongodb.client.MongoCursor;
@@ -126,6 +127,19 @@ public class GetMongo extends AbstractMongoProcessor {
 .addValidator(StandardValidators.POSITIVE_INTEGER_VALIDATOR)
 .build();
 
+static final AllowableValue YES_PP = new AllowableValue("true", "True");
+static final AllowableValue NO_PP  = new AllowableValue("false", "False");
+static final PropertyDescriptor USE_PRETTY_PRINTING = new 
PropertyDescriptor.Builder()
+.name("use-pretty-printing")
+.displayName("Pretty Print Results JSON")
+.description("Choose whether or not to pretty print the JSON from the 
results of the query. " +
+"Choosing yes can greatly increase the space requirements on 
disk depending on the complexity of the JSON document")
+.required(true)
+.defaultValue(YES_PP.getValue())
+.allowableValues(YES_PP, NO_PP)
+.addValidator(Validator.VALID)
+.build();
+
 static final String JSON_TYPE_EXTENDED = "Extended";
 static final String JSON_TYPE_STANDARD   = "Standard";
 static final AllowableValue JSON_EXTENDED = new 
AllowableValue(JSON_TYPE_EXTENDED, "Extended JSON",
@@ -151,6 +165,7 @@ public class GetMongo extends AbstractMongoProcessor {
 List _propertyDescriptors = new ArrayList<>();
 _propertyDescriptors.addAll(descriptors);
 _propertyDescriptors.add(JSON_TYPE);
+_propertyDescriptors.add(USE_PRETTY_PRINTING);
 _propertyDescriptors.add(QUERY);
 _propertyDescriptors.add(PROJECTION);
 _propertyDescriptors.add(SORT);
@@ -179,13 +194,13 @@ public class GetMongo extends AbstractMongoProcessor {
 private ObjectMapper mapper;
 
 //Turn a list of Mongo result documents into a String representation of a 
JSON array
-private String buildBatch(List documents, String 
jsonTypeSetting) throws IOException {
+private String buildBatch(List documents, String 
jsonTypeSetting, String prettyPrintSetting) throws IOException {
 StringBuilder builder = new StringBuilder();
 for (int index = 0; index < documents.size(); index++) {
 Document document = documents.get(index);
 String asJson;
 if (jsonTypeSetting.equals(JSON_TYPE_STANDARD)) {
-asJson = 
mapper.writerWithDefaultPrettyPrinter().writeValueAsString(document);
+asJson = getObjectWriter(mapper, 
prettyPrintSetting).writeValueAsString(document);
 } else {
 asJson = document.toJson(new JsonWriterSettings(true));
 }
@@ -206,6 +221,11 @@ public class GetMongo extends AbstractMongoProcessor {
 }
 }
 
+private ObjectWriter getObjectWriter(ObjectMapper mapper, String 
ppSetting) {
+return ppSetting.equals(YES_PP.getValue()) ? 
mapper.writerWithDefaultPrettyPrinter()
+ 

[06/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/db/write_thread.cc
--
diff --git a/thirdparty/rocksdb/db/write_thread.cc 
b/thirdparty/rocksdb/db/write_thread.cc
new file mode 100644
index 000..2d3b346
--- /dev/null
+++ b/thirdparty/rocksdb/db/write_thread.cc
@@ -0,0 +1,656 @@
+//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+//  This source code is licensed under both the GPLv2 (found in the
+//  COPYING file in the root directory) and Apache 2.0 License
+//  (found in the LICENSE.Apache file in the root directory).
+
+#include "db/write_thread.h"
+#include 
+#include 
+#include "db/column_family.h"
+#include "port/port.h"
+#include "util/random.h"
+#include "util/sync_point.h"
+
+namespace rocksdb {
+
+WriteThread::WriteThread(const ImmutableDBOptions& db_options)
+: max_yield_usec_(db_options.enable_write_thread_adaptive_yield
+  ? db_options.write_thread_max_yield_usec
+  : 0),
+  slow_yield_usec_(db_options.write_thread_slow_yield_usec),
+  allow_concurrent_memtable_write_(
+  db_options.allow_concurrent_memtable_write),
+  enable_pipelined_write_(db_options.enable_pipelined_write),
+  newest_writer_(nullptr),
+  newest_memtable_writer_(nullptr),
+  last_sequence_(0) {}
+
+uint8_t WriteThread::BlockingAwaitState(Writer* w, uint8_t goal_mask) {
+  // We're going to block.  Lazily create the mutex.  We guarantee
+  // propagation of this construction to the waker via the
+  // STATE_LOCKED_WAITING state.  The waker won't try to touch the mutex
+  // or the condvar unless they CAS away the STATE_LOCKED_WAITING that
+  // we install below.
+  w->CreateMutex();
+
+  auto state = w->state.load(std::memory_order_acquire);
+  assert(state != STATE_LOCKED_WAITING);
+  if ((state & goal_mask) == 0 &&
+  w->state.compare_exchange_strong(state, STATE_LOCKED_WAITING)) {
+// we have permission (and an obligation) to use StateMutex
+std::unique_lock guard(w->StateMutex());
+w->StateCV().wait(guard, [w] {
+  return w->state.load(std::memory_order_relaxed) != STATE_LOCKED_WAITING;
+});
+state = w->state.load(std::memory_order_relaxed);
+  }
+  // else tricky.  Goal is met or CAS failed.  In the latter case the waker
+  // must have changed the state, and compare_exchange_strong has updated
+  // our local variable with the new one.  At the moment WriteThread never
+  // waits for a transition across intermediate states, so we know that
+  // since a state change has occurred the goal must have been met.
+  assert((state & goal_mask) != 0);
+  return state;
+}
+
+uint8_t WriteThread::AwaitState(Writer* w, uint8_t goal_mask,
+AdaptationContext* ctx) {
+  uint8_t state;
+
+  // 1. Busy loop using "pause" for 1 micro sec
+  // 2. Else SOMETIMES busy loop using "yield" for 100 micro sec (default)
+  // 3. Else blocking wait
+
+  // On a modern Xeon each loop takes about 7 nanoseconds (most of which
+  // is the effect of the pause instruction), so 200 iterations is a bit
+  // more than a microsecond.  This is long enough that waits longer than
+  // this can amortize the cost of accessing the clock and yielding.
+  for (uint32_t tries = 0; tries < 200; ++tries) {
+state = w->state.load(std::memory_order_acquire);
+if ((state & goal_mask) != 0) {
+  return state;
+}
+port::AsmVolatilePause();
+  }
+
+  // If we're only going to end up waiting a short period of time,
+  // it can be a lot more efficient to call std::this_thread::yield()
+  // in a loop than to block in StateMutex().  For reference, on my 4.0
+  // SELinux test server with support for syscall auditing enabled, the
+  // minimum latency between FUTEX_WAKE to returning from FUTEX_WAIT is
+  // 2.7 usec, and the average is more like 10 usec.  That can be a big
+  // drag on RockDB's single-writer design.  Of course, spinning is a
+  // bad idea if other threads are waiting to run or if we're going to
+  // wait for a long time.  How do we decide?
+  //
+  // We break waiting into 3 categories: short-uncontended,
+  // short-contended, and long.  If we had an oracle, then we would always
+  // spin for short-uncontended, always block for long, and our choice for
+  // short-contended might depend on whether we were trying to optimize
+  // RocksDB throughput or avoid being greedy with system resources.
+  //
+  // Bucketing into short or long is easy by measuring elapsed time.
+  // Differentiating short-uncontended from short-contended is a bit
+  // trickier, but not too bad.  We could look for involuntary context
+  // switches using getrusage(RUSAGE_THREAD, ..), but it's less work
+  // (portability code and CPU) to just look for yield calls that take
+  // longer than we expect.  sched_yield() doesn't actually result in any
+  // context switch overhead if there 

[15/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/db/flush_job.cc
--
diff --git a/thirdparty/rocksdb/db/flush_job.cc 
b/thirdparty/rocksdb/db/flush_job.cc
new file mode 100644
index 000..846edb4
--- /dev/null
+++ b/thirdparty/rocksdb/db/flush_job.cc
@@ -0,0 +1,359 @@
+//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+//  This source code is licensed under both the GPLv2 (found in the
+//  COPYING file in the root directory) and Apache 2.0 License
+//  (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "db/flush_job.h"
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+
+#include 
+
+#include 
+#include 
+
+#include "db/builder.h"
+#include "db/db_iter.h"
+#include "db/dbformat.h"
+#include "db/event_helpers.h"
+#include "db/log_reader.h"
+#include "db/log_writer.h"
+#include "db/memtable_list.h"
+#include "db/merge_context.h"
+#include "db/version_set.h"
+#include "monitoring/iostats_context_imp.h"
+#include "monitoring/perf_context_imp.h"
+#include "monitoring/thread_status_util.h"
+#include "port/likely.h"
+#include "port/port.h"
+#include "db/memtable.h"
+#include "rocksdb/db.h"
+#include "rocksdb/env.h"
+#include "rocksdb/statistics.h"
+#include "rocksdb/status.h"
+#include "rocksdb/table.h"
+#include "table/block.h"
+#include "table/block_based_table_factory.h"
+#include "table/merging_iterator.h"
+#include "table/table_builder.h"
+#include "table/two_level_iterator.h"
+#include "util/coding.h"
+#include "util/event_logger.h"
+#include "util/file_util.h"
+#include "util/filename.h"
+#include "util/log_buffer.h"
+#include "util/logging.h"
+#include "util/mutexlock.h"
+#include "util/stop_watch.h"
+#include "util/sync_point.h"
+
+namespace rocksdb {
+
+FlushJob::FlushJob(const std::string& dbname, ColumnFamilyData* cfd,
+   const ImmutableDBOptions& db_options,
+   const MutableCFOptions& mutable_cf_options,
+   const EnvOptions& env_options, VersionSet* versions,
+   InstrumentedMutex* db_mutex,
+   std::atomic* shutting_down,
+   std::vector existing_snapshots,
+   SequenceNumber earliest_write_conflict_snapshot,
+   JobContext* job_context, LogBuffer* log_buffer,
+   Directory* db_directory, Directory* output_file_directory,
+   CompressionType output_compression, Statistics* stats,
+   EventLogger* event_logger, bool measure_io_stats)
+: dbname_(dbname),
+  cfd_(cfd),
+  db_options_(db_options),
+  mutable_cf_options_(mutable_cf_options),
+  env_options_(env_options),
+  versions_(versions),
+  db_mutex_(db_mutex),
+  shutting_down_(shutting_down),
+  existing_snapshots_(std::move(existing_snapshots)),
+  earliest_write_conflict_snapshot_(earliest_write_conflict_snapshot),
+  job_context_(job_context),
+  log_buffer_(log_buffer),
+  db_directory_(db_directory),
+  output_file_directory_(output_file_directory),
+  output_compression_(output_compression),
+  stats_(stats),
+  event_logger_(event_logger),
+  measure_io_stats_(measure_io_stats),
+  pick_memtable_called(false) {
+  // Update the thread status to indicate flush.
+  ReportStartedFlush();
+  TEST_SYNC_POINT("FlushJob::FlushJob()");
+}
+
+FlushJob::~FlushJob() {
+  ThreadStatusUtil::ResetThreadStatus();
+}
+
+void FlushJob::ReportStartedFlush() {
+  ThreadStatusUtil::SetColumnFamily(cfd_, cfd_->ioptions()->env,
+db_options_.enable_thread_tracking);
+  ThreadStatusUtil::SetThreadOperation(ThreadStatus::OP_FLUSH);
+  ThreadStatusUtil::SetThreadOperationProperty(
+  ThreadStatus::COMPACTION_JOB_ID,
+  job_context_->job_id);
+  IOSTATS_RESET(bytes_written);
+}
+
+void FlushJob::ReportFlushInputSize(const autovector& mems) {
+  uint64_t input_size = 0;
+  for (auto* mem : mems) {
+input_size += mem->ApproximateMemoryUsage();
+  }
+  ThreadStatusUtil::IncreaseThreadOperationProperty(
+  ThreadStatus::FLUSH_BYTES_MEMTABLES,
+  input_size);
+}
+
+void FlushJob::RecordFlushIOStats() {
+  RecordTick(stats_, FLUSH_WRITE_BYTES, IOSTATS(bytes_written));
+  ThreadStatusUtil::IncreaseThreadOperationProperty(
+  ThreadStatus::FLUSH_BYTES_WRITTEN, IOSTATS(bytes_written));
+  IOSTATS_RESET(bytes_written);
+}
+
+void FlushJob::PickMemTable() {
+  db_mutex_->AssertHeld();
+  assert(!pick_memtable_called);
+  pick_memtable_called = true;
+  // Save the contents of the earliest memtable as a new Table
+  cfd_->imm()->PickMemtablesToFlush(_);
+  if 

[31/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/build_tools/make_package.sh
--
diff --git a/thirdparty/rocksdb/build_tools/make_package.sh 
b/thirdparty/rocksdb/build_tools/make_package.sh
new file mode 100755
index 000..58bac44
--- /dev/null
+++ b/thirdparty/rocksdb/build_tools/make_package.sh
@@ -0,0 +1,128 @@
+#/usr/bin/env bash
+
+set -e
+
+function log() {
+  echo "[+] $1"
+}
+
+function fatal() {
+  echo "[!] $1"
+  exit 1
+}
+
+function platform() {
+  local  __resultvar=$1
+  if [[ -f "/etc/yum.conf" ]]; then
+eval $__resultvar="centos"
+  elif [[ -f "/etc/dpkg/dpkg.cfg" ]]; then
+eval $__resultvar="ubuntu"
+  else
+fatal "Unknwon operating system"
+  fi
+}
+platform OS
+
+function package() {
+  if [[ $OS = "ubuntu" ]]; then
+if dpkg --get-selections | grep --quiet $1; then
+  log "$1 is already installed. skipping."
+else
+  apt-get install $@ -y
+fi
+  elif [[ $OS = "centos" ]]; then
+if rpm -qa | grep --quiet $1; then
+  log "$1 is already installed. skipping."
+else
+  yum install $@ -y
+fi
+  fi
+}
+
+function detect_fpm_output() {
+  if [[ $OS = "ubuntu" ]]; then
+export FPM_OUTPUT=deb
+  elif [[ $OS = "centos" ]]; then
+export FPM_OUTPUT=rpm
+  fi
+}
+detect_fpm_output
+
+function gem_install() {
+  if gem list | grep --quiet $1; then
+log "$1 is already installed. skipping."
+  else
+gem install $@
+  fi
+}
+
+function main() {
+  if [[ $# -ne 1 ]]; then
+fatal "Usage: $0 "
+  else
+log "using rocksdb version: $1"
+  fi
+
+  if [[ -d /vagrant ]]; then
+if [[ $OS = "ubuntu" ]]; then
+  package g++-4.8
+  export CXX=g++-4.8
+
+  # the deb would depend on libgflags2, but the static lib is the only 
thing
+  # installed by make install
+  package libgflags-dev
+
+  package ruby-all-dev
+elif [[ $OS = "centos" ]]; then
+  pushd /etc/yum.repos.d
+  if [[ ! -f /etc/yum.repos.d/devtools-1.1.repo ]]; then
+wget http://people.centos.org/tru/devtools-1.1/devtools-1.1.repo
+  fi
+  package devtoolset-1.1-gcc --enablerepo=testing-1.1-devtools-6
+  package devtoolset-1.1-gcc-c++ --enablerepo=testing-1.1-devtools-6
+  export CC=/opt/centos/devtoolset-1.1/root/usr/bin/gcc
+  export CPP=/opt/centos/devtoolset-1.1/root/usr/bin/cpp
+  export CXX=/opt/centos/devtoolset-1.1/root/usr/bin/c++
+  export PATH=$PATH:/opt/centos/devtoolset-1.1/root/usr/bin
+  popd
+  if ! rpm -qa | grep --quiet gflags; then
+rpm -i 
https://github.com/schuhschuh/gflags/releases/download/v2.1.0/gflags-devel-2.1.0-1.amd64.rpm
+  fi
+
+  package ruby
+  package ruby-devel
+  package rubygems
+  package rpm-build
+fi
+  fi
+  gem_install fpm
+
+  make static_lib
+  make install INSTALL_PATH=package
+
+  cd package
+
+  LIB_DIR=lib
+  if [[ -z "$ARCH" ]]; then
+  ARCH=$(getconf LONG_BIT)
+  fi
+  if [[ ("$FPM_OUTPUT" = "rpm") && ($ARCH -eq 64) ]]; then
+  mv lib lib64
+  LIB_DIR=lib64
+  fi
+
+  fpm \
+-s dir \
+-t $FPM_OUTPUT \
+-n rocksdb \
+-v $1 \
+--prefix /usr \
+--url http://rocksdb.org/ \
+-m rock...@fb.com \
+--license BSD \
+--vendor Facebook \
+--description "RocksDB is an embeddable persistent key-value store for 
fast storage." \
+include $LIB_DIR
+}
+
+main $@

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/build_tools/precommit_checker.py
--
diff --git a/thirdparty/rocksdb/build_tools/precommit_checker.py 
b/thirdparty/rocksdb/build_tools/precommit_checker.py
new file mode 100755
index 000..0f8884d
--- /dev/null
+++ b/thirdparty/rocksdb/build_tools/precommit_checker.py
@@ -0,0 +1,208 @@
+#!/usr/local/fbcode/gcc-4.9-glibc-2.20-fb/bin/python2.7
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+import argparse
+import commands
+import subprocess
+import sys
+import re
+import os
+import time
+
+
+#
+# Simple logger
+#
+
+class Log:
+
+def __init__(self, filename):
+self.filename = filename
+self.f = open(self.filename, 'w+', 0)
+
+def caption(self, str):
+line = "\n# %s #\n" % str
+if self.f:
+self.f.write("%s \n" % line)
+else:
+print(line)
+
+def error(self, str):
+data = "\n\n# ERROR # %s" % str
+if self.f:
+self.f.write("%s \n" % data)
+else:
+print(data)
+
+def log(self, str):
+if self.f:
+self.f.write("%s \n" % str)
+else:
+print(str)
+
+#
+# Shell Environment
+#
+
+
+class Env(object):
+
+def __init__(self, logfile, tests):
+self.tests = tests
+

[47/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/leveldb-1.18/db/corruption_test.cc
--
diff --git a/thirdparty/leveldb-1.18/db/corruption_test.cc 
b/thirdparty/leveldb-1.18/db/corruption_test.cc
deleted file mode 100755
index 52ab7e2..000
--- a/thirdparty/leveldb-1.18/db/corruption_test.cc
+++ /dev/null
@@ -1,388 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "leveldb/db.h"
-
-#include 
-#include 
-#include 
-#include 
-#include "leveldb/cache.h"
-#include "leveldb/env.h"
-#include "leveldb/table.h"
-#include "leveldb/write_batch.h"
-#include "db/db_impl.h"
-#include "db/filename.h"
-#include "db/log_format.h"
-#include "db/version_set.h"
-#include "util/logging.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-namespace leveldb {
-
-static const int kValueSize = 1000;
-
-class CorruptionTest {
- public:
-  test::ErrorEnv env_;
-  std::string dbname_;
-  Cache* tiny_cache_;
-  Options options_;
-  DB* db_;
-
-  CorruptionTest() {
-tiny_cache_ = NewLRUCache(100);
-options_.env = _;
-options_.block_cache = tiny_cache_;
-dbname_ = test::TmpDir() + "/db_test";
-DestroyDB(dbname_, options_);
-
-db_ = NULL;
-options_.create_if_missing = true;
-Reopen();
-options_.create_if_missing = false;
-  }
-
-  ~CorruptionTest() {
- delete db_;
- DestroyDB(dbname_, Options());
- delete tiny_cache_;
-  }
-
-  Status TryReopen() {
-delete db_;
-db_ = NULL;
-return DB::Open(options_, dbname_, _);
-  }
-
-  void Close() {
-  delete db_;
-  db_ = nullptr;
-  }
-
-  void Reopen() {
-ASSERT_OK(TryReopen());
-  }
-
-  void RepairDB() {
-delete db_;
-db_ = NULL;
-ASSERT_OK(::leveldb::RepairDB(dbname_, options_));
-  }
-
-  void Build(int n) {
-std::string key_space, value_space;
-WriteBatch batch;
-for (int i = 0; i < n; i++) {
-  //if ((i % 100) == 0) fprintf(stderr, "@ %d of %d\n", i, n);
-  Slice key = Key(i, _space);
-  batch.Clear();
-  batch.Put(key, Value(i, _space));
-  WriteOptions options;
-  // Corrupt() doesn't work without this sync on windows; stat reports 0 
for
-  // the file size.
-  if (i == n - 1) {
-options.sync = true;
-  }
-  ASSERT_OK(db_->Write(options, ));
-}
-  }
-
-  void Check(int min_expected, int max_expected) {
-int next_expected = 0;
-int missed = 0;
-int bad_keys = 0;
-int bad_values = 0;
-int correct = 0;
-std::string value_space;
-Iterator* iter = db_->NewIterator(ReadOptions());
-for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-  uint64_t key;
-  Slice in(iter->key());
-  if (in == "" || in == "~") {
-// Ignore boundary keys.
-continue;
-  }
-  if (!ConsumeDecimalNumber(, ) ||
-  !in.empty() ||
-  key < next_expected) {
-bad_keys++;
-continue;
-  }
-  missed += static_cast(key - next_expected);
-  next_expected = static_cast(key + 1);
-  if (iter->value() != Value(static_cast(key), _space)) {
-bad_values++;
-  } else {
-correct++;
-  }
-}
-delete iter;
-
-fprintf(stderr,
-"expected=%d..%d; got=%d; bad_keys=%d; bad_values=%d; missed=%d\n",
-min_expected, max_expected, correct, bad_keys, bad_values, missed);
-ASSERT_LE(min_expected, correct);
-ASSERT_GE(max_expected, correct);
-  }
-
-  void Corrupt(FileType filetype, int offset, int bytes_to_corrupt) {
-// Pick file to corrupt
-std::vector filenames;
-ASSERT_OK(env_.GetChildren(dbname_, ));
-uint64_t number;
-FileType type;
-std::string fname;
-int picked_number = -1;
-for (size_t i = 0; i < filenames.size(); i++) {
-  if (ParseFileName(filenames[i], , ) &&
-  type == filetype &&
-  int(number) > picked_number) {  // Pick latest file
-fname = dbname_ + "/" + filenames[i];
-picked_number = static_cast(number);
-  }
-}
-ASSERT_TRUE(!fname.empty()) << filetype;
-
-struct stat sbuf;
-if (stat(fname.c_str(), ) != 0) {
-  const char* msg = strerror(errno);
-  ASSERT_TRUE(false) << fname << ": " << msg;
-}
-
-if (offset < 0) {
-  // Relative to end of file; make it absolute
-  if (-offset > sbuf.st_size) {
-offset = 0;
-  } else {
-offset = sbuf.st_size + offset;
-  }
-}
-if (offset > sbuf.st_size) {
-  offset = sbuf.st_size;
-}
-if (offset + bytes_to_corrupt > sbuf.st_size) {
-  bytes_to_corrupt = sbuf.st_size - offset;
-}
-
-// Do it
-std::string contents;
-Status s = ReadFileToString(Env::Default(), fname, );
-ASSERT_TRUE(s.ok()) << 

[39/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/leveldb-1.18/port/thread_annotations.h
--
diff --git a/thirdparty/leveldb-1.18/port/thread_annotations.h 
b/thirdparty/leveldb-1.18/port/thread_annotations.h
deleted file mode 100755
index aa2d1f5..000
--- a/thirdparty/leveldb-1.18/port/thread_annotations.h
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright (c) 2012 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_
-#define STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_
-
-// Some environments provide custom macros to aid in static thread-safety
-// analysis.  Provide empty definitions of such macros unless they are already
-// defined.
-
-#ifndef EXCLUSIVE_LOCKS_REQUIRED
-#define EXCLUSIVE_LOCKS_REQUIRED(...)
-#endif
-
-#ifndef SHARED_LOCKS_REQUIRED
-#define SHARED_LOCKS_REQUIRED(...)
-#endif
-
-#ifndef LOCKS_EXCLUDED
-#define LOCKS_EXCLUDED(...)
-#endif
-
-#ifndef LOCK_RETURNED
-#define LOCK_RETURNED(x)
-#endif
-
-#ifndef LOCKABLE
-#define LOCKABLE
-#endif
-
-#ifndef SCOPED_LOCKABLE
-#define SCOPED_LOCKABLE
-#endif
-
-#ifndef EXCLUSIVE_LOCK_FUNCTION
-#define EXCLUSIVE_LOCK_FUNCTION(...)
-#endif
-
-#ifndef SHARED_LOCK_FUNCTION
-#define SHARED_LOCK_FUNCTION(...)
-#endif
-
-#ifndef EXCLUSIVE_TRYLOCK_FUNCTION
-#define EXCLUSIVE_TRYLOCK_FUNCTION(...)
-#endif
-
-#ifndef SHARED_TRYLOCK_FUNCTION
-#define SHARED_TRYLOCK_FUNCTION(...)
-#endif
-
-#ifndef UNLOCK_FUNCTION
-#define UNLOCK_FUNCTION(...)
-#endif
-
-#ifndef NO_THREAD_SAFETY_ANALYSIS
-#define NO_THREAD_SAFETY_ANALYSIS
-#endif
-
-#endif  // STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/leveldb-1.18/port/win/stdint.h
--
diff --git a/thirdparty/leveldb-1.18/port/win/stdint.h 
b/thirdparty/leveldb-1.18/port/win/stdint.h
deleted file mode 100755
index 39edd0d..000
--- a/thirdparty/leveldb-1.18/port/win/stdint.h
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-// MSVC didn't ship with this file until the 2010 version.
-
-#ifndef STORAGE_LEVELDB_PORT_WIN_STDINT_H_
-#define STORAGE_LEVELDB_PORT_WIN_STDINT_H_
-
-#if !defined(_MSC_VER)
-#error This file should only be included when compiling with MSVC.
-#endif
-
-// Define C99 equivalent types.
-typedef signed char   int8_t;
-typedef signed short  int16_t;
-typedef signed intint32_t;
-typedef signed long long  int64_t;
-typedef unsigned char uint8_t;
-typedef unsigned shortuint16_t;
-typedef unsigned int  uint32_t;
-typedef unsigned long longuint64_t;
-
-#endif  // STORAGE_LEVELDB_PORT_WIN_STDINT_H_

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/leveldb-1.18/table/block.cc
--
diff --git a/thirdparty/leveldb-1.18/table/block.cc 
b/thirdparty/leveldb-1.18/table/block.cc
deleted file mode 100755
index 1ead640..000
--- a/thirdparty/leveldb-1.18/table/block.cc
+++ /dev/null
@@ -1,268 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// Decodes the blocks generated by block_builder.cc.
-
-#include "table/block.h"
-
-#include 
-#include 
-#include "leveldb/comparator.h"
-#include "table/format.h"
-#include "util/coding.h"
-#include "util/logging.h"
-
-namespace leveldb {
-
-inline uint32_t Block::NumRestarts() const {
-  assert(size_ >= sizeof(uint32_t));
-  return DecodeFixed32(data_ + size_ - sizeof(uint32_t));
-}
-
-Block::Block(const BlockContents& contents)
-: data_(contents.data.data()),
-  size_(contents.data.size()),
-  owned_(contents.heap_allocated) {
-  if (size_ < sizeof(uint32_t)) {
-size_ = 0;  // Error marker
-  } else {
-size_t max_restarts_allowed = (size_-sizeof(uint32_t)) / sizeof(uint32_t);
-if (NumRestarts() > max_restarts_allowed) {
-  // The size is too small for NumRestarts()
-  size_ = 0;
-} else {
-  restart_offset_ = static_cast(size_ - (1 + NumRestarts()) * 
sizeof(uint32_t));
-}
-  }
-}
-
-Block::~Block() {
-  if (owned_) {
-delete[] data_;
-  }
-}
-
-// Helper routine: decode the next block entry starting at "p",
-// storing the number of shared key bytes, non_shared key bytes,
-// and the length of the value in "*shared", "*non_shared", and
-// "*value_length", 

[20/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/db/db_impl_files.cc
--
diff --git a/thirdparty/rocksdb/db/db_impl_files.cc 
b/thirdparty/rocksdb/db/db_impl_files.cc
new file mode 100644
index 000..e44e423
--- /dev/null
+++ b/thirdparty/rocksdb/db/db_impl_files.cc
@@ -0,0 +1,548 @@
+//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+//  This source code is licensed under both the GPLv2 (found in the
+//  COPYING file in the root directory) and Apache 2.0 License
+//  (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include "db/db_impl.h"
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+#include 
+#include "db/event_helpers.h"
+#include "util/file_util.h"
+#include "util/sst_file_manager_impl.h"
+
+
+namespace rocksdb {
+uint64_t DBImpl::FindMinPrepLogReferencedByMemTable() {
+  if (!allow_2pc()) {
+return 0;
+  }
+
+  uint64_t min_log = 0;
+
+  // we must look through the memtables for two phase transactions
+  // that have been committed but not yet flushed
+  for (auto loop_cfd : *versions_->GetColumnFamilySet()) {
+if (loop_cfd->IsDropped()) {
+  continue;
+}
+
+auto log = loop_cfd->imm()->GetMinLogContainingPrepSection();
+
+if (log > 0 && (min_log == 0 || log < min_log)) {
+  min_log = log;
+}
+
+log = loop_cfd->mem()->GetMinLogContainingPrepSection();
+
+if (log > 0 && (min_log == 0 || log < min_log)) {
+  min_log = log;
+}
+  }
+
+  return min_log;
+}
+
+void DBImpl::MarkLogAsHavingPrepSectionFlushed(uint64_t log) {
+  assert(log != 0);
+  std::lock_guard lock(prep_heap_mutex_);
+  auto it = prepared_section_completed_.find(log);
+  assert(it != prepared_section_completed_.end());
+  it->second += 1;
+}
+
+void DBImpl::MarkLogAsContainingPrepSection(uint64_t log) {
+  assert(log != 0);
+  std::lock_guard lock(prep_heap_mutex_);
+  min_log_with_prep_.push(log);
+  auto it = prepared_section_completed_.find(log);
+  if (it == prepared_section_completed_.end()) {
+prepared_section_completed_[log] = 0;
+  }
+}
+
+uint64_t DBImpl::FindMinLogContainingOutstandingPrep() {
+
+  if (!allow_2pc()) {
+return 0;
+  }
+
+  std::lock_guard lock(prep_heap_mutex_);
+  uint64_t min_log = 0;
+
+  // first we look in the prepared heap where we keep
+  // track of transactions that have been prepared (written to WAL)
+  // but not yet committed.
+  while (!min_log_with_prep_.empty()) {
+min_log = min_log_with_prep_.top();
+
+auto it = prepared_section_completed_.find(min_log);
+
+// value was marked as 'deleted' from heap
+if (it != prepared_section_completed_.end() && it->second > 0) {
+  it->second -= 1;
+  min_log_with_prep_.pop();
+
+  // back to square one...
+  min_log = 0;
+  continue;
+} else {
+  // found a valid value
+  break;
+}
+  }
+
+  return min_log;
+}
+
+uint64_t DBImpl::MinLogNumberToKeep() {
+  uint64_t log_number = versions_->MinLogNumber();
+
+  if (allow_2pc()) {
+// if are 2pc we must consider logs containing prepared
+// sections of outstanding transactions.
+//
+// We must check min logs with outstanding prep before we check
+// logs referenced by memtables because a log referenced by the
+// first data structure could transition to the second under us.
+//
+// TODO(horuff): iterating over all column families under db mutex.
+// should find more optimal solution
+auto min_log_in_prep_heap = FindMinLogContainingOutstandingPrep();
+
+if (min_log_in_prep_heap != 0 && min_log_in_prep_heap < log_number) {
+  log_number = min_log_in_prep_heap;
+}
+
+auto min_log_refed_by_mem = FindMinPrepLogReferencedByMemTable();
+
+if (min_log_refed_by_mem != 0 && min_log_refed_by_mem < log_number) {
+  log_number = min_log_refed_by_mem;
+}
+  }
+  return log_number;
+}
+
+// * Returns the list of live files in 'sst_live'
+// If it's doing full scan:
+// * Returns the list of all files in the filesystem in
+// 'full_scan_candidate_files'.
+// Otherwise, gets obsolete files from VersionSet.
+// no_full_scan = true -- never do the full scan using GetChildren()
+// force = false -- don't force the full scan, except every
+//  mutable_db_options_.delete_obsolete_files_period_micros
+// force = true -- force the full scan
+void DBImpl::FindObsoleteFiles(JobContext* job_context, bool force,
+   bool no_full_scan) {
+  mutex_.AssertHeld();
+
+  // if deletion is disabled, do nothing
+  if (disable_delete_obsolete_files_ > 0) {
+return;
+  }
+
+  bool doing_the_full_scan = false;
+
+  // logic for figuring out if we're doing the full 

[21/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/db/db_impl_compaction_flush.cc
--
diff --git a/thirdparty/rocksdb/db/db_impl_compaction_flush.cc 
b/thirdparty/rocksdb/db/db_impl_compaction_flush.cc
new file mode 100644
index 000..3e686fe
--- /dev/null
+++ b/thirdparty/rocksdb/db/db_impl_compaction_flush.cc
@@ -0,0 +1,1910 @@
+//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+//  This source code is licensed under both the GPLv2 (found in the
+//  COPYING file in the root directory) and Apache 2.0 License
+//  (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include "db/db_impl.h"
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+#include 
+
+#include "db/builder.h"
+#include "db/event_helpers.h"
+#include "monitoring/iostats_context_imp.h"
+#include "monitoring/perf_context_imp.h"
+#include "monitoring/thread_status_updater.h"
+#include "monitoring/thread_status_util.h"
+#include "util/sst_file_manager_impl.h"
+#include "util/sync_point.h"
+
+namespace rocksdb {
+Status DBImpl::SyncClosedLogs(JobContext* job_context) {
+  TEST_SYNC_POINT("DBImpl::SyncClosedLogs:Start");
+  mutex_.AssertHeld();
+  autovector logs_to_sync;
+  uint64_t current_log_number = logfile_number_;
+  while (logs_.front().number < current_log_number &&
+ logs_.front().getting_synced) {
+log_sync_cv_.Wait();
+  }
+  for (auto it = logs_.begin();
+   it != logs_.end() && it->number < current_log_number; ++it) {
+auto& log = *it;
+assert(!log.getting_synced);
+log.getting_synced = true;
+logs_to_sync.push_back(log.writer);
+  }
+
+  Status s;
+  if (!logs_to_sync.empty()) {
+mutex_.Unlock();
+
+for (log::Writer* log : logs_to_sync) {
+  ROCKS_LOG_INFO(immutable_db_options_.info_log,
+ "[JOB %d] Syncing log #%" PRIu64, job_context->job_id,
+ log->get_log_number());
+  s = log->file()->Sync(immutable_db_options_.use_fsync);
+}
+if (s.ok()) {
+  s = directories_.GetWalDir()->Fsync();
+}
+
+mutex_.Lock();
+
+// "number <= current_log_number - 1" is equivalent to
+// "number < current_log_number".
+MarkLogsSynced(current_log_number - 1, true, s);
+if (!s.ok()) {
+  Status new_bg_error = s;
+  // may temporarily unlock and lock the mutex.
+  EventHelpers::NotifyOnBackgroundError(immutable_db_options_.listeners,
+BackgroundErrorReason::kFlush,
+_bg_error, _);
+  if (!new_bg_error.ok()) {
+bg_error_ = new_bg_error;
+  }
+  TEST_SYNC_POINT("DBImpl::SyncClosedLogs:Failed");
+  return s;
+}
+  }
+  return s;
+}
+
+Status DBImpl::FlushMemTableToOutputFile(
+ColumnFamilyData* cfd, const MutableCFOptions& mutable_cf_options,
+bool* made_progress, JobContext* job_context, LogBuffer* log_buffer) {
+  mutex_.AssertHeld();
+  assert(cfd->imm()->NumNotFlushed() != 0);
+  assert(cfd->imm()->IsFlushPending());
+
+  SequenceNumber earliest_write_conflict_snapshot;
+  std::vector snapshot_seqs =
+  snapshots_.GetAll(_write_conflict_snapshot);
+
+  FlushJob flush_job(
+  dbname_, cfd, immutable_db_options_, mutable_cf_options, env_options_,
+  versions_.get(), _, _down_, snapshot_seqs,
+  earliest_write_conflict_snapshot, job_context, log_buffer,
+  directories_.GetDbDir(), directories_.GetDataDir(0U),
+  GetCompressionFlush(*cfd->ioptions(), mutable_cf_options), stats_,
+  _logger_, mutable_cf_options.report_bg_io_stats);
+
+  FileMetaData file_meta;
+
+  flush_job.PickMemTable();
+
+#ifndef ROCKSDB_LITE
+  // may temporarily unlock and lock the mutex.
+  NotifyOnFlushBegin(cfd, _meta, mutable_cf_options, job_context->job_id,
+ flush_job.GetTableProperties());
+#endif  // ROCKSDB_LITE
+
+  Status s;
+  if (logfile_number_ > 0 &&
+  versions_->GetColumnFamilySet()->NumberOfColumnFamilies() > 0) {
+// If there are more than one column families, we need to make sure that
+// all the log files except the most recent one are synced. Otherwise if
+// the host crashes after flushing and before WAL is persistent, the
+// flushed SST may contain data from write batches whose updates to
+// other column families are missing.
+// SyncClosedLogs() may unlock and re-lock the db_mutex.
+s = SyncClosedLogs(job_context);
+  }
+
+  // Within flush_job.Run, rocksdb may call event listener to notify
+  // file creation and deletion.
+  //
+  // Note that flush_job.Run will unlock and lock the db_mutex,
+  // and EventListener callback will be 

[10/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/db/table_properties_collector.cc
--
diff --git a/thirdparty/rocksdb/db/table_properties_collector.cc 
b/thirdparty/rocksdb/db/table_properties_collector.cc
new file mode 100644
index 000..a1f4dba
--- /dev/null
+++ b/thirdparty/rocksdb/db/table_properties_collector.cc
@@ -0,0 +1,134 @@
+//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+//  This source code is licensed under both the GPLv2 (found in the
+//  COPYING file in the root directory) and Apache 2.0 License
+//  (found in the LICENSE.Apache file in the root directory).
+
+#include "db/table_properties_collector.h"
+
+#include "db/dbformat.h"
+#include "util/coding.h"
+#include "util/string_util.h"
+
+namespace rocksdb {
+
+Status InternalKeyPropertiesCollector::InternalAdd(const Slice& key,
+   const Slice& value,
+   uint64_t file_size) {
+  ParsedInternalKey ikey;
+  if (!ParseInternalKey(key, )) {
+return Status::InvalidArgument("Invalid internal key");
+  }
+
+  // Note: We count both, deletions and single deletions here.
+  if (ikey.type == ValueType::kTypeDeletion ||
+  ikey.type == ValueType::kTypeSingleDeletion) {
+++deleted_keys_;
+  } else if (ikey.type == ValueType::kTypeMerge) {
+++merge_operands_;
+  }
+
+  return Status::OK();
+}
+
+Status InternalKeyPropertiesCollector::Finish(
+UserCollectedProperties* properties) {
+  assert(properties);
+  assert(properties->find(
+InternalKeyTablePropertiesNames::kDeletedKeys) == properties->end());
+  assert(properties->find(InternalKeyTablePropertiesNames::kMergeOperands) ==
+ properties->end());
+
+  std::string val_deleted_keys;
+  PutVarint64(_deleted_keys, deleted_keys_);
+  properties->insert(
+  {InternalKeyTablePropertiesNames::kDeletedKeys, val_deleted_keys});
+
+  std::string val_merge_operands;
+  PutVarint64(_merge_operands, merge_operands_);
+  properties->insert(
+  {InternalKeyTablePropertiesNames::kMergeOperands, val_merge_operands});
+
+  return Status::OK();
+}
+
+UserCollectedProperties
+InternalKeyPropertiesCollector::GetReadableProperties() const {
+  return {{"kDeletedKeys", ToString(deleted_keys_)},
+  {"kMergeOperands", ToString(merge_operands_)}};
+}
+
+namespace {
+
+EntryType GetEntryType(ValueType value_type) {
+  switch (value_type) {
+case kTypeValue:
+  return kEntryPut;
+case kTypeDeletion:
+  return kEntryDelete;
+case kTypeSingleDeletion:
+  return kEntrySingleDelete;
+case kTypeMerge:
+  return kEntryMerge;
+default:
+  return kEntryOther;
+  }
+}
+
+uint64_t GetUint64Property(const UserCollectedProperties& props,
+   const std::string property_name,
+   bool* property_present) {
+  auto pos = props.find(property_name);
+  if (pos == props.end()) {
+*property_present = false;
+return 0;
+  }
+  Slice raw = pos->second;
+  uint64_t val = 0;
+  *property_present = true;
+  return GetVarint64(, ) ? val : 0;
+}
+
+}  // namespace
+
+Status UserKeyTablePropertiesCollector::InternalAdd(const Slice& key,
+const Slice& value,
+uint64_t file_size) {
+  ParsedInternalKey ikey;
+  if (!ParseInternalKey(key, )) {
+return Status::InvalidArgument("Invalid internal key");
+  }
+
+  return collector_->AddUserKey(ikey.user_key, value, GetEntryType(ikey.type),
+ikey.sequence, file_size);
+}
+
+Status UserKeyTablePropertiesCollector::Finish(
+UserCollectedProperties* properties) {
+  return collector_->Finish(properties);
+}
+
+UserCollectedProperties
+UserKeyTablePropertiesCollector::GetReadableProperties() const {
+  return collector_->GetReadableProperties();
+}
+
+
+const std::string InternalKeyTablePropertiesNames::kDeletedKeys
+  = "rocksdb.deleted.keys";
+const std::string InternalKeyTablePropertiesNames::kMergeOperands =
+"rocksdb.merge.operands";
+
+uint64_t GetDeletedKeys(
+const UserCollectedProperties& props) {
+  bool property_present_ignored;
+  return GetUint64Property(props, 
InternalKeyTablePropertiesNames::kDeletedKeys,
+   _present_ignored);
+}
+
+uint64_t GetMergeOperands(const UserCollectedProperties& props,
+  bool* property_present) {
+  return GetUint64Property(
+  props, InternalKeyTablePropertiesNames::kMergeOperands, 
property_present);
+}
+
+}  // namespace rocksdb

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/db/table_properties_collector.h
--
diff --git a/thirdparty/rocksdb/db/table_properties_collector.h 

[43/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/leveldb-1.18/db/version_set.cc
--
diff --git a/thirdparty/leveldb-1.18/db/version_set.cc 
b/thirdparty/leveldb-1.18/db/version_set.cc
deleted file mode 100755
index c59aef1..000
--- a/thirdparty/leveldb-1.18/db/version_set.cc
+++ /dev/null
@@ -1,1484 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "db/version_set.h"
-
-#include 
-#include 
-#include "db/filename.h"
-#include "db/log_reader.h"
-#include "db/log_writer.h"
-#include "db/memtable.h"
-#include "db/table_cache.h"
-#include "leveldb/env.h"
-#include "leveldb/table_builder.h"
-#include "table/merger.h"
-#include "table/two_level_iterator.h"
-#include "util/coding.h"
-#include "util/logging.h"
-
-namespace leveldb {
-
-static const int kTargetFileSize = 2 * 1048576;
-
-// Maximum bytes of overlaps in grandparent (i.e., level+2) before we
-// stop building a single file in a level->level+1 compaction.
-static const int64_t kMaxGrandParentOverlapBytes = 10 * kTargetFileSize;
-
-// Maximum number of bytes in all compacted files.  We avoid expanding
-// the lower level file set of a compaction if it would make the
-// total compaction cover more than this many bytes.
-static const int64_t kExpandedCompactionByteSizeLimit = 25 * kTargetFileSize;
-
-static double MaxBytesForLevel(int level) {
-  // Note: the result for level zero is not really used since we set
-  // the level-0 compaction threshold based on number of files.
-  double result = 10 * 1048576.0;  // Result for both level-0 and level-1
-  while (level > 1) {
-result *= 10;
-level--;
-  }
-  return result;
-}
-
-static uint64_t MaxFileSizeForLevel(int level) {
-  return kTargetFileSize;  // We could vary per level to reduce number of 
files?
-}
-
-static int64_t TotalFileSize(const std::vector& files) {
-  int64_t sum = 0;
-  for (size_t i = 0; i < files.size(); i++) {
-sum += files[i]->file_size;
-  }
-  return sum;
-}
-
-Version::~Version() {
-  assert(refs_ == 0);
-
-  // Remove from linked list
-  prev_->next_ = next_;
-  next_->prev_ = prev_;
-
-  // Drop references to files
-  for (int level = 0; level < config::kNumLevels; level++) {
-for (size_t i = 0; i < files_[level].size(); i++) {
-  FileMetaData* f = files_[level][i];
-  assert(f->refs > 0);
-  f->refs--;
-  if (f->refs <= 0) {
-delete f;
-  }
-}
-  }
-}
-
-size_t FindFile(const InternalKeyComparator& icmp,
- const std::vector& files,
- const Slice& key) {
-  size_t left = 0;
-  size_t right = files.size();
-  while (left < right) {
-size_t mid = (left + right) / 2;
-const FileMetaData* f = files[mid];
-if (icmp.InternalKeyComparator::Compare(f->largest.Encode(), key) < 0) {
-  // Key at "mid.largest" is < "target".  Therefore all
-  // files at or before "mid" are uninteresting.
-  left = mid + 1;
-} else {
-  // Key at "mid.largest" is >= "target".  Therefore all files
-  // after "mid" are uninteresting.
-  right = mid;
-}
-  }
-  return right;
-}
-
-static bool AfterFile(const Comparator* ucmp,
-  const Slice* user_key, const FileMetaData* f) {
-  // NULL user_key occurs before all keys and is therefore never after *f
-  return (user_key != NULL &&
-  ucmp->Compare(*user_key, f->largest.user_key()) > 0);
-}
-
-static bool BeforeFile(const Comparator* ucmp,
-   const Slice* user_key, const FileMetaData* f) {
-  // NULL user_key occurs after all keys and is therefore never before *f
-  return (user_key != NULL &&
-  ucmp->Compare(*user_key, f->smallest.user_key()) < 0);
-}
-
-bool SomeFileOverlapsRange(
-const InternalKeyComparator& icmp,
-bool disjoint_sorted_files,
-const std::vector& files,
-const Slice* smallest_user_key,
-const Slice* largest_user_key) {
-  const Comparator* ucmp = icmp.user_comparator();
-  if (!disjoint_sorted_files) {
-// Need to check against all files
-for (size_t i = 0; i < files.size(); i++) {
-  const FileMetaData* f = files[i];
-  if (AfterFile(ucmp, smallest_user_key, f) ||
-  BeforeFile(ucmp, largest_user_key, f)) {
-// No overlap
-  } else {
-return true;  // Overlap
-  }
-}
-return false;
-  }
-
-  // Binary search over file list
-  size_t index = 0;
-  if (smallest_user_key != NULL) {
-// Find the earliest possible internal key for smallest_user_key
-InternalKey small(*smallest_user_key, 
kMaxSequenceNumber,kValueTypeForSeek);
-index = FindFile(icmp, files, small.Encode());
-  }
-
-  if (index >= files.size()) {
-// beginning of range is after all 

[33/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/build_tools/build_detect_platform
--
diff --git a/thirdparty/rocksdb/build_tools/build_detect_platform 
b/thirdparty/rocksdb/build_tools/build_detect_platform
new file mode 100755
index 000..c7ddb7c
--- /dev/null
+++ b/thirdparty/rocksdb/build_tools/build_detect_platform
@@ -0,0 +1,532 @@
+#!/bin/sh
+#
+# Detects OS we're compiling on and outputs a file specified by the first
+# argument, which in turn gets read while processing Makefile.
+#
+# The output will set the following variables:
+#   CC  C Compiler path
+#   CXX C++ Compiler path
+#   PLATFORM_LDFLAGSLinker flags
+#   JAVA_LDFLAGSLinker flags for RocksDBJava
+#   JAVA_STATIC_LDFLAGS Linker flags for RocksDBJava static build
+#   PLATFORM_SHARED_EXT Extension for shared libraries
+#   PLATFORM_SHARED_LDFLAGS Flags for building shared library
+#   PLATFORM_SHARED_CFLAGS  Flags for compiling objects for shared library
+#   PLATFORM_CCFLAGSC compiler flags
+#   PLATFORM_CXXFLAGS   C++ compiler flags.  Will contain:
+#   PLATFORM_SHARED_VERSIONED   Set to 'true' if platform supports versioned
+#   shared libraries, empty otherwise.
+#
+# The PLATFORM_CCFLAGS and PLATFORM_CXXFLAGS might include the following:
+#
+#   -DROCKSDB_PLATFORM_POSIXif posix-platform based
+#   -DSNAPPYif the Snappy library is present
+#   -DLZ4   if the LZ4 library is present
+#   -DZSTD  if the ZSTD library is present
+#   -DNUMA  if the NUMA library is present
+#   -DTBB   if the TBB library is present
+#
+# Using gflags in rocksdb:
+# Our project depends on gflags, which requires users to take some extra steps
+# before they can compile the whole repository:
+#   1. Install gflags. You may download it from here:
+#  https://gflags.github.io/gflags/ (Mac users can `brew install gflags`)
+#   2. Once installed, add the include path for gflags to your CPATH env var 
and
+#  the lib path to LIBRARY_PATH. If installed with default settings, the 
lib
+#  will be /usr/local/lib and the include path will be /usr/local/include
+
+OUTPUT=$1
+if test -z "$OUTPUT"; then
+  echo "usage: $0 " >&2
+  exit 1
+fi
+
+# we depend on C++11
+PLATFORM_CXXFLAGS="-std=c++11"
+# we currently depend on POSIX platform
+COMMON_FLAGS="-DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX"
+
+# Default to fbcode gcc on internal fb machines
+if [ -z "$ROCKSDB_NO_FBCODE" -a -d /mnt/gvfs/third-party ]; then
+FBCODE_BUILD="true"
+# If we're compiling with TSAN we need pic build
+PIC_BUILD=$COMPILE_WITH_TSAN
+if [ -z "$ROCKSDB_FBCODE_BUILD_WITH_481" ]; then
+  source "$PWD/build_tools/fbcode_config.sh"
+else
+  # we need this to build with MySQL. Don't use for other purposes.
+  source "$PWD/build_tools/fbcode_config4.8.1.sh"
+fi
+fi
+
+# Delete existing output, if it exists
+rm -f "$OUTPUT"
+touch "$OUTPUT"
+
+if test -z "$CC"; then
+   CC=cc
+fi
+
+if test -z "$CXX"; then
+CXX=g++
+fi
+
+# Detect OS
+if test -z "$TARGET_OS"; then
+TARGET_OS=`uname -s`
+fi
+
+if test -z "$TARGET_ARCHITECTURE"; then
+TARGET_ARCHITECTURE=`uname -m`
+fi
+
+if test -z "$CLANG_SCAN_BUILD"; then
+CLANG_SCAN_BUILD=scan-build
+fi
+
+if test -z "$CLANG_ANALYZER"; then
+CLANG_ANALYZER=$(which clang++ 2> /dev/null)
+fi
+
+COMMON_FLAGS="$COMMON_FLAGS ${CFLAGS}"
+CROSS_COMPILE=
+PLATFORM_CCFLAGS=
+PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS"
+PLATFORM_SHARED_EXT="so"
+PLATFORM_SHARED_LDFLAGS="-Wl,--no-as-needed -shared -Wl,-soname -Wl,"
+PLATFORM_SHARED_CFLAGS="-fPIC"
+PLATFORM_SHARED_VERSIONED=true
+
+# generic port files (working on all platform by #ifdef) go directly in /port
+GENERIC_PORT_FILES=`cd "$ROCKSDB_ROOT"; find port -name '*.cc' | tr "\n" " "`
+
+# On GCC, we pick libc's memcmp over GCC's memcmp via -fno-builtin-memcmp
+case "$TARGET_OS" in
+Darwin)
+PLATFORM=OS_MACOSX
+COMMON_FLAGS="$COMMON_FLAGS -DOS_MACOSX"
+PLATFORM_SHARED_EXT=dylib
+PLATFORM_SHARED_LDFLAGS="-dynamiclib -install_name "
+# PORT_FILES=port/darwin/darwin_specific.cc
+;;
+IOS)
+PLATFORM=IOS
+COMMON_FLAGS="$COMMON_FLAGS -DOS_MACOSX -DIOS_CROSS_COMPILE 
-DROCKSDB_LITE"
+PLATFORM_SHARED_EXT=dylib
+PLATFORM_SHARED_LDFLAGS="-dynamiclib -install_name "
+CROSS_COMPILE=true
+PLATFORM_SHARED_VERSIONED=
+;;
+Linux)
+PLATFORM=OS_LINUX
+COMMON_FLAGS="$COMMON_FLAGS -DOS_LINUX"
+if [ -z "$USE_CLANG" ]; then
+COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp"
+fi
+PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lpthread -lrt"
+# 

[34/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/TARGETS
--
diff --git a/thirdparty/rocksdb/TARGETS b/thirdparty/rocksdb/TARGETS
new file mode 100644
index 000..3fac4a7
--- /dev/null
+++ b/thirdparty/rocksdb/TARGETS
@@ -0,0 +1,533 @@
+
+import os
+
+TARGETS_PATH = os.path.dirname(__file__)
+REPO_PATH = TARGETS_PATH[(TARGETS_PATH.find('fbcode/') + len('fbcode/')):] + 
"/"
+BUCK_BINS = "buck-out/gen/" + REPO_PATH
+TEST_RUNNER = REPO_PATH + "buckifier/rocks_test_runner.sh"
+rocksdb_compiler_flags = [
+  "-fno-builtin-memcmp",
+  "-DROCKSDB_PLATFORM_POSIX",
+  "-DROCKSDB_LIB_IO_POSIX",
+  "-DROCKSDB_FALLOCATE_PRESENT",
+  "-DROCKSDB_MALLOC_USABLE_SIZE",
+  "-DROCKSDB_RANGESYNC_PRESENT",
+  "-DROCKSDB_SCHED_GETCPU_PRESENT",
+  "-DROCKSDB_SUPPORT_THREAD_LOCAL",
+  "-DOS_LINUX",
+  "-DROCKSDB_UBSAN_RUN",
+  # Flags to enable libs we include
+  "-DSNAPPY",
+  "-DZLIB",
+  "-DBZIP2",
+  "-DLZ4",
+  "-DZSTD",
+  "-DGFLAGS=gflags",
+  "-DNUMA",
+  "-DTBB",
+  # Needed to compile in fbcode
+  "-Wno-expansion-to-defined",
+]
+
+rocksdb_external_deps = [
+  ('bzip2', None, 'bz2'),
+  ('snappy', None, "snappy"),
+  ('zlib', None, 'z'),
+  ('gflags', None, 'gflags'),
+  ('lz4', None, 'lz4'),
+  ('zstd', None),
+  ('tbb', None),
+  ("numa", None, "numa"),
+  ("googletest", None, "gtest"),
+]
+
+rocksdb_preprocessor_flags = [
+  # Directories with files for #include
+  "-I" + REPO_PATH + "include/",
+  "-I" + REPO_PATH,
+]
+
+rocksdb_arch_preprocessor_flags = {
+  "x86_64": ["-DHAVE_SSE42"],
+}
+
+cpp_library(
+name = "rocksdb_lib",
+headers = AutoHeaders.RECURSIVE_GLOB,
+srcs = [
+  "cache/clock_cache.cc",
+  "cache/lru_cache.cc",
+  "cache/sharded_cache.cc",
+  "db/builder.cc",
+  "db/c.cc",
+  "db/column_family.cc",
+  "db/compacted_db_impl.cc",
+  "db/compaction.cc",
+  "db/compaction_iterator.cc",
+  "db/compaction_job.cc",
+  "db/compaction_picker.cc",
+  "db/compaction_picker_universal.cc",
+  "db/convenience.cc",
+  "db/db_filesnapshot.cc",
+  "db/db_impl.cc",
+  "db/db_impl_write.cc",
+  "db/db_impl_compaction_flush.cc",
+  "db/db_impl_files.cc",
+  "db/db_impl_open.cc",
+  "db/db_impl_debug.cc",
+  "db/db_impl_experimental.cc",
+  "db/db_impl_readonly.cc",
+  "db/db_info_dumper.cc",
+  "db/db_iter.cc",
+  "db/dbformat.cc",
+  "db/event_helpers.cc",
+  "db/experimental.cc",
+  "db/external_sst_file_ingestion_job.cc",
+  "db/file_indexer.cc",
+  "db/flush_job.cc",
+  "db/flush_scheduler.cc",
+  "db/forward_iterator.cc",
+  "db/internal_stats.cc",
+  "db/log_reader.cc",
+  "db/log_writer.cc",
+  "db/malloc_stats.cc",
+  "db/managed_iterator.cc",
+  "db/memtable.cc",
+  "db/memtable_list.cc",
+  "db/merge_helper.cc",
+  "db/merge_operator.cc",
+  "db/range_del_aggregator.cc",
+  "db/repair.cc",
+  "db/snapshot_impl.cc",
+  "db/table_cache.cc",
+  "db/table_properties_collector.cc",
+  "db/transaction_log_impl.cc",
+  "db/version_builder.cc",
+  "db/version_edit.cc",
+  "db/version_set.cc",
+  "db/wal_manager.cc",
+  "db/write_batch.cc",
+  "db/write_batch_base.cc",
+  "db/write_controller.cc",
+  "db/write_thread.cc",
+  "env/env.cc",
+  "env/env_chroot.cc",
+  "env/env_encryption.cc",
+  "env/env_hdfs.cc",
+  "env/env_posix.cc",
+  "env/io_posix.cc",
+  "env/mock_env.cc",
+  "memtable/alloc_tracker.cc",
+  "memtable/hash_cuckoo_rep.cc",
+  "memtable/hash_linklist_rep.cc",
+  "memtable/hash_skiplist_rep.cc",
+  "memtable/skiplistrep.cc",
+  "memtable/vectorrep.cc",
+  "memtable/write_buffer_manager.cc",
+  "monitoring/histogram.cc",
+  "monitoring/histogram_windowing.cc",
+  "monitoring/instrumented_mutex.cc",
+  "monitoring/iostats_context.cc",
+  "monitoring/perf_context.cc",
+  "monitoring/perf_level.cc",
+  "monitoring/statistics.cc",
+  "monitoring/thread_status_impl.cc",
+  "monitoring/thread_status_updater.cc",
+  "monitoring/thread_status_updater_debug.cc",
+  "monitoring/thread_status_util.cc",
+  "monitoring/thread_status_util_debug.cc",
+  "options/cf_options.cc",
+  "options/db_options.cc",
+  "options/options.cc",
+  "options/options_helper.cc",
+  "options/options_parser.cc",
+  "options/options_sanity_check.cc",
+  "port/port_posix.cc",
+  "port/stack_trace.cc",
+  "table/adaptive_table_factory.cc",
+  "table/block.cc",
+  "table/block_based_filter_block.cc",
+  "table/block_based_table_builder.cc",
+  "table/block_based_table_factory.cc",
+  "table/block_based_table_reader.cc",
+  "table/block_builder.cc",
+  "table/block_prefix_index.cc",
+  "table/bloom_block.cc",
+  

[37/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/leveldb-1.18/util/crc32c.cc
--
diff --git a/thirdparty/leveldb-1.18/util/crc32c.cc 
b/thirdparty/leveldb-1.18/util/crc32c.cc
deleted file mode 100755
index 6db9e77..000
--- a/thirdparty/leveldb-1.18/util/crc32c.cc
+++ /dev/null
@@ -1,332 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// A portable implementation of crc32c, optimized to handle
-// four bytes at a time.
-
-#include "util/crc32c.h"
-
-#include 
-#include "util/coding.h"
-
-namespace leveldb {
-namespace crc32c {
-
-static const uint32_t table0_[256] = {
-  0x, 0xf26b8303, 0xe13b70f7, 0x1350f3f4,
-  0xc79a971f, 0x35f1141c, 0x26a1e7e8, 0xd4ca64eb,
-  0x8ad958cf, 0x78b2dbcc, 0x6be22838, 0x9989ab3b,
-  0x4d43cfd0, 0xbf284cd3, 0xac78bf27, 0x5e133c24,
-  0x105ec76f, 0xe235446c, 0xf165b798, 0x030e349b,
-  0xd7c45070, 0x25afd373, 0x36ff2087, 0xc494a384,
-  0x9a879fa0, 0x68ec1ca3, 0x7bbcef57, 0x89d76c54,
-  0x5d1d08bf, 0xaf768bbc, 0xbc267848, 0x4e4dfb4b,
-  0x20bd8ede, 0xd2d60ddd, 0xc186fe29, 0x33ed7d2a,
-  0xe72719c1, 0x154c9ac2, 0x061c6936, 0xf477ea35,
-  0xaa64d611, 0x580f5512, 0x4b5fa6e6, 0xb93425e5,
-  0x6dfe410e, 0x9f95c20d, 0x8cc531f9, 0x7eaeb2fa,
-  0x30e349b1, 0xc288cab2, 0xd1d83946, 0x23b3ba45,
-  0xf779deae, 0x05125dad, 0x1642ae59, 0xe4292d5a,
-  0xba3a117e, 0x4851927d, 0x5b016189, 0xa96ae28a,
-  0x7da08661, 0x8fcb0562, 0x9c9bf696, 0x6ef07595,
-  0x417b1dbc, 0xb3109ebf, 0xa0406d4b, 0x522bee48,
-  0x86e18aa3, 0x748a09a0, 0x67dafa54, 0x95b17957,
-  0xcba24573, 0x39c9c670, 0x2a993584, 0xd8f2b687,
-  0x0c38d26c, 0xfe53516f, 0xed03a29b, 0x1f682198,
-  0x5125dad3, 0xa34e59d0, 0xb01eaa24, 0x42752927,
-  0x96bf4dcc, 0x64d4cecf, 0x77843d3b, 0x85efbe38,
-  0xdbfc821c, 0x2997011f, 0x3ac7f2eb, 0xc8ac71e8,
-  0x1c661503, 0xee0d9600, 0xfd5d65f4, 0x0f36e6f7,
-  0x61c69362, 0x93ad1061, 0x80fde395, 0x72966096,
-  0xa65c047d, 0x5437877e, 0x4767748a, 0xb50cf789,
-  0xeb1fcbad, 0x197448ae, 0x0a24bb5a, 0xf84f3859,
-  0x2c855cb2, 0xdeeedfb1, 0xcdbe2c45, 0x3fd5af46,
-  0x7198540d, 0x83f3d70e, 0x90a324fa, 0x62c8a7f9,
-  0xb602c312, 0x44694011, 0x5739b3e5, 0xa55230e6,
-  0xfb410cc2, 0x092a8fc1, 0x1a7a7c35, 0xe811ff36,
-  0x3cdb9bdd, 0xceb018de, 0xdde0eb2a, 0x2f8b6829,
-  0x82f63b78, 0x709db87b, 0x63cd4b8f, 0x91a6c88c,
-  0x456cac67, 0xb7072f64, 0xa457dc90, 0x563c5f93,
-  0x082f63b7, 0xfa44e0b4, 0xe9141340, 0x1b7f9043,
-  0xcfb5f4a8, 0x3dde77ab, 0x2e8e845f, 0xdce5075c,
-  0x92a8fc17, 0x60c37f14, 0x73938ce0, 0x81f80fe3,
-  0x55326b08, 0xa759e80b, 0xb4091bff, 0x466298fc,
-  0x1871a4d8, 0xea1a27db, 0xf94ad42f, 0x0b21572c,
-  0xdfeb33c7, 0x2d80b0c4, 0x3ed04330, 0xccbbc033,
-  0xa24bb5a6, 0x502036a5, 0x4370c551, 0xb11b4652,
-  0x65d122b9, 0x97baa1ba, 0x84ea524e, 0x7681d14d,
-  0x2892ed69, 0xdaf96e6a, 0xc9a99d9e, 0x3bc21e9d,
-  0xef087a76, 0x1d63f975, 0x0e330a81, 0xfc588982,
-  0xb21572c9, 0x407ef1ca, 0x532e023e, 0xa145813d,
-  0x758fe5d6, 0x87e466d5, 0x94b49521, 0x66df1622,
-  0x38cc2a06, 0xcaa7a905, 0xd9f75af1, 0x2b9cd9f2,
-  0xff56bd19, 0x0d3d3e1a, 0x1e6dcdee, 0xec064eed,
-  0xc38d26c4, 0x31e6a5c7, 0x22b65633, 0xd0ddd530,
-  0x0417b1db, 0xf67c32d8, 0xe52cc12c, 0x1747422f,
-  0x49547e0b, 0xbb3ffd08, 0xa86f0efc, 0x5a048dff,
-  0x8ecee914, 0x7ca56a17, 0x6ff599e3, 0x9d9e1ae0,
-  0xd3d3e1ab, 0x21b862a8, 0x32e8915c, 0xc083125f,
-  0x144976b4, 0xe622f5b7, 0xf5720643, 0x07198540,
-  0x590ab964, 0xab613a67, 0xb831c993, 0x4a5a4a90,
-  0x9e902e7b, 0x6cfbad78, 0x7fab5e8c, 0x8dc0dd8f,
-  0xe330a81a, 0x115b2b19, 0x020bd8ed, 0xf0605bee,
-  0x24aa3f05, 0xd6c1bc06, 0xc5914ff2, 0x37faccf1,
-  0x69e9f0d5, 0x9b8273d6, 0x88d28022, 0x7ab90321,
-  0xae7367ca, 0x5c18e4c9, 0x4f48173d, 0xbd23943e,
-  0xf36e6f75, 0x0105ec76, 0x12551f82, 0xe03e9c81,
-  0x34f4f86a, 0xc69f7b69, 0xd5cf889d, 0x27a40b9e,
-  0x79b737ba, 0x8bdcb4b9, 0x988c474d, 0x6ae7c44e,
-  0xbe2da0a5, 0x4c4623a6, 0x5f16d052, 0xad7d5351
-};
-static const uint32_t table1_[256] = {
-  0x, 0x13a29877, 0x274530ee, 0x34e7a899,
-  0x4e8a61dc, 0x5d28f9ab, 0x69cf5132, 0x7a6dc945,
-  0x9d14c3b8, 0x8eb65bcf, 0xba51f356, 0xa9f36b21,
-  0xd39ea264, 0xc03c3a13, 0xf4db928a, 0xe7790afd,
-  0x3fc5f181, 0x2c6769f6, 0x1880c16f, 0x0b225918,
-  0x714f905d, 0x62ed082a, 0x560aa0b3, 0x45a838c4,
-  0xa2d13239, 0xb173aa4e, 0x859402d7, 0x96369aa0,
-  0xec5b53e5, 0xfff9cb92, 0xcb1e630b, 0xd8bcfb7c,
-  0x7f8be302, 0x6c297b75, 0x58ced3ec, 0x4b6c4b9b,
-  0x310182de, 0x22a31aa9, 0x1644b230, 0x05e62a47,
-  0xe29f20ba, 0xf13db8cd, 0xc5da1054, 0xd6788823,
-  0xac154166, 0xbfb7d911, 0x8b507188, 0x98f2e9ff,
-  0x404e1283, 0x53ec8af4, 0x670b226d, 0x74a9ba1a,
-  0x0ec4735f, 0x1d66eb28, 0x298143b1, 0x3a23dbc6,
-  0xdd5ad13b, 0xcef8494c, 0xfa1fe1d5, 0xe9bd79a2,
-  0x93d0b0e7, 0x80722890, 0xb4958009, 0xa737187e,
-  0xff17c604, 

[12/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/db/memtable.h
--
diff --git a/thirdparty/rocksdb/db/memtable.h b/thirdparty/rocksdb/db/memtable.h
new file mode 100644
index 000..fe9feaf
--- /dev/null
+++ b/thirdparty/rocksdb/db/memtable.h
@@ -0,0 +1,427 @@
+//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+//  This source code is licensed under both the GPLv2 (found in the
+//  COPYING file in the root directory) and Apache 2.0 License
+//  (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#pragma once
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include "db/dbformat.h"
+#include "db/range_del_aggregator.h"
+#include "db/version_edit.h"
+#include "monitoring/instrumented_mutex.h"
+#include "options/cf_options.h"
+#include "rocksdb/db.h"
+#include "rocksdb/env.h"
+#include "rocksdb/memtablerep.h"
+#include "util/allocator.h"
+#include "util/concurrent_arena.h"
+#include "util/dynamic_bloom.h"
+#include "util/hash.h"
+
+namespace rocksdb {
+
+class Mutex;
+class MemTableIterator;
+class MergeContext;
+class InternalIterator;
+
+struct MemTableOptions {
+  explicit MemTableOptions(
+  const ImmutableCFOptions& ioptions,
+  const MutableCFOptions& mutable_cf_options);
+  size_t write_buffer_size;
+  size_t arena_block_size;
+  uint32_t memtable_prefix_bloom_bits;
+  size_t memtable_huge_page_size;
+  bool inplace_update_support;
+  size_t inplace_update_num_locks;
+  UpdateStatus (*inplace_callback)(char* existing_value,
+   uint32_t* existing_value_size,
+   Slice delta_value,
+   std::string* merged_value);
+  size_t max_successive_merges;
+  Statistics* statistics;
+  MergeOperator* merge_operator;
+  Logger* info_log;
+};
+
+// Batched counters to updated when inserting keys in one write batch.
+// In post process of the write batch, these can be updated together.
+// Only used in concurrent memtable insert case.
+struct MemTablePostProcessInfo {
+  uint64_t data_size = 0;
+  uint64_t num_entries = 0;
+  uint64_t num_deletes = 0;
+};
+
+// Note:  Many of the methods in this class have comments indicating that
+// external synchromization is required as these methods are not thread-safe.
+// It is up to higher layers of code to decide how to prevent concurrent
+// invokation of these methods.  This is usually done by acquiring either
+// the db mutex or the single writer thread.
+//
+// Some of these methods are documented to only require external
+// synchronization if this memtable is immutable.  Calling MarkImmutable() is
+// not sufficient to guarantee immutability.  It is up to higher layers of
+// code to determine if this MemTable can still be modified by other threads.
+// Eg: The Superversion stores a pointer to the current MemTable (that can
+// be modified) and a separate list of the MemTables that can no longer be
+// written to (aka the 'immutable memtables').
+class MemTable {
+ public:
+  struct KeyComparator : public MemTableRep::KeyComparator {
+const InternalKeyComparator comparator;
+explicit KeyComparator(const InternalKeyComparator& c) : comparator(c) { }
+virtual int operator()(const char* prefix_len_key1,
+   const char* prefix_len_key2) const override;
+virtual int operator()(const char* prefix_len_key,
+   const Slice& key) const override;
+  };
+
+  // MemTables are reference counted.  The initial reference count
+  // is zero and the caller must call Ref() at least once.
+  //
+  // earliest_seq should be the current SequenceNumber in the db such that any
+  // key inserted into this memtable will have an equal or larger seq number.
+  // (When a db is first created, the earliest sequence number will be 0).
+  // If the earliest sequence number is not known, kMaxSequenceNumber may be
+  // used, but this may prevent some transactions from succeeding until the
+  // first key is inserted into the memtable.
+  explicit MemTable(const InternalKeyComparator& comparator,
+const ImmutableCFOptions& ioptions,
+const MutableCFOptions& mutable_cf_options,
+WriteBufferManager* write_buffer_manager,
+SequenceNumber earliest_seq, uint32_t column_family_id);
+
+  // Do not delete this MemTable unless Unref() indicates it not in use.
+  ~MemTable();
+
+  // Increase reference count.
+  // REQUIRES: external synchronization to prevent simultaneous
+  // operations on the same MemTable.
+  void Ref() { ++refs_; }
+
+  // Drop reference count.
+  // If the 

[40/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/leveldb-1.18/include/leveldb/filter_policy.h
--
diff --git a/thirdparty/leveldb-1.18/include/leveldb/filter_policy.h 
b/thirdparty/leveldb-1.18/include/leveldb/filter_policy.h
deleted file mode 100755
index 1fba080..000
--- a/thirdparty/leveldb-1.18/include/leveldb/filter_policy.h
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright (c) 2012 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// A database can be configured with a custom FilterPolicy object.
-// This object is responsible for creating a small filter from a set
-// of keys.  These filters are stored in leveldb and are consulted
-// automatically by leveldb to decide whether or not to read some
-// information from disk. In many cases, a filter can cut down the
-// number of disk seeks form a handful to a single disk seek per
-// DB::Get() call.
-//
-// Most people will want to use the builtin bloom filter support (see
-// NewBloomFilterPolicy() below).
-
-#ifndef STORAGE_LEVELDB_INCLUDE_FILTER_POLICY_H_
-#define STORAGE_LEVELDB_INCLUDE_FILTER_POLICY_H_
-
-#include 
-
-namespace leveldb {
-
-class Slice;
-
-class FilterPolicy {
- public:
-  virtual ~FilterPolicy();
-
-  // Return the name of this policy.  Note that if the filter encoding
-  // changes in an incompatible way, the name returned by this method
-  // must be changed.  Otherwise, old incompatible filters may be
-  // passed to methods of this type.
-  virtual const char* Name() const = 0;
-
-  // keys[0,n-1] contains a list of keys (potentially with duplicates)
-  // that are ordered according to the user supplied comparator.
-  // Append a filter that summarizes keys[0,n-1] to *dst.
-  //
-  // Warning: do not change the initial contents of *dst.  Instead,
-  // append the newly constructed filter to *dst.
-  virtual void CreateFilter(const Slice* keys, int n, std::string* dst)
-  const = 0;
-
-  // "filter" contains the data appended by a preceding call to
-  // CreateFilter() on this class.  This method must return true if
-  // the key was in the list of keys passed to CreateFilter().
-  // This method may return true or false if the key was not on the
-  // list, but it should aim to return false with a high probability.
-  virtual bool KeyMayMatch(const Slice& key, const Slice& filter) const = 0;
-};
-
-// Return a new filter policy that uses a bloom filter with approximately
-// the specified number of bits per key.  A good value for bits_per_key
-// is 10, which yields a filter with ~ 1% false positive rate.
-//
-// Callers must delete the result after any database that is using the
-// result has been closed.
-//
-// Note: if you are using a custom comparator that ignores some parts
-// of the keys being compared, you must not use NewBloomFilterPolicy()
-// and must provide your own FilterPolicy that also ignores the
-// corresponding parts of the keys.  For example, if the comparator
-// ignores trailing spaces, it would be incorrect to use a
-// FilterPolicy (like NewBloomFilterPolicy) that does not ignore
-// trailing spaces in keys.
-extern const FilterPolicy* NewBloomFilterPolicy(int bits_per_key);
-
-}
-
-#endif  // STORAGE_LEVELDB_INCLUDE_FILTER_POLICY_H_

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/leveldb-1.18/include/leveldb/iterator.h
--
diff --git a/thirdparty/leveldb-1.18/include/leveldb/iterator.h 
b/thirdparty/leveldb-1.18/include/leveldb/iterator.h
deleted file mode 100755
index 76aced0..000
--- a/thirdparty/leveldb-1.18/include/leveldb/iterator.h
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// An iterator yields a sequence of key/value pairs from a source.
-// The following class defines the interface.  Multiple implementations
-// are provided by this library.  In particular, iterators are provided
-// to access the contents of a Table or a DB.
-//
-// Multiple threads can invoke const methods on an Iterator without
-// external synchronization, but if any of the threads may call a
-// non-const method, all threads accessing the same Iterator must use
-// external synchronization.
-
-#ifndef STORAGE_LEVELDB_INCLUDE_ITERATOR_H_
-#define STORAGE_LEVELDB_INCLUDE_ITERATOR_H_
-
-#include "leveldb/slice.h"
-#include "leveldb/status.h"
-
-namespace leveldb {
-
-class Iterator {
- public:
-  Iterator();
-  virtual ~Iterator();
-
-  // An iterator is either positioned at a key/value pair, or
-  // not valid.  This method returns true iff the iterator is 

[32/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/build_tools/gnu_parallel
--
diff --git a/thirdparty/rocksdb/build_tools/gnu_parallel 
b/thirdparty/rocksdb/build_tools/gnu_parallel
new file mode 100755
index 000..abbf8f1
--- /dev/null
+++ b/thirdparty/rocksdb/build_tools/gnu_parallel
@@ -0,0 +1,7936 @@
+#!/usr/bin/env perl
+
+# Copyright (C) 2007,2008,2009,2010,2011,2012,2013,2014 Ole Tange and
+# Free Software Foundation, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see 
+# or write to the Free Software Foundation, Inc., 51 Franklin St,
+# Fifth Floor, Boston, MA 02110-1301 USA
+
+# open3 used in Job::start
+use IPC::Open3;
+#  used in reaper
+use POSIX qw(:sys_wait_h setsid ceil :errno_h);
+# gensym used in Job::start
+use Symbol qw(gensym);
+# tempfile used in Job::start
+use File::Temp qw(tempfile tempdir);
+# mkpath used in openresultsfile
+use File::Path;
+# GetOptions used in get_options_from_array
+use Getopt::Long;
+# Used to ensure code quality
+use strict;
+use File::Basename;
+
+if(not $ENV{HOME}) {
+# $ENV{HOME} is sometimes not set if called from PHP
+::warning("\$HOME not set. Using /tmp\n");
+$ENV{HOME} = "/tmp";
+}
+
+save_stdin_stdout_stderr();
+save_original_signal_handler();
+parse_options();
+::debug("init", "Open file descriptors: ", join(" ",keys %Global::fd), "\n");
+my $number_of_args;
+if($Global::max_number_of_args) {
+$number_of_args=$Global::max_number_of_args;
+} elsif ($opt::X or $opt::m or $opt::xargs) {
+$number_of_args = undef;
+} else {
+$number_of_args = 1;
+}
+
+my @command;
+@command = @ARGV;
+
+my @fhlist;
+if($opt::pipepart) {
+@fhlist = map { open_or_exit($_) } "/dev/null";
+} else {
+@fhlist = map { open_or_exit($_) } @opt::a;
+if(not @fhlist and not $opt::pipe) {
+   @fhlist = (*STDIN);
+}
+}
+
+if($opt::skip_first_line) {
+# Skip the first line for the first file handle
+my $fh = $fhlist[0];
+<$fh>;
+}
+if($opt::header and not $opt::pipe) {
+my $fh = $fhlist[0];
+# split with colsep or \t
+# $header force $colsep = \t if undef?
+my $delimiter = $opt::colsep;
+$delimiter ||= "\$";
+my $id = 1;
+for my $fh (@fhlist) {
+   my $line = <$fh>;
+   chomp($line);
+   ::debug("init", "Delimiter: '$delimiter'");
+   for my $s (split /$delimiter/o, $line) {
+   ::debug("init", "Colname: '$s'");
+   # Replace {colname} with {2}
+   # TODO accept configurable short hands
+   # TODO how to deal with headers in {=...=}
+   for(@command) {
+ s:\{$s(|/|//|\.|/\.)\}:\{$id$1\}:g;
+   }
+   $Global::input_source_header{$id} = $s;
+   $id++;
+   }
+}
+} else {
+my $id = 1;
+for my $fh (@fhlist) {
+   $Global::input_source_header{$id} = $id;
+   $id++;
+}
+}
+
+if($opt::filter_hosts and (@opt::sshlogin or @opt::sshloginfile)) {
+# Parallel check all hosts are up. Remove hosts that are down
+filter_hosts();
+}
+
+if($opt::nonall or $opt::onall) {
+onall(@command);
+wait_and_exit(min(undef_as_zero($Global::exitstatus),254));
+}
+
+# TODO --transfer foo/./bar --cleanup
+# multiple --transfer and --basefile with different /./
+
+$Global::JobQueue = JobQueue->new(
+
\@command,\@fhlist,$Global::ContextReplace,$number_of_args,\@Global::ret_files);
+
+if($opt::eta or $opt::bar) {
+# Count the number of jobs before starting any
+$Global::JobQueue->total_jobs();
+}
+if($opt::pipepart) {
+@Global::cat_partials = map { pipe_part_files($_) } @opt::a;
+# Unget the command as many times as there are parts
+$Global::JobQueue->{'commandlinequeue'}->unget(
+   map { $Global::JobQueue->{'commandlinequeue'}->get() } 
@Global::cat_partials
+   );
+}
+for my $sshlogin (values %Global::host) {
+$sshlogin->max_jobs_running();
+}
+
+init_run_jobs();
+my $sem;
+if($Global::semaphore) {
+$sem = acquire_semaphore();
+}
+$SIG{TERM} = \_no_new_jobs;
+
+start_more_jobs();
+if(not $opt::pipepart) {
+if($opt::pipe) {
+   spreadstdin();
+}
+}
+::debug("init", "Start draining\n");
+drain_job_queue();
+::debug("init", "Done draining\n");
+reaper();
+::debug("init", "Done reaping\n");
+if($opt::pipe and @opt::a) {
+for my $job (@Global::tee_jobs) {
+ 

[42/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/leveldb-1.18/doc/bench/db_bench_sqlite3.cc
--
diff --git a/thirdparty/leveldb-1.18/doc/bench/db_bench_sqlite3.cc 
b/thirdparty/leveldb-1.18/doc/bench/db_bench_sqlite3.cc
deleted file mode 100755
index e63aaa8..000
--- a/thirdparty/leveldb-1.18/doc/bench/db_bench_sqlite3.cc
+++ /dev/null
@@ -1,718 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include 
-#include 
-#include 
-#include "util/histogram.h"
-#include "util/random.h"
-#include "util/testutil.h"
-
-// Comma-separated list of operations to run in the specified order
-//   Actual benchmarks:
-//
-//   fillseq   -- write N values in sequential key order in async mode
-//   fillseqsync   -- write N/100 values in sequential key order in sync mode
-//   fillseqbatch  -- batch write N values in sequential key order in async 
mode
-//   fillrandom-- write N values in random key order in async mode
-//   fillrandsync  -- write N/100 values in random key order in sync mode
-//   fillrandbatch -- batch write N values in sequential key order in async 
mode
-//   overwrite -- overwrite N values in random key order in async mode
-//   fillrand100K  -- write N/1000 100K values in random order in async mode
-//   fillseq100K   -- write N/1000 100K values in sequential order in async 
mode
-//   readseq   -- read N times sequentially
-//   readrandom-- read N times in random order
-//   readrand100K  -- read N/1000 100K values in sequential order in async mode
-static const char* FLAGS_benchmarks =
-"fillseq,"
-"fillseqsync,"
-"fillseqbatch,"
-"fillrandom,"
-"fillrandsync,"
-"fillrandbatch,"
-"overwrite,"
-"overwritebatch,"
-"readrandom,"
-"readseq,"
-"fillrand100K,"
-"fillseq100K,"
-"readseq,"
-"readrand100K,"
-;
-
-// Number of key/values to place in database
-static int FLAGS_num = 100;
-
-// Number of read operations to do.  If negative, do FLAGS_num reads.
-static int FLAGS_reads = -1;
-
-// Size of each value
-static int FLAGS_value_size = 100;
-
-// Print histogram of operation timings
-static bool FLAGS_histogram = false;
-
-// Arrange to generate values that shrink to this fraction of
-// their original size after compression
-static double FLAGS_compression_ratio = 0.5;
-
-// Page size. Default 1 KB.
-static int FLAGS_page_size = 1024;
-
-// Number of pages.
-// Default cache size = FLAGS_page_size * FLAGS_num_pages = 4 MB.
-static int FLAGS_num_pages = 4096;
-
-// If true, do not destroy the existing database.  If you set this
-// flag and also specify a benchmark that wants a fresh database, that
-// benchmark will fail.
-static bool FLAGS_use_existing_db = false;
-
-// If true, we allow batch writes to occur
-static bool FLAGS_transaction = true;
-
-// If true, we enable Write-Ahead Logging
-static bool FLAGS_WAL_enabled = true;
-
-// Use the db with the following name.
-static const char* FLAGS_db = NULL;
-
-inline
-static void ExecErrorCheck(int status, char *err_msg) {
-  if (status != SQLITE_OK) {
-fprintf(stderr, "SQL error: %s\n", err_msg);
-sqlite3_free(err_msg);
-exit(1);
-  }
-}
-
-inline
-static void StepErrorCheck(int status) {
-  if (status != SQLITE_DONE) {
-fprintf(stderr, "SQL step error: status = %d\n", status);
-exit(1);
-  }
-}
-
-inline
-static void ErrorCheck(int status) {
-  if (status != SQLITE_OK) {
-fprintf(stderr, "sqlite3 error: status = %d\n", status);
-exit(1);
-  }
-}
-
-inline
-static void WalCheckpoint(sqlite3* db_) {
-  // Flush all writes to disk
-  if (FLAGS_WAL_enabled) {
-sqlite3_wal_checkpoint_v2(db_, NULL, SQLITE_CHECKPOINT_FULL, NULL, NULL);
-  }
-}
-
-namespace leveldb {
-
-// Helper for quickly generating random data.
-namespace {
-class RandomGenerator {
- private:
-  std::string data_;
-  int pos_;
-
- public:
-  RandomGenerator() {
-// We use a limited amount of data over and over again and ensure
-// that it is larger than the compression window (32KB), and also
-// large enough to serve all typical value sizes we want to write.
-Random rnd(301);
-std::string piece;
-while (data_.size() < 1048576) {
-  // Add a short fragment that is as compressible as specified
-  // by FLAGS_compression_ratio.
-  test::CompressibleString(, FLAGS_compression_ratio, 100, );
-  data_.append(piece);
-}
-pos_ = 0;
-  }
-
-  Slice Generate(int len) {
-if (pos_ + len > data_.size()) {
-  pos_ = 0;
-  assert(len < data_.size());
-}
-pos_ += len;
-return Slice(data_.data() + pos_ - len, len);
-  }
-};
-
-static Slice TrimSpace(Slice s) {
-  int start = 0;
-  while (start < s.size() && isspace(s[start])) {
-

[08/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/db/version_set.h
--
diff --git a/thirdparty/rocksdb/db/version_set.h 
b/thirdparty/rocksdb/db/version_set.h
new file mode 100644
index 000..9fb000c
--- /dev/null
+++ b/thirdparty/rocksdb/db/version_set.h
@@ -0,0 +1,862 @@
+//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+//  This source code is licensed under both the GPLv2 (found in the
+//  COPYING file in the root directory) and Apache 2.0 License
+//  (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+// The representation of a DBImpl consists of a set of Versions.  The
+// newest version is called "current".  Older versions may be kept
+// around to provide a consistent view to live iterators.
+//
+// Each Version keeps track of a set of Table files per level.  The
+// entire set of versions is maintained in a VersionSet.
+//
+// Version,VersionSet are thread-compatible, but require external
+// synchronization on all accesses.
+
+#pragma once
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "db/column_family.h"
+#include "db/compaction.h"
+#include "db/compaction_picker.h"
+#include "db/dbformat.h"
+#include "db/file_indexer.h"
+#include "db/log_reader.h"
+#include "db/range_del_aggregator.h"
+#include "db/table_cache.h"
+#include "db/version_builder.h"
+#include "db/version_edit.h"
+#include "db/write_controller.h"
+#include "monitoring/instrumented_mutex.h"
+#include "options/db_options.h"
+#include "port/port.h"
+#include "rocksdb/env.h"
+
+namespace rocksdb {
+
+namespace log {
+class Writer;
+}
+
+class Compaction;
+class InternalIterator;
+class LogBuffer;
+class LookupKey;
+class MemTable;
+class Version;
+class VersionSet;
+class WriteBufferManager;
+class MergeContext;
+class ColumnFamilySet;
+class TableCache;
+class MergeIteratorBuilder;
+
+// Return the smallest index i such that file_level.files[i]->largest >= key.
+// Return file_level.num_files if there is no such file.
+// REQUIRES: "file_level.files" contains a sorted list of
+// non-overlapping files.
+extern int FindFile(const InternalKeyComparator& icmp,
+const LevelFilesBrief& file_level, const Slice& key);
+
+// Returns true iff some file in "files" overlaps the user key range
+// [*smallest,*largest].
+// smallest==nullptr represents a key smaller than all keys in the DB.
+// largest==nullptr represents a key largest than all keys in the DB.
+// REQUIRES: If disjoint_sorted_files, file_level.files[]
+// contains disjoint ranges in sorted order.
+extern bool SomeFileOverlapsRange(const InternalKeyComparator& icmp,
+  bool disjoint_sorted_files,
+  const LevelFilesBrief& file_level,
+  const Slice* smallest_user_key,
+  const Slice* largest_user_key);
+
+// Generate LevelFilesBrief from vector
+// Would copy smallest_key and largest_key data to sequential memory
+// arena: Arena used to allocate the memory
+extern void DoGenerateLevelFilesBrief(LevelFilesBrief* file_level,
+  const std::vector& files,
+  Arena* arena);
+
+class VersionStorageInfo {
+ public:
+  VersionStorageInfo(const InternalKeyComparator* internal_comparator,
+ const Comparator* user_comparator, int num_levels,
+ CompactionStyle compaction_style,
+ VersionStorageInfo* src_vstorage,
+ bool _force_consistency_checks);
+  ~VersionStorageInfo();
+
+  void Reserve(int level, size_t size) { files_[level].reserve(size); }
+
+  void AddFile(int level, FileMetaData* f, Logger* info_log = nullptr);
+
+  void SetFinalized();
+
+  // Update num_non_empty_levels_.
+  void UpdateNumNonEmptyLevels();
+
+  void GenerateFileIndexer() {
+file_indexer_.UpdateIndex(_, num_non_empty_levels_, files_);
+  }
+
+  // Update the accumulated stats from a file-meta.
+  void UpdateAccumulatedStats(FileMetaData* file_meta);
+
+  // Decrease the current stat form a to-be-delected file-meta
+  void RemoveCurrentStats(FileMetaData* file_meta);
+
+  void ComputeCompensatedSizes();
+
+  // Updates internal structures that keep track of compaction scores
+  // We use compaction scores to figure out which compaction to do next
+  // REQUIRES: db_mutex held!!
+  // TODO find a better way to pass compaction_options_fifo.
+  void ComputeCompactionScore(const ImmutableCFOptions& immutable_cf_options,
+  const 

[26/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/db/compaction_job.cc
--
diff --git a/thirdparty/rocksdb/db/compaction_job.cc 
b/thirdparty/rocksdb/db/compaction_job.cc
new file mode 100644
index 000..75f5ab6
--- /dev/null
+++ b/thirdparty/rocksdb/db/compaction_job.cc
@@ -0,0 +1,1467 @@
+//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+//  This source code is licensed under both the GPLv2 (found in the
+//  COPYING file in the root directory) and Apache 2.0 License
+//  (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "db/compaction_job.h"
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "db/builder.h"
+#include "db/db_iter.h"
+#include "db/dbformat.h"
+#include "db/event_helpers.h"
+#include "db/log_reader.h"
+#include "db/log_writer.h"
+#include "db/memtable.h"
+#include "db/memtable_list.h"
+#include "db/merge_context.h"
+#include "db/merge_helper.h"
+#include "db/version_set.h"
+#include "monitoring/iostats_context_imp.h"
+#include "monitoring/perf_context_imp.h"
+#include "monitoring/thread_status_util.h"
+#include "port/likely.h"
+#include "port/port.h"
+#include "rocksdb/db.h"
+#include "rocksdb/env.h"
+#include "rocksdb/statistics.h"
+#include "rocksdb/status.h"
+#include "rocksdb/table.h"
+#include "table/block.h"
+#include "table/block_based_table_factory.h"
+#include "table/merging_iterator.h"
+#include "table/table_builder.h"
+#include "util/coding.h"
+#include "util/file_reader_writer.h"
+#include "util/filename.h"
+#include "util/log_buffer.h"
+#include "util/logging.h"
+#include "util/mutexlock.h"
+#include "util/random.h"
+#include "util/sst_file_manager_impl.h"
+#include "util/stop_watch.h"
+#include "util/string_util.h"
+#include "util/sync_point.h"
+
+namespace rocksdb {
+
+// Maintains state for each sub-compaction
+struct CompactionJob::SubcompactionState {
+  const Compaction* compaction;
+  std::unique_ptr c_iter;
+
+  // The boundaries of the key-range this compaction is interested in. No two
+  // subcompactions may have overlapping key-ranges.
+  // 'start' is inclusive, 'end' is exclusive, and nullptr means unbounded
+  Slice *start, *end;
+
+  // The return status of this subcompaction
+  Status status;
+
+  // Files produced by this subcompaction
+  struct Output {
+FileMetaData meta;
+bool finished;
+std::shared_ptr table_properties;
+  };
+
+  // State kept for output being generated
+  std::vector outputs;
+  std::unique_ptr outfile;
+  std::unique_ptr builder;
+  Output* current_output() {
+if (outputs.empty()) {
+  // This subcompaction's outptut could be empty if compaction was aborted
+  // before this subcompaction had a chance to generate any output files.
+  // When subcompactions are executed sequentially this is more likely and
+  // will be particulalry likely for the later subcompactions to be empty.
+  // Once they are run in parallel however it should be much rarer.
+  return nullptr;
+} else {
+  return ();
+}
+  }
+
+  uint64_t current_output_file_size;
+
+  // State during the subcompaction
+  uint64_t total_bytes;
+  uint64_t num_input_records;
+  uint64_t num_output_records;
+  CompactionJobStats compaction_job_stats;
+  uint64_t approx_size;
+  // An index that used to speed up ShouldStopBefore().
+  size_t grandparent_index = 0;
+  // The number of bytes overlapping between the current output and
+  // grandparent files used in ShouldStopBefore().
+  uint64_t overlapped_bytes = 0;
+  // A flag determine whether the key has been seen in ShouldStopBefore()
+  bool seen_key = false;
+  std::string compression_dict;
+
+  SubcompactionState(Compaction* c, Slice* _start, Slice* _end,
+ uint64_t size = 0)
+  : compaction(c),
+start(_start),
+end(_end),
+outfile(nullptr),
+builder(nullptr),
+current_output_file_size(0),
+total_bytes(0),
+num_input_records(0),
+num_output_records(0),
+approx_size(size),
+grandparent_index(0),
+overlapped_bytes(0),
+seen_key(false),
+compression_dict() {
+assert(compaction != nullptr);
+  }
+
+  SubcompactionState(SubcompactionState&& o) { *this = std::move(o); }
+
+  SubcompactionState& operator=(SubcompactionState&& o) {
+compaction = std::move(o.compaction);
+start = std::move(o.start);
+end = std::move(o.end);
+status = std::move(o.status);
+outputs = std::move(o.outputs);
+outfile = std::move(o.outfile);
+

[35/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/HISTORY.md
--
diff --git a/thirdparty/rocksdb/HISTORY.md b/thirdparty/rocksdb/HISTORY.md
new file mode 100644
index 000..29f0f3f
--- /dev/null
+++ b/thirdparty/rocksdb/HISTORY.md
@@ -0,0 +1,568 @@
+# Rocksdb Change Log
+## Unreleased
+### New Features
+* Add Iterator::Refresh(), which allows users to update the iterator state so 
that they can avoid some initialization costs of recreating iterators.
+* Replace dynamic_cast<> (except unit test) so people can choose to build with 
RTTI off. With make, release mode is by default built with -fno-rtti and debug 
mode is built without it. Users can override it by setting USE_RTTI=0 or 1.
+* Universal compactions including the bottom level can be executed in a 
dedicated thread pool. This alleviates head-of-line blocking in the compaction 
queue, which cause write stalling, particularly in multi-instance use cases. 
Users can enable this feature via `Env::SetBackgroundThreads(N, 
Env::Priority::BOTTOM)`, where `N > 0`.
+
+### Bug Fixes
+* Fix wrong latencies in `rocksdb.db.get.micros`, `rocksdb.db.write.micros`, 
and `rocksdb.sst.read.micros`.
+* Fix incorrect dropping of deletions during intra-L0 compaction.
+
+## 5.7.0 (07/13/2017)
+### Public API Change
+* DB property "rocksdb.sstables" now prints keys in hex form.
+
+### New Features
+* Measure estimated number of reads per file. The information can be accessed 
through DB::GetColumnFamilyMetaData or "rocksdb.sstables" DB property.
+* RateLimiter support for throttling background reads, or throttling the sum 
of background reads and writes. This can give more predictable I/O usage when 
compaction reads more data than it writes, e.g., due to lots of deletions.
+* [Experimental] FIFO compaction with TTL support. It can be enabled by 
setting CompactionOptionsFIFO.ttl > 0.
+* Introduce `EventListener::OnBackgroundError()` callback. Users can implement 
it to be notified of errors causing the DB to enter read-only mode, and 
optionally override them.
+* Partitioned Index/Filters exiting the experimental mode. To enable 
partitioned indexes set index_type to kTwoLevelIndexSearch and to further 
enable partitioned filters set partition_filters to true. To configure the 
partition size set metadata_block_size.
+
+
+### Bug Fixes
+* Fix discarding empty compaction output files when `DeleteRange()` is used 
together with subcompactions.
+
+## 5.6.0 (06/06/2017)
+### Public API Change
+* Scheduling flushes and compactions in the same thread pool is no longer 
supported by setting `max_background_flushes=0`. Instead, users can achieve 
this by configuring their high-pri thread pool to have zero threads.
+* Replace `Options::max_background_flushes`, 
`Options::max_background_compactions`, and 
`Options::base_background_compactions` all with `Options::max_background_jobs`, 
which automatically decides how many threads to allocate towards 
flush/compaction.
+* options.delayed_write_rate by default take the value of options.rate_limiter 
rate.
+* Replace global variable `IOStatsContext iostats_context` with 
`IOStatsContext* get_iostats_context()`; replace global variable `PerfContext 
perf_context` with `PerfContext* get_perf_context()`.
+
+### New Features
+* Change ticker/histogram statistics implementations to use core-local 
storage. This improves aggregation speed compared to our previous thread-local 
approach, particularly for applications with many threads.
+* Users can pass a cache object to write buffer manager, so that they can cap 
memory usage for memtable and block cache using one single limit.
+* Flush will be triggered when 7/8 of the limit introduced by 
write_buffer_manager or db_write_buffer_size is triggered, so that the hard 
threshold is hard to hit.
+* Introduce WriteOptions.low_pri. If it is true, low priority writes will be 
throttled if the compaction is behind.
+* `DB::IngestExternalFile()` now supports ingesting files into a database 
containing range deletions.
+
+### Bug Fixes
+* Shouldn't ignore return value of fsync() in flush.
+
+## 5.5.0 (05/17/2017)
+### New Features
+* FIFO compaction to support Intra L0 compaction too with 
CompactionOptionsFIFO.allow_compaction=true.
+* DB::ResetStats() to reset internal stats.
+* Statistics::Reset() to reset user stats.
+* ldb add option --try_load_options, which will open DB with its own option 
file.
+* Introduce WriteBatch::PopSavePoint to pop the most recent save point 
explicitly.
+* Support dynamically change `max_open_files` option via SetDBOptions()
+* Added DB::CreateColumnFamilie() and DB::DropColumnFamilies() to bulk 
create/drop column families.
+* Add debugging function `GetAllKeyVersions` to see internal versions of a 
range of keys.
+* Support file ingestion with universal compaction style
+* Support file ingestion behind with option `allow_ingest_behind`
+* New option 

[13/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/db/log_reader.cc
--
diff --git a/thirdparty/rocksdb/db/log_reader.cc 
b/thirdparty/rocksdb/db/log_reader.cc
new file mode 100644
index 000..cae5d8e
--- /dev/null
+++ b/thirdparty/rocksdb/db/log_reader.cc
@@ -0,0 +1,432 @@
+//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+//  This source code is licensed under both the GPLv2 (found in the
+//  COPYING file in the root directory) and Apache 2.0 License
+//  (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "db/log_reader.h"
+
+#include 
+#include "rocksdb/env.h"
+#include "util/coding.h"
+#include "util/crc32c.h"
+#include "util/file_reader_writer.h"
+
+namespace rocksdb {
+namespace log {
+
+Reader::Reporter::~Reporter() {
+}
+
+Reader::Reader(std::shared_ptr info_log,
+   unique_ptr&& _file, Reporter* reporter,
+   bool checksum, uint64_t initial_offset, uint64_t log_num)
+: info_log_(info_log),
+  file_(std::move(_file)),
+  reporter_(reporter),
+  checksum_(checksum),
+  backing_store_(new char[kBlockSize]),
+  buffer_(),
+  eof_(false),
+  read_error_(false),
+  eof_offset_(0),
+  last_record_offset_(0),
+  end_of_buffer_offset_(0),
+  initial_offset_(initial_offset),
+  log_number_(log_num),
+  recycled_(false) {}
+
+Reader::~Reader() {
+  delete[] backing_store_;
+}
+
+bool Reader::SkipToInitialBlock() {
+  size_t initial_offset_in_block = initial_offset_ % kBlockSize;
+  uint64_t block_start_location = initial_offset_ - initial_offset_in_block;
+
+  // Don't search a block if we'd be in the trailer
+  if (initial_offset_in_block > kBlockSize - 6) {
+block_start_location += kBlockSize;
+  }
+
+  end_of_buffer_offset_ = block_start_location;
+
+  // Skip to start of first block that can contain the initial record
+  if (block_start_location > 0) {
+Status skip_status = file_->Skip(block_start_location);
+if (!skip_status.ok()) {
+  ReportDrop(static_cast(block_start_location), skip_status);
+  return false;
+}
+  }
+
+  return true;
+}
+
+// For kAbsoluteConsistency, on clean shutdown we don't expect any error
+// in the log files.  For other modes, we can ignore only incomplete records
+// in the last log file, which are presumably due to a write in progress
+// during restart (or from log recycling).
+//
+// TODO krad: Evaluate if we need to move to a more strict mode where we
+// restrict the inconsistency to only the last log
+bool Reader::ReadRecord(Slice* record, std::string* scratch,
+WALRecoveryMode wal_recovery_mode) {
+  if (last_record_offset_ < initial_offset_) {
+if (!SkipToInitialBlock()) {
+  return false;
+}
+  }
+
+  scratch->clear();
+  record->clear();
+  bool in_fragmented_record = false;
+  // Record offset of the logical record that we're reading
+  // 0 is a dummy value to make compilers happy
+  uint64_t prospective_record_offset = 0;
+
+  Slice fragment;
+  while (true) {
+uint64_t physical_record_offset = end_of_buffer_offset_ - buffer_.size();
+size_t drop_size = 0;
+const unsigned int record_type = ReadPhysicalRecord(, _size);
+switch (record_type) {
+  case kFullType:
+  case kRecyclableFullType:
+if (in_fragmented_record && !scratch->empty()) {
+  // Handle bug in earlier versions of log::Writer where
+  // it could emit an empty kFirstType record at the tail end
+  // of a block followed by a kFullType or kFirstType record
+  // at the beginning of the next block.
+  ReportCorruption(scratch->size(), "partial record without end(1)");
+}
+prospective_record_offset = physical_record_offset;
+scratch->clear();
+*record = fragment;
+last_record_offset_ = prospective_record_offset;
+return true;
+
+  case kFirstType:
+  case kRecyclableFirstType:
+if (in_fragmented_record && !scratch->empty()) {
+  // Handle bug in earlier versions of log::Writer where
+  // it could emit an empty kFirstType record at the tail end
+  // of a block followed by a kFullType or kFirstType record
+  // at the beginning of the next block.
+  ReportCorruption(scratch->size(), "partial record without end(2)");
+}
+prospective_record_offset = physical_record_offset;
+scratch->assign(fragment.data(), fragment.size());
+in_fragmented_record = true;
+break;
+
+  case kMiddleType:
+  case kRecyclableMiddleType:
+if (!in_fragmented_record) {
+  

[19/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/db/db_impl_write.cc
--
diff --git a/thirdparty/rocksdb/db/db_impl_write.cc 
b/thirdparty/rocksdb/db/db_impl_write.cc
new file mode 100644
index 000..b93dd6f
--- /dev/null
+++ b/thirdparty/rocksdb/db/db_impl_write.cc
@@ -0,0 +1,1240 @@
+//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+//  This source code is licensed under both the GPLv2 (found in the
+//  COPYING file in the root directory) and Apache 2.0 License
+//  (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include "db/db_impl.h"
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+#include 
+#include "db/event_helpers.h"
+#include "monitoring/perf_context_imp.h"
+#include "options/options_helper.h"
+#include "util/sync_point.h"
+
+namespace rocksdb {
+// Convenience methods
+Status DBImpl::Put(const WriteOptions& o, ColumnFamilyHandle* column_family,
+   const Slice& key, const Slice& val) {
+  return DB::Put(o, column_family, key, val);
+}
+
+Status DBImpl::Merge(const WriteOptions& o, ColumnFamilyHandle* column_family,
+ const Slice& key, const Slice& val) {
+  auto cfh = reinterpret_cast(column_family);
+  if (!cfh->cfd()->ioptions()->merge_operator) {
+return Status::NotSupported("Provide a merge_operator when opening DB");
+  } else {
+return DB::Merge(o, column_family, key, val);
+  }
+}
+
+Status DBImpl::Delete(const WriteOptions& write_options,
+  ColumnFamilyHandle* column_family, const Slice& key) {
+  return DB::Delete(write_options, column_family, key);
+}
+
+Status DBImpl::SingleDelete(const WriteOptions& write_options,
+ColumnFamilyHandle* column_family,
+const Slice& key) {
+  return DB::SingleDelete(write_options, column_family, key);
+}
+
+Status DBImpl::Write(const WriteOptions& write_options, WriteBatch* my_batch) {
+  return WriteImpl(write_options, my_batch, nullptr, nullptr);
+}
+
+#ifndef ROCKSDB_LITE
+Status DBImpl::WriteWithCallback(const WriteOptions& write_options,
+ WriteBatch* my_batch,
+ WriteCallback* callback) {
+  return WriteImpl(write_options, my_batch, callback, nullptr);
+}
+#endif  // ROCKSDB_LITE
+
+Status DBImpl::WriteImpl(const WriteOptions& write_options,
+ WriteBatch* my_batch, WriteCallback* callback,
+ uint64_t* log_used, uint64_t log_ref,
+ bool disable_memtable) {
+  if (my_batch == nullptr) {
+return Status::Corruption("Batch is nullptr!");
+  }
+  if (concurrent_prepare_ && immutable_db_options_.enable_pipelined_write) {
+return Status::NotSupported(
+"pipelined_writes is not compatible with concurrent prepares");
+  }
+
+  Status status;
+  if (write_options.low_pri) {
+status = ThrottleLowPriWritesIfNeeded(write_options, my_batch);
+if (!status.ok()) {
+  return status;
+}
+  }
+
+  if (concurrent_prepare_ && disable_memtable) {
+return WriteImplWALOnly(write_options, my_batch, callback, log_used,
+log_ref);
+  }
+
+  if (immutable_db_options_.enable_pipelined_write) {
+return PipelinedWriteImpl(write_options, my_batch, callback, log_used,
+  log_ref, disable_memtable);
+  }
+
+  PERF_TIMER_GUARD(write_pre_and_post_process_time);
+  WriteThread::Writer w(write_options, my_batch, callback, log_ref,
+disable_memtable);
+
+  if (!write_options.disableWAL) {
+RecordTick(stats_, WRITE_WITH_WAL);
+  }
+
+  StopWatch write_sw(env_, immutable_db_options_.statistics.get(), DB_WRITE);
+
+  write_thread_.JoinBatchGroup();
+  if (w.state == WriteThread::STATE_PARALLEL_MEMTABLE_WRITER) {
+// we are a non-leader in a parallel group
+PERF_TIMER_GUARD(write_memtable_time);
+
+if (w.ShouldWriteToMemtable()) {
+  ColumnFamilyMemTablesImpl column_family_memtables(
+  versions_->GetColumnFamilySet());
+  w.status = WriteBatchInternal::InsertInto(
+  , w.sequence, _family_memtables, _scheduler_,
+  write_options.ignore_missing_column_families, 0 /*log_number*/, this,
+  true /*concurrent_memtable_writes*/);
+}
+
+if (write_thread_.CompleteParallelMemTableWriter()) {
+  // we're responsible for exit batch group
+  auto last_sequence = w.write_group->last_sequence;
+  versions_->SetLastSequence(last_sequence);
+  MemTableInsertStatusCheck(w.status);
+  write_thread_.ExitAsBatchGroupFollower();
+}
+

[51/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
MINIFI-372: Replace leveldb with RocksDB

MINIFI-372: Update Cmake
MINIFI-372: Fix tests
MINIFI-372: Include deps
MINIFI-372: Rename data
MINIFI-372: Change cmake cxx flags

MINIFI-372: Remove LevelDB reference. Not including RocksDB because
it is dual licensed under Apache License, Version2.0

MINIFI-372: Remove level db cmake fles and edit comments

MINIFI-372: Add test to cover stream and content repo

MINIFI-372: Update readme

MINIFI-372: Remove rocksdb tests and tools that aren't needed from the source

MINIFI-372: Update feature names and how they are validated

MINIFI_372: Update travis to install rocksdb

MINIFI-372: Resolve issue with inheritance across the class loader

MINIFI-372: Fix volatile repos and polymorphic issues

MINIFI-372: Remove cout statement

MINIFI-372: Remove extraneous RocksDB

This closes #142

Signed-off-by: Jeremy Dyer 


Project: http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/repo
Commit: http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/commit/48867732
Tree: http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/tree/48867732
Diff: http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/diff/48867732

Branch: refs/heads/master
Commit: 488677321cdf32c196fdaae6fb31a2b38ef10461
Parents: 380a98b
Author: Marc Parisi 
Authored: Tue Aug 15 11:34:12 2017 -0400
Committer: Jeremy Dyer 
Committed: Mon Oct 9 12:23:07 2017 -0400

--
 .travis.yml | 5 +-
 CMakeLists.txt  |50 +-
 LICENSE |30 -
 README.md   |18 +-
 cmake/BuildTests.cmake  |30 +-
 cmake/FindLeveldb.cmake |50 -
 cmake/FindRocksDB.cmake |51 +
 docker/Dockerfile   |12 +-
 extensions/http-curl/CMakeLists.txt | 2 +-
 extensions/rocksdb-repos/CMakeLists.txt |84 +
 .../rocksdb-repos/DatabaseContentRepository.cpp |   112 +
 .../rocksdb-repos/DatabaseContentRepository.h   |   132 +
 extensions/rocksdb-repos/FlowFileRepository.cpp |   140 +
 extensions/rocksdb-repos/FlowFileRepository.h   |   168 +
 .../rocksdb-repos/ProvenanceRepository.cpp  |93 +
 extensions/rocksdb-repos/ProvenanceRepository.h |   256 +
 extensions/rocksdb-repos/RocksDBLoader.cpp  |29 +
 extensions/rocksdb-repos/RocksDBLoader.h|80 +
 extensions/rocksdb-repos/RocksDbStream.cpp  |   124 +
 extensions/rocksdb-repos/RocksDbStream.h|   185 +
 libminifi/CMakeLists.txt|17 +-
 libminifi/cmake/FindLeveldb.cmake   |50 -
 libminifi/cmake/FindRocksDB.cmake   |51 +
 libminifi/include/Connection.h  | 8 +-
 libminifi/include/SchedulingAgent.h | 1 -
 libminifi/include/core/ClassLoader.h|37 +-
 libminifi/include/core/Connectable.h| 4 +
 libminifi/include/core/ContentRepository.h  | 2 +
 libminifi/include/core/Core.h   | 6 +-
 libminifi/include/core/FlowConfiguration.h  |16 +-
 libminifi/include/core/FlowFile.h   |31 +-
 libminifi/include/core/ProcessGroup.h   | 2 +
 libminifi/include/core/Repository.h |10 +-
 libminifi/include/core/SerializableComponent.h  |24 +-
 .../core/repository/FlowFileRepository.h|   167 -
 .../core/repository/VolatileContentRepository.h | 7 +-
 .../repository/VolatileFlowFileRepository.h |18 +-
 .../repository/VolatileProvenanceRepository.h   | 6 +-
 .../core/repository/VolatileRepository.h|17 +-
 libminifi/include/properties/Configure.h| 1 +
 .../include/provenance/ProvenanceRepository.h   |   250 -
 libminifi/src/Configure.cpp | 1 +
 libminifi/src/FlowController.cpp| 9 +-
 libminifi/src/FlowFileRecord.cpp|22 +-
 libminifi/src/core/FlowConfiguration.cpp| 3 +-
 libminifi/src/core/FlowFile.cpp |13 +-
 libminifi/src/core/ProcessGroup.cpp |10 +
 libminifi/src/core/Repository.cpp   | 1 -
 libminifi/src/core/RepositoryFactory.cpp|57 +-
 .../src/core/repository/FlowFileRepository.cpp  |   140 -
 .../repository/VolatileContentRepository.cpp|19 +-
 .../src/provenance/ProvenanceRepository.cpp |92 -
 .../test/curl-tests/C2NullConfiguration.cpp | 4 +-
 libminifi/test/nodefs/NoLevelDB.cpp |32 -
 libminifi/test/nodefs/NoRocksDB.cpp |32 +
 .../rocksdb-tests/DBContentRepositoryTests.cpp  |   246 +
 .../test/rocksdb-tests/ProvenanceTests.cpp  |   165 +
 libminifi/test/rocksdb-tests/RepoTests.cpp  |   162 +
 

[02/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/docs/_sass/_search.scss
--
diff --git a/thirdparty/rocksdb/docs/_sass/_search.scss 
b/thirdparty/rocksdb/docs/_sass/_search.scss
new file mode 100644
index 000..eadfa11
--- /dev/null
+++ b/thirdparty/rocksdb/docs/_sass/_search.scss
@@ -0,0 +1,142 @@
+input[type="search"] {
+-moz-appearance:none;
+-webkit-appearance: none;
+}
+
+.navSearchWrapper {
+  align-self: center;
+  position: relative;
+
+  &::before {
+border: 3px solid $primary-overlay-special;
+border-radius: 50%;
+content: " ";
+display: block;
+height: 6px;
+left: 15px;
+width: 6px;
+position: absolute;
+top: 4px;
+z-index: 1;
+  }
+
+  &::after {
+background: $primary-overlay-special;
+content: " ";
+height: 7px;
+left: 24px;
+position: absolute;
+transform: rotate(-45deg);
+top: 12px;
+width: 3px;
+z-index: 1;
+  }
+
+  .aa-dropdown-menu {
+background: $secondary-bg;
+border: 3px solid rgba($text, 0.25);
+color: $text;
+font-size: 14px;
+left: auto !important;
+line-height: 1.2em;
+right: 0 !important;
+
+.algolia-docsearch-suggestion--category-header {
+  background: $primary-overlay-special;
+  color: $primary-bg;
+
+  .algolia-docsearch-suggestion--highlight {
+background-color: $primary-bg;
+color: $primary-overlay;
+  }
+}
+
+.algolia-docsearch-suggestion--title 
.algolia-docsearch-suggestion--highlight,
+.algolia-docsearch-suggestion--subcategory-column 
.algolia-docsearch-suggestion--highlight {
+color: $primary-bg;
+}
+
+.algolia-docsearch-suggestion__secondary,
+.algolia-docsearch-suggestion--subcategory-column {
+  border-color: rgba($text, 0.3);
+}
+  }
+}
+
+input#search_input {
+  padding-left: 25px;
+  font-size: 14px;
+  line-height: 20px;
+  border-radius: 20px;
+  background-color: rgba($primary-overlay-special, 0.25);
+  border: none;
+  color: rgba($primary-overlay-special, 0);
+  outline: none;
+  position: relative;
+  transition: background-color .2s cubic-bezier(0.68, -0.55, 0.265, 1.55), 
width .2s cubic-bezier(0.68, -0.55, 0.265, 1.55), color .2s ease;
+  width: 60px;
+
+  &:focus, &:active {
+background-color: $secondary-bg;
+color: $text;
+width: 240px;
+  }
+}
+
+.navigationSlider {
+  .navSearchWrapper {
+&::before {
+  left: 6px;
+  top: 6px;
+}
+
+&::after {
+  left: 15px;
+  top: 14px;
+}
+  }
+
+  input#search_input_react {
+box-sizing: border-box;
+padding-left: 25px;
+font-size: 14px;
+line-height: 20px;
+border-radius: 20px;
+background-color: rgba($primary-overlay-special, 0.25);
+border: none;
+color: $text;
+outline: none;
+position: relative;
+transition: background-color .2s cubic-bezier(0.68, -0.55, 0.265, 1.55), 
width .2s cubic-bezier(0.68, -0.55, 0.265, 1.55), color .2s ease;
+width: 100%;
+
+&:focus, &:active {
+  background-color: $primary-bg;
+  color: $primary-overlay;
+}
+  }
+
+  .algolia-docsearch-suggestion--subcategory-inline {
+display: none;
+  }
+
+  & > span {
+width: 100%;
+  }
+
+  .aa-dropdown-menu {
+background: $secondary-bg;
+border: 0px solid $secondary-bg;
+color: $text;
+font-size: 12px;
+line-height: 2em;
+max-height: 140px;
+min-width: auto;
+overflow-y: scroll;
+-webkit-overflow-scrolling: touch;
+padding: 0;
+border-radius: 0;
+position: relative !important;
+width: 100%;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/docs/_sass/_slideshow.scss
--
diff --git a/thirdparty/rocksdb/docs/_sass/_slideshow.scss 
b/thirdparty/rocksdb/docs/_sass/_slideshow.scss
new file mode 100644
index 000..cd98a6c
--- /dev/null
+++ b/thirdparty/rocksdb/docs/_sass/_slideshow.scss
@@ -0,0 +1,48 @@
+.slideshow {
+  position: relative;
+
+  .slide {
+display: none;
+
+img {
+  display: block;
+  margin: 0 auto;
+}
+
+&.slideActive {
+  display: block;
+}
+
+a {
+  border: none;
+  display: block;
+}
+  }
+
+  .pagination {
+display: block;
+margin: -10px;
+padding: 1em 0;
+text-align: center;
+width: 100%;
+
+.pager {
+  background: transparent;
+  border: 2px solid rgba(255, 255, 255, 0.5);
+  border-radius: 50%;
+  cursor: pointer;
+  display: inline-block;
+  height: 12px;
+  margin: 10px;
+  transition: background-color 0.3s, border-color 0.3s;
+  width: 12px;
+
+  &.pagerActive {
+background: rgba(255, 255, 255, 0.5);
+border-width: 4px;
+height: 8px;
+width: 8px;
+  }
+}
+  }
+}


[45/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/leveldb-1.18/db/dbformat.h
--
diff --git a/thirdparty/leveldb-1.18/db/dbformat.h 
b/thirdparty/leveldb-1.18/db/dbformat.h
deleted file mode 100755
index ea897b1..000
--- a/thirdparty/leveldb-1.18/db/dbformat.h
+++ /dev/null
@@ -1,230 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef STORAGE_LEVELDB_DB_DBFORMAT_H_
-#define STORAGE_LEVELDB_DB_DBFORMAT_H_
-
-#include 
-#include "leveldb/comparator.h"
-#include "leveldb/db.h"
-#include "leveldb/filter_policy.h"
-#include "leveldb/slice.h"
-#include "leveldb/table_builder.h"
-#include "util/coding.h"
-#include "util/logging.h"
-
-namespace leveldb {
-
-// Grouping of constants.  We may want to make some of these
-// parameters set via options.
-namespace config {
-static const int kNumLevels = 7;
-
-// Level-0 compaction is started when we hit this many files.
-static const int kL0_CompactionTrigger = 4;
-
-// Soft limit on number of level-0 files.  We slow down writes at this point.
-static const int kL0_SlowdownWritesTrigger = 8;
-
-// Maximum number of level-0 files.  We stop writes at this point.
-static const int kL0_StopWritesTrigger = 12;
-
-// Maximum level to which a new compacted memtable is pushed if it
-// does not create overlap.  We try to push to level 2 to avoid the
-// relatively expensive level 0=>1 compactions and to avoid some
-// expensive manifest file operations.  We do not push all the way to
-// the largest level since that can generate a lot of wasted disk
-// space if the same key space is being repeatedly overwritten.
-static const int kMaxMemCompactLevel = 2;
-
-// Approximate gap in bytes between samples of data read during iteration.
-static const int kReadBytesPeriod = 1048576;
-
-}  // namespace config
-
-class InternalKey;
-
-// Value types encoded as the last component of internal keys.
-// DO NOT CHANGE THESE ENUM VALUES: they are embedded in the on-disk
-// data structures.
-enum ValueType {
-  kTypeDeletion = 0x0,
-  kTypeValue = 0x1
-};
-// kValueTypeForSeek defines the ValueType that should be passed when
-// constructing a ParsedInternalKey object for seeking to a particular
-// sequence number (since we sort sequence numbers in decreasing order
-// and the value type is embedded as the low 8 bits in the sequence
-// number in internal keys, we need to use the highest-numbered
-// ValueType, not the lowest).
-static const ValueType kValueTypeForSeek = kTypeValue;
-
-typedef uint64_t SequenceNumber;
-
-// We leave eight bits empty at the bottom so a type and sequence#
-// can be packed together into 64-bits.
-static const SequenceNumber kMaxSequenceNumber =
-((0x1ull << 56) - 1);
-
-struct ParsedInternalKey {
-  Slice user_key;
-  SequenceNumber sequence;
-  ValueType type;
-
-  ParsedInternalKey() { }  // Intentionally left uninitialized (for speed)
-  ParsedInternalKey(const Slice& u, const SequenceNumber& seq, ValueType t)
-  : user_key(u), sequence(seq), type(t) { }
-  std::string DebugString() const;
-};
-
-// Return the length of the encoding of "key".
-inline size_t InternalKeyEncodingLength(const ParsedInternalKey& key) {
-  return key.user_key.size() + 8;
-}
-
-// Append the serialization of "key" to *result.
-extern void AppendInternalKey(std::string* result,
-  const ParsedInternalKey& key);
-
-// Attempt to parse an internal key from "internal_key".  On success,
-// stores the parsed data in "*result", and returns true.
-//
-// On error, returns false, leaves "*result" in an undefined state.
-extern bool ParseInternalKey(const Slice& internal_key,
- ParsedInternalKey* result);
-
-// Returns the user key portion of an internal key.
-inline Slice ExtractUserKey(const Slice& internal_key) {
-  assert(internal_key.size() >= 8);
-  return Slice(internal_key.data(), internal_key.size() - 8);
-}
-
-inline ValueType ExtractValueType(const Slice& internal_key) {
-  assert(internal_key.size() >= 8);
-  const size_t n = internal_key.size();
-  uint64_t num = DecodeFixed64(internal_key.data() + n - 8);
-  unsigned char c = num & 0xff;
-  return static_cast(c);
-}
-
-// A comparator for internal keys that uses a specified comparator for
-// the user key portion and breaks ties by decreasing sequence number.
-class InternalKeyComparator : public Comparator {
- private:
-  const Comparator* user_comparator_;
- public:
-  explicit InternalKeyComparator(const Comparator* c) : user_comparator_(c) { }
-  virtual const char* Name() const;
-  virtual int Compare(const Slice& a, const Slice& b) const;
-  virtual void FindShortestSeparator(
-  std::string* start,
-  const Slice& limit) const;
-  virtual void 

[04/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/docs/_posts/2014-05-14-lock.markdown
--
diff --git a/thirdparty/rocksdb/docs/_posts/2014-05-14-lock.markdown 
b/thirdparty/rocksdb/docs/_posts/2014-05-14-lock.markdown
new file mode 100644
index 000..12009cc
--- /dev/null
+++ b/thirdparty/rocksdb/docs/_posts/2014-05-14-lock.markdown
@@ -0,0 +1,88 @@
+---
+title: Reducing Lock Contention in RocksDB
+layout: post
+author: sdong
+category: blog
+redirect_from:
+  - /blog/521/lock/
+---
+
+In this post, we briefly introduce the recent improvements we did to RocksDB 
to improve the issue of lock contention costs.
+
+RocksDB has a simple thread synchronization mechanism (See [RocksDB 
Architecture 
Guide](https://github.com/facebook/rocksdb/wiki/Rocksdb-Architecture-Guide)  
to understand terms used below, like SST tables or mem tables). SST tables are 
immutable after being written and mem tables are lock-free data structures 
supporting single writer and multiple readers. There is only one single major 
lock, the DB mutex (DBImpl.mutex_) protecting all the meta operations, 
including:
+
+
+
+  * Increase or decrease reference counters of mem tables and SST tables
+
+
+  * Change and check meta data structures, before and after finishing 
compactions, flushes and new mem table creations
+
+
+  * Coordinating writers
+
+
+This DB mutex used to be scalability bottleneck preventing us from scaling to 
more than 16 threads. To address the issue, we improved RocksDB in several ways.
+
+1. Consolidate reference counters and introduce "super version". For every 
read operation, mutex was acquired, and reference counters for each mem table 
and each SST table were increased. One such operation is not expensive but if 
you are building a high throughput server with lots of reads, the lock 
contention will become the bottleneck. This is especially true if you store all 
your data in RAM.
+
+To solve this problem, we created a meta-meta data structure called “[super 
version](https://reviews.facebook.net/rROCKSDB1fdb3f7dc60e96394e3e5b69a46ede5d67fb976c)”,
 which holds reference counters to all those mem table and SST tables, so that 
readers only need to increase the reference counters for this single data 
structure. In RocksDB, list of live mem tables and SST tables only changes 
infrequently, which would happen when new mem tables are created or 
flush/compaction happens. Now, at those times, a new super version is created 
with their reference counters increased. A super version lists live mem tables 
and SST tables so a reader only needs acquire the lock in order to find the 
latest super version and increase its reference counter. From the super 
version, the reader can find all the mem and SST tables which are safety 
accessible as long as the reader holds the reference count for the super 
version.
+
+2. We replace some reference counters to stc::atomic objects, so that 
decreasing reference count of an object usually doesn’t need to be inside the 
mutex any more.
+
+3. Make fetching super version and reference counting lock-free in read 
queries. After consolidating reference counting to one single super version and 
removing the locking for decreasing reference counts, in read case, we only 
acquire mutex for one thing: fetch the latest super version and increase the 
reference count for that (dereference the counter is done in an atomic 
decrease). We designed and implemented a (mostly) lock-free approach to do it. 
See 
[details](https://github.com/facebook/rocksdb/raw/gh-pages/talks/2014-03-27-RocksDB-Meetup-Lei-Lockless-Get.pdf).
 We will write a separate blog post for that.
+
+4. Avoid disk I/O inside the mutex. As we know, each disk I/O to hard drives 
takes several milliseconds. It can be even longer if file system journal is 
involved or I/Os are queued. Even occasional disk I/O within mutex can cause 
huge performance outliers.
+We identified in two situations, we might do disk I/O inside mutex and we 
removed them:
+(1) Opening and closing transactional log files. We moved those operations out 
of the mutex.
+(2) Information logging. In multiple places we write to logs within mutex. 
There is a chance that file write will wait for disk I/O to finish before 
finishing, even if fsync() is not issued, especially in EXT systems. We 
occasionally see 100+ milliseconds write() latency on EXT. Instead of removing 
those logging, we came up with a solution of delay logging. When inside mutex, 
instead of directly writing to the log file, we write to a log buffer, with the 
timing information. As soon as mutex is released, we flush the log buffer to 
log files.
+
+5. Reduce object creation inside the mutex.
+Object creation can be slow because it involves malloc (in our case). Malloc 
sometimes is slow because it needs to lock some shared data structures. 
Allocating can also be slow because we 

[30/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/cache/lru_cache.h
--
diff --git a/thirdparty/rocksdb/cache/lru_cache.h 
b/thirdparty/rocksdb/cache/lru_cache.h
new file mode 100644
index 000..2fd44bb
--- /dev/null
+++ b/thirdparty/rocksdb/cache/lru_cache.h
@@ -0,0 +1,298 @@
+//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+//  This source code is licensed under both the GPLv2 (found in the
+//  COPYING file in the root directory) and Apache 2.0 License
+//  (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#pragma once
+
+#include 
+
+#include "cache/sharded_cache.h"
+
+#include "port/port.h"
+#include "util/autovector.h"
+
+namespace rocksdb {
+
+// LRU cache implementation
+
+// An entry is a variable length heap-allocated structure.
+// Entries are referenced by cache and/or by any external entity.
+// The cache keeps all its entries in table. Some elements
+// are also stored on LRU list.
+//
+// LRUHandle can be in these states:
+// 1. Referenced externally AND in hash table.
+//  In that case the entry is *not* in the LRU. (refs > 1 && in_cache == true)
+// 2. Not referenced externally and in hash table. In that case the entry is
+// in the LRU and can be freed. (refs == 1 && in_cache == true)
+// 3. Referenced externally and not in hash table. In that case the entry is
+// in not on LRU and not in table. (refs >= 1 && in_cache == false)
+//
+// All newly created LRUHandles are in state 1. If you call
+// LRUCacheShard::Release
+// on entry in state 1, it will go into state 2. To move from state 1 to
+// state 3, either call LRUCacheShard::Erase or LRUCacheShard::Insert with the
+// same key.
+// To move from state 2 to state 1, use LRUCacheShard::Lookup.
+// Before destruction, make sure that no handles are in state 1. This means
+// that any successful LRUCacheShard::Lookup/LRUCacheShard::Insert have a
+// matching
+// RUCache::Release (to move into state 2) or LRUCacheShard::Erase (for state 
3)
+
+struct LRUHandle {
+  void* value;
+  void (*deleter)(const Slice&, void* value);
+  LRUHandle* next_hash;
+  LRUHandle* next;
+  LRUHandle* prev;
+  size_t charge;  // TODO(opt): Only allow uint32_t?
+  size_t key_length;
+  uint32_t refs; // a number of refs to this entry
+ // cache itself is counted as 1
+
+  // Include the following flags:
+  //   in_cache:whether this entry is referenced by the hash table.
+  //   is_high_pri: whether this entry is high priority entry.
+  //   in_high_pro_pool: whether this entry is in high-pri pool.
+  char flags;
+
+  uint32_t hash; // Hash of key(); used for fast sharding and comparisons
+
+  char key_data[1];  // Beginning of key
+
+  Slice key() const {
+// For cheaper lookups, we allow a temporary Handle object
+// to store a pointer to a key in "value".
+if (next == this) {
+  return *(reinterpret_cast(value));
+} else {
+  return Slice(key_data, key_length);
+}
+  }
+
+  bool InCache() { return flags & 1; }
+  bool IsHighPri() { return flags & 2; }
+  bool InHighPriPool() { return flags & 4; }
+
+  void SetInCache(bool in_cache) {
+if (in_cache) {
+  flags |= 1;
+} else {
+  flags &= ~1;
+}
+  }
+
+  void SetPriority(Cache::Priority priority) {
+if (priority == Cache::Priority::HIGH) {
+  flags |= 2;
+} else {
+  flags &= ~2;
+}
+  }
+
+  void SetInHighPriPool(bool in_high_pri_pool) {
+if (in_high_pri_pool) {
+  flags |= 4;
+} else {
+  flags &= ~4;
+}
+  }
+
+  void Free() {
+assert((refs == 1 && InCache()) || (refs == 0 && !InCache()));
+if (deleter) {
+  (*deleter)(key(), value);
+}
+delete[] reinterpret_cast(this);
+  }
+};
+
+// We provide our own simple hash table since it removes a whole bunch
+// of porting hacks and is also faster than some of the built-in hash
+// table implementations in some of the compiler/runtime combinations
+// we have tested.  E.g., readrandom speeds up by ~5% over the g++
+// 4.4.3's builtin hashtable.
+class LRUHandleTable {
+ public:
+  LRUHandleTable();
+  ~LRUHandleTable();
+
+  LRUHandle* Lookup(const Slice& key, uint32_t hash);
+  LRUHandle* Insert(LRUHandle* h);
+  LRUHandle* Remove(const Slice& key, uint32_t hash);
+
+  template 
+  void ApplyToAllCacheEntries(T func) {
+for (uint32_t i = 0; i < length_; i++) {
+  LRUHandle* h = list_[i];
+  while (h != nullptr) {
+auto n = h->next_hash;
+assert(h->InCache());
+func(h);
+h = n;
+  }
+}
+  }
+
+ private:
+  // Return a pointer to slot that points to a cache entry that
+  // matches key/hash.  If there is 

[16/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/db/dbformat.h
--
diff --git a/thirdparty/rocksdb/db/dbformat.h b/thirdparty/rocksdb/db/dbformat.h
new file mode 100644
index 000..d9fd5f3
--- /dev/null
+++ b/thirdparty/rocksdb/db/dbformat.h
@@ -0,0 +1,596 @@
+//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+//  This source code is licensed under both the GPLv2 (found in the
+//  COPYING file in the root directory) and Apache 2.0 License
+//  (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#pragma once
+#include 
+#include 
+#include 
+#include "rocksdb/comparator.h"
+#include "rocksdb/db.h"
+#include "rocksdb/filter_policy.h"
+#include "rocksdb/slice.h"
+#include "rocksdb/slice_transform.h"
+#include "rocksdb/table.h"
+#include "rocksdb/types.h"
+#include "util/coding.h"
+#include "util/logging.h"
+
+namespace rocksdb {
+
+class InternalKey;
+
+// Value types encoded as the last component of internal keys.
+// DO NOT CHANGE THESE ENUM VALUES: they are embedded in the on-disk
+// data structures.
+// The highest bit of the value type needs to be reserved to SST tables
+// for them to do more flexible encoding.
+enum ValueType : unsigned char {
+  kTypeDeletion = 0x0,
+  kTypeValue = 0x1,
+  kTypeMerge = 0x2,
+  kTypeLogData = 0x3,   // WAL only.
+  kTypeColumnFamilyDeletion = 0x4,  // WAL only.
+  kTypeColumnFamilyValue = 0x5, // WAL only.
+  kTypeColumnFamilyMerge = 0x6, // WAL only.
+  kTypeSingleDeletion = 0x7,
+  kTypeColumnFamilySingleDeletion = 0x8,  // WAL only.
+  kTypeBeginPrepareXID = 0x9, // WAL only.
+  kTypeEndPrepareXID = 0xA,   // WAL only.
+  kTypeCommitXID = 0xB,   // WAL only.
+  kTypeRollbackXID = 0xC, // WAL only.
+  kTypeNoop = 0xD,// WAL only.
+  kTypeColumnFamilyRangeDeletion = 0xE,   // WAL only.
+  kTypeRangeDeletion = 0xF,   // meta block
+  kMaxValue = 0x7F// Not used for storing records.
+};
+
+// Defined in dbformat.cc
+extern const ValueType kValueTypeForSeek;
+extern const ValueType kValueTypeForSeekForPrev;
+
+// Checks whether a type is an inline value type
+// (i.e. a type used in memtable skiplist and sst file datablock).
+inline bool IsValueType(ValueType t) {
+  return t <= kTypeMerge || t == kTypeSingleDeletion;
+}
+
+// Checks whether a type is from user operation
+// kTypeRangeDeletion is in meta block so this API is separated from above
+inline bool IsExtendedValueType(ValueType t) {
+  return IsValueType(t) || t == kTypeRangeDeletion;
+}
+
+// We leave eight bits empty at the bottom so a type and sequence#
+// can be packed together into 64-bits.
+static const SequenceNumber kMaxSequenceNumber =
+((0x1ull << 56) - 1);
+
+static const SequenceNumber kDisableGlobalSequenceNumber = port::kMaxUint64;
+
+struct ParsedInternalKey {
+  Slice user_key;
+  SequenceNumber sequence;
+  ValueType type;
+
+  ParsedInternalKey()
+  : sequence(kMaxSequenceNumber)  // Make code analyzer happy
+  {}  // Intentionally left uninitialized (for speed)
+  ParsedInternalKey(const Slice& u, const SequenceNumber& seq, ValueType t)
+  : user_key(u), sequence(seq), type(t) { }
+  std::string DebugString(bool hex = false) const;
+
+  void clear() {
+user_key.clear();
+sequence = 0;
+type = kTypeDeletion;
+  }
+};
+
+// Return the length of the encoding of "key".
+inline size_t InternalKeyEncodingLength(const ParsedInternalKey& key) {
+  return key.user_key.size() + 8;
+}
+
+// Pack a sequence number and a ValueType into a uint64_t
+extern uint64_t PackSequenceAndType(uint64_t seq, ValueType t);
+
+// Given the result of PackSequenceAndType, store the sequence number in *seq
+// and the ValueType in *t.
+extern void UnPackSequenceAndType(uint64_t packed, uint64_t* seq, ValueType* 
t);
+
+// Append the serialization of "key" to *result.
+extern void AppendInternalKey(std::string* result,
+  const ParsedInternalKey& key);
+// Serialized internal key consists of user key followed by footer.
+// This function appends the footer to *result, assuming that *result already
+// contains the user key at the end.
+extern void AppendInternalKeyFooter(std::string* result, SequenceNumber s,
+ValueType t);
+
+// Attempt to parse an internal key from "internal_key".  On success,
+// stores the parsed data in "*result", and returns true.
+//
+// On error, returns false, leaves "*result" in an undefined state.
+extern bool ParseInternalKey(const Slice& internal_key,
+ ParsedInternalKey* result);
+
+// 

[46/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/leveldb-1.18/db/db_impl.h
--
diff --git a/thirdparty/leveldb-1.18/db/db_impl.h 
b/thirdparty/leveldb-1.18/db/db_impl.h
deleted file mode 100755
index cfc9981..000
--- a/thirdparty/leveldb-1.18/db/db_impl.h
+++ /dev/null
@@ -1,211 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef STORAGE_LEVELDB_DB_DB_IMPL_H_
-#define STORAGE_LEVELDB_DB_DB_IMPL_H_
-
-#include 
-#include 
-#include "db/dbformat.h"
-#include "db/log_writer.h"
-#include "db/snapshot.h"
-#include "leveldb/db.h"
-#include "leveldb/env.h"
-#include "port/port.h"
-#include "port/thread_annotations.h"
-
-namespace leveldb {
-
-class MemTable;
-class TableCache;
-class Version;
-class VersionEdit;
-class VersionSet;
-
-class DBImpl : public DB {
- public:
-  DBImpl(const Options& options, const std::string& dbname);
-  virtual ~DBImpl();
-
-  // Implementations of the DB interface
-  virtual Status Put(const WriteOptions&, const Slice& key, const Slice& 
value);
-  virtual Status Delete(const WriteOptions&, const Slice& key);
-  virtual Status Write(const WriteOptions& options, WriteBatch* updates);
-  virtual Status Get(const ReadOptions& options,
- const Slice& key,
- std::string* value);
-  virtual Iterator* NewIterator(const ReadOptions&);
-  virtual const Snapshot* GetSnapshot();
-  virtual void ReleaseSnapshot(const Snapshot* snapshot);
-  virtual bool GetProperty(const Slice& property, std::string* value);
-  virtual void GetApproximateSizes(const Range* range, int n, uint64_t* sizes);
-  virtual void CompactRange(const Slice* begin, const Slice* end);
-
-  // Extra methods (for testing) that are not in the public DB interface
-
-  // Compact any files in the named level that overlap [*begin,*end]
-  void TEST_CompactRange(int level, const Slice* begin, const Slice* end);
-
-  // Force current memtable contents to be compacted.
-  Status TEST_CompactMemTable();
-
-  // Return an internal iterator over the current state of the database.
-  // The keys of this iterator are internal keys (see format.h).
-  // The returned iterator should be deleted when no longer needed.
-  Iterator* TEST_NewInternalIterator();
-
-  // Return the maximum overlapping data (in bytes) at next level for any
-  // file at a level >= 1.
-  int64_t TEST_MaxNextLevelOverlappingBytes();
-
-  // Record a sample of bytes read at the specified internal key.
-  // Samples are taken approximately once every config::kReadBytesPeriod
-  // bytes.
-  void RecordReadSample(Slice key);
-
- private:
-  friend class DB;
-  struct CompactionState;
-  struct Writer;
-
-  Iterator* NewInternalIterator(const ReadOptions&,
-SequenceNumber* latest_snapshot,
-uint32_t* seed);
-
-  Status NewDB();
-
-  // Recover the descriptor from persistent storage.  May do a significant
-  // amount of work to recover recently logged updates.  Any changes to
-  // be made to the descriptor are added to *edit.
-  Status Recover(VersionEdit* edit) EXCLUSIVE_LOCKS_REQUIRED(mutex_);
-
-  void MaybeIgnoreError(Status* s) const;
-
-  // Delete any unneeded files and stale in-memory entries.
-  void DeleteObsoleteFiles();
-
-  // Compact the in-memory write buffer to disk.  Switches to a new
-  // log-file/memtable and writes a new descriptor iff successful.
-  // Errors are recorded in bg_error_.
-  void CompactMemTable() EXCLUSIVE_LOCKS_REQUIRED(mutex_);
-
-  Status RecoverLogFile(uint64_t log_number,
-VersionEdit* edit,
-SequenceNumber* max_sequence)
-  EXCLUSIVE_LOCKS_REQUIRED(mutex_);
-
-  Status WriteLevel0Table(MemTable* mem, VersionEdit* edit, Version* base)
-  EXCLUSIVE_LOCKS_REQUIRED(mutex_);
-
-  Status MakeRoomForWrite(bool force /* compact even if there is room? */)
-  EXCLUSIVE_LOCKS_REQUIRED(mutex_);
-  WriteBatch* BuildBatchGroup(Writer** last_writer);
-
-  void RecordBackgroundError(const Status& s);
-
-  void MaybeScheduleCompaction() EXCLUSIVE_LOCKS_REQUIRED(mutex_);
-  static void BGWork(void* db);
-  void BackgroundCall();
-  void  BackgroundCompaction() EXCLUSIVE_LOCKS_REQUIRED(mutex_);
-  void CleanupCompaction(CompactionState* compact)
-  EXCLUSIVE_LOCKS_REQUIRED(mutex_);
-  Status DoCompactionWork(CompactionState* compact)
-  EXCLUSIVE_LOCKS_REQUIRED(mutex_);
-
-  Status OpenCompactionOutputFile(CompactionState* compact);
-  Status FinishCompactionOutputFile(CompactionState* compact, Iterator* input);
-  Status InstallCompactionResults(CompactionState* compact)
-  EXCLUSIVE_LOCKS_REQUIRED(mutex_);
-
-  // Constant after construction
-  Env* const env_;
-  const 

[14/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/db/internal_stats.cc
--
diff --git a/thirdparty/rocksdb/db/internal_stats.cc 
b/thirdparty/rocksdb/db/internal_stats.cc
new file mode 100644
index 000..54723ea
--- /dev/null
+++ b/thirdparty/rocksdb/db/internal_stats.cc
@@ -0,0 +1,1174 @@
+//  This source code is licensed under both the GPLv2 (found in the
+//  COPYING file in the root directory) and Apache 2.0 License
+//  (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "db/internal_stats.h"
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include "db/column_family.h"
+
+#include "db/db_impl.h"
+#include "util/string_util.h"
+
+namespace rocksdb {
+
+#ifndef ROCKSDB_LITE
+
+const std::map InternalStats::compaction_level_stats 
=
+{
+{LevelStatType::NUM_FILES, LevelStat{"NumFiles", "Files"}},
+{LevelStatType::COMPACTED_FILES,
+ LevelStat{"CompactedFiles", "CompactedFiles"}},
+{LevelStatType::SIZE_BYTES, LevelStat{"SizeBytes", "Size"}},
+{LevelStatType::SCORE, LevelStat{"Score", "Score"}},
+{LevelStatType::READ_GB, LevelStat{"ReadGB", "Read(GB)"}},
+{LevelStatType::RN_GB, LevelStat{"RnGB", "Rn(GB)"}},
+{LevelStatType::RNP1_GB, LevelStat{"Rnp1GB", "Rnp1(GB)"}},
+{LevelStatType::WRITE_GB, LevelStat{"WriteGB", "Write(GB)"}},
+{LevelStatType::W_NEW_GB, LevelStat{"WnewGB", "Wnew(GB)"}},
+{LevelStatType::MOVED_GB, LevelStat{"MovedGB", "Moved(GB)"}},
+{LevelStatType::WRITE_AMP, LevelStat{"WriteAmp", "W-Amp"}},
+{LevelStatType::READ_MBPS, LevelStat{"ReadMBps", "Rd(MB/s)"}},
+{LevelStatType::WRITE_MBPS, LevelStat{"WriteMBps", "Wr(MB/s)"}},
+{LevelStatType::COMP_SEC, LevelStat{"CompSec", "Comp(sec)"}},
+{LevelStatType::COMP_COUNT, LevelStat{"CompCount", "Comp(cnt)"}},
+{LevelStatType::AVG_SEC, LevelStat{"AvgSec", "Avg(sec)"}},
+{LevelStatType::KEY_IN, LevelStat{"KeyIn", "KeyIn"}},
+{LevelStatType::KEY_DROP, LevelStat{"KeyDrop", "KeyDrop"}},
+};
+
+namespace {
+const double kMB = 1048576.0;
+const double kGB = kMB * 1024;
+const double kMicrosInSec = 100.0;
+
+void PrintLevelStatsHeader(char* buf, size_t len, const std::string& cf_name) {
+  int written_size =
+  snprintf(buf, len, "\n** Compaction Stats [%s] **\n", cf_name.c_str());
+  auto hdr = [](LevelStatType t) {
+return InternalStats::compaction_level_stats.at(t).header_name.c_str();
+  };
+  int line_size = snprintf(
+  buf + written_size, len - written_size,
+  "Level%s   %s %s %s  %s %s %s %s %s %s %s %s %s %s %s %s %s\n",
+  // Note that we skip COMPACTED_FILES and merge it with Files column
+  hdr(LevelStatType::NUM_FILES), hdr(LevelStatType::SIZE_BYTES),
+  hdr(LevelStatType::SCORE), hdr(LevelStatType::READ_GB),
+  hdr(LevelStatType::RN_GB), hdr(LevelStatType::RNP1_GB),
+  hdr(LevelStatType::WRITE_GB), hdr(LevelStatType::W_NEW_GB),
+  hdr(LevelStatType::MOVED_GB), hdr(LevelStatType::WRITE_AMP),
+  hdr(LevelStatType::READ_MBPS), hdr(LevelStatType::WRITE_MBPS),
+  hdr(LevelStatType::COMP_SEC), hdr(LevelStatType::COMP_COUNT),
+  hdr(LevelStatType::AVG_SEC), hdr(LevelStatType::KEY_IN),
+  hdr(LevelStatType::KEY_DROP));
+
+  written_size += line_size;
+  snprintf(buf + written_size, len - written_size, "%s\n",
+   std::string(line_size, '-').c_str());
+}
+
+void PrepareLevelStats(std::map* level_stats,
+   int num_files, int being_compacted,
+   double total_file_size, double score, double w_amp,
+   const InternalStats::CompactionStats& stats) {
+  uint64_t bytes_read =
+  stats.bytes_read_non_output_levels + stats.bytes_read_output_level;
+  int64_t bytes_new =
+  stats.bytes_written - stats.bytes_read_output_level;
+  double elapsed = (stats.micros + 1) / kMicrosInSec;
+
+  (*level_stats)[LevelStatType::NUM_FILES] = num_files;
+  (*level_stats)[LevelStatType::COMPACTED_FILES] = being_compacted;
+  (*level_stats)[LevelStatType::SIZE_BYTES] = total_file_size;
+  (*level_stats)[LevelStatType::SCORE] = score;
+  (*level_stats)[LevelStatType::READ_GB] = bytes_read / kGB;
+  (*level_stats)[LevelStatType::RN_GB] =
+  stats.bytes_read_non_output_levels / kGB;
+  (*level_stats)[LevelStatType::RNP1_GB] = stats.bytes_read_output_level / kGB;
+  (*level_stats)[LevelStatType::WRITE_GB] = stats.bytes_written / kGB;
+  (*level_stats)[LevelStatType::W_NEW_GB] = bytes_new / kGB;
+  

[24/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/db/compaction_picker_universal.cc
--
diff --git a/thirdparty/rocksdb/db/compaction_picker_universal.cc 
b/thirdparty/rocksdb/db/compaction_picker_universal.cc
new file mode 100644
index 000..ce48026
--- /dev/null
+++ b/thirdparty/rocksdb/db/compaction_picker_universal.cc
@@ -0,0 +1,747 @@
+//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+//  This source code is licensed under both the GPLv2 (found in the
+//  COPYING file in the root directory) and Apache 2.0 License
+//  (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "db/compaction_picker_universal.h"
+#ifndef ROCKSDB_LITE
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include "db/column_family.h"
+#include "monitoring/statistics.h"
+#include "util/filename.h"
+#include "util/log_buffer.h"
+#include "util/random.h"
+#include "util/string_util.h"
+#include "util/sync_point.h"
+
+namespace rocksdb {
+namespace {
+// Used in universal compaction when trivial move is enabled.
+// This structure is used for the construction of min heap
+// that contains the file meta data, the level of the file
+// and the index of the file in that level
+
+struct InputFileInfo {
+  InputFileInfo() : f(nullptr) {}
+
+  FileMetaData* f;
+  size_t level;
+  size_t index;
+};
+
+// Used in universal compaction when trivial move is enabled.
+// This comparator is used for the construction of min heap
+// based on the smallest key of the file.
+struct SmallestKeyHeapComparator {
+  explicit SmallestKeyHeapComparator(const Comparator* ucmp) { ucmp_ = ucmp; }
+
+  bool operator()(InputFileInfo i1, InputFileInfo i2) const {
+return (ucmp_->Compare(i1.f->smallest.user_key(),
+   i2.f->smallest.user_key()) > 0);
+  }
+
+ private:
+  const Comparator* ucmp_;
+};
+
+typedef std::priority_queue
+SmallestKeyHeap;
+
+// This function creates the heap that is used to find if the files are
+// overlapping during universal compaction when the allow_trivial_move
+// is set.
+SmallestKeyHeap create_level_heap(Compaction* c, const Comparator* ucmp) {
+  SmallestKeyHeap smallest_key_priority_q =
+  SmallestKeyHeap(SmallestKeyHeapComparator(ucmp));
+
+  InputFileInfo input_file;
+
+  for (size_t l = 0; l < c->num_input_levels(); l++) {
+if (c->num_input_files(l) != 0) {
+  if (l == 0 && c->start_level() == 0) {
+for (size_t i = 0; i < c->num_input_files(0); i++) {
+  input_file.f = c->input(0, i);
+  input_file.level = 0;
+  input_file.index = i;
+  smallest_key_priority_q.push(std::move(input_file));
+}
+  } else {
+input_file.f = c->input(l, 0);
+input_file.level = l;
+input_file.index = 0;
+smallest_key_priority_q.push(std::move(input_file));
+  }
+}
+  }
+  return smallest_key_priority_q;
+}
+
+#ifndef NDEBUG
+// smallest_seqno and largest_seqno are set iff. `files` is not empty.
+void GetSmallestLargestSeqno(const std::vector& files,
+ SequenceNumber* smallest_seqno,
+ SequenceNumber* largest_seqno) {
+  bool is_first = true;
+  for (FileMetaData* f : files) {
+assert(f->smallest_seqno <= f->largest_seqno);
+if (is_first) {
+  is_first = false;
+  *smallest_seqno = f->smallest_seqno;
+  *largest_seqno = f->largest_seqno;
+} else {
+  if (f->smallest_seqno < *smallest_seqno) {
+*smallest_seqno = f->smallest_seqno;
+  }
+  if (f->largest_seqno > *largest_seqno) {
+*largest_seqno = f->largest_seqno;
+  }
+}
+  }
+}
+#endif
+}  // namespace
+
+// Algorithm that checks to see if there are any overlapping
+// files in the input
+bool UniversalCompactionPicker::IsInputFilesNonOverlapping(Compaction* c) {
+  auto comparator = icmp_->user_comparator();
+  int first_iter = 1;
+
+  InputFileInfo prev, curr, next;
+
+  SmallestKeyHeap smallest_key_priority_q =
+  create_level_heap(c, icmp_->user_comparator());
+
+  while (!smallest_key_priority_q.empty()) {
+curr = smallest_key_priority_q.top();
+smallest_key_priority_q.pop();
+
+if (first_iter) {
+  prev = curr;
+  first_iter = 0;
+} else {
+  if (comparator->Compare(prev.f->largest.user_key(),
+  curr.f->smallest.user_key()) >= 0) {
+// found overlapping files, return false
+return false;
+  }
+  

[25/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/db/compaction_picker.cc
--
diff --git a/thirdparty/rocksdb/db/compaction_picker.cc 
b/thirdparty/rocksdb/db/compaction_picker.cc
new file mode 100644
index 000..c6a5674
--- /dev/null
+++ b/thirdparty/rocksdb/db/compaction_picker.cc
@@ -0,0 +1,1591 @@
+//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+//  This source code is licensed under both the GPLv2 (found in the
+//  COPYING file in the root directory) and Apache 2.0 License
+//  (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "db/compaction_picker.h"
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include "db/column_family.h"
+#include "monitoring/statistics.h"
+#include "util/filename.h"
+#include "util/log_buffer.h"
+#include "util/random.h"
+#include "util/string_util.h"
+#include "util/sync_point.h"
+
+namespace rocksdb {
+
+namespace {
+uint64_t TotalCompensatedFileSize(const std::vector& files) {
+  uint64_t sum = 0;
+  for (size_t i = 0; i < files.size() && files[i]; i++) {
+sum += files[i]->compensated_file_size;
+  }
+  return sum;
+}
+
+bool FindIntraL0Compaction(const std::vector& level_files,
+   size_t min_files_to_compact,
+   uint64_t max_compact_bytes_per_del_file,
+   CompactionInputFiles* comp_inputs) {
+  size_t compact_bytes = level_files[0]->fd.file_size;
+  size_t compact_bytes_per_del_file = port::kMaxSizet;
+  // compaction range will be [0, span_len).
+  size_t span_len;
+  // pull in files until the amount of compaction work per deleted file begins
+  // increasing.
+  size_t new_compact_bytes_per_del_file = 0;
+  for (span_len = 1; span_len < level_files.size(); ++span_len) {
+compact_bytes += level_files[span_len]->fd.file_size;
+new_compact_bytes_per_del_file = compact_bytes / span_len;
+if (level_files[span_len]->being_compacted ||
+new_compact_bytes_per_del_file > compact_bytes_per_del_file) {
+  break;
+}
+compact_bytes_per_del_file = new_compact_bytes_per_del_file;
+  }
+
+  if (span_len >= min_files_to_compact &&
+  new_compact_bytes_per_del_file < max_compact_bytes_per_del_file) {
+assert(comp_inputs != nullptr);
+comp_inputs->level = 0;
+for (size_t i = 0; i < span_len; ++i) {
+  comp_inputs->files.push_back(level_files[i]);
+}
+return true;
+  }
+  return false;
+}
+}  // anonymous namespace
+
+// Determine compression type, based on user options, level of the output
+// file and whether compression is disabled.
+// If enable_compression is false, then compression is always disabled no
+// matter what the values of the other two parameters are.
+// Otherwise, the compression type is determined based on options and level.
+CompressionType GetCompressionType(const ImmutableCFOptions& ioptions,
+   const VersionStorageInfo* vstorage,
+   const MutableCFOptions& mutable_cf_options,
+   int level, int base_level,
+   const bool enable_compression) {
+  if (!enable_compression) {
+// disable compression
+return kNoCompression;
+  }
+
+  // If bottommost_compression is set and we are compacting to the
+  // bottommost level then we should use it.
+  if (ioptions.bottommost_compression != kDisableCompressionOption &&
+  level > base_level && level >= (vstorage->num_non_empty_levels() - 1)) {
+return ioptions.bottommost_compression;
+  }
+  // If the user has specified a different compression level for each level,
+  // then pick the compression for that level.
+  if (!ioptions.compression_per_level.empty()) {
+assert(level == 0 || level >= base_level);
+int idx = (level == 0) ? 0 : level - base_level + 1;
+
+const int n = static_cast(ioptions.compression_per_level.size()) - 1;
+// It is possible for level_ to be -1; in that case, we use level
+// 0's compression.  This occurs mostly in backwards compatibility
+// situations when the builder doesn't know what level the file
+// belongs to.  Likewise, if level is beyond the end of the
+// specified compression levels, use the last value.
+return ioptions.compression_per_level[std::max(0, std::min(idx, n))];
+  } else {
+return mutable_cf_options.compression;
+  }
+}
+
+CompactionPicker::CompactionPicker(const ImmutableCFOptions& ioptions,
+   const InternalKeyComparator* icmp)
+: 

[18/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/db/db_iter.cc
--
diff --git a/thirdparty/rocksdb/db/db_iter.cc b/thirdparty/rocksdb/db/db_iter.cc
new file mode 100644
index 000..801b110
--- /dev/null
+++ b/thirdparty/rocksdb/db/db_iter.cc
@@ -0,0 +1,1256 @@
+//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+//  This source code is licensed under both the GPLv2 (found in the
+//  COPYING file in the root directory) and Apache 2.0 License
+//  (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "db/db_iter.h"
+#include 
+#include 
+#include 
+#include 
+
+#include "db/dbformat.h"
+#include "db/merge_context.h"
+#include "db/merge_helper.h"
+#include "db/pinned_iterators_manager.h"
+#include "monitoring/perf_context_imp.h"
+#include "port/port.h"
+#include "rocksdb/env.h"
+#include "rocksdb/iterator.h"
+#include "rocksdb/merge_operator.h"
+#include "rocksdb/options.h"
+#include "table/internal_iterator.h"
+#include "util/arena.h"
+#include "util/filename.h"
+#include "util/logging.h"
+#include "util/mutexlock.h"
+#include "util/string_util.h"
+
+namespace rocksdb {
+
+#if 0
+static void DumpInternalIter(Iterator* iter) {
+  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
+ParsedInternalKey k;
+if (!ParseInternalKey(iter->key(), )) {
+  fprintf(stderr, "Corrupt '%s'\n", EscapeString(iter->key()).c_str());
+} else {
+  fprintf(stderr, "@ '%s'\n", k.DebugString().c_str());
+}
+  }
+}
+#endif
+
+// Memtables and sstables that make the DB representation contain
+// (userkey,seq,type) => uservalue entries.  DBIter
+// combines multiple entries for the same userkey found in the DB
+// representation into a single entry while accounting for sequence
+// numbers, deletion markers, overwrites, etc.
+class DBIter: public Iterator {
+ public:
+  // The following is grossly complicated. TODO: clean it up
+  // Which direction is the iterator currently moving?
+  // (1) When moving forward, the internal iterator is positioned at
+  // the exact entry that yields this->key(), this->value()
+  // (2) When moving backwards, the internal iterator is positioned
+  // just before all entries whose user key == this->key().
+  enum Direction {
+kForward,
+kReverse
+  };
+
+  // LocalStatistics contain Statistics counters that will be aggregated per
+  // each iterator instance and then will be sent to the global statistics when
+  // the iterator is destroyed.
+  //
+  // The purpose of this approach is to avoid perf regression happening
+  // when multiple threads bump the atomic counters from a DBIter::Next().
+  struct LocalStatistics {
+explicit LocalStatistics() { ResetCounters(); }
+
+void ResetCounters() {
+  next_count_ = 0;
+  next_found_count_ = 0;
+  prev_count_ = 0;
+  prev_found_count_ = 0;
+  bytes_read_ = 0;
+}
+
+void BumpGlobalStatistics(Statistics* global_statistics) {
+  RecordTick(global_statistics, NUMBER_DB_NEXT, next_count_);
+  RecordTick(global_statistics, NUMBER_DB_NEXT_FOUND, next_found_count_);
+  RecordTick(global_statistics, NUMBER_DB_PREV, prev_count_);
+  RecordTick(global_statistics, NUMBER_DB_PREV_FOUND, prev_found_count_);
+  RecordTick(global_statistics, ITER_BYTES_READ, bytes_read_);
+  ResetCounters();
+}
+
+// Map to Tickers::NUMBER_DB_NEXT
+uint64_t next_count_;
+// Map to Tickers::NUMBER_DB_NEXT_FOUND
+uint64_t next_found_count_;
+// Map to Tickers::NUMBER_DB_PREV
+uint64_t prev_count_;
+// Map to Tickers::NUMBER_DB_PREV_FOUND
+uint64_t prev_found_count_;
+// Map to Tickers::ITER_BYTES_READ
+uint64_t bytes_read_;
+  };
+
+  DBIter(Env* _env, const ReadOptions& read_options,
+ const ImmutableCFOptions& cf_options, const Comparator* cmp,
+ InternalIterator* iter, SequenceNumber s, bool arena_mode,
+ uint64_t max_sequential_skip_in_iterations)
+  : arena_mode_(arena_mode),
+env_(_env),
+logger_(cf_options.info_log),
+user_comparator_(cmp),
+merge_operator_(cf_options.merge_operator),
+iter_(iter),
+sequence_(s),
+direction_(kForward),
+valid_(false),
+current_entry_is_merged_(false),
+statistics_(cf_options.statistics),
+iterate_upper_bound_(read_options.iterate_upper_bound),
+prefix_same_as_start_(read_options.prefix_same_as_start),
+pin_thru_lifetime_(read_options.pin_data),
+total_order_seek_(read_options.total_order_seek),
+range_del_agg_(cf_options.internal_comparator, s,
+   true /* collapse_deletions */) 

[48/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/libminifi/test/unit/ProvenanceTests.cpp
--
diff --git a/libminifi/test/unit/ProvenanceTests.cpp 
b/libminifi/test/unit/ProvenanceTests.cpp
deleted file mode 100644
index 97cb646..000
--- a/libminifi/test/unit/ProvenanceTests.cpp
+++ /dev/null
@@ -1,164 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define CATCH_CONFIG_MAIN  // This tells Catch to provide a main() - only do 
this in one cpp file
-#include "../TestBase.h"
-#include 
-#include 
-#include 
-#include 
-#include "ProvenanceTestHelper.h"
-#include "provenance/Provenance.h"
-#include "FlowFileRecord.h"
-#include "core/Core.h"
-#include "core/repository/AtomicRepoEntries.h"
-#include "core/repository/VolatileProvenanceRepository.h"
-
-TEST_CASE("Test Provenance record create", 
"[Testprovenance::ProvenanceEventRecord]") {
-  provenance::ProvenanceEventRecord 
record1(provenance::ProvenanceEventRecord::ProvenanceEventType::CREATE, "blah", 
"blahblah");
-  REQUIRE(record1.getAttributes().size() == 0);
-  REQUIRE(record1.getAlternateIdentifierUri().length() == 0);
-}
-
-TEST_CASE("Test Provenance record serialization", 
"[Testprovenance::ProvenanceEventRecordSerializeDeser]") {
-  provenance::ProvenanceEventRecord 
record1(provenance::ProvenanceEventRecord::ProvenanceEventType::CREATE, 
"componentid", "componenttype");
-
-  std::string eventId = record1.getEventId();
-
-  std::string smileyface = ":)";
-  record1.setDetails(smileyface);
-
-  uint64_t sample = 6;
-  std::shared_ptr testRepository = 
std::make_shared();
-  record1.setEventDuration(sample);
-
-  record1.Serialize(testRepository);
-  provenance::ProvenanceEventRecord record2;
-  record2.setEventId(eventId);
-  REQUIRE(record2.DeSerialize(testRepository) == true);
-  REQUIRE(record2.getEventId() == record1.getEventId());
-  REQUIRE(record2.getComponentId() == record1.getComponentId());
-  REQUIRE(record2.getComponentType() == record1.getComponentType());
-  REQUIRE(record2.getDetails() == record1.getDetails());
-  REQUIRE(record2.getDetails() == smileyface);
-  REQUIRE(record2.getEventDuration() == sample);
-}
-
-TEST_CASE("Test Flowfile record added to provenance", "[TestFlowAndProv1]") {
-  provenance::ProvenanceEventRecord 
record1(provenance::ProvenanceEventRecord::ProvenanceEventType::CLONE, 
"componentid", "componenttype");
-  std::shared_ptr content_repo = 
std::make_shared();
-  std::string eventId = record1.getEventId();
-  std::map attributes;
-  attributes.insert(std::pair("potato", "potatoe"));
-  attributes.insert(std::pair("tomato", "tomatoe"));
-  std::shared_ptr frepo = 
std::make_shared("ff", 
"./content_repository", 0, 0, 0);
-  std::shared_ptr ffr1 = 
std::make_shared(frepo, content_repo, attributes);
-
-  record1.addChildFlowFile(ffr1);
-
-  uint64_t sample = 6;
-  std::shared_ptr testRepository = 
std::make_shared();
-  record1.setEventDuration(sample);
-
-  record1.Serialize(testRepository);
-  provenance::ProvenanceEventRecord record2;
-  record2.setEventId(eventId);
-  REQUIRE(record2.DeSerialize(testRepository) == true);
-  REQUIRE(record1.getChildrenUuids().size() == 1);
-  REQUIRE(record2.getChildrenUuids().size() == 1);
-  std::string childId = record2.getChildrenUuids().at(0);
-  REQUIRE(childId == ffr1->getUUIDStr());
-  record2.removeChildUuid(childId);
-  REQUIRE(record2.getChildrenUuids().size() == 0);
-}
-
-TEST_CASE("Test Provenance record serialization Volatile", 
"[Testprovenance::ProvenanceEventRecordSerializeDeser]") {
-  provenance::ProvenanceEventRecord 
record1(provenance::ProvenanceEventRecord::ProvenanceEventType::CREATE, 
"componentid", "componenttype");
-
-  std::string eventId = record1.getEventId();
-
-  std::string smileyface = ":)";
-  record1.setDetails(smileyface);
-
-  uint64_t sample = 6;
-
-  std::shared_ptr testRepository = 
std::make_shared();
-  testRepository->initialize(0);
-  record1.setEventDuration(sample);
-
-  record1.Serialize(testRepository);
-  provenance::ProvenanceEventRecord record2;
-  record2.setEventId(eventId);
-  

[23/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/db/db_impl.cc
--
diff --git a/thirdparty/rocksdb/db/db_impl.cc b/thirdparty/rocksdb/db/db_impl.cc
new file mode 100644
index 000..cdba039
--- /dev/null
+++ b/thirdparty/rocksdb/db/db_impl.cc
@@ -0,0 +1,2824 @@
+//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+//  This source code is licensed under both the GPLv2 (found in the
+//  COPYING file in the root directory) and Apache 2.0 License
+//  (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include "db/db_impl.h"
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+#include 
+#include 
+#ifdef OS_SOLARIS
+#include 
+#endif
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "db/builder.h"
+#include "db/compaction_job.h"
+#include "db/db_info_dumper.h"
+#include "db/db_iter.h"
+#include "db/dbformat.h"
+#include "db/event_helpers.h"
+#include "db/external_sst_file_ingestion_job.h"
+#include "db/flush_job.h"
+#include "db/forward_iterator.h"
+#include "db/job_context.h"
+#include "db/log_reader.h"
+#include "db/log_writer.h"
+#include "db/malloc_stats.h"
+#include "db/managed_iterator.h"
+#include "db/memtable.h"
+#include "db/memtable_list.h"
+#include "db/merge_context.h"
+#include "db/merge_helper.h"
+#include "db/range_del_aggregator.h"
+#include "db/table_cache.h"
+#include "db/table_properties_collector.h"
+#include "db/transaction_log_impl.h"
+#include "db/version_set.h"
+#include "db/write_batch_internal.h"
+#include "db/write_callback.h"
+#include "memtable/hash_linklist_rep.h"
+#include "memtable/hash_skiplist_rep.h"
+#include "monitoring/iostats_context_imp.h"
+#include "monitoring/perf_context_imp.h"
+#include "monitoring/thread_status_updater.h"
+#include "monitoring/thread_status_util.h"
+#include "options/cf_options.h"
+#include "options/options_helper.h"
+#include "options/options_parser.h"
+#include "port/likely.h"
+#include "port/port.h"
+#include "rocksdb/cache.h"
+#include "rocksdb/compaction_filter.h"
+#include "rocksdb/convenience.h"
+#include "rocksdb/db.h"
+#include "rocksdb/env.h"
+#include "rocksdb/merge_operator.h"
+#include "rocksdb/statistics.h"
+#include "rocksdb/status.h"
+#include "rocksdb/table.h"
+#include "rocksdb/version.h"
+#include "rocksdb/write_buffer_manager.h"
+#include "table/block.h"
+#include "table/block_based_table_factory.h"
+#include "table/merging_iterator.h"
+#include "table/table_builder.h"
+#include "table/two_level_iterator.h"
+#include "tools/sst_dump_tool_imp.h"
+#include "util/auto_roll_logger.h"
+#include "util/autovector.h"
+#include "util/build_version.h"
+#include "util/coding.h"
+#include "util/compression.h"
+#include "util/crc32c.h"
+#include "util/file_reader_writer.h"
+#include "util/file_util.h"
+#include "util/filename.h"
+#include "util/log_buffer.h"
+#include "util/logging.h"
+#include "util/mutexlock.h"
+#include "util/sst_file_manager_impl.h"
+#include "util/stop_watch.h"
+#include "util/string_util.h"
+#include "util/sync_point.h"
+
+namespace rocksdb {
+const std::string kDefaultColumnFamilyName("default");
+void DumpRocksDBBuildVersion(Logger * log);
+
+CompressionType GetCompressionFlush(
+const ImmutableCFOptions& ioptions,
+const MutableCFOptions& mutable_cf_options) {
+  // Compressing memtable flushes might not help unless the sequential load
+  // optimization is used for leveled compaction. Otherwise the CPU and
+  // latency overhead is not offset by saving much space.
+  if (ioptions.compaction_style == kCompactionStyleUniversal) {
+if (ioptions.compaction_options_universal.compression_size_percent < 0) {
+  return mutable_cf_options.compression;
+} else {
+  return kNoCompression;
+}
+  } else if (!ioptions.compression_per_level.empty()) {
+// For leveled compress when min_level_to_compress != 0.
+return ioptions.compression_per_level[0];
+  } else {
+return mutable_cf_options.compression;
+  }
+}
+
+namespace {
+void DumpSupportInfo(Logger* logger) {
+  ROCKS_LOG_HEADER(logger, "Compression algorithms supported:");
+  ROCKS_LOG_HEADER(logger, "\tSnappy supported: %d", Snappy_Supported());
+  ROCKS_LOG_HEADER(logger, "\tZlib supported: %d", Zlib_Supported());
+  ROCKS_LOG_HEADER(logger, "\tBzip supported: %d", BZip2_Supported());
+  ROCKS_LOG_HEADER(logger, "\tLZ4 supported: %d", LZ4_Supported());
+  ROCKS_LOG_HEADER(logger, "\tZSTD supported: %d", ZSTD_Supported());
+  ROCKS_LOG_HEADER(logger, "Fast CRC32 supported: %d",
+   crc32c::IsFastCrc32Supported());
+}
+
+int64_t kDefaultLowPriThrottledRate = 2 * 1024 * 

[09/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/db/version_set.cc
--
diff --git a/thirdparty/rocksdb/db/version_set.cc 
b/thirdparty/rocksdb/db/version_set.cc
new file mode 100644
index 000..6b9611a
--- /dev/null
+++ b/thirdparty/rocksdb/db/version_set.cc
@@ -0,0 +1,3801 @@
+//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+//  This source code is licensed under both the GPLv2 (found in the
+//  COPYING file in the root directory) and Apache 2.0 License
+//  (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "db/version_set.h"
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include "db/compaction.h"
+#include "db/internal_stats.h"
+#include "db/log_reader.h"
+#include "db/log_writer.h"
+#include "db/memtable.h"
+#include "db/merge_context.h"
+#include "db/merge_helper.h"
+#include "db/pinned_iterators_manager.h"
+#include "db/table_cache.h"
+#include "db/version_builder.h"
+#include "monitoring/file_read_sample.h"
+#include "monitoring/perf_context_imp.h"
+#include "rocksdb/env.h"
+#include "rocksdb/merge_operator.h"
+#include "rocksdb/write_buffer_manager.h"
+#include "table/format.h"
+#include "table/get_context.h"
+#include "table/internal_iterator.h"
+#include "table/merging_iterator.h"
+#include "table/meta_blocks.h"
+#include "table/plain_table_factory.h"
+#include "table/table_reader.h"
+#include "table/two_level_iterator.h"
+#include "util/coding.h"
+#include "util/file_reader_writer.h"
+#include "util/filename.h"
+#include "util/stop_watch.h"
+#include "util/string_util.h"
+#include "util/sync_point.h"
+
+namespace rocksdb {
+
+namespace {
+
+// Find File in LevelFilesBrief data structure
+// Within an index range defined by left and right
+int FindFileInRange(const InternalKeyComparator& icmp,
+const LevelFilesBrief& file_level,
+const Slice& key,
+uint32_t left,
+uint32_t right) {
+  while (left < right) {
+uint32_t mid = (left + right) / 2;
+const FdWithKeyRange& f = file_level.files[mid];
+if (icmp.InternalKeyComparator::Compare(f.largest_key, key) < 0) {
+  // Key at "mid.largest" is < "target".  Therefore all
+  // files at or before "mid" are uninteresting.
+  left = mid + 1;
+} else {
+  // Key at "mid.largest" is >= "target".  Therefore all files
+  // after "mid" are uninteresting.
+  right = mid;
+}
+  }
+  return right;
+}
+
+// Class to help choose the next file to search for the particular key.
+// Searches and returns files level by level.
+// We can search level-by-level since entries never hop across
+// levels. Therefore we are guaranteed that if we find data
+// in a smaller level, later levels are irrelevant (unless we
+// are MergeInProgress).
+class FilePicker {
+ public:
+  FilePicker(std::vector* files, const Slice& user_key,
+ const Slice& ikey, autovector* file_levels,
+ unsigned int num_levels, FileIndexer* file_indexer,
+ const Comparator* user_comparator,
+ const InternalKeyComparator* internal_comparator)
+  : num_levels_(num_levels),
+curr_level_(static_cast(-1)),
+returned_file_level_(static_cast(-1)),
+hit_file_level_(static_cast(-1)),
+search_left_bound_(0),
+search_right_bound_(FileIndexer::kLevelMaxIndex),
+#ifndef NDEBUG
+files_(files),
+#endif
+level_files_brief_(file_levels),
+is_hit_file_last_in_level_(false),
+user_key_(user_key),
+ikey_(ikey),
+file_indexer_(file_indexer),
+user_comparator_(user_comparator),
+internal_comparator_(internal_comparator) {
+// Setup member variables to search first level.
+search_ended_ = !PrepareNextLevel();
+if (!search_ended_) {
+  // Prefetch Level 0 table data to avoid cache miss if possible.
+  for (unsigned int i = 0; i < (*level_files_brief_)[0].num_files; ++i) {
+auto* r = (*level_files_brief_)[0].files[i].fd.table_reader;
+if (r) {
+  r->Prepare(ikey);
+}
+  }
+}
+  }
+
+  int GetCurrentLevel() const { return curr_level_; }
+
+  FdWithKeyRange* GetNextFile() {
+while (!search_ended_) {  // Loops over different levels.
+  while (curr_index_in_curr_level_ < curr_file_level_->num_files) {
+// Loops over all files in current level.
+FdWithKeyRange* f = 
_file_level_->files[curr_index_in_curr_level_];
+hit_file_level_ = curr_level_;
+is_hit_file_last_in_level_ =
+curr_index_in_curr_level_ == 

[27/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/db/compaction.cc
--
diff --git a/thirdparty/rocksdb/db/compaction.cc 
b/thirdparty/rocksdb/db/compaction.cc
new file mode 100644
index 000..9ea332d
--- /dev/null
+++ b/thirdparty/rocksdb/db/compaction.cc
@@ -0,0 +1,480 @@
+//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+//  This source code is licensed under both the GPLv2 (found in the
+//  COPYING file in the root directory) and Apache 2.0 License
+//  (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "db/compaction.h"
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+
+#include 
+#include 
+
+#include "db/column_family.h"
+#include "rocksdb/compaction_filter.h"
+#include "util/string_util.h"
+#include "util/sync_point.h"
+
+namespace rocksdb {
+
+uint64_t TotalFileSize(const std::vector& files) {
+  uint64_t sum = 0;
+  for (size_t i = 0; i < files.size() && files[i]; i++) {
+sum += files[i]->fd.GetFileSize();
+  }
+  return sum;
+}
+
+void Compaction::SetInputVersion(Version* _input_version) {
+  input_version_ = _input_version;
+  cfd_ = input_version_->cfd();
+
+  cfd_->Ref();
+  input_version_->Ref();
+  edit_.SetColumnFamily(cfd_->GetID());
+}
+
+void Compaction::GetBoundaryKeys(
+VersionStorageInfo* vstorage,
+const std::vector& inputs, Slice* smallest_user_key,
+Slice* largest_user_key) {
+  bool initialized = false;
+  const Comparator* ucmp = vstorage->InternalComparator()->user_comparator();
+  for (size_t i = 0; i < inputs.size(); ++i) {
+if (inputs[i].files.empty()) {
+  continue;
+}
+if (inputs[i].level == 0) {
+  // we need to consider all files on level 0
+  for (const auto* f : inputs[i].files) {
+const Slice& start_user_key = f->smallest.user_key();
+if (!initialized ||
+ucmp->Compare(start_user_key, *smallest_user_key) < 0) {
+  *smallest_user_key = start_user_key;
+}
+const Slice& end_user_key = f->largest.user_key();
+if (!initialized ||
+ucmp->Compare(end_user_key, *largest_user_key) > 0) {
+  *largest_user_key = end_user_key;
+}
+initialized = true;
+  }
+} else {
+  // we only need to consider the first and last file
+  const Slice& start_user_key = inputs[i].files[0]->smallest.user_key();
+  if (!initialized ||
+  ucmp->Compare(start_user_key, *smallest_user_key) < 0) {
+*smallest_user_key = start_user_key;
+  }
+  const Slice& end_user_key = inputs[i].files.back()->largest.user_key();
+  if (!initialized || ucmp->Compare(end_user_key, *largest_user_key) > 0) {
+*largest_user_key = end_user_key;
+  }
+  initialized = true;
+}
+  }
+}
+
+// helper function to determine if compaction is creating files at the
+// bottommost level
+bool Compaction::IsBottommostLevel(
+int output_level, VersionStorageInfo* vstorage,
+const std::vector& inputs) {
+  if (inputs[0].level == 0 &&
+  inputs[0].files.back() != vstorage->LevelFiles(0).back()) {
+return false;
+  }
+
+  Slice smallest_key, largest_key;
+  GetBoundaryKeys(vstorage, inputs, _key, _key);
+
+  // Checks whether there are files living beyond the output_level.
+  // If lower levels have files, it checks for overlap between files
+  // if the compaction process and those files.
+  // Bottomlevel optimizations can be made if there are no files in
+  // lower levels or if there is no overlap with the files in
+  // the lower levels.
+  for (int i = output_level + 1; i < vstorage->num_levels(); i++) {
+// It is not the bottommost level if there are files in higher
+// levels when the output level is 0 or if there are files in
+// higher levels which overlap with files to be compacted.
+// output_level == 0 means that we want it to be considered
+// s the bottommost level only if the last file on the level
+// is a part of the files to be compacted - this is verified by
+// the first if condition in this function
+if (vstorage->NumLevelFiles(i) > 0 &&
+(output_level == 0 ||
+ vstorage->OverlapInLevel(i, _key, _key))) {
+  return false;
+}
+  }
+  return true;
+}
+
+// test function to validate the functionality of IsBottommostLevel()
+// function -- determines if compaction with inputs and storage is bottommost
+bool Compaction::TEST_IsBottommostLevel(
+int output_level, VersionStorageInfo* vstorage,
+const std::vector& inputs) {
+  return IsBottommostLevel(output_level, vstorage, inputs);
+}
+
+bool Compaction::IsFullCompaction(
+VersionStorageInfo* 

[38/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/leveldb-1.18/table/table_test.cc
--
diff --git a/thirdparty/leveldb-1.18/table/table_test.cc 
b/thirdparty/leveldb-1.18/table/table_test.cc
deleted file mode 100755
index d46c079..000
--- a/thirdparty/leveldb-1.18/table/table_test.cc
+++ /dev/null
@@ -1,868 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "leveldb/table.h"
-
-#include 
-#include 
-#include "db/dbformat.h"
-#include "db/memtable.h"
-#include "db/write_batch_internal.h"
-#include "leveldb/db.h"
-#include "leveldb/env.h"
-#include "leveldb/iterator.h"
-#include "leveldb/table_builder.h"
-#include "table/block.h"
-#include "table/block_builder.h"
-#include "table/format.h"
-#include "util/random.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-namespace leveldb {
-
-// Return reverse of "key".
-// Used to test non-lexicographic comparators.
-static std::string Reverse(const Slice& key) {
-  std::string str(key.ToString());
-  std::string rev("");
-  for (std::string::reverse_iterator rit = str.rbegin();
-   rit != str.rend(); ++rit) {
-rev.push_back(*rit);
-  }
-  return rev;
-}
-
-namespace {
-class ReverseKeyComparator : public Comparator {
- public:
-  virtual const char* Name() const {
-return "leveldb.ReverseBytewiseComparator";
-  }
-
-  virtual int Compare(const Slice& a, const Slice& b) const {
-return BytewiseComparator()->Compare(Reverse(a), Reverse(b));
-  }
-
-  virtual void FindShortestSeparator(
-  std::string* start,
-  const Slice& limit) const {
-std::string s = Reverse(*start);
-std::string l = Reverse(limit);
-BytewiseComparator()->FindShortestSeparator(, l);
-*start = Reverse(s);
-  }
-
-  virtual void FindShortSuccessor(std::string* key) const {
-std::string s = Reverse(*key);
-BytewiseComparator()->FindShortSuccessor();
-*key = Reverse(s);
-  }
-};
-}  // namespace
-static ReverseKeyComparator reverse_key_comparator;
-
-static void Increment(const Comparator* cmp, std::string* key) {
-  if (cmp == BytewiseComparator()) {
-key->push_back('\0');
-  } else {
-assert(cmp == _key_comparator);
-std::string rev = Reverse(*key);
-rev.push_back('\0');
-*key = Reverse(rev);
-  }
-}
-
-// An STL comparator that uses a Comparator
-namespace {
-struct STLLessThan {
-  const Comparator* cmp;
-
-  STLLessThan() : cmp(BytewiseComparator()) { }
-  STLLessThan(const Comparator* c) : cmp(c) { }
-  bool operator()(const std::string& a, const std::string& b) const {
-return cmp->Compare(Slice(a), Slice(b)) < 0;
-  }
-};
-}  // namespace
-
-class StringSink: public WritableFile {
- public:
-  ~StringSink() { }
-
-  const std::string& contents() const { return contents_; }
-
-  virtual Status Close() { return Status::OK(); }
-  virtual Status Flush() { return Status::OK(); }
-  virtual Status Sync() { return Status::OK(); }
-
-  virtual Status Append(const Slice& data) {
-contents_.append(data.data(), data.size());
-return Status::OK();
-  }
-
- private:
-  std::string contents_;
-};
-
-
-class StringSource: public RandomAccessFile {
- public:
-  StringSource(const Slice& contents)
-  : contents_(contents.data(), contents.size()) {
-  }
-
-  virtual ~StringSource() { }
-
-  uint64_t Size() const { return contents_.size(); }
-
-  virtual Status Read(uint64_t offset, size_t n, Slice* result,
-   char* scratch) const {
-if (offset > contents_.size()) {
-  return Status::InvalidArgument("invalid Read offset");
-}
-if (offset + n > contents_.size()) {
-  n = contents_.size() - offset;
-}
-memcpy(scratch, _[offset], n);
-*result = Slice(scratch, n);
-return Status::OK();
-  }
-
- private:
-  std::string contents_;
-};
-
-typedef std::map KVMap;
-
-// Helper class for tests to unify the interface between
-// BlockBuilder/TableBuilder and Block/Table.
-class Constructor {
- public:
-  explicit Constructor(const Comparator* cmp) : data_(STLLessThan(cmp)) { }
-  virtual ~Constructor() { }
-
-  void Add(const std::string& key, const Slice& value) {
-data_[key] = value.ToString();
-  }
-
-  // Finish constructing the data structure with all the keys that have
-  // been added so far.  Returns the keys in sorted order in "*keys"
-  // and stores the key/value pairs in "*kvmap"
-  void Finish(const Options& options,
-  std::vector* keys,
-  KVMap* kvmap) {
-*kvmap = data_;
-keys->clear();
-for (KVMap::const_iterator it = data_.begin();
- it != data_.end();
- ++it) {
-  keys->push_back(it->first);
-}
-data_.clear();
-Status s = FinishImpl(options, *kvmap);

[22/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/db/db_impl.h
--
diff --git a/thirdparty/rocksdb/db/db_impl.h b/thirdparty/rocksdb/db/db_impl.h
new file mode 100644
index 000..31d69a9
--- /dev/null
+++ b/thirdparty/rocksdb/db/db_impl.h
@@ -0,0 +1,1284 @@
+//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+//  This source code is licensed under both the GPLv2 (found in the
+//  COPYING file in the root directory) and Apache 2.0 License
+//  (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#pragma once
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "db/column_family.h"
+#include "db/compaction_job.h"
+#include "db/dbformat.h"
+#include "db/external_sst_file_ingestion_job.h"
+#include "db/flush_job.h"
+#include "db/flush_scheduler.h"
+#include "db/internal_stats.h"
+#include "db/log_writer.h"
+#include "db/snapshot_impl.h"
+#include "db/version_edit.h"
+#include "db/wal_manager.h"
+#include "db/write_controller.h"
+#include "db/write_thread.h"
+#include "memtable_list.h"
+#include "monitoring/instrumented_mutex.h"
+#include "options/db_options.h"
+#include "port/port.h"
+#include "rocksdb/db.h"
+#include "rocksdb/env.h"
+#include "rocksdb/memtablerep.h"
+#include "rocksdb/status.h"
+#include "rocksdb/transaction_log.h"
+#include "rocksdb/write_buffer_manager.h"
+#include "table/scoped_arena_iterator.h"
+#include "util/autovector.h"
+#include "util/event_logger.h"
+#include "util/hash.h"
+#include "util/stop_watch.h"
+#include "util/thread_local.h"
+
+namespace rocksdb {
+
+class MemTable;
+class TableCache;
+class Version;
+class VersionEdit;
+class VersionSet;
+class Arena;
+class WriteCallback;
+struct JobContext;
+struct ExternalSstFileInfo;
+struct MemTableInfo;
+
+class DBImpl : public DB {
+ public:
+  DBImpl(const DBOptions& options, const std::string& dbname);
+  virtual ~DBImpl();
+
+  // Implementations of the DB interface
+  using DB::Put;
+  virtual Status Put(const WriteOptions& options,
+ ColumnFamilyHandle* column_family, const Slice& key,
+ const Slice& value) override;
+  using DB::Merge;
+  virtual Status Merge(const WriteOptions& options,
+   ColumnFamilyHandle* column_family, const Slice& key,
+   const Slice& value) override;
+  using DB::Delete;
+  virtual Status Delete(const WriteOptions& options,
+ColumnFamilyHandle* column_family,
+const Slice& key) override;
+  using DB::SingleDelete;
+  virtual Status SingleDelete(const WriteOptions& options,
+  ColumnFamilyHandle* column_family,
+  const Slice& key) override;
+  using DB::Write;
+  virtual Status Write(const WriteOptions& options,
+   WriteBatch* updates) override;
+
+  using DB::Get;
+  virtual Status Get(const ReadOptions& options,
+ ColumnFamilyHandle* column_family, const Slice& key,
+ PinnableSlice* value) override;
+  using DB::MultiGet;
+  virtual std::vector MultiGet(
+  const ReadOptions& options,
+  const std::vector& column_family,
+  const std::vector& keys,
+  std::vector* values) override;
+
+  virtual Status CreateColumnFamily(const ColumnFamilyOptions& cf_options,
+const std::string& column_family,
+ColumnFamilyHandle** handle) override;
+  virtual Status CreateColumnFamilies(
+  const ColumnFamilyOptions& cf_options,
+  const std::vector& column_family_names,
+  std::vector* handles) override;
+  virtual Status CreateColumnFamilies(
+  const std::vector& column_families,
+  std::vector* handles) override;
+  virtual Status DropColumnFamily(ColumnFamilyHandle* column_family) override;
+  virtual Status DropColumnFamilies(
+  const std::vector& column_families) override;
+
+  // Returns false if key doesn't exist in the database and true if it may.
+  // If value_found is not passed in as null, then return the value if found in
+  // memory. On return, if value was found, then value_found will be set to 
true
+  // , otherwise false.
+  using DB::KeyMayExist;
+  virtual bool KeyMayExist(const ReadOptions& options,
+   ColumnFamilyHandle* column_family, const Slice& key,
+   std::string* value,
+   bool* value_found = nullptr) override;
+  using DB::NewIterator;
+  virtual 

[44/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/leveldb-1.18/db/repair.cc
--
diff --git a/thirdparty/leveldb-1.18/db/repair.cc 
b/thirdparty/leveldb-1.18/db/repair.cc
deleted file mode 100755
index 4cd4bb0..000
--- a/thirdparty/leveldb-1.18/db/repair.cc
+++ /dev/null
@@ -1,461 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// We recover the contents of the descriptor from the other files we find.
-// (1) Any log files are first converted to tables
-// (2) We scan every table to compute
-// (a) smallest/largest for the table
-// (b) largest sequence number in the table
-// (3) We generate descriptor contents:
-//  - log number is set to zero
-//  - next-file-number is set to 1 + largest file number we found
-//  - last-sequence-number is set to largest sequence# found across
-//all tables (see 2c)
-//  - compaction pointers are cleared
-//  - every table file is added at level 0
-//
-// Possible optimization 1:
-//   (a) Compute total size and use to pick appropriate max-level M
-//   (b) Sort tables by largest sequence# in the table
-//   (c) For each table: if it overlaps earlier table, place in level-0,
-//   else place in level-M.
-// Possible optimization 2:
-//   Store per-table metadata (smallest, largest, largest-seq#, ...)
-//   in the table's meta section to speed up ScanTable.
-
-#include "db/builder.h"
-#include "db/db_impl.h"
-#include "db/dbformat.h"
-#include "db/filename.h"
-#include "db/log_reader.h"
-#include "db/log_writer.h"
-#include "db/memtable.h"
-#include "db/table_cache.h"
-#include "db/version_edit.h"
-#include "db/write_batch_internal.h"
-#include "leveldb/comparator.h"
-#include "leveldb/db.h"
-#include "leveldb/env.h"
-
-namespace leveldb {
-
-namespace {
-
-class Repairer {
- public:
-  Repairer(const std::string& dbname, const Options& options)
-  : dbname_(dbname),
-env_(options.env),
-icmp_(options.comparator),
-ipolicy_(options.filter_policy),
-options_(SanitizeOptions(dbname, _, _, options)),
-owns_info_log_(options_.info_log != options.info_log),
-owns_cache_(options_.block_cache != options.block_cache),
-next_file_number_(1) {
-// TableCache can be small since we expect each table to be opened once.
-table_cache_ = new TableCache(dbname_, _, 10);
-  }
-
-  ~Repairer() {
-delete table_cache_;
-if (owns_info_log_) {
-  delete options_.info_log;
-}
-if (owns_cache_) {
-  delete options_.block_cache;
-}
-  }
-
-  Status Run() {
-Status status = FindFiles();
-if (status.ok()) {
-  ConvertLogFilesToTables();
-  ExtractMetaData();
-  status = WriteDescriptor();
-}
-if (status.ok()) {
-  unsigned long long bytes = 0;
-  for (size_t i = 0; i < tables_.size(); i++) {
-bytes += tables_[i].meta.file_size;
-  }
-  Log(options_.info_log,
-  " Repaired leveldb %s; "
-  "recovered %d files; %llu bytes. "
-  "Some data may have been lost. "
-  "",
-  dbname_.c_str(),
-  static_cast(tables_.size()),
-  bytes);
-}
-return status;
-  }
-
- private:
-  struct TableInfo {
-FileMetaData meta;
-SequenceNumber max_sequence;
-  };
-
-  std::string const dbname_;
-  Env* const env_;
-  InternalKeyComparator const icmp_;
-  InternalFilterPolicy const ipolicy_;
-  Options const options_;
-  bool owns_info_log_;
-  bool owns_cache_;
-  TableCache* table_cache_;
-  VersionEdit edit_;
-
-  std::vector manifests_;
-  std::vector table_numbers_;
-  std::vector logs_;
-  std::vector tables_;
-  uint64_t next_file_number_;
-
-  Status FindFiles() {
-std::vector filenames;
-Status status = env_->GetChildren(dbname_, );
-if (!status.ok()) {
-  return status;
-}
-if (filenames.empty()) {
-  return Status::IOError(dbname_, "repair found no files");
-}
-
-uint64_t number;
-FileType type;
-for (size_t i = 0; i < filenames.size(); i++) {
-  if (ParseFileName(filenames[i], , )) {
-if (type == kDescriptorFile) {
-  manifests_.push_back(filenames[i]);
-} else {
-  if (number + 1 > next_file_number_) {
-next_file_number_ = number + 1;
-  }
-  if (type == kLogFile) {
-logs_.push_back(number);
-  } else if (type == kTableFile) {
-table_numbers_.push_back(number);
-  } else {
-// Ignore other files
-  }
-}
-  }
-}
-return status;
-  }
-
-  void ConvertLogFilesToTables() {
-for (size_t i = 0; i < logs_.size(); i++) {
-  std::string logname = LogFileName(dbname_, logs_[i]);
-  

[50/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/cmake/BuildTests.cmake
--
diff --git a/cmake/BuildTests.cmake b/cmake/BuildTests.cmake
index 4f4bed0..e76802d 100644
--- a/cmake/BuildTests.cmake
+++ b/cmake/BuildTests.cmake
@@ -31,13 +31,11 @@ ENDMACRO()
 
 function(createTests testName)
message ("-- Adding test: ${testName}")
-target_include_directories(${testName} PRIVATE BEFORE ${UUID_INCLUDE_DIRS})
 target_include_directories(${testName} PRIVATE BEFORE "thirdparty/catch")
 target_include_directories(${testName} PRIVATE BEFORE 
"thirdparty/spdlog-20170710/include")
 target_include_directories(${testName} PRIVATE BEFORE 
"thirdparty/yaml-cpp-yaml-cpp-0.5.3/include")
 target_include_directories(${testName} PRIVATE BEFORE 
"thirdparty/jsoncpp/include")
 target_include_directories(${testName} PRIVATE BEFORE 
"thirdparty/civetweb-1.9.1/include")
-target_include_directories(${testName} PRIVATE BEFORE 
${LEVELDB_INCLUDE_DIRS})
 target_include_directories(${testName} PRIVATE BEFORE "include")
 target_include_directories(${testName} PRIVATE BEFORE "libminifi/include/")
 target_include_directories(${testName} PRIVATE BEFORE 
"libminifi/include/c2/protocols")
@@ -52,8 +50,8 @@ function(createTests testName)
 target_include_directories(${testName} PRIVATE BEFORE 
"libminifi/include/utils")
 target_include_directories(${testName} PRIVATE BEFORE 
"libminifi/include/processors")
 target_include_directories(${testName} PRIVATE BEFORE 
"libminifi/include/provenance")
-target_link_libraries(${testName} ${CMAKE_THREAD_LIBS_INIT} 
${UUID_LIBRARIES} ${LEVELDB_LIBRARIES} ${OPENSSL_LIBRARIES} minifi yaml-cpp 
c-library civetweb-cpp ${JSON_CPP_LIB})
- if (HTTP-CURL)
+target_link_libraries(${testName} ${CMAKE_THREAD_LIBS_INIT} 
${OPENSSL_LIBRARIES} minifi yaml-cpp c-library civetweb-cpp ${JSON_CPP_LIB})
+ if (CURL_FOUND)
 target_include_directories(${testName} PRIVATE BEFORE 
${CURL_INCLUDE_DIRS})
 target_include_directories(${testName} PRIVATE BEFORE 
"extensions/http-curl/")
 target_include_directories(${testName} PRIVATE BEFORE 
"extensions/http-curl/client/")
@@ -61,7 +59,7 @@ function(createTests testName)
 target_include_directories(${testName} PRIVATE BEFORE 
"extensions/http-curl/protocols/")
 target_link_libraries(${testName} ${CURL_LIBRARIES} )
 if (APPLE)
-   target_link_libraries (${testName} -Wl,-all_load 
${HTTP-CURL})
+   target_link_libraries (${testName} -Wl,-all_load 
${HTTP-CURL})
else ()
target_link_libraries (${testName} -Wl,--whole-archive 
${HTTP-CURL} -Wl,--no-whole-archive)
endif ()
@@ -77,6 +75,7 @@ SET(TEST_RESOURCES ${TEST_DIR}/resources)
 GETSOURCEFILES(UNIT_TESTS "${TEST_DIR}/unit/")
 GETSOURCEFILES(INTEGRATION_TESTS "${TEST_DIR}/integration/")
 GETSOURCEFILES(CURL_INTEGRATION_TESTS "${TEST_DIR}/curl-tests/")
+GETSOURCEFILES(ROCKSDB_INTEGRATION_TESTS "${TEST_DIR}/rocksdb-tests/")
 
 SET(UNIT_TEST_COUNT 0)
 FOREACH(testfile ${UNIT_TESTS})
@@ -93,11 +92,28 @@ FOREACH(testfile ${INTEGRATION_TESTS})
get_filename_component(testfilename "${testfile}" NAME_WE)
add_executable("${testfilename}" "${TEST_DIR}/integration/${testfile}" 
${SPD_SOURCES} "${TEST_DIR}/TestBase.cpp")
createTests("${testfilename}")
-   #message("Adding ${testfilename} from ${testfile}")
MATH(EXPR INT_TEST_COUNT "${INT_TEST_COUNT}+1")
 ENDFOREACH()
 message("-- Finished building ${INT_TEST_COUNT} integration test file(s)...")
 
+if (ROCKSDB-REPOS)
+SET(ROCKSDB_TEST_COUNT 0)
+FOREACH(testfile ${ROCKSDB_INTEGRATION_TESTS})
+   get_filename_component(testfilename "${testfile}" NAME_WE)
+   add_executable("${testfilename}" 
"${TEST_DIR}/rocksdb-tests/${testfile}" ${SPD_SOURCES} 
"${TEST_DIR}/TestBase.cpp")
+   target_include_directories(${testfilename} PRIVATE BEFORE 
"extensions/rocksdb-repos/")
+target_include_directories(${testfilename} PRIVATE BEFORE 
"thirdparty/rocksdb/include")
+if (APPLE)
+   target_link_libraries (${testfilename} -Wl,-all_load 
${ROCKSDB-REPOS})
+   else ()
+   target_link_libraries (${testfilename} 
-Wl,--whole-archive ${ROCKSDB-REPOS} -Wl,--no-whole-archive)
+   endif ()
+   createTests("${testfilename}")
+   MATH(EXPR ROCKSDB_TEST_COUNT "${ROCKSDB_TEST_COUNT}+1")
+ENDFOREACH()
+message("-- Finished building ${ROCKSDB_TEST_COUNT} RocksDB related test 
file(s)...")
+endif(ROCKSDB-REPOS)
+
 if (HTTP-CURL)
 
 SET(CURL_INT_TEST_COUNT 0)
@@ -124,8 +140,6 @@ add_test(NAME HttpPostIntegrationTestChunked COMMAND 
HttpPostIntegrationTest "${
 
 add_test(NAME C2VerifyServeResults COMMAND C2VerifyServeResults 
"${TEST_RESOURCES}/TestHTTPGet.yml" "${TEST_RESOURCES}/")
 
-add_test(NAME C2NullConfiguration 

[28/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/db/column_family.cc
--
diff --git a/thirdparty/rocksdb/db/column_family.cc 
b/thirdparty/rocksdb/db/column_family.cc
new file mode 100644
index 000..b00eda0
--- /dev/null
+++ b/thirdparty/rocksdb/db/column_family.cc
@@ -0,0 +1,1137 @@
+//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+//  This source code is licensed under both the GPLv2 (found in the
+//  COPYING file in the root directory) and Apache 2.0 License
+//  (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "db/column_family.h"
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "db/compaction_picker.h"
+#include "db/compaction_picker_universal.h"
+#include "db/db_impl.h"
+#include "db/internal_stats.h"
+#include "db/job_context.h"
+#include "db/table_properties_collector.h"
+#include "db/version_set.h"
+#include "db/write_controller.h"
+#include "memtable/hash_skiplist_rep.h"
+#include "monitoring/thread_status_util.h"
+#include "options/options_helper.h"
+#include "table/block_based_table_factory.h"
+#include "util/autovector.h"
+#include "util/compression.h"
+
+namespace rocksdb {
+
+ColumnFamilyHandleImpl::ColumnFamilyHandleImpl(
+ColumnFamilyData* column_family_data, DBImpl* db, InstrumentedMutex* mutex)
+: cfd_(column_family_data), db_(db), mutex_(mutex) {
+  if (cfd_ != nullptr) {
+cfd_->Ref();
+  }
+}
+
+ColumnFamilyHandleImpl::~ColumnFamilyHandleImpl() {
+  if (cfd_ != nullptr) {
+#ifndef ROCKSDB_LITE
+for (auto& listener : cfd_->ioptions()->listeners) {
+  listener->OnColumnFamilyHandleDeletionStarted(this);
+}
+#endif  // ROCKSDB_LITE
+// Job id == 0 means that this is not our background process, but rather
+// user thread
+JobContext job_context(0);
+mutex_->Lock();
+if (cfd_->Unref()) {
+  delete cfd_;
+}
+db_->FindObsoleteFiles(_context, false, true);
+mutex_->Unlock();
+if (job_context.HaveSomethingToDelete()) {
+  db_->PurgeObsoleteFiles(job_context);
+}
+job_context.Clean();
+  }
+}
+
+uint32_t ColumnFamilyHandleImpl::GetID() const { return cfd()->GetID(); }
+
+const std::string& ColumnFamilyHandleImpl::GetName() const {
+  return cfd()->GetName();
+}
+
+Status ColumnFamilyHandleImpl::GetDescriptor(ColumnFamilyDescriptor* desc) {
+#ifndef ROCKSDB_LITE
+  // accessing mutable cf-options requires db mutex.
+  InstrumentedMutexLock l(mutex_);
+  *desc = ColumnFamilyDescriptor(cfd()->GetName(), 
cfd()->GetLatestCFOptions());
+  return Status::OK();
+#else
+  return Status::NotSupported();
+#endif  // !ROCKSDB_LITE
+}
+
+const Comparator* ColumnFamilyHandleImpl::GetComparator() const {
+  return cfd()->user_comparator();
+}
+
+void GetIntTblPropCollectorFactory(
+const ImmutableCFOptions& ioptions,
+std::vector*
+int_tbl_prop_collector_factories) {
+  auto& collector_factories = ioptions.table_properties_collector_factories;
+  for (size_t i = 0; i < ioptions.table_properties_collector_factories.size();
+   ++i) {
+assert(collector_factories[i]);
+int_tbl_prop_collector_factories->emplace_back(
+new UserKeyTablePropertiesCollectorFactory(collector_factories[i]));
+  }
+  // Add collector to collect internal key statistics
+  int_tbl_prop_collector_factories->emplace_back(
+  new InternalKeyPropertiesCollectorFactory);
+}
+
+Status CheckCompressionSupported(const ColumnFamilyOptions& cf_options) {
+  if (!cf_options.compression_per_level.empty()) {
+for (size_t level = 0; level < cf_options.compression_per_level.size();
+ ++level) {
+  if (!CompressionTypeSupported(cf_options.compression_per_level[level])) {
+return Status::InvalidArgument(
+"Compression type " +
+CompressionTypeToString(cf_options.compression_per_level[level]) +
+" is not linked with the binary.");
+  }
+}
+  } else {
+if (!CompressionTypeSupported(cf_options.compression)) {
+  return Status::InvalidArgument(
+  "Compression type " +
+  CompressionTypeToString(cf_options.compression) +
+  " is not linked with the binary.");
+}
+  }
+  return Status::OK();
+}
+
+Status CheckConcurrentWritesSupported(const ColumnFamilyOptions& cf_options) {
+  if (cf_options.inplace_update_support) {
+return Status::InvalidArgument(
+"In-place memtable updates (inplace_update_support) is not compatible "
+"with concurrent writes (allow_concurrent_memtable_write)");
+  }
+  if 

[03/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/docs/_posts/2016-01-29-compaction_pri.markdown
--
diff --git a/thirdparty/rocksdb/docs/_posts/2016-01-29-compaction_pri.markdown 
b/thirdparty/rocksdb/docs/_posts/2016-01-29-compaction_pri.markdown
new file mode 100644
index 000..ba9ee62
--- /dev/null
+++ b/thirdparty/rocksdb/docs/_posts/2016-01-29-compaction_pri.markdown
@@ -0,0 +1,51 @@
+---
+title: Option of Compaction Priority
+layout: post
+author: sdong
+category: blog
+redirect_from:
+  - /blog/2921/compaction_pri/
+---
+
+The most popular compaction style of RocksDB is level-based compaction, which 
is an improved version of LevelDB's compaction algorithm. Page 9- 16 of this 
[slides](https://github.com/facebook/rocksdb/blob/gh-pages/talks/2015-09-29-HPTS-Siying-RocksDB.pdf)
 gives an illustrated introduction of this compaction style. The basic idea 
that: data is organized by multiple levels with exponential increasing target 
size. Except a special level 0, every level is key-range partitioned into many 
files. When size of a level exceeds its target size, we pick one or more of its 
files, and merge the file into the next level.
+
+
+
+Which file to pick to compact is an interesting question. LevelDB only uses 
one thread for compaction and it always picks files in round robin manner. We 
implemented multi-thread compaction in RocksDB by picking multiple files from 
the same level and compact them in parallel. We had to move away from LevelDB's 
file picking approach. Recently, we created an option 
[options.compaction_pri](https://github.com/facebook/rocksdb/blob/d6c838f1e130d8860407bc771fa6d4ac238859ba/include/rocksdb/options.h#L83-L93),
 which indicated three different algorithms to pick files to compact.
+
+Why do we need to multiple algorithms to choose from? Because there are 
different factors to consider when picking the files, and we now don't yet know 
how to balance them automatically, so we expose it to users to choose. Here are 
factors to consider:
+
+**Write amplification**
+
+When we estimate write amplification, we usually simplify the problem by 
assuming keys are uniformly distributed inside each level. In reality, it is 
not the case, even if user updates are uniformly distributed across the whole 
key range. For instance, when we compact one file of a level to the next level, 
it creates a hole. Over time, incoming compaction will fill data to the hole, 
but the density will still be lower for a while. Picking a file with keys least 
densely populated is more expensive to get the file to the next level, because 
there will be more overlapping files in the next level so we need to rewrite 
more data. For example, assume a file is 100MB, if an L2 file overlaps with 8 
L3 files, we need to rewrite about 800MB of data to get the file to L3. If the 
file overlaps with 12 L3 files, we'll need to rewrite about 1200MB to get a 
file of the same size out of L2. It uses 50% more writes. (This analysis 
ignores the key density of the next level, because the range covers N
  times of files in that level so one hole only impacts write amplification by 
1/N)
+
+If all the updates are uniformly distributed, LevelDB's approach optimizes 
write amplification, because a file being picked covers a range whose last 
compaction time to the next level is the oldest, so the range will accumulated 
keys from incoming compactions for the longest and the density is the highest.
+
+We created a compaction priority **kOldestSmallestSeqFirst** for the same 
effect. With this mode, we always pick the file covers the oldest updates in 
the level, which usually is contains the densest key range. If you have a use 
case where writes are uniformly distributed across the key space and you want 
to reduce write amplification, you should set 
options.compaction_pri=kOldestSmallestSeqFirst.
+
+**Optimize for small working set**
+
+We are assuming updates are uniformly distributed across the whole key space 
in previous analysis. However, in many use cases, there are subset of keys that 
are frequently updated while other key ranges are very cold. In this case, 
keeping hot key ranges from compacting to deeper levels will benefit write 
amplification, as well as space amplification. For example, if in a DB only key 
150-160 are updated and other keys are seldom updated. If level 1 contains 20 
keys, we want to keep 150-160 all stay in level 1. Because when next level 0 -> 
1 compaction comes, it will simply overwrite existing keys so size level 1 
doesn't increase, so no need to schedule further compaction for level 1->2. On 
the other hand, if we compact key 150-155 to level2, when a new Level 1->2 
compaction comes, it increases the size of level 1, making size of level 1 
exceed target size and more compactions will be needed, which generates more 
writes.
+
+The compaction priority 

[17/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/db/db_test_util.cc
--
diff --git a/thirdparty/rocksdb/db/db_test_util.cc 
b/thirdparty/rocksdb/db/db_test_util.cc
new file mode 100644
index 000..c4d465b
--- /dev/null
+++ b/thirdparty/rocksdb/db/db_test_util.cc
@@ -0,0 +1,1395 @@
+// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+//  This source code is licensed under both the GPLv2 (found in the
+//  COPYING file in the root directory) and Apache 2.0 License
+//  (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "db/db_test_util.h"
+#include "db/forward_iterator.h"
+#include "rocksdb/env_encryption.h"
+
+namespace rocksdb {
+
+// Special Env used to delay background operations
+
+SpecialEnv::SpecialEnv(Env* base)
+: EnvWrapper(base),
+  rnd_(301),
+  sleep_counter_(this),
+  addon_time_(0),
+  time_elapse_only_sleep_(false),
+  no_slowdown_(false) {
+  delay_sstable_sync_.store(false, std::memory_order_release);
+  drop_writes_.store(false, std::memory_order_release);
+  no_space_.store(false, std::memory_order_release);
+  non_writable_.store(false, std::memory_order_release);
+  count_random_reads_ = false;
+  count_sequential_reads_ = false;
+  manifest_sync_error_.store(false, std::memory_order_release);
+  manifest_write_error_.store(false, std::memory_order_release);
+  log_write_error_.store(false, std::memory_order_release);
+  random_file_open_counter_.store(0, std::memory_order_relaxed);
+  delete_count_.store(0, std::memory_order_relaxed);
+  num_open_wal_file_.store(0);
+  log_write_slowdown_ = 0;
+  bytes_written_ = 0;
+  sync_counter_ = 0;
+  non_writeable_rate_ = 0;
+  new_writable_count_ = 0;
+  non_writable_count_ = 0;
+  table_write_callback_ = nullptr;
+}
+#ifndef ROCKSDB_LITE
+ROT13BlockCipher rot13Cipher_(16);
+#endif  // ROCKSDB_LITE
+
+DBTestBase::DBTestBase(const std::string path)
+: mem_env_(!getenv("MEM_ENV") ? nullptr : new MockEnv(Env::Default())),
+#ifndef ROCKSDB_LITE
+  encrypted_env_(
+  !getenv("ENCRYPTED_ENV")
+  ? nullptr
+  : NewEncryptedEnv(mem_env_ ? mem_env_ : Env::Default(),
+new CTREncryptionProvider(rot13Cipher_))),
+#else
+  encrypted_env_(nullptr),
+#endif  // ROCKSDB_LITE
+  env_(new SpecialEnv(encrypted_env_
+  ? encrypted_env_
+  : (mem_env_ ? mem_env_ : Env::Default(,
+  option_config_(kDefault) {
+  env_->SetBackgroundThreads(1, Env::LOW);
+  env_->SetBackgroundThreads(1, Env::HIGH);
+  dbname_ = test::TmpDir(env_) + path;
+  alternative_wal_dir_ = dbname_ + "/wal";
+  alternative_db_log_dir_ = dbname_ + "/db_log_dir";
+  auto options = CurrentOptions();
+  options.env = env_;
+  auto delete_options = options;
+  delete_options.wal_dir = alternative_wal_dir_;
+  EXPECT_OK(DestroyDB(dbname_, delete_options));
+  // Destroy it for not alternative WAL dir is used.
+  EXPECT_OK(DestroyDB(dbname_, options));
+  db_ = nullptr;
+  Reopen(options);
+  Random::GetTLSInstance()->Reset(0xdeadbeef);
+}
+
+DBTestBase::~DBTestBase() {
+  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
+  rocksdb::SyncPoint::GetInstance()->LoadDependency({});
+  rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
+  Close();
+  Options options;
+  options.db_paths.emplace_back(dbname_, 0);
+  options.db_paths.emplace_back(dbname_ + "_2", 0);
+  options.db_paths.emplace_back(dbname_ + "_3", 0);
+  options.db_paths.emplace_back(dbname_ + "_4", 0);
+  options.env = env_;
+
+  if (getenv("KEEP_DB")) {
+printf("DB is still at %s\n", dbname_.c_str());
+  } else {
+EXPECT_OK(DestroyDB(dbname_, options));
+  }
+  delete env_;
+}
+
+bool DBTestBase::ShouldSkipOptions(int option_config, int skip_mask) {
+#ifdef ROCKSDB_LITE
+// These options are not supported in ROCKSDB_LITE
+  if (option_config == kHashSkipList ||
+  option_config == kPlainTableFirstBytePrefix ||
+  option_config == kPlainTableCappedPrefix ||
+  option_config == kPlainTableCappedPrefixNonMmap ||
+  option_config == kPlainTableAllBytesPrefix ||
+  option_config == kVectorRep || option_config == kHashLinkList ||
+  option_config == kHashCuckoo || option_config == kUniversalCompaction ||
+  option_config == kUniversalCompactionMultiLevel ||
+  option_config == kUniversalSubcompactions ||
+  option_config == kFIFOCompaction ||
+  option_config == kConcurrentSkipList) {
+return true;
+}
+#endif
+
+if ((skip_mask & kSkipUniversalCompaction) &&
+(option_config == kUniversalCompaction ||
+ option_config == 

[29/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/db/c.cc
--
diff --git a/thirdparty/rocksdb/db/c.cc b/thirdparty/rocksdb/db/c.cc
new file mode 100644
index 000..788eab6
--- /dev/null
+++ b/thirdparty/rocksdb/db/c.cc
@@ -0,0 +1,3549 @@
+//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+//  This source code is licensed under both the GPLv2 (found in the
+//  COPYING file in the root directory) and Apache 2.0 License
+//  (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef ROCKSDB_LITE
+
+#include "rocksdb/c.h"
+
+#include 
+#include "port/port.h"
+#include "rocksdb/cache.h"
+#include "rocksdb/compaction_filter.h"
+#include "rocksdb/comparator.h"
+#include "rocksdb/convenience.h"
+#include "rocksdb/db.h"
+#include "rocksdb/env.h"
+#include "rocksdb/filter_policy.h"
+#include "rocksdb/iterator.h"
+#include "rocksdb/merge_operator.h"
+#include "rocksdb/options.h"
+#include "rocksdb/status.h"
+#include "rocksdb/write_batch.h"
+#include "rocksdb/memtablerep.h"
+#include "rocksdb/universal_compaction.h"
+#include "rocksdb/statistics.h"
+#include "rocksdb/slice_transform.h"
+#include "rocksdb/table.h"
+#include "rocksdb/rate_limiter.h"
+#include "rocksdb/utilities/backupable_db.h"
+#include "rocksdb/utilities/write_batch_with_index.h"
+#include "utilities/merge_operators.h"
+#include "rocksdb/utilities/transaction.h"
+#include "rocksdb/utilities/transaction_db.h"
+#include "rocksdb/utilities/checkpoint.h"
+
+using rocksdb::BytewiseComparator;
+using rocksdb::Cache;
+using rocksdb::ColumnFamilyDescriptor;
+using rocksdb::ColumnFamilyHandle;
+using rocksdb::ColumnFamilyOptions;
+using rocksdb::CompactionFilter;
+using rocksdb::CompactionFilterFactory;
+using rocksdb::CompactionFilterContext;
+using rocksdb::CompactionOptionsFIFO;
+using rocksdb::Comparator;
+using rocksdb::CompressionType;
+using rocksdb::WALRecoveryMode;
+using rocksdb::DB;
+using rocksdb::DBOptions;
+using rocksdb::DbPath;
+using rocksdb::Env;
+using rocksdb::EnvOptions;
+using rocksdb::InfoLogLevel;
+using rocksdb::FileLock;
+using rocksdb::FilterPolicy;
+using rocksdb::FlushOptions;
+using rocksdb::IngestExternalFileOptions;
+using rocksdb::Iterator;
+using rocksdb::Logger;
+using rocksdb::MergeOperator;
+using rocksdb::MergeOperators;
+using rocksdb::NewBloomFilterPolicy;
+using rocksdb::NewLRUCache;
+using rocksdb::Options;
+using rocksdb::BlockBasedTableOptions;
+using rocksdb::CuckooTableOptions;
+using rocksdb::RandomAccessFile;
+using rocksdb::Range;
+using rocksdb::ReadOptions;
+using rocksdb::SequentialFile;
+using rocksdb::Slice;
+using rocksdb::SliceParts;
+using rocksdb::SliceTransform;
+using rocksdb::Snapshot;
+using rocksdb::SstFileWriter;
+using rocksdb::Status;
+using rocksdb::WritableFile;
+using rocksdb::WriteBatch;
+using rocksdb::WriteBatchWithIndex;
+using rocksdb::WriteOptions;
+using rocksdb::LiveFileMetaData;
+using rocksdb::BackupEngine;
+using rocksdb::BackupableDBOptions;
+using rocksdb::BackupInfo;
+using rocksdb::RestoreOptions;
+using rocksdb::CompactRangeOptions;
+using rocksdb::RateLimiter;
+using rocksdb::NewGenericRateLimiter;
+using rocksdb::PinnableSlice;
+using rocksdb::TransactionDBOptions;
+using rocksdb::TransactionDB;
+using rocksdb::TransactionOptions;
+using rocksdb::Transaction;
+using rocksdb::Checkpoint;
+
+using std::shared_ptr;
+
+extern "C" {
+
+struct rocksdb_t { DB*   rep; };
+struct rocksdb_backup_engine_t   { BackupEngine* rep; };
+struct rocksdb_backup_engine_info_t { std::vector rep; };
+struct rocksdb_restore_options_t { RestoreOptions rep; };
+struct rocksdb_iterator_t{ Iterator* rep; };
+struct rocksdb_writebatch_t  { WriteBatchrep; };
+struct rocksdb_writebatch_wi_t   { WriteBatchWithIndex* rep; };
+struct rocksdb_snapshot_t{ const Snapshot*   rep; };
+struct rocksdb_flushoptions_t{ FlushOptions  rep; };
+struct rocksdb_fifo_compaction_options_t { CompactionOptionsFIFO rep; };
+struct rocksdb_readoptions_t {
+   ReadOptions rep;
+   Slice upper_bound; // stack variable to set pointer to in ReadOptions
+};
+struct rocksdb_writeoptions_t{ WriteOptions  rep; };
+struct rocksdb_options_t { Options   rep; };
+struct rocksdb_compactoptions_t {
+  CompactRangeOptions rep;
+};
+struct rocksdb_block_based_table_options_t  { BlockBasedTableOptions rep; };
+struct rocksdb_cuckoo_table_options_t  { CuckooTableOptions rep; };
+struct rocksdb_seqfile_t { SequentialFile*   rep; };
+struct rocksdb_randomfile_t  { RandomAccessFile* rep; };
+struct rocksdb_writablefile_t{ WritableFile* rep; };
+struct rocksdb_filelock_t 

[41/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/leveldb-1.18/doc/index.html
--
diff --git a/thirdparty/leveldb-1.18/doc/index.html 
b/thirdparty/leveldb-1.18/doc/index.html
deleted file mode 100755
index 3ed0ed9..000
--- a/thirdparty/leveldb-1.18/doc/index.html
+++ /dev/null
@@ -1,549 +0,0 @@
-
-
-
-
-Leveldb
-
-
-
-Leveldb
-Jeff Dean, Sanjay Ghemawat
-
-The leveldb library provides a persistent key value store.  Keys 
and
-values are arbitrary byte arrays.  The keys are ordered within the key
-value store according to a user-specified comparator function.
-
-
-Opening A Database
-
-A leveldb database has a name which corresponds to a file system
-directory.  All of the contents of database are stored in this
-directory.  The following example shows how to open a database,
-creating it if necessary:
-
-
-  #include assert
-  #include "leveldb/db.h"
-
-  leveldb::DB* db;
-  leveldb::Options options;
-  options.create_if_missing = true;
-  leveldb::Status status = leveldb::DB::Open(options, "/tmp/testdb", db);
-  assert(status.ok());
-  ...
-
-If you want to raise an error if the database already exists, add
-the following line before the leveldb::DB::Open call:
-
-  options.error_if_exists = true;
-
-Status
-
-You may have noticed the leveldb::Status type above.  Values of 
this
-type are returned by most functions in leveldb that may encounter 
an
-error.  You can check if such a result is ok, and also print an
-associated error message:
-
-
-   leveldb::Status s = ...;
-   if (!s.ok()) cerr  s.ToString()  endl;
-
-Closing A Database
-
-When you are done with a database, just delete the database object.
-Example:
-
-
-  ... open the db as described above ...
-  ... do something with db ...
-  delete db;
-
-Reads And Writes
-
-The database provides Put, Delete, and 
Get methods to
-modify/query the database.  For example, the following code
-moves the value stored under key1 to key2.
-
-  std::string value;
-  leveldb::Status s = db-Get(leveldb::ReadOptions(), key1, value);
-  if (s.ok()) s = db-Put(leveldb::WriteOptions(), key2, value);
-  if (s.ok()) s = db-Delete(leveldb::WriteOptions(), key1);
-
-
-Atomic Updates
-
-Note that if the process dies after the Put of key2 but before the
-delete of key1, the same value may be left stored under multiple keys.
-Such problems can be avoided by using the WriteBatch class to
-atomically apply a set of updates:
-
-
-  #include "leveldb/write_batch.h"
-  ...
-  std::string value;
-  leveldb::Status s = db-Get(leveldb::ReadOptions(), key1, value);
-  if (s.ok()) {
-leveldb::WriteBatch batch;
-batch.Delete(key1);
-batch.Put(key2, value);
-s = db-Write(leveldb::WriteOptions(), batch);
-  }
-
-The WriteBatch holds a sequence of edits to be made to the 
database,
-and these edits within the batch are applied in order.  Note that we
-called Delete before Put so that if 
key1 is identical to key2,
-we do not end up erroneously dropping the value entirely.
-
-Apart from its atomicity benefits, WriteBatch may also be used to
-speed up bulk updates by placing lots of individual mutations into the
-same batch.
-
-Synchronous Writes
-By default, each write to leveldb is asynchronous: it
-returns after pushing the write from the process into the operating
-system.  The transfer from operating system memory to the underlying
-persistent storage happens asynchronously.  The sync flag
-can be turned on for a particular write to make the write operation
-not return until the data being written has been pushed all the way to
-persistent storage.  (On Posix systems, this is implemented by calling
-either fsync(...) or fdatasync(...) or
-msync(..., MS_SYNC) before the write operation returns.)
-
-  leveldb::WriteOptions write_options;
-  write_options.sync = true;
-  db-Put(write_options, ...);
-
-Asynchronous writes are often more than a thousand times as fast as
-synchronous writes.  The downside of asynchronous writes is that a
-crash of the machine may cause the last few updates to be lost.  Note
-that a crash of just the writing process (i.e., not a reboot) will not
-cause any loss since even when sync is false, an update
-is pushed from the process memory into the operating system before it
-is considered done.
-
-
-Asynchronous writes can often be used safely.  For example, when
-loading a large amount of data into the database you can handle lost
-updates by restarting the bulk load after a crash.  A hybrid scheme is
-also possible where every Nth write is synchronous, and in the event
-of a crash, the bulk load is restarted just after the last synchronous
-write finished by the previous run.  (The synchronous write can update
-a marker that describes where to restart on a crash.)
-
-
-WriteBatch provides an alternative to asynchronous writes.
-Multiple updates may be placed in the same WriteBatch and
-applied together using a synchronous write (i.e.,

[49/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/libminifi/include/core/repository/FlowFileRepository.h
--
diff --git a/libminifi/include/core/repository/FlowFileRepository.h 
b/libminifi/include/core/repository/FlowFileRepository.h
deleted file mode 100644
index f2691ac..000
--- a/libminifi/include/core/repository/FlowFileRepository.h
+++ /dev/null
@@ -1,167 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef LIBMINIFI_INCLUDE_CORE_REPOSITORY_FLOWFILEREPOSITORY_H_
-#define LIBMINIFI_INCLUDE_CORE_REPOSITORY_FLOWFILEREPOSITORY_H_
-
-#include "leveldb/db.h"
-#include "leveldb/options.h"
-#include "leveldb/slice.h"
-#include "leveldb/status.h"
-#include "core/Repository.h"
-#include "core/Core.h"
-#include "Connection.h"
-#include "core/logging/LoggerConfiguration.h"
-#include "concurrentqueue.h"
-
-namespace org {
-namespace apache {
-namespace nifi {
-namespace minifi {
-namespace core {
-namespace repository {
-
-#define FLOWFILE_REPOSITORY_DIRECTORY "./flowfile_repository"
-#define MAX_FLOWFILE_REPOSITORY_STORAGE_SIZE (10*1024*1024) // 10M
-#define MAX_FLOWFILE_REPOSITORY_ENTRY_LIFE_TIME (60) // 10 minute
-#define FLOWFILE_REPOSITORY_PURGE_PERIOD (2000) // 2000 msec
-
-/**
- * Flow File repository
- * Design: Extends Repository and implements the run function, using LevelDB 
as the primary substrate.
- */
-class FlowFileRepository : public core::Repository, public 
std::enable_shared_from_this {
- public:
-  // Constructor
-
-  FlowFileRepository(const std::string repo_name = "", std::string directory = 
FLOWFILE_REPOSITORY_DIRECTORY, int64_t maxPartitionMillis = 
MAX_FLOWFILE_REPOSITORY_ENTRY_LIFE_TIME,
- int64_t maxPartitionBytes = 
MAX_FLOWFILE_REPOSITORY_STORAGE_SIZE, uint64_t purgePeriod = 
FLOWFILE_REPOSITORY_PURGE_PERIOD)
-  : Repository(repo_name.length() > 0 ? repo_name : 
core::getClassName(), directory, maxPartitionMillis, 
maxPartitionBytes, purgePeriod),
-logger_(logging::LoggerFactory::getLogger()),
-content_repo_(nullptr) {
-db_ = NULL;
-  }
-
-  // Destructor
-  ~FlowFileRepository() {
-if (db_)
-  delete db_;
-  }
-
-  virtual void flush();
-
-  // initialize
-  virtual bool initialize(const std::shared_ptr ) {
-std::string value;
-
-if (configure->get(Configure::nifi_flowfile_repository_directory_default, 
value)) {
-  directory_ = value;
-}
-logger_->log_info("NiFi FlowFile Repository Directory %s", 
directory_.c_str());
-if (configure->get(Configure::nifi_flowfile_repository_max_storage_size, 
value)) {
-  Property::StringToInt(value, max_partition_bytes_);
-}
-logger_->log_info("NiFi FlowFile Max Partition Bytes %d", 
max_partition_bytes_);
-if (configure->get(Configure::nifi_flowfile_repository_max_storage_time, 
value)) {
-  TimeUnit unit;
-  if (Property::StringToTime(value, max_partition_millis_, unit) && 
Property::ConvertTimeUnitToMS(max_partition_millis_, unit, 
max_partition_millis_)) {
-  }
-}
-logger_->log_info("NiFi FlowFile Max Storage Time: [%d] ms", 
max_partition_millis_);
-leveldb::Options options;
-options.create_if_missing = true;
-leveldb::Status status = leveldb::DB::Open(options, directory_.c_str(), 
_);
-if (status.ok()) {
-  logger_->log_info("NiFi FlowFile Repository database open %s success", 
directory_.c_str());
-} else {
-  logger_->log_error("NiFi FlowFile Repository database open %s fail", 
directory_.c_str());
-  return false;
-}
-return true;
-  }
-
-  virtual void run();
-
-  virtual bool Put(std::string key, const uint8_t *buf, size_t bufLen) {
-
-// persistent to the DB
-leveldb::Slice value((const char *) buf, bufLen);
-leveldb::Status status;
-repo_size_ += bufLen;
-status = db_->Put(leveldb::WriteOptions(), key, value);
-if (status.ok())
-  return true;
-else
-  return false;
-  }
-  /**
-   * 
-   * Deletes the key
-   * @return status of the delete operation
-   */
-  virtual bool Delete(std::string key) {
-keys_to_delete.enqueue(key);
-return true;
-  }
-  /**
-   * Sets the value 

[11/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/db/range_del_aggregator.cc
--
diff --git a/thirdparty/rocksdb/db/range_del_aggregator.cc 
b/thirdparty/rocksdb/db/range_del_aggregator.cc
new file mode 100644
index 000..0aa5d22
--- /dev/null
+++ b/thirdparty/rocksdb/db/range_del_aggregator.cc
@@ -0,0 +1,519 @@
+//  Copyright (c) 2016-present, Facebook, Inc.  All rights reserved.
+//  This source code is licensed under both the GPLv2 (found in the
+//  COPYING file in the root directory) and Apache 2.0 License
+//  (found in the LICENSE.Apache file in the root directory).
+
+#include "db/range_del_aggregator.h"
+
+#include 
+
+namespace rocksdb {
+
+RangeDelAggregator::RangeDelAggregator(
+const InternalKeyComparator& icmp,
+const std::vector& snapshots,
+bool collapse_deletions /* = true */)
+: upper_bound_(kMaxSequenceNumber),
+  icmp_(icmp),
+  collapse_deletions_(collapse_deletions) {
+  InitRep(snapshots);
+}
+
+RangeDelAggregator::RangeDelAggregator(const InternalKeyComparator& icmp,
+   SequenceNumber snapshot,
+   bool collapse_deletions /* = false */)
+: upper_bound_(snapshot),
+  icmp_(icmp),
+  collapse_deletions_(collapse_deletions) {}
+
+void RangeDelAggregator::InitRep(const std::vector& snapshots) 
{
+  assert(rep_ == nullptr);
+  rep_.reset(new Rep());
+  for (auto snapshot : snapshots) {
+rep_->stripe_map_.emplace(
+snapshot,
+PositionalTombstoneMap(TombstoneMap(
+stl_wrappers::LessOfComparator(icmp_.user_comparator();
+  }
+  // Data newer than any snapshot falls in this catch-all stripe
+  rep_->stripe_map_.emplace(
+  kMaxSequenceNumber,
+  PositionalTombstoneMap(TombstoneMap(
+  stl_wrappers::LessOfComparator(icmp_.user_comparator();
+  rep_->pinned_iters_mgr_.StartPinning();
+}
+
+bool RangeDelAggregator::ShouldDelete(
+const Slice& internal_key, RangeDelAggregator::RangePositioningMode mode) {
+  if (rep_ == nullptr) {
+return false;
+  }
+  ParsedInternalKey parsed;
+  if (!ParseInternalKey(internal_key, )) {
+assert(false);
+  }
+  return ShouldDelete(parsed, mode);
+}
+
+bool RangeDelAggregator::ShouldDelete(
+const ParsedInternalKey& parsed,
+RangeDelAggregator::RangePositioningMode mode) {
+  assert(IsValueType(parsed.type));
+  if (rep_ == nullptr) {
+return false;
+  }
+  auto& positional_tombstone_map = GetPositionalTombstoneMap(parsed.sequence);
+  const auto& tombstone_map = positional_tombstone_map.raw_map;
+  if (tombstone_map.empty()) {
+return false;
+  }
+  auto& tombstone_map_iter = positional_tombstone_map.iter;
+  if (tombstone_map_iter == tombstone_map.end() &&
+  (mode == kForwardTraversal || mode == kBackwardTraversal)) {
+// invalid (e.g., if AddTombstones() changed the deletions), so need to
+// reseek
+mode = kBinarySearch;
+  }
+  switch (mode) {
+case kFullScan:
+  assert(!collapse_deletions_);
+  // The maintained state (PositionalTombstoneMap::iter) isn't useful when
+  // we linear scan from the beginning each time, but we maintain it 
anyways
+  // for consistency.
+  tombstone_map_iter = tombstone_map.begin();
+  while (tombstone_map_iter != tombstone_map.end()) {
+const auto& tombstone = tombstone_map_iter->second;
+if (icmp_.user_comparator()->Compare(parsed.user_key,
+ tombstone.start_key_) < 0) {
+  break;
+}
+if (parsed.sequence < tombstone.seq_ &&
+icmp_.user_comparator()->Compare(parsed.user_key,
+ tombstone.end_key_) < 0) {
+  return true;
+}
+++tombstone_map_iter;
+  }
+  return false;
+case kForwardTraversal:
+  assert(collapse_deletions_ && tombstone_map_iter != tombstone_map.end());
+  if (tombstone_map_iter == tombstone_map.begin() &&
+  icmp_.user_comparator()->Compare(parsed.user_key,
+   tombstone_map_iter->first) < 0) {
+// before start of deletion intervals
+return false;
+  }
+  while (std::next(tombstone_map_iter) != tombstone_map.end() &&
+ icmp_.user_comparator()->Compare(
+ std::next(tombstone_map_iter)->first, parsed.user_key) <= 0) {
+++tombstone_map_iter;
+  }
+  break;
+case kBackwardTraversal:
+  assert(collapse_deletions_ && tombstone_map_iter != tombstone_map.end());
+  while (tombstone_map_iter != tombstone_map.begin() &&
+ icmp_.user_comparator()->Compare(parsed.user_key,
+  tombstone_map_iter->first) < 0) {
+--tombstone_map_iter;
+  }
+  if (tombstone_map_iter == tombstone_map.begin() &&
+  

[05/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/docs/_docs/faq.md
--
diff --git a/thirdparty/rocksdb/docs/_docs/faq.md 
b/thirdparty/rocksdb/docs/_docs/faq.md
new file mode 100644
index 000..0887a09
--- /dev/null
+++ b/thirdparty/rocksdb/docs/_docs/faq.md
@@ -0,0 +1,48 @@
+---
+docid: support-faq
+title: FAQ
+layout: docs
+permalink: /docs/support/faq.html
+---
+
+Here is an ever-growing list of frequently asked questions around RocksDB
+
+## What is RocksDB?
+
+RocksDB is an embeddable persistent key-value store for fast storage. RocksDB 
can also be the foundation for a client-server database but our current focus 
is on embedded workloads.
+
+RocksDB builds on [LevelDB](https://code.google.com/p/leveldb/) to be scalable 
to run on servers with many CPU cores, to efficiently use fast storage, to 
support IO-bound, in-memory and write-once workloads, and to be flexible to 
allow for innovation.
+
+For the latest details, watch [Mark Callaghan’s and Igor Canadi’s talk at 
CMU on 
10/2015](https://scs.hosted.panopto.com/Panopto/Pages/Viewer.aspx?id=f4e0eb37-ae18-468f-9248-cb73edad3e56).
 [Dhruba Borthakur’s introductory 
talk](https://github.com/facebook/rocksdb/blob/gh-pages-old/intro.pdf?raw=true) 
from the Data @ Scale 2013 conference provides some perspective about how 
RocksDB has evolved.
+
+## How does performance compare?
+
+We benchmarked LevelDB and found that it was unsuitable for our server 
workloads. The [benchmark 
results](http://leveldb.googlecode.com/svn/trunk/doc/benchmark.html) look 
awesome at first sight, but we quickly realized that those results were for a 
database whose size was smaller than the size of RAM on the test machine – 
where the entire database could fit in the OS page cache. When we performed the 
same benchmarks on a database that was at least 5 times larger than main 
memory, the performance results were dismal.
+
+By contrast, we’ve published the [RocksDB benchmark 
results](https://github.com/facebook/rocksdb/wiki/Performance-Benchmarks) for 
server side workloads on Flash. We also measured the performance of LevelDB on 
these server-workload benchmarks and found that RocksDB solidly outperforms 
LevelDB for these IO bound workloads. We found that LevelDB’s single-threaded 
compaction process was insufficient to drive server workloads. We saw frequent 
write-stalls with LevelDB that caused 99-percentile latency to be tremendously 
large. We found that mmap-ing a file into the OS cache introduced performance 
bottlenecks for reads. We could not make LevelDB consume all the IOs offered by 
the underlying Flash storage.
+
+## What is RocksDB suitable for?
+
+RocksDB can be used by applications that need low latency database accesses. 
Possibilities include:
+
+* A user-facing application that stores the viewing history and state of users 
of a website.
+* A spam detection application that needs fast access to big data sets.
+* A graph-search query that needs to scan a data set in realtime.
+* A cache data from Hadoop, thereby allowing applications to query Hadoop data 
in realtime.
+* A message-queue that supports a high number of inserts and deletes.
+
+## How big is RocksDB adoption?
+
+RocksDB is an embedded storage engine that is used in a number of backend 
systems at Facebook. In the Facebook newsfeed’s backend, it replaced another 
internal storage engine called Centrifuge and is one of the many components 
used. ZippyDB, a distributed key value store service used by Facebook products 
relies RocksDB. Details on ZippyDB are in [Muthu Annamalai’s talk at 
Data@Scale in Seattle](https://youtu.be/DfiN7pG0D0k). Dragon, a distributed 
graph query engine part of the social graph infrastructure, is using RocksDB to 
store data. Parse has been running [MongoDB on RocksDB in 
production](http://blog.parse.com/announcements/mongodb-rocksdb-parse/) since 
early 2015.
+
+RocksDB is proving to be a useful component for a lot of other groups in the 
industry. For a list of projects currently using RocksDB, take a look at our 
USERS.md list on github.
+
+## How good is RocksDB as a database storage engine?
+
+Our engineering team at Facebook firmly believes that RocksDB has great 
potential as storage engine for databases. It has been proven in production 
with MongoDB: [MongoRocks](https://github.com/mongodb-partners/mongo-rocks) is 
the RocksDB based storage engine for MongoDB.
+
+[MyRocks](https://code.facebook.com/posts/190251048047090/myrocks-a-space-and-write-optimized-mysql-database/)
 is the RocksDB based storage engine for MySQL. Using RocksDB we have managed 
to achieve 2x better compression and 10x less write amplification for our 
benchmarks compared to our existing MySQL setup. Given our current results, 
work is currently underway to develop MyRocks into a production ready solution 
for web-scale MySQL workloads. Follow along on 

[36/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/leveldb-1.18/util/testharness.cc
--
diff --git a/thirdparty/leveldb-1.18/util/testharness.cc 
b/thirdparty/leveldb-1.18/util/testharness.cc
deleted file mode 100755
index 402fab3..000
--- a/thirdparty/leveldb-1.18/util/testharness.cc
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "util/testharness.h"
-
-#include 
-#include 
-#include 
-#include 
-
-namespace leveldb {
-namespace test {
-
-namespace {
-struct Test {
-  const char* base;
-  const char* name;
-  void (*func)();
-};
-std::vector* tests;
-}
-
-bool RegisterTest(const char* base, const char* name, void (*func)()) {
-  if (tests == NULL) {
-tests = new std::vector;
-  }
-  Test t;
-  t.base = base;
-  t.name = name;
-  t.func = func;
-  tests->push_back(t);
-  return true;
-}
-
-int RunAllTests() {
-  const char* matcher = getenv("LEVELDB_TESTS");
-
-  int num = 0;
-  if (tests != NULL) {
-for (size_t i = 0; i < tests->size(); i++) {
-  const Test& t = (*tests)[i];
-  if (matcher != NULL) {
-std::string name = t.base;
-name.push_back('.');
-name.append(t.name);
-if (strstr(name.c_str(), matcher) == NULL) {
-  continue;
-}
-  }
-  fprintf(stderr, " Test %s.%s\n", t.base, t.name);
-  (*t.func)();
-  ++num;
-}
-  }
-  fprintf(stderr, " PASSED %d tests\n", num);
-  return 0;
-}
-
-std::string TmpDir() {
-  std::string dir;
-  Status s = Env::Default()->GetTestDirectory();
-  ASSERT_TRUE(s.ok()) << s.ToString();
-  return dir;
-}
-
-int RandomSeed() {
-  const char* env = getenv("TEST_RANDOM_SEED");
-  int result = (env != NULL ? atoi(env) : 301);
-  if (result <= 0) {
-result = 301;
-  }
-  return result;
-}
-
-}  // namespace test
-}  // namespace leveldb

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/leveldb-1.18/util/testharness.h
--
diff --git a/thirdparty/leveldb-1.18/util/testharness.h 
b/thirdparty/leveldb-1.18/util/testharness.h
deleted file mode 100755
index da4fe68..000
--- a/thirdparty/leveldb-1.18/util/testharness.h
+++ /dev/null
@@ -1,138 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef STORAGE_LEVELDB_UTIL_TESTHARNESS_H_
-#define STORAGE_LEVELDB_UTIL_TESTHARNESS_H_
-
-#include 
-#include 
-#include 
-#include "leveldb/env.h"
-#include "leveldb/slice.h"
-#include "util/random.h"
-
-namespace leveldb {
-namespace test {
-
-// Run some of the tests registered by the TEST() macro.  If the
-// environment variable "LEVELDB_TESTS" is not set, runs all tests.
-// Otherwise, runs only the tests whose name contains the value of
-// "LEVELDB_TESTS" as a substring.  E.g., suppose the tests are:
-//TEST(Foo, Hello) { ... }
-//TEST(Foo, World) { ... }
-// LEVELDB_TESTS=Hello will run the first test
-// LEVELDB_TESTS=o will run both tests
-// LEVELDB_TESTS=Junk  will run no tests
-//
-// Returns 0 if all tests pass.
-// Dies or returns a non-zero value if some test fails.
-extern int RunAllTests();
-
-// Return the directory to use for temporary storage.
-extern std::string TmpDir();
-
-// Return a randomization seed for this run.  Typically returns the
-// same number on repeated invocations of this binary, but automated
-// runs may be able to vary the seed.
-extern int RandomSeed();
-
-// An instance of Tester is allocated to hold temporary state during
-// the execution of an assertion.
-class Tester {
- private:
-  bool ok_;
-  const char* fname_;
-  int line_;
-  std::stringstream ss_;
-
- public:
-  Tester(const char* f, int l)
-  : ok_(true), fname_(f), line_(l) {
-  }
-
-  ~Tester() {
-if (!ok_) {
-  fprintf(stderr, "%s:%d:%s\n", fname_, line_, ss_.str().c_str());
-  exit(1);
-}
-  }
-
-  Tester& Is(bool b, const char* msg) {
-if (!b) {
-  ss_ << " Assertion failure " << msg;
-  ok_ = false;
-}
-return *this;
-  }
-
-  Tester& IsOk(const Status& s) {
-if (!s.ok()) {
-  ss_ << " " << s.ToString();
-  ok_ = false;
-}
-return *this;
-  }
-
-#define BINARY_OP(name,op)  \
-  template\
-  Tester& name(const X& x, const Y& y) {\
-if (! (x op y)) {   \
-  ss_ << " failed: " << x << (" " #op " ") << y;\
-  ok_ = false;  \
-}   \
-  

[07/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/db/write_batch.cc
--
diff --git a/thirdparty/rocksdb/db/write_batch.cc 
b/thirdparty/rocksdb/db/write_batch.cc
new file mode 100644
index 000..91be9a0
--- /dev/null
+++ b/thirdparty/rocksdb/db/write_batch.cc
@@ -0,0 +1,1396 @@
+//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+//  This source code is licensed under both the GPLv2 (found in the
+//  COPYING file in the root directory) and Apache 2.0 License
+//  (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+// WriteBatch::rep_ :=
+//sequence: fixed64
+//count: fixed32
+//data: record[count]
+// record :=
+//kTypeValue varstring varstring
+//kTypeDeletion varstring
+//kTypeSingleDeletion varstring
+//kTypeMerge varstring varstring
+//kTypeColumnFamilyValue varint32 varstring varstring
+//kTypeColumnFamilyDeletion varint32 varstring varstring
+//kTypeColumnFamilySingleDeletion varint32 varstring varstring
+//kTypeColumnFamilyMerge varint32 varstring varstring
+//kTypeBeginPrepareXID varstring
+//kTypeEndPrepareXID
+//kTypeCommitXID varstring
+//kTypeRollbackXID varstring
+//kTypeNoop
+// varstring :=
+//len: varint32
+//data: uint8[len]
+
+#include "rocksdb/write_batch.h"
+
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "db/column_family.h"
+#include "db/db_impl.h"
+#include "db/dbformat.h"
+#include "db/flush_scheduler.h"
+#include "db/memtable.h"
+#include "db/merge_context.h"
+#include "db/snapshot_impl.h"
+#include "db/write_batch_internal.h"
+#include "monitoring/perf_context_imp.h"
+#include "monitoring/statistics.h"
+#include "rocksdb/merge_operator.h"
+#include "util/coding.h"
+#include "util/string_util.h"
+
+namespace rocksdb {
+
+// anon namespace for file-local types
+namespace {
+
+enum ContentFlags : uint32_t {
+  DEFERRED = 1 << 0,
+  HAS_PUT = 1 << 1,
+  HAS_DELETE = 1 << 2,
+  HAS_SINGLE_DELETE = 1 << 3,
+  HAS_MERGE = 1 << 4,
+  HAS_BEGIN_PREPARE = 1 << 5,
+  HAS_END_PREPARE = 1 << 6,
+  HAS_COMMIT = 1 << 7,
+  HAS_ROLLBACK = 1 << 8,
+  HAS_DELETE_RANGE = 1 << 9,
+};
+
+struct BatchContentClassifier : public WriteBatch::Handler {
+  uint32_t content_flags = 0;
+
+  Status PutCF(uint32_t, const Slice&, const Slice&) override {
+content_flags |= ContentFlags::HAS_PUT;
+return Status::OK();
+  }
+
+  Status DeleteCF(uint32_t, const Slice&) override {
+content_flags |= ContentFlags::HAS_DELETE;
+return Status::OK();
+  }
+
+  Status SingleDeleteCF(uint32_t, const Slice&) override {
+content_flags |= ContentFlags::HAS_SINGLE_DELETE;
+return Status::OK();
+  }
+
+  Status DeleteRangeCF(uint32_t, const Slice&, const Slice&) override {
+content_flags |= ContentFlags::HAS_DELETE_RANGE;
+return Status::OK();
+  }
+
+  Status MergeCF(uint32_t, const Slice&, const Slice&) override {
+content_flags |= ContentFlags::HAS_MERGE;
+return Status::OK();
+  }
+
+  Status MarkBeginPrepare() override {
+content_flags |= ContentFlags::HAS_BEGIN_PREPARE;
+return Status::OK();
+  }
+
+  Status MarkEndPrepare(const Slice&) override {
+content_flags |= ContentFlags::HAS_END_PREPARE;
+return Status::OK();
+  }
+
+  Status MarkCommit(const Slice&) override {
+content_flags |= ContentFlags::HAS_COMMIT;
+return Status::OK();
+  }
+
+  Status MarkRollback(const Slice&) override {
+content_flags |= ContentFlags::HAS_ROLLBACK;
+return Status::OK();
+  }
+};
+
+}  // anon namespace
+
+struct SavePoints {
+  std::stack stack;
+};
+
+WriteBatch::WriteBatch(size_t reserved_bytes, size_t max_bytes)
+: save_points_(nullptr), content_flags_(0), max_bytes_(max_bytes), rep_() {
+  rep_.reserve((reserved_bytes > WriteBatchInternal::kHeader) ?
+reserved_bytes : WriteBatchInternal::kHeader);
+  rep_.resize(WriteBatchInternal::kHeader);
+}
+
+WriteBatch::WriteBatch(const std::string& rep)
+: save_points_(nullptr),
+  content_flags_(ContentFlags::DEFERRED),
+  max_bytes_(0),
+  rep_(rep) {}
+
+WriteBatch::WriteBatch(const WriteBatch& src)
+: save_points_(src.save_points_),
+  wal_term_point_(src.wal_term_point_),
+  content_flags_(src.content_flags_.load(std::memory_order_relaxed)),
+  max_bytes_(src.max_bytes_),
+  rep_(src.rep_) {}
+
+WriteBatch::WriteBatch(WriteBatch&& src)
+: save_points_(std::move(src.save_points_)),
+  wal_term_point_(std::move(src.wal_term_point_)),
+  content_flags_(src.content_flags_.load(std::memory_order_relaxed)),
+  max_bytes_(src.max_bytes_),
+  rep_(std::move(src.rep_)) {}
+
+WriteBatch& WriteBatch::operator=(const 

[01/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB

2017-10-09 Thread jeremydyer
Repository: nifi-minifi-cpp
Updated Branches:
  refs/heads/master 380a98bb8 -> 488677321


http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/env/env_chroot.cc
--
diff --git a/thirdparty/rocksdb/env/env_chroot.cc 
b/thirdparty/rocksdb/env/env_chroot.cc
new file mode 100644
index 000..6a1fda8
--- /dev/null
+++ b/thirdparty/rocksdb/env/env_chroot.cc
@@ -0,0 +1,324 @@
+//  Copyright (c) 2016-present, Facebook, Inc.  All rights reserved.
+//  This source code is licensed under both the GPLv2 (found in the
+//  COPYING file in the root directory) and Apache 2.0 License
+//  (found in the LICENSE.Apache file in the root directory).
+
+#if !defined(ROCKSDB_LITE) && !defined(OS_WIN)
+
+#include "env/env_chroot.h"
+
+#include 
+#include 
+#include 
+#include 
+
+#include 
+#include 
+#include 
+
+#include "rocksdb/status.h"
+
+namespace rocksdb {
+
+class ChrootEnv : public EnvWrapper {
+ public:
+  ChrootEnv(Env* base_env, const std::string& chroot_dir)
+  : EnvWrapper(base_env) {
+#if defined(OS_AIX)
+char resolvedName[PATH_MAX];
+char* real_chroot_dir = realpath(chroot_dir.c_str(), resolvedName);
+#else
+char* real_chroot_dir = realpath(chroot_dir.c_str(), nullptr);
+#endif
+// chroot_dir must exist so realpath() returns non-nullptr.
+assert(real_chroot_dir != nullptr);
+chroot_dir_ = real_chroot_dir;
+#if !defined(OS_AIX)
+free(real_chroot_dir);
+#endif
+  }
+
+  virtual Status NewSequentialFile(const std::string& fname,
+   std::unique_ptr* result,
+   const EnvOptions& options) override {
+auto status_and_enc_path = EncodePathWithNewBasename(fname);
+if (!status_and_enc_path.first.ok()) {
+  return status_and_enc_path.first;
+}
+return EnvWrapper::NewSequentialFile(status_and_enc_path.second, result,
+ options);
+  }
+
+  virtual Status NewRandomAccessFile(const std::string& fname,
+ unique_ptr* result,
+ const EnvOptions& options) override {
+auto status_and_enc_path = EncodePathWithNewBasename(fname);
+if (!status_and_enc_path.first.ok()) {
+  return status_and_enc_path.first;
+}
+return EnvWrapper::NewRandomAccessFile(status_and_enc_path.second, result,
+   options);
+  }
+
+  virtual Status NewWritableFile(const std::string& fname,
+ unique_ptr* result,
+ const EnvOptions& options) override {
+auto status_and_enc_path = EncodePathWithNewBasename(fname);
+if (!status_and_enc_path.first.ok()) {
+  return status_and_enc_path.first;
+}
+return EnvWrapper::NewWritableFile(status_and_enc_path.second, result,
+   options);
+  }
+
+  virtual Status ReuseWritableFile(const std::string& fname,
+   const std::string& old_fname,
+   unique_ptr* result,
+   const EnvOptions& options) override {
+auto status_and_enc_path = EncodePathWithNewBasename(fname);
+if (!status_and_enc_path.first.ok()) {
+  return status_and_enc_path.first;
+}
+auto status_and_old_enc_path = EncodePath(old_fname);
+if (!status_and_old_enc_path.first.ok()) {
+  return status_and_old_enc_path.first;
+}
+return EnvWrapper::ReuseWritableFile(status_and_old_enc_path.second,
+ status_and_old_enc_path.second, 
result,
+ options);
+  }
+
+  virtual Status NewRandomRWFile(const std::string& fname,
+ unique_ptr* result,
+ const EnvOptions& options) override {
+auto status_and_enc_path = EncodePathWithNewBasename(fname);
+if (!status_and_enc_path.first.ok()) {
+  return status_and_enc_path.first;
+}
+return EnvWrapper::NewRandomRWFile(status_and_enc_path.second, result,
+   options);
+  }
+
+  virtual Status NewDirectory(const std::string& dir,
+  unique_ptr* result) override {
+auto status_and_enc_path = EncodePathWithNewBasename(dir);
+if (!status_and_enc_path.first.ok()) {
+  return status_and_enc_path.first;
+}
+return EnvWrapper::NewDirectory(status_and_enc_path.second, result);
+  }
+
+  virtual Status FileExists(const std::string& fname) override {
+auto status_and_enc_path = EncodePathWithNewBasename(fname);
+if (!status_and_enc_path.first.ok()) {
+  return status_and_enc_path.first;
+}
+return EnvWrapper::FileExists(status_and_enc_path.second);
+  }
+
+  virtual Status GetChildren(const std::string& dir,
+ std::vector* 

nifi-minifi-cpp git commit: MINIFICPP-254: Resolve odd naming of isRunning variable so that it's clear we do not lock when we are running [Forced Update!]

2017-10-09 Thread jeremydyer
Repository: nifi-minifi-cpp
Updated Branches:
  refs/heads/master b4cdf964d -> 380a98bb8 (forced update)


MINIFICPP-254: Resolve odd naming of isRunning variable so that it's clear we 
do not lock when we are running

Originally identified by Fredrick Stakem.

This closes #143

Signed-off-by: Jeremy Dyer 


Project: http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/repo
Commit: http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/commit/380a98bb
Tree: http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/tree/380a98bb
Diff: http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/diff/380a98bb

Branch: refs/heads/master
Commit: 380a98bb85b185c109261b0bc3b0af878a57270e
Parents: 96d1874
Author: Marc Parisi 
Authored: Sun Oct 8 10:43:54 2017 -0400
Committer: Jeremy Dyer 
Committed: Mon Oct 9 11:26:10 2017 -0400

--
 libminifi/src/core/Connectable.cpp | 12 
 1 file changed, 8 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/380a98bb/libminifi/src/core/Connectable.cpp
--
diff --git a/libminifi/src/core/Connectable.cpp 
b/libminifi/src/core/Connectable.cpp
index cf01f0c..e2f033e 100644
--- a/libminifi/src/core/Connectable.cpp
+++ b/libminifi/src/core/Connectable.cpp
@@ -65,9 +65,11 @@ bool 
Connectable::setSupportedRelationships(std::set relatio
 
 // Whether the relationship is supported
 bool Connectable::isSupportedRelationship(core::Relationship relationship) {
-  const bool requiresLock = isRunning();
+  // if we are running we do not need a lock since the function to change 
relationships_ ( setSupportedRelationships)
+  // cannot be executed while we are running
+  const bool isConnectableRunning = isRunning();
 
-  const auto conditionalLock = !requiresLock ? std::unique_lock() 
: std::unique_lock(relationship_mutex_);
+  const auto conditionalLock = isConnectableRunning ? 
std::unique_lock() : 
std::unique_lock(relationship_mutex_);
 
   const auto  = relationships_.find(relationship.getName());
   if (it != relationships_.end()) {
@@ -95,9 +97,11 @@ bool 
Connectable::setAutoTerminatedRelationships(std::set relation
 
 // Check whether the relationship is auto terminated
 bool Connectable::isAutoTerminated(core::Relationship relationship) {
-  const bool requiresLock = isRunning();
+  // if we are running we do not need a lock since the function to change 
relationships_ ( setSupportedRelationships)
+// cannot be executed while we are running
+  const bool isConnectableRunning = isRunning();
 
-  const auto conditionalLock = !requiresLock ? std::unique_lock() 
: std::unique_lock(relationship_mutex_);
+  const auto conditionalLock = isConnectableRunning ? 
std::unique_lock() : 
std::unique_lock(relationship_mutex_);
 
   const auto  = auto_terminated_relationships_.find(relationship.getName());
   if (it != auto_terminated_relationships_.end()) {



nifi-minifi-cpp git commit: MINIFICPP-254: Resolve odd naming of isRunning variable so that it's clear we do not lock when we are running

2017-10-09 Thread jeremydyer
Repository: nifi-minifi-cpp
Updated Branches:
  refs/heads/master 96d187445 -> b4cdf964d


MINIFICPP-254: Resolve odd naming of isRunning variable so that it's clear we 
do not lock when we are running

Originally identified by Fredrick Stakem.

This closes #143

Signed-off-by: Jeremy Dyer 


Project: http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/repo
Commit: http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/commit/b4cdf964
Tree: http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/tree/b4cdf964
Diff: http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/diff/b4cdf964

Branch: refs/heads/master
Commit: b4cdf964dc9c383e50774f261528e76cc12ff3d2
Parents: 96d1874
Author: Marc Parisi 
Authored: Sun Oct 8 10:43:54 2017 -0400
Committer: Jeremy Dyer 
Committed: Mon Oct 9 09:32:53 2017 -0400

--
 extensions/bluetooth/processors/BLEScanner.cpp |  5 +
 extensions/bluetooth/processors/BLEScanner.h   | 12 
 libminifi/src/core/Connectable.cpp | 12 
 3 files changed, 25 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/b4cdf964/extensions/bluetooth/processors/BLEScanner.cpp
--
diff --git a/extensions/bluetooth/processors/BLEScanner.cpp 
b/extensions/bluetooth/processors/BLEScanner.cpp
new file mode 100644
index 000..e4152c1
--- /dev/null
+++ b/extensions/bluetooth/processors/BLEScanner.cpp
@@ -0,0 +1,5 @@
+//
+// Created by Jeremy Dyer on 10/4/17.
+//
+
+#include "BLEScanner.h"

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/b4cdf964/extensions/bluetooth/processors/BLEScanner.h
--
diff --git a/extensions/bluetooth/processors/BLEScanner.h 
b/extensions/bluetooth/processors/BLEScanner.h
new file mode 100644
index 000..40d61c8
--- /dev/null
+++ b/extensions/bluetooth/processors/BLEScanner.h
@@ -0,0 +1,12 @@
+//
+// Created by Jeremy Dyer on 10/4/17.
+//
+
+#ifndef NIFI_MINIFI_CPP_BLESCANNER_H
+#define NIFI_MINIFI_CPP_BLESCANNER_H
+
+class BLEScanner {
+
+};
+
+#endif //NIFI_MINIFI_CPP_BLESCANNER_H

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/b4cdf964/libminifi/src/core/Connectable.cpp
--
diff --git a/libminifi/src/core/Connectable.cpp 
b/libminifi/src/core/Connectable.cpp
index cf01f0c..e2f033e 100644
--- a/libminifi/src/core/Connectable.cpp
+++ b/libminifi/src/core/Connectable.cpp
@@ -65,9 +65,11 @@ bool 
Connectable::setSupportedRelationships(std::set relatio
 
 // Whether the relationship is supported
 bool Connectable::isSupportedRelationship(core::Relationship relationship) {
-  const bool requiresLock = isRunning();
+  // if we are running we do not need a lock since the function to change 
relationships_ ( setSupportedRelationships)
+  // cannot be executed while we are running
+  const bool isConnectableRunning = isRunning();
 
-  const auto conditionalLock = !requiresLock ? std::unique_lock() 
: std::unique_lock(relationship_mutex_);
+  const auto conditionalLock = isConnectableRunning ? 
std::unique_lock() : 
std::unique_lock(relationship_mutex_);
 
   const auto  = relationships_.find(relationship.getName());
   if (it != relationships_.end()) {
@@ -95,9 +97,11 @@ bool 
Connectable::setAutoTerminatedRelationships(std::set relation
 
 // Check whether the relationship is auto terminated
 bool Connectable::isAutoTerminated(core::Relationship relationship) {
-  const bool requiresLock = isRunning();
+  // if we are running we do not need a lock since the function to change 
relationships_ ( setSupportedRelationships)
+// cannot be executed while we are running
+  const bool isConnectableRunning = isRunning();
 
-  const auto conditionalLock = !requiresLock ? std::unique_lock() 
: std::unique_lock(relationship_mutex_);
+  const auto conditionalLock = isConnectableRunning ? 
std::unique_lock() : 
std::unique_lock(relationship_mutex_);
 
   const auto  = auto_terminated_relationships_.find(relationship.getName());
   if (it != auto_terminated_relationships_.end()) {



[1/4] nifi-minifi-cpp git commit: MINIFICPP-215: Make libCuRL containing classes extensions.

2017-10-04 Thread jeremydyer
Repository: nifi-minifi-cpp
Updated Branches:
  refs/heads/master 1a2fa1ea5 -> 96d187445


http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/96d18744/libminifi/test/curl-tests/ThreadPoolAdjust.cpp
--
diff --git a/libminifi/test/curl-tests/ThreadPoolAdjust.cpp 
b/libminifi/test/curl-tests/ThreadPoolAdjust.cpp
new file mode 100644
index 000..7c15d39
--- /dev/null
+++ b/libminifi/test/curl-tests/ThreadPoolAdjust.cpp
@@ -0,0 +1,111 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include 
+#undef NDEBUG
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include "HTTPClient.h"
+#include "InvokeHTTP.h"
+#include "../TestBase.h"
+#include "utils/StringUtils.h"
+#include "core/Core.h"
+#include "../include/core/logging/Logger.h"
+#include "core/ProcessGroup.h"
+#include "core/yaml/YamlConfiguration.h"
+#include "FlowController.h"
+#include "properties/Configure.h"
+#include "../unit/ProvenanceTestHelper.h"
+#include "io/StreamFactory.h"
+#include "CivetServer.h"
+#include "RemoteProcessorGroupPort.h"
+#include "core/ConfigurableComponent.h"
+#include "controllers/SSLContextService.h"
+#include "../TestServer.h"
+#include "../integration/IntegrationBase.h"
+
+class HttpTestHarness : public IntegrationBase {
+ public:
+  HttpTestHarness() {
+char format[] = "/tmp/ssth.XX";
+dir = testController.createTempDirectory(format);
+  }
+
+  void testSetup() {
+LogTestController::getInstance().setDebug();
+LogTestController::getInstance().setDebug();
+LogTestController::getInstance().setDebug();
+LogTestController::getInstance().setDebug();
+LogTestController::getInstance().setDebug();
+LogTestController::getInstance().setDebug();
+LogTestController::getInstance().setDebug();
+
LogTestController::getInstance().setDebug();
+
LogTestController::getInstance().setDebug();
+LogTestController::getInstance().setDebug();
+LogTestController::getInstance().setDebug();
+
LogTestController::getInstance().setDebug();
+
LogTestController::getInstance().setDebug();
+LogTestController::getInstance().setDebug();
+std::fstream file;
+ss << dir << "/" << "tstFile.ext";
+file.open(ss.str(), std::ios::out);
+file << "tempFile";
+file.close();
+configuration->set("nifi.flow.engine.threads", "1");
+  }
+
+  void cleanup() {
+unlink(ss.str().c_str());
+  }
+
+  void runAssertions() {
+assert(LogTestController::getInstance().contains("curl performed") == 
true);
+assert(LogTestController::getInstance().contains("Size:1024 Offset:0") == 
true);
+assert(LogTestController::getInstance().contains("Size:0 Offset:0") == 
false);
+  }
+
+ protected:
+  char *dir;
+  std::stringstream ss;
+  TestController testController;
+};
+
+int main(int argc, char **argv) {
+  std::string key_dir, test_file_location, url;
+  if (argc > 1) {
+test_file_location = argv[1];
+key_dir = argv[2];
+  }
+
+  HttpTestHarness harness;
+
+  harness.setKeyDir(key_dir);
+
+  harness.run(test_file_location);
+
+  return 0;
+}

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/96d18744/libminifi/test/integration/C2NullConfiguration.cpp
--
diff --git a/libminifi/test/integration/C2NullConfiguration.cpp 
b/libminifi/test/integration/C2NullConfiguration.cpp
deleted file mode 100644
index 394429f..000
--- a/libminifi/test/integration/C2NullConfiguration.cpp
+++ /dev/null
@@ -1,135 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License 

  1   2   >