am-c-p-p commented on a change in pull request #656: MINIFI-1013 Used soci library.
URL: https://github.com/apache/nifi-minifi-cpp/pull/656#discussion_r358633260
 
 

 ##########
 File path: extensions/sql/processors/QueryDatabaseTable.cpp
 ##########
 @@ -0,0 +1,453 @@
+/**
+ * @file QueryDatabaseTable.cpp
+ * QueryDatabaseTable class implementation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "QueryDatabaseTable.h"
+
+#include <vector>
+#include <queue>
+#include <map>
+#include <set>
+#include <sstream>
+#include <stdio.h>
+#include <string>
+#include <iostream>
+#include <memory>
+#include <codecvt>
+#include <algorithm>
+#include <regex>
+#include <soci.h>
+
+#include "io/DataStream.h"
+#include "core/ProcessContext.h"
+#include "core/ProcessSession.h"
+#include "Exception.h"
+#include "utils/OsUtils.h"
+#include "data/DatabaseConnectors.h"
+#include "data/JSONSQLWriter.h"
+#include "data/WriteCallback.h"
+#include "data/MaxCollector.h"
+#include "data/Utils.h"
+#include "utils/file/FileUtils.h"
+
+namespace org {
+namespace apache {
+namespace nifi {
+namespace minifi {
+namespace processors {
+
+const std::string QueryDatabaseTable::ProcessorName("QueryDatabaseTable");
+
+static const core::Property s_dbControllerService(
+    core::PropertyBuilder::createProperty("DB Controller 
Service")->isRequired(true)->withDescription("Database Controller 
Service.")->supportsExpressionLanguage(true)->build());
+
+static const core::Property s_tableName(
+  core::PropertyBuilder::createProperty("Table 
Name")->isRequired(true)->withDescription("The name of the database table to be 
queried.")->supportsExpressionLanguage(true)->build());
+
+static const core::Property s_columnNames(
+  core::PropertyBuilder::createProperty("Columns to 
Return")->isRequired(false)->withDescription(
+    "A comma-separated list of column names to be used in the query. If your 
database requires special treatment of the names (quoting, e.g.), each name 
should include such treatment. "
+    "If no column names are supplied, all columns in the specified table will 
be returned. "
+    "NOTE: It is important to use consistent column names for a given table 
for incremental fetch to work 
properly.")->supportsExpressionLanguage(true)->build());
+
+static const core::Property s_maxValueColumnNames(
+  core::PropertyBuilder::createProperty("Maximum-value 
Columns")->isRequired(false)->withDescription(
+    "A comma-separated list of column names. The processor will keep track of 
the maximum value for each column that has been returned since the processor 
started running. "
+    "Using multiple columns implies an order to the column list, and each 
column's values are expected to increase more slowly than the previous columns' 
values. "
+    "Thus, using multiple columns implies a hierarchical structure of columns, 
which is usually used for partitioning tables. "
+    "This processor can be used to retrieve only those rows that have been 
added/updated since the last retrieval. "
+    "Note that some ODBC types such as bit/boolean are not conducive to 
maintaining maximum value, so columns of these types should not be listed in 
this property, and will result in error(s) during processing. "
+    "If no columns are provided, all rows from the table will be considered, 
which could have a performance impact. "
+    "NOTE: It is important to use consistent max-value column names for a 
given table for incremental fetch to work 
properly.")->supportsExpressionLanguage(true)->build());
+
+static const core::Property s_whereClause(
+  
core::PropertyBuilder::createProperty("db-fetch-where-clause")->isRequired(false)->withDescription(
+    "A custom clause to be added in the WHERE condition when building SQL 
queries.")->supportsExpressionLanguage(true)->build());
+
+static const core::Property s_sqlQuery(
+  
core::PropertyBuilder::createProperty("db-fetch-sql-query")->isRequired(false)->withDescription(
+    "A custom SQL query used to retrieve data. Instead of building a SQL query 
from other properties, this query will be wrapped as a sub-query. "
+    "Query must have no ORDER BY 
statement.")->supportsExpressionLanguage(true)->build());
+
+static const core::Property s_maxRowsPerFlowFile(
+  
core::PropertyBuilder::createProperty("qdbt-max-rows")->isRequired(true)->withDefaultValue<int>(0)->withDescription(
+    "The maximum number of result rows that will be included in a single 
FlowFile. This will allow you to break up very large result sets into multiple 
FlowFiles. "
+    "If the value specified is zero, then all rows are returned in a single 
FlowFile.")->supportsExpressionLanguage(true)->build());
+
+static core::Property s_stateDirectory(
+  core::PropertyBuilder::createProperty("State 
Directory")->isRequired(false)->withDefaultValue("QDTState")->withDescription("Directory
 which contains processor state data.")->build());
+
+static const core::Relationship s_success("success", "Successfully created 
FlowFile from SQL query result set.");
+
+// Probably need to have `FragmentAttributes` like nifi has 
https://github.com/apache/nifi/blob/master/nifi-commons/nifi-utils/src/main/java/org/apache/nifi/flowfile/attributes/FragmentAttributes.java.
+static const std::string FragmentId = "fragment.identifier";
+static const std::string FragmentIndex = "fragment.index";
+static const std::string FragmentCount = "fragment.count";
+
+// State
+class State {
+ public:
+  State(const std::string& stateDir, const std::string& uuid, 
std::shared_ptr<logging::Logger> logger)
+    :logger_(logger) {
+    if (!createUUIDDir(stateDir, uuid, filePath_)) {
+      return;
+    }
+
+    filePath_ += "State.txt";
+
+    if (!getStateFromFile()) {
+      return;
+    }
+
+    ok_ = true;
+  }
+
+  State::~State() {
+    if (file_.is_open()) {
+      file_.close();
+    }
+  }
+
+  operator bool() const {
+    return ok_;
+  }
+
+  std::unordered_map<std::string, std::string> mapState() const {
+    return mapState_;
+  }
+
+  bool writeStateToFile(const std::unordered_map<std::string, std::string>& 
mapState) {
+    file_.seekp(std::ios::beg);
+
+    auto dataSize = 0;
+    for (const auto& el : mapState) {
+      file_ << el.first << '=' << el.second << separator();
+      dataSize += el.first.size() + 1 + el.second.size() + separator().size();
+    }
+
+    // Clear old data with ' '. 
+    for (auto i = dataSize_ - dataSize; i > 0; i--) {
+      file_ << ' ';
+    }
+    dataSize_ = dataSize;
+
+    file_.flush();
+
+    mapState_ = mapState;
+  }
+
+ private:
+   static const std::string& separator() {
+     static const std::string s_separator = "@!qdt!@";
+     return s_separator;
+   }
+
+   bool createUUIDDir(const std::string& stateDir, const std::string& uuid, 
std::string& dir)
+   {
+     if (stateDir.empty()) {
+       dir.clear();
+       return false;
+     }
+
+     auto dirWithBackslash = stateDir;
+     if (stateDir.back() != '\\') {
+       dirWithBackslash += '\\';
+     }
+
+     dir = dirWithBackslash + "uuid\\" + uuid + "\\";
 
 Review comment:
   Fixed.

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services

Reply via email to