fgerlits commented on code in PR #1927:
URL: https://github.com/apache/nifi-minifi-cpp/pull/1927#discussion_r2081335614
##########
extensions/aws/processors/PutKinesisStream.cpp:
##########
@@ -0,0 +1,201 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PutKinesisStream.h"
+
+#include <memory>
+#include <random>
+#include <unordered_map>
+
+#include "aws/kinesis/KinesisClient.h"
+#include "aws/kinesis/model/PutRecordsRequest.h"
+#include "core/ProcessContext.h"
+#include "core/ProcessSession.h"
+#include "core/Resource.h"
+#include "range/v3/view.hpp"
+#include "utils/ProcessorConfigUtils.h"
+
+namespace org::apache::nifi::minifi::aws::processors {
+
+void PutKinesisStream::initialize() {
+  setSupportedProperties(Properties);
+  setSupportedRelationships(Relationships);
+}
+
+void PutKinesisStream::onSchedule(core::ProcessContext& context, core::ProcessSessionFactory& session_factory) {
+  AwsProcessor::onSchedule(context, session_factory);
+
+  batch_size_ = parseU64Property(context, MessageBatchSize);
+  if (batch_size_ == 0 || batch_size_ > 500) {
+    logger_->log_warn("{} is invalid. Setting it to the maximum 500 value.", MessageBatchSize.name);
+    batch_size_ = 500;
+  }
+  batch_data_size_soft_cap_ = parseDataSizeProperty(context, MaxBatchDataSize);
+  if (batch_data_size_soft_cap_ > 4_MB) {
+    logger_->log_warn("{} is invalid. Setting it to the maximum 4 MB value.", MaxBatchDataSize.name);
+    batch_data_size_soft_cap_ = 4_MB;
+  }
+
+  endpoint_override_url_ = context.getProperty(EndpointOverrideURL.name) | minifi::utils::toOptional();
+}
+
+nonstd::expected<Aws::Kinesis::Model::PutRecordsRequestEntry, PutKinesisStream::BatchItemError> PutKinesisStream::createEntryFromFlowFile(const core::ProcessContext& context,
+    core::ProcessSession& session,
+    const std::shared_ptr<core::FlowFile>& flow_file) const {
+  Aws::Kinesis::Model::PutRecordsRequestEntry entry;
+  const auto partition_key = context.getProperty(AmazonKinesisStreamPartitionKey.name, flow_file.get()) | minifi::utils::valueOrElse([&flow_file] { return flow_file->getUUID().to_string(); });
+  entry.SetPartitionKey(partition_key);
+  const auto [status, buffer] = session.readBuffer(flow_file);
+  if (io::isError(status)) {
+    logger_->log_error("Couldn't read content from {}", flow_file->getUUIDStr());
+    return nonstd::make_unexpected(BatchItemError{.error_message = "Failed to read content", .error_code = std::nullopt});
+  }
+  Aws::Utils::ByteBuffer aws_buffer(reinterpret_cast<const unsigned char*>(buffer.data()), buffer.size());
+  entry.SetData(aws_buffer);
+  return entry;
+}
+
+std::unordered_map<std::string, PutKinesisStream::StreamBatch> PutKinesisStream::createStreamBatches(const core::ProcessContext& context, core::ProcessSession& session) const {
+  static constexpr uint64_t SINGLE_RECORD_MAX_SIZE = 1_MB;
+  std::unordered_map<std::string, StreamBatch> stream_batches;
+  uint64_t ff_count_in_batches = 0;
+  while (ff_count_in_batches < batch_size_) {
+    std::shared_ptr<core::FlowFile> flow_file = session.get();
+    if (!flow_file) { break; }
+    const auto flow_file_size = flow_file->getSize();
+    if (flow_file_size > SINGLE_RECORD_MAX_SIZE) {
+      flow_file->setAttribute(AwsKinesisErrorMessage.name, fmt::format("record too big {}, max allowed {}", flow_file_size, SINGLE_RECORD_MAX_SIZE));
+      session.transfer(flow_file, Failure);
+      logger_->log_error("Failed to publish to kinesis record {} because the size was greater than {} bytes", flow_file->getUUID().to_string(), SINGLE_RECORD_MAX_SIZE);
+      continue;
+    }
+
+    auto stream_name = context.getProperty(AmazonKinesisStreamName.name, flow_file.get());
+    if (!stream_name) {
+      logger_->log_error("Stream name is invalid due to {}", stream_name.error().message());
+      flow_file->setAttribute(AwsKinesisErrorMessage.name, fmt::format("Stream name is invalid due to {}", stream_name.error().message()));
+      session.transfer(flow_file, Failure);
+      continue;
+    }
+
+    auto entry = createEntryFromFlowFile(context, session, flow_file);
+    if (!entry) {
+      flow_file->addAttribute(AwsKinesisErrorMessage.name, entry.error().error_message);
+      if (entry.error().error_code) {
+        flow_file->addAttribute(AwsKinesisErrorCode.name, *entry.error().error_code);
+      }
+      session.transfer(flow_file, Failure);
+      continue;
+    }
+
+    auto [stream_batch, newly_created] = stream_batches.emplace(*stream_name, StreamBatch{});
+    if (newly_created) {
+      stream_batch->second.request.SetStreamName(*stream_name);
+    }
+    stream_batch->second.request.AddRecords(*entry);
+    stream_batch->second.items.push_back(BatchItem{.flow_file = std::move(flow_file), .error = std::nullopt});
+    stream_batches[*stream_name].batch_size += flow_file_size;
+    ++ff_count_in_batches;
+
+    if (stream_batches[*stream_name].batch_size > batch_data_size_soft_cap_) {
+      break;
+    }
+  }
+  return stream_batches;
+}
+
+void PutKinesisStream::processBatch(core::ProcessSession& session, StreamBatch& stream_batch,
+    const Aws::Kinesis::KinesisClient& client) const {
+  const auto put_record_result = client.PutRecords(stream_batch.request);
+
+  auto transfer_failed_flow_files = gsl::finally([&] {
+    for (auto& [flow_file, error] : stream_batch.items) {
+      if (error) {
+        flow_file->addAttribute(AwsKinesisErrorMessage.name, error->error_message);
+        if (error->error_code) {
+          flow_file->addAttribute(AwsKinesisErrorCode.name, *(error->error_code));
+        }
+        session.transfer(flow_file, Failure);
+      }
+    }
+  });

Review Comment:
   OK, this works, but the structure of the code is a bit hard to follow. I would prefer to break the `processBatch()` function into two separate functions: one that calls `PutRecords()` and sets the errors, if any, and another one that iterates over the `stream_batch.items` and sets the flow file properties and does the transfers.
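   A rough sketch of what I have in mind, just to illustrate the split (not a final implementation: `markBatchItemErrors` and `transferBatchItems` are placeholder names, the routing of non-error items to `Success` is assumed, and I'm reusing the `StreamBatch` / `BatchItem` / `BatchItemError` members from the diff above):

   ```cpp
   // Step 1: call PutRecords() and record any errors on the batch items.
   void PutKinesisStream::markBatchItemErrors(StreamBatch& stream_batch, const Aws::Kinesis::KinesisClient& client) const {
     const auto outcome = client.PutRecords(stream_batch.request);
     if (!outcome.IsSuccess()) {
       // The whole request failed: mark every item with the same error.
       for (auto& item : stream_batch.items) {
         item.error = BatchItemError{.error_message = outcome.GetError().GetMessage().c_str(), .error_code = std::nullopt};
       }
       return;
     }
     // The request succeeded, but individual records can still fail:
     // a rejected record has a non-empty error code in its result entry.
     const auto& records = outcome.GetResult().GetRecords();
     for (size_t i = 0; i < records.size() && i < stream_batch.items.size(); ++i) {
       if (!records[i].GetErrorCode().empty()) {
         stream_batch.items[i].error = BatchItemError{.error_message = records[i].GetErrorMessage().c_str(),
                                                      .error_code = std::string{records[i].GetErrorCode().c_str()}};
       }
     }
   }

   // Step 2: set the flow file attributes and do the transfers, based on the error state set in step 1.
   void PutKinesisStream::transferBatchItems(core::ProcessSession& session, StreamBatch& stream_batch) const {
     for (auto& [flow_file, error] : stream_batch.items) {
       if (!error) {
         session.transfer(flow_file, Success);
         continue;
       }
       flow_file->addAttribute(AwsKinesisErrorMessage.name, error->error_message);
       if (error->error_code) {
         flow_file->addAttribute(AwsKinesisErrorCode.name, *error->error_code);
       }
       session.transfer(flow_file, Failure);
     }
   }
   ```

   Then `processBatch()` would just call these two in sequence, and the `gsl::finally` trick wouldn't be needed any more.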
