ChrisSamo632 commented on code in PR #7529:
URL: https://github.com/apache/nifi/pull/7529#discussion_r1278381284
##########
nifi-nar-bundles/nifi-aws-bundle/nifi-aws-processors/src/main/java/org/apache/nifi/processors/aws/kinesis/stream/PutKinesisStream.java:
##########
@@ -126,72 +127,60 @@ public void onTrigger(final ProcessContext context, final
ProcessSession session
final int batchSize = context.getProperty(BATCH_SIZE).asInteger();
final long maxBufferSizeBytes =
context.getProperty(MAX_MESSAGE_BUFFER_SIZE_MB).asDataSize(DataUnit.B).longValue();
- List<FlowFile> flowFiles = filterMessagesByMaxSize(session, batchSize,
maxBufferSizeBytes, AWS_KINESIS_ERROR_MESSAGE);
+ final List<FlowFile> flowFiles =
KinesisProcessorUtils.filterMessagesByMaxSize(session, batchSize,
maxBufferSizeBytes, AWS_KINESIS_ERROR_MESSAGE, getLogger());
- HashMap<String, List<FlowFile>> hashFlowFiles = new HashMap<>();
- HashMap<String, List<PutRecordsRequestEntry>> recordHash = new
HashMap<String, List<PutRecordsRequestEntry>>();
-
- final AmazonKinesisClient client = getClient(context);
+ final HashMap<String, List<FlowFile>> hashFlowFiles = new HashMap<>();
+ final HashMap<String, List<PutRecordsRequestEntry>> recordHash = new
HashMap<>();
+ final KinesisClient client = getClient(context);
try {
- List<FlowFile> failedFlowFiles = new ArrayList<>();
- List<FlowFile> successfulFlowFiles = new ArrayList<>();
+ final List<FlowFile> failedFlowFiles = new ArrayList<>();
+ final List<FlowFile> successfulFlowFiles = new ArrayList<>();
// Prepare batch of records
for (int i = 0; i < flowFiles.size(); i++) {
- FlowFile flowFile = flowFiles.get(i);
+ final FlowFile flowFile = flowFiles.get(i);
- String streamName =
context.getProperty(KINESIS_STREAM_NAME).evaluateAttributeExpressions(flowFile).getValue();;
+ final String streamName =
context.getProperty(KINESIS_STREAM_NAME).evaluateAttributeExpressions(flowFile).getValue();;
final ByteArrayOutputStream baos = new ByteArrayOutputStream();
session.exportTo(flowFile, baos);
- PutRecordsRequestEntry record = new
PutRecordsRequestEntry().withData(ByteBuffer.wrap(baos.toByteArray()));
+ final PutRecordsRequestEntry.Builder recordBuilder =
PutRecordsRequestEntry.builder().data(SdkBytes.fromByteArray(baos.toByteArray()));
- String partitionKey =
context.getProperty(PutKinesisStream.KINESIS_PARTITION_KEY)
+ final String partitionKey =
context.getProperty(PutKinesisStream.KINESIS_PARTITION_KEY)
.evaluateAttributeExpressions(flowFiles.get(i)).getValue();
- if (StringUtils.isBlank(partitionKey) == false) {
- record.setPartitionKey(partitionKey);
- } else {
-
record.setPartitionKey(Integer.toString(randomPartitionKeyGenerator.nextInt()));
- }
+ recordBuilder.partitionKey(StringUtils.isBlank(partitionKey) ?
Integer.toString(randomPartitionKeyGenerator.nextInt()) : partitionKey);
- if (recordHash.containsKey(streamName) == false) {
- recordHash.put(streamName, new ArrayList<>());
- }
- if (hashFlowFiles.containsKey(streamName) == false) {
- hashFlowFiles.put(streamName, new ArrayList<>());
- }
-
- hashFlowFiles.get(streamName).add(flowFile);
- recordHash.get(streamName).add(record);
+ hashFlowFiles.computeIfAbsent(streamName, key -> new
ArrayList<>()).add(flowFile);
+ recordHash.computeIfAbsent(streamName, key -> new
ArrayList<>()).add(recordBuilder.build());
}
- for (Map.Entry<String, List<PutRecordsRequestEntry>> entryRecord :
recordHash.entrySet()) {
- String streamName = entryRecord.getKey();
- List<PutRecordsRequestEntry> records = entryRecord.getValue();
+ for (final Map.Entry<String, List<PutRecordsRequestEntry>>
entryRecord : recordHash.entrySet()) {
+ final String streamName = entryRecord.getKey();
+ final List<PutRecordsRequestEntry> records =
entryRecord.getValue();
if (records.size() > 0) {
-
- PutRecordsRequest putRecordRequest = new
PutRecordsRequest();
- putRecordRequest.setStreamName(streamName);
- putRecordRequest.setRecords(records);
- PutRecordsResult results =
client.putRecords(putRecordRequest);
-
- List<PutRecordsResultEntry> responseEntries =
results.getRecords();
- for (int i = 0; i < responseEntries.size(); i++ ) {
- PutRecordsResultEntry entry = responseEntries.get(i);
+ final PutRecordsRequest putRecordRequest =
PutRecordsRequest.builder()
+ .streamName(streamName)
+ .records(records)
+ .build();
+ final PutRecordsResponse response =
client.putRecords(putRecordRequest);
+
+ final
List<software.amazon.awssdk.services.kinesis.model.PutRecordsResultEntry>
responseEntries = response.records();
+ for (int i = 0; i < responseEntries.size(); i++) {
+ final PutRecordsResultEntry entry =
responseEntries.get(i);
FlowFile flowFile =
hashFlowFiles.get(streamName).get(i);
Map<String,String> attributes = new HashMap<>();
- attributes.put(AWS_KINESIS_SHARD_ID,
entry.getShardId());
- attributes.put(AWS_KINESIS_SEQUENCE_NUMBER,
entry.getSequenceNumber());
+ attributes.put(AWS_KINESIS_SHARD_ID, entry.shardId());
+ attributes.put(AWS_KINESIS_SEQUENCE_NUMBER,
entry.sequenceNumber());
- if (StringUtils.isBlank(entry.getErrorCode()) ==
false) {
- attributes.put(AWS_KINESIS_ERROR_CODE,
entry.getErrorCode());
- attributes.put(AWS_KINESIS_ERROR_MESSAGE,
entry.getErrorMessage());
+ if (StringUtils.isBlank(entry.errorCode()) == false) {
+ attributes.put(AWS_KINESIS_ERROR_CODE,
entry.errorCode());
+ attributes.put(AWS_KINESIS_ERROR_MESSAGE,
entry.errorMessage());
flowFile = session.putAllAttributes(flowFile,
attributes);
failedFlowFiles.add(flowFile);
} else {
Review Comment:
As we're updating this class, let's take the opportunity to replace use of
the old `new Object[] {}` logger statements (towards the end of this message)
The checks for `failedFlowFiles.size() > 0` and `successfulFlowFiles.size()
> 0` can be replaced with `!isEmpty()` calls for simplicity too
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]