wsry commented on a change in pull request #13447:
URL: https://github.com/apache/flink/pull/13447#discussion_r493470252
##########
File path: flink-runtime/src/main/java/org/apache/flink/runtime/io/network/api/writer/RecordWriter.java
##########
@@ -109,89 +94,58 @@
         }
     }
 
-    protected void emit(T record, int targetChannel) throws IOException, InterruptedException {
+    protected void emit(T record, int targetSubpartition) throws IOException {
         checkErroneous();
-        serializer.serializeRecord(record);
-
-        // Make sure we don't hold onto the large intermediate serialization buffer for too long
-        copyFromSerializerToTargetChannel(targetChannel);
-    }
-
-    /**
-     * @param targetChannel
-     * @return <tt>true</tt> if the intermediate serialization buffer should be pruned
-     */
-    protected boolean copyFromSerializerToTargetChannel(int targetChannel) throws IOException, InterruptedException {
-        // We should reset the initial position of the intermediate serialization buffer before
-        // copying, so the serialization results can be copied to multiple target buffers.
-        serializer.reset();
-
-        boolean pruneTriggered = false;
-        BufferBuilder bufferBuilder = getBufferBuilder(targetChannel);
-        SerializationResult result = serializer.copyToBufferBuilder(bufferBuilder);
-        while (result.isFullBuffer()) {
-            finishBufferBuilder(bufferBuilder);
-
-            // If this was a full record, we are done. Not breaking out of the loop at this point
-            // will lead to another buffer request before breaking out (that would not be a
-            // problem per se, but it can lead to stalls in the pipeline).
-            if (result.isFullRecord()) {
-                pruneTriggered = true;
-                emptyCurrentBufferBuilder(targetChannel);
-                break;
-            }
-
-            bufferBuilder = requestNewBufferBuilder(targetChannel);
-            result = serializer.copyToBufferBuilder(bufferBuilder);
-        }
-        checkState(!serializer.hasSerializedData(), "All data should be written at once");
+        targetPartition.emitRecord(serializeRecord(serializer, record), targetSubpartition);
 
         if (flushAlways) {
-            flushTargetPartition(targetChannel);
+            targetPartition.flush(targetSubpartition);
         }
-        return pruneTriggered;
     }
 
     public void broadcastEvent(AbstractEvent event) throws IOException {
         broadcastEvent(event, false);
     }
 
     public void broadcastEvent(AbstractEvent event, boolean isPriorityEvent) throws IOException {
-        try (BufferConsumer eventBufferConsumer = EventSerializer.toBufferConsumer(event)) {
-            for (int targetChannel = 0; targetChannel < numberOfChannels; targetChannel++) {
-                tryFinishCurrentBufferBuilder(targetChannel);
-
-                // Retain the buffer so that it can be recycled by each channel of targetPartition
-                targetPartition.addBufferConsumer(eventBufferConsumer.copy(), targetChannel, isPriorityEvent);
-            }
+        targetPartition.broadcastEvent(event, isPriorityEvent);
 
-            if (flushAlways) {
-                flushAll();
-            }
+        if (flushAlways) {
+            flushAll();
         }
     }
 
-    public void flushAll() {
-        targetPartition.flushAll();
+    @VisibleForTesting
+    public static ByteBuffer serializeRecord(
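
The new emit path ends at the serializeRecord helper, which this excerpt truncates. Below is a rough, self-contained sketch of what such a length-prefixing helper plausibly does; the Writable interface and the 4-byte prefix layout are illustrative assumptions, not the actual Flink API.

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.nio.ByteBuffer;

    // Hypothetical stand-in for Flink's IOReadableWritable: a record that can
    // write its own bytes to a DataOutputStream.
    interface Writable {
        void write(DataOutputStream out) throws IOException;
    }

    final class RecordSerializationSketch {

        // Serialize one record into a ByteBuffer with a 4-byte length prefix,
        // so the partition can later split the payload across network buffers.
        static ByteBuffer serializeRecord(Writable record) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            DataOutputStream out = new DataOutputStream(bytes);

            out.writeInt(0);           // placeholder for the length prefix
            record.write(out);         // the record payload itself
            out.flush();

            ByteBuffer buffer = ByteBuffer.wrap(bytes.toByteArray());
            buffer.putInt(0, buffer.remaining() - 4); // patch in the payload length
            return buffer;
        }
    }

Writing the length first is what allows one serialized record to be chunked across several fixed-size network buffers and reassembled on the receiving side.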
Review comment:
I removed BufferWritingResultPartition#addBufferConsumer completely.
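
The loop that was removed from RecordWriter used to retain one BufferConsumer copy per channel. A minimal sketch of how that responsibility could look once it lives behind targetPartition.broadcastEvent follows; the BufferConsumer and Subpartition interfaces here are simplified stand-ins, not BufferWritingResultPartition's real internals.

    import java.io.IOException;
    import java.util.List;

    // Hypothetical sketch of a partition-side broadcast replacing the removed
    // RecordWriter loop: serialize the event once, then hand every
    // subpartition its own retained copy of the shared buffer.
    final class BroadcastEventSketch {

        // Simplified stand-in for Flink's BufferConsumer: copy() retains the
        // underlying buffer once more; close() releases this reference.
        interface BufferConsumer extends AutoCloseable {
            BufferConsumer copy();

            @Override
            void close();
        }

        // Simplified stand-in for a result subpartition.
        interface Subpartition {
            void add(BufferConsumer consumer, boolean isPriority) throws IOException;
        }

        static void broadcastEvent(
                List<Subpartition> subpartitions,
                BufferConsumer eventBuffer,
                boolean isPriorityEvent) throws IOException {
            // try-with-resources drops the original reference at the end; each
            // subpartition keeps its own copy alive until consumed downstream.
            try (BufferConsumer consumer = eventBuffer) {
                for (Subpartition subpartition : subpartitions) {
                    subpartition.add(consumer.copy(), isPriorityEvent);
                }
            }
        }
    }

The event is still serialized once; each subpartition simply holds its own retained copy until it is consumed, which preserves the buffer-recycling contract the old addBufferConsumer loop provided from the writer side.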
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]