This is an automated email from the ASF dual-hosted git repository. pnowojski pushed a commit to branch master in repository https://gitbox.apache.org/repos/asf/flink.git
commit 2c53a175228be3f1b840a45504cc69de18e84df8 Author: Piotr Nowojski <[email protected]> AuthorDate: Fri Sep 7 13:53:22 2018 +0200 [FLINK-10537][network] Fix network small performance degradation after merging [FLINK-9913] Checks removed by this commit were performed once per record, while before [FLINK-9913] those checks were executed only once per "continue writing with new buffer". Apparently those checks have some overhead once per record, while they only very rarely help to avoid the need to commit the data. --- .../api/serialization/SpanningRecordSerializer.java | 17 +++-------------- 1 file changed, 3 insertions(+), 14 deletions(-) diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/api/serialization/SpanningRecordSerializer.java b/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/api/serialization/SpanningRecordSerializer.java index ba2ed01..f066679 100644 --- a/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/api/serialization/SpanningRecordSerializer.java +++ b/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/api/serialization/SpanningRecordSerializer.java @@ -92,20 +92,9 @@ public class SpanningRecordSerializer<T extends IOReadableWritable> implements R */ @Override public SerializationResult copyToBufferBuilder(BufferBuilder targetBuffer) { - boolean mustCommit = false; - if (lengthBuffer.hasRemaining()) { - targetBuffer.append(lengthBuffer); - mustCommit = true; - } - - if (dataBuffer.hasRemaining()) { - targetBuffer.append(dataBuffer); - mustCommit = true; - } - - if (mustCommit) { - targetBuffer.commit(); - } + targetBuffer.append(lengthBuffer); + targetBuffer.append(dataBuffer); + targetBuffer.commit(); return getSerializationResult(targetBuffer); }
