poorbarcode commented on code in PR #16758:
URL: https://github.com/apache/pulsar/pull/16758#discussion_r958452138


##########
pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/TxnLogBufferedWriter.java:
##########
@@ -198,33 +223,102 @@ public void asyncAddData(T data, AddDataCallback 
callback, Object ctx){
                     AsyncAddArgs.newInstance(callback, ctx, 
System.currentTimeMillis(), byteBuf));
             return;
         }
-        singleThreadExecutorForWrite.execute(() -> internalAsyncAddData(data, 
callback, ctx));
+        singleThreadExecutorForWrite.execute(() -> {
+            internalAsyncAddData(data, callback, ctx);
+        });
     }
 
+    /**
+     * Append data to the queue; if {@link #batchedWriteMaxRecords} or {@link #batchedWriteMaxSize} is reached,
+     * flush. If a request's {@code data} is too large (larger than {@link #batchedWriteMaxSize}), then two
+     * flushes are executed:
+     *    1. Write the data cached in the queue to BK.
+     *    2. Directly write the large data to BK; this flush event will not be recorded in metrics.
+     * This ensures the sequential nature of multiple writes to BK.
+     */
     private void internalAsyncAddData(T data, AddDataCallback callback, Object 
ctx){
-        if (state == State.CLOSING || state == State.CLOSED){
-            callback.addFailed(BUFFERED_WRITER_CLOSED_EXCEPTION, ctx);
+        // Avoid missing the callback: invoke the failure callback when an error occurs before
+        // the data is added to the array.
+        boolean shouldCompensateCallBackWhenWriteFail = false;
+        try {
+            if (state == State.CLOSING || state == State.CLOSED){
+                callback.addFailed(BUFFERED_WRITER_CLOSED_EXCEPTION, ctx);
+                return;
+            }
+            int dataLength = dataSerializer.getSerializedSize(data);
+            if (dataLength >= batchedWriteMaxSize){
+                trigFlushByLargeSingleData();
+                ByteBuf byteBuf = dataSerializer.serialize(data);
+                shouldCompensateCallBackWhenWriteFail = false;
+                managedLedger.asyncAddEntry(byteBuf, 
DisabledBatchCallback.INSTANCE,
+                        AsyncAddArgs.newInstance(callback, ctx, 
System.currentTimeMillis(), byteBuf));
+                return;
+            }
+            dataArray.add(data);
+            flushContext.addCallback(callback, ctx);
+            bytesSize += dataLength;
+            shouldCompensateCallBackWhenWriteFail = false;
+            trigFlushIfReachMaxRecordsOrMaxSize();
+        } catch (Exception e){
+            if (shouldCompensateCallBackWhenWriteFail){
+                log.error("Failed to add data asynchronously, and do failed 
callback when error occur before add"
+                        + " data to the array.", e);
+                callback.addFailed(new ManagedLedgerInterceptException(e), 
ctx);

Review Comment:
   Already fixed



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to