klion26 commented on a change in pull request #10329: [FLINK-12785][StateBackend] RocksDB savepoint recovery can use a lot of unmanaged memory
URL: https://github.com/apache/flink/pull/10329#discussion_r353772273
 
 

 ##########
 File path: flink-state-backends/flink-statebackend-rocksdb/src/test/java/org/apache/flink/contrib/streaming/state/RocksDBWriteBatchWrapperTest.java
 ##########
 @@ -65,4 +69,52 @@ public void basicTest() throws Exception {
                        }
                }
        }
+
+       /**
+        * Tests that {@link RocksDBWriteBatchWrapper} flushes once the consumed memory exceeds the preconfigured threshold.
+        */
+       @Test
+       public void testWriteBatchWrapperFlushAfterMemorySizeExceed() throws Exception {
+               try (RocksDB db = RocksDB.open(folder.newFolder().getAbsolutePath());
+                       WriteOptions options = new WriteOptions().setDisableWAL(true);
+                       ColumnFamilyHandle handle = db.createColumnFamily(new ColumnFamilyDescriptor("test".getBytes()));
+                       RocksDBWriteBatchWrapper writeBatchWrapper = new RocksDBWriteBatchWrapper(db, options, 200, 50)) {
+                       // an empty write batch serializes to sequence (8 bytes) + count (4 bytes)
+                       // for more information, refer to write_batch.cc in RocksDB
+                       assertEquals(12, writeBatchWrapper.getDataSize());
+                       byte[] dummy = new byte[6];
+                       ThreadLocalRandom.current().nextBytes(dummy);
+                       // each KV adds 1 + 1 + 1 + 6 + 1 + 6 = 16 bytes
+                       // format is [handleType|kvType|keyLen|key|valueLen|value]
+                       // for more information, refer to write_batch.cc in RocksDB
+                       writeBatchWrapper.put(handle, dummy, dummy);
+                       assertEquals(28, writeBatchWrapper.getDataSize());
+                       writeBatchWrapper.put(handle, dummy, dummy);
+                       assertEquals(44, writeBatchWrapper.getDataSize());
+                       writeBatchWrapper.put(handle, dummy, dummy);
+                       // the third put crosses the 50-byte limit, so everything is flushed and the batch is empty again
+                       assertEquals(12, writeBatchWrapper.getDataSize());
+               }
+       }
+
+
+       /**
+        * Tests that {@link RocksDBWriteBatchWrapper} flushes once the KV count exceeds the preconfigured threshold.
+        */
+       @Test
+       public void testWriteBatchWrapperFlushAfterCountExceed() throws Exception {
+               try (RocksDB db = RocksDB.open(folder.newFolder().getAbsolutePath());
+                       WriteOptions options = new WriteOptions().setDisableWAL(true);
+                       ColumnFamilyHandle handle = db.createColumnFamily(new ColumnFamilyDescriptor("test".getBytes()));
+                       RocksDBWriteBatchWrapper writeBatchWrapper = new RocksDBWriteBatchWrapper(db, options, 100, 50000)) {
+                       byte[] dummy = new byte[2];
+                       ThreadLocalRandom.current().nextBytes(dummy);
+                       for (int i = 1; i < 100; ++i) {
+                               writeBatchWrapper.put(handle, dummy, dummy);
+                               // initial 12 bytes, plus 8 bytes for each KV
+                               assertEquals(12 + 8 * i, writeBatchWrapper.getDataSize());
+                       }
+                       // the 100th put reaches the count limit and triggers a flush, emptying the batch
+                       writeBatchWrapper.put(handle, dummy, dummy);
+                       assertEquals(12, writeBatchWrapper.getDataSize());
+               }
+       }

 Review comment:
   done
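
For readers following the size accounting in these tests, the sketch below is an illustrative reduction of the flush behavior being asserted, under the assumption that the wrapper buffers writes in a RocksDB WriteBatch and flushes once either the buffered KV count or the serialized batch size crosses its configured limit. The class and member names here are hypothetical stand-ins, not the actual RocksDBWriteBatchWrapper implementation.

import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;

// Hypothetical sketch of a count- and size-bounded write batch wrapper.
class SizeBoundedWriteBatch implements AutoCloseable {

	private final RocksDB db;
	private final WriteOptions options;
	private final WriteBatch batch = new WriteBatch();
	private final int capacity;       // flush once this many KVs are buffered
	private final long maxBatchBytes; // flush once the serialized batch reaches this size

	SizeBoundedWriteBatch(RocksDB db, WriteOptions options, int capacity, long maxBatchBytes) {
		this.db = db;
		this.options = options;
		this.capacity = capacity;
		this.maxBatchBytes = maxBatchBytes;
	}

	void put(ColumnFamilyHandle handle, byte[] key, byte[] value) throws RocksDBException {
		batch.put(handle, key, value);
		// getDataSize() reports the serialized batch size: a 12-byte header
		// (8-byte sequence + 4-byte count) plus the per-record encoding.
		if (batch.count() >= capacity || batch.getDataSize() >= maxBatchBytes) {
			flush();
		}
	}

	void flush() throws RocksDBException {
		db.write(options, batch);
		batch.clear(); // back to the empty 12-byte batch
	}

	long getDataSize() {
		return batch.getDataSize();
	}

	@Override
	public void close() throws RocksDBException {
		if (batch.count() > 0) {
			flush();
		}
		batch.close();
	}
}

Under the first test's parameters (a 50-byte size limit), the batch starts at the 12-byte header, each put of a 6-byte key and 6-byte value adds 16 bytes, and the third put lands on 60 bytes, crossing the limit and flushing the batch back down to 12 bytes.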
