yuzelin commented on code in PR #590:
URL: https://github.com/apache/flink-table-store/pull/590#discussion_r1135496601


##########
flink-table-store-common/src/test/java/org/apache/flink/table/store/lookup/hash/HashLookupStoreFactoryTest.java:
##########
@@ -157,52 +158,55 @@ public void testKeyLengthStartTwo() throws IOException {
         assertThat(reader.lookup(toBytes(2454441))).isNull();
     }
 
-    @Test
-    public void testDataOnTwoBuffers() throws IOException {
-        Object[] keys = new Object[] {1, 2, 3};
-        Object[] values =
-                new Object[] {
-                    generateStringData(100), generateStringData(10000), generateStringData(100)
-                };
-
-        int byteSize = toBytes(values[0]).length + toBytes(values[1]).length;
-
-        // Write
-        writeStore(file, keys, values);
-
-        // Read
-        factory = new HashLookupStoreFactory(0.75d, true, new MemorySize(byteSize - 100));
-        HashLookupStoreReader reader = factory.createReader(file);
-        for (int i = 0; i < keys.length; i++) {
-            assertThat(reader.lookup(toBytes(keys[i]))).isEqualTo(toBytes(values[i]));
-        }
-    }
-
-    @Test
-    public void testDataSizeOnTwoBuffers() throws IOException {
-        Object[] keys = new Object[] {1, 2, 3};
-        Object[] values =
-                new Object[] {
-                    generateStringData(100), generateStringData(10000), generateStringData(100)
-                };
-
-        byte[] b1 = toBytes(values[0]);
-        byte[] b2 = toBytes(values[1]);
-        int byteSize = b1.length + b2.length;
-        int sizeSize =
-                VarLengthIntUtils.encodeInt(new DataOutputSerializer(4), b1.length)
-                        + VarLengthIntUtils.encodeInt(new DataOutputSerializer(4), b2.length);
-
-        // Write
-        writeStore(file, keys, values);
-
-        // Read
-        factory = new HashLookupStoreFactory(0.75d, true, new MemorySize(byteSize + sizeSize + 3));
-        HashLookupStoreReader reader = factory.createReader(file);
-        for (int i = 0; i < keys.length; i++) {
-            assertThat(reader.lookup(toBytes(keys[i]))).isEqualTo(toBytes(values[i]));
-        }
-    }
+    //    @Test
+    //    public void testDataOnTwoBuffers() throws IOException {
+    //        Object[] keys = new Object[] {1, 2, 3};
+    //        Object[] values =
+    //                new Object[] {
+    //                    generateStringData(100), generateStringData(10000),
+    // generateStringData(100)
+    //                };
+    //
+    //        int byteSize = toBytes(values[0]).length + toBytes(values[1]).length;
+    //
+    //        // Write
+    //        writeStore(file, keys, values);
+    //
+    //        // Read
+    //        factory = new HashLookupStoreFactory(0.75d, true, new MemorySize(byteSize - 100));
+    //        HashLookupStoreReader reader = factory.createReader(file);
+    //        for (int i = 0; i < keys.length; i++) {
+    //            assertThat(reader.lookup(toBytes(keys[i]))).isEqualTo(toBytes(values[i]));
+    //        }
+    //    }
+
+    //    @Test
+    //    public void testDataSizeOnTwoBuffers() throws IOException {
+    //        Object[] keys = new Object[] {1, 2, 3};
+    //        Object[] values =
+    //                new Object[] {
+    //                    generateStringData(100), generateStringData(10000),
+    // generateStringData(100)
+    //                };
+    //
+    //        byte[] b1 = toBytes(values[0]);
+    //        byte[] b2 = toBytes(values[1]);
+    //        int byteSize = b1.length + b2.length;
+    //        int sizeSize =
+    //                VarLengthIntUtils.encodeInt(new DataOutputSerializer(4), b1.length)
+    //                        + VarLengthIntUtils.encodeInt(new DataOutputSerializer(4), b2.length);
+    //
+    //        // Write
+    //        writeStore(file, keys, values);
+    //
+    //        // Read
+    //        factory = new HashLookupStoreFactory(0.75d, true, new MemorySize(byteSize + sizeSize +
+    // 3));
+    //        HashLookupStoreReader reader = factory.createReader(file);
+    //        for (int i = 0; i < keys.length; i++) {
+    //            assertThat(reader.lookup(toBytes(keys[i]))).isEqualTo(toBytes(values[i]));
+    //        }
+    //    }

Review Comment:
   Should be removed?



##########
flink-table-store-common/src/test/java/org/apache/flink/table/store/lookup/hash/HashLookupStoreFactoryTest.java:
##########
@@ -249,23 +253,23 @@ public void testReadCompoundByteToString() throws IOException {
         testReadKeyToString(new Object[] {generateCompoundByteKey()});
     }
 
-    @Test
-    public void testReadDisk() throws IOException {
-        Integer[] keys = generateIntKeys(10000);
-
-        // Write
-        Object[] values = generateStringData(keys.length, 1000);
-        writeStore(file, keys, values);
-
-        // Read
-        factory = new HashLookupStoreFactory(0.75d, false, MemorySize.ofMebiBytes(1024));
-        HashLookupStoreReader reader = factory.createReader(file);
-
-        for (int i = 0; i < keys.length; i++) {
-            assertThat(reader.lookup(toBytes(keys[i]))).isEqualTo(toBytes(values[i]));
-        }
-        reader.close();
-    }
+    //    @Test
+    //    public void testReadDisk() throws IOException {
+    //        Integer[] keys = generateIntKeys(10000);
+    //
+    //        // Write
+    //        Object[] values = generateStringData(keys.length, 1000);
+    //        writeStore(file, keys, values);
+    //
+    //        // Read
+    //        factory = new HashLookupStoreFactory(0.75d, false, MemorySize.ofMebiBytes(1024));
+    //        HashLookupStoreReader reader = factory.createReader(file);
+    //
+    //        for (int i = 0; i < keys.length; i++) {
+    //            assertThat(reader.lookup(toBytes(keys[i]))).isEqualTo(toBytes(values[i]));
+    //        }
+    //        reader.close();
+    //    }

Review Comment:
   Should be removed?



##########
docs/content/docs/concepts/primary-key-table.md:
##########
@@ -60,7 +60,7 @@ For example, let's say Table Store receives three records:
 If the first column is the primary key. The final result will be `<1, 25.2, 10, 'This is a book'>`.
 
 {{< hint info >}}
-For streaming queries, `partial-update` merge engine must be used together with `full-compaction` [changelog producer]({{< ref "docs/concepts/primary-key-table#changelog-producers" >}}).
+For streaming queries, `partial-update` merge engine must be used together with `lookup` and `full-compaction` [changelog producer]({{< ref "docs/concepts/primary-key-table#changelog-producers" >}}).

Review Comment:
   `lookup` or `full-compaction`: a table configures only one changelog producer, so the two are alternatives, not a combination.
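
   For illustration, the hint could be backed by a snippet that picks one of the two producers. A minimal Flink SQL sketch (the table name and schema are hypothetical, mirroring the `<1, 25.2, 10, 'This is a book'>` example above):

   ```sql
   -- Hypothetical partial-update table; exactly one changelog producer is chosen.
   CREATE TABLE pu_t (
       k INT,
       a DOUBLE,
       b INT,
       c STRING,
       PRIMARY KEY (k) NOT ENFORCED
   ) WITH (
       'merge-engine' = 'partial-update',
       'changelog-producer' = 'lookup'  -- or 'full-compaction'
   );
   ```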



##########
docs/content/docs/concepts/primary-key-table.md:
##########
@@ -109,7 +109,7 @@ If you allow some functions to ignore retraction messages, you can configure: 
 `'fields.${field_name}.ignore-retract'='true'`.
 
 {{< hint info >}}
-For streaming queries, `aggregation` merge engine must be used together with `full-compaction` [changelog producer]({{< ref "docs/concepts/primary-key-table#changelog-producers" >}}).
+For streaming queries, `aggregation` merge engine must be used together with `lookup` and `full-compaction` [changelog producer]({{< ref "docs/concepts/primary-key-table#changelog-producers" >}}).

Review Comment:
   Same as above: `lookup` or `full-compaction`.
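
   Similarly for `aggregation`, a hedged sketch (hypothetical table and aggregate choice; the `fields.<field_name>.aggregate-function` key follows the same pattern as the `fields.${field_name}.ignore-retract` option quoted above):

   ```sql
   -- Hypothetical aggregation table; again one producer, not both.
   CREATE TABLE agg_t (
       k INT,
       total BIGINT,
       PRIMARY KEY (k) NOT ENFORCED
   ) WITH (
       'merge-engine' = 'aggregation',
       'fields.total.aggregate-function' = 'sum',
       'changelog-producer' = 'full-compaction'  -- or 'lookup'
   );
   ```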


