narendly commented on a change in pull request #809: Add system property
options to config auto compression
URL: https://github.com/apache/helix/pull/809#discussion_r385466281
##########
File path:
helix-core/src/test/java/org/apache/helix/manager/zk/TestZNRecordSizeLimit.java
##########
@@ -290,4 +292,176 @@ public void
testZNRecordSizeLimitUseZNRecordStreamingSerializer() {
System.out.println("END
testZNRecordSizeLimitUseZNRecordStreamingSerializer at " + new Date(
System.currentTimeMillis()));
}
+
+ /*
+ * Tests ZNRecordSerializer auto compression threshold.
+ * Two cases:
+ * 1. serialized data size is less than threshold and could be written to ZK.
+ * 2. serialized data size is greater than threshold, so ZkClientException
is thrown.
+ */
+ @Test(dependsOnMethods =
"testZNRecordSizeLimitUseZNRecordStreamingSerializer")
+ public void testZNRecordSerializerCompressThreshold() {
+ // Backup properties for later resetting.
+ final String compressionThresholdProperty =
+
System.getProperty(ZkSystemPropertyKeys.ZNRECORD_SERIALIZER_COMPRESS_THRESHOLD_BYTES);
+
+ ZNRecordSerializer serializer = new ZNRecordSerializer();
+
+ String root = getShortClassName();
+
+ byte[] buf = new byte[1024];
+ for (int i = 0; i < 1024; i++) {
+ buf[i] = 'a';
+ }
+ String bufStr = new String(buf);
+
+ // 1. legal-sized data gets written to zk
+ // write a znode of size less than threshold
+ int rawZnRecordSize = 900;
+ int thresholdKB = 800;
+ int compressionThreshold = thresholdKB * 1024;
+
System.setProperty(ZkSystemPropertyKeys.ZNRECORD_SERIALIZER_COMPRESS_THRESHOLD_BYTES,
+ String.valueOf(compressionThreshold));
+
+ final ZNRecord normalSizeRecord = new ZNRecord("normal-size");
+ for (int i = 0; i < rawZnRecordSize; i++) {
+ normalSizeRecord.setSimpleField(Integer.toString(i), bufStr);
+ }
+
+ String path = "/" + root + "/normal";
+ _gZkClient.createPersistent(path, true);
+ _gZkClient.writeData(path, normalSizeRecord);
+
+ ZNRecord record = _gZkClient.readData(path);
+
+ // Successfully reads the same data.
+ Assert.assertEquals(normalSizeRecord, record);
+
+ int length = serializer.serialize(record).length;
+
+ // Less than compression threshold so it is written to ZK.
+ Assert.assertTrue(length < compressionThreshold);
+
+ // 2. Large size data is not allowed to write to ZK
+ // Set raw record size to be large enough so its compressed data exceeds
the threshold.
+ rawZnRecordSize = 5000;
+ // Set the threshold to very small so compressed data size exceeds the
threshold.
+ thresholdKB = 1;
+ compressionThreshold = thresholdKB * 1024;
+
System.setProperty(ZkSystemPropertyKeys.ZNRECORD_SERIALIZER_COMPRESS_THRESHOLD_BYTES,
+ String.valueOf(compressionThreshold));
+
+ final ZNRecord largeRecord = new ZNRecord("large-size");
+ for (int i = 0; i < rawZnRecordSize; i++) {
+ largeRecord.setSimpleField(Integer.toString(i), bufStr);
+ }
+
+ path = "/" + root + "/large";
+ _gZkClient.createPersistent(path, true);
+
+ try {
+ _gZkClient.writeData(path, largeRecord);
+ Assert.fail("Data should not written to ZK because data size exceeds
threshold!");
+ } catch (ZkClientException expected) {
+ Assert.assertTrue(
+ expected.getMessage().contains(" is greater than " +
compressionThreshold + " bytes"));
+ }
+
+ // Delete the nodes.
+ _gZkClient.deleteRecursively("/" + root);
Review comment:
Nit: It would be good to add a TestHelper check here, given how often deleteRecursively
can fail, to make sure things have really been cleaned up.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]