This is an automated email from the ASF dual-hosted git repository.
sodonnell pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 2d58933 HDDS-5758. Speed up TestKeyInputStream and
TestChunkInputStream by combining some tests (#2656)
2d58933 is described below
commit 2d589331cf22251eb06d1dce0787709101fc8428
Author: Stephen O'Donnell <[email protected]>
AuthorDate: Mon Sep 20 14:25:35 2021 +0100
HDDS-5758. Speed up TestKeyInputStream and TestChunkInputStream by
combining some tests (#2656)
---
.../client/rpc/read/TestChunkInputStream.java | 13 ++++-
.../ozone/client/rpc/read/TestInputStreamBase.java | 44 ----------------
.../ozone/client/rpc/read/TestKeyInputStream.java | 58 ++++++++++++++++++++--
3 files changed, 65 insertions(+), 50 deletions(-)
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestChunkInputStream.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestChunkInputStream.java
index fc051c8..9248a8a 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestChunkInputStream.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestChunkInputStream.java
@@ -36,10 +36,20 @@ public class TestChunkInputStream extends
TestInputStreamBase {
}
/**
+ * Run the tests as a single test method to avoid needing a new mini-cluster
+ * for each test.
+ */
+ @Test
+ public void testAll() throws Exception {
+ testChunkReadBuffers();
+ testBufferRelease();
+ }
+
+
+ /**
* Test to verify that data read from chunks is stored in a list of buffers
* with max capacity equal to the bytes per checksum.
*/
- @Test
public void testChunkReadBuffers() throws Exception {
String keyName = getNewKeyName();
int dataLength = (2 * BLOCK_SIZE) + (CHUNK_SIZE);
@@ -104,7 +114,6 @@ public class TestChunkInputStream extends
TestInputStreamBase {
* Test that ChunkInputStream buffers are released as soon as the last byte
* of the buffer is read.
*/
- @Test
public void testBufferRelease() throws Exception {
String keyName = getNewKeyName();
int dataLength = CHUNK_SIZE;
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java
index ccfd541..7a23715 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java
@@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.client.rpc.read;
import java.io.IOException;
import java.time.Duration;
-import java.util.List;
import java.util.Random;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
@@ -29,8 +28,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.OzoneClientConfig;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import
org.apache.hadoop.hdds.scm.container.ReplicationManager.ReplicationManagerConfiguration;
-import org.apache.hadoop.hdds.scm.storage.BlockInputStream;
-import org.apache.hadoop.hdds.scm.storage.ChunkInputStream;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.client.ObjectStore;
@@ -38,7 +35,6 @@ import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.client.io.KeyInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.common.utils.BufferUtils;
import org.apache.hadoop.ozone.container.ContainerTestHelper;
import org.apache.hadoop.ozone.container.TestHelper;
import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
@@ -47,7 +43,6 @@ import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
-import org.junit.Test;
import org.junit.rules.Timeout;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
@@ -207,43 +202,4 @@ public abstract class TestInputStreamBase {
"position " + (offset + i), expectedData[i], readData[i]);
}
}
-
- @Test
- public void testInputStreams() throws Exception {
- String keyName = getNewKeyName();
- int dataLength = (2 * BLOCK_SIZE) + (CHUNK_SIZE) + 1;
- writeRandomBytes(keyName, dataLength);
-
- KeyInputStream keyInputStream = getKeyInputStream(keyName);
-
- // Verify BlockStreams and ChunkStreams
- int expectedNumBlockStreams = BufferUtils.getNumberOfBins(
- dataLength, BLOCK_SIZE);
- List<BlockInputStream> blockStreams = keyInputStream.getBlockStreams();
- Assert.assertEquals(expectedNumBlockStreams, blockStreams.size());
-
- int readBlockLength = 0;
- for (BlockInputStream blockStream : blockStreams) {
- int blockStreamLength = Math.min(BLOCK_SIZE,
- dataLength - readBlockLength);
- Assert.assertEquals(blockStreamLength, blockStream.getLength());
-
- int expectedNumChunkStreams =
- BufferUtils.getNumberOfBins(blockStreamLength, CHUNK_SIZE);
- blockStream.initialize();
- List<ChunkInputStream> chunkStreams = blockStream.getChunkStreams();
- Assert.assertEquals(expectedNumChunkStreams, chunkStreams.size());
-
- int readChunkLength = 0;
- for (ChunkInputStream chunkStream : chunkStreams) {
- int chunkStreamLength = Math.min(CHUNK_SIZE,
- blockStreamLength - readChunkLength);
- Assert.assertEquals(chunkStreamLength, chunkStream.getRemaining());
-
- readChunkLength += chunkStreamLength;
- }
-
- readBlockLength += blockStreamLength;
- }
- }
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestKeyInputStream.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestKeyInputStream.java
index f93caf9..3af15d3 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestKeyInputStream.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestKeyInputStream.java
@@ -33,7 +33,10 @@ import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.XceiverClientMetrics;
import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
+import org.apache.hadoop.hdds.scm.storage.BlockInputStream;
+import org.apache.hadoop.hdds.scm.storage.ChunkInputStream;
import org.apache.hadoop.ozone.client.io.KeyInputStream;
+import org.apache.hadoop.ozone.common.utils.BufferUtils;
import org.apache.hadoop.ozone.container.TestHelper;
import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
@@ -110,7 +113,58 @@ public class TestKeyInputStream extends
TestInputStreamBase {
validateData(inputData, (int) seek, readData);
}
+ /**
+ * This test runs the others as a single test, so as to avoid creating a
+ * new mini-cluster for each test.
+ */
@Test
+ public void testNonReplicationReads() throws Exception {
+ testInputStreams();
+ testSeekRandomly();
+ testSeek();
+ testReadChunkWithByteArray();
+ testReadChunkWithByteBuffer();
+ testSkip();
+ }
+
+ public void testInputStreams() throws Exception {
+ String keyName = getNewKeyName();
+ int dataLength = (2 * BLOCK_SIZE) + (CHUNK_SIZE) + 1;
+ writeRandomBytes(keyName, dataLength);
+
+ KeyInputStream keyInputStream = getKeyInputStream(keyName);
+
+ // Verify BlockStreams and ChunkStreams
+ int expectedNumBlockStreams = BufferUtils.getNumberOfBins(
+ dataLength, BLOCK_SIZE);
+ List<BlockInputStream> blockStreams = keyInputStream.getBlockStreams();
+ Assert.assertEquals(expectedNumBlockStreams, blockStreams.size());
+
+ int readBlockLength = 0;
+ for (BlockInputStream blockStream : blockStreams) {
+ int blockStreamLength = Math.min(BLOCK_SIZE,
+ dataLength - readBlockLength);
+ Assert.assertEquals(blockStreamLength, blockStream.getLength());
+
+ int expectedNumChunkStreams =
+ BufferUtils.getNumberOfBins(blockStreamLength, CHUNK_SIZE);
+ blockStream.initialize();
+ List<ChunkInputStream> chunkStreams = blockStream.getChunkStreams();
+ Assert.assertEquals(expectedNumChunkStreams, chunkStreams.size());
+
+ int readChunkLength = 0;
+ for (ChunkInputStream chunkStream : chunkStreams) {
+ int chunkStreamLength = Math.min(CHUNK_SIZE,
+ blockStreamLength - readChunkLength);
+ Assert.assertEquals(chunkStreamLength, chunkStream.getRemaining());
+
+ readChunkLength += chunkStreamLength;
+ }
+
+ readBlockLength += blockStreamLength;
+ }
+ }
+
public void testSeekRandomly() throws Exception {
String keyName = getNewKeyName();
int dataLength = (2 * BLOCK_SIZE) + (CHUNK_SIZE);
@@ -141,7 +195,6 @@ public class TestKeyInputStream extends TestInputStreamBase
{
keyInputStream.close();
}
- @Test
public void testSeek() throws Exception {
XceiverClientManager.resetXceiverClientMetrics();
XceiverClientMetrics metrics = XceiverClientManager
@@ -187,7 +240,6 @@ public class TestKeyInputStream extends TestInputStreamBase
{
}
}
- @Test
public void testReadChunkWithByteArray() throws Exception {
String keyName = getNewKeyName();
@@ -208,7 +260,6 @@ public class TestKeyInputStream extends TestInputStreamBase
{
}
}
- @Test
public void testReadChunkWithByteBuffer() throws Exception {
String keyName = getNewKeyName();
@@ -229,7 +280,6 @@ public class TestKeyInputStream extends TestInputStreamBase
{
}
}
- @Test
public void testSkip() throws Exception {
XceiverClientManager.resetXceiverClientMetrics();
XceiverClientMetrics metrics = XceiverClientManager
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]