liuml07 commented on a change in pull request #2189:
URL: https://github.com/apache/hadoop/pull/2189#discussion_r487387683



##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
##########
@@ -2298,9 +2298,9 @@ private void cacheBlock(String bpid, long blockId) {
               ": volume was not an instance of FsVolumeImpl.");
           return;
         }
-        if (volume.isTransientStorage()) {
+        if (volume.isRAMStorage()) {
           LOG.warn("Caching not supported on block with id " + blockId +
-              " since the volume is backed by RAM.");
+              " since the volume is backed by RAM_DISK or NVDIMM.");

Review comment:
       nit: we can make it clear which volume it is.
   
   ```
    LOG.warn("Caching not supported on block with id {} since the volume "
       + "is backed by {} which is RAM.", blockId, volume);
   ```
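   
   Using the SLF4J `{}` placeholders also avoids building the message via string concatenation.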

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopology.java
##########
@@ -182,15 +212,16 @@ public void testGetStorageTypeInfo() throws Exception {
    */
   @Test
   public void testAddAndRemoveTopology() throws Exception {
-    String[] newRack = {"/l1/d1/r1", "/l1/d1/r3", "/l1/d3/r3", "/l1/d3/r3"};
-    String[] newHost = {"nhost1", "nhost2", "nhost3", "nhost4"};
+    String[] newRack = {"/l1/d1/r1", "/l1/d1/r3", "/l1/d3/r3", "/l1/d3/r3",
+        "/l1/d3/r4"};
+    String[] newHost = {"nhost1", "nhost2", "nhost3", "nhost4", "nhost5"};
     String[] newips = {"30.30.30.30", "31.31.31.31", "32.32.32.32",
-        "33.33.33.33"};
+        "33.33.33.33", "33.33.33.34"};

Review comment:
       nit: why not use `34.34.34.34`?
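   
   That would keep one distinct address pattern per new host, i.e.
   
   ```
    String[] newips = {"30.30.30.30", "31.31.31.31", "32.32.32.32",
        "33.33.33.33", "34.34.34.34"};
   ```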

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
##########
@@ -145,9 +150,11 @@ public void testStorageTypeStatsJMX() throws Exception {
      Map<String,Object> storageTypeStats = (Map<String,Object>)entry.get("value");
       typesPresent.add(storageType);
       if (storageType.equals("ARCHIVE") || storageType.equals("DISK") ) {
-        assertEquals(3l, storageTypeStats.get("nodesInService"));
+        assertEquals(3L, storageTypeStats.get("nodesInService"));

Review comment:
       As this `if`-`else if` chain is getting longer, shall we use a `switch` statement?
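   
   A rough sketch (the `NVDIMM` branch and its expected count are illustrative,
   and `fail` assumes a static import from `org.junit.Assert`):
   
   ```
   switch (storageType) {
   case "ARCHIVE":
   case "DISK":
     // same expectation as the existing branch
     assertEquals(3L, storageTypeStats.get("nodesInService"));
     break;
   case "NVDIMM":
     // illustrative; use whatever count the new assertions actually expect
     assertEquals(1L, storageTypeStats.get("nodesInService"));
     break;
   default:
     fail("Unexpected storage type: " + storageType);
   }
   ```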

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java
##########
@@ -43,14 +43,15 @@ public void testDataDirParsing() throws Throwable {
 
     File dir5 = new File("/dir5");
     File dir6 = new File("/dir6");
+    File dir7 = new File("/dir7");
     // Verify that a valid string is correctly parsed, and that storage
     // type is not case-sensitive and we are able to handle white-space between
     // storage type and URI.
     String locations1 = "[disk]/dir0,[DISK]/dir1,[sSd]/dir2,[disK]/dir3," +
-            "[ram_disk]/dir4,[disk]/dir5, [disk] /dir6, [disk] ";
+            "[ram_disk]/dir4,[disk]/dir5, [disk] /dir6, [disk], [nvdimm]/dir7";

Review comment:
       The invalid 8th entry deliberately ends with a space for testing. Let's keep it, i.e.
   
   ```
   "[ram_disk]/dir4,[disk]/dir5, [disk] /dir6, [disk] , [nvdimm]/dir7";
   ```

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopology.java
##########
@@ -120,10 +125,11 @@ public void testGetStorageTypeInfo() throws Exception {
     HashMap<String, EnumMap<StorageType, Integer>> d2info =
         d2.getChildrenStorageInfo();
     assertEquals(1, d2info.keySet().size());
-    assertTrue(d2info.get("r3").size() == 3);
+    assertTrue(d2info.get("r3").size() == 4);

Review comment:
       nit: replace with `assertEquals(4, d2info.get("r3").size());`

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
##########
@@ -202,6 +205,14 @@ public void testDfsReservedForDifferentStorageTypes() throws IOException {
         .setConf(conf)
         .build();
     assertEquals("", 100L, volume4.getReserved());
+    FsVolumeImpl volume5 = new FsVolumeImplBuilder().setDataset(dataset)
+        .setStorageDirectory(
+            new StorageDirectory(
+                StorageLocation.parse("[NVDIMM]"+volDir.getPath())))
+        .setStorageID("storage-id")
+        .setConf(conf)
+        .build();
+    assertEquals("", 3L, volume5.getReserved());

Review comment:
       nit: let's use a better assertion statement and drop the empty message: `assertEquals(3L, volume5.getReserved());`

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopology.java
##########
@@ -64,37 +64,42 @@ public void setupDatanodes() {
     final String[] racks = {
         "/l1/d1/r1", "/l1/d1/r1", "/l1/d1/r2", "/l1/d1/r2", "/l1/d1/r2",
 
-        "/l1/d2/r3", "/l1/d2/r3", "/l1/d2/r3",
+        "/l1/d2/r3", "/l1/d2/r3", "/l1/d2/r3", "/l1/d2/r3",
 
         "/l2/d3/r1", "/l2/d3/r2", "/l2/d3/r3", "/l2/d3/r4", "/l2/d3/r5",
 
         "/l2/d4/r1", "/l2/d4/r1", "/l2/d4/r1", "/l2/d4/r1", "/l2/d4/r1",
-        "/l2/d4/r1", "/l2/d4/r1"};
+        "/l2/d4/r1", "/l2/d4/r1", "/l2/d4/r2",
+        "/l3/d5/r1", "/l3/d5/r1", "/l3/d5/r2"};

Review comment:
       nit: add a blank line before each new rack group, i.e.
   ```
     "/l2/d4/r1", "/l2/d4/r1", "/l2/d4/r1", "/l2/d4/r1", "/l2/d4/r1",
     "/l2/d4/r1", "/l2/d4/r1", "/l2/d4/r2",
     "/l3/d5/r1", "/l3/d5/r1", "/l3/d5/r2"};
   ```
   can be 
   ```
     "/l2/d4/r1", "/l2/d4/r1", "/l2/d4/r1", "/l2/d4/r1", "/l2/d4/r1",
     "/l2/d4/r1", "/l2/d4/r1", "/l2/d4/r2",
     
     "/l3/d5/r1", "/l3/d5/r1", "/l3/d5/r2"};
   ```
   
   Same for `hosts`: please put the last three hosts on a new line so that `racks`, `hosts`, and `types` can be read side by side at a glance.

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopology.java
##########
@@ -182,15 +212,16 @@ public void testGetStorageTypeInfo() throws Exception {
    */
   @Test
   public void testAddAndRemoveTopology() throws Exception {
-    String[] newRack = {"/l1/d1/r1", "/l1/d1/r3", "/l1/d3/r3", "/l1/d3/r3"};
-    String[] newHost = {"nhost1", "nhost2", "nhost3", "nhost4"};
+    String[] newRack = {"/l1/d1/r1", "/l1/d1/r3", "/l1/d3/r3", "/l1/d3/r3",
+        "/l1/d3/r4"};
+    String[] newHost = {"nhost1", "nhost2", "nhost3", "nhost4", "nhost5"};
     String[] newips = {"30.30.30.30", "31.31.31.31", "32.32.32.32",
-        "33.33.33.33"};
+        "33.33.33.33", "33.33.33.34"};
     StorageType[] newTypes = {StorageType.DISK, StorageType.SSD,
-        StorageType.SSD, StorageType.SSD};
-    DatanodeDescriptor[] newDD = new DatanodeDescriptor[4];
+        StorageType.SSD, StorageType.SSD, StorageType.NVDIMM};
+    DatanodeDescriptor[] newDD = new DatanodeDescriptor[5];
 
-    for (int i = 0; i<4; i++) {
+    for (int i = 0; i<5; i++) {

Review comment:
       nit: let's have spaces before and after the `<` operator
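   
   i.e.
   
   ```
   for (int i = 0; i < 5; i++) {
   ```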

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
##########
@@ -1103,6 +1103,8 @@ public void testSetQuota() throws Exception {
         () -> webHdfs.setQuotaByStorageType(path, StorageType.SSD, -100));
     LambdaTestUtils.intercept(IllegalArgumentException.class,
         () -> webHdfs.setQuotaByStorageType(path, StorageType.RAM_DISK, 100));
+    LambdaTestUtils.intercept(IllegalArgumentException.class,
+        () -> webHdfs.setQuotaByStorageType(path, StorageType.NVDIMM, -100));

Review comment:
       Could we also add a few happy-path tests?
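   
   A rough sketch (assumes a `DistributedFileSystem` handle for the same
   cluster, here called `dfs`, is available in this test for verification):
   
   ```
   // Happy path: a valid NVDIMM quota should be accepted without throwing.
   webHdfs.setQuotaByStorageType(path, StorageType.NVDIMM, 100);
   assertEquals(100,
       dfs.getQuotaUsage(path).getTypeQuota(StorageType.NVDIMM));
   ```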




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]


