[10/50] [abbrv] hadoop git commit: HDFS-13728. Disk Balancer should not fail if volume usage is greater than capacity. Contributed by Stephen O'Donnell.
HDFS-13728. Disk Balancer should not fail if volume usage is greater than capacity. Contributed by Stephen O'Donnell. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6677717c Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6677717c Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6677717c Branch: refs/heads/HDFS-12090 Commit: 6677717c689cc94a15f14c3466242e23652d473b Parents: 2b0f977 Author: Xiao Chen Authored: Tue Aug 7 22:04:41 2018 -0700 Committer: Xiao Chen Committed: Tue Aug 7 22:05:17 2018 -0700 -- .../diskbalancer/datamodel/DiskBalancerVolume.java | 17 - .../hdfs/server/diskbalancer/TestDataModels.java | 16 2 files changed, 28 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/6677717c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java index a9fd7f0..e43b83e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java @@ -21,9 +21,10 @@ import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; -import com.google.common.base.Preconditions; import org.apache.hadoop.hdfs.web.JsonUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; @@ -35,6 +36,9 @@ public class DiskBalancerVolume { private static final ObjectReader 
READER = new ObjectMapper().readerFor(DiskBalancerVolume.class); + private static final Logger LOG = + LoggerFactory.getLogger(DiskBalancerVolume.class); + private String path; private long capacity; private String storageType; @@ -269,10 +273,13 @@ public class DiskBalancerVolume { * @param dfsUsedSpace - dfsUsedSpace for this volume. */ public void setUsed(long dfsUsedSpace) { -Preconditions.checkArgument(dfsUsedSpace < this.getCapacity(), -"DiskBalancerVolume.setUsed: dfsUsedSpace(%s) < capacity(%s)", -dfsUsedSpace, getCapacity()); -this.used = dfsUsedSpace; +if (dfsUsedSpace > this.getCapacity()) { + LOG.warn("Volume usage ("+dfsUsedSpace+") is greater than capacity ("+ +this.getCapacity()+"). Setting volume usage to the capacity"); + this.used = this.getCapacity(); +} else { + this.used = dfsUsedSpace; +} } /** http://git-wip-us.apache.org/repos/asf/hadoop/blob/6677717c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java index ace8212..12fbcf1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java @@ -224,4 +224,20 @@ public class TestDataModels { Assert .assertEquals(cluster.getNodes().size(), newCluster.getNodes().size()); } + + @Test + public void testUsageLimitedToCapacity() throws Exception { +DiskBalancerTestUtil util = new DiskBalancerTestUtil(); + +// If usage is greater than capacity, then it should be set to capacity +DiskBalancerVolume v1 = util.createRandomVolume(StorageType.DISK); +v1.setCapacity(DiskBalancerTestUtil.GB); +v1.setUsed(2 * DiskBalancerTestUtil.GB); 
+Assert.assertEquals(v1.getUsed(),v1.getCapacity()); +// If usage is less than capacity, usage should be set to the real usage +DiskBalancerVolume v2 = util.createRandomVolume(StorageType.DISK); +v2.setCapacity(2*DiskBalancerTestUtil.GB); +v2.setUsed(DiskBalancerTestUtil.GB); +Assert.assertEquals(v2.getUsed(),DiskBalancerTestUtil.GB); + } } - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional
[29/50] [abbrv] hadoop git commit: HDFS-13728. Disk Balancer should not fail if volume usage is greater than capacity. Contributed by Stephen O'Donnell.
HDFS-13728. Disk Balancer should not fail if volume usage is greater than capacity. Contributed by Stephen O'Donnell. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6677717c Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6677717c Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6677717c Branch: refs/heads/HDFS-12943 Commit: 6677717c689cc94a15f14c3466242e23652d473b Parents: 2b0f977 Author: Xiao Chen Authored: Tue Aug 7 22:04:41 2018 -0700 Committer: Xiao Chen Committed: Tue Aug 7 22:05:17 2018 -0700 -- .../diskbalancer/datamodel/DiskBalancerVolume.java | 17 - .../hdfs/server/diskbalancer/TestDataModels.java | 16 2 files changed, 28 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/6677717c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java index a9fd7f0..e43b83e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java @@ -21,9 +21,10 @@ import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; -import com.google.common.base.Preconditions; import org.apache.hadoop.hdfs.web.JsonUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; @@ -35,6 +36,9 @@ public class DiskBalancerVolume { private static final ObjectReader 
READER = new ObjectMapper().readerFor(DiskBalancerVolume.class); + private static final Logger LOG = + LoggerFactory.getLogger(DiskBalancerVolume.class); + private String path; private long capacity; private String storageType; @@ -269,10 +273,13 @@ public class DiskBalancerVolume { * @param dfsUsedSpace - dfsUsedSpace for this volume. */ public void setUsed(long dfsUsedSpace) { -Preconditions.checkArgument(dfsUsedSpace < this.getCapacity(), -"DiskBalancerVolume.setUsed: dfsUsedSpace(%s) < capacity(%s)", -dfsUsedSpace, getCapacity()); -this.used = dfsUsedSpace; +if (dfsUsedSpace > this.getCapacity()) { + LOG.warn("Volume usage ("+dfsUsedSpace+") is greater than capacity ("+ +this.getCapacity()+"). Setting volume usage to the capacity"); + this.used = this.getCapacity(); +} else { + this.used = dfsUsedSpace; +} } /** http://git-wip-us.apache.org/repos/asf/hadoop/blob/6677717c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java index ace8212..12fbcf1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java @@ -224,4 +224,20 @@ public class TestDataModels { Assert .assertEquals(cluster.getNodes().size(), newCluster.getNodes().size()); } + + @Test + public void testUsageLimitedToCapacity() throws Exception { +DiskBalancerTestUtil util = new DiskBalancerTestUtil(); + +// If usage is greater than capacity, then it should be set to capacity +DiskBalancerVolume v1 = util.createRandomVolume(StorageType.DISK); +v1.setCapacity(DiskBalancerTestUtil.GB); +v1.setUsed(2 * DiskBalancerTestUtil.GB); 
+Assert.assertEquals(v1.getUsed(),v1.getCapacity()); +// If usage is less than capacity, usage should be set to the real usage +DiskBalancerVolume v2 = util.createRandomVolume(StorageType.DISK); +v2.setCapacity(2*DiskBalancerTestUtil.GB); +v2.setUsed(DiskBalancerTestUtil.GB); +Assert.assertEquals(v2.getUsed(),DiskBalancerTestUtil.GB); + } } - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional
hadoop git commit: HDFS-13728. Disk Balancer should not fail if volume usage is greater than capacity. Contributed by Stephen O'Donnell.
Repository: hadoop Updated Branches: refs/heads/branch-3.0 a3675f382 -> 8e5081569 HDFS-13728. Disk Balancer should not fail if volume usage is greater than capacity. Contributed by Stephen O'Donnell. (cherry picked from commit 6677717c689cc94a15f14c3466242e23652d473b) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8e508156 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8e508156 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8e508156 Branch: refs/heads/branch-3.0 Commit: 8e5081569f00cde23e58e234dc22a1dabb20323a Parents: a3675f3 Author: Xiao Chen Authored: Tue Aug 7 22:04:41 2018 -0700 Committer: Xiao Chen Committed: Tue Aug 7 22:05:59 2018 -0700 -- .../diskbalancer/datamodel/DiskBalancerVolume.java | 17 - .../hdfs/server/diskbalancer/TestDataModels.java | 16 2 files changed, 28 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e508156/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java index a9fd7f0..e43b83e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java @@ -21,9 +21,10 @@ import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; -import com.google.common.base.Preconditions; import org.apache.hadoop.hdfs.web.JsonUtil; +import 
org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; @@ -35,6 +36,9 @@ public class DiskBalancerVolume { private static final ObjectReader READER = new ObjectMapper().readerFor(DiskBalancerVolume.class); + private static final Logger LOG = + LoggerFactory.getLogger(DiskBalancerVolume.class); + private String path; private long capacity; private String storageType; @@ -269,10 +273,13 @@ public class DiskBalancerVolume { * @param dfsUsedSpace - dfsUsedSpace for this volume. */ public void setUsed(long dfsUsedSpace) { -Preconditions.checkArgument(dfsUsedSpace < this.getCapacity(), -"DiskBalancerVolume.setUsed: dfsUsedSpace(%s) < capacity(%s)", -dfsUsedSpace, getCapacity()); -this.used = dfsUsedSpace; +if (dfsUsedSpace > this.getCapacity()) { + LOG.warn("Volume usage ("+dfsUsedSpace+") is greater than capacity ("+ +this.getCapacity()+"). Setting volume usage to the capacity"); + this.used = this.getCapacity(); +} else { + this.used = dfsUsedSpace; +} } /** http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e508156/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java index ace8212..12fbcf1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java @@ -224,4 +224,20 @@ public class TestDataModels { Assert .assertEquals(cluster.getNodes().size(), newCluster.getNodes().size()); } + + @Test + public void testUsageLimitedToCapacity() throws Exception { +DiskBalancerTestUtil util = new DiskBalancerTestUtil(); + +// If usage is greater than capacity, then it should be set to capacity 
+DiskBalancerVolume v1 = util.createRandomVolume(StorageType.DISK); +v1.setCapacity(DiskBalancerTestUtil.GB); +v1.setUsed(2 * DiskBalancerTestUtil.GB); +Assert.assertEquals(v1.getUsed(),v1.getCapacity()); +// If usage is less than capacity, usage should be set to the real usage +DiskBalancerVolume v2 = util.createRandomVolume(StorageType.DISK); +v2.setCapacity(2*DiskBalancerTestUtil.GB); +v2.setUsed(DiskBalancerTestUtil.GB); +Assert.assertEquals(v2.getUsed(),DiskBalancerTestUtil.GB); + } }
hadoop git commit: HDFS-13728. Disk Balancer should not fail if volume usage is greater than capacity. Contributed by Stephen O'Donnell.
Repository: hadoop Updated Branches: refs/heads/branch-3.1 f2768eaa3 -> bf03b25f4 HDFS-13728. Disk Balancer should not fail if volume usage is greater than capacity. Contributed by Stephen O'Donnell. (cherry picked from commit 6677717c689cc94a15f14c3466242e23652d473b) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bf03b25f Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bf03b25f Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bf03b25f Branch: refs/heads/branch-3.1 Commit: bf03b25f4b940d9ee8507795fb85b2b6f36e2cf7 Parents: f2768ea Author: Xiao Chen Authored: Tue Aug 7 22:04:41 2018 -0700 Committer: Xiao Chen Committed: Tue Aug 7 22:05:51 2018 -0700 -- .../diskbalancer/datamodel/DiskBalancerVolume.java | 17 - .../hdfs/server/diskbalancer/TestDataModels.java | 16 2 files changed, 28 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf03b25f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java index a9fd7f0..e43b83e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java @@ -21,9 +21,10 @@ import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; -import com.google.common.base.Preconditions; import org.apache.hadoop.hdfs.web.JsonUtil; +import 
org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; @@ -35,6 +36,9 @@ public class DiskBalancerVolume { private static final ObjectReader READER = new ObjectMapper().readerFor(DiskBalancerVolume.class); + private static final Logger LOG = + LoggerFactory.getLogger(DiskBalancerVolume.class); + private String path; private long capacity; private String storageType; @@ -269,10 +273,13 @@ public class DiskBalancerVolume { * @param dfsUsedSpace - dfsUsedSpace for this volume. */ public void setUsed(long dfsUsedSpace) { -Preconditions.checkArgument(dfsUsedSpace < this.getCapacity(), -"DiskBalancerVolume.setUsed: dfsUsedSpace(%s) < capacity(%s)", -dfsUsedSpace, getCapacity()); -this.used = dfsUsedSpace; +if (dfsUsedSpace > this.getCapacity()) { + LOG.warn("Volume usage ("+dfsUsedSpace+") is greater than capacity ("+ +this.getCapacity()+"). Setting volume usage to the capacity"); + this.used = this.getCapacity(); +} else { + this.used = dfsUsedSpace; +} } /** http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf03b25f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java index ace8212..12fbcf1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java @@ -224,4 +224,20 @@ public class TestDataModels { Assert .assertEquals(cluster.getNodes().size(), newCluster.getNodes().size()); } + + @Test + public void testUsageLimitedToCapacity() throws Exception { +DiskBalancerTestUtil util = new DiskBalancerTestUtil(); + +// If usage is greater than capacity, then it should be set to capacity 
+DiskBalancerVolume v1 = util.createRandomVolume(StorageType.DISK); +v1.setCapacity(DiskBalancerTestUtil.GB); +v1.setUsed(2 * DiskBalancerTestUtil.GB); +Assert.assertEquals(v1.getUsed(),v1.getCapacity()); +// If usage is less than capacity, usage should be set to the real usage +DiskBalancerVolume v2 = util.createRandomVolume(StorageType.DISK); +v2.setCapacity(2*DiskBalancerTestUtil.GB); +v2.setUsed(DiskBalancerTestUtil.GB); +Assert.assertEquals(v2.getUsed(),DiskBalancerTestUtil.GB); + } }
hadoop git commit: HDFS-13728. Disk Balancer should not fail if volume usage is greater than capacity. Contributed by Stephen O'Donnell.
Repository: hadoop Updated Branches: refs/heads/trunk 2b0f97724 -> 6677717c6 HDFS-13728. Disk Balancer should not fail if volume usage is greater than capacity. Contributed by Stephen O'Donnell. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6677717c Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6677717c Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6677717c Branch: refs/heads/trunk Commit: 6677717c689cc94a15f14c3466242e23652d473b Parents: 2b0f977 Author: Xiao Chen Authored: Tue Aug 7 22:04:41 2018 -0700 Committer: Xiao Chen Committed: Tue Aug 7 22:05:17 2018 -0700 -- .../diskbalancer/datamodel/DiskBalancerVolume.java | 17 - .../hdfs/server/diskbalancer/TestDataModels.java | 16 2 files changed, 28 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/6677717c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java index a9fd7f0..e43b83e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java @@ -21,9 +21,10 @@ import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; -import com.google.common.base.Preconditions; import org.apache.hadoop.hdfs.web.JsonUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; @@ -35,6 +36,9 
@@ public class DiskBalancerVolume { private static final ObjectReader READER = new ObjectMapper().readerFor(DiskBalancerVolume.class); + private static final Logger LOG = + LoggerFactory.getLogger(DiskBalancerVolume.class); + private String path; private long capacity; private String storageType; @@ -269,10 +273,13 @@ public class DiskBalancerVolume { * @param dfsUsedSpace - dfsUsedSpace for this volume. */ public void setUsed(long dfsUsedSpace) { -Preconditions.checkArgument(dfsUsedSpace < this.getCapacity(), -"DiskBalancerVolume.setUsed: dfsUsedSpace(%s) < capacity(%s)", -dfsUsedSpace, getCapacity()); -this.used = dfsUsedSpace; +if (dfsUsedSpace > this.getCapacity()) { + LOG.warn("Volume usage ("+dfsUsedSpace+") is greater than capacity ("+ +this.getCapacity()+"). Setting volume usage to the capacity"); + this.used = this.getCapacity(); +} else { + this.used = dfsUsedSpace; +} } /** http://git-wip-us.apache.org/repos/asf/hadoop/blob/6677717c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java index ace8212..12fbcf1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java @@ -224,4 +224,20 @@ public class TestDataModels { Assert .assertEquals(cluster.getNodes().size(), newCluster.getNodes().size()); } + + @Test + public void testUsageLimitedToCapacity() throws Exception { +DiskBalancerTestUtil util = new DiskBalancerTestUtil(); + +// If usage is greater than capacity, then it should be set to capacity +DiskBalancerVolume v1 = util.createRandomVolume(StorageType.DISK); 
+v1.setCapacity(DiskBalancerTestUtil.GB); +v1.setUsed(2 * DiskBalancerTestUtil.GB); +Assert.assertEquals(v1.getUsed(),v1.getCapacity()); +// If usage is less than capacity, usage should be set to the real usage +DiskBalancerVolume v2 = util.createRandomVolume(StorageType.DISK); +v2.setCapacity(2*DiskBalancerTestUtil.GB); +v2.setUsed(DiskBalancerTestUtil.GB); +Assert.assertEquals(v2.getUsed(),DiskBalancerTestUtil.GB); + } } - To unsubscribe,