Posted to common-commits@hadoop.apache.org by in...@apache.org on 2019/06/06 19:00:02 UTC
[hadoop] branch trunk updated: HDFS-14486. The exception classes in some throw statements do not accurately describe why they are thrown. Contributed by Ayush Saxena.
This is an automated email from the ASF dual-hosted git repository.
inigoiri pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/trunk by this push:
new e1dfc06 HDFS-14486. The exception classes in some throw statements do not accurately describe why they are thrown. Contributed by Ayush Saxena.
e1dfc06 is described below
commit e1dfc060f8f0247f97127c75c9284a068fc93907
Author: Inigo Goiri <in...@apache.org>
AuthorDate: Thu Jun 6 11:59:53 2019 -0700
HDFS-14486. The exception classes in some throw statements do not accurately describe why they are thrown. Contributed by Ayush Saxena.
---
.../hadoop/hdfs/server/datanode/BlockReceiver.java | 3 +-
.../hadoop/hdfs/server/datanode/DataNode.java | 3 +-
.../datanode/checker/DatasetVolumeChecker.java | 10 +++---
.../datanode/checker/StorageLocationChecker.java | 8 +++--
.../datanode/fsdataset/impl/FsDatasetImpl.java | 4 ++-
.../TestDataNodeVolumeFailureToleration.java | 4 +--
.../hdfs/server/datanode/TestDatanodeRegister.java | 12 +++++++
.../datanode/checker/TestDatasetVolumeChecker.java | 37 ++++++++++++++++++++++
.../checker/TestStorageLocationChecker.java | 33 ++++++++++++++++++-
9 files changed, 101 insertions(+), 13 deletions(-)
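
The hunks that follow share one pattern: misconfiguration was previously reported as DiskErrorException (or a bare IOException), which suggests a hardware fault, and the patch reclassifies those cases as HadoopIllegalArgumentException; a shutdown interrupt in BlockReceiver becomes InterruptedIOException. Below is a minimal sketch of the reclassified validation. The class and method names are hypothetical, not from the patch; the -1 lower bound matches the "should be greater than or equal to -1" messages asserted in the tests further down.

import org.apache.hadoop.HadoopIllegalArgumentException;

public class VolTolerationCheck {
  // Valid range for dfs.datanode.failed.volumes.tolerated:
  // -1 (keep running while at least one volume is healthy, per the test
  // messages below) up to volsConfigured - 1.
  static void validate(int tolerated, int volsConfigured) {
    if (tolerated < -1 || tolerated >= volsConfigured) {
      // A bad config value is a caller error, so the patch throws the
      // unchecked HadoopIllegalArgumentException, not DiskErrorException.
      throw new HadoopIllegalArgumentException("Invalid value configured for "
          + "dfs.datanode.failed.volumes.tolerated - " + tolerated);
    }
  }

  public static void main(String[] args) {
    validate(0, 3);   // fine: no failures tolerated, three volumes
    validate(-2, 3);  // throws: below the -1 lower bound
  }
}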
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 9509184..dad964c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -25,6 +25,7 @@ import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.EOFException;
import java.io.IOException;
+import java.io.InterruptedIOException;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.nio.ByteBuffer;
@@ -1074,7 +1075,7 @@ class BlockReceiver implements Closeable {
responder.interrupt();
// do not throw if shutting down for restart.
if (!datanode.isRestarting()) {
- throw new IOException("Interrupted receiveBlock");
+ throw new InterruptedIOException("Interrupted receiveBlock");
}
}
responder = null;
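
java.io.InterruptedIOException extends IOException, so the narrower type above is backward compatible: existing catch (IOException) handlers still fire, while callers that care can now tell an interrupt apart from a real disk or network error. A self-contained sketch of that catch ordering follows; the receive() method is illustrative, not patch code, and only the exception hierarchy is real.

import java.io.IOException;
import java.io.InterruptedIOException;

public class CatchOrder {
  // Hypothetical stand-in for the receiveBlock() call path.
  static void receive(boolean interrupted) throws IOException {
    if (interrupted) {
      throw new InterruptedIOException("Interrupted receiveBlock");
    }
  }

  public static void main(String[] args) {
    try {
      receive(true);
    } catch (InterruptedIOException e) {
      // Interruption, e.g. a DataNode restart: not a disk fault.
      System.out.println("interrupted: " + e.getMessage());
    } catch (IOException e) {
      // A genuine I/O failure still lands here.
      System.out.println("I/O failure: " + e.getMessage());
    }
  }
}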
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index f9ef83d..09c99fd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -96,6 +96,7 @@ import javax.net.SocketFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.ReconfigurableBase;
@@ -1420,7 +1421,7 @@ public class DataNode extends ReconfigurableBase
int volsConfigured = dnConf.getVolsConfigured();
if (volFailuresTolerated < MAX_VOLUME_FAILURE_TOLERATED_LIMIT
|| volFailuresTolerated >= volsConfigured) {
- throw new DiskErrorException("Invalid value configured for "
+ throw new HadoopIllegalArgumentException("Invalid value configured for "
+ "dfs.datanode.failed.volumes.tolerated - " + volFailuresTolerated
+ ". Value configured is either less than -1 or >= "
+ "to the number of configured volumes (" + volsConfigured + ").");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
index b492287..91582fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
@@ -26,6 +26,8 @@ import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -120,7 +122,7 @@ public class DatasetVolumeChecker {
TimeUnit.MILLISECONDS);
if (maxAllowedTimeForCheckMs <= 0) {
- throw new DiskErrorException("Invalid value configured for "
+ throw new HadoopIllegalArgumentException("Invalid value configured for "
+ DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY + " - "
+ maxAllowedTimeForCheckMs + " (should be > 0)");
}
@@ -137,7 +139,7 @@ public class DatasetVolumeChecker {
TimeUnit.MILLISECONDS);
if (minDiskCheckGapMs < 0) {
- throw new DiskErrorException("Invalid value configured for "
+ throw new HadoopIllegalArgumentException("Invalid value configured for "
+ DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY + " - "
+ minDiskCheckGapMs + " (should be >= 0)");
}
@@ -148,7 +150,7 @@ public class DatasetVolumeChecker {
TimeUnit.MILLISECONDS);
if (diskCheckTimeout < 0) {
- throw new DiskErrorException("Invalid value configured for "
+ throw new HadoopIllegalArgumentException("Invalid value configured for "
+ DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY + " - "
+ diskCheckTimeout + " (should be >= 0)");
}
@@ -156,7 +158,7 @@ public class DatasetVolumeChecker {
lastAllVolumesCheck = timer.monotonicNow() - minDiskCheckGapMs;
if (maxVolumeFailuresTolerated < DataNode.MAX_VOLUME_FAILURE_TOLERATED_LIMIT) {
- throw new DiskErrorException("Invalid value configured for "
+ throw new HadoopIllegalArgumentException("Invalid value configured for "
+ DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY + " - "
+ maxVolumeFailuresTolerated + " "
+ DataNode.MAX_VOLUME_FAILURES_TOLERATED_MSG);
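
Unlike DiskErrorException, HadoopIllegalArgumentException is unchecked (it extends java.lang.IllegalArgumentException), so a bad configuration value no longer has to travel through throws IOException clauses. The four checks above also share one message template; a hypothetical helper, not part of the patch, could factor it out:

import org.apache.hadoop.HadoopIllegalArgumentException;

final class ConfChecks {
  // Hypothetical helper mirroring the repeated
  // "Invalid value configured for <key> - <value> <constraint>" template.
  static void checkArg(boolean valid, String key, long value, String hint) {
    if (!valid) {
      throw new HadoopIllegalArgumentException(
          "Invalid value configured for " + key + " - " + value + " " + hint);
    }
  }

  static void validate(long timeoutMs, long minGapMs) {
    checkArg(timeoutMs > 0, "dfs.datanode.disk.check.timeout",
        timeoutMs, "(should be > 0)");
    checkArg(minGapMs >= 0, "dfs.datanode.disk.check.min.gap",
        minGapMs, "(should be >= 0)");
  }
}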
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java
index c5de065..0332bc8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java
@@ -23,6 +23,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -93,7 +95,7 @@ public class StorageLocationChecker {
TimeUnit.MILLISECONDS);
if (maxAllowedTimeForCheckMs <= 0) {
- throw new DiskErrorException("Invalid value configured for "
+ throw new HadoopIllegalArgumentException("Invalid value configured for "
+ DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY + " - "
+ maxAllowedTimeForCheckMs + " (should be > 0)");
}
@@ -107,7 +109,7 @@ public class StorageLocationChecker {
DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT);
if (maxVolumeFailuresTolerated < DataNode.MAX_VOLUME_FAILURE_TOLERATED_LIMIT) {
- throw new DiskErrorException("Invalid value configured for "
+ throw new HadoopIllegalArgumentException("Invalid value configured for "
+ DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY + " - "
+ maxVolumeFailuresTolerated + " "
+ DataNode.MAX_VOLUME_FAILURES_TOLERATED_MSG);
@@ -170,7 +172,7 @@ public class StorageLocationChecker {
}
if (maxVolumeFailuresTolerated >= dataDirs.size()) {
- throw new DiskErrorException("Invalid value configured for "
+ throw new HadoopIllegalArgumentException("Invalid value configured for "
+ DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY + " - "
+ maxVolumeFailuresTolerated + ". Value configured is >= "
+ "to the number of configured volumes (" + dataDirs.size() + ").");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 76110d6..f850763 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -48,6 +48,8 @@ import javax.management.ObjectName;
import javax.management.StandardMBean;
import com.google.common.annotations.VisibleForTesting;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -292,7 +294,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
if (volFailuresTolerated < DataNode.MAX_VOLUME_FAILURE_TOLERATED_LIMIT
|| volFailuresTolerated >= volsConfigured) {
- throw new DiskErrorException("Invalid value configured for "
+ throw new HadoopIllegalArgumentException("Invalid value configured for "
+ "dfs.datanode.failed.volumes.tolerated - " + volFailuresTolerated
+ ". Value configured is either less than maxVolumeFailureLimit or greater than "
+ "to the number of configured volumes (" + volsConfigured + ").");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
index 825887c..a9e4096 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
@@ -26,6 +26,7 @@ import java.io.File;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
+import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
@@ -36,7 +37,6 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.DiskChecker.DiskErrorException;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
@@ -242,7 +242,7 @@ public class TestDataNodeVolumeFailureToleration {
prepareDirToFail(dirs[i]);
}
restartDatanodes(volumesTolerated, manageDfsDirs);
- } catch (DiskErrorException e) {
+ } catch (HadoopIllegalArgumentException e) {
GenericTestUtils.assertExceptionContains("Invalid value configured for "
+ "dfs.datanode.failed.volumes.tolerated", e);
} finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java
index 95a361a..bffdaae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs.server.datanode;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.doReturn;
@@ -36,6 +37,7 @@ import com.google.common.collect.Lists;
import org.junit.Assert;
+import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -162,4 +164,14 @@ public class TestDatanodeRegister {
e.getMessage());
}
}
+
+ @Test
+ public void testInvalidConfigurationValue() throws Exception {
+ Configuration conf = new HdfsConfiguration();
+ conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, -2);
+ intercept(HadoopIllegalArgumentException.class,
+ "Invalid value configured for dfs.datanode.failed.volumes.tolerated"
+ + " - -2 should be greater than or equal to -1",
+ () -> new DataNode(conf, new ArrayList<>(), null, null));
+ }
}
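
The new test relies on LambdaTestUtils.intercept, which evaluates a lambda, asserts that the expected exception type is thrown, verifies its message contains the given substring, and returns the caught exception, replacing try/fail/catch boilerplate. A minimal usage sketch with a hypothetical validator:

import static org.apache.hadoop.test.LambdaTestUtils.intercept;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.junit.Test;

public class TestInterceptIdiom {
  // Hypothetical validator, used only to drive the sketch.
  static int validate(int tolerated) {
    if (tolerated < -1) {
      throw new HadoopIllegalArgumentException(
          "Invalid value configured for example.key - " + tolerated);
    }
    return tolerated;
  }

  @Test
  public void testRejectsOutOfRangeValue() throws Exception {
    // intercept() returns the caught exception for further assertions.
    HadoopIllegalArgumentException e =
        intercept(HadoopIllegalArgumentException.class,
            "Invalid value configured", () -> validate(-2));
  }
}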
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java
index 9a45e62..9b0636f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs.server.datanode.checker;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.*;
@@ -44,6 +46,10 @@ import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY;
import static org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult.*;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.*;
@@ -226,4 +232,35 @@ public class TestDatasetVolumeChecker {
}
return volumes;
}
+
+ @Test
+ public void testInvalidConfigurationValues() throws Exception {
+ HdfsConfiguration conf = new HdfsConfiguration();
+ conf.setInt(DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY, 0);
+ intercept(HadoopIllegalArgumentException.class,
+ "Invalid value configured for dfs.datanode.disk.check.timeout"
+ + " - 0 (should be > 0)",
+ () -> new DatasetVolumeChecker(conf, new FakeTimer()));
+ conf.unset(DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY);
+
+ conf.setInt(DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY, -1);
+ intercept(HadoopIllegalArgumentException.class,
+ "Invalid value configured for dfs.datanode.disk.check.min.gap"
+ + " - -1 (should be >= 0)",
+ () -> new DatasetVolumeChecker(conf, new FakeTimer()));
+ conf.unset(DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY);
+
+ conf.setInt(DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY, -1);
+ intercept(HadoopIllegalArgumentException.class,
+ "Invalid value configured for dfs.datanode.disk.check.timeout"
+ + " - -1 (should be > 0)",
+ () -> new DatasetVolumeChecker(conf, new FakeTimer()));
+ conf.unset(DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY);
+
+ conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, -2);
+ intercept(HadoopIllegalArgumentException.class,
+ "Invalid value configured for dfs.datanode.failed.volumes.tolerated"
+ + " - -2 should be greater than or equal to -1",
+ () -> new DatasetVolumeChecker(conf, new FakeTimer()));
+ }
}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestStorageLocationChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestStorageLocationChecker.java
index 169a1b9..80f0396 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestStorageLocationChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestStorageLocationChecker.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs.server.datanode.checker;
+import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
@@ -36,6 +37,7 @@ import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY;
import static org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult.*;
@@ -129,7 +131,7 @@ public class TestStorageLocationChecker {
final Configuration conf = new HdfsConfiguration();
conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 3);
- thrown.expect(IOException.class);
+ thrown.expect(HadoopIllegalArgumentException.class);
thrown.expectMessage("Invalid value configured");
StorageLocationChecker checker =
new StorageLocationChecker(conf, new FakeTimer());
@@ -214,4 +216,33 @@ public class TestStorageLocationChecker {
}
return locations;
}
+
+ @Test
+ public void testInvalidConfigurationValues() throws Exception {
+ final List<StorageLocation> locations =
+ makeMockLocations(HEALTHY, HEALTHY, HEALTHY);
+ Configuration conf = new HdfsConfiguration();
+
+ conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 4);
+ intercept(HadoopIllegalArgumentException.class,
+ "Invalid value configured for dfs.datanode.failed.volumes.tolerated"
+ + " - 4. Value configured is >= to the "
+ + "number of configured volumes (3).",
+ () -> new StorageLocationChecker(conf, new FakeTimer()).check(conf,
+ locations));
+ conf.unset(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY);
+
+ conf.setInt(DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY, 0);
+ intercept(HadoopIllegalArgumentException.class,
+ "Invalid value configured for dfs.datanode.disk.check.timeout"
+ + " - 0 (should be > 0)",
+ () -> new StorageLocationChecker(conf, new FakeTimer()));
+ conf.unset(DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY);
+
+ conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, -2);
+ intercept(HadoopIllegalArgumentException.class,
+ "Invalid value configured for dfs.datanode.failed.volumes.tolerated"
+ + " - -2 should be greater than or equal to -1",
+ () -> new StorageLocationChecker(conf, new FakeTimer()));
+ }
}