You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by ae...@apache.org on 2017/07/05 18:21:33 UTC
hadoop git commit: HDFS-12053. Ozone: ozone server should create
missing metadata directory if it has permission to. Contributed by Weiwei
Yang.
Repository: hadoop
Updated Branches:
refs/heads/HDFS-7240 b23b01645 -> ebbc04c2d
HDFS-12053. Ozone: ozone server should create missing metadata directory if it has permission to. Contributed by Weiwei Yang.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ebbc04c2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ebbc04c2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ebbc04c2
Branch: refs/heads/HDFS-7240
Commit: ebbc04c2d5a0739f81921d72b68449291395b88f
Parents: b23b016
Author: Anu Engineer <ae...@apache.org>
Authored: Wed Jul 5 11:15:16 2017 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Wed Jul 5 11:15:16 2017 -0700
----------------------------------------------------------------------
.../container/common/impl/ContainerManagerImpl.java | 10 +++++++++-
.../container/common/TestDatanodeStateMachine.java | 13 ++++++++++---
2 files changed, 19 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ebbc04c2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
index 48d218a..f3e010d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
@@ -128,8 +128,15 @@ public class ContainerManagerImpl implements ContainerManager {
readLock();
try {
for (StorageLocation path : containerDirs) {
- LOG.info("Loading containers under {}", path);
File directory = Paths.get(path.getNormalizedUri()).toFile();
+ if (!directory.exists() && !directory.mkdirs()) {
+ LOG.error("Container metadata directory doesn't exist "
+ + "and cannot be created. Path: {}", path.toString());
+ throw new StorageContainerException("Container metadata "
+ + "directory doesn't exist and cannot be created " + path
+ .toString(), INVALID_CONFIG);
+ }
+
// TODO: This will fail if any directory is invalid.
// We should fix this to handle invalid directories and continue.
// Leaving it this way to fail fast for time being.
@@ -139,6 +146,7 @@ public class ContainerManagerImpl implements ContainerManager {
throw new StorageContainerException("Invalid path to container " +
"metadata directory." + path, INVALID_CONFIG);
}
+ LOG.info("Loading containers under {}", path);
File[] files = directory.listFiles(new ContainerFilter());
if (files != null) {
for (File containerFile : files) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ebbc04c2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
index 9c2a0bd..15b2852 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
import org.apache.hadoop.scm.ScmConfigKeys;
import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
@@ -70,6 +71,7 @@ public class TestDatanodeStateMachine {
private List<ScmTestMock> mockServers;
private ExecutorService executorService;
private Configuration conf;
+ private File testRoot;
@Before
public void setUp() throws Exception {
@@ -95,11 +97,14 @@ public class TestDatanodeStateMachine {
URL p = this.getClass().getResource("");
String path = p.getPath().concat(
TestDatanodeStateMachine.class.getSimpleName());
- File f = new File(path);
- if (!f.mkdirs()) {
+ testRoot = new File(path);
+ if (!testRoot.mkdirs()) {
LOG.info("Required directories already exist.");
}
- conf.set(DFS_DATANODE_DATA_DIR_KEY, path);
+ conf.set(DFS_DATANODE_DATA_DIR_KEY,
+ new File(testRoot, "data").getAbsolutePath());
+ conf.set(OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS,
+ new File(testRoot, "scm").getAbsolutePath());
path = Paths.get(path.toString(),
TestDatanodeStateMachine.class.getSimpleName() + ".id").toString();
conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ID, path);
@@ -131,6 +136,8 @@ public class TestDatanodeStateMachine {
}
} catch (Exception e) {
//ignore all exceptions from the shutdown
+ } finally {
+ testRoot.delete();
}
}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org