Posted to common-commits@hadoop.apache.org by sh...@apache.org on 2018/09/22 01:39:59 UTC
[01/18] hadoop git commit: HDFS-13892. Disk Balancer: Make execute
command documentation better. Contributed by Ranith Sardar. [Forced Update!]
Repository: hadoop
Updated Branches:
refs/heads/HDFS-12943 77e106f74 -> 741547e16 (forced update)
HDFS-13892. Disk Balancer: Make execute command documentation better.
Contributed by Ranith Sardar.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6fc293fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6fc293fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6fc293fe
Branch: refs/heads/HDFS-12943
Commit: 6fc293fece935e3524ae59699aa3c3e3d98f6d86
Parents: 6b5838e
Author: Anu Engineer <ae...@apache.org>
Authored: Wed Sep 19 20:48:41 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Wed Sep 19 20:48:41 2018 -0700
----------------------------------------------------------------------
.../hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fc293fe/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
index 5dd6ffc..955f179 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
@@ -78,7 +78,9 @@ Execute command takes a plan command executes it against the datanode that plan
`hdfs diskbalancer -execute /system/diskbalancer/nodename.plan.json`
This executes the plan by reading datanode’s address from the plan file.
-
+When DiskBalancer executes a plan, it starts an asynchronous process that can take a long time,
+so the query command can be used to check the current status of the execute command.
+
| COMMAND\_OPTION | Description |
|:---- |:---- |
| `-skipDateCheck` | Skip date check and force execute the plan.|
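Once a plan is executing, its progress can be polled with the query command described above; a typical invocation (the hostname is illustrative) looks like:

`hdfs diskbalancer -query nodename.mycluster.com`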
---------------------------------------------------------------------
[12/18] hadoop git commit: YARN-8769. [Submarine] Allow user to
specify customized quicklink(s) when submitting a Submarine job. Contributed
by Wangda Tan.
Posted by sh...@apache.org.
YARN-8769. [Submarine] Allow user to specify customized quicklink(s) when submitting a Submarine job. Contributed by Wangda Tan.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0cd63461
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0cd63461
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0cd63461
Branch: refs/heads/HDFS-12943
Commit: 0cd63461021cc7cac39e7cc2bfaafd609c82fc79
Parents: a275277
Author: Sunil G <su...@apache.org>
Authored: Fri Sep 21 23:39:22 2018 +0530
Committer: Sunil G <su...@apache.org>
Committed: Fri Sep 21 23:39:22 2018 +0530
----------------------------------------------------------------------
.../yarn/submarine/client/cli/CliConstants.java | 1 +
.../yarn/submarine/client/cli/RunJobCli.java | 8 ++
.../submarine/client/cli/param/Quicklink.java | 71 ++++++++++++++
.../client/cli/param/RunJobParameters.java | 18 ++++
.../yarnservice/YarnServiceJobSubmitter.java | 99 ++++++++++++++------
.../runtimes/yarnservice/YarnServiceUtils.java | 47 ++++++++--
.../yarnservice/TestYarnServiceRunJobCli.java | 94 +++++++++++++++++++
7 files changed, 303 insertions(+), 35 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cd63461/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliConstants.java
index d51ffc7..454ff1c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliConstants.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliConstants.java
@@ -49,6 +49,7 @@ public class CliConstants {
public static final String WAIT_JOB_FINISH = "wait_job_finish";
public static final String PS_DOCKER_IMAGE = "ps_docker_image";
public static final String WORKER_DOCKER_IMAGE = "worker_docker_image";
+ public static final String QUICKLINK = "quicklink";
public static final String TENSORBOARD_DOCKER_IMAGE =
"tensorboard_docker_image";
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cd63461/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/RunJobCli.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/RunJobCli.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/RunJobCli.java
index faa22d3..5054a94 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/RunJobCli.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/RunJobCli.java
@@ -117,6 +117,14 @@ public class RunJobCli extends AbstractCli {
options.addOption(CliConstants.WORKER_DOCKER_IMAGE, true,
"Specify docker image for WORKER, when this is not specified, WORKER "
+ "uses --" + CliConstants.DOCKER_IMAGE + " as default.");
+ options.addOption(CliConstants.QUICKLINK, true, "Specify quicklink so "
+ + "YARN web UI shows link to given role instance and port. When "
+ + "--tensorboard is specified, quicklink to tensorboard instance will "
+ + "be added automatically. The format of quicklink is: "
+ + "Quick_link_label=http(or https)://role-name:port. For example, "
+ + "if you want to link to the first worker's port 7070 and the "
+ + "quicklink text is Notebook_UI, specify --quicklink "
+ + "Notebook_UI=https://master-0:7070");
options.addOption("h", "help", false, "Print help");
return options;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cd63461/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/param/Quicklink.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/param/Quicklink.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/param/Quicklink.java
new file mode 100644
index 0000000..ea8732c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/param/Quicklink.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. See accompanying LICENSE file.
+ */
+
+package org.apache.hadoop.yarn.submarine.client.cli.param;
+
+import org.apache.commons.cli.ParseException;
+
+/**
+ * A class that represents a quick link to a web page.
+ */
+public class Quicklink {
+ private String label;
+ private String componentInstanceName;
+ private String protocol;
+ private int port;
+
+ public void parse(String quicklinkStr) throws ParseException {
+ if (!quicklinkStr.contains("=")) {
+ throw new ParseException("Should be <label>=<link> format for quicklink");
+ }
+
+ int index = quicklinkStr.indexOf("=");
+ label = quicklinkStr.substring(0, index);
+ quicklinkStr = quicklinkStr.substring(index + 1);
+
+ if (quicklinkStr.startsWith("http://")) {
+ protocol = "http://";
+ } else if (quicklinkStr.startsWith("https://")) {
+ protocol = "https://";
+ } else {
+ throw new ParseException("Quicklink should start with http or https");
+ }
+
+ quicklinkStr = quicklinkStr.substring(protocol.length());
+ index = quicklinkStr.indexOf(":");
+
+ if (index == -1) {
+ throw new ParseException("Quicklink should be componet-id:port form");
+ }
+
+ componentInstanceName = quicklinkStr.substring(0, index);
+ port = Integer.parseInt(quicklinkStr.substring(index + 1));
+ }
+
+ public String getLabel() {
+ return label;
+ }
+
+ public String getComponentInstanceName() {
+ return componentInstanceName;
+ }
+
+ public String getProtocol() {
+ return protocol;
+ }
+
+ public int getPort() {
+ return port;
+ }
+}
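As a quick illustration of the parser above, a hypothetical driver (not part of the patch, assuming the new Quicklink class is on the classpath) would behave like this:

import org.apache.commons.cli.ParseException;
import org.apache.hadoop.yarn.submarine.client.cli.param.Quicklink;

public class QuicklinkParseDemo {
  public static void main(String[] args) throws ParseException {
    // Format documented in RunJobCli:
    // <label>=http(s)://<component-instance-name>:<port>
    Quicklink ql = new Quicklink();
    ql.parse("Notebook_UI=https://master-0:7070");
    System.out.println(ql.getLabel());                  // Notebook_UI
    System.out.println(ql.getProtocol());               // https://
    System.out.println(ql.getComponentInstanceName());  // master-0
    System.out.println(ql.getPort());                   // 7070
  }
}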
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cd63461/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/param/RunJobParameters.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/param/RunJobParameters.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/param/RunJobParameters.java
index 4558f6a..92a1883 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/param/RunJobParameters.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/param/RunJobParameters.java
@@ -24,6 +24,8 @@ import org.apache.hadoop.yarn.submarine.client.cli.CliUtils;
import org.apache.hadoop.yarn.submarine.common.ClientContext;
import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
/**
* Parameters used to run a job
@@ -41,6 +43,7 @@ public class RunJobParameters extends RunParameters {
private String tensorboardDockerImage;
private String workerLaunchCmd;
private String psLaunchCmd;
+ private List<Quicklink> quicklinks = new ArrayList<>();
private String psDockerImage = null;
private String workerDockerImage = null;
@@ -119,6 +122,17 @@ public class RunJobParameters extends RunParameters {
this.waitJobFinish = true;
}
+ // Quicklinks
+ String[] quicklinkStrs = parsedCommandLine.getOptionValues(
+ CliConstants.QUICKLINK);
+ if (quicklinkStrs != null) {
+ for (String ql : quicklinkStrs) {
+ Quicklink quicklink = new Quicklink();
+ quicklink.parse(ql);
+ quicklinks.add(quicklink);
+ }
+ }
+
psDockerImage = parsedCommandLine.getOptionValue(
CliConstants.PS_DOCKER_IMAGE);
workerDockerImage = parsedCommandLine.getOptionValue(
@@ -247,4 +261,8 @@ public class RunJobParameters extends RunParameters {
public String getTensorboardDockerImage() {
return tensorboardDockerImage;
}
+
+ public List<Quicklink> getQuicklinks() {
+ return quicklinks;
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cd63461/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceJobSubmitter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceJobSubmitter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceJobSubmitter.java
index 8fb213f..5855287 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceJobSubmitter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceJobSubmitter.java
@@ -15,7 +15,6 @@
package org.apache.hadoop.yarn.submarine.runtimes.yarnservice;
import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.ImmutableMap;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -29,6 +28,7 @@ import org.apache.hadoop.yarn.service.api.records.Resource;
import org.apache.hadoop.yarn.service.api.records.ResourceInformation;
import org.apache.hadoop.yarn.service.api.records.Service;
import org.apache.hadoop.yarn.service.client.ServiceClient;
+import org.apache.hadoop.yarn.submarine.client.cli.param.Quicklink;
import org.apache.hadoop.yarn.submarine.client.cli.param.RunJobParameters;
import org.apache.hadoop.yarn.submarine.common.ClientContext;
import org.apache.hadoop.yarn.submarine.common.Envs;
@@ -40,10 +40,14 @@ import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileNotFoundException;
-import java.io.FileWriter;
+import java.io.FileOutputStream;
import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.io.PrintWriter;
+import java.io.Writer;
import java.util.HashMap;
import java.util.HashSet;
+import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.StringTokenizer;
@@ -54,6 +58,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY
* Submit a job to cluster
*/
public class YarnServiceJobSubmitter implements JobSubmitter {
+ public static final String TENSORBOARD_QUICKLINK_LABEL = "Tensorboard";
private static final Logger LOG =
LoggerFactory.getLogger(YarnServiceJobSubmitter.class);
ClientContext clientContext;
@@ -98,7 +103,7 @@ public class YarnServiceJobSubmitter implements JobSubmitter {
}
private void addHdfsClassPathIfNeeded(RunJobParameters parameters,
- FileWriter fw, Component comp) throws IOException {
+ PrintWriter fw, Component comp) throws IOException {
// Find envs to use HDFS
String hdfsHome = null;
String javaHome = null;
@@ -191,7 +196,8 @@ public class YarnServiceJobSubmitter implements JobSubmitter {
envs.put(Envs.TASK_TYPE_ENV, taskType.name());
}
- private String getUserName() {
+ @VisibleForTesting
+ protected String getUserName() {
return System.getProperty("user.name");
}
@@ -205,18 +211,19 @@ public class YarnServiceJobSubmitter implements JobSubmitter {
private String generateCommandLaunchScript(RunJobParameters parameters,
TaskType taskType, Component comp) throws IOException {
File file = File.createTempFile(taskType.name() + "-launch-script", ".sh");
- FileWriter fw = new FileWriter(file);
+ Writer w = new OutputStreamWriter(new FileOutputStream(file), "UTF-8");
+ PrintWriter pw = new PrintWriter(w);
try {
- fw.append("#!/bin/bash\n");
+ pw.append("#!/bin/bash\n");
- addHdfsClassPathIfNeeded(parameters, fw, comp);
+ addHdfsClassPathIfNeeded(parameters, pw, comp);
if (taskType.equals(TaskType.TENSORBOARD)) {
String tbCommand =
"export LC_ALL=C && tensorboard --logdir=" + parameters
.getCheckpointPath();
- fw.append(tbCommand + "\n");
+ pw.append(tbCommand + "\n");
LOG.info("Tensorboard command=" + tbCommand);
} else{
// When distributed training is required
@@ -226,20 +233,20 @@ public class YarnServiceJobSubmitter implements JobSubmitter {
taskType.getComponentName(), parameters.getNumWorkers(),
parameters.getNumPS(), parameters.getName(), getUserName(),
getDNSDomain());
- fw.append("export TF_CONFIG=\"" + tfConfigEnv + "\"\n");
+ pw.append("export TF_CONFIG=\"" + tfConfigEnv + "\"\n");
}
// Print launch command
if (taskType.equals(TaskType.WORKER) || taskType.equals(
TaskType.PRIMARY_WORKER)) {
- fw.append(parameters.getWorkerLaunchCmd() + '\n');
+ pw.append(parameters.getWorkerLaunchCmd() + '\n');
if (SubmarineLogs.isVerbose()) {
LOG.info(
"Worker command =[" + parameters.getWorkerLaunchCmd() + "]");
}
} else if (taskType.equals(TaskType.PS)) {
- fw.append(parameters.getPSLaunchCmd() + '\n');
+ pw.append(parameters.getPSLaunchCmd() + '\n');
if (SubmarineLogs.isVerbose()) {
LOG.info("PS command =[" + parameters.getPSLaunchCmd() + "]");
@@ -247,7 +254,7 @@ public class YarnServiceJobSubmitter implements JobSubmitter {
}
}
} finally {
- fw.close();
+ pw.close();
}
return file.getAbsolutePath();
}
@@ -421,18 +428,51 @@ public class YarnServiceJobSubmitter implements JobSubmitter {
return new Artifact().type(Artifact.TypeEnum.DOCKER).id(dockerImageName);
}
+ private void handleQuicklinks(RunJobParameters runJobParameters)
+ throws IOException {
+ List<Quicklink> quicklinks = runJobParameters.getQuicklinks();
+ if (null != quicklinks && !quicklinks.isEmpty()) {
+ for (Quicklink ql : quicklinks) {
+ // Make sure it is a valid instance name
+ String instanceName = ql.getComponentInstanceName();
+ boolean found = false;
+
+ for (Component comp : serviceSpec.getComponents()) {
+ for (int i = 0; i < comp.getNumberOfContainers(); i++) {
+ String possibleInstanceName = comp.getName() + "-" + i;
+ if (possibleInstanceName.equals(instanceName)) {
+ found = true;
+ break;
+ }
+ }
+ }
+
+ if (!found) {
+ throw new IOException(
+ "Couldn't find a component instance = " + instanceName
+ + " while adding quicklink");
+ }
+
+ String link = ql.getProtocol() + YarnServiceUtils.getDNSName(
+ serviceSpec.getName(), instanceName, getUserName(), getDNSDomain(),
+ ql.getPort());
+ YarnServiceUtils.addQuicklink(serviceSpec, ql.getLabel(), link);
+ }
+ }
+ }
+
private Service createServiceByParameters(RunJobParameters parameters)
throws IOException {
componentToLocalLaunchScriptPath.clear();
- Service service = new Service();
- service.setName(parameters.getName());
- service.setVersion(String.valueOf(System.currentTimeMillis()));
- service.setArtifact(getDockerArtifact(parameters.getDockerImageName()));
+ serviceSpec = new Service();
+ serviceSpec.setName(parameters.getName());
+ serviceSpec.setVersion(String.valueOf(System.currentTimeMillis()));
+ serviceSpec.setArtifact(getDockerArtifact(parameters.getDockerImageName()));
- handleServiceEnvs(service, parameters);
+ handleServiceEnvs(serviceSpec, parameters);
if (parameters.getNumWorkers() > 0) {
- addWorkerComponents(service, parameters);
+ addWorkerComponents(serviceSpec, parameters);
}
if (parameters.getNumPS() > 0) {
@@ -450,7 +490,7 @@ public class YarnServiceJobSubmitter implements JobSubmitter {
getDockerArtifact(parameters.getPsDockerImage()));
}
handleLaunchCommand(parameters, TaskType.PS, psComponent);
- service.addComponent(psComponent);
+ serviceSpec.addComponent(psComponent);
}
if (parameters.isTensorboardEnabled()) {
@@ -470,14 +510,20 @@ public class YarnServiceJobSubmitter implements JobSubmitter {
// Add tensorboard to quicklink
String tensorboardLink = "http://" + YarnServiceUtils.getDNSName(
- parameters.getName(), TaskType.TENSORBOARD.getComponentName(), 0,
- getUserName(), getDNSDomain(), 6006);
+ parameters.getName(),
+ TaskType.TENSORBOARD.getComponentName() + "-" + 0, getUserName(),
+ getDNSDomain(), 6006);
LOG.info("Link to tensorboard:" + tensorboardLink);
- service.addComponent(tbComponent);
- service.setQuicklinks(ImmutableMap.of("Tensorboard", tensorboardLink));
+ serviceSpec.addComponent(tbComponent);
+
+ YarnServiceUtils.addQuicklink(serviceSpec, TENSORBOARD_QUICKLINK_LABEL,
+ tensorboardLink);
}
- return service;
+ // After all components added, handle quicklinks
+ handleQuicklinks(parameters);
+
+ return serviceSpec;
}
/**
@@ -486,12 +532,11 @@ public class YarnServiceJobSubmitter implements JobSubmitter {
@Override
public ApplicationId submitJob(RunJobParameters parameters)
throws IOException, YarnException {
- Service service = createServiceByParameters(parameters);
+ createServiceByParameters(parameters);
ServiceClient serviceClient = YarnServiceUtils.createServiceClient(
clientContext.getYarnConfig());
- ApplicationId appid = serviceClient.actionCreate(service);
+ ApplicationId appid = serviceClient.actionCreate(serviceSpec);
serviceClient.stop();
- this.serviceSpec = service;
return appid;
}
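Note how handleQuicklinks above validates the component instance name: a name is accepted only if it equals some component's name plus "-" plus a container index. A hypothetical standalone form of that check (not part of the patch):

// Hypothetical standalone form of the instance-name check in
// handleQuicklinks; e.g. a "worker" component with 3 containers accepts
// worker-0, worker-1 and worker-2.
static boolean isValidInstanceName(String instanceName, String componentName,
    long numberOfContainers) {
  for (int i = 0; i < numberOfContainers; i++) {
    if ((componentName + "-" + i).equals(instanceName)) {
      return true;
    }
  }
  return false;
}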
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cd63461/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceUtils.java
index 9238a67..26402da 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceUtils.java
@@ -16,10 +16,20 @@ package org.apache.hadoop.yarn.submarine.runtimes.yarnservice;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.service.api.records.Service;
import org.apache.hadoop.yarn.service.client.ServiceClient;
import org.apache.hadoop.yarn.submarine.common.Envs;
+import org.apache.hadoop.yarn.submarine.common.conf.SubmarineLogs;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.HashMap;
+import java.util.Map;
public class YarnServiceUtils {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(YarnServiceUtils.class);
+
// This will be true only in UT.
private static ServiceClient stubServiceClient = null;
@@ -40,10 +50,10 @@ public class YarnServiceUtils {
YarnServiceUtils.stubServiceClient = stubServiceClient;
}
- public static String getDNSName(String serviceName, String componentName,
- int index, String userName, String domain, int port) {
- return componentName + "-" + index + getDNSNameCommonSuffix(serviceName,
- userName, domain, port);
+ public static String getDNSName(String serviceName,
+ String componentInstanceName, String userName, String domain, int port) {
+ return componentInstanceName + getDNSNameCommonSuffix(serviceName, userName,
+ domain, port);
}
private static String getDNSNameCommonSuffix(String serviceName,
@@ -66,12 +76,18 @@ public class YarnServiceUtils {
commonEndpointSuffix) + ",";
String ps = getComponentArrayJson("ps", nPs, commonEndpointSuffix) + "},";
- String task =
- "\\\"task\\\":{" + " \\\"type\\\":\\\"" + curCommponentName + "\\\","
- + " \\\"index\\\":" + '$' + Envs.TASK_INDEX_ENV + "},";
+ StringBuilder sb = new StringBuilder();
+ sb.append("\\\"task\\\":{");
+ sb.append(" \\\"type\\\":\\\"");
+ sb.append(curCommponentName);
+ sb.append("\\\",");
+ sb.append(" \\\"index\\\":");
+ sb.append('$');
+ sb.append(Envs.TASK_INDEX_ENV + "},");
+ String task = sb.toString();
String environment = "\\\"environment\\\":\\\"cloud\\\"}";
- StringBuilder sb = new StringBuilder();
+ sb = new StringBuilder();
sb.append(json);
sb.append(master);
sb.append(worker);
@@ -81,6 +97,21 @@ public class YarnServiceUtils {
return sb.toString();
}
+ public static void addQuicklink(Service serviceSpec, String label,
+ String link) {
+ Map<String, String> quicklinks = serviceSpec.getQuicklinks();
+ if (null == quicklinks) {
+ quicklinks = new HashMap<>();
+ serviceSpec.setQuicklinks(quicklinks);
+ }
+
+ if (SubmarineLogs.isVerbose()) {
+ LOG.info("Added quicklink, " + label + "=" + link);
+ }
+
+ quicklinks.put(label, link);
+ }
+
private static String getComponentArrayJson(String componentName, int count,
String endpointSuffix) {
String component = "\\\"" + componentName + "\\\":";
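The getDNSName change above replaces the separate index argument with a full component instance name. Judging from the expectations in TestYarnServiceRunJobCli below, the resulting DNS name composes as instance.service.user.domain:port; a rough sketch of that composition (not the actual getDNSNameCommonSuffix code):

public class DnsNameSketch {
  // Hypothetical reconstruction of the composition implied by the new
  // getDNSName(serviceName, componentInstanceName, userName, domain, port).
  static String getDNSName(String serviceName, String componentInstanceName,
      String userName, String domain, int port) {
    return componentInstanceName + "." + serviceName + "." + userName + "."
        + domain + ":" + port;
  }

  public static void main(String[] args) {
    // Mirrors the value asserted in the tests (domain is null there and is
    // printed literally by Java string concatenation):
    System.out.println(getDNSName("my-job", "master-0", "username", null, 8321));
    // -> master-0.my-job.username.null:8321
  }
}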
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cd63461/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/test/java/org/apache/hadoop/yarn/submarine/client/cli/yarnservice/TestYarnServiceRunJobCli.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/test/java/org/apache/hadoop/yarn/submarine/client/cli/yarnservice/TestYarnServiceRunJobCli.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/test/java/org/apache/hadoop/yarn/submarine/client/cli/yarnservice/TestYarnServiceRunJobCli.java
index a88d673..89d39a0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/test/java/org/apache/hadoop/yarn/submarine/client/cli/yarnservice/TestYarnServiceRunJobCli.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/test/java/org/apache/hadoop/yarn/submarine/client/cli/yarnservice/TestYarnServiceRunJobCli.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.yarn.submarine.client.cli.yarnservice;
+import com.google.common.collect.ImmutableMap;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -100,6 +101,32 @@ public class TestYarnServiceRunJobCli {
Assert.assertTrue(SubmarineLogs.isVerbose());
}
+ private void verifyQuicklink(Service serviceSpec,
+ Map<String, String> expectedQuicklinks) {
+ Map<String, String> actualQuicklinks = serviceSpec.getQuicklinks();
+ if (actualQuicklinks == null || actualQuicklinks.isEmpty()) {
+ Assert.assertTrue(
+ expectedQuicklinks == null || expectedQuicklinks.isEmpty());
+ return;
+ }
+
+ Assert.assertEquals(expectedQuicklinks.size(), actualQuicklinks.size());
+ for (Map.Entry<String, String> expectedEntry : expectedQuicklinks
+ .entrySet()) {
+ Assert.assertTrue(actualQuicklinks.containsKey(expectedEntry.getKey()));
+
+ // The actual user name differs between environments, so replace it with
+ // "username" before comparing
+ String expectedValue = expectedEntry.getValue();
+ String actualValue = actualQuicklinks.get(expectedEntry.getKey());
+
+ String userName = System.getProperty("user.name");
+ actualValue = actualValue.replaceAll(userName, "username");
+
+ Assert.assertEquals(expectedValue, actualValue);
+ }
+ }
+
@Test
public void testBasicRunJobForDistributedTraining() throws Exception {
MockClientContext mockClientContext =
@@ -120,6 +147,8 @@ public class TestYarnServiceRunJobCli {
Assert.assertEquals(3, serviceSpec.getComponents().size());
commonVerifyDistributedTrainingSpec(serviceSpec);
+
+ verifyQuicklink(serviceSpec, null);
}
@Test
@@ -147,6 +176,10 @@ public class TestYarnServiceRunJobCli {
verifyTensorboardComponent(runJobCli, serviceSpec,
Resources.createResource(4096, 1));
+
+ verifyQuicklink(serviceSpec, ImmutableMap
+ .of(YarnServiceJobSubmitter.TENSORBOARD_QUICKLINK_LABEL,
+ "http://tensorboard-0.my-job.username.null:6006"));
}
@Test
@@ -232,6 +265,9 @@ public class TestYarnServiceRunJobCli {
verifyTensorboardComponent(runJobCli, serviceSpec,
Resources.createResource(2048, 2));
+ verifyQuicklink(serviceSpec, ImmutableMap
+ .of(YarnServiceJobSubmitter.TENSORBOARD_QUICKLINK_LABEL,
+ "http://tensorboard-0.my-job.username.null:6006"));
}
private void commonTestSingleNodeTraining(Service serviceSpec)
@@ -372,4 +408,62 @@ public class TestYarnServiceRunJobCli {
Assert.assertEquals(jobInfo.get(StorageKeyConstants.INPUT_PATH),
"s3://input");
}
+
+ @Test
+ public void testAddQuicklinksWithoutTensorboard() throws Exception {
+ MockClientContext mockClientContext =
+ YarnServiceCliTestUtils.getMockClientContext();
+ RunJobCli runJobCli = new RunJobCli(mockClientContext);
+ Assert.assertFalse(SubmarineLogs.isVerbose());
+
+ runJobCli.run(
+ new String[] { "--name", "my-job", "--docker_image", "tf-docker:1.1.0",
+ "--input_path", "s3://input", "--checkpoint_path", "s3://output",
+ "--num_workers", "3", "--num_ps", "2", "--worker_launch_cmd",
+ "python run-job.py", "--worker_resources", "memory=2048M,vcores=2",
+ "--ps_resources", "memory=4096M,vcores=4", "--ps_docker_image",
+ "ps.image", "--worker_docker_image", "worker.image",
+ "--ps_launch_cmd", "python run-ps.py", "--verbose", "--quicklink",
+ "AAA=http://master-0:8321", "--quicklink",
+ "BBB=http://worker-0:1234" });
+ Service serviceSpec = getServiceSpecFromJobSubmitter(
+ runJobCli.getJobSubmitter());
+ Assert.assertEquals(3, serviceSpec.getComponents().size());
+
+ commonVerifyDistributedTrainingSpec(serviceSpec);
+
+ verifyQuicklink(serviceSpec, ImmutableMap
+ .of("AAA", "http://master-0.my-job.username.null:8321", "BBB",
+ "http://worker-0.my-job.username.null:1234"));
+ }
+
+ @Test
+ public void testAddQuicklinksWithTensorboard() throws Exception {
+ MockClientContext mockClientContext =
+ YarnServiceCliTestUtils.getMockClientContext();
+ RunJobCli runJobCli = new RunJobCli(mockClientContext);
+ Assert.assertFalse(SubmarineLogs.isVerbose());
+
+ runJobCli.run(
+ new String[] { "--name", "my-job", "--docker_image", "tf-docker:1.1.0",
+ "--input_path", "s3://input", "--checkpoint_path", "s3://output",
+ "--num_workers", "3", "--num_ps", "2", "--worker_launch_cmd",
+ "python run-job.py", "--worker_resources", "memory=2048M,vcores=2",
+ "--ps_resources", "memory=4096M,vcores=4", "--ps_docker_image",
+ "ps.image", "--worker_docker_image", "worker.image",
+ "--ps_launch_cmd", "python run-ps.py", "--verbose", "--quicklink",
+ "AAA=http://master-0:8321", "--quicklink",
+ "BBB=http://worker-0:1234", "--tensorboard" });
+ Service serviceSpec = getServiceSpecFromJobSubmitter(
+ runJobCli.getJobSubmitter());
+ Assert.assertEquals(4, serviceSpec.getComponents().size());
+
+ commonVerifyDistributedTrainingSpec(serviceSpec);
+
+ verifyQuicklink(serviceSpec, ImmutableMap
+ .of("AAA", "http://master-0.my-job.username.null:8321", "BBB",
+ "http://worker-0.my-job.username.null:1234",
+ YarnServiceJobSubmitter.TENSORBOARD_QUICKLINK_LABEL,
+ "http://tensorboard-0.my-job.username.null:6006"));
+ }
}
---------------------------------------------------------------------
[16/18] hadoop git commit: Merge branch 'trunk' into HDFS-12943
Posted by sh...@apache.org.
Merge branch 'trunk' into HDFS-12943
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c04e0c0e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c04e0c0e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c04e0c0e
Branch: refs/heads/HDFS-12943
Commit: c04e0c0e9951aab88d7e5a4f47bf24a045f6171c
Parents: 6c37db9 0cd6346
Author: Konstantin V Shvachko <sh...@apache.org>
Authored: Fri Sep 21 18:24:51 2018 -0700
Committer: Konstantin V Shvachko <sh...@apache.org>
Committed: Fri Sep 21 18:28:31 2018 -0700
----------------------------------------------------------------------
dev-support/bin/create-release | 4 +-
dev-support/bin/ozone-dist-layout-stitching | 32 +-
dev-support/bin/yetus-wrapper | 2 +-
dev-support/docker/Dockerfile | 212 +-
.../assemblies/hadoop-src-with-hdds.xml | 56 +
.../assemblies/hadoop-src-with-hdsl.xml | 56 -
.../hadoop-client-minicluster/pom.xml | 17 +-
.../hadoop-client-runtime/pom.xml | 11 +
.../hadoop-annotations/pom.xml | 24 +
hadoop-common-project/hadoop-common/pom.xml | 7 +
.../src/main/conf/log4j.properties | 23 -
.../apache/hadoop/crypto/CryptoStreamUtils.java | 21 +-
.../main/java/org/apache/hadoop/fs/Globber.java | 13 +-
.../apache/hadoop/fs/TrashPolicyDefault.java | 10 +-
.../main/java/org/apache/hadoop/ha/HAAdmin.java | 7 +-
.../org/apache/hadoop/io/nativeio/NativeIO.java | 15 +-
.../org/apache/hadoop/ipc/CallQueueManager.java | 5 +-
.../apache/hadoop/log/LogThrottlingHelper.java | 358 ++
.../apache/hadoop/security/SecurityUtil.java | 34 +-
.../org/apache/hadoop/util/CleanerUtil.java | 199 +
.../org/apache/hadoop/util/StringUtils.java | 2 +-
.../hadoop/util/curator/ZKCuratorManager.java | 10 +-
.../src/main/resources/core-default.xml | 24 +-
.../markdown/release/0.1.0/CHANGELOG.0.1.0.md | 101 +
.../markdown/release/0.1.0/CHANGES.0.1.0.md | 101 -
.../markdown/release/0.1.1/CHANGELOG.0.1.1.md | 39 +
.../markdown/release/0.1.1/CHANGES.0.1.1.md | 39 -
.../markdown/release/0.10.0/CHANGELOG.0.10.0.md | 101 +
.../markdown/release/0.10.0/CHANGES.0.10.0.md | 101 -
.../markdown/release/0.10.1/CHANGELOG.0.10.1.md | 49 +
.../markdown/release/0.10.1/CHANGES.0.10.1.md | 49 -
.../markdown/release/0.11.0/CHANGELOG.0.11.0.md | 96 +
.../markdown/release/0.11.0/CHANGES.0.11.0.md | 96 -
.../markdown/release/0.11.1/CHANGELOG.0.11.1.md | 34 +
.../markdown/release/0.11.1/CHANGES.0.11.1.md | 34 -
.../markdown/release/0.11.2/CHANGELOG.0.11.2.md | 33 +
.../markdown/release/0.11.2/CHANGES.0.11.2.md | 33 -
.../markdown/release/0.12.0/CHANGELOG.0.12.0.md | 113 +
.../markdown/release/0.12.0/CHANGES.0.12.0.md | 113 -
.../markdown/release/0.12.1/CHANGELOG.0.12.1.md | 59 +
.../markdown/release/0.12.1/CHANGES.0.12.1.md | 59 -
.../markdown/release/0.12.2/CHANGELOG.0.12.2.md | 34 +
.../markdown/release/0.12.2/CHANGES.0.12.2.md | 34 -
.../markdown/release/0.12.3/CHANGELOG.0.12.3.md | 38 +
.../markdown/release/0.12.3/CHANGES.0.12.3.md | 38 -
.../markdown/release/0.13.0/CHANGELOG.0.13.0.md | 173 +
.../markdown/release/0.13.0/CHANGES.0.13.0.md | 173 -
.../markdown/release/0.14.0/CHANGELOG.0.14.0.md | 214 +
.../markdown/release/0.14.0/CHANGES.0.14.0.md | 214 -
.../markdown/release/0.14.1/CHANGELOG.0.14.1.md | 33 +
.../markdown/release/0.14.1/CHANGES.0.14.1.md | 33 -
.../markdown/release/0.14.2/CHANGELOG.0.14.2.md | 40 +
.../markdown/release/0.14.2/CHANGES.0.14.2.md | 40 -
.../markdown/release/0.14.3/CHANGELOG.0.14.3.md | 34 +
.../markdown/release/0.14.3/CHANGES.0.14.3.md | 34 -
.../markdown/release/0.14.4/CHANGELOG.0.14.4.md | 39 +
.../markdown/release/0.14.4/CHANGES.0.14.4.md | 39 -
.../markdown/release/0.15.0/CHANGELOG.0.15.0.md | 190 +
.../markdown/release/0.15.0/CHANGES.0.15.0.md | 190 -
.../markdown/release/0.15.1/CHANGELOG.0.15.1.md | 49 +
.../markdown/release/0.15.1/CHANGES.0.15.1.md | 49 -
.../markdown/release/0.15.2/CHANGELOG.0.15.2.md | 51 +
.../markdown/release/0.15.2/CHANGES.0.15.2.md | 51 -
.../markdown/release/0.15.3/CHANGELOG.0.15.3.md | 35 +
.../markdown/release/0.15.3/CHANGES.0.15.3.md | 35 -
.../markdown/release/0.15.4/CHANGELOG.0.15.4.md | 31 +
.../markdown/release/0.15.4/CHANGES.0.15.4.md | 31 -
.../markdown/release/0.16.0/CHANGELOG.0.16.0.md | 225 ++
.../markdown/release/0.16.0/CHANGES.0.16.0.md | 225 --
.../markdown/release/0.16.1/CHANGELOG.0.16.1.md | 94 +
.../markdown/release/0.16.1/CHANGES.0.16.1.md | 94 -
.../markdown/release/0.16.2/CHANGELOG.0.16.2.md | 59 +
.../markdown/release/0.16.2/CHANGES.0.16.2.md | 59 -
.../markdown/release/0.16.3/CHANGELOG.0.16.3.md | 37 +
.../markdown/release/0.16.3/CHANGES.0.16.3.md | 37 -
.../markdown/release/0.16.4/CHANGELOG.0.16.4.md | 34 +
.../markdown/release/0.16.4/CHANGES.0.16.4.md | 34 -
.../markdown/release/0.17.0/CHANGELOG.0.17.0.md | 259 ++
.../markdown/release/0.17.0/CHANGES.0.17.0.md | 259 --
.../markdown/release/0.17.1/CHANGELOG.0.17.1.md | 44 +
.../markdown/release/0.17.1/CHANGES.0.17.1.md | 44 -
.../markdown/release/0.17.2/CHANGELOG.0.17.2.md | 43 +
.../markdown/release/0.17.2/CHANGES.0.17.2.md | 43 -
.../markdown/release/0.17.3/CHANGELOG.0.17.3.md | 42 +
.../markdown/release/0.17.3/CHANGES.0.17.3.md | 42 -
.../markdown/release/0.18.0/CHANGELOG.0.18.0.md | 331 ++
.../markdown/release/0.18.0/CHANGES.0.18.0.md | 331 --
.../markdown/release/0.18.1/CHANGELOG.0.18.1.md | 46 +
.../markdown/release/0.18.1/CHANGES.0.18.1.md | 46 -
.../markdown/release/0.18.2/CHANGELOG.0.18.2.md | 70 +
.../markdown/release/0.18.2/CHANGES.0.18.2.md | 70 -
.../markdown/release/0.18.3/CHANGELOG.0.18.3.md | 103 +
.../markdown/release/0.18.3/CHANGES.0.18.3.md | 103 -
.../markdown/release/0.18.4/CHANGELOG.0.18.4.md | 45 +
.../markdown/release/0.18.4/CHANGES.0.18.4.md | 45 -
.../markdown/release/0.19.0/CHANGELOG.0.19.0.md | 422 ++
.../markdown/release/0.19.0/CHANGES.0.19.0.md | 422 --
.../markdown/release/0.19.1/CHANGELOG.0.19.1.md | 94 +
.../markdown/release/0.19.1/CHANGES.0.19.1.md | 94 -
.../markdown/release/0.19.2/CHANGELOG.0.19.2.md | 80 +
.../markdown/release/0.19.2/CHANGES.0.19.2.md | 80 -
.../markdown/release/0.2.0/CHANGELOG.0.2.0.md | 103 +
.../markdown/release/0.2.0/CHANGES.0.2.0.md | 103 -
.../markdown/release/0.2.1/CHANGELOG.0.2.1.md | 33 +
.../markdown/release/0.2.1/CHANGES.0.2.1.md | 33 -
.../markdown/release/0.20.0/CHANGELOG.0.20.0.md | 341 ++
.../markdown/release/0.20.0/CHANGES.0.20.0.md | 341 --
.../markdown/release/0.20.1/CHANGELOG.0.20.1.md | 127 +
.../markdown/release/0.20.1/CHANGES.0.20.1.md | 127 -
.../markdown/release/0.20.2/CHANGELOG.0.20.2.md | 91 +
.../markdown/release/0.20.2/CHANGES.0.20.2.md | 91 -
.../release/0.20.203.0/CHANGELOG.0.20.203.0.md | 73 +
.../release/0.20.203.0/CHANGES.0.20.203.0.md | 73 -
.../release/0.20.203.1/CHANGELOG.0.20.203.1.md | 32 +
.../release/0.20.203.1/CHANGES.0.20.203.1.md | 32 -
.../release/0.20.204.0/CHANGELOG.0.20.204.0.md | 121 +
.../release/0.20.204.0/CHANGES.0.20.204.0.md | 121 -
.../release/0.20.205.0/CHANGELOG.0.20.205.0.md | 191 +
.../release/0.20.205.0/CHANGES.0.20.205.0.md | 191 -
.../markdown/release/0.20.3/CHANGELOG.0.20.3.md | 91 +
.../markdown/release/0.20.3/CHANGES.0.20.3.md | 91 -
.../markdown/release/0.21.0/CHANGELOG.0.21.0.md | 1360 +++++++
.../markdown/release/0.21.0/CHANGES.0.21.0.md | 1360 -------
.../markdown/release/0.21.1/CHANGELOG.0.21.1.md | 123 +
.../markdown/release/0.21.1/CHANGES.0.21.1.md | 123 -
.../markdown/release/0.22.0/CHANGELOG.0.22.0.md | 762 ++++
.../markdown/release/0.22.0/CHANGES.0.22.0.md | 762 ----
.../markdown/release/0.22.1/CHANGELOG.0.22.1.md | 107 +
.../markdown/release/0.22.1/CHANGES.0.22.1.md | 107 -
.../markdown/release/0.23.0/CHANGELOG.0.23.0.md | 1182 ++++++
.../markdown/release/0.23.0/CHANGES.0.23.0.md | 1182 ------
.../markdown/release/0.23.1/CHANGELOG.0.23.1.md | 484 +++
.../markdown/release/0.23.1/CHANGES.0.23.1.md | 484 ---
.../release/0.23.10/CHANGELOG.0.23.10.md | 121 +
.../markdown/release/0.23.10/CHANGES.0.23.10.md | 120 -
.../release/0.23.11/CHANGELOG.0.23.11.md | 83 +
.../markdown/release/0.23.11/CHANGES.0.23.11.md | 83 -
.../markdown/release/0.23.2/CHANGELOG.0.23.2.md | 177 +
.../markdown/release/0.23.2/CHANGES.0.23.2.md | 177 -
.../markdown/release/0.23.3/CHANGELOG.0.23.3.md | 334 ++
.../markdown/release/0.23.3/CHANGES.0.23.3.md | 334 --
.../markdown/release/0.23.4/CHANGELOG.0.23.4.md | 74 +
.../markdown/release/0.23.4/CHANGES.0.23.4.md | 74 -
.../markdown/release/0.23.5/CHANGELOG.0.23.5.md | 152 +
.../markdown/release/0.23.5/CHANGES.0.23.5.md | 152 -
.../markdown/release/0.23.6/CHANGELOG.0.23.6.md | 127 +
.../markdown/release/0.23.6/CHANGES.0.23.6.md | 127 -
.../markdown/release/0.23.7/CHANGELOG.0.23.7.md | 189 +
.../markdown/release/0.23.7/CHANGES.0.23.7.md | 189 -
.../markdown/release/0.23.8/CHANGELOG.0.23.8.md | 67 +
.../markdown/release/0.23.8/CHANGES.0.23.8.md | 67 -
.../markdown/release/0.23.9/CHANGELOG.0.23.9.md | 66 +
.../markdown/release/0.23.9/CHANGES.0.23.9.md | 66 -
.../markdown/release/0.24.0/CHANGELOG.0.24.0.md | 89 +
.../markdown/release/0.24.0/CHANGES.0.24.0.md | 89 -
.../markdown/release/0.3.0/CHANGELOG.0.3.0.md | 86 +
.../markdown/release/0.3.0/CHANGES.0.3.0.md | 86 -
.../markdown/release/0.3.1/CHANGELOG.0.3.1.md | 35 +
.../markdown/release/0.3.1/CHANGES.0.3.1.md | 35 -
.../markdown/release/0.3.2/CHANGELOG.0.3.2.md | 55 +
.../markdown/release/0.3.2/CHANGES.0.3.2.md | 55 -
.../markdown/release/0.4.0/CHANGELOG.0.4.0.md | 66 +
.../markdown/release/0.4.0/CHANGES.0.4.0.md | 66 -
.../markdown/release/0.5.0/CHANGELOG.0.5.0.md | 112 +
.../markdown/release/0.5.0/CHANGES.0.5.0.md | 112 -
.../markdown/release/0.6.0/CHANGELOG.0.6.0.md | 89 +
.../markdown/release/0.6.0/CHANGES.0.6.0.md | 89 -
.../markdown/release/0.6.1/CHANGELOG.0.6.1.md | 35 +
.../markdown/release/0.6.1/CHANGES.0.6.1.md | 35 -
.../markdown/release/0.6.2/CHANGELOG.0.6.2.md | 39 +
.../markdown/release/0.6.2/CHANGES.0.6.2.md | 39 -
.../markdown/release/0.7.0/CHANGELOG.0.7.0.md | 87 +
.../markdown/release/0.7.0/CHANGES.0.7.0.md | 87 -
.../markdown/release/0.7.1/CHANGELOG.0.7.1.md | 41 +
.../markdown/release/0.7.1/CHANGES.0.7.1.md | 41 -
.../markdown/release/0.7.2/CHANGELOG.0.7.2.md | 39 +
.../markdown/release/0.7.2/CHANGES.0.7.2.md | 39 -
.../markdown/release/0.8.0/CHANGELOG.0.8.0.md | 82 +
.../markdown/release/0.8.0/CHANGES.0.8.0.md | 82 -
.../markdown/release/0.9.0/CHANGELOG.0.9.0.md | 99 +
.../markdown/release/0.9.0/CHANGES.0.9.0.md | 99 -
.../markdown/release/0.9.1/CHANGELOG.0.9.1.md | 32 +
.../markdown/release/0.9.1/CHANGES.0.9.1.md | 32 -
.../markdown/release/0.9.2/CHANGELOG.0.9.2.md | 33 +
.../markdown/release/0.9.2/CHANGES.0.9.2.md | 33 -
.../markdown/release/1.0.0/CHANGELOG.1.0.0.md | 117 +
.../markdown/release/1.0.0/CHANGES.1.0.0.md | 117 -
.../markdown/release/1.0.1/CHANGELOG.1.0.1.md | 54 +
.../markdown/release/1.0.1/CHANGES.1.0.1.md | 54 -
.../markdown/release/1.0.2/CHANGELOG.1.0.2.md | 59 +
.../markdown/release/1.0.2/CHANGES.1.0.2.md | 59 -
.../markdown/release/1.0.3/CHANGELOG.1.0.3.md | 68 +
.../markdown/release/1.0.3/CHANGES.1.0.3.md | 68 -
.../markdown/release/1.0.4/CHANGELOG.1.0.4.md | 39 +
.../markdown/release/1.0.4/CHANGES.1.0.4.md | 39 -
.../markdown/release/1.1.0/CHANGELOG.1.1.0.md | 199 +
.../markdown/release/1.1.0/CHANGES.1.1.0.md | 199 -
.../markdown/release/1.1.1/CHANGELOG.1.1.1.md | 68 +
.../markdown/release/1.1.1/CHANGES.1.1.1.md | 68 -
.../markdown/release/1.1.2/CHANGELOG.1.1.2.md | 73 +
.../markdown/release/1.1.2/CHANGES.1.1.2.md | 73 -
.../markdown/release/1.1.3/CHANGELOG.1.1.3.md | 31 +
.../markdown/release/1.1.3/CHANGES.1.1.3.md | 31 -
.../markdown/release/1.2.0/CHANGELOG.1.2.0.md | 274 ++
.../markdown/release/1.2.0/CHANGES.1.2.0.md | 274 --
.../markdown/release/1.2.1/CHANGELOG.1.2.1.md | 62 +
.../markdown/release/1.2.1/CHANGES.1.2.1.md | 62 -
.../markdown/release/1.2.2/CHANGELOG.1.2.2.md | 32 +
.../markdown/release/1.2.2/CHANGES.1.2.2.md | 32 -
.../markdown/release/1.3.0/CHANGELOG.1.3.0.md | 139 +
.../markdown/release/1.3.0/CHANGES.1.3.0.md | 139 -
.../2.0.0-alpha/CHANGELOG.2.0.0-alpha.md | 398 ++
.../release/2.0.0-alpha/CHANGES.2.0.0-alpha.md | 398 --
.../2.0.1-alpha/CHANGELOG.2.0.1-alpha.md | 36 +
.../release/2.0.1-alpha/CHANGES.2.0.1-alpha.md | 36 -
.../2.0.2-alpha/CHANGELOG.2.0.2-alpha.md | 714 ++++
.../release/2.0.2-alpha/CHANGES.2.0.2-alpha.md | 714 ----
.../2.0.3-alpha/CHANGELOG.2.0.3-alpha.md | 588 +++
.../release/2.0.3-alpha/CHANGES.2.0.3-alpha.md | 588 ---
.../2.0.4-alpha/CHANGELOG.2.0.4-alpha.md | 76 +
.../release/2.0.4-alpha/CHANGES.2.0.4-alpha.md | 76 -
.../2.0.5-alpha/CHANGELOG.2.0.5-alpha.md | 35 +
.../release/2.0.5-alpha/CHANGES.2.0.5-alpha.md | 35 -
.../2.0.6-alpha/CHANGELOG.2.0.6-alpha.md | 31 +
.../release/2.0.6-alpha/CHANGES.2.0.6-alpha.md | 31 -
.../release/2.1.0-beta/CHANGELOG.2.1.0-beta.md | 911 +++++
.../release/2.1.0-beta/CHANGES.2.1.0-beta.md | 911 -----
.../release/2.1.1-beta/CHANGELOG.2.1.1-beta.md | 233 ++
.../release/2.1.1-beta/CHANGES.2.1.1-beta.md | 233 --
.../markdown/release/2.10.0/CHANGELOG.2.10.0.md | 484 +++
.../release/2.10.0/RELEASENOTES.2.10.0.md | 115 +
.../markdown/release/2.2.0/CHANGELOG.2.2.0.md | 140 +
.../markdown/release/2.2.0/CHANGES.2.2.0.md | 140 -
.../markdown/release/2.2.1/CHANGELOG.2.2.1.md | 38 +
.../markdown/release/2.2.1/CHANGES.2.2.1.md | 38 -
.../markdown/release/2.3.0/CHANGELOG.2.3.0.md | 665 ++++
.../markdown/release/2.3.0/CHANGES.2.3.0.md | 665 ----
.../markdown/release/2.4.0/CHANGELOG.2.4.0.md | 487 +++
.../markdown/release/2.4.0/CHANGES.2.4.0.md | 487 ---
.../markdown/release/2.4.1/CHANGELOG.2.4.1.md | 136 +
.../markdown/release/2.4.1/CHANGES.2.4.1.md | 136 -
.../markdown/release/2.5.0/CHANGELOG.2.5.0.md | 551 +++
.../markdown/release/2.5.0/CHANGES.2.5.0.md | 551 ---
.../markdown/release/2.5.1/CHANGELOG.2.5.1.md | 35 +
.../markdown/release/2.5.1/CHANGES.2.5.1.md | 35 -
.../markdown/release/2.5.2/CHANGELOG.2.5.2.md | 35 +
.../markdown/release/2.5.2/CHANGES.2.5.2.md | 35 -
.../markdown/release/2.6.0/CHANGELOG.2.6.0.md | 964 +++++
.../markdown/release/2.6.0/CHANGES.2.6.0.md | 964 -----
.../markdown/release/2.6.1/CHANGELOG.2.6.1.md | 202 +
.../markdown/release/2.6.1/CHANGES.2.6.1.md | 202 -
.../markdown/release/2.6.2/CHANGELOG.2.6.2.md | 61 +
.../markdown/release/2.6.2/CHANGES.2.6.2.md | 61 -
.../markdown/release/2.6.3/CHANGELOG.2.6.3.md | 77 +
.../markdown/release/2.6.3/CHANGES.2.6.3.md | 77 -
.../markdown/release/2.6.4/CHANGELOG.2.6.4.md | 98 +
.../markdown/release/2.6.4/CHANGES.2.6.4.md | 98 -
.../markdown/release/2.6.5/CHANGELOG.2.6.5.md | 132 +
.../markdown/release/2.6.5/CHANGES.2.6.5.md | 132 -
.../markdown/release/2.6.6/CHANGELOG.2.6.6.md | 57 +
.../markdown/release/2.6.6/CHANGES.2.6.6.md | 47 -
.../markdown/release/2.7.0/CHANGELOG.2.7.0.md | 988 +++++
.../markdown/release/2.7.0/CHANGES.2.7.0.md | 988 -----
.../release/2.7.0/RELEASENOTES.2.7.0.md | 7 +
.../markdown/release/2.7.1/CHANGELOG.2.7.1.md | 175 +
.../markdown/release/2.7.1/CHANGES.2.7.1.md | 182 -
.../release/2.7.1/RELEASENOTES.2.7.1.md | 7 -
.../markdown/release/2.7.2/CHANGELOG.2.7.2.md | 208 +
.../markdown/release/2.7.2/CHANGES.2.7.2.md | 208 -
.../markdown/release/2.7.3/CHANGELOG.2.7.3.md | 284 ++
.../markdown/release/2.7.3/CHANGES.2.7.3.md | 284 --
.../markdown/release/2.7.4/CHANGELOG.2.7.4.md | 326 ++
.../markdown/release/2.7.4/CHANGES.2.7.4.md | 326 --
.../release/2.7.4/RELEASENOTES.2.7.4.md | 14 +
.../markdown/release/2.7.5/CHANGELOG.2.7.5.md | 93 +
.../markdown/release/2.7.5/CHANGES.2.7.5.md | 32 -
.../release/2.7.5/RELEASENOTES.2.7.5.md | 12 +
.../markdown/release/2.7.6/CHANGELOG.2.7.6.md | 92 +
.../release/2.7.6/RELEASENOTES.2.7.6.md | 42 +
.../markdown/release/2.7.7/CHANGELOG.2.7.7.md | 47 +
.../release/2.7.7/RELEASENOTES.2.7.7.md | 21 +
.../markdown/release/2.7.8/CHANGELOG.2.7.8.md | 31 +
.../release/2.7.8/RELEASENOTES.2.7.8.md | 21 +
.../markdown/release/2.8.0/CHANGELOG.2.8.0.md | 2999 ++++++++++++++
.../markdown/release/2.8.0/CHANGES.2.8.0.md | 2993 --------------
.../release/2.8.0/RELEASENOTES.2.8.0.md | 7 +
.../markdown/release/2.8.1/CHANGELOG.2.8.1.md | 31 +
.../markdown/release/2.8.1/CHANGES.2.8.1.md | 31 -
.../markdown/release/2.8.2/CHANGELOG.2.8.2.md | 374 ++
.../markdown/release/2.8.2/CHANGES.2.8.2.md | 385 --
.../release/2.8.2/RELEASENOTES.2.8.2.md | 9 +-
.../markdown/release/2.8.3/CHANGELOG.2.8.3.md | 129 +
.../markdown/release/2.8.3/CHANGES.2.8.3.md | 69 -
.../release/2.8.3/RELEASENOTES.2.8.3.md | 17 +
.../markdown/release/2.8.4/CHANGELOG.2.8.4.md | 123 +
.../release/2.8.4/RELEASENOTES.2.8.4.md | 21 +
.../markdown/release/2.8.5/CHANGELOG.2.8.5.md | 70 +
.../release/2.8.5/RELEASENOTES.2.8.5.md | 21 +
.../markdown/release/2.9.0/CHANGELOG.2.9.0.md | 2013 ++++++++++
.../markdown/release/2.9.0/CHANGES.2.9.0.md | 1453 -------
.../release/2.9.0/RELEASENOTES.2.9.0.md | 189 +-
.../markdown/release/2.9.1/CHANGELOG.2.9.1.md | 279 ++
.../markdown/release/2.9.1/CHANGES.2.9.1.md | 277 --
.../release/2.9.1/RELEASENOTES.2.9.1.md | 2 +-
.../markdown/release/2.9.2/CHANGELOG.2.9.2.md | 202 +
.../release/2.9.2/RELEASENOTES.2.9.2.md | 21 +
.../3.0.0-alpha1/CHANGELOG.3.0.0-alpha1.md | 3751 ++++++++++++++++++
.../3.0.0-alpha1/CHANGES.3.0.0-alpha1.md | 3750 -----------------
.../3.0.0-alpha1/RELEASENOTES.3.0.0-alpha1.md | 14 +
.../3.0.0-alpha2/CHANGELOG.3.0.0-alpha2.md | 922 +++++
.../3.0.0-alpha2/CHANGES.3.0.0-alpha2.md | 919 -----
.../3.0.0-alpha2/RELEASENOTES.3.0.0-alpha2.md | 17 +-
.../3.0.0-alpha3/CHANGELOG.3.0.0-alpha3.md | 46 +
.../3.0.0-alpha3/CHANGES.3.0.0-alpha3.md | 46 -
.../3.0.0-alpha4/CHANGELOG.3.0.0-alpha4.md | 878 ++++
.../3.0.0-alpha4/CHANGES.3.0.0-alpha4.md | 887 -----
.../3.0.0-alpha4/RELEASENOTES.3.0.0-alpha4.md | 12 +-
.../3.0.0-beta1/CHANGELOG.3.0.0-beta1.md | 649 +++
.../release/3.0.0-beta1/CHANGES.3.0.0-beta1.md | 646 ---
.../3.0.0-beta1/RELEASENOTES.3.0.0-beta1.md | 13 +
.../markdown/release/3.0.0/CHANGELOG.3.0.0.md | 363 ++
.../markdown/release/3.0.0/CHANGES.3.0.0.md | 360 --
.../release/3.0.0/RELEASENOTES.3.0.0.md | 7 -
.../markdown/release/3.0.1/CHANGELOG.3.0.1.md | 231 ++
.../markdown/release/3.0.1/CHANGES.3.0.1.md | 241 --
.../markdown/release/3.0.2/CHANGELOG.3.0.2.md | 31 +
.../markdown/release/3.0.2/CHANGES.3.0.2.md | 31 -
.../markdown/release/3.0.3/CHANGELOG.3.0.3.md | 312 ++
.../markdown/release/3.0.3/CHANGES.3.0.3.md | 309 --
.../release/3.0.3/RELEASENOTES.3.0.3.md | 7 +
.../markdown/release/3.0.4/CHANGELOG.3.0.4.md | 189 +
.../release/3.0.4/RELEASENOTES.3.0.4.md | 50 +
.../markdown/release/3.1.0/CHANGELOG.3.1.0.md | 1042 +++++
.../markdown/release/3.1.0/CHANGES.3.1.0.md | 1022 -----
.../release/3.1.0/RELEASENOTES.3.1.0.md | 7 +
.../markdown/release/3.1.1/CHANGELOG.3.1.1.md | 502 +++
.../markdown/release/3.1.1/CHANGES.3.1.1.md | 498 ---
.../release/3.1.1/RELEASENOTES.3.1.1.md | 539 +--
.../markdown/release/3.1.2/CHANGELOG.3.1.2.md | 158 +
.../release/3.1.2/RELEASENOTES.3.1.2.md | 28 +
.../markdown/release/3.2.0/CHANGELOG.3.2.0.md | 881 ++++
.../release/3.2.0/RELEASENOTES.3.2.0.md | 134 +
.../java/org/apache/hadoop/fs/TestTrash.java | 6 +
.../hadoop/log/TestLogThrottlingHelper.java | 172 +
.../org/apache/hadoop/util/TestStringUtils.java | 9 +
.../util/curator/TestZKCuratorManager.java | 23 +
hadoop-dist/src/main/compose/ozone-hdfs/.env | 17 +
.../main/compose/ozone-hdfs/docker-compose.yaml | 60 +
.../src/main/compose/ozone-hdfs/docker-config | 76 +
.../src/main/compose/ozone/docker-config | 5 +
.../main/compose/ozonefs/docker-compose.yaml | 59 +
.../src/main/compose/ozonefs/docker-config | 35 +
.../src/main/compose/ozoneperf/docker-config | 4 +
.../compose/ozonescripts/.ssh/authorized_keys | 16 +
.../src/main/compose/ozonescripts/.ssh/config | 18 +
.../main/compose/ozonescripts/.ssh/environment | 16 +
.../src/main/compose/ozonescripts/.ssh/id_rsa | 42 +
.../main/compose/ozonescripts/.ssh/id_rsa.pub | 16 +
.../src/main/compose/ozonescripts/Dockerfile | 33 +
.../src/main/compose/ozonescripts/README.md | 38 +
.../compose/ozonescripts/docker-compose.yaml | 42 +
.../src/main/compose/ozonescripts/docker-config | 38 +
hadoop-dist/src/main/compose/ozonescripts/ps.sh | 17 +
.../src/main/compose/ozonescripts/start.sh | 24 +
.../src/main/compose/ozonescripts/stop.sh | 17 +
hadoop-dist/src/main/ozone/README.txt | 51 +
hadoop-dist/src/main/smoketest/README.md | 30 +
.../src/main/smoketest/basic/basic.robot | 47 +
.../src/main/smoketest/basic/ozone-shell.robot | 82 +
hadoop-dist/src/main/smoketest/commonlib.robot | 24 +
.../src/main/smoketest/ozonefs/ozonefs.robot | 35 +
hadoop-dist/src/main/smoketest/test.sh | 101 +
hadoop-hdds/client/pom.xml | 5 +-
.../apache/hadoop/hdds/scm/XceiverClient.java | 209 -
.../hadoop/hdds/scm/XceiverClientHandler.java | 202 -
.../hdds/scm/XceiverClientInitializer.java | 74 -
.../hadoop/hdds/scm/XceiverClientRatis.java | 101 +-
.../hadoop/hdds/scm/client/HddsClientUtils.java | 4 +-
.../hdds/scm/storage/ChunkInputStream.java | 30 +-
.../hdds/scm/storage/ChunkOutputStream.java | 13 +-
hadoop-hdds/common/pom.xml | 11 +-
.../common/src/main/conf/log4j.properties | 157 +
.../org/apache/hadoop/hdds/HddsConfigKeys.java | 19 +
.../java/org/apache/hadoop/hdds/HddsUtils.java | 8 +-
.../org/apache/hadoop/hdds/cli/GenericCli.java | 26 +-
.../hadoop/hdds/cli/GenericParentCommand.java | 25 +
.../hdds/cli/MissingSubcommandException.java | 35 +
.../apache/hadoop/hdds/scm/ScmConfigKeys.java | 19 +
.../hadoop/hdds/scm/container/ContainerID.java | 26 +-
.../container/common/helpers/ContainerInfo.java | 11 +-
.../scm/container/common/helpers/Pipeline.java | 46 +-
.../container/common/helpers/PipelineID.java | 13 +-
.../scm/storage/ContainerProtocolCalls.java | 62 +-
.../apache/hadoop/ozone/OzoneConfigKeys.java | 20 +
.../org/apache/hadoop/ozone/OzoneConsts.java | 2 +
.../container/common/helpers/BlockData.java | 255 ++
.../ozone/container/common/helpers/KeyData.java | 253 --
.../apache/hadoop/ozone/lock/ActiveLock.java | 101 +
.../apache/hadoop/ozone/lock/LockManager.java | 101 +
.../hadoop/ozone/lock/PooledLockFactory.java | 43 +
.../apache/hadoop/ozone/lock/package-info.java | 21 +
.../hadoop/utils/db/DBConfigFromFile.java | 34 +-
.../main/java/org/apache/ratis/RatisHelper.java | 75 +-
.../main/proto/DatanodeContainerProtocol.proto | 74 +-
.../main/proto/ScmBlockLocationProtocol.proto | 6 +-
hadoop-hdds/common/src/main/proto/hdds.proto | 14 +
.../common/src/main/resources/ozone-default.xml | 63 +
.../hadoop/ozone/lock/TestLockManager.java | 64 +
.../apache/hadoop/ozone/lock/package-info.java | 21 +
hadoop-hdds/container-service/pom.xml | 6 +-
.../apache/hadoop/hdds/scm/HddsServerUtil.java | 21 +
.../common/impl/OpenContainerBlockMap.java | 46 +-
.../common/report/PipelineReportPublisher.java | 73 +
.../common/report/ReportPublisherFactory.java | 4 +
.../statemachine/DatanodeStateMachine.java | 19 +-
.../statemachine/SCMConnectionManager.java | 7 +-
.../common/statemachine/StateContext.java | 44 +-
.../CloseContainerCommandHandler.java | 17 +-
.../DeleteBlocksCommandHandler.java | 4 +-
.../ReplicateContainerCommandHandler.java | 129 +-
.../states/datanode/InitDatanodeState.java | 22 +-
.../states/endpoint/RegisterEndpointTask.java | 8 +-
.../common/transport/server/XceiverServer.java | 140 -
.../transport/server/XceiverServerGrpc.java | 28 +-
.../transport/server/XceiverServerHandler.java | 82 -
.../server/XceiverServerInitializer.java | 64 -
.../transport/server/XceiverServerSpi.java | 9 +
.../server/ratis/ContainerStateMachine.java | 42 +-
.../server/ratis/XceiverServerRatis.java | 132 +-
.../keyvalue/KeyValueBlockIterator.java | 16 +-
.../container/keyvalue/KeyValueContainer.java | 5 +-
.../container/keyvalue/KeyValueHandler.java | 124 +-
.../container/keyvalue/helpers/BlockUtils.java | 199 +
.../container/keyvalue/helpers/KeyUtils.java | 199 -
.../keyvalue/helpers/KeyValueContainerUtil.java | 12 +-
.../keyvalue/helpers/SmallFileUtils.java | 2 +-
.../keyvalue/impl/BlockManagerImpl.java | 229 ++
.../container/keyvalue/impl/KeyManagerImpl.java | 227 --
.../container/keyvalue/impl/package-info.java | 5 +-
.../keyvalue/interfaces/BlockManager.java | 84 +
.../keyvalue/interfaces/KeyManager.java | 84 -
.../keyvalue/interfaces/package-info.java | 21 +
.../background/BlockDeletingService.java | 10 +-
.../container/ozoneimpl/OzoneContainer.java | 126 +-
.../replication/ContainerReplicator.java | 27 +
.../DownloadAndImportReplicator.java | 136 +
.../replication/GrpcReplicationClient.java | 2 +-
.../replication/ReplicationSupervisor.java | 142 +
.../container/replication/ReplicationTask.java | 102 +
.../StorageContainerDatanodeProtocol.java | 10 +-
.../protocol/StorageContainerNodeProtocol.java | 6 +-
.../commands/CloseContainerCommand.java | 23 +-
...rDatanodeProtocolClientSideTranslatorPB.java | 6 +-
...rDatanodeProtocolServerSideTranslatorPB.java | 5 +-
.../StorageContainerDatanodeProtocol.proto | 10 +
.../ozone/container/common/ScmTestMock.java | 8 +-
.../common/TestDatanodeStateMachine.java | 42 +
.../TestReplicateContainerCommandHandler.java | 146 -
.../keyvalue/TestBlockManagerImpl.java | 211 +
.../keyvalue/TestChunkManagerImpl.java | 2 +-
.../container/keyvalue/TestKeyManagerImpl.java | 191 -
.../keyvalue/TestKeyValueBlockIterator.java | 30 +-
.../keyvalue/TestKeyValueContainer.java | 26 +-
.../container/keyvalue/TestKeyValueHandler.java | 38 +-
.../replication/TestReplicationSupervisor.java | 143 +
.../container/replication/package-info.java | 22 +
hadoop-hdds/framework/pom.xml | 5 +-
.../hadoop/hdds/server/events/EventQueue.java | 18 +
hadoop-hdds/pom.xml | 12 +-
hadoop-hdds/server-scm/pom.xml | 8 +-
.../org/apache/hadoop/hdds/scm/ScmUtils.java | 45 +
.../hadoop/hdds/scm/block/BlockManagerImpl.java | 339 +-
.../container/CloseContainerEventHandler.java | 90 +-
.../hdds/scm/container/ContainerMapping.java | 54 +-
.../scm/container/ContainerReportHandler.java | 55 +-
.../scm/container/ContainerStateManager.java | 67 +-
.../hadoop/hdds/scm/container/Mapping.java | 15 +-
.../replication/ReplicationActivityStatus.java | 55 +-
.../replication/ReplicationManager.java | 18 +-
.../scm/container/states/ContainerQueryKey.java | 110 +
.../scm/container/states/ContainerStateMap.java | 235 +-
.../hadoop/hdds/scm/events/SCMEvents.java | 27 +-
.../hdds/scm/exceptions/SCMException.java | 3 +-
.../hadoop/hdds/scm/node/DeadNodeHandler.java | 17 +
.../hadoop/hdds/scm/node/SCMNodeManager.java | 5 +-
.../hadoop/hdds/scm/node/StaleNodeHandler.java | 21 +-
.../hdds/scm/node/states/Node2ContainerMap.java | 123 +-
.../hdds/scm/node/states/Node2ObjectsMap.java | 162 +
.../hdds/scm/node/states/ReportResult.java | 105 +-
.../hdds/scm/pipelines/Node2PipelineMap.java | 45 +-
.../pipelines/PipelineActionEventHandler.java | 2 +
.../scm/pipelines/PipelineCloseHandler.java | 24 +-
.../hdds/scm/pipelines/PipelineManager.java | 180 +-
.../scm/pipelines/PipelineReportHandler.java | 59 +
.../hdds/scm/pipelines/PipelineSelector.java | 340 +-
.../scm/pipelines/PipelineStateManager.java | 136 +
.../scm/pipelines/ratis/RatisManagerImpl.java | 57 +-
.../standalone/StandaloneManagerImpl.java | 52 +-
.../hdds/scm/server/ChillModePrecheck.java | 56 +
.../apache/hadoop/hdds/scm/server/Precheck.java | 29 +
.../hdds/scm/server/SCMChillModeManager.java | 245 ++
.../scm/server/SCMClientProtocolServer.java | 53 +-
.../server/SCMDatanodeHeartbeatDispatcher.java | 23 +
.../scm/server/SCMDatanodeProtocolServer.java | 33 +-
.../scm/server/StorageContainerManager.java | 137 +-
.../apache/hadoop/hdds/scm/HddsTestUtils.java | 85 +
.../org/apache/hadoop/hdds/scm/TestUtils.java | 55 +-
.../hadoop/hdds/scm/block/TestBlockManager.java | 89 +-
.../hdds/scm/container/MockNodeManager.java | 4 +-
.../scm/container/TestContainerMapping.java | 54 +-
.../container/TestContainerReportHandler.java | 23 +-
.../container/TestContainerStateManager.java | 98 +
.../TestReplicationActivityStatus.java | 63 +
.../replication/TestReplicationManager.java | 104 +-
.../hdds/scm/node/TestDeadNodeHandler.java | 99 +-
.../hadoop/hdds/scm/node/TestNodeManager.java | 6 +-
.../scm/node/states/TestNode2ContainerMap.java | 35 +-
.../scm/server/TestSCMChillModeManager.java | 125 +
.../scm/server/TestSCMClientProtocolServer.java | 60 +
.../ozone/container/common/TestEndPoint.java | 5 +-
.../testutils/ReplicationNodeManagerMock.java | 5 +-
hadoop-hdds/tools/pom.xml | 4 +-
.../java/org/apache/hadoop/hdfs/DFSClient.java | 7 +-
.../hdfs/protocol/NoECPolicySetException.java | 37 +
.../apache/hadoop/hdfs/web/JsonUtilClient.java | 4 +
.../hadoop/fs/http/client/HttpFSFileSystem.java | 2 +-
.../hadoop/fs/http/server/FSOperations.java | 3 +
.../fs/http/client/BaseTestHttpFSWith.java | 35 +-
.../federation/router/RouterAdminServer.java | 23 +
.../src/site/markdown/HDFSRouterFederation.md | 26 +
.../store/driver/TestStateStoreZK.java | 53 +
.../org/apache/hadoop/hdfs/DFSConfigKeys.java | 3 +
.../qjournal/server/JournaledEditsCache.java | 2 +-
.../blockmanagement/BlockManagerSafeMode.java | 20 +-
.../BlockPlacementPolicyDefault.java | 29 +-
.../apache/hadoop/hdfs/server/common/Util.java | 3 +-
.../hdfs/server/datanode/BlockSender.java | 7 +-
.../hdfs/server/namenode/CacheManager.java | 42 +-
.../server/namenode/FSDirErasureCodingOp.java | 4 +
.../hdfs/server/namenode/FSEditLogAsync.java | 61 +-
.../hdfs/server/namenode/FSNamesystemLock.java | 46 +-
.../hdfs/server/namenode/ha/EditLogTailer.java | 34 +-
.../snapshot/DirectorySnapshottableFeature.java | 2 +-
.../org/apache/hadoop/hdfs/tools/ECAdmin.java | 7 +
.../PBImageDelimitedTextWriter.java | 18 +-
.../src/main/resources/hdfs-default.xml | 17 +-
.../src/main/webapps/datanode/datanode.html | 3 +
.../src/main/webapps/static/dfs-dust.js | 4 +-
.../site/markdown/CentralizedCacheManagement.md | 5 +
.../src/site/markdown/HDFSDiskbalancer.md | 5 +-
.../apache/hadoop/hdfs/MiniDFSNNTopology.java | 17 +
.../TestDFSStripedOutputStreamWithFailure.java | 41 +-
.../TestUnsetAndChangeDirectoryEcPolicy.java | 23 +-
.../TestBlockManagerSafeMode.java | 81 +-
.../blockmanagement/TestReplicationPolicy.java | 28 +
.../TestDataNodeMultipleRegistrations.java | 4 +-
.../server/namenode/TestCacheDirectives.java | 49 +
.../hdfs/server/namenode/TestEditLogRace.java | 158 +-
.../server/namenode/ha/TestEditLogTailer.java | 98 +-
.../snapshot/TestSnapshotDiffReport.java | 6 +
.../TestOfflineImageViewer.java | 17 +
.../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 18 +
.../test/resources/testErasureCodingConf.xml | 24 +
.../v2/app/speculate/DefaultSpeculator.java | 4 +-
.../mapreduce/v2/app/webapp/AMWebServices.java | 9 +-
.../v2/app/webapp/JAXBContextResolver.java | 7 +-
.../mapreduce/v2/app/webapp/TaskPage.java | 3 +-
.../v2/app/webapp/dao/MapTaskAttemptInfo.java | 39 +
.../app/webapp/dao/ReduceTaskAttemptInfo.java | 11 +-
.../v2/app/webapp/dao/TaskAttemptInfo.java | 14 +-
.../v2/app/webapp/dao/TaskAttemptsInfo.java | 18 +-
.../hadoop/mapreduce/v2/app/MRAppBenchmark.java | 20 +-
.../impl/TestTaskAttemptContainerRequest.java | 11 +-
.../v2/app/metrics/TestMRAppMetrics.java | 7 +
.../app/webapp/TestAMWebServicesAttempts.java | 3 +
.../src/site/markdown/MapredAppMasterRest.md | 2 +-
.../mapreduce/v2/hs/webapp/HsTaskPage.java | 3 +-
.../mapreduce/v2/hs/webapp/HsTasksBlock.java | 5 +-
.../mapreduce/v2/hs/webapp/HsWebServices.java | 9 +-
.../v2/hs/webapp/JAXBContextResolver.java | 4 +-
.../hadoop/mapred/ResourceMgrDelegate.java | 22 +
.../hadoop/mapred/TestClientRedirect.java | 25 +
hadoop-ozone/acceptance-test/README.md | 48 -
.../dev-support/bin/robot-all.sh | 18 -
.../dev-support/bin/robot-dnd-all.sh | 57 -
.../acceptance-test/dev-support/bin/robot.sh | 38 -
.../dev-support/docker/Dockerfile | 21 -
.../dev-support/docker/docker-compose.yaml | 23 -
hadoop-ozone/acceptance-test/pom.xml | 59 -
.../src/test/acceptance/basic/.env | 17 -
.../src/test/acceptance/basic/basic.robot | 50 -
.../test/acceptance/basic/docker-compose.yaml | 50 -
.../src/test/acceptance/basic/docker-config | 33 -
.../src/test/acceptance/basic/ozone-shell.robot | 86 -
.../src/test/acceptance/commonlib.robot | 78 -
.../src/test/acceptance/ozonefs/.env | 17 -
.../test/acceptance/ozonefs/docker-compose.yaml | 59 -
.../src/test/acceptance/ozonefs/docker-config | 34 -
.../src/test/acceptance/ozonefs/ozonefs.robot | 39 -
.../acceptance/ozonefs/ozonesinglenode.robot | 49 -
hadoop-ozone/client/pom.xml | 4 +-
.../ozone/client/io/ChunkGroupInputStream.java | 41 +-
.../ozone/client/io/OzoneInputStream.java | 5 +
.../hadoop/ozone/client/rpc/RpcClient.java | 3 +-
hadoop-ozone/common/pom.xml | 4 +-
hadoop-ozone/common/src/main/bin/ozone | 21 +-
.../common/src/main/bin/ozone-config.sh | 51 +
hadoop-ozone/common/src/main/bin/start-ozone.sh | 90 +-
hadoop-ozone/common/src/main/bin/stop-ozone.sh | 22 +-
.../hadoop/ozone/om/helpers/OmBucketInfo.java | 2 +-
.../hadoop/ozone/om/helpers/OmVolumeArgs.java | 2 +-
.../src/main/proto/OzoneManagerProtocol.proto | 2 +-
hadoop-ozone/docs/README.md | 13 +-
hadoop-ozone/docs/archetypes/default.md | 13 +-
hadoop-ozone/docs/config.toml | 23 -
hadoop-ozone/docs/config.yaml | 41 +
hadoop-ozone/docs/content/BucketCommands.md | 122 +
hadoop-ozone/docs/content/BuildingSources.md | 54 +
hadoop-ozone/docs/content/CommandShell.md | 245 +-
hadoop-ozone/docs/content/Concepts.md | 108 +
hadoop-ozone/docs/content/Dozone.md | 110 +
hadoop-ozone/docs/content/Freon.md | 64 +
hadoop-ozone/docs/content/GettingStarted.md | 369 --
hadoop-ozone/docs/content/Hdds.md | 65 +
hadoop-ozone/docs/content/JavaApi.md | 172 +
hadoop-ozone/docs/content/KeyCommands.md | 127 +
hadoop-ozone/docs/content/Metrics.md | 170 -
hadoop-ozone/docs/content/OzoneFS.md | 80 +
hadoop-ozone/docs/content/OzoneManager.md | 77 +
hadoop-ozone/docs/content/RealCluster.md | 74 +
hadoop-ozone/docs/content/Rest.md | 45 +-
hadoop-ozone/docs/content/RunningViaDocker.md | 73 +
hadoop-ozone/docs/content/RunningWithHDFS.md | 77 +
hadoop-ozone/docs/content/SCMCLI.md | 29 +
hadoop-ozone/docs/content/Settings.md | 142 +
hadoop-ozone/docs/content/VolumeCommands.md | 116 +
hadoop-ozone/docs/content/_index.md | 99 +-
hadoop-ozone/docs/pom.xml | 13 +-
hadoop-ozone/docs/static/NOTES.md | 13 +-
.../ozonedoc/layouts/_default/single.html | 16 +-
.../docs/themes/ozonedoc/layouts/index.html | 16 +
.../ozonedoc/layouts/partials/footer.html | 13 +-
.../ozonedoc/layouts/partials/header.html | 17 +-
.../ozonedoc/layouts/partials/navbar.html | 13 +-
.../ozonedoc/layouts/partials/sidebar.html | 21 +-
.../themes/ozonedoc/static/css/ozonedoc.css | 14 +-
hadoop-ozone/integration-test/pom.xml | 9 +-
.../container/TestContainerStateManager.java | 415 --
.../TestContainerStateManagerIntegration.java | 417 ++
.../hdds/scm/pipeline/TestNode2PipelineMap.java | 22 +-
.../hdds/scm/pipeline/TestPipelineClose.java | 15 +-
.../hdds/scm/pipeline/TestSCMRestart.java | 119 +
.../apache/hadoop/ozone/MiniOzoneCluster.java | 28 +-
.../hadoop/ozone/MiniOzoneClusterImpl.java | 101 +-
.../apache/hadoop/ozone/RatisTestHelper.java | 22 +
.../hadoop/ozone/TestMiniOzoneCluster.java | 4 +-
.../ozone/TestStorageContainerManager.java | 224 +-
.../TestStorageContainerManagerHelper.java | 8 +-
.../ozone/client/rest/TestOzoneRestClient.java | 8 +-
.../rpc/TestCloseContainerHandlingByClient.java | 5 +-
.../ozone/client/rpc/TestOzoneRpcClient.java | 8 +-
.../ozone/container/ContainerTestHelper.java | 84 +-
.../container/TestContainerReplication.java | 24 +-
.../common/TestBlockDeletingService.java | 12 +-
.../container/common/helpers/TestBlockData.java | 127 +
.../container/common/helpers/TestKeyData.java | 119 -
.../common/impl/TestCloseContainerHandler.java | 51 +-
.../common/impl/TestContainerPersistence.java | 154 +-
.../commandhandler/TestBlockDeletion.java | 9 +-
.../TestCloseContainerByPipeline.java | 5 +-
.../transport/server/ratis/TestCSMMetrics.java | 16 +-
.../container/metrics/TestContainerMetrics.java | 21 +-
.../container/ozoneimpl/TestOzoneContainer.java | 100 +-
.../container/server/TestContainerServer.java | 67 +-
.../server/TestContainerStateMachine.java | 2 +-
.../hadoop/ozone/freon/TestDataValidate.java | 119 +-
.../apache/hadoop/ozone/freon/TestFreon.java | 129 -
.../ozone/freon/TestRandomKeyGenerator.java | 106 +
...TestGenerateOzoneRequiredConfigurations.java | 131 -
.../hadoop/ozone/om/TestOzoneManager.java | 4 +-
.../hadoop/ozone/om/TestScmChillMode.java | 171 +
.../hadoop/ozone/ozShell/TestOzoneShell.java | 453 ++-
.../hadoop/ozone/scm/TestContainerSQLCli.java | 15 +-
.../ozone/scm/TestContainerSmallFile.java | 4 +-
.../TestGetCommittedBlockLengthAndPutKey.java | 12 +-
.../hadoop/ozone/web/client/TestKeys.java | 45 +-
.../hadoop/ozone/web/client/TestKeysRatis.java | 2 -
.../src/test/resources/log4j.properties | 5 +-
hadoop-ozone/objectstore-service/pom.xml | 56 +-
.../org/apache/hadoop/ozone/TestErrorCode.java | 53 -
.../apache/hadoop/ozone/web/TestErrorCode.java | 53 +
hadoop-ozone/ozone-manager/pom.xml | 6 +-
.../hadoop/ozone/om/BucketManagerImpl.java | 45 +-
.../apache/hadoop/ozone/om/KeyManagerImpl.java | 134 +-
.../hadoop/ozone/om/OMMetadataManager.java | 14 +-
.../hadoop/ozone/om/OmMetadataManagerImpl.java | 35 +-
.../apache/hadoop/ozone/om/OzoneManager.java | 35 +-
.../hadoop/ozone/om/OzoneManagerLock.java | 181 +
.../hadoop/ozone/om/VolumeManagerImpl.java | 41 +-
.../hadoop/ozone/om/exceptions/OMException.java | 3 +-
.../hadoop/ozone/web/ozShell/Handler.java | 49 +-
.../apache/hadoop/ozone/web/ozShell/Shell.java | 408 +-
.../web/ozShell/bucket/BucketCommands.java | 60 +
.../web/ozShell/bucket/CreateBucketHandler.java | 46 +-
.../web/ozShell/bucket/DeleteBucketHandler.java | 45 +-
.../web/ozShell/bucket/InfoBucketHandler.java | 42 +-
.../web/ozShell/bucket/ListBucketHandler.java | 89 +-
.../web/ozShell/bucket/UpdateBucketHandler.java | 66 +-
.../web/ozShell/keys/DeleteKeyHandler.java | 48 +-
.../ozone/web/ozShell/keys/GetKeyHandler.java | 77 +-
.../ozone/web/ozShell/keys/InfoKeyHandler.java | 45 +-
.../ozone/web/ozShell/keys/KeyCommands.java | 60 +
.../ozone/web/ozShell/keys/ListKeyHandler.java | 92 +-
.../ozone/web/ozShell/keys/PutKeyHandler.java | 80 +-
.../web/ozShell/volume/CreateVolumeHandler.java | 69 +-
.../web/ozShell/volume/DeleteVolumeHandler.java | 35 +-
.../web/ozShell/volume/InfoVolumeHandler.java | 37 +-
.../web/ozShell/volume/ListVolumeHandler.java | 82 +-
.../web/ozShell/volume/UpdateVolumeHandler.java | 49 +-
.../web/ozShell/volume/VolumeCommands.java | 61 +
.../ozone/om/ScmBlockLocationTestIngClient.java | 2 +-
.../hadoop/ozone/om/TestChunkStreams.java | 14 +-
.../hadoop/ozone/om/TestKeyDeletingService.java | 47 +-
.../hadoop/ozone/om/TestKeyManagerImpl.java | 165 +
.../hadoop/ozone/om/TestOzoneManagerLock.java | 192 +
hadoop-ozone/ozonefs/pom.xml | 18 +-
.../ozonefs/src/test/resources/log4j.properties | 1 +
hadoop-ozone/pom.xml | 46 +-
hadoop-ozone/tools/pom.xml | 6 +-
.../org/apache/hadoop/ozone/freon/Freon.java | 1136 +-----
.../hadoop/ozone/freon/RandomKeyGenerator.java | 1039 +++++
.../GenerateOzoneRequiredConfigurations.java | 12 +-
.../genesis/BenchMarkContainerStateMap.java | 24 +-
.../genesis/BenchMarkDatanodeDispatcher.java | 84 +-
.../apache/hadoop/ozone/genesis/Genesis.java | 9 +-
...TestGenerateOzoneRequiredConfigurations.java | 152 +
.../hadoop/ozone/genconf/package-info.java | 22 +
hadoop-project-dist/pom.xml | 34 +-
hadoop-project/pom.xml | 14 +-
hadoop-project/src/site/site.xml | 1 +
hadoop-tools/hadoop-aws/pom.xml | 4 +
.../fs/s3a/BasicAWSCredentialsProvider.java | 62 -
.../org/apache/hadoop/fs/s3a/Constants.java | 47 +-
.../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 32 +-
.../hadoop/fs/s3a/S3AInstrumentation.java | 5 +-
.../apache/hadoop/fs/s3a/S3ARetryPolicy.java | 24 +-
.../java/org/apache/hadoop/fs/s3a/S3AUtils.java | 60 +-
.../fs/s3a/SimpleAWSCredentialsProvider.java | 27 +-
.../fs/s3a/TemporaryAWSCredentialsProvider.java | 24 +-
.../fs/s3a/s3guard/DynamoDBMetadataStore.java | 430 +-
.../fs/s3a/s3guard/LocalMetadataStore.java | 26 +-
.../s3guard/S3GuardDataAccessRetryPolicy.java | 47 +
.../hadoop/fs/s3a/s3guard/S3GuardTool.java | 35 +
.../hadoop/fs/s3native/S3xLoginHelper.java | 121 +-
.../src/site/markdown/tools/hadoop-aws/index.md | 32 +-
.../site/markdown/tools/hadoop-aws/s3guard.md | 165 +-
.../site/markdown/tools/hadoop-aws/testing.md | 95 +-
.../tools/hadoop-aws/troubleshooting_s3a.md | 44 +-
.../fs/s3a/ITestS3AAWSCredentialsProvider.java | 20 +-
.../hadoop/fs/s3a/ITestS3AConfiguration.java | 51 +-
.../hadoop/fs/s3a/ITestS3ACredentialsInURL.java | 164 -
.../fs/s3a/ITestS3AFileSystemContract.java | 5 +
.../fs/s3a/TestS3AAWSCredentialsProvider.java | 50 +-
.../hadoop/fs/s3a/TestSSEConfiguration.java | 31 +-
.../s3guard/AbstractS3GuardToolTestBase.java | 27 +-
.../s3a/s3guard/ITestDynamoDBMetadataStore.java | 54 +-
.../ITestDynamoDBMetadataStoreScale.java | 595 ++-
.../s3a/s3guard/ITestS3GuardToolDynamoDB.java | 103 +-
.../AbstractITestS3AMetadataStoreScale.java | 24 +-
.../hadoop/fs/s3native/TestS3xLoginHelper.java | 70 +-
.../hadoop-aws/src/test/resources/core-site.xml | 10 +
.../hadoop/fs/azure/PageBlobOutputStream.java | 21 +-
.../fs/azure/ITestOutputStreamSemantics.java | 43 +
.../hadoop/yarn/sls/nodemanager/NodeInfo.java | 7 +
.../yarn/sls/scheduler/RMNodeWrapper.java | 6 +
hadoop-yarn-project/hadoop-yarn/bin/yarn | 5 +
.../yarn/api/ApplicationClientProtocol.java | 55 +
.../GetAttributesToNodesRequest.java | 74 +
.../GetAttributesToNodesResponse.java | 65 +
.../GetClusterNodeAttributesRequest.java | 47 +
.../GetClusterNodeAttributesResponse.java | 73 +
.../GetNodesToAttributesRequest.java | 65 +
.../GetNodesToAttributesResponse.java | 63 +
.../hadoop/yarn/api/records/NodeAttribute.java | 92 +
.../yarn/api/records/NodeAttributeInfo.java | 62 +
.../yarn/api/records/NodeAttributeKey.java | 66 +
.../yarn/api/records/NodeAttributeOpCode.java | 43 +
.../yarn/api/records/NodeAttributeType.java | 35 +
.../hadoop/yarn/api/records/NodeReport.java | 13 +
.../yarn/api/records/NodeToAttributeValue.java | 57 +
.../hadoop/yarn/api/records/Resource.java | 2 +-
.../yarn/api/resource/PlacementConstraint.java | 40 +-
.../yarn/api/resource/PlacementConstraints.java | 19 +
.../hadoop/yarn/conf/YarnConfiguration.java | 68 +-
.../ResourceManagerAdministrationProtocol.java | 13 +-
.../AttributeMappingOperationType.java | 42 +
.../api/protocolrecords/NodeToAttributes.java | 59 +
.../NodesToAttributesMappingRequest.java | 69 +
.../NodesToAttributesMappingResponse.java | 31 +
.../constraint/PlacementConstraintParser.java | 156 +-
.../main/proto/applicationclient_protocol.proto | 3 +
...esourcemanager_administration_protocol.proto | 1 +
..._server_resourcemanager_service_protos.proto | 16 +
.../src/main/proto/yarn_protos.proto | 44 +
.../src/main/proto/yarn_service_protos.proto | 23 +
.../resource/TestPlacementConstraintParser.java | 83 +-
.../distributedshell/ApplicationMaster.java | 51 +-
.../applications/distributedshell/Client.java | 9 +-
.../distributedshell/PlacementSpec.java | 19 +-
.../distributedshell/TestDistributedShell.java | 12 +-
.../yarn/service/api/records/Artifact.java | 1 -
.../yarn/service/api/records/Component.java | 1 -
.../yarn/service/api/records/ConfigFile.java | 1 -
.../yarn/service/api/records/Configuration.java | 1 -
.../yarn/service/api/records/Container.java | 1 -
.../hadoop/yarn/service/api/records/Error.java | 1 -
.../service/api/records/KerberosPrincipal.java | 2 -
.../api/records/PlacementConstraint.java | 3 -
.../service/api/records/PlacementPolicy.java | 3 -
.../service/api/records/PlacementScope.java | 3 -
.../yarn/service/api/records/PlacementType.java | 3 -
.../service/api/records/ReadinessCheck.java | 1 -
.../yarn/service/api/records/Resource.java | 1 -
.../api/records/ResourceInformation.java | 2 -
.../yarn/service/api/records/Service.java | 1 -
.../yarn/service/api/records/ServiceState.java | 1 -
.../yarn/service/api/records/ServiceStatus.java | 1 -
.../yarn/submarine/client/cli/CliConstants.java | 7 +
.../yarn/submarine/client/cli/CliUtils.java | 10 +-
.../yarn/submarine/client/cli/RunJobCli.java | 51 +-
.../submarine/client/cli/param/Quicklink.java | 71 +
.../client/cli/param/RunJobParameters.java | 70 +-
.../fs/DefaultRemoteDirectoryManager.java | 21 +-
.../common/fs/RemoteDirectoryManager.java | 4 +-
.../common/FSBasedSubmarineStorageImpl.java | 4 +-
.../yarnservice/YarnServiceJobSubmitter.java | 208 +-
.../runtimes/yarnservice/YarnServiceUtils.java | 63 +-
.../yarnservice/TestYarnServiceRunJobCli.java | 362 +-
.../common/fs/MockRemoteDirectoryManager.java | 7 +-
.../hadoop/yarn/client/api/YarnClient.java | 60 +-
.../yarn/client/api/impl/YarnClientImpl.java | 33 +-
.../hadoop/yarn/client/cli/ClusterCLI.java | 17 +
.../yarn/client/cli/NodeAttributesCLI.java | 715 ++++
.../apache/hadoop/yarn/client/cli/NodeCLI.java | 13 +-
.../hadoop/yarn/client/cli/TestClusterCLI.java | 32 +-
.../yarn/client/cli/TestNodeAttributesCLI.java | 537 +++
.../hadoop/yarn/client/cli/TestYarnCLI.java | 31 +-
.../ApplicationClientProtocolPBClientImpl.java | 55 +
.../ApplicationClientProtocolPBServiceImpl.java | 65 +
.../PlacementConstraintFromProtoConverter.java | 10 +-
.../pb/PlacementConstraintToProtoConverter.java | 11 +
.../pb/GetAttributesToNodesRequestPBImpl.java | 176 +
.../pb/GetAttributesToNodesResponsePBImpl.java | 207 +
.../GetClusterNodeAttributesRequestPBImpl.java | 75 +
.../GetClusterNodeAttributesResponsePBImpl.java | 160 +
.../pb/GetNodesToAttributesRequestPBImpl.java | 132 +
.../pb/GetNodesToAttributesResponsePBImpl.java | 181 +
.../impl/pb/NodeAttributeInfoPBImpl.java | 147 +
.../records/impl/pb/NodeAttributeKeyPBImpl.java | 140 +
.../records/impl/pb/NodeAttributePBImpl.java | 170 +
.../api/records/impl/pb/NodeReportPBImpl.java | 44 +-
.../impl/pb/NodeToAttributeValuePBImpl.java | 137 +
.../hadoop/yarn/nodelabels/AbstractLabel.java | 71 +
.../AttributeExpressionOperation.java | 26 +
.../hadoop/yarn/nodelabels/AttributeValue.java | 53 +
.../nodelabels/CommonNodeLabelsManager.java | 32 +-
.../nodelabels/FileSystemNodeLabelsStore.java | 270 +-
.../yarn/nodelabels/NodeAttributeStore.java | 77 +
.../yarn/nodelabels/NodeAttributesManager.java | 137 +
.../hadoop/yarn/nodelabels/NodeLabelUtil.java | 155 +
.../hadoop/yarn/nodelabels/NodeLabelsStore.java | 32 +-
.../NonAppendableFSNodeLabelStore.java | 46 +-
.../hadoop/yarn/nodelabels/RMNodeAttribute.java | 98 +
.../hadoop/yarn/nodelabels/RMNodeLabel.java | 110 +-
.../yarn/nodelabels/StringAttributeValue.java | 61 +
.../nodelabels/store/AbstractFSNodeStore.java | 213 +
.../yarn/nodelabels/store/FSStoreOpHandler.java | 131 +
.../hadoop/yarn/nodelabels/store/StoreOp.java | 49 +
.../nodelabels/store/op/AddClusterLabelOp.java | 73 +
.../store/op/AddNodeToAttributeLogOp.java | 71 +
.../nodelabels/store/op/FSNodeStoreLogOp.java | 52 +
.../store/op/NodeAttributeMirrorOp.java | 64 +
.../nodelabels/store/op/NodeLabelMirrorOp.java | 85 +
.../yarn/nodelabels/store/op/NodeToLabelOp.java | 75 +
.../store/op/RemoveClusterLabelOp.java | 75 +
.../store/op/RemoveNodeToAttributeLogOp.java | 71 +
.../store/op/ReplaceNodeToAttributeLogOp.java | 73 +
.../yarn/nodelabels/store/op/package-info.java | 21 +
.../yarn/nodelabels/store/package-info.java | 21 +
...nagerAdministrationProtocolPBClientImpl.java | 26 +-
...agerAdministrationProtocolPBServiceImpl.java | 31 +-
.../impl/pb/NodeToAttributesPBImpl.java | 164 +
.../NodesToAttributesMappingRequestPBImpl.java | 197 +
.../NodesToAttributesMappingResponsePBImpl.java | 50 +
...emoveFromClusterNodeLabelsRequestPBImpl.java | 24 +-
.../src/main/resources/yarn-default.xml | 86 +
.../hadoop/yarn/api/TestPBImplRecords.java | 114 +-
.../DummyCommonNodeLabelsManager.java | 8 +-
.../TestFileSystemNodeLabelsStore.java | 16 +-
.../yarn/nodelabels/TestNodeLabelUtil.java | 51 +
.../hadoop/yarn/server/AMRMClientRelayer.java | 412 +-
.../protocolrecords/NodeHeartbeatRequest.java | 17 +
.../impl/pb/NodeHeartbeatRequestPBImpl.java | 52 +
.../metrics/AMRMClientRelayerMetrics.java | 368 ++
.../yarn/server/metrics/package-info.java | 18 +
.../yarn/server/uam/UnmanagedAMPoolManager.java | 30 +-
.../server/uam/UnmanagedApplicationManager.java | 16 +-
.../hadoop/yarn/server/utils/BuilderUtils.java | 6 +-
.../yarn_server_common_service_protos.proto | 5 +
.../yarn/server/MockResourceManagerFacade.java | 46 +-
.../yarn/server/TestAMRMClientRelayer.java | 2 +-
.../protocolrecords/TestProtocolRecords.java | 12 +
.../metrics/TestAMRMClientRelayerMetrics.java | 513 +++
.../uam/TestUnmanagedApplicationManager.java | 2 +-
.../server/nodemanager/ContainerExecutor.java | 3 +-
.../nodemanager/LinuxContainerExecutor.java | 15 +-
.../yarn/server/nodemanager/NodeManager.java | 70 +-
.../server/nodemanager/NodeStatusUpdater.java | 14 +
.../nodemanager/NodeStatusUpdaterImpl.java | 92 +-
.../amrmproxy/FederationInterceptor.java | 19 +-
.../containermanager/ContainerManagerImpl.java | 29 +-
.../launcher/ContainerLaunch.java | 5 +-
.../linux/resources/ResourceHandlerModule.java | 15 +
.../TrafficControlBandwidthHandlerImpl.java | 2 +-
.../runtime/DockerLinuxContainerRuntime.java | 95 +-
.../JavaSandboxLinuxContainerRuntime.java | 28 +-
.../runtime/docker/DockerCommandExecutor.java | 73 +-
.../runtime/docker/DockerInspectCommand.java | 16 +-
.../linux/runtime/docker/DockerRmCommand.java | 11 +-
.../localizer/ResourceLocalizationService.java | 87 +-
.../AbstractNodeDescriptorsProvider.java | 197 +
.../nodelabels/AbstractNodeLabelsProvider.java | 149 -
.../ConfigurationNodeAttributesProvider.java | 156 +
.../ConfigurationNodeLabelsProvider.java | 13 +-
.../nodelabels/NodeAttributesProvider.java | 32 +
.../nodelabels/NodeDescriptorsProvider.java | 45 +
.../nodelabels/NodeDescriptorsScriptRunner.java | 84 +
.../nodelabels/NodeLabelsProvider.java | 22 +-
.../ScriptBasedNodeAttributesProvider.java | 158 +
.../ScriptBasedNodeLabelsProvider.java | 126 +-
.../nodemanager/nodelabels/package-info.java | 28 +
.../recovery/NMLeveldbStateStoreService.java | 173 +-
.../recovery/NMStateStoreService.java | 29 +-
.../impl/container-executor.c | 153 +-
.../impl/container-executor.h | 8 +-
.../main/native/container-executor/impl/main.c | 12 +-
.../container-executor/impl/utils/docker-util.c | 5 +-
.../container-executor/impl/utils/docker-util.h | 30 +-
.../test/test-container-executor.c | 147 +
.../server/nodemanager/TestNodeManager.java | 2 +-
.../TestNodeStatusUpdaterForLabels.java | 52 +-
.../TestableFederationInterceptor.java | 5 +-
.../launcher/TestContainerLaunch.java | 81 +
.../runtime/TestDockerContainerRuntime.java | 47 +-
.../docker/TestDockerCommandExecutor.java | 23 +-
.../runtime/docker/TestDockerRmCommand.java | 35 +-
...TestConfigurationNodeAttributesProvider.java | 262 ++
.../TestConfigurationNodeLabelsProvider.java | 26 +-
.../TestScriptBasedNodeAttributesProvider.java | 251 ++
.../TestScriptBasedNodeLabelsProvider.java | 18 +-
.../recovery/NMMemoryStateStoreService.java | 18 +-
.../TestNMLeveldbStateStoreService.java | 269 +-
.../server/resourcemanager/AdminService.java | 106 +
.../server/resourcemanager/ClientRMService.java | 71 +-
.../resourcemanager/DefaultAMSProcessor.java | 3 +-
.../resourcemanager/RMActiveServiceContext.java | 14 +
.../server/resourcemanager/RMAppManager.java | 5 +-
.../yarn/server/resourcemanager/RMContext.java | 5 +
.../server/resourcemanager/RMContextImpl.java | 11 +
.../server/resourcemanager/ResourceManager.java | 14 +-
.../resourcemanager/ResourceTrackerService.java | 31 +-
.../ProportionalCapacityPreemptionPolicy.java | 3 +
.../FileSystemNodeAttributeStore.java | 102 +
.../nodelabels/NodeAttributesManagerImpl.java | 754 ++++
.../nodelabels/NodeAttributesStoreEvent.java | 52 +
.../NodeAttributesStoreEventType.java | 26 +
.../nodelabels/NodeLabelsUtils.java | 19 +
.../server/resourcemanager/rmnode/RMNode.java | 6 +
.../resourcemanager/rmnode/RMNodeImpl.java | 11 +
.../scheduler/SchedulerNode.java | 11 +
.../scheduler/capacity/CapacityScheduler.java | 42 +-
.../allocator/RegularContainerAllocator.java | 3 +-
.../constraint/PlacementConstraintsUtil.java | 148 +-
.../NodeAttributesUpdateSchedulerEvent.java | 41 +
.../scheduler/event/SchedulerEventType.java | 1 +
.../LocalityAppPlacementAllocator.java | 4 +
.../webapp/dao/NodeAttributeInfo.java | 65 +
.../webapp/dao/NodeAttributesInfo.java | 49 +
.../resourcemanager/webapp/dao/NodeInfo.java | 10 +
.../yarn/server/resourcemanager/MockNodes.java | 11 +
.../resourcemanager/NodeAttributeTestUtils.java | 54 +
.../server/resourcemanager/TestAppManager.java | 3 +
.../resourcemanager/TestClientRMService.java | 246 +-
.../resourcemanager/TestRMAdminService.java | 171 +-
.../TestResourceTrackerService.java | 88 +
...alCapacityPreemptionPolicyMockFramework.java | 5 +
...ionalCapacityPreemptionPolicyIntraQueue.java | 90 +-
.../nodelabels/NullRMNodeLabelsManager.java | 7 +
.../TestFileSystemNodeAttributeStore.java | 279 ++
.../nodelabels/TestNodeAttributesManager.java | 364 ++
.../capacity/TestContainerAllocation.java | 68 +
.../scheduler/capacity/TestUtils.java | 20 +-
...stSingleConstraintAppPlacementAllocator.java | 192 +-
.../webapp/TestRMWebServicesNodeLabels.java | 2 +-
.../webapp/TestRMWebServicesNodes.java | 2 +-
.../server/router/clientrm/ClientMethod.java | 71 +
.../DefaultClientRequestInterceptor.java | 25 +
.../clientrm/FederationClientInterceptor.java | 121 +-
.../router/clientrm/RouterClientRMService.java | 28 +
.../router/clientrm/RouterYarnClientUtils.java | 55 +
.../DefaultRMAdminRequestInterceptor.java | 9 +
.../router/rmadmin/RouterRMAdminService.java | 10 +
.../PassThroughClientRequestInterceptor.java | 25 +
.../TestFederationClientInterceptor.java | 37 +-
.../clientrm/TestRouterYarnClientUtils.java | 57 +
.../PassThroughRMAdminRequestInterceptor.java | 9 +
.../reader/TimelineReaderWebServices.java | 4 +-
.../TestTimelineReaderWebServicesBasicAcl.java | 11 +-
.../src/site/markdown/NodeAttributes.md | 156 +
.../site/markdown/PlacementConstraints.md.vm | 2 +
.../main/webapp/app/controllers/application.js | 5 +-
.../app/controllers/yarn-app/components.js | 2 +-
.../controllers/yarn-component-instance/info.js | 5 +-
.../yarn-component-instances/info.js | 3 +-
.../src/main/webapp/app/initializers/loader.js | 3 +
.../src/main/webapp/app/routes/application.js | 4 +-
.../app/routes/yarn-component-instance/info.js | 4 +-
.../app/serializers/yarn-component-instance.js | 1 -
.../webapp/app/serializers/yarn-container.js | 2 +-
.../app/serializers/yarn-service-component.js | 2 +-
.../app/serializers/yarn-timeline-container.js | 2 +-
.../webapp/app/templates/yarn-app/configs.hbs | 7 +-
.../templates/yarn-component-instance/info.hbs | 4 -
pom.xml | 5 +-
1034 files changed, 72254 insertions(+), 47687 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c04e0c0e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c04e0c0e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c04e0c0e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c04e0c0e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournaledEditsCache.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournaledEditsCache.java
index 2693301,0000000..387caa1
mode 100644,000000..100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournaledEditsCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournaledEditsCache.java
@@@ -1,412 -1,0 +1,412 @@@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.qjournal.server;
+
+import com.google.common.annotations.VisibleForTesting;
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.TreeMap;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
+import org.apache.hadoop.util.AutoCloseableLock;
+
+/**
+ * An in-memory cache of edits in their serialized form. This is used to serve
+ * the {@link Journal#getJournaledEdits(long, int)} call, used by the
+ * QJM when {@value DFSConfigKeys#DFS_HA_TAILEDITS_INPROGRESS_KEY} is
+ * enabled.
+ *
+ * <p>When a batch of edits is received by the JournalNode, it is put into this
+ * cache via {@link #storeEdits(byte[], long, long, int)}. Edits must be
+ * stored contiguously; if a batch of edits is stored that does not align with
+ * the previously stored edits, the cache will be cleared before storing new
+ * edits to avoid gaps. This decision is made because gaps are only handled
+ * when in recovery mode, which the cache is not intended to be used for.
+ *
+ * <p>Batches of edits are stored in a {@link TreeMap} mapping the starting
+ * transaction ID of the batch to the data buffer. Upon retrieval, the
+ * relevant data buffers are concatenated together and a header is added
+ * to construct a fully-formed edit data stream.
+ *
+ * <p>The cache is of a limited size capacity determined by
+ * {@value DFSConfigKeys#DFS_JOURNALNODE_EDIT_CACHE_SIZE_KEY}. If the capacity
+ * is exceeded after adding a new batch of edits, batches of edits are removed
+ * until the total size is less than the capacity, starting from the ones
+ * containing the oldest transactions. Transactions range in size, but a
+ * decent rule of thumb is that 200 bytes are needed per transaction. Monitoring
+ * the {@link JournalMetrics#rpcRequestCacheMissAmount} metric is recommended
+ * to determine if the cache is too small; it will indicate both how many
+ * cache misses occurred, and how many more transactions would have been
+ * needed in the cache to serve the request.
+ */
+class JournaledEditsCache {
+
+ private static final int INVALID_LAYOUT_VERSION = 0;
+ private static final long INVALID_TXN_ID = -1;
+
+ /** The capacity, in bytes, of this cache. */
+ private final int capacity;
+
+ /**
+ * Read/write lock pair wrapped in AutoCloseable; these refer to the same
+ * underlying lock.
+ */
+ private final AutoCloseableLock readLock;
+ private final AutoCloseableLock writeLock;
+
+ // ** Start lock-protected fields **
+
+ /**
+ * Stores the actual data as a mapping of the StartTxnId of a batch of edits
+ * to the serialized batch of edits. Stores only contiguous ranges; that is,
+ * the last transaction ID in one batch is always one less than the first
+ * transaction ID in the next batch. Though the map is protected by the lock,
+ * individual data buffers are immutable and can be accessed without locking.
+ */
+ private final NavigableMap<Long, byte[]> dataMap = new TreeMap<>();
+ /** Stores the layout version currently present in the cache. */
+ private int layoutVersion = INVALID_LAYOUT_VERSION;
+ /** Stores the serialized version of the header for the current version. */
+ private ByteBuffer layoutHeader;
+
+ /**
+ * The lowest/highest transaction IDs present in the cache.
+ * {@value INVALID_TXN_ID} if there are no transactions in the cache.
+ */
+ private long lowestTxnId;
+ private long highestTxnId;
+ /**
+ * The lowest transaction ID that was ever present in the cache since last
+ * being reset (i.e. since initialization or since reset due to being out of
+ * sync with the Journal). Until the cache size goes above capacity, this is
+ * equal to lowestTxnId.
+ */
+ private long initialTxnId;
+ /** The current total size of all buffers in this cache. */
+ private int totalSize;
+
+ // ** End lock-protected fields **
+
+ JournaledEditsCache(Configuration conf) {
+ capacity = conf.getInt(DFSConfigKeys.DFS_JOURNALNODE_EDIT_CACHE_SIZE_KEY,
+ DFSConfigKeys.DFS_JOURNALNODE_EDIT_CACHE_SIZE_DEFAULT);
+ if (capacity > 0.9 * Runtime.getRuntime().maxMemory()) {
+ Journal.LOG.warn(String.format("Cache capacity is set at %d bytes but " +
+ "maximum JVM memory is only %d bytes. It is recommended that you " +
+ "decrease the cache size or increase the heap size.",
+ capacity, Runtime.getRuntime().maxMemory()));
+ }
+    Journal.LOG.info("Enabling the journaled edits cache with a capacity " +
+        "of " + capacity + " bytes");
+ ReadWriteLock lock = new ReentrantReadWriteLock(true);
+ readLock = new AutoCloseableLock(lock.readLock());
+ writeLock = new AutoCloseableLock(lock.writeLock());
+ initialize(INVALID_TXN_ID);
+ }
+
+ /**
+ * Fetch the data for edits starting at the specific transaction ID, fetching
+ * up to {@code maxTxns} transactions. Populates a list of output buffers
+ * which contains a serialized version of the edits, and returns the count of
+ * edits contained within the serialized buffers. The serialized edits are
+ * prefixed with a standard edit log header containing information about the
+ * layout version. The transactions returned are guaranteed to have contiguous
+ * transaction IDs.
+ *
+ * If {@code requestedStartTxn} is higher than the highest transaction which
+ * has been added to this cache, a response with an empty buffer and a
+ * transaction count of 0 will be returned. If {@code requestedStartTxn} is
+ * lower than the lowest transaction currently contained in this cache, or no
+ * transactions have yet been added to the cache, an exception will be thrown.
+ *
+ * @param requestedStartTxn The ID of the first transaction to return. If any
+ * transactions are returned, it is guaranteed that
+ * the first one will have this ID.
+ * @param maxTxns The maximum number of transactions to return.
+ * @param outputBuffers A list to populate with output buffers. When
+ * concatenated, these form a full response.
+ * @return The number of transactions contained within the set of output
+ * buffers.
+ * @throws IOException If transactions are requested which cannot be served
+ * by this cache.
+ */
+ int retrieveEdits(long requestedStartTxn, int maxTxns,
+ List<ByteBuffer> outputBuffers) throws IOException {
+ int txnCount = 0;
+
+ try (AutoCloseableLock l = readLock.acquire()) {
+ if (lowestTxnId == INVALID_TXN_ID || requestedStartTxn < lowestTxnId) {
+ throw getCacheMissException(requestedStartTxn);
+ } else if (requestedStartTxn > highestTxnId) {
+ return 0;
+ }
+ outputBuffers.add(layoutHeader);
+ Iterator<Map.Entry<Long, byte[]>> incrBuffIter =
+ dataMap.tailMap(dataMap.floorKey(requestedStartTxn), true)
+ .entrySet().iterator();
+ long prevTxn = requestedStartTxn;
+ byte[] prevBuf = null;
+ // Stop when maximum transactions reached...
+ while ((txnCount < maxTxns) &&
+ // ... or there are no more entries ...
+ (incrBuffIter.hasNext() || prevBuf != null)) {
+ long currTxn;
+ byte[] currBuf;
+ if (incrBuffIter.hasNext()) {
+ Map.Entry<Long, byte[]> ent = incrBuffIter.next();
+ currTxn = ent.getKey();
+ currBuf = ent.getValue();
+ } else {
+ // This accounts for the trailing entry
+ currTxn = highestTxnId + 1;
+ currBuf = null;
+ }
+ if (prevBuf != null) { // True except for the first loop iteration
+ outputBuffers.add(ByteBuffer.wrap(prevBuf));
+ // if prevTxn < requestedStartTxn, the extra transactions will get
+ // removed after the loop, so don't include them in the txn count
+ txnCount += currTxn - Math.max(requestedStartTxn, prevTxn);
+ }
+ prevTxn = currTxn;
+ prevBuf = currBuf;
+ }
+ // Release the lock before doing operations on the buffers (deserializing
+ // to find transaction boundaries, and copying into an output buffer)
+ }
+ // Remove extra leading transactions in the first buffer
+ ByteBuffer firstBuf = outputBuffers.get(1); // 0th is the header
+ firstBuf.position(
+ findTransactionPosition(firstBuf.array(), requestedStartTxn));
+ // Remove trailing transactions in the last buffer if necessary
+ if (txnCount > maxTxns) {
+ ByteBuffer lastBuf = outputBuffers.get(outputBuffers.size() - 1);
+ int limit =
+ findTransactionPosition(lastBuf.array(), requestedStartTxn + maxTxns);
+ lastBuf.limit(limit);
+ txnCount = maxTxns;
+ }
+
+ return txnCount;
+ }
+
+ /**
+ * Store a batch of serialized edits into this cache. Removes old batches
+ * as necessary to keep the total size of the cache below the capacity.
+ * See the class Javadoc for more info.
+ *
+ * This attempts to always handle malformed inputs gracefully rather than
+ * throwing an exception, to allow the rest of the Journal's operations
+ * to proceed normally.
+ *
+ * @param inputData A buffer containing edits in serialized form
+ * @param newStartTxn The txn ID of the first edit in {@code inputData}
+ * @param newEndTxn The txn ID of the last edit in {@code inputData}
+ * @param newLayoutVersion The version of the layout used to serialize
+ * the edits
+ */
+ void storeEdits(byte[] inputData, long newStartTxn, long newEndTxn,
+ int newLayoutVersion) {
+ if (newStartTxn < 0 || newEndTxn < newStartTxn) {
+ Journal.LOG.error(String.format("Attempted to cache data of length %d " +
+ "with newStartTxn %d and newEndTxn %d",
+ inputData.length, newStartTxn, newEndTxn));
+ return;
+ }
+ try (AutoCloseableLock l = writeLock.acquire()) {
+ if (newLayoutVersion != layoutVersion) {
+ try {
+ updateLayoutVersion(newLayoutVersion, newStartTxn);
+ } catch (IOException ioe) {
+ Journal.LOG.error(String.format("Unable to save new edits [%d, %d] " +
+ "due to exception when updating to new layout version %d",
+ newStartTxn, newEndTxn, newLayoutVersion), ioe);
+ return;
+ }
+ } else if (lowestTxnId == INVALID_TXN_ID) {
+ Journal.LOG.info("Initializing edits cache starting from txn ID " +
+ newStartTxn);
+ initialize(newStartTxn);
+ } else if (highestTxnId + 1 != newStartTxn) {
+ // Cache is out of sync; clear to avoid storing noncontiguous regions
+ Journal.LOG.error(String.format("Edits cache is out of sync; " +
+ "looked for next txn id at %d but got start txn id for " +
+ "cache put request at %d. Reinitializing at new request.",
+ highestTxnId + 1, newStartTxn));
+ initialize(newStartTxn);
+ }
+
+ while ((totalSize + inputData.length) > capacity && !dataMap.isEmpty()) {
+ Map.Entry<Long, byte[]> lowest = dataMap.firstEntry();
+ dataMap.remove(lowest.getKey());
+ totalSize -= lowest.getValue().length;
+ }
+ if (inputData.length > capacity) {
+ initialize(INVALID_TXN_ID);
+ Journal.LOG.warn(String.format("A single batch of edits was too " +
+ "large to fit into the cache: startTxn = %d, endTxn = %d, " +
+ "input length = %d. The capacity of the cache (%s) must be " +
+ "increased for it to work properly (current capacity %d)." +
+ "Cache is now empty.",
+ newStartTxn, newEndTxn, inputData.length,
+ DFSConfigKeys.DFS_JOURNALNODE_EDIT_CACHE_SIZE_KEY, capacity));
+ return;
+ }
+ if (dataMap.isEmpty()) {
+ lowestTxnId = newStartTxn;
+ } else {
+ lowestTxnId = dataMap.firstKey();
+ }
+
+ dataMap.put(newStartTxn, inputData);
+ highestTxnId = newEndTxn;
+ totalSize += inputData.length;
+ }
+ }
+
+ /**
+ * Skip through a given stream of edits until the given transaction ID is
+ * found. Return the number of bytes that appear prior to the given
+ * transaction.
+ *
+ * @param buf A buffer containing a stream of serialized edits
+ * @param txnId The transaction ID to search for
+ * @return The number of bytes appearing in {@code buf} <i>before</i>
+ * the start of the transaction with ID {@code txnId}.
+ */
+ private int findTransactionPosition(byte[] buf, long txnId)
+ throws IOException {
+ ByteArrayInputStream bais = new ByteArrayInputStream(buf);
+ FSEditLogLoader.PositionTrackingInputStream tracker =
+ new FSEditLogLoader.PositionTrackingInputStream(bais);
+ FSEditLogOp.Reader reader = FSEditLogOp.Reader.create(
+ new DataInputStream(tracker), tracker, layoutVersion);
+ long previousPos = 0;
+ while (reader.scanOp() < txnId) {
+ previousPos = tracker.getPos();
+ }
+ // tracker is backed by a byte[]; position cannot go above an integer
+ return (int) previousPos;
+ }
+
+ /**
+ * Update the layout version of the cache. This clears out all existing
+ * entries, and populates the new layout version and header for that version.
+ *
+ * @param newLayoutVersion The new layout version to be stored in the cache
+ * @param newStartTxn The new lowest transaction in the cache
+ */
+ private void updateLayoutVersion(int newLayoutVersion, long newStartTxn)
+ throws IOException {
+ StringBuilder logMsg = new StringBuilder()
+ .append("Updating edits cache to use layout version ")
+ .append(newLayoutVersion)
+ .append(" starting from txn ID ")
+ .append(newStartTxn);
+ if (layoutVersion != INVALID_LAYOUT_VERSION) {
+ logMsg.append("; previous version was ").append(layoutVersion)
+ .append("; old entries will be cleared.");
+ }
- Journal.LOG.info(logMsg);
++ Journal.LOG.info(logMsg.toString());
+ initialize(newStartTxn);
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ EditLogFileOutputStream.writeHeader(newLayoutVersion,
+ new DataOutputStream(baos));
+ layoutVersion = newLayoutVersion;
+ layoutHeader = ByteBuffer.wrap(baos.toByteArray());
+ }
+
+ /**
+ * Initialize the cache back to a clear state.
+ *
+ * @param newInitialTxnId The new lowest transaction ID stored in the cache.
+ * This should be {@value INVALID_TXN_ID} if the cache
+ * is to remain empty at this time.
+ */
+ private void initialize(long newInitialTxnId) {
+ dataMap.clear();
+ totalSize = 0;
+ initialTxnId = newInitialTxnId;
+ lowestTxnId = initialTxnId;
+ highestTxnId = INVALID_TXN_ID; // this will be set later
+ }
+
+ /**
+ * Return the underlying data buffer used to store information about the
+ * given transaction ID.
+ *
+ * @param txnId Transaction ID whose containing buffer should be fetched.
+ * @return The data buffer for the transaction
+ */
+ @VisibleForTesting
+ byte[] getRawDataForTests(long txnId) {
+ try (AutoCloseableLock l = readLock.acquire()) {
+ return dataMap.floorEntry(txnId).getValue();
+ }
+ }
+
+ private CacheMissException getCacheMissException(long requestedTxnId) {
+ if (lowestTxnId == INVALID_TXN_ID) {
+ return new CacheMissException(0, "Cache is empty; either it was never " +
+ "written to or the last write overflowed the cache capacity.");
+ } else if (requestedTxnId < initialTxnId) {
+ return new CacheMissException(initialTxnId - requestedTxnId,
+ "Cache started at txn ID %d but requested txns starting at %d.",
+ initialTxnId, requestedTxnId);
+ } else {
+ return new CacheMissException(lowestTxnId - requestedTxnId,
+ "Oldest txn ID available in the cache is %d, but requested txns " +
+ "starting at %d. The cache size (%s) may need to be increased " +
+ "to hold more transactions (currently %d bytes containing %d " +
+ "transactions)", lowestTxnId, requestedTxnId,
+ DFSConfigKeys.DFS_JOURNALNODE_EDIT_CACHE_SIZE_KEY, capacity,
+ highestTxnId - lowestTxnId + 1);
+ }
+ }
+
+ static class CacheMissException extends IOException {
+
+ private static final long serialVersionUID = 0L;
+
+ private final long cacheMissAmount;
+
+ CacheMissException(long cacheMissAmount, String msgFormat,
+ Object... msgArgs) {
+ super(String.format(msgFormat, msgArgs));
+ this.cacheMissAmount = cacheMissAmount;
+ }
+
+ long getCacheMissAmount() {
+ return cacheMissAmount;
+ }
+
+ }
+
+}
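The class Javadoc above describes the cache's core policy: batches of serialized
edits are keyed by their starting transaction ID in a TreeMap, and the batches
holding the oldest transactions are evicted once the configured byte capacity
(DFSConfigKeys.DFS_JOURNALNODE_EDIT_CACHE_SIZE_KEY) is exceeded. As a reading
aid only -- this sketch is not part of the commit, and the class and method
names are illustrative -- the eviction strategy in isolation looks like this:

import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

// Minimal, self-contained sketch of the size-bounded eviction performed by
// JournaledEditsCache.storeEdits() above. Illustrative only.
class SizeBoundedBatchCache {
  private final NavigableMap<Long, byte[]> dataMap = new TreeMap<>();
  private final int capacity; // bytes
  private int totalSize;

  SizeBoundedBatchCache(int capacity) {
    this.capacity = capacity;
  }

  void store(long startTxnId, byte[] batch) {
    // Evict the batches holding the oldest transactions until the new one fits.
    while (totalSize + batch.length > capacity && !dataMap.isEmpty()) {
      Map.Entry<Long, byte[]> oldest = dataMap.pollFirstEntry();
      totalSize -= oldest.getValue().length;
    }
    if (batch.length > capacity) {
      return; // a single oversized batch cannot be cached at all
    }
    dataMap.put(startTxnId, batch);
    totalSize += batch.length;
  }

  public static void main(String[] args) {
    SizeBoundedBatchCache cache = new SizeBoundedBatchCache(16);
    cache.store(1L, new byte[10]);
    cache.store(6L, new byte[10]); // evicts the batch starting at txn 1
    System.out.println("oldest cached start txn: " + cache.dataMap.firstKey());
  }
}

The real class additionally enforces contiguity (reinitializing on out-of-sync
puts), tracks the lowest and highest cached txn IDs, and guards the map with a
fair read/write lock, as storeEdits() above shows.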
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c04e0c0e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c04e0c0e/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c04e0c0e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
----------------------------------------------------------------------
[05/18] hadoop git commit: HADOOP-15764. [JDK10] Migrate from
sun.net.dns.ResolverConfiguration to the replacement. Contributed by Akira
Ajisaka.
Posted by sh...@apache.org.
HADOOP-15764. [JDK10] Migrate from sun.net.dns.ResolverConfiguration to the replacement. Contributed by Akira Ajisaka.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/429a07e0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/429a07e0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/429a07e0
Branch: refs/heads/HDFS-12943
Commit: 429a07e08c8c919b1679c0a80df73d147d95e8a6
Parents: 3da94a3
Author: Ewan Higgs <ew...@wdc.com>
Authored: Thu Sep 20 15:13:55 2018 +0200
Committer: Ewan Higgs <ew...@wdc.com>
Committed: Thu Sep 20 15:13:55 2018 +0200
----------------------------------------------------------------------
.../hadoop-client-minicluster/pom.xml | 17 ++++-------------
.../hadoop-client-runtime/pom.xml | 11 +++++++++++
hadoop-common-project/hadoop-common/pom.xml | 5 +++++
.../org/apache/hadoop/security/SecurityUtil.java | 19 +++++++++++++------
4 files changed, 33 insertions(+), 19 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/429a07e0/hadoop-client-modules/hadoop-client-minicluster/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
index ea8d680..70fca8a 100644
--- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml
+++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
@@ -318,6 +318,10 @@
<groupId>commons-net</groupId>
<artifactId>commons-net</artifactId>
</exclusion>
+ <exclusion>
+ <groupId>dnsjava</groupId>
+ <artifactId>dnsjava</artifactId>
+ </exclusion>
</exclusions>
</dependency>
<!-- Add optional runtime dependency on the in-development timeline server module
@@ -773,19 +777,6 @@
<exclude>ehcache-core.xsd</exclude>
</excludes>
</filter>
-
- <!-- remove utility classes which are not required from
- dnsjava -->
- <filter>
- <artifact>dnsjava:dnsjava</artifact>
- <excludes>
- <excldue>dig*</excldue>
- <exclude>jnamed*</exclude>
- <exlcude>lookup*</exlcude>
- <exclude>update*</exclude>
- </excludes>
- </filter>
-
</filters>
<!-- relocate classes from mssql-jdbc -->
http://git-wip-us.apache.org/repos/asf/hadoop/blob/429a07e0/hadoop-client-modules/hadoop-client-runtime/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml b/hadoop-client-modules/hadoop-client-runtime/pom.xml
index 532fae9..bfa6c15 100644
--- a/hadoop-client-modules/hadoop-client-runtime/pom.xml
+++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml
@@ -212,6 +212,17 @@
<exclude>ccache.txt</exclude>
</excludes>
</filter>
+ <!-- remove utility classes which are not required from
+ dnsjava -->
+ <filter>
+ <artifact>dnsjava:dnsjava</artifact>
+ <excludes>
+ <exclude>dig*</exclude>
+ <exclude>jnamed*</exclude>
+ <exclude>lookup*</exclude>
+ <exclude>update*</exclude>
+ </excludes>
+ </filter>
</filters>
<relocations>
<relocation>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/429a07e0/hadoop-common-project/hadoop-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index 695dcde..1e6da92 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -324,6 +324,11 @@
<artifactId>mockwebserver</artifactId>
<scope>test</scope>
</dependency>
+ <dependency>
+ <groupId>dnsjava</groupId>
+ <artifactId>dnsjava</artifactId>
+ <scope>compile</scope>
+ </dependency>
</dependencies>
<build>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/429a07e0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
index 0de334a..9fea535 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
@@ -27,6 +27,7 @@ import java.net.URI;
import java.net.UnknownHostException;
import java.security.PrivilegedAction;
import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
@@ -52,8 +53,9 @@ import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.ZKUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-//this will need to be replaced someday when there is a suitable replacement
-import sun.net.dns.ResolverConfiguration;
+import org.xbill.DNS.Name;
+import org.xbill.DNS.ResolverConfig;
+
import com.google.common.annotations.VisibleForTesting;
import com.google.common.net.InetAddresses;
@@ -584,10 +586,15 @@ public final class SecurityUtil {
* hadoop.security.token.service.use_ip=false
*/
protected static class QualifiedHostResolver implements HostResolver {
- @SuppressWarnings("unchecked")
- private List<String> searchDomains =
- ResolverConfiguration.open().searchlist();
-
+ private List<String> searchDomains;
+ {
+ ResolverConfig resolverConfig = ResolverConfig.getCurrentConfig();
+ searchDomains = new ArrayList<>();
+ for (Name name : resolverConfig.searchPath()) {
+ searchDomains.add(name.toString());
+ }
+ }
+
/**
* Create an InetAddress with a fully qualified hostname of the given
* hostname. InetAddress does not qualify an incomplete hostname that
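As a reading aid only -- not part of the commit -- the replacement code path
above can be exercised standalone with dnsjava on the classpath. This sketch
mirrors the new QualifiedHostResolver initializer block; the class name is
illustrative:

import java.util.ArrayList;
import java.util.List;
import org.xbill.DNS.Name;
import org.xbill.DNS.ResolverConfig;

// Prints the resolver search domains the same way the patched
// SecurityUtil.QualifiedHostResolver now collects them.
public class PrintSearchDomains {
  public static void main(String[] args) {
    List<String> searchDomains = new ArrayList<>();
    for (Name name : ResolverConfig.getCurrentConfig().searchPath()) {
      searchDomains.add(name.toString());
    }
    System.out.println("search domains: " + searchDomains);
  }
}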
[18/18] hadoop git commit: HDFS-13749. [SBN read] Use
getServiceStatus to discover observer namenodes. Contributed by Chao Sun.
Posted by sh...@apache.org.
HDFS-13749. [SBN read] Use getServiceStatus to discover observer namenodes. Contributed by Chao Sun.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/741547e1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/741547e1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/741547e1
Branch: refs/heads/HDFS-12943
Commit: 741547e1687c186e186d05be09e7d30dfea7226f
Parents: a1f9c00
Author: Erik Krogen <xk...@apache.org>
Authored: Thu Sep 20 13:27:58 2018 -0700
Committer: Konstantin V Shvachko <sh...@apache.org>
Committed: Fri Sep 21 18:31:40 2018 -0700
----------------------------------------------------------------------
.../hadoop/hdfs/NameNodeProxiesClient.java | 47 ++++++++-
.../ha/AbstractNNFailoverProxyProvider.java | 36 +++++--
.../namenode/ha/IPFailoverProxyProvider.java | 2 +-
.../namenode/ha/ObserverReadProxyProvider.java | 49 +--------
.../ha/TestObserverReadProxyProvider.java | 105 ++++++++++++++-----
5 files changed, 151 insertions(+), 88 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/741547e1/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java
index 284e4ef..f90d671 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java
@@ -25,12 +25,16 @@ import java.net.InetSocketAddress;
import java.net.URI;
import java.util.HashMap;
import java.util.Map;
+import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.ha.HAServiceProtocol;
+import org.apache.hadoop.ha.protocolPB.HAServiceProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.namenode.ha.ClientHAProxyFactory;
import org.apache.hadoop.hdfs.server.namenode.ha.HAProxyFactory;
import org.apache.hadoop.ipc.AlignmentContext;
+import org.apache.hadoop.ipc.Client;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -62,13 +66,14 @@ import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
/**
- * Create proxy objects with {@link ClientProtocol} to communicate with a remote
- * NN. Generally use {@link NameNodeProxiesClient#createProxyWithClientProtocol(
+ * Create proxy objects with {@link ClientProtocol} and
+ * {@link HAServiceProtocol} to communicate with a remote NN. For the former,
+ * generally use {@link NameNodeProxiesClient#createProxyWithClientProtocol(
* Configuration, URI, AtomicBoolean)}, which will create either an HA- or
* non-HA-enabled client proxy as appropriate.
*
- * For creating proxy objects with other protocols, please see
- * {@link NameNodeProxies#createProxy(Configuration, URI, Class)}.
+ * For creating proxy objects with other protocols, please see the server-side
+ * counterpart {@code NameNodeProxies#createProxy}
*/
@InterfaceAudience.Private
public class NameNodeProxiesClient {
@@ -76,6 +81,11 @@ public class NameNodeProxiesClient {
private static final Logger LOG = LoggerFactory.getLogger(
NameNodeProxiesClient.class);
+ /** Maximum # of retries for HAProxy with HAServiceProtocol. */
+ private static final int MAX_RETRIES = 3;
+ /** Initial retry delay for HAProxy with HAServiceProtocol. */
+ private static final int DELAY_MILLISECONDS = 200;
+
/**
* Wrapper for a client proxy as well as its associated service ID.
* This is simply used as a tuple-like return type for created NN proxy.
@@ -119,7 +129,6 @@ public class NameNodeProxiesClient {
* @return an object containing both the proxy and the associated
* delegation token service it corresponds to
* @throws IOException if there is an error creating the proxy
- * @see {@link NameNodeProxies#createProxy(Configuration, URI, Class)}.
*/
public static ProxyAndInfo<ClientProtocol> createProxyWithClientProtocol(
Configuration conf, URI nameNodeUri, AtomicBoolean fallbackToSimpleAuth)
@@ -343,6 +352,34 @@ public class NameNodeProxiesClient {
fallbackToSimpleAuth, null);
}
+ /**
+ * Creates a non-HA proxy object with {@link HAServiceProtocol} to the
+ * given NameNode address, using the provided configuration. The proxy will
+ * use the RPC timeout configuration specified via {@link
+ * org.apache.hadoop.fs.CommonConfigurationKeys#IPC_CLIENT_RPC_TIMEOUT_KEY}.
+ * Upon failures, this will retry up to certain times with {@link RetryProxy}.
+ *
+ * @param address the NameNode address
+ * @param conf the configuration to be used
+ * @return a non-HA proxy with {@link HAServiceProtocol}.
+ */
+ public static HAServiceProtocol createNonHAProxyWithHAServiceProtocol(
+ InetSocketAddress address, Configuration conf) throws IOException {
+ RetryPolicy timeoutPolicy = RetryPolicies.exponentialBackoffRetry(
+ MAX_RETRIES, DELAY_MILLISECONDS, TimeUnit.MILLISECONDS);
+
+ HAServiceProtocol proxy =
+ new HAServiceProtocolClientSideTranslatorPB(
+ address, conf, NetUtils.getDefaultSocketFactory(conf),
+ Client.getRpcTimeout(conf));
+ return (HAServiceProtocol) RetryProxy.create(
+ HAServiceProtocol.class,
+ new DefaultFailoverProxyProvider<>(HAServiceProtocol.class, proxy),
+ new HashMap<>(),
+ timeoutPolicy
+ );
+ }
+
public static ClientProtocol createProxyWithAlignmentContext(
InetSocketAddress address, Configuration conf, UserGroupInformation ugi,
boolean withRetries, AtomicBoolean fallbackToSimpleAuth,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/741547e1/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/AbstractNNFailoverProxyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/AbstractNNFailoverProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/AbstractNNFailoverProxyProvider.java
index 32edb36..1b5ad16 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/AbstractNNFailoverProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/AbstractNNFailoverProxyProvider.java
@@ -28,11 +28,14 @@ import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
+import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.ha.HAServiceProtocol;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.HAUtilClient;
+import org.apache.hadoop.hdfs.NameNodeProxiesClient;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.io.retry.FailoverProxyProvider;
import org.apache.hadoop.security.UserGroupInformation;
@@ -119,23 +122,44 @@ public abstract class AbstractNNFailoverProxyProvider<T> implements
*/
private HAServiceState cachedState;
- public NNProxyInfo(InetSocketAddress address) {
+ /** Proxy for getting HA service status from the given NameNode. */
+ private HAServiceProtocol serviceProxy;
+
+ public NNProxyInfo(InetSocketAddress address, Configuration conf) {
super(null, address.toString());
this.address = address;
+ try {
+ serviceProxy = NameNodeProxiesClient
+ .createNonHAProxyWithHAServiceProtocol(address, conf);
+ } catch (IOException ioe) {
+ LOG.error("Failed to create HAServiceProtocol proxy to NameNode" +
+ " at {}", address, ioe);
+ throw new RuntimeException(ioe);
+ }
}
public InetSocketAddress getAddress() {
return address;
}
- public void setCachedState(HAServiceState state) {
- cachedState = state;
+ public void refreshCachedState() {
+ try {
+ cachedState = serviceProxy.getServiceStatus().getState();
+ } catch (IOException e) {
+ LOG.warn("Failed to connect to {}. Setting cached state to Standby",
+ address, e);
+ cachedState = HAServiceState.STANDBY;
+ }
}
public HAServiceState getCachedState() {
return cachedState;
}
+ @VisibleForTesting
+ public void setServiceProxyForTesting(HAServiceProtocol proxy) {
+ this.serviceProxy = proxy;
+ }
}
@Override
@@ -153,8 +177,8 @@ public abstract class AbstractNNFailoverProxyProvider<T> implements
pi.proxy = factory.createProxy(conf,
pi.getAddress(), xface, ugi, false, getFallbackToSimpleAuth());
} catch (IOException ioe) {
- LOG.error("{} Failed to create RPC proxy to NameNode",
- this.getClass().getSimpleName(), ioe);
+ LOG.error("{} Failed to create RPC proxy to NameNode at {}",
+ this.getClass().getSimpleName(), pi.address, ioe);
throw new RuntimeException(ioe);
}
}
@@ -178,7 +202,7 @@ public abstract class AbstractNNFailoverProxyProvider<T> implements
Collection<InetSocketAddress> addressesOfNns = addressesInNN.values();
for (InetSocketAddress address : addressesOfNns) {
- proxies.add(new NNProxyInfo<T>(address));
+ proxies.add(new NNProxyInfo<T>(address, conf));
}
// Randomize the list to prevent all clients pointing to the same one
boolean randomized = getRandomOrder(conf, uri);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/741547e1/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/IPFailoverProxyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/IPFailoverProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/IPFailoverProxyProvider.java
index e703740..8062e79 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/IPFailoverProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/IPFailoverProxyProvider.java
@@ -48,7 +48,7 @@ public class IPFailoverProxyProvider<T> extends
public IPFailoverProxyProvider(Configuration conf, URI uri,
Class<T> xface, HAProxyFactory<T> factory) {
super(conf, uri, xface, factory);
- this.nnProxyInfo = new NNProxyInfo<T>(DFSUtilClient.getNNAddress(uri));
+ this.nnProxyInfo = new NNProxyInfo<>(DFSUtilClient.getNNAddress(uri), conf);
}
@Override
http://git-wip-us.apache.org/repos/asf/hadoop/blob/741547e1/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
index e819282..690ee0b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
@@ -27,12 +27,10 @@ import java.net.URI;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.ClientGSIContext;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.io.retry.AtMostOnce;
import org.apache.hadoop.io.retry.Idempotent;
import org.apache.hadoop.io.retry.RetryPolicies;
@@ -40,8 +38,6 @@ import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
import org.apache.hadoop.ipc.AlignmentContext;
import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.ipc.StandbyException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -181,49 +177,6 @@ public class ObserverReadProxyProvider<T extends ClientProtocol>
return lastProxy;
}
- private static <T extends ClientProtocol> HAServiceState getServiceState(
- NNProxyInfo<T> pi) {
- // TODO: should introduce new ClientProtocol method to verify the
- // underlying service state, which does not require superuser access
- // The is a workaround
- IOException ioe = null;
- try {
- // Verify write access first
- pi.proxy.reportBadBlocks(new LocatedBlock[0]);
- return HAServiceState.ACTIVE; // Only active NameNode allows write
- } catch (RemoteException re) {
- IOException sbe = re.unwrapRemoteException(StandbyException.class);
- if (!(sbe instanceof StandbyException)) {
- ioe = re;
- }
- } catch (IOException e) {
- ioe = e;
- }
- if (ioe != null) {
- LOG.warn("Failed to connect to {}", pi.getAddress(), ioe);
- return HAServiceState.STANDBY; // Just assume standby in this case
- // Anything besides observer is fine
- }
- // Verify read access
- // For now we assume only Observer nodes allow reads
- // Stale reads on StandbyNode should be turned off
- try {
- pi.proxy.checkAccess("/", FsAction.READ);
- return HAServiceState.OBSERVER;
- } catch (RemoteException re) {
- IOException sbe = re.unwrapRemoteException(StandbyException.class);
- if (!(sbe instanceof StandbyException)) {
- ioe = re;
- }
- } catch (IOException e) {
- ioe = e;
- }
- if (ioe != null) {
- LOG.warn("Failed to connect to {}", pi.getAddress(), ioe);
- }
- return HAServiceState.STANDBY;
- }
-
/**
* Return the currently used proxy. If there is none, first calls
* {@link #changeProxy(NNProxyInfo)} to initialize one.
@@ -254,7 +207,7 @@ public class ObserverReadProxyProvider<T extends ClientProtocol>
currentProxy = null;
currentIndex = (currentIndex + 1) % nameNodeProxies.size();
currentProxy = createProxyIfNeeded(nameNodeProxies.get(currentIndex));
- currentProxy.setCachedState(getServiceState(currentProxy));
+ currentProxy.refreshCachedState();
LOG.debug("Changed current proxy from {} to {}",
initial == null ? "none" : initial.proxyInfo,
currentProxy.proxyInfo);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/741547e1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverReadProxyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverReadProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverReadProxyProvider.java
index 4d5bc13..3f56c96 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverReadProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverReadProxyProvider.java
@@ -22,10 +22,13 @@ import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.util.HashMap;
+import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.ha.HAServiceProtocol;
+import org.apache.hadoop.ha.HAServiceStatus;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -38,10 +41,12 @@ import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
+import static org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
-
+import static org.mockito.Mockito.when;
/**
* Tests for {@link ObserverReadProxyProvider} under various configurations of
@@ -56,7 +61,7 @@ public class TestObserverReadProxyProvider {
private Configuration conf;
private ObserverReadProxyProvider<ClientProtocol> proxyProvider;
- private ClientProtocolAnswer[] namenodeAnswers;
+ private NameNodeAnswer[] namenodeAnswers;
private String[] namenodeAddrs;
@Before
@@ -70,32 +75,53 @@ public class TestObserverReadProxyProvider {
private void setupProxyProvider(int namenodeCount) throws Exception {
String[] namenodeIDs = new String[namenodeCount];
namenodeAddrs = new String[namenodeCount];
- namenodeAnswers = new ClientProtocolAnswer[namenodeCount];
+ namenodeAnswers = new NameNodeAnswer[namenodeCount];
ClientProtocol[] proxies = new ClientProtocol[namenodeCount];
Map<String, ClientProtocol> proxyMap = new HashMap<>();
+ HAServiceProtocol[] serviceProxies = new HAServiceProtocol[namenodeCount];
+ Map<String, HAServiceProtocol> serviceProxyMap = new HashMap<>();
for (int i = 0; i < namenodeCount; i++) {
namenodeIDs[i] = "nn" + i;
namenodeAddrs[i] = "namenode" + i + ".test:8020";
conf.set(HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns +
"." + namenodeIDs[i], namenodeAddrs[i]);
- namenodeAnswers[i] = new ClientProtocolAnswer();
+ namenodeAnswers[i] = new NameNodeAnswer();
proxies[i] = mock(ClientProtocol.class);
- doWrite(Mockito.doAnswer(namenodeAnswers[i]).when(proxies[i]));
- doRead(Mockito.doAnswer(namenodeAnswers[i]).when(proxies[i]));
+ doWrite(Mockito.doAnswer(namenodeAnswers[i].clientAnswer)
+ .when(proxies[i]));
+ doRead(Mockito.doAnswer(namenodeAnswers[i].clientAnswer)
+ .when(proxies[i]));
+ serviceProxies[i] = mock(HAServiceProtocol.class);
+ Mockito.doAnswer(namenodeAnswers[i].serviceAnswer)
+ .when(serviceProxies[i]).getServiceStatus();
proxyMap.put(namenodeAddrs[i], proxies[i]);
+ serviceProxyMap.put(namenodeAddrs[i], serviceProxies[i]);
}
conf.set(HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + ns,
Joiner.on(",").join(namenodeIDs));
- proxyProvider = new ObserverReadProxyProvider<>(conf, nnURI,
- ClientProtocol.class, new ClientHAProxyFactory<ClientProtocol>() {
+ proxyProvider = new ObserverReadProxyProvider<ClientProtocol>(conf, nnURI,
+ ClientProtocol.class,
+ new ClientHAProxyFactory<ClientProtocol>() {
+ @Override
+ public ClientProtocol createProxy(Configuration config,
+ InetSocketAddress nnAddr, Class<ClientProtocol> xface,
+ UserGroupInformation ugi, boolean withRetries,
+ AtomicBoolean fallbackToSimpleAuth) {
+ return proxyMap.get(nnAddr.toString());
+ }
+ }) {
@Override
- public ClientProtocol createProxy(Configuration conf,
- InetSocketAddress nnAddr, Class<ClientProtocol> xface,
- UserGroupInformation ugi, boolean withRetries,
- AtomicBoolean fallbackToSimpleAuth) {
- return proxyMap.get(nnAddr.toString());
+ protected List<NNProxyInfo<ClientProtocol>> getProxyAddresses(
+ URI uri, String addressKey) {
+ List<NNProxyInfo<ClientProtocol>> nnProxies =
+ super.getProxyAddresses(uri, addressKey);
+ for (NNProxyInfo<ClientProtocol> nnProxy : nnProxies) {
+ String addressStr = nnProxy.getAddress().toString();
+ nnProxy.setServiceProxyForTesting(serviceProxyMap.get(addressStr));
+ }
+ return nnProxies;
}
- });
+ };
proxyProvider.setObserverReadEnabled(true);
}
@@ -275,39 +301,62 @@ public class TestObserverReadProxyProvider {
}
/**
- * An {@link Answer} used for mocking of a {@link ClientProtocol}. Setting
- * the state or unreachability of this Answer will make the linked
- * ClientProtocol respond as if it was communicating with a NameNode of
- * the corresponding state. It is in Standby state by default.
+ * An {@link Answer} used for mocking of {@link ClientProtocol} and
+ * {@link HAServiceProtocol}. Setting the state or unreachability of this
+ * Answer will make the linked ClientProtocol respond as if it was
+ * communicating with a NameNode of the corresponding state. It is in Standby
+ * state by default.
*/
- private static class ClientProtocolAnswer implements Answer<Void> {
+ private static class NameNodeAnswer {
private volatile boolean unreachable = false;
// Standby state by default
private volatile boolean allowWrites = false;
private volatile boolean allowReads = false;
- @Override
- public Void answer(InvocationOnMock invocationOnMock) throws Throwable {
- if (unreachable) {
- throw new IOException("Unavailable");
+ private ClientProtocolAnswer clientAnswer = new ClientProtocolAnswer();
+ private HAServiceProtocolAnswer serviceAnswer =
+ new HAServiceProtocolAnswer();
+
+ private class HAServiceProtocolAnswer implements Answer<HAServiceStatus> {
+ @Override
+ public HAServiceStatus answer(InvocationOnMock invocation)
+ throws Throwable {
+ HAServiceStatus status = mock(HAServiceStatus.class);
+ if (allowReads && allowWrites) {
+ when(status.getState()).thenReturn(HAServiceState.ACTIVE);
+ } else if (allowReads) {
+ when(status.getState()).thenReturn(HAServiceState.OBSERVER);
+ } else {
+ when(status.getState()).thenReturn(HAServiceState.STANDBY);
+ }
+ return status;
}
- switch (invocationOnMock.getMethod().getName()) {
+ }
+
+ private class ClientProtocolAnswer implements Answer<Void> {
+ @Override
+ public Void answer(InvocationOnMock invocationOnMock) throws Throwable {
+ if (unreachable) {
+ throw new IOException("Unavailable");
+ }
+ switch (invocationOnMock.getMethod().getName()) {
case "reportBadBlocks":
if (!allowWrites) {
- throw new RemoteException(StandbyException.class.getCanonicalName(),
- "No writes!");
+ throw new RemoteException(
+ StandbyException.class.getCanonicalName(), "No writes!");
}
return null;
case "checkAccess":
if (!allowReads) {
- throw new RemoteException(StandbyException.class.getCanonicalName(),
- "No reads!");
+ throw new RemoteException(
+ StandbyException.class.getCanonicalName(), "No reads!");
}
return null;
default:
throw new IllegalArgumentException(
"Only reportBadBlocks and checkAccess supported!");
+ }
}
}
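For context, the gist of this change: ObserverReadProxyProvider no longer probes a NameNode's state indirectly with reportBadBlocks/checkAccess; each NNProxyInfo now carries an HAServiceProtocol proxy and asks the NameNode directly via getServiceStatus(). Below is a minimal sketch (not part of the commit) of that discovery pattern; the names ObserverProbe, probeState, and isObserver are illustrative only, and it assumes the client classes added by this patch are on the classpath.

import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.HAServiceProtocol;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.NameNodeProxiesClient;

public class ObserverProbe {
  /**
   * Ask a NameNode for its HA state over HAServiceProtocol, defaulting to
   * STANDBY when unreachable -- the same conservative fallback used by
   * NNProxyInfo#refreshCachedState in this commit.
   */
  static HAServiceState probeState(InetSocketAddress addr, Configuration conf) {
    try {
      HAServiceProtocol proxy = NameNodeProxiesClient
          .createNonHAProxyWithHAServiceProtocol(addr, conf);
      return proxy.getServiceStatus().getState();
    } catch (IOException e) {
      return HAServiceState.STANDBY; // skip this node; anything non-observer is fine
    }
  }

  static boolean isObserver(InetSocketAddress addr, Configuration conf) {
    return probeState(addr, conf) == HAServiceState.OBSERVER;
  }
}

Defaulting to STANDBY on failure mirrors the patch's own choice: a misclassified node is simply skipped by the proxy provider rather than read from.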
---------------------------------------------------------------------
[06/18] hadoop git commit: YARN-8801. Fixed header comments for docker utility functions. Contributed by Zian Chen
Posted by sh...@apache.org.
YARN-8801. Fixed header comments for docker utility functions. Contributed by Zian Chen
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa4bd493
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa4bd493
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa4bd493
Branch: refs/heads/HDFS-12943
Commit: aa4bd493c309f09f8f2ea7449aa33c8b641fb8d2
Parents: 429a07e
Author: Eric Yang <ey...@apache.org>
Authored: Thu Sep 20 13:08:59 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Thu Sep 20 13:08:59 2018 -0400
----------------------------------------------------------------------
.../container-executor/impl/utils/docker-util.h | 30 +++++++-------------
1 file changed, 10 insertions(+), 20 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa4bd493/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.h
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.h
index 278dc53..7b7322d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.h
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.h
@@ -81,8 +81,7 @@ char *get_docker_binary(const struct configuration *conf);
* Get the Docker command line string. The function will inspect the params file to determine the command to be run.
* @param command_file File containing the params for the Docker command
* @param conf Configuration struct containing the container-executor.cfg details
- * @param out Buffer to fill with the Docker command
- * @param outlen Size of the output buffer
+ * @param args Buffer to construct argv
* @return Return code with 0 indicating success and non-zero codes indicating error
*/
int get_docker_command(const char* command_file, const struct configuration* conf, args *args);
@@ -98,8 +97,7 @@ int get_use_entry_point_flag();
* inspect command.
* @param command_file File containing the params for the Docker inspect command
* @param conf Configuration struct containing the container-executor.cfg details
- * @param out Buffer to fill with the inspect command
- * @param outlen Size of the output buffer
+ * @param args Buffer to construct argv
* @return Return code with 0 indicating success and non-zero codes indicating error
*/
int get_docker_inspect_command(const char* command_file, const struct configuration* conf, args *args);
@@ -108,8 +106,7 @@ int get_docker_inspect_command(const char* command_file, const struct configurat
* Get the Docker load command line string. The function will verify that the params file is meant for the load command.
* @param command_file File containing the params for the Docker load command
* @param conf Configuration struct containing the container-executor.cfg details
- * @param out Buffer to fill with the load command
- * @param outlen Size of the output buffer
+ * @param args Buffer to construct argv
* @return Return code with 0 indicating success and non-zero codes indicating error
*/
int get_docker_load_command(const char* command_file, const struct configuration* conf, args *args);
@@ -118,8 +115,7 @@ int get_docker_load_command(const char* command_file, const struct configuration
* Get the Docker pull command line string. The function will verify that the params file is meant for the pull command.
* @param command_file File containing the params for the Docker pull command
* @param conf Configuration struct containing the container-executor.cfg details
- * @param out Buffer to fill with the pull command
- * @param outlen Size of the output buffer
+ * @param args Buffer to construct argv
* @return Return code with 0 indicating success and non-zero codes indicating error
*/
int get_docker_pull_command(const char* command_file, const struct configuration* conf, args *args);
@@ -128,8 +124,7 @@ int get_docker_pull_command(const char* command_file, const struct configuration
* Get the Docker rm command line string. The function will verify that the params file is meant for the rm command.
* @param command_file File containing the params for the Docker rm command
* @param conf Configuration struct containing the container-executor.cfg details
- * @param out Buffer to fill with the rm command
- * @param outlen Size of the output buffer
+ * @param args Buffer to construct argv
* @return Return code with 0 indicating success and non-zero codes indicating error
*/
int get_docker_rm_command(const char* command_file, const struct configuration* conf, args *args);
@@ -138,8 +133,7 @@ int get_docker_rm_command(const char* command_file, const struct configuration*
* Get the Docker run command line string. The function will verify that the params file is meant for the run command.
* @param command_file File containing the params for the Docker run command
* @param conf Configuration struct containing the container-executor.cfg details
- * @param out Buffer to fill with the run command
- * @param outlen Size of the output buffer
+ * @param args Buffer to construct argv
* @return Return code with 0 indicating success and non-zero codes indicating error
*/
int get_docker_run_command(const char* command_file, const struct configuration* conf, args *args);
@@ -148,8 +142,7 @@ int get_docker_run_command(const char* command_file, const struct configuration*
* Get the Docker stop command line string. The function will verify that the params file is meant for the stop command.
* @param command_file File containing the params for the Docker stop command
* @param conf Configuration struct containing the container-executor.cfg details
- * @param out Buffer to fill with the stop command
- * @param outlen Size of the output buffer
+ * @param args Buffer to construct argv
* @return Return code with 0 indicating success and non-zero codes indicating error
*/
int get_docker_stop_command(const char* command_file, const struct configuration* conf, args *args);
@@ -158,8 +151,7 @@ int get_docker_stop_command(const char* command_file, const struct configuration
* Get the Docker kill command line string. The function will verify that the params file is meant for the kill command.
* @param command_file File containing the params for the Docker kill command
* @param conf Configuration struct containing the container-executor.cfg details
- * @param out Buffer to fill with the kill command
- * @param outlen Size of the output buffer
+ * @param args Buffer to construct argv
* @return Return code with 0 indicating success and non-zero codes indicating error
*/
int get_docker_kill_command(const char* command_file, const struct configuration* conf, args *args);
@@ -169,8 +161,7 @@ int get_docker_kill_command(const char* command_file, const struct configuration
* params file is meant for the volume command.
* @param command_file File containing the params for the Docker volume command
* @param conf Configuration struct containing the container-executor.cfg details
- * @param out Buffer to fill with the volume command
- * @param outlen Size of the output buffer
+ * @param args Buffer to construct argv
* @return Return code with 0 indicating success and non-zero codes indicating error
*/
int get_docker_volume_command(const char *command_file, const struct configuration *conf, args *args);
@@ -179,8 +170,7 @@ int get_docker_volume_command(const char *command_file, const struct configurati
* Get the Docker start command line string. The function will verify that the params file is meant for the start command.
* @param command_file File containing the params for the Docker start command
* @param conf Configuration struct containing the container-executor.cfg details
- * @param out Buffer to fill with the start command
- * @param outlen Size of the output buffer
+ * @param args Buffer to construct argv
* @return Return code with 0 indicating success and non-zero codes indicating error
*/
int get_docker_start_command(const char* command_file, const struct configuration* conf, args *args);
---------------------------------------------------------------------
[10/18] hadoop git commit: HDDS-514. Clean Unregister JMX upon SCMConnectionManager#close. Contributed by Xiaoyu Yao.
Posted by sh...@apache.org.
HDDS-514. Clean Unregister JMX upon SCMConnectionManager#close. Contributed by Xiaoyu Yao.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/524f7cd3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/524f7cd3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/524f7cd3
Branch: refs/heads/HDFS-12943
Commit: 524f7cd354e0683c9ec61fdbce344ef79b841728
Parents: 096a716
Author: Anu Engineer <ae...@apache.org>
Authored: Thu Sep 20 12:21:34 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Thu Sep 20 12:21:34 2018 -0700
----------------------------------------------------------------------
.../container/common/statemachine/SCMConnectionManager.java | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/524f7cd3/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
index 85fb580..775a91a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
@@ -59,7 +59,7 @@ public class SCMConnectionManager
private final int rpcTimeout;
private final Configuration conf;
- private final ObjectName jmxBean;
+ private ObjectName jmxBean;
public SCMConnectionManager(Configuration conf) {
this.mapLock = new ReentrantReadWriteLock();
@@ -191,7 +191,10 @@ public class SCMConnectionManager
public void close() throws IOException {
getValues().forEach(endpointStateMachine
-> IOUtils.cleanupWithLogger(LOG, endpointStateMachine));
- MBeans.unregister(jmxBean);
+ if (jmxBean != null) {
+ MBeans.unregister(jmxBean);
+ jmxBean = null;
+ }
}
@Override
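The fix above boils down to making close() idempotent: unregister the MBean once, then drop the reference so later calls are no-ops. A minimal sketch of that register/unregister pairing follows, assuming Hadoop's org.apache.hadoop.metrics2.util.MBeans helper; JmxLifecycle, its MBean interface, and the service/bean names are hypothetical.

import javax.management.ObjectName;
import org.apache.hadoop.metrics2.util.MBeans;

interface JmxLifecycleMBean {
  int getConnectionCount();
}

public class JmxLifecycle implements JmxLifecycleMBean, AutoCloseable {
  private ObjectName jmxBean; // non-final so close() can drop the reference

  public JmxLifecycle() {
    // register() returns the ObjectName needed for unregistration later
    jmxBean = MBeans.register("HypotheticalService", "JmxLifecycle", this);
  }

  @Override
  public int getConnectionCount() {
    return 0; // placeholder attribute so this is a valid standard MBean
  }

  @Override
  public void close() {
    // Guard + null-out makes repeated close() calls safe, as in the patch
    if (jmxBean != null) {
      MBeans.unregister(jmxBean);
      jmxBean = null;
    }
  }
}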
---------------------------------------------------------------------
[15/18] hadoop git commit: Merge commit 'eca1a4bfe952fc184fe90dde50bac9b0e5293568' into HDFS-12943
Posted by sh...@apache.org.
Merge commit 'eca1a4bfe952fc184fe90dde50bac9b0e5293568' into HDFS-12943
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6c37db9d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6c37db9d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6c37db9d
Branch: refs/heads/HDFS-12943
Commit: 6c37db9da36a4fe716f616977b1d382ffa8a027a
Parents: 4b0ff03 eca1a4b
Author: Konstantin V Shvachko <sh...@apache.org>
Authored: Fri Sep 21 18:22:48 2018 -0700
Committer: Konstantin V Shvachko <sh...@apache.org>
Committed: Fri Sep 21 18:22:48 2018 -0700
----------------------------------------------------------------------
.../java/org/apache/hadoop/hdfs/DFSClient.java | 4 +--
.../org/apache/hadoop/hdfs/DFSUtilClient.java | 2 +-
.../java/org/apache/hadoop/hdfs/PeerCache.java | 8 ++---
.../hdfs/client/impl/BlockReaderFactory.java | 12 +++----
.../client/impl/BlockReaderLocalLegacy.java | 2 +-
.../hdfs/shortcircuit/ShortCircuitCache.java | 4 +--
.../hdfs/shortcircuit/ShortCircuitReplica.java | 2 +-
.../apache/hadoop/hdfs/util/IOUtilsClient.java | 3 +-
.../hadoop/hdfs/nfs/nfs3/OpenFileCtx.java | 4 +--
.../hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java | 6 ++--
.../java/org/apache/hadoop/hdfs/DFSUtil.java | 7 +++--
.../org/apache/hadoop/hdfs/HdfsDtFetcher.java | 7 +++--
.../org/apache/hadoop/hdfs/NameNodeProxies.java | 7 +++--
.../apache/hadoop/hdfs/SWebHdfsDtFetcher.java | 7 +++--
.../apache/hadoop/hdfs/WebHdfsDtFetcher.java | 7 +++--
.../hadoop/hdfs/net/DomainPeerServer.java | 6 ++--
.../apache/hadoop/hdfs/net/TcpPeerServer.java | 6 ++--
.../hdfs/qjournal/client/AsyncLoggerSet.java | 8 ++---
.../qjournal/client/QuorumJournalManager.java | 6 ++--
.../qjournal/server/GetJournalEditServlet.java | 7 +++--
.../hadoop/hdfs/qjournal/server/Journal.java | 12 +++----
.../hdfs/qjournal/server/JournalNode.java | 10 +++---
.../qjournal/server/JournalNodeRpcServer.java | 4 +--
.../token/block/BlockTokenSecretManager.java | 7 +++--
.../DelegationTokenSecretManager.java | 8 ++---
.../hadoop/hdfs/server/balancer/Balancer.java | 8 ++---
.../hadoop/hdfs/server/balancer/Dispatcher.java | 6 ++--
.../hdfs/server/balancer/NameNodeConnector.java | 7 +++--
.../AvailableSpaceBlockPlacementPolicy.java | 8 ++---
.../server/blockmanagement/DatanodeManager.java | 6 ++--
.../server/blockmanagement/HostFileManager.java | 7 +++--
.../hadoop/hdfs/server/common/JspHelper.java | 6 ++--
.../hdfs/server/common/MetricsLoggerTask.java | 6 ++--
.../apache/hadoop/hdfs/server/common/Util.java | 7 +++--
.../hdfs/server/datanode/DirectoryScanner.java | 7 +++--
.../server/datanode/ProfilingFileIoEvents.java | 7 +++--
.../server/datanode/ShortCircuitRegistry.java | 7 +++--
.../AvailableSpaceVolumeChoosingPolicy.java | 7 +++--
.../RoundRobinVolumeChoosingPolicy.java | 7 +++--
.../datanode/fsdataset/impl/BlockPoolSlice.java | 8 ++---
.../impl/FsDatasetAsyncDiskService.java | 7 +++--
.../impl/RamDiskAsyncLazyPersistService.java | 7 +++--
.../fsdataset/impl/RamDiskReplicaTracker.java | 7 +++--
.../server/datanode/web/DatanodeHttpServer.java | 6 ++--
.../web/RestCsrfPreventionFilterHandler.java | 4 +--
.../datanode/web/SimpleHttpProxyHandler.java | 4 +--
.../web/webhdfs/DataNodeUGIProvider.java | 6 ++--
.../datanode/web/webhdfs/ExceptionHandler.java | 4 +--
.../server/datanode/web/webhdfs/HdfsWriter.java | 8 ++---
.../datanode/web/webhdfs/WebHdfsHandler.java | 10 +++---
.../apache/hadoop/hdfs/server/mover/Mover.java | 12 +++----
.../hadoop/hdfs/server/namenode/CachePool.java | 2 --
.../hdfs/server/namenode/CheckpointConf.java | 7 +++--
.../hdfs/server/namenode/Checkpointer.java | 8 ++---
.../ContentSummaryComputationContext.java | 8 ++---
.../hadoop/hdfs/server/namenode/DfsServlet.java | 7 +++--
.../namenode/EditLogBackupOutputStream.java | 7 +++--
.../server/namenode/EditLogFileInputStream.java | 8 ++---
.../namenode/EditLogFileOutputStream.java | 11 ++++---
.../hdfs/server/namenode/EditsDoubleBuffer.java | 7 +++--
.../hdfs/server/namenode/FSEditLogAsync.java | 8 ++---
.../hdfs/server/namenode/FSEditLogLoader.java | 7 +++--
.../hadoop/hdfs/server/namenode/FSImage.java | 9 +++---
.../hdfs/server/namenode/FSImageFormat.java | 6 ++--
.../server/namenode/FSImageFormatPBINode.java | 7 +++--
...FSImagePreTransactionalStorageInspector.java | 10 +++---
.../FSImageTransactionalStorageInspector.java | 6 ++--
.../hdfs/server/namenode/FSNamesystem.java | 2 +-
.../server/namenode/FSPermissionChecker.java | 6 ++--
.../server/namenode/FileJournalManager.java | 7 +++--
.../hadoop/hdfs/server/namenode/INode.java | 6 ++--
.../hdfs/server/namenode/INodesInPath.java | 6 ++--
.../hdfs/server/namenode/ImageServlet.java | 6 ++--
.../hadoop/hdfs/server/namenode/JournalSet.java | 8 ++---
.../server/namenode/MetaRecoveryContext.java | 7 +++--
.../namenode/NNStorageRetentionManager.java | 6 ++--
.../hdfs/server/namenode/NNUpgradeUtil.java | 7 +++--
.../hadoop/hdfs/server/namenode/NameCache.java | 6 ++--
.../namenode/NameNodeResourceChecker.java | 7 +++--
.../hdfs/server/namenode/NamenodeFsck.java | 9 +++---
.../namenode/RedundantEditLogInputStream.java | 8 ++---
.../hdfs/server/namenode/SecondaryNameNode.java | 16 +++++-----
.../server/namenode/StartupProgressServlet.java | 2 +-
.../hdfs/server/namenode/TransferFsImage.java | 7 +++--
.../server/namenode/ha/BootstrapStandby.java | 13 ++++----
.../hdfs/server/namenode/ha/EditLogTailer.java | 8 ++---
.../web/resources/NamenodeWebHdfsMethods.java | 7 +++--
.../org/apache/hadoop/hdfs/tools/DFSAdmin.java | 6 ++--
.../apache/hadoop/hdfs/tools/DFSHAAdmin.java | 6 ++--
.../hdfs/tools/DFSZKFailoverController.java | 12 +++----
.../hdfs/tools/DelegationTokenFetcher.java | 8 ++---
.../org/apache/hadoop/hdfs/tools/GetGroups.java | 6 ++--
.../OfflineEditsBinaryLoader.java | 10 +++---
.../offlineImageViewer/FSImageHandler.java | 7 +++--
.../tools/offlineImageViewer/FSImageLoader.java | 7 +++--
.../OfflineImageReconstructor.java | 10 +++---
.../offlineImageViewer/OfflineImageViewer.java | 9 +++---
.../OfflineImageViewerPB.java | 7 +++--
.../offlineImageViewer/WebImageViewer.java | 7 +++--
.../hdfs/util/AtomicFileOutputStream.java | 6 ++--
.../hadoop/hdfs/util/LightWeightHashSet.java | 7 +++--
.../apache/hadoop/hdfs/util/MD5FileUtils.java | 8 ++---
.../hadoop/hdfs/util/PersistentLongFile.java | 8 ++---
.../hdfs/web/resources/ExceptionHandler.java | 7 +++--
.../apache/hadoop/cli/TestCacheAdminCLI.java | 7 +++--
.../hadoop/fs/TestEnhancedByteBufferAccess.java | 10 +++---
.../hadoop/fs/TestHdfsNativeCodeLoader.java | 7 +++--
.../java/org/apache/hadoop/fs/TestUnbuffer.java | 8 ++---
.../apache/hadoop/hdfs/AdminStatesBaseTest.java | 7 +++--
.../org/apache/hadoop/hdfs/AppendTestUtil.java | 6 ++--
.../apache/hadoop/hdfs/BenchmarkThroughput.java | 13 +++-----
.../org/apache/hadoop/hdfs/DFSTestUtil.java | 6 ++--
.../org/apache/hadoop/hdfs/FileAppendTest4.java | 7 +++--
.../org/apache/hadoop/hdfs/MiniDFSCluster.java | 9 +++---
.../hdfs/MiniDFSClusterWithNodeGroup.java | 7 +++--
.../hdfs/ReadStripedFileWithDecodingHelper.java | 5 +--
.../apache/hadoop/hdfs/TestAbandonBlock.java | 7 +++--
.../apache/hadoop/hdfs/TestAclsEndToEnd.java | 8 ++---
.../hadoop/hdfs/TestAppendSnapshotTruncate.java | 7 +++--
.../hadoop/hdfs/TestBalancerBandwidth.java | 7 +++--
.../hadoop/hdfs/TestBlockMissingException.java | 7 +++--
.../hadoop/hdfs/TestClientReportBadBlock.java | 8 ++---
.../org/apache/hadoop/hdfs/TestConnCache.java | 6 ++--
.../hadoop/hdfs/TestDFSClientFailover.java | 2 --
.../hadoop/hdfs/TestDFSClientRetries.java | 10 +++---
.../org/apache/hadoop/hdfs/TestDFSFinalize.java | 6 ++--
.../hdfs/TestDFSInotifyEventInputStream.java | 6 ++--
.../apache/hadoop/hdfs/TestDFSPermission.java | 7 +++--
.../org/apache/hadoop/hdfs/TestDFSRollback.java | 6 ++--
.../org/apache/hadoop/hdfs/TestDFSShell.java | 6 ++--
.../hadoop/hdfs/TestDFSStartupVersions.java | 6 ++--
.../hdfs/TestDFSStorageStateRecovery.java | 6 ++--
.../hadoop/hdfs/TestDFSStripedInputStream.java | 8 ++---
...DFSStripedInputStreamWithRandomECPolicy.java | 8 ++---
.../hadoop/hdfs/TestDFSStripedOutputStream.java | 6 ++--
...tputStreamWithFailureWithRandomECPolicy.java | 8 ++---
...FSStripedOutputStreamWithRandomECPolicy.java | 8 ++---
.../org/apache/hadoop/hdfs/TestDFSUpgrade.java | 7 +++--
.../hadoop/hdfs/TestDFSUpgradeFromImage.java | 7 ++---
.../org/apache/hadoop/hdfs/TestDataStream.java | 6 ++--
.../hadoop/hdfs/TestDataTransferProtocol.java | 6 ++--
.../hadoop/hdfs/TestDatanodeRegistration.java | 7 +++--
.../apache/hadoop/hdfs/TestDatanodeReport.java | 6 ++--
.../hadoop/hdfs/TestDisableConnCache.java | 6 ++--
.../hadoop/hdfs/TestEncryptedTransfer.java | 17 +++++-----
.../hadoop/hdfs/TestExternalBlockReader.java | 8 ++---
.../apache/hadoop/hdfs/TestHDFSServerPorts.java | 7 +++--
.../org/apache/hadoop/hdfs/TestHDFSTrash.java | 6 ++--
.../hdfs/TestInjectionForSimulatedStorage.java | 6 ++--
.../org/apache/hadoop/hdfs/TestLargeBlock.java | 7 +++--
.../java/org/apache/hadoop/hdfs/TestLease.java | 6 ++--
.../apache/hadoop/hdfs/TestLeaseRecovery2.java | 7 +++--
.../hadoop/hdfs/TestMissingBlocksAlert.java | 8 ++---
.../hadoop/hdfs/TestParallelReadUtil.java | 6 ++--
.../org/apache/hadoop/hdfs/TestPipelines.java | 6 ++--
.../TestReadStripedFileWithMissingBlocks.java | 8 ++---
.../hadoop/hdfs/TestReconstructStripedFile.java | 7 +++--
.../TestReplaceDatanodeFailureReplication.java | 8 ++---
.../hdfs/TestReplaceDatanodeOnFailure.java | 7 +++--
.../org/apache/hadoop/hdfs/TestReplication.java | 2 --
.../apache/hadoop/hdfs/TestRollingUpgrade.java | 7 +++--
.../org/apache/hadoop/hdfs/TestSafeMode.java | 6 ++--
.../TestUnsetAndChangeDirectoryEcPolicy.java | 8 ++---
.../org/apache/hadoop/hdfs/TestWriteRead.java | 8 ++---
.../hadoop/hdfs/TestWriteReadStripedFile.java | 19 ++++++-----
.../hdfs/TestWriteStripedFileWithFailure.java | 8 ++---
.../hadoop/hdfs/net/TestDFSNetworkTopology.java | 8 ++---
.../hadoop/hdfs/protocol/TestLocatedBlock.java | 9 +++---
.../datatransfer/sasl/TestSaslDataTransfer.java | 4 +--
.../hdfs/qjournal/MiniJournalCluster.java | 7 +++--
.../hadoop/hdfs/qjournal/MiniQJMHACluster.java | 7 +++--
.../hdfs/qjournal/TestSecureNNWithQJM.java | 2 --
.../qjournal/client/TestEpochsAreUnique.java | 7 +++--
.../hdfs/qjournal/client/TestQJMWithFaults.java | 6 ++--
.../client/TestQuorumJournalManager.java | 16 +++++-----
.../client/TestQuorumJournalManagerUnit.java | 4 +--
.../TestJournalNodeRespectsBindHostKeys.java | 6 ++--
.../hdfs/security/TestDelegationToken.java | 11 ++++---
.../TestDelegationTokenForProxyUser.java | 7 +++--
.../security/token/block/TestBlockToken.java | 7 +++--
.../hdfs/server/balancer/TestBalancer.java | 12 +++----
.../TestBalancerWithMultipleNameNodes.java | 12 +++----
.../balancer/TestBalancerWithNodeGroup.java | 6 ++--
.../BaseReplicationPolicyTest.java | 3 +-
.../server/blockmanagement/TestBlockInfo.java | 8 ++---
.../blockmanagement/TestBlockManager.java | 12 +++----
.../TestBlockReportRateLimiting.java | 7 +++--
.../TestBlocksWithNotEnoughRacks.java | 7 +++--
.../blockmanagement/TestCachedBlocksList.java | 7 +++--
.../blockmanagement/TestCorruptReplicaInfo.java | 6 ++--
.../blockmanagement/TestDatanodeManager.java | 7 +++--
.../TestNameNodePrunesMissingStorages.java | 7 +++--
.../TestRBWBlockInvalidation.java | 9 +++---
.../TestSequentialBlockGroupId.java | 8 ++---
.../blockmanagement/TestSequentialBlockId.java | 7 +++--
.../server/common/TestGetUriFromString.java | 7 +++--
.../hdfs/server/datanode/DataNodeTestUtils.java | 8 ++---
.../server/datanode/TestBPOfferService.java | 6 ++--
.../hdfs/server/datanode/TestBatchIbr.java | 11 ++++---
.../TestBlockHasMultipleReplicasOnSameDN.java | 7 +++--
.../server/datanode/TestBlockPoolManager.java | 7 +++--
.../hdfs/server/datanode/TestBlockRecovery.java | 13 ++++----
.../server/datanode/TestBlockReplacement.java | 6 ++--
.../datanode/TestBpServiceActorScheduler.java | 7 +++--
.../TestDataNodeErasureCodingMetrics.java | 8 ++---
.../datanode/TestDataNodeFaultInjector.java | 8 ++---
.../datanode/TestDataNodeHotSwapVolumes.java | 8 ++---
.../datanode/TestDataNodeInitStorage.java | 7 +++--
.../server/datanode/TestDataNodeMXBean.java | 7 +++--
.../server/datanode/TestDataNodeMetrics.java | 9 +++---
.../datanode/TestDataNodeMetricsLogger.java | 6 ++--
.../TestDataNodeMultipleRegistrations.java | 8 ++---
.../datanode/TestDataNodeReconfiguration.java | 7 +++--
.../datanode/TestDataNodeRollingUpgrade.java | 7 +++--
.../server/datanode/TestDataNodeTcpNoDelay.java | 8 ++---
.../TestDataNodeVolumeFailureReporting.java | 14 ++++-----
.../datanode/TestDataNodeVolumeMetrics.java | 8 ++---
.../TestDatanodeProtocolRetryPolicy.java | 10 +++---
.../server/datanode/TestDatanodeRegister.java | 7 +++--
.../server/datanode/TestDirectoryScanner.java | 9 +++---
...TestDnRespectsBlockReportSplitThreshold.java | 7 +++--
.../server/datanode/TestFsDatasetCache.java | 14 ++++-----
.../datanode/TestIncrementalBlockReports.java | 7 +++--
.../datanode/TestIncrementalBrVariations.java | 21 +++++++------
.../datanode/TestReadOnlySharedStorage.java | 7 +++--
.../hdfs/server/datanode/TestStorageReport.java | 7 +++--
.../hdfs/server/datanode/TestTransferRbw.java | 7 +++--
.../fsdataset/impl/FsDatasetImplTestUtils.java | 8 ++---
.../fsdataset/impl/LazyPersistTestCase.java | 7 +++--
.../fsdataset/impl/TestSpaceReservation.java | 6 ++--
.../TestDiskBalancerWithMockMover.java | 7 +++--
.../hdfs/server/mover/TestStorageMover.java | 22 ++++++-------
.../hdfs/server/namenode/FSImageTestUtil.java | 28 +++++------------
.../server/namenode/NNThroughputBenchmark.java | 9 +++---
.../namenode/OfflineEditsViewerHelper.java | 8 ++---
.../hdfs/server/namenode/TestAddBlockRetry.java | 7 +++--
.../hdfs/server/namenode/TestAllowFormat.java | 8 ++---
.../server/namenode/TestAuditLogAtDebug.java | 6 ++--
.../hdfs/server/namenode/TestBackupNode.java | 13 ++++----
.../server/namenode/TestCacheDirectives.java | 6 ++--
.../hdfs/server/namenode/TestCheckpoint.java | 8 ++---
.../hdfs/server/namenode/TestClusterId.java | 9 +++---
.../hdfs/server/namenode/TestDeadDatanode.java | 7 +++--
.../hdfs/server/namenode/TestEditLog.java | 10 +++---
.../namenode/TestEditLogFileInputStream.java | 8 ++---
.../hdfs/server/namenode/TestEditLogRace.java | 11 ++++---
.../hdfs/server/namenode/TestFSDirAttrOp.java | 7 +++--
.../hdfs/server/namenode/TestFSDirectory.java | 7 +++--
.../server/namenode/TestFSEditLogLoader.java | 6 ++--
.../namenode/TestFSImageWithSnapshot.java | 4 +--
.../namenode/TestFavoredNodesEndToEnd.java | 8 ++---
.../server/namenode/TestFileJournalManager.java | 23 ++++++++------
.../hdfs/server/namenode/TestFileTruncate.java | 12 +++----
.../hadoop/hdfs/server/namenode/TestFsck.java | 13 ++++----
.../namenode/TestFsckWithMultipleNameNodes.java | 7 +++--
.../hdfs/server/namenode/TestHDFSConcat.java | 7 +++--
.../hdfs/server/namenode/TestHostsFiles.java | 8 ++---
.../hdfs/server/namenode/TestINodeFile.java | 8 ++---
.../namenode/TestLargeDirectoryDelete.java | 9 +++---
.../hdfs/server/namenode/TestListOpenFiles.java | 9 +++---
.../TestNNStorageRetentionFunctional.java | 6 ++--
.../server/namenode/TestNameEditsConfigs.java | 6 ++--
.../namenode/TestNameNodeMetricsLogger.java | 6 ++--
.../namenode/TestNameNodeReconfigure.java | 8 ++---
.../server/namenode/TestNameNodeRecovery.java | 9 +++---
.../TestNameNodeRespectsBindHostKeys.java | 7 +++--
.../namenode/TestNameNodeStatusMXBean.java | 6 ++--
.../namenode/TestNamenodeCapacityReport.java | 7 +++--
.../server/namenode/TestQuotaByStorageType.java | 33 ++++++++++----------
.../hdfs/server/namenode/TestSaveNamespace.java | 4 +--
.../hdfs/server/namenode/TestStartup.java | 7 ++---
.../server/namenode/TestStorageRestore.java | 8 ++---
.../server/namenode/TestStripedINodeFile.java | 6 ++--
.../hdfs/server/namenode/ha/HATestUtil.java | 6 ++--
.../namenode/ha/TestBootstrapStandby.java | 13 ++++----
.../namenode/ha/TestDFSUpgradeWithHA.java | 7 +++--
.../namenode/ha/TestDelegationTokensWithHA.java | 8 ++---
.../server/namenode/ha/TestEditLogTailer.java | 4 +--
.../namenode/ha/TestEditLogsDuringFailover.java | 10 +++---
.../namenode/ha/TestFailureToReadEdits.java | 8 ++---
.../hdfs/server/namenode/ha/TestHAFsck.java | 4 +--
.../hdfs/server/namenode/ha/TestHAMetrics.java | 11 ++++---
.../hdfs/server/namenode/ha/TestHASafeMode.java | 13 ++++----
.../namenode/ha/TestHAStateTransitions.java | 12 +++----
.../namenode/ha/TestInitializeSharedEdits.java | 7 +++--
.../namenode/ha/TestRetryCacheWithHA.java | 7 +++--
.../namenode/ha/TestSeveralNameNodes.java | 7 +++--
.../namenode/ha/TestStandbyBlockManagement.java | 6 ++--
.../namenode/ha/TestStandbyInProgressTail.java | 8 ++---
.../server/namenode/ha/TestStandbyIsHot.java | 6 ++--
.../namenode/metrics/TestNameNodeMetrics.java | 11 ++++---
...tINodeFileUnderConstructionWithSnapshot.java | 6 ++--
.../snapshot/TestOpenFilesWithSnapshot.java | 8 ++---
.../snapshot/TestRenameWithSnapshots.java | 7 +++--
.../server/namenode/snapshot/TestSnapshot.java | 4 +--
.../resources/TestWebHdfsCreatePermissions.java | 7 +++--
.../web/resources/TestWebHdfsDataLocality.java | 7 +++--
.../sps/TestExternalStoragePolicySatisfier.java | 3 +-
.../shortcircuit/TestShortCircuitCache.java | 9 +++---
.../apache/hadoop/hdfs/tools/TestDFSAdmin.java | 6 ++--
.../hadoop/hdfs/tools/TestDFSHAAdmin.java | 7 +++--
.../hdfs/tools/TestDFSHAAdminMiniCluster.java | 13 ++++----
.../TestOfflineEditsViewer.java | 8 ++---
.../TestOfflineImageViewer.java | 9 +++---
.../TestOfflineImageViewerForAcl.java | 8 ++---
...TestOfflineImageViewerForContentSummary.java | 8 ++---
.../TestOfflineImageViewerForXAttr.java | 8 ++---
.../hdfs/util/TestLightWeightHashSet.java | 8 ++---
.../hdfs/util/TestLightWeightLinkedSet.java | 10 +++---
.../hdfs/web/TestFSMainOperationsWebHdfs.java | 4 +--
.../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 10 +++---
.../hadoop/hdfs/web/TestWebHdfsTimeouts.java | 18 ++++++-----
.../web/TestWebHdfsWithMultipleNameNodes.java | 12 +++----
.../apache/hadoop/hdfs/web/WebHdfsTestUtil.java | 7 +++--
.../hadoop/hdfs/web/resources/TestParam.java | 6 ++--
.../apache/hadoop/security/TestPermission.java | 7 +++--
.../hadoop/security/TestPermissionSymlinks.java | 7 +++--
317 files changed, 1283 insertions(+), 1173 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c37db9d/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c37db9d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java
index c3d7071,6302b2a..f024b0e
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java
@@@ -23,10 -23,9 +23,10 @@@ import java.util.List
import java.util.Map;
import java.util.concurrent.TimeoutException;
- import org.apache.commons.logging.Log;
- import org.apache.commons.logging.LogFactory;
- import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto;
++import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c37db9d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
index ad053a3,ba2b20a..f96fe09
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
@@@ -67,18 -65,8 +67,18 @@@ import com.google.protobuf.TextFormat
*/
@InterfaceAudience.Private
public class QuorumJournalManager implements JournalManager {
- static final Log LOG = LogFactory.getLog(QuorumJournalManager.class);
+ static final Logger LOG = LoggerFactory.getLogger(QuorumJournalManager.class);
+ // This config is not publicly exposed
+ static final String QJM_RPC_MAX_TXNS_KEY =
+ "dfs.ha.tail-edits.qjm.rpc.max-txns";
+ static final int QJM_RPC_MAX_TXNS_DEFAULT = 5000;
+
+ // Maximum number of transactions to fetch at a time when using the
+ // RPC edit fetch mechanism
+ private final int maxTxnsPerRpc;
+ // Whether or not in-progress tailing is enabled in the configuration
+ private final boolean inProgressTailingEnabled;
// Timeouts for which the QJM will wait for each of the following actions.
private final int startSegmentTimeoutMs;
private final int prepareRecoveryTimeoutMs;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c37db9d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
index ccf8ba2,39afabc..9e204cb
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
@@@ -35,11 -32,10 +35,11 @@@ import java.util.List
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang3.Range;
- import org.apache.commons.logging.Log;
- import org.apache.commons.logging.LogFactory;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.qjournal.protocol.JournalNotFormattedException;
import org.apache.hadoop.hdfs.qjournal.protocol.JournalOutOfSyncException;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c37db9d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c37db9d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c37db9d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c37db9d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c37db9d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c37db9d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
index f533bc7,f7c3a27..f3bb954
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
@@@ -40,11 -40,10 +40,11 @@@ import java.util.List
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeoutException;
- import org.apache.commons.logging.Log;
- import org.apache.commons.logging.LogFactory;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster;
import org.apache.hadoop.hdfs.qjournal.QJMTestUtil;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c37db9d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c37db9d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java
index 0843bfe,3eca80f..3824b83
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java
@@@ -32,9 -32,8 +32,9 @@@ import java.net.HttpURLConnection
import java.net.URL;
import java.util.EnumMap;
+import com.google.protobuf.ByteString;
- import org.apache.commons.logging.Log;
- import org.apache.commons.logging.LogFactory;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c37db9d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c37db9d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyInProgressTail.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyInProgressTail.java
index 0420579,2bdada4..8394073
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyInProgressTail.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyInProgressTail.java
@@@ -22,14 -21,12 +22,14 @@@ import static org.junit.Assert.assertNo
import static org.junit.Assert.assertNull;
import java.io.File;
+import java.io.FilenameFilter;
import java.io.IOException;
import java.net.URI;
+import java.util.Iterator;
import java.util.List;
- import org.apache.commons.logging.Log;
- import org.apache.commons.logging.LogFactory;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c37db9d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c37db9d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
----------------------------------------------------------------------
[02/18] hadoop git commit: HADOOP-15736. Trash: Negative Value For
Deletion Interval Leads To Abnormal Behaviour. Contributed by Ayush Saxena.
Posted by sh...@apache.org.
HADOOP-15736. Trash: Negative Value For Deletion Interval Leads To Abnormal Behaviour. Contributed by Ayush Saxena.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ad27e97
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ad27e97
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ad27e97
Branch: refs/heads/HDFS-12943
Commit: 7ad27e97f05b13b33fdcef9cb63ace9c1728bfb5
Parents: 6fc293f
Author: Vinayakumar B <vi...@apache.org>
Authored: Thu Sep 20 09:31:35 2018 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Thu Sep 20 09:31:35 2018 +0530
----------------------------------------------------------------------
.../main/java/org/apache/hadoop/fs/TrashPolicyDefault.java | 8 +++++++-
.../src/test/java/org/apache/hadoop/fs/TestTrash.java | 6 ++++++
2 files changed, 13 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ad27e97/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index 6e101a2..39d5e73 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -101,6 +101,12 @@ public class TrashPolicyDefault extends TrashPolicy {
this.emptierInterval = (long)(conf.getFloat(
FS_TRASH_CHECKPOINT_INTERVAL_KEY, FS_TRASH_CHECKPOINT_INTERVAL_DEFAULT)
* MSECS_PER_MINUTE);
+ if (deletionInterval < 0) {
+ LOG.warn("Invalid value {} for deletion interval,"
+ + " deletion interval cannot be negative."
+ + " Changing to default value 0.", deletionInterval);
+ this.deletionInterval = 0;
+ }
}
private Path makeTrashRelativePath(Path basePath, Path rmFilePath) {
@@ -109,7 +115,7 @@ public class TrashPolicyDefault extends TrashPolicy {
@Override
public boolean isEnabled() {
- return deletionInterval != 0;
+ return deletionInterval > 0;
}
@SuppressWarnings("deprecation")
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ad27e97/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
index 568821b..04f56fb 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
@@ -132,6 +132,9 @@ public class TestTrash {
conf.setLong(FS_TRASH_INTERVAL_KEY, 0); // disabled
assertFalse(new Trash(conf).isEnabled());
+ conf.setLong(FS_TRASH_INTERVAL_KEY, -1); // disabled
+ assertFalse(new Trash(conf).isEnabled());
+
conf.setLong(FS_TRASH_INTERVAL_KEY, 10); // 10 minute
assertTrue(new Trash(conf).isEnabled());
@@ -526,6 +529,9 @@ public class TestTrash {
conf.setLong(FS_TRASH_INTERVAL_KEY, 0); // disabled
assertFalse(new Trash(conf).isEnabled());
+ conf.setLong(FS_TRASH_INTERVAL_KEY, -1); // disabled
+ assertFalse(new Trash(conf).isEnabled());
+
conf.setLong(FS_TRASH_INTERVAL_KEY, 10); // 10 minute
assertTrue(new Trash(conf).isEnabled());
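The net effect of the change: a negative fs.trash.interval is clamped to 0 (disabled) instead of producing abnormal emptier behaviour, and isEnabled() now treats only positive intervals as enabled. A minimal sketch of the resulting behaviour, assuming a vanilla Configuration over the default local filesystem:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Trash;

import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;

public class TrashIntervalDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    conf.setLong(FS_TRASH_INTERVAL_KEY, -1);          // now clamped to 0 by TrashPolicyDefault
    System.out.println(new Trash(conf).isEnabled());  // false

    conf.setLong(FS_TRASH_INTERVAL_KEY, 10);          // 10 minutes
    System.out.println(new Trash(conf).isEnabled());  // true
  }
}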
[04/18] hadoop git commit: HADOOP-15756. [JDK10] Migrate from
sun.net.util.IPAddressUtil to the replacement. Contributed by Akira Ajisaka.
Posted by sh...@apache.org.
HADOOP-15756. [JDK10] Migrate from sun.net.util.IPAddressUtil to the replacement. Contributed by Akira Ajisaka.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3da94a36
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3da94a36
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3da94a36
Branch: refs/heads/HDFS-12943
Commit: 3da94a36e21a315c09ec7edb7702820fe2b524f9
Parents: 646874c
Author: Ewan Higgs <ew...@wdc.com>
Authored: Thu Sep 20 14:53:21 2018 +0200
Committer: Ewan Higgs <ew...@wdc.com>
Committed: Thu Sep 20 14:53:21 2018 +0200
----------------------------------------------------------------------
.../org/apache/hadoop/security/SecurityUtil.java | 15 ++++++---------
1 file changed, 6 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3da94a36/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
index 5f8cb29..0de334a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
@@ -54,9 +54,9 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
//this will need to be replaced someday when there is a suitable replacement
import sun.net.dns.ResolverConfiguration;
-import sun.net.util.IPAddressUtil;
import com.google.common.annotations.VisibleForTesting;
+import com.google.common.net.InetAddresses;
/**
* Security Utils.
@@ -604,14 +604,11 @@ public final class SecurityUtil {
public InetAddress getByName(String host) throws UnknownHostException {
InetAddress addr = null;
- if (IPAddressUtil.isIPv4LiteralAddress(host)) {
- // use ipv4 address as-is
- byte[] ip = IPAddressUtil.textToNumericFormatV4(host);
- addr = InetAddress.getByAddress(host, ip);
- } else if (IPAddressUtil.isIPv6LiteralAddress(host)) {
- // use ipv6 address as-is
- byte[] ip = IPAddressUtil.textToNumericFormatV6(host);
- addr = InetAddress.getByAddress(host, ip);
+ if (InetAddresses.isInetAddress(host)) {
+ // valid ip address. use it as-is
+ addr = InetAddresses.forString(host);
+ // set hostname
+ addr = InetAddress.getByAddress(host, addr.getAddress());
} else if (host.endsWith(".")) {
// a rooted host ends with a dot, ex. "host."
// rooted hosts never use the search path, so only try an exact lookup
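Guava's InetAddresses collapses the separate IPv4/IPv6 branches of the old sun.net.util code into a single literal check. A small standalone sketch of the new resolution path (method and class names hypothetical), assuming Guava is on the classpath:

import java.net.InetAddress;
import java.net.UnknownHostException;

import com.google.common.net.InetAddresses;

public class LiteralAddressDemo {
  // Mirrors the new SecurityUtil branch: parse IP literals without DNS.
  static InetAddress resolveLiteral(String host) throws UnknownHostException {
    if (InetAddresses.isInetAddress(host)) {
      // Handles both IPv4 and IPv6 literals; no lookup is performed.
      InetAddress addr = InetAddresses.forString(host);
      // Re-wrap so the original literal is preserved as the hostname.
      return InetAddress.getByAddress(host, addr.getAddress());
    }
    throw new UnknownHostException(host + " is not an IP literal");
  }

  public static void main(String[] args) throws Exception {
    System.out.println(resolveLiteral("127.0.0.1"));
    System.out.println(resolveLiteral("::1"));
  }
}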
[07/18] hadoop git commit: HDDS-394. Rename *Key APIs in
DatanodeContainerProtocol to *Block APIs. Contributed by Dinesh Chitlangia.
Posted by sh...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java
new file mode 100644
index 0000000..9df4249
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java
@@ -0,0 +1,127 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.container.common.helpers;
+
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestRule;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ThreadLocalRandom;
+
+/**
+ * Tests for {@link BlockData} chunk bookkeeping.
+ */
+public class TestBlockData {
+ static final Logger LOG = LoggerFactory.getLogger(TestBlockData.class);
+ @Rule
+ public TestRule timeout = new Timeout(10000);
+
+ static ContainerProtos.ChunkInfo buildChunkInfo(String name, long offset,
+ long len) {
+ return ContainerProtos.ChunkInfo.newBuilder()
+ .setChunkName(name).setOffset(offset).setLen(len).build();
+ }
+
+ @Test
+ public void testAddAndRemove() {
+ final BlockData computed = new BlockData(null);
+ final List<ContainerProtos.ChunkInfo> expected = new ArrayList<>();
+
+ assertChunks(expected, computed);
+ long offset = 0;
+ int n = 5;
+ for(int i = 0; i < n; i++) {
+ offset += assertAddChunk(expected, computed, offset);
+ }
+
+ while (!expected.isEmpty()) {
+ removeChunk(expected, computed);
+ }
+ }
+
+ private static int chunkCount = 0;
+ static ContainerProtos.ChunkInfo addChunk(
+ List<ContainerProtos.ChunkInfo> expected, long offset) {
+ final long length = ThreadLocalRandom.current().nextLong(1000);
+ final ContainerProtos.ChunkInfo info =
+ buildChunkInfo("c" + ++chunkCount, offset, length);
+ expected.add(info);
+ return info;
+ }
+
+ static long assertAddChunk(List<ContainerProtos.ChunkInfo> expected,
+ BlockData computed, long offset) {
+ final ContainerProtos.ChunkInfo info = addChunk(expected, offset);
+ LOG.info("addChunk: " + toString(info));
+ computed.addChunk(info);
+ assertChunks(expected, computed);
+ return info.getLen();
+ }
+
+
+ static void removeChunk(List<ContainerProtos.ChunkInfo> expected,
+ BlockData computed) {
+ final int i = ThreadLocalRandom.current().nextInt(expected.size());
+ final ContainerProtos.ChunkInfo info = expected.remove(i);
+ LOG.info("removeChunk: " + toString(info));
+ computed.removeChunk(info);
+ assertChunks(expected, computed);
+ }
+
+ static void assertChunks(List<ContainerProtos.ChunkInfo> expected,
+ BlockData computed) {
+ final List<ContainerProtos.ChunkInfo> computedChunks = computed.getChunks();
+ Assert.assertEquals("expected=" + expected + "\ncomputed=" +
+ computedChunks, expected, computedChunks);
+ Assert.assertEquals(expected.stream().mapToLong(i -> i.getLen()).sum(),
+ computed.getSize());
+ }
+
+ static String toString(ContainerProtos.ChunkInfo info) {
+ return info.getChunkName() + ":" + info.getOffset() + "," + info.getLen();
+ }
+
+ static String toString(List<ContainerProtos.ChunkInfo> infos) {
+ return infos.stream().map(TestBlockData::toString)
+ .reduce((left, right) -> left + ", " + right)
+ .orElse("");
+ }
+
+ @Test
+ public void testSetChunks() {
+ final BlockData computed = new BlockData(null);
+ final List<ContainerProtos.ChunkInfo> expected = new ArrayList<>();
+
+ assertChunks(expected, computed);
+ long offset = 0;
+ int n = 5;
+ for(int i = 0; i < n; i++) {
+ offset += addChunk(expected, offset).getLen();
+ LOG.info("setChunk: " + toString(expected));
+ computed.setChunks(expected);
+ assertChunks(expected, computed);
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestKeyData.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestKeyData.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestKeyData.java
deleted file mode 100644
index f57fe99..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestKeyData.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.common.helpers;
-
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestRule;
-import org.junit.rules.Timeout;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.ThreadLocalRandom;
-
-/**
- * Tests to test block deleting service.
- */
-public class TestKeyData {
- static final Logger LOG = LoggerFactory.getLogger(TestKeyData.class);
- @Rule
- public TestRule timeout = new Timeout(10000);
-
- static ContainerProtos.ChunkInfo buildChunkInfo(String name, long offset, long len) {
- return ContainerProtos.ChunkInfo.newBuilder()
- .setChunkName(name).setOffset(offset).setLen(len).build();
- }
-
- @Test
- public void testAddAndRemove() {
- final KeyData computed = new KeyData(null);
- final List<ContainerProtos.ChunkInfo> expected = new ArrayList<>();
-
- assertChunks(expected, computed);
- long offset = 0;
- int n = 5;
- for(int i = 0; i < n; i++) {
- offset += assertAddChunk(expected, computed, offset);
- }
-
- for(; !expected.isEmpty(); ) {
- removeChunk(expected, computed);
- }
- }
-
- private static int chunkCount = 0;
- static ContainerProtos.ChunkInfo addChunk(List<ContainerProtos.ChunkInfo> expected, long offset) {
- final long length = ThreadLocalRandom.current().nextLong(1000);
- final ContainerProtos.ChunkInfo info = buildChunkInfo("c" + ++chunkCount, offset, length);
- expected.add(info);
- return info;
- }
-
- static long assertAddChunk(List<ContainerProtos.ChunkInfo> expected, KeyData computed, long offset) {
- final ContainerProtos.ChunkInfo info = addChunk(expected, offset);
- LOG.info("addChunk: " + toString(info));
- computed.addChunk(info);
- assertChunks(expected, computed);
- return info.getLen();
- }
-
-
- static void removeChunk(List<ContainerProtos.ChunkInfo> expected, KeyData computed) {
- final int i = ThreadLocalRandom.current().nextInt(expected.size());
- final ContainerProtos.ChunkInfo info = expected.remove(i);
- LOG.info("removeChunk: " + toString(info));
- computed.removeChunk(info);
- assertChunks(expected, computed);
- }
-
- static void assertChunks(List<ContainerProtos.ChunkInfo> expected, KeyData computed) {
- final List<ContainerProtos.ChunkInfo> computedChunks = computed.getChunks();
- Assert.assertEquals("expected=" + expected + "\ncomputed=" + computedChunks, expected, computedChunks);
- Assert.assertEquals(expected.stream().mapToLong(i -> i.getLen()).sum(), computed.getSize());
- }
-
- static String toString(ContainerProtos.ChunkInfo info) {
- return info.getChunkName() + ":" + info.getOffset() + "," + info.getLen();
- }
-
- static String toString(List<ContainerProtos.ChunkInfo> infos) {
- return infos.stream().map(TestKeyData::toString)
- .reduce((left, right) -> left + ", " + right)
- .orElse("");
- }
-
- @Test
- public void testSetChunks() {
- final KeyData computed = new KeyData(null);
- final List<ContainerProtos.ChunkInfo> expected = new ArrayList<>();
-
- assertChunks(expected, computed);
- long offset = 0;
- int n = 5;
- for(int i = 0; i < n; i++) {
- offset += addChunk(expected, offset).getLen();
- LOG.info("setChunk: " + toString(expected));
- computed.setChunks(expected);
- assertChunks(expected, computed);
- }
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestCloseContainerHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestCloseContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestCloseContainerHandler.java
index 73fa70d..78bf008 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestCloseContainerHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestCloseContainerHandler.java
@@ -25,12 +25,12 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.ozone.container.ContainerTestHelper;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.common.interfaces.Container;
import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.ratis.shaded.com.google.protobuf.ByteString;
import org.junit.AfterClass;
@@ -71,14 +71,14 @@ public class TestCloseContainerHandler {
private final static String DATANODE_UUID = UUID.randomUUID().toString();
- private static final String baseDir = MiniDFSCluster.getBaseDirectory();
- private static final String volume1 = baseDir + "disk1";
- private static final String volume2 = baseDir + "disk2";
+ private static final String BASE_DIR = MiniDFSCluster.getBaseDirectory();
+ private static final String VOLUME_1 = BASE_DIR + "disk1";
+ private static final String VOLUME_2 = BASE_DIR + "disk2";
@BeforeClass
public static void setup() throws Exception {
conf = new Configuration();
- String dataDirKey = volume1 + "," + volume2;
+ String dataDirKey = VOLUME_1 + "," + VOLUME_2;
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDirKey);
containerSet = new ContainerSet();
DatanodeDetails datanodeDetails =
@@ -160,31 +160,31 @@ public class TestCloseContainerHandler {
getTestBlockID(testContainerID);
Pipeline pipeline = createSingleNodePipeline();
List<ChunkInfo> chunkList = writeChunkBuilder(blockID, pipeline, 3);
- // the key should exist in the map
+ // the block should exist in the map
Assert.assertNotNull(
- openContainerBlockMap.getKeyDataMap(testContainerID)
+ openContainerBlockMap.getBlockDataMap(testContainerID)
.get(blockID.getLocalID()));
- KeyData keyData = new KeyData(blockID);
+ BlockData blockData = new BlockData(blockID);
List<ContainerProtos.ChunkInfo> chunkProtoList = new LinkedList<>();
for (ChunkInfo i : chunkList) {
chunkProtoList.add(i.getProtoBufMessage());
}
- keyData.setChunks(chunkProtoList);
- ContainerProtos.PutKeyRequestProto.Builder putKeyRequestProto =
- ContainerProtos.PutKeyRequestProto.newBuilder();
- putKeyRequestProto.setKeyData(keyData.getProtoBufMessage());
+ blockData.setChunks(chunkProtoList);
+ ContainerProtos.PutBlockRequestProto.Builder putBlockRequestProto =
+ ContainerProtos.PutBlockRequestProto.newBuilder();
+ putBlockRequestProto.setBlockData(blockData.getProtoBufMessage());
ContainerProtos.ContainerCommandRequestProto.Builder request =
ContainerProtos.ContainerCommandRequestProto.newBuilder();
- request.setCmdType(ContainerProtos.Type.PutKey);
+ request.setCmdType(ContainerProtos.Type.PutBlock);
request.setContainerID(blockID.getContainerID());
- request.setPutKey(putKeyRequestProto);
+ request.setPutBlock(putBlockRequestProto);
request.setTraceID(UUID.randomUUID().toString());
request.setDatanodeUuid(pipeline.getLeader().getUuidString());
dispatcher.dispatch(request.build());
- //the open key should be removed from Map
+ //the open block should be removed from Map
Assert.assertNull(
- openContainerBlockMap.getKeyDataMap(testContainerID));
+ openContainerBlockMap.getBlockDataMap(testContainerID));
}
@Test
@@ -197,10 +197,10 @@ public class TestCloseContainerHandler {
List<ChunkInfo> chunkList = writeChunkBuilder(blockID, pipeline, 3);
// the key should exist in the map
Assert.assertNotNull(
- openContainerBlockMap.getKeyDataMap(testContainerID)
+ openContainerBlockMap.getBlockDataMap(testContainerID)
.get(blockID.getLocalID()));
Assert.assertTrue(
- openContainerBlockMap.getKeyDataMap(testContainerID)
+ openContainerBlockMap.getBlockDataMap(testContainerID)
.get(blockID.getLocalID()).getChunks().size() == 3);
ContainerProtos.DeleteChunkRequestProto.Builder deleteChunkProto =
ContainerProtos.DeleteChunkRequestProto.newBuilder();
@@ -220,7 +220,7 @@ public class TestCloseContainerHandler {
request.setDatanodeUuid(pipeline.getLeader().getUuidString());
dispatcher.dispatch(request.build());
Assert.assertTrue(
- openContainerBlockMap.getKeyDataMap(testContainerID)
+ openContainerBlockMap.getBlockDataMap(testContainerID)
.get(blockID.getLocalID()).getChunks().size() == 2);
}
@@ -235,14 +235,14 @@ public class TestCloseContainerHandler {
List<ChunkInfo> chunkList = writeChunkBuilder(blockID, pipeline, 3);
Container container = containerSet.getContainer(testContainerID);
- KeyData keyData = openContainerBlockMap.
- getKeyDataMap(testContainerID).get(blockID.getLocalID());
+ BlockData blockData = openContainerBlockMap.
+ getBlockDataMap(testContainerID).get(blockID.getLocalID());
// the key should exist in the map
Assert.assertNotNull(
- openContainerBlockMap.getKeyDataMap(testContainerID)
+ openContainerBlockMap.getBlockDataMap(testContainerID)
.get(blockID.getLocalID()));
Assert.assertTrue(
- keyData.getChunks().size() == chunkList.size());
+ blockData.getChunks().size() == chunkList.size());
ContainerProtos.ContainerCommandRequestProto.Builder request =
ContainerProtos.ContainerCommandRequestProto.newBuilder();
request.setCmdType(ContainerProtos.Type.CloseContainer);
@@ -253,8 +253,9 @@ public class TestCloseContainerHandler {
request.setDatanodeUuid(pipeline.getLeader().getUuidString());
dispatcher.dispatch(request.build());
Assert.assertNull(
- openContainerBlockMap.getKeyDataMap(testContainerID));
+ openContainerBlockMap.getBlockDataMap(testContainerID));
// Make sure the key got committed
- Assert.assertNotNull(handler.getKeyManager().getKey(container, blockID));
+ Assert.assertNotNull(handler.getBlockManager()
+ .getBlock(container, blockID));
}
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
index 3a36331..b6cdc9d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
@@ -29,20 +29,20 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerExcep
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.ContainerTestHelper;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
import org.apache.hadoop.ozone.container.common.interfaces.Container;
import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerImpl;
-import org.apache.hadoop.ozone.container.keyvalue.impl.KeyManagerImpl;
+import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl;
import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
-import org.apache.hadoop.ozone.container.keyvalue.interfaces.KeyManager;
+import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.utils.MetadataStore;
import org.junit.After;
@@ -97,7 +97,7 @@ public class TestContainerPersistence {
private static ContainerSet containerSet;
private static VolumeSet volumeSet;
private static VolumeChoosingPolicy volumeChoosingPolicy;
- private static KeyManager keyManager;
+ private static BlockManager blockManager;
private static ChunkManager chunkManager;
@Rule
public ExpectedException exception = ExpectedException.none();
@@ -126,7 +126,7 @@ public class TestContainerPersistence {
public void setupPaths() throws IOException {
containerSet = new ContainerSet();
volumeSet = new VolumeSet(DATANODE_UUID, conf);
- keyManager = new KeyManagerImpl(conf);
+ blockManager = new BlockManagerImpl(conf);
chunkManager = new ChunkManagerImpl();
for (String dir : conf.getStrings(DFS_DATANODE_DATA_DIR_KEY)) {
@@ -152,15 +152,15 @@ public class TestContainerPersistence {
return ContainerTestHelper.getTestContainerID();
}
- private Container addContainer(ContainerSet containerSet, long containerID)
+ private Container addContainer(ContainerSet cSet, long cID)
throws IOException {
- KeyValueContainerData data = new KeyValueContainerData(containerID,
+ KeyValueContainerData data = new KeyValueContainerData(cID,
ContainerTestHelper.CONTAINER_MAX_SIZE);
data.addMetadata("VOLUME", "shire");
data.addMetadata("owner)", "bilbo");
KeyValueContainer container = new KeyValueContainer(data, conf);
container.create(volumeSet, volumeChoosingPolicy, SCM_ID);
- containerSet.addContainer(container);
+ cSet.addContainer(container);
return container;
}
@@ -184,7 +184,7 @@ public class TestContainerPersistence {
MetadataStore store = null;
try {
- store = KeyUtils.getDB(kvData, conf);
+ store = BlockUtils.getDB(kvData, conf);
Assert.assertNotNull(store);
} finally {
if (store != null) {
@@ -227,19 +227,19 @@ public class TestContainerPersistence {
Assert.assertFalse(containerSet.getContainerMap()
.containsKey(testContainerID1));
- // Adding key to a deleted container should fail.
+ // Adding block to a deleted container should fail.
exception.expect(StorageContainerException.class);
exception.expectMessage("Error opening DB.");
BlockID blockID1 = ContainerTestHelper.getTestBlockID(testContainerID1);
- KeyData someKey1 = new KeyData(blockID1);
+ BlockData someKey1 = new BlockData(blockID1);
someKey1.setChunks(new LinkedList<ContainerProtos.ChunkInfo>());
- keyManager.putKey(container1, someKey1);
+ blockManager.putBlock(container1, someKey1);
// Deleting a non-empty container should fail.
BlockID blockID2 = ContainerTestHelper.getTestBlockID(testContainerID2);
- KeyData someKey2 = new KeyData(blockID2);
+ BlockData someKey2 = new BlockData(blockID2);
someKey2.setChunks(new LinkedList<ContainerProtos.ChunkInfo>());
- keyManager.putKey(container2, someKey2);
+ blockManager.putBlock(container2, someKey2);
exception.expect(StorageContainerException.class);
exception.expectMessage(
@@ -325,7 +325,8 @@ public class TestContainerPersistence {
if (container == null) {
container = addContainer(containerSet, testContainerID);
}
- ChunkInfo info = getChunk(blockID.getLocalID(), 0, 0, datalen);
+ ChunkInfo info = getChunk(
+ blockID.getLocalID(), 0, 0, datalen);
byte[] data = getData(datalen);
setDataChecksum(info, data);
chunkManager.writeChunk(container, blockID, info, data, COMBINED);
@@ -348,8 +349,8 @@ public class TestContainerPersistence {
}
/**
- * Writes many chunks of the same key into different chunk files and verifies
- * that we have that data in many files.
+ * Writes many chunks of the same block into different chunk files and
+ * verifies that we have that data in many files.
*
* @throws IOException
* @throws NoSuchAlgorithmException
@@ -425,7 +426,8 @@ public class TestContainerPersistence {
Container container = addContainer(containerSet, testContainerID);
BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
- ChunkInfo info = getChunk(blockID.getLocalID(), 0, 0, datalen);
+ ChunkInfo info = getChunk(
+ blockID.getLocalID(), 0, 0, datalen);
byte[] data = getData(datalen);
setDataChecksum(info, data);
chunkManager.writeChunk(container, blockID, info, data, COMBINED);
@@ -456,7 +458,8 @@ public class TestContainerPersistence {
Container container = addContainer(containerSet, testContainerID);
BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
- ChunkInfo info = getChunk(blockID.getLocalID(), 0, 0, datalen);
+ ChunkInfo info = getChunk(
+ blockID.getLocalID(), 0, 0, datalen);
byte[] data = getData(datalen);
setDataChecksum(info, data);
chunkManager.writeChunk(container, blockID, info, data, COMBINED);
@@ -500,7 +503,8 @@ public class TestContainerPersistence {
for (int x = 0; x < chunkCount; x++) {
// we are writing to the same chunk file but at different offsets.
long offset = x * datalen;
- ChunkInfo info = getChunk(blockID.getLocalID(), 0, offset, datalen);
+ ChunkInfo info = getChunk(
+ blockID.getLocalID(), 0, offset, datalen);
byte[] data = getData(datalen);
oldSha.update(data);
setDataChecksum(info, data);
@@ -531,7 +535,8 @@ public class TestContainerPersistence {
Container container = addContainer(containerSet, testContainerID);
BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
- ChunkInfo info = getChunk(blockID.getLocalID(), 0, 0, datalen);
+ ChunkInfo info = getChunk(
+ blockID.getLocalID(), 0, 0, datalen);
byte[] data = getData(datalen);
setDataChecksum(info, data);
chunkManager.writeChunk(container, blockID, info, data, COMBINED);
@@ -542,37 +547,38 @@ public class TestContainerPersistence {
}
/**
- * Tests a put key and read key.
+ * Tests a put block and read block.
*
* @throws IOException
* @throws NoSuchAlgorithmException
*/
@Test
- public void testPutKey() throws IOException, NoSuchAlgorithmException {
+ public void testPutBlock() throws IOException, NoSuchAlgorithmException {
long testContainerID = getTestContainerID();
Container container = addContainer(containerSet, testContainerID);
BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
ChunkInfo info = writeChunkHelper(blockID);
- KeyData keyData = new KeyData(blockID);
+ BlockData blockData = new BlockData(blockID);
List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
chunkList.add(info.getProtoBufMessage());
- keyData.setChunks(chunkList);
- keyManager.putKey(container, keyData);
- KeyData readKeyData = keyManager.getKey(container, keyData.getBlockID());
+ blockData.setChunks(chunkList);
+ blockManager.putBlock(container, blockData);
+ BlockData readBlockData = blockManager.
+ getBlock(container, blockData.getBlockID());
ChunkInfo readChunk =
- ChunkInfo.getFromProtoBuf(readKeyData.getChunks().get(0));
+ ChunkInfo.getFromProtoBuf(readBlockData.getChunks().get(0));
Assert.assertEquals(info.getChecksum(), readChunk.getChecksum());
}
/**
- * Tests a put key and read key.
+ * Tests a put block and read block.
*
* @throws IOException
* @throws NoSuchAlgorithmException
*/
@Test
- public void testPutKeyWithLotsOfChunks() throws IOException,
+ public void testPutBlockWithLotsOfChunks() throws IOException,
NoSuchAlgorithmException {
final int chunkCount = 2;
final int datalen = 1024;
@@ -603,66 +609,67 @@ public class TestContainerPersistence {
long writeCount = container.getContainerData().getWriteCount();
Assert.assertEquals(chunkCount, writeCount);
- KeyData keyData = new KeyData(blockID);
+ BlockData blockData = new BlockData(blockID);
List<ContainerProtos.ChunkInfo> chunkProtoList = new LinkedList<>();
for (ChunkInfo i : chunkList) {
chunkProtoList.add(i.getProtoBufMessage());
}
- keyData.setChunks(chunkProtoList);
- keyManager.putKey(container, keyData);
- KeyData readKeyData = keyManager.getKey(container, keyData.getBlockID());
+ blockData.setChunks(chunkProtoList);
+ blockManager.putBlock(container, blockData);
+ BlockData readBlockData = blockManager.
+ getBlock(container, blockData.getBlockID());
ChunkInfo lastChunk = chunkList.get(chunkList.size() - 1);
ChunkInfo readChunk =
- ChunkInfo.getFromProtoBuf(readKeyData.getChunks().get(readKeyData
+ ChunkInfo.getFromProtoBuf(readBlockData.getChunks().get(readBlockData
.getChunks().size() - 1));
Assert.assertEquals(lastChunk.getChecksum(), readChunk.getChecksum());
}
/**
- * Deletes a key and tries to read it back.
+ * Deletes a block and tries to read it back.
*
* @throws IOException
* @throws NoSuchAlgorithmException
*/
@Test
- public void testDeleteKey() throws IOException, NoSuchAlgorithmException {
+ public void testDeleteBlock() throws IOException, NoSuchAlgorithmException {
long testContainerID = getTestContainerID();
Container container = addContainer(containerSet, testContainerID);
BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
ChunkInfo info = writeChunkHelper(blockID);
- KeyData keyData = new KeyData(blockID);
+ BlockData blockData = new BlockData(blockID);
List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
chunkList.add(info.getProtoBufMessage());
- keyData.setChunks(chunkList);
- keyManager.putKey(container, keyData);
- keyManager.deleteKey(container, blockID);
+ blockData.setChunks(chunkList);
+ blockManager.putBlock(container, blockData);
+ blockManager.deleteBlock(container, blockID);
exception.expect(StorageContainerException.class);
- exception.expectMessage("Unable to find the key.");
- keyManager.getKey(container, keyData.getBlockID());
+ exception.expectMessage("Unable to find the block.");
+ blockManager.getBlock(container, blockData.getBlockID());
}
/**
- * Tries to Deletes a key twice.
+ * Tries to Deletes a block twice.
*
* @throws IOException
* @throws NoSuchAlgorithmException
*/
@Test
- public void testDeleteKeyTwice() throws IOException,
+ public void testDeleteBlockTwice() throws IOException,
NoSuchAlgorithmException {
long testContainerID = getTestContainerID();
Container container = addContainer(containerSet, testContainerID);
BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
ChunkInfo info = writeChunkHelper(blockID);
- KeyData keyData = new KeyData(blockID);
+ BlockData blockData = new BlockData(blockID);
List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
chunkList.add(info.getProtoBufMessage());
- keyData.setChunks(chunkList);
- keyManager.putKey(container, keyData);
- keyManager.deleteKey(container, blockID);
+ blockData.setChunks(chunkList);
+ blockManager.putBlock(container, blockData);
+ blockManager.deleteBlock(container, blockID);
exception.expect(StorageContainerException.class);
- exception.expectMessage("Unable to find the key.");
- keyManager.deleteKey(container, blockID);
+ exception.expectMessage("Unable to find the block.");
+ blockManager.deleteBlock(container, blockID);
}
/**
@@ -722,8 +729,9 @@ public class TestContainerPersistence {
try {
container.update(newMetadata, false);
} catch (StorageContainerException ex) {
- Assert.assertEquals("Updating a closed container without force option " +
- "is not allowed. ContainerID: " + testContainerID, ex.getMessage());
+ Assert.assertEquals("Updating a closed container without " +
+ "force option is not allowed. ContainerID: " +
+ testContainerID, ex.getMessage());
}
// Update with force flag, it should be success.
@@ -741,53 +749,55 @@ public class TestContainerPersistence {
}
- private KeyData writeKeyHelper(BlockID blockID)
+ private BlockData writeBlockHelper(BlockID blockID)
throws IOException, NoSuchAlgorithmException {
ChunkInfo info = writeChunkHelper(blockID);
- KeyData keyData = new KeyData(blockID);
+ BlockData blockData = new BlockData(blockID);
List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
chunkList.add(info.getProtoBufMessage());
- keyData.setChunks(chunkList);
- return keyData;
+ blockData.setChunks(chunkList);
+ return blockData;
}
@Test
- public void testListKey() throws Exception {
+ public void testListBlock() throws Exception {
long testContainerID = getTestContainerID();
Container container = addContainer(containerSet, testContainerID);
- List<BlockID> expectedKeys = new ArrayList<>();
+ List<BlockID> expectedBlocks = new ArrayList<>();
for (int i = 0; i < 10; i++) {
BlockID blockID = new BlockID(testContainerID, i);
- expectedKeys.add(blockID);
- KeyData kd = writeKeyHelper(blockID);
- keyManager.putKey(container, kd);
+ expectedBlocks.add(blockID);
+ BlockData kd = writeBlockHelper(blockID);
+ blockManager.putBlock(container, kd);
}
- // List all keys
- List<KeyData> result = keyManager.listKey(container, 0, 100);
+ // List all blocks
+ List<BlockData> result = blockManager.listBlock(
+ container, 0, 100);
Assert.assertEquals(10, result.size());
int index = 0;
for (int i = index; i < result.size(); i++) {
- KeyData data = result.get(i);
+ BlockData data = result.get(i);
Assert.assertEquals(testContainerID, data.getContainerID());
- Assert.assertEquals(expectedKeys.get(i).getLocalID(), data.getLocalID());
+ Assert.assertEquals(expectedBlocks.get(i).getLocalID(),
+ data.getLocalID());
index++;
}
- // List key with startKey filter
- long k6 = expectedKeys.get(6).getLocalID();
- result = keyManager.listKey(container, k6, 100);
+ // List block with startBlock filter
+ long k6 = expectedBlocks.get(6).getLocalID();
+ result = blockManager.listBlock(container, k6, 100);
Assert.assertEquals(4, result.size());
for (int i = 6; i < 10; i++) {
- Assert.assertEquals(expectedKeys.get(i).getLocalID(),
+ Assert.assertEquals(expectedBlocks.get(i).getLocalID(),
result.get(i - 6).getLocalID());
}
// Count must be >0
exception.expect(IllegalArgumentException.class);
exception.expectMessage("Count must be a positive number.");
- keyManager.listKey(container, 0, -1);
+ blockManager.listBlock(container, 0, -1);
}
}
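Viewed end to end, the rename gives the datanode container API a uniform block vocabulary: KeyData becomes BlockData, KeyManager becomes BlockManager, and putKey/getKey/deleteKey/listKey become putBlock/getBlock/deleteBlock/listBlock. A hedged sketch of the renamed call sequence exercised by the tests above (container and manager construction elided; the BlockID import path is assumed, the other imports follow the diff):

import java.io.IOException;
import java.util.LinkedList;
import java.util.List;

import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.common.interfaces.Container;
import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;

public class BlockManagerRoundTrip {
  static void roundTrip(BlockManager blockManager, Container container,
      BlockID blockID, ContainerProtos.ChunkInfo chunk) throws IOException {
    BlockData blockData = new BlockData(blockID);   // was: new KeyData(blockID)
    List<ContainerProtos.ChunkInfo> chunks = new LinkedList<>();
    chunks.add(chunk);
    blockData.setChunks(chunks);

    blockManager.putBlock(container, blockData);    // was: putKey
    // Read the block back, then delete it; a second delete would fail
    // with "Unable to find the block." per the updated tests.
    BlockData read = blockManager.getBlock(container, blockID); // was: getKey
    blockManager.deleteBlock(container, blockID);   // was: deleteKey
  }
}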
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
index 3c77687..a129ed0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
@@ -41,7 +41,7 @@ import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.container.common.impl.ContainerData;
import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
@@ -69,6 +69,9 @@ import static org.apache.hadoop.hdds
import static org.apache.hadoop.ozone
.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
+/**
+ * Tests for Block deletion.
+ */
public class TestBlockDeletion {
private static OzoneConfiguration conf = null;
private static ObjectStore store;
@@ -229,7 +232,7 @@ public class TestBlockDeletion {
throws IOException {
return OzoneTestUtils.performOperationOnKeyContainers((blockID) -> {
try {
- MetadataStore db = KeyUtils.getDB((KeyValueContainerData)
+ MetadataStore db = BlockUtils.getDB((KeyValueContainerData)
dnContainerSet.getContainer(blockID.getContainerID())
.getContainerData(), conf);
Assert.assertNotNull(db.get(Longs.toByteArray(blockID.getLocalID())));
@@ -244,7 +247,7 @@ public class TestBlockDeletion {
throws IOException {
return OzoneTestUtils.performOperationOnKeyContainers((blockID) -> {
try {
- MetadataStore db = KeyUtils.getDB((KeyValueContainerData)
+ MetadataStore db = BlockUtils.getDB((KeyValueContainerData)
dnContainerSet.getContainer(blockID.getContainerID())
.getContainerData(), conf);
Assert.assertNull(db.get(Longs.toByteArray(blockID.getLocalID())));
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index 5dd88fb..a3c92fb 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -68,7 +68,8 @@ public class TestOzoneContainer {
conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.getRoot().getPath());
conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, pipeline.getLeader()
.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
- conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false);
+ conf.setBoolean(
+ OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false);
container = new OzoneContainer(TestUtils.randomDatanodeDetails(),
conf, null);
@@ -129,7 +130,7 @@ public class TestOzoneContainer {
static void runTestOzoneContainerViaDataNode(
long testContainerID, XceiverClientSpi client) throws Exception {
ContainerProtos.ContainerCommandRequestProto
- request, writeChunkRequest, putKeyRequest,
+ request, writeChunkRequest, putBlockRequest,
updateRequest1, updateRequest2;
ContainerProtos.ContainerCommandResponseProto response,
updateResponse1, updateResponse2;
@@ -138,46 +139,50 @@ public class TestOzoneContainer {
Pipeline pipeline = client.getPipeline();
createContainerForTesting(client, testContainerID);
- writeChunkRequest = writeChunkForContainer(client, testContainerID, 1024);
+ writeChunkRequest = writeChunkForContainer(client, testContainerID,
+ 1024);
// Read Chunk
- request = ContainerTestHelper.getReadChunkRequest(pipeline, writeChunkRequest
- .getWriteChunk());
+ request = ContainerTestHelper.getReadChunkRequest(
+ pipeline, writeChunkRequest.getWriteChunk());
response = client.sendCommand(request);
Assert.assertNotNull(response);
Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
- // Put Key
- putKeyRequest = ContainerTestHelper.getPutKeyRequest(pipeline, writeChunkRequest
- .getWriteChunk());
+ // Put Block
+ putBlockRequest = ContainerTestHelper.getPutBlockRequest(
+ pipeline, writeChunkRequest.getWriteChunk());
- response = client.sendCommand(putKeyRequest);
+ response = client.sendCommand(putBlockRequest);
Assert.assertNotNull(response);
Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
- Assert
- .assertTrue(putKeyRequest.getTraceID().equals(response.getTraceID()));
+ Assert.assertTrue(putBlockRequest.getTraceID()
+ .equals(response.getTraceID()));
- // Get Key
- request = ContainerTestHelper.getKeyRequest(pipeline, putKeyRequest.getPutKey());
+ // Get Block
+ request = ContainerTestHelper.
+ getBlockRequest(pipeline, putBlockRequest.getPutBlock());
response = client.sendCommand(request);
- int chunksCount = putKeyRequest.getPutKey().getKeyData().getChunksCount();
- ContainerTestHelper.verifyGetKey(request, response, chunksCount);
+ int chunksCount = putBlockRequest.getPutBlock().getBlockData().
+ getChunksCount();
+ ContainerTestHelper.verifyGetBlock(request, response, chunksCount);
- // Delete Key
+ // Delete Block
request =
- ContainerTestHelper.getDeleteKeyRequest(pipeline, putKeyRequest.getPutKey());
+ ContainerTestHelper.getDeleteBlockRequest(
+ pipeline, putBlockRequest.getPutBlock());
response = client.sendCommand(request);
Assert.assertNotNull(response);
Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
//Delete Chunk
- request = ContainerTestHelper.getDeleteChunkRequest(pipeline, writeChunkRequest
- .getWriteChunk());
+ request = ContainerTestHelper.getDeleteChunkRequest(
+ pipeline, writeChunkRequest.getWriteChunk());
response = client.sendCommand(request);
Assert.assertNotNull(response);
@@ -249,7 +254,7 @@ public class TestOzoneContainer {
final ContainerProtos.ContainerCommandRequestProto getSmallFileRequest
= ContainerTestHelper.getReadSmallFileRequest(client.getPipeline(),
- smallFileRequest.getPutSmallFile().getKey());
+ smallFileRequest.getPutSmallFile().getBlock());
response = client.sendCommand(getSmallFileRequest);
Assert.assertArrayEquals(
smallFileRequest.getPutSmallFile().getData().toByteArray(),
@@ -269,7 +274,7 @@ public class TestOzoneContainer {
XceiverClientGrpc client = null;
ContainerProtos.ContainerCommandResponseProto response;
ContainerProtos.ContainerCommandRequestProto
- writeChunkRequest, putKeyRequest, request;
+ writeChunkRequest, putBlockRequest, request;
try {
OzoneConfiguration conf = newOzoneConfiguration();
@@ -283,18 +288,19 @@ public class TestOzoneContainer {
long containerID = ContainerTestHelper.getTestContainerID();
createContainerForTesting(client, containerID);
- writeChunkRequest = writeChunkForContainer(client, containerID, 1024);
+ writeChunkRequest = writeChunkForContainer(client, containerID,
+ 1024);
- putKeyRequest = ContainerTestHelper.getPutKeyRequest(client.getPipeline(),
- writeChunkRequest.getWriteChunk());
- // Put key before closing.
- response = client.sendCommand(putKeyRequest);
+ putBlockRequest = ContainerTestHelper.getPutBlockRequest(
+ client.getPipeline(), writeChunkRequest.getWriteChunk());
+ // Put block before closing.
+ response = client.sendCommand(putBlockRequest);
Assert.assertNotNull(response);
Assert.assertEquals(ContainerProtos.Result.SUCCESS,
response.getResult());
Assert.assertTrue(
- putKeyRequest.getTraceID().equals(response.getTraceID()));
+ putBlockRequest.getTraceID().equals(response.getTraceID()));
// Close the container.
request = ContainerTestHelper.getCloseContainer(
@@ -325,25 +331,26 @@ public class TestOzoneContainer {
Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
- // Put key will fail on a closed container.
- response = client.sendCommand(putKeyRequest);
+ // Put block will fail on a closed container.
+ response = client.sendCommand(putBlockRequest);
Assert.assertNotNull(response);
Assert.assertEquals(ContainerProtos.Result.CLOSED_CONTAINER_IO,
response.getResult());
- Assert
- .assertTrue(putKeyRequest.getTraceID().equals(response.getTraceID()));
+ Assert.assertTrue(putBlockRequest.getTraceID()
+ .equals(response.getTraceID()));
- // Get key must work on the closed container.
- request = ContainerTestHelper.getKeyRequest(client.getPipeline(),
- putKeyRequest.getPutKey());
+ // Get block must work on the closed container.
+ request = ContainerTestHelper.getBlockRequest(client.getPipeline(),
+ putBlockRequest.getPutBlock());
response = client.sendCommand(request);
- int chunksCount = putKeyRequest.getPutKey().getKeyData().getChunksCount();
- ContainerTestHelper.verifyGetKey(request, response, chunksCount);
+ int chunksCount = putBlockRequest.getPutBlock().getBlockData()
+ .getChunksCount();
+ ContainerTestHelper.verifyGetBlock(request, response, chunksCount);
- // Delete Key must fail on a closed container.
+ // Delete block must fail on a closed container.
request =
- ContainerTestHelper.getDeleteKeyRequest(client.getPipeline(),
- putKeyRequest.getPutKey());
+ ContainerTestHelper.getDeleteBlockRequest(client.getPipeline(),
+ putBlockRequest.getPutBlock());
response = client.sendCommand(request);
Assert.assertNotNull(response);
Assert.assertEquals(ContainerProtos.Result.CLOSED_CONTAINER_IO,
@@ -365,7 +372,7 @@ public class TestOzoneContainer {
XceiverClientGrpc client = null;
ContainerProtos.ContainerCommandResponseProto response;
ContainerProtos.ContainerCommandRequestProto request,
- writeChunkRequest, putKeyRequest;
+ writeChunkRequest, putBlockRequest;
try {
OzoneConfiguration conf = newOzoneConfiguration();
@@ -378,17 +385,18 @@ public class TestOzoneContainer {
long containerID = ContainerTestHelper.getTestContainerID();
createContainerForTesting(client, containerID);
- writeChunkRequest = writeChunkForContainer(client, containerID, 1024);
+ writeChunkRequest = writeChunkForContainer(
+ client, containerID, 1024);
- putKeyRequest = ContainerTestHelper.getPutKeyRequest(client.getPipeline(),
- writeChunkRequest.getWriteChunk());
+ putBlockRequest = ContainerTestHelper.getPutBlockRequest(
+ client.getPipeline(), writeChunkRequest.getWriteChunk());
// Put key before deleting.
- response = client.sendCommand(putKeyRequest);
+ response = client.sendCommand(putBlockRequest);
Assert.assertNotNull(response);
Assert.assertEquals(ContainerProtos.Result.SUCCESS,
response.getResult());
Assert.assertTrue(
- putKeyRequest.getTraceID().equals(response.getTraceID()));
+ putBlockRequest.getTraceID().equals(response.getTraceID()));
// Container cannot be deleted forcibly because
// the container is not closed.
@@ -529,7 +537,7 @@ public class TestOzoneContainer {
writeChunkForContainer(XceiverClientSpi client,
long containerID, int dataLen) throws Exception {
// Write Chunk
- BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);;
+ BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
ContainerTestHelper.getWriteChunkRequest(client.getPipeline(),
blockID, dataLen);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerStateMachine.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerStateMachine.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerStateMachine.java
index 8c83fd3..c875a7e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerStateMachine.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerStateMachine.java
@@ -89,7 +89,7 @@ public class TestContainerStateMachine {
// add putKey request
ContainerCommandRequestProto putKeyProto = ContainerTestHelper
- .getPutKeyRequest(pipeline, writeChunkProto.getWriteChunk());
+ .getPutBlockRequest(pipeline, writeChunkProto.getWriteChunk());
RaftClientRequest putKeyRequest = getRaftClientRequest(putKeyProto);
TransactionContext createContainerCtxt =
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java
index f309715..7144005 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java
@@ -1209,8 +1209,8 @@ public class TestOzoneManager {
//Disabling this test
@Ignore("Disabling this test until Open Key is fixed.")
public void testExpiredOpenKey() throws Exception {
-// BackgroundService openKeyCleanUpService = ((KeyManagerImpl)cluster
-// .getOzoneManager().getKeyManager()).getOpenKeyCleanupService();
+// BackgroundService openKeyCleanUpService = ((BlockManagerImpl)cluster
+// .getOzoneManager().getBlockManager()).getOpenKeyCleanupService();
String userName = "user" + RandomStringUtils.randomNumeric(5);
String adminName = "admin" + RandomStringUtils.randomNumeric(5);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
index a2d95e8..84a4028 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
@@ -103,7 +103,7 @@ public class TestContainerSmallFile {
}
@Test
- public void testInvalidKeyRead() throws Exception {
+ public void testInvalidBlockRead() throws Exception {
String traceID = UUID.randomUUID().toString();
ContainerWithPipeline container =
storageContainerLocationClient.allocateContainer(
@@ -116,7 +116,7 @@ public class TestContainerSmallFile {
container.getContainerInfo().getContainerID(), traceID);
thrown.expect(StorageContainerException.class);
- thrown.expectMessage("Unable to find the key");
+ thrown.expectMessage("Unable to find the block");
BlockID blockID = ContainerTestHelper.getTestBlockID(
container.getContainerInfo().getContainerID());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java
index f82b0d3..08e7808 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java
@@ -107,7 +107,7 @@ public class TestGetCommittedBlockLengthAndPutKey {
// Now, explicitly make a putKey request for the block.
ContainerProtos.ContainerCommandRequestProto putKeyRequest =
ContainerTestHelper
- .getPutKeyRequest(pipeline, writeChunkRequest.getWriteChunk());
+ .getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk());
client.sendCommand(putKeyRequest);
response = ContainerProtocolCalls
.getCommittedBlockLength(client, blockID, traceID);
@@ -155,7 +155,7 @@ public class TestGetCommittedBlockLengthAndPutKey {
}
@Test
- public void tesGetCommittedBlockLengthForInvalidBlock() throws Exception {
+ public void testGetCommittedBlockLengthForInvalidBlock() throws Exception {
String traceID = UUID.randomUUID().toString();
ContainerWithPipeline container = storageContainerLocationClient
.allocateContainer(xceiverClientManager.getType(),
@@ -174,7 +174,7 @@ public class TestGetCommittedBlockLengthAndPutKey {
ContainerProtocolCalls.getCommittedBlockLength(client, blockID, traceID);
Assert.fail("Expected exception not thrown");
} catch (StorageContainerException sce) {
- Assert.assertTrue(sce.getMessage().contains("Unable to find the key"));
+ Assert.assertTrue(sce.getMessage().contains("Unable to find the block"));
}
xceiverClientManager.releaseClient(client);
}
@@ -216,7 +216,7 @@ public class TestGetCommittedBlockLengthAndPutKey {
@Test
public void tesPutKeyResposne() throws Exception {
- ContainerProtos.PutKeyResponseProto response;
+ ContainerProtos.PutBlockResponseProto response;
String traceID = UUID.randomUUID().toString();
ContainerWithPipeline container = storageContainerLocationClient
.allocateContainer(xceiverClientManager.getType(),
@@ -239,8 +239,8 @@ public class TestGetCommittedBlockLengthAndPutKey {
// Now, explicitly make a putKey request for the block.
ContainerProtos.ContainerCommandRequestProto putKeyRequest =
ContainerTestHelper
- .getPutKeyRequest(pipeline, writeChunkRequest.getWriteChunk());
- response = client.sendCommand(putKeyRequest).getPutKey();
+ .getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk());
+ response = client.sendCommand(putKeyRequest).getPutBlock();
// make sure the block ids in the request and response are same.
// This will also ensure that closing the container committed the block
// on the Datanodes.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
index 02cd985..7eb2ec2 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
@@ -44,7 +44,7 @@ import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
import org.apache.hadoop.ozone.client.rpc.RpcClient;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;
@@ -278,29 +278,29 @@ public class TestKeys {
}
static void runTestPutKey(PutHelper helper) throws Exception {
- final ClientProtocol client = helper.client;
+ final ClientProtocol helperClient = helper.client;
helper.putKey();
assertNotNull(helper.getBucket());
assertNotNull(helper.getFile());
- List<OzoneKey> keyList = client
+ List<OzoneKey> keyList = helperClient
.listKeys(helper.getVol().getName(), helper.getBucket().getName(), null,
null, 10);
Assert.assertEquals(1, keyList.size());
// test list key using a more efficient call
String newkeyName = OzoneUtils.getRequestID().toLowerCase();
- OzoneOutputStream ozoneOutputStream = client
+ OzoneOutputStream ozoneOutputStream = helperClient
.createKey(helper.getVol().getName(), helper.getBucket().getName(),
newkeyName, 0, replicationType, replicationFactor);
ozoneOutputStream.close();
- keyList = client
+ keyList = helperClient
.listKeys(helper.getVol().getName(), helper.getBucket().getName(), null,
null, 10);
Assert.assertEquals(2, keyList.size());
// test new put key with invalid volume/bucket name
try {
- ozoneOutputStream = client
+ ozoneOutputStream = helperClient
.createKey("invalid-volume", helper.getBucket().getName(), newkeyName,
0, replicationType, replicationFactor);
ozoneOutputStream.close();
@@ -312,7 +312,7 @@ public class TestKeys {
}
try {
- ozoneOutputStream = client
+ ozoneOutputStream = helperClient
.createKey(helper.getVol().getName(), "invalid-bucket", newkeyName, 0,
replicationType, replicationFactor);
ozoneOutputStream.close();
@@ -380,7 +380,7 @@ public class TestKeys {
}
static void runTestPutAndGetKey(PutHelper helper) throws Exception {
- final ClientProtocol client = helper.client;
+ final ClientProtocol helperClient = helper.client;
String keyName = helper.putKey();
assertNotNull(helper.getBucket());
@@ -427,7 +427,8 @@ public class TestKeys {
// test new get key with invalid volume/bucket name
try {
- client.getKey("invalid-volume", helper.getBucket().getName(), keyName);
+ helperClient.getKey(
+ "invalid-volume", helper.getBucket().getName(), keyName);
fail("Get key should have thrown " + "when using invalid volume name.");
} catch (IOException e) {
GenericTestUtils
@@ -435,7 +436,8 @@ public class TestKeys {
}
try {
- client.getKey(helper.getVol().getName(), "invalid-bucket", keyName);
+ helperClient.getKey(
+ helper.getVol().getName(), "invalid-bucket", keyName);
fail("Get key should have thrown " + "when using invalid bucket name.");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains(
@@ -476,7 +478,7 @@ public class TestKeys {
}
static void runTestPutAndListKey(PutHelper helper) throws Exception {
- ClientProtocol client = helper.client;
+ ClientProtocol helperClient = helper.client;
helper.putKey();
assertNotNull(helper.getBucket());
assertNotNull(helper.getFile());
@@ -495,7 +497,7 @@ public class TestKeys {
List<OzoneKey> keyList1 =
IteratorUtils.toList(helper.getBucket().listKeys(null, null));
// test list key using a more efficient call
- List<OzoneKey> keyList2 = client
+ List<OzoneKey> keyList2 = helperClient
.listKeys(helper.getVol().getName(), helper.getBucket().getName(), null,
null, 100);
@@ -515,7 +517,7 @@ public class TestKeys {
}
// test maxLength parameter of list keys
- keyList2 = client
+ keyList2 = helperClient
.listKeys(helper.getVol().getName(), helper.getBucket().getName(), null,
null, 1);
Assert.assertEquals(1, keyList2.size());
@@ -523,7 +525,7 @@ public class TestKeys {
// test startKey parameter of list keys
keyList1 = IteratorUtils
.toList(helper.getBucket().listKeys("list-key", "list-key4"));
- keyList2 = client
+ keyList2 = helperClient
.listKeys(helper.getVol().getName(), helper.getBucket().getName(),
"list-key", "list-key4", 100);
Assert.assertEquals(5, keyList1.size());
@@ -532,7 +534,7 @@ public class TestKeys {
// test prefix parameter of list keys
keyList1 =
IteratorUtils.toList(helper.getBucket().listKeys("list-key2", null));
- keyList2 = client
+ keyList2 = helperClient
.listKeys(helper.getVol().getName(), helper.getBucket().getName(),
"list-key2", null, 100);
Assert.assertTrue(
@@ -542,7 +544,7 @@ public class TestKeys {
// test new list keys with invalid volume/bucket name
try {
- client.listKeys("invalid-volume", helper.getBucket().getName(),
+ helperClient.listKeys("invalid-volume", helper.getBucket().getName(),
null, null, 100);
fail("List keys should have thrown when using invalid volume name.");
} catch (IOException e) {
@@ -551,7 +553,7 @@ public class TestKeys {
}
try {
- client.listKeys(helper.getVol().getName(), "invalid-bucket", null,
+ helperClient.listKeys(helper.getVol().getName(), "invalid-bucket", null,
null, 100);
fail("List keys should have thrown when using invalid bucket name.");
} catch (IOException e) {
@@ -697,10 +699,10 @@ public class TestKeys {
.KeyValueContainer);
KeyValueContainer container = (KeyValueContainer) cm.getContainerSet()
.getContainer(location.getBlockID().getContainerID());
- KeyData blockInfo = keyValueHandler
- .getKeyManager().getKey(container, location.getBlockID());
- KeyValueContainerData containerData = (KeyValueContainerData) container
- .getContainerData();
+ BlockData blockInfo = keyValueHandler
+ .getBlockManager().getBlock(container, location.getBlockID());
+ KeyValueContainerData containerData =
+ (KeyValueContainerData) container.getContainerData();
File dataDir = new File(containerData.getChunksPath());
for (ContainerProtos.ChunkInfo chunkInfo : blockInfo.getChunks()) {
File chunkFile = dataDir.toPath()
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
index 9c451e2..3e740d9 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
@@ -54,7 +54,7 @@ public class BucketManagerImpl implements BucketManager {
/**
* MetadataDB is maintained in MetadataManager and shared between
- * BucketManager and VolumeManager. (and also by KeyManager)
+ * BucketManager and VolumeManager. (and also by BlockManager)
*
* BucketManager uses MetadataDB to store bucket level information.
*
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java
index 2da60de..eb533e8 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java
@@ -102,7 +102,7 @@ public class ScmBlockLocationTestIngClient implements ScmBlockLocationProtocol {
}
/**
- * Returns Fake blocks to the KeyManager so we get blocks in the Database.
+ * Returns Fake blocks to the BlockManager so we get blocks in the Database.
* @param size - size of the block.
* @param type Replication Type
* @param factor - Replication factor
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
index 5ac7e0a..8811d91 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
@@ -48,17 +48,16 @@ import java.util.Random;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicInteger;
-
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .PutBlockRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .GetBlockRequestProto;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ContainerCommandRequestProto;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ReadChunkRequestProto;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.WriteChunkRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
- .PutKeyRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
- .GetKeyRequestProto;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@@ -141,7 +140,7 @@ public class BenchMarkDatanodeDispatcher {
long containerID = containers.get(y);
BlockID blockID = new BlockID(containerID, key);
dispatcher
- .dispatch(getPutKeyCommand(blockID, chunkName));
+ .dispatch(getPutBlockCommand(blockID, chunkName));
dispatcher.dispatch(getWriteChunkCommand(blockID, chunkName));
}
}
@@ -213,38 +212,39 @@ public class BenchMarkDatanodeDispatcher {
return builder.build();
}
- private ContainerCommandRequestProto getPutKeyCommand(
+ private ContainerCommandRequestProto getPutBlockCommand(
BlockID blockID, String chunkKey) {
- PutKeyRequestProto.Builder putKeyRequest = PutKeyRequestProto
+ PutBlockRequestProto.Builder putBlockRequest = PutBlockRequestProto
.newBuilder()
- .setKeyData(getKeyData(blockID, chunkKey));
+ .setBlockData(getBlockData(blockID, chunkKey));
ContainerCommandRequestProto.Builder request = ContainerCommandRequestProto
.newBuilder();
- request.setCmdType(ContainerProtos.Type.PutKey)
+ request.setCmdType(ContainerProtos.Type.PutBlock)
.setContainerID(blockID.getContainerID())
.setTraceID(getBlockTraceID(blockID))
.setDatanodeUuid(datanodeUuid)
- .setPutKey(putKeyRequest);
+ .setPutBlock(putBlockRequest);
return request.build();
}
- private ContainerCommandRequestProto getGetKeyCommand(BlockID blockID) {
- GetKeyRequestProto.Builder readKeyRequest = GetKeyRequestProto.newBuilder()
+ private ContainerCommandRequestProto getGetBlockCommand(BlockID blockID) {
+ GetBlockRequestProto.Builder readBlockRequest =
+ GetBlockRequestProto.newBuilder()
.setBlockID(blockID.getDatanodeBlockIDProtobuf());
ContainerCommandRequestProto.Builder request = ContainerCommandRequestProto
.newBuilder()
- .setCmdType(ContainerProtos.Type.GetKey)
+ .setCmdType(ContainerProtos.Type.GetBlock)
.setContainerID(blockID.getContainerID())
.setTraceID(getBlockTraceID(blockID))
.setDatanodeUuid(datanodeUuid)
- .setGetKey(readKeyRequest);
+ .setGetBlock(readBlockRequest);
return request.build();
}
- private ContainerProtos.KeyData getKeyData(
+ private ContainerProtos.BlockData getBlockData(
BlockID blockID, String chunkKey) {
- ContainerProtos.KeyData.Builder builder = ContainerProtos.KeyData
+ ContainerProtos.BlockData.Builder builder = ContainerProtos.BlockData
.newBuilder()
.setBlockID(blockID.getDatanodeBlockIDProtobuf())
.addChunks(getChunkInfo(blockID, chunkKey));
@@ -275,16 +275,16 @@ public class BenchMarkDatanodeDispatcher {
}
@Benchmark
- public void putKey(BenchMarkDatanodeDispatcher bmdd) {
+ public void putBlock(BenchMarkDatanodeDispatcher bmdd) {
BlockID blockID = getRandomBlockID();
String chunkKey = getNewChunkToWrite();
- bmdd.dispatcher.dispatch(getPutKeyCommand(blockID, chunkKey));
+ bmdd.dispatcher.dispatch(getPutBlockCommand(blockID, chunkKey));
}
@Benchmark
- public void getKey(BenchMarkDatanodeDispatcher bmdd) {
+ public void getBlock(BenchMarkDatanodeDispatcher bmdd) {
BlockID blockID = getRandomBlockID();
- bmdd.dispatcher.dispatch(getGetKeyCommand(blockID));
+ bmdd.dispatcher.dispatch(getGetBlockCommand(blockID));
}
// Chunks writes from benchmark only reaches certain containers
[13/18] hadoop git commit: Merge commit
'9af96d4ed4b6f80d3ca53a2b003d2ef768650dd4' into HDFS-12943
Posted by sh...@apache.org.
Merge commit '9af96d4ed4b6f80d3ca53a2b003d2ef768650dd4' into HDFS-12943
# Conflicts:
# hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4cdd0b9c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4cdd0b9c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4cdd0b9c
Branch: refs/heads/HDFS-12943
Commit: 4cdd0b9cdc9c39384333c1757766f02b1b9d0daf
Parents: 94d7f90 9af96d4
Author: Konstantin V Shvachko <sh...@apache.org>
Authored: Mon Sep 17 17:39:11 2018 -0700
Committer: Konstantin V Shvachko <sh...@apache.org>
Committed: Fri Sep 21 18:17:41 2018 -0700
----------------------------------------------------------------------
.../org/apache/hadoop/http/IsActiveServlet.java | 71 +++++++++++++++
.../apache/hadoop/http/TestIsActiveServlet.java | 95 ++++++++++++++++++++
.../router/IsRouterActiveServlet.java | 37 ++++++++
.../federation/router/RouterHttpServer.java | 9 ++
.../src/site/markdown/HDFSRouterFederation.md | 2 +-
.../namenode/IsNameNodeActiveServlet.java | 33 +++++++
.../server/namenode/NameNodeHttpServer.java | 3 +
.../markdown/HDFSHighAvailabilityWithQJM.md | 8 ++
.../IsResourceManagerActiveServlet.java | 38 ++++++++
.../server/resourcemanager/ResourceManager.java | 5 ++
.../resourcemanager/webapp/RMWebAppFilter.java | 3 +-
.../src/site/markdown/ResourceManagerHA.md | 5 ++
12 files changed, 307 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4cdd0b9c/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
index 0d20091,e4363fb..76a9837
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
@@@ -423,34 -423,14 +423,42 @@@ This guide describes high-level uses o
**Note:** This is not yet implemented, and at present will always return
success, unless the given NameNode is completely down.
+
+ ### Load Balancer Setup
+
+ If you are running a set of NameNodes behind a Load Balancer (e.g. [Azure](https://docs.microsoft.com/en-us/azure/load-balancer/load-balancer-custom-probe-overview) or [AWS](https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-healthchecks.html)) and would like the Load Balancer to point to the active NN, you can use the /isActive HTTP endpoint as a health probe.
+ http://NN_HOSTNAME/isActive will return a 200 status code response if the NN is in the Active HA state, and 405 otherwise.
+
+
+
+### In-Progress Edit Log Tailing
+
+Under the default settings, the Standby NameNode will only apply edits that are present in edit
+log segments which have been finalized. If it is desirable to have a Standby NameNode which has more
+up-to-date namespace information, it is possible to enable tailing of in-progress edit segments.
+This setting will attempt to fetch edits from an in-memory cache on the JournalNodes and can reduce
+the lag time before a transaction is applied on the Standby NameNode to the order of milliseconds.
+If an edit cannot be served from the cache, the Standby will still be able to retrieve it, but the
+lag time will be much longer. The relevant configurations are:
+
+* **dfs.ha.tail-edits.in-progress** - Whether or not to enable tailing of in-progress edit logs.
+ This will also enable the in-memory edit cache on the JournalNodes. Disabled by default.
+
+* **dfs.journalnode.edit-cache-size.bytes** - The size of the in-memory cache of edits on the
+ JournalNode. Edits take around 200 bytes each in a typical environment, so, for example, the
+ default of 1048576 (1MB) can hold around 5000 transactions. It is recommended to monitor the
+ JournalNode metrics RpcRequestCacheMissAmountNumMisses and RpcRequestCacheMissAmountAvgTxns,
+ which respectively count the number of requests unable to be served by the cache, and the extra
+ number of transactions which would have needed to have been in the cache for the request to
+ succeed. For example, if a request attempted to fetch edits starting at transaction ID 10, but
+ the oldest data in the cache was at transaction ID 20, a value of 10 would be added to the
+ average.
+
+This feature is primarily useful in conjunction with the Standby/Observer Read feature. Using this
+feature, read requests can be serviced from non-active NameNodes; thus tailing in-progress edits
+provides these nodes with the ability to serve requests with data which is much more fresh. See the
+Apache JIRA ticket HDFS-12943 for more information on this feature.
+
Automatic Failover
------------------
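A minimal probe for the /isActive endpoint described in the Load Balancer Setup hunk above (a sketch only; the IsActiveProbe class name and the host:port argument are placeholders, not part of this commit):

    import java.net.HttpURLConnection;
    import java.net.URL;

    public final class IsActiveProbe {
      // Returns true when the NameNode answers /isActive with HTTP 200,
      // i.e. it is in the Active HA state; the servlet answers 405 otherwise.
      public static boolean isActive(String nnHostPort) throws Exception {
        URL url = new URL("http://" + nnHostPort + "/isActive");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("GET");
        try {
          return conn.getResponseCode() == 200;
        } finally {
          conn.disconnect();
        }
      }
    }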
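And a sketch of setting the in-progress edit tailing keys documented above programmatically; in a real deployment these would normally live in hdfs-site.xml, and 1048576 is simply the documented default cache size:

    import org.apache.hadoop.conf.Configuration;

    public final class TailEditsConfigSketch {
      public static Configuration withInProgressTailing() {
        Configuration conf = new Configuration();
        // Enable tailing of in-progress edit segments; this also turns on
        // the in-memory edit cache on the JournalNodes. Disabled by default.
        conf.setBoolean("dfs.ha.tail-edits.in-progress", true);
        // Documented default: 1048576 bytes, roughly 5000 transactions at
        // about 200 bytes per edit.
        conf.setLong("dfs.journalnode.edit-cache-size.bytes", 1048576);
        return conf;
      }
    }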
[17/18] hadoop git commit: HDFS-13778. [SBN read]
TestStateAlignmentContextWithHA should use real ObserverReadProxyProvider
instead of AlignmentContextProxyProvider. Contributed by Konstantin Shvachko
and Plamen Jeliazkov.
Posted by sh...@apache.org.
HDFS-13778. [SBN read] TestStateAlignmentContextWithHA should use real ObserverReadProxyProvider instead of AlignmentContextProxyProvider. Contributed by Konstantin Shvachko and Plamen Jeliazkov.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a1f9c005
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a1f9c005
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a1f9c005
Branch: refs/heads/HDFS-12943
Commit: a1f9c0051664e4a13118258c2219081cd7d91e77
Parents: c04e0c0
Author: Konstantin V Shvachko <sh...@apache.org>
Authored: Mon Sep 17 18:25:27 2018 -0700
Committer: Konstantin V Shvachko <sh...@apache.org>
Committed: Fri Sep 21 18:31:11 2018 -0700
----------------------------------------------------------------------
.../hdfs/TestStateAlignmentContextWithHA.java | 186 ++++++-------------
1 file changed, 57 insertions(+), 129 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1f9c005/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java
index 1acbd75..a494252 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java
@@ -18,28 +18,24 @@
package org.apache.hadoop.hdfs;
-import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.is;
-import static org.hamcrest.CoreMatchers.not;
import static org.junit.Assert.assertThat;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.ha.ClientHAProxyFactory;
-import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.server.namenode.ha.HAProxyFactory;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
-import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos;
-import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.ha.ObserverReadProxyProvider;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
-import org.mockito.Mockito;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.URI;
@@ -61,55 +57,31 @@ import java.util.concurrent.TimeUnit;
* to the most recent alignment state of the server.
*/
public class TestStateAlignmentContextWithHA {
+ public static final Logger LOG =
+ LoggerFactory.getLogger(TestStateAlignmentContextWithHA.class.getName());
private static final int NUMDATANODES = 1;
private static final int NUMCLIENTS = 10;
- private static final int NUMFILES = 300;
+ private static final int NUMFILES = 120;
private static final Configuration CONF = new HdfsConfiguration();
- private static final String NAMESERVICE = "nameservice";
private static final List<ClientGSIContext> AC_LIST = new ArrayList<>();
private static MiniDFSCluster cluster;
private static List<Worker> clients;
- private static ClientGSIContext spy;
private DistributedFileSystem dfs;
private int active = 0;
private int standby = 1;
- static class AlignmentContextProxyProvider<T>
- extends ConfiguredFailoverProxyProvider<T> {
+ static class ORPPwithAlignmentContexts<T extends ClientProtocol>
+ extends ObserverReadProxyProvider<T> {
- private ClientGSIContext alignmentContext;
-
- public AlignmentContextProxyProvider(
+ public ORPPwithAlignmentContexts(
Configuration conf, URI uri, Class<T> xface,
HAProxyFactory<T> factory) throws IOException {
super(conf, uri, xface, factory);
- // Create and set AlignmentContext in HAProxyFactory.
- // All proxies by factory will now have AlignmentContext assigned.
- this.alignmentContext = (spy != null ? spy : new ClientGSIContext());
- ((ClientHAProxyFactory<T>) factory).setAlignmentContext(alignmentContext);
-
- AC_LIST.add(alignmentContext);
- }
- }
-
- static class SpyConfiguredContextProxyProvider<T>
- extends ConfiguredFailoverProxyProvider<T> {
-
- private ClientGSIContext alignmentContext;
-
- public SpyConfiguredContextProxyProvider(
- Configuration conf, URI uri, Class<T> xface,
- HAProxyFactory<T> factory) throws IOException {
- super(conf, uri, xface, factory);
-
- // Create but DON'T set in HAProxyFactory.
- this.alignmentContext = (spy != null ? spy : new ClientGSIContext());
-
- AC_LIST.add(alignmentContext);
+ AC_LIST.add((ClientGSIContext) getAlignmentContext());
}
}
@@ -121,23 +93,21 @@ public class TestStateAlignmentContextWithHA {
CONF.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
CONF.setBoolean("fs.hdfs.impl.disable.cache", true);
- MiniDFSNNTopology.NSConf nsConf = new MiniDFSNNTopology.NSConf(NAMESERVICE);
- nsConf.addNN(new MiniDFSNNTopology.NNConf("nn1"));
- nsConf.addNN(new MiniDFSNNTopology.NNConf("nn2"));
-
cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(NUMDATANODES)
- .nnTopology(MiniDFSNNTopology.simpleHATopology().addNameservice(nsConf))
+ .nnTopology(MiniDFSNNTopology.simpleHATopology(3))
.build();
cluster.waitActive();
cluster.transitionToActive(0);
+ cluster.transitionToObserver(2);
+
+ String nameservice = HATestUtil.getLogicalHostname(cluster);
+ HATestUtil.setFailoverConfigurations(cluster, CONF, nameservice, 0);
+ CONF.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX +
+ "." + nameservice, ORPPwithAlignmentContexts.class.getName());
}
@Before
public void before() throws IOException, URISyntaxException {
- killWorkers();
- HATestUtil.setFailoverConfigurations(cluster, CONF, NAMESERVICE, 0);
- CONF.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX +
- "." + NAMESERVICE, AlignmentContextProxyProvider.class.getName());
dfs = (DistributedFileSystem) FileSystem.get(CONF);
}
@@ -151,6 +121,7 @@ public class TestStateAlignmentContextWithHA {
@After
public void after() throws IOException {
+ killWorkers();
cluster.transitionToStandby(1);
cluster.transitionToActive(0);
active = 0;
@@ -160,26 +131,6 @@ public class TestStateAlignmentContextWithHA {
dfs = null;
}
AC_LIST.clear();
- spy = null;
- }
-
- /**
- * This test checks if after a client writes we can see the state id in
- * updated via the response.
- */
- @Test
- public void testNoStateOnConfiguredProxyProvider() throws Exception {
- Configuration confCopy = new Configuration(CONF);
- confCopy.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX +
- "." + NAMESERVICE, SpyConfiguredContextProxyProvider.class.getName());
-
- try (DistributedFileSystem clearDfs =
- (DistributedFileSystem) FileSystem.get(confCopy)) {
- ClientGSIContext clientState = getContext(1);
- assertThat(clientState.getLastSeenStateId(), is(Long.MIN_VALUE));
- DFSTestUtil.writeFile(clearDfs, new Path("/testFileNoState"), "no_state");
- assertThat(clientState.getLastSeenStateId(), is(Long.MIN_VALUE));
- }
}
/**
@@ -234,48 +185,6 @@ public class TestStateAlignmentContextWithHA {
}
/**
- * This test mocks an AlignmentContext and ensures that DFSClient
- * writes its lastSeenStateId into RPC requests.
- */
- @Test
- public void testClientSendsState() throws Exception {
- ClientGSIContext alignmentContext = new ClientGSIContext();
- ClientGSIContext spiedAlignContext = Mockito.spy(alignmentContext);
- spy = spiedAlignContext;
-
- try (DistributedFileSystem clearDfs =
- (DistributedFileSystem) FileSystem.get(CONF)) {
-
- // Collect RpcRequestHeaders for verification later.
- final List<RpcHeaderProtos.RpcRequestHeaderProto.Builder> headers =
- new ArrayList<>();
- Mockito.doAnswer(a -> {
- Object[] arguments = a.getArguments();
- RpcHeaderProtos.RpcRequestHeaderProto.Builder header =
- (RpcHeaderProtos.RpcRequestHeaderProto.Builder) arguments[0];
- headers.add(header);
- return a.callRealMethod();
- }).when(spiedAlignContext).updateRequestState(Mockito.any());
-
- DFSTestUtil.writeFile(clearDfs, new Path("/testFile4"), "shv");
-
- // Ensure first header and last header have different state.
- assertThat(headers.size() > 1, is(true));
- assertThat(headers.get(0).getStateId(),
- is(not(headers.get(headers.size() - 1))));
-
- // Ensure collected RpcRequestHeaders are in increasing order.
- long lastHeader = headers.get(0).getStateId();
- for (RpcHeaderProtos.RpcRequestHeaderProto.Builder header :
- headers.subList(1, headers.size())) {
- long currentHeader = header.getStateId();
- assertThat(currentHeader >= lastHeader, is(true));
- lastHeader = header.getStateId();
- }
- }
- }
-
- /**
* This test checks if after a client writes we can see the state id in
* updated via the response.
*/
@@ -310,14 +219,22 @@ public class TestStateAlignmentContextWithHA {
@Test(timeout=300000)
public void testMultiClientStatesWithRandomFailovers() throws Exception {
- // We want threads to run during failovers; assuming at minimum 4 cores,
- // would like to see 2 clients competing against 2 NameNodes.
+ // First run, half the load, with one failover.
+ runClientsWithFailover(1, NUMCLIENTS/2, NUMFILES/2);
+ // Second half, with fail back.
+ runClientsWithFailover(NUMCLIENTS/2 + 1, NUMCLIENTS, NUMFILES/2);
+ }
+
+ private void runClientsWithFailover(int clientStartId,
+ int numClients,
+ int numFiles)
+ throws Exception {
ExecutorService execService = Executors.newFixedThreadPool(2);
- clients = new ArrayList<>(NUMCLIENTS);
- for (int i = 1; i <= NUMCLIENTS; i++) {
+ clients = new ArrayList<>(numClients);
+ for (int i = clientStartId; i <= numClients; i++) {
DistributedFileSystem haClient =
(DistributedFileSystem) FileSystem.get(CONF);
- clients.add(new Worker(haClient, NUMFILES, "/testFile3FO_", i));
+ clients.add(new Worker(haClient, numFiles, "/testFile3FO_", i));
}
// Execute workers in threadpool with random failovers.
@@ -325,15 +242,18 @@ public class TestStateAlignmentContextWithHA {
execService.shutdown();
boolean finished = false;
+ failOver();
+
while (!finished) {
- failOver();
- finished = execService.awaitTermination(1L, TimeUnit.SECONDS);
+ finished = execService.awaitTermination(20L, TimeUnit.SECONDS);
}
// Validation.
for (Future<STATE> future : futures) {
assertThat(future.get(), is(STATE.SUCCESS));
}
+
+ clients.clear();
}
private ClientGSIContext getContext(int clientCreationIndex) {
@@ -341,7 +261,9 @@ public class TestStateAlignmentContextWithHA {
}
private void failOver() throws IOException {
+ LOG.info("Transitioning Active to Standby");
cluster.transitionToStandby(active);
+ LOG.info("Transitioning Standby to Active");
cluster.transitionToActive(standby);
int tempActive = active;
active = standby;
@@ -388,30 +310,36 @@ public class TestStateAlignmentContextWithHA {
@Override
public STATE call() {
+ int i = -1;
try {
- for (int i = 0; i < filesToMake; i++) {
- long preClientStateFO =
- getContext(nonce).getLastSeenStateId();
+ for (i = 0; i < filesToMake; i++) {
+ ClientGSIContext gsiContext = getContext(nonce);
+ long preClientStateFO = gsiContext.getLastSeenStateId();
// Write using HA client.
- Path path = new Path(filePath + nonce + i);
+ Path path = new Path(filePath + nonce + "_" + i);
DFSTestUtil.writeFile(client, path, "erk");
- long postClientStateFO =
- getContext(nonce).getLastSeenStateId();
+ long postClientStateFO = gsiContext.getLastSeenStateId();
// Write(s) should have increased state. Check for greater than.
- if (postClientStateFO <= preClientStateFO) {
- System.out.println("FAIL: Worker started with: " +
- preClientStateFO + ", but finished with: " + postClientStateFO);
+ if (postClientStateFO < 0 || postClientStateFO <= preClientStateFO) {
+ LOG.error("FAIL: Worker started with: {} , but finished with: {}",
+ preClientStateFO, postClientStateFO);
return STATE.FAIL;
}
+
+ if(i % (NUMFILES/10) == 0) {
+ LOG.info("Worker {} created {} files", nonce, i);
+ LOG.info("LastSeenStateId = {}", postClientStateFO);
+ }
}
- client.close();
return STATE.SUCCESS;
- } catch (IOException e) {
- System.out.println("ERROR: Worker failed with: " + e);
+ } catch (Exception e) {
+ LOG.error("ERROR: Worker failed with: ", e);
return STATE.ERROR;
+ } finally {
+ LOG.info("Worker {} created {} files", nonce, i);
}
}
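A condensed client-side sketch of the proxy provider wiring shown in the test setup above; the OrppClientSketch class and the nameservice argument are placeholders (in the test, the nameservice comes from HATestUtil.getLogicalHostname(cluster)):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
    import org.apache.hadoop.hdfs.server.namenode.ha.ObserverReadProxyProvider;

    public final class OrppClientSketch {
      public static FileSystem connect(Configuration conf, String nameservice)
          throws Exception {
        // Route this client's calls through ObserverReadProxyProvider so
        // that reads may be served by an Observer NameNode.
        conf.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX
            + "." + nameservice, ObserverReadProxyProvider.class.getName());
        return FileSystem.get(URI.create("hdfs://" + nameservice), conf);
      }
    }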
[03/18] hadoop git commit: HADOOP-15748. S3 listing inconsistency can
raise NPE in globber. Contributed by Steve Loughran.
Posted by sh...@apache.org.
HADOOP-15748. S3 listing inconsistency can raise NPE in globber.
Contributed by Steve Loughran.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/646874c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/646874c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/646874c3
Branch: refs/heads/HDFS-12943
Commit: 646874c326139457b79cf8cfa547b3c91a78c7b4
Parents: 7ad27e9
Author: Steve Loughran <st...@apache.org>
Authored: Thu Sep 20 13:04:52 2018 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Thu Sep 20 13:04:52 2018 +0100
----------------------------------------------------------------------
.../src/main/java/org/apache/hadoop/fs/Globber.java | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/646874c3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java
index ca3db1d..b241a94 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java
@@ -245,7 +245,18 @@ class Globber {
// incorrectly conclude that /a/b was a file and should not match
// /a/*/*. So we use getFileStatus of the path we just listed to
// disambiguate.
- if (!getFileStatus(candidate.getPath()).isDirectory()) {
+ Path path = candidate.getPath();
+ FileStatus status = getFileStatus(path);
+ if (status == null) {
+ // null means the file was not found
+ LOG.warn("File/directory {} not found:"
+ + " it may have been deleted."
+ + " If this is an object store, this can be a sign of"
+ + " eventual consistency problems.",
+ path);
+ continue;
+ }
+ if (!status.isDirectory()) {
continue;
}
}
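The defensive pattern above, restated as a standalone sketch: a path the listing reported but getFileStatus can no longer find may be an eventual-consistency artifact, so skip it rather than dereference a missing status. Note that the public FileSystem.getFileStatus signals absence with FileNotFoundException, while Globber's internal variant returns null; the ListingConsistencySketch class below is illustrative only:

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class ListingConsistencySketch {
      // Returns TRUE/FALSE for directory/file, or null if the entry
      // vanished between the listing and this probe.
      public static Boolean isDirectoryIfPresent(FileSystem fs, Path path)
          throws IOException {
        try {
          FileStatus status = fs.getFileStatus(path);
          return status.isDirectory();
        } catch (FileNotFoundException e) {
          // Listed a moment ago but gone now: on an object store this
          // can be a sign of eventual-consistency problems.
          return null;
        }
      }
    }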
[08/18] hadoop git commit: HDDS-394. Rename *Key Apis in
DatanodeContainerProtocol to *Block apis. Contributed by Dinesh Chitlangia.
Posted by sh...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
index ed4536f..4f2b3a2 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ContainerCommandResponseProto;
import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import org.apache.hadoop.utils.MetadataKeyFilters;
import org.apache.hadoop.utils.MetadataStore;
@@ -116,7 +116,7 @@ public final class KeyValueContainerUtil {
File chunksPath = new File(containerData.getChunksPath());
// Close the DB connection and remove the DB handler from cache
- KeyUtils.removeDB(containerData, conf);
+ BlockUtils.removeDB(containerData, conf);
// Delete the Container MetaData path.
FileUtils.deleteDirectory(containerMetaDataPath);
@@ -175,16 +175,16 @@ public final class KeyValueContainerUtil {
}
kvContainerData.setDbFile(dbFile);
- MetadataStore metadata = KeyUtils.getDB(kvContainerData, config);
+ MetadataStore metadata = BlockUtils.getDB(kvContainerData, config);
long bytesUsed = 0;
List<Map.Entry<byte[], byte[]>> liveKeys = metadata
.getRangeKVs(null, Integer.MAX_VALUE,
MetadataKeyFilters.getNormalKeyFilter());
bytesUsed = liveKeys.parallelStream().mapToLong(e-> {
- KeyData keyData;
+ BlockData blockData;
try {
- keyData = KeyUtils.getKeyData(e.getValue());
- return keyData.getSize();
+ blockData = BlockUtils.getBlockData(e.getValue());
+ return blockData.getSize();
} catch (IOException ex) {
return 0L;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/SmallFileUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/SmallFileUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/SmallFileUtils.java
index df60c60..3495363 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/SmallFileUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/SmallFileUtils.java
@@ -69,7 +69,7 @@ public final class SmallFileUtils {
ContainerProtos.ReadChunkResponseProto.newBuilder();
readChunkresponse.setChunkData(info.getProtoBufMessage());
readChunkresponse.setData(ByteString.copyFrom(data));
- readChunkresponse.setBlockID(msg.getGetSmallFile().getKey().getBlockID());
+ readChunkresponse.setBlockID(msg.getGetSmallFile().getBlock().getBlockID());
ContainerProtos.GetSmallFileResponseProto.Builder getSmallFile =
ContainerProtos.GetSmallFileResponseProto.newBuilder();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
new file mode 100644
index 0000000..54c15fb
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
@@ -0,0 +1,229 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.keyvalue.impl;
+
+import com.google.common.base.Preconditions;
+import com.google.common.primitives.Longs;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
+import org.apache.hadoop.ozone.container.common.interfaces.Container;
+import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
+import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
+import org.apache.hadoop.utils.MetadataStore;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.NO_SUCH_BLOCK;
+
+/**
+ * This class is for performing block related operations on the KeyValue
+ * Container.
+ */
+public class BlockManagerImpl implements BlockManager {
+
+ static final Logger LOG = LoggerFactory.getLogger(BlockManagerImpl.class);
+
+ private Configuration config;
+
+ /**
+ * Constructs a Block Manager.
+ *
+ * @param conf - Ozone configuration
+ */
+ public BlockManagerImpl(Configuration conf) {
+ Preconditions.checkNotNull(conf, "Config cannot be null");
+ this.config = conf;
+ }
+
+ /**
+ * Puts or overwrites a block.
+ *
+ * @param container - Container for which block need to be added.
+ * @param data - BlockData.
+ * @return length of the block.
+ * @throws IOException
+ */
+ public long putBlock(Container container, BlockData data) throws IOException {
+ Preconditions.checkNotNull(data, "BlockData cannot be null for put " +
+ "operation.");
+ Preconditions.checkState(data.getContainerID() >= 0, "Container Id " +
+ "cannot be negative");
+ // We are not locking the block manager since LevelDB serializes all actions
+ // against a single DB. We rely on DB level locking to avoid conflicts.
+ MetadataStore db = BlockUtils.getDB((KeyValueContainerData) container
+ .getContainerData(), config);
+
+ // This is a post condition that acts as a hint to the user.
+ // Should never fail.
+ Preconditions.checkNotNull(db, "DB cannot be null here");
+ db.put(Longs.toByteArray(data.getLocalID()), data.getProtoBufMessage()
+ .toByteArray());
+
+ // Increment keycount here
+ container.getContainerData().incrKeyCount();
+ return data.getSize();
+ }
+
+ /**
+ * Gets an existing block.
+ *
+ * @param container - Container from which block need to be fetched.
+ * @param blockID - BlockID of the block.
+ * @return Block Data.
+ * @throws IOException
+ */
+ public BlockData getBlock(Container container, BlockID blockID)
+ throws IOException {
+ Preconditions.checkNotNull(blockID,
+ "BlockID cannot be null in GetBlock request");
+ Preconditions.checkNotNull(blockID.getContainerID(),
+ "Container name cannot be null");
+
+ KeyValueContainerData containerData = (KeyValueContainerData) container
+ .getContainerData();
+ MetadataStore db = BlockUtils.getDB(containerData, config);
+ // This is a post condition that acts as a hint to the user.
+ // Should never fail.
+ Preconditions.checkNotNull(db, "DB cannot be null here");
+ byte[] kData = db.get(Longs.toByteArray(blockID.getLocalID()));
+ if (kData == null) {
+ throw new StorageContainerException("Unable to find the block.",
+ NO_SUCH_BLOCK);
+ }
+ ContainerProtos.BlockData blockData =
+ ContainerProtos.BlockData.parseFrom(kData);
+ return BlockData.getFromProtoBuf(blockData);
+ }
+
+ /**
+ * Returns the length of the committed block.
+ *
+ * @param container - Container from which block need to be fetched.
+ * @param blockID - BlockID of the block.
+ * @return length of the block.
+ * @throws IOException in case, the block key does not exist in db.
+ */
+ @Override
+ public long getCommittedBlockLength(Container container, BlockID blockID)
+ throws IOException {
+ KeyValueContainerData containerData = (KeyValueContainerData) container
+ .getContainerData();
+ MetadataStore db = BlockUtils.getDB(containerData, config);
+ // This is a post condition that acts as a hint to the user.
+ // Should never fail.
+ Preconditions.checkNotNull(db, "DB cannot be null here");
+ byte[] kData = db.get(Longs.toByteArray(blockID.getLocalID()));
+ if (kData == null) {
+ throw new StorageContainerException("Unable to find the block.",
+ NO_SUCH_BLOCK);
+ }
+ ContainerProtos.BlockData blockData =
+ ContainerProtos.BlockData.parseFrom(kData);
+ return blockData.getSize();
+ }
+
+ /**
+ * Deletes an existing block.
+ *
+ * @param container - Container from which block need to be deleted.
+ * @param blockID - ID of the block.
+ * @throws StorageContainerException
+ */
+ public void deleteBlock(Container container, BlockID blockID) throws
+ IOException {
+ Preconditions.checkNotNull(blockID, "block ID cannot be null.");
+ Preconditions.checkState(blockID.getContainerID() >= 0,
+ "Container ID cannot be negative.");
+ Preconditions.checkState(blockID.getLocalID() >= 0,
+ "Local ID cannot be negative.");
+
+ KeyValueContainerData cData = (KeyValueContainerData) container
+ .getContainerData();
+ MetadataStore db = BlockUtils.getDB(cData, config);
+ // This is a post condition that acts as a hint to the user.
+ // Should never fail.
+ Preconditions.checkNotNull(db, "DB cannot be null here");
+ // Note : There is a race condition here, since get and delete
+ // are not atomic. Leaving it here since the impact is refusing
+ // to delete a Block which might have just gotten inserted after
+ // the get check.
+ byte[] kKey = Longs.toByteArray(blockID.getLocalID());
+ byte[] kData = db.get(kKey);
+ if (kData == null) {
+ throw new StorageContainerException("Unable to find the block.",
+ NO_SUCH_BLOCK);
+ }
+ db.delete(kKey);
+
+ // Decrement blockcount here
+ container.getContainerData().decrKeyCount();
+ }
+
+ /**
+ * List blocks in a container.
+ *
+ * @param container - Container from which blocks need to be listed.
+ * @param startLocalID - Key to start from, 0 to begin.
+ * @param count - Number of blocks to return.
+ * @return List of Blocks that match the criteria.
+ */
+ @Override
+ public List<BlockData> listBlock(Container container, long startLocalID, int
+ count) throws IOException {
+ Preconditions.checkNotNull(container, "container cannot be null");
+ Preconditions.checkState(startLocalID >= 0, "startLocal ID cannot be " +
+ "negative");
+ Preconditions.checkArgument(count > 0,
+ "Count must be a positive number.");
+ container.readLock();
+ List<BlockData> result = null;
+ KeyValueContainerData cData = (KeyValueContainerData) container
+ .getContainerData();
+ MetadataStore db = BlockUtils.getDB(cData, config);
+ result = new ArrayList<>();
+ byte[] startKeyInBytes = Longs.toByteArray(startLocalID);
+ List<Map.Entry<byte[], byte[]>> range = db.getSequentialRangeKVs(
+ startKeyInBytes, count, null);
+ for (Map.Entry<byte[], byte[]> entry : range) {
+ BlockData value = BlockUtils.getBlockData(entry.getValue());
+ BlockData data = new BlockData(value.getBlockID());
+ result.add(data);
+ }
+ return result;
+ }
+
+ /**
+ * Shutdown KeyValueContainerManager.
+ */
+ public void shutdown() {
+ BlockUtils.shutdownCache(ContainerCache.getInstance(config));
+ }
+
+}
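A small usage sketch of the renamed API in the new file above, assuming an open KeyValue Container and a BlockID obtained elsewhere (for example from a test helper). The BlockManagerSketch class, the putAndGet method, and the chunk-less BlockData are illustrative assumptions; only BlockManagerImpl, BlockData, BlockID, and Container come from the commit:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdds.client.BlockID;
    import org.apache.hadoop.ozone.container.common.helpers.BlockData;
    import org.apache.hadoop.ozone.container.common.interfaces.Container;
    import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl;

    public final class BlockManagerSketch {
      // Writes a (chunk-less) block record and reads it back.
      public static long putAndGet(Container container, BlockID blockID)
          throws Exception {
        BlockManagerImpl blockManager =
            new BlockManagerImpl(new Configuration());
        BlockData data = new BlockData(blockID); // chunks would be added here
        long size = blockManager.putBlock(container, data);
        BlockData fetched = blockManager.getBlock(container, blockID);
        assert fetched.getBlockID().getLocalID() == blockID.getLocalID();
        return size;
      }
    }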
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyManagerImpl.java
deleted file mode 100644
index 6370f8e..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyManagerImpl.java
+++ /dev/null
@@ -1,227 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue.impl;
-
-import com.google.common.base.Preconditions;
-import com.google.common.primitives.Longs;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.apache.hadoop.ozone.container.keyvalue.interfaces.KeyManager;
-import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
-import org.apache.hadoop.utils.MetadataStore;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.NO_SUCH_KEY;
-
-/**
- * This class is for performing key related operations on the KeyValue
- * Container.
- */
-public class KeyManagerImpl implements KeyManager {
-
- static final Logger LOG = LoggerFactory.getLogger(KeyManagerImpl.class);
-
- private Configuration config;
-
- /**
- * Constructs a key Manager.
- *
- * @param conf - Ozone configuration
- */
- public KeyManagerImpl(Configuration conf) {
- Preconditions.checkNotNull(conf, "Config cannot be null");
- this.config = conf;
- }
-
- /**
- * Puts or overwrites a key.
- *
- * @param container - Container for which key need to be added.
- * @param data - Key Data.
- * @return length of the key.
- * @throws IOException
- */
- public long putKey(Container container, KeyData data) throws IOException {
- Preconditions.checkNotNull(data, "KeyData cannot be null for put " +
- "operation.");
- Preconditions.checkState(data.getContainerID() >= 0, "Container Id " +
- "cannot be negative");
- // We are not locking the key manager since LevelDb serializes all actions
- // against a single DB. We rely on DB level locking to avoid conflicts.
- MetadataStore db = KeyUtils.getDB((KeyValueContainerData) container
- .getContainerData(), config);
-
- // This is a post condition that acts as a hint to the user.
- // Should never fail.
- Preconditions.checkNotNull(db, "DB cannot be null here");
- db.put(Longs.toByteArray(data.getLocalID()), data.getProtoBufMessage()
- .toByteArray());
-
- // Increment keycount here
- container.getContainerData().incrKeyCount();
- return data.getSize();
- }
-
- /**
- * Gets an existing key.
- *
- * @param container - Container from which key need to be get.
- * @param blockID - BlockID of the key.
- * @return Key Data.
- * @throws IOException
- */
- public KeyData getKey(Container container, BlockID blockID)
- throws IOException {
- Preconditions.checkNotNull(blockID,
-        "BlockID cannot be null in GetKey request");
- Preconditions.checkNotNull(blockID.getContainerID(),
- "Container name cannot be null");
-
- KeyValueContainerData containerData = (KeyValueContainerData) container
- .getContainerData();
- MetadataStore db = KeyUtils.getDB(containerData, config);
- // This is a post condition that acts as a hint to the user.
- // Should never fail.
- Preconditions.checkNotNull(db, "DB cannot be null here");
- byte[] kData = db.get(Longs.toByteArray(blockID.getLocalID()));
- if (kData == null) {
- throw new StorageContainerException("Unable to find the key.",
- NO_SUCH_KEY);
- }
- ContainerProtos.KeyData keyData = ContainerProtos.KeyData.parseFrom(kData);
- return KeyData.getFromProtoBuf(keyData);
- }
-
- /**
- * Returns the length of the committed block.
- *
- * @param container - Container from which key need to be get.
- * @param blockID - BlockID of the key.
- * @return length of the block.
- * @throws IOException in case, the block key does not exist in db.
- */
- @Override
- public long getCommittedBlockLength(Container container, BlockID blockID)
- throws IOException {
- KeyValueContainerData containerData = (KeyValueContainerData) container
- .getContainerData();
- MetadataStore db = KeyUtils.getDB(containerData, config);
- // This is a post condition that acts as a hint to the user.
- // Should never fail.
- Preconditions.checkNotNull(db, "DB cannot be null here");
- byte[] kData = db.get(Longs.toByteArray(blockID.getLocalID()));
- if (kData == null) {
- throw new StorageContainerException("Unable to find the key.",
- NO_SUCH_KEY);
- }
- ContainerProtos.KeyData keyData = ContainerProtos.KeyData.parseFrom(kData);
- return keyData.getSize();
- }
-
- /**
- * Deletes an existing Key.
- *
- * @param container - Container from which key need to be deleted.
- * @param blockID - ID of the block.
- * @throws StorageContainerException
- */
- public void deleteKey(Container container, BlockID blockID) throws
- IOException {
- Preconditions.checkNotNull(blockID, "block ID cannot be null.");
- Preconditions.checkState(blockID.getContainerID() >= 0,
- "Container ID cannot be negative.");
- Preconditions.checkState(blockID.getLocalID() >= 0,
- "Local ID cannot be negative.");
-
- KeyValueContainerData cData = (KeyValueContainerData) container
- .getContainerData();
- MetadataStore db = KeyUtils.getDB(cData, config);
- // This is a post condition that acts as a hint to the user.
- // Should never fail.
- Preconditions.checkNotNull(db, "DB cannot be null here");
- // Note : There is a race condition here, since get and delete
- // are not atomic. Leaving it here since the impact is refusing
- // to delete a key which might have just gotten inserted after
- // the get check.
- byte[] kKey = Longs.toByteArray(blockID.getLocalID());
- byte[] kData = db.get(kKey);
- if (kData == null) {
- throw new StorageContainerException("Unable to find the key.",
- NO_SUCH_KEY);
- }
- db.delete(kKey);
-
- // Decrement keycount here
- container.getContainerData().decrKeyCount();
- }
-
- /**
- * List keys in a container.
- *
- * @param container - Container from which keys need to be listed.
- * @param startLocalID - Key to start from, 0 to begin.
- * @param count - Number of keys to return.
- * @return List of Keys that match the criteria.
- */
- @Override
- public List<KeyData> listKey(Container container, long startLocalID, int
- count) throws IOException {
- Preconditions.checkNotNull(container, "container cannot be null");
- Preconditions.checkState(startLocalID >= 0, "startLocal ID cannot be " +
- "negative");
- Preconditions.checkArgument(count > 0,
- "Count must be a positive number.");
- container.readLock();
- List<KeyData> result = null;
- KeyValueContainerData cData = (KeyValueContainerData) container
- .getContainerData();
- MetadataStore db = KeyUtils.getDB(cData, config);
- result = new ArrayList<>();
- byte[] startKeyInBytes = Longs.toByteArray(startLocalID);
- List<Map.Entry<byte[], byte[]>> range = db.getSequentialRangeKVs(
- startKeyInBytes, count, null);
- for (Map.Entry<byte[], byte[]> entry : range) {
- KeyData value = KeyUtils.getKeyData(entry.getValue());
- KeyData data = new KeyData(value.getBlockID());
- result.add(data);
- }
- return result;
- }
-
- /**
- * Shutdown KeyValueContainerManager.
- */
- public void shutdown() {
- KeyUtils.shutdownCache(ContainerCache.getInstance(config));
- }
-
-}
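The deleted KeyManagerImpl above boils down to one storage convention: the LevelDB record key is the block's local ID as 8 big-endian bytes, and the value is the serialized KeyData protobuf. A minimal sketch of that roundtrip, reusing only calls that appear in the file above (the db handle is assumed to come from KeyUtils.getDB, as in putKey/getKey):

    // Sketch of the on-disk layout used by the removed KeyManagerImpl.
    // 'db' is the MetadataStore returned by KeyUtils.getDB(containerData, config).
    byte[] dbKey = Longs.toByteArray(data.getLocalID());     // 8-byte big-endian local ID
    db.put(dbKey, data.getProtoBufMessage().toByteArray());  // value = KeyData protobuf

    byte[] raw = db.get(dbKey);
    if (raw == null) {
      throw new StorageContainerException("Unable to find the key.", NO_SUCH_KEY);
    }
    KeyData roundTripped =
        KeyData.getFromProtoBuf(ContainerProtos.KeyData.parseFrom(raw));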
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/package-info.java
index 525d51b..564b50e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/package-info.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/package-info.java
@@ -17,6 +17,5 @@
*/
package org.apache.hadoop.ozone.container.keyvalue.impl;
/**
- This package contains chunk manager and key manager implementation for
- keyvalue container type.
- **/
\ No newline at end of file
+ * Chunk manager and block manager implementations for keyvalue container type.
+ */
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java
new file mode 100644
index 0000000..35ed22a
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.container.keyvalue.interfaces;
+
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
+import org.apache.hadoop.ozone.container.common.interfaces.Container;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * BlockManager performs block-related operations on the container.
+ */
+public interface BlockManager {
+
+ /**
+ * Puts or overwrites a block.
+ *
+   * @param container - Container to which the block needs to be added.
+ * @param data - Block Data.
+ * @return length of the Block.
+ * @throws IOException
+ */
+ long putBlock(Container container, BlockData data) throws IOException;
+
+ /**
+ * Gets an existing block.
+ *
+   * @param container - Container from which the block needs to be fetched.
+ * @param blockID - BlockID of the Block.
+ * @return Block Data.
+ * @throws IOException
+ */
+ BlockData getBlock(Container container, BlockID blockID) throws IOException;
+
+ /**
+ * Deletes an existing block.
+ *
+   * @param container - Container from which the block needs to be deleted.
+ * @param blockID - ID of the block.
+ * @throws StorageContainerException
+ */
+ void deleteBlock(Container container, BlockID blockID) throws IOException;
+
+ /**
+ * List blocks in a container.
+ *
+ * @param container - Container from which blocks need to be listed.
+ * @param startLocalID - Block to start from, 0 to begin.
+ * @param count - Number of blocks to return.
+ * @return List of Blocks that match the criteria.
+ */
+ List<BlockData> listBlock(Container container, long startLocalID, int count)
+ throws IOException;
+
+ /**
+ * Returns the last committed block length for the block.
+   * @param blockID - ID of the block.
+ */
+ long getCommittedBlockLength(Container container, BlockID blockID)
+ throws IOException;
+
+ /**
+   * Shuts down the BlockManager.
+ */
+ void shutdown();
+}
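A hedged usage sketch of the new interface follows; the BlockManagerImpl constructor and the container setup are assumptions borrowed from TestBlockManagerImpl further down, not part of this file:

    // Minimal sketch, assuming a config-backed BlockManagerImpl and an
    // already-created KeyValueContainer (see TestBlockManagerImpl below).
    BlockManager blockManager = new BlockManagerImpl(config);

    BlockData data = new BlockData(new BlockID(containerId, localId));
    data.setChunks(chunkList);  // List<ContainerProtos.ChunkInfo>
    blockManager.putBlock(container, data);

    BlockData fetched = blockManager.getBlock(container, data.getBlockID());
    List<BlockData> firstPage = blockManager.listBlock(container, 0, 100);
    blockManager.deleteBlock(container, data.getBlockID());
    blockManager.shutdown();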
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/KeyManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/KeyManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/KeyManager.java
deleted file mode 100644
index 84f771a..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/KeyManager.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue.interfaces;
-
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * KeyManager is for performing key related operations on the container.
- */
-public interface KeyManager {
-
- /**
- * Puts or overwrites a key.
- *
- * @param container - Container for which key need to be added.
- * @param data - Key Data.
- * @return length of the Key.
- * @throws IOException
- */
- long putKey(Container container, KeyData data) throws IOException;
-
- /**
- * Gets an existing key.
- *
- * @param container - Container from which key need to be get.
- * @param blockID - BlockID of the Key.
- * @return Key Data.
- * @throws IOException
- */
- KeyData getKey(Container container, BlockID blockID) throws IOException;
-
- /**
- * Deletes an existing Key.
- *
- * @param container - Container from which key need to be deleted.
- * @param blockID - ID of the block.
- * @throws StorageContainerException
- */
- void deleteKey(Container container, BlockID blockID) throws IOException;
-
- /**
- * List keys in a container.
- *
- * @param container - Container from which keys need to be listed.
- * @param startLocalID - Key to start from, 0 to begin.
- * @param count - Number of keys to return.
- * @return List of Keys that match the criteria.
- */
- List<KeyData> listKey(Container container, long startLocalID, int count)
- throws IOException;
-
- /**
- * Returns the last committed block length for the block.
- * @param blockID blockId
- */
- long getCommittedBlockLength(Container container, BlockID blockID)
- throws IOException;
-
- /**
- * Shutdown ContainerManager.
- */
- void shutdown();
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/package-info.java
new file mode 100644
index 0000000..5129094
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/package-info.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.keyvalue.interfaces;
+/**
+ * Chunk manager and block manager interfaces for keyvalue container type.
+ */
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
index 51eed7f..d96fbfa 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
import org.apache.hadoop.ozone.container.common.impl.TopNOrderedContainerDeletionChoosingPolicy;
import org.apache.hadoop.ozone.container.common.interfaces.ContainerDeletionChoosingPolicy;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.ratis.shaded.com.google.protobuf
.InvalidProtocolBufferException;
@@ -72,7 +72,7 @@ public class BlockDeletingService extends BackgroundService{
private static final Logger LOG =
LoggerFactory.getLogger(BlockDeletingService.class);
- ContainerSet containerSet;
+ private ContainerSet containerSet;
private ContainerDeletionChoosingPolicy containerDeletionPolicy;
private final Configuration conf;
@@ -185,7 +185,7 @@ public class BlockDeletingService extends BackgroundService{
ContainerBackgroundTaskResult crr = new ContainerBackgroundTaskResult();
long startTime = Time.monotonicNow();
// Scan container's db and get list of under deletion blocks
- MetadataStore meta = KeyUtils.getDB(
+ MetadataStore meta = BlockUtils.getDB(
(KeyValueContainerData) containerData, conf);
// # of blocks to delete is throttled
KeyPrefixFilter filter =
@@ -211,8 +211,8 @@ public class BlockDeletingService extends BackgroundService{
String blockName = DFSUtil.bytes2String(entry.getKey());
LOG.debug("Deleting block {}", blockName);
try {
- ContainerProtos.KeyData data =
- ContainerProtos.KeyData.parseFrom(entry.getValue());
+ ContainerProtos.BlockData data =
+ ContainerProtos.BlockData.parseFrom(entry.getValue());
for (ContainerProtos.ChunkInfo chunkInfo : data.getChunksList()) {
File chunkFile = dataDir.toPath()
.resolve(chunkInfo.getChunkName()).toFile();
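After this hunk the deletion scan is entirely block-typed: each under-deletion record is parsed as a ContainerProtos.BlockData message and its chunk files are resolved against the container's data directory. Condensed as a sketch (meta, range, and dataDir are the local variables of the surrounding method):

    // Sketch of the per-container scan in BlockDeletingService after the rename.
    for (Map.Entry<byte[], byte[]> entry : range) {
      ContainerProtos.BlockData data =
          ContainerProtos.BlockData.parseFrom(entry.getValue());
      for (ContainerProtos.ChunkInfo chunkInfo : data.getChunksList()) {
        File chunkFile = dataDir.toPath()
            .resolve(chunkInfo.getChunkName()).toFile();
        // delete chunkFile, then drop the block's DB entry from 'meta'
      }
    }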
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java
new file mode 100644
index 0000000..6fe6d81
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java
@@ -0,0 +1,211 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.keyvalue;
+
+import org.apache.hadoop.conf.StorageUnit;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
+import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
+import org.apache.hadoop.ozone.container.common.volume
+ .RoundRobinVolumeChoosingPolicy;
+import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
+import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
+import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.mockito.Mockito;
+
+import java.io.IOException;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.UUID;
+
+import static org.junit.Assert.*;
+import static org.mockito.ArgumentMatchers.anyList;
+import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.Mockito.mock;
+
+/**
+ * This class is used to test block-related operations on the container.
+ */
+public class TestBlockManagerImpl {
+
+ private OzoneConfiguration config;
+ private String scmId = UUID.randomUUID().toString();
+ private VolumeSet volumeSet;
+ private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy;
+ private KeyValueContainerData keyValueContainerData;
+ private KeyValueContainer keyValueContainer;
+ private BlockData blockData;
+ private BlockManagerImpl blockManager;
+ private BlockID blockID;
+
+ @Rule
+ public TemporaryFolder folder = new TemporaryFolder();
+
+
+ @Before
+ public void setUp() throws Exception {
+ config = new OzoneConfiguration();
+
+ HddsVolume hddsVolume = new HddsVolume.Builder(folder.getRoot()
+ .getAbsolutePath()).conf(config).datanodeUuid(UUID.randomUUID()
+ .toString()).build();
+
+ volumeSet = mock(VolumeSet.class);
+
+ volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class);
+ Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
+ .thenReturn(hddsVolume);
+
+ keyValueContainerData = new KeyValueContainerData(1L,
+ (long) StorageUnit.GB.toBytes(5));
+
+ keyValueContainer = new KeyValueContainer(
+ keyValueContainerData, config);
+
+ keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
+
+ // Creating BlockData
+ blockID = new BlockID(1L, 1L);
+ blockData = new BlockData(blockID);
+ blockData.addMetadata("VOLUME", "ozone");
+ blockData.addMetadata("OWNER", "hdfs");
+ List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
+ ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
+ .getLocalID(), 0), 0, 1024);
+ chunkList.add(info.getProtoBufMessage());
+ blockData.setChunks(chunkList);
+
+    // Create BlockManagerImpl
+ blockManager = new BlockManagerImpl(config);
+
+ }
+
+ @Test
+ public void testPutAndGetBlock() throws Exception {
+ assertEquals(0, keyValueContainer.getContainerData().getKeyCount());
+ //Put Block
+ blockManager.putBlock(keyValueContainer, blockData);
+
+ assertEquals(1, keyValueContainer.getContainerData().getKeyCount());
+ //Get Block
+ BlockData fromGetBlockData = blockManager.getBlock(keyValueContainer,
+ blockData.getBlockID());
+
+ assertEquals(blockData.getContainerID(), fromGetBlockData.getContainerID());
+ assertEquals(blockData.getLocalID(), fromGetBlockData.getLocalID());
+ assertEquals(blockData.getChunks().size(),
+ fromGetBlockData.getChunks().size());
+ assertEquals(blockData.getMetadata().size(), fromGetBlockData.getMetadata()
+ .size());
+
+ }
+
+
+ @Test
+ public void testDeleteBlock() throws Exception {
+ try {
+ assertEquals(0,
+ keyValueContainer.getContainerData().getKeyCount());
+ //Put Block
+ blockManager.putBlock(keyValueContainer, blockData);
+ assertEquals(1,
+ keyValueContainer.getContainerData().getKeyCount());
+ //Delete Block
+ blockManager.deleteBlock(keyValueContainer, blockID);
+ assertEquals(0,
+ keyValueContainer.getContainerData().getKeyCount());
+ try {
+ blockManager.getBlock(keyValueContainer, blockID);
+ fail("testDeleteBlock");
+ } catch (StorageContainerException ex) {
+ GenericTestUtils.assertExceptionContains(
+ "Unable to find the block", ex);
+ }
+ } catch (IOException ex) {
+ fail("testDeleteBlock failed");
+ }
+ }
+
+ @Test
+ public void testListBlock() throws Exception {
+ try {
+ blockManager.putBlock(keyValueContainer, blockData);
+ List<BlockData> listBlockData = blockManager.listBlock(
+ keyValueContainer, 1, 10);
+ assertNotNull(listBlockData);
+      assertEquals(1, listBlockData.size());
+
+ for (long i = 2; i <= 10; i++) {
+ blockID = new BlockID(1L, i);
+ blockData = new BlockData(blockID);
+ blockData.addMetadata("VOLUME", "ozone");
+ blockData.addMetadata("OWNER", "hdfs");
+ List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
+ ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
+ .getLocalID(), 0), 0, 1024);
+ chunkList.add(info.getProtoBufMessage());
+ blockData.setChunks(chunkList);
+ blockManager.putBlock(keyValueContainer, blockData);
+ }
+
+ listBlockData = blockManager.listBlock(
+ keyValueContainer, 1, 10);
+ assertNotNull(listBlockData);
+      assertEquals(10, listBlockData.size());
+
+ } catch (IOException ex) {
+ fail("testListBlock failed");
+ }
+ }
+
+ @Test
+ public void testGetNoSuchBlock() throws Exception {
+ try {
+ assertEquals(0,
+ keyValueContainer.getContainerData().getKeyCount());
+ //Put Block
+ blockManager.putBlock(keyValueContainer, blockData);
+ assertEquals(1,
+ keyValueContainer.getContainerData().getKeyCount());
+ //Delete Block
+ blockManager.deleteBlock(keyValueContainer, blockID);
+ assertEquals(0,
+ keyValueContainer.getContainerData().getKeyCount());
+ try {
+ //Since the block has been deleted, we should not be able to find it
+ blockManager.getBlock(keyValueContainer, blockID);
+ fail("testGetNoSuchBlock failed");
+ } catch (StorageContainerException ex) {
+ GenericTestUtils.assertExceptionContains(
+ "Unable to find the block", ex);
+ assertEquals(ContainerProtos.Result.NO_SUCH_BLOCK, ex.getResult());
+ }
+ } catch (IOException ex) {
+ fail("testGetNoSuchBlock failed");
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java
index 9664052..3c0876b 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java
@@ -88,7 +88,7 @@ public class TestChunkManagerImpl {
keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
data = "testing write chunks".getBytes();
- // Creating KeyData
+ // Creating BlockData
blockID = new BlockID(1L, 1L);
chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID
.getLocalID(), 0), 0, data.length);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyManagerImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyManagerImpl.java
deleted file mode 100644
index b05dbca..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyManagerImpl.java
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue;
-
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
-import org.apache.hadoop.ozone.container.common.volume
- .RoundRobinVolumeChoosingPolicy;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.ozone.container.keyvalue.impl.KeyManagerImpl;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-import org.mockito.Mockito;
-
-import java.io.IOException;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.UUID;
-
-import static org.junit.Assert.*;
-import static org.mockito.ArgumentMatchers.anyList;
-import static org.mockito.ArgumentMatchers.anyLong;
-import static org.mockito.Mockito.mock;
-
-/**
- * This class is used to test key related operations on the container.
- */
-public class TestKeyManagerImpl {
-
- private OzoneConfiguration config;
- private String scmId = UUID.randomUUID().toString();
- private VolumeSet volumeSet;
- private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy;
- private KeyValueContainerData keyValueContainerData;
- private KeyValueContainer keyValueContainer;
- private KeyData keyData;
- private KeyManagerImpl keyManager;
- private BlockID blockID;
-
- @Rule
- public TemporaryFolder folder = new TemporaryFolder();
-
-
- @Before
- public void setUp() throws Exception {
- config = new OzoneConfiguration();
-
- HddsVolume hddsVolume = new HddsVolume.Builder(folder.getRoot()
- .getAbsolutePath()).conf(config).datanodeUuid(UUID.randomUUID()
- .toString()).build();
-
- volumeSet = mock(VolumeSet.class);
-
- volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class);
- Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
- .thenReturn(hddsVolume);
-
- keyValueContainerData = new KeyValueContainerData(1L,
- (long) StorageUnit.GB.toBytes(5));
-
- keyValueContainer = new KeyValueContainer(
- keyValueContainerData, config);
-
- keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-
- // Creating KeyData
- blockID = new BlockID(1L, 1L);
- keyData = new KeyData(blockID);
- keyData.addMetadata("VOLUME", "ozone");
- keyData.addMetadata("OWNER", "hdfs");
- List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
- ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
- .getLocalID(), 0), 0, 1024);
- chunkList.add(info.getProtoBufMessage());
- keyData.setChunks(chunkList);
-
- // Create KeyValueContainerManager
- keyManager = new KeyManagerImpl(config);
-
- }
-
- @Test
- public void testPutAndGetKey() throws Exception {
- assertEquals(0, keyValueContainer.getContainerData().getKeyCount());
- //Put Key
- keyManager.putKey(keyValueContainer, keyData);
-
- assertEquals(1, keyValueContainer.getContainerData().getKeyCount());
- //Get Key
- KeyData fromGetKeyData = keyManager.getKey(keyValueContainer,
- keyData.getBlockID());
-
- assertEquals(keyData.getContainerID(), fromGetKeyData.getContainerID());
- assertEquals(keyData.getLocalID(), fromGetKeyData.getLocalID());
- assertEquals(keyData.getChunks().size(), fromGetKeyData.getChunks().size());
- assertEquals(keyData.getMetadata().size(), fromGetKeyData.getMetadata()
- .size());
-
- }
-
-
- @Test
- public void testDeleteKey() throws Exception {
- try {
- assertEquals(0, keyValueContainer.getContainerData().getKeyCount());
- //Put Key
- keyManager.putKey(keyValueContainer, keyData);
- assertEquals(1, keyValueContainer.getContainerData().getKeyCount());
- //Delete Key
- keyManager.deleteKey(keyValueContainer, blockID);
- assertEquals(0, keyValueContainer.getContainerData().getKeyCount());
- try {
- keyManager.getKey(keyValueContainer, blockID);
- fail("testDeleteKey");
- } catch (StorageContainerException ex) {
- GenericTestUtils.assertExceptionContains("Unable to find the key", ex);
- }
- } catch (IOException ex) {
- fail("testDeleteKey failed");
- }
- }
-
- @Test
- public void testListKey() throws Exception {
- try {
- keyManager.putKey(keyValueContainer, keyData);
- List<KeyData> listKeyData = keyManager.listKey(
- keyValueContainer, 1, 10);
- assertNotNull(listKeyData);
- assertTrue(listKeyData.size() == 1);
-
- for (long i = 2; i <= 10; i++) {
- blockID = new BlockID(1L, i);
- keyData = new KeyData(blockID);
- keyData.addMetadata("VOLUME", "ozone");
- keyData.addMetadata("OWNER", "hdfs");
- List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
- ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
- .getLocalID(), 0), 0, 1024);
- chunkList.add(info.getProtoBufMessage());
- keyData.setChunks(chunkList);
- keyManager.putKey(keyValueContainer, keyData);
- }
-
- listKeyData = keyManager.listKey(
- keyValueContainer, 1, 10);
- assertNotNull(listKeyData);
- assertTrue(listKeyData.size() == 10);
-
- } catch (IOException ex) {
- fail("testListKey failed");
- }
- }
-
- @Test
- public void testGetNoSuchKey() throws Exception {
- try {
- keyData = new KeyData(new BlockID(1L, 2L));
- keyManager.getKey(keyValueContainer, new BlockID(1L, 2L));
- fail("testGetNoSuchKey failed");
- } catch (StorageContainerException ex) {
- GenericTestUtils.assertExceptionContains("Unable to find the key.", ex);
- assertEquals(ContainerProtos.Result.NO_SUCH_KEY, ex.getResult());
- }
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
index f1fe88e..fbc5ad0 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
@@ -27,11 +27,11 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.utils.MetadataKeyFilters;
import org.apache.hadoop.utils.MetadataStore;
@@ -114,8 +114,8 @@ public class TestKeyValueBlockIterator {
int counter = 0;
while(keyValueBlockIterator.hasNext()) {
- KeyData keyData = keyValueBlockIterator.nextBlock();
- assertEquals(keyData.getLocalID(), counter++);
+ BlockData blockData = keyValueBlockIterator.nextBlock();
+ assertEquals(blockData.getLocalID(), counter++);
}
assertFalse(keyValueBlockIterator.hasNext());
@@ -123,8 +123,8 @@ public class TestKeyValueBlockIterator {
keyValueBlockIterator.seekToFirst();
counter = 0;
while(keyValueBlockIterator.hasNext()) {
- KeyData keyData = keyValueBlockIterator.nextBlock();
- assertEquals(keyData.getLocalID(), counter++);
+ BlockData blockData = keyValueBlockIterator.nextBlock();
+ assertEquals(blockData.getLocalID(), counter++);
}
assertFalse(keyValueBlockIterator.hasNext());
@@ -214,8 +214,8 @@ public class TestKeyValueBlockIterator {
int counter = 5;
while(keyValueBlockIterator.hasNext()) {
- KeyData keyData = keyValueBlockIterator.nextBlock();
- assertEquals(keyData.getLocalID(), counter++);
+ BlockData blockData = keyValueBlockIterator.nextBlock();
+ assertEquals(blockData.getLocalID(), counter++);
}
}
@@ -250,7 +250,7 @@ public class TestKeyValueBlockIterator {
container = new KeyValueContainer(containerData, conf);
container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), UUID
.randomUUID().toString());
- MetadataStore metadataStore = KeyUtils.getDB(containerData, conf);
+ MetadataStore metadataStore = BlockUtils.getDB(containerData, conf);
List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
ChunkInfo info = new ChunkInfo("chunkfile", 0, 1024);
@@ -258,18 +258,18 @@ public class TestKeyValueBlockIterator {
for (int i=0; i<normalBlocks; i++) {
BlockID blockID = new BlockID(containerId, i);
- KeyData keyData = new KeyData(blockID);
- keyData.setChunks(chunkList);
- metadataStore.put(Longs.toByteArray(blockID.getLocalID()), keyData
+ BlockData blockData = new BlockData(blockID);
+ blockData.setChunks(chunkList);
+ metadataStore.put(Longs.toByteArray(blockID.getLocalID()), blockData
.getProtoBufMessage().toByteArray());
}
for (int i=normalBlocks; i<deletedBlocks; i++) {
BlockID blockID = new BlockID(containerId, i);
- KeyData keyData = new KeyData(blockID);
- keyData.setChunks(chunkList);
+ BlockData blockData = new BlockData(blockID);
+ blockData.setChunks(chunkList);
metadataStore.put(DFSUtil.string2Bytes(OzoneConsts
- .DELETING_KEY_PREFIX + blockID.getLocalID()), keyData
+ .DELETING_KEY_PREFIX + blockID.getLocalID()), blockData
.getProtoBufMessage().toByteArray());
}
}
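The test setup above writes two kinds of records so the iterator can be exercised against both: live blocks keyed by the raw local ID, and blocks queued for deletion keyed with OzoneConsts.DELETING_KEY_PREFIX. The two layouts side by side, using only the calls shown in the hunk above:

    // Live block: key is the 8-byte local ID, value is the BlockData protobuf.
    metadataStore.put(Longs.toByteArray(blockID.getLocalID()),
        blockData.getProtoBufMessage().toByteArray());

    // Block queued for deletion: same value, but the key carries the
    // DELETING_KEY_PREFIX so MetadataKeyFilters can select or skip it.
    metadataStore.put(DFSUtil.string2Bytes(
            OzoneConsts.DELETING_KEY_PREFIX + blockID.getLocalID()),
        blockData.getProtoBufMessage().toByteArray());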
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
index f84ba7d..bf6b8b0 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
@@ -28,14 +28,14 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ContainerLifeCycleState;
import org.apache.hadoop.hdds.scm.container.common.helpers
.StorageContainerException;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
import org.apache.hadoop.ozone.container.common.volume
.RoundRobinVolumeChoosingPolicy;
import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.DiskChecker;
import org.apache.hadoop.utils.MetadataStore;
@@ -117,11 +117,11 @@ public class TestKeyValueContainer {
addBlocks(blockCount);
blockIterator = keyValueContainer.blockIterator();
assertTrue(blockIterator.hasNext());
- KeyData keyData;
+ BlockData blockData;
int blockCounter = 0;
while(blockIterator.hasNext()) {
- keyData = blockIterator.nextBlock();
- assertEquals(blockCounter++, keyData.getBlockID().getLocalID());
+ blockData = blockIterator.nextBlock();
+ assertEquals(blockCounter++, blockData.getBlockID().getLocalID());
}
assertEquals(blockCount, blockCounter);
}
@@ -129,20 +129,20 @@ public class TestKeyValueContainer {
private void addBlocks(int count) throws Exception {
long containerId = keyValueContainerData.getContainerID();
- MetadataStore metadataStore = KeyUtils.getDB(keyValueContainer
+ MetadataStore metadataStore = BlockUtils.getDB(keyValueContainer
.getContainerData(), conf);
for (int i=0; i < count; i++) {
- // Creating KeyData
+ // Creating BlockData
BlockID blockID = new BlockID(containerId, i);
- KeyData keyData = new KeyData(blockID);
- keyData.addMetadata("VOLUME", "ozone");
- keyData.addMetadata("OWNER", "hdfs");
+ BlockData blockData = new BlockData(blockID);
+ blockData.addMetadata("VOLUME", "ozone");
+ blockData.addMetadata("OWNER", "hdfs");
List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
.getLocalID(), 0), 0, 1024);
chunkList.add(info.getProtoBufMessage());
- keyData.setChunks(chunkList);
- metadataStore.put(Longs.toByteArray(blockID.getLocalID()), keyData
+ blockData.setChunks(chunkList);
+ metadataStore.put(Longs.toByteArray(blockID.getLocalID()), blockData
.getProtoBufMessage().toByteArray());
}
@@ -189,7 +189,7 @@ public class TestKeyValueContainer {
int numberOfKeysToWrite = 12;
    //write a few keys to check the key count after import
- MetadataStore metadataStore = KeyUtils.getDB(keyValueContainerData, conf);
+ MetadataStore metadataStore = BlockUtils.getDB(keyValueContainerData, conf);
for (int i = 0; i < numberOfKeysToWrite; i++) {
metadataStore.put(("test" + i).getBytes(), "test".getBytes());
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
index d91bbf7..e1904c1 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
@@ -142,31 +142,31 @@ public class TestKeyValueHandler {
Mockito.verify(handler, times(1)).handleCloseContainer(
any(ContainerCommandRequestProto.class), any());
- // Test Put Key Request handling
- ContainerCommandRequestProto putKeyRequest =
- getDummyCommandRequestProto(ContainerProtos.Type.PutKey);
- dispatcher.dispatch(putKeyRequest);
- Mockito.verify(handler, times(1)).handlePutKey(
+ // Test Put Block Request handling
+ ContainerCommandRequestProto putBlockRequest =
+ getDummyCommandRequestProto(ContainerProtos.Type.PutBlock);
+ dispatcher.dispatch(putBlockRequest);
+ Mockito.verify(handler, times(1)).handlePutBlock(
any(ContainerCommandRequestProto.class), any());
- // Test Get Key Request handling
- ContainerCommandRequestProto getKeyRequest =
- getDummyCommandRequestProto(ContainerProtos.Type.GetKey);
- dispatcher.dispatch(getKeyRequest);
- Mockito.verify(handler, times(1)).handleGetKey(
+ // Test Get Block Request handling
+ ContainerCommandRequestProto getBlockRequest =
+ getDummyCommandRequestProto(ContainerProtos.Type.GetBlock);
+ dispatcher.dispatch(getBlockRequest);
+ Mockito.verify(handler, times(1)).handleGetBlock(
any(ContainerCommandRequestProto.class), any());
- // Test Delete Key Request handling
- ContainerCommandRequestProto deleteKeyRequest =
- getDummyCommandRequestProto(ContainerProtos.Type.DeleteKey);
- dispatcher.dispatch(deleteKeyRequest);
- Mockito.verify(handler, times(1)).handleDeleteKey(
+ // Test Delete Block Request handling
+ ContainerCommandRequestProto deleteBlockRequest =
+ getDummyCommandRequestProto(ContainerProtos.Type.DeleteBlock);
+ dispatcher.dispatch(deleteBlockRequest);
+ Mockito.verify(handler, times(1)).handleDeleteBlock(
any(ContainerCommandRequestProto.class), any());
- // Test List Key Request handling
- ContainerCommandRequestProto listKeyRequest =
- getDummyCommandRequestProto(ContainerProtos.Type.ListKey);
- dispatcher.dispatch(listKeyRequest);
+ // Test List Block Request handling
+ ContainerCommandRequestProto listBlockRequest =
+ getDummyCommandRequestProto(ContainerProtos.Type.ListBlock);
+ dispatcher.dispatch(listBlockRequest);
Mockito.verify(handler, times(2)).handleUnsupportedOp(
any(ContainerCommandRequestProto.class));
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
index 94966f6..2b10578 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
@@ -281,10 +281,10 @@ public class ChunkGroupInputStream extends InputStream implements Seekable {
groupInputStream.streamOffset[i] = length;
ContainerProtos.DatanodeBlockID datanodeBlockID = blockID
.getDatanodeBlockIDProtobuf();
- ContainerProtos.GetKeyResponseProto response = ContainerProtocolCalls
- .getKey(xceiverClient, datanodeBlockID, requestId);
+ ContainerProtos.GetBlockResponseProto response = ContainerProtocolCalls
+ .getBlock(xceiverClient, datanodeBlockID, requestId);
List<ContainerProtos.ChunkInfo> chunks =
- response.getKeyData().getChunksList();
+ response.getBlockData().getChunksList();
for (ContainerProtos.ChunkInfo chunk : chunks) {
length += chunk.getLen();
}
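On the client side the stream length is now derived from a GetBlock response rather than GetKey: the chunk list is summed to size each sub-stream. Condensed as a sketch (the call and accessors are exactly those in the hunk above):

    // Sketch: compute a block's byte length from its GetBlock response.
    ContainerProtos.GetBlockResponseProto response = ContainerProtocolCalls
        .getBlock(xceiverClient, datanodeBlockID, requestId);
    long length = 0;
    for (ContainerProtos.ChunkInfo chunk : response.getBlockData().getChunksList()) {
      length += chunk.getLen();
    }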
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
index d0e173c..0537f8a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
@@ -165,10 +165,10 @@ public class TestStorageContainerManagerHelper {
DatanodeDetails leadDN = containerWithPipeline.getPipeline().getLeader();
OzoneContainer containerServer =
getContainerServerByDatanodeUuid(leadDN.getUuidString());
- KeyValueContainerData containerData = (KeyValueContainerData) containerServer
- .getContainerSet()
+ KeyValueContainerData containerData =
+ (KeyValueContainerData) containerServer.getContainerSet()
.getContainer(containerID).getContainerData();
- return KeyUtils.getDB(containerData, conf);
+ return BlockUtils.getDB(containerData, conf);
}
private OzoneContainer getContainerServerByDatanodeUuid(String dnUUID)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
index ddff0c5..b4a0ba7 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueBlockIterator;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import org.junit.AfterClass;
@@ -469,9 +469,9 @@ public class TestOzoneRestClient {
containerID, new File(containerPath));
long valueLength = 0;
while (keyValueBlockIterator.hasNext()) {
- KeyData keyData = keyValueBlockIterator.nextBlock();
- if (keyData.getBlockID().getLocalID() == localID) {
- List<ContainerProtos.ChunkInfo> chunks = keyData.getChunks();
+ BlockData blockData = keyValueBlockIterator.nextBlock();
+ if (blockData.getBlockID().getLocalID() == localID) {
+ List<ContainerProtos.ChunkInfo> chunks = blockData.getChunks();
for (ContainerProtos.ChunkInfo chunk : chunks) {
valueLength += chunk.getLen();
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
index bf1eba6..cc045d0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueBlockIterator;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import org.apache.hadoop.ozone.om.OzoneManager;
@@ -603,10 +603,10 @@ public class TestOzoneRpcClient {
KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
containerID, new File(containerPath));
while (keyValueBlockIterator.hasNext()) {
- KeyData keyData = keyValueBlockIterator.nextBlock();
- if (keyData.getBlockID().getLocalID() == localID) {
+ BlockData blockData = keyValueBlockIterator.nextBlock();
+ if (blockData.getBlockID().getLocalID() == localID) {
long length = 0;
- List<ContainerProtos.ChunkInfo> chunks = keyData.getChunks();
+ List<ContainerProtos.ChunkInfo> chunks = blockData.getChunks();
for (ContainerProtos.ChunkInfo chunk : chunks) {
length += chunk.getLen();
}
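Both client tests verify a written key the same way: open a KeyValueBlockIterator over the container, find the block whose local ID matches, and total its chunk lengths. One sketch covering both (variable names follow the hunks above):

    // Sketch of the shared verification loop in the two client tests.
    KeyValueBlockIterator it =
        new KeyValueBlockIterator(containerID, new File(containerPath));
    long valueLength = 0;
    while (it.hasNext()) {
      BlockData blockData = it.nextBlock();
      if (blockData.getBlockID().getLocalID() == localID) {
        for (ContainerProtos.ChunkInfo chunk : blockData.getChunks()) {
          valueLength += chunk.getLen();
        }
      }
    }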
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
index 0c86828..f278479 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
@@ -40,7 +40,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.util.Time;
import org.junit.Assert;
@@ -241,18 +241,18 @@ public final class ContainerTestHelper {
setDataChecksum(info, data);
- ContainerProtos.PutKeyRequestProto.Builder putRequest =
- ContainerProtos.PutKeyRequestProto.newBuilder();
+ ContainerProtos.PutBlockRequestProto.Builder putRequest =
+ ContainerProtos.PutBlockRequestProto.newBuilder();
- KeyData keyData = new KeyData(blockID);
+ BlockData blockData = new BlockData(blockID);
List<ContainerProtos.ChunkInfo> newList = new LinkedList<>();
newList.add(info.getProtoBufMessage());
- keyData.setChunks(newList);
- putRequest.setKeyData(keyData.getProtoBufMessage());
+ blockData.setChunks(newList);
+ putRequest.setBlockData(blockData.getProtoBufMessage());
smallFileRequest.setChunkInfo(info.getProtoBufMessage());
smallFileRequest.setData(ByteString.copyFrom(data));
- smallFileRequest.setKey(putRequest);
+ smallFileRequest.setBlock(putRequest);
ContainerCommandRequestProto.Builder request =
ContainerCommandRequestProto.newBuilder();
@@ -266,17 +266,17 @@ public final class ContainerTestHelper {
public static ContainerCommandRequestProto getReadSmallFileRequest(
- Pipeline pipeline, ContainerProtos.PutKeyRequestProto putKey)
+ Pipeline pipeline, ContainerProtos.PutBlockRequestProto putKey)
throws Exception {
ContainerProtos.GetSmallFileRequestProto.Builder smallFileRequest =
ContainerProtos.GetSmallFileRequestProto.newBuilder();
- ContainerCommandRequestProto getKey = getKeyRequest(pipeline, putKey);
- smallFileRequest.setKey(getKey.getGetKey());
+ ContainerCommandRequestProto getKey = getBlockRequest(pipeline, putKey);
+ smallFileRequest.setBlock(getKey.getGetBlock());
ContainerCommandRequestProto.Builder request =
ContainerCommandRequestProto.newBuilder();
request.setCmdType(ContainerProtos.Type.GetSmallFile);
- request.setContainerID(getKey.getGetKey().getBlockID().getContainerID());
+ request.setContainerID(getKey.getGetBlock().getBlockID().getContainerID());
request.setGetSmallFile(smallFileRequest);
request.setTraceID(UUID.randomUUID().toString());
request.setDatanodeUuid(pipeline.getLeader().getUuidString());
@@ -421,58 +421,58 @@ public final class ContainerTestHelper {
}
/**
- * Returns the PutKeyRequest for test purpose.
+   * Returns a PutBlockRequest for test purposes.
* @param pipeline - pipeline.
* @param writeRequest - Write Chunk Request.
* @return - Request
*/
- public static ContainerCommandRequestProto getPutKeyRequest(
+ public static ContainerCommandRequestProto getPutBlockRequest(
Pipeline pipeline, ContainerProtos.WriteChunkRequestProto writeRequest) {
- LOG.trace("putKey: {} to pipeline={}",
+ LOG.trace("putBlock: {} to pipeline={}",
writeRequest.getBlockID());
- ContainerProtos.PutKeyRequestProto.Builder putRequest =
- ContainerProtos.PutKeyRequestProto.newBuilder();
+ ContainerProtos.PutBlockRequestProto.Builder putRequest =
+ ContainerProtos.PutBlockRequestProto.newBuilder();
- KeyData keyData = new KeyData(
+ BlockData blockData = new BlockData(
BlockID.getFromProtobuf(writeRequest.getBlockID()));
List<ContainerProtos.ChunkInfo> newList = new LinkedList<>();
newList.add(writeRequest.getChunkData());
- keyData.setChunks(newList);
- putRequest.setKeyData(keyData.getProtoBufMessage());
+ blockData.setChunks(newList);
+ putRequest.setBlockData(blockData.getProtoBufMessage());
ContainerCommandRequestProto.Builder request =
ContainerCommandRequestProto.newBuilder();
- request.setCmdType(ContainerProtos.Type.PutKey);
- request.setContainerID(keyData.getContainerID());
- request.setPutKey(putRequest);
+ request.setCmdType(ContainerProtos.Type.PutBlock);
+ request.setContainerID(blockData.getContainerID());
+ request.setPutBlock(putRequest);
request.setTraceID(UUID.randomUUID().toString());
request.setDatanodeUuid(pipeline.getLeader().getUuidString());
return request.build();
}
/**
- * Gets a GetKeyRequest for test purpose.
+   * Gets a GetBlockRequest for test purposes.
* @param pipeline - pipeline
- * @param putKeyRequest - putKeyRequest.
+ * @param putBlockRequest - putBlockRequest.
* @return - Request
* immediately.
*/
- public static ContainerCommandRequestProto getKeyRequest(
- Pipeline pipeline, ContainerProtos.PutKeyRequestProto putKeyRequest) {
+ public static ContainerCommandRequestProto getBlockRequest(
+ Pipeline pipeline, ContainerProtos.PutBlockRequestProto putBlockRequest) {
ContainerProtos.DatanodeBlockID blockID =
- putKeyRequest.getKeyData().getBlockID();
+ putBlockRequest.getBlockData().getBlockID();
LOG.trace("getKey: blockID={}", blockID);
- ContainerProtos.GetKeyRequestProto.Builder getRequest =
- ContainerProtos.GetKeyRequestProto.newBuilder();
+ ContainerProtos.GetBlockRequestProto.Builder getRequest =
+ ContainerProtos.GetBlockRequestProto.newBuilder();
getRequest.setBlockID(blockID);
ContainerCommandRequestProto.Builder request =
ContainerCommandRequestProto.newBuilder();
- request.setCmdType(ContainerProtos.Type.GetKey);
+ request.setCmdType(ContainerProtos.Type.GetBlock);
request.setContainerID(blockID.getContainerID());
- request.setGetKey(getRequest);
+ request.setGetBlock(getRequest);
request.setTraceID(UUID.randomUUID().toString());
request.setDatanodeUuid(pipeline.getLeader().getUuidString());
return request.build();
@@ -484,32 +484,32 @@ public final class ContainerTestHelper {
* @param request - Request
* @param response - Response
*/
- public static void verifyGetKey(ContainerCommandRequestProto request,
+ public static void verifyGetBlock(ContainerCommandRequestProto request,
ContainerCommandResponseProto response, int expectedChunksCount) {
Assert.assertEquals(request.getTraceID(), response.getTraceID());
Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
Assert.assertEquals(expectedChunksCount,
- response.getGetKey().getKeyData().getChunksCount());
+ response.getGetBlock().getBlockData().getChunksCount());
}
/**
* @param pipeline - pipeline.
- * @param putKeyRequest - putKeyRequest.
+ * @param putBlockRequest - putBlockRequest.
* @return - Request
*/
- public static ContainerCommandRequestProto getDeleteKeyRequest(
- Pipeline pipeline, ContainerProtos.PutKeyRequestProto putKeyRequest) {
- ContainerProtos.DatanodeBlockID blockID = putKeyRequest.getKeyData()
+ public static ContainerCommandRequestProto getDeleteBlockRequest(
+ Pipeline pipeline, ContainerProtos.PutBlockRequestProto putBlockRequest) {
+ ContainerProtos.DatanodeBlockID blockID = putBlockRequest.getBlockData()
.getBlockID();
- LOG.trace("deleteKey: name={}", blockID);
- ContainerProtos.DeleteKeyRequestProto.Builder delRequest =
- ContainerProtos.DeleteKeyRequestProto.newBuilder();
+ LOG.trace("deleteBlock: name={}", blockID);
+ ContainerProtos.DeleteBlockRequestProto.Builder delRequest =
+ ContainerProtos.DeleteBlockRequestProto.newBuilder();
delRequest.setBlockID(blockID);
ContainerCommandRequestProto.Builder request =
ContainerCommandRequestProto.newBuilder();
- request.setCmdType(ContainerProtos.Type.DeleteKey);
+ request.setCmdType(ContainerProtos.Type.DeleteBlock);
request.setContainerID(blockID.getContainerID());
- request.setDeleteKey(delRequest);
+ request.setDeleteBlock(delRequest);
request.setTraceID(UUID.randomUUID().toString());
request.setDatanodeUuid(pipeline.getLeader().getUuidString());
return request.build();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
index 7391b25..52cebb3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
@@ -39,7 +39,7 @@ import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.ozone.HddsDatanodeService;
import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.common.interfaces.Container;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;
@@ -100,17 +100,20 @@ public class TestContainerReplication {
DatanodeBlockID blockID = requestProto.getWriteChunk().getBlockID();
- // Put Key to the test container
- ContainerCommandRequestProto putKeyRequest = ContainerTestHelper
- .getPutKeyRequest(sourcePipelines, requestProto.getWriteChunk());
+ // Put Block to the test container
+ ContainerCommandRequestProto putBlockRequest = ContainerTestHelper
+ .getPutBlockRequest(sourcePipelines, requestProto.getWriteChunk());
- ContainerProtos.KeyData keyData = putKeyRequest.getPutKey().getKeyData();
+ ContainerProtos.BlockData blockData =
+ putBlockRequest.getPutBlock().getBlockData();
- ContainerCommandResponseProto response = client.sendCommand(putKeyRequest);
+ ContainerCommandResponseProto response =
+ client.sendCommand(putBlockRequest);
Assert.assertNotNull(response);
Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
- Assert.assertTrue(putKeyRequest.getTraceID().equals(response.getTraceID()));
+ Assert.assertTrue(
+ putBlockRequest.getTraceID().equals(response.getTraceID()));
HddsDatanodeService destinationDatanode =
chooseDatanodeWithoutContainer(sourcePipelines,
@@ -147,8 +150,8 @@ public class TestContainerReplication {
KeyValueHandler handler = (KeyValueHandler) ozoneContainer.getDispatcher()
.getHandler(ContainerType.KeyValueContainer);
- KeyData key = handler.getKeyManager()
- .getKey(container, BlockID.getFromProtobuf(blockID));
+ BlockData key = handler.getBlockManager()
+ .getBlock(container, BlockID.getFromProtobuf(blockID));
Assert.assertNotNull(key);
Assert.assertEquals(1, key.getChunks().size());
@@ -164,7 +167,8 @@ public class TestContainerReplication {
return datanode;
}
}
- throw new AssertionError("No datanode outside of the pipeline");
+ throw new AssertionError(
+ "No datanode outside of the pipeline");
}
static OzoneConfiguration newOzoneConfiguration() {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
index 25c8c6b..7e30c5f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.ContainerTestHelper;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.common.impl.ContainerData;
import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
import org.apache.hadoop.ozone.container.common.interfaces.Container;
@@ -33,9 +34,8 @@ import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingP
import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
import org.apache.hadoop.ozone.container.testutils.BlockDeletingServiceTestImpl;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
import org.apache.hadoop.ozone.container.common.impl.RandomContainerDeletionChoosingPolicy;
import org.apache.hadoop.ozone.container.keyvalue.statemachine.background
.BlockDeletingService;
@@ -117,13 +117,13 @@ public class TestBlockDeletingService {
containerSet.addContainer(container);
data = (KeyValueContainerData) containerSet.getContainer(
containerID).getContainerData();
- MetadataStore metadata = KeyUtils.getDB(data, conf);
+ MetadataStore metadata = BlockUtils.getDB(data, conf);
for (int j = 0; j<numOfBlocksPerContainer; j++) {
BlockID blockID =
ContainerTestHelper.getTestBlockID(containerID);
String deleteStateName = OzoneConsts.DELETING_KEY_PREFIX +
blockID.getLocalID();
- KeyData kd = new KeyData(blockID);
+ BlockData kd = new BlockData(blockID);
List<ContainerProtos.ChunkInfo> chunks = Lists.newArrayList();
for (int k = 0; k<numOfChunksPerBlock; k++) {
// offset doesn't matter here
@@ -200,7 +200,7 @@ public class TestBlockDeletingService {
containerSet.listContainer(0L, 1, containerData);
Assert.assertEquals(1, containerData.size());
- MetadataStore meta = KeyUtils.getDB(
+ MetadataStore meta = BlockUtils.getDB(
(KeyValueContainerData) containerData.get(0), conf);
Map<Long, Container> containerMap = containerSet.getContainerMap();
// NOTE: this test assumes that all the containers are KeyValueContainer and
@@ -309,7 +309,7 @@ public class TestBlockDeletingService {
// get container meta data
List<ContainerData> containerData = Lists.newArrayList();
containerSet.listContainer(0L, 1, containerData);
- MetadataStore meta = KeyUtils.getDB(
+ MetadataStore meta = BlockUtils.getDB(
(KeyValueContainerData) containerData.get(0), conf);
LogCapturer newLog = LogCapturer.captureLogs(BackgroundService.LOG);
---------------------------------------------------------------------
[09/18] hadoop git commit: HDDS-394. Rename *Key Apis in
DatanodeContainerProtocol to *Block apis. Contributed by Dinesh Chitlangia.
Posted by sh...@apache.org.
HDDS-394. Rename *Key Apis in DatanodeContainerProtocol to *Block apis.
Contributed by Dinesh Chitlangia.
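For readers tracking the rename at the client level, here is a minimal
sketch of the new calls, based on the ContainerProtocolCalls signatures in
the diff below. The client, block ID and trace ID are assumed to be set up
elsewhere; this is an illustration, not code from the commit:

    import org.apache.hadoop.hdds.client.BlockID;
    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData;
    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.GetBlockResponseProto;
    import org.apache.hadoop.hdds.scm.XceiverClientSpi;
    import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;

    void putThenGet(XceiverClientSpi xceiverClient, BlockID blockID,
        String traceID) throws java.io.IOException {
      // Formerly putKey(...): commit the block metadata to the datanode.
      BlockData blockData = BlockData.newBuilder()
          .setBlockID(blockID.getDatanodeBlockIDProtobuf())
          .build();
      ContainerProtocolCalls.putBlock(xceiverClient, blockData, traceID);

      // Formerly getKey(...): read the block metadata back.
      GetBlockResponseProto response = ContainerProtocolCalls.getBlock(
          xceiverClient, blockID.getDatanodeBlockIDProtobuf(), traceID);
    }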
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/096a7160
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/096a7160
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/096a7160
Branch: refs/heads/HDFS-12943
Commit: 096a7160803494219581c067dfcdb67d2bd0bcdb
Parents: aa4bd49
Author: Anu Engineer <ae...@apache.org>
Authored: Thu Sep 20 11:51:49 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Thu Sep 20 11:51:49 2018 -0700
----------------------------------------------------------------------
.../hdds/scm/storage/ChunkOutputStream.java | 13 +-
.../java/org/apache/hadoop/hdds/HddsUtils.java | 8 +-
.../scm/storage/ContainerProtocolCalls.java | 62 ++---
.../container/common/helpers/BlockData.java | 255 +++++++++++++++++++
.../ozone/container/common/helpers/KeyData.java | 253 ------------------
.../main/proto/DatanodeContainerProtocol.proto | 74 +++---
.../common/impl/OpenContainerBlockMap.java | 46 ++--
.../DeleteBlocksCommandHandler.java | 4 +-
.../server/ratis/ContainerStateMachine.java | 28 +-
.../keyvalue/KeyValueBlockIterator.java | 16 +-
.../container/keyvalue/KeyValueContainer.java | 4 +-
.../container/keyvalue/KeyValueHandler.java | 124 ++++-----
.../container/keyvalue/helpers/BlockUtils.java | 199 +++++++++++++++
.../container/keyvalue/helpers/KeyUtils.java | 199 ---------------
.../keyvalue/helpers/KeyValueContainerUtil.java | 12 +-
.../keyvalue/helpers/SmallFileUtils.java | 2 +-
.../keyvalue/impl/BlockManagerImpl.java | 229 +++++++++++++++++
.../container/keyvalue/impl/KeyManagerImpl.java | 227 -----------------
.../container/keyvalue/impl/package-info.java | 5 +-
.../keyvalue/interfaces/BlockManager.java | 84 ++++++
.../keyvalue/interfaces/KeyManager.java | 84 ------
.../keyvalue/interfaces/package-info.java | 21 ++
.../background/BlockDeletingService.java | 10 +-
.../keyvalue/TestBlockManagerImpl.java | 211 +++++++++++++++
.../keyvalue/TestChunkManagerImpl.java | 2 +-
.../container/keyvalue/TestKeyManagerImpl.java | 191 --------------
.../keyvalue/TestKeyValueBlockIterator.java | 30 +--
.../keyvalue/TestKeyValueContainer.java | 26 +-
.../container/keyvalue/TestKeyValueHandler.java | 38 +--
.../ozone/client/io/ChunkGroupInputStream.java | 6 +-
.../TestStorageContainerManagerHelper.java | 8 +-
.../ozone/client/rest/TestOzoneRestClient.java | 8 +-
.../ozone/client/rpc/TestOzoneRpcClient.java | 8 +-
.../ozone/container/ContainerTestHelper.java | 84 +++---
.../container/TestContainerReplication.java | 24 +-
.../common/TestBlockDeletingService.java | 12 +-
.../container/common/helpers/TestBlockData.java | 127 +++++++++
.../container/common/helpers/TestKeyData.java | 119 ---------
.../common/impl/TestCloseContainerHandler.java | 51 ++--
.../common/impl/TestContainerPersistence.java | 154 +++++------
.../commandhandler/TestBlockDeletion.java | 9 +-
.../container/ozoneimpl/TestOzoneContainer.java | 100 ++++----
.../server/TestContainerStateMachine.java | 2 +-
.../hadoop/ozone/om/TestOzoneManager.java | 4 +-
.../ozone/scm/TestContainerSmallFile.java | 4 +-
.../TestGetCommittedBlockLengthAndPutKey.java | 12 +-
.../hadoop/ozone/web/client/TestKeys.java | 44 ++--
.../hadoop/ozone/om/BucketManagerImpl.java | 2 +-
.../ozone/om/ScmBlockLocationTestIngClient.java | 2 +-
.../genesis/BenchMarkDatanodeDispatcher.java | 42 +--
50 files changed, 1680 insertions(+), 1599 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
index 8d311d0..10b3bb5 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
@@ -23,7 +23,7 @@ import org.apache.commons.codec.digest.DigestUtils;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyData;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue;
import org.apache.hadoop.hdds.client.BlockID;
@@ -32,7 +32,8 @@ import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.util.UUID;
-import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls.putKey;
+import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls
+ .putBlock;
import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls
.writeChunk;
@@ -57,7 +58,7 @@ public class ChunkOutputStream extends OutputStream {
private final BlockID blockID;
private final String key;
private final String traceID;
- private final KeyData.Builder containerKeyData;
+ private final BlockData.Builder containerBlockData;
private XceiverClientManager xceiverClientManager;
private XceiverClientSpi xceiverClient;
private ByteBuffer buffer;
@@ -84,7 +85,7 @@ public class ChunkOutputStream extends OutputStream {
this.chunkSize = chunkSize;
KeyValue keyValue = KeyValue.newBuilder()
.setKey("TYPE").setValue("KEY").build();
- this.containerKeyData = KeyData.newBuilder()
+ this.containerBlockData = BlockData.newBuilder()
.setBlockID(blockID.getDatanodeBlockIDProtobuf())
.addMetadata(keyValue);
this.xceiverClientManager = xceiverClientManager;
@@ -154,7 +155,7 @@ public class ChunkOutputStream extends OutputStream {
writeChunkToContainer();
}
try {
- putKey(xceiverClient, containerKeyData.build(), traceID);
+ putBlock(xceiverClient, containerBlockData.build(), traceID);
} catch (IOException e) {
throw new IOException(
"Unexpected Storage Container Exception: " + e.toString(), e);
@@ -230,6 +231,6 @@ public class ChunkOutputStream extends OutputStream {
throw new IOException(
"Unexpected Storage Container Exception: " + e.toString(), e);
}
- containerKeyData.addChunks(chunk);
+ containerBlockData.addChunks(chunk);
}
}
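The pattern in this file is worth calling out: each flushed chunk is
recorded in the BlockData builder, and the block is committed to the
datanode exactly once, when the stream closes. A compressed sketch of that
flow (variable setup assumed, not the actual ChunkOutputStream fields;
putBlock is the statically imported ContainerProtocolCalls.putBlock):

    // blockID, xceiverClient, traceID and writtenChunks (a List<ChunkInfo>)
    // are assumed to exist; see the diff above for the real field setup.
    BlockData.Builder containerBlockData = BlockData.newBuilder()
        .setBlockID(blockID.getDatanodeBlockIDProtobuf());
    for (ChunkInfo chunk : writtenChunks) {
      containerBlockData.addChunks(chunk);  // accumulate chunk metadata
    }
    // One putBlock commits all accumulated chunks at close time.
    putBlock(xceiverClient, containerBlockData.build(), traceID);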
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
index 33bf90c..db9d374 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
@@ -326,8 +326,8 @@ public final class HddsUtils {
switch (proto.getCmdType()) {
case ReadContainer:
case ReadChunk:
- case ListKey:
- case GetKey:
+ case ListBlock:
+ case GetBlock:
case GetSmallFile:
case ListContainer:
case ListChunk:
@@ -340,8 +340,8 @@ public final class HddsUtils {
case CreateContainer:
case DeleteChunk:
case DeleteContainer:
- case DeleteKey:
- case PutKey:
+ case DeleteBlock:
+ case PutBlock:
case PutSmallFile:
default:
return false;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
index 1d6a89d..6b7a328 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
@@ -35,16 +35,16 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.DatanodeBlockID;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
- .GetKeyRequestProto;
+ .GetBlockRequestProto;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
- .GetKeyResponseProto;
+ .GetBlockResponseProto;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.GetSmallFileRequestProto;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.GetSmallFileResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyData;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
- .PutKeyRequestProto;
+ .PutBlockRequestProto;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.PutSmallFileRequestProto;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
@@ -76,33 +76,33 @@ public final class ContainerProtocolCalls {
}
/**
- * Calls the container protocol to get a container key.
+ * Calls the container protocol to get a container block.
*
* @param xceiverClient client to perform call
* @param datanodeBlockID blockID to identify container
* @param traceID container protocol call args
- * @return container protocol get key response
+ * @return container protocol get block response
* @throws IOException if there is an I/O error while performing the call
*/
- public static GetKeyResponseProto getKey(XceiverClientSpi xceiverClient,
+ public static GetBlockResponseProto getBlock(XceiverClientSpi xceiverClient,
DatanodeBlockID datanodeBlockID, String traceID) throws IOException {
- GetKeyRequestProto.Builder readKeyRequest = GetKeyRequestProto
+ GetBlockRequestProto.Builder readBlockRequest = GetBlockRequestProto
.newBuilder()
.setBlockID(datanodeBlockID);
String id = xceiverClient.getPipeline().getLeader().getUuidString();
ContainerCommandRequestProto request = ContainerCommandRequestProto
.newBuilder()
- .setCmdType(Type.GetKey)
+ .setCmdType(Type.GetBlock)
.setContainerID(datanodeBlockID.getContainerID())
.setTraceID(traceID)
.setDatanodeUuid(id)
- .setGetKey(readKeyRequest)
+ .setGetBlock(readBlockRequest)
.build();
ContainerCommandResponseProto response = xceiverClient.sendCommand(request);
validateContainerResponse(response);
- return response.getGetKey();
+ return response.getGetBlock();
}
/**
@@ -136,26 +136,26 @@ public final class ContainerProtocolCalls {
}
/**
- * Calls the container protocol to put a container key.
+ * Calls the container protocol to put a container block.
*
* @param xceiverClient client to perform call
- * @param containerKeyData key data to identify container
+ * @param containerBlockData block data to identify container
* @param traceID container protocol call args
* @throws IOException if there is an I/O error while performing the call
*/
- public static void putKey(XceiverClientSpi xceiverClient,
- KeyData containerKeyData, String traceID) throws IOException {
- PutKeyRequestProto.Builder createKeyRequest = PutKeyRequestProto
+ public static void putBlock(XceiverClientSpi xceiverClient,
+ BlockData containerBlockData, String traceID) throws IOException {
+ PutBlockRequestProto.Builder createBlockRequest = PutBlockRequestProto
.newBuilder()
- .setKeyData(containerKeyData);
+ .setBlockData(containerBlockData);
String id = xceiverClient.getPipeline().getLeader().getUuidString();
ContainerCommandRequestProto request = ContainerCommandRequestProto
.newBuilder()
- .setCmdType(Type.PutKey)
- .setContainerID(containerKeyData.getBlockID().getContainerID())
+ .setCmdType(Type.PutBlock)
+ .setContainerID(containerBlockData.getBlockID().getContainerID())
.setTraceID(traceID)
.setDatanodeUuid(id)
- .setPutKey(createKeyRequest)
+ .setPutBlock(createBlockRequest)
.build();
ContainerCommandResponseProto response = xceiverClient.sendCommand(request);
validateContainerResponse(response);
@@ -224,9 +224,9 @@ public final class ContainerProtocolCalls {
/**
* Allows writing a small file using single RPC. This takes the container
- * name, key name and data to write sends all that data to the container using
- * a single RPC. This API is designed to be used for files which are smaller
- * than 1 MB.
+ * name, block name and data to write, and sends all that data to the
+ * container using a single RPC. This API is designed for files which are
+ * smaller than 1 MB.
*
* @param client - client that communicates with the container.
* @param blockID - ID of the block
@@ -238,12 +238,12 @@ public final class ContainerProtocolCalls {
BlockID blockID, byte[] data, String traceID)
throws IOException {
- KeyData containerKeyData =
- KeyData.newBuilder().setBlockID(blockID.getDatanodeBlockIDProtobuf())
+ BlockData containerBlockData =
+ BlockData.newBuilder().setBlockID(blockID.getDatanodeBlockIDProtobuf())
.build();
- PutKeyRequestProto.Builder createKeyRequest =
- PutKeyRequestProto.newBuilder()
- .setKeyData(containerKeyData);
+ PutBlockRequestProto.Builder createBlockRequest =
+ PutBlockRequestProto.newBuilder()
+ .setBlockData(containerBlockData);
KeyValue keyValue =
KeyValue.newBuilder().setKey("OverWriteRequested").setValue("true")
@@ -255,7 +255,7 @@ public final class ContainerProtocolCalls {
PutSmallFileRequestProto putSmallFileRequest =
PutSmallFileRequestProto.newBuilder().setChunkInfo(chunk)
- .setKey(createKeyRequest).setData(ByteString.copyFrom(data))
+ .setBlock(createBlockRequest).setData(ByteString.copyFrom(data))
.build();
String id = client.getPipeline().getLeader().getUuidString();
@@ -387,12 +387,12 @@ public final class ContainerProtocolCalls {
*/
public static GetSmallFileResponseProto readSmallFile(XceiverClientSpi client,
BlockID blockID, String traceID) throws IOException {
- GetKeyRequestProto.Builder getKey = GetKeyRequestProto
+ GetBlockRequestProto.Builder getBlock = GetBlockRequestProto
.newBuilder()
.setBlockID(blockID.getDatanodeBlockIDProtobuf());
ContainerProtos.GetSmallFileRequestProto getSmallFileRequest =
GetSmallFileRequestProto
- .newBuilder().setKey(getKey)
+ .newBuilder().setBlock(getBlock)
.build();
String id = client.getPipeline().getLeader().getUuidString();
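Taken together, the two small-file helpers now read naturally as a pair. A
hedged usage sketch (client, blockID and traceID assumed to exist; the
return type of writeSmallFile is not shown in this hunk, so it is ignored
here):

    byte[] payload =
        "hello small file".getBytes(java.nio.charset.StandardCharsets.UTF_8);
    // One RPC: putBlock + WriteChunk combined.
    ContainerProtocolCalls.writeSmallFile(client, blockID, payload, traceID);
    // One RPC: getBlock + ReadChunk combined.
    GetSmallFileResponseProto smallFile =
        ContainerProtocolCalls.readSmallFile(client, blockID, traceID);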
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java
new file mode 100644
index 0000000..0c1d427
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java
@@ -0,0 +1,255 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.common.helpers;
+
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.client.BlockID;
+import com.google.common.base.Preconditions;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.ArrayList;
+
+/**
+ * Helper class to convert Protobuf to Java classes.
+ */
+public class BlockData {
+ private final BlockID blockID;
+ private final Map<String, String> metadata;
+
+ /**
+ * Represent a list of chunks.
+ * In order to reduce memory usage, chunkList is declared as an
+ * {@link Object}.
+ * When #elements == 0, chunkList is null.
+ * When #elements == 1, chunkList refers to the only element.
+ * When #elements > 1, chunkList refers to the list.
+ *
+ * Please note: when we are working with blocks, we don't care what they
+ * point to. So we don't read chunkinfo nor validate them. It is the
+ * responsibility of a higher layer like ozone. We just read and write data
+ * from the network.
+ */
+ private Object chunkList;
+
+ /**
+ * total size of the block.
+ */
+ private long size;
+
+ /**
+ * Constructs a BlockData Object.
+ *
+ * @param blockID
+ */
+ public BlockData(BlockID blockID) {
+ this.blockID = blockID;
+ this.metadata = new TreeMap<>();
+ this.size = 0;
+ }
+
+ /**
+ * Returns a blockData object from the protobuf data.
+ *
+ * @param data - Protobuf data.
+ * @return - BlockData
+ * @throws IOException
+ */
+ public static BlockData getFromProtoBuf(ContainerProtos.BlockData data) throws
+ IOException {
+ BlockData blockData = new BlockData(
+ BlockID.getFromProtobuf(data.getBlockID()));
+ for (int x = 0; x < data.getMetadataCount(); x++) {
+ blockData.addMetadata(data.getMetadata(x).getKey(),
+ data.getMetadata(x).getValue());
+ }
+ blockData.setChunks(data.getChunksList());
+ if (data.hasSize()) {
+ Preconditions.checkArgument(data.getSize() == blockData.getSize());
+ }
+ return blockData;
+ }
+
+ /**
+ * Returns a Protobuf message from BlockData.
+ * @return Proto Buf Message.
+ */
+ public ContainerProtos.BlockData getProtoBufMessage() {
+ ContainerProtos.BlockData.Builder builder =
+ ContainerProtos.BlockData.newBuilder();
+ builder.setBlockID(this.blockID.getDatanodeBlockIDProtobuf());
+ for (Map.Entry<String, String> entry : metadata.entrySet()) {
+ ContainerProtos.KeyValue.Builder keyValBuilder =
+ ContainerProtos.KeyValue.newBuilder();
+ builder.addMetadata(keyValBuilder.setKey(entry.getKey())
+ .setValue(entry.getValue()).build());
+ }
+ builder.addAllChunks(getChunks());
+ builder.setSize(size);
+ return builder.build();
+ }
+
+ /**
+ * Adds metadata.
+ *
+ * @param key - Key
+ * @param value - Value
+ * @throws IOException
+ */
+ public synchronized void addMetadata(String key, String value) throws
+ IOException {
+ if (this.metadata.containsKey(key)) {
+ throw new IOException("This key already exists. Key " + key);
+ }
+ metadata.put(key, value);
+ }
+
+ public synchronized Map<String, String> getMetadata() {
+ return Collections.unmodifiableMap(this.metadata);
+ }
+
+ /**
+ * Returns value of a key.
+ */
+ public synchronized String getValue(String key) {
+ return metadata.get(key);
+ }
+
+ /**
+ * Deletes a metadata entry from the map.
+ *
+ * @param key - Key
+ */
+ public synchronized void deleteKey(String key) {
+ metadata.remove(key);
+ }
+
+ @SuppressWarnings("unchecked")
+ private List<ContainerProtos.ChunkInfo> castChunkList() {
+ return (List<ContainerProtos.ChunkInfo>)chunkList;
+ }
+
+ /**
+ * Returns chunks list.
+ *
+ * @return list of chunkinfo.
+ */
+ public List<ContainerProtos.ChunkInfo> getChunks() {
+ return chunkList == null? Collections.emptyList()
+ : chunkList instanceof ContainerProtos.ChunkInfo?
+ Collections.singletonList((ContainerProtos.ChunkInfo)chunkList)
+ : Collections.unmodifiableList(castChunkList());
+ }
+
+ /**
+ * Adds chunkInfo to the list.
+ */
+ public void addChunk(ContainerProtos.ChunkInfo chunkInfo) {
+ if (chunkList == null) {
+ chunkList = chunkInfo;
+ } else {
+ final List<ContainerProtos.ChunkInfo> list;
+ if (chunkList instanceof ContainerProtos.ChunkInfo) {
+ list = new ArrayList<>(2);
+ list.add((ContainerProtos.ChunkInfo)chunkList);
+ chunkList = list;
+ } else {
+ list = castChunkList();
+ }
+ list.add(chunkInfo);
+ }
+ size += chunkInfo.getLen();
+ }
+
+ /**
+ * removes the chunk.
+ */
+ public boolean removeChunk(ContainerProtos.ChunkInfo chunkInfo) {
+ final boolean removed;
+ if (chunkList instanceof List) {
+ final List<ContainerProtos.ChunkInfo> list = castChunkList();
+ removed = list.remove(chunkInfo);
+ if (list.size() == 1) {
+ chunkList = list.get(0);
+ }
+ } else if (chunkInfo.equals(chunkList)) {
+ chunkList = null;
+ removed = true;
+ } else {
+ removed = false;
+ }
+
+ if (removed) {
+ size -= chunkInfo.getLen();
+ }
+ return removed;
+ }
+
+ /**
+ * Returns container ID.
+ *
+ * @return long.
+ */
+ public long getContainerID() {
+ return blockID.getContainerID();
+ }
+
+ /**
+ * Returns LocalID.
+ * @return long.
+ */
+ public long getLocalID() {
+ return blockID.getLocalID();
+ }
+
+ /**
+ * Return Block ID.
+ * @return BlockID.
+ */
+ public BlockID getBlockID() {
+ return blockID;
+ }
+
+ /**
+ * Sets Chunk list.
+ *
+ * @param chunks - List of chunks.
+ */
+ public void setChunks(List<ContainerProtos.ChunkInfo> chunks) {
+ if (chunks == null) {
+ chunkList = null;
+ size = 0L;
+ } else {
+ final int n = chunks.size();
+ chunkList = n == 0? null: n == 1? chunks.get(0): chunks;
+ size = chunks.parallelStream().mapToLong(
+ ContainerProtos.ChunkInfo::getLen).sum();
+ }
+ }
+
+ /**
+ * Get the total size of chunks allocated for the block.
+ * @return total size of the block.
+ */
+ public long getSize() {
+ return size;
+ }
+}
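The chunkList representation above (null for zero chunks, a bare ChunkInfo
for one, a List beyond that) is easiest to see with a tiny example. A
sketch, assuming BlockID's (containerID, localID) constructor and
ChunkInfo's chunkName/offset/len builder fields; any other required proto
fields are omitted here, and the imports match BlockData.java above:

    BlockData blockData = new BlockData(new BlockID(1L, 100L));
    ContainerProtos.ChunkInfo chunk = ContainerProtos.ChunkInfo.newBuilder()
        .setChunkName("block100_chunk0")  // name chosen for illustration
        .setOffset(0)
        .setLen(1024)
        .build();
    blockData.addChunk(chunk);   // one element: chunkList holds it directly
    System.out.println(blockData.getChunks().size());  // 1
    System.out.println(blockData.getSize());           // 1024, from chunk lengths
    blockData.removeChunk(chunk);  // chunkList back to null, size back to 0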
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
deleted file mode 100644
index ee27021..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.helpers;
-
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.client.BlockID;
-import com.google.common.base.Preconditions;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-import java.util.ArrayList;
-
-/**
- * Helper class to convert Protobuf to Java classes.
- */
-public class KeyData {
- private final BlockID blockID;
- private final Map<String, String> metadata;
-
- /**
- * Represent a list of chunks.
- * In order to reduce memory usage, chunkList is declared as an
- * {@link Object}.
- * When #elements == 0, chunkList is null.
- * When #elements == 1, chunkList refers to the only element.
- * When #elements > 1, chunkList refers to the list.
- *
- * Please note : when we are working with keys, we don't care what they point
- * to. So we We don't read chunkinfo nor validate them. It is responsibility
- * of higher layer like ozone. We just read and write data from network.
- */
- private Object chunkList;
-
- /**
- * total size of the key.
- */
- private long size;
-
- /**
- * Constructs a KeyData Object.
- *
- * @param blockID
- */
- public KeyData(BlockID blockID) {
- this.blockID = blockID;
- this.metadata = new TreeMap<>();
- this.size = 0;
- }
-
- /**
- * Returns a keyData object from the protobuf data.
- *
- * @param data - Protobuf data.
- * @return - KeyData
- * @throws IOException
- */
- public static KeyData getFromProtoBuf(ContainerProtos.KeyData data) throws
- IOException {
- KeyData keyData = new KeyData(BlockID.getFromProtobuf(data.getBlockID()));
- for (int x = 0; x < data.getMetadataCount(); x++) {
- keyData.addMetadata(data.getMetadata(x).getKey(),
- data.getMetadata(x).getValue());
- }
- keyData.setChunks(data.getChunksList());
- if (data.hasSize()) {
- Preconditions.checkArgument(data.getSize() == keyData.getSize());
- }
- return keyData;
- }
-
- /**
- * Returns a Protobuf message from KeyData.
- * @return Proto Buf Message.
- */
- public ContainerProtos.KeyData getProtoBufMessage() {
- ContainerProtos.KeyData.Builder builder =
- ContainerProtos.KeyData.newBuilder();
- builder.setBlockID(this.blockID.getDatanodeBlockIDProtobuf());
- for (Map.Entry<String, String> entry : metadata.entrySet()) {
- ContainerProtos.KeyValue.Builder keyValBuilder =
- ContainerProtos.KeyValue.newBuilder();
- builder.addMetadata(keyValBuilder.setKey(entry.getKey())
- .setValue(entry.getValue()).build());
- }
- builder.addAllChunks(getChunks());
- builder.setSize(size);
- return builder.build();
- }
-
- /**
- * Adds metadata.
- *
- * @param key - Key
- * @param value - Value
- * @throws IOException
- */
- public synchronized void addMetadata(String key, String value) throws
- IOException {
- if (this.metadata.containsKey(key)) {
- throw new IOException("This key already exists. Key " + key);
- }
- metadata.put(key, value);
- }
-
- public synchronized Map<String, String> getMetadata() {
- return Collections.unmodifiableMap(this.metadata);
- }
-
- /**
- * Returns value of a key.
- */
- public synchronized String getValue(String key) {
- return metadata.get(key);
- }
-
- /**
- * Deletes a metadata entry from the map.
- *
- * @param key - Key
- */
- public synchronized void deleteKey(String key) {
- metadata.remove(key);
- }
-
- @SuppressWarnings("unchecked")
- private List<ContainerProtos.ChunkInfo> castChunkList() {
- return (List<ContainerProtos.ChunkInfo>)chunkList;
- }
-
- /**
- * Returns chunks list.
- *
- * @return list of chunkinfo.
- */
- public List<ContainerProtos.ChunkInfo> getChunks() {
- return chunkList == null? Collections.emptyList()
- : chunkList instanceof ContainerProtos.ChunkInfo?
- Collections.singletonList((ContainerProtos.ChunkInfo)chunkList)
- : Collections.unmodifiableList(castChunkList());
- }
-
- /**
- * Adds chinkInfo to the list.
- */
- public void addChunk(ContainerProtos.ChunkInfo chunkInfo) {
- if (chunkList == null) {
- chunkList = chunkInfo;
- } else {
- final List<ContainerProtos.ChunkInfo> list;
- if (chunkList instanceof ContainerProtos.ChunkInfo) {
- list = new ArrayList<>(2);
- list.add((ContainerProtos.ChunkInfo)chunkList);
- chunkList = list;
- } else {
- list = castChunkList();
- }
- list.add(chunkInfo);
- }
- size += chunkInfo.getLen();
- }
-
- /**
- * removes the chunk.
- */
- public boolean removeChunk(ContainerProtos.ChunkInfo chunkInfo) {
- final boolean removed;
- if (chunkList instanceof List) {
- final List<ContainerProtos.ChunkInfo> list = castChunkList();
- removed = list.remove(chunkInfo);
- if (list.size() == 1) {
- chunkList = list.get(0);
- }
- } else if (chunkInfo.equals(chunkList)) {
- chunkList = null;
- removed = true;
- } else {
- removed = false;
- }
-
- if (removed) {
- size -= chunkInfo.getLen();
- }
- return removed;
- }
-
- /**
- * Returns container ID.
- *
- * @return long.
- */
- public long getContainerID() {
- return blockID.getContainerID();
- }
-
- /**
- * Returns LocalID.
- * @return long.
- */
- public long getLocalID() {
- return blockID.getLocalID();
- }
-
- /**
- * Return Block ID.
- * @return BlockID.
- */
- public BlockID getBlockID() {
- return blockID;
- }
-
- /**
- * Sets Chunk list.
- *
- * @param chunks - List of chunks.
- */
- public void setChunks(List<ContainerProtos.ChunkInfo> chunks) {
- if (chunks == null) {
- chunkList = null;
- size = 0L;
- } else {
- final int n = chunks.size();
- chunkList = n == 0? null: n == 1? chunks.get(0): chunks;
- size = chunks.parallelStream().mapToLong(
- ContainerProtos.ChunkInfo::getLen).sum();
- }
- }
-
- /**
- * Get the total size of chunks allocated for the key.
- * @return total size of the key.
- */
- public long getSize() {
- return size;
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index ba0d2d4..7be8a62 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -49,13 +49,13 @@ package hadoop.hdds.datanode;
* 5. ListContainer - Returns the list of containers on this
* datanode. This will be used by tests and tools.
*
- * 6. PutKey - Given a valid container, creates a key.
+ * 6. PutBlock - Given a valid container, creates a block.
*
- * 7. GetKey - Allows user to read the metadata of a Key.
+ * 7. GetBlock - Allows user to read the metadata of a block.
*
- * 8. DeleteKey - Deletes a given key.
+ * 8. DeleteBlock - Deletes a given block.
*
- * 9. ListKey - Returns a list of keys that are present inside
+ * 9. ListBlock - Returns a list of blocks that are present inside
* a given container.
*
* 10. ReadChunk - Allows us to read a chunk.
@@ -64,13 +64,13 @@ package hadoop.hdds.datanode;
*
* 12. WriteChunk - Allows us to write a chunk
*
- * 13. ListChunk - Given a Container/Key returns the list of Chunks.
+ * 13. ListChunk - Given a Container/Block returns the list of Chunks.
*
* 14. CompactChunk - Re-writes a chunk based on Offsets.
*
- * 15. PutSmallFile - A single RPC that combines both putKey and WriteChunk.
+ * 15. PutSmallFile - A single RPC that combines both putBlock and WriteChunk.
*
- * 16. GetSmallFile - A single RPC that combines both getKey and ReadChunk.
+ * 16. GetSmallFile - A single RPC that combines both getBlock and ReadChunk.
*
* 17. CloseContainer - Closes an open container and makes it immutable.
*
@@ -84,10 +84,10 @@ enum Type {
DeleteContainer = 4;
ListContainer = 5;
- PutKey = 6;
- GetKey = 7;
- DeleteKey = 8;
- ListKey = 9;
+ PutBlock = 6;
+ GetBlock = 7;
+ DeleteBlock = 8;
+ ListBlock = 9;
ReadChunk = 10;
DeleteChunk = 11;
@@ -95,7 +95,7 @@ enum Type {
ListChunk = 13;
CompactChunk = 14;
- /** Combines Key and Chunk Operation into Single RPC. */
+ /** Combines Block and Chunk Operation into Single RPC. */
PutSmallFile = 15;
GetSmallFile = 16;
CloseContainer = 17;
@@ -115,7 +115,7 @@ enum Result {
CONTAINER_NOT_FOUND = 9;
IO_EXCEPTION = 10;
UNABLE_TO_READ_METADATA_DB = 11;
- NO_SUCH_KEY = 12;
+ NO_SUCH_BLOCK = 12;
OVERWRITE_FLAG_REQUIRED = 13;
UNABLE_TO_FIND_DATA_DIR = 14;
INVALID_WRITE_SIZE = 15;
@@ -185,10 +185,10 @@ message ContainerCommandRequestProto {
optional ListContainerRequestProto listContainer = 9;
optional CloseContainerRequestProto closeContainer = 10;
- optional PutKeyRequestProto putKey = 11;
- optional GetKeyRequestProto getKey = 12;
- optional DeleteKeyRequestProto deleteKey = 13;
- optional ListKeyRequestProto listKey = 14;
+ optional PutBlockRequestProto putBlock = 11;
+ optional GetBlockRequestProto getBlock = 12;
+ optional DeleteBlockRequestProto deleteBlock = 13;
+ optional ListBlockRequestProto listBlock = 14;
optional ReadChunkRequestProto readChunk = 15;
optional WriteChunkRequestProto writeChunk = 16;
@@ -215,10 +215,10 @@ message ContainerCommandResponseProto {
optional ListContainerResponseProto listContainer = 9;
optional CloseContainerResponseProto closeContainer = 10;
- optional PutKeyResponseProto putKey = 11;
- optional GetKeyResponseProto getKey = 12;
- optional DeleteKeyResponseProto deleteKey = 13;
- optional ListKeyResponseProto listKey = 14;
+ optional PutBlockResponseProto putBlock = 11;
+ optional GetBlockResponseProto getBlock = 12;
+ optional DeleteBlockResponseProto deleteBlock = 13;
+ optional ListBlockResponseProto listBlock = 14;
optional WriteChunkResponseProto writeChunk = 15;
optional ReadChunkResponseProto readChunk = 16;
@@ -294,7 +294,7 @@ message CloseContainerResponseProto {
optional int64 containerID = 2;
}
-message KeyData {
+message BlockData {
required DatanodeBlockID blockID = 1;
optional int64 flags = 2; // for future use.
repeated KeyValue metadata = 3;
@@ -302,25 +302,25 @@ message KeyData {
optional int64 size = 5;
}
-// Key Messages.
-message PutKeyRequestProto {
- required KeyData keyData = 1;
+// Block Messages.
+message PutBlockRequestProto {
+ required BlockData blockData = 1;
}
-message PutKeyResponseProto {
+message PutBlockResponseProto {
required GetCommittedBlockLengthResponseProto committedBlockLength = 1;
}
-message GetKeyRequestProto {
+message GetBlockRequestProto {
required DatanodeBlockID blockID = 1;
}
-message GetKeyResponseProto {
- required KeyData keyData = 1;
+message GetBlockResponseProto {
+ required BlockData blockData = 1;
}
-message DeleteKeyRequestProto {
+message DeleteBlockRequestProto {
required DatanodeBlockID blockID = 1;
}
@@ -333,17 +333,17 @@ message GetCommittedBlockLengthResponseProto {
required int64 blockLength = 2;
}
-message DeleteKeyResponseProto {
+message DeleteBlockResponseProto {
}
-message ListKeyRequestProto {
+message ListBlockRequestProto {
optional int64 startLocalID = 2;
required uint32 count = 3;
}
-message ListKeyResponseProto {
- repeated KeyData keyData = 1;
+message ListBlockResponseProto {
+ repeated BlockData blockData = 1;
}
// Chunk Operations
@@ -401,11 +401,11 @@ message ListChunkResponseProto {
repeated ChunkInfo chunkData = 1;
}
-/** For small file access combines write chunk and putKey into a single
+/** For small file access combines write chunk and putBlock into a single
RPC */
message PutSmallFileRequestProto {
- required PutKeyRequestProto key = 1;
+ required PutBlockRequestProto block = 1;
required ChunkInfo chunkInfo = 2;
required bytes data = 3;
}
@@ -416,7 +416,7 @@ message PutSmallFileResponseProto {
}
message GetSmallFileRequestProto {
- required GetKeyRequestProto key = 1;
+ required GetBlockRequestProto block = 1;
}
message GetSmallFileResponseProto {
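To make the renamed messages concrete, here is a sketch of assembling a
PutSmallFile request with the new field names, modeled on the builder code
elsewhere in this patch. blockData (a ContainerProtos.BlockData), chunk,
payload, traceID and datanodeUuid are assumed inputs:

    PutBlockRequestProto.Builder putBlock = PutBlockRequestProto.newBuilder()
        .setBlockData(blockData);               // was setKeyData(...)
    PutSmallFileRequestProto smallFile = PutSmallFileRequestProto.newBuilder()
        .setBlock(putBlock)                     // was setKey(...)
        .setChunkInfo(chunk)
        .setData(ByteString.copyFrom(payload))
        .build();
    ContainerCommandRequestProto request =
        ContainerCommandRequestProto.newBuilder()
            .setCmdType(Type.PutSmallFile)
            .setContainerID(blockData.getBlockID().getContainerID())
            .setPutSmallFile(smallFile)
            .setTraceID(traceID)
            .setDatanodeUuid(datanodeUuid)
            .build();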
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
index 1ef3d0d..b736eb5 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
@@ -22,7 +22,7 @@ import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import java.util.ArrayList;
import java.util.Collections;
@@ -33,9 +33,9 @@ import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;
/**
- * Map: containerId -> (localId -> {@link KeyData}).
+ * Map: containerId {@literal ->} (localId {@literal ->} {@link BlockData}).
* The outer container map does not entail locking for a better performance.
- * The inner {@link KeyDataMap} is synchronized.
+ * The inner {@link BlockDataMap} is synchronized.
*
* This class maintains a list of open keys per container; when the closeContainer
* command comes, it should autocommit all open keys of an open container before
@@ -43,16 +43,16 @@ import java.util.function.Function;
*/
public class OpenContainerBlockMap {
/**
- * Map: localId -> KeyData.
+ * Map: localId {@literal ->} BlockData.
*
* In order to support {@link #getAll()}, the update operations are
* synchronized.
*/
- static class KeyDataMap {
- private final ConcurrentMap<Long, KeyData> blocks =
+ static class BlockDataMap {
+ private final ConcurrentMap<Long, BlockData> blocks =
new ConcurrentHashMap<>();
- KeyData get(long localId) {
+ BlockData get(long localId) {
return blocks.get(localId);
}
@@ -61,12 +61,12 @@ public class OpenContainerBlockMap {
return blocks.size();
}
- synchronized KeyData computeIfAbsent(
- long localId, Function<Long, KeyData> f) {
+ synchronized BlockData computeIfAbsent(
+ long localId, Function<Long, BlockData> f) {
return blocks.computeIfAbsent(localId, f);
}
- synchronized List<KeyData> getAll() {
+ synchronized List<BlockData> getAll() {
return new ArrayList<>(blocks.values());
}
}
@@ -79,7 +79,7 @@ public class OpenContainerBlockMap {
*
* For now, we will track all open blocks of a container in the blockMap.
*/
- private final ConcurrentMap<Long, KeyDataMap> containers =
+ private final ConcurrentMap<Long, BlockDataMap> containers =
new ConcurrentHashMap<>();
/**
@@ -94,9 +94,9 @@ public class OpenContainerBlockMap {
public void addChunk(BlockID blockID, ChunkInfo info) {
Preconditions.checkNotNull(info);
- containers.computeIfAbsent(blockID.getContainerID(), id -> new KeyDataMap())
- .computeIfAbsent(blockID.getLocalID(), id -> new KeyData(blockID))
- .addChunk(info);
+ containers.computeIfAbsent(blockID.getContainerID(),
+ id -> new BlockDataMap()).computeIfAbsent(blockID.getLocalID(),
+ id -> new BlockData(blockID)).addChunk(info);
}
/**
@@ -113,21 +113,21 @@ public class OpenContainerBlockMap {
}
/**
- * Returns the list of open to the openContainerBlockMap.
+ * Returns the list of open blocks to the openContainerBlockMap.
* @param containerId container id
- * @return List of open Keys(blocks)
+ * @return List of open blocks
*/
- public List<KeyData> getOpenKeys(long containerId) {
+ public List<BlockData> getOpenBlocks(long containerId) {
return Optional.ofNullable(containers.get(containerId))
- .map(KeyDataMap::getAll)
+ .map(BlockDataMap::getAll)
.orElseGet(Collections::emptyList);
}
/**
* removes the block from the block map.
- * @param blockID
+ * @param blockID - block ID
*/
- public void removeFromKeyMap(BlockID blockID) {
+ public void removeFromBlockMap(BlockID blockID) {
Preconditions.checkNotNull(blockID);
containers.computeIfPresent(blockID.getContainerID(), (containerId, blocks)
-> blocks.removeAndGetSize(blockID.getLocalID()) == 0? null: blocks);
@@ -136,16 +136,16 @@ public class OpenContainerBlockMap {
/**
* Returns true if the block exists in the map, false otherwise.
*
- * @param blockID
+ * @param blockID - Block ID.
* @return True, if it exists, false otherwise
*/
public boolean checkIfBlockExists(BlockID blockID) {
- KeyDataMap keyDataMap = containers.get(blockID.getContainerID());
+ BlockDataMap keyDataMap = containers.get(blockID.getContainerID());
return keyDataMap != null && keyDataMap.get(blockID.getLocalID()) != null;
}
@VisibleForTesting
- KeyDataMap getKeyDataMap(long containerId) {
+ BlockDataMap getBlockDataMap(long containerId) {
return containers.get(containerId);
}
}
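A short usage sketch of the renamed map (blockID and chunkInfo are assumed
to be built elsewhere, and the no-arg constructor is assumed):

    OpenContainerBlockMap blockMap = new OpenContainerBlockMap();
    // Track a chunk written to an open block.
    blockMap.addChunk(blockID, chunkInfo);
    // On closeContainer: fetch everything still open in this container.
    List<BlockData> open = blockMap.getOpenBlocks(blockID.getContainerID());
    // After the block is committed, drop it from the map.
    blockMap.removeFromBlockMap(blockID);
    boolean stillOpen = blockMap.checkIfBlockExists(blockID);  // now false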
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
index b0d4cbc..430b0ef 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.ozone.container.common.helpers
.DeletedContainerBlocksSummary;
import org.apache.hadoop.ozone.container.common.interfaces.Container;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
import org.apache.hadoop.ozone.container.common.statemachine
.EndpointStateMachine;
@@ -199,7 +199,7 @@ public class DeleteBlocksCommandHandler implements CommandHandler {
}
int newDeletionBlocks = 0;
- MetadataStore containerDB = KeyUtils.getDB(containerData, conf);
+ MetadataStore containerDB = BlockUtils.getDB(containerData, conf);
for (Long blk : delTX.getLocalIDList()) {
BatchOperation batch = new BatchOperation();
byte[] blkBytes = Longs.toByteArray(blk);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index b84db66..a7bef86 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -103,10 +103,10 @@ import java.util.stream.Collectors;
* implementation. For example, synchronization between writeChunk and
* createContainer in {@link ContainerStateMachine}.
*
- * PutKey is synchronized with WriteChunk operations, PutKey for a block is
- * executed only after all the WriteChunk preceding the PutKey have finished.
+ * PutBlock is synchronized with WriteChunk operations, PutBlock for a block is
+ * executed only after all the WriteChunk preceding the PutBlock have finished.
*
- * CloseContainer is synchronized with WriteChunk and PutKey operations,
+ * CloseContainer is synchronized with WriteChunk and PutBlock operations,
* CloseContainer for a container is processed after all the preceding write
* operations for the container have finished.
* */
@@ -443,7 +443,7 @@ public class ContainerStateMachine extends BaseStateMachine {
/**
* This class maintains maps and provide utilities to enforce synchronization
- * among createContainer, writeChunk, putKey and closeContainer.
+ * among createContainer, writeChunk, putBlock and closeContainer.
*/
private class StateMachineHelper {
@@ -453,7 +453,7 @@ public class ContainerStateMachine extends BaseStateMachine {
private final ConcurrentHashMap<Long, CommitChunkFutureMap>
block2ChunkMap;
- // Map for putKey futures
+ // Map for putBlock futures
private final ConcurrentHashMap<Long, CompletableFuture<Message>>
blockCommitMap;
@@ -505,11 +505,11 @@ public class ContainerStateMachine extends BaseStateMachine {
// The following section handles applyTransaction transactions
// on a container
- private CompletableFuture<Message> handlePutKey(
+ private CompletableFuture<Message> handlePutBlock(
ContainerCommandRequestProto requestProto) {
List<CompletableFuture<Message>> futureList = new ArrayList<>();
long localId =
- requestProto.getPutKey().getKeyData().getBlockID().getLocalID();
+ requestProto.getPutBlock().getBlockData().getBlockID().getLocalID();
// Need not wait for create container future here as it has already
// finished.
if (block2ChunkMap.get(localId) != null) {
@@ -518,18 +518,18 @@ public class ContainerStateMachine extends BaseStateMachine {
CompletableFuture<Message> effectiveFuture =
runCommandAfterFutures(futureList, requestProto);
- CompletableFuture<Message> putKeyFuture =
+ CompletableFuture<Message> putBlockFuture =
effectiveFuture.thenApply(message -> {
blockCommitMap.remove(localId);
return message;
});
- blockCommitMap.put(localId, putKeyFuture);
- return putKeyFuture;
+ blockCommitMap.put(localId, putBlockFuture);
+ return putBlockFuture;
}
// Close Container should be executed only if all pending WriteType
// container cmds get executed. Transactions which can return a future
- // are WriteChunk and PutKey.
+ // are WriteChunk and PutBlock.
private CompletableFuture<Message> handleCloseContainer(
ContainerCommandRequestProto requestProto) {
List<CompletableFuture<Message>> futureList = new ArrayList<>();
@@ -539,7 +539,7 @@ public class ContainerStateMachine extends BaseStateMachine {
block2ChunkMap.values().forEach(b -> futureList.addAll(b.getAll()));
futureList.addAll(blockCommitMap.values());
- // There are pending write Chunk/PutKey type requests
+ // There are pending write Chunk/PutBlock type requests
// Queue this closeContainer request behind all these requests
CompletableFuture<Message> closeContainerFuture =
runCommandAfterFutures(futureList, requestProto);
@@ -615,8 +615,8 @@ public class ContainerStateMachine extends BaseStateMachine {
return handleChunkCommit(requestProto, index);
case CloseContainer:
return handleCloseContainer(requestProto);
- case PutKey:
- return handlePutKey(requestProto);
+ case PutBlock:
+ return handlePutBlock(requestProto);
case CreateContainer:
return handleCreateContainer(requestProto);
default:
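The ordering contract described in the comments above (PutBlock waits for
its preceding WriteChunks; CloseContainer waits for all pending
WriteChunk/PutBlock futures) boils down to chaining on
CompletableFuture.allOf. An illustrative reduction, not the actual
ContainerStateMachine code:

    import java.util.List;
    import java.util.concurrent.CompletableFuture;
    import java.util.function.Supplier;

    static <T> CompletableFuture<T> runCommandAfterFutures(
        List<CompletableFuture<T>> pending, Supplier<T> command) {
      // Complete only after every pending future completes, then run the
      // queued command (e.g. the PutBlock or CloseContainer handler).
      return CompletableFuture
          .allOf(pending.toArray(new CompletableFuture[0]))
          .thenApply(ignored -> command.get());
    }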
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java
index f800223..535af29 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java
@@ -21,12 +21,12 @@ package org.apache.hadoop.ozone.container.keyvalue;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
import org.apache.hadoop.ozone.container.common.impl.ContainerData;
import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil;
import org.apache.hadoop.utils.MetaStoreIterator;
import org.apache.hadoop.utils.MetadataKeyFilters;
@@ -48,7 +48,7 @@ import java.util.NoSuchElementException;
* {@link MetadataKeyFilters#getNormalKeyFilter()}
*/
@InterfaceAudience.Public
-public class KeyValueBlockIterator implements BlockIterator<KeyData> {
+public class KeyValueBlockIterator implements BlockIterator<BlockData> {
private static final Logger LOG = LoggerFactory.getLogger(
KeyValueBlockIterator.class);
@@ -57,7 +57,7 @@ public class KeyValueBlockIterator implements BlockIterator<KeyData> {
private static KeyPrefixFilter defaultBlockFilter = MetadataKeyFilters
.getNormalKeyFilter();
private KeyPrefixFilter blockFilter;
- private KeyData nextBlock;
+ private BlockData nextBlock;
private long containerId;
/**
@@ -91,7 +91,7 @@ public class KeyValueBlockIterator implements BlockIterator<KeyData> {
containerData;
keyValueContainerData.setDbFile(KeyValueContainerLocationUtil
.getContainerDBFile(metdataPath, containerId));
- MetadataStore metadataStore = KeyUtils.getDB(keyValueContainerData, new
+ MetadataStore metadataStore = BlockUtils.getDB(keyValueContainerData, new
OzoneConfiguration());
blockIterator = metadataStore.iterator();
blockFilter = filter;
@@ -103,9 +103,9 @@ public class KeyValueBlockIterator implements BlockIterator<KeyData> {
* @throws IOException
*/
@Override
- public KeyData nextBlock() throws IOException, NoSuchElementException {
+ public BlockData nextBlock() throws IOException, NoSuchElementException {
if (nextBlock != null) {
- KeyData currentBlock = nextBlock;
+ BlockData currentBlock = nextBlock;
nextBlock = null;
return currentBlock;
}
@@ -124,7 +124,7 @@ public class KeyValueBlockIterator implements BlockIterator<KeyData> {
if (blockIterator.hasNext()) {
KeyValue block = blockIterator.next();
if (blockFilter.filterKey(null, block.getKey(), null)) {
- nextBlock = KeyUtils.getKeyData(block.getValue());
+ nextBlock = BlockUtils.getBlockData(block.getValue());
LOG.trace("Block matching with filter found: blockID is : {} for " +
"containerID {}", nextBlock.getLocalID(), containerId);
return true;
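
For readers of the renamed iterator API, a short usage sketch. The constructor arguments (container id, metadata path, optional key-prefix filter) are only partially visible in this diff, so the sketch takes a ready-made instance; hasNext() is assumed from the cached-lookahead logic above:

import java.io.IOException;

import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueBlockIterator;

/** Sketch: enumerate the BlockData records of one container. */
public final class BlockIterationSketch {

  // Construction of the iterator is elided; we take a ready-made instance.
  static long logAllBlocks(KeyValueBlockIterator blocks) throws IOException {
    long count = 0;
    while (blocks.hasNext()) {              // hasNext() assumed from the
      BlockData block = blocks.nextBlock(); // cached-lookahead logic above
      System.out.println("block localID = " + block.getLocalID());
      count++;
    }
    return count;
  }

  private BlockIterationSketch() { }
}
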
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index 0870c76..09d4054 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -49,7 +49,7 @@ import org.apache.hadoop.ozone.container.common.interfaces.ContainerPacker;
import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
import org.apache.hadoop.ozone.container.keyvalue.helpers
.KeyValueContainerLocationUtil;
import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
@@ -293,7 +293,7 @@ public class KeyValueContainer implements Container<KeyValueContainerData> {
// It is ok if this operation takes a bit of time.
// Close container is not expected to be instantaneous.
try {
- MetadataStore db = KeyUtils.getDB(containerData, config);
+ MetadataStore db = BlockUtils.getDB(containerData, config);
db.compactDB();
} catch (StorageContainerException ex) {
throw ex;
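
The catch blocks above follow a rethrow-or-wrap pattern: an already-classified StorageContainerException passes through unchanged, while anything else is wrapped so callers see a single exception type. A self-contained sketch of the pattern, with a stand-in exception class in place of StorageContainerException:

import java.io.IOException;

/** Self-contained sketch of the rethrow-or-wrap pattern around DB calls. */
public class RethrowOrWrapSketch {

  /** Stand-in for StorageContainerException. */
  static class StorageException extends IOException {
    StorageException(String msg, Throwable cause) { super(msg, cause); }
  }

  static void compactOnClose() throws StorageException {
    try {
      compactDb();                       // stand-in for db.compactDB()
    } catch (StorageException ex) {
      throw ex;                          // already classified: pass through
    } catch (IOException ex) {
      // Anything else is wrapped so callers handle one exception type.
      throw new StorageException("Error in DB compaction while closing container", ex);
    }
  }

  static void compactDb() throws IOException {
    // pretend compaction succeeded
  }

  public static void main(String[] args) throws Exception {
    compactOnClose();
    System.out.println("container DB compacted and closed");
  }
}
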
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 5acecb4..5be6e28 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -48,10 +48,10 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.container.common.helpers
.StorageContainerException;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
import org.apache.hadoop.ozone.container.common.impl.OpenContainerBlockMap;
import org.apache.hadoop.ozone.container.common.interfaces.Container;
@@ -62,13 +62,13 @@ import org.apache.hadoop.ozone.container.common.volume
.RoundRobinVolumeChoosingPolicy;
import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
import org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
import org.apache.hadoop.ozone.container.keyvalue.helpers.SmallFileUtils;
import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerImpl;
-import org.apache.hadoop.ozone.container.keyvalue.impl.KeyManagerImpl;
+import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl;
import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
-import org.apache.hadoop.ozone.container.keyvalue.interfaces.KeyManager;
+import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
import org.apache.hadoop.ozone.container.keyvalue.statemachine.background
.BlockDeletingService;
import org.apache.hadoop.util.AutoCloseableLock;
@@ -117,7 +117,7 @@ public class KeyValueHandler extends Handler {
KeyValueHandler.class);
private final ContainerType containerType;
- private final KeyManager keyManager;
+ private final BlockManager blockManager;
private final ChunkManager chunkManager;
private final BlockDeletingService blockDeletingService;
private final VolumeChoosingPolicy volumeChoosingPolicy;
@@ -129,7 +129,7 @@ public class KeyValueHandler extends Handler {
VolumeSet volSet, ContainerMetrics metrics) {
super(config, contSet, volSet, metrics);
containerType = ContainerType.KeyValueContainer;
- keyManager = new KeyManagerImpl(config);
+ blockManager = new BlockManagerImpl(config);
chunkManager = new ChunkManagerImpl();
long svcInterval = config
.getTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
@@ -187,13 +187,13 @@ public class KeyValueHandler extends Handler {
return handleUnsupportedOp(request);
case CloseContainer:
return handleCloseContainer(request, kvContainer);
- case PutKey:
- return handlePutKey(request, kvContainer);
- case GetKey:
- return handleGetKey(request, kvContainer);
- case DeleteKey:
- return handleDeleteKey(request, kvContainer);
- case ListKey:
+ case PutBlock:
+ return handlePutBlock(request, kvContainer);
+ case GetBlock:
+ return handleGetBlock(request, kvContainer);
+ case DeleteBlock:
+ return handleDeleteBlock(request, kvContainer);
+ case ListBlock:
return handleUnsupportedOp(request);
case ReadChunk:
return handleReadChunk(request, kvContainer);
@@ -222,8 +222,8 @@ public class KeyValueHandler extends Handler {
}
@VisibleForTesting
- public KeyManager getKeyManager() {
- return this.keyManager;
+ public BlockManager getBlockManager() {
+ return this.blockManager;
}
/**
@@ -413,7 +413,7 @@ public class KeyValueHandler extends Handler {
// remove the container from open block map once, all the blocks
// have been committed and the container is closed
kvData.setState(ContainerProtos.ContainerLifeCycleState.CLOSING);
- commitPendingKeys(kvContainer);
+ commitPendingBlocks(kvContainer);
kvContainer.close();
// make sure the container open keys from BlockMap get removed
openContainerBlockMap.removeContainer(kvData.getContainerID());
@@ -429,13 +429,13 @@ public class KeyValueHandler extends Handler {
}
/**
- * Handle Put Key operation. Calls KeyManager to process the request.
+ * Handle Put Block operation. Calls BlockManager to process the request.
*/
- ContainerCommandResponseProto handlePutKey(
+ ContainerCommandResponseProto handlePutBlock(
ContainerCommandRequestProto request, KeyValueContainer kvContainer) {
long blockLength;
- if (!request.hasPutKey()) {
+ if (!request.hasPutBlock()) {
LOG.debug("Malformed Put Key request. trace ID: {}",
request.getTraceID());
return ContainerUtils.malformedRequest(request);
@@ -444,11 +444,11 @@ public class KeyValueHandler extends Handler {
try {
checkContainerOpen(kvContainer);
- KeyData keyData = KeyData.getFromProtoBuf(
- request.getPutKey().getKeyData());
- long numBytes = keyData.getProtoBufMessage().toByteArray().length;
- blockLength = commitKey(keyData, kvContainer);
- metrics.incContainerBytesStats(Type.PutKey, numBytes);
+ BlockData blockData = BlockData.getFromProtoBuf(
+ request.getPutBlock().getBlockData());
+ long numBytes = blockData.getProtoBufMessage().toByteArray().length;
+ blockLength = commitKey(blockData, kvContainer);
+ metrics.incContainerBytesStats(Type.PutBlock, numBytes);
} catch (StorageContainerException ex) {
return ContainerUtils.logAndReturnError(LOG, ex, request);
} catch (IOException ex) {
@@ -457,46 +457,46 @@ public class KeyValueHandler extends Handler {
request);
}
- return KeyUtils.putKeyResponseSuccess(request, blockLength);
+ return BlockUtils.putBlockResponseSuccess(request, blockLength);
}
- private void commitPendingKeys(KeyValueContainer kvContainer)
+ private void commitPendingBlocks(KeyValueContainer kvContainer)
throws IOException {
long containerId = kvContainer.getContainerData().getContainerID();
- List<KeyData> pendingKeys =
- this.openContainerBlockMap.getOpenKeys(containerId);
- for(KeyData keyData : pendingKeys) {
- commitKey(keyData, kvContainer);
+ List<BlockData> pendingBlocks =
+ this.openContainerBlockMap.getOpenBlocks(containerId);
+ for(BlockData blockData : pendingBlocks) {
+ commitKey(blockData, kvContainer);
}
}
- private long commitKey(KeyData keyData, KeyValueContainer kvContainer)
+ private long commitKey(BlockData blockData, KeyValueContainer kvContainer)
throws IOException {
- Preconditions.checkNotNull(keyData);
- long length = keyManager.putKey(kvContainer, keyData);
+ Preconditions.checkNotNull(blockData);
+ long length = blockManager.putBlock(kvContainer, blockData);
//update the open key Map in containerManager
- this.openContainerBlockMap.removeFromKeyMap(keyData.getBlockID());
+ this.openContainerBlockMap.removeFromBlockMap(blockData.getBlockID());
return length;
}
/**
- * Handle Get Key operation. Calls KeyManager to process the request.
+ * Handle Get Block operation. Calls BlockManager to process the request.
*/
- ContainerCommandResponseProto handleGetKey(
+ ContainerCommandResponseProto handleGetBlock(
ContainerCommandRequestProto request, KeyValueContainer kvContainer) {
- if (!request.hasGetKey()) {
+ if (!request.hasGetBlock()) {
LOG.debug("Malformed Get Key request. trace ID: {}",
request.getTraceID());
return ContainerUtils.malformedRequest(request);
}
- KeyData responseData;
+ BlockData responseData;
try {
BlockID blockID = BlockID.getFromProtobuf(
- request.getGetKey().getBlockID());
- responseData = keyManager.getKey(kvContainer, blockID);
+ request.getGetBlock().getBlockID());
+ responseData = blockManager.getBlock(kvContainer, blockID);
long numBytes = responseData.getProtoBufMessage().toByteArray().length;
- metrics.incContainerBytesStats(Type.GetKey, numBytes);
+ metrics.incContainerBytesStats(Type.GetBlock, numBytes);
} catch (StorageContainerException ex) {
return ContainerUtils.logAndReturnError(LOG, ex, request);
@@ -506,12 +506,12 @@ public class KeyValueHandler extends Handler {
request);
}
- return KeyUtils.getKeyDataResponse(request, responseData);
+ return BlockUtils.getBlockDataResponse(request, responseData);
}
/**
* Handles GetCommittedBlockLength operation.
- * Calls KeyManager to process the request.
+ * Calls BlockManager to process the request.
*/
ContainerCommandResponseProto handleGetCommittedBlockLength(
ContainerCommandRequestProto request, KeyValueContainer kvContainer) {
@@ -530,7 +530,7 @@ public class KeyValueHandler extends Handler {
String msg = "Block " + blockID + " is not committed yet.";
throw new StorageContainerException(msg, BLOCK_NOT_COMMITTED);
}
- blockLength = keyManager.getCommittedBlockLength(kvContainer, blockID);
+ blockLength = blockManager.getCommittedBlockLength(kvContainer, blockID);
} catch (StorageContainerException ex) {
return ContainerUtils.logAndReturnError(LOG, ex, request);
} catch (IOException ex) {
@@ -539,16 +539,16 @@ public class KeyValueHandler extends Handler {
IO_EXCEPTION), request);
}
- return KeyUtils.getBlockLengthResponse(request, blockLength);
+ return BlockUtils.getBlockLengthResponse(request, blockLength);
}
/**
- * Handle Delete Key operation. Calls KeyManager to process the request.
+ * Handle Delete Block operation. Calls BlockManager to process the request.
*/
- ContainerCommandResponseProto handleDeleteKey(
+ ContainerCommandResponseProto handleDeleteBlock(
ContainerCommandRequestProto request, KeyValueContainer kvContainer) {
- if (!request.hasDeleteKey()) {
+ if (!request.hasDeleteBlock()) {
LOG.debug("Malformed Delete Key request. trace ID: {}",
request.getTraceID());
return ContainerUtils.malformedRequest(request);
@@ -558,9 +558,9 @@ public class KeyValueHandler extends Handler {
checkContainerOpen(kvContainer);
BlockID blockID = BlockID.getFromProtobuf(
- request.getDeleteKey().getBlockID());
+ request.getDeleteBlock().getBlockID());
- keyManager.deleteKey(kvContainer, blockID);
+ blockManager.deleteBlock(kvContainer, blockID);
} catch (StorageContainerException ex) {
return ContainerUtils.logAndReturnError(LOG, ex, request);
} catch (IOException ex) {
@@ -569,7 +569,7 @@ public class KeyValueHandler extends Handler {
request);
}
- return KeyUtils.getKeyResponseSuccess(request);
+ return BlockUtils.getBlockResponseSuccess(request);
}
/**
@@ -698,7 +698,7 @@ public class KeyValueHandler extends Handler {
/**
* Handle Put Small File operation. Writes the chunk and associated key
- * using a single RPC. Calls KeyManager and ChunkManager to process the
+ * using a single RPC. Calls BlockManager and ChunkManager to process the
* request.
*/
ContainerCommandResponseProto handlePutSmallFile(
@@ -715,11 +715,11 @@ public class KeyValueHandler extends Handler {
try {
checkContainerOpen(kvContainer);
- BlockID blockID = BlockID.getFromProtobuf(putSmallFileReq.getKey()
- .getKeyData().getBlockID());
- KeyData keyData = KeyData.getFromProtoBuf(
- putSmallFileReq.getKey().getKeyData());
- Preconditions.checkNotNull(keyData);
+ BlockID blockID = BlockID.getFromProtobuf(putSmallFileReq.getBlock()
+ .getBlockData().getBlockID());
+ BlockData blockData = BlockData.getFromProtoBuf(
+ putSmallFileReq.getBlock().getBlockData());
+ Preconditions.checkNotNull(blockData);
ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(
putSmallFileReq.getChunkInfo());
@@ -732,8 +732,8 @@ public class KeyValueHandler extends Handler {
List<ContainerProtos.ChunkInfo> chunks = new LinkedList<>();
chunks.add(chunkInfo.getProtoBufMessage());
- keyData.setChunks(chunks);
- keyManager.putKey(kvContainer, keyData);
+ blockData.setChunks(chunks);
+ blockManager.putBlock(kvContainer, blockData);
metrics.incContainerBytesStats(Type.PutSmallFile, data.length);
} catch (StorageContainerException ex) {
@@ -749,7 +749,7 @@ public class KeyValueHandler extends Handler {
/**
* Handle Get Small File operation. Gets a data stream using a key. This
- * helps in reducing the RPC overhead for small files. Calls KeyManager and
+ * helps in reducing the RPC overhead for small files. Calls BlockManager and
* ChunkManager to process the request.
*/
ContainerCommandResponseProto handleGetSmallFile(
@@ -764,9 +764,9 @@ public class KeyValueHandler extends Handler {
GetSmallFileRequestProto getSmallFileReq = request.getGetSmallFile();
try {
- BlockID blockID = BlockID.getFromProtobuf(getSmallFileReq.getKey()
+ BlockID blockID = BlockID.getFromProtobuf(getSmallFileReq.getBlock()
.getBlockID());
- KeyData responseData = keyManager.getKey(kvContainer, blockID);
+ BlockData responseData = blockManager.getBlock(kvContainer, blockID);
ContainerProtos.ChunkInfo chunkInfo = null;
ByteString dataBuf = ByteString.EMPTY;
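
The close path above (handleCloseContainer -> commitPendingBlocks -> commitKey) flushes every still-open block through blockManager.putBlock before the container is closed. A self-contained sketch of that flow; the map below is an illustrative stand-in for OpenContainerBlockMap:

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

/** Sketch: the close path commits all open blocks, then closes. */
public class CommitPendingBlocksSketch {

  // Stand-in for OpenContainerBlockMap: blockID -> block length.
  private final Map<Long, Long> openBlocks = new LinkedHashMap<>();

  void writeBlock(long blockId, long length) {
    openBlocks.put(blockId, length);       // written but not yet committed
  }

  long commitBlock(long blockId) {
    // Mirrors commitKey(): persist the block, then drop it from the open map.
    Long length = openBlocks.remove(blockId);
    System.out.println("putBlock(" + blockId + ") committed, length=" + length);
    return length;
  }

  void closeContainer() {
    // Mirrors commitPendingBlocks(): flush everything still open first.
    List<Long> pending = new ArrayList<>(openBlocks.keySet());
    for (long id : pending) {
      commitBlock(id);
    }
    System.out.println("container closed; open blocks left: " + openBlocks.size());
  }

  public static void main(String[] args) {
    CommitPendingBlocksSketch c = new CommitPendingBlocksSketch();
    c.writeBlock(1L, 4096L);
    c.writeBlock(2L, 8192L);
    c.closeContainer();
  }
}
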
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
new file mode 100644
index 0000000..f5cc847
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.keyvalue.helpers;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .ContainerCommandRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .ContainerCommandResponseProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .GetBlockResponseProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
+ GetCommittedBlockLengthResponseProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
+ PutBlockResponseProto;
+import org.apache.hadoop.hdds.scm.container.common.helpers
+ .StorageContainerException;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
+import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
+import org.apache.hadoop.utils.MetadataStore;
+
+import java.io.IOException;
+
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .Result.NO_SUCH_BLOCK;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .Result.UNABLE_TO_READ_METADATA_DB;
+
+/**
+ * Utils functions to help block functions.
+ */
+public final class BlockUtils {
+
+ /** Never constructed. **/
+ private BlockUtils() {
+
+ }
+ /**
+ * Get a DB handler for a given container.
+ * If the handler doesn't exist in cache yet, first create one and
+ * add into cache. This function is called with containerManager
+ * ReadLock held.
+ *
+ * @param containerData containerData.
+ * @param conf configuration.
+ * @return MetadataStore handle.
+ * @throws StorageContainerException
+ */
+ public static MetadataStore getDB(KeyValueContainerData containerData,
+ Configuration conf) throws
+ StorageContainerException {
+ Preconditions.checkNotNull(containerData);
+ ContainerCache cache = ContainerCache.getInstance(conf);
+ Preconditions.checkNotNull(cache);
+ Preconditions.checkNotNull(containerData.getDbFile());
+ try {
+ return cache.getDB(containerData.getContainerID(), containerData
+ .getContainerDBType(), containerData.getDbFile().getAbsolutePath());
+ } catch (IOException ex) {
+ String message = String.format("Error opening DB. Container:%s " +
+ "ContainerPath:%s", containerData.getContainerID(), containerData
+ .getDbFile().getPath());
+ throw new StorageContainerException(message, UNABLE_TO_READ_METADATA_DB);
+ }
+ }
+ /**
+ * Remove a DB handler from cache.
+ *
+ * @param container - Container data.
+ * @param conf - Configuration.
+ */
+ public static void removeDB(KeyValueContainerData container, Configuration
+ conf) {
+ Preconditions.checkNotNull(container);
+ ContainerCache cache = ContainerCache.getInstance(conf);
+ Preconditions.checkNotNull(cache);
+ cache.removeDB(container.getContainerID());
+ }
+
+ /**
+ * Shutdown all DB Handles.
+ *
+ * @param cache - Cache for DB Handles.
+ */
+ @SuppressWarnings("unchecked")
+ public static void shutdownCache(ContainerCache cache) {
+ cache.shutdownCache();
+ }
+
+ /**
+ * Parses the {@link BlockData} from a bytes array.
+ *
+ * @param bytes Block data in bytes.
+ * @return Block data.
+ * @throws IOException if the bytes array is malformed or invalid.
+ */
+ public static BlockData getBlockData(byte[] bytes) throws IOException {
+ try {
+ ContainerProtos.BlockData blockData = ContainerProtos.BlockData.parseFrom(
+ bytes);
+ BlockData data = BlockData.getFromProtoBuf(blockData);
+ return data;
+ } catch (IOException e) {
+ throw new StorageContainerException("Failed to parse block data from " +
+ "the bytes array.", NO_SUCH_BLOCK);
+ }
+ }
+
+ /**
+ * Returns putBlock response success.
+ * @param msg - Request.
+ * @return Response.
+ */
+ public static ContainerCommandResponseProto putBlockResponseSuccess(
+ ContainerCommandRequestProto msg, long blockLength) {
+ GetCommittedBlockLengthResponseProto.Builder
+ committedBlockLengthResponseBuilder =
+ getCommittedBlockLengthResponseBuilder(blockLength,
+ msg.getPutBlock().getBlockData().getBlockID());
+ PutBlockResponseProto.Builder putKeyResponse =
+ PutBlockResponseProto.newBuilder();
+ putKeyResponse
+ .setCommittedBlockLength(committedBlockLengthResponseBuilder);
+ ContainerProtos.ContainerCommandResponseProto.Builder builder =
+ ContainerUtils.getSuccessResponseBuilder(msg);
+ builder.setPutBlock(putKeyResponse);
+ return builder.build();
+ }
+ /**
+ * Returns successful blockResponse.
+ * @param msg - Request.
+ * @return Response.
+ */
+ public static ContainerCommandResponseProto getBlockResponseSuccess(
+ ContainerCommandRequestProto msg) {
+ return ContainerUtils.getSuccessResponse(msg);
+ }
+
+
+ public static ContainerCommandResponseProto getBlockDataResponse(
+ ContainerCommandRequestProto msg, BlockData data) {
+ GetBlockResponseProto.Builder getBlock = ContainerProtos
+ .GetBlockResponseProto
+ .newBuilder();
+ getBlock.setBlockData(data.getProtoBufMessage());
+ ContainerProtos.ContainerCommandResponseProto.Builder builder =
+ ContainerUtils.getSuccessResponseBuilder(msg);
+ builder.setGetBlock(getBlock);
+ return builder.build();
+ }
+
+ /**
+ * Returns successful getCommittedBlockLength Response.
+ * @param msg - Request.
+ * @return Response.
+ */
+ public static ContainerCommandResponseProto getBlockLengthResponse(
+ ContainerCommandRequestProto msg, long blockLength) {
+ GetCommittedBlockLengthResponseProto.Builder
+ committedBlockLengthResponseBuilder =
+ getCommittedBlockLengthResponseBuilder(blockLength,
+ msg.getGetCommittedBlockLength().getBlockID());
+ ContainerProtos.ContainerCommandResponseProto.Builder builder =
+ ContainerUtils.getSuccessResponseBuilder(msg);
+ builder.setGetCommittedBlockLength(committedBlockLengthResponseBuilder);
+ return builder.build();
+ }
+
+ private static GetCommittedBlockLengthResponseProto.Builder
+ getCommittedBlockLengthResponseBuilder(long blockLength,
+ ContainerProtos.DatanodeBlockID blockID) {
+ ContainerProtos.GetCommittedBlockLengthResponseProto.Builder
+ getCommittedBlockLengthResponseBuilder = ContainerProtos.
+ GetCommittedBlockLengthResponseProto.newBuilder();
+ getCommittedBlockLengthResponseBuilder.setBlockLength(blockLength);
+ getCommittedBlockLengthResponseBuilder.setBlockID(blockID);
+ return getCommittedBlockLengthResponseBuilder;
+ }
+}
\ No newline at end of file
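
getDB() above is a cached-handle lookup: open the container DB on first use, reuse it afterwards, and evict it with removeDB(). ContainerCache itself is not part of this diff, so the sketch below stands it in with a plain ConcurrentHashMap:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

/** Self-contained sketch of the per-container DB-handle cache in getDB(). */
public class HandleCacheSketch {

  /** Stand-in for a MetadataStore handle. */
  static final class DbHandle {
    DbHandle(long containerId) {
      System.out.println("opened DB for container " + containerId);
    }
  }

  private final ConcurrentMap<Long, DbHandle> cache = new ConcurrentHashMap<>();

  /** Mirrors BlockUtils.getDB(): open on first use, reuse afterwards. */
  DbHandle getDb(long containerId) {
    return cache.computeIfAbsent(containerId, DbHandle::new);
  }

  /** Mirrors BlockUtils.removeDB(): evict so the next getDb() reopens. */
  void removeDb(long containerId) {
    cache.remove(containerId);
  }

  public static void main(String[] args) {
    HandleCacheSketch c = new HandleCacheSketch();
    c.getDb(7L);    // prints "opened DB for container 7"
    c.getDb(7L);    // cache hit, nothing printed
    c.removeDb(7L); // evicted; the next getDb(7L) would reopen
  }
}
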
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyUtils.java
deleted file mode 100644
index a83d298..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyUtils.java
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue.helpers;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
- .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
- .ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
- .GetKeyResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
- GetCommittedBlockLengthResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
- PutKeyResponseProto;
-import org.apache.hadoop.hdds.scm.container.common.helpers
- .StorageContainerException;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
-import org.apache.hadoop.utils.MetadataStore;
-
-import java.io.IOException;
-
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
- .Result.NO_SUCH_KEY;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
- .Result.UNABLE_TO_READ_METADATA_DB;
-
-/**
- * Utils functions to help key functions.
- */
-public final class KeyUtils {
-
- /** Never constructed. **/
- private KeyUtils() {
-
- }
- /**
- * Get a DB handler for a given container.
- * If the handler doesn't exist in cache yet, first create one and
- * add into cache. This function is called with containerManager
- * ReadLock held.
- *
- * @param containerData containerData.
- * @param conf configuration.
- * @return MetadataStore handle.
- * @throws StorageContainerException
- */
- public static MetadataStore getDB(KeyValueContainerData containerData,
- Configuration conf) throws
- StorageContainerException {
- Preconditions.checkNotNull(containerData);
- ContainerCache cache = ContainerCache.getInstance(conf);
- Preconditions.checkNotNull(cache);
- Preconditions.checkNotNull(containerData.getDbFile());
- try {
- return cache.getDB(containerData.getContainerID(), containerData
- .getContainerDBType(), containerData.getDbFile().getAbsolutePath());
- } catch (IOException ex) {
- String message = String.format("Error opening DB. Container:%s " +
- "ContainerPath:%s", containerData.getContainerID(), containerData
- .getDbFile().getPath());
- throw new StorageContainerException(message, UNABLE_TO_READ_METADATA_DB);
- }
- }
- /**
- * Remove a DB handler from cache.
- *
- * @param container - Container data.
- * @param conf - Configuration.
- */
- public static void removeDB(KeyValueContainerData container, Configuration
- conf) {
- Preconditions.checkNotNull(container);
- ContainerCache cache = ContainerCache.getInstance(conf);
- Preconditions.checkNotNull(cache);
- cache.removeDB(container.getContainerID());
- }
-
- /**
- * Shutdown all DB Handles.
- *
- * @param cache - Cache for DB Handles.
- */
- @SuppressWarnings("unchecked")
- public static void shutdownCache(ContainerCache cache) {
- cache.shutdownCache();
- }
-
- /**
- * Parses the {@link KeyData} from a bytes array.
- *
- * @param bytes key data in bytes.
- * @return key data.
- * @throws IOException if the bytes array is malformed or invalid.
- */
- public static KeyData getKeyData(byte[] bytes) throws IOException {
- try {
- ContainerProtos.KeyData keyData = ContainerProtos.KeyData.parseFrom(
- bytes);
- KeyData data = KeyData.getFromProtoBuf(keyData);
- return data;
- } catch (IOException e) {
- throw new StorageContainerException("Failed to parse key data from the" +
- " bytes array.", NO_SUCH_KEY);
- }
- }
-
- /**
- * Returns putKey response success.
- * @param msg - Request.
- * @return Response.
- */
- public static ContainerCommandResponseProto putKeyResponseSuccess(
- ContainerCommandRequestProto msg, long blockLength) {
- GetCommittedBlockLengthResponseProto.Builder
- committedBlockLengthResponseBuilder =
- getCommittedBlockLengthResponseBuilder(blockLength,
- msg.getPutKey().getKeyData().getBlockID());
- PutKeyResponseProto.Builder putKeyResponse =
- PutKeyResponseProto.newBuilder();
- putKeyResponse
- .setCommittedBlockLength(committedBlockLengthResponseBuilder);
- ContainerProtos.ContainerCommandResponseProto.Builder builder =
- ContainerUtils.getSuccessResponseBuilder(msg);
- builder.setPutKey(putKeyResponse);
- return builder.build();
- }
- /**
- * Returns successful keyResponse.
- * @param msg - Request.
- * @return Response.
- */
- public static ContainerCommandResponseProto getKeyResponseSuccess(
- ContainerCommandRequestProto msg) {
- return ContainerUtils.getSuccessResponse(msg);
- }
-
-
- public static ContainerCommandResponseProto getKeyDataResponse(
- ContainerCommandRequestProto msg, KeyData data) {
- GetKeyResponseProto.Builder getKey = ContainerProtos
- .GetKeyResponseProto
- .newBuilder();
- getKey.setKeyData(data.getProtoBufMessage());
- ContainerProtos.ContainerCommandResponseProto.Builder builder =
- ContainerUtils.getSuccessResponseBuilder(msg);
- builder.setGetKey(getKey);
- return builder.build();
- }
-
- /**
- * Returns successful getCommittedBlockLength Response.
- * @param msg - Request.
- * @return Response.
- */
- public static ContainerCommandResponseProto getBlockLengthResponse(
- ContainerCommandRequestProto msg, long blockLength) {
- GetCommittedBlockLengthResponseProto.Builder
- committedBlockLengthResponseBuilder =
- getCommittedBlockLengthResponseBuilder(blockLength,
- msg.getGetCommittedBlockLength().getBlockID());
- ContainerProtos.ContainerCommandResponseProto.Builder builder =
- ContainerUtils.getSuccessResponseBuilder(msg);
- builder.setGetCommittedBlockLength(committedBlockLengthResponseBuilder);
- return builder.build();
- }
-
- private static GetCommittedBlockLengthResponseProto.Builder
- getCommittedBlockLengthResponseBuilder(
- long blockLength, ContainerProtos.DatanodeBlockID blockID) {
- ContainerProtos.GetCommittedBlockLengthResponseProto.Builder
- getCommittedBlockLengthResponseBuilder = ContainerProtos.
- GetCommittedBlockLengthResponseProto.newBuilder();
- getCommittedBlockLengthResponseBuilder.setBlockLength(blockLength);
- getCommittedBlockLengthResponseBuilder.setBlockID(blockID);
- return getCommittedBlockLengthResponseBuilder;
- }
-}
\ No newline at end of file
[11/18] hadoop git commit: YARN-8628. [UI2] Few duplicated or
inconsistent information displayed in UI2. Contributed by Akhil PB.
Posted by sh...@apache.org.
YARN-8628. [UI2] Few duplicated or inconsistent information displayed in UI2. Contributed by Akhil PB.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a2752779
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a2752779
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a2752779
Branch: refs/heads/HDFS-12943
Commit: a2752779ac1545f5e0a52fce3cff02a7007e95fb
Parents: 524f7cd
Author: Sunil G <su...@apache.org>
Authored: Fri Sep 21 15:47:10 2018 +0530
Committer: Sunil G <su...@apache.org>
Committed: Fri Sep 21 15:47:10 2018 +0530
----------------------------------------------------------------------
.../src/main/webapp/app/controllers/yarn-app/components.js | 2 +-
.../webapp/app/controllers/yarn-component-instance/info.js | 5 +++--
.../webapp/app/controllers/yarn-component-instances/info.js | 3 ++-
.../main/webapp/app/routes/yarn-component-instance/info.js | 4 ++--
.../main/webapp/app/serializers/yarn-component-instance.js | 1 -
.../src/main/webapp/app/serializers/yarn-container.js | 2 +-
.../src/main/webapp/app/serializers/yarn-service-component.js | 2 +-
.../main/webapp/app/serializers/yarn-timeline-container.js | 2 +-
.../src/main/webapp/app/templates/yarn-app/configs.hbs | 7 ++++---
.../webapp/app/templates/yarn-component-instance/info.hbs | 4 ----
10 files changed, 15 insertions(+), 17 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2752779/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/components.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/components.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/components.js
index 5981eb5..5a6c616 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/components.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/components.js
@@ -41,7 +41,7 @@ export default Ember.Controller.extend({
getCellContent: function(row) {
return {
displayText: row.get('name'),
- href: `#/yarn-component-instances/${row.get('name')}/info?service=${service}&&appid=${appId}`
+ href: `#/yarn-component-instances/${row.get('name')}/info?service=${service}&appid=${appId}`
};
}
}, {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2752779/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance/info.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance/info.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance/info.js
index e3abcb7..e920aa2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance/info.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance/info.js
@@ -19,7 +19,8 @@
import Ember from 'ember';
export default Ember.Controller.extend({
- queryParams: ["appid", "service"],
+ queryParams: ["appid", "service", "containerid"],
appid: undefined,
- service: undefined
+ service: undefined,
+ containerid: undefined
});
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2752779/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances/info.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances/info.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances/info.js
index 44cfe17..be4b4f3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances/info.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances/info.js
@@ -42,9 +42,10 @@ export default Ember.Controller.extend({
getCellContent: function(row) {
var component = row.get('component');
var instance = row.get('instanceName');
+ var containerId = row.get('containerId');
return {
text: instance,
- href: `#/yarn-component-instance/${component}/instances/${instance}/info?appid=${appId}&&service=${serviceName}`
+ href: `#/yarn-component-instance/${component}/instances/${instance}/info?appid=${appId}&service=${serviceName}&containerid=${containerId}`
};
}
}, {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2752779/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instance/info.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instance/info.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instance/info.js
index 3753c75..a67324a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instance/info.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instance/info.js
@@ -29,8 +29,8 @@ export default AbstractRoute.extend({
componentName: params.component_name,
instanceName: instanceName,
container: this.store.query('yarn-component-instance', {appId: params.appid}).then(function(instances) {
- if (instances && instances.findBy('instanceName', instanceName)) {
- return instances.findBy('instanceName', instanceName);
+ if (instances && instances.findBy('containerId', params.containerid)) {
+ return instances.findBy('containerId', params.containerid);
}
return null;
}, function() {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2752779/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-component-instance.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-component-instance.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-component-instance.js
index 1bd3b2c..ce0dfb0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-component-instance.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-component-instance.js
@@ -34,7 +34,6 @@ export default DS.JSONAPISerializer.extend({
startedTimestamp: info.LAUNCH_TIME,
host: info.HOSTNAME,
node: info.BARE_HOST,
- hostUrl: 'N/A',
ipAddr: info.IP,
exitStatusCode: info.EXIT_STATUS_CODE
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2752779/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-container.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-container.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-container.js
index fc640c5..f7f8272 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-container.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-container.js
@@ -34,7 +34,7 @@ export default DS.JSONAPISerializer.extend({
finishedTime: Converter.timeStampToDate(payload.finishedTime),
elapsedTime: payload.elapsedTime,
logUrl: payload.logUrl,
- containerExitStatus: payload.containerExitStatus,
+ containerExitStatus: payload.containerExitStatus + '',
containerState: payload.containerState,
nodeId : payload.nodeId,
nodeHttpAddress: payload.nodeHttpAddress
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2752779/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-service-component.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-service-component.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-service-component.js
index b0261fc..9dd7e6c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-service-component.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-service-component.js
@@ -52,7 +52,7 @@ export default DS.JSONAPISerializer.extend({
vcores: info.RESOURCE_CPU,
memory: info.RESOURCE_MEMORY,
priority: 'N/A',
- instances: 'N/A',
+ instances: '0',
createdTimestamp: payload.createdtime,
configs: newConfigs,
metrics: newMetrics
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2752779/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-timeline-container.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-timeline-container.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-timeline-container.js
index 99ab6c4..5b62b2f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-timeline-container.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-timeline-container.js
@@ -34,7 +34,7 @@ export default DS.JSONAPISerializer.extend({
startedTime: Converter.timeStampToDate(payload.createdtime),
finishedTime: Converter.timeStampToDate(payload.info.YARN_CONTAINER_FINISHED_TIME),
nodeHttpAddress: payload.info.YARN_CONTAINER_ALLOCATED_HOST_HTTP_ADDRESS,
- containerExitStatus: payload.info.YARN_CONTAINER_EXIT_STATUS,
+ containerExitStatus: payload.info.YARN_CONTAINER_EXIT_STATUS + '',
containerState: payload.info.YARN_CONTAINER_STATE,
nodeId: payload.info.YARN_CONTAINER_ALLOCATED_HOST + ':' + payload.info.YARN_CONTAINER_ALLOCATED_PORT,
diagnosticsInfo: payload.info.YARN_CONTAINER_DIAGNOSTICS_INFO
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2752779/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/configs.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/configs.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/configs.hbs
index 5f4d29d..daf9549 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/configs.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/configs.hbs
@@ -16,10 +16,11 @@
* limitations under the License.
}}
-<div class="col-md-12">
- {{metrics-table metrics=model.metrics type="Service"}}
+<div class="col-md-12" style="margin-bottom: 15px;">
+ <div class="col-md-12">
+ {{metrics-table metrics=model.metrics type="Service"}}
+ </div>
</div>
-
{{#if model.configs}}
<div class="col-md-12">
<div class="panel panel-default">
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2752779/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instance/info.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instance/info.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instance/info.hbs
index 553f4e8..1b9d04a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instance/info.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instance/info.hbs
@@ -50,10 +50,6 @@
<td>{{check-availability model.container.host}}</td>
</tr>
<tr>
- <td>Host URL</td>
- <td>{{check-availability model.container.hostUrl}}</td>
- </tr>
- <tr>
<td>Node</td>
<td>{{check-availability model.container.node}}</td>
</tr>
[14/18] hadoop git commit: Merge commit
'b3161c4dd9367c68b30528a63c03756eaa32aaf9' into HDFS-12943
Posted by sh...@apache.org.
Merge commit 'b3161c4dd9367c68b30528a63c03756eaa32aaf9' into HDFS-12943
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b0ff03f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b0ff03f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b0ff03f
Branch: refs/heads/HDFS-12943
Commit: 4b0ff03f6f87dfb3c50f59e12377b9c24c4fc491
Parents: 4cdd0b9 b3161c4
Author: Konstantin V Shvachko <sh...@apache.org>
Authored: Fri Sep 21 18:18:24 2018 -0700
Committer: Konstantin V Shvachko <sh...@apache.org>
Committed: Fri Sep 21 18:18:24 2018 -0700
----------------------------------------------------------------------
.../hadoop/metrics2/annotation/Metric.java | 5 +
.../metrics2/lib/MutableMetricsFactory.java | 4 +
.../apache/hadoop/hdds/scm/XceiverClient.java | 8 +-
.../hadoop/hdds/scm/XceiverClientGrpc.java | 9 +-
.../hadoop/hdds/scm/XceiverClientRatis.java | 36 ++-
.../scm/client/ContainerOperationClient.java | 2 +-
.../org/apache/hadoop/hdds/HddsConfigKeys.java | 11 +
.../apache/hadoop/hdds/scm/ScmConfigKeys.java | 17 +-
.../hadoop/hdds/scm/XceiverClientSpi.java | 10 +-
.../apache/hadoop/ozone/OzoneConfigKeys.java | 12 +
.../main/java/org/apache/ratis/RatisHelper.java | 22 +-
.../common/src/main/resources/ozone-default.xml | 31 ++-
.../container/common/impl/HddsDispatcher.java | 6 +-
.../common/statemachine/StateContext.java | 45 ++++
.../states/endpoint/HeartbeatEndpointTask.java | 28 +++
.../server/ratis/ContainerStateMachine.java | 17 +-
.../server/ratis/XceiverServerRatis.java | 205 ++++++++++++-----
.../container/ozoneimpl/OzoneContainer.java | 2 +-
.../StorageContainerDatanodeProtocol.proto | 26 +++
.../hdds/scm/container/ContainerMapping.java | 109 +++------
.../scm/container/ContainerStateManager.java | 5 +-
.../hadoop/hdds/scm/container/Mapping.java | 14 ++
.../scm/container/closer/ContainerCloser.java | 194 ----------------
.../hadoop/hdds/scm/events/SCMEvents.java | 24 +-
.../hadoop/hdds/scm/node/StaleNodeHandler.java | 16 +-
.../hdds/scm/pipelines/Node2PipelineMap.java | 34 +--
.../pipelines/PipelineActionEventHandler.java | 60 +++++
.../scm/pipelines/PipelineCloseHandler.java | 38 ++++
.../hdds/scm/pipelines/PipelineManager.java | 10 +-
.../hdds/scm/pipelines/PipelineSelector.java | 46 ++--
.../scm/pipelines/ratis/RatisManagerImpl.java | 14 +-
.../standalone/StandaloneManagerImpl.java | 7 +-
.../server/SCMDatanodeHeartbeatDispatcher.java | 23 ++
.../scm/server/StorageContainerManager.java | 13 +-
.../scm/container/TestContainerMapping.java | 43 ----
.../container/closer/TestContainerCloser.java | 228 -------------------
.../mapreduce/v2/hs/HistoryFileManager.java | 12 +-
.../mapreduce/v2/hs/TestHistoryFileManager.java | 52 +++++
.../ozone/om/helpers/OmKeyLocationInfo.java | 10 +
.../hdds/scm/pipeline/TestNode2PipelineMap.java | 6 +-
.../hdds/scm/pipeline/TestNodeFailure.java | 126 ++++++++++
.../hdds/scm/pipeline/TestPipelineClose.java | 6 +-
.../apache/hadoop/ozone/MiniOzoneCluster.java | 15 ++
.../hadoop/ozone/MiniOzoneClusterImpl.java | 21 ++
.../transport/server/ratis/TestCSMMetrics.java | 3 +-
.../container/server/TestContainerServer.java | 3 +-
.../server/TestContainerStateMachine.java | 2 +-
.../hadoop/ozone/om/TestOzoneManager.java | 26 ++-
.../hadoop/ozone/om/VolumeManagerImpl.java | 2 +-
49 files changed, 936 insertions(+), 722 deletions(-)
----------------------------------------------------------------------