Posted to common-commits@hadoop.apache.org by xg...@apache.org on 2016/12/21 22:42:25 UTC
[01/17] hadoop git commit: YARN-5650. Render Application Timeout value in web UI. Contributed by Akhil PB.
Repository: hadoop
Updated Branches:
refs/heads/YARN-5734 fcbe15234 -> 736f54b72
YARN-5650. Render Application Timeout value in web UI. Contributed by Akhil PB.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef2dd7b7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef2dd7b7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef2dd7b7
Branch: refs/heads/YARN-5734
Commit: ef2dd7b78c5bf15bd85a9c793e57855255148b7f
Parents: fcbe152
Author: Sunil G <su...@apache.org>
Authored: Mon Dec 19 11:44:03 2016 +0530
Committer: Sunil G <su...@apache.org>
Committed: Mon Dec 19 11:44:03 2016 +0530
----------------------------------------------------------------------
.../hadoop/yarn/server/webapp/AppBlock.java | 9 +++
.../resourcemanager/webapp/dao/AppInfo.java | 8 ++-
.../webapp/app/components/app-timeout-bar.js | 60 ++++++++++++++++++++
.../src/main/webapp/app/models/yarn-app.js | 2 +
.../src/main/webapp/app/serializers/yarn-app.js | 21 +++++--
.../templates/components/app-timeout-bar.hbs | 34 +++++++++++
.../src/main/webapp/app/templates/yarn-app.hbs | 10 +++-
.../src/main/webapp/app/utils/converter.js | 3 +
.../components/app-timeout-bar-test.js | 34 +++++++++++
9 files changed, 172 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef2dd7b7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
index 69beef2..349a98c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType;
import org.apache.hadoop.yarn.api.records.ContainerReport;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.LogAggregationStatus;
@@ -207,6 +208,14 @@ public class AppBlock extends HtmlBlock {
overviewTable._("Log Aggregation Status:",
root_url("logaggregationstatus", app.getAppId()), status.name());
}
+ long timeout = appReport.getApplicationTimeouts()
+ .get(ApplicationTimeoutType.LIFETIME).getRemainingTime();
+ if (timeout < 0) {
+ overviewTable._("Application Timeout (Remaining Time):", "Unlimited");
+ } else {
+ overviewTable._("Application Timeout (Remaining Time):",
+ String.format("%d seconds", timeout));
+ }
}
overviewTable._("Diagnostics:",
app.getDiagnosticsInfo() == null ? "" : app.getDiagnosticsInfo());
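For reference, the remaining-time value rendered by this hunk is available to any YARN client through the same ApplicationReport API. A method-level sketch follows; the helper name is hypothetical, and it assumes the RM has populated a LIFETIME timeout entry (older RMs may not):

  import java.util.Map;
  import org.apache.hadoop.yarn.api.records.ApplicationReport;
  import org.apache.hadoop.yarn.api.records.ApplicationTimeout;
  import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType;

  // Hypothetical helper that formats the LIFETIME timeout the way the web UI does.
  String describeLifetimeTimeout(ApplicationReport report) {
    Map<ApplicationTimeoutType, ApplicationTimeout> timeouts =
        report.getApplicationTimeouts();
    ApplicationTimeout lifetime = timeouts.get(ApplicationTimeoutType.LIFETIME);
    if (lifetime == null || lifetime.getRemainingTime() < 0) {
      return "Unlimited";                          // no lifetime limit configured
    }
    return String.format("%d seconds", lifetime.getRemainingTime());
  }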
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef2dd7b7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
index 2d364f4..4e85b67 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
@@ -259,8 +259,12 @@ public class AppInfo {
timeout.setTimeoutType(entry.getKey());
long timeoutInMillis = entry.getValue().longValue();
timeout.setExpiryTime(Times.formatISO8601(timeoutInMillis));
- timeout.setRemainingTime(Math
- .max((timeoutInMillis - System.currentTimeMillis()) / 1000, 0));
+ if (app.isAppInCompletedStates()) {
+ timeout.setRemainingTime(0);
+ } else {
+ timeout.setRemainingTime(Math
+ .max((timeoutInMillis - System.currentTimeMillis()) / 1000, 0));
+ }
timeouts.add(timeout);
}
}
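A quick worked example of the remaining-time computation above, with illustrative values only:

  // Suppose the timeout expires 90 seconds from now and the app is still running.
  long timeoutInMillis = System.currentTimeMillis() + 90_000L;
  long remaining = Math.max((timeoutInMillis - System.currentTimeMillis()) / 1000, 0);
  // remaining is roughly 90; with this patch an app in a completed state reports 0,
  // and an already-expired timeout is clamped to 0 rather than going negative.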
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef2dd7b7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-timeout-bar.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-timeout-bar.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-timeout-bar.js
new file mode 100644
index 0000000..0eac827
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-timeout-bar.js
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+import Converter from 'yarn-ui/utils/converter';
+
+export default Ember.Component.extend({
+ app: null,
+
+ appTimeoutValue: function() {
+ var timeoutValueInSecs = this.get("app.remainingTimeoutInSeconds");
+ if (timeoutValueInSecs > -1) {
+ return Converter.msToElapsedTime(timeoutValueInSecs * 1000);
+ } else {
+ return timeoutValueInSecs;
+ }
+ }.property("app.remainingTimeoutInSeconds"),
+
+ isAppTimedOut: function() {
+ if (this.get("app.remainingTimeoutInSeconds") > 0) {
+ return false;
+ } else {
+ return true;
+ }
+ }.property("app.remainingTimeoutInSeconds"),
+
+ appTimeoutBarStyle: function() {
+ var remainingInSecs = this.get("app.remainingTimeoutInSeconds"),
+ expiryTimestamp = Converter.dateToTimeStamp(this.get("app.applicationExpiryTime")),
+ expiryInSecs = expiryTimestamp / 1000,
+ startTimestamp = Converter.dateToTimeStamp(this.get("app.startTime")),
+ startInSecs = startTimestamp / 1000,
+ totalRunInSecs = 0,
+ appRunDurationInSecs = 0,
+ width = 0;
+
+ if (remainingInSecs > 0) {
+ totalRunInSecs = expiryInSecs - startInSecs;
+ appRunDurationInSecs = totalRunInSecs - remainingInSecs;
+ width = appRunDurationInSecs / totalRunInSecs * 100;
+ }
+
+ return "width: " + width + "%";
+ }.property("app.remainingTimeoutInSeconds", "app.applicationExpiryTime", "app.startTime")
+});
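To make the bar-width formula above concrete (hypothetical numbers): for an application that started 450 seconds ago, whose expiry falls 600 seconds after the start, 150 seconds remain, so totalRunInSecs is 600, appRunDurationInSecs is 600 - 150 = 450, and width = 450 / 600 * 100 = 75%, i.e. the progress bar renders three-quarters full. When the remaining time is not positive, the width stays at 0 and the whole panel is hidden by the isAppTimedOut guard in the template.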
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef2dd7b7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js
index 8b5474f..4138a87 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js
@@ -50,6 +50,8 @@ export default DS.Model.extend({
clusterUsagePercentage: DS.attr('number'),
queueUsagePercentage: DS.attr('number'),
currentAppAttemptId: DS.attr('string'),
+ remainingTimeoutInSeconds: DS.attr('number'),
+ applicationExpiryTime: DS.attr('string'),
isFailed: function() {
return this.get('finalStatus') == "FAILED"
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef2dd7b7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app.js
index 427c3d8..fdba04a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app.js
@@ -23,9 +23,18 @@ export default DS.JSONAPISerializer.extend({
internalNormalizeSingleResponse(store, primaryModelClass, payload, id,
requestType) {
if (payload.app) {
- payload = payload.app;
+ payload = payload.app;
}
-
+
+ var timeoutInSecs = -1;
+ var appExpiryTime = Converter.timeStampToDate(payload.finishedTime);
+ if (payload.timeouts && payload.timeouts.timeout && payload.timeouts.timeout[0]) {
+ timeoutInSecs = payload.timeouts.timeout[0].remainingTimeInSeconds;
+ if (timeoutInSecs > -1) {
+ appExpiryTime = Converter.isoDateToDate(payload.timeouts.timeout[0].expiryTime);
+ }
+ }
+
var fixedPayload = {
id: id,
type: primaryModelClass.modelName, // yarn-app
@@ -58,7 +67,9 @@ export default DS.JSONAPISerializer.extend({
numAMContainerPreempted: payload.numAMContainerPreempted,
clusterUsagePercentage: payload.clusterUsagePercentage,
queueUsagePercentage: payload.queueUsagePercentage,
- currentAppAttemptId: payload.currentAppAttemptId
+ currentAppAttemptId: payload.currentAppAttemptId,
+ remainingTimeoutInSeconds: timeoutInSecs,
+ applicationExpiryTime: appExpiryTime
}
};
@@ -67,7 +78,7 @@ export default DS.JSONAPISerializer.extend({
normalizeSingleResponse(store, primaryModelClass, payload, id,
requestType) {
- var p = this.internalNormalizeSingleResponse(store,
+ var p = this.internalNormalizeSingleResponse(store,
primaryModelClass, payload, id, requestType);
return { data: p };
},
@@ -90,4 +101,4 @@ export default DS.JSONAPISerializer.extend({
return normalizedArrayResponse;
}
-});
\ No newline at end of file
+});
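For orientation, the fields read above come from the ResourceManager's application REST payload. The shape the serializer now expects looks roughly like the following (values are illustrative, and the timeouts block is absent on RMs that do not report application timeouts):

  "timeouts": {
    "timeout": [
      {
        "type": "LIFETIME",
        "expiryTime": "2016-12-19T12:14:03.000+0530",
        "remainingTimeInSeconds": 90
      }
    ]
  }

When remainingTimeInSeconds is -1 the component treats the timeout as unlimited, and the serializer falls back to the application's finished time for the expiry field.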
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef2dd7b7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/app-timeout-bar.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/app-timeout-bar.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/app-timeout-bar.hbs
new file mode 100644
index 0000000..acbe0b2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/app-timeout-bar.hbs
@@ -0,0 +1,34 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+}}
+
+<div class="app-timeout-wrapper">
+ {{#unless isAppTimedOut}}
+ <div class="panel panel-default">
+ <div class="panel-body">
+ <label>Application will be timed out after <span class="text-danger">{{appTimeoutValue}}</span></label>
+ <div class="progress">
+ <div class="progress-bar progress-bar-danger progress-bar-striped active" style="{{appTimeoutBarStyle}}"></div>
+ </div>
+ <div>
+ <label class="pull-left">{{app.startTime}}</label>
+ <label class="pull-right">{{app.applicationExpiryTime}}</label>
+ </div>
+ </div>
+ </div>
+ {{/unless}}
+</div>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef2dd7b7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs
index acf00d1..578cc95 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs
@@ -46,6 +46,12 @@
<div class="col-md-10 container-fluid">
<div class="row">
+ <div class="col-md-12">
+ {{app-timeout-bar app=model.app}}
+ </div>
+ </div>
+
+ <div class="row">
<div class="col-md-12 container-fluid">
<div class="panel panel-default">
<div class="panel-heading">Basic Info</div>
@@ -116,10 +122,10 @@
</div>
{{else}}
<div class="panel panel-default">
- <div class="panel-body">
+ <div class="panel-heading">
Diagnostics
</div>
- <div class="panel-footer">{{model.app.diagnostics}}</div>
+ <div class="panel-body">{{model.app.diagnostics}}</div>
</div>
{{/if}}
</div>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef2dd7b7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/converter.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/converter.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/converter.js
index 6fd9d30..fb6b61c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/converter.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/converter.js
@@ -91,6 +91,9 @@ export default {
return ts;
}
},
+ isoDateToDate: function(isoDate) {
+ return moment(isoDate).format("YYYY/MM/DD HH:mm:ss");
+ },
splitForContainerLogs: function(id) {
if (id) {
var splits = id.split(Constants.PARAM_SEPARATOR);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef2dd7b7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/app-timeout-bar-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/app-timeout-bar-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/app-timeout-bar-test.js
new file mode 100644
index 0000000..fb2a73a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/app-timeout-bar-test.js
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleForComponent, test } from 'ember-qunit';
+import hbs from 'htmlbars-inline-precompile';
+
+moduleForComponent('app-timeout-bar', 'Integration | Component | app timeout bar', {
+ integration: true
+});
+
+test('it renders', function(assert) {
+
+ // Set any properties with this.set('myProperty', 'value');
+ // Handle any actions with this.on('myAction', function(val) { ... });
+
+ this.render(hbs`{{app-timeout-bar}}`);
+
+ assert.equal(this.$().text().trim(), '');
+});
[12/17] hadoop git commit: HDFS-11182. Update DataNode to use DatasetVolumeChecker. Contributed by Arpit Agarwal.
Posted by xg...@apache.org.
HDFS-11182. Update DataNode to use DatasetVolumeChecker. Contributed by Arpit Agarwal.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f678080d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f678080d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f678080d
Branch: refs/heads/YARN-5734
Commit: f678080dbd25a218e0406463a3c3a1fc03680702
Parents: 5daa8d8
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Tue Dec 20 13:53:07 2016 -0800
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Tue Dec 20 13:53:32 2016 -0800
----------------------------------------------------------------------
.../hadoop/hdfs/server/datanode/DataNode.java | 130 ++++++++-----------
.../datanode/checker/DatasetVolumeChecker.java | 116 +++++++++++------
.../server/datanode/fsdataset/FsDatasetSpi.java | 3 +-
.../datanode/fsdataset/impl/FsDatasetImpl.java | 5 +-
.../datanode/fsdataset/impl/FsVolumeImpl.java | 7 -
.../datanode/fsdataset/impl/FsVolumeList.java | 25 +---
.../blockmanagement/TestBlockStatsMXBean.java | 4 +
.../server/datanode/SimulatedFSDataset.java | 18 ++-
.../datanode/TestDataNodeHotSwapVolumes.java | 3 +
.../datanode/TestDataNodeVolumeFailure.java | 3 +
.../TestDataNodeVolumeFailureReporting.java | 3 +
.../TestDataNodeVolumeFailureToleration.java | 3 +
.../hdfs/server/datanode/TestDiskError.java | 24 ++--
.../checker/TestDatasetVolumeChecker.java | 17 ++-
.../TestDatasetVolumeCheckerFailures.java | 45 ++++---
.../extdataset/ExternalDatasetImpl.java | 3 +-
.../fsdataset/impl/TestFsDatasetImpl.java | 84 ++----------
.../fsdataset/impl/TestFsVolumeList.java | 37 ------
18 files changed, 233 insertions(+), 297 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f678080d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 794b1ad..a94c4b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -74,6 +74,7 @@ import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
+import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
@@ -85,7 +86,6 @@ import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledThreadPoolExecutor;
-import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
@@ -108,6 +108,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.server.datanode.checker.DatasetVolumeChecker;
import org.apache.hadoop.hdfs.server.datanode.checker.StorageLocationChecker;
import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.hdfs.client.BlockReportOptions;
@@ -369,11 +370,7 @@ public class DataNode extends ReconfigurableBase
SaslDataTransferClient saslClient;
SaslDataTransferServer saslServer;
private ObjectName dataNodeInfoBeanName;
- private Thread checkDiskErrorThread = null;
- protected final int checkDiskErrorInterval;
- private boolean checkDiskErrorFlag = false;
- private Object checkDiskErrorMutex = new Object();
- private long lastDiskErrorCheck;
+ private volatile long lastDiskErrorCheck;
private String supergroup;
private boolean isPermissionEnabled;
private String dnUserName = null;
@@ -389,6 +386,7 @@ public class DataNode extends ReconfigurableBase
@Nullable
private final StorageLocationChecker storageLocationChecker;
+ private final DatasetVolumeChecker volumeChecker;
private final SocketFactory socketFactory;
@@ -407,7 +405,7 @@ public class DataNode extends ReconfigurableBase
*/
@VisibleForTesting
@InterfaceAudience.LimitedPrivate("HDFS")
- DataNode(final Configuration conf) {
+ DataNode(final Configuration conf) throws DiskErrorException {
super(conf);
this.tracer = createTracer(conf);
this.tracerConfigurationManager =
@@ -420,11 +418,10 @@ public class DataNode extends ReconfigurableBase
this.connectToDnViaHostname = false;
this.blockScanner = new BlockScanner(this, this.getConf());
this.pipelineSupportECN = false;
- this.checkDiskErrorInterval =
- ThreadLocalRandom.current().nextInt(5000, (int) (5000 * 1.25));
this.socketFactory = NetUtils.getDefaultSocketFactory(conf);
initOOBTimeout();
storageLocationChecker = null;
+ volumeChecker = new DatasetVolumeChecker(conf, new Timer());
}
/**
@@ -464,8 +461,7 @@ public class DataNode extends ReconfigurableBase
",hdfs-" +
conf.get("hadoop.hdfs.configuration.version", "UNSPECIFIED");
- this.checkDiskErrorInterval =
- ThreadLocalRandom.current().nextInt(5000, (int) (5000 * 1.25));
+ this.volumeChecker = new DatasetVolumeChecker(conf, new Timer());
// Determine whether we should try to pass file descriptors to clients.
if (conf.getBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY,
@@ -1918,11 +1914,6 @@ public class DataNode extends ReconfigurableBase
}
}
- // Interrupt the checkDiskErrorThread and terminate it.
- if(this.checkDiskErrorThread != null) {
- this.checkDiskErrorThread.interrupt();
- }
-
// Record the time of initial notification
long timeNotified = Time.monotonicNow();
@@ -1944,6 +1935,8 @@ public class DataNode extends ReconfigurableBase
}
}
+ volumeChecker.shutdownAndWait(1, TimeUnit.SECONDS);
+
if (storageLocationChecker != null) {
storageLocationChecker.shutdownAndWait(1, TimeUnit.SECONDS);
}
@@ -2051,16 +2044,19 @@ public class DataNode extends ReconfigurableBase
* Check if there is a disk failure asynchronously and if so, handle the error
*/
public void checkDiskErrorAsync() {
- synchronized(checkDiskErrorMutex) {
- checkDiskErrorFlag = true;
- if(checkDiskErrorThread == null) {
- startCheckDiskErrorThread();
- checkDiskErrorThread.start();
- LOG.info("Starting CheckDiskError Thread");
- }
- }
+ volumeChecker.checkAllVolumesAsync(
+ data, (healthyVolumes, failedVolumes) -> {
+ if (failedVolumes.size() > 0) {
+ LOG.warn("checkDiskErrorAsync callback got {} failed volumes: {}",
+ failedVolumes.size(), failedVolumes);
+ } else {
+ LOG.debug("checkDiskErrorAsync: no volume failures detected");
+ }
+ lastDiskErrorCheck = Time.monotonicNow();
+ handleVolumeFailures(failedVolumes);
+ });
}
-
+
private void handleDiskError(String errMsgr) {
final boolean hasEnoughResources = data.hasEnoughResource();
LOG.warn("DataNode.handleDiskError: Keep Running: " + hasEnoughResources);
@@ -3208,11 +3204,40 @@ public class DataNode extends ReconfigurableBase
}
/**
- * Check the disk error
+ * Check the disk error synchronously.
*/
- private void checkDiskError() {
- Set<StorageLocation> unhealthyLocations = data.checkDataDir();
- if (unhealthyLocations != null && !unhealthyLocations.isEmpty()) {
+ @VisibleForTesting
+ public void checkDiskError() throws IOException {
+ Set<FsVolumeSpi> unhealthyVolumes;
+ try {
+ unhealthyVolumes = volumeChecker.checkAllVolumes(data);
+ lastDiskErrorCheck = Time.monotonicNow();
+ } catch (InterruptedException e) {
+ LOG.error("Interruped while running disk check", e);
+ throw new IOException("Interrupted while running disk check", e);
+ }
+
+ if (unhealthyVolumes.size() > 0) {
+ LOG.warn("checkDiskError got {} failed volumes - {}",
+ unhealthyVolumes.size(), unhealthyVolumes);
+ handleVolumeFailures(unhealthyVolumes);
+ } else {
+ LOG.debug("checkDiskError encountered no failures");
+ }
+ }
+
+ private void handleVolumeFailures(Set<FsVolumeSpi> unhealthyVolumes) {
+ data.handleVolumeFailures(unhealthyVolumes);
+ Set<StorageLocation> unhealthyLocations = new HashSet<>(
+ unhealthyVolumes.size());
+
+ if (!unhealthyVolumes.isEmpty()) {
+ StringBuilder sb = new StringBuilder("DataNode failed volumes:");
+ for (FsVolumeSpi vol : unhealthyVolumes) {
+ unhealthyLocations.add(vol.getStorageLocation());
+ sb.append(vol.getStorageLocation()).append(";");
+ }
+
try {
// Remove all unhealthy volumes from DataNode.
removeVolumes(unhealthyLocations, false);
@@ -3220,56 +3245,13 @@ public class DataNode extends ReconfigurableBase
LOG.warn("Error occurred when removing unhealthy storage dirs: "
+ e.getMessage(), e);
}
- StringBuilder sb = new StringBuilder("DataNode failed volumes:");
- for (StorageLocation location : unhealthyLocations) {
- sb.append(location + ";");
- }
+ LOG.info(sb.toString());
handleDiskError(sb.toString());
}
}
- /**
- * Starts a new thread which will check for disk error check request
- * every 5 sec
- */
- private void startCheckDiskErrorThread() {
- checkDiskErrorThread = new Thread(new Runnable() {
- @Override
- public void run() {
- while(shouldRun) {
- boolean tempFlag ;
- synchronized(checkDiskErrorMutex) {
- tempFlag = checkDiskErrorFlag;
- checkDiskErrorFlag = false;
- }
- if(tempFlag) {
- try {
- checkDiskError();
- } catch (Exception e) {
- LOG.warn("Unexpected exception occurred while checking disk error " + e);
- checkDiskErrorThread = null;
- return;
- }
- synchronized(checkDiskErrorMutex) {
- lastDiskErrorCheck = Time.monotonicNow();
- }
- }
- try {
- Thread.sleep(checkDiskErrorInterval);
- } catch (InterruptedException e) {
- LOG.debug("InterruptedException in check disk error thread", e);
- checkDiskErrorThread = null;
- return;
- }
- }
- }
- });
- }
-
public long getLastDiskErrorCheck() {
- synchronized(checkDiskErrorMutex) {
- return lastDiskErrorCheck;
- }
+ return lastDiskErrorCheck;
}
@Override
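Taken together, the DataNode hunks above replace the ad-hoc disk-check thread with the DatasetVolumeChecker lifecycle. A simplified, illustrative sketch of that lifecycle (class and method names here are hypothetical; only the DatasetVolumeChecker calls mirror the patch):

  import java.util.Set;
  import java.util.concurrent.TimeUnit;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.server.datanode.checker.DatasetVolumeChecker;
  import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
  import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
  import org.apache.hadoop.util.Timer;

  /** Illustrative only: the volume-check lifecycle the DataNode now follows. */
  class VolumeCheckSketch {
    void run(Configuration conf, FsDatasetSpi<? extends FsVolumeSpi> data)
        throws Exception {
      DatasetVolumeChecker checker = new DatasetVolumeChecker(conf, new Timer());

      // Asynchronous check, replacing the old checkDiskErrorThread; the callback
      // fires once with the healthy and failed volume sets.
      checker.checkAllVolumesAsync(data, (healthyVolumes, failedVolumes) -> {
        if (!failedVolumes.isEmpty()) {
          data.handleVolumeFailures(failedVolumes);  // new FsDatasetSpi hook
        }
      });

      // Synchronous variant, used by the now test-visible checkDiskError().
      Set<FsVolumeSpi> unhealthy = checker.checkAllVolumes(data);
      System.out.println("Unhealthy volumes: " + unhealthy);

      // Shutdown path added to DataNode.shutdown().
      checker.shutdownAndWait(1, TimeUnit.SECONDS);
    }
  }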
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f678080d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
index 8a57812..ba09d23 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
@@ -27,7 +27,6 @@ import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -161,37 +160,54 @@ public class DatasetVolumeChecker {
* @param dataset - FsDatasetSpi to be checked.
* @return set of failed volumes.
*/
- public Set<StorageLocation> checkAllVolumes(
+ public Set<FsVolumeSpi> checkAllVolumes(
final FsDatasetSpi<? extends FsVolumeSpi> dataset)
throws InterruptedException {
-
- if (timer.monotonicNow() - lastAllVolumesCheck < minDiskCheckGapMs) {
+ final long gap = timer.monotonicNow() - lastAllVolumesCheck;
+ if (gap < minDiskCheckGapMs) {
numSkippedChecks.incrementAndGet();
+ LOG.trace(
+ "Skipped checking all volumes, time since last check {} is less " +
+ "than the minimum gap between checks ({} ms).",
+ gap, minDiskCheckGapMs);
return Collections.emptySet();
}
- lastAllVolumesCheck = timer.monotonicNow();
- final Set<StorageLocation> healthyVolumes = new HashSet<>();
- final Set<StorageLocation> failedVolumes = new HashSet<>();
- final Set<StorageLocation> allVolumes = new HashSet<>();
-
final FsDatasetSpi.FsVolumeReferences references =
dataset.getFsVolumeReferences();
- final CountDownLatch resultsLatch = new CountDownLatch(references.size());
+
+ if (references.size() == 0) {
+ LOG.warn("checkAllVolumesAsync - no volumes can be referenced");
+ return Collections.emptySet();
+ }
+
+ lastAllVolumesCheck = timer.monotonicNow();
+ final Set<FsVolumeSpi> healthyVolumes = new HashSet<>();
+ final Set<FsVolumeSpi> failedVolumes = new HashSet<>();
+ final Set<FsVolumeSpi> allVolumes = new HashSet<>();
+
+ final AtomicLong numVolumes = new AtomicLong(references.size());
+ final CountDownLatch latch = new CountDownLatch(1);
for (int i = 0; i < references.size(); ++i) {
final FsVolumeReference reference = references.getReference(i);
- allVolumes.add(reference.getVolume().getStorageLocation());
+ allVolumes.add(reference.getVolume());
ListenableFuture<VolumeCheckResult> future =
delegateChecker.schedule(reference.getVolume(), IGNORED_CONTEXT);
LOG.info("Scheduled health check for volume {}", reference.getVolume());
Futures.addCallback(future, new ResultHandler(
- reference, healthyVolumes, failedVolumes, resultsLatch, null));
+ reference, healthyVolumes, failedVolumes, numVolumes, new Callback() {
+ @Override
+ public void call(Set<FsVolumeSpi> ignored1,
+ Set<FsVolumeSpi> ignored2) {
+ latch.countDown();
+ }
+ }));
}
// Wait until our timeout elapses, after which we give up on
// the remaining volumes.
- if (!resultsLatch.await(maxAllowedTimeForCheckMs, TimeUnit.MILLISECONDS)) {
+ if (!latch.await(maxAllowedTimeForCheckMs, TimeUnit.MILLISECONDS)) {
LOG.warn("checkAllVolumes timed out after {} ms" +
maxAllowedTimeForCheckMs);
}
@@ -225,18 +241,28 @@ public class DatasetVolumeChecker {
public boolean checkAllVolumesAsync(
final FsDatasetSpi<? extends FsVolumeSpi> dataset,
Callback callback) {
-
- if (timer.monotonicNow() - lastAllVolumesCheck < minDiskCheckGapMs) {
+ final long gap = timer.monotonicNow() - lastAllVolumesCheck;
+ if (gap < minDiskCheckGapMs) {
numSkippedChecks.incrementAndGet();
+ LOG.trace(
+ "Skipped checking all volumes, time since last check {} is less " +
+ "than the minimum gap between checks ({} ms).",
+ gap, minDiskCheckGapMs);
return false;
}
- lastAllVolumesCheck = timer.monotonicNow();
- final Set<StorageLocation> healthyVolumes = new HashSet<>();
- final Set<StorageLocation> failedVolumes = new HashSet<>();
final FsDatasetSpi.FsVolumeReferences references =
dataset.getFsVolumeReferences();
- final CountDownLatch latch = new CountDownLatch(references.size());
+
+ if (references.size() == 0) {
+ LOG.warn("checkAllVolumesAsync - no volumes can be referenced");
+ return false;
+ }
+
+ lastAllVolumesCheck = timer.monotonicNow();
+ final Set<FsVolumeSpi> healthyVolumes = new HashSet<>();
+ final Set<FsVolumeSpi> failedVolumes = new HashSet<>();
+ final AtomicLong numVolumes = new AtomicLong(references.size());
LOG.info("Checking {} volumes", references.size());
for (int i = 0; i < references.size(); ++i) {
@@ -245,7 +271,7 @@ public class DatasetVolumeChecker {
ListenableFuture<VolumeCheckResult> future =
delegateChecker.schedule(reference.getVolume(), IGNORED_CONTEXT);
Futures.addCallback(future, new ResultHandler(
- reference, healthyVolumes, failedVolumes, latch, callback));
+ reference, healthyVolumes, failedVolumes, numVolumes, callback));
}
numAsyncDatasetChecks.incrementAndGet();
return true;
@@ -260,8 +286,8 @@ public class DatasetVolumeChecker {
* @param healthyVolumes set of volumes that passed disk checks.
* @param failedVolumes set of volumes that failed disk checks.
*/
- void call(Set<StorageLocation> healthyVolumes,
- Set<StorageLocation> failedVolumes);
+ void call(Set<FsVolumeSpi> healthyVolumes,
+ Set<FsVolumeSpi> failedVolumes);
}
/**
@@ -273,8 +299,10 @@ public class DatasetVolumeChecker {
*
* @param volume the volume that is to be checked.
* @param callback callback to be invoked when the volume check completes.
+ * @return true if the check was scheduled and the callback will be invoked.
+ * false otherwise.
*/
- public void checkVolume(
+ public boolean checkVolume(
final FsVolumeSpi volume,
Callback callback) {
FsVolumeReference volumeReference;
@@ -283,14 +311,15 @@ public class DatasetVolumeChecker {
} catch (ClosedChannelException e) {
// The volume has already been closed.
callback.call(new HashSet<>(), new HashSet<>());
- return;
+ return false;
}
ListenableFuture<VolumeCheckResult> future =
delegateChecker.schedule(volume, IGNORED_CONTEXT);
numVolumeChecks.incrementAndGet();
Futures.addCallback(future, new ResultHandler(
volumeReference, new HashSet<>(), new HashSet<>(),
- new CountDownLatch(1), callback));
+ new AtomicLong(1), callback));
+ return true;
}
/**
@@ -299,26 +328,35 @@ public class DatasetVolumeChecker {
private class ResultHandler
implements FutureCallback<VolumeCheckResult> {
private final FsVolumeReference reference;
- private final Set<StorageLocation> failedVolumes;
- private final Set<StorageLocation> healthyVolumes;
- private final CountDownLatch latch;
- private final AtomicLong numVolumes;
+ private final Set<FsVolumeSpi> failedVolumes;
+ private final Set<FsVolumeSpi> healthyVolumes;
+ private final AtomicLong volumeCounter;
@Nullable
private final Callback callback;
+ /**
+ *
+ * @param reference FsVolumeReference to be released when the check is
+ * complete.
+ * @param healthyVolumes set of healthy volumes. If the disk check is
+ * successful, add the volume here.
+ * @param failedVolumes set of failed volumes. If the disk check fails,
+ * add the volume here.
+ * @param semaphore semaphore used to trigger callback invocation.
+ * @param callback invoked when the semaphore can be successfully acquired.
+ */
ResultHandler(FsVolumeReference reference,
- Set<StorageLocation> healthyVolumes,
- Set<StorageLocation> failedVolumes,
- CountDownLatch latch,
+ Set<FsVolumeSpi> healthyVolumes,
+ Set<FsVolumeSpi> failedVolumes,
+ AtomicLong volumeCounter,
@Nullable Callback callback) {
Preconditions.checkState(reference != null);
this.reference = reference;
this.healthyVolumes = healthyVolumes;
this.failedVolumes = failedVolumes;
- this.latch = latch;
+ this.volumeCounter = volumeCounter;
this.callback = callback;
- numVolumes = new AtomicLong(latch.getCount());
}
@Override
@@ -355,13 +393,13 @@ public class DatasetVolumeChecker {
private void markHealthy() {
synchronized (DatasetVolumeChecker.this) {
- healthyVolumes.add(reference.getVolume().getStorageLocation());
+ healthyVolumes.add(reference.getVolume());
}
}
private void markFailed() {
synchronized (DatasetVolumeChecker.this) {
- failedVolumes.add(reference.getVolume().getStorageLocation());
+ failedVolumes.add(reference.getVolume());
}
}
@@ -372,10 +410,8 @@ public class DatasetVolumeChecker {
private void invokeCallback() {
try {
- latch.countDown();
-
- if (numVolumes.decrementAndGet() == 0 &&
- callback != null) {
+ final long remaining = volumeCounter.decrementAndGet();
+ if (callback != null && remaining == 0) {
callback.call(healthyVolumes, failedVolumes);
}
} catch(Exception e) {
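A single volume can be checked through the same callback contract. A hedged sketch of the checkVolume() entry point changed above (checker, volume and LOG are assumed to exist in the surrounding code):

  // Sketch only: schedule a health check for one volume.
  boolean scheduled = checker.checkVolume(volume, (healthy, failed) -> {
    if (failed.contains(volume)) {
      LOG.warn("Volume {} failed its health check", volume);
    }
  });
  // With this patch checkVolume() returns false when the volume reference cannot
  // be obtained because the volume is already closed; in that case the callback
  // is invoked immediately with empty sets.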
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f678080d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
index 30f045f..9e979f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
@@ -494,8 +494,9 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
/**
* Check if all the data directories are healthy
* @return A set of unhealthy data directories.
+ * @param failedVolumes
*/
- Set<StorageLocation> checkDataDir();
+ void handleVolumeFailures(Set<FsVolumeSpi> failedVolumes);
/**
* Shutdown the FSDataset
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f678080d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 35561cd..0d5a12c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -2067,10 +2067,11 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
* if some volumes failed - the caller must emove all the blocks that belong
* to these failed volumes.
* @return the failed volumes. Returns null if no volume failed.
+ * @param failedVolumes
*/
@Override // FsDatasetSpi
- public Set<StorageLocation> checkDataDir() {
- return volumes.checkDirs();
+ public void handleVolumeFailures(Set<FsVolumeSpi> failedVolumes) {
+ volumes.handleVolumeFailures(failedVolumes);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f678080d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index e28ee27..753c083 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -959,13 +959,6 @@ public class FsVolumeImpl implements FsVolumeSpi {
return cacheExecutor;
}
- void checkDirs() throws DiskErrorException {
- // TODO:FEDERATION valid synchronization
- for(BlockPoolSlice s : bpSlices.values()) {
- s.checkDirs();
- }
- }
-
@Override
public VolumeCheckResult check(VolumeCheckContext ignored)
throws DiskErrorException {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f678080d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
index cf9c319..64921d7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
@@ -23,7 +23,6 @@ import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
@@ -44,7 +43,6 @@ import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.AutoCloseableLock;
-import org.apache.hadoop.util.DiskChecker.DiskErrorException;
import org.apache.hadoop.util.Time;
class FsVolumeList {
@@ -235,23 +233,14 @@ class FsVolumeList {
* Use {@link checkDirsLock} to allow only one instance of checkDirs() call.
*
* @return list of all the failed volumes.
+ * @param failedVolumes
*/
- Set<StorageLocation> checkDirs() {
+ void handleVolumeFailures(Set<FsVolumeSpi> failedVolumes) {
try (AutoCloseableLock lock = checkDirsLock.acquire()) {
- Set<StorageLocation> failedLocations = null;
- // Make a copy of volumes for performing modification
- final List<FsVolumeImpl> volumeList = getVolumes();
- for(Iterator<FsVolumeImpl> i = volumeList.iterator(); i.hasNext(); ) {
- final FsVolumeImpl fsv = i.next();
+ for(FsVolumeSpi vol : failedVolumes) {
+ FsVolumeImpl fsv = (FsVolumeImpl) vol;
try (FsVolumeReference ref = fsv.obtainReference()) {
- fsv.checkDirs();
- } catch (DiskErrorException e) {
- FsDatasetImpl.LOG.warn("Removing failed volume " + fsv + ": ", e);
- if (failedLocations == null) {
- failedLocations = new HashSet<>(1);
- }
- failedLocations.add(fsv.getStorageLocation());
addVolumeFailureInfo(fsv);
removeVolume(fsv);
} catch (ClosedChannelException e) {
@@ -262,13 +251,7 @@ class FsVolumeList {
}
}
- if (failedLocations != null && failedLocations.size() > 0) {
- FsDatasetImpl.LOG.warn("Completed checkDirs. Found " +
- failedLocations.size() + " failure volumes.");
- }
-
waitVolumeRemoved(5000, checkDirsLockCondition);
- return failedLocations;
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f678080d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
index 476565dc..b7583c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
@@ -30,9 +30,11 @@ import java.net.URL;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
+import java.util.concurrent.TimeUnit;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -52,6 +54,8 @@ public class TestBlockStatsMXBean {
@Before
public void setup() throws IOException {
HdfsConfiguration conf = new HdfsConfiguration();
+ conf.setTimeDuration(DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY,
+ 0, TimeUnit.MILLISECONDS);
cluster = null;
StorageType[][] types = new StorageType[6][];
for (int i=0; i<3; i++) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f678080d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index 484fbe4..8472eca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -489,7 +489,17 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
@Override
public FsVolumeReference obtainReference() throws ClosedChannelException {
- return null;
+ return new FsVolumeReference() {
+ @Override
+ public void close() throws IOException {
+ // no-op.
+ }
+
+ @Override
+ public FsVolumeSpi getVolume() {
+ return SimulatedVolume.this;
+ }
+ };
}
@Override
@@ -1078,9 +1088,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
}
@Override
- public Set<StorageLocation> checkDataDir() {
- // nothing to check for simulated data set
- return null;
+ public void handleVolumeFailures(Set<FsVolumeSpi> failedVolumes) {
}
@Override // FsDatasetSpi
@@ -1349,7 +1357,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
@Override
public FsVolumeReferences getFsVolumeReferences() {
- throw new UnsupportedOperationException();
+ return new FsVolumeReferences(Collections.singletonList(volume));
}
@Override
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f678080d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 5607ccc..e31e783 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -62,6 +62,7 @@ import java.util.Map;
import java.util.Random;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.apache.commons.logging.Log;
@@ -113,6 +114,8 @@ public class TestDataNodeHotSwapVolumes {
1000);
/* Allow 1 volume failure */
conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
+ conf.setTimeDuration(DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY,
+ 0, TimeUnit.MILLISECONDS);
MiniDFSNNTopology nnTopology =
MiniDFSNNTopology.simpleFederatedTopology(numNameNodes);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f678080d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index 8db7658..06e2871 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -33,6 +33,7 @@ import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.apache.hadoop.conf.Configuration;
@@ -118,6 +119,8 @@ public class TestDataNodeVolumeFailure {
// Allow a single volume failure (there are two volumes)
conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 30);
+ conf.setTimeDuration(DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY,
+ 0, TimeUnit.MILLISECONDS);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(dn_num).build();
cluster.waitActive();
fs = cluster.getFileSystem();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f678080d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
index aa9b7aa..3d37b10 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
@@ -30,6 +30,7 @@ import java.io.File;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
+import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -659,6 +660,8 @@ public class TestDataNodeVolumeFailureReporting {
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,
failedVolumesTolerated);
+ conf.setTimeDuration(DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY,
+ 0, TimeUnit.MILLISECONDS);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes)
.storagesPerDatanode(storagesPerDatanode).build();
cluster.waitActive();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f678080d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
index 5ff7d9b..de50ccb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
@@ -24,6 +24,7 @@ import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
+import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
@@ -144,6 +145,8 @@ public class TestDataNodeVolumeFailureToleration {
// Bring up two additional datanodes that need both of their volumes
// functioning in order to stay up.
conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 0);
+ conf.setTimeDuration(DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY,
+ 0, TimeUnit.MILLISECONDS);
cluster.startDataNodes(conf, 2, true, null, null);
cluster.waitActive();
final DatanodeManager dm = cluster.getNamesystem().getBlockManager(
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f678080d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
index 56dee43..cd86720 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
@@ -26,7 +26,9 @@ import java.io.IOException;
import java.io.RandomAccessFile;
import java.net.InetSocketAddress;
import java.net.Socket;
+import java.util.concurrent.TimeUnit;
+import com.google.common.base.Supplier;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
@@ -49,8 +51,8 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.DataChecksum;
-import org.apache.hadoop.util.Time;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@@ -69,6 +71,9 @@ public class TestDiskError {
public void setUp() throws Exception {
conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512L);
+ conf.setTimeDuration(
+ DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY,
+ 0, TimeUnit.MILLISECONDS);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
fs = cluster.getFileSystem();
@@ -213,19 +218,22 @@ public class TestDiskError {
* Before refactoring the code the above function was not getting called
* @throws IOException, InterruptedException
*/
- @Test
- public void testcheckDiskError() throws IOException, InterruptedException {
+ @Test(timeout=60000)
+ public void testcheckDiskError() throws Exception {
if(cluster.getDataNodes().size() <= 0) {
cluster.startDataNodes(conf, 1, true, null, null);
cluster.waitActive();
}
DataNode dataNode = cluster.getDataNodes().get(0);
- long slackTime = dataNode.checkDiskErrorInterval/2;
//checking for disk error
- dataNode.checkDiskErrorAsync();
- Thread.sleep(dataNode.checkDiskErrorInterval);
- long lastDiskErrorCheck = dataNode.getLastDiskErrorCheck();
- assertTrue("Disk Error check is not performed within " + dataNode.checkDiskErrorInterval + " ms", ((Time.monotonicNow()-lastDiskErrorCheck) < (dataNode.checkDiskErrorInterval + slackTime)));
+ final long lastCheckTimestamp = dataNode.getLastDiskErrorCheck();
+ dataNode.checkDiskError();
+ GenericTestUtils.waitFor(new Supplier<Boolean>() {
+ @Override
+ public Boolean get() {
+ return dataNode.getLastDiskErrorCheck() > lastCheckTimestamp;
+ }
+ }, 100, 60000);
}
@Test
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f678080d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java
index fa809d1..50096ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java
@@ -35,7 +35,10 @@ import org.junit.runners.Parameterized.Parameters;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
@@ -103,8 +106,8 @@ public class TestDatasetVolumeChecker {
*/
checker.checkVolume(volume, new DatasetVolumeChecker.Callback() {
@Override
- public void call(Set<StorageLocation> healthyVolumes,
- Set<StorageLocation> failedVolumes) {
+ public void call(Set<FsVolumeSpi> healthyVolumes,
+ Set<FsVolumeSpi> failedVolumes) {
numCallbackInvocations.incrementAndGet();
if (expectedVolumeHealth != null && expectedVolumeHealth != FAILED) {
assertThat(healthyVolumes.size(), is(1));
@@ -138,7 +141,7 @@ public class TestDatasetVolumeChecker {
new DatasetVolumeChecker(new HdfsConfiguration(), new FakeTimer());
checker.setDelegateChecker(new DummyChecker());
- Set<StorageLocation> failedVolumes = checker.checkAllVolumes(dataset);
+ Set<FsVolumeSpi> failedVolumes = checker.checkAllVolumes(dataset);
LOG.info("Got back {} failed volumes", failedVolumes.size());
if (expectedVolumeHealth == null || expectedVolumeHealth == FAILED) {
@@ -174,8 +177,8 @@ public class TestDatasetVolumeChecker {
dataset, new DatasetVolumeChecker.Callback() {
@Override
public void call(
- Set<StorageLocation> healthyVolumes,
- Set<StorageLocation> failedVolumes) {
+ Set<FsVolumeSpi> healthyVolumes,
+ Set<FsVolumeSpi> failedVolumes) {
LOG.info("Got back {} failed volumes", failedVolumes.size());
if (expectedVolumeHealth == null ||
expectedVolumeHealth == FAILED) {
@@ -236,7 +239,7 @@ public class TestDatasetVolumeChecker {
return dataset;
}
- private static List<FsVolumeSpi> makeVolumes(
+ static List<FsVolumeSpi> makeVolumes(
int numVolumes, VolumeCheckResult health) throws Exception {
final List<FsVolumeSpi> volumes = new ArrayList<>(numVolumes);
for (int i = 0; i < numVolumes; ++i) {
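The StorageLocation-to-FsVolumeSpi change above alters every callback signature. A hedged sketch of a caller written against the new DatasetVolumeChecker.Callback shape (checker, volume and LOG are assumed from the surrounding test):

    checker.checkVolume(volume, new DatasetVolumeChecker.Callback() {
      @Override
      public void call(Set<FsVolumeSpi> healthyVolumes,
          Set<FsVolumeSpi> failedVolumes) {
        // Both sets now carry volume handles rather than storage locations.
        LOG.info("healthy={}, failed={}",
            healthyVolumes.size(), failedVolumes.size());
      }
    });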
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f678080d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeCheckerFailures.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeCheckerFailures.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeCheckerFailures.java
index b57d84f..16c333b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeCheckerFailures.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeCheckerFailures.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.*;
import org.apache.hadoop.util.FakeTimer;
+import org.junit.Before;
import org.junit.Test;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
@@ -47,6 +48,19 @@ public class TestDatasetVolumeCheckerFailures {
public static final Logger LOG =LoggerFactory.getLogger(
TestDatasetVolumeCheckerFailures.class);
+ private FakeTimer timer;
+ private Configuration conf;
+
+ private static final long MIN_DISK_CHECK_GAP_MS = 1000; // 1 second.
+
+ @Before
+ public void commonInit() {
+ timer = new FakeTimer();
+ conf = new HdfsConfiguration();
+ conf.setTimeDuration(DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY,
+ MIN_DISK_CHECK_GAP_MS, TimeUnit.MILLISECONDS);
+ }
+
/**
* Test timeout in {@link DatasetVolumeChecker#checkAllVolumes}.
* @throws Exception
@@ -61,14 +75,13 @@ public class TestDatasetVolumeCheckerFailures {
TestDatasetVolumeChecker.makeDataset(volumes);
// Create a disk checker with a very low timeout.
- final HdfsConfiguration conf = new HdfsConfiguration();
conf.setTimeDuration(DFSConfigKeys.DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY,
1, TimeUnit.SECONDS);
final DatasetVolumeChecker checker =
new DatasetVolumeChecker(conf, new FakeTimer());
// Ensure that the hung volume is detected as failed.
- Set<StorageLocation> failedVolumes = checker.checkAllVolumes(dataset);
+ Set<FsVolumeSpi> failedVolumes = checker.checkAllVolumes(dataset);
assertThat(failedVolumes.size(), is(1));
}
@@ -86,10 +99,10 @@ public class TestDatasetVolumeCheckerFailures {
final FsDatasetSpi<FsVolumeSpi> dataset =
TestDatasetVolumeChecker.makeDataset(volumes);
- DatasetVolumeChecker checker =
- new DatasetVolumeChecker(new HdfsConfiguration(), new FakeTimer());
- Set<StorageLocation> failedVolumes = checker.checkAllVolumes(dataset);
+ DatasetVolumeChecker checker = new DatasetVolumeChecker(conf, timer);
+ Set<FsVolumeSpi> failedVolumes = checker.checkAllVolumes(dataset);
assertThat(failedVolumes.size(), is(0));
+ assertThat(checker.getNumSyncDatasetChecks(), is(0L));
// The closed volume should not have been checked as it cannot
// be referenced.
@@ -98,13 +111,10 @@ public class TestDatasetVolumeCheckerFailures {
@Test(timeout=60000)
public void testMinGapIsEnforcedForSyncChecks() throws Exception {
+ final List<FsVolumeSpi> volumes =
+ TestDatasetVolumeChecker.makeVolumes(1, VolumeCheckResult.HEALTHY);
final FsDatasetSpi<FsVolumeSpi> dataset =
- TestDatasetVolumeChecker.makeDataset(Collections.emptyList());
- final FakeTimer timer = new FakeTimer();
- final Configuration conf = new HdfsConfiguration();
- final long minGapMs = 100;
- conf.setTimeDuration(DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY,
- minGapMs, TimeUnit.MILLISECONDS);
+ TestDatasetVolumeChecker.makeDataset(volumes);
final DatasetVolumeChecker checker = new DatasetVolumeChecker(conf, timer);
checker.checkAllVolumes(dataset);
@@ -116,7 +126,7 @@ public class TestDatasetVolumeCheckerFailures {
assertThat(checker.getNumSkippedChecks(), is(1L));
// Re-check after advancing the timer. Ensure the check is performed.
- timer.advance(minGapMs);
+ timer.advance(MIN_DISK_CHECK_GAP_MS);
checker.checkAllVolumes(dataset);
assertThat(checker.getNumSyncDatasetChecks(), is(2L));
assertThat(checker.getNumSkippedChecks(), is(1L));
@@ -124,13 +134,10 @@ public class TestDatasetVolumeCheckerFailures {
@Test(timeout=60000)
public void testMinGapIsEnforcedForASyncChecks() throws Exception {
+ final List<FsVolumeSpi> volumes =
+ TestDatasetVolumeChecker.makeVolumes(1, VolumeCheckResult.HEALTHY);
final FsDatasetSpi<FsVolumeSpi> dataset =
- TestDatasetVolumeChecker.makeDataset(Collections.emptyList());
- final FakeTimer timer = new FakeTimer();
- final Configuration conf = new HdfsConfiguration();
- final long minGapMs = 100;
- conf.setTimeDuration(DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY,
- minGapMs, TimeUnit.MILLISECONDS);
+ TestDatasetVolumeChecker.makeDataset(volumes);
final DatasetVolumeChecker checker = new DatasetVolumeChecker(conf, timer);
checker.checkAllVolumesAsync(dataset, null);
@@ -142,7 +149,7 @@ public class TestDatasetVolumeCheckerFailures {
assertThat(checker.getNumSkippedChecks(), is(1L));
// Re-check after advancing the timer. Ensure the check is performed.
- timer.advance(minGapMs);
+ timer.advance(MIN_DISK_CHECK_GAP_MS);
checker.checkAllVolumesAsync(dataset, null);
assertThat(checker.getNumAsyncDatasetChecks(), is(2L));
assertThat(checker.getNumSkippedChecks(), is(1L));
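The two min-gap tests above exercise the same throttle behaviour. A condensed sketch, assuming the @Before fixture's conf and timer fields and the 1000 ms MIN_DISK_CHECK_GAP_MS constant:

    final DatasetVolumeChecker checker = new DatasetVolumeChecker(conf, timer);
    checker.checkAllVolumes(dataset);        // first check runs
    checker.checkAllVolumes(dataset);        // inside the min gap: skipped
    assertThat(checker.getNumSyncDatasetChecks(), is(1L));
    assertThat(checker.getNumSkippedChecks(), is(1L));

    timer.advance(MIN_DISK_CHECK_GAP_MS);    // FakeTimer steps past the gap
    checker.checkAllVolumes(dataset);        // check runs again
    assertThat(checker.getNumSyncDatasetChecks(), is(2L));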
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f678080d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
index 5cd86e2..62ef731 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
@@ -239,8 +239,7 @@ public class ExternalDatasetImpl implements FsDatasetSpi<ExternalVolumeImpl> {
}
@Override
- public Set<StorageLocation> checkDataDir() {
- return null;
+ public void handleVolumeFailures(Set<FsVolumeSpi> failedVolumes) {
}
@Override
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f678080d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
index e48aae0..905c3f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
@@ -52,13 +52,10 @@ import org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.RoundRobinVolumeChoosingPolicy;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.io.MultipleIOException;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.DiskChecker;
import org.apache.hadoop.util.FakeTimer;
import org.apache.hadoop.util.StringUtils;
import org.junit.Assert;
@@ -66,8 +63,6 @@ import org.junit.Before;
import org.junit.Test;
import org.mockito.Matchers;
import org.mockito.Mockito;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
import java.io.File;
import java.io.FileOutputStream;
@@ -76,16 +71,18 @@ import java.io.OutputStreamWriter;
import java.io.Writer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
-import java.util.Collections;
import java.util.concurrent.CountDownLatch;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
+import java.util.concurrent.TimeUnit;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DN_CACHED_DFSUSED_CHECK_INTERVAL_MS;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY;
+import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertSame;
@@ -94,13 +91,10 @@ import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyListOf;
import static org.mockito.Matchers.anyString;
import static org.mockito.Matchers.eq;
-import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.never;
import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import org.slf4j.Logger;
@@ -339,68 +333,6 @@ public class TestFsDatasetImpl {
assertEquals(numExistingVolumes, getNumVolumes());
}
- @Test(timeout = 5000)
- public void testChangeVolumeWithRunningCheckDirs() throws IOException {
- RoundRobinVolumeChoosingPolicy<FsVolumeImpl> blockChooser =
- new RoundRobinVolumeChoosingPolicy<>();
- conf.setLong(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
- final BlockScanner blockScanner = new BlockScanner(datanode);
- final FsVolumeList volumeList = new FsVolumeList(
- Collections.<VolumeFailureInfo>emptyList(), blockScanner, blockChooser);
- final List<FsVolumeImpl> oldVolumes = new ArrayList<>();
-
- // Initialize FsVolumeList with 5 mock volumes.
- final int NUM_VOLUMES = 5;
- for (int i = 0; i < NUM_VOLUMES; i++) {
- FsVolumeImpl volume = mock(FsVolumeImpl.class);
- oldVolumes.add(volume);
- when(volume.getStorageLocation()).thenReturn(
- StorageLocation.parse(new File("data" + i).toURI().toString()));
- when(volume.checkClosed()).thenReturn(true);
- FsVolumeReference ref = mock(FsVolumeReference.class);
- when(ref.getVolume()).thenReturn(volume);
- volumeList.addVolume(ref);
- }
-
- // When call checkDirs() on the 2nd volume, anther "thread" removes the 5th
- // volume and add another volume. It does not affect checkDirs() running.
- final FsVolumeImpl newVolume = mock(FsVolumeImpl.class);
- final FsVolumeReference newRef = mock(FsVolumeReference.class);
- when(newRef.getVolume()).thenReturn(newVolume);
- when(newVolume.getStorageLocation()).thenReturn(
- StorageLocation.parse(new File("data4").toURI().toString()));
- FsVolumeImpl blockedVolume = volumeList.getVolumes().get(1);
- doAnswer(new Answer() {
- @Override
- public Object answer(InvocationOnMock invocationOnMock)
- throws Throwable {
- volumeList.removeVolume(
- StorageLocation.parse((new File("data4")).toURI().toString()),
- false);
- volumeList.addVolume(newRef);
- return null;
- }
- }).when(blockedVolume).checkDirs();
-
- FsVolumeImpl brokenVolume = volumeList.getVolumes().get(2);
- doThrow(new DiskChecker.DiskErrorException("broken"))
- .when(brokenVolume).checkDirs();
-
- volumeList.checkDirs();
-
- // Since FsVolumeImpl#checkDirs() get a snapshot of the list of volumes
- // before running removeVolume(), it is supposed to run checkDirs() on all
- // the old volumes.
- for (FsVolumeImpl volume : oldVolumes) {
- verify(volume).checkDirs();
- }
- // New volume is not visible to checkDirs() process.
- verify(newVolume, never()).checkDirs();
- assertTrue(volumeList.getVolumes().contains(newVolume));
- assertFalse(volumeList.getVolumes().contains(brokenVolume));
- assertEquals(NUM_VOLUMES - 1, volumeList.getVolumes().size());
- }
-
@Test
public void testAddVolumeFailureReleasesInUseLock() throws IOException {
FsDatasetImpl spyDataset = spy(dataset);
@@ -717,6 +649,9 @@ public class TestFsDatasetImpl {
Configuration config = new HdfsConfiguration();
config.setLong(
DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY, 1000);
+ config.setTimeDuration(
+ DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY, 0,
+ TimeUnit.MILLISECONDS);
config.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
@@ -734,6 +669,8 @@ public class TestFsDatasetImpl {
getVolume(block);
File finalizedDir = volume.getFinalizedDir(cluster.getNamesystem()
.getBlockPoolId());
+ LocatedBlock lb = DFSTestUtil.getAllBlocks(fs, filePath).get(0);
+ DatanodeInfo info = lb.getLocations()[0];
if (finalizedDir.exists()) {
// Remove write and execute access so that checkDiskErrorThread detects
@@ -744,15 +681,14 @@ public class TestFsDatasetImpl {
Assert.assertTrue("Reference count for the volume should be greater "
+ "than 0", volume.getReferenceCount() > 0);
// Invoke the synchronous checkDiskError method
- dataNode.getFSDataset().checkDataDir();
+ dataNode.checkDiskError();
// Sleep for 1 second so that datanode can interrupt and cluster clean up
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override public Boolean get() {
return volume.getReferenceCount() == 0;
}
}, 100, 10);
- LocatedBlock lb = DFSTestUtil.getAllBlocks(fs, filePath).get(0);
- DatanodeInfo info = lb.getLocations()[0];
+ assertThat(dataNode.getFSDataset().getNumFailedVolumes(), is(1));
try {
out.close();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f678080d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
index 6eff300..83c15ca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
@@ -109,43 +109,6 @@ public class TestFsVolumeList {
}
@Test(timeout=30000)
- public void testCheckDirsWithClosedVolume() throws IOException {
- FsVolumeList volumeList = new FsVolumeList(
- Collections.<VolumeFailureInfo>emptyList(), blockScanner, blockChooser);
- final List<FsVolumeImpl> volumes = new ArrayList<>();
- for (int i = 0; i < 3; i++) {
- File curDir = new File(baseDir, "volume-" + i);
- curDir.mkdirs();
- FsVolumeImpl volume = new FsVolumeImplBuilder()
- .setConf(conf)
- .setDataset(dataset)
- .setStorageID("storage-id")
- .setStorageDirectory(
- new StorageDirectory(StorageLocation.parse(curDir.getPath())))
- .build();
- volumes.add(volume);
- volumeList.addVolume(volume.obtainReference());
- }
-
- // Close the 2nd volume.
- volumes.get(1).setClosed();
- try {
- GenericTestUtils.waitFor(new Supplier<Boolean>() {
- @Override
- public Boolean get() {
- return volumes.get(1).checkClosed();
- }
- }, 100, 3000);
- } catch (TimeoutException e) {
- fail("timed out while waiting for volume to be removed.");
- } catch (InterruptedException ie) {
- Thread.currentThread().interrupt();
- }
- // checkDirs() should ignore the 2nd volume since it is closed.
- volumeList.checkDirs();
- }
-
- @Test(timeout=30000)
public void testReleaseVolumeRefIfNoBlockScanner() throws IOException {
FsVolumeList volumeList = new FsVolumeList(
Collections.<VolumeFailureInfo>emptyList(), null, blockChooser);
[16/17] hadoop git commit: HDFS-11258. File mtime change could not save to editlog. Contributed by Jimmy Xiang.
Posted by xg...@apache.org.
HDFS-11258. File mtime change could not save to editlog. Contributed by Jimmy Xiang.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f6e80acd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f6e80acd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f6e80acd
Branch: refs/heads/YARN-5734
Commit: f6e80acd681548b14fe3f0f3d2b3aaf800d10310
Parents: 8b042bc
Author: Haohui Mai <wh...@apache.org>
Authored: Wed Dec 21 13:04:03 2016 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Wed Dec 21 13:04:03 2016 -0800
----------------------------------------------------------------------
.../hdfs/server/namenode/FSDirAttrOp.java | 16 ++---
.../hdfs/server/namenode/TestFSDirAttrOp.java | 76 ++++++++++++++++++++
2 files changed, 82 insertions(+), 10 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6e80acd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index a3e7f9f..4d26885 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -489,17 +489,13 @@ public class FSDirAttrOp {
inode = inode.setModificationTime(mtime, latest);
status = true;
}
- if (atime != -1) {
- long inodeTime = inode.getAccessTime();
- // if the last access time update was within the last precision interval, then
- // no need to store access time
- if (atime <= inodeTime + fsd.getAccessTimePrecision() && !force) {
- status = false;
- } else {
- inode.setAccessTime(atime, latest);
- status = true;
- }
+ // if the last access time update was within the last precision interval,
+ // then no need to store access time
+ if (atime != -1 && (status || force
+ || atime > inode.getAccessTime() + fsd.getAccessTimePrecision())) {
+ inode.setAccessTime(atime, latest);
+ status = true;
}
return status;
}
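The old branch reset status to false whenever the access time fell inside the precision window, which could discard the mtime update recorded just above it and so keep it out of the edit log; the rewritten condition only ever promotes status to true. A stand-alone restatement of when the method now reports a change, for illustration only (the helper and its parameter names are illustrative, not part of the patch):

    // True iff unprotectedSetTimes reports a change and an edit is logged.
    static boolean timesChange(boolean mtimeUpdated, boolean force,
        long atime, long storedAtime, long precision) {
      // When an mtime edit is already being logged, the atime is persisted
      // alongside it even inside the precision window.
      return mtimeUpdated
          || (atime != -1 && (force || atime > storedAtime + precision));
    }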
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6e80acd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java
new file mode 100644
index 0000000..8cd68a1
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.when;
+
+/**
+ * Test {@link FSDirAttrOp}.
+ */
+public class TestFSDirAttrOp {
+ public static final Log LOG = LogFactory.getLog(TestFSDirAttrOp.class);
+
+ private boolean unprotectedSetTimes(long atime, long atime0, long precision,
+ long mtime, boolean force) throws QuotaExceededException {
+ FSDirectory fsd = Mockito.mock(FSDirectory.class);
+ INodesInPath iip = Mockito.mock(INodesInPath.class);
+ INode inode = Mockito.mock(INode.class);
+
+ when(fsd.getAccessTimePrecision()).thenReturn(precision);
+ when(fsd.hasWriteLock()).thenReturn(Boolean.TRUE);
+ when(iip.getLastINode()).thenReturn(inode);
+ when(iip.getLatestSnapshotId()).thenReturn(Mockito.anyInt());
+ when(inode.getAccessTime()).thenReturn(atime0);
+
+ return FSDirAttrOp.unprotectedSetTimes(fsd, iip, mtime, atime, force);
+ }
+
+ @Test
+ public void testUnprotectedSetTimes() throws Exception {
+ // atime < access time + precision
+ assertFalse("SetTimes should not update access time"
+ + "because it's within the last precision interval",
+ unprotectedSetTimes(100, 0, 1000, -1, false));
+
+ // atime = access time + precision
+ assertFalse("SetTimes should not update access time"
+ + "because it's within the last precision interval",
+ unprotectedSetTimes(1000, 0, 1000, -1, false));
+
+ // atime > access time + precision
+ assertTrue("SetTimes should store access time",
+ unprotectedSetTimes(1011, 10, 1000, -1, false));
+
+ // atime < access time + precision, but force is set
+ assertTrue("SetTimes should store access time",
+ unprotectedSetTimes(100, 0, 1000, -1, true));
+
+ // atime < access time + precision, but mtime is set
+ assertTrue("SetTimes should store access time",
+ unprotectedSetTimes(100, 0, 1000, 1, false));
+ }
+}
[13/17] hadoop git commit: HADOOP-13911. Remove TRUSTSTORE_PASSWORD related scripts from KMS. Contributed by John Zhuge.
Posted by xg...@apache.org.
HADOOP-13911. Remove TRUSTSTORE_PASSWORD related scripts from KMS. Contributed by John Zhuge.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/30f85d7a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/30f85d7a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/30f85d7a
Branch: refs/heads/YARN-5734
Commit: 30f85d7a88a110637757cf7a1f4cdc9ed40f59fb
Parents: f678080
Author: Xiao Chen <xi...@apache.org>
Authored: Tue Dec 20 16:02:26 2016 -0800
Committer: Xiao Chen <xi...@apache.org>
Committed: Tue Dec 20 16:02:26 2016 -0800
----------------------------------------------------------------------
.../hadoop-kms/src/main/conf/kms-env.sh | 5 -----
.../hadoop-kms/src/main/libexec/kms-config.sh | 5 -----
hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh | 11 ++---------
.../hadoop-kms/src/main/tomcat/ssl-server.xml.conf | 1 -
4 files changed, 2 insertions(+), 20 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30f85d7a/hadoop-common-project/hadoop-kms/src/main/conf/kms-env.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/src/main/conf/kms-env.sh b/hadoop-common-project/hadoop-kms/src/main/conf/kms-env.sh
index 729e63a..e42904d 100644
--- a/hadoop-common-project/hadoop-kms/src/main/conf/kms-env.sh
+++ b/hadoop-common-project/hadoop-kms/src/main/conf/kms-env.sh
@@ -47,11 +47,6 @@
#
# export KMS_SSL_KEYSTORE_PASS=password
-#
-# The password of the truststore
-#
-# export KMS_SSL_TRUSTSTORE_PASS=
-
##
## Tomcat specific settings
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30f85d7a/hadoop-common-project/hadoop-kms/src/main/libexec/kms-config.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/src/main/libexec/kms-config.sh b/hadoop-common-project/hadoop-kms/src/main/libexec/kms-config.sh
index 927b4af..52dba38 100644
--- a/hadoop-common-project/hadoop-kms/src/main/libexec/kms-config.sh
+++ b/hadoop-common-project/hadoop-kms/src/main/libexec/kms-config.sh
@@ -44,11 +44,6 @@ function hadoop_subproject_init
export HADOOP_CATALINA_SSL_KEYSTORE_FILE="${KMS_SSL_KEYSTORE_FILE:-${HOME}/.keystore}"
- # this is undocumented, but older versions would rip the TRUSTSTORE_PASS out of the
- # CATALINA_OPTS
- # shellcheck disable=SC2086
- export KMS_SSL_TRUSTSTORE_PASS=${KMS_SSL_TRUSTSTORE_PASS:-"$(echo ${CATALINA_OPTS} | grep -o 'trustStorePassword=[^ ]*' | cut -f2 -d= )"}
-
export CATALINA_BASE="${CATALINA_BASE:-${HADOOP_HOME}/share/hadoop/kms/tomcat}"
export HADOOP_CATALINA_HOME="${KMS_CATALINA_HOME:-${CATALINA_BASE}}"
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30f85d7a/hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh b/hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh
index 1d3c948..7611f2a 100755
--- a/hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh
+++ b/hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh
@@ -51,11 +51,7 @@ fi
# it is used in Tomcat's server.xml configuration file
#
-# Mask the trustStorePassword
-# shellcheck disable=SC2086
-CATALINA_OPTS_DISP="$(echo ${CATALINA_OPTS} | sed -e 's/trustStorePassword=[^ ]*/trustStorePassword=***/')"
-
-hadoop_debug "Using CATALINA_OPTS: ${CATALINA_OPTS_DISP}"
+hadoop_debug "Using CATALINA_OPTS: ${CATALINA_OPTS}"
# We're using hadoop-common, so set up some stuff it might need:
hadoop_finalize
@@ -94,14 +90,11 @@ fi
# if custom, use provided password
#
if [[ -f "${HADOOP_CATALINA_HOME}/conf/ssl-server.xml.conf" ]]; then
- if [[ -n "${KMS_SSL_KEYSTORE_PASS+x}" ]] || [[ -n "${KMS_SSL_TRUSTSTORE_PASS}" ]]; then
+ if [[ -n "${KMS_SSL_KEYSTORE_PASS+x}" ]]; then
export KMS_SSL_KEYSTORE_PASS=${KMS_SSL_KEYSTORE_PASS:-password}
KMS_SSL_KEYSTORE_PASS_ESCAPED=$(hadoop_xml_escape \
"$(hadoop_sed_escape "$KMS_SSL_KEYSTORE_PASS")")
- KMS_SSL_TRUSTSTORE_PASS_ESCAPED=$(hadoop_xml_escape \
- "$(hadoop_sed_escape "$KMS_SSL_TRUSTSTORE_PASS")")
sed -e 's/"_kms_ssl_keystore_pass_"/'"\"${KMS_SSL_KEYSTORE_PASS_ESCAPED}\""'/g' \
- -e 's/"_kms_ssl_truststore_pass_"/'"\"${KMS_SSL_TRUSTSTORE_PASS_ESCAPED}\""'/g' \
"${HADOOP_CATALINA_HOME}/conf/ssl-server.xml.conf" \
> "${HADOOP_CATALINA_HOME}/conf/ssl-server.xml"
chmod 700 "${HADOOP_CATALINA_HOME}/conf/ssl-server.xml" >/dev/null 2>&1
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30f85d7a/hadoop-common-project/hadoop-kms/src/main/tomcat/ssl-server.xml.conf
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/src/main/tomcat/ssl-server.xml.conf b/hadoop-common-project/hadoop-kms/src/main/tomcat/ssl-server.xml.conf
index 01b429c..272542a 100644
--- a/hadoop-common-project/hadoop-kms/src/main/tomcat/ssl-server.xml.conf
+++ b/hadoop-common-project/hadoop-kms/src/main/tomcat/ssl-server.xml.conf
@@ -72,7 +72,6 @@
maxThreads="${kms.max.threads}" scheme="https" secure="true"
maxHttpHeaderSize="${kms.max.http.header.size}"
clientAuth="false" sslEnabledProtocols="TLSv1,TLSv1.1,TLSv1.2,SSLv2Hello"
- truststorePass="_kms_ssl_truststore_pass_"
keystoreFile="${kms.ssl.keystore.file}"
keystorePass="_kms_ssl_keystore_pass_"/>
[14/17] hadoop git commit: HDFS-11261. Document missing NameNode metrics. Contributed by Yiqun Lin.
Posted by xg...@apache.org.
HDFS-11261. Document missing NameNode metrics. Contributed by Yiqun Lin.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f6e2521e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f6e2521e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f6e2521e
Branch: refs/heads/YARN-5734
Commit: f6e2521eb216dae820846cab31397e9a88ba2f88
Parents: 30f85d7
Author: Akira Ajisaka <aa...@apache.org>
Authored: Wed Dec 21 14:17:00 2016 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Wed Dec 21 14:17:47 2016 +0900
----------------------------------------------------------------------
.../hadoop-common/src/site/markdown/Metrics.md | 6 ++++++
1 file changed, 6 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6e2521e/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 0666b3f..1d5ea5e 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -171,6 +171,10 @@ Each metrics record contains tags such as ProcessName, SessionId, and Hostname a
| `PutImageAvgTime` | Average fsimage upload time in milliseconds |
| `TotalFileOps`| Total number of file operations performed |
| `NNStartedTimeInMillis`| NameNode start time in milliseconds |
+| `GenerateEDEKTimeNumOps` | Total number of generating EDEK |
+| `GenerateEDEKTimeAvgTime` | Average time of generating EDEK in milliseconds |
+| `WarmUpEDEKTimeNumOps` | Total number of warming up EDEK |
+| `WarmUpEDEKTimeAvgTime` | Average time of warming up EDEK in milliseconds |
FSNamesystem
------------
@@ -219,6 +223,8 @@ Each metrics record contains tags such as HAState and Hostname as additional inf
| `TotalSyncTimes` | Total number of milliseconds spent by various edit logs in sync operation|
| `NameDirSize` | NameNode name directories size in bytes |
| `NumTimedOutPendingReconstructions` | The number of timed out reconstructions. Not the number of unique blocks that timed out. |
+| `FSN(Read|Write)Lock`*OperationName*`NumOps` | Total number of acquiring lock by operations |
+| `FSN(Read|Write)Lock`*OperationName*`AvgTime` | Average time of holding the lock by operations in milliseconds |
JournalNode
-----------
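The *NumOps / *AvgTime pairs documented above follow the usual Hadoop metrics2 convention, where a single MutableRate emits both counters. A hedged, illustrative sketch of how such a pair is produced (class and method names are made up for the example; this is not the NameNode's actual code):

    import org.apache.hadoop.metrics2.annotation.Metric;
    import org.apache.hadoop.metrics2.annotation.Metrics;
    import org.apache.hadoop.metrics2.lib.MutableRate;

    @Metrics(about = "Example EDEK metrics", context = "dfs")
    class ExampleEdekMetrics {
      // A MutableRate field named generateEDEKTime surfaces as the
      // GenerateEDEKTimeNumOps / GenerateEDEKTimeAvgTime pair listed above.
      @Metric("Time spent generating an EDEK")
      MutableRate generateEDEKTime;

      void addGenerateEdekSample(long elapsedMillis) {
        generateEDEKTime.add(elapsedMillis);
      }
    }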
[11/17] hadoop git commit: HDFS-10913. Introduce fault injectors to simulate slow mirrors. Contributed by Xiaobing Zhou.
Posted by xg...@apache.org.
HDFS-10913. Introduce fault injectors to simulate slow mirrors. Contributed by Xiaobing Zhou.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5daa8d86
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5daa8d86
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5daa8d86
Branch: refs/heads/YARN-5734
Commit: 5daa8d8631835de97d4e4979e507a080017ca159
Parents: 4af66b1
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Tue Dec 20 13:04:03 2016 -0800
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Tue Dec 20 13:17:52 2016 -0800
----------------------------------------------------------------------
.../hdfs/server/datanode/BlockReceiver.java | 10 +-
.../server/datanode/DataNodeFaultInjector.java | 25 ++-
.../TestClientProtocolForPipelineRecovery.java | 3 +-
.../datanode/TestDataNodeFaultInjector.java | 173 +++++++++++++++++++
4 files changed, 208 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5daa8d86/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 441bd91..23cd44d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -566,12 +566,15 @@ class BlockReceiver implements Closeable {
try {
long begin = Time.monotonicNow();
// For testing. Normally no-op.
- DataNodeFaultInjector.get().stopSendingPacketDownstream();
+ DataNodeFaultInjector.get().stopSendingPacketDownstream(mirrorAddr);
packetReceiver.mirrorPacketTo(mirrorOut);
mirrorOut.flush();
long now = Time.monotonicNow();
setLastSentTime(now);
long duration = now - begin;
+ DataNodeFaultInjector.get().logDelaySendingPacketDownstream(
+ mirrorAddr,
+ duration);
if (duration > datanodeSlowLogThresholdMs) {
LOG.warn("Slow BlockReceiver write packet to mirror took " + duration
+ "ms (threshold=" + datanodeSlowLogThresholdMs + "ms)");
@@ -1534,9 +1537,14 @@ class BlockReceiver implements Closeable {
}
// send my ack back to upstream datanode
long begin = Time.monotonicNow();
+ /* for test only, no-op in production system */
+ DataNodeFaultInjector.get().delaySendingAckToUpstream(inAddr);
replyAck.write(upstreamOut);
upstreamOut.flush();
long duration = Time.monotonicNow() - begin;
+ DataNodeFaultInjector.get().logDelaySendingAckToUpstream(
+ inAddr,
+ duration);
if (duration > datanodeSlowLogThresholdMs) {
LOG.warn("Slow PacketResponder send ack to upstream took " + duration
+ "ms (threshold=" + datanodeSlowLogThresholdMs + "ms), " + myString
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5daa8d86/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
index aa06aa1..b74d2c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
@@ -50,7 +50,30 @@ public class DataNodeFaultInjector {
return false;
}
- public void stopSendingPacketDownstream() throws IOException {}
+ public void stopSendingPacketDownstream(final String mirrAddr)
+ throws IOException {
+ }
+
+ /**
+ * Used as a hook to intercept the latency of sending packet.
+ */
+ public void logDelaySendingPacketDownstream(
+ final String mirrAddr,
+ final long delayMs) throws IOException {
+ }
+
+ public void delaySendingAckToUpstream(final String upstreamAddr)
+ throws IOException {
+ }
+
+ /**
+ * Used as a hook to intercept the latency of sending ack.
+ */
+ public void logDelaySendingAckToUpstream(
+ final String upstreamAddr,
+ final long delayMs)
+ throws IOException {
+ }
public void noRegistration() throws IOException { }
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5daa8d86/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
index 65a484c..1a640b4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
@@ -453,7 +453,8 @@ public class TestClientProtocolForPipelineRecovery {
DataNodeFaultInjector dnFaultInjector = new DataNodeFaultInjector() {
int tries = 1;
@Override
- public void stopSendingPacketDownstream() throws IOException {
+ public void stopSendingPacketDownstream(final String mirrAddr)
+ throws IOException {
if (tries > 0) {
tries--;
try {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5daa8d86/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java
new file mode 100644
index 0000000..fe65429
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java
@@ -0,0 +1,173 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.PathUtils;
+import org.junit.Test;
+
+/**
+ * This class tests various cases where faults are injected to DataNode.
+ */
+public class TestDataNodeFaultInjector {
+ private static final Log LOG = LogFactory
+ .getLog(TestDataNodeFaultInjector.class);
+
+ private static class MetricsDataNodeFaultInjector
+ extends DataNodeFaultInjector {
+
+ public static final long DELAY = 2000;
+ private long delayMs = 0;
+ private final String err = "Interrupted while sleeping. Bailing out.";
+ private long delayTries = 1;
+
+ void delayOnce() throws IOException {
+ if (delayTries > 0) {
+ delayTries--;
+ try {
+ Thread.sleep(DELAY);
+ } catch (InterruptedException ie) {
+ throw new IOException(err);
+ }
+ }
+ }
+
+ long getDelayMs() {
+ return delayMs;
+ }
+
+ void logDelay(final long duration) {
+ /**
+ * delay should be at least longer than DELAY, otherwise, delayXYZ is
+ * no-op
+ */
+ if (duration >= DELAY) {
+ this.delayMs = duration;
+ }
+ }
+ }
+
+ @Test(timeout = 60000)
+ public void testDelaySendingAckToUpstream() throws Exception {
+ final MetricsDataNodeFaultInjector mdnFaultInjector =
+ new MetricsDataNodeFaultInjector() {
+ @Override
+ public void delaySendingAckToUpstream(final String upstreamAddr)
+ throws IOException {
+ delayOnce();
+ }
+
+ @Override
+ public void logDelaySendingAckToUpstream(final String upstreamAddr,
+ final long delayMs) throws IOException {
+ logDelay(delayMs);
+ }
+ };
+ verifyFaultInjectionDelayPipeline(mdnFaultInjector);
+ }
+
+ @Test(timeout = 60000)
+ public void testDelaySendingPacketDownstream() throws Exception {
+ final MetricsDataNodeFaultInjector mdnFaultInjector =
+ new MetricsDataNodeFaultInjector() {
+ @Override
+ public void stopSendingPacketDownstream(final String mirrAddr)
+ throws IOException {
+ delayOnce();
+ }
+
+ @Override
+ public void logDelaySendingPacketDownstream(final String mirrAddr,
+ final long delayMs) throws IOException {
+ logDelay(delayMs);
+ }
+ };
+ verifyFaultInjectionDelayPipeline(mdnFaultInjector);
+ }
+
+ private void verifyFaultInjectionDelayPipeline(
+ final MetricsDataNodeFaultInjector mdnFaultInjector) throws Exception {
+
+ final Path baseDir = new Path(
+ PathUtils.getTestDir(getClass()).getAbsolutePath(),
+ GenericTestUtils.getMethodName());
+ final DataNodeFaultInjector oldDnInjector = DataNodeFaultInjector.get();
+ DataNodeFaultInjector.set(mdnFaultInjector);
+
+ final Configuration conf = new HdfsConfiguration();
+
+ /*
+ * MetricsDataNodeFaultInjector.DELAY/2 ms is viewed as slow.
+ */
+ final long datanodeSlowLogThresholdMs = MetricsDataNodeFaultInjector.DELAY
+ / 2;
+ conf.setLong(DFSConfigKeys.DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY,
+ datanodeSlowLogThresholdMs);
+ conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.toString());
+
+ /**
+ * configure to avoid resulting in pipeline failure due to read socket
+ * timeout
+ */
+ conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,
+ MetricsDataNodeFaultInjector.DELAY * 2);
+ conf.setBoolean(
+ HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.ENABLE_KEY,
+ true);
+ conf.set(
+ HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.POLICY_KEY,
+ "ALWAYS");
+
+ MiniDFSCluster cluster = null;
+ try {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+ cluster.waitActive();
+
+ final FileSystem fs = cluster.getFileSystem();
+ try (final FSDataOutputStream out = fs
+ .create(new Path(baseDir, "test.data"), (short) 2)) {
+ out.write(0x31);
+ out.hflush();
+ out.hsync();
+ }
+ LOG.info("delay info: " + mdnFaultInjector.getDelayMs() + ":"
+ + datanodeSlowLogThresholdMs);
+ assertTrue("Injected delay should be longer than the configured one",
+ mdnFaultInjector.getDelayMs() > datanodeSlowLogThresholdMs);
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ DataNodeFaultInjector.set(oldDnInjector);
+ }
+ }
+}
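Beyond this metrics-oriented test, the general pattern for the new hooks is to install a custom injector for the duration of a test and restore the default afterwards. A hedged sketch (the injected delay and the write under test are placeholders):

    final DataNodeFaultInjector oldInjector = DataNodeFaultInjector.get();
    DataNodeFaultInjector.set(new DataNodeFaultInjector() {
      @Override
      public void delaySendingAckToUpstream(final String upstreamAddr)
          throws IOException {
        try {
          Thread.sleep(2000);   // simulate a slow ack path to the upstream node
        } catch (InterruptedException ie) {
          throw new IOException(ie);
        }
      }
    });
    try {
      // ... perform the pipeline write being exercised ...
    } finally {
      DataNodeFaultInjector.set(oldInjector);   // restore the no-op injector
    }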
[06/17] hadoop git commit: HDFS-11263. ClassCastException when we use Bzipcodec for Fsimage compression. Contributed by Brahma Reddy Battula.
Posted by xg...@apache.org.
HDFS-11263. ClassCastException when we use Bzipcodec for Fsimage compression. Contributed by Brahma Reddy Battula.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1b401f6a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1b401f6a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1b401f6a
Branch: refs/heads/YARN-5734
Commit: 1b401f6a734df4e23a79b3bd89c816a1fc0de574
Parents: fe4ff64
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Tue Dec 20 20:54:03 2016 +0530
Committer: Brahma Reddy Battula <br...@apache.org>
Committed: Tue Dec 20 20:54:03 2016 +0530
----------------------------------------------------------------------
.../hdfs/server/namenode/FSImageFormatProtobuf.java | 4 ++--
.../apache/hadoop/hdfs/server/namenode/TestFSImage.java | 12 +++++++++---
2 files changed, 11 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b401f6a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
index 7a81f9e..22331fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
@@ -40,6 +40,7 @@ import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
+import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -63,7 +64,6 @@ import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;
import org.apache.hadoop.hdfs.util.MD5FileUtils;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.io.compress.CompressionCodec;
-import org.apache.hadoop.io.compress.CompressorStream;
import org.apache.hadoop.util.LimitInputStream;
import org.apache.hadoop.util.Time;
@@ -417,7 +417,7 @@ public final class FSImageFormatProtobuf {
private void flushSectionOutputStream() throws IOException {
if (codec != null) {
- ((CompressorStream) sectionOutputStream).finish();
+ ((CompressionOutputStream) sectionOutputStream).finish();
}
sectionOutputStream.flush();
}
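The ClassCastException occurred because not every codec's createOutputStream() hands back a CompressorStream (BZip2Codec's, for one, does not), while all of them return a CompressionOutputStream, which already declares finish(). A minimal sketch of the corrected pattern (method and variable names are illustrative):

    static void flushSection(CompressionCodec codec, OutputStream sectionOut)
        throws IOException {
      if (codec != null) {
        // CompressionOutputStream is the common supertype, so this cast
        // holds for Gzip, BZip2, Lz4 and the default codec alike.
        ((CompressionOutputStream) sectionOut).finish();
      }
      sectionOut.flush();
    }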
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b401f6a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
index 0d8431d..f839270 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
@@ -52,7 +52,6 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.StripedFileTestUtil;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
@@ -84,8 +83,15 @@ public class TestFSImage {
public void testCompression() throws IOException {
Configuration conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
- conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
- "org.apache.hadoop.io.compress.GzipCodec");
+ setCompressCodec(conf, "org.apache.hadoop.io.compress.DefaultCodec");
+ setCompressCodec(conf, "org.apache.hadoop.io.compress.GzipCodec");
+ setCompressCodec(conf, "org.apache.hadoop.io.compress.BZip2Codec");
+ setCompressCodec(conf, "org.apache.hadoop.io.compress.Lz4Codec");
+ }
+
+ private void setCompressCodec(Configuration conf, String compressCodec)
+ throws IOException {
+ conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY, compressCodec);
testPersistHelper(conf);
}
[02/17] hadoop git commit: YARN-5524. Yarn live log aggregation does not throw if command line arg is wrong. Contributed by Xuan Gong.
Posted by xg...@apache.org.
YARN-5524. Yarn live log aggregation does not throw if command line arg is wrong. Contributed by Xuan Gong.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/35834139
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/35834139
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/35834139
Branch: refs/heads/YARN-5734
Commit: 358341398adfe8f59bb2165472b50751de3ffb98
Parents: ef2dd7b
Author: Naganarasimha <na...@apache.org>
Authored: Tue Dec 20 06:36:25 2016 +0530
Committer: Naganarasimha <na...@apache.org>
Committed: Tue Dec 20 06:36:25 2016 +0530
----------------------------------------------------------------------
.../org/apache/hadoop/yarn/client/cli/LogsCLI.java | 2 +-
.../apache/hadoop/yarn/client/cli/TestLogsCLI.java | 15 +++++++++++++++
2 files changed, 16 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/35834139/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
index 9b21ff8..a9ca96c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
@@ -138,7 +138,7 @@ public class LogsCLI extends Configured implements Tool {
String localDir = null;
long bytes = Long.MAX_VALUE;
try {
- CommandLine commandLine = parser.parse(opts, args, true);
+ CommandLine commandLine = parser.parse(opts, args, false);
appIdStr = commandLine.getOptionValue(APPLICATION_ID_OPTION);
containerIdStr = commandLine.getOptionValue(CONTAINER_ID_OPTION);
nodeAddress = commandLine.getOptionValue(NODE_ADDRESS_OPTION);
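The one-character change above flips commons-cli's stopAtNonOption flag: with true, parse() silently stops at tokens it does not recognize, so a mistyped flag went unreported; with false, it throws and the CLI can surface the error. A hedged sketch of the difference (the parser class and option set here are illustrative, not LogsCLI's full setup):

    import org.apache.commons.cli.*;

    Options opts = new Options();
    opts.addOption("applicationId", true, "ApplicationId of the job");
    CommandLineParser parser = new GnuParser();
    try {
      // stopAtNonOption=false: an unknown flag is an error, not a leftover arg.
      parser.parse(opts, new String[] {"-InvalidOpts"}, false);
    } catch (ParseException pe) {
      // e.g. "options parsing failed: Unrecognized option: -InvalidOpts"
      System.err.println("options parsing failed: " + pe.getMessage());
    }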
http://git-wip-us.apache.org/repos/asf/hadoop/blob/35834139/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
index a564f6f..ef164a5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
@@ -126,6 +126,21 @@ public class TestLogsCLI {
assertTrue("Should return an error code", exitCode != 0);
}
+ @Test(timeout = 1000l)
+ public void testInvalidOpts() throws Exception {
+ Configuration conf = new YarnConfiguration();
+ YarnClient mockYarnClient = createMockYarnClient(
+ YarnApplicationState.FINISHED,
+ UserGroupInformation.getCurrentUser().getShortUserName());
+ LogsCLI cli = new LogsCLIForTest(mockYarnClient);
+ cli.setConf(conf);
+
+ int exitCode = cli.run( new String[] { "-InvalidOpts"});
+ assertTrue(exitCode == -1);
+ assertTrue(sysErrStream.toString().contains(
+ "options parsing failed: Unrecognized option: -InvalidOpts"));
+ }
+
@Test(timeout = 5000l)
public void testInvalidApplicationId() throws Exception {
Configuration conf = new YarnConfiguration();
[15/17] hadoop git commit: YARN-5976. Update hbase version to 1.2. Contributed by Vrushali C.
Posted by xg...@apache.org.
YARN-5976. Update hbase version to 1.2. Contributed by Vrushali C.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8b042bc1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8b042bc1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8b042bc1
Branch: refs/heads/YARN-5734
Commit: 8b042bc1e6ae5e18d435d6a184dec1811cc3a513
Parents: f6e2521
Author: Sangjin Lee <sj...@apache.org>
Authored: Wed Dec 21 09:43:17 2016 -0800
Committer: Sangjin Lee <sj...@apache.org>
Committed: Wed Dec 21 09:43:17 2016 -0800
----------------------------------------------------------------------
LICENSE.txt | 8 +-
hadoop-project/pom.xml | 26 +-
.../pom.xml | 142 +-------
...TestPhoenixOfflineAggregationWriterImpl.java | 161 ---------
.../hadoop-yarn-server-timelineservice/pom.xml | 30 +-
.../PhoenixOfflineAggregationWriterImpl.java | 358 -------------------
.../storage/TimelineSchemaCreator.java | 22 --
7 files changed, 27 insertions(+), 720 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b042bc1/LICENSE.txt
----------------------------------------------------------------------
diff --git a/LICENSE.txt b/LICENSE.txt
index 2183f0e..fd07edf 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -1643,12 +1643,6 @@ JLine 0.9.94
leveldbjni-all 1.8
Hamcrest Core 1.3
xmlenc Library 0.52
-StringTemplate 4 4.0.7
-ANTLR 3 Tool 3.5
-ANTLR 3 Runtime 3.5
-ANTLR StringTemplate 3.2.1
-ASM All 5.0.2
-sqlline 1.1.8
--------------------------------------------------------------------------------
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
@@ -1879,7 +1873,7 @@ the Licensor and You.
The binary distribution of this product bundles these dependencies under the
following license:
-jamon-runtime 2.3.1
+jamon-runtime 2.4.1
--------------------------------------------------------------------------------
MOZILLA PUBLIC LICENSE
Version 1.1
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b042bc1/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index c9ee793..a935292 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -49,8 +49,7 @@
<xerces.jdiff.version>2.11.0</xerces.jdiff.version>
<kafka.version>0.8.2.1</kafka.version>
- <hbase.version>1.1.3</hbase.version>
- <phoenix.version>4.7.0-HBase-1.1</phoenix.version>
+ <hbase.version>1.2.4</hbase.version>
<hbase-compatible-hadoop.version>2.5.1</hbase-compatible-hadoop.version>
<hadoop.assemblies.version>${project.version}</hadoop.assemblies.version>
@@ -1219,29 +1218,6 @@
<classifier>tests</classifier>
</dependency>
<dependency>
- <groupId>org.apache.phoenix</groupId>
- <artifactId>phoenix-core</artifactId>
- <version>${phoenix.version}</version>
- <exclusions>
- <!-- Exclude jline from here -->
- <exclusion>
- <artifactId>jline</artifactId>
- <groupId>jline</groupId>
- </exclusion>
- <exclusion>
- <artifactId>joda-time</artifactId>
- <groupId>joda-time</groupId>
- </exclusion>
- </exclusions>
- </dependency>
- <dependency>
- <groupId>org.apache.phoenix</groupId>
- <artifactId>phoenix-core</artifactId>
- <type>test-jar</type>
- <version>${phoenix.version}</version>
- <scope>test</scope>
- </dependency>
- <dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-it</artifactId>
<version>${hbase.version}</version>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b042bc1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
index 026ef75..f151e1d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
@@ -55,10 +55,6 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
</exclusion>
- <exclusion>
- <groupId>org.apache.phoenix</groupId>
- <artifactId>phoenix-core</artifactId>
- </exclusion>
</exclusions>
</dependency>
@@ -79,6 +75,8 @@
</exclusions>
</dependency>
+ <!-- 'mvn dependency:analyze' fails to detect use of this direct
+ dependency -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-auth</artifactId>
@@ -118,18 +116,6 @@
dependency -->
<dependency>
<groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-yarn-server-common</artifactId>
- <scope>test</scope>
- <exclusions>
- <exclusion>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-common</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
-
- <dependency>
- <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-server-applicationhistoryservice</artifactId>
<scope>test</scope>
<exclusions>
@@ -148,14 +134,14 @@
<dependency>
<groupId>com.sun.jersey</groupId>
- <artifactId>jersey-core</artifactId>
+ <artifactId>jersey-client</artifactId>
<scope>test</scope>
</dependency>
<dependency>
- <groupId>com.sun.jersey</groupId>
- <artifactId>jersey-client</artifactId>
- <scope>test</scope>
+ <groupId>javax.ws.rs</groupId>
+ <artifactId>jsr311-api</artifactId>
+ <version>1.1.1</version>
</dependency>
<dependency>
@@ -228,23 +214,6 @@
<dependency>
<groupId>org.apache.hbase</groupId>
- <artifactId>hbase-common</artifactId>
- <classifier>tests</classifier>
- <scope>test</scope>
- <exclusions>
- <exclusion>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-common</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-mapreduce-client-core</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
-
- <dependency>
- <groupId>org.apache.hbase</groupId>
<artifactId>hbase-server</artifactId>
<classifier>tests</classifier>
<scope>test</scope>
@@ -279,99 +248,6 @@
<!-- 'mvn dependency:analyze' fails to detect use of this direct
dependency -->
<dependency>
- <groupId>org.apache.hbase</groupId>
- <artifactId>hbase-it</artifactId>
- <scope>test</scope>
- <classifier>tests</classifier>
- <exclusions>
- <exclusion>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-common</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-auth</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-hdfs</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-client</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-mapreduce-client-core</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
-
- <dependency>
- <groupId>org.apache.phoenix</groupId>
- <artifactId>phoenix-core</artifactId>
- <scope>test</scope>
- <exclusions>
- <exclusion>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-common</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-auth</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-hdfs</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-mapreduce-client-core</artifactId>
- </exclusion>
- <exclusion>
- <groupId>net.sourceforge.findbugs</groupId>
- <artifactId>annotations</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
-
- <dependency>
- <groupId>org.apache.phoenix</groupId>
- <artifactId>phoenix-core</artifactId>
- <type>test-jar</type>
- <scope>test</scope>
- <exclusions>
- <exclusion>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-common</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-auth</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-hdfs</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-mapreduce-client-core</artifactId>
- </exclusion>
- <exclusion>
- <groupId>net.sourceforge.findbugs</groupId>
- <artifactId>annotations</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
-
- <!-- for runtime dependencies -->
- <dependency>
- <groupId>org.mockito</groupId>
- <artifactId>mockito-all</artifactId>
- <scope>test</scope>
- </dependency>
-
- <dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>${hbase-compatible-hadoop.version}</version>
@@ -385,6 +261,8 @@
</exclusions>
</dependency>
+ <!-- 'mvn dependency:analyze' fails to detect use of this direct
+ dependency -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
@@ -392,6 +270,8 @@
<scope>test</scope>
</dependency>
+ <!-- 'mvn dependency:analyze' fails to detect use of this direct
+ dependency -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
@@ -429,6 +309,8 @@
</exclusions>
</dependency>
+ <!-- 'mvn dependency:analyze' fails to detect use of this direct
+ dependency -->
<!-- Jetty 9 is needed by the ATS code -->
<dependency>
<groupId>org.eclipse.jetty</groupId>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b042bc1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestPhoenixOfflineAggregationWriterImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestPhoenixOfflineAggregationWriterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestPhoenixOfflineAggregationWriterImpl.java
deleted file mode 100644
index e34ae90..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestPhoenixOfflineAggregationWriterImpl.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import org.apache.hadoop.hbase.IntegrationTestingUtility;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.OfflineAggregationInfo;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.apache.phoenix.hbase.index.write.IndexWriterUtils;
-import org.apache.phoenix.query.BaseTest;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.util.ReadOnlyProps;
-
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.HashMap;
-import java.util.Map;
-
-import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
-
-public class TestPhoenixOfflineAggregationWriterImpl extends BaseTest {
- private static PhoenixOfflineAggregationWriterImpl storage;
- private static final int BATCH_SIZE = 3;
-
- @BeforeClass
- public static void setup() throws Exception {
- YarnConfiguration conf = new YarnConfiguration();
- storage = setupPhoenixClusterAndWriterForTest(conf);
- }
-
- @Test(timeout = 90000)
- public void testFlowLevelAggregationStorage() throws Exception {
- testAggregator(OfflineAggregationInfo.FLOW_AGGREGATION);
- }
-
- @Test(timeout = 90000)
- public void testUserLevelAggregationStorage() throws Exception {
- testAggregator(OfflineAggregationInfo.USER_AGGREGATION);
- }
-
- @AfterClass
- public static void cleanup() throws Exception {
- storage.dropTable(OfflineAggregationInfo.FLOW_AGGREGATION_TABLE_NAME);
- storage.dropTable(OfflineAggregationInfo.USER_AGGREGATION_TABLE_NAME);
- tearDownMiniCluster();
- }
-
- private static PhoenixOfflineAggregationWriterImpl
- setupPhoenixClusterAndWriterForTest(YarnConfiguration conf)
- throws Exception {
- Map<String, String> props = new HashMap<>();
- // Must update config before starting server
- props.put(QueryServices.STATS_USE_CURRENT_TIME_ATTRIB,
- Boolean.FALSE.toString());
- props.put("java.security.krb5.realm", "");
- props.put("java.security.krb5.kdc", "");
- props.put(IntegrationTestingUtility.IS_DISTRIBUTED_CLUSTER,
- Boolean.FALSE.toString());
- props.put(QueryServices.QUEUE_SIZE_ATTRIB, Integer.toString(5000));
- props.put(IndexWriterUtils.HTABLE_THREAD_KEY, Integer.toString(100));
- // Make a small batch size to test multiple calls to reserve sequences
- props.put(QueryServices.SEQUENCE_CACHE_SIZE_ATTRIB,
- Long.toString(BATCH_SIZE));
- // Must update config before starting server
- setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
-
- // Change connection settings for test
- conf.set(
- YarnConfiguration.PHOENIX_OFFLINE_STORAGE_CONN_STR,
- getUrl());
- PhoenixOfflineAggregationWriterImpl
- myWriter = new PhoenixOfflineAggregationWriterImpl(TEST_PROPERTIES);
- myWriter.init(conf);
- myWriter.start();
- myWriter.createPhoenixTables();
- return myWriter;
- }
-
- private static TimelineEntity getTestAggregationTimelineEntity() {
- TimelineEntity entity = new TimelineEntity();
- String id = "hello1";
- String type = "testAggregationType";
- entity.setId(id);
- entity.setType(type);
- entity.setCreatedTime(1425016501000L);
-
- TimelineMetric metric = new TimelineMetric();
- metric.setId("HDFS_BYTES_READ");
- metric.addValue(1425016501100L, 8000);
- entity.addMetric(metric);
-
- return entity;
- }
-
- private void testAggregator(OfflineAggregationInfo aggregationInfo)
- throws Exception {
- // Set up a list of timeline entities and write them back to Phoenix
- int numEntity = 1;
- TimelineEntities te = new TimelineEntities();
- te.addEntity(getTestAggregationTimelineEntity());
- TimelineCollectorContext context = new TimelineCollectorContext("cluster_1",
- "user1", "testFlow", null, 0L, null);
- storage.writeAggregatedEntity(context, te,
- aggregationInfo);
-
- // Verify if we're storing all entities
- String[] primaryKeyList = aggregationInfo.getPrimaryKeyList();
- String sql = "SELECT COUNT(" + primaryKeyList[primaryKeyList.length - 1]
- +") FROM " + aggregationInfo.getTableName();
- verifySQLWithCount(sql, numEntity, "Number of entities should be ");
- // Check metric
- sql = "SELECT COUNT(m.HDFS_BYTES_READ) FROM "
- + aggregationInfo.getTableName() + "(m.HDFS_BYTES_READ VARBINARY) ";
- verifySQLWithCount(sql, numEntity,
- "Number of entities with info should be ");
- }
-
-
- private void verifySQLWithCount(String sql, int targetCount, String message)
- throws Exception {
- try (
- Statement stmt =
- storage.getConnection().createStatement();
- ResultSet rs = stmt.executeQuery(sql)) {
- assertTrue("Result set empty on statement " + sql, rs.next());
- assertNotNull("Fail to execute query " + sql, rs);
- assertEquals(message + " " + targetCount, targetCount, rs.getInt(1));
- } catch (SQLException se) {
- fail("SQL exception on query: " + sql
- + " With exception message: " + se.getLocalizedMessage());
- }
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b042bc1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
index 6b535c3..69c4c7e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
@@ -91,8 +91,13 @@
</dependency>
<dependency>
- <groupId>com.sun.jersey</groupId>
- <artifactId>jersey-core</artifactId>
+ <groupId>com.fasterxml.jackson.core</groupId>
+ <artifactId>jackson-core</artifactId>
+ </dependency>
+
+ <dependency>
+ <groupId>com.fasterxml.jackson.core</groupId>
+ <artifactId>jackson-databind</artifactId>
</dependency>
<dependency>
@@ -121,6 +126,12 @@
</dependency>
<dependency>
+ <groupId>javax.ws.rs</groupId>
+ <artifactId>jsr311-api</artifactId>
+ <version>1.1.1</version>
+ </dependency>
+
+ <dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-common</artifactId>
<exclusions>
@@ -181,21 +192,6 @@
</exclusions>
</dependency>
- <dependency>
- <groupId>org.apache.phoenix</groupId>
- <artifactId>phoenix-core</artifactId>
- <exclusions>
- <exclusion>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-mapreduce-client-core</artifactId>
- </exclusion>
- <exclusion>
- <groupId>net.sourceforge.findbugs</groupId>
- <artifactId>annotations</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
-
<!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
<dependency>
<groupId>org.apache.hadoop</groupId>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b042bc1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/PhoenixOfflineAggregationWriterImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/PhoenixOfflineAggregationWriterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/PhoenixOfflineAggregationWriterImpl.java
deleted file mode 100644
index 130cb6c..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/PhoenixOfflineAggregationWriterImpl.java
+++ /dev/null
@@ -1,358 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience.Private;
-import org.apache.hadoop.classification.InterfaceStability.Unstable;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineWriteResponse;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.timeline.GenericObjectMapper;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.OfflineAggregationInfo;
-import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
-import org.apache.phoenix.util.PropertiesUtil;
-
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
-
-/**
- * Offline aggregation Phoenix storage. This storage currently consists of two
- * aggregation tables, one for flow level aggregation and one for user level
- * aggregation.
- *
- * Example table record:
- *
- * <pre>
- * |---------------------------|
- * | Primary | Column Family|
- * | key | metrics |
- * |---------------------------|
- * | row_key | metricId1: |
- * | | metricValue1 |
- * | | @timestamp1 |
- * | | |
- * | | metriciD1: |
- * | | metricValue2 |
- * | | @timestamp2 |
- * | | |
- * | | metricId2: |
- * | | metricValue1 |
- * | | @timestamp2 |
- * | | |
- * | | |
- * | | |
- * | | |
- * | | |
- * | | |
- * | | |
- * | | |
- * | | |
- * | | |
- * | | |
- * | | |
- * | | |
- * | | |
- * |---------------------------|
- * </pre>
- *
- * For the flow aggregation table, the primary key contains user, cluster, and
- * flow id. For user aggregation table,the primary key is user.
- *
- * Metrics column family stores all aggregated metrics for each record.
- */
-@Private
-@Unstable
-public class PhoenixOfflineAggregationWriterImpl
- extends OfflineAggregationWriter {
-
- private static final Log LOG
- = LogFactory.getLog(PhoenixOfflineAggregationWriterImpl.class);
- private static final String PHOENIX_COL_FAMILY_PLACE_HOLDER
- = "timeline_cf_placeholder";
-
- /** Default Phoenix JDBC driver name. */
- private static final String DRIVER_CLASS_NAME
- = "org.apache.phoenix.jdbc.PhoenixDriver";
-
- /** Default Phoenix timeline config column family. */
- private static final String METRIC_COLUMN_FAMILY = "m.";
- /** Default Phoenix timeline info column family. */
- private static final String INFO_COLUMN_FAMILY = "i.";
- /** Default separator for Phoenix storage. */
- private static final String AGGREGATION_STORAGE_SEPARATOR = ";";
-
- /** Connection string to the deployed Phoenix cluster. */
- private String connString = null;
- private Properties connProperties = new Properties();
-
- public PhoenixOfflineAggregationWriterImpl(Properties prop) {
- super(PhoenixOfflineAggregationWriterImpl.class.getName());
- connProperties = PropertiesUtil.deepCopy(prop);
- }
-
- public PhoenixOfflineAggregationWriterImpl() {
- super(PhoenixOfflineAggregationWriterImpl.class.getName());
- }
-
- @Override
- public void serviceInit(Configuration conf) throws Exception {
- Class.forName(DRIVER_CLASS_NAME);
- // so check it here and only read in the config if it's not overridden.
- connString =
- conf.get(YarnConfiguration.PHOENIX_OFFLINE_STORAGE_CONN_STR,
- YarnConfiguration.PHOENIX_OFFLINE_STORAGE_CONN_STR_DEFAULT);
- super.init(conf);
- }
-
- @Override
- public TimelineWriteResponse writeAggregatedEntity(
- TimelineCollectorContext context, TimelineEntities entities,
- OfflineAggregationInfo info) throws IOException {
- TimelineWriteResponse response = new TimelineWriteResponse();
- String sql = "UPSERT INTO " + info.getTableName()
- + " (" + StringUtils.join(info.getPrimaryKeyList(), ",")
- + ", created_time, metric_names) "
- + "VALUES ("
- + StringUtils.repeat("?,", info.getPrimaryKeyList().length)
- + "?, ?)";
- if (LOG.isDebugEnabled()) {
- LOG.debug("TimelineEntity write SQL: " + sql);
- }
-
- try (Connection conn = getConnection();
- PreparedStatement ps = conn.prepareStatement(sql)) {
- for (TimelineEntity entity : entities.getEntities()) {
- HashMap<String, TimelineMetric> formattedMetrics = new HashMap<>();
- if (entity.getMetrics() != null) {
- for (TimelineMetric m : entity.getMetrics()) {
- formattedMetrics.put(m.getId(), m);
- }
- }
- int idx = info.setStringsForPrimaryKey(ps, context, null, 1);
- ps.setLong(idx++, entity.getCreatedTime());
- ps.setString(idx++,
- StringUtils.join(formattedMetrics.keySet().toArray(),
- AGGREGATION_STORAGE_SEPARATOR));
- ps.execute();
-
- storeEntityVariableLengthFields(entity, formattedMetrics, context, conn,
- info);
-
- conn.commit();
- }
- } catch (SQLException se) {
- LOG.error("Failed to add entity to Phoenix " + se.getMessage());
- throw new IOException(se);
- } catch (Exception e) {
- LOG.error("Exception on getting connection: " + e.getMessage());
- throw new IOException(e);
- }
- return response;
- }
-
- /**
- * Create Phoenix tables for offline aggregation storage if the tables do not
- * exist.
- *
- * @throws IOException if any problem happens while creating Phoenix tables.
- */
- public void createPhoenixTables() throws IOException {
- // Create tables if necessary
- try (Connection conn = getConnection();
- Statement stmt = conn.createStatement()) {
- // Table schema defined as in YARN-3817.
- String sql = "CREATE TABLE IF NOT EXISTS "
- + OfflineAggregationInfo.FLOW_AGGREGATION_TABLE_NAME
- + "(user VARCHAR NOT NULL, cluster VARCHAR NOT NULL, "
- + "flow_name VARCHAR NOT NULL, "
- + "created_time UNSIGNED_LONG, "
- + METRIC_COLUMN_FAMILY + PHOENIX_COL_FAMILY_PLACE_HOLDER
- + " VARBINARY, "
- + "metric_names VARCHAR, info_keys VARCHAR "
- + "CONSTRAINT pk PRIMARY KEY("
- + "user, cluster, flow_name))";
- stmt.executeUpdate(sql);
- sql = "CREATE TABLE IF NOT EXISTS "
- + OfflineAggregationInfo.USER_AGGREGATION_TABLE_NAME
- + "(user VARCHAR NOT NULL, cluster VARCHAR NOT NULL, "
- + "created_time UNSIGNED_LONG, "
- + METRIC_COLUMN_FAMILY + PHOENIX_COL_FAMILY_PLACE_HOLDER
- + " VARBINARY, "
- + "metric_names VARCHAR, info_keys VARCHAR "
- + "CONSTRAINT pk PRIMARY KEY(user, cluster))";
- stmt.executeUpdate(sql);
- conn.commit();
- } catch (SQLException se) {
- LOG.error("Failed in init data " + se.getLocalizedMessage());
- throw new IOException(se);
- }
- return;
- }
-
- // Utility functions
- @Private
- @VisibleForTesting
- Connection getConnection() throws IOException {
- Connection conn;
- try {
- conn = DriverManager.getConnection(connString, connProperties);
- conn.setAutoCommit(false);
- } catch (SQLException se) {
- LOG.error("Failed to connect to phoenix server! "
- + se.getLocalizedMessage());
- throw new IOException(se);
- }
- return conn;
- }
-
- // WARNING: This method will permanently drop a table!
- @Private
- @VisibleForTesting
- void dropTable(String tableName) throws Exception {
- try (Connection conn = getConnection();
- Statement stmt = conn.createStatement()) {
- String sql = "DROP TABLE " + tableName;
- stmt.executeUpdate(sql);
- } catch (SQLException se) {
- LOG.error("Failed in dropping entity table " + se.getLocalizedMessage());
- throw se;
- }
- }
-
- private static class DynamicColumns<K> {
- static final String COLUMN_FAMILY_TYPE_BYTES = " VARBINARY";
- static final String COLUMN_FAMILY_TYPE_STRING = " VARCHAR";
- private String columnFamilyPrefix;
- private String type;
- private Set<K> columns;
-
- public DynamicColumns(String columnFamilyPrefix, String type,
- Set<K> keyValues) {
- this.columnFamilyPrefix = columnFamilyPrefix;
- this.columns = keyValues;
- this.type = type;
- }
- }
-
- private static <K> StringBuilder appendColumnsSQL(
- StringBuilder colNames, DynamicColumns<K> cfInfo) {
- // Prepare the sql template by iterating through all keys
- for (K key : cfInfo.columns) {
- colNames.append(",").append(cfInfo.columnFamilyPrefix)
- .append(key.toString()).append(cfInfo.type);
- }
- return colNames;
- }
-
- private static <K, V> int setValuesForColumnFamily(
- PreparedStatement ps, Map<K, V> keyValues, int startPos,
- boolean converToBytes) throws SQLException {
- int idx = startPos;
- for (Map.Entry<K, V> entry : keyValues.entrySet()) {
- V value = entry.getValue();
- if (value instanceof Collection) {
- ps.setString(idx++, StringUtils.join(
- (Collection) value, AGGREGATION_STORAGE_SEPARATOR));
- } else {
- if (converToBytes) {
- try {
- ps.setBytes(idx++, GenericObjectMapper.write(entry.getValue()));
- } catch (IOException ie) {
- LOG.error("Exception in converting values into bytes "
- + ie.getMessage());
- throw new SQLException(ie);
- }
- } else {
- ps.setString(idx++, value.toString());
- }
- }
- }
- return idx;
- }
-
- private static <K, V> int setBytesForColumnFamily(
- PreparedStatement ps, Map<K, V> keyValues, int startPos)
- throws SQLException {
- return setValuesForColumnFamily(ps, keyValues, startPos, true);
- }
-
- private static <K, V> int setStringsForColumnFamily(
- PreparedStatement ps, Map<K, V> keyValues, int startPos)
- throws SQLException {
- return setValuesForColumnFamily(ps, keyValues, startPos, false);
- }
-
- private static void storeEntityVariableLengthFields(TimelineEntity entity,
- Map<String, TimelineMetric> formattedMetrics,
- TimelineCollectorContext context, Connection conn,
- OfflineAggregationInfo aggregationInfo) throws SQLException {
- int numPlaceholders = 0;
- StringBuilder columnDefs = new StringBuilder(
- StringUtils.join(aggregationInfo.getPrimaryKeyList(), ","));
- if (formattedMetrics != null && formattedMetrics.size() > 0) {
- appendColumnsSQL(columnDefs, new DynamicColumns<>(
- METRIC_COLUMN_FAMILY, DynamicColumns.COLUMN_FAMILY_TYPE_BYTES,
- formattedMetrics.keySet()));
- numPlaceholders += formattedMetrics.keySet().size();
- }
- if (numPlaceholders == 0) {
- return;
- }
- StringBuilder placeholders = new StringBuilder();
- placeholders.append(
- StringUtils.repeat("?,", aggregationInfo.getPrimaryKeyList().length));
- // numPlaceholders >= 1 now
- placeholders.append("?")
- .append(StringUtils.repeat(",?", numPlaceholders - 1));
- String sqlVariableLengthFields = new StringBuilder("UPSERT INTO ")
- .append(aggregationInfo.getTableName()).append(" (").append(columnDefs)
- .append(") VALUES(").append(placeholders).append(")").toString();
- if (LOG.isDebugEnabled()) {
- LOG.debug("SQL statement for variable length fields: "
- + sqlVariableLengthFields);
- }
- // Use try with resource statement for the prepared statement
- try (PreparedStatement psVariableLengthFields =
- conn.prepareStatement(sqlVariableLengthFields)) {
- int idx = aggregationInfo.setStringsForPrimaryKey(
- psVariableLengthFields, context, null, 1);
- if (formattedMetrics != null && formattedMetrics.size() > 0) {
- idx = setBytesForColumnFamily(
- psVariableLengthFields, formattedMetrics, idx);
- }
- psVariableLengthFields.execute();
- }
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b042bc1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
index 33f5449..9369d6a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
@@ -59,7 +59,6 @@ public final class TimelineSchemaCreator {
final static String NAME = TimelineSchemaCreator.class.getSimpleName();
private static final Log LOG = LogFactory.getLog(TimelineSchemaCreator.class);
- private static final String PHOENIX_OPTION_SHORT = "p";
private static final String SKIP_EXISTING_TABLE_OPTION_SHORT = "s";
private static final String APP_TABLE_NAME_SHORT = "a";
private static final String APP_TO_FLOW_TABLE_NAME_SHORT = "a2f";
@@ -116,22 +115,6 @@ public final class TimelineSchemaCreator {
exceptions.add(e);
}
- // Create Phoenix data schema if needed
- if (commandLine.hasOption(PHOENIX_OPTION_SHORT)) {
- Configuration phoenixConf = new Configuration();
- try {
- PhoenixOfflineAggregationWriterImpl phoenixWriter =
- new PhoenixOfflineAggregationWriterImpl();
- phoenixWriter.init(phoenixConf);
- phoenixWriter.start();
- phoenixWriter.createPhoenixTables();
- phoenixWriter.stop();
- LOG.info("Successfully created Phoenix offline aggregation schema. ");
- } catch (IOException e) {
- LOG.error("Error in creating phoenix tables: " + e.getMessage());
- exceptions.add(e);
- }
- }
if (exceptions.size() > 0) {
LOG.warn("Schema creation finished with the following exceptions");
for (Exception e : exceptions) {
@@ -181,11 +164,6 @@ public final class TimelineSchemaCreator {
// Options without an argument
// No need to set arg name since we do not need an argument here
- o = new Option(PHOENIX_OPTION_SHORT, "usePhoenix", false,
- "create Phoenix offline aggregation tables");
- o.setRequired(false);
- options.addOption(o);
-
o = new Option(SKIP_EXISTING_TABLE_OPTION_SHORT, "skipExistingTable",
false, "skip existing Hbase tables and continue to create new tables");
o.setRequired(false);
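With the Phoenix path removed, TimelineSchemaCreator keeps only its HBase-related flags, declared through Apache Commons CLI as in the hunk above. A minimal sketch of that pattern, assuming the class name below is illustrative and the single flag shown is not the full option set:

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;

public class SchemaOptionsSketch {
  public static void main(String[] args) throws Exception {
    Options options = new Options();
    // Boolean flag with no argument, mirroring -s / --skipExistingTable above.
    Option skip = new Option("s", "skipExistingTable", false,
        "skip existing HBase tables and continue to create new tables");
    skip.setRequired(false);
    options.addOption(skip);

    CommandLine commandLine = new GnuParser().parse(options, args);
    if (commandLine.hasOption("s")) {
      System.out.println("existing tables will be skipped");
    }
  }
}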
[17/17] hadoop git commit: YARN-4990. Re-direction of a particular
log file within a container in NM UI does not redirect properly.
Contributed by Xuan Gong.
Posted by xg...@apache.org.
YARN-4990. Re-direction of a particular log file within a container in NM UI does not redirect properly. Contributed by Xuan Gong.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/736f54b7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/736f54b7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/736f54b7
Branch: refs/heads/YARN-5734
Commit: 736f54b727c3f0ecc8fb9a594f2281c240c89cb8
Parents: f6e80ac
Author: Junping Du <ju...@apache.org>
Authored: Wed Dec 21 14:14:42 2016 -0800
Committer: Junping Du <ju...@apache.org>
Committed: Wed Dec 21 14:14:42 2016 -0800
----------------------------------------------------------------------
.../nodemanager/webapp/NMWebAppFilter.java | 8 +
.../nodemanager/webapp/TestNMWebFilter.java | 176 +++++++++++++++++++
2 files changed, 184 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/736f54b7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebAppFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebAppFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebAppFilter.java
index d2f5849..7b4c2a9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebAppFilter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebAppFilter.java
@@ -79,6 +79,10 @@ public class NMWebAppFilter extends GuiceContainer{
String[] parts = uri.split("/");
String containerIdStr = parts[3];
String appOwner = parts[4];
+ String logType = null;
+ if (parts.length > 5) {
+ logType = parts[5];
+ }
if (containerIdStr != null && !containerIdStr.isEmpty()) {
ContainerId containerId = null;
try {
@@ -106,6 +110,10 @@ public class NMWebAppFilter extends GuiceContainer{
sb.append(containerIdStr);
sb.append("/");
sb.append(appOwner);
+ if (logType != null && !logType.isEmpty()) {
+ sb.append("/");
+ sb.append(logType);
+ }
redirectPath =
WebAppUtils.appendQueryParams(request, sb.toString());
} else {
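In short, the filter now carries an optional log-file name through to the log server. A hedged sketch of the tail of that redirect path, assuming the StringBuilder already holds the log-server URL, node id and container id built outside the hunk shown:

import javax.servlet.http.HttpServletRequest;
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;

final class RedirectTailSketch {
  static String buildTail(HttpServletRequest request, StringBuilder sb,
      String appOwner, String logType) {
    sb.append("/").append(appOwner);
    if (logType != null && !logType.isEmpty()) {
      // New in YARN-4990: keep the requested log file (e.g. "syslog") in the path.
      sb.append("/").append(logType);
    }
    // appendQueryParams preserves query strings such as "?start=10".
    return WebAppUtils.appendQueryParams(request, sb.toString());
  }
}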
http://git-wip-us.apache.org/repos/asf/hadoop/blob/736f54b7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebFilter.java
new file mode 100644
index 0000000..5dbe244
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebFilter.java
@@ -0,0 +1,176 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.webapp;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import com.google.inject.Injector;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.net.HttpURLConnection;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import javax.servlet.FilterChain;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
+import org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
+import org.glassfish.grizzly.servlet.HttpServletResponseImpl;
+import org.junit.Test;
+
+/**
+ * Basic sanity Tests for NMWebFilter.
+ *
+ */
+public class TestNMWebFilter {
+
+ private static final String LOG_SERVER_URI = "log-server:1999/logs";
+ private static final String USER = "testUser";
+
+ @Test(timeout = 5000)
+ public void testRedirection() throws Exception {
+ ApplicationId appId = ApplicationId.newInstance(
+ System.currentTimeMillis(), 1);
+ ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(
+ appId, 1);
+ ContainerId containerId = ContainerId.newContainerId(attemptId, 1);
+
+ NMContext mockNMContext = mock(NMContext.class);
+ ConcurrentMap<ApplicationId, Application> applications
+ = new ConcurrentHashMap<>();
+ when(mockNMContext.getApplications()).thenReturn(applications);
+ LocalDirsHandlerService mockLocalDirsHandlerService = mock(
+ LocalDirsHandlerService.class);
+ Configuration conf = new Configuration();
+ conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true);
+ conf.set(YarnConfiguration.YARN_LOG_SERVER_URL,
+ "http://" + LOG_SERVER_URI);
+ when(mockLocalDirsHandlerService.getConfig()).thenReturn(conf);
+ when(mockNMContext.getLocalDirsHandler()).thenReturn(
+ mockLocalDirsHandlerService);
+ NodeId nodeId = NodeId.newInstance("testNM", 9999);
+ when(mockNMContext.getNodeId()).thenReturn(nodeId);
+
+ Injector mockInjector = mock(Injector.class);
+ NMWebAppFilter testFilter = new NMWebAppFilter(
+ mockInjector, mockNMContext);
+
+ HttpServletResponseForTest response = new HttpServletResponseForTest();
+ // dummy filter
+ FilterChain chain = new FilterChain() {
+ @Override
+ public void doFilter(ServletRequest servletRequest,
+ ServletResponse servletResponse) throws IOException,
+ ServletException {
+ // Do Nothing
+ }
+ };
+
+ String uri = "testNM:8042/node/containerlogs/"
+ + containerId.toString() + "/" + USER;
+ HttpServletRequest request = mock(HttpServletRequest.class);
+ when(request.getRequestURI()).thenReturn(uri);
+ testFilter.doFilter(request, response, chain);
+ assertEquals(HttpServletResponse.SC_TEMPORARY_REDIRECT, response.status);
+ String redirect = response.getHeader("Location");
+ assertTrue(redirect.contains(LOG_SERVER_URI));
+ assertTrue(redirect.contains(nodeId.toString()));
+ assertTrue(redirect.contains(containerId.toString()));
+ assertTrue(redirect.contains(USER));
+
+ String logType = "syslog";
+ uri = "testNM:8042/node/containerlogs/" + containerId.toString()
+ + "/" + USER + "/" + logType + "/?start=10";
+ HttpServletRequest request2 = mock(HttpServletRequest.class);
+ when(request2.getRequestURI()).thenReturn(uri);
+ when(request2.getQueryString()).thenReturn("start=10");
+ testFilter.doFilter(request2, response, chain);
+ assertEquals(HttpServletResponse.SC_TEMPORARY_REDIRECT, response.status);
+ redirect = response.getHeader("Location");
+ assertTrue(redirect.contains(LOG_SERVER_URI));
+ assertTrue(redirect.contains(nodeId.toString()));
+ assertTrue(redirect.contains(containerId.toString()));
+ assertTrue(redirect.contains(USER));
+ assertTrue(redirect.contains(logType));
+ assertTrue(redirect.contains("start=10"));
+ }
+
+ private class HttpServletResponseForTest extends HttpServletResponseImpl {
+ String redirectLocation = "";
+ int status;
+ private String contentType;
+ private final Map<String, String> headers = new HashMap<>(1);
+ private StringWriter body;
+
+ public String getRedirect() {
+ return redirectLocation;
+ }
+
+ @Override
+ public void sendRedirect(String location) throws IOException {
+ redirectLocation = location;
+ }
+
+ @Override
+ public String encodeRedirectURL(String url) {
+ return url;
+ }
+
+ @Override
+ public void setStatus(int status) {
+ this.status = status;
+ }
+
+ @Override
+ public void setContentType(String type) {
+ this.contentType = type;
+ }
+
+ @Override
+ public void setHeader(String name, String value) {
+ headers.put(name, value);
+ }
+
+ public String getHeader(String name) {
+ return headers.get(name);
+ }
+
+ @Override
+ public PrintWriter getWriter() throws IOException {
+ body = new StringWriter();
+ return new PrintWriter(body);
+ }
+ }
+}
[10/17] hadoop git commit: HDFS-11247. Add a test to verify
NameNodeMXBean#getDecomNodes() and Live/Dead Decom Nodes shown in NameNode
WebUI. Contributed by Manoj Govindassamy.
Posted by xg...@apache.org.
HDFS-11247. Add a test to verify NameNodeMXBean#getDecomNodes() and Live/Dead Decom Nodes shown in NameNode WebUI. Contributed by Manoj Govindassamy.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4af66b1d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4af66b1d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4af66b1d
Branch: refs/heads/YARN-5734
Commit: 4af66b1d664b05590c39e34ae04f1f304c3cd227
Parents: 5b7acdd
Author: Xiao Chen <xi...@apache.org>
Authored: Tue Dec 20 12:42:13 2016 -0800
Committer: Xiao Chen <xi...@apache.org>
Committed: Tue Dec 20 12:42:13 2016 -0800
----------------------------------------------------------------------
.../server/namenode/TestNameNodeMXBean.java | 100 +++++++++++++++++++
1 file changed, 100 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4af66b1d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index 6221137..4287a92 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -315,6 +315,106 @@ public class TestNameNodeMXBean {
}
}
+ @Test (timeout = 120000)
+ public void testDecommissioningNodes() throws Exception {
+ Configuration conf = new Configuration();
+ conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
+ conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 30);
+ MiniDFSCluster cluster = null;
+ HostsFileWriter hostsFileWriter = new HostsFileWriter();
+ hostsFileWriter.initialize(conf, "temp/TestNameNodeMXBean");
+
+ try {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+ cluster.waitActive();
+
+ FSNamesystem fsn = cluster.getNameNode().namesystem;
+ MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+ ObjectName mxbeanName = new ObjectName(
+ "Hadoop:service=NameNode,name=NameNodeInfo");
+
+ List<String> hosts = new ArrayList<>();
+ for(DataNode dn : cluster.getDataNodes()) {
+ hosts.add(dn.getDisplayName());
+ }
+ hostsFileWriter.initIncludeHosts(hosts.toArray(
+ new String[hosts.size()]));
+ fsn.getBlockManager().getDatanodeManager().refreshNodes(conf);
+
+ // 1. Verify Live nodes
+ String liveNodesInfo = (String) (mbs.getAttribute(mxbeanName,
+ "LiveNodes"));
+ Map<String, Map<String, Object>> liveNodes =
+ (Map<String, Map<String, Object>>) JSON.parse(liveNodesInfo);
+ assertEquals(fsn.getLiveNodes(), liveNodesInfo);
+ assertEquals(fsn.getNumLiveDataNodes(), liveNodes.size());
+
+ for (Map<String, Object> liveNode : liveNodes.values()) {
+ assertTrue(liveNode.containsKey("lastContact"));
+ assertTrue(liveNode.containsKey("xferaddr"));
+ }
+
+ // Add the 1st DataNode to Decommission list
+ hostsFileWriter.initExcludeHost(
+ cluster.getDataNodes().get(0).getDisplayName());
+ fsn.getBlockManager().getDatanodeManager().refreshNodes(conf);
+
+ // Wait for the DecommissionManager to complete refresh nodes
+ GenericTestUtils.waitFor(new Supplier<Boolean>() {
+ @Override
+ public Boolean get() {
+ try {
+ String decomNodesInfo = (String) (mbs.getAttribute(mxbeanName,
+ "DecomNodes"));
+ Map<String, Map<String, Object>> decomNodes =
+ (Map<String, Map<String, Object>>) JSON.parse(decomNodesInfo);
+ if (decomNodes.size() > 0) {
+ return true;
+ }
+ } catch (Exception e) {
+ return false;
+ }
+ return false;
+ }
+ }, 1000, 60000);
+
+ // 2. Verify Decommission InProgress nodes
+ String decomNodesInfo = (String) (mbs.getAttribute(mxbeanName,
+ "DecomNodes"));
+ Map<String, Map<String, Object>> decomNodes =
+ (Map<String, Map<String, Object>>) JSON.parse(decomNodesInfo);
+ assertEquals(fsn.getDecomNodes(), decomNodesInfo);
+ assertEquals(fsn.getNumDecommissioningDataNodes(), decomNodes.size());
+ assertEquals(0, fsn.getNumDecomLiveDataNodes());
+ assertEquals(0, fsn.getNumDecomDeadDataNodes());
+
+ // Wait for the DecommissionManager to complete check
+ GenericTestUtils.waitFor(new Supplier<Boolean>() {
+ @Override
+ public Boolean get() {
+ if (fsn.getNumDecomLiveDataNodes() == 1) {
+ return true;
+ }
+ return false;
+ }
+ }, 1000, 60000);
+
+ // 3. Verify Decommissioned nodes
+ decomNodesInfo = (String) (mbs.getAttribute(mxbeanName, "DecomNodes"));
+ decomNodes =
+ (Map<String, Map<String, Object>>) JSON.parse(decomNodesInfo);
+ assertEquals(0, decomNodes.size());
+ assertEquals(fsn.getDecomNodes(), decomNodesInfo);
+ assertEquals(1, fsn.getNumDecomLiveDataNodes());
+ assertEquals(0, fsn.getNumDecomDeadDataNodes());
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ hostsFileWriter.cleanup();
+ }
+ }
+
@Test(timeout=120000)
@SuppressWarnings("unchecked")
public void testTopUsers() throws Exception {
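The same "NameNodeInfo" MXBean the test polls is reachable from outside the process as well. A hedged sketch of reading the DecomNodes attribute over remote JMX, with a hypothetical host and port (the NameNode must be started with remote JMX enabled for this to work):

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class DecomNodesProbe {
  public static void main(String[] args) throws Exception {
    // Hypothetical endpoint; substitute the real NameNode JMX host and port.
    JMXServiceURL url = new JMXServiceURL(
        "service:jmx:rmi:///jndi/rmi://namenode-host:8004/jmxrmi");
    try (JMXConnector connector = JMXConnectorFactory.connect(url)) {
      MBeanServerConnection mbs = connector.getMBeanServerConnection();
      ObjectName name =
          new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
      // Same attribute the test asserts on; the value is a JSON map keyed by datanode.
      String decomNodes = (String) mbs.getAttribute(name, "DecomNodes");
      System.out.println(decomNodes);
    }
  }
}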
[08/17] hadoop git commit: YARN-4844 (Addendum). Change
JobStatus(usedMem, reservedMem,
neededMem) back to int for compatibility. Contributed by Wangda Tan
Posted by xg...@apache.org.
YARN-4844 (Addendum). Change JobStatus(usedMem, reservedMem, neededMem)
back to int for compatibility. Contributed by Wangda Tan.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/523411d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/523411d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/523411d6
Branch: refs/heads/YARN-5734
Commit: 523411d69b37d85046bd8b23001c267daac7a108
Parents: 3bcfe3a
Author: Jian He <ji...@apache.org>
Authored: Tue Dec 20 11:28:19 2016 -0800
Committer: Jian He <ji...@apache.org>
Committed: Tue Dec 20 11:28:44 2016 -0800
----------------------------------------------------------------------
.../apache/hadoop/mapreduce/TypeConverter.java | 7 ++++---
.../org/apache/hadoop/mapreduce/JobStatus.java | 18 +++++++++---------
.../apache/hadoop/mapred/JobClientUnitTest.java | 6 +++---
3 files changed, 16 insertions(+), 15 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/523411d6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
index ca38bab..aea931a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
@@ -521,13 +521,14 @@ public class TypeConverter {
application.getApplicationResourceUsageReport();
if (resourceUsageReport != null) {
jobStatus.setNeededMem(
- resourceUsageReport.getNeededResources().getMemorySize());
+ (int)resourceUsageReport.getNeededResources().getMemorySize());
jobStatus.setNumReservedSlots(
resourceUsageReport.getNumReservedContainers());
jobStatus.setNumUsedSlots(resourceUsageReport.getNumUsedContainers());
jobStatus.setReservedMem(
- resourceUsageReport.getReservedResources().getMemorySize());
- jobStatus.setUsedMem(resourceUsageReport.getUsedResources().getMemorySize());
+ (int)resourceUsageReport.getReservedResources().getMemorySize());
+ jobStatus.setUsedMem(
+ (int) resourceUsageReport.getUsedResources().getMemorySize());
}
return jobStatus;
}
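The addendum trades range for compatibility: the RM reports memory sizes as long, but JobStatus keeps its original int accessors so callers compiled against the old signatures keep linking. A small sketch of what the narrowing cast above implies (values are illustrative):

public class MemNarrowingSketch {
  public static void main(String[] args) {
    long usedMemMb = 2048L;               // memory size as reported by the RM, in MB
    int legacyUsedMem = (int) usedMemMb;  // what JobStatus.setUsedMem(int) now receives
    System.out.println(legacyUsedMem);    // 2048

    // The cost of the cast: anything above Integer.MAX_VALUE MB would wrap.
    System.out.println((int) (Integer.MAX_VALUE + 1L));  // -2147483648
  }
}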
http://git-wip-us.apache.org/repos/asf/hadoop/blob/523411d6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobStatus.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobStatus.java
index 7438296..9ff75b9 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobStatus.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobStatus.java
@@ -95,9 +95,9 @@ public class JobStatus implements Writable, Cloneable {
private String trackingUrl ="";
private int numUsedSlots;
private int numReservedSlots;
- private long usedMem;
- private long reservedMem;
- private long neededMem;
+ private int usedMem;
+ private int reservedMem;
+ private int neededMem;
private boolean isUber;
/**
@@ -580,42 +580,42 @@ public class JobStatus implements Writable, Cloneable {
/**
* @return the used memory
*/
- public long getUsedMem() {
+ public int getUsedMem() {
return usedMem;
}
/**
* @param m the used memory
*/
- public void setUsedMem(long m) {
+ public void setUsedMem(int m) {
this.usedMem = m;
}
/**
* @return the reserved memory
*/
- public long getReservedMem() {
+ public int getReservedMem() {
return reservedMem;
}
/**
* @param r the reserved memory
*/
- public void setReservedMem(long r) {
+ public void setReservedMem(int r) {
this.reservedMem = r;
}
/**
* @return the needed memory
*/
- public long getNeededMem() {
+ public int getNeededMem() {
return neededMem;
}
/**
* @param n the needed memory
*/
- public void setNeededMem(long n) {
+ public void setNeededMem(int n) {
this.neededMem = n;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/523411d6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobClientUnitTest.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobClientUnitTest.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobClientUnitTest.java
index e02232d..b5edf2d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobClientUnitTest.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobClientUnitTest.java
@@ -170,9 +170,9 @@ public class JobClientUnitTest {
when(mockJobStatus.getPriority()).thenReturn(JobPriority.NORMAL);
when(mockJobStatus.getNumUsedSlots()).thenReturn(1);
when(mockJobStatus.getNumReservedSlots()).thenReturn(1);
- when(mockJobStatus.getUsedMem()).thenReturn(1024L);
- when(mockJobStatus.getReservedMem()).thenReturn(512L);
- when(mockJobStatus.getNeededMem()).thenReturn(2048L);
+ when(mockJobStatus.getUsedMem()).thenReturn(1024);
+ when(mockJobStatus.getReservedMem()).thenReturn(512);
+ when(mockJobStatus.getNeededMem()).thenReturn(2048);
when(mockJobStatus.getSchedulingInfo()).thenReturn("NA");
Job mockJob = mock(Job.class);
[07/17] hadoop git commit: YARN-5706. Fail to launch SLSRunner due to
NPE. (Kai Sasaki via wangda)
Posted by xg...@apache.org.
YARN-5706. Fail to launch SLSRunner due to NPE. (Kai Sasaki via wangda)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3bcfe3a9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3bcfe3a9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3bcfe3a9
Branch: refs/heads/YARN-5734
Commit: 3bcfe3a9de2cde67092d0eb13f94d361feab6ec8
Parents: 1b401f6
Author: Wangda Tan <wa...@apache.org>
Authored: Tue Dec 20 11:03:29 2016 -0800
Committer: Wangda Tan <wa...@apache.org>
Committed: Tue Dec 20 11:03:29 2016 -0800
----------------------------------------------------------------------
hadoop-tools/hadoop-sls/src/main/bin/slsrun.sh | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3bcfe3a9/hadoop-tools/hadoop-sls/src/main/bin/slsrun.sh
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/bin/slsrun.sh b/hadoop-tools/hadoop-sls/src/main/bin/slsrun.sh
index 218dee4..19b5c34 100644
--- a/hadoop-tools/hadoop-sls/src/main/bin/slsrun.sh
+++ b/hadoop-tools/hadoop-sls/src/main/bin/slsrun.sh
@@ -71,8 +71,8 @@ function parse_args()
function calculate_classpath
{
hadoop_add_to_classpath_tools hadoop-sls
- hadoop_debug "Injecting ${HADOOP_TOOLS_DIR}/sls/html into CLASSPATH"
- hadoop_add_classpath "${HADOOP_TOOLS_DIR}/sls/html"
+ hadoop_debug "Injecting ${HADOOP_TOOLS_HOME}/${HADOOP_TOOLS_DIR}/sls/html into CLASSPATH"
+ hadoop_add_classpath "${HADOOP_TOOLS_HOME}/${HADOOP_TOOLS_DIR}/sls/html"
}
function run_simulation() {
[03/17] hadoop git commit: HDFS-11262. Remove unused variables in
FSImage.java. Contributed by Jagadesh Kiran N.
Posted by xg...@apache.org.
HDFS-11262. Remove unused variables in FSImage.java. Contributed by Jagadesh Kiran N.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d2026ae9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d2026ae9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d2026ae9
Branch: refs/heads/YARN-5734
Commit: d2026ae9e6c86f4fd9e48b80e71233181c1853d9
Parents: 3583413
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue Dec 20 14:34:35 2016 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue Dec 20 14:34:35 2016 +0900
----------------------------------------------------------------------
.../main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java | 3 ---
1 file changed, 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2026ae9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index c40c626..f315d1e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -97,7 +97,6 @@ public class FSImage implements Closeable {
final private Configuration conf;
protected NNStorageRetentionManager archivalManager;
- private int quotaInitThreads;
/* Used to make sure there are no concurrent checkpoints for a given txid
* The checkpoint here could be one of the following operations.
@@ -666,7 +665,6 @@ public class FSImage implements Closeable {
LOG.info("No edit log streams selected.");
}
- Exception le = null;
FSImageFile imageFile = null;
for (int i = 0; i < imageFiles.size(); i++) {
try {
@@ -677,7 +675,6 @@ public class FSImage implements Closeable {
throw new IOException("Failed to load image from " + imageFile,
ie);
} catch (Exception e) {
- le = e;
LOG.error("Failed to load image from " + imageFile, e);
target.clear();
imageFile = null;
[05/17] hadoop git commit: HDFS-10959. Adding per disk IO statistics and metrics in DataNode. Contributed by Xiaoyu Yao.
Posted by xg...@apache.org.
HDFS-10959. Adding per disk IO statistics and metrics in DataNode. Contributed by Xiaoyu Yao.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fe4ff64a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fe4ff64a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fe4ff64a
Branch: refs/heads/YARN-5734
Commit: fe4ff64a4af4ab5f3ee59ba22ce7db2dec447f7d
Parents: 575773a
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Mon Dec 19 22:35:51 2016 -0800
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon Dec 19 22:42:54 2016 -0800
----------------------------------------------------------------------
.../server/datanode/ProfilingFileIoEvents.java | 110 +++++++
.../fsdataset/DataNodeVolumeMetrics.java | 289 +++++++++++++++++++
.../server/datanode/fsdataset/FsVolumeSpi.java | 2 +
.../datanode/fsdataset/impl/FsVolumeImpl.java | 11 +
.../hdfs/server/datanode/DataNodeTestUtils.java | 39 +++
.../server/datanode/SimulatedFSDataset.java | 18 +-
.../TestDataNodeVolumeFailureReporting.java | 48 +--
.../datanode/TestDataNodeVolumeMetrics.java | 182 ++++++++++++
.../server/datanode/TestDirectoryScanner.java | 6 +
.../datanode/extdataset/ExternalVolumeImpl.java | 6 +
10 files changed, 669 insertions(+), 42 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe4ff64a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProfilingFileIoEvents.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProfilingFileIoEvents.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProfilingFileIoEvents.java
new file mode 100644
index 0000000..5835fe8
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProfilingFileIoEvents.java
@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.datanode;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.util.Time;
+
+import javax.annotation.Nullable;
+
+/**
+ * {@link FileIoEvents} that profiles the performance of the metadata and data
+ * related operations on datanode volumes.
+ */
+@InterfaceAudience.Private
+class ProfilingFileIoEvents implements FileIoEvents {
+
+ @Override
+ public long beforeMetadataOp(@Nullable FsVolumeSpi volume,
+ FileIoProvider.OPERATION op) {
+ DataNodeVolumeMetrics metrics = getVolumeMetrics(volume);
+ if (metrics != null) {
+ return Time.monotonicNow();
+ }
+ return 0;
+ }
+
+ @Override
+ public void afterMetadataOp(@Nullable FsVolumeSpi volume,
+ FileIoProvider.OPERATION op, long begin) {
+ DataNodeVolumeMetrics metrics = getVolumeMetrics(volume);
+ if (metrics != null) {
+ metrics.addMetadastaOperationLatency(Time.monotonicNow() - begin);
+ }
+ }
+
+ @Override
+ public long beforeFileIo(@Nullable FsVolumeSpi volume,
+ FileIoProvider.OPERATION op, long len) {
+ DataNodeVolumeMetrics metrics = getVolumeMetrics(volume);
+ if (metrics != null) {
+ return Time.monotonicNow();
+ }
+ return 0;
+ }
+
+ @Override
+ public void afterFileIo(@Nullable FsVolumeSpi volume,
+ FileIoProvider.OPERATION op, long begin, long len) {
+ DataNodeVolumeMetrics metrics = getVolumeMetrics(volume);
+ if (metrics != null) {
+ long latency = Time.monotonicNow() - begin;
+ metrics.addDataFileIoLatency(latency);
+ switch (op) {
+ case SYNC:
+ metrics.addSyncIoLatency(latency);
+ break;
+ case FLUSH:
+ metrics.addFlushIoLatency(latency);
+ break;
+ case READ:
+ metrics.addReadIoLatency(latency);
+ break;
+ case WRITE:
+ metrics.addWriteIoLatency(latency);
+ break;
+ default:
+ }
+ }
+ }
+
+ @Override
+ public void onFailure(@Nullable FsVolumeSpi volume,
+ FileIoProvider.OPERATION op, Exception e, long begin) {
+ DataNodeVolumeMetrics metrics = getVolumeMetrics(volume);
+ if (metrics != null) {
+ metrics.addFileIoError(Time.monotonicNow() - begin);
+ }
+ }
+
+ @Nullable
+ @Override
+ public String getStatistics() {
+ return null;
+ }
+
+ private DataNodeVolumeMetrics getVolumeMetrics(final FsVolumeSpi volume) {
+ if (volume != null) {
+ return volume.getMetrics();
+ }
+ return null;
+ }
+}
\ No newline at end of file
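For context, the hooks above are meant to bracket individual IO operations: the caller keeps the timestamp returned by the before* method and hands it back to the matching after* or onFailure call. A minimal sketch of that calling pattern, using simplified stand-in types rather than the actual FileIoEvents and FileIoProvider.OPERATION API:

public class HookPatternSketch {
  enum Op { READ, WRITE, SYNC, FLUSH }

  interface Hook {
    long beforeFileIo(Op op, long len);            // returns a start timestamp (or 0)
    void afterFileIo(Op op, long begin, long len); // records latency on success
    void onFailure(Op op, Exception e, long begin);
  }

  static void timedWrite(Hook hook, byte[] data) throws Exception {
    long begin = hook.beforeFileIo(Op.WRITE, data.length);
    try {
      // ... the real write to disk would happen here ...
      hook.afterFileIo(Op.WRITE, begin, data.length);
    } catch (Exception e) {
      hook.onFailure(Op.WRITE, e, begin);
      throw e;
    }
  }

  public static void main(String[] args) throws Exception {
    Hook logging = new Hook() {
      public long beforeFileIo(Op op, long len) { return System.nanoTime(); }
      public void afterFileIo(Op op, long begin, long len) {
        System.out.println(op + " took " + (System.nanoTime() - begin) + " ns");
      }
      public void onFailure(Op op, Exception e, long begin) {
        System.out.println(op + " failed: " + e);
      }
    };
    timedWrite(logging, new byte[]{1, 2, 3});
  }
}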
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe4ff64a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/DataNodeVolumeMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/DataNodeVolumeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/DataNodeVolumeMetrics.java
new file mode 100644
index 0000000..e4d8707
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/DataNodeVolumeMetrics.java
@@ -0,0 +1,289 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode.fsdataset;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MetricsRegistry;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MutableQuantiles;
+import org.apache.hadoop.metrics2.lib.MutableRate;
+
+import java.util.concurrent.ThreadLocalRandom;
+
+/**
+ * This class is for maintaining Datanode Volume IO related statistics and
+ * publishing them through the metrics interfaces.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+@Metrics(name = "DataNodeVolume", about = "DataNode Volume metrics",
+ context = "dfs")
+public class DataNodeVolumeMetrics {
+ private final MetricsRegistry registry = new MetricsRegistry("FsVolume");
+
+ @Metric("number of metadata operations")
+ private MutableCounterLong totalMetadataOperations;
+ @Metric("metadata operation rate")
+ private MutableRate metadataOperationRate;
+ private MutableQuantiles[] metadataOperationLatencyQuantiles;
+
+ @Metric("number of data file io operations")
+ private MutableCounterLong totalDataFileIos;
+ @Metric("data file io operation rate")
+ private MutableRate dataFileIoRate;
+ private MutableQuantiles[] dataFileIoLatencyQuantiles;
+
+ @Metric("file io flush rate")
+ private MutableRate flushIoRate;
+ private MutableQuantiles[] flushIoLatencyQuantiles;
+
+ @Metric("file io sync rate")
+ private MutableRate syncIoRate;
+ private MutableQuantiles[] syncIoLatencyQuantiles;
+
+ @Metric("file io read rate")
+ private MutableRate readIoRate;
+ private MutableQuantiles[] readIoLatencyQuantiles;
+
+ @Metric("file io write rate")
+ private MutableRate writeIoRate;
+ private MutableQuantiles[] writeIoLatencyQuantiles;
+
+ @Metric("number of file io errors")
+ private MutableCounterLong totalFileIoErrors;
+ @Metric("file io error rate")
+ private MutableRate fileIoErrorRate;
+
+ public long getTotalMetadataOperations() {
+ return totalMetadataOperations.value();
+ }
+
+ // Based on metadataOperationRate
+ public long getMetadataOperationSampleCount() {
+ return metadataOperationRate.lastStat().numSamples();
+ }
+
+ public double getMetadataOperationMean() {
+ return metadataOperationRate.lastStat().mean();
+ }
+
+ public double getMetadataOperationStdDev() {
+ return metadataOperationRate.lastStat().stddev();
+ }
+
+ public long getTotalDataFileIos() {
+ return totalDataFileIos.value();
+ }
+
+ // Based on dataFileIoRate
+ public long getDataFileIoSampleCount() {
+ return dataFileIoRate.lastStat().numSamples();
+ }
+
+ public double getDataFileIoMean() {
+ return dataFileIoRate.lastStat().mean();
+ }
+
+ public double getDataFileIoStdDev() {
+ return dataFileIoRate.lastStat().stddev();
+ }
+
+ // Based on flushIoRate
+ public long getFlushIoSampleCount() {
+ return flushIoRate.lastStat().numSamples();
+ }
+
+ public double getFlushIoMean() {
+ return flushIoRate.lastStat().mean();
+ }
+
+ public double getFlushIoStdDev() {
+ return flushIoRate.lastStat().stddev();
+ }
+
+ // Based on syncIoRate
+ public long getSyncIoSampleCount() {
+ return syncIoRate.lastStat().numSamples();
+ }
+
+ public double getSyncIoMean() {
+ return syncIoRate.lastStat().mean();
+ }
+
+ public double getSyncIoStdDev() {
+ return syncIoRate.lastStat().stddev();
+ }
+
+ // Based on readIoRate
+ public long getReadIoSampleCount() {
+ return readIoRate.lastStat().numSamples();
+ }
+
+ public double getReadIoMean() {
+ return readIoRate.lastStat().mean();
+ }
+
+ public double getReadIoStdDev() {
+ return readIoRate.lastStat().stddev();
+ }
+
+ // Based on writeIoRate
+ public long getWriteIoSampleCount() {
+ return syncIoRate.lastStat().numSamples();
+ }
+
+ public double getWriteIoMean() {
+ return syncIoRate.lastStat().mean();
+ }
+
+ public double getWriteIoStdDev() {
+ return syncIoRate.lastStat().stddev();
+ }
+
+ public long getTotalFileIoErrors() {
+ return totalFileIoErrors.value();
+ }
+
+ // Based on fileIoErrorRate
+ public long getFileIoErrorSampleCount() {
+ return fileIoErrorRate.lastStat().numSamples();
+ }
+
+ public double getFileIoErrorMean() {
+ return fileIoErrorRate.lastStat().mean();
+ }
+
+ public double getFileIoErrorStdDev() {
+ return fileIoErrorRate.lastStat().stddev();
+ }
+
+ private final String name;
+ private final MetricsSystem ms;
+
+ public DataNodeVolumeMetrics(final MetricsSystem metricsSystem,
+ final String volumeName, final int[] intervals) {
+ this.ms = metricsSystem;
+ this.name = volumeName;
+ final int len = intervals.length;
+ metadataOperationLatencyQuantiles = new MutableQuantiles[len];
+ dataFileIoLatencyQuantiles = new MutableQuantiles[len];
+ flushIoLatencyQuantiles = new MutableQuantiles[len];
+ syncIoLatencyQuantiles = new MutableQuantiles[len];
+ readIoLatencyQuantiles = new MutableQuantiles[len];
+ writeIoLatencyQuantiles = new MutableQuantiles[len];
+ for (int i = 0; i < len; i++) {
+ int interval = intervals[i];
+ metadataOperationLatencyQuantiles[i] = registry.newQuantiles(
+ "metadataOperationLatency" + interval + "s",
+ "Meatadata Operation Latency in ms", "ops", "latency", interval);
+ dataFileIoLatencyQuantiles[i] = registry.newQuantiles(
+ "dataFileIoLatency" + interval + "s",
+ "Data File Io Latency in ms", "ops", "latency", interval);
+ flushIoLatencyQuantiles[i] = registry.newQuantiles(
+ "flushIoLatency" + interval + "s",
+ "Data flush Io Latency in ms", "ops", "latency", interval);
+ syncIoLatencyQuantiles[i] = registry.newQuantiles(
+ "syncIoLatency" + interval + "s",
+ "Data sync Io Latency in ms", "ops", "latency", interval);
+ readIoLatencyQuantiles[i] = registry.newQuantiles(
+ "readIoLatency" + interval + "s",
+ "Data read Io Latency in ms", "ops", "latency", interval);
+ writeIoLatencyQuantiles[i] = registry.newQuantiles(
+ "writeIoLatency" + interval + "s",
+ "Data write Io Latency in ms", "ops", "latency", interval);
+ }
+ }
+
+ public static DataNodeVolumeMetrics create(final Configuration conf,
+ final String volumeName) {
+ MetricsSystem ms = DefaultMetricsSystem.instance();
+ String name = "DataNodeVolume-"+ (volumeName.isEmpty()
+ ? "UndefinedDataNodeVolume"+ ThreadLocalRandom.current().nextInt()
+ : volumeName.replace(':', '-'));
+
+ // Percentile measurement is off by default, by watching no intervals
+ int[] intervals =
+ conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY);
+ return ms.register(name, null, new DataNodeVolumeMetrics(ms, name,
+ intervals));
+ }
+
+ public String name() {
+ return name;
+ }
+
+ public void unRegister() {
+ ms.unregisterSource(name);
+ }
+
+ public void addMetadastaOperationLatency(final long latency) {
+ totalMetadataOperations.incr();
+ metadataOperationRate.add(latency);
+ for (MutableQuantiles q : metadataOperationLatencyQuantiles) {
+ q.add(latency);
+ }
+ }
+
+ public void addDataFileIoLatency(final long latency) {
+ totalDataFileIos.incr();
+ dataFileIoRate.add(latency);
+ for (MutableQuantiles q : dataFileIoLatencyQuantiles) {
+ q.add(latency);
+ }
+ }
+
+ public void addSyncIoLatency(final long latency) {
+ syncIoRate.add(latency);
+ for (MutableQuantiles q : syncIoLatencyQuantiles) {
+ q.add(latency);
+ }
+ }
+
+ public void addFlushIoLatency(final long latency) {
+ flushIoRate.add(latency);
+ for (MutableQuantiles q : flushIoLatencyQuantiles) {
+ q.add(latency);
+ }
+ }
+
+ public void addReadIoLatency(final long latency) {
+ readIoRate.add(latency);
+ for (MutableQuantiles q : readIoLatencyQuantiles) {
+ q.add(latency);
+ }
+ }
+
+ public void addWriteIoLatency(final long latency) {
+ writeIoRate.add(latency);
+ for (MutableQuantiles q: writeIoLatencyQuantiles) {
+ q.add(latency);
+ }
+ }
+
+ public void addFileIoError(final long latency) {
+ totalFileIoErrors.incr();
+ metadataOperationRate.add(latency);
+ }
+}
\ No newline at end of file
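A hedged usage sketch of the class above: create the per-volume source, record a few latencies, read back aggregates, and unregister on shutdown. The volume path is illustrative, the default metrics system is assumed to be available, and the percentile quantiles stay empty because no intervals are configured here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics;

public class VolumeMetricsUsageSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Registers a "DataNodeVolume-..." source with the default metrics system.
    DataNodeVolumeMetrics metrics =
        DataNodeVolumeMetrics.create(conf, "/data/dn1/current");

    metrics.addReadIoLatency(5);    // ms
    metrics.addWriteIoLatency(12);  // ms
    metrics.addMetadastaOperationLatency(1);

    System.out.println("volume source name: " + metrics.name());
    System.out.println("read mean (ms):     " + metrics.getReadIoMean());
    System.out.println("total metadata ops: " + metrics.getTotalMetadataOperations());

    // Remove the source when the volume shuts down, mirroring FsVolumeImpl below.
    metrics.unRegister();
  }
}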
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe4ff64a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
index 8aa2fd9..adec209 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
@@ -432,4 +432,6 @@ public interface FsVolumeSpi
}
FileIoProvider getFileIoProvider();
+
+ DataNodeVolumeMetrics getMetrics();
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe4ff64a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index e1bc886..e28ee27 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -130,6 +131,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
// query from the filesystem.
protected volatile long configuredCapacity;
private final FileIoProvider fileIoProvider;
+ private final DataNodeVolumeMetrics metrics;
/**
* Per-volume worker pool that processes new blocks to cache.
@@ -163,6 +165,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
this.conf = conf;
this.fileIoProvider = fileIoProvider;
cacheExecutor = initializeCacheExecutor(parent);
+ this.metrics = DataNodeVolumeMetrics.create(conf, getBaseURI().getPath());
}
protected ThreadPoolExecutor initializeCacheExecutor(File parent) {
@@ -1008,6 +1011,9 @@ public class FsVolumeImpl implements FsVolumeSpi {
for (Entry<String, BlockPoolSlice> entry : set) {
entry.getValue().shutdown(null);
}
+ if (metrics != null) {
+ metrics.unRegister();
+ }
}
void addBlockPool(String bpid, Configuration c) throws IOException {
@@ -1305,6 +1311,11 @@ public class FsVolumeImpl implements FsVolumeSpi {
return fileIoProvider;
}
+ @Override
+ public DataNodeVolumeMetrics getMetrics() {
+ return metrics;
+ }
+
private LinkedList<ScanInfo> compileReport(File bpFinalizedDir,
File dir, LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
throws InterruptedException {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe4ff64a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
index 3501ed3..5a1ad87 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
@@ -22,7 +22,11 @@ package org.apache.hadoop.hdfs.server.datanode;
import java.io.File;
import java.io.IOException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.ReconfigurationException;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -34,6 +38,9 @@ import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
+
+import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertThat;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.doAnswer;
@@ -43,6 +50,8 @@ import static org.mockito.Mockito.doAnswer;
* dependencies to {@link MiniDFSCluster}.
*/
public class DataNodeTestUtils {
+ private static final Log LOG =
+ LogFactory.getLog(DataNodeTestUtils.class);
private static final String DIR_FAILURE_SUFFIX = ".origin";
public final static String TEST_CLUSTER_ID = "testClusterID";
@@ -203,4 +212,34 @@ public class DataNodeTestUtils {
}
}).when(dn.data).getPinning(any(ExtendedBlock.class));
}
+
+ /**
+ * Reconfigure a DataNode by setting a new list of volumes.
+ *
+ * @param dn DataNode to reconfigure
+ * @param newVols new volumes to configure
+ * @throws Exception if there is any failure
+ */
+ public static void reconfigureDataNode(DataNode dn, File... newVols)
+ throws Exception {
+ StringBuilder dnNewDataDirs = new StringBuilder();
+ for (File newVol: newVols) {
+ if (dnNewDataDirs.length() > 0) {
+ dnNewDataDirs.append(',');
+ }
+ dnNewDataDirs.append(newVol.getAbsolutePath());
+ }
+ try {
+ assertThat(
+ dn.reconfigurePropertyImpl(
+ DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
+ dnNewDataDirs.toString()),
+ is(dn.getConf().get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY)));
+ } catch (ReconfigurationException e) {
+ // This can be thrown if reconfiguration tries to use a failed volume.
+ // We need to swallow the exception, because some of our tests want to
+ // cover this case.
+ LOG.warn("Could not reconfigure DataNode.", e);
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe4ff64a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index d3efe48..484fbe4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -476,11 +477,14 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
static class SimulatedVolume implements FsVolumeSpi {
private final SimulatedStorage storage;
private final FileIoProvider fileIoProvider;
+ private final DataNodeVolumeMetrics metrics;
SimulatedVolume(final SimulatedStorage storage,
- final FileIoProvider fileIoProvider) {
+ final FileIoProvider fileIoProvider,
+ final DataNodeVolumeMetrics metrics) {
this.storage = storage;
this.fileIoProvider = fileIoProvider;
+ this.metrics = metrics;
}
@Override
@@ -575,6 +579,11 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
}
@Override
+ public DataNodeVolumeMetrics getMetrics() {
+ return metrics;
+ }
+
+ @Override
public VolumeCheckResult check(VolumeCheckContext context)
throws Exception {
return VolumeCheckResult.HEALTHY;
@@ -609,7 +618,12 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
this.storage = new SimulatedStorage(
conf.getLong(CONFIG_PROPERTY_CAPACITY, DEFAULT_CAPACITY),
conf.getEnum(CONFIG_PROPERTY_STATE, DEFAULT_STATE));
- this.volume = new SimulatedVolume(this.storage, this.fileIoProvider);
+
+ // TODO: per volume id or path
+ DataNodeVolumeMetrics volumeMetrics = DataNodeVolumeMetrics.create(conf,
+ datanodeUuid);
+ this.volume = new SimulatedVolume(this.storage, this.fileIoProvider,
+ volumeMetrics);
this.datasetLock = new AutoCloseableLock();
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe4ff64a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
index b45dabf..aa9b7aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
@@ -20,12 +20,10 @@ package org.apache.hadoop.hdfs.server.datanode;
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
-import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import java.io.File;
@@ -36,7 +34,6 @@ import java.util.ArrayList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.ReconfigurationException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -61,7 +58,8 @@ import org.junit.Test;
*/
public class TestDataNodeVolumeFailureReporting {
- private static final Log LOG = LogFactory.getLog(TestDataNodeVolumeFailureReporting.class);
+ private static final Log LOG =
+ LogFactory.getLog(TestDataNodeVolumeFailureReporting.class);
{
GenericTestUtils.setLogLevel(TestDataNodeVolumeFailureReporting.LOG,
Level.ALL);
@@ -389,8 +387,8 @@ public class TestDataNodeVolumeFailureReporting {
checkFailuresAtNameNode(dm, dns.get(1), true, dn2Vol1.getAbsolutePath());
// Reconfigure again to try to add back the failed volumes.
- reconfigureDataNode(dns.get(0), dn1Vol1, dn1Vol2);
- reconfigureDataNode(dns.get(1), dn2Vol1, dn2Vol2);
+ DataNodeTestUtils.reconfigureDataNode(dns.get(0), dn1Vol1, dn1Vol2);
+ DataNodeTestUtils.reconfigureDataNode(dns.get(1), dn2Vol1, dn2Vol2);
DataNodeTestUtils.triggerHeartbeat(dns.get(0));
DataNodeTestUtils.triggerHeartbeat(dns.get(1));
@@ -410,8 +408,8 @@ public class TestDataNodeVolumeFailureReporting {
// Reconfigure a third time with the failed volumes. Afterwards, we expect
// the same volume failures to be reported. (No double-counting.)
- reconfigureDataNode(dns.get(0), dn1Vol1, dn1Vol2);
- reconfigureDataNode(dns.get(1), dn2Vol1, dn2Vol2);
+ DataNodeTestUtils.reconfigureDataNode(dns.get(0), dn1Vol1, dn1Vol2);
+ DataNodeTestUtils.reconfigureDataNode(dns.get(1), dn2Vol1, dn2Vol2);
DataNodeTestUtils.triggerHeartbeat(dns.get(0));
DataNodeTestUtils.triggerHeartbeat(dns.get(1));
@@ -432,8 +430,8 @@ public class TestDataNodeVolumeFailureReporting {
// Replace failed volume with healthy volume and run reconfigure DataNode.
// The failed volume information should be cleared.
DataNodeTestUtils.restoreDataDirFromFailure(dn1Vol1, dn2Vol1);
- reconfigureDataNode(dns.get(0), dn1Vol1, dn1Vol2);
- reconfigureDataNode(dns.get(1), dn2Vol1, dn2Vol2);
+ DataNodeTestUtils.reconfigureDataNode(dns.get(0), dn1Vol1, dn1Vol2);
+ DataNodeTestUtils.reconfigureDataNode(dns.get(1), dn2Vol1, dn2Vol2);
DataNodeTestUtils.triggerHeartbeat(dns.get(0));
DataNodeTestUtils.triggerHeartbeat(dns.get(1));
@@ -670,34 +668,4 @@ public class TestDataNodeVolumeFailureReporting {
cluster.getNamesystem().getBlockManager().getDatanodeManager(), 0);
volumeCapacity = dnCapacity / cluster.getStoragesPerDatanode();
}
-
- /**
- * Reconfigure a DataNode by setting a new list of volumes.
- *
- * @param dn DataNode to reconfigure
- * @param newVols new volumes to configure
- * @throws Exception if there is any failure
- */
- private static void reconfigureDataNode(DataNode dn, File... newVols)
- throws Exception {
- StringBuilder dnNewDataDirs = new StringBuilder();
- for (File newVol: newVols) {
- if (dnNewDataDirs.length() > 0) {
- dnNewDataDirs.append(',');
- }
- dnNewDataDirs.append(newVol.getAbsolutePath());
- }
- try {
- assertThat(
- dn.reconfigurePropertyImpl(
- DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
- dnNewDataDirs.toString()),
- is(dn.getConf().get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY)));
- } catch (ReconfigurationException e) {
- // This can be thrown if reconfiguration tries to use a failed volume.
- // We need to swallow the exception, because some of our tests want to
- // cover this case.
- LOG.warn("Could not reconfigure DataNode.", e);
- }
- }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe4ff64a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
new file mode 100644
index 0000000..407c3e9
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
@@ -0,0 +1,182 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode;
+
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSOutputStream;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+
+/**
+ * Test class for DataNodeVolumeMetrics.
+ */
+public class TestDataNodeVolumeMetrics {
+ private static final Log LOG =
+ LogFactory.getLog(TestDataNodeVolumeMetrics.class);
+
+ private static final int BLOCK_SIZE = 1024;
+ private static final short REPL = 1;
+ private static final int NUM_DATANODES = 1;
+
+ @Rule
+ public Timeout timeout = new Timeout(300000);
+
+ @Test
+ public void testVolumeMetrics() throws Exception {
+ MiniDFSCluster cluster = setupClusterForVolumeMetrics();
+ try {
+ FileSystem fs = cluster.getFileSystem();
+ final Path fileName = new Path("/test.dat");
+ final long fileLen = Integer.MAX_VALUE + 1L;
+ DFSTestUtil.createFile(fs, fileName, false, BLOCK_SIZE, fileLen,
+ fs.getDefaultBlockSize(fileName),
+ REPL, 1L, true);
+
+ try (FSDataOutputStream out = fs.append(fileName)) {
+ out.writeBytes("hello world");
+ ((DFSOutputStream) out.getWrappedStream()).hsync();
+ }
+
+ verifyDataNodeVolumeMetrics(fs, cluster, fileName);
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
+
+ @Test
+ public void testVolumeMetricsWithVolumeDepartureArrival() throws Exception {
+ MiniDFSCluster cluster = setupClusterForVolumeMetrics();
+ try {
+ FileSystem fs = cluster.getFileSystem();
+ final Path fileName = new Path("/test.dat");
+ final long fileLen = Integer.MAX_VALUE + 1L;
+ DFSTestUtil.createFile(fs, fileName, false, BLOCK_SIZE, fileLen,
+ fs.getDefaultBlockSize(fileName),
+ REPL, 1L, true);
+
+ try (FSDataOutputStream out = fs.append(fileName)) {
+ out.writeBytes("hello world");
+ ((DFSOutputStream) out.getWrappedStream()).hsync();
+ }
+
+ ArrayList<DataNode> dns = cluster.getDataNodes();
+ assertTrue("DN1 should be up", dns.get(0).isDatanodeUp());
+
+ final String dataDir = cluster.getDataDirectory();
+ final File dn1Vol2 = new File(dataDir, "data2");
+
+ DataNodeTestUtils.injectDataDirFailure(dn1Vol2);
+ verifyDataNodeVolumeMetrics(fs, cluster, fileName);
+
+ DataNodeTestUtils.restoreDataDirFromFailure(dn1Vol2);
+ DataNodeTestUtils.reconfigureDataNode(dns.get(0), dn1Vol2);
+ verifyDataNodeVolumeMetrics(fs, cluster, fileName);
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
+
+ private MiniDFSCluster setupClusterForVolumeMetrics() throws IOException {
+ Configuration conf = new HdfsConfiguration();
+ conf.set(DFSConfigKeys.DFS_DATANODE_FILE_IO_EVENTS_CLASS_KEY,
+ "org.apache.hadoop.hdfs.server.datanode.ProfilingFileIoEvents");
+ SimulatedFSDataset.setFactory(conf);
+ return new MiniDFSCluster.Builder(conf)
+ .numDataNodes(NUM_DATANODES)
+ .storageTypes(new StorageType[]{StorageType.RAM_DISK, StorageType.DISK})
+ .storagesPerDatanode(2)
+ .build();
+ }
+
+ private void verifyDataNodeVolumeMetrics(final FileSystem fs,
+ final MiniDFSCluster cluster, final Path fileName) throws IOException {
+ List<DataNode> datanodes = cluster.getDataNodes();
+ DataNode datanode = datanodes.get(0);
+
+ final ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
+ final FsVolumeSpi volume = datanode.getFSDataset().getVolume(block);
+ DataNodeVolumeMetrics metrics = volume.getMetrics();
+
+ MetricsRecordBuilder rb = getMetrics(volume.getMetrics().name());
+ assertCounter("TotalDataFileIos", metrics.getTotalDataFileIos(), rb);
+
+ LOG.info("TotalMetadataOperations : " +
+ metrics.getTotalMetadataOperations());
+ LOG.info("TotalDataFileIos : " + metrics.getTotalDataFileIos());
+ LOG.info("TotalFileIoErrors : " + metrics.getTotalFileIoErrors());
+
+ LOG.info("MetadataOperationSampleCount : " +
+ metrics.getMetadataOperationSampleCount());
+ LOG.info("MetadataOperationMean : " + metrics.getMetadataOperationMean());
+ LOG.info("MetadataFileIoStdDev : " +
+ metrics.getMetadataOperationStdDev());
+
+ LOG.info("DataFileIoSampleCount : " + metrics.getDataFileIoSampleCount());
+ LOG.info("DataFileIoMean : " + metrics.getDataFileIoMean());
+ LOG.info("DataFileIoStdDev : " + metrics.getDataFileIoStdDev());
+
+ LOG.info("flushIoSampleCount : " + metrics.getFlushIoSampleCount());
+ LOG.info("flushIoMean : " + metrics.getFlushIoMean());
+ LOG.info("flushIoStdDev : " + metrics.getFlushIoStdDev());
+
+ LOG.info("syncIoSampleCount : " + metrics.getSyncIoSampleCount());
+ LOG.info("syncIoMean : " + metrics.getSyncIoMean());
+ LOG.info("syncIoStdDev : " + metrics.getSyncIoStdDev());
+
+ LOG.info("readIoSampleCount : " + metrics.getReadIoMean());
+ LOG.info("readIoMean : " + metrics.getReadIoMean());
+ LOG.info("readIoStdDev : " + metrics.getReadIoStdDev());
+
+ LOG.info("writeIoSampleCount : " + metrics.getWriteIoSampleCount());
+ LOG.info("writeIoMean : " + metrics.getWriteIoMean());
+ LOG.info("writeIoStdDev : " + metrics.getWriteIoStdDev());
+
+ LOG.info("fileIoErrorSampleCount : "
+ + metrics.getFileIoErrorSampleCount());
+ LOG.info("fileIoErrorMean : " + metrics.getFileIoErrorMean());
+ LOG.info("fileIoErrorStdDev : " + metrics.getFileIoErrorStdDev());
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe4ff64a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
index 9b0aa82..956406d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics;
import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -916,6 +917,11 @@ public class TestDirectoryScanner {
return null;
}
+ @Override
+ public DataNodeVolumeMetrics getMetrics() {
+ return null;
+ }
+
@Override
public VolumeCheckResult check(VolumeCheckContext context)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe4ff64a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalVolumeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalVolumeImpl.java
index 2d33e20..ba32834 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalVolumeImpl.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -128,6 +129,11 @@ public class ExternalVolumeImpl implements FsVolumeSpi {
}
@Override
+ public DataNodeVolumeMetrics getMetrics() {
+ return null;
+ }
+
+ @Override
public VolumeCheckResult check(VolumeCheckContext context)
throws Exception {
return VolumeCheckResult.HEALTHY;
[09/17] hadoop git commit: HDFS-11195. Return error when appending files by webhdfs rest api fails. Contributed by Yuanbo Liu.
Posted by xg...@apache.org.
HDFS-11195. Return error when appending files by webhdfs rest api fails. Contributed by Yuanbo Liu.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5b7acdd2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5b7acdd2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5b7acdd2
Branch: refs/heads/YARN-5734
Commit: 5b7acdd206f5a7d1b7af29b68adaa7587d7d8c43
Parents: 523411d
Author: Xiao Chen <xi...@apache.org>
Authored: Tue Dec 20 12:24:00 2016 -0800
Committer: Xiao Chen <xi...@apache.org>
Committed: Tue Dec 20 12:24:00 2016 -0800
----------------------------------------------------------------------
.../server/datanode/web/webhdfs/HdfsWriter.java | 19 ++++++++--
.../org/apache/hadoop/hdfs/MiniDFSCluster.java | 15 ++++++--
.../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 39 ++++++++++++++++++++
3 files changed, 65 insertions(+), 8 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b7acdd2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/HdfsWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/HdfsWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/HdfsWriter.java
index 99924e5..8de4bb2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/HdfsWriter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/HdfsWriter.java
@@ -55,9 +55,13 @@ class HdfsWriter extends SimpleChannelInboundHandler<HttpContent> {
throws IOException {
chunk.content().readBytes(out, chunk.content().readableBytes());
if (chunk instanceof LastHttpContent) {
- response.headers().set(CONNECTION, CLOSE);
- ctx.write(response).addListener(ChannelFutureListener.CLOSE);
- releaseDfsResources();
+ try {
+ releaseDfsResourcesAndThrow();
+ response.headers().set(CONNECTION, CLOSE);
+ ctx.write(response).addListener(ChannelFutureListener.CLOSE);
+ } catch (Exception cause) {
+ exceptionCaught(ctx, cause);
+ }
}
}
@@ -71,7 +75,10 @@ class HdfsWriter extends SimpleChannelInboundHandler<HttpContent> {
releaseDfsResources();
DefaultHttpResponse resp = ExceptionHandler.exceptionCaught(cause);
resp.headers().set(CONNECTION, CLOSE);
- ctx.writeAndFlush(response).addListener(ChannelFutureListener.CLOSE);
+ ctx.writeAndFlush(resp).addListener(ChannelFutureListener.CLOSE);
+ if (LOG != null && LOG.isDebugEnabled()) {
+ LOG.debug("Exception in channel handler ", cause);
+ }
}
private void releaseDfsResources() {
@@ -79,4 +86,8 @@ class HdfsWriter extends SimpleChannelInboundHandler<HttpContent> {
IOUtils.cleanup(LOG, client);
}
+ private void releaseDfsResourcesAndThrow() throws Exception {
+ out.close();
+ client.close();
+ }
}
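The essential ordering change in the patch above: close the DFS output stream first (a failed append actually surfaces on close), and only then send the success response, routing a close failure through the normal error path instead of returning 200. A small self-contained sketch of that shape, with stand-in types instead of the Netty handler classes:

import java.io.ByteArrayOutputStream;
import java.io.OutputStream;

public class AppendAckSketch {
  // Stand-in for the HTTP response side of the handler; illustrative only.
  interface Responder {
    void sendOk();
    void sendError(Exception cause);
  }

  static void finishWrite(OutputStream out, Responder responder) {
    try {
      out.close();          // a failed append surfaces here
      responder.sendOk();   // acknowledge only after a successful close
    } catch (Exception cause) {
      responder.sendError(cause);
    }
  }

  public static void main(String[] args) {
    finishWrite(new ByteArrayOutputStream(), new Responder() {
      public void sendOk() { System.out.println("200 OK"); }
      public void sendError(Exception cause) { System.out.println("error: " + cause); }
    });
  }
}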
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b7acdd2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index cf02a8d..51dca41 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -1980,14 +1980,21 @@ public class MiniDFSCluster implements AutoCloseable {
*/
public void shutdownDataNodes() {
for (int i = dataNodes.size()-1; i >= 0; i--) {
- LOG.info("Shutting down DataNode " + i);
- DataNode dn = dataNodes.remove(i).datanode;
- dn.shutdown();
- numDataNodes--;
+ shutdownDataNode(i);
}
}
/**
+ * Shutdown the datanode at a given index.
+ */
+ public void shutdownDataNode(int dnIndex) {
+ LOG.info("Shutting down DataNode " + dnIndex);
+ DataNode dn = dataNodes.remove(dnIndex).datanode;
+ dn.shutdown();
+ numDataNodes--;
+ }
+
+ /**
* Shutdown all the namenodes.
*/
public synchronized void shutdownNameNodes() {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b7acdd2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index 259353c..d4495dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -1181,4 +1181,43 @@ public class TestWebHDFS {
cluster.shutdown();
}
}
+
+ @Test
+ public void testWebHdfsAppend() throws Exception {
+ MiniDFSCluster cluster = null;
+ final Configuration conf = WebHdfsTestUtil.createConf();
+ final int dnNumber = 3;
+ try {
+
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(dnNumber).build();
+
+ final WebHdfsFileSystem webFS = WebHdfsTestUtil.getWebHdfsFileSystem(
+ conf, WebHdfsConstants.WEBHDFS_SCHEME);
+
+ final DistributedFileSystem fs = cluster.getFileSystem();
+
+ final Path appendFile = new Path("/testAppend.txt");
+ final String content = "hello world";
+ DFSTestUtil.writeFile(fs, appendFile, content);
+
+ for (int index = 0; index < dnNumber - 1; index++){
+ cluster.shutdownDataNode(index);
+ }
+ cluster.restartNameNodes();
+ cluster.waitActive();
+
+ try {
+ DFSTestUtil.appendFile(webFS, appendFile, content);
+ fail("Should fail to append file since "
+ + "datanode number is 1 and replication is 3");
+ } catch (IOException ignored) {
+ String resultContent = DFSTestUtil.readFile(fs, appendFile);
+ assertTrue(resultContent.equals(content));
+ }
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown(true);
+ }
+ }
+ }
}
[04/17] hadoop git commit: YARN-5877. Allow all env's from yarn.nodemanager.env-whitelist to get overridden during launch. Contributed by Bibin A Chundatt.
Posted by xg...@apache.org.
YARN-5877. Allow all env's from yarn.nodemanager.env-whitelist to get overridden during launch. Contributed by Bibin A Chundatt.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/575773a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/575773a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/575773a3
Branch: refs/heads/YARN-5734
Commit: 575773a3570b85293fdf7b8aeb8467580ec7f896
Parents: d2026ae
Author: Sunil G <su...@apache.org>
Authored: Tue Dec 20 11:39:06 2016 +0530
Committer: Sunil G <su...@apache.org>
Committed: Tue Dec 20 11:39:06 2016 +0530
----------------------------------------------------------------------
.../server/nodemanager/ContainerExecutor.java | 11 ++--
.../launcher/TestContainerLaunch.java | 63 ++++++++++++++++----
2 files changed, 58 insertions(+), 16 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/575773a3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index f880506..158585e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -44,7 +44,6 @@ import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -320,11 +319,11 @@ public abstract class ContainerExecutor implements Configurable {
ContainerLaunch.ShellScriptBuilder.create();
Set<String> whitelist = new HashSet<>();
- whitelist.add(ApplicationConstants.Environment.HADOOP_YARN_HOME.name());
- whitelist.add(ApplicationConstants.Environment.HADOOP_COMMON_HOME.name());
- whitelist.add(ApplicationConstants.Environment.HADOOP_HDFS_HOME.name());
- whitelist.add(ApplicationConstants.Environment.HADOOP_CONF_DIR.name());
- whitelist.add(ApplicationConstants.Environment.JAVA_HOME.name());
+ String[] nmWhiteList = conf.get(YarnConfiguration.NM_ENV_WHITELIST,
+ YarnConfiguration.DEFAULT_NM_ENV_WHITELIST).split(",");
+ for (String param : nmWhiteList) {
+ whitelist.add(param);
+ }
if (environment != null) {
for (Map.Entry<String, String> env : environment.entrySet()) {
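In other words, the whitelist is no longer a fixed set of five variables but whatever yarn.nodemanager.env-whitelist resolves to. A hedged sketch of just that lookup using the plain Configuration API; the default value shown below is illustrative, not necessarily the exact DEFAULT_NM_ENV_WHITELIST string:

import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;

public class WhitelistLookupSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Comma-separated variable names; site configuration overrides the default.
    String whitelistSpec = conf.get(
        "yarn.nodemanager.env-whitelist",
        "JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,"
            + "HADOOP_YARN_HOME");
    Set<String> whitelist = new HashSet<>();
    for (String param : whitelistSpec.split(",")) {
      whitelist.add(param.trim());
    }
    System.out.println(whitelist);
  }
}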
http://git-wip-us.apache.org/repos/asf/hadoop/blob/575773a3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
index 4ce816a..23b99d9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher;
import static org.apache.hadoop.test.PlatformAssumptions.assumeWindows;
+import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.fail;
@@ -33,6 +34,9 @@ import java.io.IOException;
import java.io.PrintStream;
import java.io.PrintWriter;
import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
@@ -172,10 +176,11 @@ public class TestContainerLaunch extends BaseContainerManagerTest {
commands.add("/bin/sh ./\\\"" + badSymlink + "\\\"");
}
- new DefaultContainerExecutor()
- .writeLaunchEnv(fos, env, resources, commands,
- new Path(localLogDir.getAbsolutePath()), "user",
- tempFile.getName());
+ DefaultContainerExecutor defaultContainerExecutor =
+ new DefaultContainerExecutor();
+ defaultContainerExecutor.setConf(new YarnConfiguration());
+ defaultContainerExecutor.writeLaunchEnv(fos, env, resources, commands,
+ new Path(localLogDir.getAbsolutePath()), "user", tempFile.getName());
fos.flush();
fos.close();
FileUtil.setExecutable(tempFile, true);
@@ -242,9 +247,11 @@ public class TestContainerLaunch extends BaseContainerManagerTest {
} else {
commands.add("/bin/sh ./\\\"" + symLink + "\\\"");
}
- new DefaultContainerExecutor()
- .writeLaunchEnv(fos, env, resources, commands,
- new Path(localLogDir.getAbsolutePath()), "user");
+ DefaultContainerExecutor defaultContainerExecutor =
+ new DefaultContainerExecutor();
+ defaultContainerExecutor.setConf(new YarnConfiguration());
+ defaultContainerExecutor.writeLaunchEnv(fos, env, resources, commands,
+ new Path(localLogDir.getAbsolutePath()), "user");
fos.flush();
fos.close();
FileUtil.setExecutable(tempFile, true);
@@ -279,6 +286,39 @@ public class TestContainerLaunch extends BaseContainerManagerTest {
}
}
+ @Test(timeout = 20000)
+ public void testWriteEnvExport() throws Exception {
+ // Valid only for unix
+ assumeNotWindows();
+ File shellFile = Shell.appendScriptExtension(tmpDir, "hello");
+ Map<String, String> env = new HashMap<String, String>();
+ env.put("HADOOP_COMMON_HOME", "/opt/hadoopcommon");
+ env.put("HADOOP_MAPRED_HOME", "/opt/hadoopbuild");
+ Map<Path, List<String>> resources = new HashMap<Path, List<String>>();
+ FileOutputStream fos = new FileOutputStream(shellFile);
+ List<String> commands = new ArrayList<String>();
+ DefaultContainerExecutor defaultContainerExecutor =
+ new DefaultContainerExecutor();
+ YarnConfiguration conf = new YarnConfiguration();
+ conf.set(YarnConfiguration.NM_ENV_WHITELIST,
+ "HADOOP_MAPRED_HOME,HADOOP_YARN_HOME");
+ defaultContainerExecutor.setConf(conf);
+ defaultContainerExecutor.writeLaunchEnv(fos, env, resources, commands,
+ new Path(localLogDir.getAbsolutePath()), "user");
+ String shellContent =
+ new String(Files.readAllBytes(Paths.get(shellFile.getAbsolutePath())),
+ StandardCharsets.UTF_8);
+ Assert.assertTrue(shellContent
+ .contains("export HADOOP_COMMON_HOME=\"/opt/hadoopcommon\""));
+ // Not available in env and whitelist
+ Assert.assertTrue(shellContent.contains("export HADOOP_MAPRED_HOME="
+ + "${HADOOP_MAPRED_HOME:-\"/opt/hadoopbuild\"}"));
+ // Not available in env but in whitelist
+ Assert.assertFalse(shellContent.contains("HADOOP_YARN_HOME"));
+ fos.flush();
+ fos.close();
+ }
+
@Test (timeout = 20000)
public void testInvalidEnvSyntaxDiagnostics() throws IOException {
@@ -297,9 +337,11 @@ public class TestContainerLaunch extends BaseContainerManagerTest {
"\"workflowName\":\"\n\ninsert table " +
"\npartition (cd_education_status)\nselect cd_demo_sk, cd_gender, " );
List<String> commands = new ArrayList<String>();
- new DefaultContainerExecutor()
- .writeLaunchEnv(fos, env, resources, commands,
- new Path(localLogDir.getAbsolutePath()), "user");
+ DefaultContainerExecutor defaultContainerExecutor =
+ new DefaultContainerExecutor();
+ defaultContainerExecutor.setConf(new YarnConfiguration());
+ defaultContainerExecutor.writeLaunchEnv(fos, env, resources, commands,
+ new Path(localLogDir.getAbsolutePath()), "user");
fos.flush();
fos.close();
@@ -377,6 +419,7 @@ public class TestContainerLaunch extends BaseContainerManagerTest {
List<String> commands = new ArrayList<String>();
commands.add(command);
ContainerExecutor exec = new DefaultContainerExecutor();
+ exec.setConf(new YarnConfiguration());
exec.writeLaunchEnv(fos, env, resources, commands,
new Path(localLogDir.getAbsolutePath()), "user");
fos.flush();
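The two assertions in testWriteEnvExport capture the behavioral difference: a variable present only in the container's env map is exported directly, a variable that is also whitelisted is exported with a shell default so a value inherited from the NodeManager's own environment wins, and a whitelisted variable missing from the env map is not exported at all. A small sketch reproducing just the two expected string forms (it mirrors what the test asserts, not the actual ShellScriptBuilder code):

public class ExportFormSketch {
  static String exportLine(String name, String value, boolean whitelisted) {
    // Whitelisted variables fall back to the provided value only when the
    // launching environment does not already define them.
    return whitelisted
        ? "export " + name + "=${" + name + ":-\"" + value + "\"}"
        : "export " + name + "=\"" + value + "\"";
  }

  public static void main(String[] args) {
    System.out.println(exportLine("HADOOP_COMMON_HOME", "/opt/hadoopcommon", false));
    // -> export HADOOP_COMMON_HOME="/opt/hadoopcommon"
    System.out.println(exportLine("HADOOP_MAPRED_HOME", "/opt/hadoopbuild", true));
    // -> export HADOOP_MAPRED_HOME=${HADOOP_MAPRED_HOME:-"/opt/hadoopbuild"}
  }
}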