Posted to common-commits@hadoop.apache.org by aw...@apache.org on 2015/07/23 17:20:13 UTC
[01/14] hadoop git commit: HDFS-8721. Add a metric for number of encryption zones. Contributed by Rakesh R.
Repository: hadoop
Updated Branches:
refs/heads/HADOOP-12111 b8750c685 -> 1e4f36147
HDFS-8721. Add a metric for number of encryption zones. Contributed by Rakesh R.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cb03768b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cb03768b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cb03768b
Branch: refs/heads/HADOOP-12111
Commit: cb03768b1b2250b9b5a7944cf6ef918e8a974e20
Parents: 5137b38
Author: cnauroth <cn...@apache.org>
Authored: Tue Jul 21 13:55:58 2015 -0700
Committer: cnauroth <cn...@apache.org>
Committed: Tue Jul 21 13:55:58 2015 -0700
----------------------------------------------------------------------
.../hadoop-common/src/site/markdown/Metrics.md | 1 +
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../hadoop/hdfs/server/namenode/EncryptionZoneManager.java | 7 +++++++
.../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 6 ++++++
.../hdfs/server/namenode/metrics/FSNamesystemMBean.java | 5 +++++
.../test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java | 6 ++++++
.../hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java | 5 +++++
7 files changed, 33 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb03768b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index ca89745..2b23508 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -216,6 +216,7 @@ Each metrics record contains tags such as HAState and Hostname as additional inf
| `TotalLoad` | Current number of connections |
| `SnapshottableDirectories` | Current number of snapshottable directories |
| `Snapshots` | Current number of snapshots |
+| `NumEncryptionZones` | Current number of encryption zones |
| `BlocksTotal` | Current number of allocated blocks in the system |
| `FilesTotal` | Current number of files and directories |
| `PendingReplicationBlocks` | Current number of blocks pending to be replicated |
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb03768b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a29a090..7c771b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -734,6 +734,9 @@ Release 2.8.0 - UNRELEASED
HDFS-7483. Display information per tier on the Namenode UI.
(Benoy Antony and wheat9 via wheat9)
+ HDFS-8721. Add a metric for number of encryption zones.
+ (Rakesh R via cnauroth)
+
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb03768b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
index 3fe748d..7c3c895 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
@@ -360,4 +360,11 @@ public class EncryptionZoneManager {
final boolean hasMore = (numResponses < tailMap.size());
return new BatchedListEntries<EncryptionZone>(zones, hasMore);
}
+
+ /**
+ * @return number of encryption zones.
+ */
+ public int getNumEncryptionZones() {
+ return encryptionZones.size();
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb03768b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 7c6d6a1..fd37fbe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4075,6 +4075,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
return JSON.toString(info);
}
+ @Override // FSNamesystemMBean
+ @Metric({ "NumEncryptionZones", "The number of encryption zones" })
+ public int getNumEncryptionZones() {
+ return dir.ezManager.getNumEncryptionZones();
+ }
+
int getNumberOfDatanodes(DatanodeReportType type) {
readLock();
try {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb03768b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
index b31b7b6..580cb78 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
@@ -184,4 +184,9 @@ public interface FSNamesystemMBean {
* @return JSON string
*/
public String getTopUserOpCounts();
+
+ /**
+ * Return the number of encryption zones in the system.
+ */
+ int getNumEncryptionZones();
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb03768b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
index e0bd6f4..567a70a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
@@ -97,6 +97,8 @@ import static org.mockito.Mockito.any;
import static org.mockito.Mockito.anyString;
import static org.apache.hadoop.hdfs.DFSTestUtil.verifyFilesEqual;
import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
+import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
@@ -120,6 +122,7 @@ public class TestEncryptionZones {
protected DistributedFileSystem fs;
private File testRootDir;
protected final String TEST_KEY = "test_key";
+ private static final String NS_METRICS = "FSNamesystem";
protected FileSystemTestWrapper fsWrapper;
protected FileContextTestWrapper fcWrapper;
@@ -358,6 +361,9 @@ public class TestEncryptionZones {
fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
cluster.restartNameNode(true);
assertNumZones(numZones);
+ assertEquals("Unexpected number of encryption zones!", numZones, cluster
+ .getNamesystem().getNumEncryptionZones());
+ assertGauge("NumEncryptionZones", numZones, getMetrics(NS_METRICS));
assertZonePresent(null, zone1.toString());
// Verify newly added ez is present after restarting the NameNode
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb03768b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java
index c044fb0..fb3179a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java
@@ -117,6 +117,11 @@ public class TestFSNamesystemMBean {
"PendingDeletionBlocks");
assertNotNull(pendingDeletionBlocks);
assertTrue(pendingDeletionBlocks instanceof Long);
+
+ Object encryptionZones = mbs.getAttribute(mxbeanName,
+ "NumEncryptionZones");
+ assertNotNull(encryptionZones);
+ assertTrue(encryptionZones instanceof Integer);
} finally {
if (cluster != null) {
cluster.shutdown();
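
For reference, the new gauge can also be read from a live NameNode over JMX from outside the cluster. A minimal sketch follows; the MBean name is assumed to be the FSNamesystemState bean exercised by TestFSNamesystemMBean above, and the host and JMX port are placeholders (remote JMX must be enabled on the NameNode JVM).

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class EzMetricReader {
  public static void main(String[] args) throws Exception {
    // Placeholder endpoint; point this at the NameNode's JMX port.
    JMXServiceURL url = new JMXServiceURL(
        "service:jmx:rmi:///jndi/rmi://namenode-host:8004/jmxrmi");
    try (JMXConnector jmxc = JMXConnectorFactory.connect(url)) {
      MBeanServerConnection mbs = jmxc.getMBeanServerConnection();
      ObjectName name =
          new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");
      // The attribute is an int, mirroring FSNamesystemMBean#getNumEncryptionZones().
      Integer zones = (Integer) mbs.getAttribute(name, "NumEncryptionZones");
      System.out.println("NumEncryptionZones = " + zones);
    }
  }
}
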
[14/14] hadoop git commit: Merge branch 'trunk' into HADOOP-12111
Posted by aw...@apache.org.
Merge branch 'trunk' into HADOOP-12111
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1e4f3614
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1e4f3614
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1e4f3614
Branch: refs/heads/HADOOP-12111
Commit: 1e4f361471cb0895e0c3ab22adf31af456315c60
Parents: b8750c6 ee98d63
Author: Allen Wittenauer <aw...@apache.org>
Authored: Thu Jul 23 08:20:05 2015 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Thu Jul 23 08:20:05 2015 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 9 +
.../org/apache/hadoop/io/nativeio/NativeIO.java | 4 -
.../hadoop-common/src/site/markdown/Metrics.md | 6 +
.../hadoop/hdfs/web/ByteRangeInputStream.java | 57 +++-
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 13 +
.../blockmanagement/InvalidateBlocks.java | 5 +-
.../server/namenode/EncryptionZoneManager.java | 7 +
.../hdfs/server/namenode/FSDirAppendOp.java | 261 +++++++++++++++++++
.../server/namenode/FSDirStatAndListingOp.java | 2 +-
.../hdfs/server/namenode/FSDirTruncateOp.java | 16 +-
.../hdfs/server/namenode/FSDirWriteFileOp.java | 6 +-
.../hdfs/server/namenode/FSEditLogLoader.java | 4 +-
.../hdfs/server/namenode/FSNamesystem.java | 247 +++---------------
.../namenode/metrics/FSNamesystemMBean.java | 5 +
.../apache/hadoop/hdfs/TestEncryptionZones.java | 6 +
.../server/namenode/TestFSNamesystemMBean.java | 5 +
.../hdfs/web/TestByteRangeInputStream.java | 35 +--
.../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 41 +++
.../org/apache/hadoop/tools/HadoopArchives.java | 21 +-
.../src/site/markdown/HadoopArchives.md.vm | 2 +-
.../apache/hadoop/tools/TestHadoopArchives.java | 26 +-
.../hadoop/fs/azure/NativeAzureFileSystem.java | 8 +-
hadoop-yarn-project/CHANGES.txt | 14 +
.../hadoop/yarn/conf/YarnConfiguration.java | 11 +
.../hadoop/yarn/event/AsyncDispatcher.java | 8 +
.../src/main/resources/yarn-default.xml | 26 ++
.../hadoop/yarn/event/DrainDispatcher.java | 11 +-
.../hadoop/yarn/event/TestAsyncDispatcher.java | 62 +++++
.../util/TestNodeManagerHardwareUtils.java | 5 +
.../resourcemanager/recovery/RMStateStore.java | 9 +-
.../scheduler/SchedulerApplicationAttempt.java | 2 +-
.../scheduler/capacity/LeafQueue.java | 8 +-
.../TestCapacitySchedulerNodeLabelUpdate.java | 64 +++++
33 files changed, 722 insertions(+), 284 deletions(-)
----------------------------------------------------------------------
[03/14] hadoop git commit: YARN-3878. AsyncDispatcher can hang while stopping if it is configured for draining events on stop. Contributed by Varun Saxena
Posted by aw...@apache.org.
YARN-3878. AsyncDispatcher can hang while stopping if it is configured for draining events on stop. Contributed by Varun Saxena
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/393fe717
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/393fe717
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/393fe717
Branch: refs/heads/HADOOP-12111
Commit: 393fe71771e3ac6bc0efe59d9aaf19d3576411b3
Parents: a26cc66
Author: Jian He <ji...@apache.org>
Authored: Tue Jul 21 15:05:41 2015 -0700
Committer: Jian He <ji...@apache.org>
Committed: Tue Jul 21 15:05:41 2015 -0700
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +
.../hadoop/yarn/event/AsyncDispatcher.java | 8 +++
.../hadoop/yarn/event/DrainDispatcher.java | 11 +++-
.../hadoop/yarn/event/TestAsyncDispatcher.java | 62 ++++++++++++++++++++
4 files changed, 83 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/393fe717/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 79e9ae2..5100cdf 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -682,6 +682,9 @@ Release 2.7.2 - UNRELEASED
YARN-3535. Scheduler must re-request container resources when RMContainer transitions
from ALLOCATED to KILLED (rohithsharma and peng.zhang via asuresh)
+ YARN-3878. AsyncDispatcher can hang while stopping if it is configured for
+ draining events on stop. (Varun Saxena via jianhe)
+
Release 2.7.1 - 2015-07-06
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/393fe717/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
index c54b9c7..48312a3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
@@ -246,6 +246,9 @@ public class AsyncDispatcher extends AbstractService implements Dispatcher {
if (!stopped) {
LOG.warn("AsyncDispatcher thread interrupted", e);
}
+ // Need to reset drained flag to true if event queue is empty,
+ // otherwise dispatcher will hang on stop.
+ drained = eventQueue.isEmpty();
throw new YarnRuntimeException(e);
}
};
@@ -287,6 +290,11 @@ public class AsyncDispatcher extends AbstractService implements Dispatcher {
}
@VisibleForTesting
+ protected boolean isEventThreadWaiting() {
+ return eventHandlingThread.getState() == Thread.State.WAITING;
+ }
+
+ @VisibleForTesting
protected boolean isDrained() {
return this.drained;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/393fe717/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
index da5ae44..e4a5a82 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
@@ -27,11 +27,20 @@ public class DrainDispatcher extends AsyncDispatcher {
this(new LinkedBlockingQueue<Event>());
}
- private DrainDispatcher(BlockingQueue<Event> eventQueue) {
+ public DrainDispatcher(BlockingQueue<Event> eventQueue) {
super(eventQueue);
}
/**
+ * Wait till event thread enters WAITING state (i.e. waiting for new events).
+ */
+ public void waitForEventThreadToWait() {
+ while (!isEventThreadWaiting()) {
+ Thread.yield();
+ }
+ }
+
+ /**
* Busy loop waiting for all queued events to drain.
*/
public void await() {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/393fe717/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java
new file mode 100644
index 0000000..b5fd923
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.event;
+
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestAsyncDispatcher {
+
+ /* This test checks whether the dispatcher hangs on close if the following
+ * two things happen:
+ * 1. A thread that was putting an event on the event queue is interrupted.
+ * 2. The event queue is empty on close.
+ */
+ @SuppressWarnings({ "unchecked", "rawtypes" })
+ @Test(timeout=10000)
+ public void testDispatcherOnCloseIfQueueEmpty() throws Exception {
+ BlockingQueue<Event> eventQueue = spy(new LinkedBlockingQueue<Event>());
+ Event event = mock(Event.class);
+ doThrow(new InterruptedException()).when(eventQueue).put(event);
+ DrainDispatcher disp = new DrainDispatcher(eventQueue);
+ disp.init(new Configuration());
+ disp.setDrainEventsOnStop();
+ disp.start();
+ // Wait for event handler thread to start and begin waiting for events.
+ disp.waitForEventThreadToWait();
+ try {
+ disp.getEventHandler().handle(event);
+ } catch (YarnRuntimeException e) {
+ }
+ // Queue should be empty and dispatcher should not hang on close
+ Assert.assertTrue("Event Queue should have been empty",
+ eventQueue.isEmpty());
+ disp.close();
+ }
+}
+
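
To see why the one-line drained fix matters, here is an illustrative toy reduction (class and field names are made up, not the real AsyncDispatcher): the stop path waits on a drained flag, so an interrupted put() must re-derive the flag from the queue, or stop() never returns.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

// Illustrative toy, not the real AsyncDispatcher.
class ToyDispatcher {
  private final BlockingQueue<Runnable> queue = new LinkedBlockingQueue<>();
  // In the real dispatcher a handler thread (omitted here) sets this back
  // to true whenever it finds the queue empty.
  private volatile boolean drained = true;

  void dispatch(Runnable event) {
    drained = false; // about to enqueue
    try {
      queue.put(event);
    } catch (InterruptedException e) {
      // The YARN-3878 fix: re-derive the flag from the queue. Without this
      // line, drained stays false although nothing was enqueued, and stop()
      // below spins forever once the handler thread is gone.
      drained = queue.isEmpty();
      throw new RuntimeException(e);
    }
  }

  void stop() throws InterruptedException {
    while (!drained) { // drain-events-on-stop busy wait
      Thread.sleep(10);
    }
  }
}
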
[04/14] hadoop git commit: HDFS-8495. Consolidate append() related implementation into a single class. Contributed by Rakesh R.
Posted by aw...@apache.org.
HDFS-8495. Consolidate append() related implementation into a single class. Contributed by Rakesh R.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31f11713
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31f11713
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31f11713
Branch: refs/heads/HADOOP-12111
Commit: 31f117138a00794de4951ee8433e304d72b04094
Parents: 393fe71
Author: Haohui Mai <wh...@apache.org>
Authored: Tue Jul 21 17:25:23 2015 -0700
Committer: Haohui Mai <wh...@apache.org>
Committed: Tue Jul 21 17:25:23 2015 -0700
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +
.../hdfs/server/namenode/FSDirAppendOp.java | 261 +++++++++++++++++++
.../server/namenode/FSDirStatAndListingOp.java | 2 +-
.../hdfs/server/namenode/FSDirTruncateOp.java | 16 +-
.../hdfs/server/namenode/FSDirWriteFileOp.java | 6 +-
.../hdfs/server/namenode/FSEditLogLoader.java | 4 +-
.../hdfs/server/namenode/FSNamesystem.java | 241 ++---------------
7 files changed, 304 insertions(+), 229 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/31f11713/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8122045..50803de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -737,6 +737,9 @@ Release 2.8.0 - UNRELEASED
HDFS-8721. Add a metric for number of encryption zones.
(Rakesh R via cnauroth)
+ HDFS-8495. Consolidate append() related implementation into a single class.
+ (Rakesh R via wheat9)
+
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
http://git-wip-us.apache.org/repos/asf/hadoop/blob/31f11713/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
new file mode 100644
index 0000000..abb2dc8
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
@@ -0,0 +1,261 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.RecoverLeaseOp;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion.Feature;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Helper class to perform append operation.
+ */
+final class FSDirAppendOp {
+
+ /**
+ * Private constructor for preventing FSDirAppendOp object creation.
+ * Static-only class.
+ */
+ private FSDirAppendOp() {}
+
+ /**
+ * Append to an existing file.
+ * <p>
+ *
+ * The method returns the last block of the file if this is a partial block,
+ * which can still be used for writing more data. The client uses the
+ * returned block locations to form the data pipeline for this block.<br>
+ * The {@link LocatedBlock} will be null if the last block is full.
+ * The client then allocates a new block with the next call using
+ * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#addBlock}.
+ * <p>
+ *
+ * For description of parameters and exceptions thrown see
+ * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#append}
+ *
+ * @param fsn namespace
+ * @param srcArg path name
+ * @param pc permission checker to check fs permission
+ * @param holder client name
+ * @param clientMachine client machine info
+ * @param newBlock if the data is appended to a new block
+ * @param logRetryCache whether to record RPC ids in editlog for retry cache
+ * rebuilding
+ * @return the last block with status
+ */
+ static LastBlockWithStatus appendFile(final FSNamesystem fsn,
+ final String srcArg, final FSPermissionChecker pc, final String holder,
+ final String clientMachine, final boolean newBlock,
+ final boolean logRetryCache) throws IOException {
+ assert fsn.hasWriteLock();
+
+ final byte[][] pathComponents = FSDirectory
+ .getPathComponentsForReservedPath(srcArg);
+ final LocatedBlock lb;
+ final FSDirectory fsd = fsn.getFSDirectory();
+ final String src;
+ fsd.writeLock();
+ try {
+ src = fsd.resolvePath(pc, srcArg, pathComponents);
+ final INodesInPath iip = fsd.getINodesInPath4Write(src);
+ // Verify that the destination does not exist as a directory already
+ final INode inode = iip.getLastINode();
+ final String path = iip.getPath();
+ if (inode != null && inode.isDirectory()) {
+ throw new FileAlreadyExistsException("Cannot append to directory "
+ + path + "; already exists as a directory.");
+ }
+ if (fsd.isPermissionEnabled()) {
+ fsd.checkPathAccess(pc, iip, FsAction.WRITE);
+ }
+
+ if (inode == null) {
+ throw new FileNotFoundException(
+ "Failed to append to non-existent file " + path + " for client "
+ + clientMachine);
+ }
+ final INodeFile file = INodeFile.valueOf(inode, path, true);
+ BlockManager blockManager = fsd.getBlockManager();
+ final BlockStoragePolicy lpPolicy = blockManager
+ .getStoragePolicy("LAZY_PERSIST");
+ if (lpPolicy != null && lpPolicy.getId() == file.getStoragePolicyID()) {
+ throw new UnsupportedOperationException(
+ "Cannot append to lazy persist file " + path);
+ }
+ // Opening an existing file for append - may need to recover lease.
+ fsn.recoverLeaseInternal(RecoverLeaseOp.APPEND_FILE, iip, path, holder,
+ clientMachine, false);
+
+ final BlockInfo lastBlock = file.getLastBlock();
+ // Check that the block has at least minimum replication.
+ if (lastBlock != null && lastBlock.isComplete()
+ && !blockManager.isSufficientlyReplicated(lastBlock)) {
+ throw new IOException("append: lastBlock=" + lastBlock + " of src="
+ + path + " is not sufficiently replicated yet.");
+ }
+ lb = prepareFileForAppend(fsn, iip, holder, clientMachine, newBlock,
+ true, logRetryCache);
+ } catch (IOException ie) {
+ NameNode.stateChangeLog
+ .warn("DIR* NameSystem.append: " + ie.getMessage());
+ throw ie;
+ } finally {
+ fsd.writeUnlock();
+ }
+
+ HdfsFileStatus stat = FSDirStatAndListingOp.getFileInfo(fsd, src, false,
+ FSDirectory.isReservedRawName(srcArg), true);
+ if (lb != null) {
+ NameNode.stateChangeLog.debug(
+ "DIR* NameSystem.appendFile: file {} for {} at {} block {} block"
+ + " size {}", srcArg, holder, clientMachine, lb.getBlock(), lb
+ .getBlock().getNumBytes());
+ }
+ return new LastBlockWithStatus(lb, stat);
+ }
+
+ /**
+ * Convert current node to under construction.
+ * Recreate in-memory lease record.
+ *
+ * @param fsn namespace
+ * @param iip inodes in the path containing the file
+ * @param leaseHolder identifier of the lease holder on this file
+ * @param clientMachine identifier of the client machine
+ * @param newBlock if the data is appended to a new block
+ * @param writeToEditLog whether to persist this change to the edit log
+ * @param logRetryCache whether to record RPC ids in editlog for retry cache
+ * rebuilding
+ * @return the last block locations if the block is partial or null otherwise
+ * @throws IOException
+ */
+ static LocatedBlock prepareFileForAppend(final FSNamesystem fsn,
+ final INodesInPath iip, final String leaseHolder,
+ final String clientMachine, final boolean newBlock,
+ final boolean writeToEditLog, final boolean logRetryCache)
+ throws IOException {
+ assert fsn.hasWriteLock();
+
+ final INodeFile file = iip.getLastINode().asFile();
+ final QuotaCounts delta = verifyQuotaForUCBlock(fsn, file, iip);
+
+ file.recordModification(iip.getLatestSnapshotId());
+ file.toUnderConstruction(leaseHolder, clientMachine);
+
+ fsn.getLeaseManager().addLease(
+ file.getFileUnderConstructionFeature().getClientName(), file.getId());
+
+ LocatedBlock ret = null;
+ if (!newBlock) {
+ FSDirectory fsd = fsn.getFSDirectory();
+ ret = fsd.getBlockManager().convertLastBlockToUnderConstruction(file, 0);
+ if (ret != null && delta != null) {
+ Preconditions.checkState(delta.getStorageSpace() >= 0, "appending to"
+ + " a block with size larger than the preferred block size");
+ fsd.writeLock();
+ try {
+ fsd.updateCountNoQuotaCheck(iip, iip.length() - 1, delta);
+ } finally {
+ fsd.writeUnlock();
+ }
+ }
+ } else {
+ BlockInfo lastBlock = file.getLastBlock();
+ if (lastBlock != null) {
+ ExtendedBlock blk = new ExtendedBlock(fsn.getBlockPoolId(), lastBlock);
+ ret = new LocatedBlock(blk, new DatanodeInfo[0]);
+ }
+ }
+
+ if (writeToEditLog) {
+ final String path = iip.getPath();
+ if (NameNodeLayoutVersion.supports(Feature.APPEND_NEW_BLOCK,
+ fsn.getEffectiveLayoutVersion())) {
+ fsn.getEditLog().logAppendFile(path, file, newBlock, logRetryCache);
+ } else {
+ fsn.getEditLog().logOpenFile(path, file, false, logRetryCache);
+ }
+ }
+ return ret;
+ }
+
+ /**
+ * Verify quota when using the preferred block size for UC block. This is
+ * usually used by append and truncate.
+ *
+ * @throws QuotaExceededException when violating the storage quota
+ * @return expected quota usage update. null means no change or no need to
+ * update quota usage later
+ */
+ private static QuotaCounts verifyQuotaForUCBlock(FSNamesystem fsn,
+ INodeFile file, INodesInPath iip) throws QuotaExceededException {
+ FSDirectory fsd = fsn.getFSDirectory();
+ if (!fsn.isImageLoaded() || fsd.shouldSkipQuotaChecks()) {
+ // Do not check quota if editlog is still being processed
+ return null;
+ }
+ if (file.getLastBlock() != null) {
+ final QuotaCounts delta = computeQuotaDeltaForUCBlock(fsn, file);
+ fsd.readLock();
+ try {
+ FSDirectory.verifyQuota(iip, iip.length() - 1, delta, null);
+ return delta;
+ } finally {
+ fsd.readUnlock();
+ }
+ }
+ return null;
+ }
+
+ /** Compute quota change for converting a complete block to a UC block. */
+ private static QuotaCounts computeQuotaDeltaForUCBlock(FSNamesystem fsn,
+ INodeFile file) {
+ final QuotaCounts delta = new QuotaCounts.Builder().build();
+ final BlockInfo lastBlock = file.getLastBlock();
+ if (lastBlock != null) {
+ final long diff = file.getPreferredBlockSize() - lastBlock.getNumBytes();
+ final short repl = file.getPreferredBlockReplication();
+ delta.addStorageSpace(diff * repl);
+ final BlockStoragePolicy policy = fsn.getFSDirectory()
+ .getBlockStoragePolicySuite().getPolicy(file.getStoragePolicyID());
+ List<StorageType> types = policy.chooseStorageTypes(repl);
+ for (StorageType t : types) {
+ if (t.supportTypeQuota()) {
+ delta.addTypeSpace(t, diff);
+ }
+ }
+ }
+ return delta;
+ }
+}
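
For context, the client-side entry point that ultimately lands in this new helper is the ordinary FileSystem#append call. A minimal usage sketch, with the cluster URI and path as placeholders:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AppendExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://namenode-host:8020"), conf);
    Path file = new Path("/user/example/log.txt");
    // append() drives the NameNode path shown above: permission check,
    // lease recovery, and reuse of a partial last block when one exists.
    try (FSDataOutputStream out = fs.append(file)) {
      out.writeBytes("one more record\n");
    }
  }
}
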
http://git-wip-us.apache.org/repos/asf/hadoop/blob/31f11713/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 201dabc..14f4d66 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -508,7 +508,7 @@ class FSDirStatAndListingOp {
final long fileSize = !inSnapshot && isUc ?
fileNode.computeFileSizeNotIncludingLastUcBlock() : size;
- loc = fsd.getFSNamesystem().getBlockManager().createLocatedBlocks(
+ loc = fsd.getBlockManager().createLocatedBlocks(
fileNode.getBlocks(snapshot), fileSize, isUc, 0L, size, false,
inSnapshot, feInfo);
if (loc == null) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/31f11713/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
index 9fc9def..e24bb2f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstructionContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.RecoverLeaseOp;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
@@ -79,11 +80,11 @@ final class FSDirTruncateOp {
try {
src = fsd.resolvePath(pc, srcArg, pathComponents);
iip = fsd.getINodesInPath4Write(src, true);
- if (fsn.isPermissionEnabled()) {
+ if (fsd.isPermissionEnabled()) {
fsd.checkPathAccess(pc, iip, FsAction.WRITE);
}
INodeFile file = INodeFile.valueOf(iip.getLastINode(), src);
- final BlockStoragePolicy lpPolicy = fsn.getBlockManager()
+ final BlockStoragePolicy lpPolicy = fsd.getBlockManager()
.getStoragePolicy("LAZY_PERSIST");
if (lpPolicy != null && lpPolicy.getId() == file.getStoragePolicyID()) {
@@ -178,7 +179,7 @@ final class FSDirTruncateOp {
"Should be the same block.";
if (oldBlock.getBlockId() != tBlk.getBlockId()
&& !file.isBlockInLatestSnapshot(oldBlock)) {
- fsn.getBlockManager().removeBlockFromMap(oldBlock);
+ fsd.getBlockManager().removeBlockFromMap(oldBlock);
}
}
assert onBlockBoundary == (truncateBlock == null) :
@@ -223,6 +224,7 @@ final class FSDirTruncateOp {
}
BlockInfoUnderConstruction truncatedBlockUC;
+ BlockManager blockManager = fsn.getFSDirectory().getBlockManager();
if (shouldCopyOnTruncate) {
// Add new truncateBlock into blocksMap and
// use oldBlock as a source for copy-on-truncate recovery
@@ -230,9 +232,8 @@ final class FSDirTruncateOp {
file.getPreferredBlockReplication());
truncatedBlockUC.setNumBytes(oldBlock.getNumBytes() - lastBlockDelta);
truncatedBlockUC.setTruncateBlock(oldBlock);
- file.setLastBlock(truncatedBlockUC,
- fsn.getBlockManager().getStorages(oldBlock));
- fsn.getBlockManager().addBlockCollection(truncatedBlockUC, file);
+ file.setLastBlock(truncatedBlockUC, blockManager.getStorages(oldBlock));
+ blockManager.addBlockCollection(truncatedBlockUC, file);
NameNode.stateChangeLog.debug(
"BLOCK* prepareFileForTruncate: Scheduling copy-on-truncate to new"
@@ -241,8 +242,7 @@ final class FSDirTruncateOp {
truncatedBlockUC.getTruncateBlock());
} else {
// Use new generation stamp for in-place truncate recovery
- fsn.getBlockManager().convertLastBlockToUnderConstruction(file,
- lastBlockDelta);
+ blockManager.convertLastBlockToUnderConstruction(file, lastBlockDelta);
oldBlock = file.getLastBlock();
assert !oldBlock.isComplete() : "oldBlock should be under construction";
truncatedBlockUC = (BlockInfoUnderConstruction) oldBlock;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/31f11713/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index 4830d5d..008a945 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -206,8 +206,8 @@ class FSDirWriteFileOp {
DatanodeStorageInfo[] locs, long offset) throws IOException {
LocatedBlock lBlk = BlockManager.newLocatedBlock(fsn.getExtendedBlock(blk),
locs, offset, false);
- fsn.getBlockManager().setBlockToken(lBlk,
- BlockTokenIdentifier.AccessMode.WRITE);
+ fsn.getFSDirectory().getBlockManager()
+ .setBlockToken(lBlk, BlockTokenIdentifier.AccessMode.WRITE);
return lBlk;
}
@@ -426,7 +426,7 @@ class FSDirWriteFileOp {
fsd.setFileEncryptionInfo(src, feInfo);
newNode = fsd.getInode(newNode.getId()).asFile();
}
- setNewINodeStoragePolicy(fsn.getBlockManager(), newNode, iip,
+ setNewINodeStoragePolicy(fsd.getBlockManager(), newNode, iip,
isLazyPersist);
fsd.getEditLog().logOpenFile(src, newNode, overwrite, logRetryEntry);
if (NameNode.stateChangeLog.isDebugEnabled()) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/31f11713/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 63ef985..357684a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -392,7 +392,7 @@ public class FSEditLogLoader {
FSNamesystem.LOG.debug("Reopening an already-closed file " +
"for append");
}
- LocatedBlock lb = fsNamesys.prepareFileForAppend(path, iip,
+ LocatedBlock lb = FSDirAppendOp.prepareFileForAppend(fsNamesys, iip,
addCloseOp.clientName, addCloseOp.clientMachine, false, false,
false);
// add the op into retry cache if necessary
@@ -466,7 +466,7 @@ public class FSEditLogLoader {
INodesInPath iip = fsDir.getINodesInPath4Write(path);
INodeFile file = INodeFile.valueOf(iip.getLastINode(), path);
if (!file.isUnderConstruction()) {
- LocatedBlock lb = fsNamesys.prepareFileForAppend(path, iip,
+ LocatedBlock lb = FSDirAppendOp.prepareFileForAppend(fsNamesys, iip,
appendOp.clientName, appendOp.clientMachine, appendOp.newBlock,
false, false);
// add the op into retry cache if necessary
http://git-wip-us.apache.org/repos/asf/hadoop/blob/31f11713/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index fd37fbe..0b44431 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -142,7 +142,6 @@ import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;
-import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsServerDefaults;
@@ -185,7 +184,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeException;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
@@ -250,7 +248,6 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
-import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.RetriableException;
@@ -2174,175 +2171,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
}
/**
- * Append to an existing file for append.
- * <p>
- *
- * The method returns the last block of the file if this is a partial block,
- * which can still be used for writing more data. The client uses the returned
- * block locations to form the data pipeline for this block.<br>
- * The method returns null if the last block is full. The client then
- * allocates a new block with the next call using
- * {@link ClientProtocol#addBlock}.
- * <p>
- *
- * For description of parameters and exceptions thrown see
- * {@link ClientProtocol#append(String, String, EnumSetWritable)}
- *
- * @return the last block locations if the block is partial or null otherwise
- */
- private LocatedBlock appendFileInternal(FSPermissionChecker pc,
- INodesInPath iip, String holder, String clientMachine, boolean newBlock,
- boolean logRetryCache) throws IOException {
- assert hasWriteLock();
- // Verify that the destination does not exist as a directory already.
- final INode inode = iip.getLastINode();
- final String src = iip.getPath();
- if (inode != null && inode.isDirectory()) {
- throw new FileAlreadyExistsException("Cannot append to directory " + src
- + "; already exists as a directory.");
- }
- if (isPermissionEnabled) {
- dir.checkPathAccess(pc, iip, FsAction.WRITE);
- }
-
- try {
- if (inode == null) {
- throw new FileNotFoundException("failed to append to non-existent file "
- + src + " for client " + clientMachine);
- }
- INodeFile myFile = INodeFile.valueOf(inode, src, true);
- final BlockStoragePolicy lpPolicy =
- blockManager.getStoragePolicy("LAZY_PERSIST");
- if (lpPolicy != null &&
- lpPolicy.getId() == myFile.getStoragePolicyID()) {
- throw new UnsupportedOperationException(
- "Cannot append to lazy persist file " + src);
- }
- // Opening an existing file for append - may need to recover lease.
- recoverLeaseInternal(RecoverLeaseOp.APPEND_FILE, iip, src, holder,
- clientMachine, false);
-
- final BlockInfo lastBlock = myFile.getLastBlock();
- // Check that the block has at least minimum replication.
- if(lastBlock != null && lastBlock.isComplete() &&
- !getBlockManager().isSufficientlyReplicated(lastBlock)) {
- throw new IOException("append: lastBlock=" + lastBlock +
- " of src=" + src + " is not sufficiently replicated yet.");
- }
- return prepareFileForAppend(src, iip, holder, clientMachine, newBlock,
- true, logRetryCache);
- } catch (IOException ie) {
- NameNode.stateChangeLog.warn("DIR* NameSystem.append: " +ie.getMessage());
- throw ie;
- }
- }
-
- /**
- * Convert current node to under construction.
- * Recreate in-memory lease record.
- *
- * @param src path to the file
- * @param leaseHolder identifier of the lease holder on this file
- * @param clientMachine identifier of the client machine
- * @param newBlock if the data is appended to a new block
- * @param writeToEditLog whether to persist this change to the edit log
- * @param logRetryCache whether to record RPC ids in editlog for retry cache
- * rebuilding
- * @return the last block locations if the block is partial or null otherwise
- * @throws UnresolvedLinkException
- * @throws IOException
- */
- LocatedBlock prepareFileForAppend(String src, INodesInPath iip,
- String leaseHolder, String clientMachine, boolean newBlock,
- boolean writeToEditLog, boolean logRetryCache) throws IOException {
- final INodeFile file = iip.getLastINode().asFile();
- final QuotaCounts delta = verifyQuotaForUCBlock(file, iip);
-
- file.recordModification(iip.getLatestSnapshotId());
- file.toUnderConstruction(leaseHolder, clientMachine);
-
- leaseManager.addLease(
- file.getFileUnderConstructionFeature().getClientName(), file.getId());
-
- LocatedBlock ret = null;
- if (!newBlock) {
- ret = blockManager.convertLastBlockToUnderConstruction(file, 0);
- if (ret != null && delta != null) {
- Preconditions.checkState(delta.getStorageSpace() >= 0,
- "appending to a block with size larger than the preferred block size");
- dir.writeLock();
- try {
- dir.updateCountNoQuotaCheck(iip, iip.length() - 1, delta);
- } finally {
- dir.writeUnlock();
- }
- }
- } else {
- BlockInfo lastBlock = file.getLastBlock();
- if (lastBlock != null) {
- ExtendedBlock blk = new ExtendedBlock(this.getBlockPoolId(), lastBlock);
- ret = new LocatedBlock(blk, new DatanodeInfo[0]);
- }
- }
-
- if (writeToEditLog) {
- if (NameNodeLayoutVersion.supports(Feature.APPEND_NEW_BLOCK,
- getEffectiveLayoutVersion())) {
- getEditLog().logAppendFile(src, file, newBlock, logRetryCache);
- } else {
- getEditLog().logOpenFile(src, file, false, logRetryCache);
- }
- }
- return ret;
- }
-
- /**
- * Verify quota when using the preferred block size for UC block. This is
- * usually used by append and truncate
- * @throws QuotaExceededException when violating the storage quota
- * @return expected quota usage update. null means no change or no need to
- * update quota usage later
- */
- private QuotaCounts verifyQuotaForUCBlock(INodeFile file, INodesInPath iip)
- throws QuotaExceededException {
- if (!isImageLoaded() || dir.shouldSkipQuotaChecks()) {
- // Do not check quota if editlog is still being processed
- return null;
- }
- if (file.getLastBlock() != null) {
- final QuotaCounts delta = computeQuotaDeltaForUCBlock(file);
- dir.readLock();
- try {
- FSDirectory.verifyQuota(iip, iip.length() - 1, delta, null);
- return delta;
- } finally {
- dir.readUnlock();
- }
- }
- return null;
- }
-
- /** Compute quota change for converting a complete block to a UC block */
- private QuotaCounts computeQuotaDeltaForUCBlock(INodeFile file) {
- final QuotaCounts delta = new QuotaCounts.Builder().build();
- final BlockInfo lastBlock = file.getLastBlock();
- if (lastBlock != null) {
- final long diff = file.getPreferredBlockSize() - lastBlock.getNumBytes();
- final short repl = file.getPreferredBlockReplication();
- delta.addStorageSpace(diff * repl);
- final BlockStoragePolicy policy = dir.getBlockStoragePolicySuite()
- .getPolicy(file.getStoragePolicyID());
- List<StorageType> types = policy.chooseStorageTypes(repl);
- for (StorageType t : types) {
- if (t.supportTypeQuota()) {
- delta.addTypeSpace(t, diff);
- }
- }
- }
- return delta;
- }
-
- /**
* Recover lease;
* Immediately revoke the lease of the current lease holder and start lease
* recovery so that the file can be forced to be closed.
@@ -2487,62 +2315,45 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
/**
* Append to an existing file in the namespace.
*/
- LastBlockWithStatus appendFile(String src, String holder,
+ LastBlockWithStatus appendFile(String srcArg, String holder,
String clientMachine, EnumSet<CreateFlag> flag, boolean logRetryCache)
throws IOException {
boolean newBlock = flag.contains(CreateFlag.NEW_BLOCK);
if (newBlock) {
requireEffectiveLayoutVersionForFeature(Feature.APPEND_NEW_BLOCK);
}
- try {
- return appendFileInt(src, holder, clientMachine, newBlock, logRetryCache);
- } catch (AccessControlException e) {
- logAuditEvent(false, "append", src);
- throw e;
- }
- }
- private LastBlockWithStatus appendFileInt(final String srcArg, String holder,
- String clientMachine, boolean newBlock, boolean logRetryCache)
- throws IOException {
- String src = srcArg;
NameNode.stateChangeLog.debug(
"DIR* NameSystem.appendFile: src={}, holder={}, clientMachine={}",
- src, holder, clientMachine);
- boolean skipSync = false;
- LocatedBlock lb = null;
- HdfsFileStatus stat = null;
- FSPermissionChecker pc = getPermissionChecker();
- byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
- writeLock();
+ srcArg, holder, clientMachine);
try {
+ boolean skipSync = false;
+ LastBlockWithStatus lbs = null;
+ final FSPermissionChecker pc = getPermissionChecker();
checkOperation(OperationCategory.WRITE);
- checkNameNodeSafeMode("Cannot append to file" + src);
- src = dir.resolvePath(pc, src, pathComponents);
- final INodesInPath iip = dir.getINodesInPath4Write(src);
- lb = appendFileInternal(pc, iip, holder, clientMachine, newBlock,
- logRetryCache);
- stat = FSDirStatAndListingOp.getFileInfo(dir, src, false,
- FSDirectory.isReservedRawName(srcArg), true);
- } catch (StandbyException se) {
- skipSync = true;
- throw se;
- } finally {
- writeUnlock();
- // There might be transactions logged while trying to recover the lease.
- // They need to be sync'ed even when an exception was thrown.
- if (!skipSync) {
- getEditLog().logSync();
+ writeLock();
+ try {
+ checkOperation(OperationCategory.WRITE);
+ checkNameNodeSafeMode("Cannot append to file" + srcArg);
+ lbs = FSDirAppendOp.appendFile(this, srcArg, pc, holder, clientMachine,
+ newBlock, logRetryCache);
+ } catch (StandbyException se) {
+ skipSync = true;
+ throw se;
+ } finally {
+ writeUnlock();
+ // There might be transactions logged while trying to recover the lease
+ // They need to be sync'ed even when an exception was thrown.
+ if (!skipSync) {
+ getEditLog().logSync();
+ }
}
+ logAuditEvent(true, "append", srcArg);
+ return lbs;
+ } catch (AccessControlException e) {
+ logAuditEvent(false, "append", srcArg);
+ throw e;
}
- if (lb != null) {
- NameNode.stateChangeLog.debug(
- "DIR* NameSystem.appendFile: file {} for {} at {} block {} block" +
- " size {}", src, holder, clientMachine, lb.getBlock(),
- lb.getBlock().getNumBytes());
- }
- logAuditEvent(true, "append", srcArg);
- return new LastBlockWithStatus(lb, stat);
}
ExtendedBlock getExtendedBlock(Block blk) {
[13/14] hadoop git commit: YARN-2019. Retrospect on decision of making RM crashed if any exception throw in ZKRMStateStore. Contributed by Jian He.
Posted by aw...@apache.org.
YARN-2019. Retrospect on decision of making RM crashed if any exception throw in ZKRMStateStore. Contributed by Jian He.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ee98d635
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ee98d635
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ee98d635
Branch: refs/heads/HADOOP-12111
Commit: ee98d6354bbbcd0832d3e539ee097f837e5d0e31
Parents: e91ccfa
Author: Junping Du <ju...@apache.org>
Authored: Wed Jul 22 17:52:35 2015 -0700
Committer: Junping Du <ju...@apache.org>
Committed: Wed Jul 22 17:52:35 2015 -0700
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +++
.../apache/hadoop/yarn/conf/YarnConfiguration.java | 11 +++++++++++
.../src/main/resources/yarn-default.xml | 16 ++++++++++++++++
.../resourcemanager/recovery/RMStateStore.java | 9 +++++++--
4 files changed, 37 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee98d635/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index a5fd4e7..93962f1 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -144,6 +144,9 @@ Release 2.8.0 - UNRELEASED
YARN-2003. Support for Application priority : Changes in RM and Capacity
Scheduler. (Sunil G via wangda)
+ YARN-2019. Retrospect on decision of making RM crashed if any exception throw
+ in ZKRMStateStore. (Jian He via junping_du)
+
IMPROVEMENTS
YARN-644. Basic null check is not performed on passed in arguments before
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee98d635/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 060635f..9832729 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -401,6 +401,11 @@ public class YarnConfiguration extends Configuration {
public static final String RECOVERY_ENABLED = RM_PREFIX + "recovery.enabled";
public static final boolean DEFAULT_RM_RECOVERY_ENABLED = false;
+ public static final String YARN_FAIL_FAST = YARN_PREFIX + "fail-fast";
+ public static final boolean DEFAULT_YARN_FAIL_FAST = true;
+
+ public static final String RM_FAIL_FAST = RM_PREFIX + "fail-fast";
+
@Private
public static final String RM_WORK_PRESERVING_RECOVERY_ENABLED = RM_PREFIX
+ "work-preserving-recovery.enabled";
@@ -2018,6 +2023,12 @@ public class YarnConfiguration extends Configuration {
YARN_HTTP_POLICY_DEFAULT));
}
+ public static boolean shouldRMFailFast(Configuration conf) {
+ return conf.getBoolean(YarnConfiguration.RM_FAIL_FAST,
+ conf.getBoolean(YarnConfiguration.YARN_FAIL_FAST,
+ YarnConfiguration.DEFAULT_YARN_FAIL_FAST));
+ }
+
@Private
public static String getClusterId(Configuration conf) {
String clusterId = conf.get(YarnConfiguration.RM_CLUSTER_ID);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee98d635/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index d586f51..8b3a3af 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -324,6 +324,22 @@
</property>
<property>
+ <description>Should the RM fail fast if it encounters any errors. By default,
+ it points to ${yarn.fail-fast}. Errors include:
+ 1) exceptions when state-store write/read operations fail.
+ </description>
+ <name>yarn.resourcemanager.fail-fast</name>
+ <value>${yarn.fail-fast}</value>
+ </property>
+
+ <property>
+ <description>Should YARN fail fast if it encounters any errors.
+ </description>
+ <name>yarn.fail-fast</name>
+ <value>true</value>
+ </property>
+
+ <property>
<description>Enable RM work preserving recovery. This configuration is private
to YARN for experimenting the feature.
</description>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee98d635/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
index 46c2954..9b17bf7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.event.EventHandler;
@@ -855,6 +856,7 @@ public abstract class RMStateStore extends AbstractService {
* @param failureCause the exception due to which the operation failed
*/
protected void notifyStoreOperationFailed(Exception failureCause) {
+ LOG.error("State store operation failed ", failureCause);
if (failureCause instanceof StoreFencedException) {
updateFencedState();
Thread standByTransitionThread =
@@ -862,8 +864,11 @@ public abstract class RMStateStore extends AbstractService {
standByTransitionThread.setName("StandByTransitionThread Handler");
standByTransitionThread.start();
} else {
- rmDispatcher.getEventHandler().handle(
- new RMFatalEvent(RMFatalEventType.STATE_STORE_OP_FAILED, failureCause));
+ if (YarnConfiguration.shouldRMFailFast(getConfig())) {
+ rmDispatcher.getEventHandler().handle(
+ new RMFatalEvent(RMFatalEventType.STATE_STORE_OP_FAILED,
+ failureCause));
+ }
}
}
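For reference, a minimal sketch (not part of the patch) of how the new two-level switch resolves, using only the constants and helper added above: the RM-specific key wins when set, otherwise the global key applies, and the hard-coded default is true.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class FailFastResolution {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Nothing set: yarn.resourcemanager.fail-fast falls back to
        // yarn.fail-fast, whose hard-coded default is true.
        System.out.println(YarnConfiguration.shouldRMFailFast(conf)); // true

        // Turning off the global switch also turns off RM fail-fast ...
        conf.setBoolean(YarnConfiguration.YARN_FAIL_FAST, false);
        System.out.println(YarnConfiguration.shouldRMFailFast(conf)); // false

        // ... unless the RM-specific key overrides it.
        conf.setBoolean(YarnConfiguration.RM_FAIL_FAST, true);
        System.out.println(YarnConfiguration.shouldRMFailFast(conf)); // true
      }
    }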
[12/14] hadoop git commit: HDFS-8797. WebHdfsFileSystem creates too
many connections for pread. Contributed by Jing Zhao.
Posted by aw...@apache.org.
HDFS-8797. WebHdfsFileSystem creates too many connections for pread. Contributed by Jing Zhao.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e91ccfad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e91ccfad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e91ccfad
Branch: refs/heads/HADOOP-12111
Commit: e91ccfad07ec5b5674a84009772dd31a82b4e4de
Parents: 06e5dd2
Author: Jing Zhao <ji...@apache.org>
Authored: Wed Jul 22 17:42:31 2015 -0700
Committer: Jing Zhao <ji...@apache.org>
Committed: Wed Jul 22 17:42:31 2015 -0700
----------------------------------------------------------------------
.../hadoop/hdfs/web/ByteRangeInputStream.java | 57 +++++++++++++++++---
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 +
.../hdfs/web/TestByteRangeInputStream.java | 35 ++++++------
.../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 41 ++++++++++++++
4 files changed, 113 insertions(+), 22 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e91ccfad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java
index 395c9f6..bb581db 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs.web;
+import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
@@ -65,6 +66,16 @@ public abstract class ByteRangeInputStream extends FSInputStream {
final boolean resolved) throws IOException;
}
+ static class InputStreamAndFileLength {
+ final Long length;
+ final InputStream in;
+
+ InputStreamAndFileLength(Long length, InputStream in) {
+ this.length = length;
+ this.in = in;
+ }
+ }
+
enum StreamStatus {
NORMAL, SEEK, CLOSED
}
@@ -101,7 +112,9 @@ public abstract class ByteRangeInputStream extends FSInputStream {
if (in != null) {
in.close();
}
- in = openInputStream();
+ InputStreamAndFileLength fin = openInputStream(startPos);
+ in = fin.in;
+ fileLength = fin.length;
status = StreamStatus.NORMAL;
break;
case CLOSED:
@@ -111,20 +124,22 @@ public abstract class ByteRangeInputStream extends FSInputStream {
}
@VisibleForTesting
- protected InputStream openInputStream() throws IOException {
+ protected InputStreamAndFileLength openInputStream(long startOffset)
+ throws IOException {
// Use the original url if no resolved url exists, eg. if
// it's the first time a request is made.
final boolean resolved = resolvedURL.getURL() != null;
final URLOpener opener = resolved? resolvedURL: originalURL;
- final HttpURLConnection connection = opener.connect(startPos, resolved);
+ final HttpURLConnection connection = opener.connect(startOffset, resolved);
resolvedURL.setURL(getResolvedUrl(connection));
InputStream in = connection.getInputStream();
+ final Long length;
final Map<String, List<String>> headers = connection.getHeaderFields();
if (isChunkedTransferEncoding(headers)) {
// file length is not known
- fileLength = null;
+ length = null;
} else {
// for non-chunked transfer-encoding, get content-length
final String cl = connection.getHeaderField(HttpHeaders.CONTENT_LENGTH);
@@ -133,14 +148,14 @@ public abstract class ByteRangeInputStream extends FSInputStream {
+ headers);
}
final long streamlength = Long.parseLong(cl);
- fileLength = startPos + streamlength;
+ length = startOffset + streamlength;
// Java has a bug with >2GB request streams. It won't bounds check
// the reads so the transfer blocks until the server times out
in = new BoundedInputStream(in, streamlength);
}
- return in;
+ return new InputStreamAndFileLength(length, in);
}
private static boolean isChunkedTransferEncoding(
@@ -204,6 +219,36 @@ public abstract class ByteRangeInputStream extends FSInputStream {
}
}
+ @Override
+ public int read(long position, byte[] buffer, int offset, int length)
+ throws IOException {
+ try (InputStream in = openInputStream(position).in) {
+ return in.read(buffer, offset, length);
+ }
+ }
+
+ @Override
+ public void readFully(long position, byte[] buffer, int offset, int length)
+ throws IOException {
+ final InputStreamAndFileLength fin = openInputStream(position);
+ if (fin.length != null && length + position > fin.length) {
+ throw new EOFException("The length to read " + length
+ + " exceeds the file length " + fin.length);
+ }
+ try {
+ int nread = 0;
+ while (nread < length) {
+ int nbytes = fin.in.read(buffer, offset + nread, length - nread);
+ if (nbytes < 0) {
+ throw new EOFException("End of file reached before reading fully.");
+ }
+ nread += nbytes;
+ }
+ } finally {
+ fin.in.close();
+ }
+ }
+
/**
* Return the current offset from the start of the file
*/
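In effect, each positional read now opens a one-shot range request and closes it when done, leaving the stateful stream untouched. A minimal usage sketch (the cluster URI and path are hypothetical):

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class PreadSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(
            URI.create("webhdfs://namenode:50070"), new Configuration());
        byte[] buf = new byte[128];
        try (FSDataInputStream in = fs.open(new Path("/foo"))) {
          // Positional read: with this patch it is served by its own
          // short-lived connection for exactly the requested range.
          in.readFully(1024, buf, 0, buf.length);
          // The stateful position is unchanged by the pread above, so this
          // still reads from offset 0.
          int n = in.read(buf, 0, buf.length);
          System.out.println("stateful read returned " + n + " bytes");
        }
      }
    }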
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e91ccfad/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 66cb89e..c3eab70 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -742,6 +742,8 @@ Release 2.8.0 - UNRELEASED
HDFS-8795. Improve InvalidateBlocks#node2blocks. (yliu)
+ HDFS-8797. WebHdfsFileSystem creates too many connections for pread. (jing9)
+
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e91ccfad/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestByteRangeInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestByteRangeInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestByteRangeInputStream.java
index 11deab8..40f2b9c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestByteRangeInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestByteRangeInputStream.java
@@ -35,7 +35,9 @@ import java.net.HttpURLConnection;
import java.net.URL;
import com.google.common.net.HttpHeaders;
+import org.apache.hadoop.hdfs.web.ByteRangeInputStream.InputStreamAndFileLength;
import org.junit.Test;
+import org.mockito.Mockito;
import org.mockito.internal.util.reflection.Whitebox;
public class TestByteRangeInputStream {
@@ -140,8 +142,9 @@ public class TestByteRangeInputStream {
public void testPropagatedClose() throws IOException {
ByteRangeInputStream bris =
mock(ByteRangeInputStream.class, CALLS_REAL_METHODS);
- InputStream mockStream = mock(InputStream.class);
- doReturn(mockStream).when(bris).openInputStream();
+ InputStreamAndFileLength mockStream = new InputStreamAndFileLength(1L,
+ mock(InputStream.class));
+ doReturn(mockStream).when(bris).openInputStream(Mockito.anyLong());
Whitebox.setInternalState(bris, "status",
ByteRangeInputStream.StreamStatus.SEEK);
@@ -151,46 +154,46 @@ public class TestByteRangeInputStream {
// first open, shouldn't close underlying stream
bris.getInputStream();
- verify(bris, times(++brisOpens)).openInputStream();
+ verify(bris, times(++brisOpens)).openInputStream(Mockito.anyLong());
verify(bris, times(brisCloses)).close();
- verify(mockStream, times(isCloses)).close();
+ verify(mockStream.in, times(isCloses)).close();
// stream is open, shouldn't close underlying stream
bris.getInputStream();
- verify(bris, times(brisOpens)).openInputStream();
+ verify(bris, times(brisOpens)).openInputStream(Mockito.anyLong());
verify(bris, times(brisCloses)).close();
- verify(mockStream, times(isCloses)).close();
+ verify(mockStream.in, times(isCloses)).close();
// seek forces a reopen, should close underlying stream
bris.seek(1);
bris.getInputStream();
- verify(bris, times(++brisOpens)).openInputStream();
+ verify(bris, times(++brisOpens)).openInputStream(Mockito.anyLong());
verify(bris, times(brisCloses)).close();
- verify(mockStream, times(++isCloses)).close();
+ verify(mockStream.in, times(++isCloses)).close();
// verify that the underlying stream isn't closed after a seek
// ie. the state was correctly updated
bris.getInputStream();
- verify(bris, times(brisOpens)).openInputStream();
+ verify(bris, times(brisOpens)).openInputStream(Mockito.anyLong());
verify(bris, times(brisCloses)).close();
- verify(mockStream, times(isCloses)).close();
+ verify(mockStream.in, times(isCloses)).close();
// seeking to same location should be a no-op
bris.seek(1);
bris.getInputStream();
- verify(bris, times(brisOpens)).openInputStream();
+ verify(bris, times(brisOpens)).openInputStream(Mockito.anyLong());
verify(bris, times(brisCloses)).close();
- verify(mockStream, times(isCloses)).close();
+ verify(mockStream.in, times(isCloses)).close();
// close should of course close
bris.close();
verify(bris, times(++brisCloses)).close();
- verify(mockStream, times(++isCloses)).close();
+ verify(mockStream.in, times(++isCloses)).close();
// it's already closed, underlying stream should not close
bris.close();
verify(bris, times(++brisCloses)).close();
- verify(mockStream, times(isCloses)).close();
+ verify(mockStream.in, times(isCloses)).close();
// it's closed, don't reopen it
boolean errored = false;
@@ -202,9 +205,9 @@ public class TestByteRangeInputStream {
} finally {
assertTrue("Read a closed steam", errored);
}
- verify(bris, times(brisOpens)).openInputStream();
+ verify(bris, times(brisOpens)).openInputStream(Mockito.anyLong());
verify(bris, times(brisCloses)).close();
- verify(mockStream, times(isCloses)).close();
+ verify(mockStream.in, times(isCloses)).close();
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e91ccfad/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index 0563f12..8bba105 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.fail;
+import java.io.EOFException;
import java.io.IOException;
import java.io.OutputStream;
import java.net.HttpURLConnection;
@@ -45,6 +46,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.TestDFSClientRetries;
@@ -561,6 +563,45 @@ public class TestWebHDFS {
}
}
+ @Test
+ public void testWebHdfsPread() throws Exception {
+ final Configuration conf = WebHdfsTestUtil.createConf();
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+ .build();
+ byte[] content = new byte[1024];
+ RANDOM.nextBytes(content);
+ final Path foo = new Path("/foo");
+ FSDataInputStream in = null;
+ try {
+ final WebHdfsFileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
+ WebHdfsConstants.WEBHDFS_SCHEME);
+ try (OutputStream os = fs.create(foo)) {
+ os.write(content);
+ }
+
+ // pread
+ in = fs.open(foo, 1024);
+ byte[] buf = new byte[1024];
+ try {
+ in.readFully(1020, buf, 0, 5);
+ Assert.fail("EOF expected");
+ } catch (EOFException ignored) {}
+
+ // mix pread with stateful read
+ int length = in.read(buf, 0, 512);
+ in.readFully(100, new byte[1024], 0, 100);
+ int preadLen = in.read(200, new byte[1024], 0, 200);
+ Assert.assertTrue(preadLen > 0);
+ IOUtils.readFully(in, buf, length, 1024 - length);
+ Assert.assertArrayEquals(content, buf);
+ } finally {
+ if (in != null) {
+ in.close();
+ }
+ cluster.shutdown();
+ }
+ }
+
@Test(timeout = 30000)
public void testGetHomeDirectory() throws Exception {
[10/14] hadoop git commit: YARN-3954. Fix
TestYarnConfigurationFields#testCompareConfigurationClassAgainstXml. (varun
saxena via rohithsharmaks)
Posted by aw...@apache.org.
YARN-3954. Fix TestYarnConfigurationFields#testCompareConfigurationClassAgainstXml. (varun saxena via rohithsharmaks)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8376ea32
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8376ea32
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8376ea32
Branch: refs/heads/HADOOP-12111
Commit: 8376ea3297a3eab33df27454b18cf215cfb7c6ff
Parents: 76ec26d
Author: rohithsharmaks <ro...@apache.org>
Authored: Thu Jul 23 00:28:24 2015 +0530
Committer: rohithsharmaks <ro...@apache.org>
Committed: Thu Jul 23 00:28:24 2015 +0530
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +++
.../src/main/resources/yarn-default.xml | 10 ++++++++++
2 files changed, 13 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8376ea32/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f751862..eb52745 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -656,6 +656,9 @@ Release 2.8.0 - UNRELEASED
YARN-3932. SchedulerApplicationAttempt#getResourceUsageReport and UserInfo
should be based on total-used-resources. (Bibin A Chundatt via wangda)
+ YARN-3954. Fix TestYarnConfigurationFields#testCompareConfigurationClassAgainstXml.
+ (varun saxena via rohithsharmaks)
+
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8376ea32/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 2edeef0..d586f51 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2131,4 +2131,14 @@
<value>false</value>
</property>
+ <property>
+ <description>
+ Defines maximum application priority in a cluster.
+ If an application is submitted with a priority higher than this value, it will be
+ reset to this maximum value.
+ </description>
+ <name>yarn.cluster.max-application-priority</name>
+ <value>0</value>
+ </property>
+
</configuration>
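A client can read the new cap back the usual way; a minimal sketch, using the property name and default documented above:

    import org.apache.hadoop.conf.Configuration;

    public class MaxPriorityCheck {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // 0 is the shipped default, i.e. no elevated priorities; per the
        // description above, submissions over the cap are reset to it.
        int max = conf.getInt("yarn.cluster.max-application-priority", 0);
        System.out.println("cluster max application priority = " + max);
      }
    }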
[02/14] hadoop git commit: HDFS-8773. Few FSNamesystem metrics are
not documented in the Metrics page. Contributed by Rakesh R.
Posted by aw...@apache.org.
HDFS-8773. Few FSNamesystem metrics are not documented in the Metrics page. Contributed by Rakesh R.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a26cc66f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a26cc66f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a26cc66f
Branch: refs/heads/HADOOP-12111
Commit: a26cc66f38daec2342215a66b599bf59cee1112c
Parents: cb03768
Author: cnauroth <cn...@apache.org>
Authored: Tue Jul 21 14:12:03 2015 -0700
Committer: cnauroth <cn...@apache.org>
Committed: Tue Jul 21 14:12:03 2015 -0700
----------------------------------------------------------------------
.../hadoop-common/src/site/markdown/Metrics.md | 5 +++++
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
2 files changed, 8 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a26cc66f/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 2b23508..646cda5 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -231,6 +231,11 @@ Each metrics record contains tags such as HAState and Hostname as additional inf
| `BlockCapacity` | Current number of block capacity |
| `StaleDataNodes` | Current number of DataNodes marked stale due to delayed heartbeat |
| `TotalFiles` | Current number of files and directories (same as FilesTotal) |
+| `MissingReplOneBlocks` | Current number of missing blocks with replication factor 1 |
+| `NumFilesUnderConstruction` | Current number of files under construction |
+| `NumActiveClients` | Current number of active clients holding a lease |
+| `HAState` | (HA-only) Current state of the NameNode: initializing, active, standby, or stopping |
+| `FSState` | Current state of the file system: Safemode or Operational |
JournalNode
-----------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a26cc66f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7c771b0..8122045 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1062,6 +1062,9 @@ Release 2.8.0 - UNRELEASED
HDFS-7582. Enforce maximum number of ACL entries separately per access
and default. (vinayakumarb)
+ HDFS-8773. Few FSNamesystem metrics are not documented in the Metrics page.
+ (Rakesh R via cnauroth)
+
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES
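These FSNamesystem values are also published through the NameNode's JMX beans; a minimal in-process sketch, assuming the bean is registered under the ObjectName shown (an assumption, not spelled out in this patch):

    import java.lang.management.ManagementFactory;

    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class FsNamesystemProbe {
      public static void main(String[] args) throws Exception {
        // Assumes execution inside the NameNode JVM (e.g. from a test that
        // started a MiniDFSCluster in-process).
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName bean =
            new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");
        // Attribute names follow the table documented above.
        System.out.println("FSState = " + mbs.getAttribute(bean, "FSState"));
        System.out.println("NumActiveClients = "
            + mbs.getAttribute(bean, "NumActiveClients"));
      }
    }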
[06/14] hadoop git commit: HDFS-8795. Improve
InvalidateBlocks#node2blocks. (yliu)
Posted by aw...@apache.org.
HDFS-8795. Improve InvalidateBlocks#node2blocks. (yliu)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/40253262
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/40253262
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/40253262
Branch: refs/heads/HADOOP-12111
Commit: 4025326288c0167ff300d4f7ecc96f84ed141912
Parents: 94c6a4a
Author: yliu <yl...@apache.org>
Authored: Wed Jul 22 15:16:50 2015 +0800
Committer: yliu <yl...@apache.org>
Committed: Wed Jul 22 15:16:50 2015 +0800
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++
.../hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java | 5 +++--
2 files changed, 5 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/40253262/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 50803de..66cb89e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -740,6 +740,8 @@ Release 2.8.0 - UNRELEASED
HDFS-8495. Consolidate append() related implementation into a single class.
(Rakesh R via wheat9)
+ HDFS-8795. Improve InvalidateBlocks#node2blocks. (yliu)
+
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
http://git-wip-us.apache.org/repos/asf/hadoop/blob/40253262/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
index a465f85..c486095 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
@@ -22,9 +22,9 @@ import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.GregorianCalendar;
+import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import java.util.TreeMap;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -36,6 +36,7 @@ import org.apache.hadoop.util.Time;
import org.apache.hadoop.hdfs.DFSUtil;
import com.google.common.annotations.VisibleForTesting;
+
import org.slf4j.Logger;
/**
@@ -47,7 +48,7 @@ import org.slf4j.Logger;
class InvalidateBlocks {
/** Mapping: DatanodeInfo -> Collection of Blocks */
private final Map<DatanodeInfo, LightWeightHashSet<Block>> node2blocks =
- new TreeMap<DatanodeInfo, LightWeightHashSet<Block>>();
+ new HashMap<DatanodeInfo, LightWeightHashSet<Block>>();
/** The total number of blocks in the map. */
private long numBlocks = 0L;
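The switch is safe because node2blocks is only ever probed per DataNode; nothing relies on iterating it in key order. A toy sketch of the access pattern, with String and Long standing in for DatanodeInfo and Block:

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    public class Node2BlocksSketch {
      public static void main(String[] args) {
        // String/Long are stand-ins for DatanodeInfo/Block here.
        Map<String, Set<Long>> node2blocks = new HashMap<String, Set<Long>>();
        Set<Long> blocks = node2blocks.get("dn1");
        if (blocks == null) {
          blocks = new HashSet<Long>();
          node2blocks.put("dn1", blocks);
        }
        blocks.add(42L);
        // Only keyed add/get/remove operations like these occur, so a
        // HashMap's O(1) probes replace TreeMap's O(log n) comparisons.
        System.out.println(node2blocks.get("dn1"));
      }
    }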
[07/14] hadoop git commit: HADOOP-12239. StorageException complaining
" no lease ID" when updating FolderLastModifiedTime in WASB. Contributed by
Duo Xu.
Posted by aw...@apache.org.
HADOOP-12239. StorageException complaining " no lease ID" when updating FolderLastModifiedTime in WASB. Contributed by Duo Xu.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/efa97243
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/efa97243
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/efa97243
Branch: refs/heads/HADOOP-12111
Commit: efa97243ecb84b3b468e732897cd685e3869f480
Parents: 4025326
Author: cnauroth <cn...@apache.org>
Authored: Wed Jul 22 11:16:49 2015 -0700
Committer: cnauroth <cn...@apache.org>
Committed: Wed Jul 22 11:16:49 2015 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
.../org/apache/hadoop/fs/azure/NativeAzureFileSystem.java | 8 ++++++--
2 files changed, 9 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/efa97243/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 3d101d4..c0e5c92 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -995,6 +995,9 @@ Release 2.8.0 - UNRELEASED
HADOOP-12017. Hadoop archives command should use configurable replication
factor when closing (Bibin A Chundatt via vinayakumarb)
+ HADOOP-12239. StorageException complaining " no lease ID" when updating
+ FolderLastModifiedTime in WASB. (Duo Xu via cnauroth)
+
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/efa97243/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index a567b33..bb9941b 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -1360,8 +1360,12 @@ public class NativeAzureFileSystem extends FileSystem {
String parentKey = pathToKey(parentFolder);
FileMetadata parentMetadata = store.retrieveMetadata(parentKey);
if (parentMetadata != null && parentMetadata.isDir() &&
- parentMetadata.getBlobMaterialization() == BlobMaterialization.Explicit) {
- store.updateFolderLastModifiedTime(parentKey, parentFolderLease);
+ parentMetadata.getBlobMaterialization() == BlobMaterialization.Explicit) {
+ if (parentFolderLease != null) {
+ store.updateFolderLastModifiedTime(parentKey, parentFolderLease);
+ } else {
+ updateParentFolderLastModifiedTime(key);
+ }
} else {
// Make sure that the parent folder exists.
// Create it using inherited permissions from the first existing directory going up the path
[11/14] hadoop git commit: YARN-3956. Fix
TestNodeManagerHardwareUtils fails on Mac (Varun Vasudev via wangda)
Posted by aw...@apache.org.
YARN-3956. Fix TestNodeManagerHardwareUtils fails on Mac (Varun Vasudev via wangda)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/06e5dd2c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/06e5dd2c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/06e5dd2c
Branch: refs/heads/HADOOP-12111
Commit: 06e5dd2c84c49460884757b56980b1b9c58af996
Parents: 8376ea3
Author: Wangda Tan <wa...@apache.org>
Authored: Wed Jul 22 11:59:31 2015 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Wed Jul 22 12:01:41 2015 -0700
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 2 ++
.../server/nodemanager/util/TestNodeManagerHardwareUtils.java | 5 +++++
2 files changed, 7 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/06e5dd2c/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index eb52745..a5fd4e7 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -659,6 +659,8 @@ Release 2.8.0 - UNRELEASED
YARN-3954. Fix TestYarnConfigurationFields#testCompareConfigurationClassAgainstXml.
(varun saxena via rohithsharmaks)
+ YARN-3956. Fix TestNodeManagerHardwareUtils fails on Mac (Varun Vasudev via wangda)
+
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/06e5dd2c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestNodeManagerHardwareUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestNodeManagerHardwareUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestNodeManagerHardwareUtils.java
index 5bf8cb7..84a045d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestNodeManagerHardwareUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestNodeManagerHardwareUtils.java
@@ -30,6 +30,11 @@ import org.mockito.Mockito;
public class TestNodeManagerHardwareUtils {
static class TestResourceCalculatorPlugin extends ResourceCalculatorPlugin {
+
+ TestResourceCalculatorPlugin() {
+ super(null);
+ }
+
@Override
public long getVirtualMemorySize() {
return 0;
[08/14] hadoop git commit: HADOOP-12184. Remove unused Linux-specific
constants in NativeIO (Martin Walsh via Colin P. McCabe)
Posted by aw...@apache.org.
HADOOP-12184. Remove unused Linux-specific constants in NativeIO (Martin Walsh via Colin P. McCabe)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1b3bceb5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1b3bceb5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1b3bceb5
Branch: refs/heads/HADOOP-12111
Commit: 1b3bceb58c8e536a75fa3f99cc3ceeaba91a07de
Parents: efa9724
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Wed Jul 22 11:11:38 2015 -0700
Committer: Colin Patrick Mccabe <cm...@cloudera.com>
Committed: Wed Jul 22 11:34:10 2015 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
.../src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java | 4 ----
2 files changed, 3 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b3bceb5/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index c0e5c92..ff7d2ad 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -713,6 +713,9 @@ Release 2.8.0 - UNRELEASED
HADOOP-12214. Parse 'HadoopArchive' commandline using cli Options.
(vinayakumarb)
+ HADOOP-12184. Remove unused Linux-specific constants in NativeIO (Martin
+ Walsh via Colin P. McCabe)
+
OPTIMIZATIONS
HADOOP-11785. Reduce the number of listStatus operation in distcp
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b3bceb5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
index 688b955..77a40ea 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
@@ -67,9 +67,6 @@ public class NativeIO {
public static final int O_APPEND = 02000;
public static final int O_NONBLOCK = 04000;
public static final int O_SYNC = 010000;
- public static final int O_ASYNC = 020000;
- public static final int O_FSYNC = O_SYNC;
- public static final int O_NDELAY = O_NONBLOCK;
// Flags for posix_fadvise() from bits/fcntl.h
/* No further special treatment. */
@@ -356,7 +353,6 @@ public class NativeIO {
public static final int S_IFREG = 0100000; /* regular */
public static final int S_IFLNK = 0120000; /* symbolic link */
public static final int S_IFSOCK = 0140000; /* socket */
- public static final int S_IFWHT = 0160000; /* whiteout */
public static final int S_ISUID = 0004000; /* set user id on execution */
public static final int S_ISGID = 0002000; /* set group id on execution */
public static final int S_ISVTX = 0001000; /* save swapped text even after use */
[05/14] hadoop git commit: HADOOP-12017. Hadoop archives command
should use configurable replication factor when closing (Contributed by Bibin
A Chundatt)
Posted by aw...@apache.org.
HADOOP-12017. Hadoop archives command should use configurable replication factor when closing (Contributed by Bibin A Chundatt)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/94c6a4aa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/94c6a4aa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/94c6a4aa
Branch: refs/heads/HADOOP-12111
Commit: 94c6a4aa85e7d98e9b532b330f30783315f4334b
Parents: 31f1171
Author: Vinayakumar B <vi...@apache.org>
Authored: Wed Jul 22 10:25:49 2015 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Wed Jul 22 10:25:49 2015 +0530
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
.../org/apache/hadoop/tools/HadoopArchives.java | 21 ++++++++++------
.../src/site/markdown/HadoopArchives.md.vm | 2 +-
.../apache/hadoop/tools/TestHadoopArchives.java | 26 ++++++++++++--------
4 files changed, 33 insertions(+), 19 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/94c6a4aa/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 5b51bce..3d101d4 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -992,6 +992,9 @@ Release 2.8.0 - UNRELEASED
HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()
over getMessage() in logging/span events. (Varun Saxena via stevel)
+ HADOOP-12017. Hadoop archives command should use configurable replication
+ factor when closing (Bibin A Chundatt via vinayakumarb)
+
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/94c6a4aa/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java b/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
index 330830b..ee14850 100644
--- a/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
+++ b/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
@@ -100,15 +100,17 @@ public class HadoopArchives implements Tool {
static final String SRC_PARENT_LABEL = NAME + ".parent.path";
/** the size of the blocks that will be created when archiving **/
static final String HAR_BLOCKSIZE_LABEL = NAME + ".block.size";
- /**the size of the part files that will be created when archiving **/
+ /** the replication factor for the files created when archiving **/
+ static final String HAR_REPLICATION_LABEL = NAME + ".replication.factor";
+ /** the size of the part files that will be created when archiving **/
static final String HAR_PARTSIZE_LABEL = NAME + ".partfile.size";
/** size of each part file size **/
long partSize = 2 * 1024 * 1024 * 1024l;
/** size of blocks in hadoop archives **/
long blockSize = 512 * 1024 * 1024l;
- /** the desired replication degree; default is 10 **/
- short repl = 10;
+ /** the desired replication degree; default is 3 **/
+ short repl = 3;
private static final String usage = "archive"
+ " <-archiveName <NAME>.har> <-p <parent path>> [-r <replication factor>]" +
@@ -475,6 +477,7 @@ public class HadoopArchives implements Tool {
conf.setLong(HAR_PARTSIZE_LABEL, partSize);
conf.set(DST_HAR_LABEL, archiveName);
conf.set(SRC_PARENT_LABEL, parentPath.makeQualified(fs).toString());
+ conf.setInt(HAR_REPLICATION_LABEL, repl);
Path outputPath = new Path(dest, archiveName);
FileOutputFormat.setOutputPath(conf, outputPath);
FileSystem outFs = outputPath.getFileSystem(conf);
@@ -549,8 +552,6 @@ public class HadoopArchives implements Tool {
} finally {
srcWriter.close();
}
- //increase the replication of src files
- jobfs.setReplication(srcFiles, repl);
conf.setInt(SRC_COUNT_LABEL, numFiles);
conf.setLong(TOTAL_SIZE_LABEL, totalSize);
int numMaps = (int)(totalSize/partSize);
@@ -587,6 +588,7 @@ public class HadoopArchives implements Tool {
FileSystem destFs = null;
byte[] buffer;
int buf_size = 128 * 1024;
+ private int replication = 3;
long blockSize = 512 * 1024 * 1024l;
// configure the mapper and create
@@ -595,7 +597,7 @@ public class HadoopArchives implements Tool {
// tmp files.
public void configure(JobConf conf) {
this.conf = conf;
-
+ replication = conf.getInt(HAR_REPLICATION_LABEL, 3);
// this is tightly tied to map reduce
// since it does not expose an api
// to get the partition
@@ -712,6 +714,7 @@ public class HadoopArchives implements Tool {
public void close() throws IOException {
// close the part files.
partStream.close();
+ destFs.setReplication(tmpOutput, (short) replication);
}
}
@@ -732,6 +735,7 @@ public class HadoopArchives implements Tool {
private int numIndexes = 1000;
private Path tmpOutputDir = null;
private int written = 0;
+ private int replication = 3;
private int keyVal = 0;
// configure
@@ -740,6 +744,7 @@ public class HadoopArchives implements Tool {
tmpOutputDir = FileOutputFormat.getWorkOutputPath(this.conf);
masterIndex = new Path(tmpOutputDir, "_masterindex");
index = new Path(tmpOutputDir, "_index");
+ replication = conf.getInt(HAR_REPLICATION_LABEL, 3);
try {
fs = masterIndex.getFileSystem(conf);
if (fs.exists(masterIndex)) {
@@ -798,8 +803,8 @@ public class HadoopArchives implements Tool {
outStream.close();
indexStream.close();
// try increasing the replication
- fs.setReplication(index, (short) 5);
- fs.setReplication(masterIndex, (short) 5);
+ fs.setReplication(index, (short) replication);
+ fs.setReplication(masterIndex, (short) replication);
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/94c6a4aa/hadoop-tools/hadoop-archives/src/site/markdown/HadoopArchives.md.vm
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-archives/src/site/markdown/HadoopArchives.md.vm b/hadoop-tools/hadoop-archives/src/site/markdown/HadoopArchives.md.vm
index be557a7..8bbb1ea 100644
--- a/hadoop-tools/hadoop-archives/src/site/markdown/HadoopArchives.md.vm
+++ b/hadoop-tools/hadoop-archives/src/site/markdown/HadoopArchives.md.vm
@@ -53,7 +53,7 @@ How to Create an Archive
sections.
-r indicates the desired replication factor; if this optional argument is
- not specified, a replication factor of 10 will be used.
+ not specified, a replication factor of 3 will be used.
If you just want to archive a single directory /foo/bar then you can just use
http://git-wip-us.apache.org/repos/asf/hadoop/blob/94c6a4aa/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java b/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
index d8222dc..165c515 100644
--- a/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
+++ b/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.tools;
import java.io.ByteArrayOutputStream;
import java.io.FilterInputStream;
import java.io.IOException;
-import java.io.OutputStream;
import java.io.PrintStream;
import java.net.URI;
import java.util.ArrayList;
@@ -39,7 +38,9 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.HarFileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.JarFinder;
@@ -110,13 +111,9 @@ public class TestHadoopArchives {
conf.set(CapacitySchedulerConfiguration.PREFIX
+ CapacitySchedulerConfiguration.ROOT + ".default."
+ CapacitySchedulerConfiguration.CAPACITY, "100");
- dfscluster = new MiniDFSCluster
- .Builder(conf)
- .checkExitOnShutdown(true)
- .numDataNodes(2)
- .format(true)
- .racks(null)
- .build();
+ dfscluster =
+ new MiniDFSCluster.Builder(conf).checkExitOnShutdown(true)
+ .numDataNodes(3).format(true).racks(null).build();
fs = dfscluster.getFileSystem();
@@ -753,12 +750,21 @@ public class TestHadoopArchives {
final String harName = "foo.har";
final String fullHarPathStr = prefix + harName;
- final String[] args = { "-archiveName", harName, "-p", inputPathStr, "-r",
- "3", "*", archivePath.toString() };
+ final String[] args =
+ { "-archiveName", harName, "-p", inputPathStr, "-r", "2", "*",
+ archivePath.toString() };
System.setProperty(HadoopArchives.TEST_HADOOP_ARCHIVES_JAR_PATH,
HADOOP_ARCHIVES_JAR);
final HadoopArchives har = new HadoopArchives(conf);
assertEquals(0, ToolRunner.run(har, args));
+ RemoteIterator<LocatedFileStatus> listFiles =
+ fs.listFiles(new Path(archivePath.toString() + "/" + harName), false);
+ while (listFiles.hasNext()) {
+ LocatedFileStatus next = listFiles.next();
+ if (!next.getPath().toString().endsWith("_SUCCESS")) {
+ assertEquals(next.getPath().toString(), 2, next.getReplication());
+ }
+ }
return fullHarPathStr;
}
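Programmatic invocation mirrors the test above; a minimal sketch (the archive name and paths are hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.tools.HadoopArchives;
    import org.apache.hadoop.util.ToolRunner;

    public class ArchiveWithReplication {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // -r now also controls the replication of the part files and of
        // _index/_masterindex written when the archive is closed.
        String[] harArgs = { "-archiveName", "foo.har", "-p", "/user/me",
            "-r", "2", "*", "/user/me/archives" };
        System.exit(ToolRunner.run(new HadoopArchives(conf), harArgs));
      }
    }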
[09/14] hadoop git commit: YARN-3932.
SchedulerApplicationAttempt#getResourceUsageReport and UserInfo should be based
on total-used-resources. (Bibin A Chundatt via wangda)
Posted by aw...@apache.org.
YARN-3932. SchedulerApplicationAttempt#getResourceUsageReport and UserInfo should be based on total-used-resources. (Bibin A Chundatt via wangda)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76ec26de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76ec26de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76ec26de
Branch: refs/heads/HADOOP-12111
Commit: 76ec26de8099dc48ce3812c595b7ab857a600442
Parents: 1b3bceb
Author: Wangda Tan <wa...@apache.org>
Authored: Wed Jul 22 11:54:02 2015 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Wed Jul 22 11:54:02 2015 -0700
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +
.../scheduler/SchedulerApplicationAttempt.java | 2 +-
.../scheduler/capacity/LeafQueue.java | 8 ++-
.../TestCapacitySchedulerNodeLabelUpdate.java | 64 ++++++++++++++++++++
4 files changed, 74 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/76ec26de/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 5100cdf..f751862 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -653,6 +653,9 @@ Release 2.8.0 - UNRELEASED
YARN-3885. ProportionalCapacityPreemptionPolicy doesn't preempt if queue is
more than 2 level. (Ajith S via wangda)
+ YARN-3932. SchedulerApplicationAttempt#getResourceUsageReport and UserInfo
+ should be based on total-used-resources. (Bibin A Chundatt via wangda)
+
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/76ec26de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
index cf543bd..317e61c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
@@ -598,7 +598,7 @@ public class SchedulerApplicationAttempt implements SchedulableEntity {
AggregateAppResourceUsage runningResourceUsage =
getRunningAggregateAppResourceUsage();
Resource usedResourceClone =
- Resources.clone(attemptResourceUsage.getUsed());
+ Resources.clone(attemptResourceUsage.getAllUsed());
Resource reservedResourceClone =
Resources.clone(attemptResourceUsage.getReserved());
return ApplicationResourceUsageReport.newInstance(liveContainers.size(),
http://git-wip-us.apache.org/repos/asf/hadoop/blob/76ec26de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index 0ce4d68..5c283f4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -439,7 +439,7 @@ public class LeafQueue extends AbstractCSQueue {
for (Map.Entry<String, User> entry : users.entrySet()) {
User user = entry.getValue();
usersToReturn.add(new UserInfo(entry.getKey(), Resources.clone(user
- .getUsed()), user.getActiveApplications(), user
+ .getAllUsed()), user.getActiveApplications(), user
.getPendingApplications(), Resources.clone(user
.getConsumedAMResources()), Resources.clone(user
.getUserResourceLimit())));
@@ -1894,7 +1894,11 @@ public class LeafQueue extends AbstractCSQueue {
public Resource getUsed() {
return userResourceUsage.getUsed();
}
-
+
+ public Resource getAllUsed() {
+ return userResourceUsage.getAllUsed();
+ }
+
public Resource getUsed(String label) {
return userResourceUsage.getUsed(label);
}
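Conceptually, getUsed() covers only the default (empty-label) partition, while getAllUsed() must aggregate every partition; a stand-in sketch of that aggregation (the real bookkeeping lives in ResourceUsage):

    import java.util.HashMap;
    import java.util.Map;

    public class AllUsedSketch {
      // Sums memory across all node-label partitions; a stand-in for what
      // getAllUsed() reports versus the default-partition-only getUsed().
      static long allUsedMemory(Map<String, Long> usedByLabel) {
        long total = 0;
        for (long mb : usedByLabel.values()) {
          total += mb;
        }
        return total;
      }

      public static void main(String[] args) {
        Map<String, Long> used = new HashMap<String, Long>();
        used.put("", 1024L);  // default partition: the old report's view
        used.put("x", 1024L); // labeled partition: previously missed
        System.out.println(allUsedMemory(used)); // 2048, as the test expects
      }
    }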
http://git-wip-us.apache.org/repos/asf/hadoop/blob/76ec26de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNodeLabelUpdate.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNodeLabelUpdate.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNodeLabelUpdate.java
index e60e496..0a701d8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNodeLabelUpdate.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNodeLabelUpdate.java
@@ -22,6 +22,7 @@ import java.util.ArrayList;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -34,6 +35,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsMana
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ResourceInfo;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@@ -95,6 +97,68 @@ public class TestCapacitySchedulerNodeLabelUpdate {
.getMemory());
}
+ @Test(timeout = 60000)
+ public void testResourceUsage() throws Exception {
+ // set node -> label
+ mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x", "y",
+ "z"));
+
+ // set mapping:
+ // h1 -> x
+ // h2 -> y
+ mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x")));
+ mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h2", 0), toSet("y")));
+
+ // inject node label manager
+ MockRM rm = new MockRM(getConfigurationWithQueueLabels(conf)) {
+ @Override
+ public RMNodeLabelsManager createNodeLabelManager() {
+ return mgr;
+ }
+ };
+ rm.getRMContext().setNodeLabelManager(mgr);
+ rm.start();
+ MockNM nm1 = rm.registerNode("h1:1234", 2048);
+ MockNM nm2 = rm.registerNode("h2:1234", 2048);
+ MockNM nm3 = rm.registerNode("h3:1234", 2048);
+
+ ContainerId containerId;
+ // launch an app to queue a1 (label = x), and check all containers will
+ // be allocated in h1
+ RMApp app1 = rm.submitApp(GB, "app", "user", null, "a");
+ MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm3);
+ ApplicationResourceUsageReport appResourceUsageReport =
+ rm.getResourceScheduler().getAppResourceUsageReport(
+ am1.getApplicationAttemptId());
+ Assert.assertEquals(1024, appResourceUsageReport.getUsedResources()
+ .getMemory());
+ Assert.assertEquals(1, appResourceUsageReport.getUsedResources()
+ .getVirtualCores());
+ // request a container.
+ am1.allocate("*", GB, 1, new ArrayList<ContainerId>(), "x");
+ containerId = ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
+ rm.waitForState(nm1, containerId, RMContainerState.ALLOCATED, 10 * 1000);
+ appResourceUsageReport =
+ rm.getResourceScheduler().getAppResourceUsageReport(
+ am1.getApplicationAttemptId());
+ Assert.assertEquals(2048, appResourceUsageReport.getUsedResources()
+ .getMemory());
+ Assert.assertEquals(2, appResourceUsageReport.getUsedResources()
+ .getVirtualCores());
+ LeafQueue queue =
+ (LeafQueue) ((CapacityScheduler) rm.getResourceScheduler())
+ .getQueue("a");
+ ArrayList<UserInfo> users = queue.getUsers();
+ for (UserInfo userInfo : users) {
+ if (userInfo.getUsername().equals("user")) {
+ ResourceInfo resourcesUsed = userInfo.getResourcesUsed();
+ Assert.assertEquals(2048, resourcesUsed.getMemory());
+ Assert.assertEquals(2, resourcesUsed.getvCores());
+ }
+ }
+ rm.stop();
+ }
+
@Test (timeout = 60000)
public void testNodeUpdate() throws Exception {
// set node -> label