Posted to common-commits@hadoop.apache.org by vi...@apache.org on 2016/02/22 05:08:01 UTC
[1/3] hadoop git commit: HDFS-9425. Expose number of blocks per volume as a metric (Contributed by Brahma Reddy Battula)
Repository: hadoop
Updated Branches:
refs/heads/branch-2 3e76768f2 -> 2e3c35a83
refs/heads/branch-2.8 8ca8d218a -> 426f21308
refs/heads/trunk f31351673 -> 342c9572b
HDFS-9425. Expose number of blocks per volume as a metric (Contributed by Brahma Reddy Battula)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/342c9572
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/342c9572
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/342c9572
Branch: refs/heads/trunk
Commit: 342c9572bf6a623287f34c5cc0bc3be6038c191a
Parents: f313516
Author: Vinayakumar B <vi...@apache.org>
Authored: Mon Feb 22 09:29:23 2016 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Mon Feb 22 09:29:23 2016 +0530
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++
.../datanode/fsdataset/impl/BlockPoolSlice.java | 29 ++++++++++-
.../datanode/fsdataset/impl/FsDatasetImpl.java | 12 ++++-
.../datanode/fsdataset/impl/FsVolumeImpl.java | 30 +++++++++--
.../server/datanode/TestDataNodeMXBean.java | 55 ++++++++++++++++++++
5 files changed, 121 insertions(+), 8 deletions(-)
----------------------------------------------------------------------
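For context: after this change, the DataNode's "VolumeInfo" JMX attribute
(on the Hadoop:service=DataNode,name=DataNodeInfo MBean) reports a
"numBlocks" entry per volume alongside the existing space counters. A
hypothetical sample of the resulting JSON, with illustrative paths and
values (the key names match the innerInfo map in the patch below):

    {
      "/data/1/dfs/dn/current": {
        "usedSpace": 1073741824,
        "freeSpace": 536870912000,
        "reservedSpace": 0,
        "reservedSpaceForReplicas": 0,
        "numBlocks": 42
      }
    }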
http://git-wip-us.apache.org/repos/asf/hadoop/blob/342c9572/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9a156a5..3552c8e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2024,6 +2024,9 @@ Release 2.8.0 - UNRELEASED
HDFS-9768. Reuse ObjectMapper instance in HDFS to improve the performance.
(Lin Yiqun via aajisaka)
+ HDFS-9425. Expose number of blocks per volume as a metric
+ (Brahma Reddy Battula via vinayakumarb)
+
BUG FIXES
HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/342c9572/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
index 188ab68..aa33851 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
@@ -30,6 +30,7 @@ import java.io.RandomAccessFile;
import java.io.Writer;
import java.util.Iterator;
import java.util.Scanner;
+import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
@@ -86,6 +87,7 @@ class BlockPoolSlice {
private final boolean deleteDuplicateReplicas;
private static final String REPLICA_CACHE_FILE = "replicas";
private final long replicaCacheExpiry = 5*60*1000;
+ private AtomicLong numOfBlocks = new AtomicLong();
private final long cachedDfsUsedCheckTime;
private final Timer timer;
@@ -273,7 +275,11 @@ class BlockPoolSlice {
*/
File createTmpFile(Block b) throws IOException {
File f = new File(tmpDir, b.getBlockName());
- return DatanodeUtil.createTmpFile(b, f);
+ File tmpFile = DatanodeUtil.createTmpFile(b, f);
+ // If any exception occurs during creation, the counter will not have been
+ // incremented, so there is no need to decrement it.
+ incrNumBlocks();
+ return tmpFile;
}
/**
@@ -282,7 +288,11 @@ class BlockPoolSlice {
*/
File createRbwFile(Block b) throws IOException {
File f = new File(rbwDir, b.getBlockName());
- return DatanodeUtil.createTmpFile(b, f);
+ File rbwFile = DatanodeUtil.createTmpFile(b, f);
+ // If any exception occurs during creation, the counter will not have been
+ // incremented, so there is no need to decrement it.
+ incrNumBlocks();
+ return rbwFile;
}
File addFinalizedBlock(Block b, File f) throws IOException {
@@ -493,6 +503,9 @@ class BlockPoolSlice {
} else {
lazyWriteReplicaMap.discardReplica(bpid, blockId, false);
}
+ if (oldReplica == null) {
+ incrNumBlocks();
+ }
}
@@ -825,4 +838,16 @@ class BlockPoolSlice {
}
}
}
+
+ void incrNumBlocks() {
+ numOfBlocks.incrementAndGet();
+ }
+
+ void decrNumBlocks() {
+ numOfBlocks.decrementAndGet();
+ }
+
+ public long getNumOfBlocks() {
+ return numOfBlocks.get();
+ }
}
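The counter discipline above is increment-on-success: numOfBlocks is bumped
only after DatanodeUtil.createTmpFile returns normally, so a failed creation
leaves the count untouched. A minimal standalone sketch of the same idiom
(the class and method names here are illustrative, not part of the patch):

    import java.io.File;
    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicLong;

    class BlockFileCounter {
      private final AtomicLong numOfBlocks = new AtomicLong();

      File createAndCount(File dir, String name) throws IOException {
        File f = new File(dir, name);
        if (!f.createNewFile()) {      // any IOException propagates from here
          throw new IOException("File already exists: " + f);
        }
        numOfBlocks.incrementAndGet(); // reached only if creation succeeded
        return f;
      }

      long getNumOfBlocks() {
        return numOfBlocks.get();
      }
    }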
http://git-wip-us.apache.org/repos/asf/hadoop/blob/342c9572/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 819d097..d1cb836 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -965,6 +965,11 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
newReplicaInfo.setNumBytes(blockFiles[1].length());
// Finalize the copied files
newReplicaInfo = finalizeReplica(block.getBlockPoolId(), newReplicaInfo);
+ synchronized (this) {
+ // Increment numBlocks here because this block was moved without BPS knowing
+ FsVolumeImpl volume = (FsVolumeImpl) newReplicaInfo.getVolume();
+ volume.getBlockPoolSlice(block.getBlockPoolId()).incrNumBlocks();
+ }
removeOldReplica(replicaInfo, newReplicaInfo, oldBlockFile, oldMetaFile,
oldBlockFile.length(), oldMetaFile.length(), block.getBlockPoolId());
@@ -2599,6 +2604,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
final long reservedSpace; // size of space reserved for non-HDFS
final long reservedSpaceForReplicas; // size of space reserved RBW or
// re-replication
+ final long numBlocks;
VolumeInfo(FsVolumeImpl v, long usedSpace, long freeSpace) {
this.directory = v.toString();
@@ -2606,6 +2612,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
this.freeSpace = freeSpace;
this.reservedSpace = v.getReserved();
this.reservedSpaceForReplicas = v.getReservedForReplicas();
+ this.numBlocks = v.getNumBlocks();
}
}
@@ -2640,6 +2647,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
innerInfo.put("freeSpace", v.freeSpace);
innerInfo.put("reservedSpace", v.reservedSpace);
innerInfo.put("reservedSpaceForReplicas", v.reservedSpaceForReplicas);
+ innerInfo.put("numBlocks", v.numBlocks);
info.put(v.directory, innerInfo);
}
return info;
@@ -2728,8 +2736,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
synchronized (FsDatasetImpl.this) {
ramDiskReplicaTracker.recordEndLazyPersist(bpId, blockId, savedFiles);
- targetVolume.incDfsUsed(bpId,
- savedFiles[0].length() + savedFiles[1].length());
+ targetVolume.incDfsUsedAndNumBlocks(bpId, savedFiles[0].length()
+ + savedFiles[1].length());
// Update metrics (ignore the metadata file size)
datanode.getMetrics().incrRamDiskBlocksLazyPersisted();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/342c9572/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 1533a32..857d0ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -284,21 +284,35 @@ public class FsVolumeImpl implements FsVolumeSpi {
}
void onBlockFileDeletion(String bpid, long value) {
- decDfsUsed(bpid, value);
+ decDfsUsedAndNumBlocks(bpid, value, true);
if (isTransientStorage()) {
dataset.releaseLockedMemory(value, true);
}
}
void onMetaFileDeletion(String bpid, long value) {
- decDfsUsed(bpid, value);
+ decDfsUsedAndNumBlocks(bpid, value, false);
}
- private void decDfsUsed(String bpid, long value) {
+ private void decDfsUsedAndNumBlocks(String bpid, long value,
+ boolean blockFileDeleted) {
synchronized(dataset) {
BlockPoolSlice bp = bpSlices.get(bpid);
if (bp != null) {
bp.decDfsUsed(value);
+ if (blockFileDeleted) {
+ bp.decrNumBlocks();
+ }
+ }
+ }
+ }
+
+ void incDfsUsedAndNumBlocks(String bpid, long value) {
+ synchronized (dataset) {
+ BlockPoolSlice bp = bpSlices.get(bpid);
+ if (bp != null) {
+ bp.incDfsUsed(value);
+ bp.incrNumBlocks();
}
}
}
@@ -847,7 +861,15 @@ public class FsVolumeImpl implements FsVolumeSpi {
throws IOException {
getBlockPoolSlice(bpid).getVolumeMap(volumeMap, ramDiskReplicaMap);
}
-
+
+ long getNumBlocks() {
+ long numBlocks = 0;
+ for (BlockPoolSlice s : bpSlices.values()) {
+ numBlocks += s.getNumOfBlocks();
+ }
+ return numBlocks;
+ }
+
@Override
public String toString() {
return currentDir.getAbsolutePath();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/342c9572/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
index a7d0a5e..6d520ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
@@ -18,15 +18,23 @@
package org.apache.hadoop.hdfs.server.datanode;
import java.lang.management.ManagementFactory;
+import java.util.Collection;
import java.util.List;
+import java.util.Map;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Assert;
import org.junit.Test;
+import org.mortbay.util.ajax.JSON;
+
+import static org.junit.Assert.assertEquals;
/**
* Class for testing {@link DataNodeMXBean} implementation
@@ -84,4 +92,51 @@ public class TestDataNodeMXBean {
private static String replaceDigits(final String s) {
return s.replaceAll("[0-9]+", "_DIGITS_");
}
+
+ @Test
+ public void testDataNodeMXBeanBlockCount() throws Exception {
+ Configuration conf = new Configuration();
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+
+ try {
+ List<DataNode> datanodes = cluster.getDataNodes();
+ assertEquals(1, datanodes.size());
+
+ MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+ ObjectName mxbeanName =
+ new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
+ FileSystem fs = cluster.getFileSystem();
+ for (int i = 0; i < 5; i++) {
+ DFSTestUtil.createFile(fs, new Path("/tmp.txt" + i), 1024, (short) 1,
+ 1L);
+ }
+ assertEquals("Before restart DN", 5, getTotalNumBlocks(mbs, mxbeanName));
+ cluster.restartDataNode(0);
+ cluster.waitActive();
+ assertEquals("After restart DN", 5, getTotalNumBlocks(mbs, mxbeanName));
+ fs.delete(new Path("/tmp.txt1"), true);
+ // Wait till replica gets deleted on disk.
+ Thread.sleep(5000);
+ assertEquals("After delete one file", 4,
+ getTotalNumBlocks(mbs, mxbeanName));
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ int getTotalNumBlocks(MBeanServer mbs, ObjectName mxbeanName)
+ throws Exception {
+ int totalBlocks = 0;
+ String volumeInfo = (String) mbs.getAttribute(mxbeanName, "VolumeInfo");
+ Map<?, ?> m = (Map<?, ?>) JSON.parse(volumeInfo);
+ Collection<Map<String, Long>> values =
+ (Collection<Map<String, Long>>) m.values();
+ for (Map<String, Long> volumeInfoMap : values) {
+ totalBlocks += volumeInfoMap.get("numBlocks");
+ }
+ return totalBlocks;
+ }
}
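Outside of a MiniDFSCluster test, the same attribute can be read from a live
DataNode over JMX. A hedged sketch using the standard javax.management.remote
API; the service URL and port are assumptions, and remote JMX must be enabled
on the DataNode for this to work:

    import javax.management.MBeanServerConnection;
    import javax.management.ObjectName;
    import javax.management.remote.JMXConnector;
    import javax.management.remote.JMXConnectorFactory;
    import javax.management.remote.JMXServiceURL;

    public class DataNodeVolumeInfoReader {
      public static void main(String[] args) throws Exception {
        // Assumed endpoint; adjust host/port to the DataNode's JMX settings.
        JMXServiceURL url = new JMXServiceURL(
            "service:jmx:rmi:///jndi/rmi://localhost:8006/jmxrmi");
        try (JMXConnector connector = JMXConnectorFactory.connect(url)) {
          MBeanServerConnection mbsc = connector.getMBeanServerConnection();
          ObjectName name =
              new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
          // VolumeInfo is a JSON string; each volume entry carries numBlocks.
          String volumeInfo = (String) mbsc.getAttribute(name, "VolumeInfo");
          System.out.println(volumeInfo);
        }
      }
    }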
[2/3] hadoop git commit: HDFS-9425. Expose number of blocks per volume as a metric (Contributed by Brahma Reddy Battula)
Posted by vi...@apache.org.
HDFS-9425. Expose number of blocks per volume as a metric (Contributed by Brahma Reddy Battula)
(cherry picked from commit 342c9572bf6a623287f34c5cc0bc3be6038c191a)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e3c35a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e3c35a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e3c35a8
Branch: refs/heads/branch-2
Commit: 2e3c35a83549b124cc44fc58eca17df1d3a15b8f
Parents: 3e76768
Author: Vinayakumar B <vi...@apache.org>
Authored: Mon Feb 22 09:29:23 2016 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Mon Feb 22 09:29:47 2016 +0530
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++
.../datanode/fsdataset/impl/BlockPoolSlice.java | 29 ++++++++++-
.../datanode/fsdataset/impl/FsDatasetImpl.java | 12 ++++-
.../datanode/fsdataset/impl/FsVolumeImpl.java | 30 +++++++++--
.../server/datanode/TestDataNodeMXBean.java | 55 ++++++++++++++++++++
5 files changed, 121 insertions(+), 8 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e3c35a8/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index df9abd2..237e0a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1084,6 +1084,9 @@ Release 2.8.0 - UNRELEASED
HDFS-9768. Reuse ObjectMapper instance in HDFS to improve the performance.
(Lin Yiqun via aajisaka)
+ HDFS-9425. Expose number of blocks per volume as a metric
+ (Brahma Reddy Battula via vinayakumarb)
+
BUG FIXES
HDFS-8091: ACLStatus and XAttributes should be presented to
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e3c35a8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
index 69e0913..14faa71 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
@@ -30,6 +30,7 @@ import java.io.RandomAccessFile;
import java.io.Writer;
import java.util.Iterator;
import java.util.Scanner;
+import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
@@ -86,6 +87,7 @@ class BlockPoolSlice {
private final boolean deleteDuplicateReplicas;
private static final String REPLICA_CACHE_FILE = "replicas";
private final long replicaCacheExpiry = 5*60*1000;
+ private AtomicLong numOfBlocks = new AtomicLong();
private final long cachedDfsUsedCheckTime;
private final Timer timer;
@@ -279,7 +281,11 @@ class BlockPoolSlice {
*/
File createTmpFile(Block b) throws IOException {
File f = new File(tmpDir, b.getBlockName());
- return DatanodeUtil.createTmpFile(b, f);
+ File tmpFile = DatanodeUtil.createTmpFile(b, f);
+ // If any exception occurs during creation, the counter will not have been
+ // incremented, so there is no need to decrement it.
+ incrNumBlocks();
+ return tmpFile;
}
/**
@@ -288,7 +294,11 @@ class BlockPoolSlice {
*/
File createRbwFile(Block b) throws IOException {
File f = new File(rbwDir, b.getBlockName());
- return DatanodeUtil.createTmpFile(b, f);
+ File rbwFile = DatanodeUtil.createTmpFile(b, f);
+ // If any exception occurs during creation, the counter will not have been
+ // incremented, so there is no need to decrement it.
+ incrNumBlocks();
+ return rbwFile;
}
File addBlock(Block b, File f) throws IOException {
@@ -499,6 +509,9 @@ class BlockPoolSlice {
} else {
lazyWriteReplicaMap.discardReplica(bpid, blockId, false);
}
+ if (oldReplica == null) {
+ incrNumBlocks();
+ }
}
@@ -831,4 +844,16 @@ class BlockPoolSlice {
}
}
}
+
+ void incrNumBlocks() {
+ numOfBlocks.incrementAndGet();
+ }
+
+ void decrNumBlocks() {
+ numOfBlocks.decrementAndGet();
+ }
+
+ public long getNumOfBlocks() {
+ return numOfBlocks.get();
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e3c35a8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 36b76ac..df82b76 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -970,6 +970,11 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
newReplicaInfo.setNumBytes(blockFiles[1].length());
// Finalize the copied files
newReplicaInfo = finalizeReplica(block.getBlockPoolId(), newReplicaInfo);
+ synchronized (this) {
+ // Increment numBlocks here because this block was moved without BPS knowing
+ FsVolumeImpl volume = (FsVolumeImpl) newReplicaInfo.getVolume();
+ volume.getBlockPoolSlice(block.getBlockPoolId()).incrNumBlocks();
+ }
removeOldReplica(replicaInfo, newReplicaInfo, oldBlockFile, oldMetaFile,
oldBlockFile.length(), oldMetaFile.length(), block.getBlockPoolId());
@@ -2604,6 +2609,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
final long reservedSpace; // size of space reserved for non-HDFS
final long reservedSpaceForReplicas; // size of space reserved RBW or
// re-replication
+ final long numBlocks;
VolumeInfo(FsVolumeImpl v, long usedSpace, long freeSpace) {
this.directory = v.toString();
@@ -2611,6 +2617,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
this.freeSpace = freeSpace;
this.reservedSpace = v.getReserved();
this.reservedSpaceForReplicas = v.getReservedForReplicas();
+ this.numBlocks = v.getNumBlocks();
}
}
@@ -2645,6 +2652,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
innerInfo.put("freeSpace", v.freeSpace);
innerInfo.put("reservedSpace", v.reservedSpace);
innerInfo.put("reservedSpaceForReplicas", v.reservedSpaceForReplicas);
+ innerInfo.put("numBlocks", v.numBlocks);
info.put(v.directory, innerInfo);
}
return info;
@@ -2775,8 +2783,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
synchronized (FsDatasetImpl.this) {
ramDiskReplicaTracker.recordEndLazyPersist(bpId, blockId, savedFiles);
- targetVolume.incDfsUsed(bpId,
- savedFiles[0].length() + savedFiles[1].length());
+ targetVolume.incDfsUsedAndNumBlocks(bpId, savedFiles[0].length()
+ + savedFiles[1].length());
// Update metrics (ignore the metadata file size)
datanode.getMetrics().incrRamDiskBlocksLazyPersisted();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e3c35a8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index f3dd60b..95deef8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -284,21 +284,35 @@ public class FsVolumeImpl implements FsVolumeSpi {
}
void onBlockFileDeletion(String bpid, long value) {
- decDfsUsed(bpid, value);
+ decDfsUsedAndNumBlocks(bpid, value, true);
if (isTransientStorage()) {
dataset.releaseLockedMemory(value, true);
}
}
void onMetaFileDeletion(String bpid, long value) {
- decDfsUsed(bpid, value);
+ decDfsUsedAndNumBlocks(bpid, value, false);
}
- private void decDfsUsed(String bpid, long value) {
+ private void decDfsUsedAndNumBlocks(String bpid, long value,
+ boolean blockFileDeleted) {
synchronized(dataset) {
BlockPoolSlice bp = bpSlices.get(bpid);
if (bp != null) {
bp.decDfsUsed(value);
+ if (blockFileDeleted) {
+ bp.decrNumBlocks();
+ }
+ }
+ }
+ }
+
+ void incDfsUsedAndNumBlocks(String bpid, long value) {
+ synchronized (dataset) {
+ BlockPoolSlice bp = bpSlices.get(bpid);
+ if (bp != null) {
+ bp.incDfsUsed(value);
+ bp.incrNumBlocks();
}
}
}
@@ -847,7 +861,15 @@ public class FsVolumeImpl implements FsVolumeSpi {
throws IOException {
getBlockPoolSlice(bpid).getVolumeMap(volumeMap, ramDiskReplicaMap);
}
-
+
+ long getNumBlocks() {
+ long numBlocks = 0;
+ for (BlockPoolSlice s : bpSlices.values()) {
+ numBlocks += s.getNumOfBlocks();
+ }
+ return numBlocks;
+ }
+
@Override
public String toString() {
return currentDir.getAbsolutePath();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e3c35a8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
index b461e3a..9f5a471 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
@@ -18,15 +18,23 @@
package org.apache.hadoop.hdfs.server.datanode;
import java.lang.management.ManagementFactory;
+import java.util.Collection;
import java.util.List;
+import java.util.Map;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Assert;
import org.junit.Test;
+import org.mortbay.util.ajax.JSON;
+
+import static org.junit.Assert.assertEquals;
/**
* Class for testing {@link DataNodeMXBean} implementation
@@ -78,4 +86,51 @@ public class TestDataNodeMXBean {
private static String replaceDigits(final String s) {
return s.replaceAll("[0-9]+", "_DIGITS_");
}
+
+ @Test
+ public void testDataNodeMXBeanBlockCount() throws Exception {
+ Configuration conf = new Configuration();
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+
+ try {
+ List<DataNode> datanodes = cluster.getDataNodes();
+ assertEquals(1, datanodes.size());
+
+ MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+ ObjectName mxbeanName =
+ new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
+ FileSystem fs = cluster.getFileSystem();
+ for (int i = 0; i < 5; i++) {
+ DFSTestUtil.createFile(fs, new Path("/tmp.txt" + i), 1024, (short) 1,
+ 1L);
+ }
+ assertEquals("Before restart DN", 5, getTotalNumBlocks(mbs, mxbeanName));
+ cluster.restartDataNode(0);
+ cluster.waitActive();
+ assertEquals("After restart DN", 5, getTotalNumBlocks(mbs, mxbeanName));
+ fs.delete(new Path("/tmp.txt1"), true);
+ // Wait till replica gets deleted on disk.
+ Thread.sleep(5000);
+ assertEquals("After delete one file", 4,
+ getTotalNumBlocks(mbs, mxbeanName));
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ int getTotalNumBlocks(MBeanServer mbs, ObjectName mxbeanName)
+ throws Exception {
+ int totalBlocks = 0;
+ String volumeInfo = (String) mbs.getAttribute(mxbeanName, "VolumeInfo");
+ Map<?, ?> m = (Map<?, ?>) JSON.parse(volumeInfo);
+ Collection<Map<String, Long>> values =
+ (Collection<Map<String, Long>>) m.values();
+ for (Map<String, Long> volumeInfoMap : values) {
+ totalBlocks += volumeInfoMap.get("numBlocks");
+ }
+ return totalBlocks;
+ }
}
[3/3] hadoop git commit: HDFS-9425. Expose number of blocks per volume as a metric (Contributed by Brahma Reddy Battula)
Posted by vi...@apache.org.
HDFS-9425. Expose number of blocks per volume as a metric (Contributed by Brahma Reddy Battula)
(cherry picked from commit 342c9572bf6a623287f34c5cc0bc3be6038c191a)
(cherry picked from commit 2e3c35a83549b124cc44fc58eca17df1d3a15b8f)
Conflicts:
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/426f2130
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/426f2130
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/426f2130
Branch: refs/heads/branch-2.8
Commit: 426f213088042ff5c0c8b0489bf6df5145aa35d5
Parents: 8ca8d21
Author: Vinayakumar B <vi...@apache.org>
Authored: Mon Feb 22 09:29:23 2016 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Mon Feb 22 09:31:52 2016 +0530
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++
.../datanode/fsdataset/impl/BlockPoolSlice.java | 29 ++++++++++-
.../datanode/fsdataset/impl/FsDatasetImpl.java | 12 ++++-
.../datanode/fsdataset/impl/FsVolumeImpl.java | 30 +++++++++--
.../server/datanode/TestDataNodeMXBean.java | 55 ++++++++++++++++++++
5 files changed, 121 insertions(+), 8 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/426f2130/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a411cf0..c80e453 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -997,6 +997,9 @@ Release 2.8.0 - UNRELEASED
HDFS-9768. Reuse ObjectMapper instance in HDFS to improve the performance.
(Lin Yiqun via aajisaka)
+ HDFS-9425. Expose number of blocks per volume as a metric
+ (Brahma Reddy Battula via vinayakumarb)
+
BUG FIXES
HDFS-8091: ACLStatus and XAttributes should be presented to
http://git-wip-us.apache.org/repos/asf/hadoop/blob/426f2130/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
index 33a88df..3d6d7c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
@@ -30,6 +30,7 @@ import java.io.RandomAccessFile;
import java.io.Writer;
import java.util.Iterator;
import java.util.Scanner;
+import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
@@ -85,6 +86,7 @@ class BlockPoolSlice {
private final boolean deleteDuplicateReplicas;
private static final String REPLICA_CACHE_FILE = "replicas";
private final long replicaCacheExpiry = 5*60*1000;
+ private AtomicLong numOfBlocks = new AtomicLong();
// TODO:FEDERATION scalability issue - a thread per DU is needed
private final DU dfsUsage;
@@ -267,7 +269,11 @@ class BlockPoolSlice {
*/
File createTmpFile(Block b) throws IOException {
File f = new File(tmpDir, b.getBlockName());
- return DatanodeUtil.createTmpFile(b, f);
+ File tmpFile = DatanodeUtil.createTmpFile(b, f);
+ // If any exception occurs during creation, the counter will not have been
+ // incremented, so there is no need to decrement it.
+ incrNumBlocks();
+ return tmpFile;
}
/**
@@ -276,7 +282,11 @@ class BlockPoolSlice {
*/
File createRbwFile(Block b) throws IOException {
File f = new File(rbwDir, b.getBlockName());
- return DatanodeUtil.createTmpFile(b, f);
+ File rbwFile = DatanodeUtil.createTmpFile(b, f);
+ // If any exception occurs during creation, the counter will not have been
+ // incremented, so there is no need to decrement it.
+ incrNumBlocks();
+ return rbwFile;
}
File addBlock(Block b, File f) throws IOException {
@@ -487,6 +497,9 @@ class BlockPoolSlice {
} else {
lazyWriteReplicaMap.discardReplica(bpid, blockId, false);
}
+ if (oldReplica == null) {
+ incrNumBlocks();
+ }
}
@@ -819,4 +832,16 @@ class BlockPoolSlice {
}
}
}
+
+ void incrNumBlocks() {
+ numOfBlocks.incrementAndGet();
+ }
+
+ void decrNumBlocks() {
+ numOfBlocks.decrementAndGet();
+ }
+
+ public long getNumOfBlocks() {
+ return numOfBlocks.get();
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/426f2130/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index cad7248..677008c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -968,6 +968,11 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
newReplicaInfo.setNumBytes(blockFiles[1].length());
// Finalize the copied files
newReplicaInfo = finalizeReplica(block.getBlockPoolId(), newReplicaInfo);
+ synchronized (this) {
+ // Increment numBlocks here because this block was moved without BPS knowing
+ FsVolumeImpl volume = (FsVolumeImpl) newReplicaInfo.getVolume();
+ volume.getBlockPoolSlice(block.getBlockPoolId()).incrNumBlocks();
+ }
removeOldReplica(replicaInfo, newReplicaInfo, oldBlockFile, oldMetaFile,
oldBlockFile.length(), oldMetaFile.length(), block.getBlockPoolId());
@@ -2602,6 +2607,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
final long reservedSpace; // size of space reserved for non-HDFS
final long reservedSpaceForReplicas; // size of space reserved RBW or
// re-replication
+ final long numBlocks;
VolumeInfo(FsVolumeImpl v, long usedSpace, long freeSpace) {
this.directory = v.toString();
@@ -2609,6 +2615,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
this.freeSpace = freeSpace;
this.reservedSpace = v.getReserved();
this.reservedSpaceForReplicas = v.getReservedForReplicas();
+ this.numBlocks = v.getNumBlocks();
}
}
@@ -2643,6 +2650,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
innerInfo.put("freeSpace", v.freeSpace);
innerInfo.put("reservedSpace", v.reservedSpace);
innerInfo.put("reservedSpaceForReplicas", v.reservedSpaceForReplicas);
+ innerInfo.put("numBlocks", v.numBlocks);
info.put(v.directory, innerInfo);
}
return info;
@@ -2773,8 +2781,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
synchronized (FsDatasetImpl.this) {
ramDiskReplicaTracker.recordEndLazyPersist(bpId, blockId, savedFiles);
- targetVolume.incDfsUsed(bpId,
- savedFiles[0].length() + savedFiles[1].length());
+ targetVolume.incDfsUsedAndNumBlocks(bpId, savedFiles[0].length()
+ + savedFiles[1].length());
// Update metrics (ignore the metadata file size)
datanode.getMetrics().incrRamDiskBlocksLazyPersisted();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/426f2130/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 64ea8cb..e02c293 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -283,21 +283,35 @@ public class FsVolumeImpl implements FsVolumeSpi {
}
void onBlockFileDeletion(String bpid, long value) {
- decDfsUsed(bpid, value);
+ decDfsUsedAndNumBlocks(bpid, value, true);
if (isTransientStorage()) {
dataset.releaseLockedMemory(value, true);
}
}
void onMetaFileDeletion(String bpid, long value) {
- decDfsUsed(bpid, value);
+ decDfsUsedAndNumBlocks(bpid, value, false);
}
- private void decDfsUsed(String bpid, long value) {
+ private void decDfsUsedAndNumBlocks(String bpid, long value,
+ boolean blockFileDeleted) {
synchronized(dataset) {
BlockPoolSlice bp = bpSlices.get(bpid);
if (bp != null) {
bp.decDfsUsed(value);
+ if (blockFileDeleted) {
+ bp.decrNumBlocks();
+ }
+ }
+ }
+ }
+
+ void incDfsUsedAndNumBlocks(String bpid, long value) {
+ synchronized (dataset) {
+ BlockPoolSlice bp = bpSlices.get(bpid);
+ if (bp != null) {
+ bp.incDfsUsed(value);
+ bp.incrNumBlocks();
}
}
}
@@ -846,7 +860,15 @@ public class FsVolumeImpl implements FsVolumeSpi {
throws IOException {
getBlockPoolSlice(bpid).getVolumeMap(volumeMap, ramDiskReplicaMap);
}
-
+
+ long getNumBlocks() {
+ long numBlocks = 0;
+ for (BlockPoolSlice s : bpSlices.values()) {
+ numBlocks += s.getNumOfBlocks();
+ }
+ return numBlocks;
+ }
+
@Override
public String toString() {
return currentDir.getAbsolutePath();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/426f2130/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
index b461e3a..9f5a471 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
@@ -18,15 +18,23 @@
package org.apache.hadoop.hdfs.server.datanode;
import java.lang.management.ManagementFactory;
+import java.util.Collection;
import java.util.List;
+import java.util.Map;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Assert;
import org.junit.Test;
+import org.mortbay.util.ajax.JSON;
+
+import static org.junit.Assert.assertEquals;
/**
* Class for testing {@link DataNodeMXBean} implementation
@@ -78,4 +86,51 @@ public class TestDataNodeMXBean {
private static String replaceDigits(final String s) {
return s.replaceAll("[0-9]+", "_DIGITS_");
}
+
+ @Test
+ public void testDataNodeMXBeanBlockCount() throws Exception {
+ Configuration conf = new Configuration();
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+
+ try {
+ List<DataNode> datanodes = cluster.getDataNodes();
+ assertEquals(1, datanodes.size());
+
+ MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+ ObjectName mxbeanName =
+ new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
+ FileSystem fs = cluster.getFileSystem();
+ for (int i = 0; i < 5; i++) {
+ DFSTestUtil.createFile(fs, new Path("/tmp.txt" + i), 1024, (short) 1,
+ 1L);
+ }
+ assertEquals("Before restart DN", 5, getTotalNumBlocks(mbs, mxbeanName));
+ cluster.restartDataNode(0);
+ cluster.waitActive();
+ assertEquals("After restart DN", 5, getTotalNumBlocks(mbs, mxbeanName));
+ fs.delete(new Path("/tmp.txt1"), true);
+ // Wait till replica gets deleted on disk.
+ Thread.sleep(5000);
+ assertEquals("After delete one file", 4,
+ getTotalNumBlocks(mbs, mxbeanName));
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ int getTotalNumBlocks(MBeanServer mbs, ObjectName mxbeanName)
+ throws Exception {
+ int totalBlocks = 0;
+ String volumeInfo = (String) mbs.getAttribute(mxbeanName, "VolumeInfo");
+ Map<?, ?> m = (Map<?, ?>) JSON.parse(volumeInfo);
+ Collection<Map<String, Long>> values =
+ (Collection<Map<String, Long>>) m.values();
+ for (Map<String, Long> volumeInfoMap : values) {
+ totalBlocks += volumeInfoMap.get("numBlocks");
+ }
+ return totalBlocks;
+ }
}