Posted to common-commits@hadoop.apache.org by ay...@apache.org on 2020/04/17 08:10:56 UTC
[hadoop] branch trunk updated: HDFS-15266. Add missing DFSOps Statistics in WebHDFS. Contributed by Ayush Saxena.
This is an automated email from the ASF dual-hosted git repository.
ayushsaxena pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/trunk by this push:
new 37d6582 HDFS-15266. Add missing DFSOps Statistics in WebHDFS. Contributed by Ayush Saxena.
37d6582 is described below
commit 37d65822235fe8285d10232589aba39ededd3be1
Author: Ayush Saxena <ay...@apache.org>
AuthorDate: Fri Apr 17 00:11:02 2020 +0530
HDFS-15266. Add missing DFSOps Statistics in WebHDFS. Contributed by Ayush Saxena.
---
.../apache/hadoop/hdfs/DistributedFileSystem.java | 13 +++--
.../apache/hadoop/hdfs/web/WebHdfsFileSystem.java | 8 +++
.../hadoop/hdfs/TestDistributedFileSystem.java | 21 ++++++--
.../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 60 ++++++++++++++++++++++
4 files changed, 96 insertions(+), 6 deletions(-)
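[Editor's note: the counters this patch adds are reported through the same process-wide
GlobalStorageStatistics registry that the new tests query. Below is a minimal sketch, not
part of this commit, of how a client could read the per-op counts after calling the newly
instrumented operations; the class name and path are illustrative, and fs.defaultFS is
assumed to point at an HDFS (hdfs://) or WebHDFS (webhdfs://) endpoint.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.GlobalStorageStatistics;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageStatistics;
import org.apache.hadoop.hdfs.DFSOpsCountStatistics;
import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType;

public class DfsOpsCounterProbe {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS resolves to DistributedFileSystem or
    // WebHdfsFileSystem; both maintain DFSOpsCountStatistics.
    FileSystem fs = FileSystem.get(conf);

    // Calls instrumented by this patch; each bumps a per-op counter.
    fs.getAllStoragePolicies();      // OpType.GET_STORAGE_POLICIES
    fs.getTrashRoot(new Path("/"));  // OpType.GET_TRASH_ROOT (DFS side of this patch)

    // Same lookup the tests use: registry entry keyed by
    // DFSOpsCountStatistics.NAME, counters keyed by OpType symbol.
    StorageStatistics stats =
        GlobalStorageStatistics.INSTANCE.get(DFSOpsCountStatistics.NAME);
    if (stats != null) {
      System.out.println(OpType.GET_STORAGE_POLICIES.getSymbol() + " = "
          + stats.getLong(OpType.GET_STORAGE_POLICIES.getSymbol()));
      System.out.println(OpType.GET_TRASH_ROOT.getSymbol() + " = "
          + stats.getLong(OpType.GET_TRASH_ROOT.getSymbol()));
    }
  }
}

This getLong() lookup is the same one checkOpStatistics()/getOpStatistics() in
TestDistributedFileSystem perform (see the test hunks below), which is why the patch
widens those helpers to public: TestWebHDFS reuses them against the shared registry.]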
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index d1babe3..a6d4758 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -813,6 +813,8 @@ public class DistributedFileSystem extends FileSystem
@Override
public Collection<BlockStoragePolicy> getAllStoragePolicies()
throws IOException {
+ statistics.incrementReadOps(1);
+ storageStatistics.incrementOpCounter(OpType.GET_STORAGE_POLICIES);
return Arrays.asList(dfs.getStoragePolicies());
}
@@ -834,9 +836,7 @@ public class DistributedFileSystem extends FileSystem
*/
@Deprecated
public BlockStoragePolicy[] getStoragePolicies() throws IOException {
- statistics.incrementReadOps(1);
- storageStatistics.incrementOpCounter(OpType.GET_STORAGE_POLICIES);
- return dfs.getStoragePolicies();
+ return getAllStoragePolicies().toArray(new BlockStoragePolicy[0]);
}
/**
@@ -2123,6 +2123,9 @@ public class DistributedFileSystem extends FileSystem
*/
public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
throws IOException {
+ statistics.incrementReadOps(1);
+ storageStatistics
+ .incrementOpCounter(OpType.GET_SNAPSHOTTABLE_DIRECTORY_LIST);
return dfs.getSnapshottableDirListing();
}
@@ -2295,6 +2298,8 @@ public class DistributedFileSystem extends FileSystem
*/
public SnapshotDiffReport getSnapshotDiffReport(final Path snapshotDir,
final String fromSnapshot, final String toSnapshot) throws IOException {
+ statistics.incrementReadOps(1);
+ storageStatistics.incrementOpCounter(OpType.GET_SNAPSHOT_DIFF);
Path absF = fixRelativePart(snapshotDir);
return new FileSystemLinkResolver<SnapshotDiffReport>() {
@Override
@@ -3243,6 +3248,8 @@ public class DistributedFileSystem extends FileSystem
*/
@Override
public Path getTrashRoot(Path path) {
+ statistics.incrementReadOps(1);
+ storageStatistics.incrementOpCounter(OpType.GET_TRASH_ROOT);
try {
if ((path == null) || !dfs.isHDFSEncryptionEnabled()) {
return super.getTrashRoot(path);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index d0b10cb..4caa0e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -1331,6 +1331,8 @@ public class WebHdfsFileSystem extends FileSystem
@Override
public void satisfyStoragePolicy(final Path p) throws IOException {
+ statistics.incrementWriteOps(1);
+ storageStatistics.incrementOpCounter(OpType.SATISFY_STORAGE_POLICY);
final HttpOpParam.Op op = PutOpParam.Op.SATISFYSTORAGEPOLICY;
new FsPathRunner(op, p).run();
}
@@ -1420,6 +1422,7 @@ public class WebHdfsFileSystem extends FileSystem
public SnapshotDiffReport getSnapshotDiffReport(final Path snapshotDir,
final String fromSnapshot, final String toSnapshot) throws IOException {
+ statistics.incrementReadOps(1);
storageStatistics.incrementOpCounter(OpType.GET_SNAPSHOT_DIFF);
final HttpOpParam.Op op = GetOpParam.Op.GETSNAPSHOTDIFF;
return new FsPathResponseRunner<SnapshotDiffReport>(op, snapshotDir,
@@ -1434,6 +1437,7 @@ public class WebHdfsFileSystem extends FileSystem
public SnapshottableDirectoryStatus[] getSnapshottableDirectoryList()
throws IOException {
+ statistics.incrementReadOps(1);
storageStatistics
.incrementOpCounter(OpType.GET_SNAPSHOTTABLE_DIRECTORY_LIST);
final HttpOpParam.Op op = GetOpParam.Op.GETSNAPSHOTTABLEDIRECTORYLIST;
@@ -1995,6 +1999,8 @@ public class WebHdfsFileSystem extends FileSystem
@Override
public Collection<BlockStoragePolicy> getAllStoragePolicies()
throws IOException {
+ statistics.incrementReadOps(1);
+ storageStatistics.incrementOpCounter(OpType.GET_STORAGE_POLICIES);
final HttpOpParam.Op op = GetOpParam.Op.GETALLSTORAGEPOLICY;
return new FsPathResponseRunner<Collection<BlockStoragePolicy>>(op, null) {
@Override
@@ -2007,6 +2013,8 @@ public class WebHdfsFileSystem extends FileSystem
@Override
public BlockStoragePolicy getStoragePolicy(Path src) throws IOException {
+ statistics.incrementReadOps(1);
+ storageStatistics.incrementOpCounter(OpType.GET_STORAGE_POLICY);
final HttpOpParam.Op op = GetOpParam.Op.GETSTORAGEPOLICY;
return new FsPathResponseRunner<BlockStoragePolicy>(op, src) {
@Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index 94830d7..6353e19 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -918,6 +918,21 @@ public class TestDistributedFileSystem {
dfs.getEZForPath(dir);
checkStatistics(dfs, ++readOps, writeOps, 0);
checkOpStatistics(OpType.GET_ENCRYPTION_ZONE, opCount + 1);
+
+ opCount = getOpStatistics(OpType.GET_SNAPSHOTTABLE_DIRECTORY_LIST);
+ dfs.getSnapshottableDirListing();
+ checkStatistics(dfs, ++readOps, writeOps, 0);
+ checkOpStatistics(OpType.GET_SNAPSHOTTABLE_DIRECTORY_LIST, opCount + 1);
+
+ opCount = getOpStatistics(OpType.GET_STORAGE_POLICIES);
+ dfs.getAllStoragePolicies();
+ checkStatistics(dfs, ++readOps, writeOps, 0);
+ checkOpStatistics(OpType.GET_STORAGE_POLICIES, opCount + 1);
+
+ opCount = getOpStatistics(OpType.GET_TRASH_ROOT);
+ dfs.getTrashRoot(dir);
+ checkStatistics(dfs, ++readOps, writeOps, 0);
+ checkOpStatistics(OpType.GET_TRASH_ROOT, opCount + 1);
}
}
@@ -1058,7 +1073,7 @@ public class TestDistributedFileSystem {
}
/** Checks statistics. -1 indicates do not check for the operations */
- private void checkStatistics(FileSystem fs, int readOps, int writeOps,
+ public static void checkStatistics(FileSystem fs, int readOps, int writeOps,
int largeReadOps) {
assertEquals(readOps, DFSTestUtil.getStatistics(fs).getReadOps());
assertEquals(writeOps, DFSTestUtil.getStatistics(fs).getWriteOps());
@@ -1164,12 +1179,12 @@ public class TestDistributedFileSystem {
}
}
- private static void checkOpStatistics(OpType op, long count) {
+ public static void checkOpStatistics(OpType op, long count) {
assertEquals("Op " + op.getSymbol() + " has unexpected count!",
count, getOpStatistics(op));
}
- private static long getOpStatistics(OpType op) {
+ public static long getOpStatistics(OpType op) {
return GlobalStorageStatistics.INSTANCE.get(
DFSOpsCountStatistics.NAME)
.getLong(op.getSymbol());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index 3ee7fcb..69a0e60 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -24,6 +24,9 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
+import static org.apache.hadoop.hdfs.TestDistributedFileSystem.checkOpStatistics;
+import static org.apache.hadoop.hdfs.TestDistributedFileSystem.checkStatistics;
+import static org.apache.hadoop.hdfs.TestDistributedFileSystem.getOpStatistics;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
import static org.junit.Assert.assertEquals;
@@ -57,6 +60,7 @@ import java.util.Random;
import com.google.common.collect.ImmutableList;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.fs.QuotaUsage;
+import org.apache.hadoop.hdfs.DFSOpsCountStatistics;
import org.apache.hadoop.test.LambdaTestUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -2012,6 +2016,62 @@ public class TestWebHDFS {
ecpolicyForECfile, ecPolicyName);
}
+ @Test
+ public void testStatistics() throws Exception {
+ final Configuration conf = new HdfsConfiguration();
+ conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
+ StoragePolicySatisfierMode.EXTERNAL.toString());
+ StoragePolicySatisfier sps = new StoragePolicySatisfier(conf);
+ try {
+ cluster = new MiniDFSCluster.Builder(conf).storageTypes(
+ new StorageType[][] {{StorageType.DISK, StorageType.ARCHIVE}})
+ .storagesPerDatanode(2).numDataNodes(1).build();
+ cluster.waitActive();
+ sps.init(new ExternalSPSContext(sps, DFSTestUtil
+ .getNameNodeConnector(conf, HdfsServerConstants.MOVER_ID_PATH, 1,
+ false)));
+ sps.start(StoragePolicySatisfierMode.EXTERNAL);
+ sps.start(StoragePolicySatisfierMode.EXTERNAL);
+ final WebHdfsFileSystem webHdfs = WebHdfsTestUtil
+ .getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
+ Path dir = new Path("/test");
+ webHdfs.mkdirs(dir);
+ int readOps = 0;
+ int writeOps = 0;
+ FileSystem.clearStatistics();
+
+ long opCount =
+ getOpStatistics(DFSOpsCountStatistics.OpType.GET_STORAGE_POLICY);
+ webHdfs.getStoragePolicy(dir);
+ checkStatistics(webHdfs, ++readOps, writeOps, 0);
+ checkOpStatistics(DFSOpsCountStatistics.OpType.GET_STORAGE_POLICY,
+ opCount + 1);
+
+ opCount =
+ getOpStatistics(DFSOpsCountStatistics.OpType.GET_STORAGE_POLICIES);
+ webHdfs.getAllStoragePolicies();
+ checkStatistics(webHdfs, ++readOps, writeOps, 0);
+ checkOpStatistics(DFSOpsCountStatistics.OpType.GET_STORAGE_POLICIES,
+ opCount + 1);
+
+ opCount =
+ getOpStatistics(DFSOpsCountStatistics.OpType.SATISFY_STORAGE_POLICY);
+ webHdfs.satisfyStoragePolicy(dir);
+ checkStatistics(webHdfs, readOps, ++writeOps, 0);
+ checkOpStatistics(DFSOpsCountStatistics.OpType.SATISFY_STORAGE_POLICY,
+ opCount + 1);
+
+ opCount = getOpStatistics(
+ DFSOpsCountStatistics.OpType.GET_SNAPSHOTTABLE_DIRECTORY_LIST);
+ webHdfs.getSnapshottableDirectoryList();
+ checkStatistics(webHdfs, ++readOps, writeOps, 0);
+ checkOpStatistics(
+ DFSOpsCountStatistics.OpType.GET_SNAPSHOTTABLE_DIRECTORY_LIST,
+ opCount + 1);
+ } finally {
+ cluster.shutdown();
+ }
+ }
/**
* Get FileStatus JSONObject from ListStatus response.
*/
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org