Posted to commits@ozone.apache.org by pi...@apache.org on 2021/08/30 00:31:08 UTC
[ozone] branch HDDS-5447-httpfs updated: HDDS-5519 Remove unnecessary hadoop dependencies from httpfs module (#2536)
This is an automated email from the ASF dual-hosted git repository.
pifta pushed a commit to branch HDDS-5447-httpfs
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/HDDS-5447-httpfs by this push:
new 409002d HDDS-5519 Remove unnecessary hadoop dependencies from httpfs module (#2536)
409002d is described below
commit 409002dd1dba4f36037cc85f53b45875767e83d8
Author: dombizita <50...@users.noreply.github.com>
AuthorDate: Mon Aug 30 02:30:57 2021 +0200
HDDS-5519 Remove unnecessary hadoop dependencies from httpfs module (#2536)
---
.../dist/dev-support/bin/dist-layout-stitching | 2 +
hadoop-ozone/dist/pom.xml | 9 +-
hadoop-ozone/dist/src/main/license/jar-report.txt | 10 +
hadoop-ozone/httpfsgateway/pom.xml | 207 +--
.../hadoop/fs/http/client/HttpFSFileSystem.java | 4 +-
.../apache/hadoop/fs/http/server/FSOperations.java | 20 +-
.../apache/hadoop/fs/http/server/HttpFSServer.java | 1 -
.../org/apache/hadoop/fs/http/server/JsonUtil.java | 494 +++++
.../http/server/metrics/HttpFSServerMetrics.java | 5 +-
.../apache/hadoop/hdfs/web/WebHdfsConstants.java | 52 +
.../org/apache/hadoop/hdfs/web/package-info.java | 21 +
.../hadoop/fs/http/client/BaseTestHttpFSWith.java | 1903 -------------------
.../client/TestHttpFSFWithSWebhdfsFileSystem.java | 102 -
.../client/TestHttpFSFWithWebhdfsFileSystem.java | 38 -
.../TestHttpFSFileSystemLocalFileSystem.java | 99 -
.../client/TestHttpFSWithHttpFSFileSystem.java | 56 -
...pFSKerberosAuthenticationHandlerForTesting.java | 38 -
.../server/TestCheckUploadContentTypeFilter.java | 91 -
.../fs/http/server/TestHttpFSAccessControlled.java | 355 ----
.../hadoop/fs/http/server/TestHttpFSServer.java | 1951 --------------------
.../fs/http/server/TestHttpFSServerNoACLs.java | 283 ---
.../fs/http/server/TestHttpFSServerNoXAttrs.java | 248 ---
.../fs/http/server/TestHttpFSServerWebServer.java | 123 --
.../TestHttpFSServerWebServerWithRandomSecret.java | 58 -
.../fs/http/server/TestHttpFSWithKerberos.java | 292 ---
.../hadoop/lib/lang/TestRunnableCallable.java | 96 -
.../org/apache/hadoop/lib/lang/TestXException.java | 64 -
.../apache/hadoop/lib/server/TestBaseService.java | 71 -
.../org/apache/hadoop/lib/server/TestServer.java | 810 --------
.../hadoop/lib/server/TestServerConstructor.java | 76 -
.../hadoop/TestFileSystemAccessService.java | 466 -----
.../TestInstrumentationService.java | 409 ----
.../service/scheduler/TestSchedulerService.java | 50 -
.../lib/service/security/DummyGroupMapping.java | 50 -
.../lib/service/security/TestGroupsService.java | 64 -
.../hadoop/lib/servlet/TestHostnameFilter.java | 96 -
.../apache/hadoop/lib/servlet/TestMDCFilter.java | 121 --
.../hadoop/lib/servlet/TestServerWebApp.java | 99 -
.../java/org/apache/hadoop/lib/util/TestCheck.java | 145 --
.../hadoop/lib/util/TestConfigurationUtils.java | 141 --
.../hadoop/lib/wsrs/TestInputStreamEntity.java | 48 -
.../hadoop/lib/wsrs/TestJSONMapProvider.java | 48 -
.../apache/hadoop/lib/wsrs/TestJSONProvider.java | 47 -
.../java/org/apache/hadoop/lib/wsrs/TestParam.java | 127 --
.../java/org/apache/hadoop/test/HFSTestCase.java | 28 -
.../java/org/apache/hadoop/test/HTestCase.java | 176 --
.../hadoop/test/HadoopUsersConfTestHelper.java | 182 --
.../org/apache/hadoop/test/KerberosTestUtils.java | 138 --
.../apache/hadoop/test/SysPropsForTestsLoader.java | 70 -
.../test/java/org/apache/hadoop/test/TestDir.java | 34 -
.../java/org/apache/hadoop/test/TestDirHelper.java | 147 --
.../java/org/apache/hadoop/test/TestException.java | 30 -
.../apache/hadoop/test/TestExceptionHelper.java | 67 -
.../org/apache/hadoop/test/TestHFSTestCase.java | 194 --
.../java/org/apache/hadoop/test/TestHTestCase.java | 161 --
.../test/java/org/apache/hadoop/test/TestHdfs.java | 40 -
.../org/apache/hadoop/test/TestHdfsHelper.java | 226 ---
.../java/org/apache/hadoop/test/TestJetty.java | 40 -
.../org/apache/hadoop/test/TestJettyHelper.java | 180 --
.../src/test/resources/classutils.txt | 1 -
.../src/test/resources/default-log4j.properties | 26 -
.../httpfsgateway/src/test/resources/hdfs-site.xml | 29 -
.../src/test/resources/httpfs-log4j.properties | 22 -
.../httpfsgateway/src/test/resources/krb5.conf | 28 -
.../src/test/resources/server.properties | 13 -
.../resources/test-compact-format-property.xml | 18 -
.../src/test/resources/testserver-default.xml | 20 -
.../src/test/resources/testserver.properties | 13 -
.../test/resources/testserverwebapp1.properties | 13 -
.../test/resources/testserverwebapp2.properties | 13 -
70 files changed, 638 insertions(+), 10761 deletions(-)
diff --git a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
index 2491a1a..468b5ee 100755
--- a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
+++ b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
@@ -76,6 +76,8 @@ run mkdir -p ./bin
run mkdir -p ./sbin
run mkdir -p ./etc
run mkdir -p ./libexec
+run mkdir -p ./log
+run mkdir -p ./temp
run mkdir -p ./tests
run cp -r "${ROOT}/hadoop-hdds/common/src/main/conf/" "etc/hadoop"
diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml
index 980ab8e..d487544 100644
--- a/hadoop-ozone/dist/pom.xml
+++ b/hadoop-ozone/dist/pom.xml
@@ -74,8 +74,8 @@
</outputDirectory>
<includes>*.classpath</includes>
<includeArtifactIds>
- hdds-server-scm,ozone-common,ozone-csi,ozone-datanode,ozone-insight,
- ozone-manager,ozone-recon,ozone-s3gateway,ozone-tools
+ hdds-server-scm,ozone-common,ozone-csi,ozone-datanode,ozone-httpfsgateway,
+ ozone-insight,ozone-manager,ozone-recon,ozone-s3gateway,ozone-tools
</includeArtifactIds>
</configuration>
</execution>
@@ -223,6 +223,11 @@
<groupId>org.apache.ozone</groupId>
<artifactId>ozone-insight</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.apache.ozone</groupId>
+ <artifactId>ozone-httpfsgateway</artifactId>
+ <version>1.2.0-SNAPSHOT</version>
+ </dependency>
</dependencies>
<profiles>
<profile>
diff --git a/hadoop-ozone/dist/src/main/license/jar-report.txt b/hadoop-ozone/dist/src/main/license/jar-report.txt
index 8233966..4bf842a 100644
--- a/hadoop-ozone/dist/src/main/license/jar-report.txt
+++ b/hadoop-ozone/dist/src/main/license/jar-report.txt
@@ -28,11 +28,15 @@ share/ozone/lib/commons-digester.jar
share/ozone/lib/commons-io.jar
share/ozone/lib/commons-lang3.jar
share/ozone/lib/commons-logging.jar
+share/ozone/lib/commons-math.jar
share/ozone/lib/commons-math3.jar
share/ozone/lib/commons-net.jar
share/ozone/lib/commons-pool2.jar
share/ozone/lib/commons-text.jar
share/ozone/lib/commons-validator.jar
+share/ozone/lib/curator-client.jar
+share/ozone/lib/curator-framework.jar
+share/ozone/lib/curator-test.jar
share/ozone/lib/derby.jar
share/ozone/lib/disruptor.jar
share/ozone/lib/dnsjava.jar
@@ -130,16 +134,19 @@ share/ozone/lib/jetty-util-ajax.v20201120.jar
share/ozone/lib/jetty-util.v20201120.jar
share/ozone/lib/jetty-webapp.v20201120.jar
share/ozone/lib/jetty-xml.v20201120.jar
+share/ozone/lib/jline.jar
share/ozone/lib/jmespath-java.jar
share/ozone/lib/joda-time.jar
share/ozone/lib/jooq-codegen.jar
share/ozone/lib/jooq-meta.jar
share/ozone/lib/jooq.jar
share/ozone/lib/jsch.jar
+share/ozone/lib/json-simple.jar
share/ozone/lib/json-smart.jar
share/ozone/lib/jsp-api.jar
share/ozone/lib/jsr305.jar
share/ozone/lib/jsr311-api.jar
+share/ozone/lib/junit.jar
share/ozone/lib/kerb-admin.jar
share/ozone/lib/kerb-client.jar
share/ozone/lib/kerb-common.jar
@@ -175,6 +182,7 @@ share/ozone/lib/netty-resolver.Final.jar
share/ozone/lib/netty-transport-native-epoll.Final.jar
share/ozone/lib/netty-transport-native-unix-common.Final.jar
share/ozone/lib/netty-transport.Final.jar
+share/ozone/lib/netty.Final.jar
share/ozone/lib/nimbus-jose-jwt.jar
share/ozone/lib/okhttp.jar
share/ozone/lib/okio.jar
@@ -191,6 +199,7 @@ share/ozone/lib/ozone-filesystem-common.jar
share/ozone/lib/ozone-filesystem-hadoop2.jar
share/ozone/lib/ozone-filesystem-hadoop3.jar
share/ozone/lib/ozone-filesystem.jar
+share/ozone/lib/ozone-httpfsgateway.jar
share/ozone/lib/ozone-insight.jar
share/ozone/lib/ozone-interface-client.jar
share/ozone/lib/ozone-interface-storage.jar
@@ -237,3 +246,4 @@ share/ozone/lib/token-provider.jar
share/ozone/lib/txw2.jar
share/ozone/lib/weld-servlet.Final.jar
share/ozone/lib/woodstox-core.jar
+share/ozone/lib/zookeeper.jar
diff --git a/hadoop-ozone/httpfsgateway/pom.xml b/hadoop-ozone/httpfsgateway/pom.xml
index 01d6efa..36413b1 100644
--- a/hadoop-ozone/httpfsgateway/pom.xml
+++ b/hadoop-ozone/httpfsgateway/pom.xml
@@ -36,26 +36,22 @@
<httpfs.source.revision>REVISION NOT AVAIL</httpfs.source.revision>
<maven.build.timestamp.format>yyyy-MM-dd'T'HH:mm:ssZ</maven.build.timestamp.format>
<httpfs.build.timestamp>${maven.build.timestamp}</httpfs.build.timestamp>
- <kerberos.realm>LOCALHOST</kerberos.realm>
- <test.exclude.kerberos.test>**/TestHttpFSWithKerberos.java</test.exclude.kerberos.test>
</properties>
<dependencies>
<dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
- <scope>test</scope>
+ <groupId>org.apache.ozone</groupId>
+ <artifactId>ozone-filesystem-common</artifactId>
</dependency>
<dependency>
- <groupId>org.mockito</groupId>
- <artifactId>mockito-core</artifactId>
- <scope>test</scope>
+ <groupId>org.apache.ozone</groupId>
+ <artifactId>ozone-filesystem</artifactId>
+ <scope>provided</scope>
</dependency>
<dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-auth</artifactId>
+ <groupId>com.googlecode.json-simple</groupId>
+ <artifactId>json-simple</artifactId>
<scope>compile</scope>
- <version>3.3.1</version>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
@@ -68,19 +64,13 @@
<scope>compile</scope>
</dependency>
<dependency>
- <groupId>javax.servlet</groupId>
- <artifactId>javax.servlet-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.apache.hadoop.thirdparty</groupId>
- <artifactId>hadoop-shaded-guava</artifactId>
+ <groupId>com.sun.jersey</groupId>
+ <artifactId>jersey-servlet</artifactId>
<scope>compile</scope>
- <version>1.1.1</version>
</dependency>
<dependency>
- <groupId>com.googlecode.json-simple</groupId>
- <artifactId>json-simple</artifactId>
- <scope>compile</scope>
+ <groupId>javax.servlet</groupId>
+ <artifactId>javax.servlet-api</artifactId>
</dependency>
<dependency>
<groupId>org.eclipse.jetty</groupId>
@@ -91,98 +81,9 @@
<artifactId>jetty-webapp</artifactId>
</dependency>
<dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-common</artifactId>
- <scope>compile</scope>
- <version>3.3.1</version>
- <exclusions>
- <exclusion>
- <groupId>javax.xml.stream</groupId>
- <artifactId>stax-api</artifactId>
- </exclusion>
- <exclusion>
- <groupId>javax.servlet</groupId>
- <artifactId>javax.servlet-api</artifactId>
- </exclusion>
- <exclusion>
- <groupId>javax.servlet.jsp</groupId>
- <artifactId>jsp-api</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.eclipse.jetty</groupId>
- <artifactId>jetty-server</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.eclipse.jetty</groupId>
- <artifactId>jetty-util</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.eclipse.jetty</groupId>
- <artifactId>servlet-api-2.5</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.eclipse.jdt</groupId>
- <artifactId>core</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-hdfs</artifactId>
- <scope>compile</scope>
- <version>3.3.1</version>
- <exclusions>
- <exclusion>
- <groupId>commons-cli</groupId>
- <artifactId>commons-cli</artifactId>
- </exclusion>
- <exclusion>
- <groupId>javax.servlet</groupId>
- <artifactId>javax.servlet-api</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.eclipse.jetty</groupId>
- <artifactId>jetty-server</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.eclipse.jetty</groupId>
- <artifactId>jetty-util</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.eclipse.jetty</groupId>
- <artifactId>servlet-api-2.5</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.eclipse.jdt</groupId>
- <artifactId>core</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-hdfs-client</artifactId>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-common</artifactId>
- <scope>test</scope>
- <type>test-jar</type>
- <version>3.3.1</version>
- </dependency>
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-hdfs</artifactId>
- <scope>test</scope>
- <type>test-jar</type>
- <version>3.3.1</version>
- </dependency>
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-auth</artifactId>
- <scope>test</scope>
- <type>test-jar</type>
- <version>3.3.1</version>
+ <groupId>commons-codec</groupId>
+ <artifactId>commons-codec</artifactId>
+ <scope>runtime</scope>
</dependency>
<dependency>
<groupId>log4j</groupId>
@@ -199,11 +100,31 @@
<artifactId>slf4j-log4j12</artifactId>
<scope>runtime</scope>
</dependency>
- <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
<dependency>
- <groupId>org.bouncycastle</groupId>
- <artifactId>bcprov-jdk15on</artifactId>
- <scope>test</scope>
+ <groupId>org.apache.curator</groupId>
+ <artifactId>apache-curator</artifactId>
+ <version>2.4.0</version>
+ <type>pom</type>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.curator</groupId>
+ <artifactId>curator-test</artifactId>
+ <version>2.4.0</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.curator</groupId>
+ <artifactId>curator-client</artifactId>
+ <version>2.4.0</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.curator</groupId>
+ <artifactId>curator-framework</artifactId>
+ <version>2.4.0</version>
+ </dependency>
+ <dependency>
+ <groupId>javax.xml.bind</groupId>
+ <artifactId>jaxb-api</artifactId>
+ <version>2.3.0</version>
</dependency>
</dependencies>
@@ -228,16 +149,10 @@
<testResource>
<directory>${basedir}/src/test/resources</directory>
<filtering>false</filtering>
- <excludes>
- <exclude>krb5.conf</exclude>
- </excludes>
</testResource>
<testResource>
<directory>${basedir}/src/test/resources</directory>
<filtering>true</filtering>
- <includes>
- <include>krb5.conf</include>
- </includes>
</testResource>
</testResources>
@@ -264,21 +179,12 @@
<testFailureIgnore>${ignoreTestFailure}</testFailureIgnore>
<threadCount>1</threadCount>
<forkedProcessTimeoutInSeconds>600</forkedProcessTimeoutInSeconds>
- <systemPropertyVariables>
- <java.security.krb5.conf>${project.build.directory}/test-classes/krb5.conf</java.security.krb5.conf>
- <kerberos.realm>${kerberos.realm}</kerberos.realm>
- </systemPropertyVariables>
<properties>
<property>
<name>listener</name>
<value>org.apache.hadoop.test.TimedOutTestsListener</value>
</property>
</properties>
- <excludes>
- <exclude>**/${test.exclude}.java</exclude>
- <exclude>${test.exclude.pattern}</exclude>
- <exclude>${test.exclude.kerberos.test}</exclude>
- </excludes>
</configuration>
</plugin>
<plugin>
@@ -308,11 +214,6 @@
<plugin>
<groupId>org.apache.rat</groupId>
<artifactId>apache-rat-plugin</artifactId>
- <configuration>
- <excludes>
- <exclude>src/test/resources/classutils.txt</exclude>
- </excludes>
- </configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
@@ -363,38 +264,6 @@
<profiles>
<profile>
- <id>testKerberos</id>
- <activation>
- <activeByDefault>false</activeByDefault>
- </activation>
- <properties>
- <test.exclude.kerberos.test>_</test.exclude.kerberos.test>
- </properties>
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-surefire-plugin</artifactId>
- <configuration>
- <testFailureIgnore>${ignoreTestFailure}</testFailureIgnore>
- <forkCount>1</forkCount>
- <reuseForks>true</reuseForks>
- <forkedProcessTimeoutInSeconds>600</forkedProcessTimeoutInSeconds>
- <systemPropertyVariables>
- <java.security.krb5.conf>${project.build.directory}/test-classes/krb5.conf</java.security.krb5.conf>
- <kerberos.realm>${kerberos.realm}</kerberos.realm>
- <httpfs.http.hostname>localhost</httpfs.http.hostname>
- </systemPropertyVariables>
- <includes>
- <include>**/TestHttpFSWithKerberos.java</include>
- </includes>
- </configuration>
- </plugin>
- </plugins>
- </build>
- </profile>
-
- <profile>
<id>dist</id>
<activation>
<activeByDefault>false</activeByDefault>
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index 7a5fba8..7590178 100644
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -23,6 +23,7 @@ import java.util.Collection;
import java.util.EnumSet;
import java.util.List;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
@@ -46,7 +47,6 @@ import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
@@ -450,7 +450,7 @@ public class HttpFSFileSystem extends FileSystem
*/
@Override
protected int getDefaultPort() {
- return DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT;
+ return HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT;
}
/**
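Note: the hunk above swaps the default-port constant from the server-side DFSConfigKeys class to HdfsClientConfigKeys, which ships in the hadoop-hdfs-client artifact, so the full hadoop-hdfs server jar is no longer needed for this lookup. A minimal sketch of the assumption behind the swap (both constants carry the same value, 9870 on Hadoop 3.x):

    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    // Same default NameNode HTTP port DFSConfigKeys exposed, but
    // resolved from the client-only artifact.
    int defaultPort = HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT; // 9870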
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index 5e7d65c..3ea17ae 100644
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -46,7 +46,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
-import org.apache.hadoop.hdfs.web.JsonUtil;
import org.apache.hadoop.lib.service.FileSystemAccess;
import org.apache.hadoop.util.StringUtils;
import org.json.simple.JSONArray;
@@ -65,9 +64,6 @@ import java.util.Map;
import java.util.Map.Entry;
import java.util.TreeMap;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.HTTPFS_BUFFER_SIZE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.HTTP_BUFFER_SIZE_DEFAULT;
-
/**
* FileSystem operation executors used by {@link HttpFSServer}.
*/
@@ -75,6 +71,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.HTTP_BUFFER_SIZE_DEFAULT;
public final class FSOperations {
private static int bufferSize = 4096;
+ private static final String HTTPFS_BUFFER_SIZE_KEY = "httpfs.buffer.size";
+ private static final int HTTP_BUFFER_SIZE_DEFAULT = 4096;
private FSOperations() {
// not called
@@ -156,13 +154,13 @@ public final class FSOperations {
hdfsFileStatus.getFileId());
json.put(HttpFSFileSystem.STORAGEPOLICY_JSON,
hdfsFileStatus.getStoragePolicy());
- if (hdfsFileStatus.getErasureCodingPolicy() != null) {
- json.put(HttpFSFileSystem.ECPOLICYNAME_JSON,
- hdfsFileStatus.getErasureCodingPolicy().getName());
- json.put(HttpFSFileSystem.ECPOLICY_JSON,
- JsonUtil.getEcPolicyAsMap(
- hdfsFileStatus.getErasureCodingPolicy()));
- }
+// if (hdfsFileStatus.getErasureCodingPolicy() != null) {
+// json.put(HttpFSFileSystem.ECPOLICYNAME_JSON,
+// hdfsFileStatus.getErasureCodingPolicy().getName());
+// json.put(HttpFSFileSystem.ECPOLICY_JSON,
+// JsonUtil.getEcPolicyAsMap(
+// hdfsFileStatus.getErasureCodingPolicy()));
+// }
}
if (fileStatus.getPermission().getAclBit()) {
json.put(HttpFSFileSystem.ACL_BIT_JSON, true);
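Note: HTTPFS_BUFFER_SIZE_KEY and HTTP_BUFFER_SIZE_DEFAULT were previously imported from DFSConfigKeys (see the removed static imports above); inlining them keeps the same lookup without the hadoop-hdfs dependency. A hedged sketch of how the inlined keys would resolve, with the Configuration setup assumed since the use site is not shown in this hunk:

    import org.apache.hadoop.conf.Configuration;

    Configuration conf = new Configuration(false);
    // Falls back to the inlined 4096 default when the key is unset.
    int bufferSize = conf.getInt("httpfs.buffer.size", 4096);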
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
index 408364e..de21b45 100644
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
+++ b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
@@ -56,7 +56,6 @@ import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.XAttrNameParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.XAttrSetFlagParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.XAttrValueParam;
import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.hdfs.web.JsonUtil;
import org.apache.hadoop.http.JettyUtils;
import org.apache.hadoop.lib.service.FileSystemAccess;
import org.apache.hadoop.lib.service.FileSystemAccessException;
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/hadoop/fs/http/server/JsonUtil.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/hadoop/fs/http/server/JsonUtil.java
new file mode 100644
index 0000000..a14c44d
--- /dev/null
+++ b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/hadoop/fs/http/server/JsonUtil.java
@@ -0,0 +1,494 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.http.server;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import org.apache.hadoop.fs.*;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSUtilClient;
+import org.apache.hadoop.hdfs.XAttrHelper;
+import org.apache.hadoop.hdfs.protocol.*;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.thirdparty.com.google.common.collect.Lists;
+import org.apache.hadoop.util.StringUtils;
+
+import java.io.IOException;
+import java.util.*;
+
+/** JSON Utilities. */
+final class JsonUtil {
+ private static final Object[] EMPTY_OBJECT_ARRAY = {};
+
+ private JsonUtil() {
+ }
+
+ // Reuse ObjectMapper instance for improving performance.
+ // ObjectMapper is thread safe as long as we always configure instance
+ // before use. We don't have a re-entrant call pattern in WebHDFS,
+ // so we just need to worry about thread-safety.
+ private static final ObjectMapper MAPPER = new ObjectMapper();
+
+ /** Convert a token object to a Json string. */
+ public static String toJsonString(final Token<?
+ extends TokenIdentifier> token) throws IOException {
+ return toJsonString(Token.class, toJsonMap(token));
+ }
+
+ private static Map<String, Object> toJsonMap(
+ final Token<? extends TokenIdentifier> token) throws IOException {
+ if (token == null) {
+ return null;
+ }
+
+ final Map<String, Object> m = new TreeMap<String, Object>();
+ m.put("urlString", token.encodeToUrlString());
+ return m;
+ }
+
+ /** Convert an exception object to a Json string. */
+ public static String toJsonString(final Exception e) {
+ final Map<String, Object> m = new TreeMap<String, Object>();
+ m.put("exception", e.getClass().getSimpleName());
+ m.put("message", e.getMessage());
+ m.put("javaClassName", e.getClass().getName());
+ return toJsonString(RemoteException.class, m);
+ }
+
+ private static String toJsonString(final Class<?> clazz, final Object value) {
+ return toJsonString(clazz.getSimpleName(), value);
+ }
+
+ /** Convert a key-value pair to a Json string. */
+ public static String toJsonString(final String key, final Object value) {
+ final Map<String, Object> m = new TreeMap<String, Object>();
+ m.put(key, value);
+ try {
+ return MAPPER.writeValueAsString(m);
+ } catch (IOException ignored) {
+ }
+ return null;
+ }
+
+ /** Convert a FsPermission object to a string. */
+ private static String toString(final FsPermission permission) {
+ return String.format("%o", permission.toShort());
+ }
+
+ /** Convert an ExtendedBlock to a Json map. */
+ private static Map<String, Object> toJsonMap(
+ final ExtendedBlock extendedblock) {
+ if (extendedblock == null) {
+ return null;
+ }
+
+ final Map<String, Object> m = new TreeMap<String, Object>();
+ m.put("blockPoolId", extendedblock.getBlockPoolId());
+ m.put("blockId", extendedblock.getBlockId());
+ m.put("numBytes", extendedblock.getNumBytes());
+ m.put("generationStamp", extendedblock.getGenerationStamp());
+ return m;
+ }
+
+ /** Convert a DatanodeInfo to a Json map. */
+ static Map<String, Object> toJsonMap(final DatanodeInfo datanodeinfo) {
+ if (datanodeinfo == null) {
+ return null;
+ }
+
+ // TODO: Fix storageID
+ final Map<String, Object> m = new TreeMap<String, Object>();
+ m.put("ipAddr", datanodeinfo.getIpAddr());
+ // 'name' is equivalent to ipAddr:xferPort. Older clients (1.x, 0.23.x)
+ // expects this instead of the two fields.
+ m.put("name", datanodeinfo.getXferAddr());
+ m.put("hostName", datanodeinfo.getHostName());
+ m.put("storageID", datanodeinfo.getDatanodeUuid());
+ m.put("xferPort", datanodeinfo.getXferPort());
+ m.put("infoPort", datanodeinfo.getInfoPort());
+ m.put("infoSecurePort", datanodeinfo.getInfoSecurePort());
+ m.put("ipcPort", datanodeinfo.getIpcPort());
+
+ m.put("capacity", datanodeinfo.getCapacity());
+ m.put("dfsUsed", datanodeinfo.getDfsUsed());
+ m.put("remaining", datanodeinfo.getRemaining());
+ m.put("blockPoolUsed", datanodeinfo.getBlockPoolUsed());
+ m.put("cacheCapacity", datanodeinfo.getCacheCapacity());
+ m.put("cacheUsed", datanodeinfo.getCacheUsed());
+ m.put("lastUpdate", datanodeinfo.getLastUpdate());
+ m.put("lastUpdateMonotonic", datanodeinfo.getLastUpdateMonotonic());
+ m.put("xceiverCount", datanodeinfo.getXceiverCount());
+ m.put("networkLocation", datanodeinfo.getNetworkLocation());
+ m.put("adminState", datanodeinfo.getAdminState().name());
+ if (datanodeinfo.getUpgradeDomain() != null) {
+ m.put("upgradeDomain", datanodeinfo.getUpgradeDomain());
+ }
+ m.put("lastBlockReportTime", datanodeinfo.getLastBlockReportTime());
+ m.put("lastBlockReportMonotonic",
+ datanodeinfo.getLastBlockReportMonotonic());
+ return m;
+ }
+
+ /** Convert a DatanodeInfo[] to a Json array. */
+ private static Object[] toJsonArray(final DatanodeInfo[] array) {
+ if (array == null) {
+ return null;
+ } else if (array.length == 0) {
+ return EMPTY_OBJECT_ARRAY;
+ } else {
+ final Object[] a = new Object[array.length];
+ for(int i = 0; i < array.length; i++) {
+ a[i] = toJsonMap(array[i]);
+ }
+ return a;
+ }
+ }
+
+ /** Convert a StorageType[] to a Json array. */
+ private static Object[] toJsonArray(final StorageType[] array) {
+ if (array == null) {
+ return null;
+ } else if (array.length == 0) {
+ return EMPTY_OBJECT_ARRAY;
+ } else {
+ final Object[] a = new Object[array.length];
+ for(int i = 0; i < array.length; i++) {
+ a[i] = array[i];
+ }
+ return a;
+ }
+ }
+
+ /** Convert a LocatedBlock to a Json map. */
+ private static Map<String, Object> toJsonMap(
+ final LocatedBlock locatedblock) throws IOException {
+ if (locatedblock == null) {
+ return null;
+ }
+
+ final Map<String, Object> m = new TreeMap<String, Object>();
+ m.put("blockToken", toJsonMap(locatedblock.getBlockToken()));
+ m.put("isCorrupt", locatedblock.isCorrupt());
+ m.put("startOffset", locatedblock.getStartOffset());
+ m.put("block", toJsonMap(locatedblock.getBlock()));
+ m.put("storageTypes", toJsonArray(locatedblock.getStorageTypes()));
+ m.put("locations", toJsonArray(locatedblock.getLocations()));
+ m.put("cachedLocations", toJsonArray(locatedblock.getCachedLocations()));
+ return m;
+ }
+
+ /** Convert a LocatedBlock[] to a Json array. */
+ private static Object[] toJsonArray(
+ final List<LocatedBlock> array) throws IOException {
+ if (array == null) {
+ return null;
+ } else if (array.size() == 0) {
+ return EMPTY_OBJECT_ARRAY;
+ } else {
+ final Object[] a = new Object[array.size()];
+ for(int i = 0; i < array.size(); i++) {
+ a[i] = toJsonMap(array.get(i));
+ }
+ return a;
+ }
+ }
+
+ /** Convert LocatedBlocks to a Json string. */
+ public static String toJsonString(
+ final LocatedBlocks locatedblocks) throws IOException {
+ if (locatedblocks == null) {
+ return null;
+ }
+
+ final Map<String, Object> m = new TreeMap<String, Object>();
+ m.put("fileLength", locatedblocks.getFileLength());
+ m.put("isUnderConstruction", locatedblocks.isUnderConstruction());
+
+ m.put("locatedBlocks", toJsonArray(locatedblocks.getLocatedBlocks()));
+ m.put("lastLocatedBlock", toJsonMap(locatedblocks.getLastLocatedBlock()));
+ m.put("isLastBlockComplete", locatedblocks.isLastBlockComplete());
+ return toJsonString(LocatedBlocks.class, m);
+ }
+
+ /** Convert a ContentSummary to a Json string. */
+ public static String toJsonString(final ContentSummary contentsummary) {
+ if (contentsummary == null) {
+ return null;
+ }
+
+ final Map<String, Object> m = new TreeMap<String, Object>();
+ m.put("length", contentsummary.getLength());
+ m.put("fileCount", contentsummary.getFileCount());
+ m.put("directoryCount", contentsummary.getDirectoryCount());
+ m.put("ecPolicy", contentsummary.getErasureCodingPolicy());
+ // For ContentSummary we don't need this since we already have
+ // separate count for file and directory.
+ m.putAll(toJsonMap(contentsummary, false));
+ m.put("snapshotLength", contentsummary.getSnapshotLength());
+ m.put("snapshotFileCount", contentsummary.getSnapshotFileCount());
+ m.put("snapshotDirectoryCount",
+ contentsummary.getSnapshotDirectoryCount());
+ m.put("snapshotSpaceConsumed", contentsummary.getSnapshotSpaceConsumed());
+ return toJsonString(ContentSummary.class, m);
+ }
+
+ /** Convert a QuotaUsage to a JSON string. */
+ public static String toJsonString(final QuotaUsage quotaUsage) {
+ if (quotaUsage == null) {
+ return null;
+ }
+ return toJsonString(QuotaUsage.class, toJsonMap(quotaUsage, true));
+ }
+
+ private static Map<String, Object> toJsonMap(
+ final QuotaUsage quotaUsage, boolean includeFileAndDirectoryCount) {
+ final Map<String, Object> m = new TreeMap<>();
+ if (includeFileAndDirectoryCount) {
+ m.put("fileAndDirectoryCount", quotaUsage.getFileAndDirectoryCount());
+ }
+ m.put("quota", quotaUsage.getQuota());
+ m.put("spaceConsumed", quotaUsage.getSpaceConsumed());
+ m.put("spaceQuota", quotaUsage.getSpaceQuota());
+ final Map<String, Map<String, Long>> typeQuota = new TreeMap<>();
+ for (StorageType t : StorageType.getTypesSupportingQuota()) {
+ long tQuota = quotaUsage.getTypeQuota(t);
+ if (tQuota != HdfsConstants.QUOTA_RESET) {
+ Map<String, Long> type = typeQuota.get(t.toString());
+ if (type == null) {
+ type = new TreeMap<>();
+ typeQuota.put(t.toString(), type);
+ }
+ type.put("quota", quotaUsage.getTypeQuota(t));
+ type.put("consumed", quotaUsage.getTypeConsumed(t));
+ }
+ }
+ m.put("typeQuota", typeQuota);
+ return m;
+ }
+
+ /** Convert a MD5MD5CRC32FileChecksum to a Json string. */
+ public static String toJsonString(final MD5MD5CRC32FileChecksum checksum) {
+ if (checksum == null) {
+ return null;
+ }
+
+ final Map<String, Object> m = new TreeMap<String, Object>();
+ m.put("algorithm", checksum.getAlgorithmName());
+ m.put("length", checksum.getLength());
+ m.put("bytes", StringUtils.byteToHexString(checksum.getBytes()));
+ return toJsonString(FileChecksum.class, m);
+ }
+
+ /** Convert a AclStatus object to a Json string. */
+ public static String toJsonString(final AclStatus status) {
+ if (status == null) {
+ return null;
+ }
+
+ final Map<String, Object> m = new TreeMap<String, Object>();
+ m.put("owner", status.getOwner());
+ m.put("group", status.getGroup());
+ m.put("stickyBit", status.isStickyBit());
+
+ final List<String> stringEntries = new ArrayList<>();
+ for (AclEntry entry : status.getEntries()) {
+ stringEntries.add(entry.toStringStable());
+ }
+ m.put("entries", stringEntries);
+
+ FsPermission perm = status.getPermission();
+ if (perm != null) {
+ m.put("permission", toString(perm));
+ }
+ final Map<String, Map<String, Object>> finalMap =
+ new TreeMap<String, Map<String, Object>>();
+ finalMap.put(AclStatus.class.getSimpleName(), m);
+
+ try {
+ return MAPPER.writeValueAsString(finalMap);
+ } catch (IOException ignored) {
+ }
+ return null;
+ }
+
+ private static Map<String, Object> toJsonMap(final XAttr xAttr,
+ final XAttrCodec encoding) throws IOException {
+ if (xAttr == null) {
+ return null;
+ }
+
+ final Map<String, Object> m = new TreeMap<String, Object>();
+ m.put("name", XAttrHelper.getPrefixedName(xAttr));
+ m.put("value", xAttr.getValue() != null ?
+ XAttrCodec.encodeValue(xAttr.getValue(), encoding) : null);
+ return m;
+ }
+
+ private static Object[] toJsonArray(final List<XAttr> array,
+ final XAttrCodec encoding) throws IOException {
+ if (array == null) {
+ return null;
+ } else if (array.size() == 0) {
+ return EMPTY_OBJECT_ARRAY;
+ } else {
+ final Object[] a = new Object[array.size()];
+ for(int i = 0; i < array.size(); i++) {
+ a[i] = toJsonMap(array.get(i), encoding);
+ }
+ return a;
+ }
+ }
+
+ public static String toJsonString(final List<XAttr> xAttrs,
+ final XAttrCodec encoding) throws IOException {
+ final Map<String, Object> finalMap = new TreeMap<String, Object>();
+ finalMap.put("XAttrs", toJsonArray(xAttrs, encoding));
+ return MAPPER.writeValueAsString(finalMap);
+ }
+
+ public static String toJsonString(final List<XAttr> xAttrs)
+ throws IOException {
+ final List<String> names = Lists.newArrayListWithCapacity(xAttrs.size());
+ for (XAttr xAttr : xAttrs) {
+ names.add(XAttrHelper.getPrefixedName(xAttr));
+ }
+ String ret = MAPPER.writeValueAsString(names);
+ final Map<String, Object> finalMap = new TreeMap<String, Object>();
+ finalMap.put("XAttrNames", ret);
+ return MAPPER.writeValueAsString(finalMap);
+ }
+
+ public static String toJsonString(Object obj) throws IOException {
+ return MAPPER.writeValueAsString(obj);
+ }
+
+ public static String toJsonString(BlockStoragePolicy[] storagePolicies) {
+ final Map<String, Object> blockStoragePolicies = new TreeMap<>();
+ Object[] a = null;
+ if (storagePolicies != null && storagePolicies.length > 0) {
+ a = new Object[storagePolicies.length];
+ for (int i = 0; i < storagePolicies.length; i++) {
+ a[i] = toJsonMap(storagePolicies[i]);
+ }
+ }
+ blockStoragePolicies.put("BlockStoragePolicy", a);
+ return toJsonString("BlockStoragePolicies", blockStoragePolicies);
+ }
+
+ private static Object toJsonMap(BlockStoragePolicy blockStoragePolicy) {
+ final Map<String, Object> m = new TreeMap<String, Object>();
+ m.put("id", blockStoragePolicy.getId());
+ m.put("name", blockStoragePolicy.getName());
+ m.put("storageTypes", blockStoragePolicy.getStorageTypes());
+ m.put("creationFallbacks", blockStoragePolicy.getCreationFallbacks());
+ m.put("replicationFallbacks", blockStoragePolicy.getReplicationFallbacks());
+ m.put("copyOnCreateFile", blockStoragePolicy.isCopyOnCreateFile());
+ return m;
+ }
+
+ public static String toJsonString(BlockStoragePolicy storagePolicy) {
+ return toJsonString(BlockStoragePolicy.class, toJsonMap(storagePolicy));
+ }
+
+ public static String toJsonString(FsServerDefaults serverDefaults) {
+ return toJsonString(FsServerDefaults.class, toJsonMap(serverDefaults));
+ }
+
+ private static Object toJsonMap(FsServerDefaults serverDefaults) {
+ final Map<String, Object> m = new HashMap<String, Object>();
+ m.put("blockSize", serverDefaults.getBlockSize());
+ m.put("bytesPerChecksum", serverDefaults.getBytesPerChecksum());
+ m.put("writePacketSize", serverDefaults.getWritePacketSize());
+ m.put("replication", serverDefaults.getReplication());
+ m.put("fileBufferSize", serverDefaults.getFileBufferSize());
+ m.put("encryptDataTransfer", serverDefaults.getEncryptDataTransfer());
+ m.put("trashInterval", serverDefaults.getTrashInterval());
+ m.put("checksumType", serverDefaults.getChecksumType().id);
+ m.put("keyProviderUri", serverDefaults.getKeyProviderUri());
+ m.put("defaultStoragePolicyId", serverDefaults.getDefaultStoragePolicyId());
+ return m;
+ }
+
+ public static String toJsonString(SnapshotDiffReport diffReport) {
+ return toJsonString(SnapshotDiffReport.class.getSimpleName(),
+ toJsonMap(diffReport));
+ }
+
+ private static Object toJsonMap(SnapshotDiffReport diffReport) {
+ final Map<String, Object> m = new TreeMap<String, Object>();
+ m.put("snapshotRoot", diffReport.getSnapshotRoot());
+ m.put("fromSnapshot", diffReport.getFromSnapshot());
+ m.put("toSnapshot", diffReport.getLaterSnapshotName());
+ Object[] diffList = new Object[diffReport.getDiffList().size()];
+ for (int i = 0; i < diffReport.getDiffList().size(); i++) {
+ diffList[i] = toJsonMap(diffReport.getDiffList().get(i));
+ }
+ m.put("diffList", diffList);
+ return m;
+ }
+
+ private static Object toJsonMap(
+ SnapshotDiffReport.DiffReportEntry diffReportEntry) {
+ final Map<String, Object> m = new TreeMap<String, Object>();
+ m.put("type", diffReportEntry.getType());
+ if (diffReportEntry.getSourcePath() != null) {
+ m.put("sourcePath",
+ DFSUtilClient.bytes2String(diffReportEntry.getSourcePath()));
+ }
+ if (diffReportEntry.getTargetPath() != null) {
+ m.put("targetPath",
+ DFSUtilClient.bytes2String(diffReportEntry.getTargetPath()));
+ }
+ return m;
+ }
+
+ private static Map<String, Object> toJsonMap(
+ final BlockLocation blockLocation) throws IOException {
+ if (blockLocation == null) {
+ return null;
+ }
+
+ final Map<String, Object> m = new HashMap<>();
+ m.put("length", blockLocation.getLength());
+ m.put("offset", blockLocation.getOffset());
+ m.put("corrupt", blockLocation.isCorrupt());
+ m.put("storageTypes", toJsonArray(blockLocation.getStorageTypes()));
+ m.put("cachedHosts", blockLocation.getCachedHosts());
+ m.put("hosts", blockLocation.getHosts());
+ m.put("names", blockLocation.getNames());
+ m.put("topologyPaths", blockLocation.getTopologyPaths());
+ return m;
+ }
+
+ public static String toJsonString(BlockLocation[] locations)
+ throws IOException {
+ if (locations == null) {
+ return null;
+ }
+ final Map<String, Object> m = new HashMap<>();
+ Object[] blockLocations = new Object[locations.length];
+ for(int i=0; i<locations.length; i++) {
+ blockLocations[i] = toJsonMap(locations[i]);
+ }
+ m.put(BlockLocation.class.getSimpleName(), blockLocations);
+ return toJsonString("BlockLocations", m);
+ }
+}
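Note: the new class above is a local, package-private replacement for org.apache.hadoop.hdfs.web.JsonUtil, whose imports are removed from FSOperations and HttpFSServer in the earlier hunks (the erasure-coding branch in FSOperations is commented out because getEcPolicyAsMap is not carried over). A usage sketch, assuming a FileSystem handle fs and a caller inside org.apache.hadoop.fs.http.server where the class is visible:

    import org.apache.hadoop.fs.ContentSummary;
    import org.apache.hadoop.fs.Path;

    // Serializes to {"ContentSummary":{...}} via the shared ObjectMapper.
    ContentSummary summary = fs.getContentSummary(new Path("/user"));
    String json = JsonUtil.toJsonString(summary);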
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/hadoop/fs/http/server/metrics/HttpFSServerMetrics.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/hadoop/fs/http/server/metrics/HttpFSServerMetrics.java
index 524ec09..00ebbad 100644
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/hadoop/fs/http/server/metrics/HttpFSServerMetrics.java
+++ b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/hadoop/fs/http/server/metrics/HttpFSServerMetrics.java
@@ -21,7 +21,6 @@ import static org.apache.hadoop.metrics2.impl.MsInfo.SessionId;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
@@ -48,6 +47,8 @@ import java.util.concurrent.ThreadLocalRandom;
@Metrics(about="HttpFSServer metrics", context="httpfs")
public class HttpFSServerMetrics {
+ private static final String DFS_METRICS_SESSION_ID_KEY
+ = "dfs.metrics.session-id";
private @Metric MutableCounterLong bytesWritten;
private @Metric MutableCounterLong bytesRead;
@@ -78,7 +79,7 @@ public class HttpFSServerMetrics {
public static HttpFSServerMetrics create(Configuration conf,
String serverName) {
- String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
+ String sessionId = conf.get(DFS_METRICS_SESSION_ID_KEY);
MetricsSystem ms = DefaultMetricsSystem.instance();
JvmMetrics jm = JvmMetrics.create("HttpFSServer", sessionId, ms);
String name = "ServerActivity-"+ (serverName.isEmpty()
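Note: as in FSOperations, the metrics class now inlines the session-id key rather than importing DFSConfigKeys. A short sketch of the resulting lookup (the configured value here is hypothetical):

    import org.apache.hadoop.conf.Configuration;

    Configuration conf = new Configuration();
    conf.set("dfs.metrics.session-id", "httpfs-1");        // hypothetical value
    String sessionId = conf.get("dfs.metrics.session-id"); // "httpfs-1"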
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsConstants.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsConstants.java
new file mode 100644
index 0000000..4286653
--- /dev/null
+++ b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsConstants.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.io.Text;
+
+/**
+ * Declared WebHdfs constants.
+ */
+@InterfaceAudience.Private
+public final class WebHdfsConstants {
+ public static final String WEBHDFS_SCHEME = "webhdfs";
+ public static final String SWEBHDFS_SCHEME = "swebhdfs";
+ public static final Text WEBHDFS_TOKEN_KIND
+ = new Text("WEBHDFS delegation");
+ public static final Text SWEBHDFS_TOKEN_KIND
+ = new Text("SWEBHDFS delegation");
+
+ private WebHdfsConstants() {
+ }
+
+ enum PathType {
+ FILE, DIRECTORY, SYMLINK;
+
+ static PathType valueOf(HdfsFileStatus status) {
+ if (status.isDirectory()) {
+ return DIRECTORY;
+ }
+ if (status.isSymlink()) {
+ return SYMLINK;
+ }
+ return FILE;
+ }
+ }
+}
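Note: WebHdfsConstants above appears to be a minimal local copy of the client-side class of the same name, declaring only the scheme names, token kinds, and the PathType classifier. A sketch of the classifier, assuming an HdfsFileStatus named status and a caller in the same org.apache.hadoop.hdfs.web package (the enum is package-private):

    // isDirectory() -> DIRECTORY, isSymlink() -> SYMLINK, otherwise FILE.
    WebHdfsConstants.PathType type = WebHdfsConstants.PathType.valueOf(status);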
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/hadoop/hdfs/web/package-info.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/hadoop/hdfs/web/package-info.java
new file mode 100644
index 0000000..66a5432
--- /dev/null
+++ b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/hadoop/hdfs/web/package-info.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+/**
+ * Webhdfs implementations.
+ */
+package org.apache.hadoop.hdfs.web;
\ No newline at end of file
diff --git a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
deleted file mode 100644
index 7182c98..0000000
--- a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
+++ /dev/null
@@ -1,1903 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.http.client;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.BlockStoragePolicySpi;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileChecksum;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileSystemTestHelper;
-import org.apache.hadoop.fs.FsServerDefaults;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.QuotaUsage;
-import org.apache.hadoop.fs.RemoteIterator;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.fs.contract.ContractTestUtils;
-import org.apache.hadoop.fs.http.server.HttpFSServerWebApp;
-import org.apache.hadoop.fs.permission.AclEntry;
-import org.apache.hadoop.fs.permission.AclStatus;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.AppendTestUtil;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
-import org.apache.hadoop.hdfs.protocol.SnapshotException;
-import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
-import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.web.JsonUtil;
-import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.test.HFSTestCase;
-import org.apache.hadoop.test.HadoopUsersConfTestHelper;
-import org.apache.hadoop.test.LambdaTestUtils;
-import org.apache.hadoop.test.TestDir;
-import org.apache.hadoop.test.TestDirHelper;
-import org.apache.hadoop.test.TestHdfs;
-import org.apache.hadoop.test.TestHdfsHelper;
-import org.apache.hadoop.test.TestJetty;
-import org.apache.hadoop.test.TestJettyHelper;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.eclipse.jetty.server.Server;
-import org.eclipse.jetty.webapp.WebAppContext;
-
-import org.apache.hadoop.thirdparty.com.google.common.collect.Lists;
-
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.io.Writer;
-import java.net.URI;
-import java.net.URL;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.regex.Pattern;
-
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-@RunWith(value = Parameterized.class)
-public abstract class BaseTestHttpFSWith extends HFSTestCase {
-
- protected abstract Path getProxiedFSTestDir();
-
- protected abstract String getProxiedFSURI();
-
- protected abstract Configuration getProxiedFSConf();
-
- protected boolean isLocalFS() {
- return getProxiedFSURI().startsWith("file://");
- }
-
- private void createHttpFSServer() throws Exception {
- File homeDir = TestDirHelper.getTestDir();
- assertTrue(new File(homeDir, "conf").mkdir());
- assertTrue(new File(homeDir, "log").mkdir());
- assertTrue(new File(homeDir, "temp").mkdir());
- HttpFSServerWebApp.setHomeDirForCurrentThread(homeDir.getAbsolutePath());
-
- File secretFile = new File(new File(homeDir, "conf"), "secret");
- Writer w = new FileWriter(secretFile);
- w.write("secret");
- w.close();
-
- //FileSystem being served by HttpFS
- String fsDefaultName = getProxiedFSURI();
- Configuration conf = new Configuration(false);
- conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName);
- conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
- conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
- // For BaseTestHttpFSWith#testFileAclsCustomizedUserAndGroupNames
- conf.set(HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
- "^[A-Za-z0-9_][A-Za-z0-9._-]*[$]?$");
- conf.set(HdfsClientConfigKeys.DFS_WEBHDFS_ACL_PERMISSION_PATTERN_KEY,
- "^(default:)?(user|group|mask|other):" +
- "[[0-9A-Za-z_][@A-Za-z0-9._-]]*:([rwx-]{3})?(,(default:)?" +
- "(user|group|mask|other):[[0-9A-Za-z_][@A-Za-z0-9._-]]*:" +
- "([rwx-]{3})?)*$");
- File hdfsSite = new File(new File(homeDir, "conf"), "hdfs-site.xml");
- OutputStream os = new FileOutputStream(hdfsSite);
- conf.writeXml(os);
- os.close();
-
- //HTTPFS configuration
- conf = new Configuration(false);
- conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".groups",
- HadoopUsersConfTestHelper.getHadoopProxyUserGroups());
- conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".hosts",
- HadoopUsersConfTestHelper.getHadoopProxyUserHosts());
- conf.set("httpfs.authentication.signature.secret.file", secretFile.getAbsolutePath());
- File httpfsSite = new File(new File(homeDir, "conf"), "httpfs-site.xml");
- os = new FileOutputStream(httpfsSite);
- conf.writeXml(os);
- os.close();
-
- ClassLoader cl = Thread.currentThread().getContextClassLoader();
- URL url = cl.getResource("webapp");
- WebAppContext context = new WebAppContext(url.getPath(), "/webhdfs");
- Server server = TestJettyHelper.getJettyServer();
- server.setHandler(context);
- server.start();
- }
-
- protected Class getFileSystemClass() {
- return HttpFSFileSystem.class;
- }
-
- protected String getScheme() {
- return "webhdfs";
- }
-
- protected FileSystem getHttpFSFileSystem(Configuration conf) throws
- Exception {
- conf.set("fs.webhdfs.impl", getFileSystemClass().getName());
- URI uri = new URI(getScheme() + "://" +
- TestJettyHelper.getJettyURL().toURI().getAuthority());
- return FileSystem.get(uri, conf);
- }
-
- protected FileSystem getHttpFSFileSystem() throws Exception {
- Configuration conf = new Configuration();
- return getHttpFSFileSystem(conf);
- }
-
- protected void testGet() throws Exception {
- FileSystem fs = getHttpFSFileSystem();
- Assert.assertNotNull(fs);
- URI uri = new URI(getScheme() + "://" +
- TestJettyHelper.getJettyURL().toURI().getAuthority());
- assertEquals(fs.getUri(), uri);
- fs.close();
- }
-
- private void testOpen() throws Exception {
- FileSystem fs = FileSystem.get(getProxiedFSConf());
- Path path = new Path(getProxiedFSTestDir(), "foo.txt");
- OutputStream os = fs.create(path);
- os.write(1);
- os.close();
- fs.close();
- fs = getHttpFSFileSystem();
- InputStream is = fs.open(new Path(path.toUri().getPath()));
- assertEquals(is.read(), 1);
- is.close();
- fs.close();
- }
-
- private void testCreate(Path path, boolean override) throws Exception {
- FileSystem fs = getHttpFSFileSystem();
- FsPermission permission = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
- OutputStream os = fs.create(new Path(path.toUri().getPath()), permission, override, 1024,
- (short) 2, 100 * 1024 * 1024, null);
- os.write(1);
- os.close();
- fs.close();
-
- fs = FileSystem.get(getProxiedFSConf());
- FileStatus status = fs.getFileStatus(path);
- if (!isLocalFS()) {
- assertEquals(status.getReplication(), 2);
- assertEquals(status.getBlockSize(), 100 * 1024 * 1024);
- }
- assertEquals(status.getPermission(), permission);
- InputStream is = fs.open(path);
- assertEquals(is.read(), 1);
- is.close();
- fs.close();
- }
-
- private void testCreate() throws Exception {
- Path path = new Path(getProxiedFSTestDir(), "foo.txt");
- FileSystem fs = FileSystem.get(getProxiedFSConf());
- fs.delete(path, true);
- testCreate(path, false);
- testCreate(path, true);
- try {
- testCreate(path, false);
- Assert.fail("the create should have failed because the file exists " +
- "and override is FALSE");
- } catch (IOException ex) {
- System.out.println("#");
- } catch (Exception ex) {
- Assert.fail(ex.toString());
- }
- }
-
- private void testAppend() throws Exception {
- if (!isLocalFS()) {
- FileSystem fs = FileSystem.get(getProxiedFSConf());
- fs.mkdirs(getProxiedFSTestDir());
- Path path = new Path(getProxiedFSTestDir(), "foo.txt");
- OutputStream os = fs.create(path);
- os.write(1);
- os.close();
- fs.close();
- fs = getHttpFSFileSystem();
- os = fs.append(new Path(path.toUri().getPath()));
- os.write(2);
- os.close();
- fs.close();
- fs = FileSystem.get(getProxiedFSConf());
- InputStream is = fs.open(path);
- assertEquals(is.read(), 1);
- assertEquals(is.read(), 2);
- assertEquals(is.read(), -1);
- is.close();
- fs.close();
- }
- }
-
- private void testTruncate() throws Exception {
- if (!isLocalFS()) {
- final short repl = 3;
- final int blockSize = 1024;
- final int numOfBlocks = 2;
- FileSystem fs = FileSystem.get(getProxiedFSConf());
- fs.mkdirs(getProxiedFSTestDir());
- Path file = new Path(getProxiedFSTestDir(), "foo.txt");
- final byte[] data = FileSystemTestHelper.getFileData(
- numOfBlocks, blockSize);
- FileSystemTestHelper.createFile(fs, file, data, blockSize, repl);
-
- final int newLength = blockSize;
-
- boolean isReady = fs.truncate(file, newLength);
- assertTrue("Recovery is not expected.", isReady);
-
- FileStatus fileStatus = fs.getFileStatus(file);
- assertEquals(fileStatus.getLen(), newLength);
- AppendTestUtil.checkFullFile(fs, file, newLength, data, file.toString());
-
- fs.close();
- }
- }
-
- private void testConcat() throws Exception {
- Configuration config = getProxiedFSConf();
- config.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
- if (!isLocalFS()) {
- FileSystem fs = FileSystem.get(config);
- fs.mkdirs(getProxiedFSTestDir());
- Path path1 = new Path("/test/foo.txt");
- Path path2 = new Path("/test/bar.txt");
- Path path3 = new Path("/test/derp.txt");
- DFSTestUtil.createFile(fs, path1, 1024, (short) 3, 0);
- DFSTestUtil.createFile(fs, path2, 1024, (short) 3, 0);
- DFSTestUtil.createFile(fs, path3, 1024, (short) 3, 0);
- fs.close();
- fs = getHttpFSFileSystem();
- fs.concat(path1, new Path[]{path2, path3});
- fs.close();
- fs = FileSystem.get(config);
- assertTrue(fs.exists(path1));
- assertFalse(fs.exists(path2));
- assertFalse(fs.exists(path3));
- fs.close();
- }
- }
-
- private void testRename() throws Exception {
- FileSystem fs = FileSystem.get(getProxiedFSConf());
- Path path = new Path(getProxiedFSTestDir(), "foo");
- fs.mkdirs(path);
- fs.close();
- fs = getHttpFSFileSystem();
- Path oldPath = new Path(path.toUri().getPath());
- Path newPath = new Path(path.getParent(), "bar");
- fs.rename(oldPath, newPath);
- fs.close();
- fs = FileSystem.get(getProxiedFSConf());
- assertFalse(fs.exists(oldPath));
- assertTrue(fs.exists(newPath));
- fs.close();
- }
-
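- /** Delete paths through HttpFS; a non-recursive delete of a non-empty directory must fail. */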
- private void testDelete() throws Exception {
- Path foo = new Path(getProxiedFSTestDir(), "foo");
- Path bar = new Path(getProxiedFSTestDir(), "bar");
- Path foe = new Path(getProxiedFSTestDir(), "foe");
- FileSystem fs = FileSystem.get(getProxiedFSConf());
- fs.mkdirs(foo);
- fs.mkdirs(new Path(bar, "a"));
- fs.mkdirs(foe);
-
- FileSystem hoopFs = getHttpFSFileSystem();
- assertTrue(hoopFs.delete(new Path(foo.toUri().getPath()), false));
- assertFalse(fs.exists(foo));
- try {
- hoopFs.delete(new Path(bar.toUri().getPath()), false);
- Assert.fail();
- } catch (IOException ex) {
- // Expected: non-recursive delete of a non-empty directory must fail.
- } catch (Exception ex) {
- Assert.fail();
- }
- assertTrue(fs.exists(bar));
- assertTrue(hoopFs.delete(new Path(bar.toUri().getPath()), true));
- assertFalse(fs.exists(bar));
-
- assertTrue(fs.exists(foe));
- assertTrue(hoopFs.delete(foe, true));
- assertFalse(fs.exists(foe));
-
- hoopFs.close();
- fs.close();
- }
-
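- /** A listing of a directory with a symlink should report exactly one symlink entry. */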
- private void testListSymLinkStatus() throws Exception {
- if (isLocalFS()) {
- // do not test the symlink for local FS.
- return;
- }
- FileSystem fs = FileSystem.get(getProxiedFSConf());
- boolean isWebhdfs = fs instanceof WebHdfsFileSystem;
- Path path =
- new Path(getProxiedFSTestDir() + "-symlink", "targetFoo.txt");
- OutputStream os = fs.create(path);
- os.write(1);
- os.close();
- Path linkPath =
- new Path(getProxiedFSTestDir() + "-symlink", "symlinkFoo.txt");
- fs.createSymlink(path, linkPath, false);
- fs = getHttpFSFileSystem();
- FileStatus linkStatus = fs.getFileStatus(linkPath);
- FileStatus status1 = fs.getFileStatus(path);
-
- FileStatus[] stati = fs.listStatus(path.getParent());
- assertEquals(2, stati.length);
-
- int countSymlink = 0;
- for (FileStatus fStatus : stati) {
- countSymlink += fStatus.isSymlink() ? 1 : 0;
- }
- assertEquals(1, countSymlink);
-
- assertFalse(status1.isSymlink());
- if (isWebhdfs) {
- assertTrue(linkStatus.isSymlink());
- }
- fs.close();
- }
-
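- /** File status fields and directory listings must match between HttpFS and the proxied FS. */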
- private void testListStatus() throws Exception {
- FileSystem fs = FileSystem.get(getProxiedFSConf());
- boolean isDFS = fs instanceof DistributedFileSystem;
- Path path = new Path(getProxiedFSTestDir(), "foo.txt");
- OutputStream os = fs.create(path);
- os.write(1);
- os.close();
- FileStatus status1 = fs.getFileStatus(path);
- fs.close();
-
- fs = getHttpFSFileSystem();
- FileStatus status2 = fs.getFileStatus(new Path(path.toUri().getPath()));
- fs.close();
-
- assertEquals(status2.getPermission(), status1.getPermission());
- assertEquals(status2.getPath().toUri().getPath(),
- status1.getPath().toUri().getPath());
- assertEquals(status2.getReplication(), status1.getReplication());
- assertEquals(status2.getBlockSize(), status1.getBlockSize());
- assertEquals(status2.getAccessTime(), status1.getAccessTime());
- assertEquals(status2.getModificationTime(), status1.getModificationTime());
- assertEquals(status2.getOwner(), status1.getOwner());
- assertEquals(status2.getGroup(), status1.getGroup());
- assertEquals(status2.getLen(), status1.getLen());
- if (isDFS && status2 instanceof HdfsFileStatus) {
- assertTrue(status1 instanceof HdfsFileStatus);
- HdfsFileStatus hdfsFileStatus1 = (HdfsFileStatus) status1;
- HdfsFileStatus hdfsFileStatus2 = (HdfsFileStatus) status2;
- // Check HDFS-specific fields
- assertEquals(hdfsFileStatus2.getChildrenNum(),
- hdfsFileStatus1.getChildrenNum());
- assertEquals(hdfsFileStatus2.getFileId(),
- hdfsFileStatus1.getFileId());
- assertEquals(hdfsFileStatus2.getStoragePolicy(),
- hdfsFileStatus1.getStoragePolicy());
- }
-
- FileStatus[] stati = fs.listStatus(path.getParent());
- assertEquals(1, stati.length);
- assertEquals(stati[0].getPath().getName(), path.getName());
-
- // The full path should be the path to the file. See HDFS-12139
- FileStatus[] statl = fs.listStatus(path);
- Assert.assertEquals(1, statl.length);
- Assert.assertEquals(status2.getPath(), statl[0].getPath());
- Assert.assertEquals(statl[0].getPath().getName(), path.getName());
- Assert.assertEquals(stati[0].getPath(), statl[0].getPath());
- }
-
- private void testFileStatusAttr() throws Exception {
- if (!this.isLocalFS()) {
- // Create a directory
- Path path = new Path("/tmp/tmp-snap-test");
- DistributedFileSystem distributedFs = (DistributedFileSystem) FileSystem
- .get(path.toUri(), this.getProxiedFSConf());
- distributedFs.mkdirs(path);
- // Get the FileSystem instance that's being tested
- FileSystem fs = this.getHttpFSFileSystem();
- // Check FileStatus
- assertFalse("Snapshot should be disallowed by default",
- fs.getFileStatus(path).isSnapshotEnabled());
- // Allow snapshot
- distributedFs.allowSnapshot(path);
- // Check FileStatus
- assertTrue("Snapshot enabled bit is not set in FileStatus",
- fs.getFileStatus(path).isSnapshotEnabled());
- // Disallow snapshot
- distributedFs.disallowSnapshot(path);
- // Check FileStatus
- assertFalse("Snapshot enabled bit is not cleared in FileStatus",
- fs.getFileStatus(path).isSnapshotEnabled());
- // Cleanup
- fs.delete(path, true);
- fs.close();
- distributedFs.close();
- }
- }
-
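- /** Assert that both file systems return the same listing for the given path. */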
- private static void assertSameListing(FileSystem expected,
- FileSystem actual, Path p) throws IOException {
- // Consume all the entries from both iterators
- RemoteIterator<FileStatus> exIt = expected.listStatusIterator(p);
- List<FileStatus> exStatuses = new ArrayList<>();
- while (exIt.hasNext()) {
- exStatuses.add(exIt.next());
- }
- RemoteIterator<FileStatus> acIt = actual.listStatusIterator(p);
- List<FileStatus> acStatuses = new ArrayList<>();
- while (acIt.hasNext()) {
- acStatuses.add(acIt.next());
- }
- assertEquals(exStatuses.size(), acStatuses.size());
- for (int i = 0; i < exStatuses.size(); i++) {
- FileStatus expectedStatus = exStatuses.get(i);
- FileStatus actualStatus = acStatuses.get(i);
- // Path URIs are fully qualified, so compare just the path component
- assertEquals(expectedStatus.getPath().toUri().getPath(),
- actualStatus.getPath().toUri().getPath());
- }
- }
-
- private void testListStatusBatch() throws Exception {
- // LocalFileSystem writes checksum files next to the data files, which
- // show up when listing via LFS. This makes the listings not compare
- // properly.
- Assume.assumeFalse(isLocalFS());
-
- FileSystem proxyFs = FileSystem.get(getProxiedFSConf());
- Configuration conf = new Configuration();
- conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, 2);
- FileSystem httpFs = getHttpFSFileSystem(conf);
-
- // Test an empty directory
- Path dir = new Path(getProxiedFSTestDir(), "dir");
- proxyFs.mkdirs(dir);
- assertSameListing(proxyFs, httpFs, dir);
- // Create and test in a loop
- for (int i = 0; i < 10; i++) {
- proxyFs.create(new Path(dir, "file" + i)).close();
- assertSameListing(proxyFs, httpFs, dir);
- }
-
- // Test for HDFS-12139
- Path dir1 = new Path(getProxiedFSTestDir(), "dir1");
- proxyFs.mkdirs(dir1);
- Path file1 = new Path(dir1, "file1");
- proxyFs.create(file1).close();
-
- RemoteIterator<FileStatus> si = proxyFs.listStatusIterator(dir1);
- FileStatus statusl = si.next();
- FileStatus status = proxyFs.getFileStatus(file1);
- Assert.assertEquals(file1.getName(), statusl.getPath().getName());
- Assert.assertEquals(status.getPath(), statusl.getPath());
-
- si = proxyFs.listStatusIterator(file1);
- statusl = si.next();
- Assert.assertEquals(file1.getName(), statusl.getPath().getName());
- Assert.assertEquals(status.getPath(), statusl.getPath());
- }
-
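- /** Get and set the working directory, including rejection of an invalid directory name. */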
- private void testWorkingdirectory() throws Exception {
- FileSystem fs = FileSystem.get(getProxiedFSConf());
- Path workingDir = fs.getWorkingDirectory();
- fs.close();
-
- fs = getHttpFSFileSystem();
- if (isLocalFS()) {
- fs.setWorkingDirectory(workingDir);
- }
- Path httpFSWorkingDir = fs.getWorkingDirectory();
- fs.close();
- assertEquals(httpFSWorkingDir.toUri().getPath(),
- workingDir.toUri().getPath());
-
- fs = getHttpFSFileSystem();
- fs.setWorkingDirectory(new Path("/tmp"));
- workingDir = fs.getWorkingDirectory();
- assertEquals(workingDir.toUri().getPath(),
- new Path("/tmp").toUri().getPath());
- final FileSystem httpFs = getHttpFSFileSystem();
- LambdaTestUtils.intercept(IllegalArgumentException.class,
- "Invalid DFS directory name /foo:bar",
- () -> httpFs.setWorkingDirectory(new Path("/foo:bar")));
- fs.setWorkingDirectory(new Path("/bar"));
- workingDir = fs.getWorkingDirectory();
- httpFs.close();
- fs.close();
- assertEquals(workingDir.toUri().getPath(),
- new Path("/bar").toUri().getPath());
- }
-
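- /** The trash root reported by HttpFS must match the proxied FS for any path. */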
- private void testTrashRoot() throws Exception {
- if (!isLocalFS()) {
- FileSystem fs = FileSystem.get(getProxiedFSConf());
-
- final Path rootDir = new Path("/");
- final Path fooPath = new Path(getProxiedFSTestDir(), "foo.txt");
- OutputStream os = fs.create(fooPath);
- os.write(1);
- os.close();
-
- Path trashPath = fs.getTrashRoot(rootDir);
- Path fooTrashPath = fs.getTrashRoot(fooPath);
- fs.close();
-
- fs = getHttpFSFileSystem();
- Path httpFSTrashPath = fs.getTrashRoot(rootDir);
- Path httpFSFooTrashPath = fs.getTrashRoot(fooPath);
- fs.close();
-
- assertEquals(trashPath.toUri().getPath(),
- httpFSTrashPath.toUri().getPath());
- assertEquals(fooTrashPath.toUri().getPath(),
- httpFSFooTrashPath.toUri().getPath());
- // The trash root depends on the user, not on the path.
- assertEquals(trashPath.toUri().getPath(),
- fooTrashPath.toUri().getPath());
- }
- }
-
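- /** Create a directory through HttpFS and verify it exists on the proxied FS. */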
- private void testMkdirs() throws Exception {
- Path path = new Path(getProxiedFSTestDir(), "foo");
- FileSystem fs = getHttpFSFileSystem();
- fs.mkdirs(path);
- fs.close();
- fs = FileSystem.get(getProxiedFSConf());
- assertTrue(fs.exists(path));
- fs.close();
- }
-
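- /** Set access and modification times through HttpFS and verify them. */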
- private void testSetTimes() throws Exception {
- if (!isLocalFS()) {
- FileSystem fs = FileSystem.get(getProxiedFSConf());
- Path path = new Path(getProxiedFSTestDir(), "foo.txt");
- OutputStream os = fs.create(path);
- os.write(1);
- os.close();
- FileStatus status1 = fs.getFileStatus(path);
- fs.close();
- long at = status1.getAccessTime();
- long mt = status1.getModificationTime();
-
- fs = getHttpFSFileSystem();
- fs.setTimes(path, mt - 10, at - 20);
- fs.close();
-
- fs = FileSystem.get(getProxiedFSConf());
- status1 = fs.getFileStatus(path);
- fs.close();
- long atNew = status1.getAccessTime();
- long mtNew = status1.getModificationTime();
- assertEquals(mtNew, mt - 10);
- assertEquals(atNew, at - 20);
- }
- }
-
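- /** Set permissions, including the sticky bit, through HttpFS and verify them. */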
- protected void testSetPermission() throws Exception {
- FileSystem fs = FileSystem.get(getProxiedFSConf());
- Path path = new Path(getProxiedFSTestDir(), "foodir");
- fs.mkdirs(path);
-
- fs = getHttpFSFileSystem();
- FsPermission permission1 = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
- fs.setPermission(path, permission1);
- fs.close();
-
- fs = FileSystem.get(getProxiedFSConf());
- FileStatus status1 = fs.getFileStatus(path);
- fs.close();
- FsPermission permission2 = status1.getPermission();
- assertEquals(permission2, permission1);
-
- //sticky bit
- fs = getHttpFSFileSystem();
- permission1 = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE, true);
- fs.setPermission(path, permission1);
- fs.close();
-
- fs = FileSystem.get(getProxiedFSConf());
- status1 = fs.getFileStatus(path);
- fs.close();
- permission2 = status1.getPermission();
- assertTrue(permission2.getStickyBit());
- assertEquals(permission2, permission1);
- }
-
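- /** Set owner and group through HttpFS and verify them. */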
- private void testSetOwner() throws Exception {
- if (!isLocalFS()) {
- FileSystem fs = FileSystem.get(getProxiedFSConf());
- fs.mkdirs(getProxiedFSTestDir());
- Path path = new Path(getProxiedFSTestDir(), "foo.txt");
- OutputStream os = fs.create(path);
- os.write(1);
- os.close();
- fs.close();
-
- fs = getHttpFSFileSystem();
- String user = HadoopUsersConfTestHelper.getHadoopUsers()[1];
- String group = HadoopUsersConfTestHelper.getHadoopUserGroups(user)[0];
- fs.setOwner(path, user, group);
- fs.close();
-
- fs = FileSystem.get(getProxiedFSConf());
- FileStatus status1 = fs.getFileStatus(path);
- fs.close();
- assertEquals(status1.getOwner(), user);
- assertEquals(status1.getGroup(), group);
- }
- }
-
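- /** Change the replication factor through HttpFS and verify it. */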
- private void testSetReplication() throws Exception {
- FileSystem fs = FileSystem.get(getProxiedFSConf());
- Path path = new Path(getProxiedFSTestDir(), "foo.txt");
- OutputStream os = fs.create(path);
- os.write(1);
- os.close();
- fs.setReplication(path, (short) 2);
- fs.close();
-
- fs = getHttpFSFileSystem();
- fs.setReplication(path, (short) 1);
- fs.close();
-
- fs = FileSystem.get(getProxiedFSConf());
- FileStatus status1 = fs.getFileStatus(path);
- fs.close();
- assertEquals(status1.getReplication(), (short) 1);
- }
-
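- /** File checksums from HttpFS and the proxied FS must be identical. */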
- private void testChecksum() throws Exception {
- if (!isLocalFS()) {
- FileSystem fs = FileSystem.get(getProxiedFSConf());
- fs.mkdirs(getProxiedFSTestDir());
- Path path = new Path(getProxiedFSTestDir(), "foo.txt");
- OutputStream os = fs.create(path);
- os.write(1);
- os.close();
- FileChecksum hdfsChecksum = fs.getFileChecksum(path);
- fs.close();
- fs = getHttpFSFileSystem();
- FileChecksum httpChecksum = fs.getFileChecksum(path);
- fs.close();
- assertEquals(httpChecksum.getAlgorithmName(),
- hdfsChecksum.getAlgorithmName());
- assertEquals(httpChecksum.getLength(), hdfsChecksum.getLength());
- assertArrayEquals(httpChecksum.getBytes(), hdfsChecksum.getBytes());
- }
- }
-
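- /** Content summaries from HttpFS and the proxied FS must match. */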
- private void testContentSummary() throws Exception {
- FileSystem fs = FileSystem.get(getProxiedFSConf());
- Path path = new Path(getProxiedFSTestDir(), "foo.txt");
- OutputStream os = fs.create(path);
- os.write(1);
- os.close();
- ContentSummary hdfsContentSummary = fs.getContentSummary(path);
- fs.close();
- fs = getHttpFSFileSystem();
- ContentSummary httpContentSummary = fs.getContentSummary(path);
- fs.close();
- assertEquals(hdfsContentSummary.getDirectoryCount(),
- httpContentSummary.getDirectoryCount());
- assertEquals(hdfsContentSummary.getErasureCodingPolicy(),
- httpContentSummary.getErasureCodingPolicy());
- assertEquals(hdfsContentSummary.getFileCount(),
- httpContentSummary.getFileCount());
- assertEquals(hdfsContentSummary.getLength(),
- httpContentSummary.getLength());
- assertEquals(hdfsContentSummary.getQuota(), httpContentSummary.getQuota());
- assertEquals(hdfsContentSummary.getSpaceConsumed(),
- httpContentSummary.getSpaceConsumed());
- assertEquals(hdfsContentSummary.getSpaceQuota(),
- httpContentSummary.getSpaceQuota());
- }
-
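- /** Quota usage, including per-storage-type quotas, must match DFS. */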
- private void testQuotaUsage() throws Exception {
- if (isLocalFS()) {
- // LocalFS doesn't support setQuota so skip here
- return;
- }
-
- DistributedFileSystem dfs =
- (DistributedFileSystem) FileSystem.get(getProxiedFSConf());
- Path path = new Path(getProxiedFSTestDir(), "foo");
- dfs.mkdirs(path);
- dfs.setQuota(path, 20, 600 * 1024 * 1024);
- for (int i = 0; i < 10; i++) {
- dfs.createNewFile(new Path(path, "test_file_" + i));
- }
- FSDataOutputStream out = dfs.create(new Path(path, "test_file"));
- out.writeUTF("Hello World");
- out.close();
-
- dfs.setQuotaByStorageType(path, StorageType.SSD, 100000);
- dfs.setQuotaByStorageType(path, StorageType.DISK, 200000);
-
- QuotaUsage hdfsQuotaUsage = dfs.getQuotaUsage(path);
- dfs.close();
- FileSystem fs = getHttpFSFileSystem();
- QuotaUsage httpQuotaUsage = fs.getQuotaUsage(path);
- fs.close();
- assertEquals(hdfsQuotaUsage.getFileAndDirectoryCount(),
- httpQuotaUsage.getFileAndDirectoryCount());
- assertEquals(hdfsQuotaUsage.getQuota(), httpQuotaUsage.getQuota());
- assertEquals(hdfsQuotaUsage.getSpaceConsumed(),
- httpQuotaUsage.getSpaceConsumed());
- assertEquals(hdfsQuotaUsage.getSpaceQuota(),
- httpQuotaUsage.getSpaceQuota());
- assertEquals(hdfsQuotaUsage.getTypeQuota(StorageType.SSD),
- httpQuotaUsage.getTypeQuota(StorageType.SSD));
- assertEquals(hdfsQuotaUsage.getTypeQuota(StorageType.DISK),
- httpQuotaUsage.getTypeQuota(StorageType.DISK));
- }
-
- /** Set xattr */
- private void testSetXAttr() throws Exception {
- if (!isLocalFS()) {
- FileSystem fs = FileSystem.get(getProxiedFSConf());
- fs.mkdirs(getProxiedFSTestDir());
- Path path = new Path(getProxiedFSTestDir(), "foo.txt");
- OutputStream os = fs.create(path);
- os.write(1);
- os.close();
- fs.close();
-
- final String name1 = "user.a1";
- final byte[] value1 = new byte[]{0x31, 0x32, 0x33};
- final String name2 = "user.a2";
- final byte[] value2 = new byte[]{0x41, 0x42, 0x43};
- final String name3 = "user.a3";
- final byte[] value3 = null;
- final String name4 = "trusted.a1";
- final byte[] value4 = new byte[]{0x31, 0x32, 0x33};
- final String name5 = "a1";
- fs = getHttpFSFileSystem();
- fs.setXAttr(path, name1, value1);
- fs.setXAttr(path, name2, value2);
- fs.setXAttr(path, name3, value3);
- fs.setXAttr(path, name4, value4);
- try {
- fs.setXAttr(path, name5, value1);
- Assert.fail("Set xAttr with incorrect name format should fail.");
- } catch (IOException e) {
- // Expected: the xattr name lacks a valid namespace prefix.
- } catch (IllegalArgumentException e) {
- // Expected when the name is rejected client-side.
- }
- fs.close();
-
- fs = FileSystem.get(getProxiedFSConf());
- Map<String, byte[]> xAttrs = fs.getXAttrs(path);
- fs.close();
- assertEquals(4, xAttrs.size());
- assertArrayEquals(value1, xAttrs.get(name1));
- assertArrayEquals(value2, xAttrs.get(name2));
- assertArrayEquals(new byte[0], xAttrs.get(name3));
- assertArrayEquals(value4, xAttrs.get(name4));
- }
- }
-
- /** Get xattrs */
- private void testGetXAttrs() throws Exception {
- if (!isLocalFS()) {
- FileSystem fs = FileSystem.get(getProxiedFSConf());
- fs.mkdirs(getProxiedFSTestDir());
- Path path = new Path(getProxiedFSTestDir(), "foo.txt");
- OutputStream os = fs.create(path);
- os.write(1);
- os.close();
- fs.close();
-
- final String name1 = "user.a1";
- final byte[] value1 = new byte[]{0x31, 0x32, 0x33};
- final String name2 = "user.a2";
- final byte[] value2 = new byte[]{0x41, 0x42, 0x43};
- final String name3 = "user.a3";
- final byte[] value3 = null;
- final String name4 = "trusted.a1";
- final byte[] value4 = new byte[]{0x31, 0x32, 0x33};
- fs = FileSystem.get(getProxiedFSConf());
- fs.setXAttr(path, name1, value1);
- fs.setXAttr(path, name2, value2);
- fs.setXAttr(path, name3, value3);
- fs.setXAttr(path, name4, value4);
- fs.close();
-
- // Get xattrs with names parameter
- fs = getHttpFSFileSystem();
- List<String> names = Lists.newArrayList();
- names.add(name1);
- names.add(name2);
- names.add(name3);
- names.add(name4);
- Map<String, byte[]> xAttrs = fs.getXAttrs(path, names);
- fs.close();
- assertEquals(4, xAttrs.size());
- assertArrayEquals(value1, xAttrs.get(name1));
- assertArrayEquals(value2, xAttrs.get(name2));
- assertArrayEquals(new byte[0], xAttrs.get(name3));
- assertArrayEquals(value4, xAttrs.get(name4));
-
- // Get specific xattr
- fs = getHttpFSFileSystem();
- byte[] value = fs.getXAttr(path, name1);
- assertArrayEquals(value1, value);
- final String name5 = "a1";
- try {
- value = fs.getXAttr(path, name5);
- Assert.fail("Get xAttr with incorrect name format should fail.");
- } catch (IOException e) {
- // Expected: the xattr name lacks a valid namespace prefix.
- } catch (IllegalArgumentException e) {
- // Expected when the name is rejected client-side.
- }
- fs.close();
-
- // Get all xattrs
- fs = getHttpFSFileSystem();
- xAttrs = fs.getXAttrs(path);
- fs.close();
- assertEquals(4, xAttrs.size());
- assertArrayEquals(value1, xAttrs.get(name1));
- assertArrayEquals(value2, xAttrs.get(name2));
- assertArrayEquals(new byte[0], xAttrs.get(name3));
- assertArrayEquals(value4, xAttrs.get(name4));
- }
- }
-
- /** Remove xattr */
- private void testRemoveXAttr() throws Exception {
- if (!isLocalFS()) {
- FileSystem fs = FileSystem.get(getProxiedFSConf());
- fs.mkdirs(getProxiedFSTestDir());
- Path path = new Path(getProxiedFSTestDir(), "foo.txt");
- OutputStream os = fs.create(path);
- os.write(1);
- os.close();
- fs.close();
-
- final String name1 = "user.a1";
- final byte[] value1 = new byte[]{0x31, 0x32, 0x33};
- final String name2 = "user.a2";
- final byte[] value2 = new byte[]{0x41, 0x42, 0x43};
- final String name3 = "user.a3";
- final byte[] value3 = null;
- final String name4 = "trusted.a1";
- final byte[] value4 = new byte[]{0x31, 0x32, 0x33};
- final String name5 = "a1";
- fs = FileSystem.get(getProxiedFSConf());
- fs.setXAttr(path, name1, value1);
- fs.setXAttr(path, name2, value2);
- fs.setXAttr(path, name3, value3);
- fs.setXAttr(path, name4, value4);
- fs.close();
-
- fs = getHttpFSFileSystem();
- fs.removeXAttr(path, name1);
- fs.removeXAttr(path, name3);
- fs.removeXAttr(path, name4);
- try {
- fs.removeXAttr(path, name5);
- Assert.fail("Remove xAttr with incorrect name format should fail.");
- } catch (IOException e) {
- // Expected: the xattr name lacks a valid namespace prefix.
- } catch (IllegalArgumentException e) {
- // Expected when the name is rejected client-side.
- }
-
- fs = FileSystem.get(getProxiedFSConf());
- Map<String, byte[]> xAttrs = fs.getXAttrs(path);
- fs.close();
- assertEquals(1, xAttrs.size());
- assertArrayEquals(value2, xAttrs.get(name2));
- }
- }
-
- /** List xattrs */
- private void testListXAttrs() throws Exception {
- if (!isLocalFS()) {
- FileSystem fs = FileSystem.get(getProxiedFSConf());
- fs.mkdirs(getProxiedFSTestDir());
- Path path = new Path(getProxiedFSTestDir(), "foo.txt");
- OutputStream os = fs.create(path);
- os.write(1);
- os.close();
- fs.close();
-
- final String name1 = "user.a1";
- final byte[] value1 = new byte[]{0x31, 0x32, 0x33};
- final String name2 = "user.a2";
- final byte[] value2 = new byte[]{0x41, 0x42, 0x43};
- final String name3 = "user.a3";
- final byte[] value3 = null;
- final String name4 = "trusted.a1";
- final byte[] value4 = new byte[]{0x31, 0x32, 0x33};
- fs = FileSystem.get(getProxiedFSConf());
- fs.setXAttr(path, name1, value1);
- fs.setXAttr(path, name2, value2);
- fs.setXAttr(path, name3, value3);
- fs.setXAttr(path, name4, value4);
- fs.close();
-
- fs = getHttpFSFileSystem();
- List<String> names = fs.listXAttrs(path);
- assertEquals(4, names.size());
- assertTrue(names.contains(name1));
- assertTrue(names.contains(name2));
- assertTrue(names.contains(name3));
- assertTrue(names.contains(name4));
- }
- }
-
- /**
- * Runs assertions verifying that two AclStatus objects contain the same info.
- * @param a First AclStatus
- * @param b Second AclStatus
- * @throws Exception if any assertion fails
- */
- private void assertSameAcls(AclStatus a, AclStatus b) throws Exception {
- assertEquals(a.getOwner(), b.getOwner());
- assertEquals(a.getGroup(), b.getGroup());
- assertEquals(a.getPermission(), b.getPermission());
- assertEquals(a.isStickyBit(), b.isStickyBit());
- assertEquals(a.getEntries().size(), b.getEntries().size());
- for (AclEntry e : a.getEntries()) {
- assertTrue(b.getEntries().contains(e));
- }
- for (AclEntry e : b.getEntries()) {
- assertTrue(a.getEntries().contains(e));
- }
- }
-
- private static void assertSameAcls(FileSystem expected, FileSystem actual,
- Path path) throws IOException {
- FileStatus expectedFileStatus = expected.getFileStatus(path);
- FileStatus actualFileStatus = actual.getFileStatus(path);
- assertEquals(actualFileStatus.hasAcl(), expectedFileStatus.hasAcl());
- // Backwards compatibility: the ACL bit in FsPermission must match as well.
- assertEquals(actualFileStatus.getPermission().getAclBit(),
- expectedFileStatus.getPermission().getAclBit());
- }
-
- /**
- * Simple ACL tests on a file: set an ACL, add an ACL, remove one ACL,
- * and remove all ACLs.
- * @throws Exception if any assertion fails
- */
- private void testFileAcls() throws Exception {
- if (isLocalFS()) {
- return;
- }
-
- final String aclUser1 = "user:foo:rw-";
- final String rmAclUser1 = "user:foo:";
- final String aclUser2 = "user:bar:r--";
- final String aclGroup1 = "group::r--";
- final String aclSet = "user::rwx," + aclUser1 + ","
- + aclGroup1 + ",other::---";
-
- FileSystem proxyFs = FileSystem.get(getProxiedFSConf());
- FileSystem httpfs = getHttpFSFileSystem();
-
- Path path = new Path(getProxiedFSTestDir(), "testAclStatus.txt");
- OutputStream os = proxyFs.create(path);
- os.write(1);
- os.close();
-
- AclStatus proxyAclStat = proxyFs.getAclStatus(path);
- AclStatus httpfsAclStat = httpfs.getAclStatus(path);
- assertSameAcls(httpfsAclStat, proxyAclStat);
- assertSameAcls(httpfs, proxyFs, path);
-
- httpfs.setAcl(path, AclEntry.parseAclSpec(aclSet, true));
- proxyAclStat = proxyFs.getAclStatus(path);
- httpfsAclStat = httpfs.getAclStatus(path);
- assertSameAcls(httpfsAclStat, proxyAclStat);
- assertSameAcls(httpfs, proxyFs, path);
-
- httpfs.modifyAclEntries(path, AclEntry.parseAclSpec(aclUser2, true));
- proxyAclStat = proxyFs.getAclStatus(path);
- httpfsAclStat = httpfs.getAclStatus(path);
- assertSameAcls(httpfsAclStat, proxyAclStat);
- assertSameAcls(httpfs, proxyFs, path);
-
- httpfs.removeAclEntries(path, AclEntry.parseAclSpec(rmAclUser1, false));
- proxyAclStat = proxyFs.getAclStatus(path);
- httpfsAclStat = httpfs.getAclStatus(path);
- assertSameAcls(httpfsAclStat, proxyAclStat);
- assertSameAcls(httpfs, proxyFs, path);
-
- httpfs.removeAcl(path);
- proxyAclStat = proxyFs.getAclStatus(path);
- httpfsAclStat = httpfs.getAclStatus(path);
- assertSameAcls(httpfsAclStat, proxyAclStat);
- assertSameAcls(httpfs, proxyFs, path);
- }
-
- /**
- * Simple ACL tests on a directory: set a default ACL, remove default ACLs.
- * @throws Exception if any assertion fails
- */
- private void testDirAcls() throws Exception {
- if (isLocalFS()) {
- return;
- }
-
- final String defUser1 = "default:user:glarch:r-x";
-
- FileSystem proxyFs = FileSystem.get(getProxiedFSConf());
- FileSystem httpfs = getHttpFSFileSystem();
-
- Path dir = getProxiedFSTestDir();
-
- /* ACL Status on a directory */
- AclStatus proxyAclStat = proxyFs.getAclStatus(dir);
- AclStatus httpfsAclStat = httpfs.getAclStatus(dir);
- assertSameAcls(httpfsAclStat, proxyAclStat);
- assertSameAcls(httpfs, proxyFs, dir);
-
- /* Set a default ACL on the directory */
- httpfs.setAcl(dir, AclEntry.parseAclSpec(defUser1, true));
- proxyAclStat = proxyFs.getAclStatus(dir);
- httpfsAclStat = httpfs.getAclStatus(dir);
- assertSameAcls(httpfsAclStat, proxyAclStat);
- assertSameAcls(httpfs, proxyFs, dir);
-
- /* Remove the default ACL */
- httpfs.removeDefaultAcl(dir);
- proxyAclStat = proxyFs.getAclStatus(dir);
- httpfsAclStat = httpfs.getAclStatus(dir);
- assertSameAcls(httpfsAclStat, proxyAclStat);
- assertSameAcls(httpfs, proxyFs, dir);
- }
-
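- /** Encrypted files must be reported as encrypted by both file systems. */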
- private void testEncryption() throws Exception {
- if (isLocalFS()) {
- return;
- }
- FileSystem proxyFs = FileSystem.get(getProxiedFSConf());
- FileSystem httpFs = getHttpFSFileSystem();
- FileStatus proxyStatus = proxyFs.getFileStatus(TestHdfsHelper
- .ENCRYPTED_FILE);
- assertTrue(proxyStatus.isEncrypted());
- FileStatus httpStatus = httpFs.getFileStatus(TestHdfsHelper
- .ENCRYPTED_FILE);
- assertTrue(httpStatus.isEncrypted());
- proxyStatus = proxyFs.getFileStatus(new Path("/"));
- httpStatus = httpFs.getFileStatus(new Path("/"));
- assertFalse(proxyStatus.isEncrypted());
- assertFalse(httpStatus.isEncrypted());
- }
-
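- /** Erasure-coded paths must be reported as such through HttpFS. */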
- private void testErasureCoding() throws Exception {
- Assume.assumeFalse("Assume its not a local FS!", isLocalFS());
- FileSystem proxyFs = FileSystem.get(getProxiedFSConf());
- FileSystem httpFS = getHttpFSFileSystem();
- Path filePath = new Path(getProxiedFSTestDir(), "foo.txt");
- proxyFs.create(filePath).close();
-
- ContractTestUtils.assertNotErasureCoded(httpFS, getProxiedFSTestDir());
- ContractTestUtils.assertNotErasureCoded(httpFS, filePath);
- ContractTestUtils.assertErasureCoded(httpFS,
- TestHdfsHelper.ERASURE_CODING_DIR);
- ContractTestUtils.assertErasureCoded(httpFS,
- TestHdfsHelper.ERASURE_CODING_FILE);
-
- proxyFs.close();
- httpFS.close();
- }
-
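- /** Get, set, and unset storage policies through HttpFS and verify against DFS. */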
- private void testStoragePolicy() throws Exception {
- Assume.assumeFalse("Assume its not a local FS", isLocalFS());
- FileSystem fs = FileSystem.get(getProxiedFSConf());
- fs.mkdirs(getProxiedFSTestDir());
- Path path = new Path(getProxiedFSTestDir(), "policy.txt");
- FileSystem httpfs = getHttpFSFileSystem();
- // test getAllStoragePolicies
- Assert.assertArrayEquals(
- "Policy array returned from the DFS and HttpFS should be equals",
- fs.getAllStoragePolicies().toArray(), httpfs.getAllStoragePolicies().toArray());
-
- // test get/set/unset policies
- DFSTestUtil.createFile(fs, path, 0, (short) 1, 0L);
- // get defaultPolicy
- BlockStoragePolicySpi defaultdfsPolicy = fs.getStoragePolicy(path);
- // set policy through webhdfs
- httpfs.setStoragePolicy(path, HdfsConstants.COLD_STORAGE_POLICY_NAME);
- // get policy from dfs
- BlockStoragePolicySpi dfsPolicy = fs.getStoragePolicy(path);
- // get policy from webhdfs
- BlockStoragePolicySpi httpFsPolicy = httpfs.getStoragePolicy(path);
- Assert.assertEquals(
- "Storage policy returned from the get API should"
- + " be the same as the set policy",
- HdfsConstants.COLD_STORAGE_POLICY_NAME,
- httpFsPolicy.getName());
- Assert.assertEquals(
- "Storage policy returned from the DFS and HttpFS should be equal",
- httpFsPolicy, dfsPolicy);
- // unset policy
- httpfs.unsetStoragePolicy(path);
- Assert.assertEquals(
- "After unsetting the storage policy, the get API should"
- + " return the default policy",
- defaultdfsPolicy, httpfs.getStoragePolicy(path));
- fs.close();
- }
-
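- /** File system operations exercised by the parameterized test runs. */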
- protected enum Operation {
- GET, OPEN, CREATE, APPEND, TRUNCATE, CONCAT, RENAME, DELETE, LIST_STATUS,
- WORKING_DIRECTORY, MKDIRS, SET_TIMES, SET_PERMISSION, SET_OWNER,
- SET_REPLICATION, CHECKSUM, CONTENT_SUMMARY, QUOTA_USAGE, FILEACLS, DIRACLS,
- SET_XATTR, GET_XATTRS, REMOVE_XATTR, LIST_XATTRS, ENCRYPTION,
- LIST_STATUS_BATCH, GETTRASHROOT, STORAGEPOLICY, ERASURE_CODING,
- CREATE_SNAPSHOT, RENAME_SNAPSHOT, DELETE_SNAPSHOT,
- ALLOW_SNAPSHOT, DISALLOW_SNAPSHOT, DISALLOW_SNAPSHOT_EXCEPTION,
- FILE_STATUS_ATTR, GET_SNAPSHOT_DIFF, GET_SNAPSHOTTABLE_DIRECTORY_LIST,
- GET_SERVERDEFAULTS, CHECKACCESS, SETECPOLICY, SATISFYSTORAGEPOLICY
- }
-
- private void operation(Operation op) throws Exception {
- switch (op) {
- case GET:
- testGet();
- break;
- case OPEN:
- testOpen();
- break;
- case CREATE:
- testCreate();
- break;
- case APPEND:
- testAppend();
- break;
- case TRUNCATE:
- testTruncate();
- break;
- case CONCAT:
- testConcat();
- break;
- case RENAME:
- testRename();
- break;
- case DELETE:
- testDelete();
- break;
- case LIST_STATUS:
- testListStatus();
- testListSymLinkStatus();
- break;
- case WORKING_DIRECTORY:
- testWorkingdirectory();
- break;
- case MKDIRS:
- testMkdirs();
- break;
- case SET_TIMES:
- testSetTimes();
- break;
- case SET_PERMISSION:
- testSetPermission();
- break;
- case SET_OWNER:
- testSetOwner();
- break;
- case SET_REPLICATION:
- testSetReplication();
- break;
- case CHECKSUM:
- testChecksum();
- break;
- case CONTENT_SUMMARY:
- testContentSummary();
- break;
- case QUOTA_USAGE:
- testQuotaUsage();
- break;
- case FILEACLS:
- testFileAclsCustomizedUserAndGroupNames();
- testFileAcls();
- break;
- case DIRACLS:
- testDirAcls();
- break;
- case SET_XATTR:
- testSetXAttr();
- break;
- case REMOVE_XATTR:
- testRemoveXAttr();
- break;
- case GET_XATTRS:
- testGetXAttrs();
- break;
- case LIST_XATTRS:
- testListXAttrs();
- break;
- case ENCRYPTION:
- testEncryption();
- break;
- case LIST_STATUS_BATCH:
- testListStatusBatch();
- break;
- case GETTRASHROOT:
- testTrashRoot();
- break;
- case STORAGEPOLICY:
- testStoragePolicy();
- break;
- case ERASURE_CODING:
- testErasureCoding();
- break;
- case CREATE_SNAPSHOT:
- testCreateSnapshot();
- break;
- case RENAME_SNAPSHOT:
- testRenameSnapshot();
- break;
- case DELETE_SNAPSHOT:
- testDeleteSnapshot();
- break;
- case ALLOW_SNAPSHOT:
- testAllowSnapshot();
- break;
- case DISALLOW_SNAPSHOT:
- testDisallowSnapshot();
- break;
- case DISALLOW_SNAPSHOT_EXCEPTION:
- testDisallowSnapshotException();
- break;
- case FILE_STATUS_ATTR:
- testFileStatusAttr();
- break;
- case GET_SNAPSHOT_DIFF:
- testGetSnapshotDiff();
- testGetSnapshotDiffIllegalParam();
- break;
- case GET_SNAPSHOTTABLE_DIRECTORY_LIST:
- testGetSnapshottableDirListing();
- break;
- case GET_SERVERDEFAULTS:
- testGetServerDefaults();
- break;
- case CHECKACCESS:
- testAccess();
- break;
- case SETECPOLICY:
- testErasureCodingPolicy();
- break;
- case SATISFYSTORAGEPOLICY:
- testStoragePolicySatisfier();
- break;
- }
-
- }
-
- @Parameterized.Parameters
- public static Collection<Object[]> operations() {
- Object[][] ops = new Object[Operation.values().length][];
- for (int i = 0; i < Operation.values().length; i++) {
- ops[i] = new Object[]{Operation.values()[i]};
- }
- // To test one or a subset of operations do:
- // return Arrays.asList(new Object[][]{new Object[]{Operation.APPEND}});
- return Arrays.asList(ops);
- }
-
- private Operation operation;
-
- public BaseTestHttpFSWith(Operation operation) {
- this.operation = operation;
- }
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testOperation() throws Exception {
- createHttpFSServer();
- operation(operation);
- }
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testOperationDoAs() throws Exception {
- createHttpFSServer();
- UserGroupInformation ugi = UserGroupInformation.createProxyUser(
- HadoopUsersConfTestHelper.getHadoopUsers()[0],
- UserGroupInformation.getCurrentUser());
- ugi.doAs(new PrivilegedExceptionAction<Void>() {
- @Override
- public Void run() throws Exception {
- operation(operation);
- return null;
- }
- });
- }
-
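- /** Create a snapshot with the given name, or an auto-generated name when null. */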
- private void testCreateSnapshot(String snapshotName) throws Exception {
- if (!this.isLocalFS()) {
- Path snapshottablePath = new Path("/tmp/tmp-snap-test");
- createSnapshotTestsPreconditions(snapshottablePath);
- // Now get the FileSystem instance that's being tested
- FileSystem fs = this.getHttpFSFileSystem();
- if (snapshotName == null) {
- fs.createSnapshot(snapshottablePath);
- } else {
- fs.createSnapshot(snapshottablePath, snapshotName);
- }
- Path snapshotsDir = new Path("/tmp/tmp-snap-test/.snapshot");
- FileStatus[] snapshotItems = fs.listStatus(snapshotsDir);
- assertTrue("Should have exactly one snapshot.",
- snapshotItems.length == 1);
- String resultingSnapName = snapshotItems[0].getPath().getName();
- if (snapshotName == null) {
- assertTrue("Snapshot auto generated name not matching pattern",
- Pattern.matches("(s)(\\d{8})(-)(\\d{6})(\\.)(\\d{3})",
- resultingSnapName));
- } else {
- assertTrue("Snapshot name is not same as passed name.",
- snapshotName.equals(resultingSnapName));
- }
- cleanSnapshotTests(snapshottablePath, resultingSnapName);
- }
- }
-
- private void testCreateSnapshot() throws Exception {
- testCreateSnapshot(null);
- testCreateSnapshot("snap-with-name");
- }
-
- private void createSnapshotTestsPreconditions(Path snapshottablePath,
- boolean allowSnapshot) throws Exception {
- // Needed to get a DistributedFileSystem instance, in order to
- // call allowSnapshot on the newly created directory
- DistributedFileSystem distributedFs = (DistributedFileSystem)
- FileSystem.get(snapshottablePath.toUri(), this.getProxiedFSConf());
- distributedFs.mkdirs(snapshottablePath);
- if (allowSnapshot) {
- distributedFs.allowSnapshot(snapshottablePath);
- }
- Path subdirPath = new Path(snapshottablePath, "subdir");
- distributedFs.mkdirs(subdirPath);
- }
-
- private void createSnapshotTestsPreconditions(Path snapshottablePath)
- throws Exception {
- // Allow snapshot by default for snapshot test
- createSnapshotTestsPreconditions(snapshottablePath, true);
- }
-
- private void cleanSnapshotTests(Path snapshottablePath,
- String resultingSnapName) throws Exception {
- DistributedFileSystem distributedFs = (DistributedFileSystem)
- FileSystem.get(snapshottablePath.toUri(), this.getProxiedFSConf());
- distributedFs.deleteSnapshot(snapshottablePath, resultingSnapName);
- distributedFs.delete(snapshottablePath, true);
- }
-
- private void testRenameSnapshot() throws Exception {
- if (!this.isLocalFS()) {
- Path snapshottablePath = new Path("/tmp/tmp-snap-test");
- createSnapshotTestsPreconditions(snapshottablePath);
- // Now get the FileSystem instance that's being tested
- FileSystem fs = this.getHttpFSFileSystem();
- fs.createSnapshot(snapshottablePath, "snap-to-rename");
- fs.renameSnapshot(snapshottablePath, "snap-to-rename",
- "snap-new-name");
- Path snapshotsDir = new Path("/tmp/tmp-snap-test/.snapshot");
- FileStatus[] snapshotItems = fs.listStatus(snapshotsDir);
- assertTrue("Should have exactly one snapshot.",
- snapshotItems.length == 1);
- String resultingSnapName = snapshotItems[0].getPath().getName();
- assertTrue("Snapshot name is not same as passed name.",
- "snap-new-name".equals(resultingSnapName));
- cleanSnapshotTests(snapshottablePath, resultingSnapName);
- }
- }
-
- private void testDeleteSnapshot() throws Exception {
- if (!this.isLocalFS()) {
- Path snapshottablePath = new Path("/tmp/tmp-snap-test");
- createSnapshotTestsPreconditions(snapshottablePath);
- // Now get the FileSystem instance that's being tested
- FileSystem fs = this.getHttpFSFileSystem();
- fs.createSnapshot(snapshottablePath, "snap-to-delete");
- Path snapshotsDir = new Path("/tmp/tmp-snap-test/.snapshot");
- FileStatus[] snapshotItems = fs.listStatus(snapshotsDir);
- assertTrue("Should have exactly one snapshot.",
- snapshotItems.length == 1);
- fs.deleteSnapshot(snapshottablePath, "snap-to-delete");
- snapshotItems = fs.listStatus(snapshotsDir);
- assertTrue("There should be no snapshot anymore.",
- snapshotItems.length == 0);
- fs.delete(snapshottablePath, true);
- }
- }
-
- private void testAllowSnapshot() throws Exception {
- if (!this.isLocalFS()) {
- // Create a directory with snapshot disallowed
- Path path = new Path("/tmp/tmp-snap-test");
- createSnapshotTestsPreconditions(path, false);
- // Get the FileSystem instance that's being tested
- FileSystem fs = this.getHttpFSFileSystem();
- // Check FileStatus
- assertFalse("Snapshot should be disallowed by default",
- fs.getFileStatus(path).isSnapshotEnabled());
- // Allow snapshot
- if (fs instanceof HttpFSFileSystem) {
- HttpFSFileSystem httpFS = (HttpFSFileSystem) fs;
- httpFS.allowSnapshot(path);
- } else if (fs instanceof WebHdfsFileSystem) {
- WebHdfsFileSystem webHdfsFileSystem = (WebHdfsFileSystem) fs;
- webHdfsFileSystem.allowSnapshot(path);
- } else {
- Assert.fail(fs.getClass().getSimpleName() +
- " doesn't support allowSnapshot");
- }
- // Check FileStatus
- assertTrue("allowSnapshot failed",
- fs.getFileStatus(path).isSnapshotEnabled());
- // Cleanup
- fs.delete(path, true);
- }
- }
-
- private void testDisallowSnapshot() throws Exception {
- if (!this.isLocalFS()) {
- // Create a directory with snapshot allowed
- Path path = new Path("/tmp/tmp-snap-test");
- createSnapshotTestsPreconditions(path);
- // Get the FileSystem instance that's being tested
- FileSystem fs = this.getHttpFSFileSystem();
- // Check FileStatus
- assertTrue("Snapshot should be allowed by DFS",
- fs.getFileStatus(path).isSnapshotEnabled());
- // Disallow snapshot
- if (fs instanceof HttpFSFileSystem) {
- HttpFSFileSystem httpFS = (HttpFSFileSystem) fs;
- httpFS.disallowSnapshot(path);
- } else if (fs instanceof WebHdfsFileSystem) {
- WebHdfsFileSystem webHdfsFileSystem = (WebHdfsFileSystem) fs;
- webHdfsFileSystem.disallowSnapshot(path);
- } else {
- Assert.fail(fs.getClass().getSimpleName() +
- " doesn't support disallowSnapshot");
- }
- // Check FileStatus
- assertFalse("disallowSnapshot failed",
- fs.getFileStatus(path).isSnapshotEnabled());
- // Cleanup
- fs.delete(path, true);
- }
- }
-
- private void testDisallowSnapshotException() throws Exception {
- if (!this.isLocalFS()) {
- // Create a directory with snapshot allowed
- Path path = new Path("/tmp/tmp-snap-test");
- createSnapshotTestsPreconditions(path);
- // Get the FileSystem instance that's being tested
- FileSystem fs = this.getHttpFSFileSystem();
- // Check FileStatus
- assertTrue("Snapshot should be allowed by DFS",
- fs.getFileStatus(path).isSnapshotEnabled());
- // Create some snapshots
- fs.createSnapshot(path, "snap-01");
- fs.createSnapshot(path, "snap-02");
- // Disallow snapshot
- boolean disallowSuccess = false;
- if (fs instanceof HttpFSFileSystem) {
- HttpFSFileSystem httpFS = (HttpFSFileSystem) fs;
- try {
- httpFS.disallowSnapshot(path);
- disallowSuccess = true;
- } catch (SnapshotException e) {
- // Expect SnapshotException
- }
- } else if (fs instanceof WebHdfsFileSystem) {
- WebHdfsFileSystem webHdfsFileSystem = (WebHdfsFileSystem) fs;
- try {
- webHdfsFileSystem.disallowSnapshot(path);
- disallowSuccess = true;
- } catch (SnapshotException e) {
- // Expect SnapshotException
- }
- } else {
- Assert.fail(fs.getClass().getSimpleName() +
- " doesn't support disallowSnapshot");
- }
- if (disallowSuccess) {
- Assert.fail("disallowSnapshot doesn't throw SnapshotException when "
- + "disallowing snapshot on a directory with at least one snapshot");
- }
- // Check FileStatus, should still be enabled since
- // disallow snapshot should fail
- assertTrue("disallowSnapshot should not have succeeded",
- fs.getFileStatus(path).isSnapshotEnabled());
- // Cleanup
- fs.deleteSnapshot(path, "snap-02");
- fs.deleteSnapshot(path, "snap-01");
- fs.delete(path, true);
- }
- }
-
- private void testGetSnapshotDiff() throws Exception {
- if (!this.isLocalFS()) {
- // Create a directory with snapshot allowed
- Path path = new Path("/tmp/tmp-snap-test");
- createSnapshotTestsPreconditions(path);
- // Get the FileSystem instance that's being tested
- FileSystem fs = this.getHttpFSFileSystem();
- // Check FileStatus
- Assert.assertTrue(fs.getFileStatus(path).isSnapshotEnabled());
- // Create a file and take a snapshot
- Path file1 = new Path(path, "file1");
- testCreate(file1, false);
- fs.createSnapshot(path, "snap1");
- // Create another file and take a snapshot
- Path file2 = new Path(path, "file2");
- testCreate(file2, false);
- fs.createSnapshot(path, "snap2");
- // Get snapshot diff
- SnapshotDiffReport diffReport = null;
- if (fs instanceof HttpFSFileSystem) {
- HttpFSFileSystem httpFS = (HttpFSFileSystem) fs;
- diffReport = httpFS.getSnapshotDiffReport(path, "snap1", "snap2");
- } else if (fs instanceof WebHdfsFileSystem) {
- WebHdfsFileSystem webHdfsFileSystem = (WebHdfsFileSystem) fs;
- diffReport = webHdfsFileSystem.getSnapshotDiffReport(path,
- "snap1", "snap2");
- } else {
- Assert.fail(fs.getClass().getSimpleName() +
- " doesn't support getSnapshotDiff");
- }
- // Verify result with DFS
- DistributedFileSystem dfs = (DistributedFileSystem)
- FileSystem.get(path.toUri(), this.getProxiedFSConf());
- SnapshotDiffReport dfsDiffReport =
- dfs.getSnapshotDiffReport(path, "snap1", "snap2");
- Assert.assertEquals(diffReport.toString(), dfsDiffReport.toString());
- // Cleanup
- fs.deleteSnapshot(path, "snap2");
- fs.deleteSnapshot(path, "snap1");
- fs.delete(path, true);
- }
- }
-
- private void testGetSnapshotDiffIllegalParamCase(FileSystem fs, Path path,
- String oldsnapshotname, String snapshotname) throws IOException {
- try {
- if (fs instanceof HttpFSFileSystem) {
- HttpFSFileSystem httpFS = (HttpFSFileSystem) fs;
- httpFS.getSnapshotDiffReport(path, oldsnapshotname, snapshotname);
- } else if (fs instanceof WebHdfsFileSystem) {
- WebHdfsFileSystem webHdfsFileSystem = (WebHdfsFileSystem) fs;
- webHdfsFileSystem.getSnapshotDiffReport(path, oldsnapshotname,
- snapshotname);
- } else {
- Assert.fail(fs.getClass().getSimpleName() +
- " doesn't support getSnapshotDiff");
- }
- } catch (SnapshotException|IllegalArgumentException|RemoteException e) {
- // Expect SnapshotException, IllegalArgumentException
- // or RemoteException(IllegalArgumentException)
- if (e instanceof RemoteException) {
- // Check RemoteException class name, should be IllegalArgumentException
- Assert.assertEquals(IllegalArgumentException.class.getName(),
- ((RemoteException) e).getClassName());
- }
- return;
- }
- Assert.fail("getSnapshotDiff illegal param didn't throw Exception");
- }
-
- private void testGetSnapshotDiffIllegalParam() throws Exception {
- if (!this.isLocalFS()) {
- // Create a directory with snapshot allowed
- Path path = new Path("/tmp/tmp-snap-test");
- createSnapshotTestsPreconditions(path);
- // Get the FileSystem instance that's being tested
- FileSystem fs = this.getHttpFSFileSystem();
- // Check FileStatus
- assertTrue("Snapshot should be allowed by DFS",
- fs.getFileStatus(path).isSnapshotEnabled());
- // Get snapshot diff
- testGetSnapshotDiffIllegalParamCase(fs, path, "", "");
- testGetSnapshotDiffIllegalParamCase(fs, path, "snap1", "");
- testGetSnapshotDiffIllegalParamCase(fs, path, "", "snap2");
- testGetSnapshotDiffIllegalParamCase(fs, path, "snap1", "snap2");
- // Cleanup
- fs.delete(path, true);
- }
- }
-
- private void verifyGetSnapshottableDirListing(
- FileSystem fs, DistributedFileSystem dfs) throws Exception {
- // Get snapshottable directory list
- SnapshottableDirectoryStatus[] sds = null;
- if (fs instanceof HttpFSFileSystem) {
- HttpFSFileSystem httpFS = (HttpFSFileSystem) fs;
- sds = httpFS.getSnapshottableDirectoryList();
- } else if (fs instanceof WebHdfsFileSystem) {
- WebHdfsFileSystem webHdfsFileSystem = (WebHdfsFileSystem) fs;
- sds = webHdfsFileSystem.getSnapshottableDirectoryList();
- } else {
- Assert.fail(fs.getClass().getSimpleName() +
- " doesn't support getSnapshottableDirListing");
- }
- // Verify result with DFS
- SnapshottableDirectoryStatus[] dfssds = dfs.getSnapshottableDirListing();
- Assert.assertEquals(JsonUtil.toJsonString(sds),
- JsonUtil.toJsonString(dfssds));
- }
-
- private void testGetSnapshottableDirListing() throws Exception {
- if (!this.isLocalFS()) {
- FileSystem fs = this.getHttpFSFileSystem();
- // Create directories with snapshot allowed
- Path path1 = new Path("/tmp/tmp-snap-dirlist-test-1");
- DistributedFileSystem dfs = (DistributedFileSystem)
- FileSystem.get(path1.toUri(), this.getProxiedFSConf());
- // Verify response when there is no snapshottable directory
- verifyGetSnapshottableDirListing(fs, dfs);
- createSnapshotTestsPreconditions(path1);
- Assert.assertTrue(fs.getFileStatus(path1).isSnapshotEnabled());
- // Verify response when there is one snapshottable directory
- verifyGetSnapshottableDirListing(fs, dfs);
- Path path2 = new Path("/tmp/tmp-snap-dirlist-test-2");
- createSnapshotTestsPreconditions(path2);
- Assert.assertTrue(fs.getFileStatus(path2).isSnapshotEnabled());
- // Verify response when there are two snapshottable directories
- verifyGetSnapshottableDirListing(fs, dfs);
-
- // Clean up and verify
- fs.delete(path2, true);
- verifyGetSnapshottableDirListing(fs, dfs);
- fs.delete(path1, true);
- verifyGetSnapshottableDirListing(fs, dfs);
- }
- }
-
- private void testFileAclsCustomizedUserAndGroupNames() throws Exception {
- if (isLocalFS()) {
- return;
- }
-
- // Get appropriate conf from the cluster
- MiniDFSCluster miniDFSCluster = ((TestHdfsHelper) hdfsTestHelper)
- .getMiniDFSCluster();
- Configuration conf = miniDFSCluster.getConfiguration(0);
- // If we call getHttpFSFileSystem() without conf from the mini cluster,
- // WebHDFS will be initialized with the default ACL string, causing the
- // setAcl() later to fail. This is only an issue in the unit test.
- FileSystem httpfs = getHttpFSFileSystem(conf);
- if (!(httpfs instanceof WebHdfsFileSystem)
- && !(httpfs instanceof HttpFSFileSystem)) {
- Assert.fail(httpfs.getClass().getSimpleName() +
- " doesn't support custom user and group name pattern. "
- + "Only WebHdfsFileSystem and HttpFSFileSystem support it.");
- }
- final String aclUser = "user:123:rwx";
- final String aclGroup = "group:foo@bar:r--";
- final String aclSet = "user::rwx," + aclUser + ",group::r--," +
- aclGroup + ",other::r--";
- final String dir = "/aclFileTestCustom";
- // Create test file
- FileSystem proxyFs = FileSystem.get(conf);
- proxyFs.mkdirs(new Path(dir));
- Path path = new Path(dir, "/testACL");
- OutputStream os = proxyFs.create(path);
- os.write(1);
- os.close();
- // Set ACL
- httpfs.setAcl(path, AclEntry.parseAclSpec(aclSet, true));
- // Verify getAclStatus responses are the same
- AclStatus proxyAclStat = proxyFs.getAclStatus(path);
- AclStatus httpfsAclStat = httpfs.getAclStatus(path);
- assertSameAcls(httpfsAclStat, proxyAclStat);
- assertSameAcls(httpfs, proxyFs, path);
- // Verify that custom user and group are set.
- List<String> strEntries = new ArrayList<>();
- for (AclEntry aclEntry : httpfsAclStat.getEntries()) {
- strEntries.add(aclEntry.toStringStable());
- }
- Assert.assertTrue(strEntries.contains(aclUser));
- Assert.assertTrue(strEntries.contains(aclGroup));
- // Clean up
- proxyFs.delete(new Path(dir), true);
- }
-
- private void verifyGetServerDefaults(FileSystem fs, DistributedFileSystem dfs)
- throws Exception {
- FsServerDefaults sds = null;
- if (fs instanceof HttpFSFileSystem) {
- HttpFSFileSystem httpFS = (HttpFSFileSystem) fs;
- sds = httpFS.getServerDefaults();
- } else if (fs instanceof WebHdfsFileSystem) {
- WebHdfsFileSystem webHdfsFileSystem = (WebHdfsFileSystem) fs;
- sds = webHdfsFileSystem.getServerDefaults();
- } else {
- Assert.fail(
- fs.getClass().getSimpleName() + " doesn't support getServerDefaults");
- }
- // Verify result with DFS
- FsServerDefaults dfssds = dfs.getServerDefaults();
- Assert.assertEquals(JsonUtil.toJsonString(sds),
- JsonUtil.toJsonString(dfssds));
- }
-
- private void testGetServerDefaults() throws Exception {
- if (!this.isLocalFS()) {
- FileSystem fs = this.getHttpFSFileSystem();
- Path path1 = new Path("/");
- DistributedFileSystem dfs = (DistributedFileSystem) FileSystem
- .get(path1.toUri(), this.getProxiedFSConf());
- verifyGetServerDefaults(fs, dfs);
- }
- }
-
- private void testAccess() throws Exception {
- if (!this.isLocalFS()) {
- FileSystem fs = this.getHttpFSFileSystem();
- Path path1 = new Path("/");
- DistributedFileSystem dfs = (DistributedFileSystem) FileSystem
- .get(path1.toUri(), this.getProxiedFSConf());
- verifyAccess(fs, dfs);
- }
- }
-
- private void verifyAccess(FileSystem fs, DistributedFileSystem dfs)
- throws Exception {
- Path p1 = new Path("/p1");
- dfs.mkdirs(p1);
- dfs.setOwner(p1, "user1", "group1");
- dfs.setPermission(p1, new FsPermission((short) 0444));
-
- if (fs instanceof HttpFSFileSystem) {
- HttpFSFileSystem httpFS = (HttpFSFileSystem) fs;
- httpFS.access(p1, FsAction.READ);
- } else if (fs instanceof WebHdfsFileSystem) {
- WebHdfsFileSystem webHdfsFileSystem = (WebHdfsFileSystem) fs;
- webHdfsFileSystem.access(p1, FsAction.READ);
- } else {
- Assert.fail(fs.getClass().getSimpleName() + " doesn't support access");
- }
- }
-
- private void testErasureCodingPolicy() throws Exception {
- if (!this.isLocalFS()) {
- FileSystem fs = this.getHttpFSFileSystem();
- Path path1 = new Path("/");
- DistributedFileSystem dfs = (DistributedFileSystem) FileSystem
- .get(path1.toUri(), this.getProxiedFSConf());
- final String dir = "/ecPolicyTest";
- Path p1 = new Path(dir);
-
- final ErasureCodingPolicy ecPolicy = SystemErasureCodingPolicies
- .getByID(SystemErasureCodingPolicies.RS_3_2_POLICY_ID);
- final String ecPolicyName = ecPolicy.getName();
- dfs.mkdirs(new Path(dir));
- dfs.enableErasureCodingPolicy(ecPolicyName);
-
- if (fs instanceof HttpFSFileSystem) {
- HttpFSFileSystem httpFS = (HttpFSFileSystem) fs;
- httpFS.setErasureCodingPolicy(p1, ecPolicyName);
- ErasureCodingPolicy ecPolicy1 = httpFS.getErasureCodingPolicy(p1);
- assertEquals(ecPolicy, ecPolicy1);
- httpFS.unsetErasureCodingPolicy(p1);
- ecPolicy1 = httpFS.getErasureCodingPolicy(p1);
- Assert.assertNull(ecPolicy1);
- } else if (fs instanceof WebHdfsFileSystem) {
- WebHdfsFileSystem webHdfsFileSystem = (WebHdfsFileSystem) fs;
- webHdfsFileSystem.setErasureCodingPolicy(p1, ecPolicyName);
- ErasureCodingPolicy ecPolicy1 =
- webHdfsFileSystem.getErasureCodingPolicy(p1);
- assertEquals(ecPolicy, ecPolicy1);
- webHdfsFileSystem.unsetErasureCodingPolicy(p1);
- ecPolicy1 = dfs.getErasureCodingPolicy(p1);
- Assert.assertNull(ecPolicy1);
- } else {
- Assert.fail(fs.getClass().getSimpleName() + " doesn't support erasure coding policies");
- }
- }
- }
-
- public void testStoragePolicySatisfier() throws Exception {
- final String dir = "/parent";
- Path path1 = new Path(dir);
- String file = "/parent/file";
- Path filePath = new Path(file);
- if (!this.isLocalFS()) {
- FileSystem fs = this.getHttpFSFileSystem();
- DistributedFileSystem dfs = (DistributedFileSystem) FileSystem
- .get(path1.toUri(), this.getProxiedFSConf());
- dfs.mkdirs(path1);
- dfs.create(filePath).close();
- dfs.setStoragePolicy(filePath, HdfsConstants.COLD_STORAGE_POLICY_NAME);
- BlockStoragePolicy storagePolicy =
- (BlockStoragePolicy) dfs.getStoragePolicy(filePath);
- assertEquals(HdfsConstants.COLD_STORAGE_POLICY_NAME,
- storagePolicy.getName());
- Map<String, byte[]> xAttrs;
- if (fs instanceof HttpFSFileSystem) {
- HttpFSFileSystem httpFS = (HttpFSFileSystem) fs;
- httpFS.satisfyStoragePolicy(path1);
- xAttrs = httpFS.getXAttrs(path1);
- assertTrue(xAttrs
- .containsKey(HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY));
- } else if (fs instanceof WebHdfsFileSystem) {
- WebHdfsFileSystem webHdfsFileSystem = (WebHdfsFileSystem) fs;
- webHdfsFileSystem.satisfyStoragePolicy(path1);
- xAttrs = webHdfsFileSystem.getXAttrs(path1);
- assertTrue(xAttrs
- .containsKey(HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY));
- } else {
- Assert.fail(fs.getClass().getSimpleName() + " doesn't support satisfyStoragePolicy");
- }
- dfs.delete(path1, true);
- }
- }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFWithSWebhdfsFileSystem.java b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFWithSWebhdfsFileSystem.java
deleted file mode 100644
index d53bb50..0000000
--- a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFWithSWebhdfsFileSystem.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.http.client;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
-import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.TestJettyHelper;
-import org.junit.AfterClass;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import java.io.File;
-import java.net.URI;
-import java.net.URL;
-import java.util.UUID;
-
-@RunWith(value = Parameterized.class)
-public class TestHttpFSFWithSWebhdfsFileSystem
- extends TestHttpFSWithHttpFSFileSystem {
- private static String classpathDir;
- private static final String BASEDIR =
- GenericTestUtils.getTempPath(UUID.randomUUID().toString());
- private static String keyStoreDir;
-
- private static Configuration sslConf;
-
- {
- URL url = Thread.currentThread().getContextClassLoader().
- getResource("classutils.txt");
- classpathDir = url.toExternalForm();
- if (classpathDir.startsWith("file:")) {
- classpathDir = classpathDir.substring("file:".length());
- classpathDir = classpathDir.substring(0,
- classpathDir.length() - "/classutils.txt".length());
- } else {
- throw new RuntimeException("Cannot find test classes dir");
- }
- File base = new File(BASEDIR);
- FileUtil.fullyDelete(base);
- base.mkdirs();
- keyStoreDir = new File(BASEDIR).getAbsolutePath();
- try {
- sslConf = new Configuration();
- KeyStoreTestUtil.setupSSLConfig(keyStoreDir, classpathDir, sslConf, false);
- } catch (Exception ex) {
- throw new RuntimeException(ex);
- }
- jettyTestHelper = new TestJettyHelper("jks", keyStoreDir + "/serverKS.jks",
- "serverP");
- }
-
- @AfterClass
- public static void cleanUp() throws Exception {
- new File(classpathDir, "ssl-client.xml").delete();
- new File(classpathDir, "ssl-server.xml").delete();
- KeyStoreTestUtil.cleanupSSLConfig(keyStoreDir, classpathDir);
- }
-
- public TestHttpFSFWithSWebhdfsFileSystem(Operation operation) {
- super(operation);
- }
-
- @Override
- protected Class getFileSystemClass() {
- return SWebHdfsFileSystem.class;
- }
-
- @Override
- protected String getScheme() {
- return "swebhdfs";
- }
-
- @Override
- protected FileSystem getHttpFSFileSystem() throws Exception {
- Configuration conf = new Configuration(sslConf);
- conf.set("fs.swebhdfs.impl", getFileSystemClass().getName());
- URI uri = new URI("swebhdfs://" +
- TestJettyHelper.getJettyURL().toURI().getAuthority());
- return FileSystem.get(uri, conf);
- }
-
-}
diff --git a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFWithWebhdfsFileSystem.java b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFWithWebhdfsFileSystem.java
deleted file mode 100644
index cf1e4f1..0000000
--- a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFWithWebhdfsFileSystem.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.http.client;
-
-import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-@RunWith(value = Parameterized.class)
-public class TestHttpFSFWithWebhdfsFileSystem
- extends TestHttpFSWithHttpFSFileSystem {
-
- public TestHttpFSFWithWebhdfsFileSystem(Operation operation) {
- super(operation);
- }
-
- @Override
- protected Class getFileSystemClass() {
- return WebHdfsFileSystem.class;
- }
-
-}
diff --git a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystemLocalFileSystem.java b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystemLocalFileSystem.java
deleted file mode 100644
index 83bcb2e..0000000
--- a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystemLocalFileSystem.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.http.client;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.TestDirHelper;
-import org.junit.Assert;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import java.io.File;
-import java.net.URI;
-import java.net.URISyntaxException;
-
-@RunWith(value = Parameterized.class)
-public class TestHttpFSFileSystemLocalFileSystem extends BaseTestHttpFSWith {
-
- private static String PATH_PREFIX;
-
- static {
- new TestDirHelper();
- File file = GenericTestUtils.getTestDir("local");
- file.mkdirs();
- PATH_PREFIX = file.getAbsolutePath();
- }
-
- public TestHttpFSFileSystemLocalFileSystem(Operation operation) {
- super(operation);
- }
-
- @Override
- protected Path getProxiedFSTestDir() {
- return addPrefix(new Path(TestDirHelper.getTestDir().getAbsolutePath()));
- }
-
- @Override
- protected String getProxiedFSURI() {
- return "file:///";
- }
-
- @Override
- protected Configuration getProxiedFSConf() {
- Configuration conf = new Configuration(false);
- conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, getProxiedFSURI());
- return conf;
- }
-
- protected Path addPrefix(Path path) {
- return Path.mergePaths(new Path(PATH_PREFIX), path);
- }
-
- @Override
- protected void testSetPermission() throws Exception {
- if (Path.WINDOWS) {
- FileSystem fs = FileSystem.get(getProxiedFSConf());
- Path path = new Path(getProxiedFSTestDir(), "foodir");
- fs.mkdirs(path);
-
- fs = getHttpFSFileSystem();
- FsPermission permission1 = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
- fs.setPermission(path, permission1);
- fs.close();
-
- fs = FileSystem.get(getProxiedFSConf());
- FileStatus status1 = fs.getFileStatus(path);
- fs.close();
- FsPermission permission2 = status1.getPermission();
- Assert.assertEquals(permission2, permission1);
-
- // sticky bit not supported on Windows with local file system, so the
- // subclass skips that part of the test
- } else {
- super.testSetPermission();
- }
- }
-}
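
The Windows-specific override above reduces to a plain set/get permission round trip. A minimal sketch against the local file system (the path is illustrative; on Windows the sticky-bit portion would be skipped, as the comment notes):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class PermissionRoundTrip {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration(false);
        conf.set("fs.defaultFS", "file:///");
        FileSystem fs = FileSystem.get(conf);
        Path dir = new Path("/tmp/perm-demo");  // illustrative path
        fs.mkdirs(dir);
        // rw------- : owner read/write, no group or other access.
        fs.setPermission(dir, new FsPermission(
            FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE));
        System.out.println(fs.getFileStatus(dir).getPermission());
        fs.close();
      }
    }
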
diff --git a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSWithHttpFSFileSystem.java b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSWithHttpFSFileSystem.java
deleted file mode 100644
index b211e9a..0000000
--- a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSWithHttpFSFileSystem.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.http.client;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.test.TestHdfsHelper;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-@RunWith(value = Parameterized.class)
-public class TestHttpFSWithHttpFSFileSystem extends BaseTestHttpFSWith {
-
- public TestHttpFSWithHttpFSFileSystem(Operation operation) {
- super(operation);
- }
-
- @Override
- protected Class getFileSystemClass() {
- return HttpFSFileSystem.class;
- }
-
- @Override
- protected Path getProxiedFSTestDir() {
- return TestHdfsHelper.getHdfsTestDir();
- }
-
- @Override
- protected String getProxiedFSURI() {
- return TestHdfsHelper.getHdfsConf().get(
- CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
- }
-
- @Override
- protected Configuration getProxiedFSConf() {
- return TestHdfsHelper.getHdfsConf();
- }
-
-}
diff --git a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandlerForTesting.java b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandlerForTesting.java
deleted file mode 100644
index 8ac7da3..0000000
--- a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandlerForTesting.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.http.server;
-
-import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticationHandler;
-
-import javax.servlet.ServletException;
-import java.util.Properties;
-
-public class HttpFSKerberosAuthenticationHandlerForTesting
- extends KerberosDelegationTokenAuthenticationHandler {
-
- @Override
- public void init(Properties config) throws ServletException {
- //NOP overwrite to avoid Kerberos initialization
- initTokenManager(config);
- }
-
- @Override
- public void destroy() {
- //NOP overwrite to avoid Kerberos initialization
- }
-}
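
This handler is never referenced in code; it is selected through configuration, as TestHttpFSServer below does via httpfs.authentication.type. A minimal sketch of that wiring:

    import org.apache.hadoop.conf.Configuration;

    public class AuthHandlerWiring {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        // Substitute the Kerberos-free testing handler for the real one.
        conf.set("httpfs.authentication.type",
            "org.apache.hadoop.fs.http.server."
                + "HttpFSKerberosAuthenticationHandlerForTesting");
        System.out.println(conf.get("httpfs.authentication.type"));
      }
    }
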
diff --git a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/server/TestCheckUploadContentTypeFilter.java b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/server/TestCheckUploadContentTypeFilter.java
deleted file mode 100644
index 947f928..0000000
--- a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/server/TestCheckUploadContentTypeFilter.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.http.server;
-
-import javax.servlet.Filter;
-import javax.servlet.FilterChain;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-
-import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-public class TestCheckUploadContentTypeFilter {
-
- @Test
- public void putUpload() throws Exception {
- test("PUT", HttpFSFileSystem.Operation.CREATE.toString(), "application/octet-stream", true, false);
- }
-
- @Test
- public void postUpload() throws Exception {
- test("POST", HttpFSFileSystem.Operation.APPEND.toString(), "APPLICATION/OCTET-STREAM", true, false);
- }
-
- @Test
- public void putUploadWrong() throws Exception {
- test("PUT", HttpFSFileSystem.Operation.CREATE.toString(), "plain/text", false, false);
- test("PUT", HttpFSFileSystem.Operation.CREATE.toString(), "plain/text", true, true);
- }
-
- @Test
- public void postUploadWrong() throws Exception {
- test("POST", HttpFSFileSystem.Operation.APPEND.toString(), "plain/text", false, false);
- test("POST", HttpFSFileSystem.Operation.APPEND.toString(), "plain/text", true, true);
- }
-
- @Test
- public void getOther() throws Exception {
- test("GET", HttpFSFileSystem.Operation.GETHOMEDIRECTORY.toString(), "plain/text", false, false);
- }
-
- @Test
- public void putOther() throws Exception {
- test("PUT", HttpFSFileSystem.Operation.MKDIRS.toString(), "plain/text", false, false);
- }
-
- private void test(String method, String operation, String contentType,
- boolean upload, boolean error) throws Exception {
- HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
- HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
- Mockito.reset(request);
- Mockito.when(request.getMethod()).thenReturn(method);
- Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).thenReturn(operation);
- Mockito.when(request.getParameter(HttpFSParametersProvider.DataParam.NAME)).
- thenReturn(Boolean.toString(upload));
- Mockito.when(request.getContentType()).thenReturn(contentType);
-
- FilterChain chain = Mockito.mock(FilterChain.class);
-
- Filter filter = new CheckUploadContentTypeFilter();
-
- filter.doFilter(request, response, chain);
-
- if (error) {
- Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
- Mockito.contains("Data upload"));
- }
- else {
- Mockito.verify(chain).doFilter(request, response);
- }
- }
-
-}
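
Seen from the client side, the contract this filter enforced is that data-upload requests (CREATE via PUT, APPEND via POST, with data=true) must carry Content-Type: application/octet-stream. A minimal sketch of a compliant CREATE call, assuming a local HttpFS endpoint and an illustrative user name:

    import java.net.HttpURLConnection;
    import java.net.URL;

    public class UploadContentTypeSketch {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:14000/webhdfs/v1/tmp/demo"
            + "?user.name=alice&op=CREATE&data=true");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("PUT");
        // Without this header the filter answers 400 Bad Request.
        conn.setRequestProperty("Content-Type", "application/octet-stream");
        conn.setDoOutput(true);
        conn.getOutputStream().write(new byte[]{1, 2, 3});
        System.out.println(conn.getResponseCode());
      }
    }
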
diff --git a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSAccessControlled.java b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSAccessControlled.java
deleted file mode 100644
index 47d9935..0000000
--- a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSAccessControlled.java
+++ /dev/null
@@ -1,355 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.http.server;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.test.HTestCase;
-import org.apache.hadoop.test.HadoopUsersConfTestHelper;
-import org.apache.hadoop.test.TestDir;
-import org.apache.hadoop.test.TestDirHelper;
-import org.apache.hadoop.test.TestJetty;
-import org.apache.hadoop.test.TestJettyHelper;
-import org.junit.Assert;
-import org.junit.Test;
-import org.eclipse.jetty.server.Server;
-import org.eclipse.jetty.webapp.WebAppContext;
-
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.io.Writer;
-import java.net.HttpURLConnection;
-import java.net.URL;
-import java.text.MessageFormat;
-
-/**
- * This test class ensures that everything works as expected when
- * the HTTPFS file system is access controlled.
- */
-public class TestHttpFSAccessControlled extends HTestCase {
-
- private MiniDFSCluster miniDfs;
- private Configuration nnConf;
-
- /**
- * Fire up our own hand-rolled MiniDFSCluster. We do this here instead
- * of relying on TestHdfsHelper because we don't want to turn on ACL
- * support.
- *
- * @throws Exception
- */
- private void startMiniDFS() throws Exception {
-
- File testDirRoot = TestDirHelper.getTestDir();
-
- if (System.getProperty("hadoop.log.dir") == null) {
- System.setProperty("hadoop." +
- "log.dir",
- new File(testDirRoot, "hadoop-log").getAbsolutePath());
- }
- if (System.getProperty("test.build.data") == null) {
- System.setProperty("test.build.data",
- new File(testDirRoot, "hadoop-data").getAbsolutePath());
- }
-
- Configuration conf = HadoopUsersConfTestHelper.getBaseConf();
- HadoopUsersConfTestHelper.addUserConf(conf);
- conf.set("fs.hdfs.impl.disable.cache", "true");
- conf.set("dfs.block.access.token.enable", "false");
- conf.set("dfs.permissions", "true");
- conf.set("hadoop.security.authentication", "simple");
-
- MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
- builder.numDataNodes(2);
- miniDfs = builder.build();
- nnConf = miniDfs.getConfiguration(0);
- }
-
- /**
- * Create an HttpFS Server to talk to the MiniDFSCluster we created.
- * @throws Exception
- */
- private void createHttpFSServer() throws Exception {
- File homeDir = TestDirHelper.getTestDir();
- Assert.assertTrue(new File(homeDir, "conf").mkdir());
- Assert.assertTrue(new File(homeDir, "log").mkdir());
- Assert.assertTrue(new File(homeDir, "temp").mkdir());
- HttpFSServerWebApp.setHomeDirForCurrentThread(homeDir.getAbsolutePath());
-
- File secretFile = new File(new File(homeDir, "conf"), "secret");
- Writer w = new FileWriter(secretFile);
- w.write("secret");
- w.close();
-
- // HDFS configuration
- File hadoopConfDir = new File(new File(homeDir, "conf"), "hadoop-conf");
- if (!hadoopConfDir.mkdirs()) {
- throw new IOException("Unable to create " + hadoopConfDir);
- }
-
- String fsDefaultName =
- nnConf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
- Configuration conf = new Configuration(false);
- conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName);
-
- File hdfsSite = new File(hadoopConfDir, "hdfs-site.xml");
- OutputStream os = new FileOutputStream(hdfsSite);
- conf.writeXml(os);
- os.close();
-
- // HTTPFS configuration
- conf = new Configuration(false);
- conf.set("httpfs.hadoop.config.dir", hadoopConfDir.toString());
- conf.set("httpfs.proxyuser." +
- HadoopUsersConfTestHelper.getHadoopProxyUser() + ".groups",
- HadoopUsersConfTestHelper.getHadoopProxyUserGroups());
- conf.set("httpfs.proxyuser." +
- HadoopUsersConfTestHelper.getHadoopProxyUser() + ".hosts",
- HadoopUsersConfTestHelper.getHadoopProxyUserHosts());
- conf.set("httpfs.authentication.signature.secret.file",
- secretFile.getAbsolutePath());
-
- File httpfsSite = new File(new File(homeDir, "conf"), "httpfs-site.xml");
- os = new FileOutputStream(httpfsSite);
- conf.writeXml(os);
- os.close();
-
- ClassLoader cl = Thread.currentThread().getContextClassLoader();
- URL url = cl.getResource("webapp");
- if (url == null) {
- throw new IOException("Cannot find webapp resource on classpath");
- }
- WebAppContext context = new WebAppContext(url.getPath(), "/webhdfs");
- Server server = TestJettyHelper.getJettyServer();
- server.setHandler(context);
- server.start();
- }
-
- /**
- * Talks to the http interface to get the json output of a *STATUS command
- * on the given file.
- *
- * @param filename The file to query.
- * @param message Failure message
- * @param command Command to test
- * @param expectOK Is this operation expected to succeed?
- * @throws Exception
- */
- private void getCmd(String filename, String message, String command, boolean expectOK)
- throws Exception {
- String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
- String outMsg = message + " (" + command + ")";
- // Remove leading / from filename
- if ( filename.charAt(0) == '/' ) {
- filename = filename.substring(1);
- }
- String pathOps = MessageFormat.format(
- "/webhdfs/v1/{0}?user.name={1}&op={2}",
- filename, user, command);
- URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
- HttpURLConnection conn = (HttpURLConnection) url.openConnection();
- conn.setRequestMethod("GET");
- conn.connect();
- int resp = conn.getResponseCode();
- if ( expectOK ) {
- Assert.assertEquals( outMsg, HttpURLConnection.HTTP_OK, resp);
- } else {
- Assert.assertEquals(outMsg, HttpURLConnection.HTTP_FORBIDDEN, resp);
- }
- }
-
- /**
- * General-purpose http PUT command to the httpfs server.
- * @param filename The file to operate upon
- * @param message Failure message
- * @param command The command to perform (SETPERMISSION, etc)
- * @param params Parameters to command
- * @param expectOK Is this operation expected to succeed?
- */
- private void putCmd(String filename, String message, String command,
- String params, boolean expectOK) throws Exception {
- String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
- String outMsg = message + " (" + command + ")";
- // Remove leading / from filename
- if ( filename.charAt(0) == '/' ) {
- filename = filename.substring(1);
- }
- String pathOps = MessageFormat.format(
- "/webhdfs/v1/{0}?user.name={1}{2}{3}&op={4}",
- filename, user, (params == null) ? "" : "&",
- (params == null) ? "" : params, command);
- URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
- HttpURLConnection conn = (HttpURLConnection) url.openConnection();
- conn.setRequestMethod("PUT");
- conn.connect();
- int resp = conn.getResponseCode();
- if ( expectOK ) {
- Assert.assertEquals(outMsg, HttpURLConnection.HTTP_OK, resp);
- } else {
- Assert.assertEquals(outMsg, HttpURLConnection.HTTP_FORBIDDEN, resp);
- }
- }
-
- /**
- * General-purpose http DELETE command to the httpfs server.
- * @param filename The file to operate upon
- * @param message Failure message
- * @param command The command to perform (SETPERMISSION, etc)
- * @param params Parameters to command
- * @param expectOK Is this operation expected to succeed?
- */
- private void deleteCmd(String filename, String message, String command,
- String params, boolean expectOK) throws Exception {
- String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
- String outMsg = message + " (" + command + ")";
- // Remove leading / from filename
- if ( filename.charAt(0) == '/' ) {
- filename = filename.substring(1);
- }
- String pathOps = MessageFormat.format(
- "/webhdfs/v1/{0}?user.name={1}{2}{3}&op={4}",
- filename, user, (params == null) ? "" : "&",
- (params == null) ? "" : params, command);
- URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
- HttpURLConnection conn = (HttpURLConnection) url.openConnection();
- conn.setRequestMethod("DELETE");
- conn.connect();
- int resp = conn.getResponseCode();
- if ( expectOK ) {
- Assert.assertEquals(outMsg, HttpURLConnection.HTTP_OK, resp);
- } else {
- Assert.assertEquals(outMsg, HttpURLConnection.HTTP_FORBIDDEN, resp);
- }
- }
-
- /**
- * General-purpose http POST command to the httpfs server.
- * @param filename The file to operate upon
- * @param message Failure message
- * @param command The command to perform (UNSETSTORAGEPOLICY, etc)
- * @param params Parameters to command"
- * @param expectOK Is this operation expected to succeed?
- */
- private void postCmd(String filename, String message, String command,
- String params, boolean expectOK) throws Exception {
- String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
- String outMsg = message + " (" + command + ")";
- // Remove leading / from filename
- if ( filename.charAt(0) == '/' ) {
- filename = filename.substring(1);
- }
- String pathOps = MessageFormat.format(
- "/webhdfs/v1/{0}?user.name={1}{2}{3}&op={4}",
- filename, user, (params == null) ? "" : "&",
- (params == null) ? "" : params, command);
- URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
- HttpURLConnection conn = (HttpURLConnection) url.openConnection();
- conn.setRequestMethod("POST");
- conn.connect();
- int resp = conn.getResponseCode();
- if ( expectOK ) {
- Assert.assertEquals(outMsg, HttpURLConnection.HTTP_OK, resp);
- } else {
- Assert.assertEquals(outMsg, HttpURLConnection.HTTP_FORBIDDEN, resp);
- }
- }
-
- /**
- * Ensure that
- * <ol>
- * <li>GETFILESTATUS (GET) and LISTSTATUS (GET) work in all modes</li>
- * <li>GETXATTRS (GET) works in read-write and read-only, but write-only returns 403 Forbidden</li>
- * <li>SETPERMISSION (PUT) works in read-write and write-only, but read-only returns 403 Forbidden</li>
- * <li>UNSETSTORAGEPOLICY (POST) works in read-write and write-only, but read-only returns 403 Forbidden</li>
- * <li>DELETE (DELETE) works in read-write and write-only, but read-only returns 403 Forbidden</li>
- * </ol>
- *
- * @throws Exception
- */
- @Test
- @TestDir
- @TestJetty
- public void testAcessControlledFS() throws Exception {
- final String testRwMsg = "Test read-write ";
- final String testRoMsg = "Test read-only ";
- final String testWoMsg = "Test write-only ";
- final String defUser1 = "default:user:glarch:r-x";
- final String dir = "/testAccess";
- final String pathRW = dir + "/foo-rw";
- final String pathWO = dir + "/foo-wo";
- final String pathRO = dir + "/foo-ro";
- final String setPermSpec = "744";
- final String snapshopSpec = "snapshotname=test-snap";
- startMiniDFS();
- createHttpFSServer();
-
- FileSystem fs = FileSystem.get(nnConf);
- fs.mkdirs(new Path(dir));
- OutputStream os = fs.create(new Path(pathRW));
- os.write(1);
- os.close();
-
- os = fs.create(new Path(pathWO));
- os.write(1);
- os.close();
-
- os = fs.create(new Path(pathRO));
- os.write(1);
- os.close();
-
- Configuration conf = HttpFSServerWebApp.get().getConfig();
-
- /* test Read-Write Mode */
- conf.setStrings("httpfs.access.mode", "read-write");
- getCmd(pathRW, testRwMsg + "GET", "GETFILESTATUS", true);
- getCmd(pathRW, testRwMsg + "GET", "LISTSTATUS", true);
- getCmd(pathRW, testRwMsg + "GET", "GETXATTRS", true);
- putCmd(pathRW, testRwMsg + "PUT", "SETPERMISSION", setPermSpec, true);
- postCmd(pathRW, testRwMsg + "POST", "UNSETSTORAGEPOLICY", null, true);
- deleteCmd(pathRW, testRwMsg + "DELETE", "DELETE", null, true);
-
- /* test Write-Only Mode */
- conf.setStrings("httpfs.access.mode", "write-only");
- getCmd(pathWO, testWoMsg + "GET", "GETFILESTATUS", true);
- getCmd(pathWO, testWoMsg + "GET", "LISTSTATUS", true);
- getCmd(pathWO, testWoMsg + "GET", "GETXATTRS", false);
- putCmd(pathWO, testWoMsg + "PUT", "SETPERMISSION", setPermSpec, true);
- postCmd(pathWO, testWoMsg + "POST", "UNSETSTORAGEPOLICY", null, true);
- deleteCmd(pathWO, testWoMsg + "DELETE", "DELETE", null, true);
-
- /* test Read-Only Mode */
- conf.setStrings("httpfs.access.mode", "read-only");
- getCmd(pathRO, testRoMsg + "GET", "GETFILESTATUS", true);
- getCmd(pathRO, testRoMsg + "GET", "LISTSTATUS", true);
- getCmd(pathRO, testRoMsg + "GET", "GETXATTRS", true);
- putCmd(pathRO, testRoMsg + "PUT", "SETPERMISSION", setPermSpec, false);
- postCmd(pathRO, testRoMsg + "POST", "UNSETSTORAGEPOLICY", null, false);
- deleteCmd(pathRO, testRoMsg + "DELETE", "DELETE", null, false);
-
- conf.setStrings("httpfs.access.mode", "read-write");
-
- miniDfs.shutdown();
- }
-}
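
The three modes the test cycles through are plain configuration values; the assertions above encode their semantics. A minimal sketch (key and values exactly as used in the test):

    import org.apache.hadoop.conf.Configuration;

    public class AccessModeSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        // "read-write" (default): GET, PUT, POST and DELETE all pass.
        // "read-only":  GETs pass; PUT, POST and DELETE get 403 Forbidden.
        // "write-only": PUT, POST and DELETE pass; data-reading GETs
        //               such as GETXATTRS get 403 Forbidden.
        conf.setStrings("httpfs.access.mode", "read-only");
        System.out.println(conf.get("httpfs.access.mode"));
      }
    }
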
diff --git a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
deleted file mode 100644
index 2f0ef9a..0000000
--- a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
+++ /dev/null
@@ -1,1951 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.http.server;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
-import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
-import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.web.JsonUtil;
-import org.apache.hadoop.lib.service.FileSystemAccess;
-import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
-import org.apache.hadoop.security.authentication.util.StringSignerSecretProviderCreator;
-import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
-import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
-import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticationHandler;
-import org.json.simple.JSONArray;
-import org.junit.Assert;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.io.OutputStream;
-import java.io.Writer;
-import java.net.HttpURLConnection;
-import java.net.URI;
-import java.net.URL;
-import java.nio.charset.Charset;
-import java.text.MessageFormat;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FsServerDefaults;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.XAttrCodec;
-import org.apache.hadoop.fs.http.client.HttpFSUtils;
-import org.apache.hadoop.fs.http.client.HttpFSFileSystem.Operation;
-import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DataParam;
-import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.NoRedirectParam;
-import org.apache.hadoop.fs.permission.AclEntry;
-import org.apache.hadoop.fs.permission.AclEntryScope;
-import org.apache.hadoop.fs.permission.AclEntryType;
-import org.apache.hadoop.fs.permission.AclStatus;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.hdfs.web.WebHdfsConstants;
-import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
-import org.apache.hadoop.lib.server.Service;
-import org.apache.hadoop.lib.server.ServiceException;
-import org.apache.hadoop.lib.service.Groups;
-import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
-import org.apache.hadoop.security.authentication.server.AuthenticationToken;
-import org.apache.hadoop.security.authentication.util.Signer;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.test.HFSTestCase;
-import org.apache.hadoop.test.HadoopUsersConfTestHelper;
-import org.apache.hadoop.test.LambdaTestUtils;
-import org.apache.hadoop.test.TestDir;
-import org.apache.hadoop.test.TestDirHelper;
-import org.apache.hadoop.test.TestHdfs;
-import org.apache.hadoop.test.TestHdfsHelper;
-import org.apache.hadoop.test.TestJetty;
-import org.apache.hadoop.test.TestJettyHelper;
-import org.json.simple.JSONObject;
-import org.json.simple.parser.JSONParser;
-import org.junit.Test;
-import org.eclipse.jetty.server.Server;
-import org.eclipse.jetty.webapp.WebAppContext;
-
-import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;
-import java.util.Properties;
-import java.util.concurrent.Callable;
-import java.util.regex.Pattern;
-
-import javax.ws.rs.HttpMethod;
-import javax.ws.rs.core.MediaType;
-
-import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
-
-/**
- * Main test class for HttpFSServer.
- */
-public class TestHttpFSServer extends HFSTestCase {
-
- /**
- * Define metric getters for unit tests.
- */
- private static Callable<Long> defaultEntryMetricGetter = () -> 0L;
- private static Callable<Long> defaultExitMetricGetter = () -> 1L;
- private static HashMap<String, Callable<Long>> metricsGetter =
- new HashMap<String, Callable<Long>>() {
- {
- put("LISTSTATUS",
- () -> HttpFSServerWebApp.get().getMetrics().getOpsListing());
- put("MKDIRS",
- () -> HttpFSServerWebApp.get().getMetrics().getOpsMkdir());
- put("GETFILESTATUS",
- () -> HttpFSServerWebApp.get().getMetrics().getOpsStat());
- }
- };
-
- @Test
- @TestDir
- @TestJetty
- public void server() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
-
- Configuration httpfsConf = new Configuration(false);
- HttpFSServerWebApp server = new HttpFSServerWebApp(dir, dir, dir, dir,
- httpfsConf);
- server.init();
- server.destroy();
- }
-
- /**
- * Mock groups.
- */
- public static class MockGroups implements Service, Groups {
-
- @Override
- public void init(org.apache.hadoop.lib.server.Server server)
- throws ServiceException {
- }
-
- @Override
- public void postInit() throws ServiceException {
- }
-
- @Override
- public void destroy() {
- }
-
- @Override
- public Class[] getServiceDependencies() {
- return new Class[0];
- }
-
- @Override
- public Class getInterface() {
- return Groups.class;
- }
-
- @Override
- public void serverStatusChange(
- org.apache.hadoop.lib.server.Server.Status oldStatus,
- org.apache.hadoop.lib.server.Server.Status newStatus)
- throws ServiceException {
- }
-
- @Override
- public List<String> getGroups(String user) throws IOException {
- return Arrays.asList(HadoopUsersConfTestHelper.getHadoopUserGroups(user));
- }
-
- }
-
- private Configuration createHttpFSConf(boolean addDelegationTokenAuthHandler,
- boolean sslEnabled) throws Exception {
- File homeDir = TestDirHelper.getTestDir();
- Assert.assertTrue(new File(homeDir, "conf").mkdir());
- Assert.assertTrue(new File(homeDir, "log").mkdir());
- Assert.assertTrue(new File(homeDir, "temp").mkdir());
- HttpFSServerWebApp.setHomeDirForCurrentThread(homeDir.getAbsolutePath());
-
- File secretFile = new File(new File(homeDir, "conf"), "secret");
- Writer w = new FileWriter(secretFile);
- w.write("secret");
- w.close();
-
- // HDFS configuration
- File hadoopConfDir = new File(new File(homeDir, "conf"), "hadoop-conf");
- hadoopConfDir.mkdirs();
- Configuration hdfsConf = TestHdfsHelper.getHdfsConf();
-
- // Http Server's conf should be based on HDFS's conf
- Configuration conf = new Configuration(hdfsConf);
- conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
- conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
- conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
- StoragePolicySatisfierMode.EXTERNAL.toString());
- File hdfsSite = new File(hadoopConfDir, "hdfs-site.xml");
- OutputStream os = new FileOutputStream(hdfsSite);
- conf.writeXml(os);
- os.close();
-
- // HTTPFS configuration
- conf = new Configuration(false);
- if (addDelegationTokenAuthHandler) {
- conf.set("httpfs.authentication.type",
- HttpFSKerberosAuthenticationHandlerForTesting.class.getName());
- }
- conf.set("httpfs.services.ext", MockGroups.class.getName());
- conf.set("httpfs.admin.group", HadoopUsersConfTestHelper.
- getHadoopUserGroups(HadoopUsersConfTestHelper.getHadoopUsers()[0])[0]);
- conf.set("httpfs.proxyuser." +
- HadoopUsersConfTestHelper.getHadoopProxyUser() + ".groups",
- HadoopUsersConfTestHelper.getHadoopProxyUserGroups());
- conf.set("httpfs.proxyuser." +
- HadoopUsersConfTestHelper.getHadoopProxyUser() + ".hosts",
- HadoopUsersConfTestHelper.getHadoopProxyUserHosts());
- conf.set("httpfs.authentication.signature.secret.file",
- secretFile.getAbsolutePath());
- conf.set("httpfs.hadoop.config.dir", hadoopConfDir.toString());
- if (sslEnabled) {
- conf.set("httpfs.ssl.enabled", "true");
- }
- File httpfsSite = new File(new File(homeDir, "conf"), "httpfs-site.xml");
- os = new FileOutputStream(httpfsSite);
- conf.writeXml(os);
- os.close();
- return conf;
- }
-
- /**
- * Write configuration to a site file under Hadoop configuration dir.
- */
- private void writeConf(Configuration conf, String sitename)
- throws Exception {
- File homeDir = TestDirHelper.getTestDir();
- // HDFS configuration
- File hadoopConfDir = new File(new File(homeDir, "conf"), "hadoop-conf");
- Assert.assertTrue(hadoopConfDir.exists());
-
- File siteFile = new File(hadoopConfDir, sitename);
- OutputStream os = new FileOutputStream(siteFile);
- conf.writeXml(os);
- os.close();
- }
-
- private Server createHttpFSServer(boolean addDelegationTokenAuthHandler,
- boolean sslEnabled)
- throws Exception {
- Configuration conf = createHttpFSConf(addDelegationTokenAuthHandler,
- sslEnabled);
- ClassLoader cl = Thread.currentThread().getContextClassLoader();
- URL url = cl.getResource("webapp");
- WebAppContext context = new WebAppContext(url.getPath(), "/webhdfs");
- Server server = TestJettyHelper.getJettyServer();
- server.setHandler(context);
- server.start();
- if (addDelegationTokenAuthHandler) {
- HttpFSServerWebApp.get().setAuthority(TestJettyHelper.getAuthority());
- }
- return server;
- }
-
- private String getSignedTokenString()
- throws Exception {
- AuthenticationToken token = new AuthenticationToken("u", "p",
- new KerberosDelegationTokenAuthenticationHandler().getType());
- token.setExpires(System.currentTimeMillis() + 100000000);
- SignerSecretProvider secretProvider =
- StringSignerSecretProviderCreator.newStringSignerSecretProvider();
- Properties secretProviderProps = new Properties();
- secretProviderProps.setProperty(
- AuthenticationFilter.SIGNATURE_SECRET, "secret");
- secretProvider.init(secretProviderProps, null, -1);
- Signer signer = new Signer(secretProvider);
- return signer.sign(token.toString());
- }
-
- private void delegationTokenCommonTests(boolean sslEnabled) throws Exception {
- URL url = new URL(TestJettyHelper.getJettyURL(),
- "/webhdfs/v1/?op=GETHOMEDIRECTORY");
- HttpURLConnection conn = (HttpURLConnection) url.openConnection();
- Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,
- conn.getResponseCode());
-
- String tokenSigned = getSignedTokenString();
-
- url = new URL(TestJettyHelper.getJettyURL(),
- "/webhdfs/v1/?op=GETDELEGATIONTOKEN");
- conn = (HttpURLConnection) url.openConnection();
- conn.setRequestProperty("Cookie",
- AuthenticatedURL.AUTH_COOKIE + "=" + tokenSigned);
- Assert.assertEquals(HttpURLConnection.HTTP_OK,
- conn.getResponseCode());
-
- JSONObject json = (JSONObject)new JSONParser().parse(
- new InputStreamReader(conn.getInputStream()));
- json = (JSONObject)
- json.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_JSON);
- String tokenStr = (String)json.get(
- DelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
-
- Token<AbstractDelegationTokenIdentifier> dToken =
- new Token<AbstractDelegationTokenIdentifier>();
- dToken.decodeFromUrlString(tokenStr);
- Assert.assertEquals(sslEnabled ?
- WebHdfsConstants.SWEBHDFS_TOKEN_KIND :
- WebHdfsConstants.WEBHDFS_TOKEN_KIND,
- dToken.getKind());
-
- url = new URL(TestJettyHelper.getJettyURL(),
- "/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" + tokenStr);
- conn = (HttpURLConnection) url.openConnection();
- Assert.assertEquals(HttpURLConnection.HTTP_OK,
- conn.getResponseCode());
-
- url = new URL(TestJettyHelper.getJettyURL(),
- "/webhdfs/v1/?op=RENEWDELEGATIONTOKEN&token=" + tokenStr);
- conn = (HttpURLConnection) url.openConnection();
- conn.setRequestMethod("PUT");
- Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,
- conn.getResponseCode());
-
- url = new URL(TestJettyHelper.getJettyURL(),
- "/webhdfs/v1/?op=RENEWDELEGATIONTOKEN&token=" + tokenStr);
- conn = (HttpURLConnection) url.openConnection();
- conn.setRequestMethod("PUT");
- conn.setRequestProperty("Cookie",
- AuthenticatedURL.AUTH_COOKIE + "=" + tokenSigned);
- Assert.assertEquals(HttpURLConnection.HTTP_OK,
- conn.getResponseCode());
-
- url = new URL(TestJettyHelper.getJettyURL(),
- "/webhdfs/v1/?op=CANCELDELEGATIONTOKEN&token=" + tokenStr);
- conn = (HttpURLConnection) url.openConnection();
- conn.setRequestMethod("PUT");
- Assert.assertEquals(HttpURLConnection.HTTP_OK,
- conn.getResponseCode());
-
- url = new URL(TestJettyHelper.getJettyURL(),
- "/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" + tokenStr);
- conn = (HttpURLConnection) url.openConnection();
- Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN,
- conn.getResponseCode());
-
- // getTrash test with delegation
- url = new URL(TestJettyHelper.getJettyURL(),
- "/webhdfs/v1/?op=GETTRASHROOT&delegation=" + tokenStr);
- conn = (HttpURLConnection) url.openConnection();
- Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN,
- conn.getResponseCode());
-
- url = new URL(TestJettyHelper.getJettyURL(),
- "/webhdfs/v1/?op=GETTRASHROOT");
- conn = (HttpURLConnection) url.openConnection();
- conn.setRequestProperty("Cookie",
- AuthenticatedURL.AUTH_COOKIE + "=" + tokenSigned);
- Assert.assertEquals(HttpURLConnection.HTTP_OK,
- conn.getResponseCode());
- }
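
The round trip above follows a fixed lifecycle: fetch a token, use it, renew (authenticated only), cancel, then observe 403 on reuse. The fetch-and-decode step looks like this in isolation; a minimal sketch assuming an already-authenticated connection (the literal JSON keys are what the DELEGATION_TOKEN_JSON and DELEGATION_TOKEN_URL_STRING_JSON constants above resolve to):

    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    import org.json.simple.JSONObject;
    import org.json.simple.parser.JSONParser;

    public class FetchDelegationToken {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:14000/webhdfs/v1/"
            + "?op=GETDELEGATIONTOKEN");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        // A signed auth cookie must accompany this call; omitted here.
        JSONObject json = (JSONObject) new JSONParser().parse(
            new InputStreamReader(conn.getInputStream()));
        JSONObject token = (JSONObject) json.get("Token");
        System.out.println(token.get("urlString"));
      }
    }
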
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void instrumentation() throws Exception {
- createHttpFSServer(false, false);
-
- URL url = new URL(TestJettyHelper.getJettyURL(),
- MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation",
- "nobody"));
- HttpURLConnection conn = (HttpURLConnection) url.openConnection();
- Assert.assertEquals(conn.getResponseCode(),
- HttpURLConnection.HTTP_UNAUTHORIZED);
-
- url = new URL(TestJettyHelper.getJettyURL(),
- MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation",
- HadoopUsersConfTestHelper.getHadoopUsers()[0]));
- conn = (HttpURLConnection) url.openConnection();
- Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
- BufferedReader reader = new BufferedReader(
- new InputStreamReader(conn.getInputStream()));
- String line = reader.readLine();
- reader.close();
- Assert.assertTrue(line.contains("\"counters\":{"));
-
- url = new URL(TestJettyHelper.getJettyURL(),
- MessageFormat.format(
- "/webhdfs/v1/foo?user.name={0}&op=instrumentation",
- HadoopUsersConfTestHelper.getHadoopUsers()[0]));
- conn = (HttpURLConnection) url.openConnection();
- Assert.assertEquals(conn.getResponseCode(),
- HttpURLConnection.HTTP_BAD_REQUEST);
- }
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testHdfsAccess() throws Exception {
- createHttpFSServer(false, false);
- long oldOpsListStatus =
- metricsGetter.get("LISTSTATUS").call();
- String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
- URL url = new URL(TestJettyHelper.getJettyURL(),
- MessageFormat.format("/webhdfs/v1/?user.name={0}&op=liststatus",
- user));
- HttpURLConnection conn = (HttpURLConnection) url.openConnection();
- Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
- BufferedReader reader = new BufferedReader(
- new InputStreamReader(conn.getInputStream()));
- reader.readLine();
- reader.close();
- Assert.assertEquals(1 + oldOpsListStatus,
- (long) metricsGetter.get("LISTSTATUS").call());
- }
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testMkdirs() throws Exception {
- createHttpFSServer(false, false);
- long oldMkdirOpsStat =
- metricsGetter.get("MKDIRS").call();
- String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
- URL url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format(
- "/webhdfs/v1/tmp/sub-tmp?user.name={0}&op=MKDIRS", user));
- HttpURLConnection conn = (HttpURLConnection) url.openConnection();
- conn.setRequestMethod("PUT");
- conn.connect();
- Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
- getStatus("/tmp/sub-tmp", "LISTSTATUS");
- long opsStat =
- metricsGetter.get("MKDIRS").call();
- Assert.assertEquals(1 + oldMkdirOpsStat, opsStat);
- }
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testGlobFilter() throws Exception {
- createHttpFSServer(false, false);
- long oldOpsListStatus =
- metricsGetter.get("LISTSTATUS").call();
- FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
- fs.mkdirs(new Path("/tmp"));
- fs.create(new Path("/tmp/foo.txt")).close();
-
- String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
- URL url = new URL(TestJettyHelper.getJettyURL(),
- MessageFormat.format(
- "/webhdfs/v1/tmp?user.name={0}&op=liststatus&filter=f*", user));
- HttpURLConnection conn = (HttpURLConnection) url.openConnection();
- Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
- BufferedReader reader = new BufferedReader(
- new InputStreamReader(conn.getInputStream()));
- reader.readLine();
- reader.close();
- Assert.assertEquals(1 + oldOpsListStatus,
- (long) metricsGetter.get("LISTSTATUS").call());
- }
-
- /**
- * Talks to the http interface to create a file.
- *
- * @param filename The file to create
- * @param perms The permission field, if any (may be null)
- * @throws Exception
- */
- private void createWithHttp(String filename, String perms) throws Exception {
- createWithHttp(filename, perms, null);
- }
-
- /**
- * Talks to the http interface to create a file.
- *
- * @param filename The file to create
- * @param perms The permission field, if any (may be null)
- * @param unmaskedPerms The unmaskedPermission field, if any (may be null)
- * @throws Exception
- */
- private void createWithHttp(String filename, String perms,
- String unmaskedPerms) throws Exception {
- String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
- // Remove leading / from filename
- if (filename.charAt(0) == '/') {
- filename = filename.substring(1);
- }
- String pathOps;
- if (perms == null) {
- pathOps = MessageFormat.format(
- "/webhdfs/v1/{0}?user.name={1}&op=CREATE",
- filename, user);
- } else {
- pathOps = MessageFormat.format(
- "/webhdfs/v1/{0}?user.name={1}&permission={2}&op=CREATE",
- filename, user, perms);
- }
- if (unmaskedPerms != null) {
- pathOps = pathOps+"&unmaskedpermission="+unmaskedPerms;
- }
- URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
- HttpURLConnection conn = (HttpURLConnection) url.openConnection();
- conn.addRequestProperty("Content-Type", "application/octet-stream");
- conn.setRequestMethod("PUT");
- conn.connect();
- Assert.assertEquals(HttpURLConnection.HTTP_CREATED, conn.getResponseCode());
- }
-
- /**
- * Talks to the http interface to create a directory.
- *
- * @param dirname The directory to create
- * @param perms The permission field, if any (may be null)
- * @param unmaskedPerms The unmaskedPermission field, if any (may be null)
- * @throws Exception
- */
- private void createDirWithHttp(String dirname, String perms,
- String unmaskedPerms) throws Exception {
- // get the createDirMetrics
- long oldOpsMkdir =
- metricsGetter.get("MKDIRS").call();
- String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
- // Remove leading / from filename
- if (dirname.charAt(0) == '/') {
- dirname = dirname.substring(1);
- }
- String pathOps;
- if (perms == null) {
- pathOps = MessageFormat.format(
- "/webhdfs/v1/{0}?user.name={1}&op=MKDIRS",
- dirname, user);
- } else {
- pathOps = MessageFormat.format(
- "/webhdfs/v1/{0}?user.name={1}&permission={2}&op=MKDIRS",
- dirname, user, perms);
- }
- if (unmaskedPerms != null) {
- pathOps = pathOps+"&unmaskedpermission="+unmaskedPerms;
- }
- URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
- HttpURLConnection conn = (HttpURLConnection) url.openConnection();
- conn.setRequestMethod("PUT");
- conn.connect();
- Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
- Assert.assertEquals(1 + oldOpsMkdir,
- (long) metricsGetter.get("MKDIRS").call());
- }
-
- /**
- * Talks to the http interface to get the json output of a *STATUS command
- * on the given file.
- *
- * @param filename The file to query.
- * @param command Either GETFILESTATUS, LISTSTATUS, GETACLSTATUS, or GETXATTRS
- * @return A string containing the JSON output describing the file.
- * @throws Exception
- */
- private String getStatus(String filename, String command)
- throws Exception {
- long oldOpsStat =
- metricsGetter.getOrDefault(command, defaultEntryMetricGetter).call();
- String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
- // Remove leading / from filename
- if (filename.charAt(0) == '/') {
- filename = filename.substring(1);
- }
- String pathOps = MessageFormat.format(
- "/webhdfs/v1/{0}?user.name={1}&op={2}",
- filename, user, command);
- URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
- HttpURLConnection conn = (HttpURLConnection) url.openConnection();
- conn.connect();
- Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
-
- BufferedReader reader =
- new BufferedReader(new InputStreamReader(conn.getInputStream()));
- long opsStat =
- metricsGetter.getOrDefault(command, defaultExitMetricGetter).call();
- Assert.assertEquals(oldOpsStat + 1L, opsStat);
- return reader.readLine();
- }
-
- /**
- * General-purpose http PUT command to the httpfs server.
- * @param filename The file to operate upon
- * @param command The command to perform (SETACL, etc)
- * @param params Parameters, like "aclspec=..."
- */
- private void putCmd(String filename, String command,
- String params) throws Exception {
- Assert.assertEquals(HttpURLConnection.HTTP_OK,
- putCmdWithReturn(filename, command, params).getResponseCode());
- }
-
- /**
- * General-purpose http PUT command to the httpfs server,
- * which returns the related HttpURLConnection instance.
- * @param filename The file to operate upon
- * @param command The command to perform (SETACL, etc)
- * @param params Parameters, like "aclspec=..."
- * @return HttpURLConnection the HttpURLConnection instance for the given PUT
- */
- private HttpURLConnection putCmdWithReturn(String filename, String command,
- String params) throws Exception {
- String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
- // Remove leading / from filename
- if (filename.charAt(0) == '/') {
- filename = filename.substring(1);
- }
- String pathOps = MessageFormat.format(
- "/webhdfs/v1/{0}?user.name={1}{2}{3}&op={4}",
- filename, user, (params == null) ? "" : "&",
- (params == null) ? "" : params, command);
- URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
- HttpURLConnection conn = (HttpURLConnection) url.openConnection();
- conn.setRequestMethod("PUT");
- conn.connect();
- return conn;
- }
-
- /**
- * Given the JSON output from the GETFILESTATUS call, return the
- * 'permission' value.
- *
- * @param statusJson JSON from GETFILESTATUS
- * @return The value of 'permission' in statusJson
- * @throws Exception
- */
- private String getPerms(String statusJson) throws Exception {
- JSONParser parser = new JSONParser();
- JSONObject jsonObject = (JSONObject) parser.parse(statusJson);
- JSONObject details = (JSONObject) jsonObject.get("FileStatus");
- return (String) details.get("permission");
- }
-
- /**
- * Given the JSON output from the GETTRASHROOT call, return the
- * 'Path' value.
- *
- * @param statusJson JSON from GETTRASHROOT
- * @return The value of 'Path' in statusJson
- * @throws Exception
- */
- private String getPath(String statusJson) throws Exception {
- JSONParser parser = new JSONParser();
- JSONObject details = (JSONObject) parser.parse(statusJson);
- return (String) details.get("Path");
- }
-
- /**
- * Given the JSON output from the GETACLSTATUS call, return the
- * 'entries' value as a List<String>.
- * @param statusJson JSON from GETACLSTATUS
- * @return A List of Strings which are the elements of the ACL entries
- * @throws Exception
- */
- private List<String> getAclEntries(String statusJson) throws Exception {
- List<String> entries = new ArrayList<String>();
- JSONParser parser = new JSONParser();
- JSONObject jsonObject = (JSONObject) parser.parse(statusJson);
- JSONObject details = (JSONObject) jsonObject.get("AclStatus");
- JSONArray jsonEntries = (JSONArray) details.get("entries");
- if (jsonEntries != null) {
- for (Object e : jsonEntries) {
- entries.add(e.toString());
- }
- }
- return entries;
- }
-
- /**
- * Parse xAttrs from JSON result of GETXATTRS call, return xAttrs Map.
- * @param statusJson JSON from GETXATTRS
- * @return Map<String, byte[]> xAttrs Map
- * @throws Exception
- */
- private Map<String, byte[]> getXAttrs(String statusJson) throws Exception {
- Map<String, byte[]> xAttrs = Maps.newHashMap();
- JSONParser parser = new JSONParser();
- JSONObject jsonObject = (JSONObject) parser.parse(statusJson);
- JSONArray jsonXAttrs = (JSONArray) jsonObject.get("XAttrs");
- if (jsonXAttrs != null) {
- for (Object a : jsonXAttrs) {
- String name = (String) ((JSONObject)a).get("name");
- String value = (String) ((JSONObject)a).get("value");
- xAttrs.put(name, decodeXAttrValue(value));
- }
- }
- return xAttrs;
- }
-
- /** Decode xattr value from string. */
- private byte[] decodeXAttrValue(String value) throws IOException {
- if (value != null) {
- return XAttrCodec.decodeValue(value);
- } else {
- return new byte[0];
- }
- }
-
- /**
- * Finds the ACL entry with the given name in an AclStatus.
- *
- * @param stat AclStatus object from a call to getAclStatus
- * @param name The name of the ACL being searched for
- * @return The AclEntry if found, or null otherwise
- * @throws IOException
- */
- private AclEntry findAclWithName(AclStatus stat, String name)
- throws IOException{
- AclEntry relevantAcl = null;
- Iterator<AclEntry> it = stat.getEntries().iterator();
- while (it.hasNext()) {
- AclEntry e = it.next();
- if (e.getName().equals(name)) {
- relevantAcl = e;
- break;
- }
- }
- return relevantAcl;
- }
-
- /**
- * Validate that files are created with 755 permissions when no
- * 'permissions' attribute is specified, and when 'permissions'
- * is specified, that value is honored.
- */
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testPerms() throws Exception {
- createHttpFSServer(false, false);
-
- FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
- fs.mkdirs(new Path("/perm"));
-
- createWithHttp("/perm/none", null);
- String statusJson = getStatus("/perm/none", "GETFILESTATUS");
- Assert.assertTrue("755".equals(getPerms(statusJson)));
-
- createWithHttp("/perm/p-777", "777");
- statusJson = getStatus("/perm/p-777", "GETFILESTATUS");
- Assert.assertTrue("777".equals(getPerms(statusJson)));
-
- createWithHttp("/perm/p-654", "654");
- statusJson = getStatus("/perm/p-654", "GETFILESTATUS");
- Assert.assertTrue("654".equals(getPerms(statusJson)));
-
- createWithHttp("/perm/p-321", "321");
- statusJson = getStatus("/perm/p-321", "GETFILESTATUS");
- Assert.assertTrue("321".equals(getPerms(statusJson)));
- }
-
- /**
- * Validate XAttr get/set/remove calls.
- */
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testXAttrs() throws Exception {
- final String name1 = "user.a1";
- final byte[] value1 = new byte[]{0x31, 0x32, 0x33};
- final String name2 = "user.a2";
- final byte[] value2 = new byte[]{0x41, 0x42, 0x43};
- final String dir = "/xattrTest";
- final String path = dir + "/file";
-
- createHttpFSServer(false, false);
-
- FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
- fs.mkdirs(new Path(dir));
-
- createWithHttp(path, null);
- String statusJson = getStatus(path, "GETXATTRS");
- Map<String, byte[]> xAttrs = getXAttrs(statusJson);
- Assert.assertEquals(0, xAttrs.size());
-
- // Set two xattrs
- putCmd(path, "SETXATTR", setXAttrParam(name1, value1));
- putCmd(path, "SETXATTR", setXAttrParam(name2, value2));
- statusJson = getStatus(path, "GETXATTRS");
- xAttrs = getXAttrs(statusJson);
- Assert.assertEquals(2, xAttrs.size());
- Assert.assertArrayEquals(value1, xAttrs.get(name1));
- Assert.assertArrayEquals(value2, xAttrs.get(name2));
-
- // Remove one xattr
- putCmd(path, "REMOVEXATTR", "xattr.name=" + name1);
- statusJson = getStatus(path, "GETXATTRS");
- xAttrs = getXAttrs(statusJson);
- Assert.assertEquals(1, xAttrs.size());
- Assert.assertArrayEquals(value2, xAttrs.get(name2));
-
- // Remove another xattr, then there is no xattr
- putCmd(path, "REMOVEXATTR", "xattr.name=" + name2);
- statusJson = getStatus(path, "GETXATTRS");
- xAttrs = getXAttrs(statusJson);
- Assert.assertEquals(0, xAttrs.size());
- }
-
- /** Params for setting an xAttr. */
- public static String setXAttrParam(String name, byte[] value)
- throws IOException {
- return "xattr.name=" + name + "&xattr.value=" + XAttrCodec.encodeValue(
- value, XAttrCodec.HEX) + "&encoding=hex&flag=create";
- }
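
Fed through the server, the helper above produces a query fragment like the one below; a worked example relying on XAttrCodec's 0x-prefixed hex output:

    import org.apache.hadoop.fs.XAttrCodec;

    public class XAttrParamDemo {
      public static void main(String[] args) throws Exception {
        byte[] value = new byte[]{0x31, 0x32, 0x33};
        // Prints: xattr.name=user.a1&xattr.value=0x313233&encoding=hex&flag=create
        System.out.println("xattr.name=user.a1&xattr.value="
            + XAttrCodec.encodeValue(value, XAttrCodec.HEX)
            + "&encoding=hex&flag=create");
      }
    }
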
-
- /**
- * Validate the various ACL set/modify/remove calls. General strategy is
- * to verify each of the following steps with GETFILESTATUS, LISTSTATUS,
- * and GETACLSTATUS:
- * <ol>
- * <li>Create a file with no ACLs</li>
- * <li>Add a user + group ACL</li>
- * <li>Add another user ACL</li>
- * <li>Remove the first user ACL</li>
- * <li>Remove all ACLs</li>
- * </ol>
- */
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testFileAcls() throws Exception {
- final String aclUser1 = "user:foo:rw-";
- final String remAclUser1 = "user:foo:";
- final String aclUser2 = "user:bar:r--";
- final String aclGroup1 = "group::r--";
- final String aclSpec = "aclspec=user::rwx," + aclUser1 + ","
- + aclGroup1 + ",other::---";
- final String modAclSpec = "aclspec=" + aclUser2;
- final String remAclSpec = "aclspec=" + remAclUser1;
- final String dir = "/aclFileTest";
- final String path = dir + "/test";
- String statusJson;
- List<String> aclEntries;
-
- createHttpFSServer(false, false);
-
- FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
- fs.mkdirs(new Path(dir));
-
- createWithHttp(path, null);
-
- /* getfilestatus and liststatus don't have 'aclBit' in their reply */
- statusJson = getStatus(path, "GETFILESTATUS");
- Assert.assertEquals(-1, statusJson.indexOf("aclBit"));
- statusJson = getStatus(dir, "LISTSTATUS");
- Assert.assertEquals(-1, statusJson.indexOf("aclBit"));
-
- /* getaclstatus works and returns no entries */
- statusJson = getStatus(path, "GETACLSTATUS");
- aclEntries = getAclEntries(statusJson);
- Assert.assertEquals(0, aclEntries.size());
-
- /*
- * Now set an ACL on the file. (getfile|list)status should report the
- * aclBit, and aclstatus should return the entries we just set.
- */
- putCmd(path, "SETACL", aclSpec);
- statusJson = getStatus(path, "GETFILESTATUS");
- Assert.assertNotEquals(-1, statusJson.indexOf("aclBit"));
- statusJson = getStatus(dir, "LISTSTATUS");
- Assert.assertNotEquals(-1, statusJson.indexOf("aclBit"));
- statusJson = getStatus(path, "GETACLSTATUS");
- aclEntries = getAclEntries(statusJson);
- Assert.assertEquals(2, aclEntries.size());
- Assert.assertTrue(aclEntries.contains(aclUser1));
- Assert.assertTrue(aclEntries.contains(aclGroup1));
-
- /* Modify acl entries to add another user acl */
- putCmd(path, "MODIFYACLENTRIES", modAclSpec);
- statusJson = getStatus(path, "GETACLSTATUS");
- aclEntries = getAclEntries(statusJson);
- Assert.assertEquals(3, aclEntries.size());
- Assert.assertTrue(aclEntries.contains(aclUser1));
- Assert.assertTrue(aclEntries.contains(aclUser2));
- Assert.assertTrue(aclEntries.contains(aclGroup1));
-
- /* Remove the first user acl entry and verify */
- putCmd(path, "REMOVEACLENTRIES", remAclSpec);
- statusJson = getStatus(path, "GETACLSTATUS");
- aclEntries = getAclEntries(statusJson);
- Assert.assertEquals(2, aclEntries.size());
- Assert.assertTrue(aclEntries.contains(aclUser2));
- Assert.assertTrue(aclEntries.contains(aclGroup1));
-
- /* Remove all acls and verify */
- putCmd(path, "REMOVEACL", null);
- statusJson = getStatus(path, "GETACLSTATUS");
- aclEntries = getAclEntries(statusJson);
- Assert.assertEquals(0, aclEntries.size());
- statusJson = getStatus(path, "GETFILESTATUS");
- Assert.assertEquals(-1, statusJson.indexOf("aclBit"));
- statusJson = getStatus(dir, "LISTSTATUS");
- Assert.assertEquals(-1, statusJson.indexOf("aclBit"));
- }
-
- /**
- * Test ACL operations on a directory, including default ACLs.
- * General strategy is to use GETFILESTATUS and GETACLSTATUS to verify:
- * <ol>
- * <li>Initial status with no ACLs</li>
- * <li>The addition of a default ACL</li>
- * <li>The removal of default ACLs</li>
- * </ol>
- *
- * @throws Exception
- */
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testDirAcls() throws Exception {
- final String defUser1 = "default:user:glarch:r-x";
- final String defSpec1 = "aclspec=" + defUser1;
- final String dir = "/aclDirTest";
- String statusJson;
- List<String> aclEntries;
-
- createHttpFSServer(false, false);
-
- FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
- fs.mkdirs(new Path(dir));
-
- /* getfilestatus and liststatus don't have 'aclBit' in their reply */
- statusJson = getStatus(dir, "GETFILESTATUS");
- Assert.assertEquals(-1, statusJson.indexOf("aclBit"));
-
- /* No ACLs, either */
- statusJson = getStatus(dir, "GETACLSTATUS");
- aclEntries = getAclEntries(statusJson);
- Assert.assertEquals(0, aclEntries.size());
-
- /* Give it a default ACL and verify */
- putCmd(dir, "SETACL", defSpec1);
- statusJson = getStatus(dir, "GETFILESTATUS");
- Assert.assertNotEquals(-1, statusJson.indexOf("aclBit"));
- statusJson = getStatus(dir, "GETACLSTATUS");
- aclEntries = getAclEntries(statusJson);
- Assert.assertEquals(5, aclEntries.size());
- /* Four of the five entries are default:(user|group|mask|other):perm */
- Assert.assertTrue(aclEntries.contains(defUser1));
-
- /* Remove the default ACL and re-verify */
- putCmd(dir, "REMOVEDEFAULTACL", null);
- statusJson = getStatus(dir, "GETFILESTATUS");
- Assert.assertEquals(-1, statusJson.indexOf("aclBit"));
- statusJson = getStatus(dir, "GETACLSTATUS");
- aclEntries = getAclEntries(statusJson);
- Assert.assertEquals(0, aclEntries.size());
- }
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testCustomizedUserAndGroupNames() throws Exception {
- // Start server with default configuration
- Server server = createHttpFSServer(false, false);
- final Configuration conf = HttpFSServerWebApp.get()
- .get(FileSystemAccess.class).getFileSystemConfiguration();
- // Change pattern config
- conf.set(HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
- "^[A-Za-z0-9_][A-Za-z0-9._-]*[$]?$");
- conf.set(HdfsClientConfigKeys.DFS_WEBHDFS_ACL_PERMISSION_PATTERN_KEY,
- "^(default:)?(user|group|mask|other):" +
- "[[0-9A-Za-z_][@A-Za-z0-9._-]]*:([rwx-]{3})?(,(default:)?" +
- "(user|group|mask|other):[[0-9A-Za-z_][@A-Za-z0-9._-]]*:" +
- "([rwx-]{3})?)*$");
- // Save configuration to site file
- writeConf(conf, "hdfs-site.xml");
- // Restart the HttpFS server to apply new config
- server.stop();
- server.start();
-
- final String aclUser = "user:123:rw-";
- final String aclGroup = "group:foo@bar:r--";
- final String aclSpec = "aclspec=user::rwx," + aclUser + ",group::rwx," +
- aclGroup + ",other::---";
- final String dir = "/aclFileTestCustom";
- final String path = dir + "/test";
- // Create test dir
- FileSystem fs = FileSystem.get(conf);
- fs.mkdirs(new Path(dir));
- createWithHttp(path, null);
- // Set ACL
- putCmd(path, "SETACL", aclSpec);
- // Verify ACL
- String statusJson = getStatus(path, "GETACLSTATUS");
- List<String> aclEntries = getAclEntries(statusJson);
- Assert.assertTrue(aclEntries.contains(aclUser));
- Assert.assertTrue(aclEntries.contains(aclGroup));
- }
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testOpenOffsetLength() throws Exception {
- createHttpFSServer(false, false);
-
- byte[] array = new byte[]{0, 1, 2, 3};
- FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
- fs.mkdirs(new Path("/tmp"));
- OutputStream os = fs.create(new Path("/tmp/foo"));
- os.write(array);
- os.close();
-
- String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
- URL url = new URL(TestJettyHelper.getJettyURL(),
- MessageFormat.format(
- "/webhdfs/v1/tmp/foo?user.name={0}&op=open&offset=1&length=2",
- user));
- HttpURLConnection conn = (HttpURLConnection) url.openConnection();
- Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
- InputStream is = conn.getInputStream();
- Assert.assertEquals(1, is.read());
- Assert.assertEquals(2, is.read());
- Assert.assertEquals(-1, is.read());
- }
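
The offset/length pair in the OPEN call selects the half-open byte range [offset, offset + length) of the file, which is why the test reads bytes 1 and 2 and then hits EOF. A small self-contained illustration of that arithmetic (not part of the original test):

    byte[] file = {0, 1, 2, 3};
    int offset = 1;
    int length = 2;
    // copyOfRange is exclusive of the end index, mirroring [1, 3)
    byte[] window = java.util.Arrays.copyOfRange(file, offset, offset + length);
    // window == {1, 2}; a subsequent read returns -1 (EOF)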
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testCreateFileWithUnmaskedPermissions() throws Exception {
- createHttpFSServer(false, false);
-
- FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
- // Create a folder with a default acl default:user2:rw-
- fs.mkdirs(new Path("/tmp"));
- AclEntry acl = new org.apache.hadoop.fs.permission.AclEntry.Builder()
- .setType(AclEntryType.USER)
- .setScope(AclEntryScope.DEFAULT)
- .setName("user2")
- .setPermission(FsAction.READ_WRITE)
- .build();
- fs.setAcl(new Path("/tmp"), new ArrayList<AclEntry>(Arrays.asList(acl)));
-
- String notUnmaskedFile = "/tmp/notUnmasked";
- String unmaskedFile = "/tmp/unmasked";
-
- // Create a file inside the folder. It should inherit the default acl
- // but the mask should affect the ACL permissions. The mask is controlled
- // by the group permissions, which are 0, and hence the mask will make
- // the effective permission of the inherited ACL be NONE.
- createWithHttp(notUnmaskedFile, "700");
-
- // Pull the relevant ACL from the FS object and check the mask has affected
- // its permissions.
- AclStatus aclStatus = fs.getAclStatus(new Path(notUnmaskedFile));
- AclEntry theAcl = findAclWithName(aclStatus, "user2");
-
- Assert.assertNotNull(theAcl);
- Assert.assertEquals(FsAction.NONE,
- aclStatus.getEffectivePermission(theAcl));
-
- // Create another file, this time passing an unmasked permission of 777.
- // Now the inherited permissions should be as expected.
- createWithHttp(unmaskedFile, "700", "777");
-
- aclStatus = fs.getAclStatus(new Path(unmaskedFile));
- theAcl = findAclWithName(aclStatus, "user2");
-
- Assert.assertNotNull(theAcl);
- Assert.assertEquals(FsAction.READ_WRITE,
- aclStatus.getEffectivePermission(theAcl));
- }
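
The masking rule the two assertions rely on can be stated compactly: the effective permission of a named ACL entry is the bitwise AND of the entry's permission and the mask, and the mask is taken from the group bits of the create mode unless an unmasked permission is supplied. A minimal sketch of that rule with FsAction, assuming the usual HDFS ACL semantics:

    FsAction entry = FsAction.READ_WRITE;  // inherited default:user:user2:rw-
    FsAction maskFrom700 = FsAction.NONE;  // group bits of mode 700
    FsAction maskFrom777 = FsAction.ALL;   // group bits of mode 777
    Assert.assertEquals(FsAction.NONE, entry.and(maskFrom700));
    Assert.assertEquals(FsAction.READ_WRITE, entry.and(maskFrom777));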
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testMkdirWithUnmaskedPermissions() throws Exception {
- createHttpFSServer(false, false);
-
- FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
- // Create a folder with a default acl default:user2:rw-
- fs.mkdirs(new Path("/tmp"));
- AclEntry acl = new org.apache.hadoop.fs.permission.AclEntry.Builder()
- .setType(AclEntryType.USER)
- .setScope(AclEntryScope.DEFAULT)
- .setName("user2")
- .setPermission(FsAction.READ_WRITE)
- .build();
- fs.setAcl(new Path("/tmp"), new ArrayList<AclEntry>(Arrays.asList(acl)));
-
- String notUnmaskedDir = "/tmp/notUnmaskedDir";
- String unmaskedDir = "/tmp/unmaskedDir";
-
- // Create a directory inside the folder. It should inherit the default acl
- // but the mask should affect the ACL permissions. The mask is controlled
- // by the group permissions, which are 0, and hence the mask will make
- // the effective permission of the inherited ACL be NONE.
- createDirWithHttp(notUnmaskedDir, "700", null);
-
- // Pull the relevant ACL from the FS object and check the mask has affected
- // its permissions.
- AclStatus aclStatus = fs.getAclStatus(new Path(notUnmaskedDir));
- AclEntry theAcl = findAclWithName(aclStatus, "user2");
-
- Assert.assertNotNull(theAcl);
- Assert.assertEquals(FsAction.NONE,
- aclStatus.getEffectivePermission(theAcl));
-
- // Create another directory, this time passing an unmasked permission of
- // 777. Now the inherited permissions should be as expected.
- createDirWithHttp(unmaskedDir, "700", "777");
-
- aclStatus = fs.getAclStatus(new Path(unmaskedDir));
- theAcl = findAclWithName(aclStatus, "user2");
-
- Assert.assertNotNull(theAcl);
- Assert.assertEquals(FsAction.READ_WRITE,
- aclStatus.getEffectivePermission(theAcl));
- }
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testPutNoOperation() throws Exception {
- createHttpFSServer(false, false);
-
- String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
- URL url = new URL(TestJettyHelper.getJettyURL(),
- MessageFormat.format("/webhdfs/v1/foo?user.name={0}", user));
- HttpURLConnection conn = (HttpURLConnection) url.openConnection();
- conn.setDoInput(true);
- conn.setDoOutput(true);
- conn.setRequestMethod("PUT");
- Assert.assertEquals(conn.getResponseCode(),
- HttpURLConnection.HTTP_BAD_REQUEST);
- }
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testGetTrashRoot() throws Exception {
- String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
- createHttpFSServer(false, false);
- String trashJson = getStatus("/", "GETTRASHROOT");
- String trashPath = getPath(trashJson);
-
- Path expectedPath = new Path(FileSystem.USER_HOME_PREFIX,
- new Path(user, FileSystem.TRASH_PREFIX));
- Assert.assertEquals(expectedPath.toUri().getPath(), trashPath);
-
- byte[] array = new byte[]{0, 1, 2, 3};
- FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
- fs.mkdirs(new Path("/tmp"));
- OutputStream os = fs.create(new Path("/tmp/foo"));
- os.write(array);
- os.close();
-
- trashJson = getStatus("/tmp/foo", "GETTRASHROOT");
- trashPath = getPath(trashJson);
- Assert.assertEquals(expectedPath.toUri().getPath(), trashPath);
-
- // TestHdfsHelper has already set up the encryption zone (EZ) environment
- final Path ezFile = TestHdfsHelper.ENCRYPTED_FILE;
- final Path ezPath = TestHdfsHelper.ENCRYPTION_ZONE;
- trashJson = getStatus(ezFile.toUri().getPath(), "GETTRASHROOT");
- trashPath = getPath(trashJson);
- expectedPath = new Path(ezPath, new Path(FileSystem.TRASH_PREFIX, user));
- Assert.assertEquals(expectedPath.toUri().getPath(), trashPath);
- }
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testDelegationTokenOperations() throws Exception {
- createHttpFSServer(true, false);
- delegationTokenCommonTests(false);
- }
-
- private HttpURLConnection snapshotTestPreconditions(String httpMethod,
- String snapOperation,
- String additionalParams)
- throws Exception {
- String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
- URL url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format(
- "/webhdfs/v1/tmp/tmp-snap-test/subdir?user.name={0}&op=MKDIRS",
- user));
- HttpURLConnection conn = (HttpURLConnection) url.openConnection();
- conn.setRequestMethod("PUT");
- conn.connect();
-
- Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
-
- //needed to make the given dir snapshottable
- Path snapshottablePath = new Path("/tmp/tmp-snap-test");
- DistributedFileSystem dfs =
- (DistributedFileSystem) FileSystem.get(snapshottablePath.toUri(),
- TestHdfsHelper.getHdfsConf());
- dfs.allowSnapshot(snapshottablePath);
-
- // Try to create a snapshot, passing the snapshot name
- url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format(
- "/webhdfs/v1/tmp/tmp-snap-test?user.name={0}&op={1}&{2}", user,
- snapOperation, additionalParams));
- conn = (HttpURLConnection) url.openConnection();
- conn.setRequestMethod(httpMethod);
- conn.connect();
- return conn;
- }
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testAllowSnapshot() throws Exception {
- createHttpFSServer(false, false);
- // Create a test directory
- String pathString = "/tmp/tmp-snap-allow-test";
- createDirWithHttp(pathString, "700", null);
-
- Path path = new Path(pathString);
- DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(
- path.toUri(), TestHdfsHelper.getHdfsConf());
- // FileStatus should have snapshot enabled bit unset by default
- Assert.assertFalse(dfs.getFileStatus(path).isSnapshotEnabled());
- // Send a request with ALLOWSNAPSHOT API
- String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
- URL url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format(
- "/webhdfs/v1{0}?user.name={1}&op=ALLOWSNAPSHOT",
- pathString, user));
- HttpURLConnection conn = (HttpURLConnection) url.openConnection();
- conn.setRequestMethod("PUT");
- conn.connect();
- // Should return HTTP_OK
- Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
- // FileStatus should have snapshot enabled bit set
- Assert.assertTrue(dfs.getFileStatus(path).isSnapshotEnabled());
- // Clean up
- dfs.delete(path, true);
- }
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testDisallowSnapshot() throws Exception {
- createHttpFSServer(false, false);
- // Create a test directory
- String pathString = "/tmp/tmp-snap-disallow-test";
- createDirWithHttp(pathString, "700", null);
-
- Path path = new Path(pathString);
- DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(
- path.toUri(), TestHdfsHelper.getHdfsConf());
- // Allow snapshot
- dfs.allowSnapshot(path);
- // FileStatus should have snapshot enabled bit set so far
- Assert.assertTrue(dfs.getFileStatus(path).isSnapshotEnabled());
- // Send a request with DISALLOWSNAPSHOT API
- String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
- URL url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format(
- "/webhdfs/v1{0}?user.name={1}&op=DISALLOWSNAPSHOT",
- pathString, user));
- HttpURLConnection conn = (HttpURLConnection) url.openConnection();
- conn.setRequestMethod("PUT");
- conn.connect();
- // Should return HTTP_OK
- Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
- // FileStatus should not have snapshot enabled bit set
- Assert.assertFalse(dfs.getFileStatus(path).isSnapshotEnabled());
- // Clean up
- dfs.delete(path, true);
- }
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testDisallowSnapshotException() throws Exception {
- createHttpFSServer(false, false);
- // Create a test directory
- String pathString = "/tmp/tmp-snap-disallow-exception-test";
- createDirWithHttp(pathString, "700", null);
-
- Path path = new Path(pathString);
- DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(
- path.toUri(), TestHdfsHelper.getHdfsConf());
- // Allow snapshot
- dfs.allowSnapshot(path);
- // FileStatus should have snapshot enabled bit set so far
- Assert.assertTrue(dfs.getFileStatus(path).isSnapshotEnabled());
- // Create some snapshots
- dfs.createSnapshot(path, "snap-01");
- dfs.createSnapshot(path, "snap-02");
- // Send a request with DISALLOWSNAPSHOT API
- String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
- URL url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format(
- "/webhdfs/v1{0}?user.name={1}&op=DISALLOWSNAPSHOT",
- pathString, user));
- HttpURLConnection conn = (HttpURLConnection) url.openConnection();
- conn.setRequestMethod("PUT");
- conn.connect();
- // Should not return HTTP_OK
- Assert.assertNotEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
- // FileStatus should still have snapshot enabled bit set
- Assert.assertTrue(dfs.getFileStatus(path).isSnapshotEnabled());
- // Clean up
- dfs.deleteSnapshot(path, "snap-02");
- dfs.deleteSnapshot(path, "snap-01");
- dfs.delete(path, true);
- }
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testCreateSnapshot() throws Exception {
- createHttpFSServer(false, false);
- final HttpURLConnection conn = snapshotTestPreconditions("PUT",
- "CREATESNAPSHOT",
- "snapshotname=snap-with-name");
- Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
- final BufferedReader reader =
- new BufferedReader(new InputStreamReader(conn.getInputStream()));
- String result = reader.readLine();
- // Validate that the response content has the expected format
- Assert.assertEquals(
- "{\"Path\":\"/tmp/tmp-snap-test/.snapshot/snap-with-name\"}", result);
- // Validate that the snapshot was created under the .snapshot folder
- result = getStatus("/tmp/tmp-snap-test/.snapshot",
- "LISTSTATUS");
- Assert.assertTrue(result.contains("snap-with-name"));
- }
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testCreateSnapshotNoSnapshotName() throws Exception {
- createHttpFSServer(false, false);
- final HttpURLConnection conn = snapshotTestPreconditions("PUT",
- "CREATESNAPSHOT",
- "");
- Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
- final BufferedReader reader = new BufferedReader(
- new InputStreamReader(conn.getInputStream()));
- String result = reader.readLine();
- // Validate that the generated snapshot name has the expected format
- Assert.assertTrue(Pattern.matches(
- "(\\{\\\"Path\\\"\\:\\\"/tmp/tmp-snap-test/.snapshot/s)" +
- "(\\d{8})(-)(\\d{6})(\\.)(\\d{3})(\\\"\\})", result));
- // Validate that the snapshot was created under the .snapshot folder
- result = getStatus("/tmp/tmp-snap-test/.snapshot",
- "LISTSTATUS");
-
- Assert.assertTrue(Pattern.matches("(.+)(\\\"pathSuffix\\\":\\\"s)" +
- "(\\d{8})(-)(\\d{6})(\\.)(\\d{3})(\\\")(.+)",
- result));
- }
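
The regular expressions above encode the shape of HDFS's generated snapshot names: when no snapshotname parameter is supplied, the server picks a name of the form "s" followed by a timestamp, e.g. "s20210830-023057.123". A sketch of that naming scheme, assuming the pattern implied by the regex:

    // Reproduces the assumed default-name pattern "s" + yyyyMMdd-HHmmss.SSS
    java.text.SimpleDateFormat fmt =
        new java.text.SimpleDateFormat("'s'yyyyMMdd-HHmmss.SSS");
    String defaultSnapshotName = fmt.format(new java.util.Date());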
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testRenameSnapshot() throws Exception {
- createHttpFSServer(false, false);
- HttpURLConnection conn = snapshotTestPreconditions("PUT",
- "CREATESNAPSHOT",
- "snapshotname=snap-to-rename");
- Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
- conn = snapshotTestPreconditions("PUT",
- "RENAMESNAPSHOT",
- "oldsnapshotname=snap-to-rename" +
- "&snapshotname=snap-renamed");
- Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
- // Validate that the snapshot was renamed under the .snapshot folder
- String result = getStatus("/tmp/tmp-snap-test/.snapshot",
- "LISTSTATUS");
- Assert.assertTrue(result.contains("snap-renamed"));
- //There should be no snapshot named snap-to-rename now
- Assert.assertFalse(result.contains("snap-to-rename"));
- }
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testDelegationTokenOperationsSsl() throws Exception {
- createHttpFSServer(true, true);
- delegationTokenCommonTests(true);
- }
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testDeleteSnapshot() throws Exception {
- createHttpFSServer(false, false);
- HttpURLConnection conn = snapshotTestPreconditions("PUT",
- "CREATESNAPSHOT",
- "snapshotname=snap-to-delete");
- Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
- conn = snapshotTestPreconditions("DELETE",
- "DELETESNAPSHOT",
- "snapshotname=snap-to-delete");
- Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
- // Validate that the snapshot is no longer under the .snapshot folder
- String result = getStatus("/tmp/tmp-snap-test/.snapshot",
- "LISTSTATUS");
- Assert.assertFalse(result.contains("snap-to-delete"));
- }
-
- private HttpURLConnection sendRequestToHttpFSServer(String path, String op,
- String additionalParams) throws Exception {
- String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
- URL url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format(
- "/webhdfs/v1{0}?user.name={1}&op={2}&{3}",
- path, user, op, additionalParams));
- HttpURLConnection conn = (HttpURLConnection) url.openConnection();
- conn.setRequestMethod("GET");
- conn.connect();
- return conn;
- }
-
- private HttpURLConnection sendRequestGetSnapshotDiff(String path,
- String oldsnapshotname, String snapshotname) throws Exception{
- return sendRequestToHttpFSServer(path, "GETSNAPSHOTDIFF",
- MessageFormat.format("oldsnapshotname={0}&snapshotname={1}",
- oldsnapshotname, snapshotname));
- }
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testGetSnapshotDiff() throws Exception {
- createHttpFSServer(false, false);
- // Create a test directory
- String pathStr = "/tmp/tmp-snap-diff-test";
- createDirWithHttp(pathStr, "700", null);
-
- Path path = new Path(pathStr);
- DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(
- path.toUri(), TestHdfsHelper.getHdfsConf());
- // Enable snapshot
- dfs.allowSnapshot(path);
- Assert.assertTrue(dfs.getFileStatus(path).isSnapshotEnabled());
- // Create a file and take a snapshot
- String file1 = pathStr + "/file1";
- createWithHttp(file1, null);
- dfs.createSnapshot(path, "snap1");
- // Create another file and take a snapshot
- String file2 = pathStr + "/file2";
- createWithHttp(file2, null);
- dfs.createSnapshot(path, "snap2");
-
- // Send a request with GETSNAPSHOTDIFF API
- HttpURLConnection conn = sendRequestGetSnapshotDiff(pathStr,
- "snap1", "snap2");
- // Should return HTTP_OK
- Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
- // Verify the response
- BufferedReader reader =
- new BufferedReader(new InputStreamReader(conn.getInputStream()));
- // The response should be a one-line JSON string.
- String result = reader.readLine();
- // Verify the content of the diff against the DFS API.
- SnapshotDiffReport dfsDiffReport = dfs.getSnapshotDiffReport(path,
- "snap1", "snap2");
- Assert.assertEquals(result, JsonUtil.toJsonString(dfsDiffReport));
- // Clean up
- dfs.deleteSnapshot(path, "snap2");
- dfs.deleteSnapshot(path, "snap1");
- dfs.delete(path, true);
- }
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testGetSnapshotDiffIllegalParam() throws Exception {
- createHttpFSServer(false, false);
- // Create a test directory
- String pathStr = "/tmp/tmp-snap-diff-exc-test";
- createDirWithHttp(pathStr, "700", null);
-
- Path path = new Path(pathStr);
- DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(
- path.toUri(), TestHdfsHelper.getHdfsConf());
- // Enable snapshot
- dfs.allowSnapshot(path);
- Assert.assertTrue(dfs.getFileStatus(path).isSnapshotEnabled());
- // Send requests with GETSNAPSHOTDIFF API
- // Snapshots snap1 and snap2 have not been created; expect failures, not NPEs.
- // Each response must be checked, so reassign conn for every request.
- HttpURLConnection conn = sendRequestGetSnapshotDiff(pathStr, "", "");
- Assert.assertNotEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
- conn = sendRequestGetSnapshotDiff(pathStr, "snap1", "");
- Assert.assertNotEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
- conn = sendRequestGetSnapshotDiff(pathStr, "", "snap2");
- Assert.assertNotEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
- conn = sendRequestGetSnapshotDiff(pathStr, "snap1", "snap2");
- Assert.assertNotEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
- // Clean up
- dfs.delete(path, true);
- }
-
- private void verifyGetSnapshottableDirectoryList(DistributedFileSystem dfs)
- throws Exception {
- // Send a request
- HttpURLConnection conn = sendRequestToHttpFSServer("/",
- "GETSNAPSHOTTABLEDIRECTORYLIST", "");
- // Should return HTTP_OK
- Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
- // Verify the response
- BufferedReader reader =
- new BufferedReader(new InputStreamReader(conn.getInputStream()));
- // The response should be a one-line JSON string.
- String dirLst = reader.readLine();
- // Verify the listing against the DFS API.
- SnapshottableDirectoryStatus[] dfsDirLst = dfs.getSnapshottableDirListing();
- Assert.assertEquals(dirLst, JsonUtil.toJsonString(dfsDirLst));
- }
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testGetSnapshottableDirectoryList() throws Exception {
- createHttpFSServer(false, false);
- // Create test directories
- String pathStr1 = "/tmp/tmp-snap-dirlist-test-1";
- createDirWithHttp(pathStr1, "700", null);
- Path path1 = new Path(pathStr1);
- String pathStr2 = "/tmp/tmp-snap-dirlist-test-2";
- createDirWithHttp(pathStr2, "700", null);
- Path path2 = new Path(pathStr2);
- DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(
- path1.toUri(), TestHdfsHelper.getHdfsConf());
- // Verify response when there is no snapshottable directory
- verifyGetSnapshottableDirectoryList(dfs);
- // Enable snapshot for path1
- dfs.allowSnapshot(path1);
- Assert.assertTrue(dfs.getFileStatus(path1).isSnapshotEnabled());
- // Verify response when there is one snapshottable directory
- verifyGetSnapshottableDirectoryList(dfs);
- // Enable snapshot for path2
- dfs.allowSnapshot(path2);
- Assert.assertTrue(dfs.getFileStatus(path2).isSnapshotEnabled());
- // Verify response when there are two snapshottable directories
- verifyGetSnapshottableDirectoryList(dfs);
-
- // Clean up and verify
- dfs.delete(path2, true);
- verifyGetSnapshottableDirectoryList(dfs);
- dfs.delete(path1, true);
- verifyGetSnapshottableDirectoryList(dfs);
- }
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testNoRedirect() throws Exception {
- createHttpFSServer(false, false);
-
- final String testContent = "Test content";
- final String path = "/testfile.txt";
- final String username = HadoopUsersConfTestHelper.getHadoopUsers()[0];
-
-
- // Trigger the creation of the file which shouldn't redirect
- URL url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format(
- "/webhdfs/v1{0}?user.name={1}&op=CREATE&noredirect=true",
- path, username));
- HttpURLConnection conn = (HttpURLConnection) url.openConnection();
- conn.setRequestMethod(HttpMethod.PUT);
- conn.connect();
- // Verify that it returned the final write location
- Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
- JSONObject json = (JSONObject)new JSONParser().parse(
- new InputStreamReader(conn.getInputStream()));
- String location = (String)json.get("Location");
- Assert.assertTrue(location.contains(DataParam.NAME));
- Assert.assertFalse(location.contains(NoRedirectParam.NAME));
- Assert.assertTrue(location.contains("CREATE"));
- Assert.assertTrue("Wrong location: " + location,
- location.startsWith(TestJettyHelper.getJettyURL().toString()));
-
- // Use the location to actually write the file
- url = new URL(location);
- conn = (HttpURLConnection) url.openConnection();
- conn.setRequestMethod(HttpMethod.PUT);
- conn.setRequestProperty(
- "Content-Type", MediaType.APPLICATION_OCTET_STREAM);
- conn.setDoOutput(true);
- conn.connect();
- OutputStream os = conn.getOutputStream();
- os.write(testContent.getBytes());
- os.close();
- // Verify that it created the file and returned the location
- Assert.assertEquals(
- HttpURLConnection.HTTP_CREATED, conn.getResponseCode());
- json = (JSONObject)new JSONParser().parse(
- new InputStreamReader(conn.getInputStream()));
- location = (String)json.get("Location");
- Assert.assertEquals(
- TestJettyHelper.getJettyURL() + "/webhdfs/v1" + path, location);
-
-
- // Read the file which shouldn't redirect
- url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format(
- "/webhdfs/v1{0}?user.name={1}&op=OPEN&noredirect=true",
- path, username));
- conn = (HttpURLConnection) url.openConnection();
- conn.setRequestMethod(HttpMethod.GET);
- conn.connect();
- // Verify that we got the final location to read from
- Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
- json = (JSONObject)new JSONParser().parse(
- new InputStreamReader(conn.getInputStream()));
- location = (String)json.get("Location");
- Assert.assertFalse(location.contains(NoRedirectParam.NAME));
- Assert.assertTrue(location.contains("OPEN"));
- Assert.assertTrue("Wrong location: " + location,
- location.startsWith(TestJettyHelper.getJettyURL().toString()));
-
- // Use the location to actually read
- url = new URL(location);
- conn = (HttpURLConnection) url.openConnection();
- conn.setRequestMethod(HttpMethod.GET);
- conn.connect();
- // Verify that we read what we wrote
- Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
- String content = IOUtils.toString(
- conn.getInputStream(), Charset.defaultCharset());
- Assert.assertEquals(testContent, content);
-
-
- // Get the checksum of the file which shouldn't redirect
- url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format(
- "/webhdfs/v1{0}?user.name={1}&op=GETFILECHECKSUM&noredirect=true",
- path, username));
- conn = (HttpURLConnection) url.openConnection();
- conn.setRequestMethod(HttpMethod.GET);
- conn.connect();
- // Verify that we got the final location to fetch the checksum from
- Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
- json = (JSONObject)new JSONParser().parse(
- new InputStreamReader(conn.getInputStream()));
- location = (String)json.get("Location");
- Assert.assertFalse(location.contains(NoRedirectParam.NAME));
- Assert.assertTrue(location.contains("GETFILECHECKSUM"));
- Assert.assertTrue("Wrong location: " + location,
- location.startsWith(TestJettyHelper.getJettyURL().toString()));
-
- // Use the location to actually get the checksum
- url = new URL(location);
- conn = (HttpURLConnection) url.openConnection();
- conn.setRequestMethod(HttpMethod.GET);
- conn.connect();
- // Verify the checksum of the content we wrote
- Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
- json = (JSONObject)new JSONParser().parse(
- new InputStreamReader(conn.getInputStream()));
- JSONObject checksum = (JSONObject)json.get("FileChecksum");
- Assert.assertEquals(
- "0000020000000000000000001b9c0a445fed3c0bf1e1aa7438d96b1500000000",
- checksum.get("bytes"));
- Assert.assertEquals(28L, checksum.get("length"));
- Assert.assertEquals("MD5-of-0MD5-of-512CRC32C", checksum.get("algorithm"));
- }
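
Condensed, the noredirect contract the test exercises is a two-step handshake: the first request returns the final URL in a JSON "Location" field instead of a 307 redirect, and the client then repeats the operation against that URL. A compact sketch of the write path (the helper name and structure are illustrative, not part of this file):

    private void writeViaNoRedirect(String path, String user, byte[] data)
        throws Exception {
      URL url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format(
          "/webhdfs/v1{0}?user.name={1}&op=CREATE&noredirect=true",
          path, user));
      HttpURLConnection conn = (HttpURLConnection) url.openConnection();
      conn.setRequestMethod("PUT");
      conn.connect();
      // Step 1: HTTP 200 with the final write location in the JSON body
      JSONObject json = (JSONObject) new JSONParser().parse(
          new InputStreamReader(conn.getInputStream()));
      String location = (String) json.get("Location");
      // Step 2: PUT the bytes to the returned location
      conn = (HttpURLConnection) new URL(location).openConnection();
      conn.setRequestMethod("PUT");
      conn.setRequestProperty("Content-Type", "application/octet-stream");
      conn.setDoOutput(true);
      OutputStream os = conn.getOutputStream();
      os.write(data);
      os.close();
      Assert.assertEquals(HttpURLConnection.HTTP_CREATED,
          conn.getResponseCode());
    }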
-
- private void verifyGetServerDefaults(DistributedFileSystem dfs)
- throws Exception {
- // Send a request
- HttpURLConnection conn =
- sendRequestToHttpFSServer("/", "GETSERVERDEFAULTS", "");
- // Should return HTTP_OK
- Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
- // Verify the response
- BufferedReader reader =
- new BufferedReader(new InputStreamReader(conn.getInputStream()));
- // The response should be a one-line JSON string.
- String serverDefaultsJson = reader.readLine();
- FsServerDefaults serverDefaults = dfs.getServerDefaults();
- Assert.assertNotNull(serverDefaults);
- Assert.assertEquals(serverDefaultsJson, JsonUtil.toJsonString(serverDefaults));
- }
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testGetServerDefaults() throws Exception {
- createHttpFSServer(false, false);
- String pathStr1 = "/";
- Path path1 = new Path(pathStr1);
- DistributedFileSystem dfs = (DistributedFileSystem) FileSystem
- .get(path1.toUri(), TestHdfsHelper.getHdfsConf());
- verifyGetServerDefaults(dfs);
- }
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testAccess() throws Exception {
- createHttpFSServer(false, false);
- final String dir = "/xattrTest";
- Path path1 = new Path(dir);
-
- DistributedFileSystem dfs = (DistributedFileSystem) FileSystem
- .get(path1.toUri(), TestHdfsHelper.getHdfsConf());
- dfs.mkdirs(new Path(dir));
-
- HttpURLConnection conn =
- sendRequestToHttpFSServer(dir, "CHECKACCESS", "fsaction=r--");
- Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
- HttpURLConnection conn1 =
- sendRequestToHttpFSServer(dir, "CHECKACCESS", "fsaction=-w-");
- Assert.assertEquals(HttpURLConnection.HTTP_OK, conn1.getResponseCode());
- }
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testECPolicy() throws Exception {
- createHttpFSServer(false, false);
- final ErasureCodingPolicy ecPolicy = SystemErasureCodingPolicies
- .getByID(SystemErasureCodingPolicies.RS_3_2_POLICY_ID);
- final String ecPolicyName = ecPolicy.getName();
- // Create an EC dir and write a test file in it
- final Path ecDir = new Path("/ec");
-
- DistributedFileSystem dfs = (DistributedFileSystem) FileSystem
- .get(ecDir.toUri(), TestHdfsHelper.getHdfsConf());
- Path ecFile = new Path(ecDir, "ec_file.txt");
- dfs.mkdirs(ecDir);
- dfs.enableErasureCodingPolicy(ecPolicyName);
- dfs.setErasureCodingPolicy(ecDir, ecPolicyName);
- // Create an EC file
- DFSTestUtil.createFile(dfs, ecFile, 1024, (short) 1, 0);
-
- // Verify that ecPolicy is set in getFileStatus response for ecFile
- String getFileStatusResponse =
- getStatus(ecFile.toString(), "GETFILESTATUS");
- JSONParser parser = new JSONParser();
- JSONObject jsonObject = (JSONObject) parser.parse(getFileStatusResponse);
- JSONObject details = (JSONObject) jsonObject.get("FileStatus");
- String ecPolicyForECFile = (String) details.get("ecPolicy");
- assertEquals("EC policy for ecFile should match the set EC policy",
- ecPolicyName, ecPolicyForECFile);
-
- // Verify httpFs getFileStatus with WEBHDFS REST API
- WebHdfsFileSystem httpfsWebHdfs = (WebHdfsFileSystem) FileSystem.get(
- new URI("webhdfs://"
- + TestJettyHelper.getJettyURL().toURI().getAuthority()),
- TestHdfsHelper.getHdfsConf());
- HdfsFileStatus httpfsFileStatus =
- (HdfsFileStatus) httpfsWebHdfs.getFileStatus(ecFile);
- assertNotNull(httpfsFileStatus.getErasureCodingPolicy());
- }
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testErasureCodingPolicy() throws Exception {
- createHttpFSServer(false, false);
- final String dir = "/ecPolicy";
- Path path1 = new Path(dir);
- final ErasureCodingPolicy ecPolicy = SystemErasureCodingPolicies
- .getByID(SystemErasureCodingPolicies.RS_3_2_POLICY_ID);
- final String ecPolicyName = ecPolicy.getName();
- DistributedFileSystem dfs = (DistributedFileSystem) FileSystem
- .get(path1.toUri(), TestHdfsHelper.getHdfsConf());
- dfs.mkdirs(new Path(dir));
- dfs.enableErasureCodingPolicy(ecPolicyName);
-
- HttpURLConnection conn =
- putCmdWithReturn(dir, "SETECPOLICY", "ecpolicy=" + ecPolicyName);
- Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
-
- HttpURLConnection conn1 = sendRequestToHttpFSServer(dir, "GETECPOLICY", "");
- // Should return HTTP_OK
- Assert.assertEquals(conn1.getResponseCode(), HttpURLConnection.HTTP_OK);
- // Verify the response
- BufferedReader reader =
- new BufferedReader(new InputStreamReader(conn1.getInputStream()));
- // The response should be a one-line JSON string.
- String ecPolicyJson = reader.readLine();
- ErasureCodingPolicy dfsEcPolicy = dfs.getErasureCodingPolicy(path1);
- Assert.assertNotNull(dfsEcPolicy);
- Assert.assertEquals(ecPolicyJson, JsonUtil.toJsonString(dfsEcPolicy));
-
- String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
- URL url = new URL(TestJettyHelper.getJettyURL(),
- MessageFormat.format("/webhdfs/v1{0}?user.name={1}&op={2}&{3}", dir,
- user, "UNSETECPOLICY", ""));
- HttpURLConnection conn2 = (HttpURLConnection) url.openConnection();
- conn2.setRequestMethod("POST");
- conn2.connect();
- Assert.assertEquals(HttpURLConnection.HTTP_OK, conn2.getResponseCode());
-
- // After the unset, the policy should be null
- dfsEcPolicy = dfs.getErasureCodingPolicy(path1);
- Assert.assertNull(dfsEcPolicy);
-
- // Test PUT operation with path "/"
- final String dir1 = "/";
- HttpURLConnection conn3 =
- putCmdWithReturn(dir1, "SETECPOLICY", "ecpolicy=" + ecPolicyName);
- // Should return HTTP_OK
- Assert.assertEquals(HttpURLConnection.HTTP_OK, conn3.getResponseCode());
-
- // Test POST operation with path "/"
- final String dir2 = "/";
- URL url1 = new URL(TestJettyHelper.getJettyURL(),
- MessageFormat.format("/webhdfs/v1{0}?user.name={1}&op={2}&{3}", dir2,
- user, "UNSETECPOLICY", ""));
- HttpURLConnection conn4 = (HttpURLConnection) url1.openConnection();
- conn4.setRequestMethod("POST");
- conn4.connect();
- Assert.assertEquals(HttpURLConnection.HTTP_OK, conn4.getResponseCode());
- }
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testStoragePolicySatisfier() throws Exception {
- createHttpFSServer(false, false);
- final String dir = "/parent";
- Path path1 = new Path(dir);
- String file = "/parent/file";
- Path filePath = new Path(file);
- DistributedFileSystem dfs = (DistributedFileSystem) FileSystem
- .get(path1.toUri(), TestHdfsHelper.getHdfsConf());
- dfs.mkdirs(path1);
- dfs.create(filePath).close();
- dfs.setStoragePolicy(filePath, HdfsConstants.COLD_STORAGE_POLICY_NAME);
- BlockStoragePolicy storagePolicy =
- (BlockStoragePolicy) dfs.getStoragePolicy(filePath);
- assertEquals(HdfsConstants.COLD_STORAGE_POLICY_NAME,
- storagePolicy.getName());
- HttpURLConnection conn = putCmdWithReturn(dir, "SATISFYSTORAGEPOLICY", "");
- Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
- Map<String, byte[]> xAttrs = dfs.getXAttrs(path1);
- assertTrue(
- xAttrs.containsKey(HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY));
- }
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testNoRedirectWithData() throws Exception {
- createHttpFSServer(false, false);
-
- final String path = "/file";
- final String username = HadoopUsersConfTestHelper.getHadoopUsers()[0];
- // file creation which should not redirect
- URL url = new URL(TestJettyHelper.getJettyURL(),
- MessageFormat.format(
- "/webhdfs/v1{0}?user.name={1}&op=CREATE&data=true&noredirect=true",
- path, username));
- HttpURLConnection conn = (HttpURLConnection) url.openConnection();
- conn.setRequestMethod(HttpMethod.PUT);
- conn.setRequestProperty("Content-Type", MediaType.APPLICATION_OCTET_STREAM);
- conn.setDoOutput(true);
- conn.connect();
- Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
- JSONObject json = (JSONObject) new JSONParser()
- .parse(new InputStreamReader(conn.getInputStream()));
-
- // get the location to write
- String location = (String) json.get("Location");
- Assert.assertTrue(location.contains(DataParam.NAME));
- Assert.assertTrue(location.contains("CREATE"));
- url = new URL(location);
- conn = (HttpURLConnection) url.openConnection();
- conn.setRequestMethod(HttpMethod.PUT);
- conn.setRequestProperty("Content-Type", MediaType.APPLICATION_OCTET_STREAM);
- conn.setDoOutput(true);
- conn.connect();
- final String writeStr = "write some content";
- OutputStream os = conn.getOutputStream();
- os.write(writeStr.getBytes());
- os.close();
- // Verify that file got created
- Assert.assertEquals(HttpURLConnection.HTTP_CREATED, conn.getResponseCode());
- json = (JSONObject) new JSONParser()
- .parse(new InputStreamReader(conn.getInputStream()));
- location = (String) json.get("Location");
- Assert.assertEquals(TestJettyHelper.getJettyURL() + "/webhdfs/v1" + path,
- location);
- }
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testContentType() throws Exception {
- createHttpFSServer(false, false);
- FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
- Path dir = new Path("/tmp");
- Path file = new Path(dir, "foo");
- fs.mkdirs(dir);
- fs.create(file);
-
- String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
- URL url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format(
- "/webhdfs/v1/tmp/foo?user.name={0}&op=open&offset=1&length=2", user));
-
- // Test jsonParse with a non-JSON content type.
- final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
- conn.setRequestMethod(Operation.OPEN.getMethod());
- conn.connect();
-
- LambdaTestUtils.intercept(IOException.class,
- "Content-Type \"text/html;charset=iso-8859-1\" "
- + "is incompatible with \"application/json\"",
- () -> HttpFSUtils.jsonParse(conn));
- conn.disconnect();
- }
-}
diff --git a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerNoACLs.java b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerNoACLs.java
deleted file mode 100644
index c679dba..0000000
--- a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerNoACLs.java
+++ /dev/null
@@ -1,283 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.http.server;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.test.HTestCase;
-import org.apache.hadoop.test.HadoopUsersConfTestHelper;
-import org.apache.hadoop.test.TestDir;
-import org.apache.hadoop.test.TestDirHelper;
-import org.apache.hadoop.test.TestJetty;
-import org.apache.hadoop.test.TestJettyHelper;
-import org.junit.Assert;
-import org.junit.Test;
-import org.eclipse.jetty.server.Server;
-import org.eclipse.jetty.webapp.WebAppContext;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.OutputStream;
-import java.io.Writer;
-import java.net.HttpURLConnection;
-import java.net.URL;
-import java.text.MessageFormat;
-
-/**
- * This test class ensures that everything works as expected when ACL
- * support is turned off in HDFS. This is the default configuration. The
- * other tests operate with ACL support turned on.
- */
-public class TestHttpFSServerNoACLs extends HTestCase {
-
- private MiniDFSCluster miniDfs;
- private Configuration nnConf;
-
- /**
- * Fire up our own hand-rolled MiniDFSCluster. We do this here instead
- * of relying on TestHdfsHelper because we don't want to turn on ACL
- * support.
- *
- * @throws Exception
- */
- private void startMiniDFS() throws Exception {
-
- File testDirRoot = TestDirHelper.getTestDir();
-
- if (System.getProperty("hadoop.log.dir") == null) {
- System.setProperty("hadoop.log.dir",
- new File(testDirRoot, "hadoop-log").getAbsolutePath());
- }
- if (System.getProperty("test.build.data") == null) {
- System.setProperty("test.build.data",
- new File(testDirRoot, "hadoop-data").getAbsolutePath());
- }
-
- Configuration conf = HadoopUsersConfTestHelper.getBaseConf();
- HadoopUsersConfTestHelper.addUserConf(conf);
- conf.set("fs.hdfs.impl.disable.cache", "true");
- conf.set("dfs.block.access.token.enable", "false");
- conf.set("dfs.permissions", "true");
- conf.set("hadoop.security.authentication", "simple");
-
- // Explicitly turn off ACL support
- conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, false);
-
- MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
- builder.numDataNodes(2);
- miniDfs = builder.build();
- nnConf = miniDfs.getConfiguration(0);
- }
-
- /**
- * Create an HttpFS Server to talk to the MiniDFSCluster we created.
- * @throws Exception
- */
- private void createHttpFSServer() throws Exception {
- File homeDir = TestDirHelper.getTestDir();
- Assert.assertTrue(new File(homeDir, "conf").mkdir());
- Assert.assertTrue(new File(homeDir, "log").mkdir());
- Assert.assertTrue(new File(homeDir, "temp").mkdir());
- HttpFSServerWebApp.setHomeDirForCurrentThread(homeDir.getAbsolutePath());
-
- File secretFile = new File(new File(homeDir, "conf"), "secret");
- Writer w = new FileWriter(secretFile);
- w.write("secret");
- w.close();
-
- // HDFS configuration
- File hadoopConfDir = new File(new File(homeDir, "conf"), "hadoop-conf");
- if (!hadoopConfDir.mkdirs()) {
- throw new IOException("Failed to create " + hadoopConfDir);
- }
-
- String fsDefaultName =
- nnConf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
- Configuration conf = new Configuration(false);
- conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName);
-
- // Explicitly turn off ACLs, just in case the default becomes true later
- conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, false);
-
- File hdfsSite = new File(hadoopConfDir, "hdfs-site.xml");
- OutputStream os = new FileOutputStream(hdfsSite);
- conf.writeXml(os);
- os.close();
-
- // HTTPFS configuration
- conf = new Configuration(false);
- conf.set("httpfs.hadoop.config.dir", hadoopConfDir.toString());
- conf.set("httpfs.proxyuser." +
- HadoopUsersConfTestHelper.getHadoopProxyUser() + ".groups",
- HadoopUsersConfTestHelper.getHadoopProxyUserGroups());
- conf.set("httpfs.proxyuser." +
- HadoopUsersConfTestHelper.getHadoopProxyUser() + ".hosts",
- HadoopUsersConfTestHelper.getHadoopProxyUserHosts());
- conf.set("httpfs.authentication.signature.secret.file",
- secretFile.getAbsolutePath());
-
- File httpfsSite = new File(new File(homeDir, "conf"), "httpfs-site.xml");
- os = new FileOutputStream(httpfsSite);
- conf.writeXml(os);
- os.close();
-
- ClassLoader cl = Thread.currentThread().getContextClassLoader();
- URL url = cl.getResource("webapp");
- if (url == null) {
- throw new IOException("webapp resource not found on the classpath");
- }
- WebAppContext context = new WebAppContext(url.getPath(), "/webhdfs");
- Server server = TestJettyHelper.getJettyServer();
- server.setHandler(context);
- server.start();
- }
-
- /**
- * Talks to the http interface to get the json output of a *STATUS command
- * on the given file.
- *
- * @param filename The file to query.
- * @param command Either GETFILESTATUS, LISTSTATUS, or GETACLSTATUS
- * @param expectOK Is this operation expected to succeed?
- * @throws Exception
- */
- private void getStatus(String filename, String command, boolean expectOK)
- throws Exception {
- String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
- // Remove leading / from filename
- if (filename.charAt(0) == '/') {
- filename = filename.substring(1);
- }
- String pathOps = MessageFormat.format(
- "/webhdfs/v1/{0}?user.name={1}&op={2}",
- filename, user, command);
- URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
- HttpURLConnection conn = (HttpURLConnection) url.openConnection();
- conn.connect();
- int resp = conn.getResponseCode();
- BufferedReader reader;
- if (expectOK) {
- Assert.assertEquals(HttpURLConnection.HTTP_OK, resp);
- reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
- String res = reader.readLine();
- Assert.assertTrue(!res.contains("aclBit"));
- Assert.assertTrue(res.contains("owner")); // basic sanity check
- } else {
- Assert.assertEquals(HttpURLConnection.HTTP_INTERNAL_ERROR, resp);
- reader = new BufferedReader(new InputStreamReader(conn.getErrorStream()));
- String res = reader.readLine();
- Assert.assertTrue(res.contains("AclException"));
- Assert.assertTrue(res.contains("Support for ACLs has been disabled"));
- }
- }
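
For reference, the error branch above matches against a response body of roughly the following shape; the exact message text comes from the NameNode, so this reconstruction from the asserted substrings is illustrative only:

    // Approximate error body when dfs.namenode.acls.enabled is false:
    // {"RemoteException":{"exception":"AclException",
    //   "message":"... Support for ACLs has been disabled ..."}}
    String[] expectedFragments = {"AclException",
        "Support for ACLs has been disabled"};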
-
- /**
- * General-purpose http PUT command to the httpfs server.
- * @param filename The file to operate upon
- * @param command The command to perform (SETACL, etc)
- * @param params Parameters, like "aclspec=..."
- */
- private void putCmd(String filename, String command,
- String params, boolean expectOK) throws Exception {
- String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
- // Remove leading / from filename
- if (filename.charAt(0) == '/') {
- filename = filename.substring(1);
- }
- String pathOps = MessageFormat.format(
- "/webhdfs/v1/{0}?user.name={1}{2}{3}&op={4}",
- filename, user, (params == null) ? "" : "&",
- (params == null) ? "" : params, command);
- URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
- HttpURLConnection conn = (HttpURLConnection) url.openConnection();
- conn.setRequestMethod("PUT");
- conn.connect();
- int resp = conn.getResponseCode();
- if (expectOK) {
- Assert.assertEquals(HttpURLConnection.HTTP_OK, resp);
- } else {
- Assert.assertEquals(HttpURLConnection.HTTP_INTERNAL_ERROR, resp);
- BufferedReader reader =
- new BufferedReader(new InputStreamReader(conn.getErrorStream()));
- String err = reader.readLine();
- Assert.assertTrue(err.contains("AclException"));
- Assert.assertTrue(err.contains("Support for ACLs has been disabled"));
- }
- }
-
- /**
- * Test without ACLs.
- * Ensure that
- * <ol>
- * <li>GETFILESTATUS and LISTSTATUS work happily</li>
- * <li>ACLSTATUS throws an exception</li>
- * <li>The ACL SET, REMOVE, etc calls all fail</li>
- * </ol>
- *
- * @throws Exception
- */
- @Test
- @TestDir
- @TestJetty
- public void testWithNoAcls() throws Exception {
- final String aclUser1 = "user:foo:rw-";
- final String rmAclUser1 = "user:foo:";
- final String aclUser2 = "user:bar:r--";
- final String aclGroup1 = "group::r--";
- final String aclSpec = "aclspec=user::rwx," + aclUser1 + ","
- + aclGroup1 + ",other::---";
- final String modAclSpec = "aclspec=" + aclUser2;
- final String remAclSpec = "aclspec=" + rmAclUser1;
- final String defUser1 = "default:user:glarch:r-x";
- final String defSpec1 = "aclspec=" + defUser1;
- final String dir = "/noACLs";
- final String path = dir + "/foo";
-
- startMiniDFS();
- createHttpFSServer();
-
- FileSystem fs = FileSystem.get(nnConf);
- fs.mkdirs(new Path(dir));
- OutputStream os = fs.create(new Path(path));
- os.write(1);
- os.close();
-
- /* The normal status calls work as expected; GETACLSTATUS fails */
- getStatus(path, "GETFILESTATUS", true);
- getStatus(dir, "LISTSTATUS", true);
- getStatus(path, "GETACLSTATUS", false);
-
- /* All the ACL-based PUT commands fail with ACL exceptions */
- putCmd(path, "SETACL", aclSpec, false);
- putCmd(path, "MODIFYACLENTRIES", modAclSpec, false);
- putCmd(path, "REMOVEACLENTRIES", remAclSpec, false);
- putCmd(path, "REMOVEACL", null, false);
- putCmd(dir, "SETACL", defSpec1, false);
- putCmd(dir, "REMOVEDEFAULTACL", null, false);
-
- miniDfs.shutdown();
- }
-}
diff --git a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerNoXAttrs.java b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerNoXAttrs.java
deleted file mode 100644
index 270989b..0000000
--- a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerNoXAttrs.java
+++ /dev/null
@@ -1,248 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.http.server;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.test.HTestCase;
-import org.apache.hadoop.test.HadoopUsersConfTestHelper;
-import org.apache.hadoop.test.TestDir;
-import org.apache.hadoop.test.TestDirHelper;
-import org.apache.hadoop.test.TestHdfs;
-import org.apache.hadoop.test.TestJetty;
-import org.apache.hadoop.test.TestJettyHelper;
-import org.junit.Assert;
-import org.junit.Test;
-import org.eclipse.jetty.server.Server;
-import org.eclipse.jetty.webapp.WebAppContext;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.OutputStream;
-import java.io.Writer;
-import java.net.HttpURLConnection;
-import java.net.URL;
-import java.text.MessageFormat;
-
-/**
- * This test class ensures that everything works as expected when XAttr
- * support is turned off in HDFS. This is the default configuration. The
- * other tests operate with XAttr support turned on.
- */
-public class TestHttpFSServerNoXAttrs extends HTestCase {
-
- private MiniDFSCluster miniDfs;
- private Configuration nnConf;
-
- /**
- * Fire up our own hand-rolled MiniDFSCluster. We do this here instead
- * of relying on TestHdfsHelper because we don't want to turn on XAttr
- * support.
- *
- * @throws Exception
- */
- private void startMiniDFS() throws Exception {
-
- File testDirRoot = TestDirHelper.getTestDir();
-
- if (System.getProperty("hadoop.log.dir") == null) {
- System.setProperty("hadoop.log.dir",
- new File(testDirRoot, "hadoop-log").getAbsolutePath());
- }
- if (System.getProperty("test.build.data") == null) {
- System.setProperty("test.build.data",
- new File(testDirRoot, "hadoop-data").getAbsolutePath());
- }
-
- Configuration conf = HadoopUsersConfTestHelper.getBaseConf();
- HadoopUsersConfTestHelper.addUserConf(conf);
- conf.set("fs.hdfs.impl.disable.cache", "true");
- conf.set("dfs.block.access.token.enable", "false");
- conf.set("dfs.permissions", "true");
- conf.set("hadoop.security.authentication", "simple");
-
- // Explicitly turn off XAttr support
- conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, false);
-
- MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
- builder.numDataNodes(2);
- miniDfs = builder.build();
- nnConf = miniDfs.getConfiguration(0);
- }
-
- /**
- * Create an HttpFS Server to talk to the MiniDFSCluster we created.
- * @throws Exception
- */
- private void createHttpFSServer() throws Exception {
- File homeDir = TestDirHelper.getTestDir();
- Assert.assertTrue(new File(homeDir, "conf").mkdir());
- Assert.assertTrue(new File(homeDir, "log").mkdir());
- Assert.assertTrue(new File(homeDir, "temp").mkdir());
- HttpFSServerWebApp.setHomeDirForCurrentThread(homeDir.getAbsolutePath());
-
- File secretFile = new File(new File(homeDir, "conf"), "secret");
- Writer w = new FileWriter(secretFile);
- w.write("secret");
- w.close();
-
- // HDFS configuration
- File hadoopConfDir = new File(new File(homeDir, "conf"), "hadoop-conf");
- if (!hadoopConfDir.mkdirs()) {
- throw new IOException("Failed to create " + hadoopConfDir);
- }
-
- String fsDefaultName =
- nnConf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
- Configuration conf = new Configuration(false);
- conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName);
-
- // Explicitly turn off XAttr support
- conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, false);
-
- File hdfsSite = new File(hadoopConfDir, "hdfs-site.xml");
- OutputStream os = new FileOutputStream(hdfsSite);
- conf.writeXml(os);
- os.close();
-
- // HTTPFS configuration
- conf = new Configuration(false);
- conf.set("httpfs.hadoop.config.dir", hadoopConfDir.toString());
- conf.set("httpfs.proxyuser." +
- HadoopUsersConfTestHelper.getHadoopProxyUser() + ".groups",
- HadoopUsersConfTestHelper.getHadoopProxyUserGroups());
- conf.set("httpfs.proxyuser." +
- HadoopUsersConfTestHelper.getHadoopProxyUser() + ".hosts",
- HadoopUsersConfTestHelper.getHadoopProxyUserHosts());
- conf.set("httpfs.authentication.signature.secret.file",
- secretFile.getAbsolutePath());
-
- File httpfsSite = new File(new File(homeDir, "conf"), "httpfs-site.xml");
- os = new FileOutputStream(httpfsSite);
- conf.writeXml(os);
- os.close();
-
- ClassLoader cl = Thread.currentThread().getContextClassLoader();
- URL url = cl.getResource("webapp");
- if (url == null) {
- throw new IOException();
- }
- WebAppContext context = new WebAppContext(url.getPath(), "/webhdfs");
- Server server = TestJettyHelper.getJettyServer();
- server.setHandler(context);
- server.start();
- }
-
- /**
- * Talks to the HTTP interface to run a read-only (GET) command against the
- * given file and asserts that the server rejects it, since XAttr support
- * is disabled.
- *
- * @param filename The file to query.
- * @param command The GET-style operation to run, e.g. GETXATTRS.
- * @throws Exception
- */
- private void getStatus(String filename, String command)
- throws Exception {
- String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
- // Remove leading / from filename
- if (filename.charAt(0) == '/') {
- filename = filename.substring(1);
- }
- String pathOps = MessageFormat.format(
- "/webhdfs/v1/{0}?user.name={1}&op={2}",
- filename, user, command);
- URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
- HttpURLConnection conn = (HttpURLConnection) url.openConnection();
- conn.connect();
- int resp = conn.getResponseCode();
- BufferedReader reader;
- Assert.assertEquals(HttpURLConnection.HTTP_INTERNAL_ERROR, resp);
- reader = new BufferedReader(new InputStreamReader(conn.getErrorStream()));
- String res = reader.readLine();
- Assert.assertTrue(res.contains("RemoteException"));
- Assert.assertTrue(res.contains("XAttr"));
- Assert.assertTrue(res.contains("rejected"));
- }
-
- /**
- * Issues an HTTP PUT command to the HttpFS server and asserts that it is
- * rejected, since XAttr support is disabled.
- * @param filename The file to operate upon
- * @param command The command to perform (SETXATTR, etc.)
- * @param params Query parameters, or null for none
- */
- private void putCmd(String filename, String command,
- String params) throws Exception {
- String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
- // Remove leading / from filename
- if (filename.charAt(0) == '/') {
- filename = filename.substring(1);
- }
- String pathOps = MessageFormat.format(
- "/webhdfs/v1/{0}?user.name={1}{2}{3}&op={4}",
- filename, user, (params == null) ? "" : "&",
- (params == null) ? "" : params, command);
- URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
- HttpURLConnection conn = (HttpURLConnection) url.openConnection();
- conn.setRequestMethod("PUT");
- conn.connect();
- int resp = conn.getResponseCode();
- Assert.assertEquals(HttpURLConnection.HTTP_INTERNAL_ERROR, resp);
- BufferedReader reader;
- reader = new BufferedReader(new InputStreamReader(conn.getErrorStream()));
- String err = reader.readLine();
- Assert.assertTrue(err.contains("RemoteException"));
- Assert.assertTrue(err.contains("XAttr"));
- Assert.assertTrue(err.contains("rejected"));
- }
-
- /**
- * Ensure that GETXATTRS, SETXATTR, REMOVEXATTR fail.
- */
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testWithXAttrs() throws Exception {
- final String name1 = "user.a1";
- final byte[] value1 = new byte[]{0x31, 0x32, 0x33};
- final String dir = "/noXAttr";
- final String path = dir + "/file";
-
- startMiniDFS();
- createHttpFSServer();
-
- FileSystem fs = FileSystem.get(nnConf);
- fs.mkdirs(new Path(dir));
- OutputStream os = fs.create(new Path(path));
- os.write(1);
- os.close();
-
- /* GETXATTRS, SETXATTR, REMOVEXATTR fail */
- getStatus(path, "GETXATTRS");
- putCmd(path, "SETXATTR", TestHttpFSServer.setXAttrParam(name1, value1));
- putCmd(path, "REMOVEXATTR", "xattr.name=" + name1);
- }
-}
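For context, the removed test above drove its XAttr checks through plain webhdfs REST calls and only asserted the rejection. Below is a minimal standalone sketch of that request pattern; the endpoint address, path, and user name are illustrative assumptions, not values taken from this change.

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class XAttrRejectionCheck {
      public static void main(String[] args) throws Exception {
        // Assumed HttpFS endpoint and user; adjust to your gateway.
        URL url = new URL("http://localhost:14000/webhdfs/v1/noXAttr/file"
            + "?user.name=hdfs&op=GETXATTRS");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.connect();
        // With XAttr support disabled, the server answers HTTP 500 with a
        // RemoteException JSON body saying the XAttr operation was rejected.
        if (conn.getResponseCode() == HttpURLConnection.HTTP_INTERNAL_ERROR) {
          BufferedReader reader = new BufferedReader(
              new InputStreamReader(conn.getErrorStream()));
          System.out.println(reader.readLine());
          reader.close();
        }
        conn.disconnect();
      }
    }

The same pattern applies to the PUT-based SETXATTR and REMOVEXATTR checks, with conn.setRequestMethod("PUT") before connecting.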
diff --git a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerWebServer.java b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerWebServer.java
deleted file mode 100644
index 97d41d3..0000000
--- a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerWebServer.java
+++ /dev/null
@@ -1,123 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.http.server;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.InputStreamReader;
-import java.net.HttpURLConnection;
-import java.net.URL;
-import java.nio.charset.StandardCharsets;
-import java.text.MessageFormat;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.HadoopUsersConfTestHelper;
-import org.apache.hadoop.util.Shell;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-
-/**
- * Test {@link HttpFSServerWebServer}.
- */
-public class TestHttpFSServerWebServer {
-
- @Rule
- public Timeout timeout = new Timeout(30000);
- private HttpFSServerWebServer webServer;
-
- @BeforeClass
- public static void beforeClass() throws Exception {
- File homeDir = GenericTestUtils.getTestDir();
- File confDir = new File(homeDir, "etc/hadoop");
- File logsDir = new File(homeDir, "logs");
- File tempDir = new File(homeDir, "temp");
- confDir.mkdirs();
- logsDir.mkdirs();
- tempDir.mkdirs();
-
- if (Shell.WINDOWS) {
- File binDir = new File(homeDir, "bin");
- binDir.mkdirs();
- File winutils = Shell.getWinUtilsFile();
- if (winutils.exists()) {
- FileUtils.copyFileToDirectory(winutils, binDir);
- }
- }
-
- System.setProperty("hadoop.home.dir", homeDir.getAbsolutePath());
- System.setProperty("hadoop.log.dir", logsDir.getAbsolutePath());
- System.setProperty("httpfs.home.dir", homeDir.getAbsolutePath());
- System.setProperty("httpfs.log.dir", logsDir.getAbsolutePath());
- System.setProperty("httpfs.config.dir", confDir.getAbsolutePath());
- FileUtils.writeStringToFile(new File(confDir, "httpfs-signature.secret"),
- "foo", StandardCharsets.UTF_8);
- }
-
- @Before
- public void setUp() throws Exception {
- Configuration conf = new Configuration();
- conf.set(HttpFSServerWebServer.HTTP_HOSTNAME_KEY, "localhost");
- conf.setInt(HttpFSServerWebServer.HTTP_PORT_KEY, 0);
- conf.set(AuthenticationFilter.SIGNATURE_SECRET_FILE,
- "httpfs-signature.secret");
- Configuration sslConf = new Configuration();
- webServer = new HttpFSServerWebServer(conf, sslConf);
- }
-
- @Test
- public void testStartStop() throws Exception {
- webServer.start();
- String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
- URL url = new URL(webServer.getUrl(), MessageFormat.format(
- "/webhdfs/v1/?user.name={0}&op=liststatus", user));
- HttpURLConnection conn = (HttpURLConnection) url.openConnection();
- Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
- BufferedReader reader = new BufferedReader(
- new InputStreamReader(conn.getInputStream()));
- reader.readLine();
- reader.close();
- webServer.stop();
- }
-
- @Test
- public void testJustStop() throws Exception {
- webServer.stop();
- }
-
- @Test
- public void testDoubleStop() throws Exception {
- webServer.start();
- webServer.stop();
- webServer.stop();
- }
-
- @Test
- public void testDoubleStart() throws Exception {
- webServer.start();
- webServer.start();
- webServer.stop();
- }
-
-}
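The lifecycle cases this removed test covered (stop before start, double stop, double start) all reduce to making start() and stop() idempotent. A toy sketch of that contract, purely for illustration and not the HttpFSServerWebServer implementation:

    public class ToyWebServer {
      private boolean running;

      public synchronized void start() {
        if (!running) {
          running = true; // a real server would bind its connector here
        }
      }

      public synchronized void stop() {
        // stop() is a no-op when the server never started or already stopped
        if (running) {
          running = false; // a real server would unbind and clean up here
        }
      }

      public static void main(String[] args) {
        ToyWebServer s = new ToyWebServer();
        s.stop();  // stop before start: must not throw
        s.start();
        s.start(); // double start: must not throw
        s.stop();
        s.stop();  // double stop: must not throw
      }
    }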
diff --git a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerWebServerWithRandomSecret.java b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerWebServerWithRandomSecret.java
deleted file mode 100644
index b8e902a..0000000
--- a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerWebServerWithRandomSecret.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.http.server;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.Shell;
-import org.junit.BeforeClass;
-
-import java.io.File;
-
-/**
- * Unlike in {@link TestHttpFSServerWebServer}, the httpfs-signature.secret
- * file does not exist here, so a random secret is used instead.
- */
-public class TestHttpFSServerWebServerWithRandomSecret extends
- TestHttpFSServerWebServer {
- @BeforeClass
- public static void beforeClass() throws Exception {
- File homeDir = GenericTestUtils.getTestDir();
- File confDir = new File(homeDir, "etc/hadoop");
- File logsDir = new File(homeDir, "logs");
- File tempDir = new File(homeDir, "temp");
- confDir.mkdirs();
- logsDir.mkdirs();
- tempDir.mkdirs();
-
- if (Shell.WINDOWS) {
- File binDir = new File(homeDir, "bin");
- binDir.mkdirs();
- File winutils = Shell.getWinUtilsFile();
- if (winutils.exists()) {
- FileUtils.copyFileToDirectory(winutils, binDir);
- }
- }
-
- System.setProperty("hadoop.home.dir", homeDir.getAbsolutePath());
- System.setProperty("hadoop.log.dir", logsDir.getAbsolutePath());
- System.setProperty("httpfs.home.dir", homeDir.getAbsolutePath());
- System.setProperty("httpfs.log.dir", logsDir.getAbsolutePath());
- System.setProperty("httpfs.config.dir", confDir.getAbsolutePath());
- }
-}
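The fallback this removed test exercised is: when the signature secret file is absent, the server generates a random secret rather than failing startup. A hedged sketch of one reasonable way to implement that fallback (the SecureRandom/Base64 choice is an assumption, not the HttpFS source):

    import java.io.File;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.security.SecureRandom;
    import java.util.Base64;

    public class SignatureSecret {
      public static String load(File secretFile) throws Exception {
        if (secretFile.exists()) {
          return new String(Files.readAllBytes(secretFile.toPath()),
              StandardCharsets.UTF_8).trim();
        }
        // No secret file: fall back to a random, process-local secret, so
        // the server still starts but signatures won't survive a restart.
        byte[] random = new byte[32];
        new SecureRandom().nextBytes(random);
        return Base64.getEncoder().encodeToString(random);
      }

      public static void main(String[] args) throws Exception {
        System.out.println(load(new File("httpfs-signature.secret")));
      }
    }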
diff --git a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSWithKerberos.java b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSWithKerberos.java
deleted file mode 100644
index fafeff0..0000000
--- a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSWithKerberos.java
+++ /dev/null
@@ -1,292 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.http.server;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.fs.DelegationTokenRenewer;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
-import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
-import org.apache.hadoop.test.HFSTestCase;
-import org.apache.hadoop.test.KerberosTestUtils;
-import org.apache.hadoop.test.TestDir;
-import org.apache.hadoop.test.TestDirHelper;
-import org.apache.hadoop.test.TestHdfs;
-import org.apache.hadoop.test.TestHdfsHelper;
-import org.apache.hadoop.test.TestJetty;
-import org.apache.hadoop.test.TestJettyHelper;
-import org.json.simple.JSONObject;
-import org.json.simple.parser.JSONParser;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Test;
-import org.eclipse.jetty.server.Server;
-import org.eclipse.jetty.webapp.WebAppContext;
-
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.FileWriter;
-import java.io.InputStreamReader;
-import java.io.OutputStream;
-import java.io.Writer;
-import java.net.HttpURLConnection;
-import java.net.URI;
-import java.net.URL;
-import java.security.PrivilegedExceptionAction;
-import java.util.concurrent.Callable;
-
-public class TestHttpFSWithKerberos extends HFSTestCase {
-
- @After
- public void resetUGI() {
- Configuration conf = new Configuration();
- UserGroupInformation.setConfiguration(conf);
- }
-
- private void createHttpFSServer() throws Exception {
- File homeDir = TestDirHelper.getTestDir();
- Assert.assertTrue(new File(homeDir, "conf").mkdir());
- Assert.assertTrue(new File(homeDir, "log").mkdir());
- Assert.assertTrue(new File(homeDir, "temp").mkdir());
- HttpFSServerWebApp.setHomeDirForCurrentThread(homeDir.getAbsolutePath());
-
- File secretFile = new File(new File(homeDir, "conf"), "secret");
- Writer w = new FileWriter(secretFile);
- w.write("secret");
- w.close();
-
- //HDFS configuration
- File hadoopConfDir = new File(new File(homeDir, "conf"), "hadoop-conf");
- hadoopConfDir.mkdirs();
- String fsDefaultName = TestHdfsHelper.getHdfsConf()
- .get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
- Configuration conf = new Configuration(false);
- conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName);
- File hdfsSite = new File(hadoopConfDir, "hdfs-site.xml");
- OutputStream os = new FileOutputStream(hdfsSite);
- conf.writeXml(os);
- os.close();
-
- conf = new Configuration(false);
- conf.set("httpfs.proxyuser.client.hosts", "*");
- conf.set("httpfs.proxyuser.client.groups", "*");
-
- conf.set("httpfs.authentication.type", "kerberos");
-
- conf.set("httpfs.authentication.signature.secret.file",
- secretFile.getAbsolutePath());
- File httpfsSite = new File(new File(homeDir, "conf"), "httpfs-site.xml");
- os = new FileOutputStream(httpfsSite);
- conf.writeXml(os);
- os.close();
-
- ClassLoader cl = Thread.currentThread().getContextClassLoader();
- URL url = cl.getResource("webapp");
- WebAppContext context = new WebAppContext(url.getPath(), "/webhdfs");
- Server server = TestJettyHelper.getJettyServer();
- server.setHandler(context);
- server.start();
- HttpFSServerWebApp.get().setAuthority(TestJettyHelper.getAuthority());
- }
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testValidHttpFSAccess() throws Exception {
- createHttpFSServer();
-
- KerberosTestUtils.doAsClient(new Callable<Void>() {
- @Override
- public Void call() throws Exception {
- URL url = new URL(TestJettyHelper.getJettyURL(),
- "/webhdfs/v1/?op=GETHOMEDIRECTORY");
- AuthenticatedURL aUrl = new AuthenticatedURL();
- AuthenticatedURL.Token aToken = new AuthenticatedURL.Token();
- HttpURLConnection conn = aUrl.openConnection(url, aToken);
- Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
- return null;
- }
- });
- }
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testInvalidHttpFSAccess() throws Exception {
- createHttpFSServer();
-
- URL url = new URL(TestJettyHelper.getJettyURL(),
- "/webhdfs/v1/?op=GETHOMEDIRECTORY");
- HttpURLConnection conn = (HttpURLConnection) url.openConnection();
- Assert.assertEquals(conn.getResponseCode(),
- HttpURLConnection.HTTP_UNAUTHORIZED);
- }
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testDelegationTokenHttpFSAccess() throws Exception {
- createHttpFSServer();
-
- KerberosTestUtils.doAsClient(new Callable<Void>() {
- @Override
- public Void call() throws Exception {
- //get delegation token doing SPNEGO authentication
- URL url = new URL(TestJettyHelper.getJettyURL(),
- "/webhdfs/v1/?op=GETDELEGATIONTOKEN");
- AuthenticatedURL aUrl = new AuthenticatedURL();
- AuthenticatedURL.Token aToken = new AuthenticatedURL.Token();
- HttpURLConnection conn = aUrl.openConnection(url, aToken);
- Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
- JSONObject json = (JSONObject) new JSONParser()
- .parse(new InputStreamReader(conn.getInputStream()));
- json =
- (JSONObject) json
- .get(DelegationTokenAuthenticator.DELEGATION_TOKEN_JSON);
- String tokenStr = (String) json
- .get(DelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
-
- //access httpfs using the delegation token
- url = new URL(TestJettyHelper.getJettyURL(),
- "/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" +
- tokenStr);
- conn = (HttpURLConnection) url.openConnection();
- Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
-
- //try to renew the delegation token without SPNEGO credentials
- url = new URL(TestJettyHelper.getJettyURL(),
- "/webhdfs/v1/?op=RENEWDELEGATIONTOKEN&token=" + tokenStr);
- conn = (HttpURLConnection) url.openConnection();
- conn.setRequestMethod("PUT");
- Assert.assertEquals(conn.getResponseCode(),
- HttpURLConnection.HTTP_UNAUTHORIZED);
-
- //renew the delegation token with SPNEGO credentials
- url = new URL(TestJettyHelper.getJettyURL(),
- "/webhdfs/v1/?op=RENEWDELEGATIONTOKEN&token=" + tokenStr);
- conn = aUrl.openConnection(url, aToken);
- conn.setRequestMethod("PUT");
- Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
-
- //cancel delegation token, no need for SPNEGO credentials
- url = new URL(TestJettyHelper.getJettyURL(),
- "/webhdfs/v1/?op=CANCELDELEGATIONTOKEN&token=" +
- tokenStr);
- conn = (HttpURLConnection) url.openConnection();
- conn.setRequestMethod("PUT");
- Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
-
- //try to access httpfs with the canceled delegation token
- url = new URL(TestJettyHelper.getJettyURL(),
- "/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" +
- tokenStr);
- conn = (HttpURLConnection) url.openConnection();
- Assert.assertEquals(conn.getResponseCode(),
- HttpURLConnection.HTTP_UNAUTHORIZED);
- return null;
- }
- });
- }
-
- @SuppressWarnings("deprecation")
- private void testDelegationTokenWithFS(Class fileSystemClass)
- throws Exception {
- createHttpFSServer();
- Configuration conf = new Configuration();
- conf.set("fs.webhdfs.impl", fileSystemClass.getName());
- conf.set("fs.hdfs.impl.disable.cache", "true");
- URI uri = new URI( "webhdfs://" +
- TestJettyHelper.getJettyURL().toURI().getAuthority());
- FileSystem fs = FileSystem.get(uri, conf);
- Token<?> tokens[] = fs.addDelegationTokens("foo", null);
- fs.close();
- Assert.assertEquals(1, tokens.length);
- fs = FileSystem.get(uri, conf);
- ((DelegationTokenRenewer.Renewable) fs).setDelegationToken(tokens[0]);
- fs.listStatus(new Path("/"));
- fs.close();
- }
-
- private void testDelegationTokenWithinDoAs(
- final Class fileSystemClass, boolean proxyUser) throws Exception {
- Configuration conf = new Configuration();
- conf.set("hadoop.security.authentication", "kerberos");
- UserGroupInformation.setConfiguration(conf);
- UserGroupInformation.loginUserFromKeytab("client",
- "/Users/tucu/tucu.keytab");
- UserGroupInformation ugi = UserGroupInformation.getLoginUser();
- if (proxyUser) {
- ugi = UserGroupInformation.createProxyUser("foo", ugi);
- }
- conf = new Configuration();
- UserGroupInformation.setConfiguration(conf);
- ugi.doAs(
- new PrivilegedExceptionAction<Void>() {
- @Override
- public Void run() throws Exception {
- testDelegationTokenWithFS(fileSystemClass);
- return null;
- }
- });
- }
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testDelegationTokenWithHttpFSFileSystem() throws Exception {
- testDelegationTokenWithinDoAs(HttpFSFileSystem.class, false);
- }
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testDelegationTokenWithWebhdfsFileSystem() throws Exception {
- testDelegationTokenWithinDoAs(WebHdfsFileSystem.class, false);
- }
-
- @Test
- @TestDir
- @TestJetty
- @TestHdfs
- public void testDelegationTokenWithHttpFSFileSystemProxyUser()
- throws Exception {
- testDelegationTokenWithinDoAs(HttpFSFileSystem.class, true);
- }
-
- // TODO: WebHdfsFileSystem does not work with ProxyUser yet (HDFS-3509)
- // @Test
- // @TestDir
- // @TestJetty
- // @TestHdfs
- // public void testDelegationTokenWithWebhdfsFileSystemProxyUser()
- // throws Exception {
- // testDelegationTokenWithinDoAs(WebHdfsFileSystem.class, true);
- // }
-
-}
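The delegation-token lifecycle this removed test walked through is get, use, renew, cancel, all over the webhdfs REST API. A compact sketch of that sequence follows; the gateway address is an assumed value (the test used an embedded Jetty), and in the real flow the token fetch and renewal must be SPNEGO-authenticated.

    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import org.json.simple.JSONObject;
    import org.json.simple.parser.JSONParser;

    public class DelegationTokenFlow {
      // Assumed gateway address; adjust to your deployment.
      private static final String BASE = "http://localhost:14000/webhdfs/v1/";

      public static void main(String[] args) throws Exception {
        // 1. Obtain a token. In the real flow this request is SPNEGO-authenticated.
        HttpURLConnection conn = (HttpURLConnection)
            new URL(BASE + "?op=GETDELEGATIONTOKEN").openConnection();
        JSONObject json = (JSONObject) new JSONParser()
            .parse(new InputStreamReader(conn.getInputStream()));
        String tokenStr =
            (String) ((JSONObject) json.get("Token")).get("urlString");

        // 2. Use the token in place of Kerberos credentials.
        conn = (HttpURLConnection) new URL(
            BASE + "?op=GETHOMEDIRECTORY&delegation=" + tokenStr).openConnection();
        System.out.println("use: " + conn.getResponseCode());

        // 3. Renew the token (needs SPNEGO), then 4. cancel it (no SPNEGO needed).
        for (String op : new String[]{
            "RENEWDELEGATIONTOKEN", "CANCELDELEGATIONTOKEN"}) {
          conn = (HttpURLConnection) new URL(
              BASE + "?op=" + op + "&token=" + tokenStr).openConnection();
          conn.setRequestMethod("PUT");
          System.out.println(op + ": " + conn.getResponseCode());
        }
      }
    }

After cancellation, reusing the token should yield HTTP 401, which is exactly what the removed test asserted.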
diff --git a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/lang/TestRunnableCallable.java b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/lang/TestRunnableCallable.java
deleted file mode 100644
index 1520af8..0000000
--- a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/lang/TestRunnableCallable.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.lang;
-
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.concurrent.Callable;
-
-import org.apache.hadoop.test.HTestCase;
-import org.junit.Test;
-
-public class TestRunnableCallable extends HTestCase {
-
- public static class R implements Runnable {
- boolean RUN;
-
- @Override
- public void run() {
- RUN = true;
- }
- }
-
- public static class C implements Callable {
- boolean RUN;
-
- @Override
- public Object call() throws Exception {
- RUN = true;
- return null;
- }
- }
-
- public static class CEx implements Callable {
-
- @Override
- public Object call() throws Exception {
- throw new Exception();
- }
- }
-
- @Test
- public void runnable() throws Exception {
- R r = new R();
- RunnableCallable rc = new RunnableCallable(r);
- rc.run();
- assertTrue(r.RUN);
-
- r = new R();
- rc = new RunnableCallable(r);
- rc.call();
- assertTrue(r.RUN);
-
- assertEquals(rc.toString(), "R");
- }
-
- @Test
- public void callable() throws Exception {
- C c = new C();
- RunnableCallable rc = new RunnableCallable(c);
- rc.run();
- assertTrue(c.RUN);
-
- c = new C();
- rc = new RunnableCallable(c);
- rc.call();
- assertTrue(c.RUN);
-
- assertEquals(rc.toString(), "C");
- }
-
- @Test(expected = RuntimeException.class)
- public void callableExRun() throws Exception {
- CEx c = new CEx();
- RunnableCallable rc = new RunnableCallable(c);
- rc.run();
- }
-
-}
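What this removed test pinned down is the adapter behavior: one wrapper exposing both Runnable and Callable over either delegate, with checked exceptions from call() rethrown as RuntimeException when invoked via run(). A self-contained sketch of that idea (an illustration, not the org.apache.hadoop.lib.lang.RunnableCallable source):

    import java.util.concurrent.Callable;

    public class RunnableCallableSketch implements Runnable, Callable<Void> {
      private final Runnable runnable;
      private final Callable<?> callable;

      public RunnableCallableSketch(Runnable r) { runnable = r; callable = null; }
      public RunnableCallableSketch(Callable<?> c) { runnable = null; callable = c; }

      @Override
      public void run() {
        if (runnable != null) {
          runnable.run();
        } else {
          try {
            callable.call();
          } catch (Exception ex) {
            // run() cannot throw checked exceptions, so wrap them
            throw new RuntimeException(ex);
          }
        }
      }

      @Override
      public Void call() throws Exception {
        if (runnable != null) {
          runnable.run();
        } else {
          callable.call();
        }
        return null;
      }

      public static void main(String[] args) throws Exception {
        RunnableCallableSketch rc = new RunnableCallableSketch(
            (Callable<String>) () -> "done");
        rc.run();                      // exceptions from call() become unchecked
        System.out.println(rc.call()); // prints null: call() adapts, discards result
      }
    }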
diff --git a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/lang/TestXException.java b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/lang/TestXException.java
deleted file mode 100644
index 2869d47..0000000
--- a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/lang/TestXException.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.lang;
-
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-
-import org.apache.hadoop.test.HTestCase;
-import org.junit.Test;
-
-public class TestXException extends HTestCase {
-
- public enum TestERROR implements XException.ERROR {
- TC;
-
- @Override
- public String getTemplate() {
- return "{0}";
- }
- }
-
- @Test
- public void testXException() throws Exception {
- XException ex = new XException(TestERROR.TC);
- assertEquals(ex.getError(), TestERROR.TC);
- assertEquals(ex.getMessage(), "TC: {0}");
- assertNull(ex.getCause());
-
- ex = new XException(TestERROR.TC, "msg");
- assertEquals(ex.getError(), TestERROR.TC);
- assertEquals(ex.getMessage(), "TC: msg");
- assertNull(ex.getCause());
-
- Exception cause = new Exception();
- ex = new XException(TestERROR.TC, cause);
- assertEquals(ex.getError(), TestERROR.TC);
- assertEquals(ex.getMessage(), "TC: " + cause.toString());
- assertEquals(ex.getCause(), cause);
-
- XException xcause = ex;
- ex = new XException(xcause);
- assertEquals(ex.getError(), TestERROR.TC);
- assertEquals(ex.getMessage(), xcause.getMessage());
- assertEquals(ex.getCause(), xcause);
- }
-
-}
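The pattern this removed test verified is a templated exception: an ERROR enum supplies a MessageFormat template, and the message becomes "<ERROR name>: <formatted template>", with any Throwable parameter doubling as the cause. A minimal sketch under those assumptions (not the actual XException source):

    import java.text.MessageFormat;

    public class TemplatedException extends Exception {
      public interface ERROR {
        String getTemplate();
      }

      private final ERROR error;

      public TemplatedException(ERROR error, Object... params) {
        super(format(error, params), firstThrowable(params));
        this.error = error;
      }

      public ERROR getError() { return error; }

      private static String format(ERROR error, Object... params) {
        // assumes the ERROR is implemented by an enum, as in the removed test
        return ((Enum<?>) error).name() + ": "
            + MessageFormat.format(error.getTemplate(), params);
      }

      private static Throwable firstThrowable(Object... params) {
        for (Object p : params) {
          if (p instanceof Throwable) {
            return (Throwable) p; // first Throwable parameter becomes the cause
          }
        }
        return null;
      }

      enum SampleError implements ERROR {
        TC;
        @Override public String getTemplate() { return "{0}"; }
      }

      public static void main(String[] args) {
        // MessageFormat leaves {0} literal when no argument is supplied
        System.out.println(new TemplatedException(SampleError.TC).getMessage());
        System.out.println(
            new TemplatedException(SampleError.TC, "msg").getMessage());
      }
    }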
diff --git a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/server/TestBaseService.java b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/server/TestBaseService.java
deleted file mode 100644
index 402884b..0000000
--- a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/server/TestBaseService.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.server;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.test.HTestCase;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-public class TestBaseService extends HTestCase {
-
- public static class MyService extends BaseService {
- static Boolean INIT;
-
- public MyService() {
- super("myservice");
- }
-
- @Override
- protected void init() throws ServiceException {
- INIT = true;
- }
-
- @Override
- public Class getInterface() {
- return null;
- }
- }
-
- @Test
- public void baseService() throws Exception {
- BaseService service = new MyService();
- assertNull(service.getInterface());
- assertEquals(service.getPrefix(), "myservice");
- assertEquals(service.getServiceDependencies().length, 0);
-
- Server server = Mockito.mock(Server.class);
- Configuration conf = new Configuration(false);
- conf.set("server.myservice.foo", "FOO");
- conf.set("server.myservice1.bar", "BAR");
- Mockito.when(server.getConfig()).thenReturn(conf);
- Mockito.when(server.getPrefixedName("myservice.foo")).thenReturn("server.myservice.foo");
- Mockito.when(server.getPrefixedName("myservice.")).thenReturn("server.myservice.");
-
- service.init(server);
- assertEquals(service.getPrefixedName("foo"), "server.myservice.foo");
- assertEquals(service.getServiceConfig().size(), 1);
- assertEquals(service.getServiceConfig().get("foo"), "FOO");
- assertTrue(MyService.INIT);
- }
-}
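The core behavior asserted above is prefixed configuration slicing: a service named "myservice" sees only the keys under "server.myservice." with the prefix stripped, and keys of other services (e.g. "server.myservice1.") are excluded. A sketch of that slicing using plain java.util.Properties instead of Hadoop's Configuration, to stay self-contained:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Properties;

    public class PrefixedConfig {
      public static Map<String, String> serviceConfig(Properties conf,
          String service) {
        String prefix = "server." + service + ".";
        Map<String, String> out = new HashMap<>();
        for (String key : conf.stringPropertyNames()) {
          if (key.startsWith(prefix)) {
            // strip the "server.<service>." prefix for the service-local view
            out.put(key.substring(prefix.length()), conf.getProperty(key));
          }
        }
        return out;
      }

      public static void main(String[] args) {
        Properties conf = new Properties();
        conf.setProperty("server.myservice.foo", "FOO");
        conf.setProperty("server.myservice1.bar", "BAR"); // other service, excluded
        System.out.println(serviceConfig(conf, "myservice")); // {foo=FOO}
      }
    }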
diff --git a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/server/TestServer.java b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/server/TestServer.java
deleted file mode 100644
index a6a139f..0000000
--- a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/server/TestServer.java
+++ /dev/null
@@ -1,810 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.server;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.FileWriter;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.io.Writer;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.lib.lang.XException;
-import org.apache.hadoop.test.HTestCase;
-import org.apache.hadoop.test.TestDir;
-import org.apache.hadoop.test.TestDirHelper;
-import org.apache.hadoop.test.TestException;
-import org.apache.hadoop.util.Shell;
-import org.apache.hadoop.util.StringUtils;
-import org.junit.Test;
-
-public class TestServer extends HTestCase {
-
- @Test
- @TestDir
- public void constructorsGetters() throws Exception {
- Server server = new Server("server", getAbsolutePath("/a"),
- getAbsolutePath("/b"), getAbsolutePath("/c"), getAbsolutePath("/d"),
- new Configuration(false));
- assertEquals(server.getHomeDir(), getAbsolutePath("/a"));
- assertEquals(server.getConfigDir(), getAbsolutePath("/b"));
- assertEquals(server.getLogDir(), getAbsolutePath("/c"));
- assertEquals(server.getTempDir(), getAbsolutePath("/d"));
- assertEquals(server.getName(), "server");
- assertEquals(server.getPrefix(), "server");
- assertEquals(server.getPrefixedName("name"), "server.name");
- assertNotNull(server.getConfig());
-
- server = new Server("server", getAbsolutePath("/a"), getAbsolutePath("/b"),
- getAbsolutePath("/c"), getAbsolutePath("/d"));
- assertEquals(server.getHomeDir(), getAbsolutePath("/a"));
- assertEquals(server.getConfigDir(), getAbsolutePath("/b"));
- assertEquals(server.getLogDir(), getAbsolutePath("/c"));
- assertEquals(server.getTempDir(), getAbsolutePath("/d"));
- assertEquals(server.getName(), "server");
- assertEquals(server.getPrefix(), "server");
- assertEquals(server.getPrefixedName("name"), "server.name");
- assertNull(server.getConfig());
-
- server = new Server("server", TestDirHelper.getTestDir().getAbsolutePath(), new Configuration(false));
- assertEquals(server.getHomeDir(), TestDirHelper.getTestDir().getAbsolutePath());
- assertEquals(server.getConfigDir(), TestDirHelper.getTestDir() + "/conf");
- assertEquals(server.getLogDir(), TestDirHelper.getTestDir() + "/log");
- assertEquals(server.getTempDir(), TestDirHelper.getTestDir() + "/temp");
- assertEquals(server.getName(), "server");
- assertEquals(server.getPrefix(), "server");
- assertEquals(server.getPrefixedName("name"), "server.name");
- assertNotNull(server.getConfig());
-
- server = new Server("server", TestDirHelper.getTestDir().getAbsolutePath());
- assertEquals(server.getHomeDir(), TestDirHelper.getTestDir().getAbsolutePath());
- assertEquals(server.getConfigDir(), TestDirHelper.getTestDir() + "/conf");
- assertEquals(server.getLogDir(), TestDirHelper.getTestDir() + "/log");
- assertEquals(server.getTempDir(), TestDirHelper.getTestDir() + "/temp");
- assertEquals(server.getName(), "server");
- assertEquals(server.getPrefix(), "server");
- assertEquals(server.getPrefixedName("name"), "server.name");
- assertNull(server.getConfig());
- }
-
- @Test
- @TestException(exception = ServerException.class, msgRegExp = "S01.*")
- @TestDir
- public void initNoHomeDir() throws Exception {
- File homeDir = new File(TestDirHelper.getTestDir(), "home");
- Configuration conf = new Configuration(false);
- conf.set("server.services", TestService.class.getName());
- Server server = new Server("server", homeDir.getAbsolutePath(), conf);
- server.init();
- }
-
- @Test
- @TestException(exception = ServerException.class, msgRegExp = "S02.*")
- @TestDir
- public void initHomeDirNotDir() throws Exception {
- File homeDir = new File(TestDirHelper.getTestDir(), "home");
- new FileOutputStream(homeDir).close();
- Configuration conf = new Configuration(false);
- conf.set("server.services", TestService.class.getName());
- Server server = new Server("server", homeDir.getAbsolutePath(), conf);
- server.init();
- }
-
- @Test
- @TestException(exception = ServerException.class, msgRegExp = "S01.*")
- @TestDir
- public void initNoConfigDir() throws Exception {
- File homeDir = new File(TestDirHelper.getTestDir(), "home");
- assertTrue(homeDir.mkdir());
- assertTrue(new File(homeDir, "log").mkdir());
- assertTrue(new File(homeDir, "temp").mkdir());
- Configuration conf = new Configuration(false);
- conf.set("server.services", TestService.class.getName());
- Server server = new Server("server", homeDir.getAbsolutePath(), conf);
- server.init();
- }
-
- @Test
- @TestException(exception = ServerException.class, msgRegExp = "S02.*")
- @TestDir
- public void initConfigDirNotDir() throws Exception {
- File homeDir = new File(TestDirHelper.getTestDir(), "home");
- assertTrue(homeDir.mkdir());
- assertTrue(new File(homeDir, "log").mkdir());
- assertTrue(new File(homeDir, "temp").mkdir());
- File configDir = new File(homeDir, "conf");
- new FileOutputStream(configDir).close();
- Configuration conf = new Configuration(false);
- conf.set("server.services", TestService.class.getName());
- Server server = new Server("server", homeDir.getAbsolutePath(), conf);
- server.init();
- }
-
- @Test
- @TestException(exception = ServerException.class, msgRegExp = "S01.*")
- @TestDir
- public void initNoLogDir() throws Exception {
- File homeDir = new File(TestDirHelper.getTestDir(), "home");
- assertTrue(homeDir.mkdir());
- assertTrue(new File(homeDir, "conf").mkdir());
- assertTrue(new File(homeDir, "temp").mkdir());
- Configuration conf = new Configuration(false);
- conf.set("server.services", TestService.class.getName());
- Server server = new Server("server", homeDir.getAbsolutePath(), conf);
- server.init();
- }
-
- @Test
- @TestException(exception = ServerException.class, msgRegExp = "S02.*")
- @TestDir
- public void initLogDirNotDir() throws Exception {
- File homeDir = new File(TestDirHelper.getTestDir(), "home");
- assertTrue(homeDir.mkdir());
- assertTrue(new File(homeDir, "conf").mkdir());
- assertTrue(new File(homeDir, "temp").mkdir());
- File logDir = new File(homeDir, "log");
- new FileOutputStream(logDir).close();
- Configuration conf = new Configuration(false);
- conf.set("server.services", TestService.class.getName());
- Server server = new Server("server", homeDir.getAbsolutePath(), conf);
- server.init();
- }
-
- @Test
- @TestException(exception = ServerException.class, msgRegExp = "S01.*")
- @TestDir
- public void initNoTempDir() throws Exception {
- File homeDir = new File(TestDirHelper.getTestDir(), "home");
- assertTrue(homeDir.mkdir());
- assertTrue(new File(homeDir, "conf").mkdir());
- assertTrue(new File(homeDir, "log").mkdir());
- Configuration conf = new Configuration(false);
- conf.set("server.services", TestService.class.getName());
- Server server = new Server("server", homeDir.getAbsolutePath(), conf);
- server.init();
- }
-
- @Test
- @TestException(exception = ServerException.class, msgRegExp = "S02.*")
- @TestDir
- public void initTempDirNotDir() throws Exception {
- File homeDir = new File(TestDirHelper.getTestDir(), "home");
- assertTrue(homeDir.mkdir());
- assertTrue(new File(homeDir, "conf").mkdir());
- assertTrue(new File(homeDir, "log").mkdir());
- File tempDir = new File(homeDir, "temp");
- new FileOutputStream(tempDir).close();
- Configuration conf = new Configuration(false);
- conf.set("server.services", TestService.class.getName());
- Server server = new Server("server", homeDir.getAbsolutePath(), conf);
- server.init();
- }
-
- @Test
- @TestException(exception = ServerException.class, msgRegExp = "S05.*")
- @TestDir
- public void siteFileNotAFile() throws Exception {
- String homeDir = TestDirHelper.getTestDir().getAbsolutePath();
- File siteFile = new File(homeDir, "server-site.xml");
- assertTrue(siteFile.mkdir());
- Server server = new Server("server", homeDir, homeDir, homeDir, homeDir);
- server.init();
- }
-
- private Server createServer(Configuration conf) {
- return new Server("server", TestDirHelper.getTestDir().getAbsolutePath(),
- TestDirHelper.getTestDir().getAbsolutePath(),
- TestDirHelper.getTestDir().getAbsolutePath(), TestDirHelper.getTestDir().getAbsolutePath(), conf);
- }
-
- @Test
- @TestDir
- public void log4jFile() throws Exception {
- InputStream is = Server.getResource("default-log4j.properties");
- OutputStream os = new FileOutputStream(new File(TestDirHelper.getTestDir(), "server-log4j.properties"));
- IOUtils.copyBytes(is, os, 1024, true);
- Configuration conf = new Configuration(false);
- Server server = createServer(conf);
- server.init();
- }
-
- public static class LifeCycleService extends BaseService {
-
- public LifeCycleService() {
- super("lifecycle");
- }
-
- @Override
- protected void init() throws ServiceException {
- assertEquals(getServer().getStatus(), Server.Status.BOOTING);
- }
-
- @Override
- public void destroy() {
- assertEquals(getServer().getStatus(), Server.Status.SHUTTING_DOWN);
- super.destroy();
- }
-
- @Override
- public Class getInterface() {
- return LifeCycleService.class;
- }
- }
-
- @Test
- @TestDir
- public void lifeCycle() throws Exception {
- Configuration conf = new Configuration(false);
- conf.set("server.services", LifeCycleService.class.getName());
- Server server = createServer(conf);
- assertEquals(server.getStatus(), Server.Status.UNDEF);
- server.init();
- assertNotNull(server.get(LifeCycleService.class));
- assertEquals(server.getStatus(), Server.Status.NORMAL);
- server.destroy();
- assertEquals(server.getStatus(), Server.Status.SHUTDOWN);
- }
-
- @Test
- @TestDir
- public void startWithStatusNotNormal() throws Exception {
- Configuration conf = new Configuration(false);
- conf.set("server.startup.status", "ADMIN");
- Server server = createServer(conf);
- server.init();
- assertEquals(server.getStatus(), Server.Status.ADMIN);
- server.destroy();
- }
-
- @Test(expected = IllegalArgumentException.class)
- @TestDir
- public void nonSettableStatus() throws Exception {
- Configuration conf = new Configuration(false);
- Server server = createServer(conf);
- server.init();
- server.setStatus(Server.Status.SHUTDOWN);
- }
-
- public static class TestService implements Service {
- static List<String> LIFECYCLE = new ArrayList<String>();
-
- @Override
- public void init(Server server) throws ServiceException {
- LIFECYCLE.add("init");
- }
-
- @Override
- public void postInit() throws ServiceException {
- LIFECYCLE.add("postInit");
- }
-
- @Override
- public void destroy() {
- LIFECYCLE.add("destroy");
- }
-
- @Override
- public Class[] getServiceDependencies() {
- return new Class[0];
- }
-
- @Override
- public Class getInterface() {
- return TestService.class;
- }
-
- @Override
- public void serverStatusChange(Server.Status oldStatus, Server.Status newStatus) throws ServiceException {
- LIFECYCLE.add("serverStatusChange");
- }
- }
-
- public static class TestServiceExceptionOnStatusChange extends TestService {
-
- @Override
- public void serverStatusChange(Server.Status oldStatus, Server.Status newStatus) throws ServiceException {
- throw new RuntimeException();
- }
- }
-
- @Test
- @TestDir
- public void changeStatus() throws Exception {
- TestService.LIFECYCLE.clear();
- Configuration conf = new Configuration(false);
- conf.set("server.services", TestService.class.getName());
- Server server = createServer(conf);
- server.init();
- server.setStatus(Server.Status.ADMIN);
- assertTrue(TestService.LIFECYCLE.contains("serverStatusChange"));
- }
-
- @Test
- @TestException(exception = ServerException.class, msgRegExp = "S11.*")
- @TestDir
- public void changeStatusServiceException() throws Exception {
- TestService.LIFECYCLE.clear();
- Configuration conf = new Configuration(false);
- conf.set("server.services", TestServiceExceptionOnStatusChange.class.getName());
- Server server = createServer(conf);
- server.init();
- }
-
- @Test
- @TestDir
- public void setSameStatus() throws Exception {
- Configuration conf = new Configuration(false);
- conf.set("server.services", TestService.class.getName());
- Server server = createServer(conf);
- server.init();
- TestService.LIFECYCLE.clear();
- server.setStatus(server.getStatus());
- assertFalse(TestService.LIFECYCLE.contains("serverStatusChange"));
- }
-
- @Test
- @TestDir
- public void serviceLifeCycle() throws Exception {
- TestService.LIFECYCLE.clear();
- Configuration conf = new Configuration(false);
- conf.set("server.services", TestService.class.getName());
- Server server = createServer(conf);
- server.init();
- assertNotNull(server.get(TestService.class));
- server.destroy();
- assertEquals(TestService.LIFECYCLE, Arrays.asList("init", "postInit", "serverStatusChange", "destroy"));
- }
-
- @Test
- @TestDir
- public void loadingDefaultConfig() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
- Server server = new Server("testserver", dir, dir, dir, dir);
- server.init();
- assertEquals(server.getConfig().get("testserver.a"), "default");
- }
-
- @Test
- @TestDir
- public void loadingSiteConfig() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
- File configFile = new File(dir, "testserver-site.xml");
- Writer w = new FileWriter(configFile);
- w.write("<configuration><property><name>testserver.a</name><value>site</value></property></configuration>");
- w.close();
- Server server = new Server("testserver", dir, dir, dir, dir);
- server.init();
- assertEquals(server.getConfig().get("testserver.a"), "site");
- }
-
- @Test
- @TestDir
- public void loadingSysPropConfig() throws Exception {
- try {
- System.setProperty("testserver.a", "sysprop");
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
- File configFile = new File(dir, "testserver-site.xml");
- Writer w = new FileWriter(configFile);
- w.write("<configuration><property><name>testserver.a</name><value>site</value></property></configuration>");
- w.close();
- Server server = new Server("testserver", dir, dir, dir, dir);
- server.init();
- assertEquals(server.getConfig().get("testserver.a"), "sysprop");
- } finally {
- System.getProperties().remove("testserver.a");
- }
- }
-
- @Test(expected = IllegalStateException.class)
- @TestDir
- public void illegalState1() throws Exception {
- Server server = new Server("server", TestDirHelper.getTestDir().getAbsolutePath(), new Configuration(false));
- server.destroy();
- }
-
- @Test(expected = IllegalStateException.class)
- @TestDir
- public void illegalState2() throws Exception {
- Server server = new Server("server", TestDirHelper.getTestDir().getAbsolutePath(), new Configuration(false));
- server.get(Object.class);
- }
-
- @Test(expected = IllegalStateException.class)
- @TestDir
- public void illegalState3() throws Exception {
- Server server = new Server("server", TestDirHelper.getTestDir().getAbsolutePath(), new Configuration(false));
- server.setService(null);
- }
-
- @Test(expected = IllegalStateException.class)
- @TestDir
- public void illegalState4() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
- Server server = new Server("server", dir, dir, dir, dir, new Configuration(false));
- server.init();
- server.init();
- }
-
- private static List<String> ORDER = new ArrayList<String>();
-
- public abstract static class MyService implements Service, XException.ERROR {
- private String id;
- private Class serviceInterface;
- private Class[] dependencies;
- private boolean failOnInit;
- private boolean failOnDestroy;
-
- protected MyService(String id, Class serviceInterface, Class[] dependencies, boolean failOnInit,
- boolean failOnDestroy) {
- this.id = id;
- this.serviceInterface = serviceInterface;
- this.dependencies = dependencies;
- this.failOnInit = failOnInit;
- this.failOnDestroy = failOnDestroy;
- }
-
-
- @Override
- public void init(Server server) throws ServiceException {
- ORDER.add(id + ".init");
- if (failOnInit) {
- throw new ServiceException(this);
- }
- }
-
- @Override
- public void postInit() throws ServiceException {
- ORDER.add(id + ".postInit");
- }
-
- @Override
- public String getTemplate() {
- return "";
- }
-
- @Override
- public void destroy() {
- ORDER.add(id + ".destroy");
- if (failOnDestroy) {
- throw new RuntimeException();
- }
- }
-
- @Override
- public Class[] getServiceDependencies() {
- return dependencies;
- }
-
- @Override
- public Class getInterface() {
- return serviceInterface;
- }
-
- @Override
- public void serverStatusChange(Server.Status oldStatus, Server.Status newStatus) throws ServiceException {
- }
- }
-
- public static class MyService1 extends MyService {
-
- public MyService1() {
- super("s1", MyService1.class, null, false, false);
- }
-
- protected MyService1(String id, Class serviceInterface, Class[] dependencies, boolean failOnInit,
- boolean failOnDestroy) {
- super(id, serviceInterface, dependencies, failOnInit, failOnDestroy);
- }
- }
-
- public static class MyService2 extends MyService {
- public MyService2() {
- super("s2", MyService2.class, null, true, false);
- }
- }
-
-
- public static class MyService3 extends MyService {
- public MyService3() {
- super("s3", MyService3.class, null, false, false);
- }
- }
-
- public static class MyService1a extends MyService1 {
- public MyService1a() {
- super("s1a", MyService1.class, null, false, false);
- }
- }
-
- public static class MyService4 extends MyService1 {
-
- public MyService4() {
- super("s4a", String.class, null, false, false);
- }
- }
-
- public static class MyService5 extends MyService {
-
- public MyService5() {
- super("s5", MyService5.class, null, false, true);
- }
-
- protected MyService5(String id, Class serviceInterface, Class[] dependencies, boolean failOnInit,
- boolean failOnDestroy) {
- super(id, serviceInterface, dependencies, failOnInit, failOnDestroy);
- }
- }
-
- public static class MyService5a extends MyService5 {
-
- public MyService5a() {
- super("s5a", MyService5.class, null, false, false);
- }
- }
-
- public static class MyService6 extends MyService {
-
- public MyService6() {
- super("s6", MyService6.class, new Class[]{MyService1.class}, false, false);
- }
- }
-
- public static class MyService7 extends MyService {
-
- @SuppressWarnings({"UnusedParameters"})
- public MyService7(String foo) {
- super("s6", MyService7.class, new Class[]{MyService1.class}, false, false);
- }
- }
-
- @Test
- @TestException(exception = ServerException.class, msgRegExp = "S08.*")
- @TestDir
- public void invalidService() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
- Configuration conf = new Configuration(false);
- conf.set("server.services", "foo");
- Server server = new Server("server", dir, dir, dir, dir, conf);
- server.init();
- }
-
- @Test
- @TestException(exception = ServerException.class, msgRegExp = "S07.*")
- @TestDir
- public void serviceWithNoDefaultConstructor() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
- Configuration conf = new Configuration(false);
- conf.set("server.services", MyService7.class.getName());
- Server server = new Server("server", dir, dir, dir, dir, conf);
- server.init();
- }
-
- @Test
- @TestException(exception = ServerException.class, msgRegExp = "S04.*")
- @TestDir
- public void serviceNotImplementingServiceInterface() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
- Configuration conf = new Configuration(false);
- conf.set("server.services", MyService4.class.getName());
- Server server = new Server("server", dir, dir, dir, dir, conf);
- server.init();
- }
-
- @Test
- @TestException(exception = ServerException.class, msgRegExp = "S10.*")
- @TestDir
- public void serviceWithMissingDependency() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
- Configuration conf = new Configuration(false);
- String services = StringUtils.join(",", Arrays.asList(MyService3.class.getName(), MyService6.class.getName())
- );
- conf.set("server.services", services);
- Server server = new Server("server", dir, dir, dir, dir, conf);
- server.init();
- }
-
- @Test
- @TestDir
- public void services() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
- Configuration conf;
- Server server;
-
- // no services
- ORDER.clear();
- conf = new Configuration(false);
- server = new Server("server", dir, dir, dir, dir, conf);
- server.init();
- assertEquals(ORDER.size(), 0);
-
- // 2 services init/destroy
- ORDER.clear();
- String services = StringUtils.join(",", Arrays.asList(MyService1.class.getName(), MyService3.class.getName())
- );
- conf = new Configuration(false);
- conf.set("server.services", services);
- server = new Server("server", dir, dir, dir, dir, conf);
- server.init();
- assertEquals(server.get(MyService1.class).getInterface(), MyService1.class);
- assertEquals(server.get(MyService3.class).getInterface(), MyService3.class);
- assertEquals(ORDER.size(), 4);
- assertEquals(ORDER.get(0), "s1.init");
- assertEquals(ORDER.get(1), "s3.init");
- assertEquals(ORDER.get(2), "s1.postInit");
- assertEquals(ORDER.get(3), "s3.postInit");
- server.destroy();
- assertEquals(ORDER.size(), 6);
- assertEquals(ORDER.get(4), "s3.destroy");
- assertEquals(ORDER.get(5), "s1.destroy");
-
- // 3 services, 2nd one fails on init
- ORDER.clear();
- services = StringUtils.join(",", Arrays.asList(MyService1.class.getName(), MyService2.class.getName(),
- MyService3.class.getName()));
- conf = new Configuration(false);
- conf.set("server.services", services);
-
- server = new Server("server", dir, dir, dir, dir, conf);
- try {
- server.init();
- fail();
- } catch (ServerException ex) {
- assertEquals(MyService2.class, ex.getError().getClass());
- } catch (Exception ex) {
- fail();
- }
- assertEquals(ORDER.size(), 3);
- assertEquals(ORDER.get(0), "s1.init");
- assertEquals(ORDER.get(1), "s2.init");
- assertEquals(ORDER.get(2), "s1.destroy");
-
- // 2 services one fails on destroy
- ORDER.clear();
- services = StringUtils.join(",", Arrays.asList(MyService1.class.getName(), MyService5.class.getName()));
- conf = new Configuration(false);
- conf.set("server.services", services);
- server = new Server("server", dir, dir, dir, dir, conf);
- server.init();
- assertEquals(ORDER.size(), 4);
- assertEquals(ORDER.get(0), "s1.init");
- assertEquals(ORDER.get(1), "s5.init");
- assertEquals(ORDER.get(2), "s1.postInit");
- assertEquals(ORDER.get(3), "s5.postInit");
- server.destroy();
- assertEquals(ORDER.size(), 6);
- assertEquals(ORDER.get(4), "s5.destroy");
- assertEquals(ORDER.get(5), "s1.destroy");
-
-
- // service override via ext
- ORDER.clear();
- services = StringUtils.join(",", Arrays.asList(MyService1.class.getName(), MyService3.class.getName()));
- String servicesExt = StringUtils.join(",", Arrays.asList(MyService1a.class.getName()));
-
- conf = new Configuration(false);
- conf.set("server.services", services);
- conf.set("server.services.ext", servicesExt);
- server = new Server("server", dir, dir, dir, dir, conf);
- server.init();
-
- assertEquals(server.get(MyService1.class).getClass(), MyService1a.class);
- assertEquals(ORDER.size(), 4);
- assertEquals(ORDER.get(0), "s1a.init");
- assertEquals(ORDER.get(1), "s3.init");
- assertEquals(ORDER.get(2), "s1a.postInit");
- assertEquals(ORDER.get(3), "s3.postInit");
- server.destroy();
- assertEquals(ORDER.size(), 6);
- assertEquals(ORDER.get(4), "s3.destroy");
- assertEquals(ORDER.get(5), "s1a.destroy");
-
- // service override via setService
- ORDER.clear();
- services = StringUtils.join(",", Arrays.asList(MyService1.class.getName(), MyService3.class.getName()));
- conf = new Configuration(false);
- conf.set("server.services", services);
- server = new Server("server", dir, dir, dir, dir, conf);
- server.init();
-
- server.setService(MyService1a.class);
- assertEquals(ORDER.size(), 6);
- assertEquals(ORDER.get(4), "s1.destroy");
- assertEquals(ORDER.get(5), "s1a.init");
-
- assertEquals(server.get(MyService1.class).getClass(), MyService1a.class);
-
- server.destroy();
- assertEquals(ORDER.size(), 8);
- assertEquals(ORDER.get(6), "s3.destroy");
- assertEquals(ORDER.get(7), "s1a.destroy");
-
- // service add via setService
- ORDER.clear();
- services = StringUtils.join(",", Arrays.asList(MyService1.class.getName(), MyService3.class.getName()));
- conf = new Configuration(false);
- conf.set("server.services", services);
- server = new Server("server", dir, dir, dir, dir, conf);
- server.init();
-
- server.setService(MyService5.class);
- assertEquals(ORDER.size(), 5);
- assertEquals(ORDER.get(4), "s5.init");
-
- assertEquals(server.get(MyService5.class).getClass(), MyService5.class);
-
- server.destroy();
- assertEquals(ORDER.size(), 8);
- assertEquals(ORDER.get(5), "s5.destroy");
- assertEquals(ORDER.get(6), "s3.destroy");
- assertEquals(ORDER.get(7), "s1.destroy");
-
- // service add via setService exception
- ORDER.clear();
- services = StringUtils.join(",", Arrays.asList(MyService1.class.getName(), MyService3.class.getName()));
- conf = new Configuration(false);
- conf.set("server.services", services);
- server = new Server("server", dir, dir, dir, dir, conf);
- server.init();
- try {
- server.setService(MyService7.class);
- fail();
- } catch (ServerException ex) {
- assertEquals(ServerException.ERROR.S09, ex.getError());
- } catch (Exception ex) {
- fail();
- }
- assertEquals(ORDER.size(), 6);
- assertEquals(ORDER.get(4), "s3.destroy");
- assertEquals(ORDER.get(5), "s1.destroy");
-
- // service with dependency
- ORDER.clear();
- services = StringUtils.join(",", Arrays.asList(MyService1.class.getName(), MyService6.class.getName()));
- conf = new Configuration(false);
- conf.set("server.services", services);
- server = new Server("server", dir, dir, dir, dir, conf);
- server.init();
- assertEquals(server.get(MyService1.class).getInterface(), MyService1.class);
- assertEquals(server.get(MyService6.class).getInterface(), MyService6.class);
- server.destroy();
- }
-
- /**
- * Creates an absolute path by appending the given relative path to the test
- * root.
- *
- * @param relativePath String relative path
- * @return String absolute path formed by appending relative path to test root
- */
- private static String getAbsolutePath(String relativePath) {
- return new File(TestDirHelper.getTestDir(), relativePath).getAbsolutePath();
- }
-}
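
The assertions removed above encode the Server lifecycle contract: services run init and postInit in declaration order and are destroyed in reverse. A minimal standalone sketch of that ordering, using hypothetical service names instead of the real Service machinery:

    import java.util.ArrayDeque;
    import java.util.Arrays;
    import java.util.Deque;
    import java.util.List;

    // Hypothetical sketch: init runs in declaration order, destroy in reverse,
    // mirroring the "s1.init ... s5.init ... s5.destroy ... s1.destroy"
    // ordering the removed assertions checked.
    public class LifecycleOrderSketch {
      public static void main(String[] args) {
        List<String> services = Arrays.asList("s1", "s5");
        Deque<String> started = new ArrayDeque<>();
        for (String s : services) {
          System.out.println(s + ".init");
          started.push(s);               // LIFO: last started, first destroyed
        }
        while (!started.isEmpty()) {
          System.out.println(started.pop() + ".destroy");
        }
      }
    }
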
diff --git a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/server/TestServerConstructor.java b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/server/TestServerConstructor.java
deleted file mode 100644
index 6b7c628..0000000
--- a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/server/TestServerConstructor.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.server;
-
-import java.util.Arrays;
-import java.util.Collection;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.test.HTestCase;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-@RunWith(value = Parameterized.class)
-public class TestServerConstructor extends HTestCase {
-
- @Parameterized.Parameters
- public static Collection constructorFailParams() {
- return Arrays.asList(new Object[][]{
- {null, null, null, null, null, null},
- {"", null, null, null, null, null},
- {null, null, null, null, null, null},
- {"server", null, null, null, null, null},
- {"server", "", null, null, null, null},
- {"server", "foo", null, null, null, null},
- {"server", "/tmp", null, null, null, null},
- {"server", "/tmp", "", null, null, null},
- {"server", "/tmp", "foo", null, null, null},
- {"server", "/tmp", "/tmp", null, null, null},
- {"server", "/tmp", "/tmp", "", null, null},
- {"server", "/tmp", "/tmp", "foo", null, null},
- {"server", "/tmp", "/tmp", "/tmp", null, null},
- {"server", "/tmp", "/tmp", "/tmp", "", null},
- {"server", "/tmp", "/tmp", "/tmp", "foo", null}});
- }
-
- private String name;
- private String homeDir;
- private String configDir;
- private String logDir;
- private String tempDir;
- private Configuration conf;
-
- public TestServerConstructor(String name, String homeDir, String configDir, String logDir, String tempDir,
- Configuration conf) {
- this.name = name;
- this.homeDir = homeDir;
- this.configDir = configDir;
- this.logDir = logDir;
- this.tempDir = tempDir;
- this.conf = conf;
- }
-
-
- @Test(expected = IllegalArgumentException.class)
- public void constructorFail() {
- new Server(name, homeDir, configDir, logDir, tempDir, conf);
- }
-
-}
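
The removed class used JUnit 4's Parameterized runner: every row from constructorFailParams() becomes one constructor invocation that must throw. A minimal sketch of the same pattern, with a hypothetical validation in place of Server's real argument checks:

    import java.util.Arrays;
    import java.util.Collection;

    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.junit.runners.Parameterized;

    // Hypothetical sketch of the parameterized-failure pattern: each row of
    // parameters drives one run of the @Test method, which must throw.
    @RunWith(Parameterized.class)
    public class ParameterizedFailureSketch {

      @Parameterized.Parameters
      public static Collection<Object[]> badNames() {
        return Arrays.asList(new Object[][]{{null}, {""}, {"   "}});
      }

      private final String name;

      public ParameterizedFailureSketch(String name) {
        this.name = name;
      }

      @Test(expected = IllegalArgumentException.class)
      public void constructorFail() {
        // stand-in for the checks the Server constructor performs
        if (name == null || name.trim().isEmpty()) {
          throw new IllegalArgumentException("name");
        }
      }
    }
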
diff --git a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/service/hadoop/TestFileSystemAccessService.java b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/service/hadoop/TestFileSystemAccessService.java
deleted file mode 100644
index ed9efa9..0000000
--- a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/service/hadoop/TestFileSystemAccessService.java
+++ /dev/null
@@ -1,466 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.service.hadoop;
-
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.Arrays;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.lib.server.Server;
-import org.apache.hadoop.lib.server.ServiceException;
-import org.apache.hadoop.lib.service.FileSystemAccess;
-import org.apache.hadoop.lib.service.FileSystemAccessException;
-import org.apache.hadoop.lib.service.instrumentation.InstrumentationService;
-import org.apache.hadoop.lib.service.scheduler.SchedulerService;
-import org.apache.hadoop.test.HFSTestCase;
-import org.apache.hadoop.test.TestDir;
-import org.apache.hadoop.test.TestDirHelper;
-import org.apache.hadoop.test.TestException;
-import org.apache.hadoop.test.TestHdfs;
-import org.apache.hadoop.test.TestHdfsHelper;
-import org.apache.hadoop.util.StringUtils;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-public class TestFileSystemAccessService extends HFSTestCase {
-
- private void createHadoopConf(Configuration hadoopConf) throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
- File hdfsSite = new File(dir, "hdfs-site.xml");
- OutputStream os = new FileOutputStream(hdfsSite);
- hadoopConf.writeXml(os);
- os.close();
- }
-
- @Before
- public void createHadoopConf() throws Exception {
- Configuration hadoopConf = new Configuration(false);
- hadoopConf.set("foo", "FOO");
- createHadoopConf(hadoopConf);
- }
-
- @Test
- @TestDir
- public void simpleSecurity() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
- String services = StringUtils.join(",",
- Arrays.asList(InstrumentationService.class.getName(),
- SchedulerService.class.getName(),
- FileSystemAccessService.class.getName()));
- Configuration conf = new Configuration(false);
- conf.set("server.services", services);
- Server server = new Server("server", dir, dir, dir, dir, conf);
- server.init();
- Assert.assertNotNull(server.get(FileSystemAccess.class));
- server.destroy();
- }
-
- @Test
- @TestException(exception = ServiceException.class, msgRegExp = "H01.*")
- @TestDir
- public void noKerberosKeytabProperty() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
- String services = StringUtils.join(",",
- Arrays.asList(InstrumentationService.class.getName(),
- SchedulerService.class.getName(),
- FileSystemAccessService.class.getName()));
- Configuration conf = new Configuration(false);
- conf.set("server.services", services);
- conf.set("server.hadoop.authentication.type", "kerberos");
- conf.set("server.hadoop.authentication.kerberos.keytab", " ");
- Server server = new Server("server", dir, dir, dir, dir, conf);
- server.init();
- }
-
- @Test
- @TestException(exception = ServiceException.class, msgRegExp = "H01.*")
- @TestDir
- public void noKerberosPrincipalProperty() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
- String services = StringUtils.join(",",
- Arrays.asList(InstrumentationService.class.getName(),
- SchedulerService.class.getName(),
- FileSystemAccessService.class.getName()));
- Configuration conf = new Configuration(false);
- conf.set("server.services", services);
- conf.set("server.hadoop.authentication.type", "kerberos");
- conf.set("server.hadoop.authentication.kerberos.keytab", "/tmp/foo");
- conf.set("server.hadoop.authentication.kerberos.principal", " ");
- Server server = new Server("server", dir, dir, dir, dir, conf);
- server.init();
- }
-
- @Test
- @TestException(exception = ServiceException.class, msgRegExp = "H02.*")
- @TestDir
- public void kerberosInitializationFailure() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
- String services = StringUtils.join(",",
- Arrays.asList(InstrumentationService.class.getName(),
- SchedulerService.class.getName(),
- FileSystemAccessService.class.getName()));
- Configuration conf = new Configuration(false);
- conf.set("server.services", services);
- conf.set("server.hadoop.authentication.type", "kerberos");
- conf.set("server.hadoop.authentication.kerberos.keytab", "/tmp/foo");
- conf.set("server.hadoop.authentication.kerberos.principal", "foo@FOO");
- Server server = new Server("server", dir, dir, dir, dir, conf);
- server.init();
- }
-
- @Test
- @TestException(exception = ServiceException.class, msgRegExp = "H09.*")
- @TestDir
- public void invalidSecurity() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
- String services = StringUtils.join(",",
- Arrays.asList(InstrumentationService.class.getName(),
- SchedulerService.class.getName(),
- FileSystemAccessService.class.getName()));
- Configuration conf = new Configuration(false);
- conf.set("server.services", services);
- conf.set("server.hadoop.authentication.type", "foo");
- Server server = new Server("server", dir, dir, dir, dir, conf);
- server.init();
- }
-
- @Test
- @TestDir
- public void serviceHadoopConf() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
- String services = StringUtils.join(",",
- Arrays.asList(InstrumentationService.class.getName(),
- SchedulerService.class.getName(),
- FileSystemAccessService.class.getName()));
- Configuration conf = new Configuration(false);
- conf.set("server.services", services);
-
- Server server = new Server("server", dir, dir, dir, dir, conf);
- server.init();
- FileSystemAccessService fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class);
- Assert.assertEquals(fsAccess.serviceHadoopConf.get("foo"), "FOO");
- server.destroy();
- }
-
- @Test
- @TestDir
- public void serviceHadoopConfCustomDir() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
- String hadoopConfDir = new File(dir, "confx").getAbsolutePath();
- new File(hadoopConfDir).mkdirs();
- String services = StringUtils.join(",",
- Arrays.asList(InstrumentationService.class.getName(),
- SchedulerService.class.getName(),
- FileSystemAccessService.class.getName()));
- Configuration conf = new Configuration(false);
- conf.set("server.services", services);
- conf.set("server.hadoop.config.dir", hadoopConfDir);
-
- File hdfsSite = new File(hadoopConfDir, "hdfs-site.xml");
- OutputStream os = new FileOutputStream(hdfsSite);
- Configuration hadoopConf = new Configuration(false);
- hadoopConf.set("foo", "BAR");
- hadoopConf.writeXml(os);
- os.close();
-
- Server server = new Server("server", dir, dir, dir, dir, conf);
- server.init();
- FileSystemAccessService fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class);
- Assert.assertEquals(fsAccess.serviceHadoopConf.get("foo"), "BAR");
- server.destroy();
- }
-
- @Test
- @TestDir
- public void inWhitelists() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
- String services = StringUtils.join(",",
- Arrays.asList(InstrumentationService.class.getName(),
- SchedulerService.class.getName(),
- FileSystemAccessService.class.getName()));
- Configuration conf = new Configuration(false);
- conf.set("server.services", services);
- Server server = new Server("server", dir, dir, dir, dir, conf);
- server.init();
- FileSystemAccessService fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class);
- fsAccess.validateNamenode("NN");
- server.destroy();
-
- conf = new Configuration(false);
- conf.set("server.services", services);
- conf.set("server.hadoop.name.node.whitelist", "*");
- server = new Server("server", dir, dir, dir, dir, conf);
- server.init();
- fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class);
- fsAccess.validateNamenode("NN");
- server.destroy();
-
- conf = new Configuration(false);
- conf.set("server.services", services);
- conf.set("server.hadoop.name.node.whitelist", "NN");
- server = new Server("server", dir, dir, dir, dir, conf);
- server.init();
- fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class);
- fsAccess.validateNamenode("NN");
- server.destroy();
- }
-
- @Test
- @TestException(exception = FileSystemAccessException.class, msgRegExp = "H05.*")
- @TestDir
- public void nameNodeNotInWhitelists() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
- String services = StringUtils.join(",",
- Arrays.asList(InstrumentationService.class.getName(),
- SchedulerService.class.getName(),
- FileSystemAccessService.class.getName()));
- Configuration conf = new Configuration(false);
- conf.set("server.services", services);
- conf.set("server.hadoop.name.node.whitelist", "NN");
- Server server = new Server("server", dir, dir, dir, dir, conf);
- server.init();
- FileSystemAccessService fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class);
- fsAccess.validateNamenode("NNx");
- }
-
- @Test
- @TestDir
- @TestHdfs
- public void createFileSystem() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
- String services = StringUtils.join(",",
- Arrays.asList(InstrumentationService.class.getName(),
- SchedulerService.class.getName(),
- FileSystemAccessService.class.getName()));
-
- Configuration hadoopConf = new Configuration(false);
- hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
- createHadoopConf(hadoopConf);
-
- Configuration conf = new Configuration(false);
- conf.set("server.services", services);
- conf.set("server.hadoop.filesystem.cache.purge.timeout", "0");
- Server server = new Server("server", dir, dir, dir, dir, conf);
- server.init();
- FileSystemAccess hadoop = server.get(FileSystemAccess.class);
- FileSystem fs = hadoop.createFileSystem("u", hadoop.getFileSystemConfiguration());
- Assert.assertNotNull(fs);
- fs.mkdirs(new Path("/tmp/foo"));
- hadoop.releaseFileSystem(fs);
- try {
- fs.mkdirs(new Path("/tmp/foo"));
- Assert.fail();
- } catch (IOException ex) {
- // expected: the filesystem was released, so further use must fail
- } catch (Exception ex) {
- Assert.fail();
- }
- server.destroy();
- }
-
- @Test
- @TestDir
- @TestHdfs
- public void fileSystemExecutor() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
- String services = StringUtils.join(",",
- Arrays.asList(InstrumentationService.class.getName(),
- SchedulerService.class.getName(),
- FileSystemAccessService.class.getName()));
-
- Configuration hadoopConf = new Configuration(false);
- hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
- createHadoopConf(hadoopConf);
-
- Configuration conf = new Configuration(false);
- conf.set("server.services", services);
- conf.set("server.hadoop.filesystem.cache.purge.timeout", "0");
- Server server = new Server("server", dir, dir, dir, dir, conf);
- server.init();
- FileSystemAccess hadoop = server.get(FileSystemAccess.class);
-
- final FileSystem fsa[] = new FileSystem[1];
-
- hadoop.execute("u", hadoop.getFileSystemConfiguration(), new FileSystemAccess.FileSystemExecutor<Void>() {
- @Override
- public Void execute(FileSystem fs) throws IOException {
- fs.mkdirs(new Path("/tmp/foo"));
- fsa[0] = fs;
- return null;
- }
- });
- try {
- fsa[0].mkdirs(new Path("/tmp/foo"));
- Assert.fail();
- } catch (IOException ex) {
- // expected: the filesystem is released once execute() returns
- } catch (Exception ex) {
- Assert.fail();
- }
- server.destroy();
- }
-
- @Test
- @TestException(exception = FileSystemAccessException.class, msgRegExp = "H06.*")
- @TestDir
- @TestHdfs
- public void fileSystemExecutorNoNameNode() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
- String services = StringUtils.join(",",
- Arrays.asList(InstrumentationService.class.getName(),
- SchedulerService.class.getName(),
- FileSystemAccessService.class.getName()));
- Configuration hadoopConf = new Configuration(false);
- hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
- createHadoopConf(hadoopConf);
-
- Configuration conf = new Configuration(false);
- conf.set("server.services", services);
- Server server = new Server("server", dir, dir, dir, dir, conf);
- server.init();
- FileSystemAccess fsAccess = server.get(FileSystemAccess.class);
-
- Configuration hdfsConf = fsAccess.getFileSystemConfiguration();
- hdfsConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "");
- fsAccess.execute("u", hdfsConf, new FileSystemAccess.FileSystemExecutor<Void>() {
- @Override
- public Void execute(FileSystem fs) throws IOException {
- return null;
- }
- });
- }
-
- @Test
- @TestDir
- @TestHdfs
- public void fileSystemExecutorException() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
- String services = StringUtils.join(",",
- Arrays.asList(InstrumentationService.class.getName(),
- SchedulerService.class.getName(),
- FileSystemAccessService.class.getName()));
-
- Configuration hadoopConf = new Configuration(false);
- hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
- createHadoopConf(hadoopConf);
-
- Configuration conf = new Configuration(false);
- conf.set("server.services", services);
- conf.set("server.hadoop.filesystem.cache.purge.timeout", "0");
- Server server = new Server("server", dir, dir, dir, dir, conf);
- server.init();
- FileSystemAccess hadoop = server.get(FileSystemAccess.class);
-
- final FileSystem fsa[] = new FileSystem[1];
- try {
- hadoop.execute("u", hadoop.getFileSystemConfiguration(), new FileSystemAccess.FileSystemExecutor<Void>() {
- @Override
- public Void execute(FileSystem fs) throws IOException {
- fsa[0] = fs;
- throw new IOException();
- }
- });
- Assert.fail();
- } catch (FileSystemAccessException ex) {
- Assert.assertEquals(ex.getError(), FileSystemAccessException.ERROR.H03);
- } catch (Exception ex) {
- Assert.fail();
- }
-
- try {
- fsa[0].mkdirs(new Path("/tmp/foo"));
- Assert.fail();
- } catch (IOException ex) {
- // expected: the failed execute() released the filesystem
- } catch (Exception ex) {
- Assert.fail();
- }
- server.destroy();
- }
-
- @Test
- @TestDir
- @TestHdfs
- public void fileSystemCache() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
- String services = StringUtils.join(",",
- Arrays.asList(InstrumentationService.class.getName(),
- SchedulerService.class.getName(),
- FileSystemAccessService.class.getName()));
-
- Configuration hadoopConf = new Configuration(false);
- hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
- TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
- createHadoopConf(hadoopConf);
-
- Configuration conf = new Configuration(false);
- conf.set("server.services", services);
- conf.set("server.hadoop.filesystem.cache.purge.frequency", "1");
- conf.set("server.hadoop.filesystem.cache.purge.timeout", "1");
- Server server = new Server("server", dir, dir, dir, dir, conf);
- try {
- server.init();
- FileSystemAccess hadoop = server.get(FileSystemAccess.class);
-
- FileSystem fs1 =
- hadoop.createFileSystem("u", hadoop.getFileSystemConfiguration());
- Assert.assertNotNull(fs1);
- fs1.mkdirs(new Path("/tmp/foo1"));
- hadoop.releaseFileSystem(fs1);
-
- // still around because of caching
- fs1.mkdirs(new Path("/tmp/foo2"));
-
- FileSystem fs2 =
- hadoop.createFileSystem("u", hadoop.getFileSystemConfiguration());
-
- // should be the same instance because of caching
- Assert.assertEquals(fs1, fs2);
-
- Thread.sleep(4 * 1000);
-
- // still around because the lease count is 1 (fs2 is still out)
- fs1.mkdirs(new Path("/tmp/foo2"));
-
- Thread.sleep(4 * 1000);
-
- // still around because the lease count is 1 (fs2 is still out)
- fs2.mkdirs(new Path("/tmp/foo"));
-
- hadoop.releaseFileSystem(fs2);
- Thread.sleep(4 * 1000);
-
- // should be gone because the lease count is 0
- try {
- fs2.mkdirs(new Path("/tmp/foo"));
- Assert.fail();
- } catch (IOException ex) {
- // expected: purged from the cache once the lease count hit 0
- } catch (Exception ex) {
- Assert.fail();
- }
- } finally {
- server.destroy();
- }
- }
-
-}
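
Most of the removed tests share one fixture idiom: serialize a Configuration to hdfs-site.xml so FileSystemAccessService loads it from the configured directory. A minimal sketch of that idiom, with the temp-dir path as an assumption:

    import java.io.File;
    import java.io.FileOutputStream;
    import java.io.OutputStream;

    import org.apache.hadoop.conf.Configuration;

    // Hypothetical sketch: write a Configuration out as hdfs-site.xml so a
    // service under test picks it up via "server.hadoop.config.dir".
    public class HadoopConfFixtureSketch {
      public static void main(String[] args) throws Exception {
        File confDir = new File(System.getProperty("java.io.tmpdir"), "confx");
        confDir.mkdirs();
        Configuration hadoopConf = new Configuration(false); // no default resources
        hadoopConf.set("foo", "BAR");
        try (OutputStream os =
            new FileOutputStream(new File(confDir, "hdfs-site.xml"))) {
          hadoopConf.writeXml(os); // standard <configuration><property> XML
        }
      }
    }
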
diff --git a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/service/instrumentation/TestInstrumentationService.java b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/service/instrumentation/TestInstrumentationService.java
deleted file mode 100644
index c609fef..0000000
--- a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/service/instrumentation/TestInstrumentationService.java
+++ /dev/null
@@ -1,409 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.service.instrumentation;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.StringWriter;
-import java.util.Arrays;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.lib.server.Server;
-import org.apache.hadoop.lib.service.Instrumentation;
-import org.apache.hadoop.lib.service.scheduler.SchedulerService;
-import org.apache.hadoop.test.HTestCase;
-import org.apache.hadoop.test.TestDir;
-import org.apache.hadoop.test.TestDirHelper;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.Time;
-import org.json.simple.JSONObject;
-import org.json.simple.parser.JSONParser;
-import org.junit.Test;
-
-public class TestInstrumentationService extends HTestCase {
-
- @Override
- protected float getWaitForRatio() {
- return 1;
- }
-
- @Test
- public void cron() {
- InstrumentationService.Cron cron = new InstrumentationService.Cron();
- assertEquals(cron.start, 0);
- assertEquals(cron.lapStart, 0);
- assertEquals(cron.own, 0);
- assertEquals(cron.total, 0);
- long begin = Time.now();
- assertEquals(cron.start(), cron);
- assertEquals(cron.start(), cron);
- assertEquals(cron.start, begin, 20);
- assertEquals(cron.start, cron.lapStart);
- sleep(100);
- assertEquals(cron.stop(), cron);
- long end = Time.now();
- long delta = end - begin;
- assertEquals(cron.own, delta, 20);
- assertEquals(cron.total, 0);
- assertEquals(cron.lapStart, 0);
- sleep(100);
- long reStart = Time.now();
- cron.start();
- assertEquals(cron.start, begin, 20);
- assertEquals(cron.lapStart, reStart, 20);
- sleep(100);
- cron.stop();
- long reEnd = Time.now();
- delta += reEnd - reStart;
- assertEquals(cron.own, delta, 20);
- assertEquals(cron.total, 0);
- assertEquals(cron.lapStart, 0);
- cron.end();
- assertEquals(cron.total, reEnd - begin, 20);
-
- try {
- cron.start();
- fail();
- } catch (IllegalStateException ex) {
- // expected: start() is illegal after end()
- } catch (Exception ex) {
- fail();
- }
-
- try {
- cron.stop();
- fail();
- } catch (IllegalStateException ex) {
- // expected: stop() is illegal after end()
- } catch (Exception ex) {
- fail();
- }
- }
-
- @Test
- public void timer() throws Exception {
- InstrumentationService.Timer timer = new InstrumentationService.Timer(2);
- InstrumentationService.Cron cron = new InstrumentationService.Cron();
-
- long ownStart;
- long ownEnd;
- long totalStart;
- long totalEnd;
- long ownDelta;
- long totalDelta;
- long avgTotal;
- long avgOwn;
-
- cron.start();
- ownStart = Time.now();
- totalStart = ownStart;
- ownDelta = 0;
- sleep(100);
-
- cron.stop();
- ownEnd = Time.now();
- ownDelta += ownEnd - ownStart;
- sleep(100);
-
- cron.start();
- ownStart = Time.now();
- sleep(100);
-
- cron.stop();
- ownEnd = Time.now();
- ownDelta += ownEnd - ownStart;
- totalEnd = ownEnd;
- totalDelta = totalEnd - totalStart;
-
- avgTotal = totalDelta;
- avgOwn = ownDelta;
-
- timer.addCron(cron);
- long[] values = timer.getValues();
- assertEquals(values[InstrumentationService.Timer.LAST_TOTAL], totalDelta, 20);
- assertEquals(values[InstrumentationService.Timer.LAST_OWN], ownDelta, 20);
- assertEquals(values[InstrumentationService.Timer.AVG_TOTAL], avgTotal, 20);
- assertEquals(values[InstrumentationService.Timer.AVG_OWN], avgOwn, 20);
-
- cron = new InstrumentationService.Cron();
-
- cron.start();
- ownStart = Time.now();
- totalStart = ownStart;
- ownDelta = 0;
- sleep(200);
-
- cron.stop();
- ownEnd = Time.now();
- ownDelta += ownEnd - ownStart;
- sleep(200);
-
- cron.start();
- ownStart = Time.now();
- sleep(200);
-
- cron.stop();
- ownEnd = Time.now();
- ownDelta += ownEnd - ownStart;
- totalEnd = ownEnd;
- totalDelta = totalEnd - totalStart;
-
- avgTotal = (avgTotal * 1 + totalDelta) / 2;
- avgOwn = (avgOwn * 1 + ownDelta) / 2;
-
- timer.addCron(cron);
- values = timer.getValues();
- assertEquals(values[InstrumentationService.Timer.LAST_TOTAL], totalDelta, 20);
- assertEquals(values[InstrumentationService.Timer.LAST_OWN], ownDelta, 20);
- assertEquals(values[InstrumentationService.Timer.AVG_TOTAL], avgTotal, 20);
- assertEquals(values[InstrumentationService.Timer.AVG_OWN], avgOwn, 20);
-
- avgTotal = totalDelta;
- avgOwn = ownDelta;
-
- cron = new InstrumentationService.Cron();
-
- cron.start();
- ownStart = Time.now();
- totalStart = ownStart;
- ownDelta = 0;
- sleep(300);
-
- cron.stop();
- ownEnd = Time.now();
- ownDelta += ownEnd - ownStart;
- sleep(300);
-
- cron.start();
- ownStart = Time.now();
- sleep(300);
-
- cron.stop();
- ownEnd = Time.now();
- ownDelta += ownEnd - ownStart;
- totalEnd = ownEnd;
- totalDelta = totalEnd - totalStart;
-
- avgTotal = (avgTotal * 1 + totalDelta) / 2;
- avgOwn = (avgOwn * 1 + ownDelta) / 2;
-
- cron.stop();
- timer.addCron(cron);
- values = timer.getValues();
- assertEquals(values[InstrumentationService.Timer.LAST_TOTAL], totalDelta, 20);
- assertEquals(values[InstrumentationService.Timer.LAST_OWN], ownDelta, 20);
- assertEquals(values[InstrumentationService.Timer.AVG_TOTAL], avgTotal, 20);
- assertEquals(values[InstrumentationService.Timer.AVG_OWN], avgOwn, 20);
-
- JSONObject json = (JSONObject) new JSONParser().parse(timer.toJSONString());
- assertEquals(json.size(), 4);
- assertEquals(json.get("lastTotal"), values[InstrumentationService.Timer.LAST_TOTAL]);
- assertEquals(json.get("lastOwn"), values[InstrumentationService.Timer.LAST_OWN]);
- assertEquals(json.get("avgTotal"), values[InstrumentationService.Timer.AVG_TOTAL]);
- assertEquals(json.get("avgOwn"), values[InstrumentationService.Timer.AVG_OWN]);
-
- StringWriter writer = new StringWriter();
- timer.writeJSONString(writer);
- writer.close();
- json = (JSONObject) new JSONParser().parse(writer.toString());
- assertEquals(json.size(), 4);
- assertEquals(json.get("lastTotal"), values[InstrumentationService.Timer.LAST_TOTAL]);
- assertEquals(json.get("lastOwn"), values[InstrumentationService.Timer.LAST_OWN]);
- assertEquals(json.get("avgTotal"), values[InstrumentationService.Timer.AVG_TOTAL]);
- assertEquals(json.get("avgOwn"), values[InstrumentationService.Timer.AVG_OWN]);
- }
-
- @Test
- public void sampler() throws Exception {
- final long value[] = new long[1];
- Instrumentation.Variable<Long> var = new Instrumentation.Variable<Long>() {
- @Override
- public Long getValue() {
- return value[0];
- }
- };
-
- InstrumentationService.Sampler sampler = new InstrumentationService.Sampler();
- sampler.init(4, var);
- assertEquals(sampler.getRate(), 0f, 0.0001);
- sampler.sample();
- assertEquals(sampler.getRate(), 0f, 0.0001);
- value[0] = 1;
- sampler.sample();
- assertEquals(sampler.getRate(), (0d + 1) / 2, 0.0001);
- value[0] = 2;
- sampler.sample();
- assertEquals(sampler.getRate(), (0d + 1 + 2) / 3, 0.0001);
- value[0] = 3;
- sampler.sample();
- assertEquals(sampler.getRate(), (0d + 1 + 2 + 3) / 4, 0.0001);
- value[0] = 4;
- sampler.sample();
- assertEquals(sampler.getRate(), (4d + 1 + 2 + 3) / 4, 0.0001);
-
- JSONObject json = (JSONObject) new JSONParser().parse(sampler.toJSONString());
- assertEquals(json.size(), 2);
- assertEquals(json.get("sampler"), sampler.getRate());
- assertEquals(json.get("size"), 4L);
-
- StringWriter writer = new StringWriter();
- sampler.writeJSONString(writer);
- writer.close();
- json = (JSONObject) new JSONParser().parse(writer.toString());
- assertEquals(json.size(), 2);
- assertEquals(json.get("sampler"), sampler.getRate());
- assertEquals(json.get("size"), 4L);
- }
-
- @Test
- public void variableHolder() throws Exception {
- InstrumentationService.VariableHolder<String> variableHolder =
- new InstrumentationService.VariableHolder<String>();
-
- variableHolder.var = new Instrumentation.Variable<String>() {
- @Override
- public String getValue() {
- return "foo";
- }
- };
-
- JSONObject json = (JSONObject) new JSONParser().parse(variableHolder.toJSONString());
- assertEquals(json.size(), 1);
- assertEquals(json.get("value"), "foo");
-
- StringWriter writer = new StringWriter();
- variableHolder.writeJSONString(writer);
- writer.close();
- json = (JSONObject) new JSONParser().parse(writer.toString());
- assertEquals(json.size(), 1);
- assertEquals(json.get("value"), "foo");
- }
-
- @Test
- @TestDir
- @SuppressWarnings("unchecked")
- public void service() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
- String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName()));
- Configuration conf = new Configuration(false);
- conf.set("server.services", services);
- Server server = new Server("server", dir, dir, dir, dir, conf);
- server.init();
-
- Instrumentation instrumentation = server.get(Instrumentation.class);
- assertNotNull(instrumentation);
- instrumentation.incr("g", "c", 1);
- instrumentation.incr("g", "c", 2);
- instrumentation.incr("g", "c1", 2);
-
- Instrumentation.Cron cron = instrumentation.createCron();
- cron.start();
- sleep(100);
- cron.stop();
- instrumentation.addCron("g", "t", cron);
- cron = instrumentation.createCron();
- cron.start();
- sleep(200);
- cron.stop();
- instrumentation.addCron("g", "t", cron);
-
- Instrumentation.Variable<String> var = new Instrumentation.Variable<String>() {
- @Override
- public String getValue() {
- return "foo";
- }
- };
- instrumentation.addVariable("g", "v", var);
-
- Instrumentation.Variable<Long> varToSample = new Instrumentation.Variable<Long>() {
- @Override
- public Long getValue() {
- return 1L;
- }
- };
- instrumentation.addSampler("g", "s", 10, varToSample);
-
- Map<String, ?> snapshot = instrumentation.getSnapshot();
- assertNotNull(snapshot.get("os-env"));
- assertNotNull(snapshot.get("sys-props"));
- assertNotNull(snapshot.get("jvm"));
- assertNotNull(snapshot.get("counters"));
- assertNotNull(snapshot.get("timers"));
- assertNotNull(snapshot.get("variables"));
- assertNotNull(snapshot.get("samplers"));
- assertNotNull(((Map<String, String>) snapshot.get("os-env")).get("PATH"));
- assertNotNull(((Map<String, String>) snapshot.get("sys-props")).get("java.version"));
- assertNotNull(((Map<String, ?>) snapshot.get("jvm")).get("free.memory"));
- assertNotNull(((Map<String, ?>) snapshot.get("jvm")).get("max.memory"));
- assertNotNull(((Map<String, ?>) snapshot.get("jvm")).get("total.memory"));
- assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("counters")).get("g"));
- assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("timers")).get("g"));
- assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("variables")).get("g"));
- assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("samplers")).get("g"));
- assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("counters")).get("g").get("c"));
- assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("counters")).get("g").get("c1"));
- assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("timers")).get("g").get("t"));
- assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("variables")).get("g").get("v"));
- assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("samplers")).get("g").get("s"));
-
- StringWriter writer = new StringWriter();
- JSONObject.writeJSONString(snapshot, writer);
- writer.close();
- server.destroy();
- }
-
- @Test
- @TestDir
- @SuppressWarnings("unchecked")
- public void sampling() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
- String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(),
- SchedulerService.class.getName()));
- Configuration conf = new Configuration(false);
- conf.set("server.services", services);
- Server server = new Server("server", dir, dir, dir, dir, conf);
- server.init();
- Instrumentation instrumentation = server.get(Instrumentation.class);
-
- final AtomicInteger count = new AtomicInteger();
-
- Instrumentation.Variable<Long> varToSample = new Instrumentation.Variable<Long>() {
- @Override
- public Long getValue() {
- return (long) count.incrementAndGet();
- }
- };
- instrumentation.addSampler("g", "s", 10, varToSample);
-
- sleep(2000);
- int i = count.get();
- assertTrue(i > 0);
-
- Map<String, Map<String, ?>> snapshot = instrumentation.getSnapshot();
- Map<String, Map<String, Object>> samplers = (Map<String, Map<String, Object>>) snapshot.get("samplers");
- InstrumentationService.Sampler sampler = (InstrumentationService.Sampler) samplers.get("g").get("s");
- assertTrue(sampler.getRate() > 0);
-
- server.destroy();
- }
-
-}
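
The JSON assertions above rely on json-simple round-tripping: serialize with toJSONString() or writeJSONString(), parse back with JSONParser, and compare fields. A minimal sketch of that round trip (the field names here are just examples):

    import java.io.StringWriter;

    import org.json.simple.JSONObject;
    import org.json.simple.parser.JSONParser;

    // Hypothetical sketch: serialize, re-parse, and compare. json-simple
    // parses integral numbers back as Long, which the removed assertions
    // depended on.
    public class JsonRoundTripSketch {
      @SuppressWarnings("unchecked")
      public static void main(String[] args) throws Exception {
        JSONObject json = new JSONObject();
        json.put("lastTotal", 200L);
        json.put("avgOwn", 100L);

        StringWriter writer = new StringWriter();
        json.writeJSONString(writer);
        writer.close();

        JSONObject parsed = (JSONObject) new JSONParser().parse(writer.toString());
        System.out.println(parsed.size() == 2
            && parsed.get("lastTotal").equals(200L));
      }
    }
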
diff --git a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/service/scheduler/TestSchedulerService.java b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/service/scheduler/TestSchedulerService.java
deleted file mode 100644
index f8abb48..0000000
--- a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/service/scheduler/TestSchedulerService.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.service.scheduler;
-
-import static org.junit.Assert.assertNotNull;
-
-import java.util.Arrays;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.lib.server.Server;
-import org.apache.hadoop.lib.service.Scheduler;
-import org.apache.hadoop.lib.service.instrumentation.InstrumentationService;
-import org.apache.hadoop.test.HTestCase;
-import org.apache.hadoop.test.TestDir;
-import org.apache.hadoop.test.TestDirHelper;
-import org.apache.hadoop.util.StringUtils;
-import org.junit.Test;
-
-public class TestSchedulerService extends HTestCase {
-
- @Test
- @TestDir
- public void service() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
- Configuration conf = new Configuration(false);
- conf.set("server.services", StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(),
- SchedulerService.class.getName())));
- Server server = new Server("server", dir, dir, dir, dir, conf);
- server.init();
- assertNotNull(server.get(Scheduler.class));
- server.destroy();
- }
-
-}
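
The removed test is a compact example of the module's lookup-by-interface idiom: implementations are listed in "server.services", and callers resolve them through the interface class. A sketch of the same wiring outside JUnit, with the temp dir standing in for a real home directory:

    import java.util.Arrays;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.lib.server.Server;
    import org.apache.hadoop.lib.service.Scheduler;
    import org.apache.hadoop.lib.service.instrumentation.InstrumentationService;
    import org.apache.hadoop.lib.service.scheduler.SchedulerService;
    import org.apache.hadoop.util.StringUtils;

    // Hypothetical sketch: declare implementations, then resolve by interface.
    public class SchedulerLookupSketch {
      public static void main(String[] args) throws Exception {
        String dir = System.getProperty("java.io.tmpdir"); // assumed writable
        Configuration conf = new Configuration(false);
        conf.set("server.services", StringUtils.join(",", Arrays.asList(
            InstrumentationService.class.getName(), // SchedulerService needs it
            SchedulerService.class.getName())));
        Server server = new Server("server", dir, dir, dir, dir, conf);
        server.init();
        Scheduler scheduler = server.get(Scheduler.class); // resolved by interface
        System.out.println("scheduler = " + scheduler);
        server.destroy();
      }
    }
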
diff --git a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/service/security/DummyGroupMapping.java b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/service/security/DummyGroupMapping.java
deleted file mode 100644
index 9ef786d..0000000
--- a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/service/security/DummyGroupMapping.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.lib.service.security;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-
-import org.apache.hadoop.security.GroupMappingServiceProvider;
-import org.apache.hadoop.test.HadoopUsersConfTestHelper;
-
-public class DummyGroupMapping implements GroupMappingServiceProvider {
-
- @Override
- public List<String> getGroups(String user) throws IOException {
- if (user.equals("root")) {
- return Arrays.asList("admin");
- } else if (user.equals("nobody")) {
- return Arrays.asList("nobody");
- } else {
- String[] groups = HadoopUsersConfTestHelper.getHadoopUserGroups(user);
- return (groups != null) ? Arrays.asList(groups) : Collections.emptyList();
- }
- }
-
- @Override
- public void cacheGroupsRefresh() throws IOException {
- }
-
- @Override
- public void cacheGroupsAdd(List<String> groups) throws IOException {
- }
-}
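
A short usage sketch of the stub above: the fixed users map to fixed groups, and everyone else falls back to the test helper (the results in comments follow directly from the code above):

    import java.util.List;

    import org.apache.hadoop.lib.service.security.DummyGroupMapping;

    // Hypothetical usage sketch for the stub group mapping above.
    public class DummyGroupMappingUsage {
      public static void main(String[] args) throws Exception {
        DummyGroupMapping mapping = new DummyGroupMapping();
        List<String> rootGroups = mapping.getGroups("root");     // ["admin"]
        List<String> nobodyGroups = mapping.getGroups("nobody"); // ["nobody"]
        System.out.println(rootGroups + " " + nobodyGroups);
      }
    }
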
diff --git a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/service/security/TestGroupsService.java b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/service/security/TestGroupsService.java
deleted file mode 100644
index 445192b..0000000
--- a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/service/security/TestGroupsService.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.service.security;
-
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNotSame;
-
-import java.util.Arrays;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.lib.server.Server;
-import org.apache.hadoop.lib.service.Groups;
-import org.apache.hadoop.test.HTestCase;
-import org.apache.hadoop.test.TestDir;
-import org.apache.hadoop.test.TestDirHelper;
-import org.apache.hadoop.util.StringUtils;
-import org.junit.Test;
-
-public class TestGroupsService extends HTestCase {
-
- @Test
- @TestDir
- public void service() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
- Configuration conf = new Configuration(false);
- conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName())));
- Server server = new Server("server", dir, dir, dir, dir, conf);
- server.init();
- Groups groups = server.get(Groups.class);
- assertNotNull(groups);
- List<String> g = groups.getGroups(System.getProperty("user.name"));
- assertNotSame(g.size(), 0);
- server.destroy();
- }
-
- @Test(expected = RuntimeException.class)
- @TestDir
- public void invalidGroupsMapping() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
- Configuration conf = new Configuration(false);
- conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName())));
- conf.set("server.groups.hadoop.security.group.mapping", String.class.getName());
- Server server = new Server("server", dir, dir, dir, dir, conf);
- server.init();
- }
-
-}
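
The second removed test shows how group mappings are swapped: GroupsService reads its provider class from a scoped configuration key. A sketch wiring in the DummyGroupMapping stub instead of an invalid class:

    import java.util.Arrays;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.lib.server.Server;
    import org.apache.hadoop.lib.service.Groups;
    import org.apache.hadoop.lib.service.security.DummyGroupMapping;
    import org.apache.hadoop.lib.service.security.GroupsService;
    import org.apache.hadoop.util.StringUtils;

    // Hypothetical sketch: point the scoped group-mapping key at a custom provider.
    public class CustomGroupMappingSketch {
      public static void main(String[] args) throws Exception {
        String dir = System.getProperty("java.io.tmpdir"); // assumed writable
        Configuration conf = new Configuration(false);
        conf.set("server.services",
            StringUtils.join(",", Arrays.asList(GroupsService.class.getName())));
        conf.set("server.groups.hadoop.security.group.mapping",
            DummyGroupMapping.class.getName());
        Server server = new Server("server", dir, dir, dir, dir, conf);
        server.init();
        Groups groups = server.get(Groups.class);
        System.out.println(groups.getGroups("root")); // ["admin"] via the stub
        server.destroy();
      }
    }
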
diff --git a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/servlet/TestHostnameFilter.java b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/servlet/TestHostnameFilter.java
deleted file mode 100644
index 203796e..0000000
--- a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/servlet/TestHostnameFilter.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.servlet;
-
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import javax.servlet.Filter;
-import javax.servlet.FilterChain;
-import javax.servlet.ServletException;
-import javax.servlet.ServletRequest;
-import javax.servlet.ServletResponse;
-
-import org.apache.hadoop.test.HTestCase;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-
-public class TestHostnameFilter extends HTestCase {
-
- @Test
- public void hostname() throws Exception {
- ServletRequest request = Mockito.mock(ServletRequest.class);
- Mockito.when(request.getRemoteAddr()).thenReturn("localhost");
-
- ServletResponse response = Mockito.mock(ServletResponse.class);
-
- final AtomicBoolean invoked = new AtomicBoolean();
-
- FilterChain chain = new FilterChain() {
- @Override
- public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse)
- throws IOException, ServletException {
- // Hostname was set to "localhost", but may get resolved automatically to
- // "127.0.0.1" depending on OS.
- assertTrue(HostnameFilter.get().contains("localhost") ||
- HostnameFilter.get().contains("127.0.0.1"));
- invoked.set(true);
- }
- };
-
- Filter filter = new HostnameFilter();
- filter.init(null);
- assertNull(HostnameFilter.get());
- filter.doFilter(request, response, chain);
- assertTrue(invoked.get());
- assertNull(HostnameFilter.get());
- filter.destroy();
- }
-
- @Test
- public void testMissingHostname() throws Exception {
- ServletRequest request = Mockito.mock(ServletRequest.class);
- Mockito.when(request.getRemoteAddr()).thenReturn(null);
-
- ServletResponse response = Mockito.mock(ServletResponse.class);
-
- final AtomicBoolean invoked = new AtomicBoolean();
-
- FilterChain chain = new FilterChain() {
- @Override
- public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse)
- throws IOException, ServletException {
- assertTrue(HostnameFilter.get().contains("???"));
- invoked.set(true);
- }
- };
-
- Filter filter = new HostnameFilter();
- filter.init(null);
- assertNull(HostnameFilter.get());
- filter.doFilter(request, response, chain);
- assertTrue(invoked.get());
- assertNull(HostnameFilter.get());
- filter.destroy();
- }
-}
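
The pattern above, mock the request and response, assert thread-local state from inside a FilterChain, then verify the chain actually ran, generalizes to any servlet filter. A condensed sketch (FilterChain has a single abstract method, so a lambda works on Java 8+):

    import java.util.concurrent.atomic.AtomicBoolean;

    import javax.servlet.Filter;
    import javax.servlet.FilterChain;
    import javax.servlet.ServletRequest;
    import javax.servlet.ServletResponse;

    import org.apache.hadoop.lib.servlet.HostnameFilter;
    import org.mockito.Mockito;

    // Hypothetical sketch of the filter-testing idiom used above.
    public class FilterChainSketch {
      public static void main(String[] args) throws Exception {
        ServletRequest request = Mockito.mock(ServletRequest.class);
        Mockito.when(request.getRemoteAddr()).thenReturn("localhost");
        ServletResponse response = Mockito.mock(ServletResponse.class);

        final AtomicBoolean invoked = new AtomicBoolean();
        FilterChain chain = (req, res) -> invoked.set(true); // runs inside doFilter

        Filter filter = new HostnameFilter(); // the filter under test
        filter.init(null);
        filter.doFilter(request, response, chain);
        if (!invoked.get()) {
          throw new AssertionError("chain was not invoked");
        }
        filter.destroy();
      }
    }
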
diff --git a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/servlet/TestMDCFilter.java b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/servlet/TestMDCFilter.java
deleted file mode 100644
index 911cc0a..0000000
--- a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/servlet/TestMDCFilter.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.servlet;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.security.Principal;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import javax.servlet.Filter;
-import javax.servlet.FilterChain;
-import javax.servlet.ServletException;
-import javax.servlet.ServletRequest;
-import javax.servlet.ServletResponse;
-import javax.servlet.http.HttpServletRequest;
-
-import org.apache.hadoop.test.HTestCase;
-import org.junit.Test;
-import org.mockito.Mockito;
-import org.slf4j.MDC;
-
-
-public class TestMDCFilter extends HTestCase {
-
- @Test
- public void mdc() throws Exception {
- HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
- Mockito.when(request.getUserPrincipal()).thenReturn(null);
- Mockito.when(request.getMethod()).thenReturn("METHOD");
- Mockito.when(request.getPathInfo()).thenReturn("/pathinfo");
-
- ServletResponse response = Mockito.mock(ServletResponse.class);
-
- final AtomicBoolean invoked = new AtomicBoolean();
-
- FilterChain chain = new FilterChain() {
- @Override
- public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse)
- throws IOException, ServletException {
- assertEquals(MDC.get("hostname"), null);
- assertEquals(MDC.get("user"), null);
- assertEquals(MDC.get("method"), "METHOD");
- assertEquals(MDC.get("path"), "/pathinfo");
- invoked.set(true);
- }
- };
-
- MDC.clear();
- Filter filter = new MDCFilter();
- filter.init(null);
-
- filter.doFilter(request, response, chain);
- assertTrue(invoked.get());
- assertNull(MDC.get("hostname"));
- assertNull(MDC.get("user"));
- assertNull(MDC.get("method"));
- assertNull(MDC.get("path"));
-
- Mockito.when(request.getUserPrincipal()).thenReturn(new Principal() {
- @Override
- public String getName() {
- return "name";
- }
- });
-
- invoked.set(false);
- chain = new FilterChain() {
- @Override
- public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse)
- throws IOException, ServletException {
- assertEquals(MDC.get("hostname"), null);
- assertEquals(MDC.get("user"), "name");
- assertEquals(MDC.get("method"), "METHOD");
- assertEquals(MDC.get("path"), "/pathinfo");
- invoked.set(true);
- }
- };
- filter.doFilter(request, response, chain);
- assertTrue(invoked.get());
-
- HostnameFilter.HOSTNAME_TL.set("HOST");
-
- invoked.set(false);
- chain = new FilterChain() {
- @Override
- public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse)
- throws IOException, ServletException {
- assertEquals(MDC.get("hostname"), "HOST");
- assertEquals(MDC.get("user"), "name");
- assertEquals(MDC.get("method"), "METHOD");
- assertEquals(MDC.get("path"), "/pathinfo");
- invoked.set(true);
- }
- };
- filter.doFilter(request, response, chain);
- assertTrue(invoked.get());
-
- HostnameFilter.HOSTNAME_TL.remove();
-
- filter.destroy();
- }
-}
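
The invariant being checked is the standard SLF4J MDC contract: entries are per-thread, visible to everything downstream of doFilter, and must be cleared when the request scope ends. A minimal sketch of that contract on its own:

    import org.slf4j.MDC;

    // Hypothetical sketch of the MDC lifecycle the removed test verified.
    public class MdcSketch {
      public static void main(String[] args) {
        MDC.put("method", "GET");
        MDC.put("path", "/pathinfo");
        try {
          // anything logging on this thread sees the MDC entries here
          System.out.println(MDC.get("method") + " " + MDC.get("path"));
        } finally {
          MDC.clear(); // mirror the filter's cleanup so state never leaks
        }
      }
    }
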
diff --git a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/servlet/TestServerWebApp.java b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/servlet/TestServerWebApp.java
deleted file mode 100644
index 889d20b..0000000
--- a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/servlet/TestServerWebApp.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.servlet;
-
-import static org.junit.Assert.assertEquals;
-
-import org.apache.hadoop.lib.server.Server;
-import org.apache.hadoop.test.HTestCase;
-import org.apache.hadoop.test.TestDir;
-import org.apache.hadoop.test.TestDirHelper;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.net.InetSocketAddress;
-
-public class TestServerWebApp extends HTestCase {
-
- @Test(expected = IllegalArgumentException.class)
- public void getHomeDirNotDef() {
- ServerWebApp.getHomeDir("TestServerWebApp00");
- }
-
- @Test
- public void getHomeDir() {
- System.setProperty("TestServerWebApp0.home.dir", "/tmp");
- assertEquals(ServerWebApp.getHomeDir("TestServerWebApp0"), "/tmp");
- assertEquals(ServerWebApp.getDir("TestServerWebApp0", ".log.dir", "/tmp/log"), "/tmp/log");
- System.setProperty("TestServerWebApp0.log.dir", "/tmplog");
- assertEquals(ServerWebApp.getDir("TestServerWebApp0", ".log.dir", "/tmp/log"), "/tmplog");
- }
-
- @Test
- @TestDir
- public void lifecycle() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
- System.setProperty("TestServerWebApp1.home.dir", dir);
- System.setProperty("TestServerWebApp1.config.dir", dir);
- System.setProperty("TestServerWebApp1.log.dir", dir);
- System.setProperty("TestServerWebApp1.temp.dir", dir);
- ServerWebApp server = new ServerWebApp("TestServerWebApp1") {
- };
-
- assertEquals(server.getStatus(), Server.Status.UNDEF);
- server.contextInitialized(null);
- assertEquals(server.getStatus(), Server.Status.NORMAL);
- server.contextDestroyed(null);
- assertEquals(server.getStatus(), Server.Status.SHUTDOWN);
- }
-
- @Test(expected = RuntimeException.class)
- @TestDir
- public void failedInit() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
- System.setProperty("TestServerWebApp2.home.dir", dir);
- System.setProperty("TestServerWebApp2.config.dir", dir);
- System.setProperty("TestServerWebApp2.log.dir", dir);
- System.setProperty("TestServerWebApp2.temp.dir", dir);
- System.setProperty("testserverwebapp2.services", "FOO");
- ServerWebApp server = new ServerWebApp("TestServerWebApp2") {
- };
-
- server.contextInitialized(null);
- }
-
- @Test
- @TestDir
- public void testResolveAuthority() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
- System.setProperty("TestServerWebApp3.home.dir", dir);
- System.setProperty("TestServerWebApp3.config.dir", dir);
- System.setProperty("TestServerWebApp3.log.dir", dir);
- System.setProperty("TestServerWebApp3.temp.dir", dir);
- System.setProperty("testserverwebapp3.http.hostname", "localhost");
- System.setProperty("testserverwebapp3.http.port", "14000");
- ServerWebApp server = new ServerWebApp("TestServerWebApp3") {
- };
-
- InetSocketAddress address = server.resolveAuthority();
- Assert.assertEquals("localhost", address.getHostName());
- Assert.assertEquals(14000, address.getPort());
- }
-
-}
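
The removed test documents the "<name>.home.dir" convention: ServerWebApp resolves its directories from JVM system properties keyed by the application name, with per-directory defaults. A small sketch of just the resolution calls, using a hypothetical application name:

    import org.apache.hadoop.lib.servlet.ServerWebApp;

    // Hypothetical sketch of the system-property resolution shown above.
    public class ServerWebAppDirsSketch {
      public static void main(String[] args) {
        System.setProperty("MyApp.home.dir", "/tmp");
        String home = ServerWebApp.getHomeDir("MyApp");  // "/tmp"
        // the default applies until "MyApp.log.dir" is set explicitly
        String log = ServerWebApp.getDir("MyApp", ".log.dir", "/tmp/log");
        System.out.println(home + " " + log);
      }
    }
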
diff --git a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/util/TestCheck.java b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/util/TestCheck.java
deleted file mode 100644
index 877dcd4..0000000
--- a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/util/TestCheck.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.util;
-
-
-import static org.junit.Assert.assertEquals;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-
-import org.apache.hadoop.test.HTestCase;
-import org.junit.Test;
-
-public class TestCheck extends HTestCase {
-
- @Test
- public void notNullNotNull() {
- assertEquals(Check.notNull("value", "name"), "value");
- }
-
- @Test(expected = IllegalArgumentException.class)
- public void notNullNull() {
- Check.notNull(null, "name");
- }
-
- @Test
- public void notNullElementsNotNull() {
- Check.notNullElements(new ArrayList<String>(), "name");
- Check.notNullElements(Arrays.asList("a"), "name");
- }
-
- @Test(expected = IllegalArgumentException.class)
- public void notNullElementsNullList() {
- Check.notNullElements(null, "name");
- }
-
- @Test(expected = IllegalArgumentException.class)
- public void notNullElementsNullElements() {
- Check.notNullElements(Arrays.asList("a", "", null), "name");
- }
-
- @Test
- public void notEmptyElementsNotNull() {
- Check.notEmptyElements(new ArrayList<String>(), "name");
- Check.notEmptyElements(Arrays.asList("a"), "name");
- }
-
- @Test(expected = IllegalArgumentException.class)
- public void notEmptyElementsNullList() {
- Check.notEmptyElements(null, "name");
- }
-
- @Test(expected = IllegalArgumentException.class)
- public void notEmptyElementsNullElements() {
- Check.notEmptyElements(Arrays.asList("a", null), "name");
- }
-
-
- @Test(expected = IllegalArgumentException.class)
- public void notEmptyElementsEmptyElements() {
- Check.notEmptyElements(Arrays.asList("a", ""), "name");
- }
-
-
- @Test
- public void notEmptyNotEmpty() {
- assertEquals(Check.notEmpty("value", "name"), "value");
- }
-
- @Test(expected = IllegalArgumentException.class)
- public void notEmptyNull() {
- Check.notEmpty(null, "name");
- }
-
- @Test(expected = IllegalArgumentException.class)
- public void notEmptyEmpty() {
- Check.notEmpty("", "name");
- }
-
- @Test
- public void validIdentifierValid() throws Exception {
- assertEquals(Check.validIdentifier("a", 1, ""), "a");
- assertEquals(Check.validIdentifier("a1", 2, ""), "a1");
- assertEquals(Check.validIdentifier("a_", 3, ""), "a_");
- assertEquals(Check.validIdentifier("_", 1, ""), "_");
- }
-
- @Test(expected = IllegalArgumentException.class)
- public void validIdentifierInvalid1() throws Exception {
- Check.validIdentifier("!", 1, "");
- }
-
- @Test(expected = IllegalArgumentException.class)
- public void validIdentifierInvalid2() throws Exception {
- Check.validIdentifier("a1", 1, "");
- }
-
- @Test(expected = IllegalArgumentException.class)
- public void validIdentifierInvalid3() throws Exception {
- Check.validIdentifier("1", 1, "");
- }
-
- @Test
- public void checkGTZeroGreater() {
- assertEquals(Check.gt0(120, "test"), 120);
- }
-
- @Test(expected = IllegalArgumentException.class)
- public void checkGTZeroZero() {
- Check.gt0(0, "test");
- }
-
- @Test(expected = IllegalArgumentException.class)
- public void checkGTZeroLessThanZero() {
- Check.gt0(-1, "test");
- }
-
- @Test
- public void checkGEZero() {
- assertEquals(Check.ge0(120, "test"), 120);
- assertEquals(Check.ge0(0, "test"), 0);
- }
-
- @Test(expected = IllegalArgumentException.class)
- public void checkGELessThanZero() {
- Check.ge0(-1, "test");
- }
-
-}
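
Taken together, the assertions deleted above document the full surface of org.apache.hadoop.lib.util.Check: notNull, notEmpty, notNullElements, notEmptyElements, validIdentifier(value, maxLen, suffix), gt0, and ge0, all of which throw IllegalArgumentException on violation and, where the tests assert it, return the checked value. A short call-site sketch; CacheService and its fields are hypothetical, only the Check calls come from the deleted tests:

    import java.util.List;
    import org.apache.hadoop.lib.util.Check;

    // Hypothetical service showing the intended call-site style for the
    // Check preconditions verified by the deleted TestCheck.
    public class CacheService {
      private final String name;
      private final int capacity;
      private final List<String> seedKeys;

      public CacheService(String name, int capacity, List<String> seedKeys) {
        // validIdentifier(value, maxLen, suffix) returns the value it checks.
        this.name = Check.validIdentifier(name, 64, "");
        // gt0 rejects zero and negatives, returning the argument otherwise.
        this.capacity = Check.gt0(capacity, "capacity");
        // notEmptyElements rejects null lists, null elements, and "" elements.
        Check.notEmptyElements(seedKeys, "seedKeys");
        this.seedKeys = seedKeys;
      }
    }
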
diff --git a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/util/TestConfigurationUtils.java b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/util/TestConfigurationUtils.java
deleted file mode 100644
index b868d0b..0000000
--- a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/util/TestConfigurationUtils.java
+++ /dev/null
@@ -1,141 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.util;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-
-import org.apache.hadoop.conf.Configuration;
-import org.junit.Test;
-
-public class TestConfigurationUtils {
-
- @Test
- public void constructors() throws Exception {
- Configuration conf = new Configuration(false);
- assertEquals(conf.size(), 0);
-
- byte[] bytes = "<configuration><property><name>a</name><value>A</value></property></configuration>".getBytes();
- InputStream is = new ByteArrayInputStream(bytes);
- conf = new Configuration(false);
- ConfigurationUtils.load(conf, is);
- assertEquals(conf.size(), 1);
- assertEquals(conf.get("a"), "A");
- }
-
-
- @Test
- public void constructors3() throws Exception {
- InputStream is = new ByteArrayInputStream(
- "<xxx><property name=\"key1\" value=\"val1\"/></xxx>".getBytes());
- Configuration conf = new Configuration(false);
- ConfigurationUtils.load(conf, is);
- assertEquals("val1", conf.get("key1"));
- }
-
- @Test
- public void copy() throws Exception {
- Configuration srcConf = new Configuration(false);
- Configuration targetConf = new Configuration(false);
-
- srcConf.set("testParameter1", "valueFromSource");
- srcConf.set("testParameter2", "valueFromSource");
-
- targetConf.set("testParameter2", "valueFromTarget");
- targetConf.set("testParameter3", "valueFromTarget");
-
- ConfigurationUtils.copy(srcConf, targetConf);
-
- assertEquals("valueFromSource", targetConf.get("testParameter1"));
- assertEquals("valueFromSource", targetConf.get("testParameter2"));
- assertEquals("valueFromTarget", targetConf.get("testParameter3"));
- }
-
- @Test
- public void injectDefaults() throws Exception {
- Configuration srcConf = new Configuration(false);
- Configuration targetConf = new Configuration(false);
-
- srcConf.set("testParameter1", "valueFromSource");
- srcConf.set("testParameter2", "valueFromSource");
-
- targetConf.set("testParameter2", "originalValueFromTarget");
- targetConf.set("testParameter3", "originalValueFromTarget");
-
- ConfigurationUtils.injectDefaults(srcConf, targetConf);
-
- assertEquals("valueFromSource", targetConf.get("testParameter1"));
- assertEquals("originalValueFromTarget", targetConf.get("testParameter2"));
- assertEquals("originalValueFromTarget", targetConf.get("testParameter3"));
-
- assertEquals("valueFromSource", srcConf.get("testParameter1"));
- assertEquals("valueFromSource", srcConf.get("testParameter2"));
- assertNull(srcConf.get("testParameter3"));
- }
-
-
- @Test
- public void resolve() {
- Configuration conf = new Configuration(false);
- conf.set("a", "A");
- conf.set("b", "${a}");
- assertEquals(conf.getRaw("a"), "A");
- assertEquals(conf.getRaw("b"), "${a}");
- conf = ConfigurationUtils.resolve(conf);
- assertEquals(conf.getRaw("a"), "A");
- assertEquals(conf.getRaw("b"), "A");
- }
-
- @Test
- public void testVarResolutionAndSysProps() {
- String userName = System.getProperty("user.name");
- Configuration conf = new Configuration(false);
- conf.set("a", "A");
- conf.set("b", "${a}");
- conf.set("c", "${user.name}");
- conf.set("d", "${aaa}");
- assertEquals(conf.getRaw("a"), "A");
- assertEquals(conf.getRaw("b"), "${a}");
- assertEquals(conf.getRaw("c"), "${user.name}");
- assertEquals(conf.get("a"), "A");
- assertEquals(conf.get("b"), "A");
- assertEquals(conf.get("c"), userName);
- assertEquals(conf.get("d"), "${aaa}");
-
- conf.set("user.name", "foo");
- assertEquals(conf.get("user.name"), "foo");
- }
-
- @Test
- public void testCompactFormatProperty() throws IOException {
- final String testfile = "test-compact-format-property.xml";
- Configuration conf = new Configuration(false);
- assertEquals(0, conf.size());
- ConfigurationUtils.load(conf,
- Thread.currentThread()
- .getContextClassLoader().getResource(testfile).openStream());
- assertEquals(2, conf.size());
- assertEquals("val1", conf.get("key.1"));
- assertEquals("val2", conf.get("key.2"));
- }
-}
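
The two merge helpers covered above have deliberately different semantics, which the deleted assertions make precise: ConfigurationUtils.copy() lets the source win on key conflicts, while injectDefaults() only fills keys the target does not already define, leaving existing target values untouched. A self-contained sketch of the contrast (the class and key names are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.lib.util.ConfigurationUtils;

    public final class ConfMergeDemo {

      private ConfMergeDemo() {
      }

      public static void main(String[] args) {
        Configuration src = new Configuration(false);
        src.set("k", "fromSource");

        // copy(): the source value overwrites the target's existing value.
        Configuration copied = new Configuration(false);
        copied.set("k", "fromTarget");
        ConfigurationUtils.copy(src, copied);
        System.out.println(copied.get("k"));    // prints fromSource

        // injectDefaults(): the target's existing value is preserved.
        Configuration defaulted = new Configuration(false);
        defaulted.set("k", "fromTarget");
        ConfigurationUtils.injectDefaults(src, defaulted);
        System.out.println(defaulted.get("k")); // prints fromTarget
      }
    }
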
diff --git a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/wsrs/TestInputStreamEntity.java b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/wsrs/TestInputStreamEntity.java
deleted file mode 100644
index 0fa9409..0000000
--- a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/wsrs/TestInputStreamEntity.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.wsrs;
-
-import static org.junit.Assert.assertEquals;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.InputStream;
-
-import org.junit.Test;
-
-public class TestInputStreamEntity {
-
- @Test
- public void test() throws Exception {
- InputStream is = new ByteArrayInputStream("abc".getBytes());
- ByteArrayOutputStream baos = new ByteArrayOutputStream();
- InputStreamEntity i = new InputStreamEntity(is);
- i.write(baos);
- baos.close();
- assertEquals(new String(baos.toByteArray()), "abc");
-
- is = new ByteArrayInputStream("abc".getBytes());
- baos = new ByteArrayOutputStream();
- i = new InputStreamEntity(is, 1, 1);
- i.write(baos);
- baos.close();
- assertEquals(baos.toByteArray()[0], 'b');
- }
-
-}
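
The deleted test captures both behaviors of InputStreamEntity: the single-argument constructor streams the wrapped InputStream verbatim, and the (stream, offset, length) form skips offset bytes and writes exactly length bytes. Assuming the class implements javax.ws.rs.core.StreamingOutput (the write(OutputStream) call in the test suggests as much), a hypothetical resource method would use it as below; the path, file name, and byte range are illustrative:

    import java.io.FileInputStream;
    import java.io.IOException;
    import javax.ws.rs.GET;
    import javax.ws.rs.Path;
    import javax.ws.rs.core.Response;
    import org.apache.hadoop.lib.wsrs.InputStreamEntity;

    // Hypothetical JAX-RS resource; only InputStreamEntity and its
    // (stream, offset, length) semantics come from the deleted test.
    @Path("/download")
    public class DownloadResource {

      @GET
      public Response get() throws IOException {
        FileInputStream in = new FileInputStream("/tmp/data.bin");
        // Skip the first 100 bytes, then stream the next 4096 to the client.
        return Response.ok(new InputStreamEntity(in, 100, 4096)).build();
      }
    }
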
diff --git a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/wsrs/TestJSONMapProvider.java b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/wsrs/TestJSONMapProvider.java
deleted file mode 100644
index 0993780..0000000
--- a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/wsrs/TestJSONMapProvider.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.wsrs;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.ByteArrayOutputStream;
-import java.util.Map;
-
-import org.json.simple.JSONObject;
-import org.junit.Test;
-
-public class TestJSONMapProvider {
-
- @Test
- @SuppressWarnings("unchecked")
- public void test() throws Exception {
- JSONMapProvider p = new JSONMapProvider();
- assertTrue(p.isWriteable(Map.class, null, null, null));
- assertFalse(p.isWriteable(this.getClass(), null, null, null));
- assertEquals(p.getSize(null, null, null, null, null), -1);
- ByteArrayOutputStream baos = new ByteArrayOutputStream();
- JSONObject json = new JSONObject();
- json.put("a", "A");
- p.writeTo(json, JSONObject.class, null, null, null, null, baos);
- baos.close();
- assertEquals(new String(baos.toByteArray()).trim(), "{\"a\":\"A\"}");
- }
-
-}
diff --git a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/wsrs/TestJSONProvider.java b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/wsrs/TestJSONProvider.java
deleted file mode 100644
index 5f74750..0000000
--- a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/wsrs/TestJSONProvider.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.wsrs;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.ByteArrayOutputStream;
-
-import org.json.simple.JSONObject;
-import org.junit.Test;
-
-public class TestJSONProvider {
-
- @Test
- @SuppressWarnings("unchecked")
- public void test() throws Exception {
- JSONProvider p = new JSONProvider();
- assertTrue(p.isWriteable(JSONObject.class, null, null, null));
- assertFalse(p.isWriteable(this.getClass(), null, null, null));
- assertEquals(p.getSize(null, null, null, null, null), -1);
- ByteArrayOutputStream baos = new ByteArrayOutputStream();
- JSONObject json = new JSONObject();
- json.put("a", "A");
- p.writeTo(json, JSONObject.class, null, null, null, null, baos);
- baos.close();
- assertEquals(new String(baos.toByteArray()).trim(), "{\"a\":\"A\"}");
- }
-
-}
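
TestJSONMapProvider and TestJSONProvider, both deleted here, exercise near-identical JAX-RS MessageBodyWriter implementations: JSONProvider serializes org.json.simple.JSONObject response entities and JSONMapProvider does the same for java.util.Map, each reporting an unknown size (-1) and refusing unrelated types. A sketch of how such providers are typically registered; the Application subclass is hypothetical, only the two provider classes come from the deleted tests:

    import java.util.HashSet;
    import java.util.Set;
    import javax.ws.rs.core.Application;
    import org.apache.hadoop.lib.wsrs.JSONMapProvider;
    import org.apache.hadoop.lib.wsrs.JSONProvider;

    // Hypothetical wiring: registers the two writers so JAX-RS can render
    // JSONObject and Map entities as application/json responses.
    public class HttpFSAppSketch extends Application {
      @Override
      public Set<Class<?>> getClasses() {
        Set<Class<?>> classes = new HashSet<>();
        classes.add(JSONProvider.class);    // writes JSONObject entities
        classes.add(JSONMapProvider.class); // writes Map entities
        return classes;
      }
    }
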
diff --git a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/wsrs/TestParam.java b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/wsrs/TestParam.java
deleted file mode 100644
index 553ce9e..0000000
--- a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/hadoop/lib/wsrs/TestParam.java
+++ /dev/null
@@ -1,127 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.wsrs;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-
-import java.util.regex.Pattern;
-
-import org.junit.Test;
-
-public class TestParam {
-
- private <T> void test(Param<T> param, String name,
- String domain, T defaultValue, T validValue,
- String invalidStrValue, String outOfRangeValue) throws Exception {
-
- assertEquals(name, param.getName());
- assertEquals(domain, param.getDomain());
- assertEquals(defaultValue, param.value());
- assertEquals(defaultValue, param.parseParam(""));
- assertEquals(defaultValue, param.parseParam(null));
- assertEquals(validValue, param.parseParam(validValue.toString()));
- if (invalidStrValue != null) {
- try {
- param.parseParam(invalidStrValue);
- fail();
- } catch (IllegalArgumentException ex) {
- //NOP
- } catch (Exception ex) {
- fail();
- }
- }
- if (outOfRangeValue != null) {
- try {
- param.parseParam(outOfRangeValue);
- fail();
- } catch (IllegalArgumentException ex) {
- //NOP
- } catch (Exception ex) {
- fail();
- }
- }
- }
-
- @Test
- public void testBoolean() throws Exception {
- Param<Boolean> param = new BooleanParam("b", false) {
- };
- test(param, "b", "a boolean", false, true, "x", null);
- }
-
- @Test
- public void testByte() throws Exception {
- Param<Byte> param = new ByteParam("B", (byte) 1) {
- };
- test(param, "B", "a byte", (byte) 1, (byte) 2, "x", "256");
- }
-
- @Test
- public void testShort() throws Exception {
- Param<Short> param = new ShortParam("S", (short) 1) {
- };
- test(param, "S", "a short", (short) 1, (short) 2, "x",
- "" + ((int)Short.MAX_VALUE + 1));
... 2111 lines suppressed ...