Posted to common-commits@hadoop.apache.org by st...@apache.org on 2016/06/29 15:05:08 UTC
[3/3] hadoop git commit: HADOOP-12709 Cut s3:// from trunk. Contributed by Mingliang Liu.
HADOOP-12709 Cut s3:// from trunk. Contributed by Mingliang Liu.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96fa0f84
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96fa0f84
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96fa0f84
Branch: refs/heads/trunk
Commit: 96fa0f848bbd623ab55092889fb7b040a2a2971c
Parents: 8d202f1
Author: Steve Loughran <st...@apache.org>
Authored: Wed Jun 29 14:06:04 2016 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Wed Jun 29 16:04:50 2016 +0100
----------------------------------------------------------------------
.../src/main/resources/core-default.xml | 62 +--
.../conf/TestCommonConfigurationFields.java | 7 +-
.../hadoop/fs/FileSystemContractBaseTest.java | 2 +-
.../hadoop/fs/TestLocal_S3FileContextURI.java | 38 --
.../hadoop/fs/TestS3_LocalFileContextURI.java | 38 --
.../src/test/resources/core-site.xml | 12 -
.../resources/job_1329348432655_0001_conf.xml | 13 +-
.../java/org/apache/hadoop/fs/s3/Block.java | 52 --
.../apache/hadoop/fs/s3/FileSystemStore.java | 67 ---
.../java/org/apache/hadoop/fs/s3/INode.java | 128 -----
.../hadoop/fs/s3/Jets3tFileSystemStore.java | 449 ----------------
.../org/apache/hadoop/fs/s3/MigrationTool.java | 291 -----------
.../org/apache/hadoop/fs/s3/S3Credentials.java | 102 ----
.../org/apache/hadoop/fs/s3/S3Exception.java | 39 --
.../org/apache/hadoop/fs/s3/S3FileSystem.java | 516 -------------------
.../hadoop/fs/s3/S3FileSystemConfigKeys.java | 47 --
.../hadoop/fs/s3/S3FileSystemException.java | 36 --
.../org/apache/hadoop/fs/s3/S3InputStream.java | 220 --------
.../org/apache/hadoop/fs/s3/S3OutputStream.java | 235 ---------
.../hadoop/fs/s3/VersionMismatchException.java | 37 --
.../java/org/apache/hadoop/fs/s3/package.html | 55 --
.../s3native/Jets3tNativeFileSystemStore.java | 2 -
.../hadoop/fs/s3native/NativeS3FileSystem.java | 34 +-
.../hadoop/fs/s3native/S3Credentials.java | 100 ++++
.../apache/hadoop/fs/s3native/S3Exception.java | 39 ++
.../s3native/S3NativeFileSystemConfigKeys.java | 21 +-
.../org/apache/hadoop/fs/s3native/package.html | 5 +-
.../services/org.apache.hadoop.fs.FileSystem | 1 -
.../src/site/markdown/tools/hadoop-aws/index.md | 36 +-
.../hadoop/fs/contract/s3/S3Contract.java | 50 --
.../fs/contract/s3/TestS3ContractCreate.java | 32 --
.../fs/contract/s3/TestS3ContractDelete.java | 31 --
.../fs/contract/s3/TestS3ContractMkdir.java | 32 --
.../fs/contract/s3/TestS3ContractOpen.java | 32 --
.../fs/contract/s3/TestS3ContractRename.java | 32 --
.../fs/contract/s3/TestS3ContractRootDir.java | 49 --
.../fs/contract/s3/TestS3ContractSeek.java | 41 --
.../hadoop/fs/s3/InMemoryFileSystemStore.java | 200 -------
.../fs/s3/Jets3tS3FileSystemContractTest.java | 31 --
.../fs/s3/S3FileSystemContractBaseTest.java | 61 ---
.../hadoop/fs/s3/S3InMemoryFileSystem.java | 32 --
.../java/org/apache/hadoop/fs/s3/TestINode.java | 60 ---
.../fs/s3/TestInMemoryS3FileSystemContract.java | 31 --
.../apache/hadoop/fs/s3/TestS3Credentials.java | 140 -----
.../apache/hadoop/fs/s3/TestS3FileSystem.java | 52 --
.../hadoop/fs/s3/TestS3InMemoryFileSystem.java | 67 ---
.../s3native/InMemoryNativeFileSystemStore.java | 11 +-
.../hadoop/fs/s3native/TestS3Credentials.java | 147 ++++++
.../src/test/resources/contract/s3.xml | 104 ----
.../src/main/data/2jobs2min-rumen-jh.json | 36 +-
50 files changed, 360 insertions(+), 3595 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96fa0f84/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 3b896be..3018884 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -709,32 +709,16 @@
</property>
<property>
- <name>fs.s3.awsAccessKeyId</name>
- <description>AWS access key ID used by S3 block file system.</description>
-</property>
-
-<property>
- <name>fs.s3.awsSecretAccessKey</name>
- <description>AWS secret key used by S3 block file system.</description>
-</property>
-
-<property>
- <name>fs.s3.block.size</name>
- <value>67108864</value>
- <description>Block size to use when writing files to S3.</description>
-</property>
-
-<property>
- <name>fs.s3.buffer.dir</name>
- <value>${hadoop.tmp.dir}/s3</value>
- <description>Determines where on the local filesystem the s3:/s3n: filesystem
+ <name>fs.s3n.buffer.dir</name>
+ <value>${hadoop.tmp.dir}/s3n</value>
+ <description>Determines where on the local filesystem the s3n:// filesystem
should store files before sending them to S3
(or after retrieving them from S3).
</description>
</property>
<property>
- <name>fs.s3.maxRetries</name>
+ <name>fs.s3n.maxRetries</name>
<value>4</value>
<description>The maximum number of retries for reading or writing files to S3,
before we signal failure to the application.
@@ -742,7 +726,7 @@
</property>
<property>
- <name>fs.s3.sleepTimeSeconds</name>
+ <name>fs.s3n.sleepTimeSeconds</name>
<value>10</value>
<description>The number of seconds to sleep between each S3 retry.
</description>
@@ -1377,42 +1361,6 @@
<description>Replication factor</description>
</property>
-<!-- s3 File System -->
-
-<property>
- <name>s3.stream-buffer-size</name>
- <value>4096</value>
- <description>The size of buffer to stream files.
- The size of this buffer should probably be a multiple of hardware
- page size (4096 on Intel x86), and it determines how much data is
- buffered during read and write operations.</description>
-</property>
-
-<property>
- <name>s3.bytes-per-checksum</name>
- <value>512</value>
- <description>The number of bytes per checksum. Must not be larger than
- s3.stream-buffer-size</description>
-</property>
-
-<property>
- <name>s3.client-write-packet-size</name>
- <value>65536</value>
- <description>Packet size for clients to write</description>
-</property>
-
-<property>
- <name>s3.blocksize</name>
- <value>67108864</value>
- <description>Block size</description>
-</property>
-
-<property>
- <name>s3.replication</name>
- <value>3</value>
- <description>Replication factor</description>
-</property>
-
<!-- s3native File System -->
<property>
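
For clusters that still carry the removed keys in core-site.xml, the rename above is mechanical: fs.s3.buffer.dir, fs.s3.maxRetries and fs.s3.sleepTimeSeconds become their fs.s3n.* equivalents. A sketch of the updated properties (names and defaults taken from the diff above; values illustrative):

<property>
  <name>fs.s3n.buffer.dir</name>
  <value>${hadoop.tmp.dir}/s3n</value>
</property>

<property>
  <name>fs.s3n.maxRetries</name>
  <value>4</value>
</property>

<property>
  <name>fs.s3n.sleepTimeSeconds</name>
  <value>10</value>
</property>

The s3.* block-filesystem tuning keys (s3.stream-buffer-size, s3.blocksize, and so on) are removed outright with no s3n replacement; the parallel s3native.* keys remain untouched.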
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96fa0f84/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index 020474f..0727157 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
@@ -86,20 +86,15 @@ public class TestCommonConfigurationFields extends TestConfigurationFieldsBase {
// Lots of properties not in the above classes
xmlPropsToSkipCompare.add("fs.ftp.password.localhost");
xmlPropsToSkipCompare.add("fs.ftp.user.localhost");
- xmlPropsToSkipCompare.add("fs.s3.block.size");
xmlPropsToSkipCompare.add("hadoop.tmp.dir");
xmlPropsToSkipCompare.add("nfs3.mountd.port");
xmlPropsToSkipCompare.add("nfs3.server.port");
- xmlPropsToSkipCompare.add("test.fs.s3.name");
xmlPropsToSkipCompare.add("test.fs.s3n.name");
- // S3/S3A properties are in a different subtree.
- // - org.apache.hadoop.fs.s3.S3FileSystemConfigKeys
+ // S3N/S3A properties are in a different subtree.
// - org.apache.hadoop.fs.s3native.S3NativeFileSystemConfigKeys
- xmlPrefixToSkipCompare.add("fs.s3.");
xmlPrefixToSkipCompare.add("fs.s3a.");
xmlPrefixToSkipCompare.add("fs.s3n.");
- xmlPrefixToSkipCompare.add("s3.");
xmlPrefixToSkipCompare.add("s3native.");
// ADL properties are in a different subtree
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96fa0f84/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
index c85981b..5b8987c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
@@ -172,7 +172,7 @@ public abstract class FileSystemContractBaseTest extends TestCase {
}
public void testMkdirsWithUmask() throws Exception {
- if (fs.getScheme().equals("s3") || fs.getScheme().equals("s3n")) {
+ if (fs.getScheme().equals("s3n")) {
// skip permission tests for S3FileSystem until HDFS-1333 is fixed.
return;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96fa0f84/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocal_S3FileContextURI.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocal_S3FileContextURI.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocal_S3FileContextURI.java
deleted file mode 100644
index 6c417cd..0000000
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocal_S3FileContextURI.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs;
-
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_DEFAULT;
-import org.apache.hadoop.conf.Configuration;
-import org.junit.Before;
-
-public class TestLocal_S3FileContextURI extends FileContextURIBase {
-
- @Override
- @Before
- public void setUp() throws Exception {
- Configuration S3Conf = new Configuration();
- Configuration localConf = new Configuration();
-
- S3Conf.set(FS_DEFAULT_NAME_DEFAULT, S3Conf.get("test.fs.s3.name"));
- fc1 = FileContext.getFileContext(S3Conf);
- fc2 = FileContext.getFileContext(localConf);
- }
-
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96fa0f84/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestS3_LocalFileContextURI.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestS3_LocalFileContextURI.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestS3_LocalFileContextURI.java
deleted file mode 100644
index 22fa5b0..0000000
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestS3_LocalFileContextURI.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs;
-
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_DEFAULT;
-import org.apache.hadoop.conf.Configuration;
-import org.junit.Before;
-
-public class TestS3_LocalFileContextURI extends FileContextURIBase {
-
- @Override
- @Before
- public void setUp() throws Exception {
-
- Configuration localConf = new Configuration();
- fc2 = FileContext.getFileContext(localConf);
-
- Configuration s3conf = new Configuration();
- s3conf.set(FS_DEFAULT_NAME_DEFAULT, s3conf.get("test.fs.s3.name"));
- fc1 = FileContext.getFileContext(s3conf);
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96fa0f84/hadoop-common-project/hadoop-common/src/test/resources/core-site.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/resources/core-site.xml b/hadoop-common-project/hadoop-common/src/test/resources/core-site.xml
index 6053363..d85472c 100644
--- a/hadoop-common-project/hadoop-common/src/test/resources/core-site.xml
+++ b/hadoop-common-project/hadoop-common/src/test/resources/core-site.xml
@@ -32,18 +32,6 @@
</property>
<property>
- <name>test.fs.s3.name</name>
- <value>s3:///</value>
- <description>The name of the s3 file system for testing.</description>
-</property>
-
-<property>
- <name>fs.s3.block.size</name>
- <value>128</value>
- <description>Size of a block in bytes.</description>
-</property>
-
-<property>
<name>fs.ftp.user.localhost</name>
<value>user</value>
<description>The username for connecting to FTP server running on localhost.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96fa0f84/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml
index bdcfc28..cbae524 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml
@@ -17,7 +17,6 @@
<property><!--Loaded from job.xml--><name>hadoop.http.authentication.kerberos.keytab</name><value>${user.home}/hadoop.keytab</value></property>
<property><!--Loaded from job.xml--><name>yarn.nodemanager.keytab</name><value>/etc/krb5.keytab</value></property>
<property><!--Loaded from job.xml--><name>io.seqfile.sorter.recordlimit</name><value>1000000</value></property>
-<property><!--Loaded from job.xml--><name>s3.blocksize</name><value>67108864</value></property>
<property><!--Loaded from job.xml--><name>mapreduce.task.io.sort.factor</name><value>10</value></property>
<property><!--Loaded from job.xml--><name>yarn.nodemanager.disk-health-checker.interval-ms</name><value>120000</value></property>
<property><!--Loaded from job.xml--><name>mapreduce.job.working.dir</name><value>hdfs://localhost:8021/user/user</value></property>
@@ -27,12 +26,10 @@
<property><!--Loaded from job.xml--><name>dfs.namenode.delegation.token.renew-interval</name><value>86400000</value></property>
<property><!--Loaded from job.xml--><name>yarn.nodemanager.resource.memory-mb</name><value>8192</value></property>
<property><!--Loaded from job.xml--><name>io.map.index.interval</name><value>128</value></property>
-<property><!--Loaded from job.xml--><name>s3.client-write-packet-size</name><value>65536</value></property>
<property><!--Loaded from job.xml--><name>dfs.namenode.http-address</name><value>0.0.0.0:9870</value></property>
<property><!--Loaded from job.xml--><name>mapreduce.task.files.preserve.failedtasks</name><value>false</value></property>
<property><!--Loaded from job.xml--><name>mapreduce.job.reduce.class</name><value>org.apache.hadoop.mapreduce.SleepJob$SleepReducer</value></property>
<property><!--Loaded from job.xml--><name>hadoop.hdfs.configuration.version</name><value>1</value></property>
-<property><!--Loaded from job.xml--><name>s3.replication</name><value>3</value></property>
<property><!--Loaded from job.xml--><name>dfs.datanode.balance.bandwidthPerSec</name><value>1048576</value></property>
<property><!--Loaded from job.xml--><name>mapreduce.reduce.shuffle.connect.timeout</name><value>180000</value></property>
<property><!--Loaded from job.xml--><name>yarn.nodemanager.aux-services</name><value>mapreduce.shuffle</value></property>
@@ -105,7 +102,7 @@
<property><!--Loaded from job.xml--><name>mapreduce.job.maxtaskfailures.per.tracker</name><value>4</value></property>
<property><!--Loaded from job.xml--><name>ipc.client.connection.maxidletime</name><value>10000</value></property>
<property><!--Loaded from job.xml--><name>dfs.blockreport.intervalMsec</name><value>21600000</value></property>
-<property><!--Loaded from job.xml--><name>fs.s3.sleepTimeSeconds</name><value>10</value></property>
+<property><!--Loaded from job.xml--><name>fs.s3n.sleepTimeSeconds</name><value>10</value></property>
<property><!--Loaded from job.xml--><name>dfs.namenode.replication.considerLoad</name><value>true</value></property>
<property><!--Loaded from job.xml--><name>dfs.client.block.write.retries</name><value>3</value></property>
<property><!--Loaded from job.xml--><name>hadoop.proxyuser.user.groups</name><value>users</value></property>
@@ -117,7 +114,6 @@
<property><!--Loaded from job.xml--><name>ipc.client.tcpnodelay</name><value>false</value></property>
<property><!--Loaded from job.xml--><name>mapreduce.map.output.value.class</name><value>org.apache.hadoop.io.NullWritable</value></property>
<property><!--Loaded from job.xml--><name>dfs.namenode.accesstime.precision</name><value>3600000</value></property>
-<property><!--Loaded from job.xml--><name>s3.stream-buffer-size</name><value>4096</value></property>
<property><!--Loaded from job.xml--><name>mapreduce.jobtracker.tasktracker.maxblacklists</name><value>4</value></property>
<property><!--Loaded from Unknown--><name>rpc.engine.com.google.protobuf.BlockingService</name><value>org.apache.hadoop.yarn.ipc.ProtoOverHadoopRpcEngine</value></property>
<property><!--Loaded from job.xml--><name>mapreduce.job.jvm.numtasks</name><value>1</value></property>
@@ -136,7 +132,7 @@
<property><!--Loaded from job.xml--><name>kfs.stream-buffer-size</name><value>4096</value></property>
<property><!--Loaded from job.xml--><name>fs.har.impl</name><value>org.apache.hadoop.fs.HarFileSystem</value></property>
<property><!--Loaded from job.xml--><name>hadoop.security.authentication</name><value>simple</value></property>
-<property><!--Loaded from job.xml--><name>fs.s3.buffer.dir</name><value>${hadoop.tmp.dir}/s3</value></property>
+<property><!--Loaded from job.xml--><name>fs.s3n.buffer.dir</name><value>${hadoop.tmp.dir}/s3n</value></property>
<property><!--Loaded from job.xml--><name>mapreduce.jobtracker.taskscheduler</name><value>org.apache.hadoop.mapred.JobQueueTaskScheduler</value></property>
<property><!--Loaded from job.xml--><name>yarn.app.mapreduce.am.job.task.listener.thread-count</name><value>30</value></property>
<property><!--Loaded from job.xml--><name>mapreduce.job.reduces</name><value>1</value></property>
@@ -205,7 +201,7 @@
<property><!--Loaded from job.xml--><name>mapreduce.job.dir</name><value>/tmp/hadoop-yarn/staging/user/.staging/job_1329348432655_0001</value></property>
<property><!--Loaded from job.xml--><name>io.map.index.skip</name><value>0</value></property>
<property><!--Loaded from job.xml--><name>net.topology.node.switch.mapping.impl</name><value>org.apache.hadoop.net.ScriptBasedMapping</value></property>
-<property><!--Loaded from job.xml--><name>fs.s3.maxRetries</name><value>4</value></property>
+<property><!--Loaded from job.xml--><name>fs.s3n.maxRetries</name><value>4</value></property>
<property><!--Loaded from job.xml--><name>s3native.client-write-packet-size</name><value>65536</value></property>
<property><!--Loaded from job.xml--><name>yarn.resourcemanager.amliveliness-monitor.interval-ms</name><value>1000</value></property>
<property><!--Loaded from job.xml--><name>mapreduce.reduce.speculative</name><value>false</value></property>
@@ -263,11 +259,9 @@
<property><!--Loaded from job.xml--><name>ipc.client.idlethreshold</name><value>4000</value></property>
<property><!--Loaded from job.xml--><name>ipc.server.tcpnodelay</name><value>false</value></property>
<property><!--Loaded from job.xml--><name>ftp.bytes-per-checksum</name><value>512</value></property>
-<property><!--Loaded from job.xml--><name>s3.bytes-per-checksum</name><value>512</value></property>
<property><!--Loaded from job.xml--><name>mapreduce.job.speculative.slowtaskthreshold</name><value>1.0</value></property>
<property><!--Loaded from job.xml--><name>yarn.nodemanager.localizer.cache.target-size-mb</name><value>1</value></property>
<property><!--Loaded from job.xml--><name>yarn.nodemanager.remote-app-log-dir</name><value>/tmp/logs</value></property>
-<property><!--Loaded from job.xml--><name>fs.s3.block.size</name><value>67108864</value></property>
<property><!--Loaded from job.xml--><name>mapreduce.job.queuename</name><value>default</value></property>
<property><!--Loaded from job.xml--><name>mapreduce.sleepjob.reduce.sleep.time</name><value>1</value></property>
<property><!--Loaded from job.xml--><name>hadoop.rpc.protection</name><value>authentication</value></property>
@@ -321,7 +315,6 @@
<property><!--Loaded from job.xml--><name>dfs.datanode.address</name><value>0.0.0.0:9866</value></property>
<property><!--Loaded from job.xml--><name>mapreduce.map.skip.maxrecords</name><value>0</value></property>
<property><!--Loaded from job.xml--><name>dfs.datanode.https.address</name><value>0.0.0.0:9865</value></property>
-<property><!--Loaded from job.xml--><name>fs.s3.impl</name><value>org.apache.hadoop.fs.s3.S3FileSystem</value></property>
<property><!--Loaded from job.xml--><name>file.replication</name><value>1</value></property>
<property><!--Loaded from job.xml--><name>yarn.resourcemanager.resource-tracker.address</name><value>0.0.0.0:8025</value></property>
<property><!--Loaded from job.xml--><name>mapreduce.jobtracker.restart.recover</name><value>false</value></property>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96fa0f84/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/Block.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/Block.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/Block.java
deleted file mode 100644
index 6926f17..0000000
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/Block.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * Holds metadata about a block of data being stored in a {@link FileSystemStore}.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class Block {
- private long id;
-
- private long length;
-
- public Block(long id, long length) {
- this.id = id;
- this.length = length;
- }
-
- public long getId() {
- return id;
- }
-
- public long getLength() {
- return length;
- }
-
- @Override
- public String toString() {
- return "Block[" + id + ", " + length + "]";
- }
-
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96fa0f84/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/FileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/FileSystemStore.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/FileSystemStore.java
deleted file mode 100644
index 3c7ed60..0000000
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/FileSystemStore.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.URI;
-import java.util.Set;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-
-/**
- * A facility for storing and retrieving {@link INode}s and {@link Block}s.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public interface FileSystemStore {
-
- void initialize(URI uri, Configuration conf) throws IOException;
- String getVersion() throws IOException;
-
- void storeINode(Path path, INode inode) throws IOException;
- void storeBlock(Block block, File file) throws IOException;
-
- boolean inodeExists(Path path) throws IOException;
- boolean blockExists(long blockId) throws IOException;
-
- INode retrieveINode(Path path) throws IOException;
- File retrieveBlock(Block block, long byteRangeStart) throws IOException;
-
- void deleteINode(Path path) throws IOException;
- void deleteBlock(Block block) throws IOException;
-
- Set<Path> listSubPaths(Path path) throws IOException;
- Set<Path> listDeepSubPaths(Path path) throws IOException;
-
- /**
- * Delete everything. Used for testing.
- * @throws IOException on any problem
- */
- void purge() throws IOException;
-
- /**
- * Diagnostic method to dump all INodes to the console.
- * @throws IOException on any problem
- */
- void dump() throws IOException;
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96fa0f84/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/INode.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/INode.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/INode.java
deleted file mode 100644
index 5d08b77..0000000
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/INode.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.IOUtils;
-
-/**
- * Holds file metadata including type (regular file, or directory),
- * and the list of blocks that are pointers to the data.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class INode {
-
- enum FileType {
- DIRECTORY, FILE
- }
-
- public static final FileType[] FILE_TYPES = {
- FileType.DIRECTORY,
- FileType.FILE
- };
-
- public static final INode DIRECTORY_INODE = new INode(FileType.DIRECTORY, null);
-
- private FileType fileType;
- private Block[] blocks;
-
- public INode(FileType fileType, Block[] blocks) {
- this.fileType = fileType;
- if (isDirectory() && blocks != null) {
- throw new IllegalArgumentException("A directory cannot contain blocks.");
- }
- this.blocks = blocks;
- }
-
- public Block[] getBlocks() {
- return blocks;
- }
-
- public FileType getFileType() {
- return fileType;
- }
-
- public boolean isDirectory() {
- return fileType == FileType.DIRECTORY;
- }
-
- public boolean isFile() {
- return fileType == FileType.FILE;
- }
-
- public long getSerializedLength() {
- return 1L + (blocks == null ? 0 : 4 + blocks.length * 16);
- }
-
-
- public InputStream serialize() throws IOException {
- ByteArrayOutputStream bytes = new ByteArrayOutputStream();
- DataOutputStream out = new DataOutputStream(bytes);
- try {
- out.writeByte(fileType.ordinal());
- if (isFile()) {
- out.writeInt(blocks.length);
- for (int i = 0; i < blocks.length; i++) {
- out.writeLong(blocks[i].getId());
- out.writeLong(blocks[i].getLength());
- }
- }
- out.close();
- out = null;
- } finally {
- IOUtils.closeStream(out);
- }
- return new ByteArrayInputStream(bytes.toByteArray());
- }
-
- public static INode deserialize(InputStream in) throws IOException {
- if (in == null) {
- return null;
- }
- DataInputStream dataIn = new DataInputStream(in);
- FileType fileType = INode.FILE_TYPES[dataIn.readByte()];
- switch (fileType) {
- case DIRECTORY:
- in.close();
- return INode.DIRECTORY_INODE;
- case FILE:
- int numBlocks = dataIn.readInt();
- Block[] blocks = new Block[numBlocks];
- for (int i = 0; i < numBlocks; i++) {
- long id = dataIn.readLong();
- long length = dataIn.readLong();
- blocks[i] = new Block(id, length);
- }
- in.close();
- return new INode(fileType, blocks);
- default:
- throw new IllegalArgumentException("Cannot deserialize inode.");
- }
- }
-
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96fa0f84/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java
deleted file mode 100644
index a186c14..0000000
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java
+++ /dev/null
@@ -1,449 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3;
-
-import java.io.BufferedInputStream;
-import java.io.BufferedOutputStream;
-import java.io.Closeable;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.net.URI;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeSet;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.s3.INode.FileType;
-import org.jets3t.service.S3Service;
-import org.jets3t.service.S3ServiceException;
-import org.jets3t.service.ServiceException;
-import org.jets3t.service.impl.rest.httpclient.RestS3Service;
-import org.jets3t.service.model.S3Bucket;
-import org.jets3t.service.model.S3Object;
-import org.jets3t.service.security.AWSCredentials;
-
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-class Jets3tFileSystemStore implements FileSystemStore {
-
- private static final String FILE_SYSTEM_NAME = "fs";
- private static final String FILE_SYSTEM_VALUE = "Hadoop";
-
- private static final String FILE_SYSTEM_TYPE_NAME = "fs-type";
- private static final String FILE_SYSTEM_TYPE_VALUE = "block";
-
- private static final String FILE_SYSTEM_VERSION_NAME = "fs-version";
- private static final String FILE_SYSTEM_VERSION_VALUE = "1";
-
- private static final Map<String, Object> METADATA =
- new HashMap<String, Object>();
-
- static {
- METADATA.put(FILE_SYSTEM_NAME, FILE_SYSTEM_VALUE);
- METADATA.put(FILE_SYSTEM_TYPE_NAME, FILE_SYSTEM_TYPE_VALUE);
- METADATA.put(FILE_SYSTEM_VERSION_NAME, FILE_SYSTEM_VERSION_VALUE);
- }
-
- private static final String PATH_DELIMITER = Path.SEPARATOR;
- private static final String BLOCK_PREFIX = "block_";
-
- private Configuration conf;
-
- private S3Service s3Service;
-
- private S3Bucket bucket;
-
- private int bufferSize;
-
- private static final Log LOG =
- LogFactory.getLog(Jets3tFileSystemStore.class.getName());
-
- @Override
- public void initialize(URI uri, Configuration conf) throws IOException {
-
- this.conf = conf;
-
- S3Credentials s3Credentials = new S3Credentials();
- s3Credentials.initialize(uri, conf);
- try {
- AWSCredentials awsCredentials =
- new AWSCredentials(s3Credentials.getAccessKey(),
- s3Credentials.getSecretAccessKey());
- this.s3Service = new RestS3Service(awsCredentials);
- } catch (S3ServiceException e) {
- if (e.getCause() instanceof IOException) {
- throw (IOException) e.getCause();
- }
- throw new S3Exception(e);
- }
- bucket = new S3Bucket(uri.getHost());
-
- this.bufferSize = conf.getInt(
- S3FileSystemConfigKeys.S3_STREAM_BUFFER_SIZE_KEY,
- S3FileSystemConfigKeys.S3_STREAM_BUFFER_SIZE_DEFAULT
- );
- }
-
- @Override
- public String getVersion() throws IOException {
- return FILE_SYSTEM_VERSION_VALUE;
- }
-
- private void delete(String key) throws IOException {
- try {
- s3Service.deleteObject(bucket, key);
- } catch (S3ServiceException e) {
- if (e.getCause() instanceof IOException) {
- throw (IOException) e.getCause();
- }
- throw new S3Exception(e);
- }
- }
-
- @Override
- public void deleteINode(Path path) throws IOException {
- delete(pathToKey(path));
- }
-
- @Override
- public void deleteBlock(Block block) throws IOException {
- delete(blockToKey(block));
- }
-
- @Override
- public boolean inodeExists(Path path) throws IOException {
- String key = pathToKey(path);
- InputStream in = get(key, true);
- if (in == null) {
- if (isRoot(key)) {
- storeINode(path, INode.DIRECTORY_INODE);
- return true;
- } else {
- return false;
- }
- }
- in.close();
- return true;
- }
-
- @Override
- public boolean blockExists(long blockId) throws IOException {
- InputStream in = get(blockToKey(blockId), false);
- if (in == null) {
- return false;
- }
- in.close();
- return true;
- }
-
- private InputStream get(String key, boolean checkMetadata)
- throws IOException {
-
- try {
- S3Object object = s3Service.getObject(bucket.getName(), key);
- if (checkMetadata) {
- checkMetadata(object);
- }
- return object.getDataInputStream();
- } catch (S3ServiceException e) {
- if ("NoSuchKey".equals(e.getS3ErrorCode())) {
- return null;
- }
- if (e.getCause() instanceof IOException) {
- throw (IOException) e.getCause();
- }
- throw new S3Exception(e);
- } catch (ServiceException e) {
- handleServiceException(e);
- return null;
- }
- }
-
- private InputStream get(String key, long byteRangeStart) throws IOException {
- try {
- S3Object object = s3Service.getObject(bucket, key, null, null, null,
- null, byteRangeStart, null);
- return object.getDataInputStream();
- } catch (S3ServiceException e) {
- if ("NoSuchKey".equals(e.getS3ErrorCode())) {
- return null;
- }
- if (e.getCause() instanceof IOException) {
- throw (IOException) e.getCause();
- }
- throw new S3Exception(e);
- } catch (ServiceException e) {
- handleServiceException(e);
- return null;
- }
- }
-
- private void checkMetadata(S3Object object) throws S3FileSystemException,
- S3ServiceException {
-
- String name = (String) object.getMetadata(FILE_SYSTEM_NAME);
- if (!FILE_SYSTEM_VALUE.equals(name)) {
- throw new S3FileSystemException("Not a Hadoop S3 file.");
- }
- String type = (String) object.getMetadata(FILE_SYSTEM_TYPE_NAME);
- if (!FILE_SYSTEM_TYPE_VALUE.equals(type)) {
- throw new S3FileSystemException("Not a block file.");
- }
- String dataVersion = (String) object.getMetadata(FILE_SYSTEM_VERSION_NAME);
- if (!FILE_SYSTEM_VERSION_VALUE.equals(dataVersion)) {
- throw new VersionMismatchException(FILE_SYSTEM_VERSION_VALUE,
- dataVersion);
- }
- }
-
- @Override
- public INode retrieveINode(Path path) throws IOException {
- String key = pathToKey(path);
- InputStream in = get(key, true);
- if (in == null && isRoot(key)) {
- storeINode(path, INode.DIRECTORY_INODE);
- return INode.DIRECTORY_INODE;
- }
- return INode.deserialize(in);
- }
-
- @Override
- public File retrieveBlock(Block block, long byteRangeStart)
- throws IOException {
- File fileBlock = null;
- InputStream in = null;
- OutputStream out = null;
- try {
- fileBlock = newBackupFile();
- String blockId = blockToKey(block);
- in = get(blockId, byteRangeStart);
- if (in == null) {
- throw new IOException("Block missing from S3 store: " + blockId);
- }
- out = new BufferedOutputStream(new FileOutputStream(fileBlock));
- byte[] buf = new byte[bufferSize];
- int numRead;
- while ((numRead = in.read(buf)) >= 0) {
- out.write(buf, 0, numRead);
- }
- return fileBlock;
- } catch (IOException e) {
- // close output stream to file then delete file
- closeQuietly(out);
- out = null; // to prevent a second close
- if (fileBlock != null) {
- boolean b = fileBlock.delete();
- if (!b) {
- LOG.warn("Ignoring failed delete");
- }
- }
- throw e;
- } finally {
- closeQuietly(out);
- closeQuietly(in);
- }
- }
-
- private File newBackupFile() throws IOException {
- File dir = new File(conf.get("fs.s3.buffer.dir"));
- if (!dir.exists() && !dir.mkdirs()) {
- throw new IOException("Cannot create S3 buffer directory: " + dir);
- }
- File result = File.createTempFile("input-", ".tmp", dir);
- result.deleteOnExit();
- return result;
- }
-
- @Override
- public Set<Path> listSubPaths(Path path) throws IOException {
- try {
- String prefix = pathToKey(path);
- if (!prefix.endsWith(PATH_DELIMITER)) {
- prefix += PATH_DELIMITER;
- }
- S3Object[] objects = s3Service.listObjects(bucket.getName(), prefix, PATH_DELIMITER);
- Set<Path> prefixes = new TreeSet<Path>();
- for (int i = 0; i < objects.length; i++) {
- prefixes.add(keyToPath(objects[i].getKey()));
- }
- prefixes.remove(path);
- return prefixes;
- } catch (S3ServiceException e) {
- if (e.getCause() instanceof IOException) {
- throw (IOException) e.getCause();
- }
- throw new S3Exception(e);
- }
- }
-
- @Override
- public Set<Path> listDeepSubPaths(Path path) throws IOException {
- try {
- String prefix = pathToKey(path);
- if (!prefix.endsWith(PATH_DELIMITER)) {
- prefix += PATH_DELIMITER;
- }
- S3Object[] objects = s3Service.listObjects(bucket.getName(), prefix, null);
- Set<Path> prefixes = new TreeSet<Path>();
- for (int i = 0; i < objects.length; i++) {
- prefixes.add(keyToPath(objects[i].getKey()));
- }
- prefixes.remove(path);
- return prefixes;
- } catch (S3ServiceException e) {
- if (e.getCause() instanceof IOException) {
- throw (IOException) e.getCause();
- }
- throw new S3Exception(e);
- }
- }
-
- private void put(String key, InputStream in, long length, boolean storeMetadata)
- throws IOException {
-
- try {
- S3Object object = new S3Object(key);
- object.setDataInputStream(in);
- object.setContentType("binary/octet-stream");
- object.setContentLength(length);
- if (storeMetadata) {
- object.addAllMetadata(METADATA);
- }
- s3Service.putObject(bucket, object);
- } catch (S3ServiceException e) {
- if (e.getCause() instanceof IOException) {
- throw (IOException) e.getCause();
- }
- throw new S3Exception(e);
- }
- }
-
- @Override
- public void storeINode(Path path, INode inode) throws IOException {
- put(pathToKey(path), inode.serialize(), inode.getSerializedLength(), true);
- }
-
- @Override
- public void storeBlock(Block block, File file) throws IOException {
- BufferedInputStream in = null;
- try {
- in = new BufferedInputStream(new FileInputStream(file));
- put(blockToKey(block), in, block.getLength(), false);
- } finally {
- closeQuietly(in);
- }
- }
-
- private void closeQuietly(Closeable closeable) {
- if (closeable != null) {
- try {
- closeable.close();
- } catch (IOException e) {
- // ignore
- }
- }
- }
-
- private String pathToKey(Path path) {
- if (!path.isAbsolute()) {
- throw new IllegalArgumentException("Path must be absolute: " + path);
- }
- return path.toUri().getPath();
- }
-
- private Path keyToPath(String key) {
- return new Path(key);
- }
-
- private String blockToKey(long blockId) {
- return BLOCK_PREFIX + blockId;
- }
-
- private String blockToKey(Block block) {
- return blockToKey(block.getId());
- }
-
- private boolean isRoot(String key) {
- return key.isEmpty() || key.equals("/");
- }
-
- @Override
- public void purge() throws IOException {
- try {
- S3Object[] objects = s3Service.listObjects(bucket.getName());
- for (int i = 0; i < objects.length; i++) {
- s3Service.deleteObject(bucket, objects[i].getKey());
- }
- } catch (S3ServiceException e) {
- if (e.getCause() instanceof IOException) {
- throw (IOException) e.getCause();
- }
- throw new S3Exception(e);
- }
- }
-
- @Override
- public void dump() throws IOException {
- StringBuilder sb = new StringBuilder("S3 Filesystem, ");
- sb.append(bucket.getName()).append("\n");
- try {
- S3Object[] objects = s3Service.listObjects(bucket.getName(), PATH_DELIMITER, null);
- for (int i = 0; i < objects.length; i++) {
- Path path = keyToPath(objects[i].getKey());
- sb.append(path).append("\n");
- INode m = retrieveINode(path);
- sb.append("\t").append(m.getFileType()).append("\n");
- if (m.getFileType() == FileType.DIRECTORY) {
- continue;
- }
- for (int j = 0; j < m.getBlocks().length; j++) {
- sb.append("\t").append(m.getBlocks()[j]).append("\n");
- }
- }
- } catch (S3ServiceException e) {
- if (e.getCause() instanceof IOException) {
- throw (IOException) e.getCause();
- }
- throw new S3Exception(e);
- }
- System.out.println(sb);
- }
-
- private void handleServiceException(ServiceException e) throws IOException {
- if (e.getCause() instanceof IOException) {
- throw (IOException) e.getCause();
- }
- else {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Got ServiceException with Error code: " + e.getErrorCode() + ";and Error message: " + e.getErrorMessage());
- }
- }
- }
-
-}
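
The store above maps the filesystem onto a flat bucket: an inode is written under its absolute path as the object key (pathToKey), data blocks under keys of the form block_<id> (BLOCK_PREFIX), and each inode object carries the metadata fs=Hadoop, fs-type=block, fs-version=1 that checkMetadata() verifies on read. A hypothetical bucket holding one two-block file might therefore have contained keys such as:

  /user/alice/part-00000   (inode: block list [42, 43])
  block_42                 (first data block)
  block_43                 (second data block)

which is why a bucket written by this filesystem was not directly usable by ordinary S3 clients.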
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96fa0f84/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/MigrationTool.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/MigrationTool.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/MigrationTool.java
deleted file mode 100644
index 429c272..0000000
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/MigrationTool.java
+++ /dev/null
@@ -1,291 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.s3;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.UnsupportedEncodingException;
-import java.net.URI;
-import java.net.URLDecoder;
-import java.net.URLEncoder;
-import java.util.Set;
-import java.util.TreeSet;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-import org.jets3t.service.S3Service;
-import org.jets3t.service.S3ServiceException;
-import org.jets3t.service.ServiceException;
-import org.jets3t.service.impl.rest.httpclient.RestS3Service;
-import org.jets3t.service.model.S3Bucket;
-import org.jets3t.service.model.S3Object;
-import org.jets3t.service.security.AWSCredentials;
-
-/**
- * <p>
- * This class is a tool for migrating data from an older to a newer version
- * of an S3 filesystem.
- * </p>
- * <p>
- * All files in the filesystem are migrated by re-writing the block metadata
- * - no datafiles are touched.
- * </p>
- */
-@InterfaceAudience.Public
-@InterfaceStability.Unstable
-public class MigrationTool extends Configured implements Tool {
-
- private S3Service s3Service;
- private S3Bucket bucket;
-
- public static void main(String[] args) throws Exception {
- int res = ToolRunner.run(new MigrationTool(), args);
- System.exit(res);
- }
-
- @Override
- public int run(String[] args) throws Exception {
-
- if (args.length == 0) {
- System.err.println("Usage: MigrationTool <S3 file system URI>");
- System.err.println("\t<S3 file system URI>\tfilesystem to migrate");
- ToolRunner.printGenericCommandUsage(System.err);
- return -1;
- }
-
- URI uri = URI.create(args[0]);
-
- initialize(uri);
-
- FileSystemStore newStore = new Jets3tFileSystemStore();
- newStore.initialize(uri, getConf());
-
- if (get("%2F") != null) {
- System.err.println("Current version number is [unversioned].");
- System.err.println("Target version number is " +
- newStore.getVersion() + ".");
- Store oldStore = new UnversionedStore();
- migrate(oldStore, newStore);
- return 0;
- } else {
- S3Object root = get("/");
- if (root != null) {
- String version = (String) root.getMetadata("fs-version");
- if (version == null) {
- System.err.println("Can't detect version - exiting.");
- } else {
- String newVersion = newStore.getVersion();
- System.err.println("Current version number is " + version + ".");
- System.err.println("Target version number is " + newVersion + ".");
- if (version.equals(newStore.getVersion())) {
- System.err.println("No migration required.");
- return 0;
- }
- // use version number to create Store
- //Store oldStore = ...
- //migrate(oldStore, newStore);
- System.err.println("Not currently implemented.");
- return 0;
- }
- }
- System.err.println("Can't detect version - exiting.");
- return 0;
- }
-
- }
-
- public void initialize(URI uri) throws IOException {
-
-
-
- try {
- String accessKey = null;
- String secretAccessKey = null;
- String userInfo = uri.getUserInfo();
- if (userInfo != null) {
- int index = userInfo.indexOf(':');
- if (index != -1) {
- accessKey = userInfo.substring(0, index);
- secretAccessKey = userInfo.substring(index + 1);
- } else {
- accessKey = userInfo;
- }
- }
- if (accessKey == null) {
- accessKey = getConf().get("fs.s3.awsAccessKeyId");
- }
- if (secretAccessKey == null) {
- secretAccessKey = getConf().get("fs.s3.awsSecretAccessKey");
- }
- if (accessKey == null && secretAccessKey == null) {
- throw new IllegalArgumentException("AWS " +
- "Access Key ID and Secret Access Key " +
- "must be specified as the username " +
- "or password (respectively) of a s3 URL, " +
- "or by setting the " +
- "fs.s3.awsAccessKeyId or " +
- "fs.s3.awsSecretAccessKey properties (respectively).");
- } else if (accessKey == null) {
- throw new IllegalArgumentException("AWS " +
- "Access Key ID must be specified " +
- "as the username of a s3 URL, or by setting the " +
- "fs.s3.awsAccessKeyId property.");
- } else if (secretAccessKey == null) {
- throw new IllegalArgumentException("AWS " +
- "Secret Access Key must be specified " +
- "as the password of a s3 URL, or by setting the " +
- "fs.s3.awsSecretAccessKey property.");
- }
- AWSCredentials awsCredentials =
- new AWSCredentials(accessKey, secretAccessKey);
- this.s3Service = new RestS3Service(awsCredentials);
- } catch (S3ServiceException e) {
- if (e.getCause() instanceof IOException) {
- throw (IOException) e.getCause();
- }
- throw new S3Exception(e);
- }
- bucket = new S3Bucket(uri.getHost());
- }
-
- private void migrate(Store oldStore, FileSystemStore newStore)
- throws IOException {
- for (Path path : oldStore.listAllPaths()) {
- INode inode = oldStore.retrieveINode(path);
- oldStore.deleteINode(path);
- newStore.storeINode(path, inode);
- }
- }
-
- private S3Object get(String key) {
- try {
- return s3Service.getObject(bucket.getName(), key);
- } catch (S3ServiceException e) {
- if ("NoSuchKey".equals(e.getS3ErrorCode())) {
- return null;
- }
- }
- return null;
- }
-
- interface Store {
-
- Set<Path> listAllPaths() throws IOException;
- INode retrieveINode(Path path) throws IOException;
- void deleteINode(Path path) throws IOException;
-
- }
-
- class UnversionedStore implements Store {
-
- @Override
- public Set<Path> listAllPaths() throws IOException {
- try {
- String prefix = urlEncode(Path.SEPARATOR);
- S3Object[] objects = s3Service.listObjects(bucket.getName(), prefix, null);
- Set<Path> prefixes = new TreeSet<Path>();
- for (int i = 0; i < objects.length; i++) {
- prefixes.add(keyToPath(objects[i].getKey()));
- }
- return prefixes;
- } catch (S3ServiceException e) {
- if (e.getCause() instanceof IOException) {
- throw (IOException) e.getCause();
- }
- throw new S3Exception(e);
- }
- }
-
- @Override
- public void deleteINode(Path path) throws IOException {
- delete(pathToKey(path));
- }
-
- private void delete(String key) throws IOException {
- try {
- s3Service.deleteObject(bucket, key);
- } catch (S3ServiceException e) {
- if (e.getCause() instanceof IOException) {
- throw (IOException) e.getCause();
- }
- throw new S3Exception(e);
- }
- }
-
- @Override
- public INode retrieveINode(Path path) throws IOException {
- return INode.deserialize(get(pathToKey(path)));
- }
-
- private InputStream get(String key) throws IOException {
- try {
- S3Object object = s3Service.getObject(bucket.getName(), key);
- return object.getDataInputStream();
- } catch (S3ServiceException e) {
- if ("NoSuchKey".equals(e.getS3ErrorCode())) {
- return null;
- }
- if (e.getCause() instanceof IOException) {
- throw (IOException) e.getCause();
- }
- throw new S3Exception(e);
- } catch (ServiceException e) {
- return null;
- }
- }
-
- private String pathToKey(Path path) {
- if (!path.isAbsolute()) {
- throw new IllegalArgumentException("Path must be absolute: " + path);
- }
- return urlEncode(path.toUri().getPath());
- }
-
- private Path keyToPath(String key) {
- return new Path(urlDecode(key));
- }
-
- private String urlEncode(String s) {
- try {
- return URLEncoder.encode(s, "UTF-8");
- } catch (UnsupportedEncodingException e) {
- // Should never happen since every implementation of the Java Platform
- // is required to support UTF-8.
- // See http://java.sun.com/j2se/1.5.0/docs/api/java/nio/charset/Charset.html
- throw new IllegalStateException(e);
- }
- }
-
- private String urlDecode(String s) {
- try {
- return URLDecoder.decode(s, "UTF-8");
- } catch (UnsupportedEncodingException e) {
- // Should never happen since every implementation of the Java Platform
- // is required to support UTF-8.
- // See http://java.sun.com/j2se/1.5.0/docs/api/java/nio/charset/Charset.html
- throw new IllegalStateException(e);
- }
- }
-
- }
-
-}
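
Before its removal, MigrationTool was a standard Tool run through ToolRunner; per its own usage string it took a single argument, the URI of the filesystem to migrate, so an invocation through the generic class runner would presumably have looked like (bucket name hypothetical):

  hadoop org.apache.hadoop.fs.s3.MigrationTool s3://mybucket/

As the class javadoc above notes, it only rewrote inode metadata, re-keying it from the URL-encoded unversioned layout, and never touched the data blocks themselves.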
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96fa0f84/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3Credentials.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3Credentials.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3Credentials.java
deleted file mode 100644
index 3951a08..0000000
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3Credentials.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3;
-
-import java.io.IOException;
-import java.net.URI;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.s3native.S3xLoginHelper;
-
-/**
- * <p>
- * Extracts AWS credentials from the filesystem URI or configuration.
- * </p>
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class S3Credentials {
-
- private String accessKey;
- private String secretAccessKey;
-
- /**
- * @param uri bucket URI optionally containing username and password.
- * @param conf configuration
- * @throws IllegalArgumentException if credentials for S3 cannot be
- * determined.
- * @throws IOException if credential providers are misconfigured and we have
- * to talk to them.
- */
- public void initialize(URI uri, Configuration conf) throws IOException {
- if (uri.getHost() == null) {
- throw new IllegalArgumentException("Invalid hostname in URI " + uri);
- }
- S3xLoginHelper.Login login =
- S3xLoginHelper.extractLoginDetailsWithWarnings(uri);
- if (login.hasLogin()) {
- accessKey = login.getUser();
- secretAccessKey = login.getPassword();
- }
- String scheme = uri.getScheme();
- String accessKeyProperty = String.format("fs.%s.awsAccessKeyId", scheme);
- String secretAccessKeyProperty =
- String.format("fs.%s.awsSecretAccessKey", scheme);
- if (accessKey == null) {
- accessKey = conf.getTrimmed(accessKeyProperty);
- }
- if (secretAccessKey == null) {
- final char[] pass = conf.getPassword(secretAccessKeyProperty);
- if (pass != null) {
- secretAccessKey = (new String(pass)).trim();
- }
- }
- if (accessKey == null && secretAccessKey == null) {
- throw new IllegalArgumentException("AWS " +
- "Access Key ID and Secret Access " +
- "Key must be specified " +
- "by setting the " +
- accessKeyProperty + " and " +
- secretAccessKeyProperty +
- " properties (respectively).");
- } else if (accessKey == null) {
- throw new IllegalArgumentException("AWS " +
- "Access Key ID must be specified " +
- "by setting the " +
- accessKeyProperty + " property.");
- } else if (secretAccessKey == null) {
- throw new IllegalArgumentException("AWS " +
- "Secret Access Key must be " +
- "specified by setting the " +
- secretAccessKeyProperty +
- " property.");
- }
-
- }
-
- public String getAccessKey() {
- return accessKey;
- }
-
- public String getSecretAccessKey() {
- return secretAccessKey;
- }
-}
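
Note that this class is not gone: the file list shows S3Credentials.java reappearing under org.apache.hadoop.fs.s3native, keyed by URI scheme, so the same lookup serves s3n:// URIs. A hedged sketch of driving it there, assuming the relocated copy keeps the initialize/getAccessKey/getSecretAccessKey surface shown above (credential values are placeholders):

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.s3native.S3Credentials;

    public class CredentialLookup {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholders; note the secret is read via conf.getPassword(),
        // which also consults any configured Hadoop credential providers.
        conf.set("fs.s3n.awsAccessKeyId", "ACCESS-KEY-ID");
        conf.set("fs.s3n.awsSecretAccessKey", "SECRET-ACCESS-KEY");

        S3Credentials creds = new S3Credentials();
        creds.initialize(URI.create("s3n://example-bucket/"), conf);
        System.out.println(creds.getAccessKey());
      }
    }
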
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96fa0f84/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3Exception.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3Exception.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3Exception.java
deleted file mode 100644
index 4f07c4e..0000000
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3Exception.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3;
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * Thrown if there is a problem communicating with Amazon S3.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class S3Exception extends IOException {
-
- private static final long serialVersionUID = 1L;
-
- public S3Exception(Throwable t) {
- super(t);
- }
-
-}
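
The get() method near the top of this mail shows the idiom this exception existed for: surface a buried IOException from the service exception's cause, and wrap anything else. A standalone sketch of that unwrapping pattern (the class and method names are invented for illustration):

    import java.io.IOException;

    public final class ExceptionTranslation {

      private ExceptionTranslation() {
      }

      // Prefer the underlying IOException if the service wrapped one;
      // otherwise wrap the whole exception, as get() did with S3Exception.
      static IOException translate(Exception e) {
        if (e.getCause() instanceof IOException) {
          return (IOException) e.getCause();
        }
        return new IOException(e);
      }

      public static void main(String[] args) {
        Exception wrapped = new Exception(new IOException("socket reset"));
        System.out.println(translate(wrapped).getMessage()); // socket reset
      }
    }
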
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96fa0f84/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java
deleted file mode 100644
index 6a49d1a..0000000
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java
+++ /dev/null
@@ -1,516 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileAlreadyExistsException;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.ParentNotDirectoryException;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.s3native.NativeS3FileSystem;
-import org.apache.hadoop.fs.s3native.S3xLoginHelper;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.apache.hadoop.io.retry.RetryProxy;
-import org.apache.hadoop.util.Progressable;
-
-/**
- * A block-based {@link FileSystem} backed by
- * <a href="http://aws.amazon.com/s3">Amazon S3</a>.
- *
- * @see NativeS3FileSystem
- */
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class S3FileSystem extends FileSystem {
-
- private URI uri;
-
- private FileSystemStore store;
-
- private Path workingDir;
-
- public S3FileSystem() {
- // set store in initialize()
- }
-
- public S3FileSystem(FileSystemStore store) {
- this.store = store;
- }
-
- /**
- * Return the protocol scheme for the FileSystem.
- *
- * @return <code>s3</code>
- */
- @Override
- public String getScheme() {
- return "s3";
- }
-
- @Override
- public URI getUri() {
- return uri;
- }
-
- @Override
- public void initialize(URI uri, Configuration conf) throws IOException {
- super.initialize(uri, conf);
- if (store == null) {
- store = createDefaultStore(conf);
- }
- store.initialize(uri, conf);
- setConf(conf);
- this.uri = S3xLoginHelper.buildFSURI(uri);
- this.workingDir =
- new Path("/user", System.getProperty("user.name")).makeQualified(this);
- }
-
- private static FileSystemStore createDefaultStore(Configuration conf) {
- FileSystemStore store = new Jets3tFileSystemStore();
-
- RetryPolicy basePolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
- conf.getInt("fs.s3.maxRetries", 4),
- conf.getLong("fs.s3.sleepTimeSeconds", 10), TimeUnit.SECONDS);
- Map<Class<? extends Exception>,RetryPolicy> exceptionToPolicyMap =
- new HashMap<Class<? extends Exception>, RetryPolicy>();
- exceptionToPolicyMap.put(IOException.class, basePolicy);
- exceptionToPolicyMap.put(S3Exception.class, basePolicy);
-
- RetryPolicy methodPolicy = RetryPolicies.retryByException(
- RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
- Map<String,RetryPolicy> methodNameToPolicyMap = new HashMap<String,RetryPolicy>();
- methodNameToPolicyMap.put("storeBlock", methodPolicy);
- methodNameToPolicyMap.put("retrieveBlock", methodPolicy);
-
- return (FileSystemStore) RetryProxy.create(FileSystemStore.class,
- store, methodNameToPolicyMap);
- }
-
- @Override
- public Path getWorkingDirectory() {
- return workingDir;
- }
-
- @Override
- public void setWorkingDirectory(Path dir) {
- workingDir = makeAbsolute(dir);
- }
-
- private Path makeAbsolute(Path path) {
- if (path.isAbsolute()) {
- return path;
- }
- return new Path(workingDir, path);
- }
-
- /**
- * Check that a Path belongs to this FileSystem.
- * Unlike the superclass, this version does not look at authority,
- * only hostnames.
- * @param path to check
- * @throws IllegalArgumentException if there is an FS mismatch
- */
- @Override
- protected void checkPath(Path path) {
- S3xLoginHelper.checkPath(getConf(), getUri(), path, getDefaultPort());
- }
-
- @Override
- protected URI canonicalizeUri(URI rawUri) {
- return S3xLoginHelper.canonicalizeUri(rawUri, getDefaultPort());
- }
-
- /**
- * @param permission Currently ignored.
- */
- @Override
- public boolean mkdirs(Path path, FsPermission permission) throws IOException {
- Path absolutePath = makeAbsolute(path);
- List<Path> paths = new ArrayList<Path>();
- do {
- paths.add(0, absolutePath);
- absolutePath = absolutePath.getParent();
- } while (absolutePath != null);
-
- boolean result = true;
- for (int i = 0; i < paths.size(); i++) {
- Path p = paths.get(i);
- try {
- result &= mkdir(p);
- } catch(FileAlreadyExistsException e) {
- if (i + 1 < paths.size()) {
- throw new ParentNotDirectoryException(e.getMessage());
- }
- throw e;
- }
- }
- return result;
- }
-
- private boolean mkdir(Path path) throws IOException {
- Path absolutePath = makeAbsolute(path);
- INode inode = store.retrieveINode(absolutePath);
- if (inode == null) {
- store.storeINode(absolutePath, INode.DIRECTORY_INODE);
- } else if (inode.isFile()) {
- throw new FileAlreadyExistsException(String.format(
- "Can't make directory for path %s since it is a file.",
- absolutePath));
- }
- return true;
- }
-
- @Override
- public boolean isFile(Path path) throws IOException {
- INode inode = store.retrieveINode(makeAbsolute(path));
- if (inode == null) {
- return false;
- }
- return inode.isFile();
- }
-
- private INode checkFile(Path path) throws IOException {
- INode inode = store.retrieveINode(makeAbsolute(path));
- String message = String.format("No such file: '%s'", path.toString());
- if (inode == null) {
- throw new FileNotFoundException(message + " does not exist");
- }
- if (inode.isDirectory()) {
- throw new FileNotFoundException(message + " is a directory");
- }
- return inode;
- }
-
- @Override
- public FileStatus[] listStatus(Path f) throws IOException {
- Path absolutePath = makeAbsolute(f);
- INode inode = store.retrieveINode(absolutePath);
- if (inode == null) {
- throw new FileNotFoundException("File " + f + " does not exist.");
- }
- if (inode.isFile()) {
- return new FileStatus[] {
- new S3FileStatus(f.makeQualified(this), inode)
- };
- }
- ArrayList<FileStatus> ret = new ArrayList<FileStatus>();
- for (Path p : store.listSubPaths(absolutePath)) {
- ret.add(getFileStatus(p.makeQualified(this)));
- }
- return ret.toArray(new FileStatus[0]);
- }
-
- /** This optional operation is not yet supported. */
- @Override
- public FSDataOutputStream append(Path f, int bufferSize,
- Progressable progress) throws IOException {
- throw new IOException("Not supported");
- }
-
- /**
- * @param permission Currently ignored.
- */
- @Override
- public FSDataOutputStream create(Path file, FsPermission permission,
- boolean overwrite, int bufferSize,
- short replication, long blockSize, Progressable progress)
- throws IOException {
-
- INode inode = store.retrieveINode(makeAbsolute(file));
- if (inode != null) {
- if (overwrite && !inode.isDirectory()) {
- delete(file, true);
- } else {
- String message = String.format("File already exists: '%s'", file);
- if (inode.isDirectory()) {
- message = message + " is a directory";
- }
- throw new FileAlreadyExistsException(message);
- }
- } else {
- Path parent = file.getParent();
- if (parent != null) {
- if (!mkdirs(parent)) {
- throw new IOException("Mkdirs failed to create " + parent.toString());
- }
- }
- }
- return new FSDataOutputStream
- (new S3OutputStream(getConf(), store, makeAbsolute(file),
- blockSize, progress, bufferSize),
- statistics);
- }
-
- @Override
- public FSDataInputStream open(Path path, int bufferSize) throws IOException {
- INode inode = checkFile(path);
- return new FSDataInputStream(new S3InputStream(getConf(), store, inode,
- statistics));
- }
-
- @Override
- public boolean rename(Path src, Path dst) throws IOException {
- Path absoluteSrc = makeAbsolute(src);
- final String debugPreamble = "Renaming '" + src + "' to '" + dst + "' - ";
- INode srcINode = store.retrieveINode(absoluteSrc);
- boolean debugEnabled = LOG.isDebugEnabled();
- if (srcINode == null) {
- // src path doesn't exist
- if (debugEnabled) {
- LOG.debug(debugPreamble + "returning false as src does not exist");
- }
- return false;
- }
-
- Path absoluteDst = makeAbsolute(dst);
-
- //validate the parent dir of the destination
- Path dstParent = absoluteDst.getParent();
- if (dstParent != null) {
- //if the dst parent is not root, make sure it exists
- INode dstParentINode = store.retrieveINode(dstParent);
- if (dstParentINode == null) {
- // dst parent doesn't exist
- if (debugEnabled) {
- LOG.debug(debugPreamble +
- "returning false as dst parent does not exist");
- }
- return false;
- }
- if (dstParentINode.isFile()) {
- // dst parent exists but is a file
- if (debugEnabled) {
- LOG.debug(debugPreamble +
- "returning false as dst parent exists and is a file");
- }
- return false;
- }
- }
-
- //get status of source
- boolean srcIsFile = srcINode.isFile();
-
- INode dstINode = store.retrieveINode(absoluteDst);
- boolean destExists = dstINode != null;
- boolean destIsDir = destExists && !dstINode.isFile();
- if (srcIsFile) {
-
- //source is a simple file
- if (destExists) {
- if (destIsDir) {
- //outcome #1 dest exists and is a dir - move src under dest, keeping its name
- if (debugEnabled) {
- LOG.debug(debugPreamble +
- "copying src file under dest dir to " + absoluteDst);
- }
- absoluteDst = new Path(absoluteDst, absoluteSrc.getName());
- } else {
- //outcome #2 dest is a file: fail iff different from src
- boolean renamingOnToSelf = absoluteSrc.equals(absoluteDst);
- if (debugEnabled) {
- LOG.debug(debugPreamble +
- "copying file onto file, outcome is " + renamingOnToSelf);
- }
- return renamingOnToSelf;
- }
- } else {
- // #3 dest does not exist: use dest as path for rename
- if (debugEnabled) {
- LOG.debug(debugPreamble +
- "copying file onto file");
- }
- }
- } else {
- //here the source exists and is a directory
- // outcomes (given we know the parent dir exists if we get this far)
- // #1 destination is a file: fail
- // #2 destination is a directory: create a new dir under that one
- // #3 destination doesn't exist: create a new dir with that name
- // #2 and #3 are only allowed if the dest path is not == or under src
-
- if (destExists) {
- if (!destIsDir) {
- // #1 destination is a file: fail
- if (debugEnabled) {
- LOG.debug(debugPreamble +
- "returning false as src is a directory, but not dest");
- }
- return false;
- } else {
- // the destination dir exists
- // destination for rename becomes a subdir of the target name
- absoluteDst = new Path(absoluteDst, absoluteSrc.getName());
- if (debugEnabled) {
- LOG.debug(debugPreamble +
- "copying src dir under dest dir to " + absoluteDst);
- }
- }
- }
- //the final destination directory is now known, so validate it for
- //illegal moves
-
- if (absoluteSrc.equals(absoluteDst)) {
- //you can't rename a directory onto itself
- if (debugEnabled) {
- LOG.debug(debugPreamble +
- "Dest==source && isDir -failing");
- }
- return false;
- }
- if (absoluteDst.toString().startsWith(absoluteSrc.toString() + "/")) {
- //you can't move a directory under itself
- if (debugEnabled) {
- LOG.debug(debugPreamble +
- "dst is equal to or under src dir -failing");
- }
- return false;
- }
- }
- //here the dest path is set up -so rename
- return renameRecursive(absoluteSrc, absoluteDst);
- }
-
- private boolean renameRecursive(Path src, Path dst) throws IOException {
- INode srcINode = store.retrieveINode(src);
- store.storeINode(dst, srcINode);
- store.deleteINode(src);
- if (srcINode.isDirectory()) {
- for (Path oldSrc : store.listDeepSubPaths(src)) {
- INode inode = store.retrieveINode(oldSrc);
- if (inode == null) {
- return false;
- }
- String oldSrcPath = oldSrc.toUri().getPath();
- String srcPath = src.toUri().getPath();
- String dstPath = dst.toUri().getPath();
- Path newDst = new Path(oldSrcPath.replaceFirst(srcPath, dstPath));
- store.storeINode(newDst, inode);
- store.deleteINode(oldSrc);
- }
- }
- return true;
- }
-
- @Override
- public boolean delete(Path path, boolean recursive) throws IOException {
- Path absolutePath = makeAbsolute(path);
- INode inode = store.retrieveINode(absolutePath);
- if (inode == null) {
- return false;
- }
- if (inode.isFile()) {
- store.deleteINode(absolutePath);
- for (Block block: inode.getBlocks()) {
- store.deleteBlock(block);
- }
- } else {
- FileStatus[] contents = null;
- try {
- contents = listStatus(absolutePath);
- } catch(FileNotFoundException fnfe) {
- return false;
- }
-
- if ((contents.length !=0) && (!recursive)) {
- throw new IOException("Directory " + path.toString()
- + " is not empty.");
- }
- for (FileStatus p:contents) {
- if (!delete(p.getPath(), recursive)) {
- return false;
- }
- }
- store.deleteINode(absolutePath);
- }
- return true;
- }
-
- /**
- * Return the FileStatus of a path, derived from its stored INode.
- */
- @Override
- public FileStatus getFileStatus(Path f) throws IOException {
- INode inode = store.retrieveINode(makeAbsolute(f));
- if (inode == null) {
- throw new FileNotFoundException(f + ": No such file or directory.");
- }
- return new S3FileStatus(f.makeQualified(this), inode);
- }
-
- @Override
- public long getDefaultBlockSize() {
- return getConf().getLong("fs.s3.block.size", 64 * 1024 * 1024);
- }
-
- @Override
- public String getCanonicalServiceName() {
- // Does not support Token
- return null;
- }
-
- // diagnostic methods
-
- void dump() throws IOException {
- store.dump();
- }
-
- void purge() throws IOException {
- store.purge();
- }
-
- private static class S3FileStatus extends FileStatus {
-
- S3FileStatus(Path f, INode inode) throws IOException {
- super(findLength(inode), inode.isDirectory(), 1,
- findBlocksize(inode), 0, f);
- }
-
- private static long findLength(INode inode) {
- if (!inode.isDirectory()) {
- long length = 0L;
- for (Block block : inode.getBlocks()) {
- length += block.getLength();
- }
- return length;
- }
- return 0;
- }
-
- private static long findBlocksize(INode inode) {
- final Block[] ret = inode.getBlocks();
- return ret == null ? 0L : ret[0].getLength();
- }
- }
-}
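
Independent of the s3:// removal, the createDefaultStore() method above is a compact example of Hadoop's dynamic retry proxy, which is still a live API. A condensed sketch of the same wiring against a hypothetical interface (MyStore and wrap() are invented names):

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryPolicy;
    import org.apache.hadoop.io.retry.RetryProxy;

    interface MyStore {
      void storeBlock(byte[] data) throws IOException;
    }

    public class RetryWiring {

      static MyStore wrap(MyStore raw) {
        // Up to 4 retries with a fixed 10 second sleep between attempts.
        RetryPolicy basePolicy =
            RetryPolicies.retryUpToMaximumCountWithFixedSleep(
                4, 10, TimeUnit.SECONDS);
        Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
            new HashMap<>();
        exceptionToPolicyMap.put(IOException.class, basePolicy);
        RetryPolicy methodPolicy = RetryPolicies.retryByException(
            RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
        Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<>();
        methodNameToPolicyMap.put("storeBlock", methodPolicy);
        // The proxy retries only the named methods; all others fail fast.
        return (MyStore) RetryProxy.create(MyStore.class, raw,
            methodNameToPolicyMap);
      }
    }
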
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96fa0f84/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3FileSystemConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3FileSystemConfigKeys.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3FileSystemConfigKeys.java
deleted file mode 100644
index 8172a46..0000000
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3FileSystemConfigKeys.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-
-/**
- * This class contains constants for configuration keys used
- * in the s3 file system.
- *
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class S3FileSystemConfigKeys extends CommonConfigurationKeys {
- public static final String S3_BLOCK_SIZE_KEY = "s3.blocksize";
- public static final long S3_BLOCK_SIZE_DEFAULT = 64*1024*1024;
- public static final String S3_REPLICATION_KEY = "s3.replication";
- public static final short S3_REPLICATION_DEFAULT = 1;
- public static final String S3_STREAM_BUFFER_SIZE_KEY =
- "s3.stream-buffer-size";
- public static final int S3_STREAM_BUFFER_SIZE_DEFAULT = 4096;
- public static final String S3_BYTES_PER_CHECKSUM_KEY =
- "s3.bytes-per-checksum";
- public static final int S3_BYTES_PER_CHECKSUM_DEFAULT = 512;
- public static final String S3_CLIENT_WRITE_PACKET_SIZE_KEY =
- "s3.client-write-packet-size";
- public static final int S3_CLIENT_WRITE_PACKET_SIZE_DEFAULT = 64*1024;
-}
-
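
These keys were read through the standard Configuration getters, and the surviving s3native.* keys in S3NativeFileSystemConfigKeys follow the same shape. A minimal sketch of the access pattern they imply, reusing the defaults above:

    import org.apache.hadoop.conf.Configuration;

    public class TuningLookup {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        long blockSize = conf.getLong("s3.blocksize", 64 * 1024 * 1024);
        int bufferSize = conf.getInt("s3.stream-buffer-size", 4096);
        System.out.println("block=" + blockSize + ", buffer=" + bufferSize);
      }
    }
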
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96fa0f84/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3FileSystemException.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3FileSystemException.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3FileSystemException.java
deleted file mode 100644
index cc1b463..0000000
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3FileSystemException.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.s3;
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * Thrown when there is a fatal exception while using {@link S3FileSystem}.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class S3FileSystemException extends IOException {
- private static final long serialVersionUID = 1L;
-
- public S3FileSystemException(String message) {
- super(message);
- }
-}