Posted to common-commits@hadoop.apache.org by wa...@apache.org on 2013/08/15 02:15:13 UTC
svn commit: r1514105 [2/2] - in
/hadoop/common/branches/HDFS-4949/hadoop-common-project: ./
hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/
hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/
hadoop-a...
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm?rev=1514105&r1=1514104&r2=1514105&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm Thu Aug 15 00:15:11 2013
@@ -86,11 +86,14 @@ chgrp
Usage: <<<hdfs dfs -chgrp [-R] GROUP URI [URI ...]>>>
- Change group association of files. With -R, make the change recursively
- through the directory structure. The user must be the owner of files, or
+ Change group association of files. The user must be the owner of files, or
else a super-user. Additional information is in the
{{{betterurl}Permissions Guide}}.
+   Options:
+
+ * The -R option will make the change recursively through the directory structure.
+
chmod
Usage: <<<hdfs dfs -chmod [-R] <MODE[,MODE]... | OCTALMODE> URI [URI ...]>>>
@@ -100,14 +103,21 @@ chmod
else a super-user. Additional information is in the
{{{betterurl}Permissions Guide}}.
+   Options:
+
+ * The -R option will make the change recursively through the directory structure.
+
chown
Usage: <<<hdfs dfs -chown [-R] [OWNER][:[GROUP]] URI [URI ...]>>>
- Change the owner of files. With -R, make the change recursively through the
- directory structure. The user must be a super-user. Additional information
+ Change the owner of files. The user must be a super-user. Additional information
is in the {{{betterurl}Permissions Guide}}.
+   Options:
+
+ * The -R option will make the change recursively through the directory structure.
+
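   The recursive behavior behind -R in chgrp, chmod, and chown is essentially a
   depth-first walk over FileStatus entries. A minimal sketch against the public
   FileSystem API (the class and method names below are hypothetical helpers,
   not part of the shell implementation):

+---+
import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical helper: what chown -R (and, via setOwner(p, null, group),
// chgrp -R) does conceptually. The actual shell code may differ.
class RecursiveChown {
  static void apply(FileSystem fs, Path p, String owner, String group)
      throws IOException {
    fs.setOwner(p, owner, group);                  // change this entry
    if (fs.getFileStatus(p).isDirectory()) {
      for (FileStatus child : fs.listStatus(p)) {
        apply(fs, child.getPath(), owner, group);  // descend into children
      }
    }
  }
}
+---+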
copyFromLocal
Usage: <<<hdfs dfs -copyFromLocal [-f] <localsrc> URI>>>
@@ -115,6 +125,10 @@ copyFromLocal
Similar to the put command, except that the source is restricted to a local
file reference.
+ Options:
+
+ * The -f option will overwrite the destination if it already exists.
+
copyToLocal
Usage: <<<hdfs dfs -copyToLocal [-ignorecrc] [-crc] URI <localdst> >>>
@@ -145,11 +159,15 @@ count
cp
- Usage: <<<hdfs dfs -cp URI [URI ...] <dest> >>>
+ Usage: <<<hdfs dfs -cp [-f] URI [URI ...] <dest> >>>
Copy files from source to destination. This command allows multiple sources as
well, in which case the destination must be a directory.
+ Options:
+
+ * The -f option will overwrite the destination if it already exists.
+
Example:
* <<<hdfs dfs -cp /user/hadoop/file1 /user/hadoop/file2>>>
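   The -f flag documented above maps onto the overwrite argument of the copy
   utilities. A minimal sketch, assuming org.apache.hadoop.fs.FileUtil#copy
   (the class name CpOverwriteDemo is illustrative):

+---+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

public class CpOverwriteDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Programmatic analogue of "hdfs dfs -cp -f /user/hadoop/file1 /user/hadoop/file2":
    // overwrite=true mirrors -f; deleteSource=false keeps the source in place.
    FileUtil.copy(fs, new Path("/user/hadoop/file1"),
                  fs, new Path("/user/hadoop/file2"),
                  false /* deleteSource */, true /* overwrite */, conf);
  }
}
+---+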
@@ -232,7 +250,7 @@ ls
permissions number_of_replicas userid groupid filesize modification_date modification_time filename
+---+
- For a directory it returns list of its direct children as in unix.A directory is listed as:
+   For a directory it returns a list of its direct children, as in Unix. A directory is listed as:
+---+
permissions userid groupid modification_date modification_time dirname
@@ -256,8 +274,11 @@ mkdir
Usage: <<<hdfs dfs -mkdir [-p] <paths> >>>
- Takes path uri's as argument and creates directories. With -p the behavior
- is much like unix mkdir -p creating parent directories along the path.
+   Takes path URIs as argument and creates directories.
+
+ Options:
+
+ * The -p option behavior is much like Unix mkdir -p, creating parent directories along the path.
Example:
@@ -362,8 +383,11 @@ setrep
Usage: <<<hdfs dfs -setrep [-R] <numReplicas> <path> >>>
- Changes the replication factor of a file. -R option is for recursively
- increasing the replication factor of files within a directory.
+ Changes the replication factor of a file.
+
+ Options:
+
+ * The -R option will recursively increase the replication factor of files within a directory.
Example:
@@ -390,8 +414,11 @@ tail
Usage: <<<hdfs dfs -tail [-f] URI>>>
- Displays last kilobyte of the file to stdout. -f option can be used as in
- Unix.
+   Displays the last kilobyte of the file to stdout.
+
+ Options:
+
+ * The -f option will output appended data as the file grows, as in Unix.
Example:
@@ -406,13 +433,9 @@ test
Options:
-*----+------------+
-| -e | check to see if the file exists. Return 0 if true.
-*----+------------+
-| -z | check to see if the file is zero length. Return 0 if true.
-*----+------------+
-| -d | check to see if the path is directory. Return 0 if true.
-*----+------------+
+ * The -e option will check to see if the file exists, returning 0 if true.
+ * The -z option will check to see if the file is zero length, returning 0 if true.
+   * The -d option will check to see if the path is a directory, returning 0 if true.
Example:
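   The mkdir, setrep, and test options above correspond to single calls on the
   FileSystem API. A minimal sketch of the programmatic equivalents (the paths
   are illustrative):

+---+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ShellEquivalents {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path file = new Path("/user/hadoop/file1");

    fs.mkdirs(new Path("/user/hadoop/dir1/dir2"));  // mkdir -p: parents created
    fs.setReplication(file, (short) 3);             // setrep for a single file

    boolean exists = fs.exists(file);               // test -e
    FileStatus st = fs.getFileStatus(file);
    boolean zeroLength = st.getLen() == 0;          // test -z
    boolean isDir = st.isDirectory();               // test -d
    System.out.println(exists + " " + zeroLength + " " + isDir);
  }
}
+---+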
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/site/apt/SingleCluster.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/site/apt/SingleCluster.apt.vm?rev=1514105&r1=1514104&r2=1514105&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/site/apt/SingleCluster.apt.vm (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/site/apt/SingleCluster.apt.vm Thu Aug 15 00:15:11 2013
@@ -32,7 +32,7 @@ $ mvn clean install -DskipTests
$ cd hadoop-mapreduce-project
$ mvn clean install assembly:assembly -Pnative
+---+
- <<NOTE:>> You will need protoc installed of version 2.4.1 or greater.
+ <<NOTE:>> You will need protoc 2.5.0 installed.
To ignore the native builds in mapreduce you can omit the <<<-Pnative>>> argument
for maven. The tarball should be available in <<<target/>>> directory.
Propchange: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/core/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/core:r1509426-1514104
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystemBasics.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystemBasics.java?rev=1514105&r1=1514104&r2=1514105&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystemBasics.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystemBasics.java Thu Aug 15 00:15:11 2013
@@ -82,7 +82,7 @@ public class TestHarFileSystemBasics {
localFileSystem.createNewFile(masterIndexPath);
assertTrue(localFileSystem.exists(masterIndexPath));
- writeVersionToMasterIndexImpl(HarFileSystem.VERSION);
+ writeVersionToMasterIndexImpl(HarFileSystem.VERSION, masterIndexPath);
final HarFileSystem harFileSystem = new HarFileSystem(localFileSystem);
final URI uri = new URI("har://" + harPath.toString());
@@ -90,8 +90,25 @@ public class TestHarFileSystemBasics {
return harFileSystem;
}
- private void writeVersionToMasterIndexImpl(int version) throws IOException {
- final Path masterIndexPath = new Path(harPath, "_masterindex");
+ private HarFileSystem createHarFileSystem(final Configuration conf, Path aHarPath)
+ throws Exception {
+ localFileSystem.mkdirs(aHarPath);
+ final Path indexPath = new Path(aHarPath, "_index");
+ final Path masterIndexPath = new Path(aHarPath, "_masterindex");
+ localFileSystem.createNewFile(indexPath);
+ assertTrue(localFileSystem.exists(indexPath));
+ localFileSystem.createNewFile(masterIndexPath);
+ assertTrue(localFileSystem.exists(masterIndexPath));
+
+ writeVersionToMasterIndexImpl(HarFileSystem.VERSION, masterIndexPath);
+
+ final HarFileSystem harFileSystem = new HarFileSystem(localFileSystem);
+ final URI uri = new URI("har://" + aHarPath.toString());
+ harFileSystem.initialize(uri, conf);
+ return harFileSystem;
+ }
+
+ private void writeVersionToMasterIndexImpl(int version, Path masterIndexPath) throws IOException {
// write Har version into the master index:
final FSDataOutputStream fsdos = localFileSystem.create(masterIndexPath);
try {
@@ -173,6 +190,29 @@ public class TestHarFileSystemBasics {
}
@Test
+ public void testPositiveLruMetadataCacheFs() throws Exception {
+ // Init 2nd har file system on the same underlying FS, so the
+ // metadata gets reused:
+ HarFileSystem hfs = new HarFileSystem(localFileSystem);
+ URI uri = new URI("har://" + harPath.toString());
+ hfs.initialize(uri, new Configuration());
+ // the metadata should be reused from cache:
+ assertTrue(hfs.getMetadata() == harFileSystem.getMetadata());
+
+    // Create hars until the cache capacity is exceeded by one; the last creation should evict the first entry from the cache
+ for (int i = 0; i <= hfs.METADATA_CACHE_ENTRIES_DEFAULT; i++) {
+ Path p = new Path(rootPath, "path1/path2/my" + i +".har");
+ createHarFileSystem(conf, p);
+ }
+
+ // The first entry should not be in the cache anymore:
+ hfs = new HarFileSystem(localFileSystem);
+ uri = new URI("har://" + harPath.toString());
+ hfs.initialize(uri, new Configuration());
+ assertTrue(hfs.getMetadata() != harFileSystem.getMetadata());
+ }
+
+ @Test
public void testPositiveInitWithoutUnderlyingFS() throws Exception {
// Init HarFS with no constructor arg, so that the underlying FS object
// is created on demand or got from cache in #initialize() method.
@@ -218,7 +258,7 @@ public class TestHarFileSystemBasics {
// time with 1 second accuracy:
Thread.sleep(1000);
// write an unsupported version:
- writeVersionToMasterIndexImpl(7777);
+ writeVersionToMasterIndexImpl(7777, new Path(harPath, "_masterindex"));
// init the Har:
final HarFileSystem hfs = new HarFileSystem(localFileSystem);
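The new testPositiveLruMetadataCacheFs assumes HarFileSystem keeps archive
metadata in a bounded least-recently-used cache of METADATA_CACHE_ENTRIES_DEFAULT
entries. A minimal sketch of that caching technique, using LinkedHashMap in
access order; the key and value types here are hypothetical, not the actual
HarFileSystem internals:

import java.net.URI;
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative LRU cache; the real HarFileSystem cache may differ in
// key/value types and synchronization strategy.
class LruMetadataCache<V> extends LinkedHashMap<URI, V> {
  private final int maxEntries;

  LruMetadataCache(int maxEntries) {
    super(16, 0.75f, true /* access order: get() refreshes recency */);
    this.maxEntries = maxEntries;
  }

  @Override
  protected boolean removeEldestEntry(Map.Entry<URI, V> eldest) {
    return size() > maxEntries;  // evict the least-recently-used entry
  }
}

With maxEntries equal to the cache capacity, inserting one archive more than the
capacity evicts the oldest untouched entry, which is exactly the behavior the
test's final assertion checks.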
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java?rev=1514105&r1=1514104&r2=1514105&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java Thu Aug 15 00:15:11 2013
@@ -26,6 +26,7 @@ import org.apache.hadoop.util.StringUtil
import static org.apache.hadoop.fs.FileSystemTestHelper.*;
import java.io.*;
+import java.net.URI;
import java.util.Arrays;
import java.util.Random;
@@ -363,12 +364,12 @@ public class TestLocalFileSystem {
FileStatus status = fileSys.getFileStatus(path);
assertTrue("check we're actually changing something", newModTime != status.getModificationTime());
- assertEquals(0, status.getAccessTime());
+ long accessTime = status.getAccessTime();
fileSys.setTimes(path, newModTime, -1);
status = fileSys.getFileStatus(path);
assertEquals(newModTime, status.getModificationTime());
- assertEquals(0, status.getAccessTime());
+ assertEquals(accessTime, status.getAccessTime());
}
/**
@@ -520,4 +521,18 @@ public class TestLocalFileSystem {
fail(s);
}
}
+
+ @Test
+ public void testStripFragmentFromPath() throws Exception {
+ FileSystem fs = FileSystem.getLocal(new Configuration());
+ Path pathQualified = TEST_PATH.makeQualified(fs.getUri(),
+ fs.getWorkingDirectory());
+ Path pathWithFragment = new Path(
+ new URI(pathQualified.toString() + "#glacier"));
+ // Create test file with fragment
+ FileSystemTestHelper.createFile(fs, pathWithFragment);
+ Path resolved = fs.resolvePath(pathWithFragment);
+ assertEquals("resolvePath did not strip fragment from Path", pathQualified,
+ resolved);
+ }
}
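The new testStripFragmentFromPath hinges on the fragment component of
java.net.URI (the "#glacier" suffix). A standalone illustration of what
stripping a fragment means (the path is illustrative):

import java.net.URI;

public class FragmentDemo {
  public static void main(String[] args) throws Exception {
    URI withFragment = new URI("file:///tmp/test.txt#glacier");
    System.out.println(withFragment.getFragment());  // prints "glacier"
    // Rebuild the URI without its fragment, as resolvePath is asserted to do:
    URI stripped = new URI(withFragment.getScheme(), withFragment.getAuthority(),
        withFragment.getPath(), withFragment.getQuery(), null /* no fragment */);
    System.out.println(stripped);  // the same URI, minus "#glacier"
  }
}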
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFS.java?rev=1514105&r1=1514104&r2=1514105&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFS.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFS.java Thu Aug 15 00:15:11 2013
@@ -31,6 +31,7 @@ import java.net.URISyntaxException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.security.UserGroupInformation;
import org.junit.Test;
/**
@@ -134,6 +135,7 @@ abstract public class TestSymlinkLocalFS
Path fileAbs = new Path(testBaseDir1()+"/file");
Path fileQual = new Path(testURI().toString(), fileAbs);
Path link = new Path(testBaseDir1()+"/linkToFile");
+ Path linkQual = new Path(testURI().toString(), link.toString());
wrapper.createSymlink(fileAbs, link, false);
// Deleting the link using FileContext currently fails because
// resolve looks up LocalFs rather than RawLocalFs for the path
@@ -151,18 +153,15 @@ abstract public class TestSymlinkLocalFS
// Expected. File's exists method returns false for dangling links
}
// We can stat a dangling link
+ UserGroupInformation user = UserGroupInformation.getCurrentUser();
FileStatus fsd = wrapper.getFileLinkStatus(link);
assertEquals(fileQual, fsd.getSymlink());
assertTrue(fsd.isSymlink());
assertFalse(fsd.isDirectory());
- assertEquals("", fsd.getOwner());
- assertEquals("", fsd.getGroup());
- assertEquals(link, fsd.getPath());
- assertEquals(0, fsd.getLen());
- assertEquals(0, fsd.getBlockSize());
- assertEquals(0, fsd.getReplication());
- assertEquals(0, fsd.getAccessTime());
- assertEquals(FsPermission.getDefault(), fsd.getPermission());
+ assertEquals(user.getUserName(), fsd.getOwner());
+ // Compare against user's primary group
+ assertEquals(user.getGroupNames()[0], fsd.getGroup());
+ assertEquals(linkQual, fsd.getPath());
// Accessing the link
try {
readFile(link);
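The rewritten assertions compare the dangling link's FileStatus owner and group
against the current login user rather than empty strings. A minimal standalone
sketch of the UserGroupInformation calls involved:

import java.io.IOException;
import org.apache.hadoop.security.UserGroupInformation;

public class CurrentUserDemo {
  public static void main(String[] args) throws IOException {
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    // FileStatus#getOwner is expected to match the login user and
    // FileStatus#getGroup the primary group (assumes at least one group).
    System.out.println(ugi.getUserName());
    System.out.println(ugi.getGroupNames()[0]);
  }
}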
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml?rev=1514105&r1=1514104&r2=1514105&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml Thu Aug 15 00:15:11 2013
@@ -296,7 +296,11 @@
</comparator>
<comparator>
<type>RegexpComparator</type>
- <expected-output>^( |\t)*modification times, ownership and the mode.( )*</expected-output>
+ <expected-output>^( |\t)*modification times, ownership and the mode. Passing -f( )*</expected-output>
+ </comparator>
+ <comparator>
+ <type>RegexpComparator</type>
+ <expected-output>^( |\t)*overwrites the destination if it already exists.( )*</expected-output>
</comparator>
</comparators>
</test>
@@ -400,7 +404,11 @@
</comparator>
<comparator>
<type>RegexpComparator</type>
- <expected-output>^( |\t)*ownership and the mode.( )*</expected-output>
+ <expected-output>^( |\t)*ownership and the mode. Passing -f overwrites( )*</expected-output>
+ </comparator>
+ <comparator>
+ <type>RegexpComparator</type>
+ <expected-output>^( |\t)*the destination if it already exists.( )*</expected-output>
</comparator>
</comparators>
</test>
Propchange: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-minikdc/
------------------------------------------------------------------------------
--- svn:ignore (added)
+++ svn:ignore Thu Aug 15 00:15:11 2013
@@ -0,0 +1 @@
+target
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/pom.xml?rev=1514105&r1=1514104&r2=1514105&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/pom.xml (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/pom.xml Thu Aug 15 00:15:11 2013
@@ -36,6 +36,7 @@
<module>hadoop-common</module>
<module>hadoop-annotations</module>
<module>hadoop-nfs</module>
+ <module>hadoop-minikdc</module>
</modules>
<build>