Posted to hdfs-commits@hadoop.apache.org by gk...@apache.org on 2012/08/03 21:00:59 UTC
svn commit: r1369164 [5/16] - in
/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project: ./
hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/dev-support/
hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/
hadoop-hdfs-httpfs/src/main/java/or...
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Aug 3 19:00:15 2012
@@ -82,9 +82,6 @@ Trunk (unreleased changes)
HDFS-3197. Incorrect class comments in a few tests. (Andy Isaacson via eli)
- HDFS-2391. Newly set BalancerBandwidth value is not displayed anywhere.
- (harsh)
-
HDFS-3476. Correct the default used in TestDFSClientRetries.busyTest()
after HDFS-3462 (harsh)
@@ -110,6 +107,8 @@ Trunk (unreleased changes)
HDFS-3190. Simple refactors in existing NN code to assist
QuorumJournalManager extension. (todd)
+ HDFS-3630 Modify TestPersistBlocks to use both flush and hflush (sanjay)
+
OPTIMIZATIONS
BUG FIXES
@@ -144,9 +143,6 @@ Trunk (unreleased changes)
HDFS-2966. TestNameNodeMetrics tests can fail under load. (stevel)
- HDFS-3067. NPE in DFSInputStream.readBuffer if read is repeated on
- corrupted block. (Henry Robinson via atm)
-
HDFS-3116. Typo in fetchdt error message. (AOE Takashi via atm)
HDFS-3126. Journal stream from Namenode to BackupNode needs to have
@@ -175,8 +171,19 @@ Trunk (unreleased changes)
HDFS-3549. Fix dist tar build fails in hadoop-hdfs-raid project. (Jason Lowe via daryn)
+ HDFS-3541. Deadlock between recovery, xceiver and packet responder (Vinay via umamahesh)
+
+ HDFS-3482. hdfs balancer throws ArrayIndexOutOfBoundsException
+ if option is specified without values. ( Madhukara Phatak via umamahesh)
+
+ HDFS-3614. Revert unused MiniDFSCluster constructor from HDFS-3049.
+ (acmurthy via eli)
+
+ HDFS-3625. Fix TestBackupNode by properly initializing edit log during
+ startup. (Junping Du via todd)
+
Branch-2 ( Unreleased changes )
-
+
INCOMPATIBLE CHANGES
HDFS-3446. HostsFileReader silently ignores bad includes/excludes
@@ -192,6 +199,10 @@ Branch-2 ( Unreleased changes )
HDFS-3518. Add a utility method HdfsUtils.isHealthy(uri) for checking if
the given HDFS is healthy. (szetszwo)
+ HDFS-3113. httpfs does not support delegation tokens. (tucu)
+
+ HDFS-3513. HttpFS should cache filesystems. (tucu)
+
IMPROVEMENTS
HDFS-3390. DFSAdmin should print full stack traces of errors when DEBUG
@@ -274,6 +285,85 @@ Branch-2 ( Unreleased changes )
HDFS-3170. Add more useful metrics for write latency (Matthew Jacobs via
todd)
+ HDFS-3604. Add dfs.webhdfs.enabled to hdfs-default.xml. (eli)
+
+ HDFS-2988. Improve error message when storage directory lock fails
+ (Miomir Boljanovic via harsh)
+
+ HDFS-2391. Newly set BalancerBandwidth value is not displayed anywhere.
+ (harsh)
+
+ HDFS-3067. NPE in DFSInputStream.readBuffer if read is repeated on
+ corrupted block. (Henry Robinson via atm)
+
+ HDFS-3555. idle client socket triggers DN ERROR log
+ (should be INFO or DEBUG). (Andy Isaacson via harsh)
+
+ HDFS-3568. fuse_dfs: add support for security. (Colin McCabe via atm)
+
+ HDFS-3629. Fix the typo in the error message about inconsistent
+ storage layout version. (Brandon Li via harsh)
+
+ HDFS-3613. GSet prints some INFO level values, which aren't
+ really very useful to all (Andrew Wang via harsh)
+
+ HDFS-3611. NameNode prints unnecessary WARNs about edit log normally skipping
+ a few bytes. (Colin Patrick McCabe via harsh)
+
+ HDFS-3582. Hook daemon process exit for testing. (eli)
+
+ HDFS-3641. Move server Util time methods to common and use now
+ instead of System#currentTimeMillis. (eli)
+
+ HDFS-3633. libhdfs: hdfsDelete should pass JNI_FALSE or JNI_TRUE.
+ (Colin Patrick McCabe via eli)
+
+ HDFS-799. libhdfs must call DetachCurrentThread when a thread is destroyed.
+ (Colin Patrick McCabe via eli)
+
+ HDFS-3306. fuse_dfs: don't lock release operations.
+ (Colin Patrick McCabe via eli)
+
+ HDFS-3612. Single namenode image directory config warning can
+ be improved. (Andy Isaacson via harsh)
+
+ HDFS-3606. libhdfs: create self-contained unit test.
+ (Colin Patrick McCabe via eli)
+
+ HDFS-3539. libhdfs code cleanups. (Colin Patrick McCabe via eli)
+
+ HDFS-3610. fuse_dfs: Provide a way to use the default (configured) NN URI.
+ (Colin Patrick McCabe via eli)
+
+ HDFS-3663. MiniDFSCluster should capture the code path that led to
+ the first ExitException. (eli)
+
+ HDFS-3659. Add missing @Override to methods across the hadoop-hdfs
+ project. (Brandon Li via harsh)
+
+ HDFS-3537. Move libhdfs and fuse-dfs source to native subdirectories.
+ (Colin Patrick McCabe via eli)
+
+ HDFS-3665. Add a test for renaming across file systems via a symlink. (eli)
+
+ HDFS-3666. Plumb more exception messages to terminate. (eli)
+
+ HDFS-3673. libhdfs: fix some compiler warnings. (Colin Patrick McCabe via eli)
+
+ HDFS-3675. libhdfs: follow documented return codes. (Colin Patrick McCabe via eli)
+
+ HDFS-1249. With fuse-dfs, chown which only has owner (or only group)
+ argument fails with Input/output error. (Colin Patrick McCabe via eli)
+
+ HDFS-3583. Convert remaining tests to Junit4. (Andrew Wang via atm)
+
+ HDFS-3711. Manually convert remaining tests to JUnit4. (Andrew Wang via atm)
+
+ HDFS-3650. Use MutableQuantiles to provide latency histograms for various
+ operations. (Andrew Wang via atm)
+
+ HDFS-3667. Add retry support to WebHdfsFileSystem. (szetszwo)
+
OPTIMIZATIONS
HDFS-2982. Startup performance suffers when there are many edit log
@@ -285,6 +375,8 @@ Branch-2 ( Unreleased changes )
HDFS-3110. Use directRead API to reduce the number of buffer copies in
libhdfs (Henry Robinson via todd)
+ HDFS-3697. Enable fadvise readahead by default. (todd)
+
BUG FIXES
HDFS-3385. The last block of INodeFileUnderConstruction is not
@@ -408,6 +500,60 @@ Branch-2 ( Unreleased changes )
HDFS-3581. FSPermissionChecker#checkPermission sticky bit check
missing range check. (eli)
+ HDFS-3428. Move DelegationTokenRenewer to common (tucu)
+
+ HDFS-3491. HttpFs does not set permissions correctly (tucu)
+
+ HDFS-3580. incompatible types; no instance(s) of type variable(s) V exist
+ so that V conforms to boolean compiling HttpFSServer.java with OpenJDK
+ (adi2 via tucu)
+
+ HDFS-3603. Decouple TestHDFSTrash from TestTrash. (Jason Lowe via eli)
+
+ HDFS-711. hdfsUtime does not handle atime = 0 or mtime = 0 correctly.
+ (Colin Patrick McCabe via eli)
+
+ HDFS-3548. NamenodeFsck.copyBlock fails to create a Block Reader.
+ (Colin Patrick McCabe via eli)
+
+ HDFS-3615. Two BlockTokenSecretManager findbugs warnings. (atm)
+
+ HDFS-470. libhdfs should handle 0-length reads from FSInputStream
+ correctly. (Colin Patrick McCabe via eli)
+
+ HDFS-3492. fix some misuses of InputStream#skip.
+ (Colin Patrick McCabe via eli)
+
+ HDFS-3609. libhdfs: don't force the URI to look like hdfs://hostname:port.
+ (Colin Patrick McCabe via eli)
+
+ HDFS-3605. Block mistakenly marked corrupt during edit log catchup
+ phase of failover. (todd and Brahma Reddy Battula via todd)
+
+ HDFS-3690. BlockPlacementPolicyDefault incorrectly casts LOG. (eli)
+
+ HDFS-3597. SNN fails to start after DFS upgrade. (Andy Isaacson via todd)
+
+ HDFS-3608. fuse_dfs: detect changes in UID ticket cache. (Colin Patrick
+ McCabe via atm)
+
+ HDFS-3709. TestStartup tests still binding to the ephemeral port. (eli)
+
+ HDFS-3720. hdfs.h must get packaged. (Colin Patrick McCabe via atm)
+
+ HDFS-3626. Creating file with invalid path can corrupt edit log (todd)
+
+ HDFS-3679. fuse_dfs notrash option sets usetrash. (Conrad Meyer via suresh)
+
+ HDFS-3732. fuse_dfs: incorrect configuration value checked for connection
+ expiry timer period. (Colin Patrick McCabe via atm)
+
+ HDFS-3738. TestDFSClientRetries#testFailuresArePerOperation sets incorrect
+ timeout config. (atm)
+
+ HDFS-3756. DelegationTokenFetcher creates 2 HTTP connections, the second
+ one not properly configured. (tucu)
+
BREAKDOWN OF HDFS-3042 SUBTASKS
HDFS-2185. HDFS portion of ZK-based FailoverController (todd)
@@ -424,14 +570,6 @@ Branch-2 ( Unreleased changes )
HDFS-3432. TestDFSZKFailoverController tries to fail over too early (todd)
- HDFS-3428. Move DelegationTokenRenewer to common (tucu)
-
- HDFS-3491. HttpFs does not set permissions correctly (tucu)
-
- HDFS-3580. incompatible types; no instance(s) of type variable(s) V exist
- so that V conforms to boolean compiling HttpFSServer.java with OpenJDK
- (adi2 via tucu)
-
Release 2.0.0-alpha - 05-23-2012
INCOMPATIBLE CHANGES
@@ -1279,6 +1417,18 @@ Release 0.23.3 - UNRELEASED
HDFS-3331. In namenode, check superuser privilege for setBalancerBandwidth
and acquire the write lock for finalizeUpgrade. (szetszwo)
+ HDFS-3577. In DatanodeWebHdfsMethods, use MessageBodyWriter instead of
+ StreamingOutput, otherwise, it will fail to transfer large files.
+ (szetszwo)
+
+ HDFS-3646. LeaseRenewer can hold reference to inactive DFSClient
+ instances forever. (Kihwal Lee via daryn)
+
+ HDFS-3696. Set chunked streaming mode in WebHdfsFileSystem write operations
+ to get around a Java library bug causing OutOfMemoryError. (szetszwo)
+
+ HDFS-3553. Hftp proxy tokens are broken (daryn)
+
Release 0.23.2 - UNRELEASED
INCOMPATIBLE CHANGES
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/pom.xml?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/pom.xml (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/pom.xml Fri Aug 3 19:00:15 2012
@@ -34,6 +34,7 @@ http://maven.apache.org/xsd/maven-4.0.0.
<hadoop.component>hdfs</hadoop.component>
<kdc.resource.dir>../../hadoop-common-project/hadoop-common/src/test/resources/kdc</kdc.resource.dir>
<is.hadoop.component>true</is.hadoop.component>
+ <require.fuse>false</require.fuse>
</properties>
<dependencies>
@@ -237,6 +238,9 @@ http://maven.apache.org/xsd/maven-4.0.0.
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-antrun-plugin</artifactId>
+ <configuration>
+ <skipTests>false</skipTests>
+ </configuration>
<executions>
<execution>
<id>compile-proto</id>
@@ -414,7 +418,7 @@ http://maven.apache.org/xsd/maven-4.0.0.
<mkdir dir="${project.build.directory}/native"/>
<exec executable="cmake" dir="${project.build.directory}/native"
failonerror="true">
- <arg line="${basedir}/src/ -DGENERATED_JAVAH=${project.build.directory}/native/javah -DJVM_ARCH_DATA_MODEL=${sun.arch.data.model}"/>
+ <arg line="${basedir}/src/ -DGENERATED_JAVAH=${project.build.directory}/native/javah -DJVM_ARCH_DATA_MODEL=${sun.arch.data.model} -DREQUIRE_FUSE=${require.fuse}"/>
</exec>
<exec executable="make" dir="${project.build.directory}/native" failonerror="true">
<arg line="VERBOSE=1"/>
@@ -422,18 +426,29 @@ http://maven.apache.org/xsd/maven-4.0.0.
</target>
</configuration>
</execution>
- <!-- TODO wire here native testcases
<execution>
- <id>test</id>
+ <id>native_tests</id>
<phase>test</phase>
- <goals>
- <goal>test</goal>
- </goals>
+ <goals><goal>run</goal></goals>
<configuration>
- <destDir>${project.build.directory}/native/target</destDir>
+ <target>
+ <property name="compile_classpath" refid="maven.compile.classpath"/>
+ <property name="test_classpath" refid="maven.test.classpath"/>
+ <exec executable="sh" failonerror="true" dir="${project.build.directory}/native/">
+ <arg value="-c"/>
+ <arg value="[ x$SKIPTESTS = xtrue ] || ${project.build.directory}/native/test_libhdfs_threaded"/>
+ <env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/>
+ <env key="SKIPTESTS" value="${skipTests}"/>
+ </exec>
+ <exec executable="sh" failonerror="true" dir="${project.build.directory}/native/">
+ <arg value="-c"/>
+ <arg value="[ x$SKIPTESTS = xtrue ] || ${project.build.directory}/native/test_libhdfs_threaded"/>
+ <env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/>
+ <env key="SKIPTESTS" value="${skipTests}"/>
+ </exec>
+ </target>
</configuration>
</execution>
- -->
</executions>
</plugin>
</plugins>
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt Fri Aug 3 19:00:15 2012
@@ -67,6 +67,12 @@ function(FLATTEN_LIST INPUT SEPARATOR OU
set (${OUTPUT} "${_TMPS}" PARENT_SCOPE)
endfunction()
+# Check to see if our compiler and linker support the __thread attribute.
+# On Linux and some other operating systems, this is a more efficient
+# alternative to POSIX thread local storage.
+INCLUDE(CheckCSourceCompiles)
+CHECK_C_SOURCE_COMPILES("int main(void) { static __thread int i = 0; return 0; }" HAVE_BETTER_TLS)
+
find_package(JNI REQUIRED)
if (NOT GENERATED_JAVAH)
# Must identify where the generated headers have been placed
@@ -81,15 +87,16 @@ include_directories(
${CMAKE_CURRENT_SOURCE_DIR}
${CMAKE_BINARY_DIR}
${JNI_INCLUDE_DIRS}
- main/native/
+ main/native
+ main/native/libhdfs
)
set(_FUSE_DFS_VERSION 0.1.0)
CONFIGURE_FILE(${CMAKE_SOURCE_DIR}/config.h.cmake ${CMAKE_BINARY_DIR}/config.h)
add_dual_library(hdfs
- main/native/hdfs.c
- main/native/hdfsJniHelper.c
+ main/native/libhdfs/hdfs.c
+ main/native/libhdfs/jni_helper.c
)
target_link_dual_libraries(hdfs
${JAVA_JVM_LIBRARY}
@@ -99,31 +106,51 @@ set(LIBHDFS_VERSION "0.0.0")
set_target_properties(hdfs PROPERTIES
SOVERSION ${LIBHDFS_VERSION})
-add_executable(hdfs_test
- main/native/hdfs_test.c
+add_executable(test_libhdfs_ops
+ main/native/libhdfs/test/test_libhdfs_ops.c
)
-target_link_libraries(hdfs_test
+target_link_libraries(test_libhdfs_ops
hdfs
${JAVA_JVM_LIBRARY}
)
-output_directory(hdfs_test target/usr/local/bin)
-add_executable(hdfs_read
- main/native/hdfs_read.c
+add_executable(test_libhdfs_read
+ main/native/libhdfs/test/test_libhdfs_read.c
)
-target_link_libraries(hdfs_read
+target_link_libraries(test_libhdfs_read
hdfs
${JAVA_JVM_LIBRARY}
)
-output_directory(hdfs_read target/usr/local/bin)
-add_executable(hdfs_write
- main/native/hdfs_write.c
+add_executable(test_libhdfs_write
+ main/native/libhdfs/test/test_libhdfs_write.c
)
-target_link_libraries(hdfs_write
+target_link_libraries(test_libhdfs_write
hdfs
${JAVA_JVM_LIBRARY}
)
-output_directory(hdfs_write target/usr/local/bin)
-add_subdirectory(contrib/fuse-dfs/src)
+add_library(native_mini_dfs
+ main/native/libhdfs/native_mini_dfs.c
+)
+target_link_libraries(native_mini_dfs
+ hdfs
+)
+
+add_executable(test_native_mini_dfs
+ main/native/libhdfs/test_native_mini_dfs.c
+)
+target_link_libraries(test_native_mini_dfs
+ native_mini_dfs
+)
+
+add_executable(test_libhdfs_threaded
+ main/native/libhdfs/test_libhdfs_threaded.c
+)
+target_link_libraries(test_libhdfs_threaded
+ hdfs
+ native_mini_dfs
+ pthread
+)
+
+add_subdirectory(main/native/fuse-dfs)
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake Fri Aug 3 19:00:15 2012
@@ -3,4 +3,6 @@
#cmakedefine _FUSE_DFS_VERSION "@_FUSE_DFS_VERSION@"
+#cmakedefine HAVE_BETTER_TLS
+
#endif
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java Fri Aug 3 19:00:15 2012
@@ -32,7 +32,6 @@ import org.apache.hadoop.hdfs.MiniDFSClu
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.ha.ServiceFailedException;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogTestUtil;
@@ -42,6 +41,8 @@ import org.apache.hadoop.ipc.RemoteExcep
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.util.ExitUtil.ExitException;
+
import org.apache.bookkeeper.proto.BookieServer;
import org.apache.commons.logging.Log;
@@ -49,12 +50,6 @@ import org.apache.commons.logging.LogFac
import java.io.IOException;
-import static org.mockito.Matchers.anyInt;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.atLeastOnce;
-import static org.mockito.Mockito.verify;
-
/**
* Integration test to ensure that the BookKeeper JournalManager
* works for HDFS Namenode HA
@@ -83,8 +78,6 @@ public class TestBookKeeperAsHASharedDir
*/
@Test
public void testFailoverWithBK() throws Exception {
- Runtime mockRuntime1 = mock(Runtime.class);
- Runtime mockRuntime2 = mock(Runtime.class);
MiniDFSCluster cluster = null;
try {
Configuration conf = new Configuration();
@@ -100,8 +93,6 @@ public class TestBookKeeperAsHASharedDir
.build();
NameNode nn1 = cluster.getNameNode(0);
NameNode nn2 = cluster.getNameNode(1);
- FSEditLogTestUtil.setRuntimeForEditLog(nn1, mockRuntime1);
- FSEditLogTestUtil.setRuntimeForEditLog(nn2, mockRuntime2);
cluster.waitActive();
cluster.transitionToActive(0);
@@ -117,9 +108,6 @@ public class TestBookKeeperAsHASharedDir
assertTrue(fs.exists(p));
} finally {
- verify(mockRuntime1, times(0)).exit(anyInt());
- verify(mockRuntime2, times(0)).exit(anyInt());
-
if (cluster != null) {
cluster.shutdown();
}
@@ -141,9 +129,6 @@ public class TestBookKeeperAsHASharedDir
BookieServer replacementBookie = null;
- Runtime mockRuntime1 = mock(Runtime.class);
- Runtime mockRuntime2 = mock(Runtime.class);
-
MiniDFSCluster cluster = null;
try {
@@ -161,11 +146,10 @@ public class TestBookKeeperAsHASharedDir
.nnTopology(MiniDFSNNTopology.simpleHATopology())
.numDataNodes(0)
.manageNameDfsSharedDirs(false)
+ .checkExitOnShutdown(false)
.build();
NameNode nn1 = cluster.getNameNode(0);
NameNode nn2 = cluster.getNameNode(1);
- FSEditLogTestUtil.setRuntimeForEditLog(nn1, mockRuntime1);
- FSEditLogTestUtil.setRuntimeForEditLog(nn2, mockRuntime2);
cluster.waitActive();
cluster.transitionToActive(0);
@@ -180,20 +164,22 @@ public class TestBookKeeperAsHASharedDir
assertEquals("New bookie didn't stop",
numBookies, bkutil.checkBookiesUp(numBookies, 10));
- // mkdirs will "succeed", but nn have called runtime.exit
- fs.mkdirs(p2);
- verify(mockRuntime1, atLeastOnce()).exit(anyInt());
- verify(mockRuntime2, times(0)).exit(anyInt());
+ try {
+ fs.mkdirs(p2);
+ fail("mkdirs should result in the NN exiting");
+ } catch (RemoteException re) {
+ assertTrue(re.getClassName().contains("ExitException"));
+ }
cluster.shutdownNameNode(0);
try {
cluster.transitionToActive(1);
fail("Shouldn't have been able to transition with bookies down");
- } catch (ServiceFailedException e) {
- assertTrue("Wrong exception",
- e.getMessage().contains("Failed to start active services"));
+ } catch (ExitException ee) {
+ assertTrue("Should shutdown due to required journal failure",
+ ee.getMessage().contains(
+ "starting log segment 3 failed for required journal"));
}
- verify(mockRuntime2, atLeastOnce()).exit(anyInt());
replacementBookie = bkutil.newBookie();
assertEquals("Replacement bookie didn't start",
@@ -219,8 +205,6 @@ public class TestBookKeeperAsHASharedDir
*/
@Test
public void testMultiplePrimariesStarted() throws Exception {
- Runtime mockRuntime1 = mock(Runtime.class);
- Runtime mockRuntime2 = mock(Runtime.class);
Path p1 = new Path("/testBKJMMultiplePrimary");
MiniDFSCluster cluster = null;
@@ -235,11 +219,10 @@ public class TestBookKeeperAsHASharedDir
.nnTopology(MiniDFSNNTopology.simpleHATopology())
.numDataNodes(0)
.manageNameDfsSharedDirs(false)
+ .checkExitOnShutdown(false)
.build();
NameNode nn1 = cluster.getNameNode(0);
NameNode nn2 = cluster.getNameNode(1);
- FSEditLogTestUtil.setRuntimeForEditLog(nn1, mockRuntime1);
- FSEditLogTestUtil.setRuntimeForEditLog(nn2, mockRuntime2);
cluster.waitActive();
cluster.transitionToActive(0);
@@ -248,11 +231,13 @@ public class TestBookKeeperAsHASharedDir
nn1.getRpcServer().rollEditLog();
cluster.transitionToActive(1);
fs = cluster.getFileSystem(0); // get the older active server.
- // This edit log updation on older active should make older active
- // shutdown.
- fs.delete(p1, true);
- verify(mockRuntime1, atLeastOnce()).exit(anyInt());
- verify(mockRuntime2, times(0)).exit(anyInt());
+
+ try {
+ fs.delete(p1, true);
+ fail("Log update on older active should cause it to exit");
+ } catch (RemoteException re) {
+ assertTrue(re.getClassName().contains("ExitException"));
+ }
} finally {
if (cluster != null) {
cluster.shutdown();
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogTestUtil.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogTestUtil.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogTestUtil.java Fri Aug 3 19:00:15 2012
@@ -36,9 +36,4 @@ public class FSEditLogTestUtil {
FSEditLogLoader.EditLogValidation validation = FSEditLogLoader.validateEditLog(in);
return (validation.getEndTxId() - in.getFirstTxId()) + 1;
}
-
- public static void setRuntimeForEditLog(NameNode nn, Runtime rt) {
- nn.setRuntimeForTesting(rt);
- nn.getFSImage().getEditLog().setRuntimeForTesting(rt);
- }
}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/faultinject_framework.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/faultinject_framework.xml?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/faultinject_framework.xml (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/faultinject_framework.xml Fri Aug 3 19:00:15 2012
@@ -332,13 +332,12 @@ package org.apache.hadoop.fs;
import org.junit.Test;
import org.junit.Before;
-import junit.framework.TestCase;
-public class DemoFiTest extends TestCase {
+public class DemoFiTest {
public static final String BLOCK_RECEIVER_FAULT="hdfs.datanode.BlockReceiver";
@Override
@Before
- public void setUp(){
+ public void setUp() {
//Setting up the test's environment as required
}
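
For reference, this documentation change leaves the test class in the standard JUnit 4 shape. A minimal sketch of the result (the test method below is illustrative, not part of the patch):

    import org.junit.Before;
    import org.junit.Test;

    public class DemoFiTest {
      public static final String BLOCK_RECEIVER_FAULT = "hdfs.datanode.BlockReceiver";

      @Before
      public void setUp() {
        // @Before replaces the JUnit 3 setUp() override now that the class
        // no longer extends TestCase.
      }

      @Test
      public void testFiDemo() {
        // Exercise the fault-injected BlockReceiver path here (illustrative).
      }
    }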
Propchange: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1358480-1369130
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java Fri Aug 3 19:00:15 2012
@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.security.t
import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
import org.apache.hadoop.hdfs.util.DirectBufferPool;
import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DataChecksum;
@@ -315,23 +316,10 @@ class BlockReaderLocal implements BlockR
boolean success = false;
try {
// Skip both input streams to beginning of the chunk containing startOffset
- long toSkip = firstChunkOffset;
- while (toSkip > 0) {
- long skipped = dataIn.skip(toSkip);
- if (skipped == 0) {
- throw new IOException("Couldn't initialize input stream");
- }
- toSkip -= skipped;
- }
+ IOUtils.skipFully(dataIn, firstChunkOffset);
if (checksumIn != null) {
long checkSumOffset = (firstChunkOffset / bytesPerChecksum) * checksumSize;
- while (checkSumOffset > 0) {
- long skipped = checksumIn.skip(checkSumOffset);
- if (skipped == 0) {
- throw new IOException("Couldn't initialize checksum input stream");
- }
- checkSumOffset -= skipped;
- }
+ IOUtils.skipFully(checksumIn, checkSumOffset);
}
success = true;
} finally {
@@ -636,17 +624,9 @@ class BlockReaderLocal implements BlockR
slowReadBuff.position(slowReadBuff.limit());
checksumBuff.position(checksumBuff.limit());
- long dataSkipped = dataIn.skip(toskip);
- if (dataSkipped != toskip) {
- throw new IOException("skip error in data input stream");
- }
- long checkSumOffset = (dataSkipped / bytesPerChecksum) * checksumSize;
- if (checkSumOffset > 0) {
- long skipped = checksumIn.skip(checkSumOffset);
- if (skipped != checkSumOffset) {
- throw new IOException("skip error in checksum input stream");
- }
- }
+ IOUtils.skipFully(dataIn, toskip);
+ long checkSumOffset = (toskip / bytesPerChecksum) * checksumSize;
+ IOUtils.skipFully(checksumIn, checkSumOffset);
// read into the middle of the chunk
if (skipBuf == null) {
@@ -701,4 +681,4 @@ class BlockReaderLocal implements BlockR
public boolean hasSentStatusCode() {
return false;
}
-}
\ No newline at end of file
+}
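
The skip-loop removals above (HDFS-3492) delegate to IOUtils.skipFully, which centralizes the pattern the deleted lines implemented by hand. A minimal sketch of that pattern, assuming only java.io (the actual org.apache.hadoop.io.IOUtils method may differ in detail):

    import java.io.IOException;
    import java.io.InputStream;

    class SkipFullyExample {
      // InputStream#skip may legally skip fewer bytes than requested, so
      // skipping an exact count requires a loop that fails fast when no
      // progress is made.
      static void skipFully(InputStream in, long len) throws IOException {
        while (len > 0) {
          long skipped = in.skip(len);
          if (skipped <= 0) {
            throw new IOException("Premature end of stream: " + len
                + " byte(s) left to skip");
          }
          len -= skipped;
        }
      }
    }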
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ByteRangeInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ByteRangeInputStream.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ByteRangeInputStream.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ByteRangeInputStream.java Fri Aug 3 19:00:15 2012
@@ -57,9 +57,9 @@ public abstract class ByteRangeInputStre
return url;
}
- protected abstract HttpURLConnection openConnection() throws IOException;
-
- protected abstract HttpURLConnection openConnection(final long offset) throws IOException;
+ /** Connect to server with a data offset. */
+ protected abstract HttpURLConnection connect(final long offset,
+ final boolean resolved) throws IOException;
}
enum StreamStatus {
@@ -85,9 +85,6 @@ public abstract class ByteRangeInputStre
this.resolvedURL = r;
}
- protected abstract void checkResponseCode(final HttpURLConnection connection
- ) throws IOException;
-
protected abstract URL getResolvedUrl(final HttpURLConnection connection
) throws IOException;
@@ -113,13 +110,10 @@ public abstract class ByteRangeInputStre
protected InputStream openInputStream() throws IOException {
// Use the original url if no resolved url exists, eg. if
// it's the first time a request is made.
- final URLOpener opener =
- (resolvedURL.getURL() == null) ? originalURL : resolvedURL;
-
- final HttpURLConnection connection = opener.openConnection(startPos);
- connection.connect();
- checkResponseCode(connection);
+ final boolean resolved = resolvedURL.getURL() != null;
+ final URLOpener opener = resolved? resolvedURL: originalURL;
+ final HttpURLConnection connection = opener.connect(startPos, resolved);
final String cl = connection.getHeaderField(StreamFile.CONTENT_LENGTH);
if (cl == null) {
throw new IOException(StreamFile.CONTENT_LENGTH+" header is missing");
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java Fri Aug 3 19:00:15 2012
@@ -136,6 +136,7 @@ import org.apache.hadoop.security.token.
import org.apache.hadoop.security.token.TokenRenewer;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.Progressable;
+import org.apache.hadoop.util.Time;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
@@ -173,7 +174,7 @@ public class DFSClient implements java.i
final ReplaceDatanodeOnFailure dtpReplaceDatanodeOnFailure;
final FileSystem.Statistics stats;
final int hdfsTimeout; // timeout value for a DFS operation.
- final LeaseRenewer leaserenewer;
+ private final String authority;
final SocketCache socketCache;
final Conf dfsClientConf;
private Random r = new Random();
@@ -343,9 +344,9 @@ public class DFSClient implements java.i
this.hdfsTimeout = Client.getTimeout(conf);
this.ugi = UserGroupInformation.getCurrentUser();
- final String authority = nameNodeUri == null? "null": nameNodeUri.getAuthority();
- this.leaserenewer = LeaseRenewer.getInstance(authority, ugi, this);
- this.clientName = leaserenewer.getClientName(dfsClientConf.taskId);
+ this.authority = nameNodeUri == null? "null": nameNodeUri.getAuthority();
+ this.clientName = "DFSClient_" + dfsClientConf.taskId + "_" +
+ DFSUtil.getRandom().nextInt() + "_" + Thread.currentThread().getId();
this.socketCache = new SocketCache(dfsClientConf.socketCacheCapacity);
@@ -473,7 +474,30 @@ public class DFSClient implements java.i
}
}
- /** Put a file. */
+ /** Return the lease renewer instance. The renewer thread won't start
+ * until the first output stream is created. The same instance will
+ * be returned until all output streams are closed.
+ */
+ public synchronized LeaseRenewer getLeaseRenewer() throws IOException {
+ return LeaseRenewer.getInstance(authority, ugi, this);
+ }
+
+ /** Get a lease and start automatic renewal */
+ private void beginFileLease(final String src, final DFSOutputStream out)
+ throws IOException {
+ getLeaseRenewer().put(src, out, this);
+ }
+
+ /** Stop renewal of lease for the file. */
+ void endFileLease(final String src) throws IOException {
+ getLeaseRenewer().closeFile(src, this);
+ }
+
+
+ /** Put a file. Only called from LeaseRenewer, where proper locking is
+ * enforced to consistently update its local dfsclients array and
+ * client's filesBeingWritten map.
+ */
void putFileBeingWritten(final String src, final DFSOutputStream out) {
synchronized(filesBeingWritten) {
filesBeingWritten.put(src, out);
@@ -486,7 +510,7 @@ public class DFSClient implements java.i
}
}
- /** Remove a file. */
+ /** Remove a file. Only called from LeaseRenewer. */
void removeFileBeingWritten(final String src) {
synchronized(filesBeingWritten) {
filesBeingWritten.remove(src);
@@ -517,7 +541,7 @@ public class DFSClient implements java.i
if (filesBeingWritten.isEmpty()) {
return;
}
- lastLeaseRenewal = System.currentTimeMillis();
+ lastLeaseRenewal = Time.now();
}
}
@@ -534,7 +558,7 @@ public class DFSClient implements java.i
return true;
} catch (IOException e) {
// Abort if the lease has already expired.
- final long elapsed = System.currentTimeMillis() - getLastLeaseRenewal();
+ final long elapsed = Time.now() - getLastLeaseRenewal();
if (elapsed > HdfsConstants.LEASE_SOFTLIMIT_PERIOD) {
LOG.warn("Failed to renew lease for " + clientName + " for "
+ (elapsed/1000) + " seconds (>= soft-limit ="
@@ -562,6 +586,14 @@ public class DFSClient implements java.i
clientRunning = false;
closeAllFilesBeingWritten(true);
socketCache.clear();
+
+ try {
+ // remove reference to this client and stop the renewer,
+ // if there is no more clients under the renewer.
+ getLeaseRenewer().closeClient(this);
+ } catch (IOException ioe) {
+ LOG.info("Exception occurred while aborting the client. " + ioe);
+ }
closeConnectionToNamenode();
}
@@ -596,12 +628,13 @@ public class DFSClient implements java.i
* Close the file system, abandoning all of the leases and files being
* created and close connections to the namenode.
*/
+ @Override
public synchronized void close() throws IOException {
if(clientRunning) {
closeAllFilesBeingWritten(false);
socketCache.clear();
clientRunning = false;
- leaserenewer.closeClient(this);
+ getLeaseRenewer().closeClient(this);
// close connections to the namenode
closeConnectionToNamenode();
}
@@ -632,7 +665,7 @@ public class DFSClient implements java.i
* @see ClientProtocol#getServerDefaults()
*/
public FsServerDefaults getServerDefaults() throws IOException {
- long now = System.currentTimeMillis();
+ long now = Time.now();
if (now - serverDefaultsLastUpdate > SERVER_DEFAULTS_VALIDITY_PERIOD) {
serverDefaults = namenode.getServerDefaults();
serverDefaultsLastUpdate = now;
@@ -1062,7 +1095,7 @@ public class DFSClient implements java.i
final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this,
src, masked, flag, createParent, replication, blockSize, progress,
buffersize, dfsClientConf.createChecksum());
- leaserenewer.put(src, result, this);
+ beginFileLease(src, result);
return result;
}
@@ -1112,7 +1145,7 @@ public class DFSClient implements java.i
flag, createParent, replication, blockSize, progress, buffersize,
checksum);
}
- leaserenewer.put(src, result, this);
+ beginFileLease(src, result);
return result;
}
@@ -1198,7 +1231,7 @@ public class DFSClient implements java.i
+ src + " on client " + clientName);
}
final DFSOutputStream result = callAppend(stat, src, buffersize, progress);
- leaserenewer.put(src, result, this);
+ beginFileLease(src, result);
return result;
}
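
The DFSClient changes above stop caching a LeaseRenewer for the client's lifetime: the renewer is looked up on demand, and a lease is registered when an output stream is created (beginFileLease) and dropped when it is closed (endFileLease). A sketch of the resulting lifecycle through the public API, assuming a reachable HDFS at the configured fs.defaultFS:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class LeaseLifecycleExample {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // Creating the stream reaches DFSClient#beginFileLease, which lazily
        // obtains the LeaseRenewer and starts its daemon on first use.
        FSDataOutputStream out = fs.create(new Path("/tmp/lease-example"));
        out.writeBytes("hello");
        // Closing the stream reaches endFileLease; once the client has no
        // files being written, the renewer drops the client and its daemon
        // stops after the grace period.
        out.close();
        fs.close();
      }
    }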
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Fri Aug 3 19:00:15 2012
@@ -74,7 +74,7 @@ public class DFSConfigKeys extends Commo
public static final String DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY = "dfs.datanode.balance.bandwidthPerSec";
public static final long DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_DEFAULT = 1024*1024;
public static final String DFS_DATANODE_READAHEAD_BYTES_KEY = "dfs.datanode.readahead.bytes";
- public static final long DFS_DATANODE_READAHEAD_BYTES_DEFAULT = 0;
+ public static final long DFS_DATANODE_READAHEAD_BYTES_DEFAULT = 4 * 1024 * 1024; // 4MB
public static final String DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_KEY = "dfs.datanode.drop.cache.behind.writes";
public static final boolean DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_DEFAULT = false;
public static final String DFS_DATANODE_SYNC_BEHIND_WRITES_KEY = "dfs.datanode.sync.behind.writes";
@@ -203,6 +203,7 @@ public class DFSConfigKeys extends Commo
public static final String DFS_CLIENT_READ_PREFETCH_SIZE_KEY = "dfs.client.read.prefetch.size";
public static final String DFS_CLIENT_RETRY_WINDOW_BASE= "dfs.client.retry.window.base";
public static final String DFS_METRICS_SESSION_ID_KEY = "dfs.metrics.session-id";
+ public static final String DFS_METRICS_PERCENTILES_INTERVALS_KEY = "dfs.metrics.percentiles.intervals";
public static final String DFS_DATANODE_HOST_NAME_KEY = "dfs.datanode.hostname";
public static final String DFS_NAMENODE_HOSTS_KEY = "dfs.namenode.hosts";
public static final String DFS_NAMENODE_HOSTS_EXCLUDE_KEY = "dfs.namenode.hosts.exclude";
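
The readahead default above (HDFS-3697) applies whenever dfs.datanode.readahead.bytes is left unset. A short sketch of how the value resolves, using the key and default names from the diff:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class ReadaheadDefaultExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        long readahead = conf.getLong(
            DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_KEY,
            DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
        // Prints 4194304 (4 MB) unless the site configuration overrides it.
        System.out.println("readahead bytes = " + readahead);
      }
    }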
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java Fri Aug 3 19:00:15 2012
@@ -74,6 +74,9 @@ import org.apache.hadoop.security.token.
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.Progressable;
+import org.apache.hadoop.util.Time;
+
+import com.google.common.annotations.VisibleForTesting;
/****************************************************************
@@ -267,6 +270,7 @@ public class DFSOutputStream extends FSO
return seqno == HEART_BEAT_SEQNO;
}
+ @Override
public String toString() {
return "packet seqno:" + this.seqno +
" offsetInBlock:" + this.offsetInBlock +
@@ -395,8 +399,9 @@ public class DFSOutputStream extends FSO
* streamer thread is the only thread that opens streams to datanode,
* and closes them. Any error recovery is also done by this thread.
*/
+ @Override
public void run() {
- long lastPacket = System.currentTimeMillis();
+ long lastPacket = Time.now();
while (!streamerClosed && dfsClient.clientRunning) {
// if the Responder encountered an error, shutdown Responder
@@ -420,7 +425,7 @@ public class DFSOutputStream extends FSO
synchronized (dataQueue) {
// wait for a packet to be sent.
- long now = System.currentTimeMillis();
+ long now = Time.now();
while ((!streamerClosed && !hasError && dfsClient.clientRunning
&& dataQueue.size() == 0 &&
(stage != BlockConstructionStage.DATA_STREAMING ||
@@ -435,7 +440,7 @@ public class DFSOutputStream extends FSO
} catch (InterruptedException e) {
}
doSleep = false;
- now = System.currentTimeMillis();
+ now = Time.now();
}
if (streamerClosed || hasError || !dfsClient.clientRunning) {
continue;
@@ -518,7 +523,7 @@ public class DFSOutputStream extends FSO
errorIndex = 0;
throw e;
}
- lastPacket = System.currentTimeMillis();
+ lastPacket = Time.now();
if (one.isHeartbeatPacket()) { //heartbeat packet
}
@@ -653,6 +658,7 @@ public class DFSOutputStream extends FSO
this.targets = targets;
}
+ @Override
public void run() {
setName("ResponseProcessor for block " + block);
@@ -981,7 +987,7 @@ public class DFSOutputStream extends FSO
errorIndex = -1;
success = false;
- long startTime = System.currentTimeMillis();
+ long startTime = Time.now();
DatanodeInfo[] excluded = excludedNodes.toArray(
new DatanodeInfo[excludedNodes.size()]);
block = oldBlock;
@@ -1107,7 +1113,7 @@ public class DFSOutputStream extends FSO
int retries = dfsClient.getConf().nBlockWriteLocateFollowingRetry;
long sleeptime = 400;
while (true) {
- long localstart = System.currentTimeMillis();
+ long localstart = Time.now();
while (true) {
try {
return dfsClient.namenode.addBlock(src, dfsClient.clientName, block, excludedNodes);
@@ -1130,9 +1136,9 @@ public class DFSOutputStream extends FSO
} else {
--retries;
DFSClient.LOG.info("Exception while adding a block", e);
- if (System.currentTimeMillis() - localstart > 5000) {
+ if (Time.now() - localstart > 5000) {
DFSClient.LOG.info("Waiting for replication for "
- + (System.currentTimeMillis() - localstart) / 1000
+ + (Time.now() - localstart) / 1000
+ " seconds");
}
try {
@@ -1206,7 +1212,8 @@ public class DFSOutputStream extends FSO
//
// returns the list of targets, if any, that is being currently used.
//
- synchronized DatanodeInfo[] getPipeline() {
+ @VisibleForTesting
+ public synchronized DatanodeInfo[] getPipeline() {
if (streamer == null) {
return null;
}
@@ -1658,6 +1665,7 @@ public class DFSOutputStream extends FSO
streamer.setLastException(new IOException("Lease timeout of " +
(dfsClient.hdfsTimeout/1000) + " seconds expired."));
closeThreads(true);
+ dfsClient.endFileLease(src);
}
// shutdown datastreamer and responseprocessor threads.
@@ -1712,7 +1720,7 @@ public class DFSOutputStream extends FSO
ExtendedBlock lastBlock = streamer.getBlock();
closeThreads(false);
completeFile(lastBlock);
- dfsClient.leaserenewer.closeFile(src, dfsClient);
+ dfsClient.endFileLease(src);
} finally {
closed = true;
}
@@ -1721,14 +1729,14 @@ public class DFSOutputStream extends FSO
// should be called holding (this) lock since setTestFilename() may
// be called during unit tests
private void completeFile(ExtendedBlock last) throws IOException {
- long localstart = System.currentTimeMillis();
+ long localstart = Time.now();
boolean fileComplete = false;
while (!fileComplete) {
fileComplete = dfsClient.namenode.complete(src, dfsClient.clientName, last);
if (!fileComplete) {
if (!dfsClient.clientRunning ||
(dfsClient.hdfsTimeout > 0 &&
- localstart + dfsClient.hdfsTimeout < System.currentTimeMillis())) {
+ localstart + dfsClient.hdfsTimeout < Time.now())) {
String msg = "Unable to close file because dfsclient " +
" was unable to contact the HDFS servers." +
" clientRunning " + dfsClient.clientRunning +
@@ -1738,7 +1746,7 @@ public class DFSOutputStream extends FSO
}
try {
Thread.sleep(400);
- if (System.currentTimeMillis() - localstart > 5000) {
+ if (Time.now() - localstart > 5000) {
DFSClient.LOG.info("Could not complete file " + src + " retrying...");
}
} catch (InterruptedException ie) {
@@ -1747,11 +1755,13 @@ public class DFSOutputStream extends FSO
}
}
- void setArtificialSlowdown(long period) {
+ @VisibleForTesting
+ public void setArtificialSlowdown(long period) {
artificialSlowdown = period;
}
- synchronized void setChunksPerPacket(int value) {
+ @VisibleForTesting
+ public synchronized void setChunksPerPacket(int value) {
chunksPerPacket = Math.min(chunksPerPacket, value);
packetSize = PacketHeader.PKT_HEADER_LEN +
(checksum.getBytesPerChecksum() +
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java Fri Aug 3 19:00:15 2012
@@ -56,6 +56,7 @@ import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -110,6 +111,7 @@ public class DFSUtil {
* Address matcher for matching an address to local address
*/
static final AddressMatcher LOCAL_ADDRESS_MATCHER = new AddressMatcher() {
+ @Override
public boolean match(InetSocketAddress s) {
return NetUtils.isLocalAddress(s.getAddress());
};
@@ -117,7 +119,7 @@ public class DFSUtil {
/**
* Whether the pathname is valid. Currently prohibits relative paths,
- * and names which contain a ":" or "/"
+ * names which contain a ":" or "//", or other non-canonical paths.
*/
public static boolean isValidName(String src) {
// Path must be absolute.
@@ -126,15 +128,22 @@ public class DFSUtil {
}
// Check for ".." "." ":" "/"
- StringTokenizer tokens = new StringTokenizer(src, Path.SEPARATOR);
- while(tokens.hasMoreTokens()) {
- String element = tokens.nextToken();
+ String[] components = StringUtils.split(src, '/');
+ for (int i = 0; i < components.length; i++) {
+ String element = components[i];
if (element.equals("..") ||
element.equals(".") ||
(element.indexOf(":") >= 0) ||
(element.indexOf("/") >= 0)) {
return false;
}
+
+ // The string may start or end with a /, but not have
+ // "//" in the middle.
+ if (element.isEmpty() && i != components.length - 1 &&
+ i != 0) {
+ return false;
+ }
}
return true;
}
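
Reading the new logic off the diff, the tightened DFSUtil.isValidName accepts only absolute, canonical paths. A sketch of the expected outcomes (inferred from the patch, not independently verified):

    import org.apache.hadoop.hdfs.DFSUtil;

    public class ValidNameExample {
      public static void main(String[] args) {
        System.out.println(DFSUtil.isValidName("/foo/bar"));    // true: absolute, canonical
        System.out.println(DFSUtil.isValidName("/foo/bar/"));   // true: trailing "/" tolerated
        System.out.println(DFSUtil.isValidName("foo/bar"));     // false: relative path
        System.out.println(DFSUtil.isValidName("/foo//bar"));   // false: "//" in the middle
        System.out.println(DFSUtil.isValidName("/foo/../bar")); // false: ".." component
        System.out.println(DFSUtil.isValidName("/foo:bar"));    // false: ":" in a component
      }
    }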
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java Fri Aug 3 19:00:15 2012
@@ -113,6 +113,7 @@ public class HftpFileSystem extends File
protected static final ThreadLocal<SimpleDateFormat> df =
new ThreadLocal<SimpleDateFormat>() {
+ @Override
protected SimpleDateFormat initialValue() {
return getDateFormat();
}
@@ -240,6 +241,7 @@ public class HftpFileSystem extends File
//Renew TGT if needed
ugi.reloginFromKeytab();
return ugi.doAs(new PrivilegedExceptionAction<Token<?>>() {
+ @Override
public Token<?> run() throws IOException {
final String nnHttpUrl = nnSecureUri.toString();
Credentials c;
@@ -340,19 +342,28 @@ public class HftpFileSystem extends File
super(url);
}
- @Override
protected HttpURLConnection openConnection() throws IOException {
return (HttpURLConnection)URLUtils.openConnection(url);
}
/** Use HTTP Range header for specifying offset. */
@Override
- protected HttpURLConnection openConnection(final long offset) throws IOException {
+ protected HttpURLConnection connect(final long offset,
+ final boolean resolved) throws IOException {
final HttpURLConnection conn = openConnection();
conn.setRequestMethod("GET");
if (offset != 0L) {
conn.setRequestProperty("Range", "bytes=" + offset + "-");
}
+ conn.connect();
+
+ //Expects HTTP_OK or HTTP_PARTIAL response codes.
+ final int code = conn.getResponseCode();
+ if (offset != 0L && code != HttpURLConnection.HTTP_PARTIAL) {
+ throw new IOException("HTTP_PARTIAL expected, received " + code);
+ } else if (offset == 0L && code != HttpURLConnection.HTTP_OK) {
+ throw new IOException("HTTP_OK expected, received " + code);
+ }
return conn;
}
}
@@ -366,22 +377,6 @@ public class HftpFileSystem extends File
this(new RangeHeaderUrlOpener(url), new RangeHeaderUrlOpener(null));
}
- /** Expects HTTP_OK and HTTP_PARTIAL response codes. */
- @Override
- protected void checkResponseCode(final HttpURLConnection connection
- ) throws IOException {
- final int code = connection.getResponseCode();
- if (startPos != 0 && code != HttpURLConnection.HTTP_PARTIAL) {
- // We asked for a byte range but did not receive a partial content
- // response...
- throw new IOException("HTTP_PARTIAL expected, received " + code);
- } else if (startPos == 0 && code != HttpURLConnection.HTTP_OK) {
- // We asked for all bytes from the beginning but didn't receive a 200
- // response (none of the other 2xx codes are valid here)
- throw new IOException("HTTP_OK expected, received " + code);
- }
- }
-
@Override
protected URL getResolvedUrl(final HttpURLConnection connection) {
return connection.getURL();
@@ -402,6 +397,7 @@ public class HftpFileSystem extends File
ArrayList<FileStatus> fslist = new ArrayList<FileStatus>();
+ @Override
public void startElement(String ns, String localname, String qname,
Attributes attrs) throws SAXException {
if ("listing".equals(qname)) return;
@@ -541,6 +537,7 @@ public class HftpFileSystem extends File
public void setWorkingDirectory(Path f) { }
/** This optional operation is not yet supported. */
+ @Override
public FSDataOutputStream append(Path f, int bufferSize,
Progressable progress) throws IOException {
throw new IOException("Not supported");
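
The refactor above folds the old checkResponseCode logic into a single connect(offset, resolved) call. Shown standalone, this is the contract the opener now enforces (mirrors the diff; the helper name is illustrative):

    import java.io.IOException;
    import java.net.HttpURLConnection;

    class RangeResponseCheck {
      // A ranged request must be answered 206 (partial content);
      // a request from offset 0 must be answered 200.
      static void check(long offset, HttpURLConnection conn) throws IOException {
        final int code = conn.getResponseCode();
        if (offset != 0L && code != HttpURLConnection.HTTP_PARTIAL) {
          throw new IOException("HTTP_PARTIAL expected, received " + code);
        } else if (offset == 0L && code != HttpURLConnection.HTTP_OK) {
          throw new IOException("HTTP_OK expected, received " + code);
        }
      }
    }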
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java Fri Aug 3 19:00:15 2012
@@ -42,6 +42,7 @@ import org.apache.hadoop.classification.
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.web.URLUtils;
+import org.apache.hadoop.util.Time;
/**
* An implementation of a protocol for accessing filesystems over HTTPS. The
@@ -164,8 +165,7 @@ public class HsftpFileSystem extends Hft
final int warnDays = ExpWarnDays;
if (warnDays > 0) { // make sure only check once
ExpWarnDays = 0;
- long expTimeThreshold = warnDays * MM_SECONDS_PER_DAY
- + System.currentTimeMillis();
+ long expTimeThreshold = warnDays * MM_SECONDS_PER_DAY + Time.now();
X509Certificate[] clientCerts = (X509Certificate[]) conn
.getLocalCertificates();
if (clientCerts != null) {
@@ -175,7 +175,7 @@ public class HsftpFileSystem extends Hft
StringBuilder sb = new StringBuilder();
sb.append("\n Client certificate "
+ cert.getSubjectX500Principal().getName());
- int dayOffSet = (int) ((expTime - System.currentTimeMillis()) / MM_SECONDS_PER_DAY);
+ int dayOffSet = (int) ((expTime - Time.now()) / MM_SECONDS_PER_DAY);
sb.append(" have " + dayOffSet + " days to expire");
LOG.warn(sb.toString());
}
@@ -189,6 +189,7 @@ public class HsftpFileSystem extends Hft
* Dummy hostname verifier that is used to bypass hostname checking
*/
protected static class DummyHostnameVerifier implements HostnameVerifier {
+ @Override
public boolean verify(String hostname, SSLSession session) {
return true;
}
@@ -198,12 +199,15 @@ public class HsftpFileSystem extends Hft
* Dummy trustmanager that is used to trust all server certificates
*/
protected static class DummyTrustManager implements X509TrustManager {
+ @Override
public void checkClientTrusted(X509Certificate[] chain, String authType) {
}
+ @Override
public void checkServerTrusted(X509Certificate[] chain, String authType) {
}
+ @Override
public X509Certificate[] getAcceptedIssuers() {
return null;
}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java Fri Aug 3 19:00:15 2012
@@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.protocol.H
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
/**
* <p>
@@ -156,9 +157,6 @@ class LeaseRenewer {
}
}
- private final String clienNamePostfix = DFSUtil.getRandom().nextInt()
- + "_" + Thread.currentThread().getId();
-
/** The time in milliseconds that the map became empty. */
private long emptyTime = Long.MAX_VALUE;
/** A fixed lease renewal time period in milliseconds */
@@ -212,11 +210,6 @@ class LeaseRenewer {
return renewal;
}
- /** @return the client name for the given id. */
- String getClientName(final String id) {
- return "DFSClient_" + id + "_" + clienNamePostfix;
- }
-
/** Add a client. */
private synchronized void addClient(final DFSClient dfsc) {
for(DFSClient c : dfsclients) {
@@ -270,6 +263,11 @@ class LeaseRenewer {
synchronized boolean isRunning() {
return daemon != null && daemon.isAlive();
}
+
+ /** Does this renewer have nothing to renew? */
+ public boolean isEmpty() {
+ return dfsclients.isEmpty();
+ }
/** Used only by tests */
synchronized String getDaemonName() {
@@ -279,7 +277,7 @@ class LeaseRenewer {
/** Is the empty period longer than the grace period? */
private synchronized boolean isRenewerExpired() {
return emptyTime != Long.MAX_VALUE
- && System.currentTimeMillis() - emptyTime > gracePeriod;
+ && Time.now() - emptyTime > gracePeriod;
}
synchronized void put(final String src, final DFSOutputStream out,
@@ -330,6 +328,9 @@ class LeaseRenewer {
dfsc.removeFileBeingWritten(src);
synchronized(this) {
+ if (dfsc.isFilesBeingWrittenEmpty()) {
+ dfsclients.remove(dfsc);
+ }
//update emptyTime if necessary
if (emptyTime == Long.MAX_VALUE) {
for(DFSClient c : dfsclients) {
@@ -339,7 +340,7 @@ class LeaseRenewer {
}
}
//discover the first time that all file-being-written maps are empty.
- emptyTime = System.currentTimeMillis();
+ emptyTime = Time.now();
}
}
}
@@ -354,7 +355,7 @@ class LeaseRenewer {
}
if (emptyTime == Long.MAX_VALUE) {
//discover the first time that the client list is empty.
- emptyTime = System.currentTimeMillis();
+ emptyTime = Time.now();
}
}
@@ -427,10 +428,9 @@ class LeaseRenewer {
* when the lease period is half over.
*/
private void run(final int id) throws InterruptedException {
- for(long lastRenewed = System.currentTimeMillis();
- clientsRunning() && !Thread.interrupted();
+ for(long lastRenewed = Time.now(); !Thread.interrupted();
Thread.sleep(getSleepPeriod())) {
- final long elapsed = System.currentTimeMillis() - lastRenewed;
+ final long elapsed = Time.now() - lastRenewed;
if (elapsed >= getRenewalTime()) {
try {
renew();
@@ -438,7 +438,7 @@ class LeaseRenewer {
LOG.debug("Lease renewer daemon for " + clientsString()
+ " with renew id " + id + " executed");
}
- lastRenewed = System.currentTimeMillis();
+ lastRenewed = Time.now();
} catch (SocketTimeoutException ie) {
LOG.warn("Failed to renew lease for " + clientsString() + " for "
+ (elapsed/1000) + " seconds. Aborting ...", ie);
@@ -468,6 +468,13 @@ class LeaseRenewer {
//no longer the current daemon or expired
return;
}
+
+ // If no clients are in a running state, or none remain registered
+ // with this renewer, stop the daemon after the grace period.
+ if (!clientsRunning() && emptyTime == Long.MAX_VALUE) {
+ emptyTime = Time.now();
+ }
}
}
}
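[Editor's note] The LeaseRenewer hunks above replace System.currentTimeMillis() with Time.now(), drop a client as soon as its file-being-written map drains, and arm the expiry clock once no clients are running. A simplified, hypothetical model of that grace-period bookkeeping (only Time.now() is from the patch; the class and field names are illustrative):

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.util.Time;

// Simplified expiry bookkeeping: emptyTime is armed the first moment the
// client list drains, and expiry fires once the grace period elapses.
class GracePeriodTracker {
  private final long gracePeriod;
  private long emptyTime = Long.MAX_VALUE;  // MAX_VALUE means "not armed"
  private final List<Object> clients = new ArrayList<Object>();

  GracePeriodTracker(long gracePeriod) {
    this.gracePeriod = gracePeriod;
  }

  synchronized void removeClient(Object c) {
    clients.remove(c);
    if (clients.isEmpty() && emptyTime == Long.MAX_VALUE) {
      emptyTime = Time.now();               // first time the list is empty
    }
  }

  synchronized boolean isExpired() {
    return emptyTime != Long.MAX_VALUE
        && Time.now() - emptyTime > gracePeriod;
  }
}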
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java Fri Aug 3 19:00:15 2012
@@ -259,7 +259,7 @@ public class NameNodeProxies {
*
* Note that dfs.client.retry.max < 0 is not allowed.
*/
- private static RetryPolicy getDefaultRpcRetryPolicy(Configuration conf) {
+ public static RetryPolicy getDefaultRetryPolicy(Configuration conf) {
final RetryPolicy multipleLinearRandomRetry = getMultipleLinearRandomRetry(conf);
if (LOG.isDebugEnabled()) {
LOG.debug("multipleLinearRandomRetry = " + multipleLinearRandomRetry);
@@ -300,6 +300,13 @@ public class NameNodeProxies {
+ p.getClass().getSimpleName() + ", exception=" + e);
return p.shouldRetry(e, retries, failovers, isMethodIdempotent);
}
+
+ @Override
+ public String toString() {
+ return "RetryPolicy[" + multipleLinearRandomRetry + ", "
+ + RetryPolicies.TRY_ONCE_THEN_FAIL.getClass().getSimpleName()
+ + "]";
+ }
};
}
}
@@ -335,7 +342,7 @@ public class NameNodeProxies {
boolean withRetries) throws IOException {
RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class, ProtobufRpcEngine.class);
- final RetryPolicy defaultPolicy = getDefaultRpcRetryPolicy(conf);
+ final RetryPolicy defaultPolicy = getDefaultRetryPolicy(conf);
final long version = RPC.getProtocolVersion(ClientNamenodeProtocolPB.class);
ClientNamenodeProtocolPB proxy = RPC.getProtocolProxy(
ClientNamenodeProtocolPB.class, version, address, ugi, conf,
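[Editor's note] The NameNodeProxies hunk makes the default retry policy accessible as getDefaultRetryPolicy() and adds a toString() so logs can show which retry behavior is in force. A hypothetical sketch of that logging-friendly composition (names and the sample policy string are illustrative, not the actual RetryPolicies API):

// Hypothetical composed policy mirroring the toString() pattern above:
// report both the configured policy and the fixed fallback in one string.
class LoggableRetryPolicy {
  private final String configured; // e.g. a multipleLinearRandomRetry spec
  private final String fallback;   // e.g. "TryOnceThenFail"

  LoggableRetryPolicy(String configured, String fallback) {
    this.configured = configured;
    this.fallback = fallback;
  }

  @Override
  public String toString() {
    return "RetryPolicy[" + configured + ", " + fallback + "]";
  }

  public static void main(String[] args) {
    System.out.println(new LoggableRetryPolicy(
        "MultipleLinearRandomRetry[10x1000ms]", "TryOnceThenFail"));
  }
}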
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/SocketCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/SocketCache.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/SocketCache.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/SocketCache.java Fri Aug 3 19:00:15 2012
@@ -134,6 +134,7 @@ class SocketCache {
multimap.clear();
}
+ @Override
protected void finalize() {
clear();
}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java Fri Aug 3 19:00:15 2012
@@ -40,6 +40,7 @@ public class Block implements Writable,
WritableFactories.setFactory
(Block.class,
new WritableFactory() {
+ @Override
public Writable newInstance() { return new Block(); }
});
}
@@ -146,6 +147,7 @@ public class Block implements Writable,
/**
*/
+ @Override
public String toString() {
return getBlockName() + "_" + getGenerationStamp();
}
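[Editor's note] Block registers a factory with WritableFactories so deserialization code can create instances reflectively; the hunks above just annotate the factory method and toString(). A minimal standalone version of the same registration idiom (MyWritable and its single field are hypothetical):

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;

// Hypothetical Writable using the same static-registration idiom as Block.
public class MyWritable implements Writable {
  static { // register a ctor
    WritableFactories.setFactory(MyWritable.class, new WritableFactory() {
      @Override
      public Writable newInstance() { return new MyWritable(); }
    });
  }

  private long value;

  @Override
  public void write(DataOutput out) throws IOException {
    out.writeLong(value);
  }

  @Override
  public void readFields(DataInput in) throws IOException {
    value = in.readLong();
  }
}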
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java Fri Aug 3 19:00:15 2012
@@ -148,10 +148,12 @@ public class BlockListAsLongs implements
this.currentReplicaState = null;
}
+ @Override
public boolean hasNext() {
return currentBlockIndex < getNumberOfBlocks();
}
+ @Override
public Block next() {
block.set(blockId(currentBlockIndex),
blockLength(currentBlockIndex),
@@ -161,6 +163,7 @@ public class BlockListAsLongs implements
return block;
}
+ @Override
public void remove() {
throw new UnsupportedOperationException("Sorry. can't remove.");
}
@@ -178,6 +181,7 @@ public class BlockListAsLongs implements
/**
* Returns an iterator over blocks in the block report.
*/
+ @Override
public Iterator<Block> iterator() {
return getBlockReportIterator();
}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DSQuotaExceededException.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DSQuotaExceededException.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DSQuotaExceededException.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DSQuotaExceededException.java Fri Aug 3 19:00:15 2012
@@ -37,6 +37,7 @@ public class DSQuotaExceededException ex
super(quota, count);
}
+ @Override
public String getMessage() {
String msg = super.getMessage();
if (msg == null) {
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java Fri Aug 3 19:00:15 2012
@@ -150,6 +150,7 @@ public class DatanodeID implements Compa
return ipcPort;
}
+ @Override
public boolean equals(Object to) {
if (this == to) {
return true;
@@ -161,10 +162,12 @@ public class DatanodeID implements Compa
storageID.equals(((DatanodeID)to).getStorageID()));
}
+ @Override
public int hashCode() {
return getXferAddr().hashCode()^ storageID.hashCode();
}
+ @Override
public String toString() {
return getXferAddr();
}
@@ -187,6 +190,7 @@ public class DatanodeID implements Compa
* @param that
* @return as specified by Comparable
*/
+ @Override
public int compareTo(DatanodeID that) {
return getXferAddr().compareTo(that.getXferAddr());
}
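[Editor's note] The DatanodeID hunks annotate equals(), hashCode(), toString() and compareTo(); equality and hashing both key off the transfer address and storage ID, so hash-based collections stay consistent. An illustrative standalone pair showing the same contract (NodeId is hypothetical):

// Illustrative identity class mirroring DatanodeID: equals and hashCode
// must agree on the same fields, or HashMap/HashSet lookups misbehave.
class NodeId implements Comparable<NodeId> {
  private final String xferAddr;
  private final String storageId;

  NodeId(String xferAddr, String storageId) {
    this.xferAddr = xferAddr;
    this.storageId = storageId;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) return true;
    if (!(o instanceof NodeId)) return false;
    NodeId that = (NodeId) o;
    return xferAddr.equals(that.xferAddr) && storageId.equals(that.storageId);
  }

  @Override
  public int hashCode() {
    return xferAddr.hashCode() ^ storageId.hashCode();
  }

  @Override
  public int compareTo(NodeId that) {
    return xferAddr.compareTo(that.xferAddr); // order by address only
  }
}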
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java Fri Aug 3 19:00:15 2012
@@ -56,6 +56,7 @@ public class DatanodeInfo extends Datano
this.value = v;
}
+ @Override
public String toString() {
return value;
}
@@ -126,6 +127,7 @@ public class DatanodeInfo extends Datano
}
/** Network location name */
+ @Override
public String getName() {
return getXferAddr();
}
@@ -200,9 +202,11 @@ public class DatanodeInfo extends Datano
}
/** network location */
+ @Override
public synchronized String getNetworkLocation() {return location;}
/** Sets the network location */
+ @Override
public synchronized void setNetworkLocation(String location) {
this.location = NodeBase.normalize(location);
}
@@ -334,13 +338,17 @@ public class DatanodeInfo extends Datano
private transient Node parent; //its parent
/** Return this node's parent */
+ @Override
public Node getParent() { return parent; }
+ @Override
public void setParent(Node parent) {this.parent = parent;}
/** Return this node's level in the tree.
* E.g. the root of a tree returns 0 and its children return 1
*/
+ @Override
public int getLevel() { return level; }
+ @Override
public void setLevel(int level) {this.level = level;}
@Override
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java Fri Aug 3 19:00:15 2012
@@ -113,6 +113,7 @@ public class LocatedBlocks {
Comparator<LocatedBlock> comp =
new Comparator<LocatedBlock>() {
// Returns 0 iff a is inside b or b is inside a
+ @Override
public int compare(LocatedBlock a, LocatedBlock b) {
long aBeg = a.getStartOffset();
long bBeg = b.getStartOffset();
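[Editor's note] The comparator above returns 0 exactly when one block's byte range lies inside the other's, which is the equality notion a binary search over sorted, non-overlapping located blocks needs. A self-contained sketch of that comparison rule (BlockRange and both class names are illustrative):

import java.util.Comparator;

// Illustrative range type: [start, end) in bytes.
class BlockRange {
  final long start;
  final long end;
  BlockRange(long start, long length) {
    this.start = start;
    this.end = start + length;
  }
}

// Returns 0 iff one range is nested in the other, else orders by start.
class ContainmentComparator implements Comparator<BlockRange> {
  @Override
  public int compare(BlockRange a, BlockRange b) {
    boolean aInB = a.start >= b.start && a.end <= b.end;
    boolean bInA = b.start >= a.start && b.end <= a.end;
    if (aInB || bInA) {
      return 0; // nested ranges compare equal
    }
    return a.start < b.start ? -1 : 1;
  }
}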
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/NSQuotaExceededException.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/NSQuotaExceededException.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/NSQuotaExceededException.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/NSQuotaExceededException.java Fri Aug 3 19:00:15 2012
@@ -36,6 +36,7 @@ public final class NSQuotaExceededExcept
super(quota, count);
}
+ @Override
public String getMessage() {
String msg = super.getMessage();
if (msg == null) {
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java Fri Aug 3 19:00:15 2012
@@ -58,6 +58,7 @@ public class QuotaExceededException exte
this.pathName = path;
}
+ @Override
public String getMessage() {
return super.getMessage();
}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java Fri Aug 3 19:00:15 2012
@@ -131,6 +131,7 @@ public class ClientNamenodeProtocolTrans
rpcProxy = proxy;
}
+ @Override
public void close() {
RPC.stopProxy(rpcProxy);
}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java Fri Aug 3 19:00:15 2012
@@ -82,6 +82,7 @@ public class NamenodeProtocolTranslatorP
this.rpcProxy = rpcProxy;
}
+ @Override
public void close() {
RPC.stopProxy(rpcProxy);
}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java Fri Aug 3 19:00:15 2012
@@ -119,6 +119,7 @@ public class BlockTokenIdentifier extend
}
/** {@inheritDoc} */
+ @Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
@@ -135,12 +136,14 @@ public class BlockTokenIdentifier extend
}
/** {@inheritDoc} */
+ @Override
public int hashCode() {
return (int) expiryDate ^ keyId ^ (int) blockId ^ modes.hashCode()
^ (userId == null ? 0 : userId.hashCode())
^ (blockPoolId == null ? 0 : blockPoolId.hashCode());
}
+ @Override
public void readFields(DataInput in) throws IOException {
this.cache = null;
expiryDate = WritableUtils.readVLong(in);
@@ -155,6 +158,7 @@ public class BlockTokenIdentifier extend
}
}
+ @Override
public void write(DataOutput out) throws IOException {
WritableUtils.writeVLong(out, expiryDate);
WritableUtils.writeVInt(out, keyId);
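[Editor's note] readFields() and write() above use WritableUtils variable-length encodings for the identifier's numeric fields. A hedged round-trip sketch of that serialization style (the two sample fields stand in for expiryDate and keyId; this is not the identifier's full wire layout):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.WritableUtils;

// Round-trips a vlong/vint pair the way the identifier serializes its
// expiryDate and keyId; small magnitudes use fewer bytes on the wire.
public class VIntRoundTrip {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buf);
    WritableUtils.writeVLong(out, 1344020415000L); // e.g. an expiry date
    WritableUtils.writeVInt(out, 42);              // e.g. a key id

    DataInputStream in = new DataInputStream(
        new ByteArrayInputStream(buf.toByteArray()));
    System.out.println(WritableUtils.readVLong(in) + " "
        + WritableUtils.readVInt(in));
  }
}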
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java Fri Aug 3 19:00:15 2012
@@ -36,6 +36,7 @@ import org.apache.hadoop.io.WritableUtil
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.Time;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
@@ -116,7 +117,7 @@ public class BlockTokenSecretManager ext
}
@VisibleForTesting
- public void setSerialNo(int serialNo) {
+ public synchronized void setSerialNo(int serialNo) {
this.serialNo = (serialNo & LOW_MASK) | (nnIndex << 31);
}
@@ -137,10 +138,10 @@ public class BlockTokenSecretManager ext
* more.
*/
setSerialNo(serialNo + 1);
- currentKey = new BlockKey(serialNo, System.currentTimeMillis() + 2
+ currentKey = new BlockKey(serialNo, Time.now() + 2
* keyUpdateInterval + tokenLifetime, generateSecret());
setSerialNo(serialNo + 1);
- nextKey = new BlockKey(serialNo, System.currentTimeMillis() + 3
+ nextKey = new BlockKey(serialNo, Time.now() + 3
* keyUpdateInterval + tokenLifetime, generateSecret());
allKeys.put(currentKey.getKeyId(), currentKey);
allKeys.put(nextKey.getKeyId(), nextKey);
@@ -157,7 +158,7 @@ public class BlockTokenSecretManager ext
}
private synchronized void removeExpiredKeys() {
- long now = System.currentTimeMillis();
+ long now = Time.now();
for (Iterator<Map.Entry<Integer, BlockKey>> it = allKeys.entrySet()
.iterator(); it.hasNext();) {
Map.Entry<Integer, BlockKey> e = it.next();
@@ -189,7 +190,7 @@ public class BlockTokenSecretManager ext
* Update block keys if update time > update interval.
* @return true if the keys are updated.
*/
- public boolean updateKeys(final long updateTime) throws IOException {
+ public synchronized boolean updateKeys(final long updateTime) throws IOException {
if (updateTime > keyUpdateInterval) {
return updateKeys();
}
@@ -207,15 +208,15 @@ public class BlockTokenSecretManager ext
removeExpiredKeys();
// set final expiry date of retiring currentKey
allKeys.put(currentKey.getKeyId(), new BlockKey(currentKey.getKeyId(),
- System.currentTimeMillis() + keyUpdateInterval + tokenLifetime,
+ Time.now() + keyUpdateInterval + tokenLifetime,
currentKey.getKey()));
// update the estimated expiry date of new currentKey
- currentKey = new BlockKey(nextKey.getKeyId(), System.currentTimeMillis()
+ currentKey = new BlockKey(nextKey.getKeyId(), Time.now()
+ 2 * keyUpdateInterval + tokenLifetime, nextKey.getKey());
allKeys.put(currentKey.getKeyId(), currentKey);
// generate a new nextKey
setSerialNo(serialNo + 1);
- nextKey = new BlockKey(serialNo, System.currentTimeMillis() + 3
+ nextKey = new BlockKey(serialNo, Time.now() + 3
* keyUpdateInterval + tokenLifetime, generateSecret());
allKeys.put(nextKey.getKeyId(), nextKey);
return true;
@@ -290,7 +291,7 @@ public class BlockTokenSecretManager ext
}
private static boolean isExpired(long expiryDate) {
- return System.currentTimeMillis() > expiryDate;
+ return Time.now() > expiryDate;
}
/**
@@ -335,7 +336,7 @@ public class BlockTokenSecretManager ext
}
if (key == null)
throw new IllegalStateException("currentKey hasn't been initialized.");
- identifier.setExpiryDate(System.currentTimeMillis() + tokenLifetime);
+ identifier.setExpiryDate(Time.now() + tokenLifetime);
identifier.setKeyId(key.getKeyId());
if (LOG.isDebugEnabled()) {
LOG.debug("Generating block token for " + identifier.toString());
@@ -371,7 +372,7 @@ public class BlockTokenSecretManager ext
}
@VisibleForTesting
- public void setKeyUpdateIntervalForTesting(long millis) {
+ public synchronized void setKeyUpdateIntervalForTesting(long millis) {
this.keyUpdateInterval = millis;
}
@@ -381,7 +382,7 @@ public class BlockTokenSecretManager ext
}
@VisibleForTesting
- public int getSerialNoForTesting() {
+ public synchronized int getSerialNoForTesting() {
return serialNo;
}
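[Editor's note] The BlockTokenSecretManager hunks do two things: move to Time.now() and make the serial-number accessors synchronized, since updateKeys() mutates serialNo and the key maps under the object monitor. A simplified, hypothetical model of why the accessors need the same lock:

// Simplified model of the synchronization fix: once key rolling runs
// under the object monitor, accessors must take the same lock, or a
// reader may see a stale serialNo after a concurrent roll.
class KeyRoller {
  private int serialNo;

  synchronized void updateKeys() {
    serialNo++;               // mutation under the monitor
  }

  synchronized void setSerialNo(int serialNo) {
    this.serialNo = serialNo; // same lock as updateKeys()
  }

  synchronized int getSerialNo() {
    return serialNo;          // guaranteed to observe the latest roll
  }
}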
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSelector.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSelector.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSelector.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSelector.java Fri Aug 3 19:00:15 2012
@@ -31,6 +31,7 @@ import org.apache.hadoop.security.token.
@InterfaceAudience.Private
public class BlockTokenSelector implements TokenSelector<BlockTokenIdentifier> {
+ @Override
@SuppressWarnings("unchecked")
public Token<BlockTokenIdentifier> selectToken(Text service,
Collection<Token<? extends TokenIdentifier>> tokens) {
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/ExportedBlockKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/ExportedBlockKeys.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/ExportedBlockKeys.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/ExportedBlockKeys.java Fri Aug 3 19:00:15 2012
@@ -78,6 +78,7 @@ public class ExportedBlockKeys implement
static { // register a ctor
WritableFactories.setFactory(ExportedBlockKeys.class,
new WritableFactory() {
+ @Override
public Writable newInstance() {
return new ExportedBlockKeys();
}
@@ -86,6 +87,7 @@ public class ExportedBlockKeys implement
/**
*/
+ @Override
public void write(DataOutput out) throws IOException {
out.writeBoolean(isBlockTokenEnabled);
out.writeLong(keyUpdateInterval);
@@ -99,6 +101,7 @@ public class ExportedBlockKeys implement
/**
*/
+ @Override
public void readFields(DataInput in) throws IOException {
isBlockTokenEnabled = in.readBoolean();
keyUpdateInterval = in.readLong();