Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2012/11/18 23:31:54 UTC
svn commit: r1411007 [1/2] - in
/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project:
hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/
hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/ hadoop-hdfs/
hadoop-hdfs/src/contrib/bkjour...
Author: szetszwo
Date: Sun Nov 18 22:31:28 2012
New Revision: 1411007
URL: http://svn.apache.org/viewvc?rev=1411007&view=rev
Log:
Merge r1408927 through r1410997 from trunk
Modified:
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/UserProvider.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestUserProvider.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameCache.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/StringParam.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UserParam.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_context_handle.h
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_dfs.c
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_mkdir.c
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_rename.c
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_rmdir.c
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_unlink.c
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_init.c
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFetchImage.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameCache.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1408927-1410997
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/UserProvider.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/UserProvider.java?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/UserProvider.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/UserProvider.java Sun Nov 18 22:31:28 2012
@@ -31,6 +31,7 @@ import javax.ws.rs.core.Context;
import javax.ws.rs.ext.Provider;
import java.lang.reflect.Type;
import java.security.Principal;
+import java.text.MessageFormat;
import java.util.regex.Pattern;
@Provider
@@ -40,13 +41,26 @@ public class UserProvider extends Abstra
public static final String USER_NAME_PARAM = "user.name";
- public static final Pattern USER_PATTERN = Pattern.compile("[_a-zA-Z0-9]+");
+ public static final Pattern USER_PATTERN = Pattern.compile("^[A-Za-z_][A-Za-z0-9._-]*[$]?$");
- private static class UserParam extends StringParam {
+ static class UserParam extends StringParam {
public UserParam(String user) {
super(USER_NAME_PARAM, user, USER_PATTERN);
}
+
+ @Override
+ public String parseParam(String str) {
+ if (str != null) {
+ int len = str.length();
+ if (len < 1 || len > 31) {
+ throw new IllegalArgumentException(MessageFormat.format(
+ "Parameter [{0}], invalid value [{1}], it's length must be between 1 and 31",
+ getName(), str));
+ }
+ }
+ return super.parseParam(str);
+ }
}
@Override
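
Taken together, the new USER_PATTERN and the parseParam() override restrict
user names to the usual Unix convention: start with a letter or underscore,
continue with letters, digits, dot, underscore or hyphen, optionally end with
'$', and run 1 to 31 characters. A standalone sketch of that check (not part
of the patch; the class and method names are illustrative):

    import java.util.regex.Pattern;

    public class UserNameCheck {
      // Same pattern as the new UserProvider.USER_PATTERN above.
      static final Pattern USER_PATTERN =
          Pattern.compile("^[A-Za-z_][A-Za-z0-9._-]*[$]?$");

      // Mirrors the regex match plus the 1..31 length check in parseParam().
      static boolean isValid(String user) {
        return user != null && user.length() >= 1 && user.length() <= 31
            && USER_PATTERN.matcher(user).matches();
      }

      public static void main(String[] args) {
        System.out.println(isValid("a"));   // true: minimum length
        System.out.println(isValid("1x"));  // false: starts with a digit
        System.out.println(isValid("a$"));  // true: trailing $ is allowed
      }
    }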
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestUserProvider.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestUserProvider.java?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestUserProvider.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestUserProvider.java Sun Nov 18 22:31:28 2012
@@ -19,13 +19,18 @@
package org.apache.hadoop.lib.wsrs;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import java.security.Principal;
import javax.ws.rs.core.MultivaluedMap;
+import org.apache.hadoop.test.TestException;
+import org.apache.hadoop.test.TestExceptionHelper;
+import org.junit.Rule;
import org.junit.Test;
+import org.junit.rules.MethodRule;
import org.mockito.Mockito;
import org.slf4j.MDC;
@@ -35,6 +40,9 @@ import com.sun.jersey.core.spi.component
public class TestUserProvider {
+ @Rule
+ public MethodRule exceptionHelper = new TestExceptionHelper();
+
@Test
@SuppressWarnings("unchecked")
public void noUser() {
@@ -92,4 +100,51 @@ public class TestUserProvider {
assertEquals(up.getInjectable(null, null, Principal.class), up);
assertNull(up.getInjectable(null, null, String.class));
}
+
+ @Test
+ @TestException(exception = IllegalArgumentException.class)
+ public void userNameEmpty() {
+ UserProvider.UserParam userParam = new UserProvider.UserParam("username");
+ userParam.parseParam("");
+ }
+
+ @Test
+ @TestException(exception = IllegalArgumentException.class)
+ public void userNameTooLong() {
+ UserProvider.UserParam userParam = new UserProvider.UserParam("username");
+ userParam.parseParam("a123456789012345678901234567890x");
+ }
+
+ @Test
+ @TestException(exception = IllegalArgumentException.class)
+ public void userNameInvalidStart() {
+ UserProvider.UserParam userParam = new UserProvider.UserParam("username");
+ userParam.parseParam("1x");
+ }
+
+ @Test
+ @TestException(exception = IllegalArgumentException.class)
+ public void userNameInvalidDollarSign() {
+ UserProvider.UserParam userParam = new UserProvider.UserParam("username");
+ userParam.parseParam("1$x");
+ }
+
+ @Test
+ public void userNameMinLength() {
+ UserProvider.UserParam userParam = new UserProvider.UserParam("username");
+ assertNotNull(userParam.parseParam("a"));
+ }
+
+ @Test
+ public void userNameMaxLength() {
+ UserProvider.UserParam userParam = new UserProvider.UserParam("username");
+ assertNotNull(userParam.parseParam("a123456789012345678901234567890"));
+ }
+
+ @Test
+ public void userNameValidDollarSign() {
+ UserProvider.UserParam userParam = new UserProvider.UserParam("username");
+ assertNotNull(userParam.parseParam("a$"));
+ }
+
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Sun Nov 18 22:31:28 2012
@@ -159,6 +159,11 @@ Trunk (Unreleased)
HDFS-4153. Add START_MSG/SHUTDOWN_MSG for JournalNode. (liang xie via atm)
+ HDFS-3935. Add JournalNode to the start/stop scripts (Andy Isaacson via todd)
+
+ HDFS-4206. Change the fields in INode and its subclasses to private.
+ (szetszwo)
+
OPTIMIZATIONS
BUG FIXES
@@ -245,12 +250,12 @@ Trunk (Unreleased)
HDFS-4115. TestHDFSCLI.testAll fails one test due to number format.
(Trevor Robinson via suresh)
- HDFS-4106. BPServiceActor#lastHeartbeat, lastBlockReport and
- lastDeletedReport should be volatile. (Jing Zhao via suresh)
-
HDFS-4165. Faulty sanity check in FsDirectory.unprotectedSetQuota.
(Binglin Chang via suresh)
+ HDFS-4105. The SPNEGO user for secondary namenode should use the web
+ keytab. (Arpit Gupta via jitendra)
+
BREAKDOWN OF HDFS-3077 SUBTASKS
HDFS-3077. Quorum-based protocol for reading and writing edit logs.
@@ -467,6 +472,9 @@ Release 2.0.3-alpha - Unreleased
HDFS-1322. Document umask in DistributedFileSystem#mkdirs javadocs.
(Colin Patrick McCabe via eli)
+ HDFS-4038. Override toString() for BookKeeperEditLogInputStream.
+ (Vinay via umamahesh)
+
OPTIMIZATIONS
BUG FIXES
@@ -586,6 +594,29 @@ Release 2.0.3-alpha - Unreleased
HDFS-3921. NN will prematurely consider blocks missing when entering active
state while still in safe mode. (atm)
+ HDFS-4106. BPServiceActor#lastHeartbeat, lastBlockReport and
+ lastDeletedReport should be volatile. (Jing Zhao via suresh)
+
+ HDFS-4139. fuse-dfs RO mode still allows file truncation.
+ (Colin Patrick McCabe via eli)
+
+ HDFS-4104. dfs -test -d prints inappropriate error on nonexistent directory
+ (Andy Isaacson via daryn)
+
+ HDFS-3623. BKJM: zkLatchWaitTimeout hard coded to 6000. Make use of ZKSessionTimeout instead.
+ (umamahesh)
+
+ HDFS-4100. Fix all findbugs security warnings. (Liang Xie via eli)
+
+ HDFS-3507. DFS#isInSafeMode needs to execute only on Active NameNode.
+ (Vinay via atm)
+
+ HDFS-4156. Seeking to a negative position should throw an IOE.
+ (Eli Reisman via eli)
+
+ HDFS-4171. WebHDFS and HttpFs should accept only valid Unix user
+ names. (tucu)
+
Release 2.0.2-alpha - 2012-09-07
INCOMPATIBLE CHANGES
@@ -1963,6 +1994,18 @@ Release 2.0.0-alpha - 05-23-2012
HDFS-3039. Address findbugs and javadoc warnings on branch. (todd via atm)
+Release 0.23.6 - UNRELEASED
+
+ INCOMPATIBLE CHANGES
+
+ NEW FEATURES
+
+ IMPROVEMENTS
+
+ OPTIMIZATIONS
+
+ BUG FIXES
+
Release 0.23.5 - UNRELEASED
INCOMPATIBLE CHANGES
@@ -2000,7 +2043,12 @@ Release 0.23.5 - UNRELEASED
HDFS-4172. namenode does not URI-encode parameters when building URI for
datanode request (Derek Dagit via bobby)
-Release 0.23.4 - UNRELEASED
+ HDFS-4182. SecondaryNameNode leaks NameCache entries (bobby)
+
+ HDFS-4186. logSync() is called with the write lock held while releasing
+ lease (Kihwal Lee via daryn)
+
+Release 0.23.4
INCOMPATIBLE CHANGES
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java Sun Nov 18 22:31:28 2012
@@ -129,8 +129,9 @@ class BookKeeperEditLogInputStream exten
@Override
public String getName() {
- return String.format("BookKeeper[%s,first=%d,last=%d]",
- lh.toString(), firstTxId, lastTxId);
+ return String.format(
+ "BookKeeperLedger[ledgerId=%d,firstTxId=%d,lastTxId=%d]", lh.getId(),
+ firstTxId, lastTxId);
}
@Override
@@ -157,6 +158,11 @@ class BookKeeperEditLogInputStream exten
}
}
+ @Override
+ public String toString() {
+ return ("BookKeeperEditLogInputStream {" + this.getName() + "}");
+ }
+
/**
* Input stream implementation which can be used by
* FSEditLogOp.Reader
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java Sun Nov 18 22:31:28 2012
@@ -180,9 +180,16 @@ public class BookKeeperJournalManager im
try {
zkConnectLatch = new CountDownLatch(1);
- zkc = new ZooKeeper(zkConnect, conf.getInt(BKJM_ZK_SESSION_TIMEOUT,
- BKJM_ZK_SESSION_TIMEOUT_DEFAULT), new ZkConnectionWatcher());
- if (!zkConnectLatch.await(6000, TimeUnit.MILLISECONDS)) {
+ int bkjmZKSessionTimeout = conf.getInt(BKJM_ZK_SESSION_TIMEOUT,
+ BKJM_ZK_SESSION_TIMEOUT_DEFAULT);
+ zkc = new ZooKeeper(zkConnect, bkjmZKSessionTimeout,
+ new ZkConnectionWatcher());
+ // Configured zk session timeout + some extra grace period (here
+ // BKJM_ZK_SESSION_TIMEOUT_DEFAULT is used as the grace period)
+ int zkConnectionLatchTimeout = bkjmZKSessionTimeout
+ + BKJM_ZK_SESSION_TIMEOUT_DEFAULT;
+ if (!zkConnectLatch
+ .await(zkConnectionLatchTimeout, TimeUnit.MILLISECONDS)) {
throw new IOException("Error connecting to zookeeper");
}
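
The latch now waits for the configured session timeout plus a grace period
instead of a hard-coded 6000 ms. A minimal sketch of the arithmetic, where
both numbers are assumed values chosen for illustration:

    // Illustrative values only; both constants are assumptions here.
    int bkjmZKSessionTimeout = 10000; // conf value for BKJM_ZK_SESSION_TIMEOUT
    int gracePeriod = 3000;           // BKJM_ZK_SESSION_TIMEOUT_DEFAULT
    int zkConnectionLatchTimeout =
        bkjmZKSessionTimeout + gracePeriod; // 13000 ms passed to await()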
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh Sun Nov 18 22:31:28 2012
@@ -86,6 +86,21 @@ if [ -n "$SECONDARY_NAMENODES" ]; then
fi
#---------------------------------------------------------
+# quorumjournal nodes (if any)
+
+SHARED_EDITS_DIR=$($HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.namenode.shared.edits.dir 2>&-)
+
+case "$SHARED_EDITS_DIR" in
+qjournal://*)
+ JOURNAL_NODES=$(echo "$SHARED_EDITS_DIR" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g')
+ echo "Starting journal nodes [$JOURNAL_NODES]"
+ "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+ --config "$HADOOP_CONF_DIR" \
+ --hostnames "$JOURNAL_NODES" \
+ --script "$bin/hdfs" start journalnode ;;
+esac
+
+#---------------------------------------------------------
# ZK Failover controllers, if auto-HA is enabled
AUTOHA_ENABLED=$($HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.ha.automatic-failover.enabled)
if [ "$(echo "$AUTOHA_ENABLED" | tr A-Z a-z)" = "true" ]; then
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh Sun Nov 18 22:31:28 2012
@@ -62,6 +62,21 @@ if [ -n "$SECONDARY_NAMENODES" ]; then
fi
#---------------------------------------------------------
+# quorumjournal nodes (if any)
+
+SHARED_EDITS_DIR=$($HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.namenode.shared.edits.dir 2>&-)
+
+case "$SHARED_EDITS_DIR" in
+qjournal://*)
+ JOURNAL_NODES=$(echo "$SHARED_EDITS_DIR" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g')
+ echo "Stopping journal nodes [$JOURNAL_NODES]"
+ "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+ --config "$HADOOP_CONF_DIR" \
+ --hostnames "$JOURNAL_NODES" \
+ --script "$bin/hdfs" stop journalnode ;;
+esac
+
+#---------------------------------------------------------
# ZK Failover controllers, if auto-HA is enabled
AUTOHA_ENABLED=$($HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.ha.automatic-failover.enabled)
if [ "$(echo "$AUTOHA_ENABLED" | tr A-Z a-z)" = "true" ]; then
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1408927-1410997
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java Sun Nov 18 22:31:28 2012
@@ -1883,10 +1883,25 @@ public class DFSClient implements java.i
/**
* Enter, leave or get safe mode.
*
- * @see ClientProtocol#setSafeMode(HdfsConstants.SafeModeAction)
+ * @see ClientProtocol#setSafeMode(HdfsConstants.SafeModeAction,boolean)
*/
public boolean setSafeMode(SafeModeAction action) throws IOException {
- return namenode.setSafeMode(action);
+ return setSafeMode(action, false);
+ }
+
+ /**
+ * Enter, leave or get safe mode.
+ *
+ * @param action
+ * One of SafeModeAction.GET, SafeModeAction.ENTER and
+ * SafeModeAction.LEAVE
+ * @param isChecked
+ * If true, check only the active namenode's safemode status; else
+ * check the first namenode's status.
+ * @see ClientProtocol#setSafeMode(HdfsConstants.SafeModeAction,boolean)
+ */
+ public boolean setSafeMode(SafeModeAction action, boolean isChecked) throws IOException{
+ return namenode.setSafeMode(action, isChecked);
}
/**
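A minimal usage sketch of the new overload (the client setup is assumed, not
shown in the patch):

    import java.io.IOException;
    import org.apache.hadoop.hdfs.DFSClient;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;

    // isChecked=true queries only the active NN's safemode state in an HA
    // pair; false keeps the old behavior of asking the first configured NN.
    class SafeModeProbe {
      static boolean activeNNInSafeMode(DFSClient client) throws IOException {
        return client.setSafeMode(SafeModeAction.SAFEMODE_GET, true);
      }
    }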
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java Sun Nov 18 22:31:28 2012
@@ -1076,6 +1076,9 @@ public class DFSInputStream extends FSIn
if (targetPos > getFileLength()) {
throw new IOException("Cannot seek after EOF");
}
+ if (targetPos < 0) {
+ throw new IOException("Cannot seek to negative offset");
+ }
if (closed) {
throw new IOException("Stream is closed!");
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java Sun Nov 18 22:31:28 2012
@@ -627,11 +627,27 @@ public class DistributedFileSystem exten
* Enter, leave or get safe mode.
*
* @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setSafeMode(
- * HdfsConstants.SafeModeAction)
+ * HdfsConstants.SafeModeAction,boolean)
*/
public boolean setSafeMode(HdfsConstants.SafeModeAction action)
throws IOException {
- return dfs.setSafeMode(action);
+ return setSafeMode(action, false);
+ }
+
+ /**
+ * Enter, leave or get safe mode.
+ *
+ * @param action
+ * One of SafeModeAction.ENTER, SafeModeAction.LEAVE and
+ * SafeModeAction.GET
+ * @param isChecked
+ * If true, check only the active NN's status; else check the first
+ * NN's status
+ * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setSafeMode(SafeModeAction, boolean)
+ */
+ public boolean setSafeMode(HdfsConstants.SafeModeAction action,
+ boolean isChecked) throws IOException {
+ return dfs.setSafeMode(action, isChecked);
}
/**
@@ -878,13 +894,15 @@ public class DistributedFileSystem exten
}
/**
- * Utility function that returns if the NameNode is in safemode or not.
- *
+ * Utility function that returns whether the NameNode is in safemode. In HA
+ * mode, this API returns only the active NN's safemode status.
+ *
* @return true if NameNode is in safemode, false otherwise.
- * @throws IOException when there is an issue communicating with the NameNode
+ * @throws IOException
+ * when there is an issue communicating with the NameNode
*/
public boolean isInSafeMode() throws IOException {
- return setSafeMode(SafeModeAction.SAFEMODE_GET);
+ return setSafeMode(SafeModeAction.SAFEMODE_GET, true);
}
/**
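A hedged sketch of the user-visible effect (configuration and cluster wiring
assumed):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    // After this change, isInSafeMode() reflects the active NameNode's
    // state in an HA pair, not whichever NN happens to be listed first.
    class SafeModeStatus {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        if (fs instanceof DistributedFileSystem) {
          System.out.println("Active NN in safemode: "
              + ((DistributedFileSystem) fs).isInSafeMode());
        }
      }
    }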
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Sun Nov 18 22:31:28 2012
@@ -621,7 +621,7 @@ public interface ClientProtocol {
* <p>
* Safe mode is entered automatically at name node startup.
* Safe mode can also be entered manually using
- * {@link #setSafeMode(HdfsConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)}.
+ * {@link #setSafeMode(HdfsConstants.SafeModeAction,boolean) setSafeMode(SafeModeAction.SAFEMODE_ENTER,false)}.
* <p>
* At startup the name node accepts data node reports collecting
* information about block locations.
@@ -637,11 +637,11 @@ public interface ClientProtocol {
* Then the name node leaves safe mode.
* <p>
* If safe mode is turned on manually using
- * {@link #setSafeMode(HdfsConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_ENTER)}
+ * {@link #setSafeMode(HdfsConstants.SafeModeAction,boolean) setSafeMode(SafeModeAction.SAFEMODE_ENTER,false)}
* then the name node stays in safe mode until it is manually turned off
- * using {@link #setSafeMode(HdfsConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_LEAVE)}.
+ * using {@link #setSafeMode(HdfsConstants.SafeModeAction,boolean) setSafeMode(SafeModeAction.SAFEMODE_LEAVE,false)}.
* Current state of the name node can be verified using
- * {@link #setSafeMode(HdfsConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)}
+ * {@link #setSafeMode(HdfsConstants.SafeModeAction,boolean) setSafeMode(SafeModeAction.SAFEMODE_GET,false)}
* <h4>Configuration parameters:</h4>
* <tt>dfs.safemode.threshold.pct</tt> is the threshold parameter.<br>
* <tt>dfs.safemode.extension</tt> is the safe mode extension parameter.<br>
@@ -659,12 +659,15 @@ public interface ClientProtocol {
* @param action <ul> <li>0 leave safe mode;</li>
* <li>1 enter safe mode;</li>
* <li>2 get safe mode state.</li></ul>
+ * @param isChecked If true, the action will be done only on the active NN.
+ *
* @return <ul><li>0 if the safe mode is OFF or</li>
* <li>1 if the safe mode is ON.</li></ul>
*
* @throws IOException
*/
- public boolean setSafeMode(HdfsConstants.SafeModeAction action)
+ @Idempotent
+ public boolean setSafeMode(HdfsConstants.SafeModeAction action, boolean isChecked)
throws IOException;
/**
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java Sun Nov 18 22:31:28 2012
@@ -535,7 +535,8 @@ public class ClientNamenodeProtocolServe
public SetSafeModeResponseProto setSafeMode(RpcController controller,
SetSafeModeRequestProto req) throws ServiceException {
try {
- boolean result = server.setSafeMode(PBHelper.convert(req.getAction()));
+ boolean result = server.setSafeMode(PBHelper.convert(req.getAction()),
+ req.getChecked());
return SetSafeModeResponseProto.newBuilder().setResult(result).build();
} catch (IOException e) {
throw new ServiceException(e);
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java Sun Nov 18 22:31:28 2012
@@ -511,9 +511,9 @@ public class ClientNamenodeProtocolTrans
}
@Override
- public boolean setSafeMode(SafeModeAction action) throws IOException {
- SetSafeModeRequestProto req = SetSafeModeRequestProto.newBuilder().
- setAction(PBHelper.convert(action)).build();
+ public boolean setSafeMode(SafeModeAction action, boolean isChecked) throws IOException {
+ SetSafeModeRequestProto req = SetSafeModeRequestProto.newBuilder()
+ .setAction(PBHelper.convert(action)).setChecked(isChecked).build();
try {
return rpcProxy.setSafeMode(null, req).getResult();
} catch (ServiceException e) {
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java Sun Nov 18 22:31:28 2012
@@ -31,6 +31,7 @@ import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
+import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -139,8 +140,9 @@ public class GetJournalEditServlet exten
HttpServletRequest request, HttpServletResponse response)
throws IOException {
String myStorageInfoString = storage.toColonSeparatedString();
- String theirStorageInfoString = request.getParameter(STORAGEINFO_PARAM);
-
+ String theirStorageInfoString = StringEscapeUtils.escapeHtml(
+ request.getParameter(STORAGEINFO_PARAM));
+
if (theirStorageInfoString != null
&& !myStorageInfoString.equals(theirStorageInfoString)) {
String msg = "This node has storage info '" + myStorageInfoString
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java Sun Nov 18 22:31:28 2012
@@ -259,7 +259,8 @@ public class DatanodeJspHelper {
int namenodeInfoPort = -1;
if (namenodeInfoPortStr != null)
namenodeInfoPort = Integer.parseInt(namenodeInfoPortStr);
- final String nnAddr = req.getParameter(JspHelper.NAMENODE_ADDRESS);
+ final String nnAddr = StringEscapeUtils.escapeHtml(
+ req.getParameter(JspHelper.NAMENODE_ADDRESS));
if (nnAddr == null){
out.print(JspHelper.NAMENODE_ADDRESS + " url param is null");
return;
@@ -637,7 +638,7 @@ public class DatanodeJspHelper {
UserGroupInformation ugi = JspHelper.getUGI(req, conf);
String namenodeInfoPortStr = req.getParameter("namenodeInfoPort");
- String nnAddr = req.getParameter(JspHelper.NAMENODE_ADDRESS);
+ String nnAddr = StringEscapeUtils.escapeHtml(req.getParameter(JspHelper.NAMENODE_ADDRESS));
int namenodeInfoPort = -1;
if (namenodeInfoPortStr != null)
namenodeInfoPort = Integer.parseInt(namenodeInfoPortStr);
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Sun Nov 18 22:31:28 2012
@@ -64,6 +64,7 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.util.ByteArray;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
/*************************************************
@@ -126,6 +127,12 @@ public class FSDirectory implements Clos
this.cond = dirLock.writeLock().newCondition();
this.namesystem = ns;
+ int threshold = conf.getInt(
+ DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY,
+ DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT);
+ NameNode.LOG.info("Caching file names occuring more than " + threshold
+ + " times");
+ this.nameCache = new NameCache<ByteArray>(threshold);
reset();
this.fsImage = fsImage;
@@ -141,13 +148,6 @@ public class FSDirectory implements Clos
this.maxDirItems = conf.getInt(
DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY,
DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_DEFAULT);
-
- int threshold = conf.getInt(
- DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY,
- DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT);
- NameNode.LOG.info("Caching file names occuring more than " + threshold
- + " times");
- nameCache = new NameCache<ByteArray>(threshold);
}
private FSNamesystem getFSNamesystem() {
@@ -178,6 +178,12 @@ public class FSDirectory implements Clos
writeUnlock();
}
}
+
+ //This is for testing purposes only
+ @VisibleForTesting
+ boolean isReady() {
+ return ready;
+ }
// exposed for unit tests
protected void setReady(boolean flag) {
@@ -303,14 +309,14 @@ public class FSDirectory implements Clos
return newNode;
}
- INodeDirectory addToParent(byte[] src, INodeDirectory parentINode,
+ INodeDirectory addToParent(INodeDirectory parentINode,
INode newNode, boolean propagateModTime) {
// NOTE: This does not update space counts for parents
INodeDirectory newParent = null;
writeLock();
try {
try {
- newParent = rootDir.addToParent(src, newNode, parentINode,
+ newParent = rootDir.addToParent(newNode, parentINode,
propagateModTime);
cacheName(newNode);
} catch (FileNotFoundException e) {
@@ -539,7 +545,7 @@ public class FSDirectory implements Clos
return true;
}
if (srcInode.isSymlink() &&
- dst.equals(((INodeSymlink)srcInode).getLinkValue())) {
+ dst.equals(((INodeSymlink)srcInode).getSymlinkString())) {
throw new FileAlreadyExistsException(
"Cannot rename symlink "+src+" to its target "+dst);
}
@@ -667,7 +673,7 @@ public class FSDirectory implements Clos
"The source "+src+" and destination "+dst+" are the same");
}
if (srcInode.isSymlink() &&
- dst.equals(((INodeSymlink)srcInode).getLinkValue())) {
+ dst.equals(((INodeSymlink)srcInode).getSymlinkString())) {
throw new FileAlreadyExistsException(
"Cannot rename symlink "+src+" to its target "+dst);
}
@@ -1291,7 +1297,7 @@ public class FSDirectory implements Clos
HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
for (int i=0; i<numOfListing; i++) {
INode cur = contents.get(startChild+i);
- listing[i] = createFileStatus(cur.name, cur, needLocation);
+ listing[i] = createFileStatus(cur.getLocalNameBytes(), cur, needLocation);
}
return new DirectoryListing(
listing, totalNumChildren-startChild-numOfListing);
@@ -1519,7 +1525,7 @@ public class FSDirectory implements Clos
for(int i=0; i < numOfINodes; i++) {
if (inodes[i].isQuotaSet()) { // a directory with quota
INodeDirectoryWithQuota node =(INodeDirectoryWithQuota)inodes[i];
- node.unprotectedUpdateNumItemsInTree(nsDelta, dsDelta);
+ node.addSpaceConsumed(nsDelta, dsDelta);
}
}
}
@@ -2142,11 +2148,18 @@ public class FSDirectory implements Clos
* Reset the entire namespace tree.
*/
void reset() {
- final INodeDirectoryWithQuota r = new INodeDirectoryWithQuota(
- INodeDirectory.ROOT_NAME,
- getFSNamesystem().createFsOwnerPermissions(new FsPermission((short)0755)),
- Long.MAX_VALUE, UNKNOWN_DISK_SPACE);
- rootDir = INodeDirectorySnapshottable.newInstance(r, 0);
+ writeLock();
+ try {
+ setReady(false);
+ final INodeDirectoryWithQuota r = new INodeDirectoryWithQuota(
+ INodeDirectory.ROOT_NAME,
+ getFSNamesystem().createFsOwnerPermissions(new FsPermission((short)0755)),
+ Long.MAX_VALUE, UNKNOWN_DISK_SPACE);
+ rootDir = INodeDirectorySnapshottable.newInstance(r, 0);
+ nameCache.reset();
+ } finally {
+ writeUnlock();
+ }
}
/**
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java Sun Nov 18 22:31:28 2012
@@ -257,7 +257,8 @@ class FSImageFormat {
INode newNode = loadINode(in); // read rest of inode
// add to parent
- namesystem.dir.addToParent(localName, parent, newNode, false);
+ newNode.setLocalName(localName);
+ namesystem.dir.addToParent(parent, newNode, false);
}
return numChildren;
}
@@ -291,8 +292,8 @@ class FSImageFormat {
}
// add new inode
- parentINode = fsDir.addToParent(pathComponents[pathComponents.length-1],
- parentINode, newNode, false);
+ newNode.setLocalName(pathComponents[pathComponents.length-1]);
+ parentINode = fsDir.addToParent(parentINode, newNode, false);
}
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java Sun Nov 18 22:31:28 2012
@@ -168,7 +168,7 @@ public class FSImageSerialization {
out.writeLong(0); // access time
out.writeLong(0); // preferred block size
out.writeInt(-2); // # of blocks
- Text.writeString(out, ((INodeSymlink)node).getLinkValue());
+ Text.writeString(out, ((INodeSymlink)node).getSymlinkString());
filePerm.fromShort(node.getFsPermissionShort());
PermissionStatus.write(out, node.getUserName(),
node.getGroupName(),
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Sun Nov 18 22:31:28 2012
@@ -1731,16 +1731,25 @@ public class FSNamesystem implements Nam
short replication, long blockSize) throws AccessControlException,
SafeModeException, FileAlreadyExistsException, UnresolvedLinkException,
FileNotFoundException, ParentNotDirectoryException, IOException {
+ boolean skipSync = false;
writeLock();
try {
checkOperation(OperationCategory.WRITE);
startFileInternal(src, permissions, holder, clientMachine, flag,
createParent, replication, blockSize);
+ } catch (StandbyException se) {
+ skipSync = true;
+ throw se;
} finally {
writeUnlock();
- }
- getEditLog().logSync();
+ // There might be transactions logged while trying to recover the lease.
+ // They need to be sync'ed even when an exception was thrown.
+ if (!skipSync) {
+ getEditLog().logSync();
+ }
+ }
+
if (auditLog.isInfoEnabled() && isExternalInvocation()) {
final HdfsFileStatus stat = dir.getFileInfo(src, false);
logAuditEvent(UserGroupInformation.getCurrentUser(),
@@ -1922,6 +1931,7 @@ public class FSNamesystem implements Nam
*/
boolean recoverLease(String src, String holder, String clientMachine)
throws IOException {
+ boolean skipSync = false;
writeLock();
try {
checkOperation(OperationCategory.WRITE);
@@ -1943,8 +1953,16 @@ public class FSNamesystem implements Nam
}
recoverLeaseInternal(inode, src, holder, clientMachine, true);
+ } catch (StandbyException se) {
+ skipSync = true;
+ throw se;
} finally {
writeUnlock();
+ // There might be transactions logged while trying to recover the lease.
+ // They need to be sync'ed even when an exception was thrown.
+ if (!skipSync) {
+ getEditLog().logSync();
+ }
}
return false;
}
@@ -2047,6 +2065,7 @@ public class FSNamesystem implements Nam
throws AccessControlException, SafeModeException,
FileAlreadyExistsException, FileNotFoundException,
ParentNotDirectoryException, IOException {
+ boolean skipSync = false;
if (!supportAppends) {
throw new UnsupportedOperationException(
"Append is not enabled on this NameNode. Use the " +
@@ -2060,10 +2079,17 @@ public class FSNamesystem implements Nam
lb = startFileInternal(src, null, holder, clientMachine,
EnumSet.of(CreateFlag.APPEND),
false, blockManager.maxReplication, 0);
+ } catch (StandbyException se) {
+ skipSync = true;
+ throw se;
} finally {
writeUnlock();
+ // There might be transactions logged while trying to recover the lease.
+ // They need to be sync'ed even when an exception was thrown.
+ if (!skipSync) {
+ getEditLog().logSync();
+ }
}
- getEditLog().logSync();
if (lb != null) {
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* NameSystem.appendFile: file "
@@ -3027,7 +3053,8 @@ public class FSNamesystem implements Nam
* RecoveryInProgressException if lease recovery is in progress.<br>
* IOException in case of an error.
* @return true if file has been successfully finalized and closed or
- * false if block recovery has been initiated
+ * false if block recovery has been initiated. Since the lease owner
+ * has been changed and logged, the caller should call logSync().
*/
boolean internalReleaseLease(Lease lease, String src,
String recoveryLeaseHolder) throws AlreadyBeingCreatedException,
@@ -3148,6 +3175,7 @@ public class FSNamesystem implements Nam
assert hasWriteLock();
if(newHolder == null)
return lease;
+ // The following transaction is not synced. Make sure it's sync'ed later.
logReassignLease(lease.getHolder(), src, newHolder);
return reassignLeaseInternal(lease, src, newHolder, pendingFile);
}
@@ -5257,13 +5285,8 @@ public class FSNamesystem implements Nam
private void logReassignLease(String leaseHolder, String src,
String newHolder) {
- writeLock();
- try {
- getEditLog().logReassignLease(leaseHolder, src, newHolder);
- } finally {
- writeUnlock();
- }
- getEditLog().logSync();
+ assert hasWriteLock();
+ getEditLog().logReassignLease(leaseHolder, src, newHolder);
}
/**
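The hunks above all apply the same HDFS-4186 fix: a transaction such as the
lease reassignment may be logged while the FSNamesystem write lock is held,
but the potentially slow logSync() now runs after the lock is released, and
is skipped only when a StandbyException means nothing was logged. A
self-contained sketch of the pattern, with EditLog and StandbyException as
simplified stand-ins for the real collaborators:

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    class LogSyncPattern {
      static class StandbyException extends Exception {}
      static class EditLog {
        // append a transaction (not yet durable); fails fast on a standby
        void logReassignLease() throws StandbyException {}
        // flush appended transactions to stable storage; may block
        void logSync() {}
      }

      private final ReentrantReadWriteLock fsLock =
          new ReentrantReadWriteLock();
      private final EditLog editLog = new EditLog();

      void recoverLease() throws StandbyException {
        boolean skipSync = false;
        fsLock.writeLock().lock();
        try {
          editLog.logReassignLease(); // logged under the write lock
        } catch (StandbyException se) {
          skipSync = true;            // a standby logged nothing to sync
          throw se;
        } finally {
          fsLock.writeLock().unlock();
          if (!skipSync) {
            editLog.logSync();        // the slow sync runs outside the lock
          }
        }
      }
    }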
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java Sun Nov 18 22:31:28 2012
@@ -49,23 +49,12 @@ public abstract class INode implements C
static final ReadOnlyList<INode> EMPTY_READ_ONLY_LIST
= ReadOnlyList.Util.emptyList();
- /**
- * The inode name is in java UTF8 encoding;
- * The name in HdfsFileStatus should keep the same encoding as this.
- * if this encoding is changed, implicitly getFileInfo and listStatus in
- * clientProtocol are changed; The decoding at the client
- * side should change accordingly.
- */
- protected byte[] name;
- protected INodeDirectory parent;
- protected long modificationTime;
- protected long accessTime;
- /** Simple wrapper for two counters :
- * nsCount (namespace consumed) and dsCount (diskspace consumed).
- */
+ /** Wrapper of two counters for namespace consumed and diskspace consumed. */
static class DirCounts {
+ /** namespace count */
long nsCount = 0;
+ /** diskspace count */
long dsCount = 0;
/** returns namespace count */
@@ -78,10 +67,6 @@ public abstract class INode implements C
}
}
- //Only updated by updatePermissionStatus(...).
- //Other codes should not modify it.
- private long permission;
-
private static enum PermissionStatusFormat {
MODE(0, 16),
GROUP(MODE.OFFSET + MODE.LENGTH, 25),
@@ -104,31 +89,67 @@ public abstract class INode implements C
long combine(long bits, long record) {
return (record & ~MASK) | (bits << OFFSET);
}
+
+ /** Encode the {@link PermissionStatus} as a long. */
+ static long toLong(PermissionStatus ps) {
+ long permission = 0L;
+ final int user = SerialNumberManager.INSTANCE.getUserSerialNumber(
+ ps.getUserName());
+ permission = PermissionStatusFormat.USER.combine(user, permission);
+ final int group = SerialNumberManager.INSTANCE.getGroupSerialNumber(
+ ps.getGroupName());
+ permission = PermissionStatusFormat.GROUP.combine(group, permission);
+ final int mode = ps.getPermission().toShort();
+ permission = PermissionStatusFormat.MODE.combine(mode, permission);
+ return permission;
+ }
+ }
+
+ /**
+ * The inode name is in java UTF8 encoding;
+ * The name in HdfsFileStatus should keep the same encoding as this.
+ * if this encoding is changed, implicitly getFileInfo and listStatus in
+ * clientProtocol are changed; The decoding at the client
+ * side should change accordingly.
+ */
+ private byte[] name = null;
+ /**
+ * Permission encoded using PermissionStatusFormat.
+ * Code other than {@link #updatePermissionStatus(PermissionStatusFormat, long)}
+ * should not modify it.
+ */
+ private long permission = 0L;
+ protected INodeDirectory parent = null;
+ protected long modificationTime = 0L;
+ protected long accessTime = 0L;
+
+ private INode(byte[] name, long permission, INodeDirectory parent,
+ long modificationTime, long accessTime) {
+ this.name = name;
+ this.permission = permission;
+ this.parent = parent;
+ this.modificationTime = modificationTime;
+ this.accessTime = accessTime;
}
- INode(PermissionStatus permissions, long mTime, long atime) {
- this.name = null;
- this.parent = null;
- this.modificationTime = mTime;
- setAccessTime(atime);
- setPermissionStatus(permissions);
+ INode(byte[] name, PermissionStatus permissions, INodeDirectory parent,
+ long modificationTime, long accessTime) {
+ this(name, PermissionStatusFormat.toLong(permissions), parent,
+ modificationTime, accessTime);
+ }
+
+ INode(PermissionStatus permissions, long mtime, long atime) {
+ this(null, permissions, null, mtime, atime);
}
protected INode(String name, PermissionStatus permissions) {
- this(permissions, 0L, 0L);
- setLocalName(name);
+ this(DFSUtil.string2Bytes(name), permissions, null, 0L, 0L);
}
- /** copy constructor
- *
- * @param other Other node to be copied
- */
+ /** @param other Other node to be copied */
INode(INode other) {
- setLocalName(other.getLocalName());
- this.parent = other.getParent();
- setPermissionStatus(other.getPermissionStatus());
- setModificationTime(other.getModificationTime());
- setAccessTime(other.getAccessTime());
+ this(other.getLocalNameBytes(), other.permission, other.getParent(),
+ other.getModificationTime(), other.getAccessTime());
}
/**
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java Sun Nov 18 22:31:28 2012
@@ -68,9 +68,8 @@ public class INodeDirectory extends INod
}
/** constructor */
- INodeDirectory(byte[] localName, PermissionStatus permissions, long mTime) {
- this(permissions, mTime);
- this.name = localName;
+ INodeDirectory(byte[] name, PermissionStatus permissions, long mtime) {
+ super(name, permissions, null, mtime, 0L);
}
/** copy constructor
@@ -93,25 +92,30 @@ public class INodeDirectory extends INod
return false;
}
- INode removeChild(INode node) {
- assert children != null;
- int low = Collections.binarySearch(children, node.name);
- if (low >= 0) {
- return children.remove(low);
- } else {
- return null;
+ private void assertChildrenNonNull() {
+ if (children == null) {
+ throw new AssertionError("children is null: " + this);
}
}
+ private int searchChildren(INode inode) {
+ return Collections.binarySearch(children, inode.getLocalNameBytes());
+ }
+
+ INode removeChild(INode node) {
+ assertChildrenNonNull();
+ final int i = searchChildren(node);
+ return i >= 0? children.remove(i): null;
+ }
+
/** Replace a child that has the same name as newChild by newChild.
*
* @param newChild Child node to be added
*/
void replaceChild(INode newChild) {
- if ( children == null ) {
- throw new IllegalArgumentException("The directory is empty");
- }
- int low = Collections.binarySearch(children, newChild.name);
+ assertChildrenNonNull();
+
+ final int low = searchChildren(newChild);
if (low>=0) { // an old child exists so replace by the newChild
children.set(low, newChild);
} else {
@@ -248,7 +252,7 @@ public class INodeDirectory extends INod
final String remainder =
constructPath(components, count + 1, components.length);
final String link = DFSUtil.bytes2String(components[count]);
- final String target = ((INodeSymlink)curNode).getLinkValue();
+ final String target = ((INodeSymlink)curNode).getSymlinkString();
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("UnresolvedPathException " +
" path: " + path + " preceding: " + preceding +
@@ -360,7 +364,7 @@ public class INodeDirectory extends INod
if (children == null) {
children = new ArrayList<INode>(DEFAULT_FILES_PER_DIRECTORY);
}
- int low = Collections.binarySearch(children, node.name);
+ final int low = searchChildren(node);
if(low >= 0)
return null;
node.parent = this;
@@ -400,13 +404,9 @@ public class INodeDirectory extends INod
* @throws FileNotFoundException if parent does not exist or
* is not a directory.
*/
- INodeDirectory addToParent( byte[] localname,
- INode newNode,
- INodeDirectory parent,
- boolean propagateModTime
- ) throws FileNotFoundException {
+ INodeDirectory addToParent(INode newNode, INodeDirectory parent,
+ boolean propagateModTime) throws FileNotFoundException {
// insert into the parent children list
- newNode.name = localname;
if(parent.addChild(newNode, propagateModTime) == null)
return null;
return parent;
@@ -444,7 +444,7 @@ public class INodeDirectory extends INod
if (pathComponents.length < 2) { // add root
return null;
}
- newNode.name = pathComponents[pathComponents.length - 1];
+ newNode.setLocalName(pathComponents[pathComponents.length - 1]);
// insert into the parent children list
INodeDirectory parent = getParent(pathComponents);
return parent.addChild(newNode, propagateModTime) == null? null: parent;
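The searchChildren()/addChild() pair above depends on the children list staying sorted by the inode's name bytes, so that Collections.binarySearch() serves both for lookups and for finding the insertion point. A minimal, self-contained sketch of that invariant (the Node class and insert() helper below are hypothetical stand-ins, not the HDFS types):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    /** Stand-in for INode: comparable against a name given as bytes. */
    class Node implements Comparable<byte[]> {
      final byte[] name;
      Node(String name) { this.name = name.getBytes(); }

      @Override
      public int compareTo(byte[] other) {
        // unsigned, byte-wise lexicographic comparison
        int n = Math.min(name.length, other.length);
        for (int i = 0; i < n; i++) {
          int d = (name[i] & 0xff) - (other[i] & 0xff);
          if (d != 0) return d;
        }
        return name.length - other.length;
      }
    }

    public class SortedChildren {
      /** Insert keeping the list sorted; return null if the name exists. */
      static Node insert(List<Node> children, Node node) {
        int i = Collections.binarySearch(children, node.name);
        if (i >= 0) return null;      // duplicate name, reject
        children.add(-i - 1, node);   // -i - 1 is the insertion point
        return node;
      }

      public static void main(String[] args) {
        List<Node> children = new ArrayList<Node>();
        insert(children, new Node("a"));
        insert(children, new Node("c"));
        insert(children, new Node("b"));
        // a hit returns the index; a miss returns -(insertionPoint) - 1
        System.out.println(Collections.binarySearch(children, "b".getBytes())); // 1
      }
    }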
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java Sun Nov 18 22:31:28 2012
@@ -27,9 +27,9 @@ import org.apache.hadoop.hdfs.protocol.Q
*/
public class INodeDirectoryWithQuota extends INodeDirectory {
private long nsQuota; /// NameSpace quota
- private long nsCount;
+ private long nsCount = 1L;
private long dsQuota; /// disk space quota
- private long diskspace;
+ private long diskspace = 0L;
/** Convert an existing directory inode to one with the given quota
*
@@ -44,7 +44,8 @@ public class INodeDirectoryWithQuota ext
other.spaceConsumedInTree(counts);
this.nsCount = counts.getNsCount();
this.diskspace = counts.getDsCount();
- setQuota(nsQuota, dsQuota);
+ this.nsQuota = nsQuota;
+ this.dsQuota = dsQuota;
}
/** constructor with no quota verification */
@@ -53,7 +54,6 @@ public class INodeDirectoryWithQuota ext
super(permissions, modificationTime);
this.nsQuota = nsQuota;
this.dsQuota = dsQuota;
- this.nsCount = 1;
}
/** constructor with no quota verification */
@@ -62,7 +62,6 @@ public class INodeDirectoryWithQuota ext
super(name, permissions);
this.nsQuota = nsQuota;
this.dsQuota = dsQuota;
- this.nsCount = 1;
}
/** Get this directory's namespace quota
@@ -116,19 +115,8 @@ public class INodeDirectoryWithQuota ext
* @param nsDelta the change of the tree size
* @param dsDelta change to disk space occupied
*/
- void updateNumItemsInTree(long nsDelta, long dsDelta) {
- nsCount += nsDelta;
- diskspace += dsDelta;
- }
-
- /** Update the size of the tree
- *
- * @param nsDelta the change of the tree size
- * @param dsDelta change to disk space occupied
- **/
- void unprotectedUpdateNumItemsInTree(long nsDelta, long dsDelta) {
- nsCount = nsCount + nsDelta;
- diskspace = diskspace + dsDelta;
+ void addSpaceConsumed(long nsDelta, long dsDelta) {
+ setSpaceConsumed(nsCount + nsDelta, diskspace + dsDelta);
}
/**
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java Sun Nov 18 22:31:28 2012
@@ -45,14 +45,43 @@ public class INodeFile extends INode imp
static final FsPermission UMASK = FsPermission.createImmutable((short)0111);
- //Number of bits for Block size
- static final short BLOCKBITS = 48;
- //Header mask 64-bit representation
- //Format: [16 bits for replication][48 bits for PreferredBlockSize]
- static final long HEADERMASK = 0xffffL << BLOCKBITS;
+ /** Format: [16 bits for replication][48 bits for PreferredBlockSize] */
+ private static class HeaderFormat {
+ /** Number of bits for Block size */
+ static final int BLOCKBITS = 48;
+ /** Header mask 64-bit representation */
+ static final long HEADERMASK = 0xffffL << BLOCKBITS;
+ static final long MAX_BLOCK_SIZE = ~HEADERMASK;
+
+ static short getReplication(long header) {
+ return (short) ((header & HEADERMASK) >> BLOCKBITS);
+ }
+
+ static long combineReplication(long header, short replication) {
+ if (replication <= 0) {
+ throw new IllegalArgumentException(
+ "Unexpected value for the replication: " + replication);
+ }
+ return ((long)replication << BLOCKBITS) | (header & MAX_BLOCK_SIZE);
+ }
+
+ static long getPreferredBlockSize(long header) {
+ return header & MAX_BLOCK_SIZE;
+ }
+
+ static long combinePreferredBlockSize(long header, long blockSize) {
+ if (blockSize < 0) {
+ throw new IllegalArgumentException("Block size < 0: " + blockSize);
+ } else if (blockSize > MAX_BLOCK_SIZE) {
+ throw new IllegalArgumentException("Block size = " + blockSize
+ + " > MAX_BLOCK_SIZE = " + MAX_BLOCK_SIZE);
+ }
+ return (header & HEADERMASK) | (blockSize & MAX_BLOCK_SIZE);
+ }
+ }
- private long header;
+ private long header = 0L;
private BlockInfo[] blocks;
@@ -60,15 +89,15 @@ public class INodeFile extends INode imp
short replication, long modificationTime,
long atime, long preferredBlockSize) {
super(permissions, modificationTime, atime);
- this.setFileReplication(replication);
- this.setPreferredBlockSize(preferredBlockSize);
+ header = HeaderFormat.combineReplication(header, replication);
+ header = HeaderFormat.combinePreferredBlockSize(header, preferredBlockSize);
this.blocks = blklist;
}
protected INodeFile(INodeFile f) {
this(f.getPermissionStatus(), f.getBlocks(), f.getFileReplication(),
f.getModificationTime(), f.getAccessTime(), f.getPreferredBlockSize());
- this.name = f.getLocalNameBytes();
+ this.setLocalName(f.getLocalNameBytes());
}
/**
@@ -83,7 +112,7 @@ public class INodeFile extends INode imp
/** @return the replication factor of the file. */
public final short getFileReplication() {
- return (short) ((header & HEADERMASK) >> BLOCKBITS);
+ return HeaderFormat.getReplication(header);
}
@Override
@@ -92,21 +121,13 @@ public class INodeFile extends INode imp
}
protected void setFileReplication(short replication) {
- if(replication <= 0)
- throw new IllegalArgumentException("Unexpected value for the replication");
- header = ((long)replication << BLOCKBITS) | (header & ~HEADERMASK);
+ header = HeaderFormat.combineReplication(header, replication);
}
/** @return preferred block size (in bytes) of the file. */
@Override
public long getPreferredBlockSize() {
- return header & ~HEADERMASK;
- }
-
- private void setPreferredBlockSize(long preferredBlkSize) {
- if((preferredBlkSize < 0) || (preferredBlkSize > ~HEADERMASK ))
- throw new IllegalArgumentException("Unexpected value for the block size");
- header = (header & HEADERMASK) | (preferredBlkSize & ~HEADERMASK);
+ return HeaderFormat.getPreferredBlockSize(header);
}
/** @return the blocks of the file. */
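The new HeaderFormat class packs two fields into a single long: the top 16 bits hold the replication factor and the low 48 bits hold the preferred block size, so MAX_BLOCK_SIZE works out to 2^48 - 1 bytes. A small sketch of the same bit arithmetic (constants copied from the patch; the demo class itself is hypothetical):

    public class HeaderFormatDemo {
      static final int BLOCKBITS = 48;                      // low 48 bits: block size
      static final long HEADERMASK = 0xffffL << BLOCKBITS;  // high 16 bits: replication
      static final long MAX_BLOCK_SIZE = ~HEADERMASK;       // 2^48 - 1

      public static void main(String[] args) {
        long header = 0L;
        // pack replication = 3 into the high 16 bits
        header = ((long) 3 << BLOCKBITS) | (header & MAX_BLOCK_SIZE);
        // pack a 128 MB preferred block size into the low 48 bits
        header = (header & HEADERMASK) | (128L * 1024 * 1024);

        System.out.println((header & HEADERMASK) >> BLOCKBITS); // 3
        System.out.println(header & MAX_BLOCK_SIZE);            // 134217728
      }
    }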
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java Sun Nov 18 22:31:28 2012
@@ -22,19 +22,16 @@ import org.apache.hadoop.fs.permission.P
import org.apache.hadoop.hdfs.DFSUtil;
/**
- * An INode representing a symbolic link.
+ * An {@link INode} representing a symbolic link.
*/
@InterfaceAudience.Private
public class INodeSymlink extends INode {
- private byte[] symlink; // The target URI
+ private final byte[] symlink; // The target URI
- INodeSymlink(String value, long modTime, long atime,
+ INodeSymlink(String value, long mtime, long atime,
PermissionStatus permissions) {
- super(permissions, modTime, atime);
- assert value != null;
- setLinkValue(value);
- setModificationTimeForce(modTime);
- setAccessTime(atime);
+ super(permissions, mtime, atime);
+ this.symlink = DFSUtil.string2Bytes(value);
}
public INodeSymlink(INodeSymlink that) {
@@ -49,12 +46,8 @@ public class INodeSymlink extends INode
public boolean isSymlink() {
return true;
}
-
- void setLinkValue(String value) {
- this.symlink = DFSUtil.string2Bytes(value);
- }
- public String getLinkValue() {
+ public String getSymlinkString() {
return DFSUtil.bytes2String(symlink);
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java Sun Nov 18 22:31:28 2012
@@ -401,17 +401,21 @@ public class LeaseManager {
@Override
public void run() {
for(; shouldRunMonitor && fsnamesystem.isRunning(); ) {
+ boolean needSync = false;
try {
fsnamesystem.writeLockInterruptibly();
try {
if (!fsnamesystem.isInSafeMode()) {
- checkLeases();
+ needSync = checkLeases();
}
} finally {
fsnamesystem.writeUnlock();
+ // lease reassignments should be sync'ed.
+ if (needSync) {
+ fsnamesystem.getEditLog().logSync();
+ }
}
-
Thread.sleep(HdfsServerConstants.NAMENODE_LEASE_RECHECK_INTERVAL);
} catch(InterruptedException ie) {
if (LOG.isDebugEnabled()) {
@@ -422,13 +426,16 @@ public class LeaseManager {
}
}
- /** Check the leases beginning from the oldest. */
- private synchronized void checkLeases() {
+ /** Check the leases beginning from the oldest.
+ * @return true if sync is needed.
+ */
+ private synchronized boolean checkLeases() {
+ boolean needSync = false;
assert fsnamesystem.hasWriteLock();
for(; sortedLeases.size() > 0; ) {
final Lease oldest = sortedLeases.first();
if (!oldest.expiredHardLimit()) {
- return;
+ return needSync;
}
LOG.info(oldest + " has expired hard limit");
@@ -451,6 +458,10 @@ public class LeaseManager {
LOG.debug("Started block recovery " + p + " lease " + oldest);
}
}
+ // If a lease recovery happened, we need to sync later.
+ if (!needSync && !completed) {
+ needSync = true;
+ }
} catch (IOException e) {
LOG.error("Cannot release the path " + p + " in the lease "
+ oldest, e);
@@ -462,6 +473,7 @@ public class LeaseManager {
removeLease(oldest, p);
}
}
+ return needSync;
}
@Override
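The needSync flag threaded through the monitor and checkLeases() implements a common pattern: record, while holding the namesystem write lock, whether any edits were logged, and only call logSync() after the lock is released, so the (potentially slow) flush does not block other namespace operations. A compact sketch of the pattern (the class and its stubs are hypothetical, not the HDFS types):

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class SyncOutsideLock {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

      void monitorOnce() {
        boolean needSync = false;
        lock.writeLock().lock();
        try {
          needSync = doWorkThatMayWriteEdits(); // e.g. reassign an expired lease
        } finally {
          lock.writeLock().unlock();
          // flush after dropping the lock so other writers are not
          // serialized behind the disk sync
          if (needSync) {
            logSync();
          }
        }
      }

      private boolean doWorkThatMayWriteEdits() { return true; } // stub
      private void logSync() { /* stub: force edits to stable storage */ }
    }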
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameCache.java?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameCache.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameCache.java Sun Nov 18 22:31:28 2012
@@ -152,4 +152,14 @@ class NameCache<K> {
cache.put(name, name);
lookups += useThreshold;
}
+
+ public void reset() {
+ initialized = false;
+ cache.clear();
+ if (transientMap == null) {
+ transientMap = new HashMap<K, UseCount>();
+ } else {
+ transientMap.clear();
+ }
+ }
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java Sun Nov 18 22:31:28 2012
@@ -714,8 +714,17 @@ class NameNodeRpcServer implements Namen
}
@Override // ClientProtocol
- public boolean setSafeMode(SafeModeAction action) throws IOException {
- namesystem.checkOperation(OperationCategory.UNCHECKED);
+ public boolean setSafeMode(SafeModeAction action, boolean isChecked)
+ throws IOException {
+ OperationCategory opCategory = OperationCategory.UNCHECKED;
+ if (isChecked) {
+ if (action == SafeModeAction.SAFEMODE_GET) {
+ opCategory = OperationCategory.READ;
+ } else {
+ opCategory = OperationCategory.WRITE;
+ }
+ }
+ namesystem.checkOperation(opCategory);
return namesystem.setSafeMode(action);
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Sun Nov 18 22:31:28 2012
@@ -250,8 +250,15 @@ public class SecondaryNameNode implement
new AccessControlList(conf.get(DFS_ADMIN, " "))) {
{
if (UserGroupInformation.isSecurityEnabled()) {
- initSpnego(conf, DFSConfigKeys.DFS_SECONDARY_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY,
- DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY);
+ String httpKeytabKey = DFSConfigKeys.
+ DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY;
+ if (null == conf.get(httpKeytabKey)) {
+ httpKeytabKey = DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY;
+ }
+ initSpnego(
+ conf,
+ DFSConfigKeys.DFS_SECONDARY_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY,
+ httpKeytabKey);
}
}
};
@@ -886,6 +893,7 @@ public class SecondaryNameNode implement
"just been downloaded");
}
dstImage.reloadFromImageFile(file, dstNamesystem);
+ dstNamesystem.dir.imageLoadComplete();
}
Checkpointer.rollForwardByApplyingLogs(manifest, dstImage, dstNamesystem);
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java Sun Nov 18 22:31:28 2012
@@ -48,6 +48,7 @@ import org.apache.hadoop.hdfs.protocol.C
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
import org.apache.hadoop.ipc.RPC;
@@ -399,7 +400,7 @@ public class DFSAdmin extends FsShell {
} catch (java.lang.InterruptedException e) {
throw new IOException("Wait Interrupted");
}
- inSafeMode = dfs.isInSafeMode();
+ inSafeMode = dfs.setSafeMode(SafeModeAction.SAFEMODE_GET);
}
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/StringParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/StringParam.java?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/StringParam.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/StringParam.java Sun Nov 18 22:31:28 2012
@@ -48,7 +48,7 @@ abstract class StringParam extends Param
@Override
final String parse(final String str) {
- if (pattern != null) {
+ if (str != null && pattern != null) {
if (!pattern.matcher(str).matches()) {
throw new IllegalArgumentException("Invalid value: \"" + str
+ "\" does not belong to the domain " + getDomain());
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UserParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UserParam.java?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UserParam.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UserParam.java Sun Nov 18 22:31:28 2012
@@ -19,6 +19,9 @@ package org.apache.hadoop.hdfs.web.resou
import org.apache.hadoop.security.UserGroupInformation;
+import java.text.MessageFormat;
+import java.util.regex.Pattern;
+
/** User parameter. */
public class UserParam extends StringParam {
/** Parameter name. */
@@ -26,14 +29,29 @@ public class UserParam extends StringPar
/** Default parameter value. */
public static final String DEFAULT = "";
- private static final Domain DOMAIN = new Domain(NAME, null);
+ private static final Domain DOMAIN = new Domain(NAME,
+ Pattern.compile("^[A-Za-z_][A-Za-z0-9._-]*[$]?$"));
+
+ private static String validateLength(String str) {
+ if (str == null) {
+ throw new IllegalArgumentException(
+ MessageFormat.format("Parameter [{0}], cannot be NULL", NAME));
+ }
+ int len = str.length();
+ if (len < 1 || len > 31) {
+ throw new IllegalArgumentException(MessageFormat.format(
+ "Parameter [{0}], invalid value [{1}], it's length must be between 1 and 31",
+ NAME, str));
+ }
+ return str;
+ }
/**
* Constructor.
* @param str a string representation of the parameter value.
*/
public UserParam(final String str) {
- super(DOMAIN, str == null || str.equals(DEFAULT)? null: str);
+ super(DOMAIN, str == null || str.equals(DEFAULT)? null : validateLength(str));
}
/**
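The tightened UserParam now rejects anything that does not look like a Unix login name: it must start with a letter or underscore, may contain letters, digits, '.', '_' and '-', may end with '$' (machine accounts), and must be 1 to 31 characters long. A sketch that combines both checks into one helper (pattern copied from the patch; the class and helper are hypothetical):

    import java.util.regex.Pattern;

    public class UserNameCheck {
      private static final Pattern OK =
          Pattern.compile("^[A-Za-z_][A-Za-z0-9._-]*[$]?$");

      static boolean valid(String s) {
        return s != null && s.length() >= 1 && s.length() <= 31
            && OK.matcher(s).matches();
      }

      public static void main(String[] args) {
        System.out.println(valid("hdfs"));   // true
        System.out.println(valid("host$"));  // true: trailing $ is allowed
        System.out.println(valid("1user"));  // false: may not start with a digit
        System.out.println(valid("a b"));    // false: spaces are not allowed
      }
    }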
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1408927-1410997
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_context_handle.h
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_context_handle.h?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_context_handle.h (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_context_handle.h Sun Nov 18 22:31:28 2012
@@ -31,7 +31,6 @@
//
typedef struct dfs_context_struct {
int debug;
- int read_only;
int usetrash;
int direct_io;
char **protectedpaths;
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_dfs.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_dfs.c?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_dfs.c (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_dfs.c Sun Nov 18 22:31:28 2012
@@ -93,6 +93,18 @@ int main(int argc, char *argv[])
if (!options.no_permissions) {
fuse_opt_add_arg(&args, "-odefault_permissions");
}
+ /*
+ * FUSE already has a built-in parameter for mounting the filesystem as
+ * read-only, -r. We define our own parameter for this, called -oro,
+ * and support it by translating it into -r internally.
+ * The kernel intercepts and returns an error message for any "write"
+ * operations that the user attempts to perform on a read-only filesystem.
+ * That means that we don't have to write any code to handle read-only mode.
+ * See HDFS-4139 for more details.
+ */
+ if (options.read_only) {
+ fuse_opt_add_arg(&args, "-r");
+ }
{
char buf[80];
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_mkdir.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_mkdir.c?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_mkdir.c (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_mkdir.c Sun Nov 18 22:31:28 2012
@@ -39,11 +39,6 @@ int dfs_mkdir(const char *path, mode_t m
return -EACCES;
}
- if (dfs->read_only) {
- ERROR("HDFS is configured read-only, cannot create directory %s", path);
- return -EACCES;
- }
-
ret = fuseConnectAsThreadUid(&conn);
if (ret) {
fprintf(stderr, "fuseConnectAsThreadUid: failed to open a libhdfs "
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_rename.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_rename.c?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_rename.c (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_rename.c Sun Nov 18 22:31:28 2012
@@ -43,11 +43,6 @@ int dfs_rename(const char *from, const c
return -EACCES;
}
- if (dfs->read_only) {
- ERROR("HDFS configured read-only, cannot rename directory %s", from);
- return -EACCES;
- }
-
ret = fuseConnectAsThreadUid(&conn);
if (ret) {
fprintf(stderr, "fuseConnectAsThreadUid: failed to open a libhdfs "
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_rmdir.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_rmdir.c?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_rmdir.c (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_rmdir.c Sun Nov 18 22:31:28 2012
@@ -44,12 +44,6 @@ int dfs_rmdir(const char *path)
goto cleanup;
}
- if (dfs->read_only) {
- ERROR("HDFS configured read-only, cannot delete directory %s", path);
- ret = -EACCES;
- goto cleanup;
- }
-
ret = fuseConnectAsThreadUid(&conn);
if (ret) {
fprintf(stderr, "fuseConnectAsThreadUid: failed to open a libhdfs "
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_unlink.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_unlink.c?rev=1411007&r1=1411006&r2=1411007&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_unlink.c (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_unlink.c Sun Nov 18 22:31:28 2012
@@ -40,12 +40,6 @@ int dfs_unlink(const char *path)
goto cleanup;
}
- if (dfs->read_only) {
- ERROR("HDFS configured read-only, cannot create directory %s", path);
- ret = -EACCES;
- goto cleanup;
- }
-
ret = fuseConnectAsThreadUid(&conn);
if (ret) {
fprintf(stderr, "fuseConnectAsThreadUid: failed to open a libhdfs "