Posted to common-commits@hadoop.apache.org by wh...@apache.org on 2015/05/02 19:03:12 UTC
[4/4] hadoop git commit: HDFS-8249. Separate HdfsConstants into the client and the server side class. Contributed by Haohui Mai.
HDFS-8249. Separate HdfsConstants into the client and the server side class. Contributed by Haohui Mai.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/65a19fbc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/65a19fbc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/65a19fbc
Branch: refs/heads/branch-2
Commit: 65a19fbc3005e9ee35064b23393d88b1e78764e2
Parents: 8068e79
Author: Haohui Mai <wh...@apache.org>
Authored: Fri May 1 15:27:28 2015 -0700
Committer: Haohui Mai <wh...@apache.org>
Committed: Sat May 2 10:03:04 2015 -0700
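
In short, the patch keeps the client-visible constants in org.apache.hadoop.hdfs.protocol.HdfsConstants (now living in hadoop-hdfs-client and absorbing the former HdfsConstantsClient interface) and moves server-only constants such as INVALID_TXID, NAMENODE_LAYOUT_VERSION, and the lease periods into org.apache.hadoop.hdfs.server.common.HdfsServerConstants. A minimal sketch of the resulting usage split, assuming both modules are on the classpath (the wrapper class below is illustrative only, not part of the commit):

    // Illustrative only: after this change, client code resolves shared
    // protocol constants via HdfsConstants, while server code resolves
    // server-only constants via HdfsServerConstants.
    import org.apache.hadoop.hdfs.protocol.HdfsConstants;
    import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;

    public class ConstantsSplitSketch {
      public static void main(String[] args) {
        // Formerly HdfsConstantsClient.GRANDFATHER_GENERATION_STAMP:
        System.out.println(HdfsConstants.GRANDFATHER_GENERATION_STAMP);
        // Formerly HdfsConstants.INVALID_TXID, now server-side only:
        System.out.println(HdfsServerConstants.INVALID_TXID);
      }
    }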
----------------------------------------------------------------------
.../org/apache/hadoop/hdfs/HAUtilClient.java | 2 +-
.../org/apache/hadoop/hdfs/protocol/Block.java | 4 +-
.../hadoop/hdfs/protocol/HdfsConstants.java | 109 ++++++++++++
.../hdfs/protocol/HdfsConstantsClient.java | 45 -----
.../apache/hadoop/hdfs/web/JsonUtilClient.java | 6 +-
.../hadoop/hdfs/web/WebHdfsFileSystem.java | 4 +-
.../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java | 4 +-
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +
.../bkjournal/BookKeeperJournalManager.java | 14 +-
.../bkjournal/EditLogLedgerMetadata.java | 8 +-
.../bkjournal/TestBookKeeperEditLogStreams.java | 6 +-
.../java/org/apache/hadoop/hdfs/DFSClient.java | 8 +-
.../java/org/apache/hadoop/hdfs/DFSUtil.java | 7 +-
.../org/apache/hadoop/hdfs/DataStreamer.java | 5 +-
.../org/apache/hadoop/hdfs/NameNodeProxies.java | 3 +-
.../hadoop/hdfs/client/impl/LeaseRenewer.java | 6 +-
.../hadoop/hdfs/protocol/ClientProtocol.java | 4 +-
.../hadoop/hdfs/protocol/HdfsConstants.java | 176 -------------------
.../protocol/SnapshottableDirectoryStatus.java | 2 +-
.../protocolPB/ClientNamenodeProtocolPB.java | 2 +-
...tNamenodeProtocolServerSideTranslatorPB.java | 4 +-
.../apache/hadoop/hdfs/protocolPB/PBHelper.java | 6 +-
.../hdfs/qjournal/client/IPCLoggerChannel.java | 4 +-
.../hdfs/qjournal/protocol/RequestInfo.java | 4 +-
.../QJournalProtocolServerSideTranslatorPB.java | 4 +-
.../hadoop/hdfs/qjournal/server/Journal.java | 22 +--
.../hadoop/hdfs/server/balancer/Dispatcher.java | 5 +-
.../server/blockmanagement/BlockIdManager.java | 10 +-
.../BlockPlacementPolicyDefault.java | 4 +-
.../BlockStoragePolicySuite.java | 21 +--
.../hdfs/server/common/HdfsServerConstants.java | 107 ++++++++---
.../hadoop/hdfs/server/common/StorageInfo.java | 5 +-
.../server/datanode/BlockMetadataHeader.java | 4 +-
.../server/datanode/BlockPoolSliceStorage.java | 20 +--
.../hdfs/server/datanode/BlockReceiver.java | 4 +-
.../hdfs/server/datanode/BlockSender.java | 8 +-
.../hadoop/hdfs/server/datanode/DataNode.java | 2 +-
.../hdfs/server/datanode/DataStorage.java | 35 ++--
.../hdfs/server/datanode/DataXceiver.java | 16 +-
.../hdfs/server/datanode/DirectoryScanner.java | 4 +-
.../datanode/fsdataset/impl/BlockPoolSlice.java | 4 +-
.../datanode/fsdataset/impl/FsDatasetImpl.java | 8 +-
.../datanode/fsdataset/impl/FsDatasetUtil.java | 4 +-
.../apache/hadoop/hdfs/server/mover/Mover.java | 2 +-
.../hadoop/hdfs/server/namenode/BackupNode.java | 7 +-
.../namenode/EditLogBackupInputStream.java | 6 +-
.../server/namenode/EditLogFileInputStream.java | 22 +--
.../server/namenode/EditLogInputStream.java | 4 +-
.../hdfs/server/namenode/EditsDoubleBuffer.java | 6 +-
.../server/namenode/FSDirStatAndListingOp.java | 16 +-
.../hdfs/server/namenode/FSDirectory.java | 6 +-
.../hadoop/hdfs/server/namenode/FSEditLog.java | 8 +-
.../hdfs/server/namenode/FSEditLogLoader.java | 24 +--
.../hdfs/server/namenode/FSEditLogOp.java | 28 +--
.../hadoop/hdfs/server/namenode/FSImage.java | 18 +-
.../hdfs/server/namenode/FSImageFormat.java | 7 +-
.../server/namenode/FSImageFormatProtobuf.java | 8 +-
...FSImagePreTransactionalStorageInspector.java | 4 +-
.../namenode/FSImageStorageInspector.java | 4 +-
.../hdfs/server/namenode/FSNamesystem.java | 19 +-
.../server/namenode/FileJournalManager.java | 22 +--
.../hadoop/hdfs/server/namenode/INode.java | 13 +-
.../hdfs/server/namenode/INodeDirectory.java | 2 +-
.../hadoop/hdfs/server/namenode/INodeFile.java | 2 +-
.../hadoop/hdfs/server/namenode/INodeId.java | 4 +-
.../hadoop/hdfs/server/namenode/INodeMap.java | 6 +-
.../hdfs/server/namenode/INodesInPath.java | 3 +-
.../hdfs/server/namenode/LeaseManager.java | 5 +-
.../hadoop/hdfs/server/namenode/NNStorage.java | 11 +-
.../hadoop/hdfs/server/namenode/NameNode.java | 2 +-
.../hdfs/server/namenode/NameNodeRpcServer.java | 10 +-
.../namenode/RedundantEditLogInputStream.java | 12 +-
.../hdfs/server/namenode/TransferFsImage.java | 8 +-
.../server/namenode/ha/BootstrapStandby.java | 6 +-
.../hdfs/server/namenode/ha/EditLogTailer.java | 6 +-
.../snapshot/FileWithSnapshotFeature.java | 4 +-
.../server/protocol/NNHAStatusHeartbeat.java | 4 +-
.../hdfs/server/protocol/NamespaceInfo.java | 4 +-
.../hdfs/server/protocol/RemoteEditLog.java | 11 +-
.../hadoop/hdfs/tools/StoragePolicyAdmin.java | 4 +-
.../offlineEditsViewer/OfflineEditsLoader.java | 7 +-
.../offlineImageViewer/ImageLoaderCurrent.java | 4 +-
.../org/apache/hadoop/fs/TestSymlinkHdfs.java | 3 +-
.../org/apache/hadoop/hdfs/DFSTestUtil.java | 8 +-
.../hadoop/hdfs/TestBlockStoragePolicy.java | 39 ++--
.../org/apache/hadoop/hdfs/TestDFSRollback.java | 6 +-
.../hadoop/hdfs/TestDFSStartupVersions.java | 8 +-
.../org/apache/hadoop/hdfs/TestDFSUpgrade.java | 4 +-
.../hadoop/hdfs/TestDatanodeRegistration.java | 9 +-
.../org/apache/hadoop/hdfs/TestFileAppend4.java | 5 +-
.../apache/hadoop/hdfs/TestFileCreation.java | 5 +-
.../org/apache/hadoop/hdfs/TestFileStatus.java | 4 +-
.../org/apache/hadoop/hdfs/TestGetBlocks.java | 4 +-
.../java/org/apache/hadoop/hdfs/TestLease.java | 11 +-
.../apache/hadoop/hdfs/TestLeaseRecovery2.java | 5 +-
.../apache/hadoop/hdfs/UpgradeUtilities.java | 4 +-
.../hdfs/qjournal/server/TestJournalNode.java | 4 +-
.../blockmanagement/TestBlockManager.java | 9 +-
.../blockmanagement/TestReplicationPolicy.java | 42 ++---
.../TestReplicationPolicyConsiderLoad.java | 6 +-
.../TestReplicationPolicyWithNodeGroup.java | 46 ++---
.../server/datanode/TestDatanodeRegister.java | 8 +-
.../server/datanode/TestDirectoryScanner.java | 6 +-
.../hdfs/server/mover/TestStorageMover.java | 8 +-
.../server/namenode/NNThroughputBenchmark.java | 7 +-
.../hdfs/server/namenode/TestAddBlockRetry.java | 12 +-
.../hdfs/server/namenode/TestEditLog.java | 6 +-
.../namenode/TestEditLogFileInputStream.java | 4 +-
.../server/namenode/TestFSEditLogLoader.java | 6 +-
.../namenode/TestFSPermissionChecker.java | 6 +-
.../hdfs/server/namenode/TestFileTruncate.java | 5 +-
.../hdfs/server/namenode/TestINodeFile.java | 16 +-
.../namenode/TestMetadataVersionOutput.java | 4 +-
.../namenode/TestNameNodeOptionParsing.java | 6 +-
.../namenode/TestTruncateQuotaUpdate.java | 4 +-
.../namenode/ha/TestDFSUpgradeWithHA.java | 4 +-
.../snapshot/TestOpenFilesWithSnapshot.java | 4 +-
.../server/namenode/snapshot/TestSnapshot.java | 1 -
.../apache/hadoop/hdfs/web/TestJsonUtil.java | 4 +-
119 files changed, 667 insertions(+), 728 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a19fbc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HAUtilClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HAUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HAUtilClient.java
index 7956838..9f28cfc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HAUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HAUtilClient.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.security.token.Token;
import java.net.URI;
-import static org.apache.hadoop.hdfs.protocol.HdfsConstantsClient.HA_DT_SERVICE_PREFIX;
+import static org.apache.hadoop.hdfs.protocol.HdfsConstants.HA_DT_SERVICE_PREFIX;
@InterfaceAudience.Private
public class HAUtilClient {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a19fbc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
index 11daccc..0dac290 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
@@ -78,7 +78,7 @@ public class Block implements Writable, Comparable<Block> {
public static long getGenerationStamp(String metaFile) {
Matcher m = metaFilePattern.matcher(metaFile);
return m.matches() ? Long.parseLong(m.group(2))
- : HdfsConstantsClient.GRANDFATHER_GENERATION_STAMP;
+ : HdfsConstants.GRANDFATHER_GENERATION_STAMP;
}
/**
@@ -100,7 +100,7 @@ public class Block implements Writable, Comparable<Block> {
}
public Block(final long blkid) {
- this(blkid, 0, HdfsConstantsClient.GRANDFATHER_GENERATION_STAMP);
+ this(blkid, 0, HdfsConstants.GRANDFATHER_GENERATION_STAMP);
}
public Block(Block blk) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a19fbc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
new file mode 100644
index 0000000..58c7ea1
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.util.StringUtils;
+
+@InterfaceAudience.Private
+public final class HdfsConstants {
+ // Long that indicates "leave current quota unchanged"
+ public static final long QUOTA_DONT_SET = Long.MAX_VALUE;
+ public static final long QUOTA_RESET = -1L;
+ public static final int BYTES_IN_INTEGER = Integer.SIZE / Byte.SIZE;
+ /**
+ * URI Scheme for hdfs://namenode/ URIs.
+ */
+ public static final String HDFS_URI_SCHEME = "hdfs";
+ public static final String MEMORY_STORAGE_POLICY_NAME = "LAZY_PERSIST";
+ public static final String ALLSSD_STORAGE_POLICY_NAME = "ALL_SSD";
+ public static final String ONESSD_STORAGE_POLICY_NAME = "ONE_SSD";
+ // TODO should be conf injected?
+ public static final int DEFAULT_DATA_SOCKET_SIZE = 128 * 1024;
+ /**
+ * A special path component contained in the path for a snapshot file/dir
+ */
+ public static final String DOT_SNAPSHOT_DIR = ".snapshot";
+ public static final String SEPARATOR_DOT_SNAPSHOT_DIR
+ = Path.SEPARATOR + DOT_SNAPSHOT_DIR;
+ public static final String SEPARATOR_DOT_SNAPSHOT_DIR_SEPARATOR
+ = Path.SEPARATOR + DOT_SNAPSHOT_DIR + Path.SEPARATOR;
+
+ /**
+ * Generation stamp of blocks that pre-date the introduction
+ * of a generation stamp.
+ */
+ public static final long GRANDFATHER_GENERATION_STAMP = 0;
+ /**
+ * The inode id validation of the lease check will be skipped when the request
+ * uses GRANDFATHER_INODE_ID for backward compatibility.
+ */
+ public static final long GRANDFATHER_INODE_ID = 0;
+ public static final byte BLOCK_STORAGE_POLICY_ID_UNSPECIFIED = 0;
+ /**
+ * A prefix put before the namenode URI inside the "service" field
+ * of a delegation token, indicating that the URI is a logical (HA)
+ * URI.
+ */
+ public static final String HA_DT_SERVICE_PREFIX = "ha-";
+ // The name of the SafeModeException. FileSystem should retry if it sees
+ // the below exception in RPC
+ public static final String SAFEMODE_EXCEPTION_CLASS_NAME =
+ "org.apache.hadoop.hdfs.server.namenode.SafeModeException";
+ /**
+ * HDFS Protocol Names:
+ */
+ public static final String CLIENT_NAMENODE_PROTOCOL_NAME =
+ "org.apache.hadoop.hdfs.protocol.ClientProtocol";
+
+ // SafeMode actions
+ public enum SafeModeAction {
+ SAFEMODE_LEAVE, SAFEMODE_ENTER, SAFEMODE_GET
+ }
+
+ public enum RollingUpgradeAction {
+ QUERY, PREPARE, FINALIZE;
+
+ private static final Map<String, RollingUpgradeAction> MAP
+ = new HashMap<>();
+ static {
+ MAP.put("", QUERY);
+ for(RollingUpgradeAction a : values()) {
+ MAP.put(a.name(), a);
+ }
+ }
+
+ /** Convert the given String to a RollingUpgradeAction. */
+ public static RollingUpgradeAction fromString(String s) {
+ return MAP.get(StringUtils.toUpperCase(s));
+ }
+ }
+
+ // type of the datanode report
+ public enum DatanodeReportType {
+ ALL, LIVE, DEAD, DECOMMISSIONING
+ }
+
+ /* Hidden constructor */
+ protected HdfsConstants() {
+ }
+}
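
As a usage note for the RollingUpgradeAction enum added above: fromString upper-cases its argument and looks it up in the statically built MAP, so the lookup is case-insensitive, the empty string maps to QUERY, and unrecognized names yield null. A small sketch, assuming the class exactly as added in this patch:

    // Sketch of RollingUpgradeAction.fromString behavior, per the MAP
    // static initializer in the file above.
    import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;

    public class RollingUpgradeActionDemo {
      public static void main(String[] args) {
        System.out.println(RollingUpgradeAction.fromString(""));         // QUERY
        System.out.println(RollingUpgradeAction.fromString("prepare"));  // PREPARE
        System.out.println(RollingUpgradeAction.fromString("Finalize")); // FINALIZE
        System.out.println(RollingUpgradeAction.fromString("bogus"));    // null
      }
    }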
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a19fbc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstantsClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstantsClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstantsClient.java
deleted file mode 100644
index 00f07e8..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstantsClient.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-
-@InterfaceAudience.Private
-public interface HdfsConstantsClient {
- /**
- * Generation stamp of blocks that pre-date the introduction
- * of a generation stamp.
- */
- long GRANDFATHER_GENERATION_STAMP = 0;
- /**
- * The inode id validation of lease check will be skipped when the request
- * uses GRANDFATHER_INODE_ID for backward compatibility.
- */
- long GRANDFATHER_INODE_ID = 0;
- byte BLOCK_STORAGE_POLICY_ID_UNSPECIFIED = 0;
- /**
- * A prefix put before the namenode URI inside the "service" field
- * of a delgation token, indicating that the URI is a logical (HA)
- * URI.
- */
- String HA_DT_SERVICE_PREFIX = "ha-";
- // The name of the SafeModeException. FileSystem should retry if it sees
- // the below exception in RPC
- String SAFEMODE_EXCEPTION_CLASS_NAME = "org.apache.hadoop.hdfs.server" +
- ".namenode.SafeModeException";
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a19fbc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index e263a0a..ca94840 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
-import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -123,11 +123,11 @@ class JsonUtilClient {
final long blockSize = ((Number) m.get("blockSize")).longValue();
final short replication = ((Number) m.get("replication")).shortValue();
final long fileId = m.containsKey("fileId") ?
- ((Number) m.get("fileId")).longValue() : HdfsConstantsClient.GRANDFATHER_INODE_ID;
+ ((Number) m.get("fileId")).longValue() : HdfsConstants.GRANDFATHER_INODE_ID;
final int childrenNum = getInt(m, "childrenNum", -1);
final byte storagePolicy = m.containsKey("storagePolicy") ?
(byte) ((Number) m.get("storagePolicy")).longValue() :
- HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
+ HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
return new HdfsFileStatus(len, type == WebHdfsConstants.PathType.DIRECTORY, replication,
blockSize, mTime, aTime, permission, owner, group,
symlink, DFSUtilClient.string2Bytes(localName),
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a19fbc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index e5d7925..ccf7bcf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -59,7 +59,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.HAUtilClient;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.web.resources.*;
@@ -170,7 +170,7 @@ public class WebHdfsFileSystem extends FileSystem
HdfsClientConfigKeys.HttpClient.RETRY_POLICY_ENABLED_DEFAULT,
HdfsClientConfigKeys.HttpClient.RETRY_POLICY_SPEC_KEY,
HdfsClientConfigKeys.HttpClient.RETRY_POLICY_SPEC_DEFAULT,
- HdfsConstantsClient.SAFEMODE_EXCEPTION_CLASS_NAME);
+ HdfsConstants.SAFEMODE_EXCEPTION_CLASS_NAME);
} else {
int maxFailoverAttempts = conf.getInt(
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a19fbc/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
index 161f3e9..05d806a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
@@ -44,8 +44,8 @@ import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.net.DNS;
@@ -2031,7 +2031,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
}
return new PATHCONF3Response(Nfs3Status.NFS3_OK, attrs, 0,
- HdfsConstants.MAX_PATH_LENGTH, true, false, false, true);
+ HdfsServerConstants.MAX_PATH_LENGTH, true, false, false, true);
} catch (IOException e) {
LOG.warn("Exception ", e);
int status = mapErrorStatus(e);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a19fbc/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fbbfca2..ac037f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -170,6 +170,9 @@ Release 2.8.0 - UNRELEASED
HDFS-8086. Move LeaseRenewer to the hdfs.client.impl package. (Takanobu
Asanuma via szetszwo)
+ HDFS-8249. Separate HdfsConstants into the client and the server side
+ class. (wheat9)
+
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a19fbc/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
index 16ffe52..96f2c37 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.contrib.bkjournal;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.namenode.JournalManager;
@@ -568,7 +568,7 @@ public class BookKeeperJournalManager implements JournalManager {
return;
}
streams.add(elis);
- if (elis.getLastTxId() == HdfsConstants.INVALID_TXID) {
+ if (elis.getLastTxId() == HdfsServerConstants.INVALID_TXID) {
return;
}
fromTxId = elis.getLastTxId() + 1;
@@ -589,7 +589,7 @@ public class BookKeeperJournalManager implements JournalManager {
long lastTxId = l.getLastTxId();
if (l.isInProgress()) {
lastTxId = recoverLastTxId(l, false);
- if (lastTxId == HdfsConstants.INVALID_TXID) {
+ if (lastTxId == HdfsServerConstants.INVALID_TXID) {
break;
}
}
@@ -634,7 +634,7 @@ public class BookKeeperJournalManager implements JournalManager {
EditLogLedgerMetadata l = EditLogLedgerMetadata.read(zkc, znode);
try {
long endTxId = recoverLastTxId(l, true);
- if (endTxId == HdfsConstants.INVALID_TXID) {
+ if (endTxId == HdfsServerConstants.INVALID_TXID) {
LOG.error("Unrecoverable corruption has occurred in segment "
+ l.toString() + " at path " + znode
+ ". Unable to continue recovery.");
@@ -788,10 +788,10 @@ public class BookKeeperJournalManager implements JournalManager {
in = new BookKeeperEditLogInputStream(lh, l, lastAddConfirmed);
- long endTxId = HdfsConstants.INVALID_TXID;
+ long endTxId = HdfsServerConstants.INVALID_TXID;
FSEditLogOp op = in.readOp();
while (op != null) {
- if (endTxId == HdfsConstants.INVALID_TXID
+ if (endTxId == HdfsServerConstants.INVALID_TXID
|| op.getTransactionId() == endTxId+1) {
endTxId = op.getTransactionId();
}
@@ -827,7 +827,7 @@ public class BookKeeperJournalManager implements JournalManager {
try {
EditLogLedgerMetadata editLogLedgerMetadata = EditLogLedgerMetadata
.read(zkc, legderMetadataPath);
- if (editLogLedgerMetadata.getLastTxId() != HdfsConstants.INVALID_TXID
+ if (editLogLedgerMetadata.getLastTxId() != HdfsServerConstants.INVALID_TXID
&& editLogLedgerMetadata.getLastTxId() < fromTxId) {
// exclude already read closed edits, but include inprogress edits
// as this will be handled in caller
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a19fbc/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/EditLogLedgerMetadata.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/EditLogLedgerMetadata.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/EditLogLedgerMetadata.java
index 039cb3a..2d1f8b9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/EditLogLedgerMetadata.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/EditLogLedgerMetadata.java
@@ -19,13 +19,13 @@ package org.apache.hadoop.contrib.bkjournal;
import java.io.IOException;
import java.util.Comparator;
+
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs.Ids;
import org.apache.zookeeper.KeeperException;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -67,7 +67,7 @@ public class EditLogLedgerMetadata {
this.dataLayoutVersion = dataLayoutVersion;
this.ledgerId = ledgerId;
this.firstTxId = firstTxId;
- this.lastTxId = HdfsConstants.INVALID_TXID;
+ this.lastTxId = HdfsServerConstants.INVALID_TXID;
this.inprogress = true;
}
@@ -107,7 +107,7 @@ public class EditLogLedgerMetadata {
}
void finalizeLedger(long newLastTxId) {
- assert this.lastTxId == HdfsConstants.INVALID_TXID;
+ assert this.lastTxId == HdfsServerConstants.INVALID_TXID;
this.lastTxId = newLastTxId;
this.inprogress = false;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a19fbc/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperEditLogStreams.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperEditLogStreams.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperEditLogStreams.java
index ec9692b..52e4568 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperEditLogStreams.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperEditLogStreams.java
@@ -27,7 +27,7 @@ import org.apache.bookkeeper.client.LedgerHandle;
import org.apache.bookkeeper.conf.ClientConfiguration;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.zookeeper.ZooKeeper;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@@ -68,7 +68,7 @@ public class TestBookKeeperEditLogStreams {
lh.close();
EditLogLedgerMetadata metadata = new EditLogLedgerMetadata("/foobar",
- HdfsConstants.NAMENODE_LAYOUT_VERSION, lh.getId(), 0x1234);
+ HdfsServerConstants.NAMENODE_LAYOUT_VERSION, lh.getId(), 0x1234);
try {
new BookKeeperEditLogInputStream(lh, metadata, -1);
fail("Shouldn't get this far, should have thrown");
@@ -77,7 +77,7 @@ public class TestBookKeeperEditLogStreams {
}
metadata = new EditLogLedgerMetadata("/foobar",
- HdfsConstants.NAMENODE_LAYOUT_VERSION, lh.getId(), 0x1234);
+ HdfsServerConstants.NAMENODE_LAYOUT_VERSION, lh.getId(), 0x1234);
try {
new BookKeeperEditLogInputStream(lh, metadata, 0);
fail("Shouldn't get this far, should have thrown");
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a19fbc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 205fbe8..acfb41b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -545,10 +545,10 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
} catch (IOException e) {
// Abort if the lease has already expired.
final long elapsed = Time.monotonicNow() - getLastLeaseRenewal();
- if (elapsed > HdfsConstants.LEASE_HARDLIMIT_PERIOD) {
+ if (elapsed > HdfsServerConstants.LEASE_HARDLIMIT_PERIOD) {
LOG.warn("Failed to renew lease for " + clientName + " for "
+ (elapsed/1000) + " seconds (>= hard-limit ="
- + (HdfsConstants.LEASE_HARDLIMIT_PERIOD/1000) + " seconds.) "
+ + (HdfsServerConstants.LEASE_HARDLIMIT_PERIOD/1000) + " seconds.) "
+ "Closing all files being written ...", e);
closeAllFilesBeingWritten(true);
} else {
@@ -1901,7 +1901,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
//connect to a datanode
IOStreamPair pair = connectToDN(datanodes[j], timeout, lb);
out = new DataOutputStream(new BufferedOutputStream(pair.out,
- HdfsConstants.SMALL_BUFFER_SIZE));
+ HdfsServerConstants.SMALL_BUFFER_SIZE));
in = new DataInputStream(pair.in);
if (LOG.isDebugEnabled()) {
@@ -2066,7 +2066,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
try {
DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
- HdfsConstants.SMALL_BUFFER_SIZE));
+ HdfsServerConstants.SMALL_BUFFER_SIZE));
DataInputStream in = new DataInputStream(pair.in);
new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a19fbc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 12d5ad0..bb67e98 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -81,20 +81,17 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
-import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
-import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.ToolRunner;
import com.google.common.annotations.VisibleForTesting;
@@ -263,7 +260,7 @@ public class DFSUtil {
* @return true, if the component is reserved
*/
public static boolean isReservedPathComponent(String component) {
- for (String reserved : HdfsConstants.RESERVED_PATH_COMPONENTS) {
+ for (String reserved : HdfsServerConstants.RESERVED_PATH_COMPONENTS) {
if (component.equals(reserved)) {
return true;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a19fbc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index 8e874eb..43787ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -71,6 +71,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
import org.apache.hadoop.hdfs.util.ByteArrayManager;
@@ -1144,7 +1145,7 @@ class DataStreamer extends Daemon {
unbufOut = saslStreams.out;
unbufIn = saslStreams.in;
out = new DataOutputStream(new BufferedOutputStream(unbufOut,
- HdfsConstants.SMALL_BUFFER_SIZE));
+ HdfsServerConstants.SMALL_BUFFER_SIZE));
in = new DataInputStream(unbufIn);
//send the TRANSFER_BLOCK request
@@ -1424,7 +1425,7 @@ class DataStreamer extends Daemon {
unbufOut = saslStreams.out;
unbufIn = saslStreams.in;
out = new DataOutputStream(new BufferedOutputStream(unbufOut,
- HdfsConstants.SMALL_BUFFER_SIZE));
+ HdfsServerConstants.SMALL_BUFFER_SIZE));
blockReplyStream = new DataInputStream(unbufIn);
//
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a19fbc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
index 913baea..09ea11e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.protocolPB.JournalProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
@@ -417,7 +418,7 @@ public class NameNodeProxies {
RetryPolicy createPolicy = RetryPolicies
.retryUpToMaximumCountWithFixedSleep(5,
- HdfsConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
+ HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
Map<Class<? extends Exception>, RetryPolicy> remoteExceptionToPolicyMap
= new HashMap<Class<? extends Exception>, RetryPolicy>();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a19fbc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java
index 4cdf168..99323bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSOutputStream;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.StringUtils;
@@ -165,7 +165,7 @@ public class LeaseRenewer {
/** The time in milliseconds that the map became empty. */
private long emptyTime = Long.MAX_VALUE;
/** A fixed lease renewal time period in milliseconds */
- private long renewal = HdfsConstants.LEASE_SOFTLIMIT_PERIOD/2;
+ private long renewal = HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD/2;
/** A daemon for renewing lease */
private Daemon daemon = null;
@@ -372,7 +372,7 @@ public class LeaseRenewer {
//update renewal time
if (renewal == dfsc.getConf().getHdfsTimeout()/2) {
- long min = HdfsConstants.LEASE_SOFTLIMIT_PERIOD;
+ long min = HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD;
for(DFSClient c : dfsclients) {
final int timeout = c.getConf().getHdfsTimeout();
if (timeout > 0 && timeout < min) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a19fbc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 2b07789..faa0460 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -962,8 +962,8 @@ public interface ClientProtocol {
* <br><br>
*
* The quota can have three types of values: (1) 0 or more will set
- * the quota to that value, (2) {@link HdfsConstants#QUOTA_DONT_SET} implies
- * the quota will not be changed, and (3) {@link HdfsConstants#QUOTA_RESET}
+ * the quota to that value, (2) {@link HdfsConstants#QUOTA_DONT_SET} implies
+ * the quota will not be changed, and (3) {@link HdfsConstants#QUOTA_RESET}
* implies the quota will be reset. Any other value is a runtime error.
*
* @throws AccessControlException permission denied
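
To make the three value types concrete, here is a hedged sketch using DistributedFileSystem#setQuota(Path, long, long); the path and the figure 10000 are invented for illustration:

    // Sketch of the three quota value types described in the javadoc
    // above. The directory and quota figures are made up.
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants;

    public class QuotaExample {
      static void demo(DistributedFileSystem dfs) throws Exception {
        Path dir = new Path("/user/example");
        // (1) 0 or more: set the namespace quota to that value;
        // (2) QUOTA_DONT_SET: leave the storage-space quota unchanged.
        dfs.setQuota(dir, 10000, HdfsConstants.QUOTA_DONT_SET);
        // (3) QUOTA_RESET: clear the namespace quota.
        dfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_DONT_SET);
      }
    }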
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a19fbc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
deleted file mode 100644
index d16e267..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ /dev/null
@@ -1,176 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
-import org.apache.hadoop.util.StringUtils;
-
-/************************************
- * Some handy constants
- *
- ************************************/
-@InterfaceAudience.Private
-public class HdfsConstants {
- /* Hidden constructor */
- protected HdfsConstants() {
- }
-
- /**
- * HDFS Protocol Names:
- */
- public static final String CLIENT_NAMENODE_PROTOCOL_NAME =
- "org.apache.hadoop.hdfs.protocol.ClientProtocol";
- public static final String CLIENT_DATANODE_PROTOCOL_NAME =
- "org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol";
-
-
- public static final int MIN_BLOCKS_FOR_WRITE = 1;
-
- // Long that indicates "leave current quota unchanged"
- public static final long QUOTA_DONT_SET = Long.MAX_VALUE;
- public static final long QUOTA_RESET = -1L;
-
- //
- // Timeouts, constants
- //
- public static final long LEASE_SOFTLIMIT_PERIOD = 60 * 1000;
- public static final long LEASE_HARDLIMIT_PERIOD = 60 * LEASE_SOFTLIMIT_PERIOD;
- public static final long LEASE_RECOVER_PERIOD = 10 * 1000; // in ms
-
- // We need to limit the length and depth of a path in the filesystem.
- // HADOOP-438
- // Currently we set the maximum length to 8k characters and the maximum depth
- // to 1k.
- public static final int MAX_PATH_LENGTH = 8000;
- public static final int MAX_PATH_DEPTH = 1000;
-
- // TODO should be conf injected?
- public static final int DEFAULT_DATA_SOCKET_SIZE = 128 * 1024;
- public static final int IO_FILE_BUFFER_SIZE = new HdfsConfiguration().getInt(
- DFSConfigKeys.IO_FILE_BUFFER_SIZE_KEY,
- DFSConfigKeys.IO_FILE_BUFFER_SIZE_DEFAULT);
- // Used for writing header etc.
- public static final int SMALL_BUFFER_SIZE = Math.min(IO_FILE_BUFFER_SIZE / 2,
- 512);
-
- public static final int BYTES_IN_INTEGER = Integer.SIZE / Byte.SIZE;
-
- // SafeMode actions
- public static enum SafeModeAction {
- SAFEMODE_LEAVE, SAFEMODE_ENTER, SAFEMODE_GET;
- }
-
- public static enum RollingUpgradeAction {
- QUERY, PREPARE, FINALIZE;
-
- private static final Map<String, RollingUpgradeAction> MAP
- = new HashMap<String, RollingUpgradeAction>();
- static {
- MAP.put("", QUERY);
- for(RollingUpgradeAction a : values()) {
- MAP.put(a.name(), a);
- }
- }
-
- /** Covert the given String to a RollingUpgradeAction. */
- public static RollingUpgradeAction fromString(String s) {
- return MAP.get(StringUtils.toUpperCase(s));
- }
- }
-
- // type of the datanode report
- public static enum DatanodeReportType {
- ALL, LIVE, DEAD, DECOMMISSIONING
- }
-
- // An invalid transaction ID that will never be seen in a real namesystem.
- public static final long INVALID_TXID = -12345;
-
- // Number of generation stamps reserved for legacy blocks.
- public static final long RESERVED_GENERATION_STAMPS_V1 =
- 1024L * 1024 * 1024 * 1024;
-
- /**
- * URI Scheme for hdfs://namenode/ URIs.
- */
- public static final String HDFS_URI_SCHEME = "hdfs";
-
- /**
- * Path components that are reserved in HDFS.
- * <p>
- * .reserved is only reserved under root ("/").
- */
- public static final String[] RESERVED_PATH_COMPONENTS = new String[] {
- HdfsConstants.DOT_SNAPSHOT_DIR,
- FSDirectory.DOT_RESERVED_STRING
- };
-
- /**
- * Current layout version for NameNode.
- * Please see {@link NameNodeLayoutVersion.Feature} on adding new layout version.
- */
- public static final int NAMENODE_LAYOUT_VERSION
- = NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;
-
- /**
- * Current layout version for DataNode.
- * Please see {@link DataNodeLayoutVersion.Feature} on adding new layout version.
- */
- public static final int DATANODE_LAYOUT_VERSION
- = DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION;
-
- /**
- * A special path component contained in the path for a snapshot file/dir
- */
- public static final String DOT_SNAPSHOT_DIR = ".snapshot";
-
- public static final byte[] DOT_SNAPSHOT_DIR_BYTES
- = DFSUtil.string2Bytes(DOT_SNAPSHOT_DIR);
-
- public static final String SEPARATOR_DOT_SNAPSHOT_DIR
- = Path.SEPARATOR + DOT_SNAPSHOT_DIR;
-
- public static final String SEPARATOR_DOT_SNAPSHOT_DIR_SEPARATOR
- = Path.SEPARATOR + DOT_SNAPSHOT_DIR + Path.SEPARATOR;
-
- public static final String MEMORY_STORAGE_POLICY_NAME = "LAZY_PERSIST";
- public static final String ALLSSD_STORAGE_POLICY_NAME = "ALL_SSD";
- public static final String ONESSD_STORAGE_POLICY_NAME = "ONE_SSD";
- public static final String HOT_STORAGE_POLICY_NAME = "HOT";
- public static final String WARM_STORAGE_POLICY_NAME = "WARM";
- public static final String COLD_STORAGE_POLICY_NAME = "COLD";
-
- public static final byte MEMORY_STORAGE_POLICY_ID = 15;
- public static final byte ALLSSD_STORAGE_POLICY_ID = 12;
- public static final byte ONESSD_STORAGE_POLICY_ID = 10;
- public static final byte HOT_STORAGE_POLICY_ID = 7;
- public static final byte WARM_STORAGE_POLICY_ID = 5;
- public static final byte COLD_STORAGE_POLICY_ID = 2;
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a19fbc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
index 518e91a..3067696 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
@@ -61,7 +61,7 @@ public class SnapshottableDirectoryStatus {
int snapshotNumber, int snapshotQuota, byte[] parentFullPath) {
this.dirStatus = new HdfsFileStatus(0, true, 0, 0, modification_time,
access_time, permission, owner, group, null, localName, inodeId,
- childrenNum, null, HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
+ childrenNum, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
this.snapshotNumber = snapshotNumber;
this.snapshotQuota = snapshotQuota;
this.parentFullPath = parentFullPath;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a19fbc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolPB.java
index 4f25694..0d3796c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolPB.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.security.token.TokenInfo;
@KerberosInfo(
serverPrincipal = DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY)
@TokenInfo(DelegationTokenSelector.class)
-@ProtocolInfo(protocolName = HdfsConstants.CLIENT_NAMENODE_PROTOCOL_NAME,
+@ProtocolInfo(protocolName = HdfsConstants.CLIENT_NAMENODE_PROTOCOL_NAME,
protocolVersion = 1)
/**
* Protocol that clients use to communicate with the NameNode.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a19fbc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index bbf05e7..74c27ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
-import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -536,7 +536,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
boolean result =
server.complete(req.getSrc(), req.getClientName(),
req.hasLast() ? PBHelper.convert(req.getLast()) : null,
- req.hasFileId() ? req.getFileId() : HdfsConstantsClient.GRANDFATHER_INODE_ID);
+ req.hasFileId() ? req.getFileId() : HdfsConstants.GRANDFATHER_INODE_ID);
return CompleteResponseProto.newBuilder().setResult(result).build();
} catch (IOException e) {
throw new ServiceException(e);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a19fbc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 689b54d..1f2ffaf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -73,10 +73,10 @@ import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -1437,12 +1437,12 @@ public class PBHelper {
fs.getFileType().equals(FileType.IS_SYMLINK) ?
fs.getSymlink().toByteArray() : null,
fs.getPath().toByteArray(),
- fs.hasFileId()? fs.getFileId(): HdfsConstantsClient.GRANDFATHER_INODE_ID,
+ fs.hasFileId()? fs.getFileId(): HdfsConstants.GRANDFATHER_INODE_ID,
fs.hasLocations() ? PBHelper.convert(fs.getLocations()) : null,
fs.hasChildrenNum() ? fs.getChildrenNum() : -1,
fs.hasFileEncryptionInfo() ? convert(fs.getFileEncryptionInfo()) : null,
fs.hasStoragePolicy() ? (byte) fs.getStoragePolicy()
- : HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
+ : HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
}
public static SnapshottableDirectoryStatus convert(
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a19fbc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
index 2d3215b..3e32d41 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.qjournal.protocol.JournalOutOfSyncException;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol;
@@ -46,6 +45,7 @@ import org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo;
import org.apache.hadoop.hdfs.qjournal.protocolPB.QJournalProtocolPB;
import org.apache.hadoop.hdfs.qjournal.protocolPB.QJournalProtocolTranslatorPB;
import org.apache.hadoop.hdfs.qjournal.server.GetJournalEditServlet;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
@@ -92,7 +92,7 @@ public class IPCLoggerChannel implements AsyncLogger {
private final ListeningExecutorService parallelExecutor;
private long ipcSerial = 0;
private long epoch = -1;
- private long committedTxId = HdfsConstants.INVALID_TXID;
+ private long committedTxId = HdfsServerConstants.INVALID_TXID;
private final String journalId;
private final NamespaceInfo nsInfo;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a19fbc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/RequestInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/RequestInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/RequestInfo.java
index dfd1e4d..2569aad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/RequestInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/RequestInfo.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.hdfs.qjournal.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
@InterfaceAudience.Private
public class RequestInfo {
@@ -60,6 +60,6 @@ public class RequestInfo {
}
public boolean hasCommittedTxId() {
- return (committedTxId != HdfsConstants.INVALID_TXID);
+ return (committedTxId != HdfsServerConstants.INVALID_TXID);
}
}
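(The IPCLoggerChannel and RequestInfo hunks above are two halves of one idiom: a transaction-id field seeded with the server-side sentinel, and a presence check against it. A minimal sketch of that idiom; the -12345 value is copied from the HdfsServerConstants hunk later in this commit, and the class shape is simplified for illustration.)

// Sketch of the sentinel-txid idiom from IPCLoggerChannel/RequestInfo.
// INVALID_TXID matches the value declared further down in this commit.
final class TxIdSentinel {
  static final long INVALID_TXID = -12345;

  private long committedTxId = INVALID_TXID; // "not yet known"

  boolean hasCommittedTxId() {
    return committedTxId != INVALID_TXID;
  }

  void setCommittedTxId(long txid) {
    committedTxId = txid;
  }

  public static void main(String[] args) {
    TxIdSentinel info = new TxIdSentinel();
    System.out.println(info.hasCommittedTxId()); // false until a txid arrives
    info.setCommittedTxId(42);
    System.out.println(info.hasCommittedTxId()); // true
  }
}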
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a19fbc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java
index 3a4e392..cc4c3c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java
@@ -21,7 +21,6 @@ import java.io.IOException;
import java.net.URL;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocolPB.JournalProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol;
@@ -66,6 +65,7 @@ import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogs
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto;
import org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
@@ -263,7 +263,7 @@ public class QJournalProtocolServerSideTranslatorPB implements QJournalProtocolP
reqInfo.getEpoch(),
reqInfo.getIpcSerialNumber(),
reqInfo.hasCommittedTxId() ?
- reqInfo.getCommittedTxId() : HdfsConstants.INVALID_TXID);
+ reqInfo.getCommittedTxId() : HdfsServerConstants.INVALID_TXID);
}
@Override
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a19fbc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
index 389b5d1..4894a41 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
@@ -34,7 +34,6 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.qjournal.protocol.JournalNotFormattedException;
import org.apache.hadoop.hdfs.qjournal.protocol.JournalOutOfSyncException;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol;
@@ -44,6 +43,7 @@ import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.Persisted
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto;
import org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.StorageErrorReporter;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
@@ -81,8 +81,8 @@ public class Journal implements Closeable {
// Current writing state
private EditLogOutputStream curSegment;
- private long curSegmentTxId = HdfsConstants.INVALID_TXID;
- private long nextTxId = HdfsConstants.INVALID_TXID;
+ private long curSegmentTxId = HdfsServerConstants.INVALID_TXID;
+ private long nextTxId = HdfsServerConstants.INVALID_TXID;
private long highestWrittenTxId = 0;
private final String journalId;
@@ -170,7 +170,7 @@ public class Journal implements Closeable {
new File(currentDir, LAST_WRITER_EPOCH), 0);
this.committedTxnId = new BestEffortLongFile(
new File(currentDir, COMMITTED_TXID_FILENAME),
- HdfsConstants.INVALID_TXID);
+ HdfsServerConstants.INVALID_TXID);
}
/**
@@ -191,7 +191,7 @@ public class Journal implements Closeable {
EditLogFile latestLog = files.remove(files.size() - 1);
latestLog.scanLog();
LOG.info("Latest log is " + latestLog);
- if (latestLog.getLastTxId() == HdfsConstants.INVALID_TXID) {
+ if (latestLog.getLastTxId() == HdfsServerConstants.INVALID_TXID) {
// the log contains no transactions
LOG.warn("Latest log " + latestLog + " has no transactions. " +
"moving it aside and looking for previous log");
@@ -327,7 +327,7 @@ public class Journal implements Closeable {
curSegment.abort();
curSegment = null;
- curSegmentTxId = HdfsConstants.INVALID_TXID;
+ curSegmentTxId = HdfsServerConstants.INVALID_TXID;
}
/**
@@ -565,7 +565,7 @@ public class Journal implements Closeable {
if (curSegment != null) {
curSegment.close();
curSegment = null;
- curSegmentTxId = HdfsConstants.INVALID_TXID;
+ curSegmentTxId = HdfsServerConstants.INVALID_TXID;
}
checkSync(nextTxId == endTxId + 1,
@@ -677,7 +677,7 @@ public class Journal implements Closeable {
if (elf.isInProgress()) {
elf.scanLog();
}
- if (elf.getLastTxId() == HdfsConstants.INVALID_TXID) {
+ if (elf.getLastTxId() == HdfsServerConstants.INVALID_TXID) {
LOG.info("Edit log file " + elf + " appears to be empty. " +
"Moving it aside...");
elf.moveAsideEmptyFile();
@@ -727,7 +727,7 @@ public class Journal implements Closeable {
}
builder.setLastWriterEpoch(lastWriterEpoch.get());
- if (committedTxnId.get() != HdfsConstants.INVALID_TXID) {
+ if (committedTxnId.get() != HdfsServerConstants.INVALID_TXID) {
builder.setLastCommittedTxId(committedTxnId.get());
}
@@ -1027,7 +1027,7 @@ public class Journal implements Closeable {
new File(previousDir, LAST_WRITER_EPOCH), 0);
BestEffortLongFile prevCommittedTxnId = new BestEffortLongFile(
new File(previousDir, COMMITTED_TXID_FILENAME),
- HdfsConstants.INVALID_TXID);
+ HdfsServerConstants.INVALID_TXID);
lastPromisedEpoch = new PersistentLongFile(
new File(currentDir, LAST_PROMISED_FILENAME), 0);
@@ -1035,7 +1035,7 @@ public class Journal implements Closeable {
new File(currentDir, LAST_WRITER_EPOCH), 0);
committedTxnId = new BestEffortLongFile(
new File(currentDir, COMMITTED_TXID_FILENAME),
- HdfsConstants.INVALID_TXID);
+ HdfsServerConstants.INVALID_TXID);
try {
lastPromisedEpoch.set(prevLastPromisedEpoch.get());
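(The Journal hunks above all revolve around one invariant: whenever the current segment is aborted or finalized, curSegmentTxId falls back to the INVALID_TXID sentinel so stale segment state cannot leak into later checks. A condensed, hypothetical sketch of that bookkeeping — the method names here are stand-ins, not the real Journal API.)

// Condensed sketch of the segment bookkeeping touched above; only the
// INVALID_TXID value is taken from this commit, the rest is illustrative.
final class SegmentState {
  static final long INVALID_TXID = -12345;

  private long curSegmentTxId = INVALID_TXID;
  private long nextTxId = INVALID_TXID;

  void startSegment(long firstTxId) {
    curSegmentTxId = firstTxId;
    nextTxId = firstTxId;
  }

  void abortCurSegment() {
    // Mirrors the abort/finalize hunks: drop the segment, reset the marker.
    curSegmentTxId = INVALID_TXID;
  }

  boolean isWriting() {
    return curSegmentTxId != INVALID_TXID;
  }
}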
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a19fbc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
index 144cf9d..f7c299d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
@@ -54,7 +54,6 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
@@ -311,9 +310,9 @@ public class Dispatcher {
unbufOut = saslStreams.out;
unbufIn = saslStreams.in;
out = new DataOutputStream(new BufferedOutputStream(unbufOut,
- HdfsConstants.IO_FILE_BUFFER_SIZE));
+ HdfsServerConstants.IO_FILE_BUFFER_SIZE));
in = new DataInputStream(new BufferedInputStream(unbufIn,
- HdfsConstants.IO_FILE_BUFFER_SIZE));
+ HdfsServerConstants.IO_FILE_BUFFER_SIZE));
sendRequest(out, eb, accessToken);
receiveResponse(in);
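(The Dispatcher hunk wraps both directions of the transfer socket in buffers sized by the server-side IO_FILE_BUFFER_SIZE, which per the HdfsServerConstants hunk below is read from io.file.buffer.size. A self-contained sketch of that wrapping, with plain streams standing in for the SASL-negotiated pair and 4096 as an assumed default buffer size.)

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.InputStream;
import java.io.OutputStream;

// Sketch of the buffered-wrapping pattern in Dispatcher. The real buffer
// size comes from HdfsServerConstants.IO_FILE_BUFFER_SIZE; 4096 is only an
// assumed stand-in for this sketch.
final class BufferedPair {
  static final int IO_FILE_BUFFER_SIZE = 4096;

  static DataOutputStream wrapOut(OutputStream unbufOut) {
    return new DataOutputStream(
        new BufferedOutputStream(unbufOut, IO_FILE_BUFFER_SIZE));
  }

  static DataInputStream wrapIn(InputStream unbufIn) {
    return new DataInputStream(
        new BufferedInputStream(unbufIn, IO_FILE_BUFFER_SIZE));
  }
}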
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a19fbc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index eac6ed2..feebd87 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -21,8 +21,8 @@ import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import java.io.IOException;
@@ -56,7 +56,7 @@ public class BlockIdManager {
private final SequentialBlockIdGenerator blockIdGenerator;
public BlockIdManager(BlockManager blockManager) {
- this.generationStampV1Limit = HdfsConstantsClient.GRANDFATHER_GENERATION_STAMP;
+ this.generationStampV1Limit = HdfsConstants.GRANDFATHER_GENERATION_STAMP;
this.blockIdGenerator = new SequentialBlockIdGenerator(blockManager);
}
@@ -70,7 +70,7 @@ public class BlockIdManager {
Preconditions.checkState(generationStampV2.getCurrentValue() ==
GenerationStamp.LAST_RESERVED_STAMP);
generationStampV2.skipTo(generationStampV1.getCurrentValue() +
- HdfsConstants.RESERVED_GENERATION_STAMPS_V1);
+ HdfsServerConstants.RESERVED_GENERATION_STAMPS_V1);
generationStampV1Limit = generationStampV2.getCurrentValue();
return generationStampV2.getCurrentValue();
@@ -83,7 +83,7 @@ public class BlockIdManager {
* @param stamp set generation stamp limit to this value
*/
public void setGenerationStampV1Limit(long stamp) {
- Preconditions.checkState(generationStampV1Limit == HdfsConstantsClient
+ Preconditions.checkState(generationStampV1Limit == HdfsConstants
.GRANDFATHER_GENERATION_STAMP);
generationStampV1Limit = stamp;
}
@@ -204,6 +204,6 @@ public class BlockIdManager {
generationStampV2.setCurrentValue(GenerationStamp.LAST_RESERVED_STAMP);
getBlockIdGenerator().setCurrentValue(SequentialBlockIdGenerator
.LAST_RESERVED_BLOCK_ID);
- generationStampV1Limit = HdfsConstantsClient.GRANDFATHER_GENERATION_STAMP;
+ generationStampV1Limit = HdfsConstants.GRANDFATHER_GENERATION_STAMP;
}
}
\ No newline at end of file
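(The arithmetic the BlockIdManager hunk touches: on upgrade, V2 generation stamps start where V1 left off plus a reserved gap of 1024L * 1024 * 1024 * 1024 stamps, and everything below that limit is treated as a legacy stamp. A worked sketch under the values visible in this commit; the holder class is simplified.)

// Sketch of the generation-stamp upgrade arithmetic from the hunk above.
// RESERVED_GENERATION_STAMPS_V1 is copied from the HdfsServerConstants hunk
// in this commit.
final class GenerationStampUpgrade {
  static final long RESERVED_GENERATION_STAMPS_V1 = 1024L * 1024 * 1024 * 1024;

  static long upgradeToV2(long currentV1Stamp) {
    // All stamps below this limit are treated as legacy (V1) after upgrade.
    return currentV1Stamp + RESERVED_GENERATION_STAMPS_V1;
  }

  public static void main(String[] args) {
    long v1Limit = upgradeToV2(1000);
    System.out.println(v1Limit); // 1099511628776 = 1000 + 2^40
  }
}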
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a19fbc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index c2752ac..620d2a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -29,8 +29,8 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
@@ -792,7 +792,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
}
}
- final long requiredSize = blockSize * HdfsConstants.MIN_BLOCKS_FOR_WRITE;
+ final long requiredSize = blockSize * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE;
final long scheduledSize = blockSize * node.getBlocksScheduled(storage.getStorageType());
final long remaining = node.getRemaining(storage.getStorageType());
if (requiredSize > remaining - scheduledSize) {
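(The placement hunk's space check reads: a node is rejected unless blockSize * MIN_BLOCKS_FOR_WRITE fits in what remains after already-scheduled writes are accounted for. With MIN_BLOCKS_FOR_WRITE = 1, as declared in the HdfsServerConstants hunk below, that is one full block of headroom. A minimal sketch of just the predicate, with the node's counters flattened into parameters.)

// Sketch of the free-space predicate from the hunk above. Only
// MIN_BLOCKS_FOR_WRITE = 1 is taken from this commit.
final class PlacementSpaceCheck {
  static final int MIN_BLOCKS_FOR_WRITE = 1;

  static boolean hasRoom(long blockSize, long remaining, long scheduledBlocks) {
    long requiredSize = blockSize * MIN_BLOCKS_FOR_WRITE;
    long scheduledSize = blockSize * scheduledBlocks;
    // Reject the node if the required headroom would not fit once scheduled
    // writes land.
    return requiredSize <= remaining - scheduledSize;
  }

  public static void main(String[] args) {
    long blk = 128L * 1024 * 1024; // 128 MB block
    System.out.println(hasRoom(blk, 3 * blk, 2)); // true: one block still free
    System.out.println(hasRoom(blk, 3 * blk, 3)); // false: headroom all scheduled
  }
}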
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a19fbc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
index 942227e..2a71c29 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.util.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -47,39 +48,39 @@ public class BlockStoragePolicySuite {
public static BlockStoragePolicySuite createDefaultSuite() {
final BlockStoragePolicy[] policies =
new BlockStoragePolicy[1 << ID_BIT_LENGTH];
- final byte lazyPersistId = HdfsConstants.MEMORY_STORAGE_POLICY_ID;
- policies[lazyPersistId] = new BlockStoragePolicy(lazyPersistId,
+ final byte lazyPersistId = HdfsServerConstants.MEMORY_STORAGE_POLICY_ID;
+ policies[lazyPersistId] = new BlockStoragePolicy(lazyPersistId,
HdfsConstants.MEMORY_STORAGE_POLICY_NAME,
new StorageType[]{StorageType.RAM_DISK, StorageType.DISK},
new StorageType[]{StorageType.DISK},
new StorageType[]{StorageType.DISK},
true); // Cannot be changed on regular files, but inherited.
- final byte allssdId = HdfsConstants.ALLSSD_STORAGE_POLICY_ID;
+ final byte allssdId = HdfsServerConstants.ALLSSD_STORAGE_POLICY_ID;
policies[allssdId] = new BlockStoragePolicy(allssdId,
HdfsConstants.ALLSSD_STORAGE_POLICY_NAME,
new StorageType[]{StorageType.SSD},
new StorageType[]{StorageType.DISK},
new StorageType[]{StorageType.DISK});
- final byte onessdId = HdfsConstants.ONESSD_STORAGE_POLICY_ID;
+ final byte onessdId = HdfsServerConstants.ONESSD_STORAGE_POLICY_ID;
policies[onessdId] = new BlockStoragePolicy(onessdId,
HdfsConstants.ONESSD_STORAGE_POLICY_NAME,
new StorageType[]{StorageType.SSD, StorageType.DISK},
new StorageType[]{StorageType.SSD, StorageType.DISK},
new StorageType[]{StorageType.SSD, StorageType.DISK});
- final byte hotId = HdfsConstants.HOT_STORAGE_POLICY_ID;
+ final byte hotId = HdfsServerConstants.HOT_STORAGE_POLICY_ID;
policies[hotId] = new BlockStoragePolicy(hotId,
- HdfsConstants.HOT_STORAGE_POLICY_NAME,
+ HdfsServerConstants.HOT_STORAGE_POLICY_NAME,
new StorageType[]{StorageType.DISK}, StorageType.EMPTY_ARRAY,
new StorageType[]{StorageType.ARCHIVE});
- final byte warmId = HdfsConstants.WARM_STORAGE_POLICY_ID;
+ final byte warmId = HdfsServerConstants.WARM_STORAGE_POLICY_ID;
policies[warmId] = new BlockStoragePolicy(warmId,
- HdfsConstants.WARM_STORAGE_POLICY_NAME,
+ HdfsServerConstants.WARM_STORAGE_POLICY_NAME,
new StorageType[]{StorageType.DISK, StorageType.ARCHIVE},
new StorageType[]{StorageType.DISK, StorageType.ARCHIVE},
new StorageType[]{StorageType.DISK, StorageType.ARCHIVE});
- final byte coldId = HdfsConstants.COLD_STORAGE_POLICY_ID;
+ final byte coldId = HdfsServerConstants.COLD_STORAGE_POLICY_ID;
policies[coldId] = new BlockStoragePolicy(coldId,
- HdfsConstants.COLD_STORAGE_POLICY_NAME,
+ HdfsServerConstants.COLD_STORAGE_POLICY_NAME,
new StorageType[]{StorageType.ARCHIVE}, StorageType.EMPTY_ARRAY,
StorageType.EMPTY_ARRAY);
return new BlockStoragePolicySuite(hotId, policies);
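(createDefaultSuite() above builds a sparse array of 1 << ID_BIT_LENGTH slots indexed directly by the policy ID byte, so policy lookup is a plain array index. A toy sketch of that indexing using the byte IDs declared in the HdfsServerConstants hunk below; ID_BIT_LENGTH = 4 and the non-HOT/WARM/COLD name strings are assumptions consistent with IDs up to 15.)

// Toy sketch of the ID-indexed policy table from BlockStoragePolicySuite.
// The byte IDs (15, 12, 10, 7, 5, 2) come from this commit; the names for
// the SSD and memory policies are assumed for the sketch.
final class PolicyTableSketch {
  static final int ID_BIT_LENGTH = 4; // assumption: 16 slots covers id 15

  public static void main(String[] args) {
    String[] policies = new String[1 << ID_BIT_LENGTH]; // most slots stay null
    policies[15] = "LAZY_PERSIST";
    policies[12] = "ALL_SSD";
    policies[10] = "ONE_SSD";
    policies[7]  = "HOT";
    policies[5]  = "WARM";
    policies[2]  = "COLD";

    byte id = 7;
    System.out.println(policies[id]); // HOT: lookup is a direct array index
  }
}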
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a19fbc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
index 8af3af7..6934d84 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
@@ -24,9 +24,16 @@ import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
+import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.MetaRecoveryContext;
import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
import org.apache.hadoop.util.StringUtils;
/************************************
@@ -35,17 +42,71 @@ import org.apache.hadoop.util.StringUtils;
************************************/
@InterfaceAudience.Private
-public final class HdfsServerConstants {
- /* Hidden constructor */
- private HdfsServerConstants() { }
-
+public interface HdfsServerConstants {
+ int MIN_BLOCKS_FOR_WRITE = 1;
+ //
+ // Timeouts, constants
+ //
+ long LEASE_SOFTLIMIT_PERIOD = 60 * 1000;
+ long LEASE_HARDLIMIT_PERIOD = 60 * LEASE_SOFTLIMIT_PERIOD;
+ long LEASE_RECOVER_PERIOD = 10 * 1000; // in ms
+ // We need to limit the length and depth of a path in the filesystem.
+ // HADOOP-438
+ // Currently we set the maximum length to 8k characters and the maximum depth
+ // to 1k.
+ int MAX_PATH_LENGTH = 8000;
+ int MAX_PATH_DEPTH = 1000;
+ int IO_FILE_BUFFER_SIZE = new HdfsConfiguration().getInt(
+ CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
+ CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
+ // Used for writing header etc.
+ int SMALL_BUFFER_SIZE = Math.min(IO_FILE_BUFFER_SIZE / 2,
+ 512);
+ // An invalid transaction ID that will never be seen in a real namesystem.
+ long INVALID_TXID = -12345;
+ // Number of generation stamps reserved for legacy blocks.
+ long RESERVED_GENERATION_STAMPS_V1 =
+ 1024L * 1024 * 1024 * 1024;
+ /**
+ * Current layout version for NameNode.
+ * Please see {@link NameNodeLayoutVersion.Feature} on adding new layout version.
+ */
+ int NAMENODE_LAYOUT_VERSION
+ = NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;
+ /**
+ * Current layout version for DataNode.
+ * Please see {@link DataNodeLayoutVersion.Feature} on adding new layout version.
+ */
+ int DATANODE_LAYOUT_VERSION
+ = DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION;
+ /**
+ * Path components that are reserved in HDFS.
+ * <p>
+ * .reserved is only reserved under root ("/").
+ */
+ String[] RESERVED_PATH_COMPONENTS = new String[] {
+ HdfsConstants.DOT_SNAPSHOT_DIR,
+ FSDirectory.DOT_RESERVED_STRING
+ };
+ byte[] DOT_SNAPSHOT_DIR_BYTES
+ = DFSUtil.string2Bytes(HdfsConstants.DOT_SNAPSHOT_DIR);
+ String HOT_STORAGE_POLICY_NAME = "HOT";
+ String WARM_STORAGE_POLICY_NAME = "WARM";
+ String COLD_STORAGE_POLICY_NAME = "COLD";
+ byte MEMORY_STORAGE_POLICY_ID = 15;
+ byte ALLSSD_STORAGE_POLICY_ID = 12;
+ byte ONESSD_STORAGE_POLICY_ID = 10;
+ byte HOT_STORAGE_POLICY_ID = 7;
+ byte WARM_STORAGE_POLICY_ID = 5;
+ byte COLD_STORAGE_POLICY_ID = 2;
+
/**
* Type of the node
*/
- static public enum NodeType {
+ enum NodeType {
NAME_NODE,
DATA_NODE,
- JOURNAL_NODE;
+ JOURNAL_NODE
}
/** Startup options for rolling upgrade. */
@@ -85,7 +146,7 @@ public final class HdfsServerConstants {
}
/** Startup options */
- static public enum StartupOption{
+ enum StartupOption{
FORMAT ("-format"),
CLUSTERID ("-clusterid"),
GENCLUSTERID ("-genclusterid"),
@@ -129,7 +190,7 @@ public final class HdfsServerConstants {
// Used only with recovery option
private int force = 0;
- private StartupOption(String arg) {this.name = arg;}
+ StartupOption(String arg) {this.name = arg;}
public String getName() {return name;}
public NamenodeRole toNodeRole() {
switch(this) {
@@ -213,21 +274,21 @@ public final class HdfsServerConstants {
}
// Timeouts for communicating with DataNode for streaming writes/reads
- public static final int READ_TIMEOUT = 60 * 1000;
- public static final int READ_TIMEOUT_EXTENSION = 5 * 1000;
- public static final int WRITE_TIMEOUT = 8 * 60 * 1000;
- public static final int WRITE_TIMEOUT_EXTENSION = 5 * 1000; //for write pipeline
+ int READ_TIMEOUT = 60 * 1000;
+ int READ_TIMEOUT_EXTENSION = 5 * 1000;
+ int WRITE_TIMEOUT = 8 * 60 * 1000;
+ int WRITE_TIMEOUT_EXTENSION = 5 * 1000; //for write pipeline
/**
* Defines the NameNode role.
*/
- static public enum NamenodeRole {
+ enum NamenodeRole {
NAMENODE ("NameNode"),
BACKUP ("Backup Node"),
CHECKPOINT("Checkpoint Node");
private String description = null;
- private NamenodeRole(String arg) {this.description = arg;}
+ NamenodeRole(String arg) {this.description = arg;}
@Override
public String toString() {
@@ -238,7 +299,7 @@ public final class HdfsServerConstants {
/**
* Block replica states, which it can go through while being constructed.
*/
- static public enum ReplicaState {
+ enum ReplicaState {
/** Replica is finalized. The state when replica is not modified. */
FINALIZED(0),
/** Replica is being written to. */
@@ -252,7 +313,7 @@ public final class HdfsServerConstants {
private final int value;
- private ReplicaState(int v) {
+ ReplicaState(int v) {
value = v;
}
@@ -278,7 +339,7 @@ public final class HdfsServerConstants {
/**
* States, which a block can go through while it is under construction.
*/
- static public enum BlockUCState {
+ enum BlockUCState {
/**
* Block construction completed.<br>
* The block has at least the configured minimal replication number
@@ -307,16 +368,16 @@ public final class HdfsServerConstants {
* {@link ReplicaState#FINALIZED}
* replicas has yet been reported by data-nodes themselves.
*/
- COMMITTED;
+ COMMITTED
}
- public static final String NAMENODE_LEASE_HOLDER = "HDFS_NameNode";
- public static final long NAMENODE_LEASE_RECHECK_INTERVAL = 2000;
+ String NAMENODE_LEASE_HOLDER = "HDFS_NameNode";
+ long NAMENODE_LEASE_RECHECK_INTERVAL = 2000;
- public static final String CRYPTO_XATTR_ENCRYPTION_ZONE =
+ String CRYPTO_XATTR_ENCRYPTION_ZONE =
"raw.hdfs.crypto.encryption.zone";
- public static final String CRYPTO_XATTR_FILE_ENCRYPTION_INFO =
+ String CRYPTO_XATTR_FILE_ENCRYPTION_INFO =
"raw.hdfs.crypto.file.encryption.info";
- public static final String SECURITY_XATTR_UNREADABLE_BY_SUPERUSER =
+ String SECURITY_XATTR_UNREADABLE_BY_SUPERUSER =
"security.hdfs.unreadable.by.superuser";
}
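(The headline change in this file: HdfsServerConstants goes from a final class with a private constructor to an interface, so every field is implicitly public static final and the nested enums are implicitly static — which is why the + lines can drop those modifiers without touching any call site. A toy illustration of that equivalence, with a made-up interface standing in for HdfsServerConstants.)

// Toy illustration of the class-to-interface move above. In an interface,
// the modifiers below are implicit, so qualified access from callers is
// byte-for-byte identical to the old final-class form.
interface ToyServerConstants {
  long INVALID_TXID = -12345;   // implicitly public static final
  int READ_TIMEOUT = 60 * 1000; // same constant, same access pattern

  enum NodeType { NAME_NODE, DATA_NODE, JOURNAL_NODE } // implicitly static
}

final class ToyCaller {
  public static void main(String[] args) {
    // Call sites are unchanged relative to the old final class.
    System.out.println(ToyServerConstants.INVALID_TXID);
    System.out.println(ToyServerConstants.NodeType.JOURNAL_NODE);
  }
}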
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a19fbc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java
index a3f82ff..e54a11d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java
@@ -26,7 +26,6 @@ import java.util.Properties;
import java.util.SortedSet;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.LayoutFeature;
@@ -197,8 +196,8 @@ public class StorageInfo {
}
public int getServiceLayoutVersion() {
- return storageType == NodeType.DATA_NODE ? HdfsConstants.DATANODE_LAYOUT_VERSION
- : HdfsConstants.NAMENODE_LAYOUT_VERSION;
+ return storageType == NodeType.DATA_NODE ? HdfsServerConstants.DATANODE_LAYOUT_VERSION
+ : HdfsServerConstants.NAMENODE_LAYOUT_VERSION;
}
public Map<Integer, SortedSet<LayoutFeature>> getServiceLayoutFeatureMap() {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a19fbc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java
index 94493aa..04700b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java
@@ -33,7 +33,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.DataChecksum;
@@ -85,7 +85,7 @@ public class BlockMetadataHeader {
DataInputStream in = null;
try {
in = new DataInputStream(new BufferedInputStream(
- new FileInputStream(metaFile), HdfsConstants.IO_FILE_BUFFER_SIZE));
+ new FileInputStream(metaFile), HdfsServerConstants.IO_FILE_BUFFER_SIZE));
return readDataChecksum(in, metaFile);
} finally {
IOUtils.closeStream(in);